From 55e303ae13a4cf49d70f2294092726f2fffb9ef2 Mon Sep 17 00:00:00 2001
From: Apple
Date: Sat, 25 Oct 2003 00:08:57 +0000
Subject: [PATCH] xnu-517.tar.gz

---
 EXTERNAL_HEADERS/bsd/i386/ansi.h | 1 +
 EXTERNAL_HEADERS/bsd/ppc/ansi.h | 1 +
 EXTERNAL_HEADERS/mach-o/kld.h | 23 +-
 EXTERNAL_HEADERS/mach-o/loader.h | 2 +-
 Makefile | 3 +
 bsd/conf/MASTER | 6 +
 bsd/conf/MASTER.ppc | 2 +-
 bsd/conf/Makefile | 3 +-
 bsd/conf/Makefile.template | 9 +-
 bsd/conf/files | 15 +
 bsd/conf/files.i386 | 1 +
 bsd/conf/files.ppc | 4 +
 bsd/conf/param.c | 15 +-
 bsd/conf/version.major | 2 +-
 bsd/conf/version.minor | 2 +-
 bsd/conf/version.variant | 1 +
 bsd/crypto/blowfish/bf_enc.c | 127 +-
 bsd/crypto/blowfish/bf_pi.h | 2 +-
 bsd/crypto/blowfish/bf_skey.c | 10 +-
 bsd/crypto/blowfish/blowfish.h | 10 +-
 bsd/crypto/des/des.h | 42 +-
 bsd/crypto/des/des_ecb.c | 236 +-
 bsd/crypto/des/des_enc.c | 294 +
 bsd/crypto/des/des_locl.h | 223 +-
 bsd/crypto/des/des_setkey.c | 140 +-
 bsd/crypto/des/spr.h | 301 +-
 bsd/crypto/sha2/sha2.c | 8 +-
 bsd/dev/disk.h | 126 +-
 bsd/dev/disk_label.h | 88 +-
 bsd/dev/i386/conf.c | 6 +-
 bsd/dev/i386/km.c | 4 +-
 bsd/dev/i386/stubs.c | 6 +-
 bsd/dev/i386/sysctl.c | 138 +
 bsd/dev/i386/unix_signal.c | 16 +-
 bsd/dev/ldd.h | 2 +-
 bsd/dev/memdev.c | 578 ++
 bsd/dev/memdev.h | 17 +
 bsd/dev/ppc/chud/chud_bsd_callback.c | 94 +
 bsd/dev/ppc/chud/chud_process.c | 57 +
 bsd/dev/ppc/conf.c | 7 +-
 bsd/dev/ppc/kern_machdep.c | 50 +-
 bsd/dev/ppc/km.c | 2 +-
 bsd/dev/ppc/mem.c | 35 +-
 bsd/dev/ppc/stubs.c | 32 -
 iokit/IOKit/adb/adb.h => bsd/dev/ppc/sysctl.c | 16 +-
 bsd/dev/ppc/systemcalls.c | 86 +-
 bsd/dev/ppc/unix_signal.c | 402 +-
 bsd/dev/ppc/unix_startup.c | 10 +-
 bsd/dev/random/YarrowCoreLib/src/prng.c | 17 +-
 bsd/dev/random/randomdev.c | 31 +-
 bsd/dev/vn/shadow.c | 4 +-
 bsd/dev/vn/vn.c | 63 +-
 bsd/hfs/hfs.h | 160 +-
 bsd/hfs/hfs_attrlist.c | 215 +-
 bsd/hfs/hfs_attrlist.h | 2 +-
 bsd/hfs/hfs_btreeio.c | 40 +-
 bsd/hfs/hfs_catalog.c | 412 +-
 bsd/hfs/hfs_catalog.h | 64 +-
 bsd/hfs/hfs_chash.c | 18 +-
 bsd/hfs/hfs_cnode.c | 104 +-
 bsd/hfs/hfs_cnode.h | 66 +-
 bsd/hfs/hfs_encodinghint.c | 76 +-
 bsd/hfs/hfs_encodings.c | 6 +-
 bsd/hfs/hfs_encodings.h | 6 +-
 bsd/hfs/hfs_endian.c | 22 +
 bsd/hfs/hfs_endian.h | 2 +-
 bsd/hfs/hfs_format.h | 41 +-
 bsd/hfs/hfs_hotfiles.c | 2156 ++++
 bsd/hfs/hfs_hotfiles.h | 124 +
 bsd/hfs/hfs_link.c | 28 +-
 bsd/hfs/hfs_lookup.c | 136 +-
 bsd/hfs/hfs_mount.h | 13 +-
 bsd/hfs/hfs_notification.c | 71 +
 bsd/hfs/hfs_quota.c | 16 +-
 bsd/hfs/hfs_readwrite.c | 1120 +-
 bsd/hfs/hfs_search.c | 267 +-
 bsd/hfs/hfs_vfsops.c | 902 +-
 bsd/hfs/hfs_vfsutils.c | 762 +-
 bsd/hfs/hfs_vnops.c | 1258 ++-
 bsd/hfs/hfscommon/BTree/BTree.c | 190 +-
 bsd/hfs/hfscommon/BTree/BTreeAllocate.c | 6 +-
 bsd/hfs/hfscommon/BTree/BTreeMiscOps.c | 4 +-
 bsd/hfs/hfscommon/BTree/BTreeNodeReserve.c | 287 +
 bsd/hfs/hfscommon/BTree/BTreeScanner.c | 18 +
 bsd/hfs/hfscommon/Catalog/CatalogIterators.c | 2 +-
 bsd/hfs/hfscommon/Catalog/FileIDsServices.c | 5 +-
 bsd/hfs/hfscommon/Misc/FileExtentMapping.c | 374 +-
 bsd/hfs/hfscommon/Misc/VolumeAllocation.c | 347 +-
 bsd/hfs/hfscommon/headers/BTreesInternal.h | 29 +-
 bsd/hfs/hfscommon/headers/BTreesPrivate.h | 10 +-
 bsd/hfs/hfscommon/headers/CatalogPrivate.h | 2 +
 bsd/hfs/hfscommon/headers/FileMgrInternal.h | 18 +-
 bsd/i386/ucontext.h | 7 +
 bsd/i386/vmparam.h | 6 +-
 bsd/if/ppc/if_en.c | 1132 --
 bsd/if/ppc/if_en.h | 63 -
 bsd/if/ppc/mace.c | 261 -
 bsd/if/ppc/mace.h | 371 -
 bsd/isofs/cd9660/cd9660_bmap.c | 40 +-
 bsd/isofs/cd9660/cd9660_lookup.c | 111 +-
 bsd/isofs/cd9660/cd9660_mount.h | 6 +-
 bsd/isofs/cd9660/cd9660_node.c | 9 +-
 bsd/isofs/cd9660/cd9660_node.h | 9 +-
 bsd/isofs/cd9660/cd9660_rrip.c | 3 +-
 bsd/isofs/cd9660/cd9660_util.c | 77 +-
 bsd/isofs/cd9660/cd9660_vfsops.c | 367 +-
 bsd/isofs/cd9660/cd9660_vnops.c | 496 +-
 bsd/isofs/cd9660/iso.h | 82 +-
 bsd/kern/bsd_init.c | 114 +-
 bsd/kern/bsd_stubs.c | 7 +-
 bsd/kern/init_sysent.c | 171 +-
 bsd/kern/kdebug.c | 93 +-
 bsd/kern/kern_aio.c | 2180 ++++
 bsd/kern/kern_audit.c | 1592 +++
 bsd/kern/kern_bsm_audit.c | 756 ++
 bsd/kern/kern_bsm_klib.c | 756 ++
 bsd/kern/kern_bsm_token.c | 1344 +++
 bsd/kern/kern_clock.c | 12 +-
 bsd/kern/kern_control.c | 17 +-
 bsd/kern/kern_core.c | 68 +-
 bsd/kern/kern_descrip.c | 164 +-
 bsd/kern/kern_event.c | 1098 +-
 bsd/kern/kern_exec.c | 516 +-
 bsd/kern/kern_exit.c | 106 +-
 bsd/kern/kern_fork.c | 45 +-
 bsd/kern/kern_ktrace.c | 5 +-
 bsd/kern/kern_lock.c | 2 +-
 bsd/kern/kern_malloc.c | 16 +-
 bsd/kern/kern_mib.c | 8 +-
 bsd/kern/kern_mman.c | 77 +-
 bsd/kern/kern_newsysctl.c | 15 +-
 bsd/kern/kern_panicinfo.c | 4 +-
 bsd/kern/kern_pcsamples.c | 4 +-
 bsd/kern/kern_proc.c | 21 +
 bsd/kern/kern_prot.c | 85 +-
 bsd/kern/kern_resource.c | 76 +-
 bsd/kern/kern_shutdown.c | 18 +-
 bsd/kern/kern_sig.c | 134 +-
 bsd/kern/kern_subr.c | 63 +-
 bsd/kern/kern_symfile.c | 16 +-
 bsd/kern/kern_synch.c | 59 +-
 bsd/kern/kern_sysctl.c | 625 +-
 bsd/kern/kern_time.c | 61 +-
 bsd/kern/kern_xxx.c | 9 +-
 bsd/kern/mach_fat.c | 72 +-
 bsd/kern/mach_header.c | 2 +-
 bsd/kern/mach_loader.c | 192 +-
 bsd/kern/mach_loader.h | 24 +-
 bsd/kern/mach_process.c | 23 +-
 bsd/kern/netboot.c | 6 +-
 bsd/kern/posix_sem.c | 28 +-
 bsd/kern/posix_shm.c | 45 +-
 bsd/kern/qsort.c | 14 +-
 bsd/kern/subr_log.c | 2 +-
 bsd/kern/subr_prf.c | 15 +-
 bsd/kern/subr_prof.c | 23 +-
 bsd/kern/sys_generic.c | 72 +-
 bsd/kern/sys_socket.c | 15 +-
 bsd/kern/syscalls.c | 96 +-
 bsd/kern/sysctl_init.c | 52 +-
 bsd/kern/sysv_msg.c | 8 +-
 bsd/kern/sysv_sem.c | 61 +-
 bsd/kern/sysv_shm.c | 62 +-
 bsd/kern/tty_pty.c | 15 +-
 bsd/kern/ubc_subr.c | 106 +-
 bsd/kern/uipc_mbuf.c | 231 +-
 bsd/kern/uipc_mbuf2.c | 2 +-
 bsd/kern/uipc_socket.c | 369 +-
 bsd/kern/uipc_socket2.c | 20 +-
 bsd/kern/uipc_syscalls.c | 114 +-
 bsd/kern/uipc_usrreq.c | 12 +
 bsd/man/man2/Makefile | 5 +
 bsd/man/man2/chflags.2 | 2 +-
 bsd/man/man2/chmod.2 | 2 +-
 bsd/man/man2/chown.2 | 4 +-
 bsd/man/man2/connect.2 | 5 +-
 bsd/man/man2/execve.2 | 8 +-
 bsd/man/man2/fork.2 | 2 +-
 bsd/man/man2/fsctl.2 | 135 +
 bsd/man/man2/fsync.2 | 4 +-
 bsd/man/man2/getdirentries.2 | 30 +-
 bsd/man/man2/getfsstat.2 | 17 +-
 bsd/man/man2/getsockopt.2 | 9 +-
 bsd/man/man2/intro.2 | 43 +-
 bsd/man/man2/kqueue.2 | 499 +
 bsd/man/man2/mmap.2 | 4 -
 bsd/man/man2/mount.2 | 139 +-
 bsd/man/man2/msync.2 | 23 +-
 bsd/man/man2/munmap.2 | 1 +
 bsd/man/man2/ptrace.2 | 1 +
 bsd/man/man2/select.2 | 2 +
 bsd/man/man2/semctl.2 | 202 +
 bsd/man/man2/semget.2 | 146 +
 bsd/man/man2/semop.2 | 289 +
 bsd/man/man2/setpgid.2 | 2 +-
 bsd/man/man2/shmat.2 | 7 +-
 bsd/man/man2/shmctl.2 | 9 +-
 bsd/man/man2/sigaction.2 | 423 +-
 bsd/man/man2/socket.2 | 2 +-
 bsd/man/man2/statfs.2 | 15 +-
 bsd/man/man2/wait.2 | 6 +-
 bsd/man/man4/Makefile | 1 -
 bsd/man/man4/icmp.4 | 2 +-
 bsd/man/man4/scsi.4 | 156 -
 bsd/man/man5/core.5 | 12 +-
 bsd/man/man5/dir.5 | 120 +-
 bsd/man/man9/Makefile | 1 +
 bsd/man/man9/intro.9 | 109 +
 bsd/miscfs/devfs/devfs_tree.c | 11 +-
 bsd/miscfs/devfs/devfs_vfsops.c | 13 +-
 bsd/miscfs/devfs/devfs_vnops.c | 40 +-
 bsd/miscfs/devfs/devfsdefs.h | 14 +-
 bsd/miscfs/fdesc/fdesc.h | 2 +-
 bsd/miscfs/fdesc/fdesc_vfsops.c | 4 +-
 bsd/miscfs/fdesc/fdesc_vnops.c | 4 +-
 bsd/miscfs/fifofs/fifo_vnops.c | 14 +-
 bsd/miscfs/specfs/spec_vnops.c | 57 +-
 bsd/miscfs/specfs/specdev.h | 4 +-
 bsd/miscfs/synthfs/synthfs_util.c | 32 +-
 bsd/miscfs/synthfs/synthfs_vfsops.c | 4 +-
 bsd/miscfs/synthfs/synthfs_vnops.c | 11 +-
 bsd/miscfs/union/union_vfsops.c | 9 +-
 bsd/miscfs/union/union_vnops.c | 8 +-
 bsd/miscfs/volfs/volfs.h | 3 +
 bsd/miscfs/volfs/volfs_vfsops.c | 15 +-
 bsd/miscfs/volfs/volfs_vnops.c | 461 +-
 bsd/net/Makefile | 6 +-
 bsd/net/bpf.c | 200 +-
 bsd/net/bpf.h | 1 +
 bsd/net/dlil.c | 278 +-
 bsd/net/dlil.h | 155 +
 bsd/net/ether_if_module.c | 30 +-
 bsd/net/ether_inet6_pr_module.c | 22 +-
 bsd/net/ether_inet_pr_module.c | 33 +-
 bsd/net/ethernet.h | 2 +-
 bsd/net/firewire.h | 101 +
 bsd/net/if.c | 142 +-
 bsd/net/if.h | 1 +
 bsd/net/if_arp.h | 2 +
 bsd/net/if_atm.h | 2 -
 bsd/net/if_ethersubr.c | 3 -
 bsd/net/if_faith.c | 58 +-
 bsd/net/if_gif.c | 45 +-
 bsd/net/if_llc.h | 4 +-
 bsd/net/if_loop.c | 93 +-
 bsd/net/if_stf.c | 70 +-
 bsd/net/if_var.h | 4 +-
 bsd/net/ndrv.c | 8 +-
 bsd/net/netisr.h | 2 +-
 bsd/net/pfkeyv2.h | 16 +-
 bsd/net/route.c | 32 +-
 bsd/net/route.h | 5 +-
 bsd/net/rtsock.c | 51 +-
 bsd/net/zlib.c | 71 +-
 bsd/netat/adsp_Close.c | 2 +-
 bsd/netat/adsp_RxData.c | 2 +-
 bsd/netat/adsp_Timer.c | 4 +-
 bsd/netat/asp_proto.c | 119 +-
 bsd/netat/at_aarp.h | 2 +-
 bsd/netat/at_snmp.h | 2 +-
 bsd/netat/at_var.h | 3 +
 bsd/netat/atp.h | 2 +-
 bsd/netat/atp_read.c | 10 +-
 bsd/netat/atp_write.c | 384 +-
 bsd/netat/aurp_aurpd.c | 2 +-
 bsd/netat/aurp_ri.c | 3 +
 bsd/netat/ddp_aarp.c | 23 +-
 bsd/netat/ddp_brt.c | 2 +
 bsd/netat/ddp_lap.c | 41 +-
 bsd/netat/ddp_r_rtmp.c | 4 +-
 bsd/netat/ddp_r_zip.c | 14 +-
 bsd/netat/ddp_usrreq.c | 5 +-
 bsd/netat/drv_dep.c | 2 +-
 bsd/netat/sys_dep.c | 76 +-
 bsd/netat/sys_glue.c | 23 +-
 bsd/netat/sysglue.h | 1 -
 bsd/netinet/dhcp_options.c | 4 +-
 bsd/netinet/icmp6.h | 2 +-
 bsd/netinet/icmp_var.h | 4 +-
 bsd/netinet/if_ether.c | 2 +-
 bsd/netinet/igmp.c | 44 +-
 bsd/netinet/igmp_var.h | 2 +-
 bsd/netinet/in.c | 63 +-
 bsd/netinet/in.h | 1 +
 bsd/netinet/in_bootp.c | 10 +-
 bsd/netinet/in_pcb.c | 18 +-
 bsd/netinet/in_pcb.h | 11 +-
 bsd/netinet/in_rmx.c | 3 +
 bsd/netinet/in_var.h | 2 +-
 bsd/netinet/ip_divert.c | 2 +
 bsd/netinet/ip_flow.c | 2 +-
 bsd/netinet/ip_icmp.c | 11 +-
 bsd/netinet/ip_input.c | 30 +-
 bsd/netinet/ip_output.c | 419 +-
 bsd/netinet/ip_var.h | 5 +-
 bsd/netinet/raw_ip.c | 29 +-
 bsd/netinet/tcp.h | 1 +
 bsd/netinet/tcp_debug.c | 7 +-
 bsd/netinet/tcp_input.c | 49 +-
 bsd/netinet/tcp_output.c | 108 +-
 bsd/netinet/tcp_subr.c | 37 +-
 bsd/netinet/tcp_timer.c | 15 +-
 bsd/netinet/tcp_timer.h | 5 +
 bsd/netinet/tcp_usrreq.c | 52 +-
 bsd/netinet/tcp_var.h | 118 +-
 bsd/netinet/udp_usrreq.c | 84 +-
 bsd/netinet6/Makefile | 2 +-
 bsd/netinet6/ah6.h | 2 +-
 bsd/netinet6/ah_input.c | 38 +-
 bsd/netinet6/dest6.c | 4 +-
 bsd/netinet6/esp6.h | 2 +-
 bsd/netinet6/esp_core.c | 45 +-
 bsd/netinet6/esp_input.c | 60 +-
 bsd/netinet6/esp_output.c | 112 +-
 bsd/netinet6/frag6.c | 4 +-
 bsd/netinet6/icmp6.c | 46 +-
 bsd/netinet6/in6.c | 193 +-
 bsd/netinet6/in6.h | 6 +-
 bsd/netinet6/in6_gif.c | 74 +-
 bsd/netinet6/in6_ifattach.c | 130 +-
 bsd/netinet6/in6_ifattach.h | 2 +-
 bsd/netinet6/in6_pcb.c | 105 +-
 bsd/netinet6/in6_pcb.h | 9 +-
 bsd/netinet6/in6_proto.c | 36 +-
 bsd/netinet6/in6_src.c | 5 +-
 bsd/netinet6/in6_var.h | 18 +-
 bsd/netinet6/ip6_forward.c | 18 +-
 bsd/netinet6/ip6_fw.h | 9 +-
 bsd/netinet6/ip6_input.c | 22 +-
 bsd/netinet6/ip6_mroute.c | 32 +-
 bsd/netinet6/ip6_output.c | 233 +-
 bsd/netinet6/ip6_var.h | 8 +-
 bsd/netinet6/ip6protosw.h | 3 +-
 bsd/netinet6/ipsec.c | 177 +-
 bsd/netinet6/ipsec.h | 9 +-
 bsd/netinet6/mld6.c | 5 +
 bsd/netinet6/nd6.c | 194 +-
 bsd/netinet6/nd6.h | 8 +-
 bsd/netinet6/nd6_nbr.c | 61 +-
 bsd/netinet6/nd6_rtr.c | 27 +-
 bsd/netinet6/raw_ip6.c | 4 +-
 bsd/netinet6/route6.c | 4 +-
 bsd/netinet6/scope6.c | 4 +-
 bsd/netinet6/tcp6_var.h | 2 +-
 bsd/netinet6/udp6_usrreq.c | 10 +-
 bsd/netinet6/udp6_var.h | 2 +-
 bsd/netkey/key.c | 1139 +-
 bsd/netkey/key_debug.c | 17 +-
 bsd/netkey/key_debug.h | 3 +-
 bsd/netkey/key_var.h | 27 +-
 bsd/netkey/keydb.h | 4 +
 bsd/nfs/Makefile | 1 +
 bsd/nfs/krpc_subr.c | 7 +
 bsd/nfs/nfs.h | 136 +-
 bsd/nfs/nfs_bio.c | 2611 +++--
 bsd/nfs/nfs_boot.c | 9 +-
 bsd/nfs/nfs_lock.c | 512 +
 bsd/nfs/nfs_lock.h | 102 +
 bsd/nfs/nfs_node.c | 141 +-
 bsd/nfs/nfs_nqlease.c | 112 +-
 bsd/nfs/nfs_serv.c | 142 +-
 bsd/nfs/nfs_socket.c | 757 +-
 bsd/nfs/nfs_subs.c | 152 +-
 bsd/nfs/nfs_syscalls.c | 507 +-
 bsd/nfs/nfs_vfsops.c | 209 +-
 bsd/nfs/nfs_vnops.c | 1694 +--
 bsd/nfs/nfsm_subs.h | 25 +-
 bsd/nfs/nfsmount.h | 18 +-
 bsd/nfs/nfsnode.h | 128 +-
 bsd/nfs/nfsproto.h | 5 +-
 bsd/nfs/nlminfo.h | 52 +
 bsd/ppc/param.h | 2 +-
 bsd/ppc/ucontext.h | 10 +
 bsd/ppc/vmparam.h | 2 +-
 bsd/sys/Makefile | 9 +-
 bsd/sys/aio.h | 230 +
 bsd/sys/aio_kern.h | 80 +
 bsd/sys/attr.h | 122 +-
 bsd/sys/audit.h | 211 +
 bsd/sys/bsm_kevents.h | 403 +
 bsd/sys/bsm_klib.h | 46 +
 bsd/sys/bsm_token.h | 320 +
 bsd/sys/bsm_token.save.h | 320 +
 bsd/sys/bsm_uevents.h | 79 +
 bsd/sys/buf.h | 2 +
 bsd/sys/cdefs.h | 20 +-
 bsd/sys/conf.h | 10 +-
 bsd/sys/disk.h | 75 +-
 bsd/sys/errno.h | 8 +-
 bsd/sys/event.h | 242 +
 bsd/{ufs/mfs/mfsiom.h => sys/eventvar.h} | 54 +-
 bsd/sys/fcntl.h | 6 +
 bsd/sys/file.h | 13 +
 bsd/sys/filedesc.h | 9 +-
 bsd/sys/kdebug.h | 12 +-
 bsd/sys/kern_audit.h | 288 +
 bsd/sys/lock.h | 6 -
 bsd/sys/lockf.h | 1 +
 bsd/sys/malloc.h | 11 +-
 bsd/sys/mbuf.h | 2 +-
 bsd/sys/mman.h | 2 +-
 bsd/sys/mount.h | 74 +-
 bsd/sys/namei.h | 21 +-
 bsd/sys/param.h | 4 +-
 bsd/sys/proc.h | 38 +-
 bsd/sys/select.h | 32 +-
 bsd/sys/sem.h | 2 +-
 bsd/sys/semaphore.h | 4 +
 bsd/sys/shm.h | 1 +
 bsd/sys/signal.h | 24 +-
 bsd/sys/signalvar.h | 1 +
 bsd/sys/socket.h | 38 +-
 bsd/sys/socketvar.h | 9 +-
 bsd/sys/stat.h | 18 +-
 bsd/sys/syscall.h | 44 +-
 bsd/sys/sysctl.h | 44 +-
 bsd/sys/syslimits.h | 6 +-
 bsd/sys/syslog.h | 2 +
 bsd/sys/time.h | 2 +
 bsd/sys/types.h | 1 +
 bsd/sys/ubc.h | 6 +-
 bsd/sys/ucontext.h | 12 +
 bsd/sys/ucred.h | 13 +
 bsd/sys/uio.h | 10 +-
 bsd/sys/unistd.h | 32 +-
 bsd/sys/user.h | 6 +-
 bsd/sys/utfconv.h | 1 +
 bsd/sys/vnioctl.h | 2 +-
 bsd/sys/vnode.h | 20 +-
 bsd/sys/vnode_if.h | 36 +
 bsd/ufs/ffs/ffs_alloc.c | 137 +-
 bsd/ufs/ffs/ffs_balloc.c | 60 +-
 bsd/ufs/ffs/ffs_extern.h | 2 +-
 bsd/ufs/ffs/ffs_inode.c | 11 +-
 bsd/ufs/ffs/ffs_vfsops.c | 176 +-
 bsd/ufs/ffs/ffs_vnops.c | 8 +-
 bsd/ufs/ffs/fs.h | 27 +-
 bsd/ufs/mfs/mfs_vfsops.c | 349 -
 bsd/ufs/mfs/mfs_vnops.c | 375 -
 bsd/ufs/mfs/mfsnode.h | 119 -
 bsd/ufs/ufs/inode.h | 4 +
 bsd/ufs/ufs/ufs_attrlist.c | 811 ++
 bsd/ufs/ufs/ufs_byte_order.c | 22 +-
 bsd/ufs/ufs/ufs_extern.h | 5 +-
 bsd/ufs/ufs/ufs_lookup.c | 49 +-
 bsd/ufs/ufs/ufs_readwrite.c | 67 +-
 bsd/ufs/ufs/ufs_vfsops.c | 12 +-
 bsd/ufs/ufs/ufs_vnops.c | 271 +-
 bsd/uxkern/ux_exception.c | 8 +-
 bsd/vfs/vfs_bio.c | 148 +-
 bsd/vfs/vfs_cache.c | 336 +-
 bsd/vfs/vfs_cluster.c | 2681 +++--
 bsd/vfs/vfs_conf.c | 6 +-
 bsd/vfs/vfs_init.c | 158 +-
 bsd/vfs/vfs_journal.c | 680 +-
 bsd/vfs/vfs_journal.h | 7 +-
 bsd/vfs/vfs_lookup.c | 226 +-
 bsd/vfs/vfs_subr.c | 835 +-
 bsd/vfs/vfs_support.c | 11 +-
 bsd/vfs/vfs_syscalls.c | 972 +-
 bsd/vfs/vfs_utfconv.c | 96 +-
 bsd/vfs/vfs_vnops.c | 89 +-
 bsd/vfs/vnode_if.c | 36 +-
 bsd/vfs/vnode_if.sh | 16 +-
 bsd/vfs/vnode_if.src | 22 +-
 bsd/vm/dp_backing_file.c | 61 +
 bsd/vm/vm_unix.c | 225 +-
 bsd/vm/vnode_pager.c | 21 +-
 config/BSDKernel.exports | 3790 +++++++
 config/BSDKernel.i386.exports | 0
 config/BSDKernel.ppc.exports | 489 +
 config/IOKit.exports | 2388 +++++
 config/IOKit.i386.exports | 0
 config/IOKit.ppc.exports | 184 +
 config/Libkern.exports | 746 ++
 config/Libkern.i386.exports | 0
 config/Libkern.ppc.exports | 0
 config/Mach.exports | 2070 ++++
 config/Mach.i386.exports | 0
 config/Mach.ppc.exports | 581 ++
 config/Makefile | 71 +-
 config/System.kext/{Contents => }/Info.plist | 8 +-
 .../AppleNMI.kext}/Info.plist | 6 +-
 .../ApplePlatformFamily.kext}/Info.plist | 6 +-
 .../BSDKernel.kext}/Info.plist | 14 +-
 .../BSDKernel6.0.kext}/Info.plist | 8 +-
 .../System.kext/PlugIns/IOKit.kext/Info.plist | 32 +
 .../IOKit6.0.kext}/Info.plist | 8 +-
 .../IONVRAMFamily.kext}/Info.plist | 6 +-
 .../IOSystemManagement.kext}/Info.plist | 6 +-
 .../PlugIns/Libkern.kext/Info.plist | 32 +
 .../Libkern6.0.kext}/Info.plist | 8 +-
 .../System.kext/PlugIns/Mach.kext/Info.plist | 32 +
 .../Mach6.0.kext}/Info.plist | 8 +-
 .../PlugIns/System6.0.kext/Info.plist | 32 +
 config/System6.0.exports | 9236 +++++++++++++++++
 config/System6.0.i386.exports | 412 +
 config/System6.0.ppc.exports | 1256 +++
 .../drvAppleI386Generic/AppleI386CPU.cpp | 143 -
 .../drvAppleI386Generic/AppleI386CPU.h | 70 -
 .../AppleI386PlatformExpert.cpp | 204 -
 .../AppleIntelClassicPIC.h | 166 -
 .../drvAppleIntelClassicPIC/PIC8259.cpp | 322 -
 .../platform/drvAppleMacIO/AppleMacIO.cpp | 21 +-
 .../Drivers/platform/drvAppleNMI/AppleNMI.cpp | 12 +-
 .../drvApplePMU/IOPMUADBController.cpp | 452 -
 .../platform/drvApplePMU/IOPMUADBController.h | 117 -
 .../ApplePlatformExpert.cpp | 2 +-
 .../RootDomainUserClient.cpp | 140 -
 iokit/Families/IOADBBus/IOADBBusPriv.h | 156 -
 iokit/Families/IOADBBus/IOADBController.cpp | 806 --
 .../IOADBBus/IOADBControllerUserClient.cpp | 128 -
 .../IOADBBus/IOADBControllerUserClient.h | 71 -
 iokit/Families/IOADBBus/IOADBDevice.cpp | 193 -
 iokit/IOKit/IOBSD.h | 4 -
 iokit/IOKit/IOBufferMemoryDescriptor.h | 12 +-
 iokit/IOKit/IOCatalogue.h | 3 +-
 iokit/IOKit/IODeviceTreeSupport.h | 2 +-
 iokit/IOKit/IOKitDebug.h | 13 +-
 iokit/IOKit/IOKitKeys.h | 3 +
 .../IOKitKeysPrivate.h} | 44 +-
 iokit/IOKit/IOKitServer.h | 13 +-
 iokit/IOKit/IOLib.h | 82 +-
 iokit/IOKit/IOLocks.h | 2 +-
 iokit/IOKit/IOMapper.h | 131 +
 iokit/IOKit/IOMemoryDescriptor.h | 200 +-
 iokit/IOKit/IOMessage.h | 3 +
 iokit/IOKit/IOPMEventSource.h | 59 +
 iokit/IOKit/IOPlatformExpert.h | 8 +-
 iokit/IOKit/IOReturn.h | 8 +-
 iokit/IOKit/IOService.h | 56 +-
 iokit/IOKit/IOSharedLock.h | 8 +-
 iokit/IOKit/IOTypes.h | 14 +-
 iokit/IOKit/IOUserClient.h | 2 +-
 iokit/IOKit/Makefile | 6 +-
 iokit/IOKit/adb/IOADBBus.h | 148 -
 iokit/IOKit/adb/IOADBController.h | 117 -
 iokit/IOKit/adb/IOADBDevice.h | 64 -
 iokit/IOKit/adb/Makefile | 36 -
 iokit/IOKit/i386/IOSharedLockImp.h | 34 +-
 iokit/IOKit/pci/IOPCIDevice.h | 4 +-
 iokit/IOKit/ppc/IODBDMA.h | 2 +-
 iokit/IOKit/ppc/IOSharedLockImp.h | 150 +-
 iokit/IOKit/pwr_mgt/IOPM.h | 13 +
 iokit/IOKit/pwr_mgt/IOPMPrivate.h | 2 +-
 iokit/IOKit/pwr_mgt/RootDomain.h | 8 +-
 iokit/IOKit/system.h | 2 +
 iokit/Kernel/IOBufferMemoryDescriptor.cpp | 139 +-
 iokit/Kernel/IOCPU.cpp | 5 +-
 iokit/Kernel/IOCatalogue.cpp | 667 +-
 iokit/Kernel/IOCommandGate.cpp | 24 +-
 iokit/Kernel/IOConditionLock.cpp | 6 +-
 iokit/Kernel/IODataQueue.cpp | 4 +-
 iokit/Kernel/IODeviceMemory.cpp | 7 -
 iokit/Kernel/IODeviceTreeSupport.cpp | 16 +-
 iokit/Kernel/IOEventSource.cpp | 2 +-
 iokit/Kernel/IOFilterInterruptEventSource.cpp | 12 +-
 iokit/Kernel/IOInterruptEventSource.cpp | 8 +-
 iokit/Kernel/IOKitDebug.cpp | 57 +-
 iokit/Kernel/IOLib.c | 147 +-
 iokit/Kernel/IOMapper.cpp | 389 +
 iokit/Kernel/IOMemoryCursor.cpp | 76 +-
 iokit/Kernel/IOMemoryDescriptor.cpp | 1709 +--
 iokit/Kernel/IOMultiMemoryDescriptor.cpp | 19 +-
 iokit/Kernel/IONVRAM.cpp | 4 +-
 iokit/Kernel/IOPMPowerStateQueue.cpp | 101 +
 iokit/Kernel/IOPMPowerStateQueue.h | 71 +
 iokit/Kernel/IOPMchangeNoteList.cpp | 15 +
 .../IOPMrootDomain.cpp} | 361 +-
 iokit/Kernel/IOPlatformExpert.cpp | 87 +-
 iokit/Kernel/IORangeAllocator.cpp | 8 +-
 iokit/Kernel/IORegistryEntry.cpp | 215 +-
 iokit/Kernel/IOService.cpp | 148 +-
 iokit/Kernel/IOServicePM.cpp | 2608 +++--
 iokit/Kernel/IOServicePrivate.h | 3 +
 iokit/Kernel/IOStartIOKit.cpp | 71 +-
 iokit/Kernel/IOSyncer.cpp | 5 +-
 iokit/Kernel/IOTimerEventSource.cpp | 2 +-
 iokit/Kernel/IOUserClient.cpp | 411 +-
 iokit/Kernel/IOWorkLoop.cpp | 12 +-
 iokit/Kernel/RootDomainUserClient.cpp | 109 +
 .../RootDomainUserClient.h | 12 +-
 iokit/KernelConfigTables.cpp | 121 +-
 iokit/bsddev/IOKitBSDInit.cpp | 229 +-
 iokit/conf/MASTER.i386 | 2 +-
 iokit/conf/MASTER.ppc | 2 +-
 iokit/conf/Makefile | 3 +-
 iokit/conf/Makefile.template | 7 +-
 iokit/conf/files | 6 +-
 iokit/conf/files.i386 | 10 +-
 iokit/conf/files.ppc | 9 +-
 iokit/conf/version.major | 2 +-
 iokit/conf/version.minor | 2 +-
 iokit/conf/version.variant | 1 +
 iokit/include/DeviceTree.h | 208 -
 iokit/include/drivers/event_status_driver.h | 121 +-
 osfmk/.gdbinit => kgmacros | 251 +-
 libkern/Makefile | 2 +
 libkern/c++/OSArray.cpp | 14 +-
 libkern/c++/OSCollectionIterator.cpp | 2 +-
 libkern/c++/OSData.cpp | 50 +-
 libkern/c++/OSDictionary.cpp | 20 +-
 libkern/c++/OSMetaClass.cpp | 61 +-
 libkern/c++/OSNumber.cpp | 4 +-
 libkern/c++/OSObject.cpp | 127 +-
 libkern/c++/OSObjectAsm.s | 72 +
 libkern/c++/OSOrderedSet.cpp | 2 +-
 libkern/c++/OSSerialize.cpp | 8 +-
 libkern/c++/OSSet.cpp | 20 +-
 libkern/c++/OSString.cpp | 6 +-
 libkern/c++/OSSymbol.cpp | 117 +-
 libkern/c++/OSUnserializeXML.cpp | 607 +-
 libkern/c++/OSUnserializeXML.y | 549 +-
 .../Tests/TestSerialization/CustomInfo.xml | 15 -
 libkern/c++/Tests/TestSerialization/Makefile | 43 -
 .../TestSerialization/Makefile.postamble | 100 -
 .../Tests/TestSerialization/Makefile.preamble | 137 -
 .../c++/Tests/TestSerialization/PB.project | 17 -
 .../PBUserInfo/PBUserInfo_root.plist | 1 -
 .../test1.kmodproj/CustomInfo.xml | 24 -
 .../TestSerialization/test1.kmodproj/Makefile | 49 -
 .../test1.kmodproj/Makefile.postamble | 100 -
 .../test1.kmodproj/Makefile.preamble | 137 -
 .../test1.kmodproj/PB.project | 25 -
 .../test1/test1.pbproj/project.pbxproj | 260 +
 .../{test1.kmodproj => test1}/test1_main.cpp | 0
 .../test2.kmodproj/CustomInfo.xml | 24 -
 .../TestSerialization/test2.kmodproj/Makefile | 47 -
 .../test2.kmodproj/Makefile.postamble | 100 -
 .../test2.kmodproj/Makefile.preamble | 137 -
 .../test2.kmodproj/PB.project | 24 -
 .../test2/test2.pbproj/project.pbxproj | 260 +
 .../{test2.kmodproj => test2}/test2_main.cpp | 0
 libkern/conf/MASTER | 1 +
 libkern/conf/MASTER.i386 | 2 +-
 libkern/conf/MASTER.ppc | 2 +-
 libkern/conf/Makefile | 3 +-
 libkern/conf/Makefile.template | 2 +-
 libkern/conf/files | 1 +
 libkern/conf/files.ppc | 1 +
 libkern/conf/version.major | 2 +-
 libkern/conf/version.minor | 2 +-
 libkern/conf/version.variant | 1 +
 libkern/libkern/OSByteOrder.h | 500 +-
 libkern/libkern/OSTypes.h | 6 +-
 libkern/libkern/c++/OSData.h | 13 +-
 libkern/libkern/c++/OSMetaClass.h | 4 +
 libkern/libkern/c++/OSSymbol.h | 19 +-
 libkern/libkern/i386/OSByteOrder.h | 212 +-
 libkern/libkern/machine/OSByteOrder.h | 123 +-
 libkern/libkern/ppc/OSByteOrder.h | 121 +-
 libkern/ppc/OSAtomic.s | 57 +-
 libsa/bootstrap.cpp | 8 +-
 libsa/catalogue.cpp | 482 +-
 libsa/conf/MASTER | 2 +-
 libsa/conf/MASTER.i386 | 2 +-
 libsa/conf/MASTER.ppc | 2 +-
 libsa/conf/Makefile | 25 +-
 libsa/conf/Makefile.template | 6 +-
 libsa/conf/files | 5 +-
 libsa/conf/version.major | 2 +-
 libsa/conf/version.minor | 2 +-
 libsa/conf/version.variant | 1 +
 libsa/dgraph.c | 747 ++
 libsa/dgraph.h | 171 +
 libsa/kext.cpp | 746 ++
 libsa/kld_patch.c | 453 +-
 libsa/kmod.cpp | 146 +-
 libsa/libsa/catalogue.h | 1 +
 libsa/libsa/{kmod.h => kext.h} | 4 +-
 libsa/libsa/malloc.h | 5 +
 libsa/libsa/stdlib.h | 6 +
 libsa/libsa/vers_rsrc.h | 39 +-
 libsa/load.c | 2749 +++++
 libsa/load.h | 162 +
 libsa/malloc.c | 610 +-
 libsa/vers_rsrc.c | 503 +-
 makedefs/MakeInc.def | 51 +-
 makedefs/MakeInc.dir | 14 +-
 makedefs/MakeInc.rule | 45 +-
 .../UserNotification/KUNCUserNotifications.c | 103 +-
 osfmk/conf/MASTER | 1 -
 osfmk/conf/MASTER.i386 | 50 +-
 osfmk/conf/MASTER.ppc | 4 +-
 osfmk/conf/Makefile | 3 +-
 osfmk/conf/Makefile.ppc | 2 +-
 osfmk/conf/Makefile.template | 2 +-
 osfmk/conf/files | 11 +-
 osfmk/conf/files.i386 | 38 +-
 osfmk/conf/files.ppc | 61 +-
 osfmk/conf/kernelversion.major | 2 +-
 osfmk/conf/kernelversion.minor | 2 +-
 osfmk/conf/kernelversion.variant | 1 +
 osfmk/conf/version.major | 2 +-
 osfmk/conf/version.minor | 2 +-
 osfmk/conf/version.variant | 2 +-
 {pexpert => osfmk/console}/i386/kdasm.s | 0
 osfmk/console/i386/serial_console.c | 50 +
 .../console}/i386/text_console.c | 41 +-
 .../mp/mp.h => console/i386/text_console.h} | 35 +-
 .../i386/video_scroll.c} | 31 +-
 osfmk/{ppc/iso_font.h => console/iso_font.c} | 1 +
 osfmk/console/panic_dialog.c | 631 ++
 osfmk/console/panic_image.c | 1953 ++++
 osfmk/{ => console}/ppc/serial_console.c | 6 +-
 .../POWERMAC => console/ppc}/video_scroll.s | 4 -
 osfmk/console/rendered_numbers.c | 376 +
 osfmk/console/video_console.c | 2412 +++++
 osfmk/console/video_console.h | 70 +
 osfmk/ddb/db_access.c | 42 +-
 osfmk/ddb/db_break.c | 199 +-
 osfmk/ddb/db_command.c | 65 +-
 osfmk/ddb/db_examine.c | 258 +-
 osfmk/ddb/db_expr.c | 98 +-
 osfmk/ddb/db_ext_symtab.c | 67 +-
 osfmk/ddb/db_macro.c | 76 +-
 osfmk/ddb/db_output.c | 11 +-
 osfmk/ddb/db_print.c | 32 +-
 osfmk/ddb/db_sym.c | 318 +-
 osfmk/ddb/db_task_thread.c | 79 +-
 osfmk/ddb/db_task_thread.h | 66 +-
 osfmk/ddb/db_variables.c | 132 +-
 osfmk/ddb/db_watch.c | 8 +-
 osfmk/default_pager/default_pager.c | 13 +-
 osfmk/default_pager/default_pager_internal.h | 2 +
 osfmk/default_pager/default_pager_types.defs | 18 +-
 osfmk/default_pager/dp_backing_store.c | 297 +-
 osfmk/default_pager/dp_memory_object.c | 43 +-
 osfmk/device/device.defs | 31 +
 osfmk/device/device_init.c | 6 +-
 osfmk/device/iokit_rpc.c | 107 +-
 osfmk/i386/AT386/asm_startup.h | 1 +
 osfmk/i386/AT386/bbclock.c | 4 +-
 osfmk/i386/AT386/iso_scan_font.h | 305 -
 osfmk/i386/AT386/kernBootStruct.h | 144 -
 osfmk/i386/AT386/misc_protos.h | 4 +-
 osfmk/i386/AT386/model_dep.c | 492 +-
 osfmk/i386/AT386/mp/mp.c | 186 -
 osfmk/i386/AT386/mp/mp_v1_1.c | 207 -
 osfmk/i386/AT386/mp/mp_v1_1.h | 149 -
 osfmk/i386/AT386/video_console.c | 1996 ----
 osfmk/i386/AT386/video_console.h | 59 -
 osfmk/i386/Makefile | 7 +-
 osfmk/i386/apic.h | 4 +-
 osfmk/i386/asm.h | 4 +-
 osfmk/i386/bsd_i386.c | 149 +-
 .../i386/commpage/bcopy_scalar.s | 115 +-
 osfmk/i386/commpage/bzero_scalar.s | 111 +
 osfmk/i386/commpage/cacheflush.s | 41 +
 osfmk/i386/commpage/commpage.c | 299 +-
 osfmk/i386/commpage/commpage.h | 42 +-
 .../commpage/commpage_gettimeofday.s} | 24 +-
 .../commpage/commpage_mach_absolute_time.s | 37 +
 osfmk/i386/commpage/commpage_sigs.h | 57 +
 osfmk/i386/commpage/commpage_sigs.s | 69 +
 osfmk/i386/commpage/pthreads.s | 47 +
 osfmk/i386/commpage/spinlocks.s | 133 +
 osfmk/i386/cpu.c | 106 +
 osfmk/i386/cpu_capabilities.h | 130 +-
 osfmk/i386/cpu_data.h | 120 +-
 osfmk/i386/cpu_number.h | 62 +-
 osfmk/i386/cpuid.c | 750 +-
 osfmk/i386/cpuid.h | 167 +-
 osfmk/i386/cswitch.s | 23 +-
 osfmk/i386/db_machdep.h | 8 +-
 osfmk/i386/fpu.c | 215 +-
 osfmk/i386/fpu.h | 21 +-
 osfmk/i386/genassym.c | 44 +-
 osfmk/i386/hardclock.c | 89 +-
 osfmk/i386/hw_lock_types.h | 2 +-
 osfmk/i386/i386_init.c | 202 +
 osfmk/i386/i386_lock.s | 231 +-
 osfmk/i386/i386_vm_init.c | 279 +
 osfmk/i386/io_map.c | 7 +
 osfmk/i386/ldt.c | 5 +
 osfmk/i386/lock.h | 21 -
 osfmk/i386/locore.s | 209 +-
 osfmk/i386/loose_ends.c | 147 +-
 osfmk/i386/machdep_call.c | 5 +
 osfmk/i386/machine_cpu.h | 56 +
 osfmk/i386/machine_routines.c | 137 +-
 osfmk/i386/machine_routines.h | 60 +-
 osfmk/i386/machparam.h | 9 +
 osfmk/i386/mcount.s | 74 +
 osfmk/i386/misc_protos.h | 7 +-
 osfmk/i386/mp.c | 964 ++
 osfmk/i386/{AT386/mp => }/mp.h | 103 +-
 osfmk/i386/mp_desc.c | 22 +-
 osfmk/i386/mp_desc.h | 1 +
 osfmk/i386/mp_events.h | 66 +
 .../i386/{AT386/mp/boot.h => mp_slave_boot.h} | 3 +-
 .../mp/slave_boot.s => mp_slave_boot.s} | 24 +-
 osfmk/i386/pcb.c | 408 +-
 osfmk/i386/phys.c | 54 +-
 osfmk/i386/pmap.c | 540 +-
 osfmk/i386/pmap.h | 43 +-
 osfmk/i386/proc_reg.h | 77 +
 osfmk/i386/rtclock.c | 849 +-
 osfmk/i386/rtclock_entries.h | 3 +-
 osfmk/i386/seg.h | 3 +-
 osfmk/i386/start.s | 11 +-
 osfmk/i386/thread.h | 14 -
 osfmk/i386/thread_act.h | 15 +-
 osfmk/i386/trap.c | 4 +-
 osfmk/i386/xpr.h | 7 +-
 osfmk/ipc/ipc_init.c | 5 +-
 osfmk/ipc/ipc_kmsg.c | 40 +-
 osfmk/ipc/ipc_mqueue.c | 15 +-
 osfmk/ipc/ipc_notify.c | 258 +-
 osfmk/ipc/ipc_notify.h | 3 -
 osfmk/ipc/ipc_object.c | 26 +-
 osfmk/ipc/ipc_port.c | 31 +-
 osfmk/ipc/ipc_port.h | 8 +
 osfmk/ipc/ipc_table.h | 8 +-
 osfmk/ipc/mach_debug.c | 12 +-
 osfmk/ipc/mach_msg.c | 117 +-
 osfmk/ipc/mach_port.c | 8 +-
 osfmk/kdp/kdp.c | 7 +
 osfmk/kdp/kdp_core.h | 50 +
 osfmk/kdp/kdp_internal.h | 7 +-
 osfmk/kdp/kdp_udp.c | 687 +-
 osfmk/kdp/kdp_udp.h | 7 +-
 osfmk/kdp/ml/i386/kdp_machdep.c | 9 +-
 osfmk/kdp/ml/i386/kdp_vm.c | 8 +-
 osfmk/kdp/ml/ppc/kdp_asm.s | 16 +-
 osfmk/kdp/ml/ppc/kdp_machdep.c | 126 +-
 osfmk/kdp/ml/ppc/kdp_misc.s | 80 +-
 osfmk/kdp/ml/ppc/kdp_vm.c | 532 +-
 osfmk/kdp/pe/POWERMAC/kdp_mace.c | 675 --
 osfmk/kdp/pe/POWERMAC/kdp_mace.h | 392 -
 osfmk/kern/ast.c | 132 +-
 osfmk/kern/ast.h | 24 +-
 osfmk/kern/bsd_kern.c | 118 +-
 osfmk/kern/clock.c | 166 +-
 osfmk/kern/clock.h | 44 +-
 osfmk/kern/cpu_data.h | 12 -
 osfmk/kern/debug.c | 20 +-
 osfmk/kern/debug.h | 9 +-
 osfmk/kern/exception.c | 73 +-
 osfmk/kern/exception.h | 5 +
 osfmk/kern/host.c | 111 +-
 osfmk/kern/host.h | 8 +-
 osfmk/kern/host_notify.c | 201 +
 osfmk/kern/host_notify.h | 49 +
 osfmk/kern/ipc_host.c | 22 +-
 osfmk/kern/ipc_kobject.c | 9 +-
 osfmk/kern/ipc_kobject.h | 2 +-
 osfmk/kern/ipc_tt.c | 89 +-
 osfmk/kern/kalloc.c | 4 +-
 osfmk/kern/kern_types.h | 7 +-
 osfmk/kern/kmod.c | 61 +-
 osfmk/kern/lock.c | 120 +-
 osfmk/kern/lock.h | 52 +-
 osfmk/kern/mach_clock.c | 14 +-
 osfmk/kern/mach_factor.c | 37 +-
 osfmk/kern/mach_param.h | 10 +-
 osfmk/kern/machine.c | 274 +-
 osfmk/kern/machine.h | 5 +-
 osfmk/kern/misc_protos.h | 7 +
 osfmk/kern/mk_sp.c | 227 +-
 osfmk/kern/mk_timer.c | 27 +-
 osfmk/kern/mk_timer.h | 4 +-
 osfmk/kern/printf.c | 19 +
 osfmk/kern/priority.c | 9 +-
 osfmk/kern/processor.c | 93 +-
 osfmk/kern/processor.h | 111 +-
 osfmk/kern/profile.c | 6 +-
 osfmk/kern/sched.h | 29 +-
 osfmk/kern/sched_prim.c | 1952 ++--
 osfmk/kern/sched_prim.h | 59 +-
 osfmk/kern/simple_lock.h | 2 +-
 osfmk/kern/startup.c | 64 +-
 osfmk/kern/sync_sema.c | 19 +-
 osfmk/kern/syscall_emulation.c | 12 +-
 osfmk/kern/syscall_subr.c | 3 +-
 osfmk/kern/syscall_sw.c | 6 +-
 osfmk/kern/syscall_sw.h | 3 +
 osfmk/kern/task.c | 252 +-
 osfmk/kern/task.h | 29 +-
 osfmk/kern/task_policy.c | 2 +-
 osfmk/kern/task_swap.c | 1412 +--
 osfmk/kern/thread.c | 847 +-
 osfmk/kern/thread.h | 511 +-
 osfmk/kern/thread_act.c | 539 +-
 osfmk/kern/thread_act.h | 332 -
 osfmk/kern/thread_call.c | 199 +-
 osfmk/kern/thread_policy.c | 26 +-
 osfmk/kern/thread_swap.c | 10 +-
 osfmk/kern/timer_call.c | 73 +-
 osfmk/kern/wait_queue.c | 35 +-
 osfmk/kern/wait_queue.h | 2 +-
 osfmk/kern/zalloc.c | 572 +-
 osfmk/kern/zalloc.h | 2 +-
 osfmk/mach/Makefile | 13 +-
 osfmk/mach/host_notify.h | 42 +
 osfmk/mach/host_notify_reply.defs | 43 +
 osfmk/mach/host_priv.defs | 17 +-
 osfmk/mach/host_security.defs | 2 +
 osfmk/mach/host_special_ports.h | 135 +
 osfmk/mach/i386/fp_reg.h | 22 +
 osfmk/mach/i386/machine_types.defs | 8 +-
 osfmk/mach/i386/thread_state.h | 2 +-
 osfmk/mach/i386/thread_status.h | 20 +-
 osfmk/mach/i386/vm_param.h | 30 +-
 osfmk/mach/mach_host.defs | 5 +
 .../mach/mach_notify.defs | 27 +-
 osfmk/mach/mach_port.defs | 2 +-
 osfmk/mach/mach_traps.h | 8 +-
 osfmk/mach/mach_types.defs | 13 +-
 osfmk/mach/mach_types.h | 10 +-
 osfmk/mach/machine.h | 1 +
 osfmk/mach/memory_object.defs | 4 +-
 osfmk/mach/memory_object_control.defs | 12 +-
 osfmk/mach/memory_object_default.defs | 4 +-
 osfmk/mach/memory_object_name.defs | 2 +-
 osfmk/mach/memory_object_types.h | 101 +
 osfmk/mach/message.h | 46 +-
 osfmk/mach/mig_errors.h | 18 +-
 osfmk/mach/mk_timer.h | 4 +-
 osfmk/mach/ndr.h | 170 +-
 osfmk/mach/norma_special_ports.h | 63 +-
 osfmk/mach/notify.defs | 37 +-
 osfmk/mach/notify.h | 16 +-
 osfmk/mach/ppc/exception.h | 1 +
 osfmk/mach/ppc/machine_types.defs | 8 +-
 osfmk/mach/ppc/processor_info.h | 6 -
 osfmk/mach/ppc/syscall_sw.h | 3 +
 osfmk/mach/ppc/thread_status.h | 78 +-
 osfmk/mach/ppc/vm_param.h | 13 +-
 osfmk/mach/ppc/vm_types.h | 7 +-
 osfmk/mach/processor.defs | 2 +-
 osfmk/mach/processor_set.defs | 2 +-
 osfmk/mach/std_types.defs | 10 +-
 osfmk/mach/syscall_sw.h | 2 +
 osfmk/mach/task.defs | 2 +-
 osfmk/mach/task_info.h | 6 +-
 osfmk/mach/thread_act.defs | 2 +-
 osfmk/mach/upl.defs | 4 +-
 osfmk/mach/vm_map.defs | 2 +-
 osfmk/mach/vm_param.h | 36 +-
 osfmk/mach/vm_statistics.h | 2 +
 osfmk/mach/vm_types.h | 28 +
 .../man/host_security_create_task_token.html | 1 +
 osfmk/man/host_security_set_task_token.html | 1 +
 osfmk/man/index.html | 2 +-
 osfmk/man/task_create.html | 2 +-
 osfmk/man/task_create_security_token.html | 1 -
 osfmk/man/task_info.html | 2 +-
 osfmk/man/task_set_security_token.html | 1 -
 osfmk/ppc/AltiAssist.s | 6 -
 osfmk/ppc/Diagnostics.c | 198 +-
 osfmk/ppc/Diagnostics.h | 22 +
 osfmk/ppc/Emulate.s | 450 +-
 osfmk/ppc/Emulate64.s | 945 ++
 osfmk/ppc/Firmware.h | 15 +-
 osfmk/ppc/Firmware.s | 758 +-
 osfmk/ppc/FirmwareC.c | 7 +-
 osfmk/ppc/FirmwareCalls.h | 5 +-
 osfmk/ppc/MPinterfaces.s | 458 -
 osfmk/ppc/Makefile | 7 +-
 osfmk/ppc/POWERMAC/dbdma.c | 151 -
 osfmk/ppc/POWERMAC/mp/MPPlugIn.h | 349 -
 osfmk/ppc/POWERMAC/mp/MP_2p.s | 2412 -----
 osfmk/ppc/POWERMAC/panic_image.c | 269 -
 osfmk/ppc/POWERMAC/rendered_numbers.c | 374 -
 osfmk/ppc/POWERMAC/video_console.c | 2923 ------
 osfmk/ppc/POWERMAC/video_console.h | 120 -
 osfmk/ppc/POWERMAC/video_console_entries.h | 98 -
 osfmk/ppc/PPCcalls.c | 2 +
 osfmk/ppc/PPCcalls.h | 22 +-
 osfmk/ppc/Performance.s | 1 -
 osfmk/ppc/PseudoKernel.c | 42 +-
 osfmk/ppc/aligned_data.s | 92 +-
 osfmk/ppc/asm.h | 153 +-
 osfmk/ppc/ast.h | 4 +-
 osfmk/ppc/atomic_switch.s | 31 +-
 osfmk/ppc/bcopy.s | 570 +-
 osfmk/ppc/bcopytest.c | 626 ++
 osfmk/ppc/bsd_asm.s | 132 -
 osfmk/ppc/bsd_ppc.c | 283 -
 osfmk/ppc/bzero.s | 545 +-
 osfmk/ppc/cache.s | 413 +-
 osfmk/ppc/chud/chud_cpu.c | 459 +
 osfmk/ppc/chud/chud_cpu_asm.h | 107 +
 osfmk/ppc/chud/chud_cpu_asm.s | 573 +
 .../ppc/chud/chud_glue.c | 3 +-
 .../ppc/chud/chud_memory.c | 43 +-
 osfmk/ppc/chud/chud_osfmk_callback.c | 421 +
 osfmk/ppc/chud/chud_spr.h | 269 +
 osfmk/ppc/chud/chud_thread.c | 585 ++
 osfmk/ppc/chud/chud_xnu.h | 204 +
 .../dbdma.h => chud/chud_xnu_glue.h} | 4 +-
 osfmk/ppc/commpage/bcopy_64.s | 301 +
 osfmk/ppc/commpage/bcopy_970.s | 592 ++
 osfmk/ppc/commpage/bcopy_g3.s | 274 +
 osfmk/ppc/commpage/bcopy_g4.s | 621 ++
 osfmk/ppc/commpage/bigcopy_970.s | 499 +
 osfmk/ppc/commpage/bzero_128.s | 156 +
 osfmk/ppc/commpage/bzero_32.s | 128 +
 osfmk/ppc/commpage/cacheflush.s | 100 +
 osfmk/ppc/commpage/commpage.c | 401 +-
 osfmk/ppc/commpage/commpage_asm.s | 116 +
 osfmk/ppc/commpage/gettimeofday.s | 212 +
 osfmk/ppc/commpage/mach_absolute_time.s | 68 +
 osfmk/ppc/commpage/pthread.s | 103 +
 osfmk/ppc/commpage/spinlocks.s | 246 +
 osfmk/ppc/console_feed.c | 2 +-
 osfmk/ppc/cpu.c | 136 +-
 osfmk/ppc/cpu_capabilities.h | 110 +
 osfmk/ppc/cpu_data.h | 21 +-
 osfmk/ppc/cswtch.s | 2306 ++--
 osfmk/ppc/db_asm.s | 64 +-
 osfmk/ppc/db_disasm.c | 1 +
 osfmk/ppc/db_interface.c | 199 +-
 osfmk/ppc/db_low_trace.c | 725 +-
 osfmk/ppc/db_low_trace.h | 11 +
 osfmk/ppc/db_machdep.h | 15 +-
 osfmk/ppc/db_trace.c | 231 +-
 osfmk/ppc/endian.h | 2 +-
 osfmk/ppc/exception.h | 381 +-
 osfmk/ppc/genassym.c | 785 +-
 osfmk/ppc/hw_counters.h | 18 +-
 osfmk/ppc/hw_exception.s | 1072 +-
 osfmk/ppc/hw_lock.s | 2396 +++--
 osfmk/ppc/hw_perfmon.c | 945 ++
 osfmk/ppc/hw_perfmon.h | 119 +
 osfmk/ppc/hw_perfmon_mmcr.h | 183 +
 osfmk/ppc/hw_vm.s | 7954 +++++++++----
 osfmk/ppc/instrumentation.h | 58 +
 osfmk/ppc/interrupt.c | 14 +-
 osfmk/ppc/io_map.c | 66 +-
 osfmk/ppc/io_map_entries.h | 1 +
 osfmk/ppc/lock.h | 25 +-
 osfmk/ppc/low_trace.h | 38 +-
 osfmk/ppc/lowglobals.h | 80 +
 osfmk/ppc/lowmem_vectors.s | 3715 ++++---
 osfmk/ppc/machine_routines.c | 71 +-
 osfmk/ppc/machine_routines.h | 60 +-
 osfmk/ppc/machine_routines_asm.s | 968 +-
 osfmk/ppc/mappings.c | 1935 ++--
 osfmk/ppc/mappings.h | 357 +-
 osfmk/ppc/mcount.s | 77 +
 osfmk/ppc/mem.c | 94 -
 osfmk/ppc/mem.h | 7 +-
 osfmk/ppc/misc_asm.s | 53 +-
 osfmk/ppc/misc_protos.h | 22 +-
 osfmk/ppc/model_dep.c | 131 +-
 osfmk/ppc/movc.s | 1318 ++-
 osfmk/ppc/net_filter.c | 753 --
 osfmk/ppc/notify_interrupt.c | 212 -
 osfmk/ppc/pcb.c | 611 +-
 osfmk/ppc/pmap.c | 2113 ++--
 osfmk/ppc/pmap.h | 179 +-
 osfmk/ppc/pmap_internals.h | 128 -
 osfmk/ppc/ppc_disasm.i | 20 +-
 osfmk/ppc/ppc_init.c | 297 +-
 osfmk/ppc/ppc_vm_init.c | 446 +-
 osfmk/ppc/proc_reg.h | 366 +-
 osfmk/ppc/rtclock.c | 1109 +-
 osfmk/ppc/savearea.c | 64 +-
 osfmk/ppc/savearea.h | 253 +-
 osfmk/ppc/savearea_asm.s | 1681 +--
 osfmk/ppc/{POWERMAC => }/scc_8530.h | 0
 osfmk/ppc/sched_param.h | 2 +-
 osfmk/ppc/{POWERMAC => }/serial_io.c | 67 +-
 osfmk/ppc/{POWERMAC => }/serial_io.h | 4 +
 osfmk/ppc/skiplists.s | 1304 +++
 osfmk/ppc/spec_reg.h | 9 +-
 osfmk/ppc/start.s | 937 +-
 osfmk/ppc/status.c | 877 +-
 osfmk/ppc/thread_act.h | 55 +-
 osfmk/ppc/trap.c | 285 +-
 osfmk/ppc/trap.h | 5 +-
 osfmk/ppc/vmachmon.c | 656 +-
 osfmk/ppc/vmachmon.h | 304 +-
 osfmk/ppc/vmachmon_asm.s | 1667 ++-
 osfmk/profiling/i386/profile-asm.s | 24 +-
 osfmk/profiling/i386/profile-md.c | 56 +-
 osfmk/profiling/i386/profile-md.h | 2 +
 osfmk/vm/bsd_vm.c | 28 +-
 osfmk/vm/device_vm.c | 4 +-
 osfmk/vm/memory_object.c | 254 +-
 osfmk/vm/pmap.h | 61 +-
 osfmk/vm/task_working_set.c | 68 +-
 osfmk/vm/task_working_set.h | 2 +-
 osfmk/vm/vm_debug.c | 18 +-
 osfmk/vm/vm_external.c | 6 +-
 osfmk/vm/vm_external.h | 2 +-
 osfmk/vm/vm_fault.c | 687 +-
 osfmk/vm/vm_init.c | 3 +-
 osfmk/vm/vm_kern.c | 79 +-
 osfmk/vm/vm_map.c | 425 +-
 osfmk/vm/vm_map.h | 15 +
 osfmk/vm/vm_object.c | 658 +-
 osfmk/vm/vm_object.h | 6 +-
 osfmk/vm/vm_page.h | 17 +-
 osfmk/vm/vm_pageout.c | 2109 ++--
 osfmk/vm/vm_pageout.h | 10 +
 osfmk/vm/vm_print.h | 7 +-
 osfmk/vm/vm_resident.c | 288 +-
 osfmk/vm/vm_shared_memory_server.c | 405 +-
 osfmk/vm/vm_shared_memory_server.h | 36 +-
 osfmk/vm/vm_user.c | 598 +-
 pexpert/conf/Makefile | 3 +-
 pexpert/conf/Makefile.i386 | 2 +-
 pexpert/conf/Makefile.ppc | 2 +-
 pexpert/conf/Makefile.template | 4 +-
 pexpert/conf/files.i386 | 8 +-
 pexpert/conf/version.major | 2 +-
 pexpert/conf/version.minor | 2 +-
 pexpert/conf/version.variant | 1 +
 pexpert/gen/bootargs.c | 29 +-
 pexpert/i386/boot_images.h | 145 +-
 pexpert/i386/fakePPCDeviceTree.c | 4 +-
 pexpert/i386/pe_bootargs.c | 2 +-
 pexpert/i386/pe_identify_machine.c | 2 +-
 pexpert/i386/pe_init.c | 70 +-
 pexpert/i386/pe_interrupt.c | 23 +-
 pexpert/i386/pe_kprintf.c | 46 +-
 pexpert/i386/pe_serial.c | 165 +
 pexpert/i386/video_console.h | 59 -
 pexpert/pexpert/Makefile | 3 +-
 pexpert/pexpert/device_tree.h | 13 +
 pexpert/pexpert/i386/boot.h | 124 +-
 pexpert/pexpert/i386/protos.h | 10 +-
 pexpert/pexpert/machine/Makefile | 4 +-
 pexpert/pexpert/pe_images.h | 2 +-
 pexpert/pexpert/pexpert.h | 4 +-
 pexpert/pexpert/ppc/Makefile | 1 -
 pexpert/pexpert/ppc/boot.h | 3 +-
 pexpert/pexpert/ppc/dbdma.h | 179 -
 pexpert/pexpert/ppc/interrupts.h | 2 +-
 pexpert/pexpert/ppc/protos.h | 102 +-
 pexpert/pexpert/protos.h | 1 +
 pexpert/ppc/pe_identify_machine.c | 85 +-
 pexpert/ppc/pe_init.c | 20 +-
 pexpert/ppc/pe_kprintf.c | 24 +-
 1137 files changed, 135863 insertions(+), 65773 deletions(-)
 create mode 100644 bsd/crypto/des/des_enc.c
 create mode 100644 bsd/dev/i386/sysctl.c
 create mode 100644 bsd/dev/memdev.c
 create mode 100644 bsd/dev/memdev.h
 create mode 100644 bsd/dev/ppc/chud/chud_bsd_callback.c
 create mode 100644 bsd/dev/ppc/chud/chud_process.c
 rename iokit/IOKit/adb/adb.h => bsd/dev/ppc/sysctl.c (80%)
 create mode 100644 bsd/hfs/hfs_hotfiles.c
 create mode 100644 bsd/hfs/hfs_hotfiles.h
 create mode 100644 bsd/hfs/hfs_notification.c
 create mode 100644 bsd/hfs/hfscommon/BTree/BTreeNodeReserve.c
 delete mode 100644 bsd/if/ppc/if_en.c
 delete mode 100644 bsd/if/ppc/if_en.h
 delete mode 100644 bsd/if/ppc/mace.c
 delete mode 100644 bsd/if/ppc/mace.h
 create mode 100644 bsd/kern/kern_aio.c
 create mode 100644 bsd/kern/kern_audit.c
 create mode 100644 bsd/kern/kern_bsm_audit.c
 create mode 100644 bsd/kern/kern_bsm_klib.c
 create mode 100644 bsd/kern/kern_bsm_token.c
 create mode 100644 bsd/man/man2/fsctl.2
 create mode 100644 bsd/man/man2/kqueue.2
 create mode 100644 bsd/man/man2/semctl.2
 create mode 100644 bsd/man/man2/semget.2
 create mode 100644 bsd/man/man2/semop.2
 delete mode 100644 bsd/man/man4/scsi.4
 create mode 100644 bsd/man/man9/intro.9
 create mode 100644 bsd/net/firewire.h
 create mode 100644 bsd/nfs/nfs_lock.c
 create mode 100644 bsd/nfs/nfs_lock.h
 create mode 100644 bsd/nfs/nlminfo.h
 create mode 100644 bsd/sys/aio.h
 create mode 100644 bsd/sys/aio_kern.h
 create mode 100644 bsd/sys/audit.h
 create mode 100644 bsd/sys/bsm_kevents.h
 create mode 100644 bsd/sys/bsm_klib.h
 create mode 100644 bsd/sys/bsm_token.h
 create mode 100644 bsd/sys/bsm_token.save.h
 create mode 100644 bsd/sys/bsm_uevents.h
 create mode 100644 bsd/sys/event.h
 rename bsd/{ufs/mfs/mfsiom.h => sys/eventvar.h} (64%)
 create mode 100644 bsd/sys/kern_audit.h
 delete mode 100644 bsd/ufs/mfs/mfs_vfsops.c
 delete mode 100644 bsd/ufs/mfs/mfs_vnops.c
 delete mode 100644 bsd/ufs/mfs/mfsnode.h
 create mode 100644 bsd/ufs/ufs/ufs_attrlist.c
 create mode 100644 config/BSDKernel.exports
 create mode 100644 config/BSDKernel.i386.exports
 create mode 100644 config/BSDKernel.ppc.exports
 create mode 100644 config/IOKit.exports
 create mode 100644 config/IOKit.i386.exports
 create mode 100644 config/IOKit.ppc.exports
 create mode 100644 config/Libkern.exports
 create mode 100644 config/Libkern.i386.exports
 create mode 100644 config/Libkern.ppc.exports
 create mode 100644 config/Mach.exports
 create mode 100644 config/Mach.i386.exports
 create mode 100644 config/Mach.ppc.exports
 rename config/System.kext/{Contents => }/Info.plist (85%)
 rename config/System.kext/{Contents/PlugIns/AppleNMI.kext/Contents => PlugIns/AppleNMI.kext}/Info.plist (87%)
 rename config/System.kext/{Contents/PlugIns/ApplePlatformFamily.kext/Contents => PlugIns/ApplePlatformFamily.kext}/Info.plist (93%)
 rename config/System.kext/{Contents/PlugIns/IOADBFamily.kext/Contents => PlugIns/BSDKernel.kext}/Info.plist (72%)
 rename config/System.kext/{Contents/PlugIns/BSDKernel.kext/Contents => PlugIns/BSDKernel6.0.kext}/Info.plist (80%)
 create mode 100644 config/System.kext/PlugIns/IOKit.kext/Info.plist
 rename config/System.kext/{Contents/PlugIns/IOKit.kext/Contents => PlugIns/IOKit6.0.kext}/Info.plist (80%)
 rename config/System.kext/{Contents/PlugIns/IONVRAMFamily.kext/Contents => PlugIns/IONVRAMFamily.kext}/Info.plist (88%)
 rename config/System.kext/{Contents/PlugIns/IOSystemManagement.kext/Contents => PlugIns/IOSystemManagement.kext}/Info.plist (94%)
 create mode 100644 config/System.kext/PlugIns/Libkern.kext/Info.plist
 rename config/System.kext/{Contents/PlugIns/Libkern.kext/Contents => PlugIns/Libkern6.0.kext}/Info.plist (80%)
 create mode 100644 config/System.kext/PlugIns/Mach.kext/Info.plist
 rename config/System.kext/{Contents/PlugIns/Mach.kext/Contents => PlugIns/Mach6.0.kext}/Info.plist (80%)
 create mode 100644 config/System.kext/PlugIns/System6.0.kext/Info.plist
 create mode 100644 config/System6.0.exports
 create mode 100644 config/System6.0.i386.exports
 create mode 100644 config/System6.0.ppc.exports
 delete mode 100644 iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.cpp
 delete mode 100644 iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.h
 delete mode 100644 iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.cpp
 delete mode 100644 iokit/Drivers/platform/drvAppleIntelClassicPIC/AppleIntelClassicPIC.h
 delete mode 100644 iokit/Drivers/platform/drvAppleIntelClassicPIC/PIC8259.cpp
 delete mode 100644 iokit/Drivers/platform/drvApplePMU/IOPMUADBController.cpp
 delete mode 100644 iokit/Drivers/platform/drvApplePMU/IOPMUADBController.h
 delete mode 100644 iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.cpp
 delete mode 100644 iokit/Families/IOADBBus/IOADBBusPriv.h
 delete mode 100644 iokit/Families/IOADBBus/IOADBController.cpp
 delete mode 100644 iokit/Families/IOADBBus/IOADBControllerUserClient.cpp
 delete mode 100644 iokit/Families/IOADBBus/IOADBControllerUserClient.h
 delete mode 100644 iokit/Families/IOADBBus/IOADBDevice.cpp
 rename iokit/{Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.h => IOKit/IOKitKeysPrivate.h} (53%)
 create mode 100644 iokit/IOKit/IOMapper.h
 create mode 100644 iokit/IOKit/IOPMEventSource.h
delete mode 100644 iokit/IOKit/adb/IOADBBus.h delete mode 100644 iokit/IOKit/adb/IOADBController.h delete mode 100644 iokit/IOKit/adb/IOADBDevice.h delete mode 100644 iokit/IOKit/adb/Makefile create mode 100644 iokit/Kernel/IOMapper.cpp create mode 100644 iokit/Kernel/IOPMPowerStateQueue.cpp create mode 100644 iokit/Kernel/IOPMPowerStateQueue.h rename iokit/{Drivers/platform/drvAppleRootDomain/RootDomain.cpp => Kernel/IOPMrootDomain.cpp} (82%) create mode 100644 iokit/Kernel/RootDomainUserClient.cpp rename iokit/{Drivers/platform/drvAppleRootDomain => Kernel}/RootDomainUserClient.h (83%) delete mode 100644 iokit/include/DeviceTree.h rename osfmk/.gdbinit => kgmacros (83%) create mode 100644 libkern/c++/OSObjectAsm.s delete mode 100644 libkern/c++/Tests/TestSerialization/CustomInfo.xml delete mode 100644 libkern/c++/Tests/TestSerialization/Makefile delete mode 100644 libkern/c++/Tests/TestSerialization/Makefile.postamble delete mode 100644 libkern/c++/Tests/TestSerialization/Makefile.preamble delete mode 100644 libkern/c++/Tests/TestSerialization/PB.project delete mode 100644 libkern/c++/Tests/TestSerialization/PBUserInfo/PBUserInfo_root.plist delete mode 100644 libkern/c++/Tests/TestSerialization/test1.kmodproj/CustomInfo.xml delete mode 100644 libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile delete mode 100644 libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.postamble delete mode 100644 libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.preamble delete mode 100644 libkern/c++/Tests/TestSerialization/test1.kmodproj/PB.project create mode 100644 libkern/c++/Tests/TestSerialization/test1/test1.pbproj/project.pbxproj rename libkern/c++/Tests/TestSerialization/{test1.kmodproj => test1}/test1_main.cpp (100%) delete mode 100755 libkern/c++/Tests/TestSerialization/test2.kmodproj/CustomInfo.xml delete mode 100644 libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile delete mode 100644 libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.postamble delete mode 100644 libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.preamble delete mode 100644 libkern/c++/Tests/TestSerialization/test2.kmodproj/PB.project create mode 100644 libkern/c++/Tests/TestSerialization/test2/test2.pbproj/project.pbxproj rename libkern/c++/Tests/TestSerialization/{test2.kmodproj => test2}/test2_main.cpp (100%) create mode 100644 libsa/dgraph.c create mode 100644 libsa/dgraph.h create mode 100644 libsa/kext.cpp rename libsa/libsa/{kmod.h => kext.h} (76%) create mode 100644 libsa/load.c create mode 100644 libsa/load.h rename {pexpert => osfmk/console}/i386/kdasm.s (100%) create mode 100644 osfmk/console/i386/serial_console.c rename {pexpert => osfmk/console}/i386/text_console.c (90%) rename osfmk/{ppc/POWERMAC/mp/mp.h => console/i386/text_console.h} (64%) rename osfmk/{i386/AT386/mp/mp_events.h => console/i386/video_scroll.c} (67%) rename osfmk/{ppc/iso_font.h => console/iso_font.c} (99%) create mode 100644 osfmk/console/panic_dialog.c create mode 100644 osfmk/console/panic_image.c rename osfmk/{ => console}/ppc/serial_console.c (98%) rename osfmk/{ppc/POWERMAC => console/ppc}/video_scroll.s (93%) create mode 100644 osfmk/console/rendered_numbers.c create mode 100644 osfmk/console/video_console.c create mode 100644 osfmk/console/video_console.h delete mode 100644 osfmk/i386/AT386/iso_scan_font.h delete mode 100644 osfmk/i386/AT386/kernBootStruct.h delete mode 100644 osfmk/i386/AT386/mp/mp.c delete mode 100644 osfmk/i386/AT386/mp/mp_v1_1.c delete mode 100644 
osfmk/i386/AT386/mp/mp_v1_1.h delete mode 100644 osfmk/i386/AT386/video_console.c delete mode 100644 osfmk/i386/AT386/video_console.h rename bsd/ufs/mfs/mfs_extern.h => osfmk/i386/commpage/bcopy_scalar.s (57%) create mode 100644 osfmk/i386/commpage/bzero_scalar.s create mode 100644 osfmk/i386/commpage/cacheflush.s rename osfmk/{ppc/POWERMAC/mp/mp.c => i386/commpage/commpage_gettimeofday.s} (76%) create mode 100644 osfmk/i386/commpage/commpage_mach_absolute_time.s create mode 100644 osfmk/i386/commpage/commpage_sigs.h create mode 100644 osfmk/i386/commpage/commpage_sigs.s create mode 100644 osfmk/i386/commpage/pthreads.s create mode 100644 osfmk/i386/commpage/spinlocks.s create mode 100644 osfmk/i386/i386_init.c create mode 100644 osfmk/i386/i386_vm_init.c create mode 100644 osfmk/i386/machine_cpu.h create mode 100644 osfmk/i386/mcount.s create mode 100644 osfmk/i386/mp.c rename osfmk/i386/{AT386/mp => }/mp.h (74%) create mode 100644 osfmk/i386/mp_events.h rename osfmk/i386/{AT386/mp/boot.h => mp_slave_boot.h} (96%) rename osfmk/i386/{AT386/mp/slave_boot.s => mp_slave_boot.s} (95%) create mode 100644 osfmk/kdp/kdp_core.h delete mode 100644 osfmk/kdp/pe/POWERMAC/kdp_mace.c delete mode 100644 osfmk/kdp/pe/POWERMAC/kdp_mace.h create mode 100644 osfmk/kern/host_notify.c create mode 100644 osfmk/kern/host_notify.h create mode 100644 osfmk/mach/host_notify.h create mode 100644 osfmk/mach/host_notify_reply.defs create mode 100644 osfmk/mach/host_special_ports.h rename iokit/Families/IOADBBus/IOADBBus.cpp => osfmk/mach/mach_notify.defs (67%) create mode 100755 osfmk/man/host_security_create_task_token.html create mode 100755 osfmk/man/host_security_set_task_token.html delete mode 100755 osfmk/man/task_create_security_token.html delete mode 100755 osfmk/man/task_set_security_token.html create mode 100644 osfmk/ppc/Emulate64.s delete mode 100644 osfmk/ppc/MPinterfaces.s delete mode 100644 osfmk/ppc/POWERMAC/dbdma.c delete mode 100644 osfmk/ppc/POWERMAC/mp/MPPlugIn.h delete mode 100644 osfmk/ppc/POWERMAC/mp/MP_2p.s delete mode 100644 osfmk/ppc/POWERMAC/panic_image.c delete mode 100644 osfmk/ppc/POWERMAC/rendered_numbers.c delete mode 100644 osfmk/ppc/POWERMAC/video_console.c delete mode 100644 osfmk/ppc/POWERMAC/video_console.h delete mode 100644 osfmk/ppc/POWERMAC/video_console_entries.h create mode 100644 osfmk/ppc/bcopytest.c delete mode 100644 osfmk/ppc/bsd_asm.s delete mode 100644 osfmk/ppc/bsd_ppc.c create mode 100644 osfmk/ppc/chud/chud_cpu.c create mode 100644 osfmk/ppc/chud/chud_cpu_asm.h create mode 100644 osfmk/ppc/chud/chud_cpu_asm.s rename libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.h => osfmk/ppc/chud/chud_glue.c (93%) rename iokit/IOKit/adb/IOADBLib.h => osfmk/ppc/chud/chud_memory.c (60%) create mode 100644 osfmk/ppc/chud/chud_osfmk_callback.c create mode 100644 osfmk/ppc/chud/chud_spr.h create mode 100644 osfmk/ppc/chud/chud_thread.c create mode 100644 osfmk/ppc/chud/chud_xnu.h rename osfmk/ppc/{POWERMAC/dbdma.h => chud/chud_xnu_glue.h} (91%) create mode 100644 osfmk/ppc/commpage/bcopy_64.s create mode 100644 osfmk/ppc/commpage/bcopy_970.s create mode 100644 osfmk/ppc/commpage/bcopy_g3.s create mode 100644 osfmk/ppc/commpage/bcopy_g4.s create mode 100644 osfmk/ppc/commpage/bigcopy_970.s create mode 100644 osfmk/ppc/commpage/bzero_128.s create mode 100644 osfmk/ppc/commpage/bzero_32.s create mode 100644 osfmk/ppc/commpage/cacheflush.s create mode 100644 osfmk/ppc/commpage/gettimeofday.s create mode 100644 osfmk/ppc/commpage/mach_absolute_time.s create mode 100644 
osfmk/ppc/commpage/pthread.s create mode 100644 osfmk/ppc/commpage/spinlocks.s create mode 100644 osfmk/ppc/hw_perfmon.c create mode 100644 osfmk/ppc/hw_perfmon.h create mode 100644 osfmk/ppc/hw_perfmon_mmcr.h create mode 100644 osfmk/ppc/instrumentation.h create mode 100644 osfmk/ppc/lowglobals.h create mode 100644 osfmk/ppc/mcount.s delete mode 100644 osfmk/ppc/mem.c delete mode 100644 osfmk/ppc/net_filter.c delete mode 100644 osfmk/ppc/notify_interrupt.c delete mode 100644 osfmk/ppc/pmap_internals.h rename osfmk/ppc/{POWERMAC => }/scc_8530.h (100%) rename osfmk/ppc/{POWERMAC => }/serial_io.c (90%) rename osfmk/ppc/{POWERMAC => }/serial_io.h (97%) create mode 100644 osfmk/ppc/skiplists.s create mode 100644 pexpert/i386/pe_serial.c delete mode 100644 pexpert/i386/video_console.h delete mode 100644 pexpert/pexpert/ppc/dbdma.h diff --git a/EXTERNAL_HEADERS/bsd/i386/ansi.h b/EXTERNAL_HEADERS/bsd/i386/ansi.h index 8bb3e31af..c9a818376 100644 --- a/EXTERNAL_HEADERS/bsd/i386/ansi.h +++ b/EXTERNAL_HEADERS/bsd/i386/ansi.h @@ -80,6 +80,7 @@ #define _BSD_SSIZE_T_ int /* byte count or error */ #define _BSD_TIME_T_ long /* time() */ #define _BSD_VA_LIST_ void * /* va_list */ +#define _BSD_SOCKLEN_T_ int32_t /* socklen_t (duh) */ /* * Runes (wchar_t) is declared to be an ``int'' instead of the more natural diff --git a/EXTERNAL_HEADERS/bsd/ppc/ansi.h b/EXTERNAL_HEADERS/bsd/ppc/ansi.h index e9c40a6b1..02afc9472 100644 --- a/EXTERNAL_HEADERS/bsd/ppc/ansi.h +++ b/EXTERNAL_HEADERS/bsd/ppc/ansi.h @@ -80,6 +80,7 @@ #define _BSD_SSIZE_T_ int /* byte count or error */ #define _BSD_TIME_T_ long /* time() */ #define _BSD_VA_LIST_ char * /* va_list */ +#define _BSD_SOCKLEN_T_ int32_t /* socklen_t (duh) */ /* * Runes (wchar_t) is declared to be an ``int'' instead of the more natural diff --git a/EXTERNAL_HEADERS/mach-o/kld.h b/EXTERNAL_HEADERS/mach-o/kld.h index 02e7b69e9..269b4d79c 100644 --- a/EXTERNAL_HEADERS/mach-o/kld.h +++ b/EXTERNAL_HEADERS/mach-o/kld.h @@ -33,6 +33,9 @@ * These API's are in libkld. Both kmodload(8) and /mach_kernel should * link with -lkld and then ld(1) will expand -lkld to libkld.dylib or * libkld.a depending on if -dynamic or -static is in effect. + * + * Note: we are using the __DYNAMIC__ flag to indicate user space kernel + * linking and __STATIC__ as a synonym of KERNEL. */ /* @@ -42,7 +45,7 @@ extern void kld_error_vprintf(const char *format, va_list ap); /* - * This two are only in libkld.dylib for use by kmodload(8) (user code compiled + * These two are only in libkld.dylib for use by kmodload(8) (user code compiled * with the default -dynamic). 
*/ #ifdef __DYNAMIC__ @@ -54,6 +57,13 @@ __private_extern__ long kld_load( struct mach_header **header_addr, const char *object_filename, const char *output_filename); + +__private_extern__ long kld_load_from_memory( + struct mach_header **header_addr, + const char *object_name, + char *object_addr, + long object_size, + const char *output_filename); #endif /* __DYNAMIC__ */ /* @@ -69,6 +79,11 @@ __private_extern__ long kld_load_from_memory( long object_size); #endif /* __STATIC__ */ +__private_extern__ long kld_load_basefile_from_memory( + const char *base_filename, + char *base_addr, + long base_size); + __private_extern__ long kld_unload_all( long deallocate_sets); @@ -82,4 +97,10 @@ __private_extern__ long kld_forget_symbol( __private_extern__ void kld_address_func( unsigned long (*func)(unsigned long size, unsigned long headers_size)); +#define KLD_STRIP_ALL 0x00000000 +#define KLD_STRIP_NONE 0x00000001 + +__private_extern__ void kld_set_link_options( + unsigned long link_options); + #endif /* _MACHO_KLD_H_ */ diff --git a/EXTERNAL_HEADERS/mach-o/loader.h b/EXTERNAL_HEADERS/mach-o/loader.h index f4d37ef7d..44f2a2b23 100644 --- a/EXTERNAL_HEADERS/mach-o/loader.h +++ b/EXTERNAL_HEADERS/mach-o/loader.h @@ -723,4 +723,4 @@ struct fvmfile_command { unsigned long header_addr; /* files virtual address */ }; -#endif _MACHO_LOADER_H_ +#endif /* _MACHO_LOADER_H_ */ diff --git a/Makefile b/Makefile index d843488d3..d2938322b 100644 --- a/Makefile +++ b/Makefile @@ -31,6 +31,8 @@ ALL_SUBDIRS = \ libkern \ libsa +CONFIG_SUBDIRS = config + INSTINC_SUBDIRS = $(ALL_SUBDIRS) INSTINC_SUBDIRS_PPC = $(INSTINC_SUBDIRS) @@ -45,6 +47,7 @@ EXPINC_SUBDIRS_I386 = $(EXPINC_SUBDIRS) COMP_SUBDIRS = $(ALL_SUBDIRS) + INST_SUBDIRS = \ libkern \ libsa \ diff --git a/bsd/conf/MASTER b/bsd/conf/MASTER index aa6d7ff53..10ac705f2 100644 --- a/bsd/conf/MASTER +++ b/bsd/conf/MASTER @@ -144,6 +144,7 @@ options TCPDEBUG # TCP debug # options RANDOM_IP_ID # random (not sequential) ip ids # options TCP_DROP_SYNFIN # Drop TCP packets with SYN+FIN set # options ICMP_BANDLIM # ICMP bandwidth limiting sysctl +options AUDIT # Security event auditing # # @@ -152,6 +153,7 @@ options ICMP_BANDLIM # ICMP bandwidth limiting sysctl options COMPAT_43 # 4.3 BSD compatibility # options DIAGNOSTIC # diagnostics # options KTRACE # ktrace support # +options GPROF # build profiling # # # 4.4 filesystems @@ -246,6 +248,10 @@ pseudo-device pty 128 init pty_init # vnode device pseudo-device vndevice 4 init vndevice_init +# +# memory device +pseudo-device mdevdevice 1 init mdevinit + # # # packet filter device diff --git a/bsd/conf/MASTER.ppc b/bsd/conf/MASTER.ppc index 46cb6811a..897190b4f 100644 --- a/bsd/conf/MASTER.ppc +++ b/bsd/conf/MASTER.ppc @@ -47,7 +47,7 @@ # # RELEASE = [ppc mach medium vol pst gdb simple_clock kernstack nfsclient nfsserver quota fifo fdesc union ffs cd9660 compat_43 revfs noprofiling hfs volfs devfs synthfs netat mrouting ipdivert ipfirewall ktrace inet6 ipsec tcpdrop_synfin gif stf] # RELEASE_TRACE = [RELEASE kdebug] -# PROFILE = [ppc mach medium vol pst gdb debug simple_clock kernstack nfsclient nfsserver quota fifo fdesc union ffs cd9660 compat_43 revfs profile hfs volfs devfs synthfs netat mrouting ipdivert ipfirewall ktrace inet6 ipsec tcpdrop_synfin gif stf] +# PROFILE = [ppc mach medium vol pst gdb simple_clock kernstack nfsclient nfsserver quota fifo fdesc union ffs cd9660 compat_43 revfs profile hfs volfs devfs synthfs netat mrouting ipdivert ipfirewall ktrace inet6 ipsec tcpdrop_synfin gif stf] # DEBUG = [ppc 
mach medium vol pst gdb debug simple_clock kernstack nfsclient nfsserver quota fifo fdesc union ffs cd9660 compat_43 revfs profiling hfs volfs devfs synthfs netat mrouting mach_assert ipdivert ipfirewall ktrace inet6 ipsec tcpdrop_synfin gif stf] # DEBUG_TRACE = [DEBUG kdebug] # diff --git a/bsd/conf/Makefile b/bsd/conf/Makefile index fee0600ae..3fbb79f00 100644 --- a/bsd/conf/Makefile +++ b/bsd/conf/Makefile @@ -18,7 +18,7 @@ ifndef BSD_KERNEL_CONFIG export BSD_KERNEL_CONFIG = $(KERNEL_CONFIG) endif -COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) +export COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf: make build_setup @@ -53,6 +53,7 @@ do_all: do_setup_conf SOURCE=$${next_source} \ TARGET=$(TARGET) \ INCL_MAKEDEP=FALSE \ + KERNEL_CONFIG=$(BSD_KERNEL_CONFIG) \ build_all; \ echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(BSD_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; diff --git a/bsd/conf/Makefile.template b/bsd/conf/Makefile.template index c209ca49e..cd0355d62 100644 --- a/bsd/conf/Makefile.template +++ b/bsd/conf/Makefile.template @@ -69,6 +69,13 @@ COPYRIGHT_FILES = \ %ORDERED %MACHDEP +# +# This rule insures that the subr_prof.c does NOT get compiled with +# profiling. It implements mcount() and profiling it leads to recursion. +# + +subr_prof.o_CFLAGS_RM = -pg + # # OBJSDEPS is the set of files (defined in the machine dependent # template if necessary) which all objects depend on (such as an @@ -84,7 +91,7 @@ LDOBJS = $(OBJS) $(COMPONENT).o: $(LDOBJS) @echo "[ creating $(COMPONENT).o ]" $(RM) $(RMFLAGS) vers.c - $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/newvers \ + $(COMPOBJROOT)/newvers \ `$(CAT) ${VERSION_FILES}` ${COPYRIGHT_FILES} ${KCC} $(CFLAGS) $(INCLUDES) -c vers.c @echo [ updating $(COMPONENT).o ${BSD_KERNEL_CONFIG} ] diff --git a/bsd/conf/files b/bsd/conf/files index 817d99f42..f69f45d7e 100644 --- a/bsd/conf/files +++ b/bsd/conf/files @@ -60,6 +60,7 @@ OPTIONS/diagnostic optional diagnostic OPTIONS/ktrace optional ktrace OPTIONS/profiling optional profiling OPTIONS/vndevice optional vndevice +OPTIONS/audit optional audit # # Network options @@ -115,6 +116,9 @@ bsd/dev/random/YarrowCoreLib/src/comp.c standard bsd/dev/random/YarrowCoreLib/src/prng.c standard bsd/dev/random/YarrowCoreLib/src/sha1mod.c standard bsd/dev/random/YarrowCoreLib/src/yarrowUtils.c standard + +bsd/dev/memdev.c standard + bsd/dev/vn/vn.c optional vndevice bsd/dev/vn/shadow.c optional vndevice @@ -289,6 +293,7 @@ bsd/kern/md5c.c optional crypto bsd/crypto/sha1.c optional crypto bsd/crypto/sha2/sha2.c optional crypto bsd/crypto/des/des_ecb.c optional crypto +bsd/crypto/des/des_enc.c optional crypto bsd/crypto/des/des_setkey.c optional crypto bsd/crypto/blowfish/bf_enc.c optional crypto bsd/crypto/blowfish/bf_skey.c optional crypto @@ -380,6 +385,7 @@ bsd/nfs/nfs_subs.c optional nfsclient nfsserver bsd/nfs/nfs_syscalls.c optional nfsclient nfsserver bsd/nfs/nfs_vfsops.c optional nfsclient bsd/nfs/nfs_vnops.c optional nfsclient +bsd/nfs/nfs_lock.c optional nfsclient bsd/kern/netboot.c optional nfsclient @@ -392,6 +398,7 @@ bsd/ufs/ffs/ffs_vfsops.c standard bsd/ufs/ffs/ffs_vnops.c standard bsd/ufs/mfs/mfs_vfsops.c optional mfs bsd/ufs/mfs/mfs_vnops.c optional mfs +bsd/ufs/ufs/ufs_attrlist.c standard bsd/ufs/ufs/ufs_bmap.c standard bsd/ufs/ufs/ufs_byte_order.c optional rev_endian_fs bsd/ufs/ufs/ufs_ihash.c standard @@ -410,9 +417,11 @@ bsd/hfs/hfs_cnode.c optional hfs 
 bsd/hfs/hfs_encodinghint.c optional hfs
 bsd/hfs/hfs_encodings.c optional hfs
 bsd/hfs/hfs_endian.c optional hfs
+bsd/hfs/hfs_hotfiles.c optional hfs
 bsd/hfs/hfs_link.c optional hfs
 bsd/hfs/hfs_lockf.c optional hfs
 bsd/hfs/hfs_lookup.c optional hfs
+bsd/hfs/hfs_notification.c optional hfs
 bsd/hfs/hfs_quota.c optional quota
 bsd/hfs/hfs_readwrite.c optional hfs
 bsd/hfs/hfs_search.c optional hfs
@@ -425,6 +434,7 @@ bsd/hfs/hfscommon/BTree/BTree.c optional hfs
 bsd/hfs/hfscommon/BTree/BTreeAllocate.c optional hfs
 bsd/hfs/hfscommon/BTree/BTreeMiscOps.c optional hfs
 bsd/hfs/hfscommon/BTree/BTreeNodeOps.c optional hfs
+bsd/hfs/hfscommon/BTree/BTreeNodeReserve.c optional hfs
 bsd/hfs/hfscommon/BTree/BTreeScanner.c optional hfs
 bsd/hfs/hfscommon/BTree/BTreeTreeOps.c optional hfs
 bsd/hfs/hfscommon/Catalog/Catalog.c optional hfs
@@ -440,6 +450,11 @@ bsd/kern/bsd_init.c standard
 bsd/kern/init_sysent.c standard
 bsd/kern/kdebug.c standard
 bsd/kern/kern_acct.c standard
+bsd/kern/kern_aio.c standard
+bsd/kern/kern_audit.c standard
+bsd/kern/kern_bsm_token.c standard
+bsd/kern/kern_bsm_audit.c standard
+bsd/kern/kern_bsm_klib.c standard
 bsd/kern/kern_clock.c standard
 bsd/kern/kern_core.c standard
 bsd/kern/kern_symfile.c standard
diff --git a/bsd/conf/files.i386 b/bsd/conf/files.i386
index 80015fc5f..73da06e97 100644
--- a/bsd/conf/files.i386
+++ b/bsd/conf/files.i386
@@ -11,6 +11,7 @@ bsd/dev/i386/kern_machdep.c standard
 bsd/dev/i386/memmove.c standard
 bsd/dev/i386/stubs.c standard
 bsd/dev/i386/lock_stubs.c standard
+bsd/dev/i386/sysctl.c standard
 bsd/dev/i386/unix_signal.c standard
 bsd/dev/i386/unix_startup.c standard
diff --git a/bsd/conf/files.ppc b/bsd/conf/files.ppc
index 1cfa5685c..d1b636110 100644
--- a/bsd/conf/files.ppc
+++ b/bsd/conf/files.ppc
@@ -17,6 +17,10 @@ bsd/dev/ppc/stubs.c standard
 bsd/dev/ppc/systemcalls.c standard
 bsd/dev/ppc/km.c standard
 bsd/dev/ppc/xsumas.s standard
+bsd/dev/ppc/sysctl.c standard
+
+bsd/dev/ppc/chud/chud_bsd_callback.c standard
+bsd/dev/ppc/chud/chud_process.c standard
 bsd/kern/bsd_stubs.c standard
diff --git a/bsd/conf/param.c b/bsd/conf/param.c
index 6594795be..1010930ab 100644
--- a/bsd/conf/param.c
+++ b/bsd/conf/param.c
@@ -77,16 +77,19 @@
 #include
 #include
 #include
+#include
 struct timezone tz = { TIMEZONE, PST };
 #define NPROC (20 + 16 * MAXUSERS)
+#define HNPROC (20 + 64 * MAXUSERS)
 int maxproc = NPROC;
+__private_extern__ int hard_maxproc = HNPROC; /* hardcoded limit */
 int nprocs = 0; /* XXX */
 #define NTEXT (80 + NPROC / 8) /* actually the object cache */
 #define NVNODE (NPROC + NTEXT + 300)
-int desiredvnodes = NVNODE + 350;
+int desiredvnodes = NVNODE + 700;
 #define MAXFILES (OPEN_MAX + 2048)
 int maxfiles = MAXFILES;
@@ -98,6 +101,16 @@ int nport = NPROC / 2;
 #define MAXSOCKETS NMBCLUSTERS
 int maxsockets = MAXSOCKETS;
+/*
+ * async IO (aio) configurable limits
+ */
+#define AIO_MAX 90 /* system wide limit of async IO requests */
+#define AIO_PROCESS_MAX AIO_LISTIO_MAX /* process limit of async IO requests */
+#define AIO_THREAD_COUNT 4 /* number of async IO worker threads created */
+int aio_max_requests = AIO_MAX;
+int aio_max_requests_per_process = AIO_PROCESS_MAX;
+int aio_worker_threads = AIO_THREAD_COUNT;
+
 /*
 * These have to be allocated somewhere; allocating
 * them here forces loader errors if this file is omitted
diff --git a/bsd/conf/version.major b/bsd/conf/version.major
index 1e8b31496..7f8f011eb 100644
--- a/bsd/conf/version.major
+++ b/bsd/conf/version.major
@@ -1 +1 @@
-6
+7
diff --git a/bsd/conf/version.minor b/bsd/conf/version.minor
index
45a4fb75d..573541ac9 100644 --- a/bsd/conf/version.minor +++ b/bsd/conf/version.minor @@ -1 +1 @@ -8 +0 diff --git a/bsd/conf/version.variant b/bsd/conf/version.variant index e69de29bb..573541ac9 100644 --- a/bsd/conf/version.variant +++ b/bsd/conf/version.variant @@ -0,0 +1 @@ +0 diff --git a/bsd/crypto/blowfish/bf_enc.c b/bsd/crypto/blowfish/bf_enc.c index afdc9cf4c..4a31e2ee9 100644 --- a/bsd/crypto/blowfish/bf_enc.c +++ b/bsd/crypto/blowfish/bf_enc.c @@ -1,12 +1,12 @@ -/* $FreeBSD: src/sys/crypto/blowfish/bf_enc.c,v 1.1.2.2 2001/07/03 11:01:28 ume Exp $ */ -/* $KAME: bf_enc.c,v 1.5 2000/09/18 21:21:19 itojun Exp $ */ +/* $FreeBSD: src/sys/crypto/blowfish/bf_enc.c,v 1.1.2.3 2002/03/26 10:12:23 ume Exp $ */ +/* $KAME: bf_enc.c,v 1.7 2002/02/27 01:33:59 itojun Exp $ */ /* crypto/bf/bf_enc.c */ -/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au) +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written - * by Eric Young (eay@mincom.oz.au). + * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as @@ -14,7 +14,7 @@ * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@mincom.oz.au). + * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. @@ -34,12 +34,12 @@ * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by - * Eric Young (eay@mincom.oz.au)" + * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@mincom.oz.au)" + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE @@ -75,10 +75,9 @@ to modify the code. 
/* XXX "data" is host endian */ void -BF_encrypt(data, key, encrypt) +BF_encrypt(data, key) BF_LONG *data; BF_KEY *key; - int encrypt; { register BF_LONG l, r, *p, *s; @@ -87,57 +86,73 @@ BF_encrypt(data, key, encrypt) l = data[0]; r = data[1]; - if (encrypt) { - l^=p[0]; - BF_ENC(r, l, s, p[ 1]); - BF_ENC(l, r, s, p[ 2]); - BF_ENC(r, l, s, p[ 3]); - BF_ENC(l, r, s, p[ 4]); - BF_ENC(r, l, s, p[ 5]); - BF_ENC(l, r, s, p[ 6]); - BF_ENC(r, l, s, p[ 7]); - BF_ENC(l, r, s, p[ 8]); - BF_ENC(r, l, s, p[ 9]); - BF_ENC(l, r, s, p[10]); - BF_ENC(r, l, s, p[11]); - BF_ENC(l, r, s, p[12]); - BF_ENC(r, l, s, p[13]); - BF_ENC(l, r, s, p[14]); - BF_ENC(r, l, s, p[15]); - BF_ENC(l, r, s, p[16]); + l^=p[0]; + BF_ENC(r, l, s, p[ 1]); + BF_ENC(l, r, s, p[ 2]); + BF_ENC(r, l, s, p[ 3]); + BF_ENC(l, r, s, p[ 4]); + BF_ENC(r, l, s, p[ 5]); + BF_ENC(l, r, s, p[ 6]); + BF_ENC(r, l, s, p[ 7]); + BF_ENC(l, r, s, p[ 8]); + BF_ENC(r, l, s, p[ 9]); + BF_ENC(l, r, s, p[10]); + BF_ENC(r, l, s, p[11]); + BF_ENC(l, r, s, p[12]); + BF_ENC(r, l, s, p[13]); + BF_ENC(l, r, s, p[14]); + BF_ENC(r, l, s, p[15]); + BF_ENC(l, r, s, p[16]); #if BF_ROUNDS == 20 - BF_ENC(r, l, s, p[17]); - BF_ENC(l, r, s, p[18]); - BF_ENC(r, l, s, p[19]); - BF_ENC(l, r, s, p[20]); + BF_ENC(r, l, s, p[17]); + BF_ENC(l, r, s, p[18]); + BF_ENC(r, l, s, p[19]); + BF_ENC(l, r, s, p[20]); #endif - r ^= p[BF_ROUNDS + 1]; - } else { - l ^= p[BF_ROUNDS + 1]; + r ^= p[BF_ROUNDS + 1]; + + data[1] = l & 0xffffffff; + data[0] = r & 0xffffffff; +} + +/* XXX "data" is host endian */ +void +BF_decrypt(data, key) + BF_LONG *data; + BF_KEY *key; +{ + register BF_LONG l, r, *p, *s; + + p = key->P; + s= &key->S[0]; + l = data[0]; + r = data[1]; + + l ^= p[BF_ROUNDS + 1]; #if BF_ROUNDS == 20 - BF_ENC(r, l, s, p[20]); - BF_ENC(l, r, s, p[19]); - BF_ENC(r, l, s, p[18]); - BF_ENC(l, r, s, p[17]); + BF_ENC(r, l, s, p[20]); + BF_ENC(l, r, s, p[19]); + BF_ENC(r, l, s, p[18]); + BF_ENC(l, r, s, p[17]); #endif - BF_ENC(r, l, s, p[16]); - BF_ENC(l, r, s, p[15]); - BF_ENC(r, l, s, p[14]); - BF_ENC(l, r, s, p[13]); - BF_ENC(r, l, s, p[12]); - BF_ENC(l, r, s, p[11]); - BF_ENC(r, l, s, p[10]); - BF_ENC(l, r, s, p[ 9]); - BF_ENC(r, l, s, p[ 8]); - BF_ENC(l, r, s, p[ 7]); - BF_ENC(r, l, s, p[ 6]); - BF_ENC(l, r, s, p[ 5]); - BF_ENC(r, l, s, p[ 4]); - BF_ENC(l, r, s, p[ 3]); - BF_ENC(r, l, s, p[ 2]); - BF_ENC(l, r, s, p[ 1]); - r ^= p[0]; - } + BF_ENC(r, l, s, p[16]); + BF_ENC(l, r, s, p[15]); + BF_ENC(r, l, s, p[14]); + BF_ENC(l, r, s, p[13]); + BF_ENC(r, l, s, p[12]); + BF_ENC(l, r, s, p[11]); + BF_ENC(r, l, s, p[10]); + BF_ENC(l, r, s, p[ 9]); + BF_ENC(r, l, s, p[ 8]); + BF_ENC(l, r, s, p[ 7]); + BF_ENC(r, l, s, p[ 6]); + BF_ENC(l, r, s, p[ 5]); + BF_ENC(r, l, s, p[ 4]); + BF_ENC(l, r, s, p[ 3]); + BF_ENC(r, l, s, p[ 2]); + BF_ENC(l, r, s, p[ 1]); + r ^= p[0]; + data[1] = l & 0xffffffff; data[0] = r & 0xffffffff; } diff --git a/bsd/crypto/blowfish/bf_pi.h b/bsd/crypto/blowfish/bf_pi.h index ae5d7803b..d2f80f0b4 100644 --- a/bsd/crypto/blowfish/bf_pi.h +++ b/bsd/crypto/blowfish/bf_pi.h @@ -59,7 +59,7 @@ * [including the GNU Public Licence.] 
*/ -static BF_KEY bf_init= { +static const BF_KEY bf_init= { { 0x243f6a88L, 0x85a308d3L, 0x13198a2eL, 0x03707344L, 0xa4093822L, 0x299f31d0L, 0x082efa98L, 0xec4e6c89L, diff --git a/bsd/crypto/blowfish/bf_skey.c b/bsd/crypto/blowfish/bf_skey.c index 4dc8e934b..3f64cf95e 100644 --- a/bsd/crypto/blowfish/bf_skey.c +++ b/bsd/crypto/blowfish/bf_skey.c @@ -1,5 +1,5 @@ -/* $FreeBSD: src/sys/crypto/blowfish/bf_skey.c,v 1.1.2.2 2001/07/03 11:01:28 ume Exp $ */ -/* $KAME: bf_skey.c,v 1.5 2000/11/06 13:58:08 itojun Exp $ */ +/* $FreeBSD: src/sys/crypto/blowfish/bf_skey.c,v 1.1.2.3 2002/03/26 10:12:23 ume Exp $ */ +/* $KAME: bf_skey.c,v 1.7 2002/02/27 01:33:59 itojun Exp $ */ /* crypto/bf/bf_skey.c */ /* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au) @@ -76,7 +76,7 @@ BF_set_key(key, len, data) BF_LONG *p, ri, in[2]; unsigned char *d, *end; - memcpy((char *)key, (char *)&bf_init, sizeof(BF_KEY)); + memcpy((char *)key, (const char *)&bf_init, sizeof(BF_KEY)); p = key->P; if (len > ((BF_ROUNDS + 2) * 4)) @@ -106,14 +106,14 @@ BF_set_key(key, len, data) in[0] = 0L; in[1] = 0L; for (i = 0; i < BF_ROUNDS + 2; i += 2) { - BF_encrypt(in, key, BF_ENCRYPT); + BF_encrypt(in, key); p[i ] = in[0]; p[i+1] = in[1]; } p = key->S; for (i = 0; i < 4 * 256; i += 2) { - BF_encrypt(in, key, BF_ENCRYPT); + BF_encrypt(in, key); p[i ] = in[0]; p[i+1] = in[1]; } diff --git a/bsd/crypto/blowfish/blowfish.h b/bsd/crypto/blowfish/blowfish.h index fdfd34121..69b902426 100644 --- a/bsd/crypto/blowfish/blowfish.h +++ b/bsd/crypto/blowfish/blowfish.h @@ -1,5 +1,5 @@ -/* $FreeBSD: src/sys/crypto/blowfish/blowfish.h,v 1.1.2.2 2001/07/03 11:01:28 ume Exp $ */ -/* $KAME: blowfish.h,v 1.10 2000/09/18 21:21:20 itojun Exp $ */ +/* $FreeBSD: src/sys/crypto/blowfish/blowfish.h,v 1.1.2.3 2002/03/26 10:12:23 ume Exp $ */ +/* $KAME: blowfish.h,v 1.12 2002/02/27 01:33:59 itojun Exp $ */ /* crypto/bf/blowfish.h */ /* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au) @@ -81,7 +81,11 @@ typedef struct bf_key_st { } BF_KEY; void BF_set_key __P((BF_KEY *, int, unsigned char *)); -void BF_encrypt __P((BF_LONG *, BF_KEY *, int)); +void BF_encrypt __P((BF_LONG *, BF_KEY *)); +void BF_decrypt __P((BF_LONG *, BF_KEY *)); +void BF_cbc_encrypt(const unsigned char *, unsigned char *, long, + const BF_KEY *, unsigned char *, int); + #ifdef __cplusplus } #endif diff --git a/bsd/crypto/des/des.h b/bsd/crypto/des/des.h index 88a4fdc57..a21b6bfa2 100644 --- a/bsd/crypto/des/des.h +++ b/bsd/crypto/des/des.h @@ -1,5 +1,5 @@ -/* $FreeBSD: src/sys/crypto/des/des.h,v 1.1.2.2 2001/07/03 11:01:31 ume Exp $ */ -/* $KAME: des.h,v 1.7 2000/09/18 20:59:21 itojun Exp $ */ +/* $FreeBSD: src/sys/crypto/des/des.h,v 1.1.2.3 2002/03/26 10:12:24 ume Exp $ */ +/* $KAME: des.h,v 1.8 2001/09/10 04:03:57 itojun Exp $ */ /* lib/des/des.h */ /* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) @@ -61,15 +61,14 @@ extern "C" { typedef unsigned char des_cblock[8]; typedef struct des_ks_struct { - union { - des_cblock _; - /* make sure things are correct size on machines with - * 8 byte longs */ - DES_LONG pad[2]; - } ks; -#undef _ -#define _ ks._ - } des_key_schedule[16]; + union { + des_cblock cblock; + /* make sure things are correct size on machines with + * 8 byte longs */ + DES_LONG deslong[2]; + } ks; + int weak_key; +} des_key_schedule[16]; #define DES_KEY_SZ (sizeof(des_cblock)) #define DES_SCHEDULE_SZ (sizeof(des_key_schedule)) @@ -85,13 +84,32 @@ extern int des_check_key; /* defaults to false */ char *des_options __P((void)); void des_ecb_encrypt __P((des_cblock 
*, des_cblock *, des_key_schedule, int)); -void des_encrypt __P((DES_LONG *, des_key_schedule, int)); + +void des_encrypt1 __P((DES_LONG *, des_key_schedule, int)); void des_encrypt2 __P((DES_LONG *, des_key_schedule, int)); +void des_encrypt3 __P((DES_LONG *, des_key_schedule, des_key_schedule, + des_key_schedule)); +void des_decrypt3 __P((DES_LONG *, des_key_schedule, des_key_schedule, + des_key_schedule)); + +void des_ecb3_encrypt __P((des_cblock *, des_cblock *, des_key_schedule, + des_key_schedule, des_key_schedule, int)); + +void des_ncbc_encrypt __P((const unsigned char *, unsigned char *, long, + des_key_schedule, des_cblock *, int)); + +void des_ede3_cbc_encrypt(const unsigned char *, unsigned char *, long, + des_key_schedule, des_key_schedule, + des_key_schedule, des_cblock *, int); void des_set_odd_parity __P((des_cblock *)); +void des_fixup_key_parity __P((des_cblock *)); int des_is_weak_key __P((des_cblock *)); int des_set_key __P((des_cblock *, des_key_schedule)); int des_key_sched __P((des_cblock *, des_key_schedule)); +int des_set_key_checked __P((des_cblock *, des_key_schedule)); +void des_set_key_unchecked __P((des_cblock *, des_key_schedule)); +int des_check_key_parity __P((des_cblock *)); #ifdef __cplusplus } diff --git a/bsd/crypto/des/des_ecb.c b/bsd/crypto/des/des_ecb.c index 2ff242ebf..cc9c5697b 100644 --- a/bsd/crypto/des/des_ecb.c +++ b/bsd/crypto/des/des_ecb.c @@ -1,8 +1,8 @@ -/* $FreeBSD: src/sys/crypto/des/des_ecb.c,v 1.1.2.2 2001/07/03 11:01:31 ume Exp $ */ -/* $KAME: des_ecb.c,v 1.5 2000/11/06 13:58:08 itojun Exp $ */ +/* $FreeBSD: src/sys/crypto/des/des_ecb.c,v 1.1.2.3 2002/03/26 10:12:24 ume Exp $ */ +/* $KAME: des_ecb.c,v 1.6 2001/09/10 04:03:58 itojun Exp $ */ /* crypto/des/ecb_enc.c */ -/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) +/* Copyright (C) 1995-1998 Eric Young (eay@mincom.oz.au) * All rights reserved. 
* * This file is part of an SSL implementation written @@ -53,182 +53,84 @@ #include #include -char *libdes_version="libdes v 3.24 - 20-Apr-1996 - eay"; -char *DES_version="DES part of SSLeay 0.6.4 30-Aug-1996"; +/* char *libdes_version="libdes v 3.24 - 20-Apr-1996 - eay"; */ /* wrong */ +/* char *DES_version="DES part of SSLeay 0.6.4 30-Aug-1996"; */ + +char *des_options(void) + { + static int init=1; + static char buf[32]; + + if (init) + { + const char *ptr,*unroll,*risc,*size; -char *des_options() - { #ifdef DES_PTR - if (sizeof(DES_LONG) != sizeof(long)) - return("des(ptr,int)"); - else - return("des(ptr,long)"); + ptr="ptr"; #else - if (sizeof(DES_LONG) != sizeof(long)) - return("des(idx,int)"); - else - return("des(idx,long)"); + ptr="idx"; #endif - } - - -void des_ecb_encrypt(input, output, ks, encrypt) -des_cblock (*input); -des_cblock (*output); -des_key_schedule ks; -int encrypt; - { +#if defined(DES_RISC1) || defined(DES_RISC2) +#ifdef DES_RISC1 + risc="risc1"; +#endif +#ifdef DES_RISC2 + risc="risc2"; +#endif +#else + risc="cisc"; +#endif +#ifdef DES_UNROLL + unroll="16"; +#else + unroll="4"; +#endif + if (sizeof(DES_LONG) != sizeof(long)) + size="int"; + else + size="long"; + sprintf(buf,"des(%s,%s,%s,%s)",ptr,risc,unroll,size); + init=0; + } + return(buf); +} +void des_ecb_encrypt(des_cblock *input, des_cblock *output, + des_key_schedule ks, int enc) +{ register DES_LONG l; - register unsigned char *in,*out; DES_LONG ll[2]; + const unsigned char *in=&(*input)[0]; + unsigned char *out = &(*output)[0]; - in=(unsigned char *)input; - out=(unsigned char *)output; c2l(in,l); ll[0]=l; c2l(in,l); ll[1]=l; - des_encrypt(ll,ks,encrypt); + des_encrypt1(ll,ks,enc); l=ll[0]; l2c(l,out); l=ll[1]; l2c(l,out); l=ll[0]=ll[1]=0; - } - -void des_encrypt(data, ks, encrypt) -DES_LONG *data; -des_key_schedule ks; -int encrypt; - { - register DES_LONG l,r,t,u; -#ifdef DES_PTR - register unsigned char *des_SP=(unsigned char *)des_SPtrans; -#endif -#ifdef undef - union fudge { - DES_LONG l; - unsigned short s[2]; - unsigned char c[4]; - } U,T; -#endif - register int i; - register DES_LONG *s; +} - u=data[0]; - r=data[1]; - - IP(u,r); - /* Things have been modified so that the initial rotate is - * done outside the loop. This required the - * des_SPtrans values in sp.h to be rotated 1 bit to the right. - * One perl script later and things have a 5% speed up on a sparc2. - * Thanks to Richard Outerbridge <71755.204@CompuServe.COM> - * for pointing this out. 
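As the des.h and des_ecb.c hunks above show, the single-block primitive is now named des_encrypt1() rather than des_encrypt(), and key loading gains checked and unchecked variants. A single-block ECB round trip against the updated interface (an illustrative sketch, not part of the patch; the header path and key bytes are assumptions, and DES_ENCRYPT/DES_DECRYPT are the usual libdes direction flags):

    #include <crypto/des/des.h>

    static int
    des_ecb_roundtrip_demo(void)
    {
        des_cblock key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };
        des_cblock in = { 'd', 'e', 's', ' ', 'd', 'e', 'm', 'o' };
        des_cblock out, back;
        des_key_schedule ks;

        des_set_odd_parity(&key);
        if (des_set_key_checked(&key, ks) != 0)
            return (-1);                /* parity error or weak key */
        des_ecb_encrypt(&in, &out, ks, DES_ENCRYPT);   /* calls des_encrypt1() */
        des_ecb_encrypt(&out, &back, ks, DES_DECRYPT);
        return (0);                     /* back[] now equals in[] */
    }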
*/ - l=(r<<1)|(r>>31); - r=(u<<1)|(u>>31); - - /* clear the top bits on machines with 8byte longs */ - l&=0xffffffffL; - r&=0xffffffffL; - - s=(DES_LONG *)ks; - /* I don't know if it is worth the effort of loop unrolling the - * inner loop - */ - if (encrypt) - { - for (i=0; i<32; i+=8) - { - D_ENCRYPT(l,r,i+0); /* 1 */ - D_ENCRYPT(r,l,i+2); /* 2 */ - D_ENCRYPT(l,r,i+4); /* 3 */ - D_ENCRYPT(r,l,i+6); /* 4 */ - } - } - else - { - for (i=30; i>0; i-=8) - { - D_ENCRYPT(l,r,i-0); /* 16 */ - D_ENCRYPT(r,l,i-2); /* 15 */ - D_ENCRYPT(l,r,i-4); /* 14 */ - D_ENCRYPT(r,l,i-6); /* 13 */ - } - } - l=(l>>1)|(l<<31); - r=(r>>1)|(r<<31); - /* clear the top bits on machines with 8byte longs */ - l&=0xffffffffL; - r&=0xffffffffL; - - FP(r,l); - data[0]=l; - data[1]=r; - l=r=t=u=0; - } - -void des_encrypt2(data, ks, encrypt) -DES_LONG *data; -des_key_schedule ks; -int encrypt; - { - register DES_LONG l,r,t,u; -#ifdef DES_PTR - register unsigned char *des_SP=(unsigned char *)des_SPtrans; -#endif -#ifdef undef - union fudge { - DES_LONG l; - unsigned short s[2]; - unsigned char c[4]; - } U,T; -#endif - register int i; - register DES_LONG *s; - - u=data[0]; - r=data[1]; - - /* Things have been modified so that the initial rotate is - * done outside the loop. This required the - * des_SPtrans values in sp.h to be rotated 1 bit to the right. - * One perl script later and things have a 5% speed up on a sparc2. - * Thanks to Richard Outerbridge <71755.204@CompuServe.COM> - * for pointing this out. */ - l=(r<<1)|(r>>31); - r=(u<<1)|(u>>31); - - /* clear the top bits on machines with 8byte longs */ - l&=0xffffffffL; - r&=0xffffffffL; - - s=(DES_LONG *)ks; - /* I don't know if it is worth the effort of loop unrolling the - * inner loop */ - if (encrypt) - { - for (i=0; i<32; i+=8) - { - D_ENCRYPT(l,r,i+0); /* 1 */ - D_ENCRYPT(r,l,i+2); /* 2 */ - D_ENCRYPT(l,r,i+4); /* 3 */ - D_ENCRYPT(r,l,i+6); /* 4 */ - } - } +void des_ecb3_encrypt(des_cblock *input, des_cblock *output, + des_key_schedule ks1, des_key_schedule ks2, des_key_schedule ks3, + int enc) +{ + register DES_LONG l0,l1; + DES_LONG ll[2]; + const unsigned char *in = &(*input)[0]; + unsigned char *out = &(*output)[0]; + + c2l(in,l0); + c2l(in,l1); + ll[0]=l0; + ll[1]=l1; + + if (enc) + des_encrypt3(ll,ks1,ks2,ks3); else - { - for (i=30; i>0; i-=8) - { - D_ENCRYPT(l,r,i-0); /* 16 */ - D_ENCRYPT(r,l,i-2); /* 15 */ - D_ENCRYPT(l,r,i-4); /* 14 */ - D_ENCRYPT(r,l,i-6); /* 13 */ - } - } - l=(l>>1)|(l<<31); - r=(r>>1)|(r<<31); - /* clear the top bits on machines with 8byte longs */ - l&=0xffffffffL; - r&=0xffffffffL; + des_decrypt3(ll,ks1,ks2,ks3); - data[0]=l; - data[1]=r; - l=r=t=u=0; - } + l0=ll[0]; + l1=ll[1]; + l2c(l0,out); + l2c(l1,out); +} diff --git a/bsd/crypto/des/des_enc.c b/bsd/crypto/des/des_enc.c new file mode 100644 index 000000000..f5e269eaf --- /dev/null +++ b/bsd/crypto/des/des_enc.c @@ -0,0 +1,294 @@ +/* $KAME: kame/kame/sys/crypto/des/des_enc.c,v 1.1 2001/09/10 04:03:58 itojun Exp $ */ +/* $FreeBSD: src/sys/crypto/des/des_enc.c,v 1.1.2.1 2002/03/26 10:12:24 ume Exp $ */ + +/* crypto/des/des_enc.c */ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. 
The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +#include +#include + +extern const DES_LONG des_SPtrans[8][64]; + +void des_encrypt1(DES_LONG *data, des_key_schedule ks, int enc) +{ + register DES_LONG l,r,t,u; +#ifdef DES_PTR + register const unsigned char *des_SP=(const unsigned char *)des_SPtrans; +#endif +#ifndef DES_UNROLL + register int i; +#endif + register DES_LONG *s; + + r=data[0]; + l=data[1]; + + IP(r,l); + /* Things have been modified so that the initial rotate is + * done outside the loop. This required the + * des_SPtrans values in sp.h to be rotated 1 bit to the right. + * One perl script later and things have a 5% speed up on a sparc2. + * Thanks to Richard Outerbridge <71755.204@CompuServe.COM> + * for pointing this out. 
*/ + /* clear the top bits on machines with 8byte longs */ + /* shift left by 2 */ + r=ROTATE(r,29)&0xffffffffL; + l=ROTATE(l,29)&0xffffffffL; + + s=ks->ks.deslong; + /* I don't know if it is worth the effort of loop unrolling the + * inner loop */ + if (enc) + { +#ifdef DES_UNROLL + D_ENCRYPT(l,r, 0); /* 1 */ + D_ENCRYPT(r,l, 2); /* 2 */ + D_ENCRYPT(l,r, 4); /* 3 */ + D_ENCRYPT(r,l, 6); /* 4 */ + D_ENCRYPT(l,r, 8); /* 5 */ + D_ENCRYPT(r,l,10); /* 6 */ + D_ENCRYPT(l,r,12); /* 7 */ + D_ENCRYPT(r,l,14); /* 8 */ + D_ENCRYPT(l,r,16); /* 9 */ + D_ENCRYPT(r,l,18); /* 10 */ + D_ENCRYPT(l,r,20); /* 11 */ + D_ENCRYPT(r,l,22); /* 12 */ + D_ENCRYPT(l,r,24); /* 13 */ + D_ENCRYPT(r,l,26); /* 14 */ + D_ENCRYPT(l,r,28); /* 15 */ + D_ENCRYPT(r,l,30); /* 16 */ +#else + for (i=0; i<32; i+=8) + { + D_ENCRYPT(l,r,i+0); /* 1 */ + D_ENCRYPT(r,l,i+2); /* 2 */ + D_ENCRYPT(l,r,i+4); /* 3 */ + D_ENCRYPT(r,l,i+6); /* 4 */ + } +#endif + } + else + { +#ifdef DES_UNROLL + D_ENCRYPT(l,r,30); /* 16 */ + D_ENCRYPT(r,l,28); /* 15 */ + D_ENCRYPT(l,r,26); /* 14 */ + D_ENCRYPT(r,l,24); /* 13 */ + D_ENCRYPT(l,r,22); /* 12 */ + D_ENCRYPT(r,l,20); /* 11 */ + D_ENCRYPT(l,r,18); /* 10 */ + D_ENCRYPT(r,l,16); /* 9 */ + D_ENCRYPT(l,r,14); /* 8 */ + D_ENCRYPT(r,l,12); /* 7 */ + D_ENCRYPT(l,r,10); /* 6 */ + D_ENCRYPT(r,l, 8); /* 5 */ + D_ENCRYPT(l,r, 6); /* 4 */ + D_ENCRYPT(r,l, 4); /* 3 */ + D_ENCRYPT(l,r, 2); /* 2 */ + D_ENCRYPT(r,l, 0); /* 1 */ +#else + for (i=30; i>0; i-=8) + { + D_ENCRYPT(l,r,i-0); /* 16 */ + D_ENCRYPT(r,l,i-2); /* 15 */ + D_ENCRYPT(l,r,i-4); /* 14 */ + D_ENCRYPT(r,l,i-6); /* 13 */ + } +#endif + } + + /* rotate and clear the top bits on machines with 8byte longs */ + l=ROTATE(l,3)&0xffffffffL; + r=ROTATE(r,3)&0xffffffffL; + + FP(r,l); + data[0]=l; + data[1]=r; + l=r=t=u=0; +} + +void des_encrypt2(DES_LONG *data, des_key_schedule ks, int enc) +{ + register DES_LONG l,r,t,u; +#ifdef DES_PTR + register const unsigned char *des_SP=(const unsigned char *)des_SPtrans; +#endif +#ifndef DES_UNROLL + register int i; +#endif + register DES_LONG *s; + + r=data[0]; + l=data[1]; + + /* Things have been modified so that the initial rotate is + * done outside the loop. This required the + * des_SPtrans values in sp.h to be rotated 1 bit to the right. + * One perl script later and things have a 5% speed up on a sparc2. + * Thanks to Richard Outerbridge <71755.204@CompuServe.COM> + * for pointing this out. 
*/ + /* clear the top bits on machines with 8byte longs */ + r=ROTATE(r,29)&0xffffffffL; + l=ROTATE(l,29)&0xffffffffL; + + s=ks->ks.deslong; + /* I don't know if it is worth the effort of loop unrolling the + * inner loop */ + if (enc) + { +#ifdef DES_UNROLL + D_ENCRYPT(l,r, 0); /* 1 */ + D_ENCRYPT(r,l, 2); /* 2 */ + D_ENCRYPT(l,r, 4); /* 3 */ + D_ENCRYPT(r,l, 6); /* 4 */ + D_ENCRYPT(l,r, 8); /* 5 */ + D_ENCRYPT(r,l,10); /* 6 */ + D_ENCRYPT(l,r,12); /* 7 */ + D_ENCRYPT(r,l,14); /* 8 */ + D_ENCRYPT(l,r,16); /* 9 */ + D_ENCRYPT(r,l,18); /* 10 */ + D_ENCRYPT(l,r,20); /* 11 */ + D_ENCRYPT(r,l,22); /* 12 */ + D_ENCRYPT(l,r,24); /* 13 */ + D_ENCRYPT(r,l,26); /* 14 */ + D_ENCRYPT(l,r,28); /* 15 */ + D_ENCRYPT(r,l,30); /* 16 */ +#else + for (i=0; i<32; i+=8) + { + D_ENCRYPT(l,r,i+0); /* 1 */ + D_ENCRYPT(r,l,i+2); /* 2 */ + D_ENCRYPT(l,r,i+4); /* 3 */ + D_ENCRYPT(r,l,i+6); /* 4 */ + } +#endif + } + else + { +#ifdef DES_UNROLL + D_ENCRYPT(l,r,30); /* 16 */ + D_ENCRYPT(r,l,28); /* 15 */ + D_ENCRYPT(l,r,26); /* 14 */ + D_ENCRYPT(r,l,24); /* 13 */ + D_ENCRYPT(l,r,22); /* 12 */ + D_ENCRYPT(r,l,20); /* 11 */ + D_ENCRYPT(l,r,18); /* 10 */ + D_ENCRYPT(r,l,16); /* 9 */ + D_ENCRYPT(l,r,14); /* 8 */ + D_ENCRYPT(r,l,12); /* 7 */ + D_ENCRYPT(l,r,10); /* 6 */ + D_ENCRYPT(r,l, 8); /* 5 */ + D_ENCRYPT(l,r, 6); /* 4 */ + D_ENCRYPT(r,l, 4); /* 3 */ + D_ENCRYPT(l,r, 2); /* 2 */ + D_ENCRYPT(r,l, 0); /* 1 */ +#else + for (i=30; i>0; i-=8) + { + D_ENCRYPT(l,r,i-0); /* 16 */ + D_ENCRYPT(r,l,i-2); /* 15 */ + D_ENCRYPT(l,r,i-4); /* 14 */ + D_ENCRYPT(r,l,i-6); /* 13 */ + } +#endif + } + /* rotate and clear the top bits on machines with 8byte longs */ + data[0]=ROTATE(l,3)&0xffffffffL; + data[1]=ROTATE(r,3)&0xffffffffL; + l=r=t=u=0; +} + +void des_encrypt3(DES_LONG *data, des_key_schedule ks1, des_key_schedule ks2, + des_key_schedule ks3) +{ + register DES_LONG l,r; + + l=data[0]; + r=data[1]; + IP(l,r); + data[0]=l; + data[1]=r; + des_encrypt2((DES_LONG *)data,ks1,DES_ENCRYPT); + des_encrypt2((DES_LONG *)data,ks2,DES_DECRYPT); + des_encrypt2((DES_LONG *)data,ks3,DES_ENCRYPT); + l=data[0]; + r=data[1]; + FP(r,l); + data[0]=l; + data[1]=r; +} + +void des_decrypt3(DES_LONG *data, des_key_schedule ks1, des_key_schedule ks2, + des_key_schedule ks3) +{ + register DES_LONG l,r; + + l=data[0]; + r=data[1]; + IP(l,r); + data[0]=l; + data[1]=r; + des_encrypt2((DES_LONG *)data,ks3,DES_DECRYPT); + des_encrypt2((DES_LONG *)data,ks2,DES_ENCRYPT); + des_encrypt2((DES_LONG *)data,ks1,DES_DECRYPT); + l=data[0]; + r=data[1]; + FP(r,l); + data[0]=l; + data[1]=r; +} diff --git a/bsd/crypto/des/des_locl.h b/bsd/crypto/des/des_locl.h index 2f8ed9275..e894cb2f5 100644 --- a/bsd/crypto/des/des_locl.h +++ b/bsd/crypto/des/des_locl.h @@ -1,8 +1,8 @@ -/* $FreeBSD: src/sys/crypto/des/des_locl.h,v 1.2.2.2 2001/07/03 11:01:31 ume Exp $ */ -/* $KAME: des_locl.h,v 1.6 2000/11/06 13:58:09 itojun Exp $ */ +/* $FreeBSD: src/sys/crypto/des/des_locl.h,v 1.2.2.3 2002/03/26 10:12:25 ume Exp $ */ +/* $KAME: des_locl.h,v 1.7 2001/09/10 04:03:58 itojun Exp $ */ -/* lib/des/des_locl.h */ -/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) +/* crypto/des/des_locl.h */ +/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au) * All rights reserved. * * This file is part of an SSL implementation written @@ -47,13 +47,6 @@ * copied and put under another distribution licence * [including the GNU Public Licence.] 
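The des_encrypt3()/des_decrypt3() routines added above run three des_encrypt2() passes (encrypt-decrypt-encrypt, and the reverse for decryption) inside a single IP/FP pair, which is what the des_ecb3_encrypt() wrapper earlier in this patch builds on. A hedged sketch of two-key triple-DES ECB on top of these routines (not part of the patch; function placement, header path, and key bytes are illustrative assumptions):

    #include <crypto/des/des.h>

    static void
    des3_ecb_demo(void)
    {
        des_cblock k1 = { 1, 2, 3, 4, 5, 6, 7, 8 };
        des_cblock k2 = { 8, 7, 6, 5, 4, 3, 2, 1 };
        des_cblock in = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' };
        des_cblock out, back;
        des_key_schedule ks1, ks2;

        des_set_key_unchecked(&k1, ks1);
        des_set_key_unchecked(&k2, ks2);
        /* two-key EDE: the first schedule doubles as the third */
        des_ecb3_encrypt(&in, &out, ks1, ks2, ks1, DES_ENCRYPT);
        des_ecb3_encrypt(&out, &back, ks1, ks2, ks1, DES_DECRYPT);
        /* back[] now equals in[] */
    }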
*/ -/* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING - * - * Always modify des_locl.org since des_locl.h is automatically generated from - * it during SSLeay configuration. - * - * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING - */ #ifndef HEADER_DES_LOCL_H #define HEADER_DES_LOCL_H @@ -130,6 +123,11 @@ #define ROTATE(a,n) (((a)>>(n))+((a)<<(32-(n)))) +#define LOAD_DATA_tmp(a,b,c,d,e,f) LOAD_DATA(a,b,c,d,e,f,g) +#define LOAD_DATA(R,S,u,t,E0,E1,tmp) \ + u=R^s[S ]; \ + t=R^s[S+1] + /* The changes to this macro may help or hinder, depending on the * compiler and the achitecture. gcc2 always seems to do well :-). * Inspired by Dana How @@ -138,49 +136,170 @@ * bytes, probably an issue of accessing non-word aligned objects :-( */ #ifdef DES_PTR -#define D_ENCRYPT(L,R,S) { \ - u=((R^s[S ])<<2); \ - t= R^s[S+1]; \ - t=ROTATE(t,2); \ - L^= (\ - *(DES_LONG *)((unsigned char *)des_SP+0x100+((t )&0xfc))+ \ - *(DES_LONG *)((unsigned char *)des_SP+0x300+((t>> 8)&0xfc))+ \ - *(DES_LONG *)((unsigned char *)des_SP+0x500+((t>>16)&0xfc))+ \ - *(DES_LONG *)((unsigned char *)des_SP+0x700+((t>>24)&0xfc))+ \ - *(DES_LONG *)((unsigned char *)des_SP +((u )&0xfc))+ \ - *(DES_LONG *)((unsigned char *)des_SP+0x200+((u>> 8)&0xfc))+ \ - *(DES_LONG *)((unsigned char *)des_SP+0x400+((u>>16)&0xfc))+ \ - *(DES_LONG *)((unsigned char *)des_SP+0x600+((u>>24)&0xfc))); } +/* It recently occurred to me that 0^0^0^0^0^0^0 == 0, so there + * is no reason to not xor all the sub items together. This potentially + * saves a register since things can be xored directly into L */ + +#if defined(DES_RISC1) || defined(DES_RISC2) +#ifdef DES_RISC1 +#define D_ENCRYPT(LL,R,S) { \ + unsigned int u1,u2,u3; \ + LOAD_DATA(R,S,u,t,E0,E1,u1); \ + u2=(int)u>>8L; \ + u1=(int)u&0xfc; \ + u2&=0xfc; \ + t=ROTATE(t,4); \ + u>>=16L; \ + LL^= *(const DES_LONG *)(des_SP +u1); \ + LL^= *(const DES_LONG *)(des_SP+0x200+u2); \ + u3=(int)(u>>8L); \ + u1=(int)u&0xfc; \ + u3&=0xfc; \ + LL^= *(const DES_LONG *)(des_SP+0x400+u1); \ + LL^= *(const DES_LONG *)(des_SP+0x600+u3); \ + u2=(int)t>>8L; \ + u1=(int)t&0xfc; \ + u2&=0xfc; \ + t>>=16L; \ + LL^= *(const DES_LONG *)(des_SP+0x100+u1); \ + LL^= *(const DES_LONG *)(des_SP+0x300+u2); \ + u3=(int)t>>8L; \ + u1=(int)t&0xfc; \ + u3&=0xfc; \ + LL^= *(const DES_LONG *)(des_SP+0x500+u1); \ + LL^= *(const DES_LONG *)(des_SP+0x700+u3); } +#endif /* DES_RISC1 */ +#ifdef DES_RISC2 +#define D_ENCRYPT(LL,R,S) { \ + unsigned int u1,u2,s1,s2; \ + LOAD_DATA(R,S,u,t,E0,E1,u1); \ + u2=(int)u>>8L; \ + u1=(int)u&0xfc; \ + u2&=0xfc; \ + t=ROTATE(t,4); \ + LL^= *(const DES_LONG *)(des_SP +u1); \ + LL^= *(const DES_LONG *)(des_SP+0x200+u2); \ + s1=(int)(u>>16L); \ + s2=(int)(u>>24L); \ + s1&=0xfc; \ + s2&=0xfc; \ + LL^= *(const DES_LONG *)(des_SP+0x400+s1); \ + LL^= *(const DES_LONG *)(des_SP+0x600+s2); \ + u2=(int)t>>8L; \ + u1=(int)t&0xfc; \ + u2&=0xfc; \ + LL^= *(const DES_LONG *)(des_SP+0x100+u1); \ + LL^= *(const DES_LONG *)(des_SP+0x300+u2); \ + s1=(int)(t>>16L); \ + s2=(int)(t>>24L); \ + s1&=0xfc; \ + s2&=0xfc; \ + LL^= *(const DES_LONG *)(des_SP+0x400+s1); \ + LL^= *(const DES_LONG *)(des_SP+0x600+s2); \ + u2=(int)t>>8L; \ + u1=(int)t&0xfc; \ + u2&=0xfc; \ + LL^= *(const DES_LONG *)(des_SP+0x100+u1); \ + LL^= *(const DES_LONG *)(des_SP+0x300+u2); \ + s1=(int)(t>>16L); \ + s2=(int)(t>>24L); \ + s1&=0xfc; \ + s2&=0xfc; \ + LL^= *(const DES_LONG *)(des_SP+0x500+s1); \ + LL^= *(const DES_LONG *)(des_SP+0x700+s2); } +#endif /* DES_RISC2 */ +#else /* DES_RISC1 || DES_RISC2 */ 
+#define D_ENCRYPT(LL,R,S) { \ + LOAD_DATA_tmp(R,S,u,t,E0,E1); \ + t=ROTATE(t,4); \ + LL^= \ + *(const DES_LONG *)(des_SP +((u )&0xfc))^ \ + *(const DES_LONG *)(des_SP+0x200+((u>> 8L)&0xfc))^ \ + *(const DES_LONG *)(des_SP+0x400+((u>>16L)&0xfc))^ \ + *(const DES_LONG *)(des_SP+0x600+((u>>24L)&0xfc))^ \ + *(const DES_LONG *)(des_SP+0x100+((t )&0xfc))^ \ + *(const DES_LONG *)(des_SP+0x300+((t>> 8L)&0xfc))^ \ + *(const DES_LONG *)(des_SP+0x500+((t>>16L)&0xfc))^ \ + *(const DES_LONG *)(des_SP+0x700+((t>>24L)&0xfc)); } +#endif /* DES_RISC1 || DES_RISC2 */ #else /* original version */ -#ifdef undef -#define D_ENCRYPT(L,R,S) \ - U.l=R^s[S+1]; \ - T.s[0]=((U.s[0]>>4)|(U.s[1]<<12))&0x3f3f; \ - T.s[1]=((U.s[1]>>4)|(U.s[0]<<12))&0x3f3f; \ - U.l=(R^s[S ])&0x3f3f3f3fL; \ - L^= des_SPtrans[1][(T.c[0])]| \ - des_SPtrans[3][(T.c[1])]| \ - des_SPtrans[5][(T.c[2])]| \ - des_SPtrans[7][(T.c[3])]| \ - des_SPtrans[0][(U.c[0])]| \ - des_SPtrans[2][(U.c[1])]| \ - des_SPtrans[4][(U.c[2])]| \ - des_SPtrans[6][(U.c[3])]; -#else -#define D_ENCRYPT(Q,R,S) {\ - u=(R^s[S ]); \ - t=R^s[S+1]; \ + +#if defined(DES_RISC1) || defined(DES_RISC2) +#ifdef DES_RISC1 +#define D_ENCRYPT(LL,R,S) {\ + unsigned int u1,u2,u3; \ + LOAD_DATA(R,S,u,t,E0,E1,u1); \ + u>>=2L; \ + t=ROTATE(t,6); \ + u2=(int)u>>8L; \ + u1=(int)u&0x3f; \ + u2&=0x3f; \ + u>>=16L; \ + LL^=des_SPtrans[0][u1]; \ + LL^=des_SPtrans[2][u2]; \ + u3=(int)u>>8L; \ + u1=(int)u&0x3f; \ + u3&=0x3f; \ + LL^=des_SPtrans[4][u1]; \ + LL^=des_SPtrans[6][u3]; \ + u2=(int)t>>8L; \ + u1=(int)t&0x3f; \ + u2&=0x3f; \ + t>>=16L; \ + LL^=des_SPtrans[1][u1]; \ + LL^=des_SPtrans[3][u2]; \ + u3=(int)t>>8L; \ + u1=(int)t&0x3f; \ + u3&=0x3f; \ + LL^=des_SPtrans[5][u1]; \ + LL^=des_SPtrans[7][u3]; } +#endif /* DES_RISC1 */ +#ifdef DES_RISC2 +#define D_ENCRYPT(LL,R,S) {\ + unsigned int u1,u2,s1,s2; \ + LOAD_DATA(R,S,u,t,E0,E1,u1); \ + u>>=2L; \ + t=ROTATE(t,6); \ + u2=(int)u>>8L; \ + u1=(int)u&0x3f; \ + u2&=0x3f; \ + LL^=des_SPtrans[0][u1]; \ + LL^=des_SPtrans[2][u2]; \ + s1=(int)u>>16L; \ + s2=(int)u>>24L; \ + s1&=0x3f; \ + s2&=0x3f; \ + LL^=des_SPtrans[4][s1]; \ + LL^=des_SPtrans[6][s2]; \ + u2=(int)t>>8L; \ + u1=(int)t&0x3f; \ + u2&=0x3f; \ + LL^=des_SPtrans[1][u1]; \ + LL^=des_SPtrans[3][u2]; \ + s1=(int)t>>16; \ + s2=(int)t>>24L; \ + s1&=0x3f; \ + s2&=0x3f; \ + LL^=des_SPtrans[5][s1]; \ + LL^=des_SPtrans[7][s2]; } +#endif /* DES_RISC2 */ + +#else /* DES_RISC1 || DES_RISC2 */ + +#define D_ENCRYPT(LL,R,S) {\ + LOAD_DATA_tmp(R,S,u,t,E0,E1); \ t=ROTATE(t,4); \ - Q^= des_SPtrans[1][(t )&0x3f]| \ - des_SPtrans[3][(t>> 8L)&0x3f]| \ - des_SPtrans[5][(t>>16L)&0x3f]| \ - des_SPtrans[7][(t>>24L)&0x3f]| \ - des_SPtrans[0][(u )&0x3f]| \ - des_SPtrans[2][(u>> 8L)&0x3f]| \ - des_SPtrans[4][(u>>16L)&0x3f]| \ - des_SPtrans[6][(u>>24L)&0x3f]; } -#endif -#endif + LL^=\ + des_SPtrans[0][(u>> 2L)&0x3f]^ \ + des_SPtrans[2][(u>>10L)&0x3f]^ \ + des_SPtrans[4][(u>>18L)&0x3f]^ \ + des_SPtrans[6][(u>>26L)&0x3f]^ \ + des_SPtrans[1][(t>> 2L)&0x3f]^ \ + des_SPtrans[3][(t>>10L)&0x3f]^ \ + des_SPtrans[5][(t>>18L)&0x3f]^ \ + des_SPtrans[7][(t>>26L)&0x3f]; } +#endif /* DES_RISC1 || DES_RISC2 */ +#endif /* DES_PTR */ /* IP and FP * The problem is more of a geometric problem that random bit fiddling. 
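All of the D_ENCRYPT variants above (DES_PTR, DES_RISC1, DES_RISC2, and the plain form) compute the same Feistel round; they differ only in how the eight S-box lookups are addressed and scheduled for a given compiler and CPU. A plain-C reference form of the round, equivalent to the last macro above (an illustrative sketch, not part of the patch):

    /* u and t are the round input XORed with the two subkey words, as in
     * LOAD_DATA; des_SPtrans and ROTATE come from this same directory. */
    static void
    des_round_ref(DES_LONG *L, DES_LONG R, const DES_LONG *s, int S)
    {
        DES_LONG u = R ^ s[S];
        DES_LONG t = ROTATE(R ^ s[S + 1], 4);

        *L ^= des_SPtrans[0][(u >>  2) & 0x3f] ^
              des_SPtrans[2][(u >> 10) & 0x3f] ^
              des_SPtrans[4][(u >> 18) & 0x3f] ^
              des_SPtrans[6][(u >> 26) & 0x3f] ^
              des_SPtrans[1][(t >>  2) & 0x3f] ^
              des_SPtrans[3][(t >> 10) & 0x3f] ^
              des_SPtrans[5][(t >> 18) & 0x3f] ^
              des_SPtrans[7][(t >> 26) & 0x3f];
    }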
diff --git a/bsd/crypto/des/des_setkey.c b/bsd/crypto/des/des_setkey.c
index 72b4d1a48..5b7f5dec2 100644
--- a/bsd/crypto/des/des_setkey.c
+++ b/bsd/crypto/des/des_setkey.c
@@ -1,5 +1,5 @@
-/* $FreeBSD: src/sys/crypto/des/des_setkey.c,v 1.1.2.3 2001/07/10 09:46:35 ume Exp $ */
-/* $KAME: des_setkey.c,v 1.6 2001/07/03 14:27:53 itojun Exp $ */
+/* $FreeBSD: src/sys/crypto/des/des_setkey.c,v 1.1.2.4 2002/03/26 10:12:25 ume Exp $ */
+/* $KAME: des_setkey.c,v 1.7 2001/09/10 04:03:58 itojun Exp $ */
 /* crypto/des/set_key.c */
 /* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au)
@@ -61,22 +61,18 @@
 #include
 #include
-static int check_parity __P((des_cblock (*)));
-
 int des_check_key=0;
-void des_set_odd_parity(key)
-des_cblock (*key);
- {
+void des_set_odd_parity(des_cblock *key)
+{
 int i;
 for (i=0; i>(n))^(b))&(m)),\
 * (b)^=(t),\
 * (a)=((a)^((t)<<(n))))
@@ -141,49 +138,48 @@ des_cblock (*key);
 #define HPERM_OP(a,t,n,m) ((t)=((((a)<<(16-(n)))^(a))&(m)),\
 (a)=(a)^(t)^(t>>(16-(n))))
+int des_set_key(des_cblock *key, des_key_schedule schedule)
+{
+ if (des_check_key)
+ {
+ return des_set_key_checked(key, schedule);
+ }
+ else
+ {
+ des_set_key_unchecked(key, schedule);
+ return 0;
+ }
+}
+
 /* return 0 if key parity is odd (correct),
 * return -1 if key parity error,
 * return -2 if illegal weak key.
 */
-int des_set_key(key, schedule)
-des_cblock (*key);
-des_key_schedule schedule;
- {
+int des_set_key_checked(des_cblock *key, des_key_schedule schedule)
+{
+ if (!des_check_key_parity(key))
+ return(-1);
+ if (des_is_weak_key(key))
+ return(-2);
+ des_set_key_unchecked(key, schedule);
+ return 0;
+}
+
+void des_set_key_unchecked(des_cblock *key, des_key_schedule schedule)
+{
 static int shifts2[16]={0,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0};
- register DES_LONG c,d,t,s;
- register unsigned char *in;
+ register DES_LONG c,d,t,s,t2;
+ register const unsigned char *in;
 register DES_LONG *k;
 register int i;
- if (des_check_key)
- {
- if (!check_parity(key))
- return(-1);
-
- if (des_is_weak_key(key))
- return(-2);
- }
-
- k=(DES_LONG *)schedule;
- in=(unsigned char *)key;
+ k = &schedule->ks.deslong[0];
+ in = &(*key)[0];
 c2l(in,c);
 c2l(in,d);
- /* do PC1 in 60 simple operations */
-/* PERM_OP(d,c,t,4,0x0f0f0f0fL);
- HPERM_OP(c,t,-2, 0xcccc0000L);
- HPERM_OP(c,t,-1, 0xaaaa0000L);
- HPERM_OP(c,t, 8, 0x00ff0000L);
- HPERM_OP(c,t,-1, 0xaaaa0000L);
- HPERM_OP(d,t,-8, 0xff000000L);
- HPERM_OP(d,t, 8, 0x00ff0000L);
- HPERM_OP(d,t, 2, 0x33330000L);
- d=((d&0x00aa00aaL)<<7L)|((d&0x55005500L)>>7L)|(d&0xaa55aa55L);
- d=(d>>8)|((c&0xf0000000L)>>4);
- c&=0x0fffffffL; */
-
- /* I now do it in 47 simple operations :-)
+ /* do PC1 in 47 simple operations :-)
 * Thanks to John Fletcher (john_fletcher@lccmail.ocf.llnl.gov)
 * for the inspiration.
:-) */ PERM_OP (d,c,t,4,0x0f0f0f0fL); @@ -197,7 +193,7 @@ des_key_schedule schedule; c&=0x0fffffffL; for (i=0; i>2L)|(c<<26L)); d=((d>>2L)|(d<<26L)); } else @@ -205,30 +201,32 @@ des_key_schedule schedule; c&=0x0fffffffL; d&=0x0fffffffL; /* could be a few less shifts but I am to lazy at this - * point in time to investigate */ + * point in time to investigate */ s= des_skb[0][ (c )&0x3f ]| - des_skb[1][((c>> 6)&0x03)|((c>> 7L)&0x3c)]| - des_skb[2][((c>>13)&0x0f)|((c>>14L)&0x30)]| - des_skb[3][((c>>20)&0x01)|((c>>21L)&0x06) | - ((c>>22L)&0x38)]; + des_skb[1][((c>> 6L)&0x03)|((c>> 7L)&0x3c)]| + des_skb[2][((c>>13L)&0x0f)|((c>>14L)&0x30)]| + des_skb[3][((c>>20L)&0x01)|((c>>21L)&0x06) | + ((c>>22L)&0x38)]; t= des_skb[4][ (d )&0x3f ]| des_skb[5][((d>> 7L)&0x03)|((d>> 8L)&0x3c)]| des_skb[6][ (d>>15L)&0x3f ]| des_skb[7][((d>>21L)&0x0f)|((d>>22L)&0x30)]; /* table contained 0213 4657 */ - *(k++)=((t<<16L)|(s&0x0000ffffL))&0xffffffffL; - s= ((s>>16L)|(t&0xffff0000L)); - - s=(s<<4L)|(s>>28L); - *(k++)=s&0xffffffffL; - } - return(0); + t2=((t<<16L)|(s&0x0000ffffL))&0xffffffffL; + *(k++)=ROTATE(t2,30)&0xffffffffL; + + t2=((s>>16L)|(t&0xffff0000L)); + *(k++)=ROTATE(t2,26)&0xffffffffL; } +} -int des_key_sched(key, schedule) -des_cblock (*key); -des_key_schedule schedule; - { +int des_key_sched(des_cblock *key, des_key_schedule schedule) +{ return(des_set_key(key,schedule)); - } +} + +void des_fixup_key_parity(des_cblock *key) +{ + des_set_odd_parity(key); +} diff --git a/bsd/crypto/des/spr.h b/bsd/crypto/des/spr.h index 21dea800c..e7d8626dc 100644 --- a/bsd/crypto/des/spr.h +++ b/bsd/crypto/des/spr.h @@ -1,23 +1,28 @@ -/* $FreeBSD: src/sys/crypto/des/spr.h,v 1.1.2.1 2000/07/15 07:14:22 kris Exp $ */ -/* $KAME: spr.h,v 1.3 2000/03/27 04:36:35 sumikawa Exp $ */ +/* $FreeBSD: src/sys/crypto/des/spr.h,v 1.1.2.2 2002/03/26 10:12:25 ume Exp $ */ +/* $KAME: spr.h,v 1.4 2001/09/10 04:03:58 itojun Exp $ */ /* crypto/des/spr.h */ -/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * - * This file is part of an SSL implementation written - * by Eric Young (eay@mincom.oz.au). - * The implementation was written so as to conform with Netscapes SSL - * specification. This library and applications are - * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE - * as long as the following conditions are aheared to. - * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. If this code is used in a product, - * Eric Young should be given attribution as the author of the parts used. + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. 
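The des_setkey.c hunk above replaces the parity check buried inside the old des_set_key() with an explicit three-way interface: des_set_key() keeps its historical behavior of consulting the des_check_key global, while des_set_key_checked() and des_set_key_unchecked() let callers choose the policy directly. A hedged sketch of a caller that always validates (illustrative, not part of the patch):

    #include <crypto/des/des.h>

    static int
    load_des_schedule(des_cblock *key, des_key_schedule ks)
    {
        switch (des_set_key_checked(key, ks)) {
        case 0:
            return (0);   /* odd parity, not weak: schedule is usable */
        case -1:
            return (-1);  /* parity error; des_fixup_key_parity() could
                           * repair the key before retrying */
        default:
            return (-2);  /* weak or semi-weak key: reject */
        }
    }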
- * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: @@ -28,8 +33,14 @@ * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: - * This product includes software developed by Eric Young (eay@mincom.oz.au) - * + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE @@ -41,156 +52,156 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * + * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ -static const DES_LONG des_SPtrans[8][64]={ +const DES_LONG des_SPtrans[8][64]={ { /* nibble 0 */ -0x00820200L, 0x00020000L, 0x80800000L, 0x80820200L, -0x00800000L, 0x80020200L, 0x80020000L, 0x80800000L, -0x80020200L, 0x00820200L, 0x00820000L, 0x80000200L, -0x80800200L, 0x00800000L, 0x00000000L, 0x80020000L, -0x00020000L, 0x80000000L, 0x00800200L, 0x00020200L, -0x80820200L, 0x00820000L, 0x80000200L, 0x00800200L, -0x80000000L, 0x00000200L, 0x00020200L, 0x80820000L, -0x00000200L, 0x80800200L, 0x80820000L, 0x00000000L, -0x00000000L, 0x80820200L, 0x00800200L, 0x80020000L, -0x00820200L, 0x00020000L, 0x80000200L, 0x00800200L, -0x80820000L, 0x00000200L, 0x00020200L, 0x80800000L, -0x80020200L, 0x80000000L, 0x80800000L, 0x00820000L, -0x80820200L, 0x00020200L, 0x00820000L, 0x80800200L, -0x00800000L, 0x80000200L, 0x80020000L, 0x00000000L, -0x00020000L, 0x00800000L, 0x80800200L, 0x00820200L, -0x80000000L, 0x80820000L, 0x00000200L, 0x80020200L, +0x02080800L, 0x00080000L, 0x02000002L, 0x02080802L, +0x02000000L, 0x00080802L, 0x00080002L, 0x02000002L, +0x00080802L, 0x02080800L, 0x02080000L, 0x00000802L, +0x02000802L, 0x02000000L, 0x00000000L, 0x00080002L, +0x00080000L, 0x00000002L, 0x02000800L, 0x00080800L, +0x02080802L, 0x02080000L, 0x00000802L, 0x02000800L, +0x00000002L, 0x00000800L, 0x00080800L, 0x02080002L, +0x00000800L, 0x02000802L, 0x02080002L, 0x00000000L, +0x00000000L, 0x02080802L, 0x02000800L, 0x00080002L, +0x02080800L, 0x00080000L, 0x00000802L, 0x02000800L, +0x02080002L, 0x00000800L, 0x00080800L, 0x02000002L, +0x00080802L, 0x00000002L, 0x02000002L, 0x02080000L, +0x02080802L, 0x00080800L, 0x02080000L, 0x02000802L, +0x02000000L, 0x00000802L, 0x00080002L, 0x00000000L, +0x00080000L, 0x02000000L, 0x02000802L, 0x02080800L, +0x00000002L, 0x02080002L, 0x00000800L, 0x00080802L, },{ /* nibble 1 */ -0x10042004L, 0x00000000L, 0x00042000L, 0x10040000L, -0x10000004L, 0x00002004L, 0x10002000L, 0x00042000L, -0x00002000L, 0x10040004L, 0x00000004L, 0x10002000L, -0x00040004L, 
0x10042000L, 0x10040000L, 0x00000004L, -0x00040000L, 0x10002004L, 0x10040004L, 0x00002000L, -0x00042004L, 0x10000000L, 0x00000000L, 0x00040004L, -0x10002004L, 0x00042004L, 0x10042000L, 0x10000004L, -0x10000000L, 0x00040000L, 0x00002004L, 0x10042004L, -0x00040004L, 0x10042000L, 0x10002000L, 0x00042004L, -0x10042004L, 0x00040004L, 0x10000004L, 0x00000000L, -0x10000000L, 0x00002004L, 0x00040000L, 0x10040004L, -0x00002000L, 0x10000000L, 0x00042004L, 0x10002004L, -0x10042000L, 0x00002000L, 0x00000000L, 0x10000004L, -0x00000004L, 0x10042004L, 0x00042000L, 0x10040000L, -0x10040004L, 0x00040000L, 0x00002004L, 0x10002000L, -0x10002004L, 0x00000004L, 0x10040000L, 0x00042000L, +0x40108010L, 0x00000000L, 0x00108000L, 0x40100000L, +0x40000010L, 0x00008010L, 0x40008000L, 0x00108000L, +0x00008000L, 0x40100010L, 0x00000010L, 0x40008000L, +0x00100010L, 0x40108000L, 0x40100000L, 0x00000010L, +0x00100000L, 0x40008010L, 0x40100010L, 0x00008000L, +0x00108010L, 0x40000000L, 0x00000000L, 0x00100010L, +0x40008010L, 0x00108010L, 0x40108000L, 0x40000010L, +0x40000000L, 0x00100000L, 0x00008010L, 0x40108010L, +0x00100010L, 0x40108000L, 0x40008000L, 0x00108010L, +0x40108010L, 0x00100010L, 0x40000010L, 0x00000000L, +0x40000000L, 0x00008010L, 0x00100000L, 0x40100010L, +0x00008000L, 0x40000000L, 0x00108010L, 0x40008010L, +0x40108000L, 0x00008000L, 0x00000000L, 0x40000010L, +0x00000010L, 0x40108010L, 0x00108000L, 0x40100000L, +0x40100010L, 0x00100000L, 0x00008010L, 0x40008000L, +0x40008010L, 0x00000010L, 0x40100000L, 0x00108000L, },{ /* nibble 2 */ -0x41000000L, 0x01010040L, 0x00000040L, 0x41000040L, -0x40010000L, 0x01000000L, 0x41000040L, 0x00010040L, -0x01000040L, 0x00010000L, 0x01010000L, 0x40000000L, -0x41010040L, 0x40000040L, 0x40000000L, 0x41010000L, -0x00000000L, 0x40010000L, 0x01010040L, 0x00000040L, -0x40000040L, 0x41010040L, 0x00010000L, 0x41000000L, -0x41010000L, 0x01000040L, 0x40010040L, 0x01010000L, -0x00010040L, 0x00000000L, 0x01000000L, 0x40010040L, -0x01010040L, 0x00000040L, 0x40000000L, 0x00010000L, -0x40000040L, 0x40010000L, 0x01010000L, 0x41000040L, -0x00000000L, 0x01010040L, 0x00010040L, 0x41010000L, -0x40010000L, 0x01000000L, 0x41010040L, 0x40000000L, -0x40010040L, 0x41000000L, 0x01000000L, 0x41010040L, -0x00010000L, 0x01000040L, 0x41000040L, 0x00010040L, -0x01000040L, 0x00000000L, 0x41010000L, 0x40000040L, -0x41000000L, 0x40010040L, 0x00000040L, 0x01010000L, +0x04000001L, 0x04040100L, 0x00000100L, 0x04000101L, +0x00040001L, 0x04000000L, 0x04000101L, 0x00040100L, +0x04000100L, 0x00040000L, 0x04040000L, 0x00000001L, +0x04040101L, 0x00000101L, 0x00000001L, 0x04040001L, +0x00000000L, 0x00040001L, 0x04040100L, 0x00000100L, +0x00000101L, 0x04040101L, 0x00040000L, 0x04000001L, +0x04040001L, 0x04000100L, 0x00040101L, 0x04040000L, +0x00040100L, 0x00000000L, 0x04000000L, 0x00040101L, +0x04040100L, 0x00000100L, 0x00000001L, 0x00040000L, +0x00000101L, 0x00040001L, 0x04040000L, 0x04000101L, +0x00000000L, 0x04040100L, 0x00040100L, 0x04040001L, +0x00040001L, 0x04000000L, 0x04040101L, 0x00000001L, +0x00040101L, 0x04000001L, 0x04000000L, 0x04040101L, +0x00040000L, 0x04000100L, 0x04000101L, 0x00040100L, +0x04000100L, 0x00000000L, 0x04040001L, 0x00000101L, +0x04000001L, 0x00040101L, 0x00000100L, 0x04040000L, },{ /* nibble 3 */ -0x00100402L, 0x04000400L, 0x00000002L, 0x04100402L, -0x00000000L, 0x04100000L, 0x04000402L, 0x00100002L, -0x04100400L, 0x04000002L, 0x04000000L, 0x00000402L, -0x04000002L, 0x00100402L, 0x00100000L, 0x04000000L, -0x04100002L, 0x00100400L, 0x00000400L, 0x00000002L, -0x00100400L, 0x04000402L, 
0x04100000L, 0x00000400L, -0x00000402L, 0x00000000L, 0x00100002L, 0x04100400L, -0x04000400L, 0x04100002L, 0x04100402L, 0x00100000L, -0x04100002L, 0x00000402L, 0x00100000L, 0x04000002L, -0x00100400L, 0x04000400L, 0x00000002L, 0x04100000L, -0x04000402L, 0x00000000L, 0x00000400L, 0x00100002L, -0x00000000L, 0x04100002L, 0x04100400L, 0x00000400L, -0x04000000L, 0x04100402L, 0x00100402L, 0x00100000L, -0x04100402L, 0x00000002L, 0x04000400L, 0x00100402L, -0x00100002L, 0x00100400L, 0x04100000L, 0x04000402L, -0x00000402L, 0x04000000L, 0x04000002L, 0x04100400L, +0x00401008L, 0x10001000L, 0x00000008L, 0x10401008L, +0x00000000L, 0x10400000L, 0x10001008L, 0x00400008L, +0x10401000L, 0x10000008L, 0x10000000L, 0x00001008L, +0x10000008L, 0x00401008L, 0x00400000L, 0x10000000L, +0x10400008L, 0x00401000L, 0x00001000L, 0x00000008L, +0x00401000L, 0x10001008L, 0x10400000L, 0x00001000L, +0x00001008L, 0x00000000L, 0x00400008L, 0x10401000L, +0x10001000L, 0x10400008L, 0x10401008L, 0x00400000L, +0x10400008L, 0x00001008L, 0x00400000L, 0x10000008L, +0x00401000L, 0x10001000L, 0x00000008L, 0x10400000L, +0x10001008L, 0x00000000L, 0x00001000L, 0x00400008L, +0x00000000L, 0x10400008L, 0x10401000L, 0x00001000L, +0x10000000L, 0x10401008L, 0x00401008L, 0x00400000L, +0x10401008L, 0x00000008L, 0x10001000L, 0x00401008L, +0x00400008L, 0x00401000L, 0x10400000L, 0x10001008L, +0x00001008L, 0x10000000L, 0x10000008L, 0x10401000L, },{ /* nibble 4 */ -0x02000000L, 0x00004000L, 0x00000100L, 0x02004108L, -0x02004008L, 0x02000100L, 0x00004108L, 0x02004000L, -0x00004000L, 0x00000008L, 0x02000008L, 0x00004100L, -0x02000108L, 0x02004008L, 0x02004100L, 0x00000000L, -0x00004100L, 0x02000000L, 0x00004008L, 0x00000108L, -0x02000100L, 0x00004108L, 0x00000000L, 0x02000008L, -0x00000008L, 0x02000108L, 0x02004108L, 0x00004008L, -0x02004000L, 0x00000100L, 0x00000108L, 0x02004100L, -0x02004100L, 0x02000108L, 0x00004008L, 0x02004000L, -0x00004000L, 0x00000008L, 0x02000008L, 0x02000100L, -0x02000000L, 0x00004100L, 0x02004108L, 0x00000000L, -0x00004108L, 0x02000000L, 0x00000100L, 0x00004008L, -0x02000108L, 0x00000100L, 0x00000000L, 0x02004108L, -0x02004008L, 0x02004100L, 0x00000108L, 0x00004000L, -0x00004100L, 0x02004008L, 0x02000100L, 0x00000108L, -0x00000008L, 0x00004108L, 0x02004000L, 0x02000008L, +0x08000000L, 0x00010000L, 0x00000400L, 0x08010420L, +0x08010020L, 0x08000400L, 0x00010420L, 0x08010000L, +0x00010000L, 0x00000020L, 0x08000020L, 0x00010400L, +0x08000420L, 0x08010020L, 0x08010400L, 0x00000000L, +0x00010400L, 0x08000000L, 0x00010020L, 0x00000420L, +0x08000400L, 0x00010420L, 0x00000000L, 0x08000020L, +0x00000020L, 0x08000420L, 0x08010420L, 0x00010020L, +0x08010000L, 0x00000400L, 0x00000420L, 0x08010400L, +0x08010400L, 0x08000420L, 0x00010020L, 0x08010000L, +0x00010000L, 0x00000020L, 0x08000020L, 0x08000400L, +0x08000000L, 0x00010400L, 0x08010420L, 0x00000000L, +0x00010420L, 0x08000000L, 0x00000400L, 0x00010020L, +0x08000420L, 0x00000400L, 0x00000000L, 0x08010420L, +0x08010020L, 0x08010400L, 0x00000420L, 0x00010000L, +0x00010400L, 0x08010020L, 0x08000400L, 0x00000420L, +0x00000020L, 0x00010420L, 0x08010000L, 0x08000020L, },{ /* nibble 5 */ -0x20000010L, 0x00080010L, 0x00000000L, 0x20080800L, -0x00080010L, 0x00000800L, 0x20000810L, 0x00080000L, -0x00000810L, 0x20080810L, 0x00080800L, 0x20000000L, -0x20000800L, 0x20000010L, 0x20080000L, 0x00080810L, -0x00080000L, 0x20000810L, 0x20080010L, 0x00000000L, -0x00000800L, 0x00000010L, 0x20080800L, 0x20080010L, -0x20080810L, 0x20080000L, 0x20000000L, 0x00000810L, -0x00000010L, 0x00080800L, 0x00080810L, 
0x20000800L, -0x00000810L, 0x20000000L, 0x20000800L, 0x00080810L, -0x20080800L, 0x00080010L, 0x00000000L, 0x20000800L, -0x20000000L, 0x00000800L, 0x20080010L, 0x00080000L, -0x00080010L, 0x20080810L, 0x00080800L, 0x00000010L, -0x20080810L, 0x00080800L, 0x00080000L, 0x20000810L, -0x20000010L, 0x20080000L, 0x00080810L, 0x00000000L, -0x00000800L, 0x20000010L, 0x20000810L, 0x20080800L, -0x20080000L, 0x00000810L, 0x00000010L, 0x20080010L, +0x80000040L, 0x00200040L, 0x00000000L, 0x80202000L, +0x00200040L, 0x00002000L, 0x80002040L, 0x00200000L, +0x00002040L, 0x80202040L, 0x00202000L, 0x80000000L, +0x80002000L, 0x80000040L, 0x80200000L, 0x00202040L, +0x00200000L, 0x80002040L, 0x80200040L, 0x00000000L, +0x00002000L, 0x00000040L, 0x80202000L, 0x80200040L, +0x80202040L, 0x80200000L, 0x80000000L, 0x00002040L, +0x00000040L, 0x00202000L, 0x00202040L, 0x80002000L, +0x00002040L, 0x80000000L, 0x80002000L, 0x00202040L, +0x80202000L, 0x00200040L, 0x00000000L, 0x80002000L, +0x80000000L, 0x00002000L, 0x80200040L, 0x00200000L, +0x00200040L, 0x80202040L, 0x00202000L, 0x00000040L, +0x80202040L, 0x00202000L, 0x00200000L, 0x80002040L, +0x80000040L, 0x80200000L, 0x00202040L, 0x00000000L, +0x00002000L, 0x80000040L, 0x80002040L, 0x80202000L, +0x80200000L, 0x00002040L, 0x00000040L, 0x80200040L, },{ /* nibble 6 */ -0x00001000L, 0x00000080L, 0x00400080L, 0x00400001L, -0x00401081L, 0x00001001L, 0x00001080L, 0x00000000L, -0x00400000L, 0x00400081L, 0x00000081L, 0x00401000L, -0x00000001L, 0x00401080L, 0x00401000L, 0x00000081L, -0x00400081L, 0x00001000L, 0x00001001L, 0x00401081L, -0x00000000L, 0x00400080L, 0x00400001L, 0x00001080L, -0x00401001L, 0x00001081L, 0x00401080L, 0x00000001L, -0x00001081L, 0x00401001L, 0x00000080L, 0x00400000L, -0x00001081L, 0x00401000L, 0x00401001L, 0x00000081L, -0x00001000L, 0x00000080L, 0x00400000L, 0x00401001L, -0x00400081L, 0x00001081L, 0x00001080L, 0x00000000L, -0x00000080L, 0x00400001L, 0x00000001L, 0x00400080L, -0x00000000L, 0x00400081L, 0x00400080L, 0x00001080L, -0x00000081L, 0x00001000L, 0x00401081L, 0x00400000L, -0x00401080L, 0x00000001L, 0x00001001L, 0x00401081L, -0x00400001L, 0x00401080L, 0x00401000L, 0x00001001L, +0x00004000L, 0x00000200L, 0x01000200L, 0x01000004L, +0x01004204L, 0x00004004L, 0x00004200L, 0x00000000L, +0x01000000L, 0x01000204L, 0x00000204L, 0x01004000L, +0x00000004L, 0x01004200L, 0x01004000L, 0x00000204L, +0x01000204L, 0x00004000L, 0x00004004L, 0x01004204L, +0x00000000L, 0x01000200L, 0x01000004L, 0x00004200L, +0x01004004L, 0x00004204L, 0x01004200L, 0x00000004L, +0x00004204L, 0x01004004L, 0x00000200L, 0x01000000L, +0x00004204L, 0x01004000L, 0x01004004L, 0x00000204L, +0x00004000L, 0x00000200L, 0x01000000L, 0x01004004L, +0x01000204L, 0x00004204L, 0x00004200L, 0x00000000L, +0x00000200L, 0x01000004L, 0x00000004L, 0x01000200L, +0x00000000L, 0x01000204L, 0x01000200L, 0x00004200L, +0x00000204L, 0x00004000L, 0x01004204L, 0x01000000L, +0x01004200L, 0x00000004L, 0x00004004L, 0x01004204L, +0x01000004L, 0x01004200L, 0x01004000L, 0x00004004L, },{ /* nibble 7 */ -0x08200020L, 0x08208000L, 0x00008020L, 0x00000000L, -0x08008000L, 0x00200020L, 0x08200000L, 0x08208020L, -0x00000020L, 0x08000000L, 0x00208000L, 0x00008020L, -0x00208020L, 0x08008020L, 0x08000020L, 0x08200000L, -0x00008000L, 0x00208020L, 0x00200020L, 0x08008000L, -0x08208020L, 0x08000020L, 0x00000000L, 0x00208000L, -0x08000000L, 0x00200000L, 0x08008020L, 0x08200020L, -0x00200000L, 0x00008000L, 0x08208000L, 0x00000020L, -0x00200000L, 0x00008000L, 0x08000020L, 0x08208020L, -0x00008020L, 0x08000000L, 0x00000000L, 0x00208000L, 
-0x08200020L, 0x08008020L, 0x08008000L, 0x00200020L, -0x08208000L, 0x00000020L, 0x00200020L, 0x08008000L, -0x08208020L, 0x00200000L, 0x08200000L, 0x08000020L, -0x00208000L, 0x00008020L, 0x08008020L, 0x08200000L, -0x00000020L, 0x08208000L, 0x00208020L, 0x00000000L, -0x08000000L, 0x08200020L, 0x00008000L, 0x00208020L, +0x20800080L, 0x20820000L, 0x00020080L, 0x00000000L, +0x20020000L, 0x00800080L, 0x20800000L, 0x20820080L, +0x00000080L, 0x20000000L, 0x00820000L, 0x00020080L, +0x00820080L, 0x20020080L, 0x20000080L, 0x20800000L, +0x00020000L, 0x00820080L, 0x00800080L, 0x20020000L, +0x20820080L, 0x20000080L, 0x00000000L, 0x00820000L, +0x20000000L, 0x00800000L, 0x20020080L, 0x20800080L, +0x00800000L, 0x00020000L, 0x20820000L, 0x00000080L, +0x00800000L, 0x00020000L, 0x20000080L, 0x20820080L, +0x00020080L, 0x20000000L, 0x00000000L, 0x00820000L, +0x20800080L, 0x20020080L, 0x20020000L, 0x00800080L, +0x20820000L, 0x00000080L, 0x00800080L, 0x20020000L, +0x20820080L, 0x00800000L, 0x20800000L, 0x20000080L, +0x00820000L, 0x00020080L, 0x20020080L, 0x20800000L, +0x00000080L, 0x20820000L, 0x00820080L, 0x00000000L, +0x20000000L, 0x20800080L, 0x00020000L, 0x00820080L, }}; diff --git a/bsd/crypto/sha2/sha2.c b/bsd/crypto/sha2/sha2.c index 9ea6f468b..53b5b201c 100644 --- a/bsd/crypto/sha2/sha2.c +++ b/bsd/crypto/sha2/sha2.c @@ -1,5 +1,5 @@ -/* $FreeBSD: src/sys/crypto/sha2/sha2.c,v 1.2.2.1 2001/07/03 11:01:36 ume Exp $ */ -/* $KAME: sha2.c,v 1.6 2001/03/12 11:31:04 itojun Exp $ */ +/* $FreeBSD: src/sys/crypto/sha2/sha2.c,v 1.2.2.2 2002/03/05 08:36:47 ume Exp $ */ +/* $KAME: sha2.c,v 1.8 2001/11/08 01:07:52 itojun Exp $ */ /* * sha2.c @@ -565,7 +565,7 @@ void SHA256_Final(sha2_byte digest[], SHA256_CTX* context) { /* Begin padding with a 1 bit: */ context->buffer[usedspace++] = 0x80; - if (usedspace < SHA256_SHORT_BLOCK_LENGTH) { + if (usedspace <= SHA256_SHORT_BLOCK_LENGTH) { /* Set-up for the last transform: */ bzero(&context->buffer[usedspace], SHA256_SHORT_BLOCK_LENGTH - usedspace); } else { @@ -882,7 +882,7 @@ void SHA512_Last(SHA512_CTX* context) { /* Begin padding with a 1 bit: */ context->buffer[usedspace++] = 0x80; - if (usedspace < SHA512_SHORT_BLOCK_LENGTH) { + if (usedspace <= SHA512_SHORT_BLOCK_LENGTH) { /* Set-up for the last transform: */ bzero(&context->buffer[usedspace], SHA512_SHORT_BLOCK_LENGTH - usedspace); } else { diff --git a/bsd/dev/disk.h b/bsd/dev/disk.h index 8d672d97c..61614fb00 100644 --- a/bsd/dev/disk.h +++ b/bsd/dev/disk.h @@ -22,129 +22,5 @@ * * @APPLE_LICENSE_HEADER_END@ */ -/* @(#)disk.h 1.0 08/29/87 (c) 1987 NeXT */ -#ifndef _BSD_DEV_DISK_ -#define _BSD_DEV_DISK_ -#ifndef _SYS_DISK_H_ -#define _SYS_DISK_H_ - -#include -#include -#include -#include -#include -#include -#include - -/* - * USE INSTEAD (NOTE: DKIOCGETBLOCKCOUNT -> DKIOCGETBLOCKCOUNT32) - */ - -#ifdef __APPLE_API_OBSOLETE - -#define DR_CMDSIZE 32 -#define DR_ERRSIZE 32 - -struct disk_req { - int dr_bcount; /* byte count for data transfers */ - caddr_t dr_addr; /* memory addr for data transfers */ - struct timeval dr_exec_time; /* execution time of operation */ - - /* - * interpretation of cmdblk and errblk is driver specific. 
- */ - char dr_cmdblk[DR_CMDSIZE]; - char dr_errblk[DR_ERRSIZE]; -}; - -struct sdc_wire { - vm_offset_t start, end; - boolean_t new_pageable; -}; - - -#define BAD_BLK_OFF 4 /* offset of bad blk tbl from label */ -#define NBAD_BLK (12 * 1024 / sizeof (int)) - -struct bad_block { /* bad block table, sized to be 12KB */ - int bad_blk[NBAD_BLK]; -}; - -/* - * sector bitmap states (2 bits per sector) - */ -#define SB_UNTESTED 0 /* must be zero */ -#define SB_BAD 1 -#define SB_WRITTEN 2 -#define SB_ERASED 3 - -struct drive_info { /* info about drive hardware */ - char di_name[MAXDNMLEN]; /* drive type name */ - int di_label_blkno[NLABELS];/* label loc'ns in DEVICE SECTORS */ - int di_devblklen; /* device sector size */ - int di_maxbcount; /* max bytes per transfer request */ -}; - -#define DS_STATSIZE 32 - -struct disk_stats { - int s_ecccnt; /* avg ECC corrections per sector */ - int s_maxecc; /* max ECC corrections observed */ - - /* - * interpretation of s_stats is driver specific - */ - char s_stats[DS_STATSIZE]; -}; - -struct drive_location { - char location[ 128 ]; -}; - -#define DKIOCGLABEL _IOR('d', 0,struct disk_label) /* read label */ -#define DKIOCSLABEL _IOW('d', 1,struct disk_label) /* write label */ -#define DKIOCGBITMAP _IO('d', 2) /* read bitmap */ -#define DKIOCSBITMAP _IO('d', 3) /* write bitmap */ -#define DKIOCREQ _IOWR('d', 4, struct disk_req) /* cmd request */ -#define DKIOCINFO _IOR('d', 5, struct drive_info) /* get drive info */ -#define DKIOCZSTATS _IO('d',7) /* zero statistics */ -#define DKIOCGSTATS _IO('d', 8) /* get statistics */ -#define DKIOCRESET _IO('d', 9) /* reset disk */ -#define DKIOCGFLAGS _IOR('d', 11, int) /* get driver flags */ -#define DKIOCSFLAGS _IOW('d', 12, int) /* set driver flags */ -#define DKIOCSDCWIRE _IOW('d', 14, struct sdc_wire) /* sdc wire memory */ -#define DKIOCSDCLOCK _IO('d', 15) /* sdc lock */ -#define DKIOCSDCUNLOCK _IO('d', 16) /* sdc unlock */ -#define DKIOCGFREEVOL _IOR('d', 17, int) /* get free volume # */ -#define DKIOCGBBT _IO('d', 18) /* read bad blk tbl */ -#define DKIOCSBBT _IO('d', 19) /* write bad blk tbl */ -#define DKIOCMNOTIFY _IOW('d', 20, int) /* message on insert */ -#define DKIOCEJECT _IO('d', 21) /* eject disk */ -#define DKIOCPANELPRT _IOW('d', 22, int) /* register Panel */ - /* Request port */ -#define DKIOCSFORMAT _IOW('d', 23, int) /* set 'Formatted' flag */ -#define DKIOCGFORMAT _IOR('d', 23, int) /* get 'Formatted' flag */ -#define DKIOCBLKSIZE _IOR('d', 24, int) /* device sector size */ -#define DKIOCNUMBLKS _IOR('d', 25, int) /* number of sectors */ -#define DKIOCCHECKINSERT _IO('d',26) /* manually poll removable */ - /* media drive */ -#define DKIOCCANCELAUTOMOUNT _IOW('d',27, dev_t) /* cancel automount request */ -#define DKIOCGLOCATION _IOR('d',28, struct drive_location) /* arch dependent location descrip */ -#define DKIOCSETBLOCKSIZE _IOW('d', 24, int) /* set media's preferred sector size */ -#define DKIOCGETBLOCKSIZE DKIOCBLKSIZE /* get media's preferred sector size */ -#define DKIOCGETBLOCKCOUNT32 DKIOCNUMBLKS /* get media's sector count */ -#define DKIOCGETBLOCKCOUNT64 _IOR('d', 25, u_int64_t) /* get media's sector count */ -#define DKIOCGETLOCATION DKIOCGLOCATION /* get media's location description */ -#define DKIOCISFORMATTED DKIOCGFORMAT /* is media formatted? */ -#define DKIOCISWRITABLE _IOR('d', 29, int) /* is media writable? 
 */
-
-#define	DKIOCGETMAXBLOCKCOUNTREAD	_IOR('d', 64, u_int64_t)	/* get device's maximum block count for read requests */
-#define	DKIOCGETMAXBLOCKCOUNTWRITE	_IOR('d', 65, u_int64_t)	/* get device's maximum block count for write requests */
-#define	DKIOCGETMAXSEGMENTCOUNTREAD	_IOR('d', 66, u_int64_t)	/* get device's maximum physical segment count for read buffers */
-#define	DKIOCGETMAXSEGMENTCOUNTWRITE	_IOR('d', 67, u_int64_t)	/* get device's maximum physical segment count for write buffers */
-
-#endif	/* __APPLE_API_OBSOLETE */
-
-#endif	/* _SYS_DISK_H_ */
-#endif	/* _BSD_DEV_DISK_ */
+#warning <dev/disk.h> is obsolete, please use <sys/disk.h> instead
diff --git a/bsd/dev/disk_label.h b/bsd/dev/disk_label.h
index 20e7a0ece..10e60cfef 100644
--- a/bsd/dev/disk_label.h
+++ b/bsd/dev/disk_label.h
@@ -22,91 +22,5 @@
 *
 * @APPLE_LICENSE_HEADER_END@
 */
-/* Copyright (c) 1991 by NeXT Computer, Inc.
- *
- * File: bsd/dev/disk_label.h - NeXT disk label definition.
- *
- */
-
-#ifndef _BSD_DEV_DISK_LABEL_
-#define _BSD_DEV_DISK_LABEL_
-
-#include
-#include
-
-#ifdef __APPLE_API_OBSOLETE
-
-#define	NLABELS		4	/* # of labels on a disk */
-#define	MAXLBLLEN	24	/* dl_label[] size */
-#define	NBAD		1670	/* sized to make label ~= 8KB */
-
-/*
- * if dl_version >= DL_V3 then the bad block table is relocated
- * to a structure separate from the disk label.
- */
-typedef union {
-	unsigned short	DL_v3_checksum;
-	int		DL_bad[NBAD];	/* block number that is bad */
-} dl_un_t;
-
-typedef struct disk_label {
-	int		dl_version;		// label version number
-	int		dl_label_blkno;		// block # where this label is
-	int		dl_size;		// size of media area (sectors)
-	char		dl_label[MAXLBLLEN];	// media label
-	unsigned	dl_flags;		// flags (see DL_xxx, below)
-	unsigned	dl_tag;			// volume tag
-	struct	disktab	dl_dt;			// common info in disktab
-	dl_un_t	dl_un;
-	unsigned short	dl_checksum;		// ones complement checksum
-
-	/* add things here so dl_checksum stays in a fixed place */
-} disk_label_t;
-
-/*
- * Known label versions.
- */
-#define	DL_V1		0x4e655854	/* version #1: "NeXT" */
-#define	DL_V2		0x646c5632	/* version #2: "dlV2" */
-#define	DL_V3		0x646c5633	/* version #3: "dlV3" */
-#define	DL_VERSION	DL_V3		/* default version */
-
-
-/*
- * dl_flags values
- */
-#define	DL_UNINIT	0x80000000	/* label is uninitialized */
-
-/*
- * Aliases for disktab fields
- */
-#define	dl_name		dl_dt.d_name
-#define	dl_type		dl_dt.d_type
-#define	dl_part		dl_dt.d_partitions
-#define	dl_front	dl_dt.d_front
-#define	dl_back		dl_dt.d_back
-#define	dl_ngroups	dl_dt.d_ngroups
-#define	dl_ag_size	dl_dt.d_ag_size
-#define	dl_ag_alts	dl_dt.d_ag_alts
-#define	dl_ag_off	dl_dt.d_ag_off
-#define	dl_secsize	dl_dt.d_secsize
-#define	dl_ncyl		dl_dt.d_ncylinders
-#define	dl_nsect	dl_dt.d_nsectors
-#define	dl_ntrack	dl_dt.d_ntracks
-#define	dl_rpm		dl_dt.d_rpm
-#define	dl_bootfile	dl_dt.d_bootfile
-#define	dl_boot0_blkno	dl_dt.d_boot0_blkno
-#define	dl_hostname	dl_dt.d_hostname
-#define	dl_rootpartition	dl_dt.d_rootpartition
-#define	dl_rwpartition	dl_dt.d_rwpartition
-
-/*
- * Other aliases
- */
-#define	dl_v3_checksum	dl_un.DL_v3_checksum
-#define	dl_bad		dl_un.DL_bad
-
-#endif	/* __APPLE_API_OBSOLETE */
-
-#endif	/* _BSD_DEV_DISK_LABEL_ */
+#warning <dev/disk_label.h> is obsolete
diff --git a/bsd/dev/i386/conf.c b/bsd/dev/i386/conf.c
index 854d04873..d25e4cf08 100644
--- a/bsd/dev/i386/conf.c
+++ b/bsd/dev/i386/conf.c
@@ -109,7 +109,7 @@ extern int volopen(),volclose(),volioctl();
 extern int cttyopen(), cttyread(), cttywrite(), cttyioctl(), cttyselect();
 
 extern int mmread(),mmwrite();
-#define mmselect seltrue
+#define mmselect (select_fcn_t *)seltrue
 #define mmmmap eno_mmap
 
 #include
@@ -138,8 +138,6 @@ extern int logopen(),logclose(),logread(),logioctl(),logselect();
 extern int fdesc_open(), fdesc_read(), fdesc_write(), fdesc_ioctl(),
 		fdesc_select();
 
-extern int seltrue();
-
 struct cdevsw cdevsw[] =
 {
 	/*
@@ -241,7 +239,7 @@ struct cdevsw cdevsw[] =
 	NO_CDEVICE,				/*41*/
 	{
 	  volopen,	volclose,	eno_rdwrt,	eno_rdwrt,	/*42*/
-	  volioctl,	eno_stop,	eno_reset,	0,		seltrue,
+	  volioctl,	eno_stop,	eno_reset,	0,		(select_fcn_t *)seltrue,
 	  eno_mmap,	eno_strat,	eno_getc,	eno_putc,	0
 	},
 };
diff --git a/bsd/dev/i386/km.c b/bsd/dev/i386/km.c
index 2ce9ce21c..307898736 100644
--- a/bsd/dev/i386/km.c
+++ b/bsd/dev/i386/km.c
@@ -62,7 +62,6 @@ int disableConsoleOutput;
 int initialized = 0;
 
 static int kmoutput(struct tty *tp);
-static void kmtimeout(struct tty *tp);
 static void kmstart(struct tty *tp);
 
 extern void KeyboardOpen(void);
@@ -311,9 +310,10 @@ out:
 }
 
 static void
-kmtimeout( struct tty *tp)
+kmtimeout(void *arg)
 {
 	boolean_t	funnel_state;
+	struct tty	*tp = (struct tty *) arg;
 
 	funnel_state = thread_funnel_set(kernel_flock, TRUE);
 	kmoutput(tp);
diff --git a/bsd/dev/i386/stubs.c b/bsd/dev/i386/stubs.c
index beb116a10..491a189e9 100644
--- a/bsd/dev/i386/stubs.c
+++ b/bsd/dev/i386/stubs.c
@@ -60,10 +60,12 @@ copyoutstr(from, to, maxlen, lencopied)
 	int slen,len,error=0;
 
 	slen = strlen(from) + 1;
+	if (slen > maxlen)
+		error = ENAMETOOLONG;
 
 	len = min(maxlen,slen);
 	if (copyout(from, to, len))
-		error = EIO;
+		error = EFAULT;
 	*lencopied = len;
 
 	return error;
@@ -111,8 +113,6 @@ size_t count;
 	return 0;
 }
 
-cpu_number() {return(0);}
-
 set_bsduthreadargs(thread_t th, void * pcb, void *ignored_arg)
 {
 	struct uthread * ut;
diff --git a/bsd/dev/i386/sysctl.c b/bsd/dev/i386/sysctl.c
new file mode 100644
index 000000000..f25b64633
--- /dev/null
+++ b/bsd/dev/i386/sysctl.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include + +static int +hw_cpu_sysctl SYSCTL_HANDLER_ARGS +{ + i386_cpu_info_t cpu_info; + void *ptr = (uint8_t *)&cpu_info + (uint32_t)arg1; + int value; + + cpuid_get_info(&cpu_info); + + if (arg2 == sizeof(uint8_t)) { + value = (uint32_t) *(uint8_t *)ptr; + ptr = &value; + arg2 = sizeof(uint32_t); + } + return SYSCTL_OUT(req, ptr, arg2 ? arg2 : strlen((char *)ptr)+1); + return 0; +} + +static int +hw_cpu_features SYSCTL_HANDLER_ARGS +{ + i386_cpu_info_t cpu_info; + char buf[256]; + vm_size_t size; + + cpuid_get_info(&cpu_info); + buf[0] = '\0'; + cpuid_get_feature_names(cpu_info.cpuid_features, buf, sizeof(buf)); + + return SYSCTL_OUT(req, buf, strlen(buf) + 1); +} + +SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RW, 0, + "CPU info"); + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, vendor, CTLTYPE_STRING | CTLFLAG_RD, + (void *)offsetof(i386_cpu_info_t, cpuid_vendor), 0, + hw_cpu_sysctl, "A", "CPU vendor"); + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand_string, CTLTYPE_STRING | CTLFLAG_RD, + (void *)offsetof(i386_cpu_info_t, cpuid_brand_string), 0, + hw_cpu_sysctl, "A", "CPU brand string"); + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, value, CTLTYPE_INT | CTLFLAG_RD, + (void *)offsetof(i386_cpu_info_t, cpuid_value), sizeof(uint32_t), + hw_cpu_sysctl, "I", "CPU value"); + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, family, CTLTYPE_INT | CTLFLAG_RD, + (void *)offsetof(i386_cpu_info_t, cpuid_family), sizeof(uint8_t), + hw_cpu_sysctl, "I", "CPU family"); + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, model, CTLTYPE_INT | CTLFLAG_RD, + (void *)offsetof(i386_cpu_info_t, cpuid_model), sizeof(uint8_t), + hw_cpu_sysctl, "I", "CPU model"); + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, extmodel, CTLTYPE_INT | CTLFLAG_RD, + (void *)offsetof(i386_cpu_info_t, cpuid_extmodel), sizeof(uint8_t), + hw_cpu_sysctl, "I", "CPU extended model"); + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, extfamily, CTLTYPE_INT | CTLFLAG_RD, + (void *)offsetof(i386_cpu_info_t, cpuid_extfamily), sizeof(uint8_t), + hw_cpu_sysctl, "I", "CPU extended family"); + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, stepping, CTLTYPE_INT | CTLFLAG_RD, + (void *)offsetof(i386_cpu_info_t, cpuid_stepping), sizeof(uint8_t), + hw_cpu_sysctl, "I", "CPU stepping"); + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, feature_bits, CTLTYPE_INT | CTLFLAG_RD, + (void *)offsetof(i386_cpu_info_t, cpuid_features), sizeof(uint32_t), + hw_cpu_sysctl, "I", "CPU features"); + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, signature, CTLTYPE_INT | CTLFLAG_RD, + (void *)offsetof(i386_cpu_info_t, cpuid_signature), 
sizeof(uint32_t), + hw_cpu_sysctl, "I", "CPU signature"); + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand, CTLTYPE_INT | CTLFLAG_RD, + (void *)offsetof(i386_cpu_info_t, cpuid_brand), sizeof(uint8_t), + hw_cpu_sysctl, "I", "CPU brand"); + +#if 0 +SYSCTL_PROC(_machdep_cpu, OID_AUTO, model_string, CTLTYPE_STRING | CTLFLAG_RD, + (void *)offsetof(i386_cpu_info_t, model_string), 0, + hw_cpu_sysctl, "A", "CPU model string"); +#endif + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, features, CTLTYPE_STRING | CTLFLAG_RD, + 0, 0, + hw_cpu_features, "A", "CPU feature names"); + + +struct sysctl_oid *machdep_sysctl_list[] = +{ + &sysctl__machdep_cpu, + &sysctl__machdep_cpu_vendor, + &sysctl__machdep_cpu_brand_string, + &sysctl__machdep_cpu_value, + &sysctl__machdep_cpu_family, + &sysctl__machdep_cpu_model, + &sysctl__machdep_cpu_extmodel, + &sysctl__machdep_cpu_extfamily, + &sysctl__machdep_cpu_feature_bits, + &sysctl__machdep_cpu_stepping, + &sysctl__machdep_cpu_signature, + &sysctl__machdep_cpu_brand, + &sysctl__machdep_cpu_features, + (struct sysctl_oid *) 0 +}; + diff --git a/bsd/dev/i386/unix_signal.c b/bsd/dev/i386/unix_signal.c index 2725a3709..de40b260d 100644 --- a/bsd/dev/i386/unix_signal.c +++ b/bsd/dev/i386/unix_signal.c @@ -52,9 +52,11 @@ #define USER_CS 0x17 #define USER_DS 0x1f +#define USER_CTHREAD 0x27 #define UDATA_SEL USER_DS #define UCODE_SEL USER_CS +#define UCTHREAD_SEL USER_CTHREAD #define valid_user_code_selector(x) (TRUE) #define valid_user_data_selector(x) (TRUE) @@ -63,6 +65,10 @@ #define NULL_SEG 0 +/* Signal handler flavors supported */ +/* These defns should match the Libc implmn */ +#define UC_TRAD 1 + /* * Send an interrupt to process. * @@ -95,7 +101,8 @@ sendsig(p, catcher, sig, mask, code) thread_t thread = current_thread(); thread_act_t th_act = current_act(); struct uthread * ut; - struct i386_saved_state * saved_state = get_user_regs(th_act); + struct i386_saved_state * saved_state = (struct i386_saved_state *) + get_user_regs(th_act); sig_t trampact; ut = get_bsdthread_info(th_act); @@ -116,7 +123,7 @@ sendsig(p, catcher, sig, mask, code) /* Handler should call sigreturn to get out of it */ frame.retaddr = 0xffffffff; frame.catcher = catcher; - frame.sigstyle = 1; + frame.sigstyle = UC_TRAD; frame.sig = sig; if (sig == SIGILL || sig == SIGFPE) { @@ -179,7 +186,7 @@ sendsig(p, catcher, sig, mask, code) saved_state->ds = UDATA_SEL; saved_state->es = UDATA_SEL; saved_state->fs = NULL_SEG; - saved_state->gs = NULL_SEG; + saved_state->gs = USER_CTHREAD; return; bad: @@ -217,7 +224,8 @@ sigreturn(p, uap, retval) thread_t thread = current_thread(); thread_act_t th_act = current_act(); int error; - struct i386_saved_state* saved_state = get_user_regs(th_act); + struct i386_saved_state* saved_state = (struct i386_saved_state*) + get_user_regs(th_act); struct uthread * ut; diff --git a/bsd/dev/ldd.h b/bsd/dev/ldd.h index 1995216ed..6dda85f1c 100644 --- a/bsd/dev/ldd.h +++ b/bsd/dev/ldd.h @@ -44,7 +44,7 @@ #define _BSD_DEV_LDD_PRIV_ #include -#include +#include typedef int (*PFI)(); diff --git a/bsd/dev/memdev.c b/bsd/dev/memdev.c new file mode 100644 index 000000000..490d030cd --- /dev/null +++ b/bsd/dev/memdev.c @@ -0,0 +1,578 @@ +/* + * Copyright (c) 1988 University of Utah. + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: Utah Hdr: vn.c 1.13 94/04/02 + * + * from: @(#)vn.c 8.6 (Berkeley) 4/1/94 + * $FreeBSD: src/sys/dev/vn/vn.c,v 1.105.2.4 2001/11/18 07:11:00 dillon Exp $ + */ + +/* + * RAM disk driver. + * + * Block interface to a ramdisk. 
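+ *
+ * Hypothetical boot-args example (values invented for illustration):
+ * "vmd0=0x14000000.0x800000" makes mdevinit() below publish /dev/md0 and
+ * /dev/rmd0, treating the 8 MB already mapped at kernel virtual address
+ * 0x14000000 as the disk image; the "pmd0=" form names physical memory
+ * instead.  Base and size take decimal or 0x-prefixed hex, are separated
+ * by '.', and must both be page aligned.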
+ * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include + +static open_close_fcn_t mdevopen; +static open_close_fcn_t mdevclose; +static psize_fcn_t mdevsize; +static strategy_fcn_t mdevstrategy; +static int mdevbioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p); +static int mdevcioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p); +static int mdevrw(dev_t dev, struct uio *uio, int ioflag); +static char *nonspace(char *pos, char *end); +static char *getspace(char *pos, char *end); +static char *cvtnum(char *pos, char *end, unsigned int *num); + +/* + * cdevsw + * D_DISK we want to look like a disk + * D_CANFREE We support B_FREEBUF + */ + +static struct bdevsw mdevbdevsw = { + /* open */ mdevopen, + /* close */ mdevclose, + /* strategy */ mdevstrategy, + /* ioctl */ mdevbioctl, + /* dump */ eno_dump, + /* psize */ mdevsize, + /* flags */ D_DISK, +}; + +static struct cdevsw mdevcdevsw = { + /* open */ mdevopen, + /* close */ mdevclose, + /* read */ mdevrw, + /* write */ mdevrw, + /* ioctl */ mdevcioctl, + /* stop */ eno_stop, + /* reset */ eno_reset, + /* ttys */ 0, + /* select */ eno_select, + /* mmap */ eno_mmap, + /* strategy */ eno_strat, + /* getc */ eno_getc, + /* putc */ eno_putc, + /* flags */ D_DISK, +}; + +struct mdev { + vm_offset_t mdBase; /* file size in bytes */ + uint32_t mdSize; /* file size in bytes */ + int mdFlags; /* flags */ + int mdSecsize; /* sector size */ + int mdBDev; /* Block device number */ + int mdCDev; /* Character device number */ + void * mdbdevb; + void * mdcdevb; +} mdev[16]; + +/* mdFlags */ +#define mdInited 0x01 /* This device defined */ +#define mdRO 0x02 /* This device is read-only */ +#define mdPhys 0x04 /* This device is in physical memory */ + +int mdevBMajor = -1; +int mdevCMajor = -1; + +static int mdevioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p, int is_char); +dev_t mdevadd(int devid, ppnum_t base, unsigned int size, int phys); +dev_t mdevlookup(int devid); + +static int mdevclose(dev_t dev, int flags, int devtype, struct proc *p) { + return (0); +} + +static int mdevopen(dev_t dev, int flags, int devtype, struct proc *p) { + + int devid; + + devid = minor(dev); /* Get minor device number */ + + if (devid > 16) return (ENXIO); /* Not valid */ + + if ((flags & FWRITE) && (mdev[devid].mdFlags & mdRO)) return (EACCES); /* Currently mounted RO */ + + return(0); +} + +static int mdevrw(dev_t dev, struct uio *uio, int ioflag) { + int status; + int unit; + addr64_t mdata; + int devid; + enum uio_seg saveflag; + + devid = minor(dev); /* Get minor device number */ + + if (devid > 16) return (ENXIO); /* Not valid */ + if (!(mdev[devid].mdFlags & mdInited)) return (ENXIO); /* Have we actually been defined yet? 
*/ + + mdata = ((addr64_t)mdev[devid].mdBase << 12) + uio->uio_offset; /* Point to the area in "file" */ + + saveflag = uio->uio_segflg; /* Remember what the request is */ + if (mdev[devid].mdFlags & mdPhys) uio->uio_segflg = UIO_PHYS_USERSPACE; /* Make sure we are moving from physical ram if physical device */ + status = uiomove64(mdata, uio->uio_resid, uio); /* Move the data */ + uio->uio_segflg = saveflag; /* Restore the flag */ + + return (status); +} + +static void mdevstrategy(struct buf *bp) { + int unmap; + unsigned int sz, left, lop, csize; + kern_return_t ret; + vm_offset_t vaddr, blkoff; + struct buf *tbuf; + int devid; + addr64_t paddr, fvaddr; + ppnum_t pp; + + devid = minor(bp->b_dev); /* Get minor device number */ + + if ((mdev[devid].mdFlags & mdInited) == 0) { /* Have we actually been defined yet? */ + bp->b_error = ENXIO; + bp->b_flags |= B_ERROR; + biodone(bp); + return; + } + + bp->b_resid = bp->b_bcount; /* Set byte count */ + + blkoff = bp->b_blkno * mdev[devid].mdSecsize; /* Get offset into file */ + +/* + * Note that reading past end is an error, but reading at end is an EOF. For these + * we just return with b_resid == b_bcount. + */ + + if (blkoff >= (mdev[devid].mdSize << 12)) { /* Are they trying to read/write at/after end? */ + if(blkoff != (mdev[devid].mdSize << 12)) { /* Are we trying to read after EOF? */ + bp->b_error = EINVAL; /* Yeah, this is an error */ + bp->b_flags |= B_ERROR | B_INVAL; + } + biodone(bp); /* Return */ + return; + } + + if ((blkoff + bp->b_bcount) > (mdev[devid].mdSize << 12)) { /* Will this read go past end? */ + bp->b_bcount = ((mdev[devid].mdSize << 12) - blkoff); /* Yes, trim to max */ + } + + vaddr = 0; /* Assume not mapped yet */ + unmap = 0; + + if (bp->b_flags & B_VECTORLIST) { /* Do we have a list of UPLs? */ + tbuf = (struct buf *)bp->b_real_bp; /* Get this for C's inadequacies */ + if((bp->b_flags & B_NEED_IODONE) && /* If we have a UPL, is it already mapped? */ + tbuf && + tbuf->b_data) { + vaddr = (vm_offset_t)tbuf->b_data; /* We already have this mapped in, get base address */ + } + else { /* Not mapped yet */ + ret = ubc_upl_map(bp->b_pagelist, &vaddr); /* Map it in */ + if(ret != KERN_SUCCESS) panic("ramstrategy: ubc_upl_map failed, rc = %08X\n", ret); + unmap = 1; /* Remember to unmap later */ + } + vaddr = vaddr += bp->b_uploffset; /* Calculate actual vaddr */ + } + else vaddr = (vm_offset_t)bp->b_data; /* No UPL, we already have address */ + + fvaddr = (mdev[devid].mdBase << 12) + blkoff; /* Point to offset into ram disk */ + + if(bp->b_flags & B_READ) { /* Is this a read? */ + if(!(mdev[devid].mdFlags & mdPhys)) { /* Physical mapped disk? */ + bcopy((void *)((uintptr_t)fvaddr), + (void *)vaddr, (size_t)bp->b_bcount); /* This is virtual, just get the data */ + } + else { + left = bp->b_bcount; /* Init the amount left to copy */ + while(left) { /* Go until it is all copied */ + + lop = min((4096 - (vaddr & 4095)), (4096 - (fvaddr & 4095))); /* Get smallest amount left on sink and source */ + csize = min(lop, left); /* Don't move more than we need to */ + + pp = pmap_find_phys(kernel_pmap, (addr64_t)((unsigned int)vaddr)); /* Get the sink physical address */ + if(!pp) { /* Not found, what gives? 
*/ + panic("mdevstrategy: sink address %016llX not mapped\n", (addr64_t)((unsigned int)vaddr)); + } + paddr = (addr64_t)(((addr64_t)pp << 12) | (addr64_t)(vaddr & 4095)); /* Get actual address */ + bcopy_phys(fvaddr, paddr, csize); /* Copy this on in */ + mapping_set_mod(paddr >> 12); /* Make sure we know that it is modified */ + + left = left - csize; /* Calculate what is left */ + vaddr = vaddr + csize; /* Move to next sink address */ + fvaddr = fvaddr + csize; /* Bump to next physical address */ + } + } + } + else { /* This is a write */ + if(!(mdev[devid].mdFlags & mdPhys)) { /* Physical mapped disk? */ + bcopy((void *)vaddr, (void *)((uintptr_t)fvaddr), + (size_t)bp->b_bcount); /* This is virtual, just put the data */ + } + else { + left = bp->b_bcount; /* Init the amount left to copy */ + while(left) { /* Go until it is all copied */ + + lop = min((4096 - (vaddr & 4095)), (4096 - (fvaddr & 4095))); /* Get smallest amount left on sink and source */ + csize = min(lop, left); /* Don't move more than we need to */ + + pp = pmap_find_phys(kernel_pmap, (addr64_t)((unsigned int)vaddr)); /* Get the source physical address */ + if(!pp) { /* Not found, what gives? */ + panic("mdevstrategy: source address %016llX not mapped\n", (addr64_t)((unsigned int)vaddr)); + } + paddr = (addr64_t)(((addr64_t)pp << 12) | (addr64_t)(vaddr & 4095)); /* Get actual address */ + + bcopy_phys(paddr, fvaddr, csize); /* Move this on out */ + + left = left - csize; /* Calculate what is left */ + vaddr = vaddr + csize; /* Move to next sink address */ + fvaddr = fvaddr + csize; /* Bump to next physical address */ + } + } + } + + if (unmap) { /* Do we need to unmap this? */ + ubc_upl_unmap(bp->b_pagelist); /* Yes, unmap it */ + } + + bp->b_resid = 0; /* Nothing more to do */ + biodone(bp); /* Say we've finished */ +} + +static int mdevbioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) { + return (mdevioctl(dev, cmd, data, flag, p, 0)); +} + +static int mdevcioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) { + return (mdevioctl(dev, cmd, data, flag, p, 1)); +} + +static int mdevioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p, int is_char) { + + int error; + u_long *f; + u_int64_t *o; + int devid; + + devid = minor(dev); /* Get minor device number */ + + if (devid > 16) return (ENXIO); /* Not valid */ + + error = suser(p->p_ucred, &p->p_acflag); /* Are we superman? */ + if (error) return (error); /* Nope... */ + + f = (u_long*)data; + o = (u_int64_t *)data; + + switch (cmd) { + + case DKIOCGETMAXBLOCKCOUNTREAD: + *o = 32; + break; + + case DKIOCGETMAXBLOCKCOUNTWRITE: + *o = 32; + break; + + case DKIOCGETMAXSEGMENTCOUNTREAD: + *o = 32; + break; + + case DKIOCGETMAXSEGMENTCOUNTWRITE: + *o = 32; + break; + + case DKIOCGETBLOCKSIZE: + *f = mdev[devid].mdSecsize; + break; + + case DKIOCSETBLOCKSIZE: + if (is_char) return (ENODEV); /* We can only do this for a block */ + + if (*f < DEV_BSIZE) return (EINVAL); /* Too short? 
 */
+
+		mdev[devid].mdSecsize = *f;	/* set the new block size */
+		break;
+
+	case DKIOCISWRITABLE:
+		*f = 1;
+		break;
+
+	case DKIOCGETBLOCKCOUNT32:
+		if(!(mdev[devid].mdFlags & mdInited)) return (ENXIO);
+		*f = ((mdev[devid].mdSize << 12) + mdev[devid].mdSecsize - 1) / mdev[devid].mdSecsize;
+		break;
+
+	case DKIOCGETBLOCKCOUNT:
+		if(!(mdev[devid].mdFlags & mdInited)) return (ENXIO);
+		*o = ((mdev[devid].mdSize << 12) + mdev[devid].mdSecsize - 1) / mdev[devid].mdSecsize;
+		break;
+
+	default:
+		error = ENOTTY;
+		break;
+	}
+	return(error);
+}
+
+
+static int mdevsize(dev_t dev) {
+
+	int devid;
+
+	devid = minor(dev);	/* Get minor device number */
+	if (devid > 16) return (ENXIO);	/* Not valid */
+
+	if ((mdev[devid].mdFlags & mdInited) == 0) return(-1);	/* Not inited yet */
+
+	return(mdev[devid].mdSecsize);
+}
+
+#include <pexpert/pexpert.h>
+
+void mdevinit(int cnt) {
+
+	int devid, phys;
+	ppnum_t base;
+	unsigned int size;
+	char *ba, *lp;
+	dev_t dev;
+
+
+	ba = PE_boot_args();	/* Get the boot arguments */
+	lp = ba + 256;	/* Point to the end */
+
+	while(1) {	/* Step through, looking for our keywords */
+		phys = 0;	/* Assume virtual memory device */
+		ba = nonspace(ba, lp);	/* Find non-space */
+		if(ba >= lp) return;	/* We are done if no more... */
+		if(((ba[0] != 'v') && (ba[0] != 'p'))
+		  || (ba[1] != 'm') || (ba[2] != 'd') || (ba[4] != '=')
+		  || (ba[3] < '0') || (ba[3] > 'f')
+		  || ((ba[3] > '9') && (ba[3] < 'a'))) {	/* Is this of form "vmdx=" or "pmdx=" where x is hex digit? */
+
+			ba = getspace(ba, lp);	/* Find next white space or end */
+			continue;	/* Start looking for the next one */
+		}
+
+		if(ba[0] == 'p') phys = 1;	/* Set physical memory disk */
+
+		devid = ba[3] & 0xF;	/* Assume digit */
+		if(ba[3] > '9') devid += 9;	/* Adjust for hex digits */
+
+		ba = &ba[5];	/* Step past keyword */
+		ba = cvtnum(ba, lp, &base);	/* Convert base of memory disk */
+		if(ba >= lp) return;	/* Malformed one at the end, leave */
+		if(ba[0] != '.') continue;	/* If not length separator, try next... */
+		if(base & 0xFFF) continue;	/* Only allow page aligned stuff */
+
+		ba++;	/* Step past '.' */
+		ba = cvtnum(ba, lp, &size);	/* Try to convert it */
+		if(!size || (size & 0xFFF)) continue;	/* Allow only non-zero page size multiples */
+		if(ba < lp) {	/* If we are not at end, check end character */
+			if((ba[0] != ' ') && (ba[0] != 0)) continue;	/* End must be null or space */
+		}
+
+		dev = mdevadd(devid, base >> 12, size >> 12, phys);	/* Go add the device */
+	}
+
+	return;
+
+}
+
+char *nonspace(char *pos, char *end) {	/* Find next non-space in string */
+
+	if(pos >= end) return end;	/* Don't go past end */
+	if(pos[0] == 0) return end;	/* If at null, make end */
+
+	while(1) {	/* Keep going */
+		if(pos[0] != ' ') return pos;	/* Leave if we found one */
+		pos++;	/* Step on */
+		if(pos >= end) return end;	/* Quit if we run off end */
+	}
+}
+
+char *getspace(char *pos, char *end) {	/* Find next space in string */
+
+	while(1) {	/* Keep going */
+		if(pos >= end) return end;	/* Don't go past end */
+		if(pos[0] == 0) return end;	/* Leave if we hit null */
+		if(pos[0] == ' ') return pos;	/* Leave if we found one */
+		pos++;	/* Step on */
+	}
+}
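+
+/*
+ * A worked example of the converter below (illustrative only): given the
+ * text "0x4000.0x1000", cvtnum() consumes "0x4000" in base 16, stops at
+ * the '.' (a non-digit), and returns with *num = 0x4000; mdevinit() above
+ * then steps past the '.' and calls cvtnum() again for the size.
+ */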
+char *cvtnum(char *pos, char *end, unsigned int *num) {	/* Convert to a number */
+
+	int rad, dig;
+
+	*num = 0;	/* Set answer to 0 to start */
+	rad = 10;
+
+	if(pos >= end) return end;	/* Don't go past end */
+	if(pos[0] == 0) return end;	/* If at null, make end */
+
+	if(pos[0] == '0' && ((pos[1] == 'x') || (pos[1] == 'X'))) {	/* A hex constant? */
+		rad = 16;
+		pos += 2;	/* Point to the number */
+	}
+
+	while(1) {	/* Convert it */
+
+		if(pos >= end) return end;	/* Don't go past end */
+		if(pos[0] == 0) return end;	/* If at null, make end */
+		if(pos[0] < '0') return pos;	/* Leave if non-digit */
+		dig = pos[0] & 0xF;	/* Extract digit */
+		if(pos[0] > '9') {	/* Is it bigger than 9? */
+			if(rad == 10) return pos;	/* Leave if not base 10 */
+			if(!(((pos[0] >= 'A') && (pos[0] <= 'F'))
+			  || ((pos[0] >= 'a') && (pos[0] <= 'f')))) return pos;	/* Leave if bogus char */
+			dig = dig + 9;	/* Adjust for character */
+		}
+		*num = (*num * rad) + dig;	/* Accumulate the number */
+		pos++;	/* Step on */
+	}
+}
+
+dev_t mdevadd(int devid, ppnum_t base, unsigned int size, int phys) {
+
+	int i;
+
+	if(devid < 0) {
+
+		devid = -1;
+		for(i = 0; i < 16; i++) {	/* Search all known memory devices */
+			if(!(mdev[i].mdFlags & mdInited)) {	/* Is this a free one? */
+				if(devid < 0) devid = i;	/* Remember first free one */
+				continue;	/* Skip check */
+			}
+			if(!(((base + size - 1) < mdev[i].mdBase) || ((mdev[i].mdBase + mdev[i].mdSize - 1) < base))) {	/* Is there any overlap? */
+				panic("mdevadd: attempt to add overlapping memory device at %08X-%08X\n", mdev[i].mdBase, mdev[i].mdBase + mdev[i].mdSize - 1);
+			}
+		}
+		if(devid < 0) {	/* Do we have free slots? */
+			panic("mdevadd: attempt to add more than 16 memory devices\n");
+		}
+	}
+	else {
+		if(devid >= 16) {	/* Giving us something bogus? */
+			panic("mdevadd: attempt to explicitly add a bogus memory device: %08X\n", devid);
+		}
+		if(mdev[devid].mdFlags & mdInited) {	/* Already there? */
+			panic("mdevadd: attempt to explicitly add a previously defined memory device: %08X\n", devid);
+		}
+	}
+
+	if(mdevBMajor < 0) {	/* Have we gotten a major number yet? */
+		mdevBMajor = bdevsw_add(-1, &mdevbdevsw);	/* Add to the table and figure out a major number */
+		if (mdevBMajor < 0) {
+			printf("mdevadd: error - bdevsw_add() returned %d\n", mdevBMajor);
+			return -1;
+		}
+	}
+
+	if(mdevCMajor < 0) {	/* Have we gotten a major number yet? */
+		mdevCMajor = cdevsw_add_with_bdev(-1, &mdevcdevsw, mdevBMajor);	/* Add to the table and figure out a major number */
+		if (mdevCMajor < 0) {
+			printf("ramdevice_init: error - cdevsw_add() returned %d\n", mdevCMajor);
+			return -1;
+		}
+	}
+
+	mdev[devid].mdBDev = makedev(mdevBMajor, devid);	/* Get the device number */
+	mdev[devid].mdbdevb = devfs_make_node(mdev[devid].mdBDev, DEVFS_BLOCK,	/* Make the node */
+					UID_ROOT, GID_OPERATOR,
+					0600, "md%d", devid);
+	if (mdev[devid].mdbdevb == NULL) {	/* Did we make one? */
+		printf("mdevadd: devfs_make_node for block failed!\n");
+		return -1;	/* Nope... */
+	}
+
+	mdev[devid].mdCDev = makedev(mdevCMajor, devid);	/* Get the device number */
+	mdev[devid].mdcdevb = devfs_make_node(mdev[devid].mdCDev, DEVFS_CHAR,	/* Make the node */
+					UID_ROOT, GID_OPERATOR,
+					0600, "rmd%d", devid);
+	if (mdev[devid].mdcdevb == NULL) {	/* Did we make one? */
+		printf("mdevadd: devfs_make_node for character failed!\n");
+		return -1;	/* Nope... */
+	}
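+
+/*
+ * Sketch of a successful call (names as above): mdevadd(0, base, size, 0)
+ * registers the block and character switch entries on first use, makes
+ * block node /dev/md0 and raw node /dev/rmd0 with minor number 0, and
+ * returns the block dev_t; callers holding only the device id can get
+ * that dev_t back later through mdevlookup() below.
+ */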
+
+	mdev[devid].mdBase = base;	/* Set the base address of ram disk */
+	mdev[devid].mdSize = size;	/* Set the length of the ram disk */
+	mdev[devid].mdSecsize = DEV_BSIZE;	/* Set starting block size */
+	if(phys) mdev[devid].mdFlags |= mdPhys;	/* Show that we are in physical memory */
+	mdev[devid].mdFlags |= mdInited;	/* Show we are all set up */
+	printf("Added memory device md%x/rmd%x (%08X/%08X) at %08X for %08X\n",
+		devid, devid, mdev[devid].mdBDev, mdev[devid].mdCDev, base << 12, size << 12);
+	return mdev[devid].mdBDev;
+}
+
+
+dev_t mdevlookup(int devid) {
+
+	if((devid < 0) || (devid > 15)) return -1;	/* Filter any bogus requests */
+	if(!(mdev[devid].mdFlags & mdInited)) return -1;	/* This one hasn't been defined */
+	return mdev[devid].mdBDev;	/* Return the device number */
+}
diff --git a/bsd/dev/memdev.h b/bsd/dev/memdev.h
new file mode 100644
index 000000000..a07b5d2a6
--- /dev/null
+++ b/bsd/dev/memdev.h
@@ -0,0 +1,17 @@
+
+#ifndef _SYS_MEMDEV_H_
+#define _SYS_MEMDEV_H_
+
+#include
+
+#ifdef KERNEL_PRIVATE
+
+#ifdef __APPLE_API_PRIVATE
+
+void mdevinit(int cnt);
+
+#endif /* __APPLE_API_PRIVATE */
+
+#endif /* KERNEL_PRIVATE */
+
+#endif /* _SYS_MEMDEV_H_*/
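The memory-device surface exported by these two files is small. A minimal consumer sketch (hypothetical caller, not part of this patch; probe_md_root and its use are invented for illustration, and it assumes startup code already ran mdevinit()):

#include <sys/types.h>

extern void  mdevinit(int cnt);		/* scans boot-args, creates md devices */
extern dev_t mdevlookup(int devid);	/* block dev_t for md<devid>, or -1 */

/* Ask for memory device 0; mdevinit()'s cnt argument is unused above. */
static dev_t
probe_md_root(void)
{
	dev_t dev = mdevlookup(0);	/* -1 when no vmd0=/pmd0= boot-arg */

	return dev;
}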
diff --git a/bsd/dev/ppc/chud/chud_bsd_callback.c b/bsd/dev/ppc/chud/chud_bsd_callback.c
new file mode 100644
index 000000000..9fd930c96
--- /dev/null
+++ b/bsd/dev/ppc/chud/chud_bsd_callback.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include
+#include
+#include
+
+#include
+#include	/* u_int */
+#include	/* struct proc */
+#include	/* struct sysent */
+
+struct exit_args {
+	int	rval;
+};
+extern void exit(struct proc *p, struct exit_args *uap, int *retval);
+extern struct sysent sysent[];
+
+#pragma mark **** kern debug ****
+typedef void (*chudxnu_kdebug_callback_func_t)(uint32_t debugid, uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint32_t arg4);
+static chudxnu_kdebug_callback_func_t kdebug_callback_fn = NULL;
+
+extern void kdbg_control_chud(int val, void *fn);
+extern unsigned int kdebug_enable;
+
+static void chudxnu_private_kdebug_callback(unsigned int debugid, unsigned int arg0, unsigned int arg1, unsigned int arg2, unsigned int arg3, unsigned int arg4)
+{
+    if(kdebug_callback_fn) {
+        (kdebug_callback_fn)(debugid, arg0, arg1, arg2, arg3, arg4);
+    }
+}
+
+__private_extern__
+kern_return_t chudxnu_kdebug_callback_enter(chudxnu_kdebug_callback_func_t func)
+{
+    kdebug_callback_fn = func;
+
+    kdbg_control_chud(TRUE, (void *)chudxnu_private_kdebug_callback);
+    kdebug_enable |= 0x10;
+
+    return KERN_SUCCESS;
+}
+
+__private_extern__
+kern_return_t chudxnu_kdebug_callback_cancel(void)
+{
+    kdebug_callback_fn = NULL;
+    kdbg_control_chud(FALSE, NULL);
+    kdebug_enable &= ~(0x10);
+
+    return KERN_SUCCESS;
+}
+
+#pragma mark **** task will exit ****
+
+typedef kern_return_t (*chudxnu_exit_callback_func_t)(int pid);
+
+__private_extern__
+kern_return_t chudxnu_exit_callback_enter(chudxnu_exit_callback_func_t func)
+{
+
+    return KERN_FAILURE;
+
+}
+
+__private_extern__
+kern_return_t chudxnu_exit_callback_cancel(void)
+{
+
+    return KERN_FAILURE;
+
+}
diff --git a/bsd/dev/ppc/chud/chud_process.c b/bsd/dev/ppc/chud/chud_process.c
new file mode 100644
index 000000000..38fc362b9
--- /dev/null
+++ b/bsd/dev/ppc/chud/chud_process.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include + +__private_extern__ +int chudxnu_pid_for_task(task_t task) +{ + struct proc *p; + + if(task!=TASK_NULL) { + p = (struct proc *)(get_bsdtask_info(task)); + if(p) { + return (p->p_pid); + } + } + return -1; +} + +__private_extern__ +task_t chudxnu_task_for_pid(int pid) +{ + struct proc *p = pfind(pid); + if(p) { + return p->task; + } + return TASK_NULL; +} + +__private_extern__ +int chudxnu_current_pid(void) +{ + return current_proc()->p_pid; +} diff --git a/bsd/dev/ppc/conf.c b/bsd/dev/ppc/conf.c index ba13fc3ee..1b51cac58 100644 --- a/bsd/dev/ppc/conf.c +++ b/bsd/dev/ppc/conf.c @@ -103,6 +103,9 @@ extern int mmread(),mmwrite(); #define mmselect seltrue #if 1 +#ifdef NPTY +#undef NPTY +#endif /* NPTY */ #define NPTY 32 #else /* 1 */ #include @@ -147,7 +150,7 @@ struct cdevsw cdevsw[] = { consopen, consclose, consread, conswrite, /* 0*/ consioctl, nulldev, nulldev, 0, consselect, - eno_mmap, eno_strat, cons_getc, cons_putc, D_TTY + eno_mmap, eno_strat, (getc_fcn_t *)cons_getc, (putc_fcn_t *)cons_putc, D_TTY }, NO_CDEVICE, /* 1*/ { @@ -157,7 +160,7 @@ struct cdevsw cdevsw[] = }, { nulldev, nulldev, mmread, mmwrite, /* 3*/ - eno_ioctl, nulldev, nulldev, 0, mmselect, + eno_ioctl, nulldev, nulldev, 0, (select_fcn_t *)mmselect, eno_mmap, eno_strat, eno_getc, eno_putc, 0 }, { diff --git a/bsd/dev/ppc/kern_machdep.c b/bsd/dev/ppc/kern_machdep.c index ec8165413..d4422a1b9 100644 --- a/bsd/dev/ppc/kern_machdep.c +++ b/bsd/dev/ppc/kern_machdep.c @@ -55,18 +55,14 @@ check_cpu_subtype(cpu_subtype_t cpu_subtype) if (cpu_subtype == ms->cpu_subtype) return (TRUE); - if (cpu_subtype == CPU_SUBTYPE_POWERPC_601) - return (FALSE); - switch (cpu_subtype) { + case CPU_SUBTYPE_POWERPC_970: + /* Do not allow a 970 binary to run on non-970 systems */ + if (ms->cpu_subtype != CPU_SUBTYPE_POWERPC_970) + break; case CPU_SUBTYPE_POWERPC_7450: case CPU_SUBTYPE_POWERPC_7400: case CPU_SUBTYPE_POWERPC_750: - case CPU_SUBTYPE_POWERPC_604e: - case CPU_SUBTYPE_POWERPC_604: - case CPU_SUBTYPE_POWERPC_603ev: - case CPU_SUBTYPE_POWERPC_603e: - case CPU_SUBTYPE_POWERPC_603: case CPU_SUBTYPE_POWERPC_ALL: return (TRUE); } @@ -93,43 +89,35 @@ grade_cpu_subtype(cpu_subtype_t cpu_subtype) * cctools project. As of 2/16/98 this is what has been agreed upon for * the PowerPC subtypes. If an exact match is not found the subtype will * be picked from the following order: - * 7400, 750, 604e, 604, 603ev, 603e, 603, ALL + * 970(but only on 970), 7450, 7400, 750, ALL * Note the 601 is NOT in the list above. It is only picked via an exact * match. For details see Radar 2213821. * * To implement this function to follow what was agreed upon above, we use - * the fact there are currently 10 different subtypes. Exact matches return - * the value 10, the value 0 is returned for 601 that is not an exact match, - * and the values 9 thru 1 are returned for the subtypes listed in the order - * above. + * the fact there are currently 4 different subtypes. Exact matches return + * the value 6, and the values 5 thru 1 are returned for the + * subtypes listed in the order above. 
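+ *
+ * Worked example (read off the switch below): on a 7450-class machine,
+ * candidate slices grade 7450 -> 6 (exact match), 7400 -> 3, 750 -> 2,
+ * ALL -> 1, and 970 -> 0, since the 970 case is acceptable only on a
+ * 970 host.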
*/ if (ms->cpu_subtype == cpu_subtype) - return 10; - if (cpu_subtype == CPU_SUBTYPE_POWERPC_601) - return 0; + return 6; switch (cpu_subtype) { - case CPU_SUBTYPE_POWERPC_7450: - return 9; - case CPU_SUBTYPE_POWERPC_7400: - return 8; - case CPU_SUBTYPE_POWERPC_750: - return 7; - case CPU_SUBTYPE_POWERPC_604e: - return 6; - case CPU_SUBTYPE_POWERPC_604: + case CPU_SUBTYPE_POWERPC_970: + /* Do not allow a 970 binary to run on non-970 systems */ + if (ms->cpu_subtype != CPU_SUBTYPE_POWERPC_970) + break; return 5; - case CPU_SUBTYPE_POWERPC_603ev: + case CPU_SUBTYPE_POWERPC_7450: return 4; - case CPU_SUBTYPE_POWERPC_603e: + case CPU_SUBTYPE_POWERPC_7400: return 3; - case CPU_SUBTYPE_POWERPC_603: + case CPU_SUBTYPE_POWERPC_750: return 2; case CPU_SUBTYPE_POWERPC_ALL: return 1; } /* - * If we get here it is because it is a cpusubtype we don't support (602 and - * 620) or new cpusubtype that was added since this code was written. Both + * If we get here it is because it is a cpusubtype we don't support + * or a new cpusubtype that was added since this code was written. Both * will be considered unacceptable. */ return 0; @@ -144,7 +132,7 @@ kernacc( off_t base; off_t end; - base = trunc_page(start); + base = trunc_page_64(start); end = start + len; while (base < end) { diff --git a/bsd/dev/ppc/km.c b/bsd/dev/ppc/km.c index 4885c7abb..ea1a6558b 100644 --- a/bsd/dev/ppc/km.c +++ b/bsd/dev/ppc/km.c @@ -345,7 +345,7 @@ kmoutput( } } if (tp->t_outq.c_cc > 0) { - timeout(kmtimeout, tp, hz); + timeout((timeout_fcn_t)kmtimeout, tp, hz); } tp->t_state &= ~TS_BUSY; ttwwakeup(tp); diff --git a/bsd/dev/ppc/mem.c b/bsd/dev/ppc/mem.c index b0078086a..45d715a40 100644 --- a/bsd/dev/ppc/mem.c +++ b/bsd/dev/ppc/mem.c @@ -89,7 +89,6 @@ static caddr_t devzerobuf; -extern vm_offset_t mem_actual; extern pmap_t kernel_pmap; mmread(dev, uio) @@ -115,6 +114,7 @@ mmrw(dev, uio, rw) { register int o; register u_int c, v; + addr64_t vll; register struct iovec *iov; int error = 0; vm_offset_t where; @@ -135,45 +135,48 @@ mmrw(dev, uio, rw) /* minor device 0 is physical memory */ case 0: - v = trunc_page(uio->uio_offset); - if (uio->uio_offset >= ((dgWork.dgFlags & enaDiagDM) ? mem_actual : mem_size)) + vll = trunc_page_64(uio->uio_offset); + if(((vll >> 31) == 1) || vll >= ((dgWork.dgFlags & enaDiagDM) ? mem_actual : max_mem)) goto fault; - size= PAGE_SIZE; - if(dgWork.dgFlags & enaDiagDM) { /* Can we really get all memory? 
*/ - if (kmem_alloc_pageable(kernel_map, &where, size) != KERN_SUCCESS) { + if (kmem_alloc_pageable(kernel_map, &where, PAGE_SIZE) != KERN_SUCCESS) { goto fault; } else { - (void)mapping_make(kernel_pmap, 0, where, v, - VM_PROT_READ, 2, 0); /* Map it in for the moment */ + addr64_t collad; + + collad = mapping_make(kernel_pmap, (addr64_t)where, (ppnum_t)(vll >> 12), 0, 1, VM_PROT_READ); /* Map it in for the moment */ + if(collad) { /* See if it failed (shouldn't happen) */ + kmem_free(kernel_map, where, PAGE_SIZE); /* Toss the page */ + goto fault; /* Kill the transfer */ + } } } else { - if (kmem_alloc(kernel_map, &where, size) + if (kmem_alloc(kernel_map, &where, 4096) != KERN_SUCCESS) { goto fault; } } - o = uio->uio_offset - v; + o = uio->uio_offset - vll; c = min(PAGE_SIZE - o, (u_int)iov->iov_len); - error = uiomove((caddr_t) (where + o), c, uio); + error = uiomove((caddr_t)(where + o), c, uio); - if(dgWork.dgFlags & enaDiagDM) (void)mapping_remove(kernel_pmap, where); /* Unmap it */ + if(dgWork.dgFlags & enaDiagDM) (void)mapping_remove(kernel_pmap, (addr64_t)where); /* Unmap it */ kmem_free(kernel_map, where, PAGE_SIZE); continue; /* minor device 1 is kernel memory */ case 1: /* Do some sanity checking */ - if (((caddr_t)uio->uio_offset >= VM_MAX_KERNEL_ADDRESS) || - ((caddr_t)uio->uio_offset <= VM_MIN_KERNEL_ADDRESS)) + if (((addr64_t)uio->uio_offset > vm_last_addr) || + ((addr64_t)uio->uio_offset < VM_MIN_KERNEL_ADDRESS)) goto fault; c = iov->iov_len; - if (!kernacc((caddr_t)uio->uio_offset, c)) + if (!kernacc(uio->uio_offset, c)) goto fault; - error = uiomove((caddr_t)uio->uio_offset, (int)c, uio); + error = uiomove64(uio->uio_offset, (int)c, uio); continue; /* minor device 2 is EOF/RATHOLE */ diff --git a/bsd/dev/ppc/stubs.c b/bsd/dev/ppc/stubs.c index ddb4174d4..9ba788aee 100644 --- a/bsd/dev/ppc/stubs.c +++ b/bsd/dev/ppc/stubs.c @@ -42,38 +42,6 @@ #include -/* - * copy a null terminated string from the kernel address space into - * the user address space. - * - if the user is denied write access, return EFAULT. - * - if the end of string isn't found before - * maxlen bytes are copied, return ENAMETOOLONG, - * indicating an incomplete copy. - * - otherwise, return 0, indicating success. - * the number of bytes copied is always returned in lencopied. - */ -int -copyoutstr(from, to, maxlen, lencopied) - void * from, * to; - size_t maxlen, *lencopied; -{ - int slen,len,error=0; - - /* XXX Must optimize this */ - - slen = strlen(from) + 1; - if (slen > maxlen) - error = ENAMETOOLONG; - - len = min(maxlen,slen); - if (copyout(from, to, len)) - error = EFAULT; - *lencopied = len; - - return error; -} - - /* * copy a null terminated string from one point to another in * the kernel address space. diff --git a/iokit/IOKit/adb/adb.h b/bsd/dev/ppc/sysctl.c similarity index 80% rename from iokit/IOKit/adb/adb.h rename to bsd/dev/ppc/sysctl.c index 1c923e9ac..120e9ff3d 100644 --- a/iokit/IOKit/adb/adb.h +++ b/bsd/dev/ppc/sysctl.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -22,7 +22,13 @@ * * @APPLE_LICENSE_HEADER_END@ */ -typedef UInt8 IOADBAddress; -typedef UInt8 IOADBRegister; -typedef void (*ADB_callback_func) - (IOService * client, UInt8 adbCommand, IOByteCount length, UInt8 * data); + +#include +#include +#include + +struct sysctl_oid *machdep_sysctl_list[] = +{ + (struct sysctl_oid *) 0 +}; + diff --git a/bsd/dev/ppc/systemcalls.c b/bsd/dev/ppc/systemcalls.c index 43ed693f8..769da1ec3 100644 --- a/bsd/dev/ppc/systemcalls.c +++ b/bsd/dev/ppc/systemcalls.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -39,6 +40,7 @@ #include #include #include +#include extern void unix_syscall( @@ -49,8 +51,8 @@ extern struct savearea * find_user_regs( thread_act_t act); -extern enter_funnel_section(funnel_t *funnel_lock); -extern exit_funnel_section(funnel_t *funnel_lock); +extern void enter_funnel_section(funnel_t *funnel_lock); +extern void exit_funnel_section(void); /* * Function: unix_syscall @@ -73,6 +75,21 @@ unix_syscall( boolean_t flavor; int funnel_type; + flavor = (((unsigned int)regs->save_r0) == NULL)? 1: 0; + + if (flavor) + code = regs->save_r3; + else + code = regs->save_r0; + + if (kdebug_enable && (code != 180)) { + if (flavor) + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, + regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0); + else + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, + regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0); + } thread_act = current_act(); uthread = get_bsdthread_info(thread_act); @@ -81,15 +98,8 @@ unix_syscall( else proc = current_proc(); - flavor = (regs->save_r0 == NULL)? 1: 0; - uthread->uu_ar0 = (int *)regs; - if (flavor) - code = regs->save_r3; - else - code = regs->save_r0; - callp = (code >= nsysent) ? &sysent[63] : &sysent[code]; #ifdef DEBUG @@ -118,24 +128,12 @@ unix_syscall( } } - callp = (code >= nsysent) ? 
&sysent[63] : &sysent[code]; - - if (kdebug_enable && (code != 180)) { - if (flavor) - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, - regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0); - else - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, - regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0); - } - funnel_type = (int)callp->sy_funnel; - if(funnel_type == KERNEL_FUNNEL) + if (funnel_type == KERNEL_FUNNEL) enter_funnel_section(kernel_flock); else if (funnel_type == NETWORK_FUNNEL) enter_funnel_section(network_flock); - uthread->uu_rval[0] = 0; /* @@ -156,7 +154,9 @@ unix_syscall( if (KTRPOINT(proc, KTR_SYSCALL)) ktrsyscall(proc, code, callp->sy_narg, uthread->uu_arg, funnel_type); + AUDIT_CMD(audit_syscall_enter(code, proc, uthread)); error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0])); + AUDIT_CMD(audit_syscall_exit(error, proc, uthread)); regs = find_user_regs(thread_act); @@ -164,7 +164,7 @@ unix_syscall( regs->save_srr0 -= 8; } else if (error != EJUSTRETURN) { if (error) { - regs->save_r3 = error; + regs->save_r3 = (long long)error; /* set the "pc" to execute cerror routine */ regs->save_srr0 -= 4; } else { /* (not error) */ @@ -177,10 +177,7 @@ unix_syscall( if (KTRPOINT(proc, KTR_SYSRET)) ktrsysret(proc, code, error, uthread->uu_rval[0], funnel_type); - if(funnel_type == KERNEL_FUNNEL) - exit_funnel_section(kernel_flock); - else if (funnel_type == NETWORK_FUNNEL) - exit_funnel_section(network_flock); + exit_funnel_section(); if (kdebug_enable && (code != 180)) { KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, @@ -214,7 +211,7 @@ unix_syscall_return(error) regs->save_srr0 -= 8; } else if (error != EJUSTRETURN) { if (error) { - regs->save_r3 = error; + regs->save_r3 = (long long)error; /* set the "pc" to execute cerror routine */ regs->save_srr0 -= 4; } else { /* (not error) */ @@ -236,10 +233,7 @@ unix_syscall_return(error) if (KTRPOINT(proc, KTR_SYSRET)) ktrsysret(proc, code, error, uthread->uu_rval[0], funnel_type); - if(funnel_type == KERNEL_FUNNEL) - exit_funnel_section(kernel_flock); - else if (funnel_type == NETWORK_FUNNEL) - exit_funnel_section(network_flock); + exit_funnel_section(); if (kdebug_enable && (code != 180)) { KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, @@ -263,33 +257,31 @@ struct gettimeofday_args{ struct timeval *tp; struct timezone *tzp; }; -/* NOTE THIS implementation is for ppc architectures only */ +/* NOTE THIS implementation is for ppc architectures only. + * It is infrequently called, since the commpage intercepts + * most calls in user mode. 
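+ * The kernel path below only remains responsible for reading the clock
+ * through clock_gettimeofday() and for copying the timezone out under
+ * tz_slock when a tzp argument is supplied.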
+ */
 int
 ppc_gettimeofday(p, uap, retval)
 	struct proc *p;
 	register struct gettimeofday_args *uap;
 	register_t *retval;
 {
-	struct timeval atv;
 	int error = 0;
-	struct timezone ltz;
-	//struct savearea *child_state;
-	extern simple_lock_data_t tz_slock;
-
-	if (uap->tp) {
-		microtime(&atv);
-		retval[0] = atv.tv_sec;
-		retval[1] = atv.tv_usec;
-	}
+
+	if (uap->tp)
+		clock_gettimeofday(&retval[0], &retval[1]);
 
 	if (uap->tzp) {
+		struct timezone ltz;
+		extern simple_lock_data_t tz_slock;
+
 		usimple_lock(&tz_slock);
 		ltz = tz;
 		usimple_unlock(&tz_slock);
-		error = copyout((caddr_t)&ltz, (caddr_t)uap->tzp,
-		    sizeof (tz));
+		error = copyout((caddr_t)&ltz, (caddr_t)uap->tzp, sizeof (tz));
 	}
 
-	return(error);
+	return (error);
 }
diff --git a/bsd/dev/ppc/unix_signal.c b/bsd/dev/ppc/unix_signal.c
index 2b3fc8318..2bbb6674e 100644
--- a/bsd/dev/ppc/unix_signal.c
+++ b/bsd/dev/ppc/unix_signal.c
@@ -41,7 +41,6 @@
 #include
 #include
 #include
-#define	__ELF__ 0
 #include
 
 #define	C_REDZONE_LEN		224
@@ -50,6 +49,42 @@
 #define	C_LINKAGE_LEN		48
 
 #define TRUNC_DOWN(a,b,c)	(((((unsigned)a)-(b))/(c)) * (c))
 
+/*
+ * The stack layout possibilities (info style); this needs to match the signal trampoline code
+ *
+ * Traditional:			1
+ * Traditional64:		20
+ * Traditional64 with vec:	25
+ * 32bit context		30
+ * 32bit context with vector	35
+ * 64bit context		40
+ * 64bit context with vector	45
+ * Dual context			50
+ * Dual context with vector	55
+ *
+ */
+
+#define UC_TRAD			1
+#define UC_TRAD_VEC		6
+#define UC_TRAD64		20
+#define UC_TRAD64_VEC		25
+#define UC_FLAVOR		30
+#define UC_FLAVOR_VEC		35
+#define UC_FLAVOR64		40
+#define UC_FLAVOR64_VEC		45
+#define UC_DUAL			50
+#define UC_DUAL_VEC		55
+
+	/* The following are valid mcontext sizes */
+#define UC_FLAVOR_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))
+
+#define UC_FLAVOR_VEC_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
+
+#define UC_FLAVOR64_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))
+
+#define UC_FLAVOR64_VEC_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
+
+
 /*
  * Arrange for this process to run a signal handler
 */
@@ -61,7 +96,9 @@ sendsig(p, catcher, sig, mask, code)
 	int sig, mask;
 	u_long code;
 {
+	kern_return_t kretn;
 	struct mcontext mctx, *p_mctx;
+	struct mcontext64 mctx64, *p_mctx64;
 	struct ucontext uctx, *p_uctx;
 	siginfo_t sinfo, *p_sinfo;
 	struct sigacts *ps = p->p_sigacts;
@@ -72,42 +109,114 @@ sendsig(p, catcher, sig, mask, code)
 	thread_act_t th_act;
 	struct uthread *ut;
 	unsigned long paramp,linkp;
-	int infostyle = 1;
+	int infostyle = UC_TRAD;
+	int dualcontext = 0;
 	sig_t trampact;
 	int vec_used = 0;
 	int stack_size = 0;
 	int stack_flags = 0;
+	void * tstate;
+	int flavor;
+	int ctx32 = 1;
+	int is_64signalregset(void);
 
 	th_act = current_act();
 	ut = get_bsdthread_info(th_act);
 
-	state_count = PPC_EXCEPTION_STATE_COUNT;
-	if (act_machine_get_state(th_act, PPC_EXCEPTION_STATE, &mctx.es, &state_count) != KERN_SUCCESS) {
-		goto bad;
-	}
+
+	if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
+		infostyle = UC_FLAVOR;
+	}
+	if(is_64signalregset() && (infostyle == UC_FLAVOR)) {
+		dualcontext = 1;
+		infostyle = UC_DUAL;
+	}
+	if (p->p_sigacts->ps_64regset & sigmask(sig)) {
+		dualcontext = 0;
+		ctx32 = 0;
+		infostyle = UC_FLAVOR64;
+	}
+	if (is_64signalregset() && (infostyle == UC_TRAD)) {
+		ctx32 = 0;
+		infostyle = UC_TRAD64;
+	}
+
+	
/* I need this for SIGINFO anyway */ + flavor = PPC_THREAD_STATE; + tstate = (void *)&mctx.ss; state_count = PPC_THREAD_STATE_COUNT; - if (act_machine_get_state(th_act, PPC_THREAD_STATE, &mctx.ss, &state_count) != KERN_SUCCESS) { - goto bad; - } - state_count = PPC_FLOAT_STATE_COUNT; - if (act_machine_get_state(th_act, PPC_FLOAT_STATE, &mctx.fs, &state_count) != KERN_SUCCESS) { + if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; - } - vec_save(th_act); - if (find_user_vec(th_act)) { - vec_used = 1; - state_count = PPC_VECTOR_STATE_COUNT; - if (act_machine_get_state(th_act, PPC_VECTOR_STATE, &mctx.vs, &state_count) != KERN_SUCCESS) { - goto bad; - } - + if ((ctx32 == 0) || dualcontext) { + flavor = PPC_THREAD_STATE64; + tstate = (void *)&mctx64.ss; + state_count = PPC_THREAD_STATE64_COUNT; + if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) + goto bad; } + if ((ctx32 == 1) || dualcontext) { + flavor = PPC_EXCEPTION_STATE; + tstate = (void *)&mctx.es; + state_count = PPC_EXCEPTION_STATE_COUNT; + if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) + goto bad; + } + + if ((ctx32 == 0) || dualcontext) { + flavor = PPC_EXCEPTION_STATE64; + tstate = (void *)&mctx64.es; + state_count = PPC_EXCEPTION_STATE64_COUNT; + + if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) + goto bad; + + } + + + if ((ctx32 == 1) || dualcontext) { + flavor = PPC_FLOAT_STATE; + tstate = (void *)&mctx.fs; + state_count = PPC_FLOAT_STATE_COUNT; + if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) + goto bad; + } + + if ((ctx32 == 0) || dualcontext) { + flavor = PPC_FLOAT_STATE; + tstate = (void *)&mctx64.fs; + state_count = PPC_FLOAT_STATE_COUNT; + if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) + goto bad; + + } + + + if (find_user_vec_curr()) { + vec_used = 1; + + if ((ctx32 == 1) || dualcontext) { + flavor = PPC_VECTOR_STATE; + tstate = (void *)&mctx.vs; + state_count = PPC_VECTOR_STATE_COUNT; + if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) + goto bad; + infostyle += 5; + } + + if ((ctx32 == 0) || dualcontext) { + flavor = PPC_VECTOR_STATE; + tstate = (void *)&mctx64.vs; + state_count = PPC_VECTOR_STATE_COUNT; + if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) + goto bad; + infostyle += 5; + } + } + trampact = ps->ps_trampact[sig]; oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK; - if (p->p_sigacts->ps_siginfo & sigmask(sig)) - infostyle = 2; /* figure out where our new stack lives */ if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack && @@ -117,13 +226,30 @@ sendsig(p, catcher, sig, mask, code) stack_size = ps->ps_sigstk.ss_size; ps->ps_sigstk.ss_flags |= SA_ONSTACK; } - else - sp = mctx.ss.r1; + else { + if (ctx32 == 0) + sp = (unsigned int)mctx64.ss.r1; + else + sp = mctx.ss.r1; + } + + /* put siginfo on top */ + /* preserve RED ZONE area */ sp = TRUNC_DOWN(sp, C_REDZONE_LEN, C_STK_ALIGN); - /* context goes first on stack */ + /* next are the saved registers */ + if ((ctx32 == 0) || dualcontext) { + sp -= sizeof(*p_mctx64); + p_mctx64 = (struct mcontext64 *)sp; + } + if ((ctx32 == 1) || dualcontext) { + sp -= sizeof(*p_mctx); + p_mctx = (struct mcontext *)sp; + } + + /* context goes first on stack */ sp -= sizeof(*p_uctx); p_uctx = (struct ucontext *) sp; @@ -131,13 +257,9 @@ 
sendsig(p, catcher, sig, mask, code) sp -= sizeof(*p_sinfo); p_sinfo = (siginfo_t *) sp; - /* next are the saved registers */ - sp -= sizeof(*p_mctx); - p_mctx = (struct mcontext *)sp; - /* C calling conventions, create param save and linkage - * areas - */ + * areas + */ sp = TRUNC_DOWN(sp, C_PARAMSAVE_LEN, C_STK_ALIGN); paramp = sp; @@ -152,14 +274,25 @@ sendsig(p, catcher, sig, mask, code) uctx.uc_stack.ss_flags |= SS_ONSTACK; uctx.uc_link = 0; - uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int)); + if (ctx32 == 0) + uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int)); + else + uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int)); + if (vec_used) uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int)); - uctx.uc_mcontext = p_mctx; + + if (ctx32 == 0) + uctx.uc_mcontext = (void *)p_mctx64; + else + uctx.uc_mcontext = (void *)p_mctx; /* setup siginfo */ bzero((caddr_t)&sinfo, sizeof(siginfo_t)); sinfo.si_signo = sig; + sinfo.si_addr = (void *)mctx.ss.srr0; + sinfo.pad[0] = (unsigned int)mctx.ss.r1; + switch (sig) { case SIGCHLD: sinfo.si_pid = p->si_pid; @@ -233,13 +366,23 @@ sendsig(p, catcher, sig, mask, code) break; } + /* copy info out to user space */ if (copyout((caddr_t)&uctx, (caddr_t)p_uctx, sizeof(struct ucontext))) goto bad; if (copyout((caddr_t)&sinfo, (caddr_t)p_sinfo, sizeof(siginfo_t))) goto bad; - if (copyout((caddr_t)&mctx, (caddr_t)p_mctx, uctx.uc_mcsize)) + if ((ctx32 == 0) || dualcontext) { + tstate = &mctx64; + if (copyout((caddr_t)tstate, (caddr_t)p_mctx64, uctx.uc_mcsize)) + goto bad; + } + if ((ctx32 == 1) || dualcontext) { + tstate = &mctx; + if (copyout((caddr_t)tstate, (caddr_t)p_mctx, uctx.uc_mcsize)) goto bad; + } + /* Place our arguments in arg registers: rtm dependent */ @@ -253,10 +396,9 @@ sendsig(p, catcher, sig, mask, code) mctx.ss.srr1 = get_msr_exportmask(); /* MSR_EXPORT_MASK_SET */ mctx.ss.r1 = sp; state_count = PPC_THREAD_STATE_COUNT; - if (act_machine_set_state(th_act, PPC_THREAD_STATE, &mctx.ss, &state_count) != KERN_SUCCESS) { - goto bad; + if ((kretn = thread_setstatus(th_act, PPC_THREAD_STATE, &mctx.ss, &state_count)) != KERN_SUCCESS) { + panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn); } - return; bad: @@ -280,8 +422,122 @@ bad: * psl to gain improper priviledges or to cause * a machine fault. 
*/ + +#define FOR64_TRANSITION 1 + + +#ifdef FOR64_TRANSITION + +struct osigreturn_args { + struct ucontext *uctx; +}; + +/* ARGSUSED */ +int +osigreturn(p, uap, retval) + struct proc *p; + struct osigreturn_args *uap; + int *retval; +{ + struct ucontext uctx; + struct ucontext *p_uctx; + struct mcontext64 mctx64; + struct mcontext64 *p_64mctx; + struct mcontext *p_mctx; + int error; + thread_act_t th_act; + struct sigacts *ps = p->p_sigacts; + sigset_t mask; + register sig_t action; + unsigned long state_count; + unsigned int state_flavor; + struct uthread * ut; + int vec_used = 0; + void *tsptr, *fptr, *vptr, *mactx; + void ppc_checkthreadstate(void *, int); + + th_act = current_act(); + /* lets use the larger one */ + mactx = (void *)&mctx64; + + ut = (struct uthread *)get_bsdthread_info(th_act); + if (error = copyin(uap->uctx, &uctx, sizeof(struct ucontext))) { + return(error); + } + if (error = copyin(uctx.uc_mcontext, mactx, uctx.uc_mcsize)) { + return(error); + } + + if (uctx.uc_onstack & 01) + p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK; + else + p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK; + + ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask; + if (ut->uu_siglist & ~ut->uu_sigmask) + signal_setast(current_act()); + + vec_used = 0; + switch (uctx.uc_mcsize) { + case UC_FLAVOR64_VEC_SIZE : + vec_used = 1; + case UC_FLAVOR64_SIZE : { + p_64mctx = (struct mcontext64 *)mactx; + tsptr = (void *)&p_64mctx->ss; + fptr = (void *)&p_64mctx->fs; + vptr = (void *)&p_64mctx->vs; + state_flavor = PPC_THREAD_STATE64; + state_count = PPC_THREAD_STATE64_COUNT; + } + break; + case UC_FLAVOR_VEC_SIZE : + vec_used = 1; + case UC_FLAVOR_SIZE: + default: { + p_mctx = (struct mcontext *)mactx; + tsptr = (void *)&p_mctx->ss; + fptr = (void *)&p_mctx->fs; + vptr = (void *)&p_mctx->vs; + state_flavor = PPC_THREAD_STATE; + state_count = PPC_THREAD_STATE_COUNT; + } + break; + } /* switch () */ + + /* validate the thread state, set/reset appropriate mode bits in srr1 */ + (void)ppc_checkthreadstate(tsptr, state_flavor); + + if (thread_setstatus(th_act, state_flavor, tsptr, &state_count) != KERN_SUCCESS) { + return(EINVAL); + } + + state_count = PPC_FLOAT_STATE_COUNT; + if (thread_setstatus(th_act, PPC_FLOAT_STATE, fptr, &state_count) != KERN_SUCCESS) { + return(EINVAL); + } + + mask = sigmask(SIGFPE); + if (((ut->uu_sigmask & mask) == 0) && (p->p_sigcatch & mask) && ((p->p_sigignore & mask) == 0)) { + action = ps->ps_sigact[SIGFPE]; + if((action != SIG_DFL) && (action != SIG_IGN)) { + thread_enable_fpe(th_act, 1); + } + } + + if (vec_used) { + state_count = PPC_VECTOR_STATE_COUNT; + if (thread_setstatus(th_act, PPC_VECTOR_STATE, vptr, &state_count) != KERN_SUCCESS) { + return(EINVAL); + } + } + return (EJUSTRETURN); +} + +#endif /* FOR64_TRANSITION */ + struct sigreturn_args { struct ucontext *uctx; + int infostyle; }; /* ARGSUSED */ @@ -291,19 +547,23 @@ sigreturn(p, uap, retval) struct sigreturn_args *uap; int *retval; { - struct ucontext uctx, *p_uctx; - struct mcontext mctx, *p_mctx; + struct ucontext uctx; + struct ucontext *p_uctx; + char mactx[sizeof(struct mcontext64)]; + struct mcontext *p_mctx; + struct mcontext64 *p_64mctx; int error; thread_act_t th_act; - struct ppc_float_state fs; - struct ppc_exception_state es; struct sigacts *ps = p->p_sigacts; sigset_t mask; register sig_t action; unsigned long state_count; - unsigned int nbits, rbits; + unsigned int state_flavor; struct uthread * ut; int vec_used = 0; + void *tsptr, *fptr, *vptr; + int infostyle = uap->infostyle; + void 
ppc_checkthreadstate(void *, int); th_act = current_act(); @@ -311,7 +571,9 @@ sigreturn(p, uap, retval) if (error = copyin(uap->uctx, &uctx, sizeof(struct ucontext))) { return(error); } - if (error = copyin(uctx.uc_mcontext, &mctx, sizeof(struct mcontext))) { + + + if (error = copyin(uctx.uc_mcontext, mactx, uctx.uc_mcsize)) { return(error); } @@ -319,32 +581,51 @@ sigreturn(p, uap, retval) p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK; else p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK; - ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask; - + ut->uu_sigmask = uctx.uc_sigmask & ~sigcantmask; if (ut->uu_siglist & ~ut->uu_sigmask) signal_setast(current_act()); - nbits = get_msr_nbits(); - rbits = get_msr_rbits(); - /* adjust the critical fields */ - /* make sure naughty bits are off */ - mctx.ss.srr1 &= ~(nbits); - /* make sure necessary bits are on */ - mctx.ss.srr1 |= (rbits); + vec_used = 0; + switch (infostyle) { + case UC_FLAVOR64_VEC: + case UC_TRAD64_VEC: + vec_used = 1; + case UC_TRAD64: + case UC_FLAVOR64: { + p_64mctx = (struct mcontext64 *)mactx; + tsptr = (void *)&p_64mctx->ss; + fptr = (void *)&p_64mctx->fs; + vptr = (void *)&p_64mctx->vs; + state_flavor = PPC_THREAD_STATE64; + state_count = PPC_THREAD_STATE64_COUNT; + } + break; + case UC_FLAVOR_VEC : + case UC_TRAD_VEC : + vec_used = 1; + case UC_FLAVOR : + case UC_TRAD : + default: { + p_mctx = (struct mcontext *)mactx; + tsptr = (void *)&p_mctx->ss; + fptr = (void *)&p_mctx->fs; + vptr = (void *)&p_mctx->vs; + state_flavor = PPC_THREAD_STATE; + state_count = PPC_THREAD_STATE_COUNT; + } + break; + } /* switch () */ - state_count = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int)); + /* validate the thread state, set/reset appropriate mode bits in srr1 */ + (void)ppc_checkthreadstate(tsptr, state_flavor); - if (uctx.uc_mcsize > state_count) - vec_used = 1; - - state_count = PPC_THREAD_STATE_COUNT; - if (act_machine_set_state(th_act, PPC_THREAD_STATE, &mctx.ss, &state_count) != KERN_SUCCESS) { + if (thread_setstatus(th_act, state_flavor, tsptr, &state_count) != KERN_SUCCESS) { return(EINVAL); } state_count = PPC_FLOAT_STATE_COUNT; - if (act_machine_set_state(th_act, PPC_FLOAT_STATE, &mctx.fs, &state_count) != KERN_SUCCESS) { + if (thread_setstatus(th_act, PPC_FLOAT_STATE, fptr, &state_count) != KERN_SUCCESS) { return(EINVAL); } @@ -358,11 +639,10 @@ sigreturn(p, uap, retval) if (vec_used) { state_count = PPC_VECTOR_STATE_COUNT; - if (act_machine_set_state(th_act, PPC_VECTOR_STATE, &mctx.vs, &state_count) != KERN_SUCCESS) { + if (thread_setstatus(th_act, PPC_VECTOR_STATE, vptr, &state_count) != KERN_SUCCESS) { return(EINVAL); } } - return (EJUSTRETURN); } diff --git a/bsd/dev/ppc/unix_startup.c b/bsd/dev/ppc/unix_startup.c index 05b16e597..4e92e7bab 100644 --- a/bsd/dev/ppc/unix_startup.c +++ b/bsd/dev/ppc/unix_startup.c @@ -68,7 +68,7 @@ bsd_startupearly() kern_return_t ret; if (nbuf == 0) - nbuf = atop(mem_size / 100); /* 1% */ + nbuf = atop_64(sane_size / 100); /* Get 1% of ram, but no more than we can map */ if (nbuf > 8192) nbuf = 8192; if (nbuf < 256) @@ -82,7 +82,7 @@ bsd_startupearly() niobuf = 128; size = (nbuf + niobuf) * sizeof (struct buf); - size = round_page(size); + size = round_page_32(size); ret = kmem_suballoc(kernel_map, &firstaddr, @@ -106,13 +106,13 @@ bsd_startupearly() buf = (struct buf * )firstaddr; bzero(buf,size); - if ((mem_size > (64 * 1024 * 1024)) || ncl) { + if ((sane_size > (64 * 1024 * 1024)) || ncl) { int scale; extern u_long tcp_sendspace; 
extern u_long tcp_recvspace; if ((nmbclusters = ncl) == 0) { - if ((nmbclusters = ((mem_size / 16) / MCLBYTES)) > 16384) + if ((nmbclusters = ((sane_size / 16) / MCLBYTES)) > 16384) nmbclusters = 16384; } if ((scale = nmbclusters / NMBCLUSTERS) > 1) { @@ -137,7 +137,7 @@ bsd_bufferinit() bsd_startupearly(); ret = kmem_suballoc(kernel_map, - &mbutl, + (vm_offset_t *) &mbutl, (vm_size_t) (nmbclusters * MCLBYTES), FALSE, TRUE, diff --git a/bsd/dev/random/YarrowCoreLib/src/prng.c b/bsd/dev/random/YarrowCoreLib/src/prng.c index aaf58cbaa..e91b22150 100644 --- a/bsd/dev/random/YarrowCoreLib/src/prng.c +++ b/bsd/dev/random/YarrowCoreLib/src/prng.c @@ -343,8 +343,8 @@ prngForceReseed(PRNG *p, LONGLONG ticks) #if defined(macintosh) || defined(__APPLE__) #if (defined(TARGET_API_MAC_OSX) || defined(KERNEL_BUILD)) struct timeval tv; - int32_t endTime; - #else TARGET_API_MAC_CARBON + int64_t endTime, curTime; + #else /* TARGET_API_MAC_CARBON */ UnsignedWide uwide; /* struct needed for Microseconds() */ LONGLONG start; LONGLONG now; @@ -360,15 +360,11 @@ prngForceReseed(PRNG *p, LONGLONG ticks) #if (defined(TARGET_API_MAC_OSX) || defined(KERNEL_BUILD)) /* note we can't loop for more than a million microseconds */ #ifdef KERNEL_BUILD - microtime (&tv); + microuptime (&tv); #else gettimeofday(&tv, NULL); #endif - endTime = tv.tv_usec + ticks; - if(endTime > 1000000) { - /* handle rollover now */ - endTime -= 1000000; - } + endTime = (int64_t)tv.tv_sec*1000000LL + (int64_t)tv.tv_usec + ticks; #else /* TARGET_API_MAC_OSX */ Microseconds(&uwide); start = UnsignedWideToUInt64(uwide); @@ -393,9 +389,10 @@ prngForceReseed(PRNG *p, LONGLONG ticks) #ifdef TARGET_API_MAC_OSX gettimeofday(&tv, NULL); #else - microtime (&tv); + microuptime (&tv); + curTime = (int64_t)tv.tv_sec*1000000LL + (int64_t)tv.tv_usec; #endif - } while(tv.tv_usec < endTime); + } while(curTime < endTime); #else Microseconds(&uwide); now = UnsignedWideToUInt64(uwide); diff --git a/bsd/dev/random/randomdev.c b/bsd/dev/random/randomdev.c index 96f997e24..63d66a6ea 100644 --- a/bsd/dev/random/randomdev.c +++ b/bsd/dev/random/randomdev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999, 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -40,6 +40,8 @@ #define RANDOM_MAJOR -1 /* let the kernel pick the device number */ +d_ioctl_t random_ioctl; + /* * A struct describing which functions will get invoked for certain * actions. 
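A note on the prng.c hunk above, with a sketch that is not part of the patch: the old code kept the reseed deadline in a 32-bit endTime derived from tv_usec alone, so it had to special-case rollover and could still terminate early; the new code folds seconds and microseconds into one 64-bit count and switches to the monotonic microuptime(), so wall-clock adjustments cannot stretch or cut short the loop. A minimal user-space rendering of the same deadline arithmetic, with gettimeofday() standing in for the kernel's microuptime():

#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

/* Absolute time in microseconds, as one 64-bit value: immune to the
 * tv_usec wrap that the old 32-bit endTime had to special-case. */
static int64_t now_usec(void)
{
	struct timeval tv;

	gettimeofday(&tv, NULL);	/* kernel code uses microuptime() */
	return (int64_t)tv.tv_sec * 1000000LL + (int64_t)tv.tv_usec;
}

int main(void)
{
	int64_t ticks = 250000;		/* spin for 250 ms, say */
	int64_t endTime = now_usec() + ticks;
	long spins = 0;

	do {
		spins++;		/* prngForceReseed() stirs the pool here */
	} while (now_usec() < endTime);

	printf("%ld spins\n", spins);
	return 0;
}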
@@ -50,7 +52,7 @@ static struct cdevsw random_cdevsw = random_close, /* close */ random_read, /* read */ random_write, /* write */ - eno_ioctl, /* ioctl */ + random_ioctl, /* ioctl */ nulldev, /* stop */ nulldev, /* reset */ NULL, /* tty's */ @@ -142,14 +144,33 @@ random_init() } devfs_make_node(makedev (ret, 0), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0644, "random", 0); + UID_ROOT, GID_WHEEL, 0666, "random", 0); /* * also make urandom * (which is exactly the same thing in our context) */ devfs_make_node(makedev (ret, 1), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0644, "urandom", 0); + UID_ROOT, GID_WHEEL, 0666, "urandom", 0); +} + +int +random_ioctl(dev, cmd, data, flag, p) + dev_t dev; + u_long cmd; + caddr_t data; + int flag; + struct proc *p; +{ + switch (cmd) { + case FIONBIO: + case FIOASYNC: + break; + default: + return ENODEV; + } + + return (0); } /* @@ -172,8 +193,10 @@ random_open(dev_t dev, int flags, int devtype, struct proc *p) if (flags & FWRITE) { if (securelevel >= 2) return (EPERM); +#ifndef __APPLE__ if ((securelevel >= 1) && suser(p->p_ucred, &p->p_acflag)) return (EPERM); +#endif /* !__APPLE__ */ } return (0); diff --git a/bsd/dev/vn/shadow.c b/bsd/dev/vn/shadow.c index 20b78be74..502c42770 100644 --- a/bsd/dev/vn/shadow.c +++ b/bsd/dev/vn/shadow.c @@ -61,11 +61,11 @@ #include #define my_malloc(a) malloc(a) #define my_free(a) free(a) -#else TEST_SHADOW +#else /* !TEST_SHADOW */ #include #define my_malloc(a) _MALLOC(a, M_TEMP, M_WAITOK) #define my_free(a) FREE(a, M_TEMP) -#endif TEST_SHADOW +#endif /* TEST_SHADOW */ #include "shadow.h" diff --git a/bsd/dev/vn/vn.c b/bsd/dev/vn/vn.c index 0e29677cf..19f246616 100644 --- a/bsd/dev/vn/vn.c +++ b/bsd/dev/vn/vn.c @@ -73,11 +73,10 @@ #include #include #include -#include #include #include #include -#include +#include #include #include @@ -91,6 +90,17 @@ #include +extern void +vfs_io_maxsegsize(struct vnode *vp, + int flags, /* B_READ or B_WRITE */ + int *maxsegsize); + +extern void +vfs_io_attributes(struct vnode *vp, + int flags, /* B_READ or B_WRITE */ + int *iosize, + int *vectors); + #include "shadow.h" static ioctl_fcn_t vnioctl_chr; @@ -388,7 +398,7 @@ shadow_write(struct vn_softc * vn, struct buf * bp, char * base, VOP_TRUNCATE(vn->sc_shadow_vp, size, IO_SYNC, vn->sc_cred, p); VOP_UNLOCK(vn->sc_shadow_vp, 0, p); -#endif 0 +#endif } error = file_io(vn->sc_shadow_vp, vn->sc_cred, UIO_WRITE, base + start, @@ -494,8 +504,10 @@ vnstrategy(struct buf *bp) * simply read or write less. 
*/ if (bp->b_blkno >= vn->sc_size) { - bp->b_error = EINVAL; - bp->b_flags |= B_ERROR | B_INVAL; + if (bp->b_blkno > vn->sc_size) { + bp->b_error = EINVAL; + bp->b_flags |= B_ERROR | B_INVAL; + } biodone(bp); return; } @@ -531,8 +543,10 @@ vnioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p, struct vn_ioctl *vio; int error; u_long *f; + int num = 0; u_int64_t * o; int unit; + int size = 0; unit = vnunit(dev); if (vnunit(dev) >= NVNDEVICE) { @@ -548,10 +562,15 @@ vnioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p, o = (u_int64_t *)data; switch (cmd) { case VNIOCDETACH: + case DKIOCGETBLOCKSIZE: + case DKIOCSETBLOCKSIZE: case DKIOCGETMAXBLOCKCOUNTREAD: case DKIOCGETMAXBLOCKCOUNTWRITE: case DKIOCGETMAXSEGMENTCOUNTREAD: case DKIOCGETMAXSEGMENTCOUNTWRITE: + case DKIOCGETMAXSEGMENTBYTECOUNTREAD: + case DKIOCGETMAXSEGMENTBYTECOUNTWRITE: + case DKIOCGETBLOCKCOUNT: case DKIOCGETBLOCKCOUNT32: if ((vn->sc_flags & VNF_INITED) == 0) { return (ENXIO); @@ -562,16 +581,36 @@ vnioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p, } switch (cmd) { case DKIOCGETMAXBLOCKCOUNTREAD: - *o = vn->sc_vp->v_mount->mnt_maxreadcnt / vn->sc_secsize; + vfs_io_attributes(vn->sc_vp, B_READ, &size, &num); + *o = size / vn->sc_secsize; break; case DKIOCGETMAXBLOCKCOUNTWRITE: - *o = vn->sc_vp->v_mount->mnt_maxwritecnt / vn->sc_secsize; + vfs_io_attributes(vn->sc_vp, B_WRITE, &size, &num); + *o = size / vn->sc_secsize; + break; + case DKIOCGETMAXBYTECOUNTREAD: + vfs_io_attributes(vn->sc_vp, B_READ, &size, &num); + *o = size; + break; + case DKIOCGETMAXBYTECOUNTWRITE: + vfs_io_attributes(vn->sc_vp, B_WRITE, &size, &num); + *o = size; break; case DKIOCGETMAXSEGMENTCOUNTREAD: - *o = vn->sc_vp->v_mount->mnt_segreadcnt; + vfs_io_attributes(vn->sc_vp, B_READ, &size, &num); + *o = num; break; case DKIOCGETMAXSEGMENTCOUNTWRITE: - *o = vn->sc_vp->v_mount->mnt_segwritecnt; + vfs_io_attributes(vn->sc_vp, B_WRITE, &size, &num); + *o = num; + break; + case DKIOCGETMAXSEGMENTBYTECOUNTREAD: + vfs_io_maxsegsize(vn->sc_vp, B_READ, &size); + *o = size; + break; + case DKIOCGETMAXSEGMENTBYTECOUNTWRITE: + vfs_io_maxsegsize(vn->sc_vp, B_WRITE, &size); + *o = size; break; case DKIOCGETBLOCKSIZE: *f = vn->sc_secsize; @@ -598,7 +637,7 @@ vnioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p, case DKIOCGETBLOCKCOUNT32: *f = vn->sc_size; break; - case DKIOCGETBLOCKCOUNT64: + case DKIOCGETBLOCKCOUNT: *o = vn->sc_size; break; case VNIOCSHADOW: @@ -757,7 +796,7 @@ vniocattach_file(struct vn_softc *vn, vn->sc_size = (quad_t)vio->vn_size * PAGE_SIZE / vn->sc_secsize; else vn->sc_size = vattr.va_size / vn->sc_secsize; -#endif 0 +#endif vn->sc_secsize = DEV_BSIZE; vn->sc_fsize = vattr.va_size; vn->sc_size = vattr.va_size / vn->sc_secsize; @@ -980,4 +1019,4 @@ vndevice_init() printf("vninit: devfs_make_node failed!\n"); } } -#endif NVNDEVICE +#endif /* NVNDEVICE */ diff --git a/bsd/hfs/hfs.h b/bsd/hfs/hfs.h index 1cfb21d13..721668939 100644 --- a/bsd/hfs/hfs.h +++ b/bsd/hfs/hfs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -26,6 +26,8 @@ #ifndef __HFS__ #define __HFS__ +#define HFS_SPARSE_DEV 1 + #include #ifdef KERNEL @@ -38,6 +40,7 @@ #include #include #include +#include #include @@ -46,6 +49,7 @@ #include #include #include +#include struct uio; // This is more effective than #include in case KERNEL is undefined... 
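An editorial sketch, not part of the patch, on the vn.c ioctl rework above: instead of reading transfer limits straight out of mount-point fields (mnt_maxreadcnt and friends), the driver now asks the VFS layer through vfs_io_attributes() and vfs_io_maxsegsize(), whose extern declarations appear in the hunk, and then scales the byte limits to device blocks. The scaling step in isolation, with an illustrative struct and made-up numbers in place of the kernel calls:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for what vfs_io_attributes() reports;
 * not a kernel structure. */
struct io_limits {
	int max_io_bytes;	/* largest single I/O, in bytes */
	int max_segments;	/* scatter/gather segment count */
};

int main(void)
{
	struct io_limits lim = { 131072, 32 };	/* hypothetical device */
	uint32_t secsize = 512;			/* vn's default sector size (DEV_BSIZE) */

	/* DKIOCGETMAXBLOCKCOUNTREAD-style answer: byte limit scaled to blocks. */
	uint64_t max_blocks = (uint64_t)lim.max_io_bytes / secsize;

	printf("max blocks per read: %llu\n", (unsigned long long)max_blocks);
	printf("max segments per read: %d\n", lim.max_segments);
	return 0;
}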
@@ -60,6 +64,11 @@ struct hfslockf; /* For advisory locking */ #define HFS_MAX_DEFERED_ALLOC (1024*1024) +// 32 gigs is a "big" file (i.e. one that when deleted +// would touch enough data that we should break it into +// multiple separate transactions +#define HFS_BIGFILE_SIZE (32LL * 1024LL * 1024LL * 1024LL) + enum { kMDBSize = 512 }; /* Size of I/O transfer to read entire MDB */ @@ -104,7 +113,24 @@ extern struct timezone gTimeZone; * superuser may continue to allocate blocks. */ #define HFS_MINFREE 1 -#define HFS_MAXRESERVE (u_int64_t)(250*1024*1024) +#define HFS_MAXRESERVE ((u_int64_t)(250*1024*1024)) + +/* + * The system distinguishes between the desirable low-disk + * notification levels for root volumes and non-root volumes. + * The various thresholds are computed as a fraction of the + * volume size, all capped at a certain fixed level. + */ + +#define HFS_ROOTLOWDISKTRIGGERFRACTION 5 +#define HFS_ROOTLOWDISKTRIGGERLEVEL ((u_int64_t)(250*1024*1024)) +#define HFS_ROOTLOWDISKSHUTOFFFRACTION 6 +#define HFS_ROOTLOWDISKSHUTOFFLEVEL ((u_int64_t)(375*1024*1024)) + +#define HFS_LOWDISKTRIGGERFRACTION 1 +#define HFS_LOWDISKTRIGGERLEVEL ((u_int64_t)(50*1024*1024)) +#define HFS_LOWDISKSHUTOFFFRACTION 2 +#define HFS_LOWDISKSHUTOFFLEVEL ((u_int64_t)(75*1024*1024)) /* Internal Data structures*/ @@ -183,10 +209,7 @@ typedef struct vfsVCB { /* This structure describes the HFS specific mount structure data. */ typedef struct hfsmount { - u_int8_t hfs_fs_ronly; /* Whether this was mounted as read-initially */ - u_int8_t hfs_unknownpermissions; /* Whether this was mounted with MNT_UNKNOWNPERMISSIONS */ - u_int8_t hfs_media_writeable; - u_int8_t hfs_orphans_cleaned; + u_int32_t hfs_flags; /* see below */ /* Physical Description */ u_long hfs_phys_block_count; /* Num of PHYSICAL blocks of volume */ @@ -206,9 +229,6 @@ typedef struct hfsmount { mode_t hfs_file_mask; /* mask to and with file protection bits */ u_long hfs_encoding; /* Default encoding for non hfs+ volumes */ - /* simple lock for shared meta renaming */ - simple_lock_data_t hfs_renamelock; - /* HFS Specific */ struct vfsVCB hfs_vcb; struct cat_desc hfs_privdir_desc; @@ -217,19 +237,66 @@ typedef struct hfsmount { hfs_to_unicode_func_t hfs_get_unicode; unicode_to_hfs_func_t hfs_get_hfsname; + /* Quota variables: */ struct quotafile hfs_qfiles[MAXQUOTAS]; /* quota files */ - // XXXdbg + /* Journaling variables: */ void *jnl; // the journal for this volume (if one exists) struct vnode *jvp; // device where the journal lives (may be equal to devvp) u_int32_t jnl_start; // start block of the journal file (so we don't delete it) + u_int32_t jnl_size; u_int32_t hfs_jnlfileid; u_int32_t hfs_jnlinfoblkid; - volatile int readers; + volatile int readers; volatile int blocker; + + /* Notification variables: */ + unsigned long hfs_notification_conditions; + u_int32_t hfs_freespace_notify_warninglimit; + u_int32_t hfs_freespace_notify_desiredlevel; + + /* Metadata allocation zone variables: */ + u_int32_t hfs_metazone_start; + u_int32_t hfs_metazone_end; + u_int32_t hfs_hotfile_start; + u_int32_t hfs_hotfile_end; + int hfs_hotfile_freeblks; + int hfs_hotfile_maxblks; + int hfs_overflow_maxblks; + int hfs_catalog_maxblks; + + /* Hot File Clustering variables: */ + enum hfc_stage hfc_stage; /* what are we up to...
*/ + time_t hfc_timebase; /* recording period start time */ + time_t hfc_timeout; /* recording period stop time */ + void * hfc_recdata; /* recording data (opaque) */ + int hfc_maxfiles; /* maximum files to track */ + struct vnode * hfc_filevp; + +#ifdef HFS_SPARSE_DEV + /* Sparse device variables: */ + struct vnode * hfs_backingfs_rootvp; + int hfs_sparsebandblks; +#endif } hfsmount_t; -#define hfs_private_metadata_dir hfs_privdir_desc.cd_cnid + +/* HFS mount point flags */ +#define HFS_READ_ONLY 0x001 +#define HFS_UNKNOWN_PERMS 0x002 +#define HFS_WRITEABLE_MEDIA 0x004 +#define HFS_CLEANED_ORPHANS 0x008 +#define HFS_X 0x010 +#define HFS_CASE_SENSITIVE 0x020 +#define HFS_STANDARD 0x040 +#define HFS_METADATA_ZONE 0x080 +#define HFS_FRAGMENTED_FREESPACE 0x100 +#define HFS_NEED_JNL_RESET 0x200 + +#ifdef HFS_SPARSE_DEV +#define HFS_HAS_SPARSE_DEVICE 0x400 +#endif + #define hfs_global_shared_lock_acquire(hfsmp) \ do { \ @@ -276,16 +343,6 @@ typedef struct filefork FCB; #define MAKE_INODE_NAME(name,linkno) \ (void) sprintf((name), "%s%d", HFS_INODE_PREFIX, (linkno)) -/* - * Write check macro - */ -#define WRITE_CK(VNODE, FUNC_NAME) { \ - if ((VNODE)->v_mount->mnt_flag & MNT_RDONLY) { \ - DBG_ERR(("%s: ATTEMPT TO WRITE A READONLY VOLUME\n", \ - FUNC_NAME)); \ - return(EROFS); \ - } \ -} /* structure to hold a "." or ".." directory entry (12 bytes) */ typedef struct hfsdotentry { @@ -304,55 +361,6 @@ typedef struct hfsdotentry { ((sizeof(struct dirent) - (NAME_MAX+1)) + (((namlen)+1 + 3) &~ 3)) -enum { - kCatalogFolderNode = 1, - kCatalogFileNode = 2 -}; - -/* - * CatalogNodeData has same layout as the on-disk HFS Plus file/dir records. - * Classic hfs file/dir records are converted to match this layout. - * - * The cnd_extra padding allows big hfs plus thread records (520 bytes max) - * to be read onto this stucture during a cnid lookup. 
- * - */ -struct CatalogNodeData { - int16_t cnd_type; - u_int16_t cnd_flags; - u_int32_t cnd_valence; /* dirs only */ - u_int32_t cnd_nodeID; - u_int32_t cnd_createDate; - u_int32_t cnd_contentModDate; - u_int32_t cnd_attributeModDate; - u_int32_t cnd_accessDate; - u_int32_t cnd_backupDate; - u_int32_t cnd_ownerID; - u_int32_t cnd_groupID; - u_int8_t cnd_adminFlags; /* super-user changeable flags */ - u_int8_t cnd_ownerFlags; /* owner changeable flags */ - u_int16_t cnd_mode; /* file type + permission bits */ - union { - u_int32_t cndu_iNodeNum; /* indirect links only */ - u_int32_t cndu_linkCount; /* indirect nodes only */ - u_int32_t cndu_rawDevice; /* special files (FBLK and FCHR) only */ - } cnd_un; - u_int8_t cnd_finderInfo[32]; - u_int32_t cnd_textEncoding; - u_int32_t cnd_reserved; - HFSPlusForkData cnd_datafork; - HFSPlusForkData cnd_rsrcfork; - u_int32_t cnd_iNodeNumCopy; - u_int32_t cnd_linkCNID; /* for hard links only */ - u_int8_t cnd_extra[264]; /* make struct at least 520 bytes long */ -}; -typedef struct CatalogNodeData CatalogNodeData; - -#define cnd_iNodeNum cnd_un.cndu_iNodeNum -#define cnd_linkCount cnd_un.cndu_linkCount -#define cnd_rawDevice cnd_un.cndu_rawDevice - - enum { kHFSPlusMaxFileNameBytes = kHFSPlusMaxFileNameChars * 3 }; @@ -388,6 +396,9 @@ enum { kdirentMaxNameBytes = NAME_MAX }; #define FCBTOVCB(FCB) (&(((struct hfsmount *)((FCB)->ff_cp->c_vp->v_mount->mnt_data))->hfs_vcb.vcb_vcb)) +#define HFS_KNOTE(vp, hint) KNOTE(&VTOC(vp)->c_knotes, (hint)) + + #define E_NONE 0 #define kHFSBlockSize 512 @@ -411,11 +422,10 @@ enum { kdirentMaxNameBytes = NAME_MAX }; u_int32_t to_bsd_time(u_int32_t hfs_time); u_int32_t to_hfs_time(u_int32_t bsd_time); -int hfs_flushfiles(struct mount *mp, int flags, struct proc *p); int hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush); #define HFS_ALTFLUSH 1 -short hfsUnmount(struct hfsmount *hfsmp, struct proc *p); +extern int hfsUnmount(struct hfsmount *hfsmp, struct proc *p); extern int hfs_getcnode(struct hfsmount *hfsmp, cnid_t cnid, struct cat_desc *descp, @@ -494,6 +504,10 @@ extern void replace_desc(struct cnode *cp, struct cat_desc *cdp); extern int hfs_namecmp(const char *, size_t, const char *, size_t); +extern int hfs_virtualmetafile(struct cnode *); + +void hfs_generate_volume_notifications(struct hfsmount *hfsmp); + #endif /* __APPLE_API_PRIVATE */ #endif /* KERNEL */ diff --git a/bsd/hfs/hfs_attrlist.c b/bsd/hfs/hfs_attrlist.c index b52bed88f..c73610c72 100644 --- a/bsd/hfs/hfs_attrlist.c +++ b/bsd/hfs/hfs_attrlist.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -45,9 +45,6 @@ -extern uid_t console_user; - - /* Routines that are shared by hfs_setattr: */ extern int hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags); @@ -71,22 +68,22 @@ extern void hfs_relnamehint(struct cnode *dcp, int index); static void packvolcommonattr(struct attrblock *abp, struct hfsmount *hfsmp, - struct vnode *vp); + struct vnode *vp, struct proc *p); static void packvolattr(struct attrblock *abp, struct hfsmount *hfsmp, - struct vnode *vp); + struct vnode *vp, struct proc *p); static void packcommonattr(struct attrblock *abp, struct hfsmount *hfsmp, struct vnode *vp, struct cat_desc * cdp, - struct cat_attr * cap); + struct cat_attr * cap, struct proc *p); static void packfileattr(struct attrblock *abp, struct hfsmount *hfsmp, struct cat_attr *cattrp, struct cat_fork *datafork, - struct cat_fork *rsrcfork); + struct cat_fork *rsrcfork, struct proc *p); static void packdirattr(struct attrblock *abp, struct hfsmount *hfsmp, struct vnode *vp, struct cat_desc * descp, - struct cat_attr * cattrp); + struct cat_attr * cattrp, struct proc *p); static void unpackattrblk(struct attrblock *abp, struct vnode *vp); @@ -192,39 +189,34 @@ hfs_getattrlist(ap) (alist->commonattr & ATTR_CMN_OBJPERMANENTID) && (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)) { - if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) + cat_cookie_t cookie = {0}; + + if (hfsmp->hfs_flags & HFS_READ_ONLY) return (EROFS); if ((error = hfs_write_access(vp, ap->a_cred, ap->a_p, false)) != 0) return (error); - // XXXdbg - hfs_global_shared_lock_acquire(hfsmp); - if (hfsmp->jnl) { - if ((error = journal_start_transaction(hfsmp->jnl)) != 0) { - hfs_global_shared_lock_release(hfsmp); - return error; - } - } + /* + * Reserve some space in the Catalog file. 
+ */ + error = cat_preflight(hfsmp, CAT_CREATE, &cookie, ap->a_p); + if (error) + return (error); /* Lock catalog b-tree */ - error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, ap->a_p); + error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, + LK_EXCLUSIVE, ap->a_p); if (error) { - if (hfsmp->jnl) { - journal_end_transaction(hfsmp->jnl); - } - hfs_global_shared_lock_release(hfsmp); - return (error); + cat_postflight(hfsmp, &cookie, ap->a_p); + return (error); } error = cat_insertfilethread(hfsmp, &cp->c_desc); - /* Unlock catalog b-tree */ - (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p); + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, + ap->a_p); - if (hfsmp->jnl) { - journal_end_transaction(hfsmp->jnl); - } - hfs_global_shared_lock_release(hfsmp); + cat_postflight(hfsmp, &cookie, ap->a_p); if (error) return (error); @@ -291,7 +283,7 @@ hfs_getattrlist(ap) attrblk.ab_blocksize = attrblocksize; hfs_packattrblk(&attrblk, hfsmp, vp, &cp->c_desc, &cp->c_attr, - datafp, rsrcfp); + datafp, rsrcfp, ap->a_p); /* Don't copy out more data than was generated */ attrbufsize = MIN(attrbufsize, (u_int)varptr - (u_int)attrbufptr); @@ -346,7 +338,7 @@ hfs_setattrlist(ap) u_long saved_flags; int error = 0; - if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) + if (hfsmp->hfs_flags & HFS_READ_ONLY) return (EROFS); if ((alist->bitmapcount != ATTR_BIT_MAP_COUNT) || ((alist->commonattr & ~ATTR_CMN_SETMASK) != 0) || @@ -378,7 +370,7 @@ hfs_setattrlist(ap) if (hfsmp->jnl && cp->c_datafork) { struct HFSPlusExtentDescriptor *extd; - extd = &cp->c_datafork->ff_data.cf_extents[0]; + extd = &cp->c_datafork->ff_extents[0]; if (extd->startBlock == HFSTOVCB(hfsmp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) { return EPERM; } @@ -503,6 +495,10 @@ hfs_setattrlist(ap) struct cat_desc to_desc = {0}; struct cat_desc todir_desc = {0}; struct cat_desc new_desc = {0}; + cat_cookie_t cookie = {0}; + int catreserve = 0; + int catlocked = 0; + int started_tr = 0; todir_desc.cd_parentcnid = kRootParID; todir_desc.cd_cnid = kRootParID; @@ -517,38 +513,38 @@ hfs_setattrlist(ap) // XXXdbg hfs_global_shared_lock_acquire(hfsmp); if (hfsmp->jnl) { - if (journal_start_transaction(hfsmp->jnl) != 0) { - hfs_global_shared_lock_release(hfsmp); - error = EINVAL; - /* Restore the old name in the VCB */ - copystr(cp->c_desc.cd_nameptr, vcb->vcbVN, sizeof(vcb->vcbVN), NULL); - vcb->vcbFlags |= 0xFF00; - goto ErrorExit; - } + if ((error = journal_start_transaction(hfsmp->jnl) != 0)) { + goto rename_out; + } + started_tr = 1; } + /* + * Reserve some space in the Catalog file. 
+ */ + error = cat_preflight(hfsmp, CAT_RENAME, &cookie, p); + if (error) { + goto rename_out; + } + catreserve = 1; /* Lock catalog b-tree */ error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); if (error) { - if (hfsmp->jnl) { - journal_end_transaction(hfsmp->jnl); - } - hfs_global_shared_lock_release(hfsmp); - - /* Restore the old name in the VCB */ - copystr(cp->c_desc.cd_nameptr, vcb->vcbVN, sizeof(vcb->vcbVN), NULL); - vcb->vcbFlags |= 0xFF00; - goto ErrorExit; + goto rename_out; } + catlocked = 1; error = cat_rename(hfsmp, &cp->c_desc, &todir_desc, &to_desc, &new_desc); - - /* Unlock the Catalog */ - (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); - - if (hfsmp->jnl) { - journal_end_transaction(hfsmp->jnl); +rename_out: + if (catlocked) { + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); + } + if (catreserve) { + cat_postflight(hfsmp, &cookie, p); + } + if (started_tr) { + journal_end_transaction(hfsmp->jnl); } hfs_global_shared_lock_release(hfsmp); @@ -565,7 +561,7 @@ hfs_setattrlist(ap) cp->c_desc.cd_nameptr = 0; cp->c_desc.cd_namelen = 0; cp->c_desc.cd_flags &= ~CD_HASBUF; - FREE(name, M_TEMP); + remove_name(name); } /* Update cnode's catalog descriptor */ replace_desc(cp, &new_desc); @@ -788,14 +784,12 @@ hfs_readdirattr(ap) cdescp = &cp->c_desc; cattrp = &cp->c_attr; if (cp->c_datafork) { - c_datafork.cf_size = cp->c_datafork->ff_data.cf_size; - c_datafork.cf_clump = cp->c_datafork->ff_data.cf_clump; - c_datafork.cf_blocks = cp->c_datafork->ff_data.cf_blocks; + c_datafork.cf_size = cp->c_datafork->ff_size; + c_datafork.cf_blocks = cp->c_datafork->ff_blocks; } if (cp->c_rsrcfork) { - c_rsrcfork.cf_size = cp->c_rsrcfork->ff_data.cf_size; - c_rsrcfork.cf_clump = cp->c_rsrcfork->ff_data.cf_clump; - c_rsrcfork.cf_blocks = cp->c_rsrcfork->ff_data.cf_blocks; + c_rsrcfork.cf_size = cp->c_rsrcfork->ff_size; + c_rsrcfork.cf_blocks = cp->c_rsrcfork->ff_blocks; } } } @@ -808,7 +802,7 @@ hfs_readdirattr(ap) /* Pack catalog entries into attribute buffer. */ hfs_packattrblk(&attrblk, hfsmp, vp, cdescp, cattrp, - &c_datafork, &c_rsrcfork); + &c_datafork, &c_rsrcfork, p); currattrbufsize = ((char *)varptr - (char *)attrbufptr); /* All done with cnode. */ @@ -910,25 +904,26 @@ hfs_packattrblk(struct attrblock *abp, struct cat_desc *descp, struct cat_attr *attrp, struct cat_fork *datafork, - struct cat_fork *rsrcfork) + struct cat_fork *rsrcfork, + struct proc *p) { struct attrlist *attrlistp = abp->ab_attrlist; if (attrlistp->volattr) { if (attrlistp->commonattr) - packvolcommonattr(abp, hfsmp, vp); + packvolcommonattr(abp, hfsmp, vp, p); if (attrlistp->volattr & ~ATTR_VOL_INFO) - packvolattr(abp, hfsmp, vp); + packvolattr(abp, hfsmp, vp, p); } else { if (attrlistp->commonattr) - packcommonattr(abp, hfsmp, vp, descp, attrp); + packcommonattr(abp, hfsmp, vp, descp, attrp, p); if (attrlistp->dirattr && S_ISDIR(attrp->ca_mode)) - packdirattr(abp, hfsmp, vp, descp,attrp); + packdirattr(abp, hfsmp, vp, descp,attrp, p); if (attrlistp->fileattr && !S_ISDIR(attrp->ca_mode)) - packfileattr(abp, hfsmp, attrp, datafork, rsrcfork); + packfileattr(abp, hfsmp, attrp, datafork, rsrcfork, p); } } @@ -966,7 +961,8 @@ packnameattr( struct attrblock *abp, struct vnode *vp, char *name, - int namelen) + int namelen, + struct proc *p) { void *varbufptr; struct attrreference * attr_refptr; @@ -1022,7 +1018,7 @@ packnameattr( * Pack common volume attributes. 
*/ static void -packvolcommonattr(struct attrblock *abp, struct hfsmount *hfsmp, struct vnode *vp) +packvolcommonattr(struct attrblock *abp, struct hfsmount *hfsmp, struct vnode *vp, struct proc *p) { attrgroup_t attr; void *attrbufptr = *abp->ab_attrbufpp; @@ -1035,7 +1031,7 @@ packvolcommonattr(struct attrblock *abp, struct hfsmount *hfsmp, struct vnode *v attr = abp->ab_attrlist->commonattr; if (ATTR_CMN_NAME & attr) { - packnameattr(abp, vp, cp->c_desc.cd_nameptr, cp->c_desc.cd_namelen); + packnameattr(abp, vp, cp->c_desc.cd_nameptr, cp->c_desc.cd_namelen, p); attrbufptr = *abp->ab_attrbufpp; varbufptr = *abp->ab_varbufpp; } @@ -1107,7 +1103,7 @@ packvolcommonattr(struct attrblock *abp, struct hfsmount *hfsmp, struct vnode *v } if (ATTR_CMN_OWNERID & attr) { if (cp->c_uid == UNKNOWNUID) - *((uid_t *)attrbufptr)++ = console_user; + *((uid_t *)attrbufptr)++ = p->p_ucred->cr_uid; else *((uid_t *)attrbufptr)++ = cp->c_uid; } @@ -1154,7 +1150,7 @@ packvolcommonattr(struct attrblock *abp, struct hfsmount *hfsmp, struct vnode *v static void -packvolattr(struct attrblock *abp, struct hfsmount *hfsmp, struct vnode *vp) +packvolattr(struct attrblock *abp, struct hfsmount *hfsmp, struct vnode *vp, struct proc *p) { attrgroup_t attr; void *attrbufptr = *abp->ab_attrbufpp; @@ -1179,7 +1175,6 @@ packvolattr(struct attrblock *abp, struct hfsmount *hfsmp, struct vnode *vp) if (ATTR_VOL_SPACEFREE & attr) { *((off_t *)attrbufptr)++ = (off_t)hfs_freeblks(hfsmp, 0) * (off_t)vcb->blockSize; - } if (ATTR_VOL_SPACEAVAIL & attr) { *((off_t *)attrbufptr)++ = (off_t)hfs_freeblks(hfsmp, 1) * @@ -1263,31 +1258,70 @@ packvolattr(struct attrblock *abp, struct hfsmount *hfsmp, struct vnode *vp) vcapattrptr = (vol_capabilities_attr_t *)attrbufptr; if (vcb->vcbSigWord == kHFSPlusSigWord) { + u_int32_t journal_active; + u_int32_t case_sensitive; + + if (hfsmp->jnl) + journal_active = VOL_CAP_FMT_JOURNAL_ACTIVE; + else + journal_active = 0; + + if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) + case_sensitive = VOL_CAP_FMT_CASE_SENSITIVE; + else + case_sensitive = 0; + vcapattrptr->capabilities[VOL_CAPABILITIES_FORMAT] = VOL_CAP_FMT_PERSISTENTOBJECTIDS | VOL_CAP_FMT_SYMBOLICLINKS | - VOL_CAP_FMT_HARDLINKS; + VOL_CAP_FMT_HARDLINKS | + VOL_CAP_FMT_JOURNAL | + journal_active | + case_sensitive | + VOL_CAP_FMT_CASE_PRESERVING | + VOL_CAP_FMT_FAST_STATFS ; } else { /* Plain HFS */ vcapattrptr->capabilities[VOL_CAPABILITIES_FORMAT] = - VOL_CAP_FMT_PERSISTENTOBJECTIDS; + VOL_CAP_FMT_PERSISTENTOBJECTIDS | + VOL_CAP_FMT_CASE_PRESERVING | + VOL_CAP_FMT_FAST_STATFS ; } vcapattrptr->capabilities[VOL_CAPABILITIES_INTERFACES] = VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_ATTRLIST | VOL_CAP_INT_NFSEXPORT | - VOL_CAP_INT_READDIRATTR ; + VOL_CAP_INT_READDIRATTR | + VOL_CAP_INT_EXCHANGEDATA | + VOL_CAP_INT_ALLOCATE | + VOL_CAP_INT_VOL_RENAME | + VOL_CAP_INT_ADVLOCK | + VOL_CAP_INT_FLOCK ; vcapattrptr->capabilities[VOL_CAPABILITIES_RESERVED1] = 0; vcapattrptr->capabilities[VOL_CAPABILITIES_RESERVED2] = 0; vcapattrptr->valid[VOL_CAPABILITIES_FORMAT] = VOL_CAP_FMT_PERSISTENTOBJECTIDS | VOL_CAP_FMT_SYMBOLICLINKS | - VOL_CAP_FMT_HARDLINKS; + VOL_CAP_FMT_HARDLINKS | + VOL_CAP_FMT_JOURNAL | + VOL_CAP_FMT_JOURNAL_ACTIVE | + VOL_CAP_FMT_NO_ROOT_TIMES | + VOL_CAP_FMT_SPARSE_FILES | + VOL_CAP_FMT_ZERO_RUNS | + VOL_CAP_FMT_CASE_SENSITIVE | + VOL_CAP_FMT_CASE_PRESERVING | + VOL_CAP_FMT_FAST_STATFS ; vcapattrptr->valid[VOL_CAPABILITIES_INTERFACES] = VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_ATTRLIST | VOL_CAP_INT_NFSEXPORT | - VOL_CAP_INT_READDIRATTR ; + 
VOL_CAP_INT_READDIRATTR | + VOL_CAP_INT_EXCHANGEDATA | + VOL_CAP_INT_COPYFILE | + VOL_CAP_INT_ALLOCATE | + VOL_CAP_INT_VOL_RENAME | + VOL_CAP_INT_ADVLOCK | + VOL_CAP_INT_FLOCK ; vcapattrptr->valid[VOL_CAPABILITIES_RESERVED1] = 0; vcapattrptr->valid[VOL_CAPABILITIES_RESERVED2] = 0; @@ -1322,7 +1356,8 @@ packcommonattr( struct hfsmount *hfsmp, struct vnode *vp, struct cat_desc * cdp, - struct cat_attr * cap) + struct cat_attr * cap, + struct proc *p) { attrgroup_t attr = abp->ab_attrlist->commonattr; struct mount *mp = HFSTOVFS(hfsmp); @@ -1331,7 +1366,7 @@ packcommonattr( u_long attrlength = 0; if (ATTR_CMN_NAME & attr) { - packnameattr(abp, vp, cdp->cd_nameptr, cdp->cd_namelen); + packnameattr(abp, vp, cdp->cd_nameptr, cdp->cd_namelen, p); attrbufptr = *abp->ab_attrbufpp; varbufptr = *abp->ab_varbufpp; } @@ -1409,7 +1444,7 @@ packcommonattr( } if (ATTR_CMN_OWNERID & attr) { *((uid_t *)attrbufptr)++ = - (cap->ca_uid == UNKNOWNUID) ? console_user : cap->ca_uid; + (cap->ca_uid == UNKNOWNUID) ? p->p_ucred->cr_uid : cap->ca_uid; } if (ATTR_CMN_GRPID & attr) { *((gid_t *)attrbufptr)++ = cap->ca_gid; @@ -1459,7 +1494,8 @@ packdirattr( struct hfsmount *hfsmp, struct vnode *vp, struct cat_desc * descp, - struct cat_attr * cattrp) + struct cat_attr * cattrp, + struct proc *p) { attrgroup_t attr = abp->ab_attrlist->dirattr; void *attrbufptr = *abp->ab_attrbufpp; @@ -1470,7 +1506,7 @@ packdirattr( u_long entries = cattrp->ca_entries; if (descp->cd_parentcnid == kRootParID) { - if (hfsmp->hfs_private_metadata_dir != 0) + if (hfsmp->hfs_privdir_desc.cd_cnid != 0) --entries; /* hide private dir */ if (hfsmp->jnl) entries -= 2; /* hide the journal files */ @@ -1493,7 +1529,8 @@ packfileattr( struct hfsmount *hfsmp, struct cat_attr *cattrp, struct cat_fork *datafork, - struct cat_fork *rsrcfork) + struct cat_fork *rsrcfork, + struct proc *p) { attrgroup_t attr = abp->ab_attrlist->fileattr; void *attrbufptr = *abp->ab_attrbufpp; @@ -1517,7 +1554,7 @@ packfileattr( *((u_long *)attrbufptr)++ = hfsmp->hfs_logBlockSize; } if (ATTR_FILE_CLUMPSIZE & attr) { - *((u_long *)attrbufptr)++ = datafork->cf_clump; /* XXX ambiguity */ + *((u_long *)attrbufptr)++ = HFSTOVCB(hfsmp)->vcbClpSiz; } if (ATTR_FILE_DEVTYPE & attr) { if (S_ISBLK(cattrp->ca_mode) || S_ISCHR(cattrp->ca_mode)) @@ -1870,7 +1907,7 @@ DerivePermissionSummary(uid_t obj_uid, gid_t obj_gid, mode_t obj_mode, int i; if (obj_uid == UNKNOWNUID) - obj_uid = console_user; + obj_uid = p->p_ucred->cr_uid; /* User id 0 (root) always gets access. */ if (cred->cr_uid == 0) { diff --git a/bsd/hfs/hfs_attrlist.h b/bsd/hfs/hfs_attrlist.h index c3ba90752..3e03a5190 100644 --- a/bsd/hfs/hfs_attrlist.h +++ b/bsd/hfs/hfs_attrlist.h @@ -64,7 +64,7 @@ extern unsigned long DerivePermissionSummary(uid_t obj_uid, gid_t obj_gid, extern void hfs_packattrblk(struct attrblock *abp, struct hfsmount *hfsmp, struct vnode *vp, struct cat_desc *descp, struct cat_attr *attrp, - struct cat_fork *datafork, struct cat_fork *rsrcfork); + struct cat_fork *datafork, struct cat_fork *rsrcfork, struct proc *p); #endif /* __APPLE_API_PRIVATE */ #endif /* KERNEL */ diff --git a/bsd/hfs/hfs_btreeio.c b/bsd/hfs/hfs_btreeio.c index 3f7c3cc0c..d8dd669f0 100644 --- a/bsd/hfs/hfs_btreeio.c +++ b/bsd/hfs/hfs_btreeio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -142,6 +142,27 @@ void ModifyBlockStart(FileReference vp, BlockDescPtr blockPtr) blockPtr->isModified = 1; } +static int +btree_journal_modify_block_end(struct hfsmount *hfsmp, struct buf *bp) +{ +#if BYTE_ORDER == LITTLE_ENDIAN + struct vnode *vp = bp->b_vp; + BlockDescriptor block; + + /* Prepare the block pointer */ + block.blockHeader = bp; + block.buffer = bp->b_data; + /* not found in cache ==> came from disk */ + block.blockReadFromDisk = (bp->b_flags & B_CACHE) == 0; + block.blockSize = bp->b_bcount; + + // XXXdbg have to swap the data before it goes in the journal + SWAP_BT_NODE (&block, ISHFSPLUS (VTOVCB(vp)), VTOC(vp)->c_fileid, 1); +#endif + + return journal_modify_block_end(hfsmp->jnl, bp); +} + __private_extern__ OSStatus ReleaseBTreeBlock(FileReference vp, BlockDescPtr blockPtr, ReleaseBlockOptions options) @@ -171,7 +192,8 @@ OSStatus ReleaseBTreeBlock(FileReference vp, BlockDescPtr blockPtr, ReleaseBlock if (blockPtr->isModified == 0) { panic("hfs: releaseblock: modified is 0 but forcewrite set! bp 0x%x\n", bp); } - retval = journal_modify_block_end(hfsmp->jnl, bp); + + retval = btree_journal_modify_block_end(hfsmp, bp); blockPtr->isModified = 0; } else { retval = VOP_BWRITE(bp); @@ -206,7 +228,7 @@ OSStatus ReleaseBTreeBlock(FileReference vp, BlockDescPtr blockPtr, ReleaseBlock if (blockPtr->isModified == 0) { panic("hfs: releaseblock: modified is 0 but markdirty set! bp 0x%x\n", bp); } - retval = journal_modify_block_end(hfsmp->jnl, bp); + retval = btree_journal_modify_block_end(hfsmp, bp); blockPtr->isModified = 0; } else if (bdwrite_internal(bp, 1) != 0) { hfs_btsync(vp, 0); @@ -226,7 +248,7 @@ OSStatus ReleaseBTreeBlock(FileReference vp, BlockDescPtr blockPtr, ReleaseBlock // // journal_modify_block_abort(hfsmp->jnl, bp); //panic("hfs: releaseblock called for 0x%x but mod_block_start previously called.\n", bp); - journal_modify_block_end(hfsmp->jnl, bp); + btree_journal_modify_block_end(hfsmp, bp); blockPtr->isModified = 0; } else { brelse(bp); /* note: B-tree code will clear blockPtr->blockHeader and blockPtr->buffer */ @@ -311,7 +333,9 @@ OSStatus ExtendBTreeFile(FileReference vp, FSSize minEOF, FSSize maxEOF) // is at least the node size then we break out of the loop and let // the error propagate back up. do { - retval = ExtendFileC(vcb, filePtr, bytesToAdd, 0, kEFContigMask, &actualBytesAdded); + retval = ExtendFileC(vcb, filePtr, bytesToAdd, 0, + kEFContigMask | kEFMetadataMask, + &actualBytesAdded); if (retval == dskFulErr && actualBytesAdded == 0) { if (bytesToAdd == btInfo.nodeSize || bytesToAdd < (minEOF - origSize)) { @@ -336,6 +360,7 @@ OSStatus ExtendBTreeFile(FileReference vp, FSSize minEOF, FSSize maxEOF) * there's plenty of room to grow. 
*/ if ((retval == 0) && + ((VCBTOHFS(vcb)->hfs_flags & HFS_METADATA_ZONE) == 0) && (vcb->nextAllocation > startAllocation) && ((vcb->nextAllocation + fileblocks) < vcb->totalBlocks)) { vcb->nextAllocation += fileblocks; @@ -418,6 +443,11 @@ OSStatus ExtendBTreeFile(FileReference vp, FSSize minEOF, FSSize maxEOF) ) { MarkVCBDirty( vcb ); ret = hfs_flushvolumeheader(VCBTOHFS(vcb), MNT_WAIT, HFS_ALTFLUSH); + } else { + struct timeval tv = time; + + VTOC(vp)->c_flag |= C_CHANGE | C_UPDATE; + (void) VOP_UPDATE(vp, &tv, &tv, MNT_WAIT); } ret = ClearBTNodes(vp, btInfo.nodeSize, filePtr->fcbEOF - actualBytesAdded, actualBytesAdded); diff --git a/bsd/hfs/hfs_catalog.c b/bsd/hfs/hfs_catalog.c index 32bc234de..b9c7f6f79 100644 --- a/bsd/hfs/hfs_catalog.c +++ b/bsd/hfs/hfs_catalog.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -89,6 +89,8 @@ extern int unicode_to_hfs(ExtendedVCB *vcb, ByteCount srcLen, int resolvelink(struct hfsmount *hfsmp, u_long linkref, struct HFSPlusCatalogFile *recp); +static int resolvelinkid(struct hfsmount *hfsmp, u_long linkref, ino_t *ino); + static int getkey(struct hfsmount *hfsmp, cnid_t cnid, CatalogKey * key); static int buildkey(struct hfsmount *hfsmp, struct cat_desc *descp, @@ -118,8 +120,46 @@ static int isadir(const CatalogRecord *crp); static int buildthread(void *keyp, void *recp, int std_hfs, int directory); +__private_extern__ +int +cat_preflight(struct hfsmount *hfsmp, catops_t ops, cat_cookie_t *cookie, struct proc *p) +{ + FCB *fcb; + int result; + + fcb = GetFileControlBlock(HFSTOVCB(hfsmp)->catalogRefNum); + + /* Lock catalog b-tree */ + result = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); + if (result) + return (result); + + result = BTReserveSpace(fcb, ops, (void*)cookie); + + /* Unlock catalog b-tree */ + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); + + MacToVFSError(result); +} + +__private_extern__ +void +cat_postflight(struct hfsmount *hfsmp, cat_cookie_t *cookie, struct proc *p) +{ + FCB *fcb; + int error; + + fcb = GetFileControlBlock(HFSTOVCB(hfsmp)->catalogRefNum); + + error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); + (void) BTReleaseReserve(fcb, (void*)cookie); + if (error == 0) { + hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); + } +} + - +__private_extern__ void cat_convertattr( struct hfsmount *hfsmp, @@ -145,11 +185,39 @@ cat_convertattr( promotefork(hfsmp, (HFSCatalogFile *)&recp->hfsFile, 0, datafp); promotefork(hfsmp, (HFSCatalogFile *)&recp->hfsFile, 1, rsrcfp); } else { - bcopy(&recp->hfsPlusFile.dataFork, datafp, sizeof(*datafp)); - bcopy(&recp->hfsPlusFile.resourceFork, rsrcfp, sizeof(*rsrcfp)); + /* Convert the data fork. */ + datafp->cf_size = recp->hfsPlusFile.dataFork.logicalSize; + datafp->cf_blocks = recp->hfsPlusFile.dataFork.totalBlocks; + if ((hfsmp->hfc_stage == HFC_RECORDING) && + (attrp->ca_atime >= hfsmp->hfc_timebase)) { + datafp->cf_bytesread = + recp->hfsPlusFile.dataFork.clumpSize * + HFSTOVCB(hfsmp)->blockSize; + } else { + datafp->cf_bytesread = 0; + } + datafp->cf_vblocks = 0; + bcopy(&recp->hfsPlusFile.dataFork.extents[0], + &datafp->cf_extents[0], sizeof(HFSPlusExtentRecord)); + + /* Convert the resource fork. 
*/ + rsrcfp->cf_size = recp->hfsPlusFile.resourceFork.logicalSize; + rsrcfp->cf_blocks = recp->hfsPlusFile.resourceFork.totalBlocks; + if ((hfsmp->hfc_stage == HFC_RECORDING) && + (attrp->ca_atime >= hfsmp->hfc_timebase)) { + datafp->cf_bytesread = + recp->hfsPlusFile.resourceFork.clumpSize * + HFSTOVCB(hfsmp)->blockSize; + } else { + datafp->cf_bytesread = 0; + } + rsrcfp->cf_vblocks = 0; + bcopy(&recp->hfsPlusFile.resourceFork.extents[0], + &rsrcfp->cf_extents[0], sizeof(HFSPlusExtentRecord)); } } +__private_extern__ int cat_convertkey( struct hfsmount *hfsmp, @@ -181,6 +249,7 @@ cat_convertkey( /* * cat_releasedesc */ +__private_extern__ void cat_releasedesc(struct cat_desc *descp) { @@ -195,7 +264,7 @@ cat_releasedesc(struct cat_desc *descp) descp->cd_nameptr = NULL; descp->cd_namelen = 0; descp->cd_flags &= ~CD_HASBUF; - FREE(name, M_TEMP); + remove_name(name); } descp->cd_nameptr = NULL; descp->cd_namelen = 0; @@ -209,6 +278,7 @@ cat_releasedesc(struct cat_desc *descp) /* * cat_lookup - lookup a catalog node using a cnode decriptor */ +__private_extern__ int cat_lookup(struct hfsmount *hfsmp, struct cat_desc *descp, int wantrsrc, struct cat_desc *outdescp, struct cat_attr *attrp, @@ -243,6 +313,7 @@ exit: return (result); } +__private_extern__ int cat_insertfilethread(struct hfsmount *hfsmp, struct cat_desc *descp) { @@ -264,11 +335,6 @@ cat_insertfilethread(struct hfsmount *hfsmp, struct cat_desc *descp) if (result) goto exit; - // XXXdbg - preflight all btree operations to make sure there's enough space - result = BTCheckFreeSpace(fcb); - if (result) - goto exit; - BDINIT(file_data, &file_rec); result = BTSearchRecord(fcb, &iterator[0], &file_data, &datasize, &iterator[0]); if (result) @@ -306,6 +372,7 @@ exit: /* * cat_idlookup - lookup a catalog node using a cnode id */ +__private_extern__ int cat_idlookup(struct hfsmount *hfsmp, cnid_t cnid, struct cat_desc *outdescp, struct cat_attr *attrp, struct cat_fork *forkp) @@ -473,14 +540,41 @@ cat_lookupbykey(struct hfsmount *hfsmp, CatalogKey *keyp, u_long hint, int wantr } } if (forkp != NULL) { - if (isadir(recp)) + if (isadir(recp)) { bzero(forkp, sizeof(*forkp)); - else if (std_hfs) + } else if (std_hfs) { promotefork(hfsmp, (HFSCatalogFile *)&recp->hfsFile, wantrsrc, forkp); - else if (wantrsrc) - bcopy(&recp->hfsPlusFile.resourceFork, forkp, sizeof(*forkp)); - else - bcopy(&recp->hfsPlusFile.dataFork, forkp, sizeof(*forkp)); + } else if (wantrsrc) { + /* Convert the resource fork. */ + forkp->cf_size = recp->hfsPlusFile.resourceFork.logicalSize; + forkp->cf_blocks = recp->hfsPlusFile.resourceFork.totalBlocks; + if ((hfsmp->hfc_stage == HFC_RECORDING) && + (to_bsd_time(recp->hfsPlusFile.accessDate) >= hfsmp->hfc_timebase)) { + forkp->cf_bytesread = + recp->hfsPlusFile.resourceFork.clumpSize * + HFSTOVCB(hfsmp)->blockSize; + } else { + forkp->cf_bytesread = 0; + } + forkp->cf_vblocks = 0; + bcopy(&recp->hfsPlusFile.resourceFork.extents[0], + &forkp->cf_extents[0], sizeof(HFSPlusExtentRecord)); + } else { + /* Convert the data fork. 
*/ + forkp->cf_size = recp->hfsPlusFile.dataFork.logicalSize; + forkp->cf_blocks = recp->hfsPlusFile.dataFork.totalBlocks; + if ((hfsmp->hfc_stage == HFC_RECORDING) && + (to_bsd_time(recp->hfsPlusFile.accessDate) >= hfsmp->hfc_timebase)) { + forkp->cf_bytesread = + recp->hfsPlusFile.dataFork.clumpSize * + HFSTOVCB(hfsmp)->blockSize; + } else { + forkp->cf_bytesread = 0; + } + forkp->cf_vblocks = 0; + bcopy(&recp->hfsPlusFile.dataFork.extents[0], + &forkp->cf_extents[0], sizeof(HFSPlusExtentRecord)); + } } if (descp != NULL) { HFSPlusCatalogKey * pluskey = NULL; @@ -508,6 +602,7 @@ exit: /* * cat_create - create a node in the catalog */ +__private_extern__ int cat_create(struct hfsmount *hfsmp, struct cat_desc *descp, struct cat_attr *attrp, struct cat_desc *out_descp) @@ -547,11 +642,6 @@ cat_create(struct hfsmount *hfsmp, struct cat_desc *descp, struct cat_attr *attr hfs_setencodingbits(hfsmp, encoding); } - // XXXdbg - preflight all btree operations to make sure there's enough space - result = BTCheckFreeSpace(fcb); - if (result) - goto exit; - /* * Insert the thread record first */ @@ -660,6 +750,7 @@ exit: * 4. BTDeleteRecord(from_thread); * 5. BTInsertRecord(to_thread); */ +__private_extern__ int cat_rename ( struct hfsmount * hfsmp, @@ -700,11 +791,6 @@ cat_rename ( if ((result = buildkey(hfsmp, to_cdp, (HFSPlusCatalogKey *)&to_iterator->key, 0))) goto exit; - // XXXdbg - preflight all btree operations to make sure there's enough space - result = BTCheckFreeSpace(fcb); - if (result) - goto exit; - to_key = (HFSPlusCatalogKey *)&to_iterator->key; MALLOC(recp, CatalogRecord *, sizeof(CatalogRecord), M_TEMP, M_WAITOK); BDINIT(btdata, recp); @@ -753,7 +839,7 @@ cat_rename ( if (result) goto exit; - /* Update the text encoding (on disk and in descriptor */ + /* Update the text encoding (on disk and in descriptor) */ if (!std_hfs) { encoding = hfs_pickencoding(to_key->nodeName.unicode, to_key->nodeName.length); @@ -871,6 +957,14 @@ cat_rename ( if (std_hfs) { MALLOC(pluskey, HFSPlusCatalogKey *, sizeof(HFSPlusCatalogKey), M_TEMP, M_WAITOK); promotekey(hfsmp, (HFSCatalogKey *)&to_iterator->key, pluskey, &encoding); + + /* Save the real encoding hint in the Finder Info (field 4). */ + if (directory && from_cdp->cd_cnid == kHFSRootFolderID) { + u_long realhint; + + realhint = hfs_pickencoding(pluskey->nodeName.unicode, pluskey->nodeName.length); + vcb->vcbFndrInfo[4] = SET_HFS_TEXT_ENCODING(realhint); + } } else pluskey = (HFSPlusCatalogKey *)&to_iterator->key; @@ -901,6 +995,7 @@ exit: * 2. BTDeleteRecord(thread); * 3. BTUpdateRecord(parent); */ +__private_extern__ int cat_delete(struct hfsmount *hfsmp, struct cat_desc *descp, struct cat_attr *attrp) { @@ -945,11 +1040,6 @@ cat_delete(struct hfsmount *hfsmp, struct cat_desc *descp, struct cat_attr *attr if (result) goto exit; - // XXXdbg - preflight all btree operations to make sure there's enough space - result = BTCheckFreeSpace(fcb); - if (result) - goto exit; - /* Delete record */ result = BTDeleteRecord(fcb, iterator); if (result) @@ -973,6 +1063,7 @@ exit: * cnode_update - update the catalog node described by descp * using the data from attrp and forkp. 
*/ +__private_extern__ int cat_update(struct hfsmount *hfsmp, struct cat_desc *descp, struct cat_attr *attrp, struct cat_fork *dataforkp, struct cat_fork *rsrcforkp) @@ -1217,6 +1308,9 @@ catrec_update(const CatalogKey *ckp, CatalogRecord *crp, u_int16_t reclen, file->resourceFork.totalBlocks = forkp->cf_blocks; bcopy(&forkp->cf_extents[0], &file->resourceFork.extents, sizeof(HFSPlusExtentRecord)); + /* Push blocks read to disk */ + file->resourceFork.clumpSize = + howmany(forkp->cf_bytesread, blksize); } if (state->s_datafork) { forkp = state->s_datafork; @@ -1224,6 +1318,9 @@ catrec_update(const CatalogKey *ckp, CatalogRecord *crp, u_int16_t reclen, file->dataFork.totalBlocks = forkp->cf_blocks; bcopy(&forkp->cf_extents[0], &file->dataFork.extents, sizeof(HFSPlusExtentRecord)); + /* Push blocks read to disk */ + file->resourceFork.clumpSize = + howmany(forkp->cf_bytesread, blksize); } if ((file->resourceFork.extents[0].startBlock != 0) && @@ -1295,7 +1392,7 @@ catrec_readattr(const CatalogKey *key, const CatalogRecord *rec, /* Hide the private meta data directory and journal files */ if (parentcnid == kRootDirID) { if ((rec->recordType == kHFSPlusFolderRecord) && - (rec->hfsPlusFolder.folderID == hfsmp->hfs_private_metadata_dir)) { + (rec->hfsPlusFolder.folderID == hfsmp->hfs_privdir_desc.cd_cnid)) { return (1); /* continue */ } if (hfsmp->jnl && @@ -1355,6 +1452,7 @@ catrec_readattr(const CatalogKey *key, const CatalogRecord *rec, /* * Note: index is zero relative */ +__private_extern__ int cat_getentriesattr(struct hfsmount *hfsmp, struct cat_desc *prevdesc, int index, struct cat_entrylist *ce_list) @@ -1463,6 +1561,10 @@ exit: return MacToVFSError(result); } +struct linkinfo { + u_long link_ref; + void * dirent_addr; +}; struct read_state { u_int32_t cbs_parentID; @@ -1472,20 +1574,40 @@ struct read_state { off_t cbs_lastoffset; struct uio * cbs_uio; ExtendedVCB * cbs_vcb; - int16_t cbs_hfsPlus; + int8_t cbs_hfsPlus; + int8_t cbs_case_sensitive; int16_t cbs_result; + int32_t cbs_numresults; + u_long *cbs_cookies; + int32_t cbs_ncookies; + int32_t cbs_nlinks; + int32_t cbs_maxlinks; + struct linkinfo *cbs_linkinfo; }; +/* Map file mode type to directory entry types */ +u_char modetodirtype[16] = { + DT_REG, DT_FIFO, DT_CHR, DT_UNKNOWN, + DT_DIR, DT_UNKNOWN, DT_BLK, DT_UNKNOWN, + DT_REG, DT_UNKNOWN, DT_LNK, DT_UNKNOWN, + DT_SOCK, DT_UNKNOWN, DT_WHT, DT_UNKNOWN +}; + +#define MODE_TO_DT(mode) (modetodirtype[((mode) & S_IFMT) >> 12]) static int catrec_read(const CatalogKey *ckp, const CatalogRecord *crp, u_int16_t recordLen, struct read_state *state) { + struct hfsmount *hfsmp; CatalogName *cnp; size_t utf8chars; u_int32_t curID; OSErr result; struct dirent catent; + time_t itime; + u_long ilinkref = 0; + void * uiobase; if (state->cbs_hfsPlus) curID = ckp->hfsPlus.parentID; @@ -1529,7 +1651,18 @@ lastitem: catent.d_fileno = crp->hfsPlusFolder.folderID; break; case kHFSPlusFileRecord: - catent.d_type = DT_REG; + itime = to_bsd_time(crp->hfsPlusFile.createDate); + hfsmp = VCBTOHFS(state->cbs_vcb); + /* + * When a hardlink link is encountered save its link ref. 
+ */ + if ((SWAP_BE32(crp->hfsPlusFile.userInfo.fdType) == kHardLinkFileType) && + (SWAP_BE32(crp->hfsPlusFile.userInfo.fdCreator) == kHFSPlusCreator) && + ((itime == state->cbs_vcb->vcbCrDate) || + (itime == hfsmp->hfs_metadata_createdate))) { + ilinkref = crp->hfsPlusFile.bsdInfo.special.iNodeNum; + } + catent.d_type = MODE_TO_DT(crp->hfsPlusFile.bsdInfo.fileMode); catent.d_fileno = crp->hfsPlusFile.fileID; break; default: @@ -1575,37 +1708,76 @@ lastitem: /* hide our private meta data directory */ if (curID == kRootDirID && catent.d_fileno == state->cbs_hiddenDirID && - catent.d_type == DT_DIR) - goto lastitem; - + catent.d_type == DT_DIR) { + if (state->cbs_case_sensitive) { + // This is how we skip over these entries. The next + // time we fill in a real item the uio_offset will + // point to the correct place in the "virtual" directory + // so that PositionIterator() will do the right thing + // when scanning to get to a particular position in the + // directory. + state->cbs_uio->uio_offset += catent.d_reclen; + state->cbs_lastoffset = state->cbs_uio->uio_offset; + + return (1); /* skip and continue */ + } else + goto lastitem; + } + /* Hide the journal files */ if ((curID == kRootDirID) && (catent.d_type == DT_REG) && ((catent.d_fileno == state->cbs_hiddenJournalID) || (catent.d_fileno == state->cbs_hiddenInfoBlkID))) { + // see comment up above for why this is here + state->cbs_uio->uio_offset += catent.d_reclen; + state->cbs_lastoffset = state->cbs_uio->uio_offset; + return (1); /* skip and continue */ } state->cbs_lastoffset = state->cbs_uio->uio_offset; + uiobase = state->cbs_uio->uio_iov->iov_base; /* if this entry won't fit then we're done */ - if (catent.d_reclen > state->cbs_uio->uio_resid) + if (catent.d_reclen > state->cbs_uio->uio_resid || + (ilinkref != 0 && state->cbs_nlinks == state->cbs_maxlinks) || + (state->cbs_ncookies != 0 && state->cbs_numresults >= state->cbs_ncookies)) return (0); /* stop */ state->cbs_result = uiomove((caddr_t) &catent, catent.d_reclen, state->cbs_uio); + /* + * Record any hard links for post processing. 
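+ *
+ * The dirent just copied to the user's buffer still carries the
+ * link file's own d_fileno.  uiobase was sampled before the
+ * uiomove(), so it points at the start of that dirent in the
+ * caller's buffer; saving the address together with the link ref
+ * lets the post-processing loop in cat_getdirentries() patch
+ * d_fileno in place.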
+ */ + if ((ilinkref != 0) && + (state->cbs_result == 0) && + (state->cbs_nlinks < state->cbs_maxlinks)) { + state->cbs_linkinfo[state->cbs_nlinks].dirent_addr = uiobase; + state->cbs_linkinfo[state->cbs_nlinks].link_ref = ilinkref; + state->cbs_nlinks++; + } + + if (state->cbs_cookies) { + state->cbs_cookies[state->cbs_numresults++] = state->cbs_uio->uio_offset; + } else { + state->cbs_numresults++; + } + /* continue iteration if there's room */ return (state->cbs_result == 0 && state->cbs_uio->uio_resid >= AVERAGE_HFSDIRENTRY_SIZE); } +#define SMALL_DIRENTRY_SIZE (sizeof(struct dirent) - (MAXNAMLEN + 1) + 8) /* * */ +__private_extern__ int -cat_getdirentries(struct hfsmount *hfsmp, struct cat_desc *descp, - struct uio *uio, int *eofflag) +cat_getdirentries(struct hfsmount *hfsmp, struct cat_desc *descp, int entrycnt, + struct uio *uio, int *eofflag, u_long *cookies, int ncookies) { ExtendedVCB *vcb = HFSTOVCB(hfsmp); BTreeIterator * iterator; @@ -1614,13 +1786,24 @@ cat_getdirentries(struct hfsmount *hfsmp, struct cat_desc *descp, u_int16_t op; struct read_state state; u_int32_t dirID = descp->cd_cnid; + void * buffer; + int bufsize; + int maxdirentries; int result; diroffset = uio->uio_offset; *eofflag = 0; + maxdirentries = MIN(entrycnt, uio->uio_resid / SMALL_DIRENTRY_SIZE); - MALLOC(iterator, BTreeIterator *, sizeof(*iterator), M_TEMP, M_WAITOK); - bzero(iterator, sizeof(*iterator)); + /* Get a buffer for collecting link info and for a btree iterator */ + bufsize = (maxdirentries * sizeof(struct linkinfo)) + sizeof(*iterator); + MALLOC(buffer, void *, bufsize, M_TEMP, M_WAITOK); + bzero(buffer, bufsize); + + state.cbs_nlinks = 0; + state.cbs_maxlinks = maxdirentries; + state.cbs_linkinfo = (struct linkinfo *) buffer; + iterator = (BTreeIterator *) ((char *)buffer + (maxdirentries * sizeof(struct linkinfo))); /* get an iterator and position it */ cip = GetCatalogIterator(vcb, dirID, diroffset); @@ -1634,7 +1817,7 @@ cat_getdirentries(struct hfsmount *hfsmp, struct cat_desc *descp, } else if ((result = MacToVFSError(result))) goto cleanup; - state.cbs_hiddenDirID = hfsmp->hfs_private_metadata_dir; + state.cbs_hiddenDirID = hfsmp->hfs_privdir_desc.cd_cnid; if (hfsmp->jnl) { state.cbs_hiddenJournalID = hfsmp->hfs_jnlfileid; state.cbs_hiddenInfoBlkID = hfsmp->hfs_jnlinfoblkid; @@ -1645,16 +1828,58 @@ cat_getdirentries(struct hfsmount *hfsmp, struct cat_desc *descp, state.cbs_uio = uio; state.cbs_result = 0; state.cbs_parentID = dirID; + if (diroffset <= 2*sizeof(struct hfsdotentry)) { + state.cbs_numresults = diroffset/sizeof(struct hfsdotentry); + } else { + state.cbs_numresults = 0; + } + state.cbs_cookies = cookies; + state.cbs_ncookies = ncookies; if (vcb->vcbSigWord == kHFSPlusSigWord) state.cbs_hfsPlus = 1; else state.cbs_hfsPlus = 0; + if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) + state.cbs_case_sensitive = 1; + else + state.cbs_case_sensitive = 0; + /* process as many entries as possible... */ result = BTIterateRecords(GetFileControlBlock(vcb->catalogRefNum), op, iterator, (IterateCallBackProcPtr)catrec_read, &state); + /* + * Post process any hard links to get the real file id. 
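+ *
+ * A private uio/iovec pair is pointed back at each recorded dirent
+ * and exactly sizeof(fileid) bytes are rewritten.  This relies on
+ * d_fileno being the first field of struct dirent, so dirent_addr
+ * doubles as the address of d_fileno.  Links that cannot be
+ * resolved are left alone and keep the link file's own id.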
+ */ + if (state.cbs_nlinks > 0) { + struct iovec aiov; + struct uio auio; + u_int32_t fileid; + int i; + u_int32_t tempid; + + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_segflg = uio->uio_segflg; + auio.uio_rw = UIO_READ; /* read kernel memory into user memory */ + auio.uio_procp = uio->uio_procp; + + for (i = 0; i < state.cbs_nlinks; ++i) { + fileid = 0; + + if (resolvelinkid(hfsmp, state.cbs_linkinfo[i].link_ref, &fileid) != 0) + continue; + + /* Update the file id in the user's buffer */ + aiov.iov_base = (char *) state.cbs_linkinfo[i].dirent_addr; + aiov.iov_len = sizeof(fileid); + auio.uio_offset = 0; + auio.uio_resid = aiov.iov_len; + (void) uiomove((caddr_t)&fileid, sizeof(fileid), &auio); + } + } if (state.cbs_result) result = state.cbs_result; else @@ -1679,12 +1904,70 @@ cleanup: } (void) ReleaseCatalogIterator(cip); - FREE(iterator, M_TEMP); + FREE(buffer, M_TEMP); return (result); } +/* + * cat_binarykeycompare - compare two HFS Plus catalog keys. + * + * The name portion of the key is compared using a 16-bit binary comparison. + * This is called from the b-tree code. + */ +__private_extern__ +int +cat_binarykeycompare(HFSPlusCatalogKey *searchKey, HFSPlusCatalogKey *trialKey) +{ + u_int32_t searchParentID, trialParentID; + int result; + + searchParentID = searchKey->parentID; + trialParentID = trialKey->parentID; + result = 0; + + if (searchParentID > trialParentID) { + ++result; + } else if (searchParentID < trialParentID) { + --result; + } else { + u_int16_t * str1 = &searchKey->nodeName.unicode[0]; + u_int16_t * str2 = &trialKey->nodeName.unicode[0]; + int length1 = searchKey->nodeName.length; + int length2 = trialKey->nodeName.length; + u_int16_t c1, c2; + int length; + + if (length1 < length2) { + length = length1; + --result; + } else if (length1 > length2) { + length = length2; + ++result; + } else { + length = length1; + } + + while (length--) { + c1 = *(str1++); + c2 = *(str2++); + + if (c1 > c2) { + result = 1; + break; + } + if (c1 < c2) { + result = -1; + break; + } + } + } + + return result; +} + + /* * buildkey - build a Catalog b-tree key from a cnode descriptor */ @@ -1766,7 +2049,7 @@ resolvelink(struct hfsmount *hfsmp, u_long linkref, struct HFSPlusCatalogFile *r bzero(iterator, sizeof(*iterator)); /* Build a descriptor for private dir. */ - idesc.cd_parentcnid = hfsmp->hfs_private_metadata_dir; + idesc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid; idesc.cd_nameptr = inodename; idesc.cd_namelen = strlen(inodename); idesc.cd_flags = 0; @@ -1790,6 +2073,25 @@ resolvelink(struct hfsmount *hfsmp, u_long linkref, struct HFSPlusCatalogFile *r return (result ? ENOENT : 0); } +/* + * Resolve hard link reference to obtain the inode number. + */ +static int +resolvelinkid(struct hfsmount *hfsmp, u_long linkref, ino_t *ino) +{ + struct HFSPlusCatalogFile record; + int error; + + error = resolvelink(hfsmp, linkref, &record); + if (error == 0) { + if (record.fileID == 0) + error = ENOENT; + else + *ino = record.fileID; + } + return (error); +} + /* * getkey - get a key from id by doing a thread lookup */ @@ -1947,10 +2249,15 @@ builddesc(const HFSPlusCatalogKey *key, cnid_t cnid, u_long hint, u_long encodin char * nameptr; long bufsize; size_t utf8len; + char tmpbuff[128]; /* guess a size...
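+ the conversion is sized at 3 bytes per UniChar (bufsize =
+ 3 * length + 1), so this 128-byte stack buffer covers names up
+ to 41 Unicode characters (3*41+1 = 124) and only longer names
+ take the MALLOC path; a sizing sketch, not a hard limit of the
+ encoder.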
*/ bufsize = (3 * key->nodeName.length) + 1; - MALLOC(nameptr, char *, bufsize, M_TEMP, M_WAITOK); + if (bufsize >= sizeof(tmpbuff)-1) { + MALLOC(nameptr, char *, bufsize, M_TEMP, M_WAITOK); + } else { + nameptr = &tmpbuff[0]; + } result = utf8_encodestr(key->nodeName.unicode, key->nodeName.length * sizeof(UniChar), @@ -1970,14 +2277,17 @@ builddesc(const HFSPlusCatalogKey *key, cnid_t cnid, u_long hint, u_long encodin bufsize, ':', 0); } descp->cd_parentcnid = key->parentID; - descp->cd_nameptr = nameptr; + descp->cd_nameptr = add_name(nameptr, utf8len, 0, 0); descp->cd_namelen = utf8len; descp->cd_cnid = cnid; descp->cd_hint = hint; descp->cd_flags = CD_DECOMPOSED | CD_HASBUF; if (isdir) - descp->cd_flags |= CD_ISDIR; + descp->cd_flags |= CD_ISDIR; descp->cd_encoding = encoding; + if (nameptr != &tmpbuff[0]) { + FREE(nameptr, M_TEMP); + } return result; } @@ -2115,6 +2425,8 @@ promotefork(struct hfsmount *hfsmp, const struct HFSCatalogFile *filep, if (resource) { forkp->cf_size = filep->rsrcLogicalSize; forkp->cf_blocks = filep->rsrcPhysicalSize / blocksize; + forkp->cf_bytesread = 0; + forkp->cf_vblocks = 0; xp[0].startBlock = (u_int32_t)filep->rsrcExtents[0].startBlock; xp[0].blockCount = (u_int32_t)filep->rsrcExtents[0].blockCount; xp[1].startBlock = (u_int32_t)filep->rsrcExtents[1].startBlock; @@ -2124,6 +2436,8 @@ promotefork(struct hfsmount *hfsmp, const struct HFSCatalogFile *filep, } else { forkp->cf_size = filep->dataLogicalSize; forkp->cf_blocks = filep->dataPhysicalSize / blocksize; + forkp->cf_bytesread = 0; + forkp->cf_vblocks = 0; xp[0].startBlock = (u_int32_t)filep->dataExtents[0].startBlock; xp[0].blockCount = (u_int32_t)filep->dataExtents[0].blockCount; xp[1].startBlock = (u_int32_t)filep->dataExtents[1].startBlock; diff --git a/bsd/hfs/hfs_catalog.h b/bsd/hfs/hfs_catalog.h index 8ed86ba31..6f6becd2d 100644 --- a/bsd/hfs/hfs_catalog.h +++ b/bsd/hfs/hfs_catalog.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2002-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * * @@ -65,6 +65,7 @@ struct cat_desc { /* cd_flags */ #define CD_HASBUF 0x01 /* allocated filename buffer */ #define CD_DECOMPOSED 0x02 /* name is fully decomposed */ +#define CD_ISMETA 0x40 /* describes a metadata file */ #define CD_ISDIR 0x80 /* describes a directory */ /* @@ -95,15 +96,24 @@ struct cat_attr { #define ca_entries ca_union.cau_entries /* - * Catalog Node Fork (runtime + on disk) + * Catalog Node Fork (runtime) + * + * NOTE: this is not the same as a struct HFSPlusForkData */ struct cat_fork { - u_int64_t cf_size; /* fork's logical size in bytes */ - u_int32_t cf_clump; /* fork's clump size in bytes */ - u_int32_t cf_blocks; /* total blocks used by this fork */ - struct HFSPlusExtentDescriptor cf_extents[8]; /* initial set of extents */ + u_int64_t cf_size; /* fork's logical size in bytes */ + union { + u_int32_t cfu_clump; /* fork's clump size in bytes (sys files only) */ + u_int64_t cfu_bytesread; /* bytes read from this fork */ + } cf_union; + u_int32_t cf_vblocks; /* virtual (unallocated) blocks */ + u_int32_t cf_blocks; /* total blocks used by this fork */ + struct HFSPlusExtentDescriptor cf_extents[8]; /* initial set of extents */ }; +#define cf_clump cf_union.cfu_clump +#define cf_bytesread cf_union.cfu_bytesread + /* * Catalog Node Entry @@ -131,6 +141,28 @@ struct cat_entrylist { struct cat_entry entry[MAXCATENTRIES]; /* array of entries */ }; +/* + * Catalog Operations Hint + * + * lower 16 bits: count of B-tree insert operations + * upper 16 bits: count of B-tree delete operations + * + */ +#define CAT_DELETE 0x00020000 +#define CAT_CREATE 0x00000002 +#define CAT_RENAME 0x00020002 +#define CAT_EXCHANGE 0x00020002 + +typedef u_int32_t catops_t; + +/* + * The size of cat_cookie_t must match the size of + * the nreserve struct (in BTreeNodeReserve.c). + */ +typedef struct cat_cookie_t { + char opaque[24]; +} cat_cookie_t; + /* * Catalog Interface * @@ -186,13 +218,31 @@ extern int cat_update ( struct hfsmount *hfsmp, extern int cat_getdirentries( struct hfsmount *hfsmp, struct cat_desc *descp, + int entrycnt, struct uio *uio, - int *eofflag); + int *eofflag, + u_long *cookies, + int ncookies); extern int cat_insertfilethread ( struct hfsmount *hfsmp, struct cat_desc *descp); +extern int cat_preflight( + struct hfsmount *hfsmp, + catops_t ops, + cat_cookie_t *cookie, + struct proc *p); + +extern void cat_postflight( + struct hfsmount *hfsmp, + cat_cookie_t *cookie, + struct proc *p); + +extern int cat_binarykeycompare( + HFSPlusCatalogKey *searchKey, + HFSPlusCatalogKey *trialKey); + #endif /* __APPLE_API_PRIVATE */ #endif /* KERNEL */ #endif /* __HFS_CATALOG__ */ diff --git a/bsd/hfs/hfs_chash.c b/bsd/hfs/hfs_chash.c index 7bf16e253..28722a659 100644 --- a/bsd/hfs/hfs_chash.c +++ b/bsd/hfs/hfs_chash.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2002-2003 Apple Computer, Inc. All rights reserved.
* * @APPLE_LICENSE_HEADER_START@ * @@ -140,7 +140,7 @@ loop: (void)tsleep((caddr_t)cp, PINOD, "hfs_chashget-2", 0); goto loop; } - if (cp->c_flag & C_NOEXISTS) + if (cp->c_flag & (C_NOEXISTS | C_DELETED)) continue; /* @@ -177,7 +177,7 @@ loop: */ if (wantrsrc && *rvpp == NULL && cp->c_rsrc_vp) { error = vget(cp->c_rsrc_vp, 0, p); - vput(*vpp); /* ref no longer needed */ + vrele(*vpp); /* ref no longer needed */ *vpp = NULL; if (error) goto loop; @@ -185,7 +185,7 @@ loop: } else if (!wantrsrc && *vpp == NULL && cp->c_vp) { error = vget(cp->c_vp, 0, p); - vput(*rvpp); /* ref no longer needed */ + vrele(*rvpp); /* ref no longer needed */ *rvpp = NULL; if (error) goto loop; @@ -205,11 +205,11 @@ __private_extern__ void hfs_chashinsert(struct cnode *cp) { - if (cp->c_fileid == 0) - panic("hfs_chashinsert: trying to insert file id 0"); - simple_lock(&hfs_chash_slock); - LIST_INSERT_HEAD(CNODEHASH(cp->c_dev, cp->c_fileid), cp, c_hash); - simple_unlock(&hfs_chash_slock); + if (cp->c_fileid != 0) { + simple_lock(&hfs_chash_slock); + LIST_INSERT_HEAD(CNODEHASH(cp->c_dev, cp->c_fileid), cp, c_hash); + simple_unlock(&hfs_chash_slock); + } } diff --git a/bsd/hfs/hfs_cnode.c b/bsd/hfs/hfs_cnode.c index 913454fdf..b644dced2 100644 --- a/bsd/hfs/hfs_cnode.c +++ b/bsd/hfs/hfs_cnode.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2002-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -66,6 +66,8 @@ hfs_inactive(ap) int forkcount = 0; int truncated = 0; int started_tr = 0, grabbed_lock = 0; + cat_cookie_t cookie; + int cat_reserve = 0; if (prtactive && vp->v_usecount != 0) vprint("hfs_inactive: pushing active", vp); @@ -76,7 +78,7 @@ hfs_inactive(ap) if (cp->c_mode == 0) goto out; - if (vp->v_mount->mnt_flag & MNT_RDONLY) + if (hfsmp->hfs_flags & HFS_READ_ONLY) goto out; if (cp->c_datafork) @@ -85,15 +87,14 @@ hfs_inactive(ap) ++forkcount; /* If needed, get rid of any fork's data for a deleted file */ - if ((cp->c_flag & C_DELETED) && - vp->v_type == VREG && - (VTOF(vp)->ff_blocks != 0)) { - error = VOP_TRUNCATE(vp, (off_t)0, IO_NDELAY, NOCRED, p); - truncated = 1; - // have to do this to prevent the lost ubc_info panic - SET(cp->c_flag, C_TRANSIT); + if ((vp->v_type == VREG) && (cp->c_flag & C_DELETED)) { + if (VTOF(vp)->ff_blocks != 0) { + error = VOP_TRUNCATE(vp, (off_t)0, IO_NDELAY, NOCRED, p); + if (error) + goto out; + truncated = 1; + } recycle = 1; - if (error) goto out; } /* @@ -102,13 +103,13 @@ hfs_inactive(ap) */ if ((cp->c_flag & C_DELETED) && (forkcount <= 1)) { /* - * Mark cnode in transit so that one can get this + * Mark cnode in transit so that no one can get this * cnode from cnode hash. */ SET(cp->c_flag, C_TRANSIT); cp->c_flag &= ~C_DELETED; cp->c_rdev = 0; - + // XXXdbg hfs_global_shared_lock_acquire(hfsmp); grabbed_lock = 1; @@ -120,6 +121,15 @@ hfs_inactive(ap) started_tr = 1; } + /* + * Reserve some space in the Catalog file. 
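+ *
+ * cat_preflight() stands in for the old BTCheckFreeSpace() calls:
+ * it reserves catalog b-tree nodes up front so the operation cannot
+ * run out of space halfway through.  The catops_t hint packs the
+ * expected b-tree traffic (per hfs_catalog.h: inserts in the low 16
+ * bits, deletes in the high 16):
+ *
+ *   CAT_DELETE = 0x00020000  ->  0 inserts, 2 deletes
+ *   CAT_CREATE = 0x00000002  ->  2 inserts, 0 deletes
+ *   CAT_RENAME = 0x00020002  ->  2 inserts, 2 deletes
+ *
+ * The reservation is handed back through cat_postflight() on every
+ * exit path, including errors.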
+ */ + if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) { + goto out; + } + cat_reserve = 1; + + /* Lock catalog b-tree */ error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); if (error) goto out; @@ -158,18 +168,20 @@ hfs_inactive(ap) hfs_volupdate(hfsmp, VOL_RMFILE, 0); } - /* Push any defered access times to disk */ - if (cp->c_flag & C_ATIMEMOD) { - cp->c_flag &= ~C_ATIMEMOD; - if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSPlusSigWord) - cp->c_flag |= C_MODIFIED; - } - if (cp->c_flag & (C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE)) { tv = time; + // if the only thing being updated is the access time + // then set the modified bit too so that update will + // flush it to disk. otherwise it'll get dropped. + if ((cp->c_flag & C_CHANGEMASK) == C_ACCESS) { + cp->c_flag |= C_MODIFIED; + } VOP_UPDATE(vp, &tv, &tv, 0); } out: + if (cat_reserve) + cat_postflight(hfsmp, &cookie, p); + // XXXdbg - have to do this because a goto could have come here if (started_tr) { journal_end_transaction(hfsmp->jnl); @@ -211,7 +223,12 @@ hfs_reclaim(ap) if (prtactive && vp->v_usecount != 0) vprint("hfs_reclaim(): pushing active", vp); - devvp = cp->c_devvp; /* For later releasing */ + /* + * Keep track of an inactive hot file. + */ + (void) hfs_addhotfile(vp); + + devvp = cp->c_devvp; /* For later releasing */ /* * Find file fork for this vnode (if any) @@ -224,6 +241,9 @@ hfs_reclaim(ap) } else if ((fp = cp->c_rsrcfork) && (cp->c_rsrc_vp == vp)) { cp->c_rsrcfork = NULL; cp->c_rsrc_vp = NULL; + if (VPARENT(vp) == cp->c_vp) { + cp->c_flag &= ~C_VPREFHELD; + } altfp = cp->c_datafork; } else { cp->c_vp = NULL; @@ -288,7 +308,7 @@ hfs_reclaim(ap) cp->c_desc.cd_nameptr = 0; cp->c_desc.cd_flags &= ~CD_HASBUF; cp->c_desc.cd_namelen = 0; - FREE(nameptr, M_TEMP); + remove_name(nameptr); } CLR(cp->c_flag, (C_ALLOC | C_TRANSIT)); if (ISSET(cp->c_flag, C_WALLOC) || ISSET(cp->c_flag, C_WTRANSIT)) @@ -333,8 +353,8 @@ hfs_getcnode(struct hfsmount *hfsmp, cnid_t cnid, struct cat_desc *descp, int wa cp = hfs_chashget(dev, cnid, wantrsrc, &vp, &rvp); if (cp != NULL) { /* hide open files that have been deleted */ - if ((hfsmp->hfs_private_metadata_dir != 0) - && (cp->c_parentcnid == hfsmp->hfs_private_metadata_dir) + if ((hfsmp->hfs_privdir_desc.cd_cnid != 0) + && (cp->c_parentcnid == hfsmp->hfs_privdir_desc.cd_cnid) && (cp->c_nlink == 0)) { retval = ENOENT; goto exit; @@ -393,6 +413,7 @@ hfs_getcnode(struct hfsmount *hfsmp, cnid_t cnid, struct cat_desc *descp, int wa cnattr.ca_fileid = kRootParID; cnattr.ca_nlink = 2; + cnattr.ca_entries = 1; cnattr.ca_mode = (S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO); } else { /* Lock catalog b-tree */ @@ -408,8 +429,9 @@ hfs_getcnode(struct hfsmount *hfsmp, cnid_t cnid, struct cat_desc *descp, int wa goto exit; /* Hide open files that have been deleted */ - if ((hfsmp->hfs_private_metadata_dir != 0) && - (cndesc.cd_parentcnid == hfsmp->hfs_private_metadata_dir)) { + if ((hfsmp->hfs_privdir_desc.cd_cnid != 0) && + (cndesc.cd_parentcnid == hfsmp->hfs_privdir_desc.cd_cnid) && + (cnattr.ca_nlink == 0)) { cat_releasedesc(&cndesc); retval = ENOENT; goto exit; @@ -426,14 +448,16 @@ hfs_getcnode(struct hfsmount *hfsmp, cnid_t cnid, struct cat_desc *descp, int wa && cndesc.cd_namelen > 0) { replace_desc(VTOC(new_vp), &cndesc); } + cat_releasedesc(&cndesc); } + exit: /* Release reference taken on opposite vnode (if any). 
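 *
 * (These vnodes are held referenced but not locked at this point,
 * so vrele(), which drops only the reference, is the right release;
 * vput() additionally unlocks and may only be applied to a locked
 * vnode.)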
*/ if (vp) - vput(vp); + vrele(vp); else if (rvp) - vput(rvp); + vrele(rvp); if (retval) { *vpp = NULL; @@ -445,7 +469,8 @@ done: if (vp == NULL) panic("hfs_getcnode: missing vp!"); - UBCINFOCHECK("hfs_getcnode", vp); + if (UBCISVALID(vp)) + UBCINFOCHECK("hfs_getcnode", vp); *vpp = vp; return (0); } @@ -478,12 +503,13 @@ hfs_getnewvnode(struct hfsmount *hfsmp, struct cnode *cp, int retval; dev_t dev; struct proc *p = current_proc(); - +#if 0 /* Bail when unmount is in progress */ if (mp->mnt_kern_flag & MNTK_UNMOUNT) { *vpp = NULL; return (EPERM); } +#endif #if !FIFO if (IFTOVT(attrp->ca_mode) == VFIFO) { @@ -502,6 +528,11 @@ hfs_getnewvnode(struct hfsmount *hfsmp, struct cnode *cp, SET(cp2->c_flag, C_ALLOC); cp2->c_cnid = descp->cd_cnid; cp2->c_fileid = attrp->ca_fileid; + if (cp2->c_fileid == 0) { + FREE_ZONE(cp2, sizeof(struct cnode), M_HFSNODE); + *vpp = NULL; + return (ENOENT); + } cp2->c_dev = dev; lockinit(&cp2->c_lock, PINOD, "cnode", 0, 0); (void) lockmgr(&cp2->c_lock, LK_EXCLUSIVE, (struct slock *)0, p); @@ -560,9 +591,9 @@ hfs_getnewvnode(struct hfsmount *hfsmp, struct cnode *cp, /* Release reference taken on opposite vnode (if any). */ if (rvp) - vput(rvp); + vrele(rvp); if (vp) - vput(vp); + vrele(vp); vp = new_vp; vp->v_ubcinfo = UBC_NOINFO; @@ -604,9 +635,7 @@ hfs_getnewvnode(struct hfsmount *hfsmp, struct cnode *cp, bzero(fp, sizeof(struct filefork)); fp->ff_cp = cp; if (forkp) - bcopy(forkp, &fp->ff_data, sizeof(HFSPlusForkData)); - if (fp->ff_clumpsize == 0) - fp->ff_clumpsize = HFSTOVCB(hfsmp)->vcbClpSiz; + bcopy(forkp, &fp->ff_data, sizeof(struct cat_fork)); rl_init(&fp->ff_invalidranges); if (wantrsrc) { if (cp->c_rsrcfork != NULL) @@ -632,7 +661,7 @@ hfs_getnewvnode(struct hfsmount *hfsmp, struct cnode *cp, vp->v_type = IFTOVT(cp->c_mode); /* Tag system files */ - if ((descp->cd_cnid < kHFSFirstUserCatalogNodeID) && (vp->v_type == VREG)) + if ((descp->cd_flags & CD_ISMETA) && (vp->v_type == VREG)) vp->v_flag |= VSYSTEM; /* Tag root directory */ if (cp->c_cnid == kRootDirID) @@ -673,6 +702,11 @@ hfs_getnewvnode(struct hfsmount *hfsmp, struct cnode *cp, #endif } + /* + * Stop tracking an active hot file. + */ + (void) hfs_removehotfile(vp); + /* Vnode is now initialized - see if anyone was waiting for it. */ CLR(cp->c_flag, C_ALLOC); if (ISSET(cp->c_flag, C_WALLOC)) { diff --git a/bsd/hfs/hfs_cnode.h b/bsd/hfs/hfs_cnode.h index eeaee0617..fc0852397 100644 --- a/bsd/hfs/hfs_cnode.h +++ b/bsd/hfs/hfs_cnode.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2002-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -47,27 +47,30 @@ struct filefork { struct cnode *ff_cp; /* cnode associated with this fork */ struct rl_head ff_invalidranges; /* Areas of disk that should read back as zeroes */ + long ff_evtonly_refs; /* number of vnode references used solely for events (O_EVTONLY) */ union { struct hfslockf *ffu_lockf; /* Head of byte-level lock list. 
*/ void *ffu_sysdata; /* private data for system files */ char *ffu_symlinkptr; /* symbolic link pathname */ } ff_un; struct cat_fork ff_data; - u_int32_t ff_unallocblocks; /* unallocated blocks (until cmap) */ }; +typedef struct filefork filefork_t; /* Aliases for common fields */ #define ff_size ff_data.cf_size #define ff_clumpsize ff_data.cf_clump +#define ff_bytesread ff_data.cf_bytesread #define ff_blocks ff_data.cf_blocks #define ff_extents ff_data.cf_extents +#define ff_unallocblocks ff_data.cf_vblocks + #define ff_symlinkptr ff_un.ffu_symlinkptr #define ff_lockf ff_un.ffu_lockf /* The btree code still needs these... */ #define fcbEOF ff_size -#define fcbClmpSize ff_clumpsize #define fcbExtents ff_extents #define fcbBTCBPtr ff_un.ffu_sysdata @@ -97,13 +100,16 @@ struct cnode { struct vnode *c_devvp; /* vnode for block I/O */ dev_t c_dev; /* cnode's device */ struct dquot *c_dquot[MAXQUOTAS]; /* cnode's quota info */ + struct klist c_knotes; /* knotes attached to this vnode */ cnid_t c_childhint; /* catalog hint for children */ struct cat_desc c_desc; /* cnode's descriptor */ struct cat_attr c_attr; /* cnode's attributes */ SLIST_HEAD(hfs_indexhead, hfs_index) c_indexlist; /* directory index list */ + long c_evtonly_refs; /* number of vnode references used solely for events (O_EVTONLY) */ struct filefork *c_datafork; /* cnode's data fork */ struct filefork *c_rsrcfork; /* cnode's rsrc fork */ }; +typedef struct cnode cnode_t; /* Aliases for common cnode fields */ #define c_cnid c_desc.cd_cnid @@ -131,23 +137,28 @@ struct cnode { /* Runtime cnode flags (kept in c_flag) */ -#define C_ACCESS 0x0001 /* Access time update request */ -#define C_CHANGE 0x0002 /* Change time update request */ -#define C_UPDATE 0x0004 /* Modification time update request */ -#define C_MODIFIED 0x0008 /* CNode has been modified */ -#define C_ATIMEMOD 0x0010 /* Access time has been modified */ +#define C_ACCESS 0x00001 /* Access time update request */ +#define C_CHANGE 0x00002 /* Change time update request */ +#define C_UPDATE 0x00004 /* Modification time update request */ +#define C_MODIFIED 0x00008 /* CNode has been modified */ + +#define C_RELOCATING 0x00010 /* CNode's fork is being relocated */ +#define C_NOEXISTS 0x00020 /* CNode has been deleted, catalog entry is gone */ +#define C_DELETED 0x00040 /* CNode has been marked to be deleted */ +#define C_HARDLINK 0x00080 /* CNode is a hard link */ -#define C_NOEXISTS 0x0020 /* CNode has been deleted, catalog entry is gone */ -#define C_DELETED 0x0040 /* CNode has been marked to be deleted */ -#define C_HARDLINK 0x0080 /* CNode is a hard link */ +#define C_ALLOC 0x00100 /* CNode is being allocated */ +#define C_WALLOC 0x00200 /* Waiting for allocation to finish */ +#define C_TRANSIT 0x00400 /* CNode is getting recycled */ +#define C_WTRANSIT 0x00800 /* Waiting for cnode getting recycled */ +#define C_NOBLKMAP 0x01000 /* CNode blocks cannot be mapped */ +#define C_WBLKMAP 0x02000 /* Waiting for block map */ -#define C_ALLOC 0x0100 /* CNode is being allocated */ -#define C_WALLOC 0x0200 /* Waiting for allocation to finish */ -#define C_TRANSIT 0x0400 /* CNode is getting recycled */ -#define C_WTRANSIT 0x0800 /* Waiting for cnode getting recycled */ +#define C_ZFWANTSYNC 0x04000 /* fsync requested and file has holes */ +#define C_VPREFHELD 0x08000 /* resource fork has done a vget() on c_vp (for its parent ptr) */ -#define C_RENAME 0x1000 /* CNode is being renamed */ -#define C_ZFWANTSYNC 0x2000 /* fsync requested and file has holes */ +#define C_FROMSYNC 0x10000 /* 
fsync was called from sync */ +#define C_FORCEUPDATE 0x20000 /* force the catalog entry update */ #define ZFTIMELIMIT (5 * 60) @@ -176,6 +187,8 @@ struct cnode { FTOC(fp)->c_rsrc_vp : \ FTOC(fp)->c_vp) +#define EVTONLYREFS(vp) ((vp->v_type == VREG) ? VTOF(vp)->ff_evtonly_refs : VTOC(vp)->c_evtonly_refs) + /* * Test for a resource fork */ @@ -189,23 +202,18 @@ struct cnode { */ #define C_TIMEMASK (C_ACCESS | C_CHANGE | C_UPDATE) -#define ATIME_ACCURACY 60 +#define C_CHANGEMASK (C_ACCESS | C_CHANGE | C_UPDATE | C_MODIFIED) + +#define ATIME_ACCURACY 1 +#define ATIME_ONDISK_ACCURACY 300 #define CTIMES(cp, t1, t2) { \ if ((cp)->c_flag & C_TIMEMASK) { \ /* \ - * If only the access time is changing then defer \ - * updating it on-disk util later (in hfs_inactive). \ - * If it was recently updated then skip the update. \ + * Only do the update if it is more than just \ + * the C_ACCESS field being updated. \ */ \ - if (((cp)->c_flag & (C_TIMEMASK | C_MODIFIED)) == C_ACCESS) { \ - if (((cp)->c_flag & C_ATIMEMOD) || \ - (t1)->tv_sec > ((cp)->c_atime + ATIME_ACCURACY)) { \ - (cp)->c_atime = (t1)->tv_sec; \ - (cp)->c_flag |= C_ATIMEMOD; \ - } \ - (cp)->c_flag &= ~C_ACCESS; \ - } else { \ + if (((cp)->c_flag & C_CHANGEMASK) != C_ACCESS) { \ if ((cp)->c_flag & C_ACCESS) { \ (cp)->c_atime = (t1)->tv_sec; \ } \ diff --git a/bsd/hfs/hfs_encodinghint.c b/bsd/hfs/hfs_encodinghint.c index 630779e24..9a42b1ba0 100644 --- a/bsd/hfs/hfs_encodinghint.c +++ b/bsd/hfs/hfs_encodinghint.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2001-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -34,16 +34,16 @@ #define CJK_CHINESE_SIMP 0x8 #define CJK_ALL 0xF -#define CJK_CHINESE (CJK_CHINESE_TRAD | CJK_CHINESE_SIMP) -#define CJK_KATAKANA (CJK_JAPAN | CJK_CHINESE_SIMP | CJK_KOREAN) +#define CJK_CHINESE (CJK_CHINESE_TRAD | CJK_CHINESE_SIMP) +#define CJK_KATAKANA (CJK_JAPAN) /* Remember the last unique CJK bit */ u_int8_t cjk_lastunique = 0; -/* CJK encoding bias */ +/* Encoding bias */ u_int32_t hfs_encodingbias = 0; - +int hfs_islatinbias = 0; /* Map CJK bits to Mac encoding */ u_int8_t cjk_encoding[] = { @@ -793,6 +793,14 @@ hfs_pickencoding(const u_int16_t *src, int len) cjkstate = CJK_ALL; continue; } + if (hfs_islatinbias && ch >= 0x0300 && ch <= 0x0329) { + guess = hfs_encodingbias; + continue; + } + if (ch <= 0x03CE && ch >= 0x0384) { + guess = kTextEncodingMacGreek; + continue; + } if (ch <= 0x0491 && ch >= 0x0401) { guess = kTextEncodingMacCyrillic; continue; @@ -806,6 +814,35 @@ hfs_pickencoding(const u_int16_t *src, int len) if (ch >= 0x0E00 && ch <= 0x0E5B) { return kTextEncodingMacThai; } + /* Catch a few Shift-JIS strays */ + if (guess == 0 || guess == kTextEncodingMacUnicode) { + if (ch == 0x2010 || ch == 0x2014 || ch == 0x2015 || ch == 0x2016) { + guess = kTextEncodingMacJapanese; + if ((cjkstate == 0) || (cjkstate & CJK_JAPAN)) + cjkstate = CJK_JAPAN; + else + cjkstate |= CJK_JAPAN; + continue; + } + if ((hfs_encodingbias == kTextEncodingMacJapanese) && + (ch == 0x00A2 || ch == 0x00A3 || ch == 0x00AC)) { + guess = kTextEncodingMacJapanese; + continue; + } + /* TM char depends on the Mac encoding used. 
*/ + if (ch == 0x2122) { + switch(hfs_encodingbias) { + case kTextEncodingMacJapanese: + case kTextEncodingMacChineseTrad: + case kTextEncodingMacKorean: + case kTextEncodingMacGreek: + case kTextEncodingMacThai: + case kTextEncodingMacChineseSimp: + guess = hfs_encodingbias; + break; + } + } + } if (guess == 0 && ch > 0x2122) { guess = kTextEncodingMacUnicode; } @@ -853,3 +890,32 @@ hfs_pickencoding(const u_int16_t *src, int len) } +__private_extern__ +u_int32_t +hfs_getencodingbias() +{ + return (hfs_encodingbias); +} + + +__private_extern__ +void +hfs_setencodingbias(u_int32_t bias) +{ + hfs_encodingbias = bias; + + switch (bias) { + case kTextEncodingMacRoman: + case kTextEncodingMacCentralEurRoman: + case kTextEncodingMacTurkish: + case kTextEncodingMacCroatian: + case kTextEncodingMacIcelandic: + case kTextEncodingMacRomanian: + hfs_islatinbias = 1; + break; + default: + hfs_islatinbias = 0; + break; + } +} + diff --git a/bsd/hfs/hfs_encodings.c b/bsd/hfs/hfs_encodings.c index fdf03bc70..323ed75ca 100644 --- a/bsd/hfs/hfs_encodings.c +++ b/bsd/hfs/hfs_encodings.c @@ -55,7 +55,7 @@ extern struct host realhost; #define MAX_HFS_UNICODE_CHARS (15*5) -int mac_roman_to_unicode(Str31 hfs_str, UniChar *uni_str, UInt32 maxCharLen, UInt32 *usedCharLen); +int mac_roman_to_unicode(const Str31 hfs_str, UniChar *uni_str, UInt32 maxCharLen, UInt32 *usedCharLen); static int unicode_to_mac_roman(UniChar *uni_str, UInt32 unicodeChars, Str31 hfs_str); @@ -202,7 +202,7 @@ hfs_relconverter(UInt32 encoding) encp = NULL; simple_unlock(&hfs_encoding_list_slock); - kmod_destroy(host_priv_self(), id); + kmod_destroy((host_priv_t) host_priv_self(), id); simple_lock(&hfs_encoding_list_slock); } break; @@ -614,7 +614,7 @@ static UniChar gHiBitCombUnicode[128] = { * Unicode output is fully decomposed */ int -mac_roman_to_unicode(Str31 hfs_str, UniChar *uni_str, +mac_roman_to_unicode(const Str31 hfs_str, UniChar *uni_str, UInt32 maxCharLen, UInt32 *unicodeChars) { const UInt8 *p; diff --git a/bsd/hfs/hfs_encodings.h b/bsd/hfs/hfs_encodings.h index d637fdaa2..38d24f3f0 100644 --- a/bsd/hfs/hfs_encodings.h +++ b/bsd/hfs/hfs_encodings.h @@ -32,10 +32,6 @@ #include #ifdef __APPLE_API_UNSTABLE -/* - * Sysctl value for HFS Unicode encoding matching. - */ -#define HFS_ENCODINGBIAS 1 /* encoding matching CJK bias */ #define CTL_HFS_NAMES { \ { 0, 0 }, \ @@ -55,7 +51,7 @@ * encoding conversion routines. 
*/ -typedef int (* hfs_to_unicode_func_t)(Str31 hfs_str, UniChar *uni_str, +typedef int (* hfs_to_unicode_func_t)(const Str31 hfs_str, UniChar *uni_str, UInt32 maxCharLen, UInt32 *usedCharLen); typedef int (* unicode_to_hfs_func_t)(UniChar *uni_str, UInt32 unicodeChars, diff --git a/bsd/hfs/hfs_endian.c b/bsd/hfs/hfs_endian.c index 47bb66a7f..c46c2295b 100644 --- a/bsd/hfs/hfs_endian.c +++ b/bsd/hfs/hfs_endian.c @@ -358,10 +358,32 @@ hfs_swap_HFSPlusBTInternalNode ( if (unswap) srcPtr[0] = SWAP_BE16 (srcPtr[0]); } + } else if (fileID > kHFSFirstUserCatalogNodeID) { + HotFileKey *srcKey; + UInt32 *srcRec; + + for (i = 0; i < srcDesc->numRecords; i++) { + srcKey = (HotFileKey *)((char *)src->buffer + srcOffs[i]); + + if (!unswap) + srcKey->keyLength = SWAP_BE16 (srcKey->keyLength); + srcRec = (u_int32_t *)((char *)srcKey + srcKey->keyLength + 2); + if (unswap) + srcKey->keyLength = SWAP_BE16 (srcKey->keyLength); + + /* Don't swap srcKey->forkType */ + /* Don't swap srcKey->pad */ + + srcKey->temperature = SWAP_BE32 (srcKey->temperature); + srcKey->fileID = SWAP_BE32 (srcKey->fileID); + + *((UInt32 *)srcRec) = SWAP_BE32 (*((UInt32 *)srcRec)); + } } else { panic ("%s unrecognized B-Tree type", "hfs_swap_BTNode:"); } + return (0); } diff --git a/bsd/hfs/hfs_endian.h b/bsd/hfs/hfs_endian.h index 96e121b6e..3996cd635 100644 --- a/bsd/hfs/hfs_endian.h +++ b/bsd/hfs/hfs_endian.h @@ -51,7 +51,7 @@ /* HFS is always big endian, no swapping needed */ #define SWAP_HFS_PLUS_FORK_DATA(__a) - #define SWAP_BT_NODE(__a, __b, __c) + #define SWAP_BT_NODE(__a, __b, __c, __d) /************************/ /* LITTLE ENDIAN Macros */ diff --git a/bsd/hfs/hfs_format.h b/bsd/hfs/hfs_format.h index 2bb4ae55b..acebb47d2 100644 --- a/bsd/hfs/hfs_format.h +++ b/bsd/hfs/hfs_format.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -25,6 +25,9 @@ #ifndef __HFS_FORMAT__ #define __HFS_FORMAT__ +#ifndef __HFSVOLUMES__ + +#include #include /* @@ -48,9 +51,11 @@ extern "C" { enum { kHFSSigWord = 0x4244, /* 'BD' in ASCII */ kHFSPlusSigWord = 0x482B, /* 'H+' in ASCII */ - kHFSJSigWord = 0x484a, /* 'HJ' in ASCII */ - kHFSPlusVersion = 0x0004, /* will change as format changes */ - /* version 4 shipped with Mac OS 8.1 */ + kHFSXSigWord = 0x4858, /* 'HX' in ASCII */ + + kHFSPlusVersion = 0x0004, /* 'H+' volumes are version 4 only */ + kHFSXVersion = 0x0005, /* 'HX' volumes start with version 5 */ + kHFSPlusMountVersion = 0x31302E30, /* '10.0' for Mac OS X */ kHFSJMountVersion = 0x4846534a /* 'HFSJ' for journaled HFS+ on OS X */ }; @@ -89,6 +94,7 @@ enum { }; +#ifndef __FILES__ /* Unicode strings are used for HFS Plus file and folder names */ struct HFSUniStr255 { u_int16_t length; /* number of unicode characters */ @@ -96,6 +102,7 @@ struct HFSUniStr255 { }; typedef struct HFSUniStr255 HFSUniStr255; typedef const HFSUniStr255 *ConstHFSUniStr255Param; +#endif /* __FILES__ */ enum { kHFSMaxVolumeNameChars = 27, @@ -228,6 +235,7 @@ enum { kHFSAllocationFileID = 6, /* File ID of the allocation file (HFS Plus only) */ kHFSStartupFileID = 7, /* File ID of the startup file (HFS Plus only) */ kHFSAttributesFileID = 8, /* File ID of the attribute file (HFS Plus only) */ + kHFSRepairCatalogFileID = 14, /* Used when rebuilding Catalog B-tree */ kHFSBogusExtentFileID = 15, /* Used for exchanging extents in extents file */ kHFSFirstUserCatalogNodeID = 16 }; @@ -458,7 +466,7 @@ enum { kHFSBootVolumeInconsistentBit = 11, /* boot volume is inconsistent (System 7.6 and later) */ kHFSCatalogNodeIDsReusedBit = 12, kHFSVolumeJournaledBit = 13, /* this volume has a journal on it */ - /* Bit 14 is reserved for future use */ + kHFSVolumeInconsistentBit = 14, /* serious inconsistencies detected at runtime */ kHFSVolumeSoftwareLockBit = 15, /* volume is locked by software */ kHFSVolumeHardwareLockMask = 1 << kHFSVolumeHardwareLockBit, @@ -468,6 +476,7 @@ enum { kHFSBootVolumeInconsistentMask = 1 << kHFSBootVolumeInconsistentBit, kHFSCatalogNodeIDsReusedMask = 1 << kHFSCatalogNodeIDsReusedBit, kHFSVolumeJournaledMask = 1 << kHFSVolumeJournaledBit, + kHFSVolumeInconsistentMask = 1 << kHFSVolumeInconsistentBit, kHFSVolumeSoftwareLockMask = 1 << kHFSVolumeSoftwareLockBit, kHFSMDBAttributesMask = 0x8380 }; @@ -509,6 +518,14 @@ struct HFSMasterDirectoryBlock { typedef struct HFSMasterDirectoryBlock HFSMasterDirectoryBlock; +#ifdef __APPLE_API_UNSTABLE +#define SET_HFS_TEXT_ENCODING(hint) \ + (0x656e6300 | ((hint) & 0xff)) +#define GET_HFS_TEXT_ENCODING(hint) \ + (((hint) & 0xffffff00) == 0x656e6300 ? (hint) & 0x000000ff : 0xffffffffU) +#endif /* __APPLE_API_UNSTABLE */ + + /* HFS Plus Volume Header - 512 bytes */ /* Stored at sector #2 (3rd sector) and second-to-last sector. 
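 *
 * In byte terms: the primary header lives at a fixed 1024-byte
 * offset from the start of the volume, and the alternate copy 1024
 * bytes before the end, i.e. on a volume of N 512-byte sectors the
 * headers sit at byte 1024 and at byte (N - 2) * 512.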
*/ struct HFSPlusVolumeHeader { @@ -516,7 +533,6 @@ struct HFSPlusVolumeHeader { u_int16_t version; /* == kHFSPlusVersion */ u_int32_t attributes; /* volume attributes */ u_int32_t lastMountedVersion; /* implementation version which last mounted volume */ -//XXXdbg u_int32_t reserved; /* reserved - initialized as zero */ u_int32_t journalInfoBlock; /* block addr of journal info (if volume is journaled, zero otherwise) */ u_int32_t createDate; /* date and time of volume creation */ @@ -596,7 +612,7 @@ struct BTHeaderRec { u_int16_t reserved1; /* unused */ u_int32_t clumpSize; /* reserved */ u_int8_t btreeType; /* reserved */ - u_int8_t reserved2; /* reserved */ + u_int8_t keyCompareType; /* Key string Comparison Type */ u_int32_t attributes; /* persistent attributes about the tree */ u_int32_t reserved3[16]; /* reserved */ }; @@ -609,6 +625,13 @@ enum { kBTVariableIndexKeysMask = 0x00000004 /* keys in index nodes are variable length */ }; + +/* Catalog Key Name Comparison Type */ +enum { + kHFSCaseFolding = 0xCF, /* case folding (case-insensitive) */ + kHFSBinaryCompare = 0xBC, /* binary compare (case-sensitive) */ +}; + /* JournalInfoBlock - Structure that describes where our journal lives */ struct JournalInfoBlock { u_int32_t flags; @@ -632,4 +655,8 @@ enum { } #endif +#else +#warning hfs_format.h is not compatible with HFSVolumes.h (include only one) +#endif /* __HFSVOLUMES__ */ + #endif /* __HFS_FORMAT__ */ diff --git a/bsd/hfs/hfs_hotfiles.c b/bsd/hfs/hfs_hotfiles.c new file mode 100644 index 000000000..5b0cd57a9 --- /dev/null +++ b/bsd/hfs/hfs_hotfiles.c @@ -0,0 +1,2156 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "hfscommon/headers/BTreeScanner.h" + + +#define HFC_DEBUG 0 +#define HFC_VERBOSE 0 + + + +/* + * Hot File List (runtime). + */ +typedef struct hotfileinfo { + u_int32_t hf_fileid; + u_int32_t hf_temperature; + u_int32_t hf_blocks; +} hotfileinfo_t; + +typedef struct hotfilelist { + u_int32_t hfl_magic; + u_int32_t hfl_version; + time_t hfl_duration; /* duration of sample period */ + int hfl_count; /* count of hot files recorded */ + int hfl_next; /* next file to move */ + int hfl_totalblocks; /* total hot file blocks */ + int hfl_reclaimblks; /* blocks to reclaim in HFV */ + u_int32_t hfl_spare[2]; + hotfileinfo_t hfl_hotfile[1]; /* array of hot files */ +} hotfilelist_t; + + +/* + * Hot File Entry (runtime). 
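+ *
+ * Entries are linked into a small in-memory binary tree (see
+ * hf_insert/hf_delete below) keyed on temperature, with file id
+ * breaking ties, so the coldest entry is cheap to find when one
+ * must be evicted.  Unused entries are chained into a free list
+ * through their right pointers, and the whole pool is allocated
+ * once, sized by hfc_maxfiles.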
+ */ +typedef struct hotfile_entry { + struct hotfile_entry *left; + struct hotfile_entry *right; + u_int32_t fileid; + u_int32_t temperature; + u_int32_t blocks; +} hotfile_entry_t; + +/* + * Hot File Recording Data (runtime). + */ +typedef struct hotfile_data { + struct hfsmount *hfsmp; + long refcount; + int activefiles; /* active number of hot files */ + u_int32_t threshold; + u_int32_t maxblocks; + hotfile_entry_t *rootentry; + hotfile_entry_t *freelist; + hotfile_entry_t *coldest; + hotfile_entry_t entries[1]; +} hotfile_data_t; + + + +/* + * Hot File Data recording functions (in-memory binary tree). + */ +static void hf_insert (hotfile_data_t *, hotfile_entry_t *); +static void hf_delete (hotfile_data_t *, u_int32_t, u_int32_t); +static hotfile_entry_t * hf_lookup (hotfile_data_t *, u_int32_t, u_int32_t); +static hotfile_entry_t * hf_coldest (hotfile_data_t *); +static hotfile_entry_t * hf_getnewentry (hotfile_data_t *); +static int hf_getsortedlist (hotfile_data_t *, hotfilelist_t *); +static void hf_printtree (hotfile_entry_t *); + +/* + * Hot File misc support functions. + */ +static int hotfiles_collect (struct hfsmount *, struct proc *); +static int hotfiles_age (struct hfsmount *, struct proc *); +static int hotfiles_adopt (struct hfsmount *, struct proc *); +static int hotfiles_evict (struct hfsmount *, struct proc *); +static int hotfiles_refine (struct hfsmount *, struct proc *); +static int hotextents(struct hfsmount *, HFSPlusExtentDescriptor *); + +/* + * Hot File Cluster B-tree (on disk) functions. + */ +static int hfc_btree_create (struct hfsmount *, int, int); +static int hfc_btree_open (struct hfsmount *, struct vnode **); +static int hfc_btree_close (struct hfsmount *, struct vnode *); +static int hfc_comparekeys (HotFileKey *, HotFileKey *); + + +char hfc_tag[] = "CLUSTERED HOT FILES B-TREE "; + + +/* + *======================================================================== + * HOT FILE INTERFACE ROUTINES + *======================================================================== + */ + +/* + * Start recording the hottest files on a file system. + * + */ +__private_extern__ +int +hfs_recording_start(struct hfsmount *hfsmp, struct proc *p) +{ + hotfile_data_t *hotdata; + int maxentries; + size_t size; + int i; + int error; + + if ((hfsmp->hfs_flags & HFS_READ_ONLY) || + (hfsmp->jnl == NULL) || + (hfsmp->hfs_flags & HFS_METADATA_ZONE) == 0) { + return (EPERM); + } + if (HFSTOVCB(hfsmp)->freeBlocks < (2 * hfsmp->hfs_hotfile_maxblks)) { + return (ENOSPC); + } + if (hfsmp->hfc_stage != HFC_IDLE) { + return (EBUSY); + } + hfsmp->hfc_stage = HFC_BUSY; + + /* + * Dump previous recording data. + */ + if (hfsmp->hfc_recdata) { + void * tmp; + + tmp = hfsmp->hfc_recdata; + hfsmp->hfc_recdata = NULL; + FREE(tmp, M_TEMP); + } + + /* + * On first startup check for suspended recording.
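+ *
+ * A suspended session leaves a HotFilesInfo block in the B-tree
+ * file's user-data area (written by hfs_recording_suspend).  When
+ * the magic matches and time remains, the sample picks up where it
+ * left off:
+ *
+ *   hfc_timeout  = saved timeleft + time.tv_sec
+ *   hfc_timebase = saved timebase
+ *
+ * Otherwise a fresh sample period is started.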
+ */ + if (hfsmp->hfc_timebase == 0 && + hfc_btree_open(hfsmp, &hfsmp->hfc_filevp) == 0) { + HotFilesInfo hotfileinfo; + + if ((BTGetUserData(VTOF(hfsmp->hfc_filevp), &hotfileinfo, + sizeof(hotfileinfo)) == 0) && + (SWAP_BE32 (hotfileinfo.magic) == HFC_MAGIC) && + (SWAP_BE32 (hotfileinfo.timeleft) > 0) && + (SWAP_BE32 (hotfileinfo.timebase) > 0)) { + hfsmp->hfc_maxfiles = SWAP_BE32 (hotfileinfo.maxfilecnt); + hfsmp->hfc_timeout = SWAP_BE32 (hotfileinfo.timeleft) + time.tv_sec; + hfsmp->hfc_timebase = SWAP_BE32 (hotfileinfo.timebase); +#if HFC_VERBOSE + printf("HFS: resume recording hot files (%d left)\n", SWAP_BE32 (hotfileinfo.timeleft)); +#endif + } else { + hfsmp->hfc_maxfiles = HFC_DEFAULT_FILE_COUNT; + hfsmp->hfc_timebase = time.tv_sec + 1; + hfsmp->hfc_timeout = hfsmp->hfc_timebase + HFC_DEFAULT_DURATION; + } + (void) hfc_btree_close(hfsmp, hfsmp->hfc_filevp); + hfsmp->hfc_filevp = NULL; + } else { + struct cat_attr cattr; + u_int32_t cnid; + + /* + * Make sure a btree file exists. + */ + cnid = GetFileInfo(HFSTOVCB(hfsmp), kRootDirID, HFC_FILENAME, &cattr, NULL); + if ((cnid == 0) && + !S_ISREG(cattr.ca_mode) && + (error = hfc_btree_create(hfsmp, HFSTOVCB(hfsmp)->blockSize, HFC_DEFAULT_FILE_COUNT))) { + hfsmp->hfc_stage = HFC_IDLE; + wakeup((caddr_t)&hfsmp->hfc_stage); + return (error); + } +#if HFC_VERBOSE + printf("HFS: begin recording hot files\n"); +#endif + hfsmp->hfc_maxfiles = HFC_DEFAULT_FILE_COUNT; + hfsmp->hfc_timeout = time.tv_sec + HFC_DEFAULT_DURATION; + + /* Reset time base. */ + if (hfsmp->hfc_timebase == 0) { + hfsmp->hfc_timebase = time.tv_sec + 1; + } else { + u_int32_t cumulativebase; + u_int32_t oldbase = hfsmp->hfc_timebase; + + cumulativebase = hfsmp->hfc_timeout - (HFC_CUMULATIVE_CYCLES * HFC_DEFAULT_DURATION); + hfsmp->hfc_timebase = MAX(hfsmp->hfc_timebase, cumulativebase); + } + } + + if ((hfsmp->hfc_maxfiles == 0) || + (hfsmp->hfc_maxfiles > HFC_MAXIMUM_FILE_COUNT)) { + hfsmp->hfc_maxfiles = HFC_DEFAULT_FILE_COUNT; + } + maxentries = hfsmp->hfc_maxfiles; + + size = sizeof(hotfile_data_t) + (maxentries * sizeof(hotfile_entry_t)); + MALLOC(hotdata, hotfile_data_t *, size, M_TEMP, M_WAITOK); + bzero(hotdata, size); + + for (i = 1; i < maxentries; i++) + hotdata->entries[i-1].right = &hotdata->entries[i]; + + hotdata->freelist = &hotdata->entries[0]; + /* + * Establish minimum temperature and maximum file size. + */ + hotdata->threshold = HFC_MINIMUM_TEMPERATURE; + hotdata->maxblocks = HFC_MAXIMUM_FILESIZE / HFSTOVCB(hfsmp)->blockSize; + hotdata->hfsmp = hfsmp; + + hfsmp->hfc_recdata = hotdata; +out: + hfsmp->hfc_stage = HFC_RECORDING; + wakeup((caddr_t)&hfsmp->hfc_stage); + return (0); +} + +/* + * Stop recording the hottest files on a file system. + */ +__private_extern__ +int +hfs_recording_stop(struct hfsmount *hfsmp, struct proc *p) +{ + hotfile_data_t *hotdata; + hotfilelist_t *listp; + size_t size; + enum hfc_stage newstage = HFC_IDLE; + void * tmp; + int error; + + + if (hfsmp->hfc_stage != HFC_RECORDING) + return (EPERM); + + hotfiles_collect(hfsmp, p); + + if (hfsmp->hfc_stage != HFC_RECORDING) + return (0); + + hfsmp->hfc_stage = HFC_BUSY; + + /* + * Convert hot file data into a simple file id list....
+ * + * then dump the sample data + */ +#if HFC_VERBOSE + printf("HFS: end of hot file recording\n"); +#endif + hotdata = (hotfile_data_t *)hfsmp->hfc_recdata; + if (hotdata == NULL) + return (0); + hfsmp->hfc_recdata = NULL; + hfsmp->hfc_stage = HFC_EVALUATION; + wakeup((caddr_t)&hfsmp->hfc_stage); + +#if HFC_VERBOSE + printf(" curentries: %d\n", hotdata->activefiles); +#endif + /* + * If no hot files recorded then we're done. + */ + if (hotdata->rootentry == NULL) { + error = 0; + goto out; + } + + /* Open the B-tree file for writing... */ + if (hfsmp->hfc_filevp) + panic("hfs_recording_stop: hfc_filevp exists (vp = 0x%08x)", hfsmp->hfc_filevp); + + error = hfc_btree_open(hfsmp, &hfsmp->hfc_filevp); + if (error) { + goto out; + } + + /* + * Age the previous set of clustered hot files. + */ + error = hotfiles_age(hfsmp, p); + if (error) { + (void) hfc_btree_close(hfsmp, hfsmp->hfc_filevp); + hfsmp->hfc_filevp = NULL; + goto out; + } + + /* + * Create a sorted list of hottest files. + */ + size = sizeof(hotfilelist_t); + size += sizeof(hotfileinfo_t) * (hotdata->activefiles - 1); + MALLOC(listp, hotfilelist_t *, size, M_TEMP, M_WAITOK); + bzero(listp, size); + + hf_getsortedlist(hotdata, listp); + listp->hfl_duration = time.tv_sec - hfsmp->hfc_timebase; + hfsmp->hfc_recdata = listp; + + /* + * Account for duplicates. + */ + error = hotfiles_refine(hfsmp, p); + if (error) { + (void) hfc_btree_close(hfsmp, hfsmp->hfc_filevp); + hfsmp->hfc_filevp = NULL; + goto out; + } + + /* + * Compute the amount of space to reclaim... + */ + if (listp->hfl_totalblocks > hfsmp->hfs_hotfile_freeblks) { + listp->hfl_reclaimblks = + MIN(listp->hfl_totalblocks, hfsmp->hfs_hotfile_maxblks) - + hfsmp->hfs_hotfile_freeblks; +#if HFC_VERBOSE + printf("hfs_recording_stop: need to reclaim %d blocks\n", listp->hfl_reclaimblks); +#endif + if (listp->hfl_reclaimblks) + newstage = HFC_EVICTION; + else + newstage = HFC_ADOPTION; + } else { + newstage = HFC_ADOPTION; + } + + if (newstage == HFC_ADOPTION && listp->hfl_totalblocks == 0) { + (void) hfc_btree_close(hfsmp, hfsmp->hfc_filevp); + hfsmp->hfc_filevp = NULL; + newstage = HFC_IDLE; + } +out: +#if HFC_VERBOSE + if (newstage == HFC_EVICTION) + printf("HFS: evicting coldest files\n"); + else if (newstage == HFC_ADOPTION) + printf("HFS: adopting hottest files\n"); +#endif + FREE(hotdata, M_TEMP); + + hfsmp->hfc_stage = newstage; + wakeup((caddr_t)&hfsmp->hfc_stage); + return (error); +} + +/* + * Suspend recording the hottest files on a file system.
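+ *
+ * Suspension (typically at unmount) persists the session into the
+ * Hot File B-tree's user-data area inside a journaled transaction.
+ * The HotFilesInfo fields (magic, timebase, seconds left in the
+ * sample, thresholds) are stored big-endian via SWAP_BE32 to match
+ * the B-tree's on-disk byte order, so hfs_recording_start() can
+ * resume the session on a later mount.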
+ */ +__private_extern__ +int +hfs_recording_suspend(struct hfsmount *hfsmp, struct proc *p) +{ + HotFilesInfo hotfileinfo; + hotfile_data_t *hotdata; + int error; + + if (hfsmp->hfc_stage != HFC_RECORDING) + return (0); + + hotdata = (hotfile_data_t *)hfsmp->hfc_recdata; + if (hotdata == NULL) { + hfsmp->hfc_stage = HFC_DISABLED; + return (0); + } + hfsmp->hfc_stage = HFC_BUSY; + +#if HFC_VERBOSE + printf("HFS: suspend hot file recording\n"); +#endif + error = hfc_btree_open(hfsmp, &hfsmp->hfc_filevp); + if (error) { + printf("hfs_recording_suspend: err %d opening btree\n", error); + goto out; + } + + hfs_global_shared_lock_acquire(hfsmp); + if (hfsmp->jnl) { + if (journal_start_transaction(hfsmp->jnl) != 0) { + hfs_global_shared_lock_release(hfsmp); + error = EINVAL; + goto out; + } + } + vn_lock(hfsmp->hfc_filevp, LK_EXCLUSIVE | LK_RETRY, p); + + hotfileinfo.magic = SWAP_BE32 (HFC_MAGIC); + hotfileinfo.version = SWAP_BE32 (HFC_VERSION); + hotfileinfo.duration = SWAP_BE32 (HFC_DEFAULT_DURATION); + hotfileinfo.timebase = SWAP_BE32 (hfsmp->hfc_timebase); + hotfileinfo.timeleft = SWAP_BE32 (hfsmp->hfc_timeout - time.tv_sec); + hotfileinfo.threshold = SWAP_BE32 (hotdata->threshold); + hotfileinfo.maxfileblks = SWAP_BE32 (hotdata->maxblocks); + hotfileinfo.maxfilecnt = SWAP_BE32 (HFC_DEFAULT_FILE_COUNT); + strcpy(hotfileinfo.tag, hfc_tag); + (void) BTSetUserData(VTOF(hfsmp->hfc_filevp), &hotfileinfo, sizeof(hotfileinfo)); + + (void) VOP_UNLOCK(hfsmp->hfc_filevp, 0, p); + if (hfsmp->jnl) { + journal_end_transaction(hfsmp->jnl); + } + hfs_global_shared_lock_release(hfsmp); + + (void) hfc_btree_close(hfsmp, hfsmp->hfc_filevp); + hfsmp->hfc_filevp = NULL; +out: + FREE(hotdata, M_TEMP); + + hfsmp->hfc_stage = HFC_DISABLED; + wakeup((caddr_t)&hfsmp->hfc_stage); + return (error); +} + +/* + * Abort a hot file recording session. + */ +__private_extern__ +int +hfs_recording_abort(struct hfsmount *hfsmp, struct proc *p) +{ + void * tmp; + + if (hfsmp->hfc_stage == HFC_DISABLED) + return (0); + + if (hfsmp->hfc_stage == HFC_BUSY) { + (void) tsleep((caddr_t)&hfsmp->hfc_stage, PINOD, "hfs_recording_abort", 0); + } + hfsmp->hfc_stage = HFC_BUSY; + + printf("HFS: terminate hot file recording\n"); + + if (hfsmp->hfc_recdata) { + tmp = hfsmp->hfc_recdata; + hfsmp->hfc_recdata = NULL; + FREE(tmp, M_TEMP); + } + hfsmp->hfc_stage = HFC_DISABLED; + wakeup((caddr_t)&hfsmp->hfc_stage); + return (0); +} + +/* + * + */ +__private_extern__ +int +hfs_recording_init(struct hfsmount *hfsmp, struct proc *p) +{ + CatalogKey * keyp; + CatalogRecord * datap; + u_int32_t dataSize; + HFSPlusCatalogFile *filep; + BTScanState scanstate; + BTreeIterator * iterator; + FSBufferDescriptor record; + HotFileKey * key; + filefork_t * filefork; + u_int32_t data; + struct cat_attr cattr; + u_int32_t cnid; + int error = 0; + + int inserted = 0; /* debug variables */ + int filecount = 0; + + /* + * If the Hot File btree exists then metadata zone is ready. + */ + cnid = GetFileInfo(HFSTOVCB(hfsmp), kRootDirID, HFC_FILENAME, &cattr, NULL); + if (cnid != 0 && S_ISREG(cattr.ca_mode)) { + if (hfsmp->hfc_stage == HFC_DISABLED) + hfsmp->hfc_stage = HFC_IDLE; + return (0); + } + /* + * For now, only the boot volume is supported. + */ + if ((HFSTOVFS(hfsmp)->mnt_flag & MNT_ROOTFS) == 0) { + hfsmp->hfs_flags &= ~HFS_METADATA_ZONE; + return (EPERM); + } + error = hfc_btree_create(hfsmp, HFSTOVCB(hfsmp)->blockSize, HFC_DEFAULT_FILE_COUNT); + if (error) { + return (error); + } + /* + * Open the Hot File B-tree file for writing. 
+ */ + if (hfsmp->hfc_filevp) + panic("hfs_recording_init: hfc_filevp exists (vp = 0x%08x)", hfsmp->hfc_filevp); + error = hfc_btree_open(hfsmp, &hfsmp->hfc_filevp); + if (error) { + return (error); + } + MALLOC(iterator, BTreeIterator *, sizeof(*iterator), M_TEMP, M_WAITOK); + bzero(iterator, sizeof(*iterator)); + key = (HotFileKey*) &iterator->key; + key->keyLength = HFC_KEYLENGTH; + + record.bufferAddress = &data; + record.itemSize = sizeof(u_int32_t); + record.itemCount = 1; +#if HFC_VERBOSE + printf("Evaluating space for \"%s\" metadata zone...\n", HFSTOVCB(hfsmp)->vcbVN); +#endif + /* + * Get ready to scan the Catalog file. + */ + error = BTScanInitialize(VTOF(HFSTOVCB(hfsmp)->catalogRefNum), 0, 0, 0, + kCatSearchBufferSize, &scanstate); + if (error) { + printf("hfs_recording_init: err %d BTScanInit\n", error); + goto out2; + } + + /* + * The writes to Hot File B-tree file are journaled. + */ + hfs_global_shared_lock_acquire(hfsmp); + if (hfsmp->jnl) { + if (journal_start_transaction(hfsmp->jnl) != 0) { + hfs_global_shared_lock_release(hfsmp); + error = EINVAL; + goto out1; + } + } + vn_lock(hfsmp->hfc_filevp, LK_EXCLUSIVE | LK_RETRY, p); + filefork = VTOF(hfsmp->hfc_filevp); + + /* + * Visit all the catalog btree leaf records. + */ + for (;;) { + error = BTScanNextRecord(&scanstate, 0, (void **)&keyp, (void **)&datap, &dataSize); + if (error) { + if (error == btNotFound) + error = 0; + else + printf("hfs_recording_init: err %d BTScanNext\n", error); + break; + } + if ((datap->recordType != kHFSPlusFileRecord) || + (dataSize != sizeof(HFSPlusCatalogFile))) { + continue; + } + filep = (HFSPlusCatalogFile *)datap; + filecount++; + if (filep->dataFork.totalBlocks == 0) { + continue; + } + /* + * Any file that has blocks inside the hot file + * space is recorded for later eviction. + * + * For now, resource forks are ignored. + */ + if (!hotextents(hfsmp, &filep->dataFork.extents[0])) { + continue; + } + cnid = filep->fileID; + + /* Skip over journal files. */ + if (cnid == hfsmp->hfs_jnlfileid || cnid == hfsmp->hfs_jnlinfoblkid) { + continue; + } + /* + * XXX - need to skip quota files as well. + */ + + /* Insert a hot file entry. */ + key->keyLength = HFC_KEYLENGTH; + key->temperature = HFC_MINIMUM_TEMPERATURE; + key->fileID = cnid; + key->forkType = 0; + data = 0x3f3f3f3f; + error = BTInsertRecord(filefork, iterator, &record, sizeof(data)); + if (error) { + printf("hfs_recording_init: BTInsertRecord failed %d (fileid %d)\n", error, key->fileID); + error = MacToVFSError(error); + break; + } + + /* Insert the corresponding thread record. 
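+ *
+ * Each hot file is tracked by two records, echoing the catalog's
+ * file/thread record pairing:
+ *
+ *   (temperature, fileID)   ->  32-bit filler   (primary record)
+ *   (HFC_LOOKUPTAG, fileID) ->  temperature     (thread record)
+ *
+ * The primary keeps the tree ordered by heat; the thread record
+ * lets a file's current temperature be found from its id alone,
+ * which is how hotfiles_refine() re-keys an entry when its
+ * temperature changes.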
*/ + key->keyLength = HFC_KEYLENGTH; + key->temperature = HFC_LOOKUPTAG; + key->fileID = cnid; + key->forkType = 0; + data = HFC_MINIMUM_TEMPERATURE; + error = BTInsertRecord(filefork, iterator, &record, sizeof(data)); + if (error) { + printf("hfs_recording_init: BTInsertRecord failed %d (fileid %d)\n", error, key->fileID); + error = MacToVFSError(error); + break; + } + inserted++; + } + (void) BTFlushPath(filefork); + (void) VOP_UNLOCK(hfsmp->hfc_filevp, 0, p); + + if (hfsmp->jnl) { + journal_end_transaction(hfsmp->jnl); + } + hfs_global_shared_lock_release(hfsmp); +#if HFC_VERBOSE + printf("%d files identified out of %d\n", inserted, filecount); +#endif + +out1: + (void) BTScanTerminate(&scanstate, &data, &data, &data); +out2: + FREE(iterator, M_TEMP); + if (hfsmp->hfc_filevp) { + (void) hfc_btree_close(hfsmp, hfsmp->hfc_filevp); + hfsmp->hfc_filevp = NULL; + } + if (error == 0) + hfsmp->hfc_stage = HFC_IDLE; + + return (error); +} + +/* + * Use sync to perform ocassional background work. + */ +__private_extern__ +int +hfs_hotfilesync(struct hfsmount *hfsmp, struct proc *p) +{ + if ((HFSTOVFS(hfsmp)->mnt_kern_flag & MNTK_UNMOUNT) == 0 && hfsmp->hfc_stage) { + switch (hfsmp->hfc_stage) { + case HFC_IDLE: + (void) hfs_recording_start(hfsmp, p); + break; + + case HFC_RECORDING: + if (time.tv_sec > hfsmp->hfc_timeout) + (void) hfs_recording_stop(hfsmp, p); + break; + + case HFC_EVICTION: + (void) hotfiles_evict(hfsmp, p); + break; + + case HFC_ADOPTION: + (void) hotfiles_adopt(hfsmp, p); + break; + } + } + return (0); +} + +/* + * Add a hot file to the recording list. + * + * This can happen when a hot file gets reclaimed or at the + * end of the recording period for any active hot file. + * + * NOTE: Since both the data and resource fork can be hot, + * there can be two entries for the same file id. + * + */ +__private_extern__ +int +hfs_addhotfile(struct vnode *vp) +{ + hotfile_data_t *hotdata; + hotfile_entry_t *entry; + hfsmount_t *hfsmp; + cnode_t *cp; + filefork_t *ffp; + u_int32_t temperature; + + hfsmp = VTOHFS(vp); + if (hfsmp->hfc_stage != HFC_RECORDING) + return (0); + + if (!(vp->v_type == VREG || vp->v_type == VLNK) || + (vp->v_flag & (VSYSTEM | VSWAP))) { + return (0); + } + /* Skip resource forks for now. */ + if (VNODE_IS_RSRC(vp)) { + return (0); + } + if ((hotdata = (hotfile_data_t *)hfsmp->hfc_recdata) == NULL) { + return (0); + } + ffp = VTOF(vp); + cp = VTOC(vp); + + if ((ffp->ff_bytesread == 0) || + (ffp->ff_blocks == 0) || + (ffp->ff_blocks > hotdata->maxblocks) || + (cp->c_flag & (C_DELETED | C_NOEXISTS)) || + (cp->c_flags & UF_NODUMP) || + (cp->c_atime < hfsmp->hfc_timebase)) { + return (0); + } + + temperature = ffp->ff_bytesread / ffp->ff_size; + if (temperature < hotdata->threshold) { + return (0); + } + /* + * If there is room or this file is hotter than + * the coldest one then add it to the list. + * + */ + if ((hotdata->activefiles < hfsmp->hfc_maxfiles) || + (hotdata->coldest == NULL) || + (temperature > hotdata->coldest->temperature)) { + ++hotdata->refcount; + entry = hf_getnewentry(hotdata); + entry->temperature = temperature; + entry->fileid = cp->c_fileid; + entry->blocks = ffp->ff_blocks; + hf_insert(hotdata, entry); + --hotdata->refcount; + } + + return (0); +} + +/* + * Remove a hot file to the recording list. + * + * This can happen when a hot file becomes + * an active vnode (active hot files are + * not kept in the recording list until the + * end of the recording period). 
+ * + */ +__private_extern__ +int +hfs_removehotfile(struct vnode *vp) +{ + hotfile_data_t *hotdata; + hfsmount_t *hfsmp; + cnode_t *cp; + filefork_t *ffp; + u_int32_t temperature; + + hfsmp = VTOHFS(vp); + if (hfsmp->hfc_stage != HFC_RECORDING) + return (0); + + if (!(vp->v_type == VREG || vp->v_type == VLNK) || + (vp->v_flag & (VSYSTEM | VSWAP))) { + return (0); + } + if ((hotdata = (hotfile_data_t *)hfsmp->hfc_recdata) == NULL) + return (0); + + ffp = VTOF(vp); + cp = VTOC(vp); + + if ((ffp->ff_bytesread == 0) || (ffp->ff_blocks == 0) || + (cp->c_atime < hfsmp->hfc_timebase)) { + return (0); + } + + temperature = ffp->ff_bytesread / ffp->ff_size; + if (temperature < hotdata->threshold) + return (0); + + if (hotdata->coldest && (temperature >= hotdata->coldest->temperature)) { + ++hotdata->refcount; + hf_delete(hotdata, VTOC(vp)->c_fileid, temperature); + --hotdata->refcount; + } + + return (0); +} + + +/* + *======================================================================== + * HOT FILE MAINTENANCE ROUTINES + *======================================================================== + */ + +/* + * Add all active hot files to the recording list. + */ +static int +hotfiles_collect(struct hfsmount *hfsmp, struct proc *p) +{ + struct mount *mp = HFSTOVFS(hfsmp); + struct vnode *nvp, *vp; + struct cnode *cp; + int error; + + if (vfs_busy(mp, LK_NOWAIT, 0, p)) + return (0); +loop: + simple_lock(&mntvnode_slock); + for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) { + if (vp->v_mount != mp) { + simple_unlock(&mntvnode_slock); + goto loop; + } + simple_lock(&vp->v_interlock); + nvp = vp->v_mntvnodes.le_next; + + if ((vp->v_flag & VSYSTEM) || + !(vp->v_type == VREG || vp->v_type == VLNK)) { + simple_unlock(&vp->v_interlock); + continue; + } + + cp = VTOC(vp); + if (cp == NULL || vp->v_flag & (VXLOCK|VORECLAIM)) { + simple_unlock(&vp->v_interlock); + continue; + } + + simple_unlock(&mntvnode_slock); + error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p); + if (error) { + if (error == ENOENT) + goto loop; + simple_lock(&mntvnode_slock); + continue; + } + (void) hfs_addhotfile(vp); + vput(vp); + + simple_lock(&mntvnode_slock); + } + + simple_unlock(&mntvnode_slock); + + vfs_unbusy(mp, p); + + return (0); +} + + +/* + * Update the data of a btree record + * This is called from within BTUpdateRecord. + */ +static int +update_callback(const HotFileKey *key, u_int32_t *data, u_int16_t datalen, u_int32_t *state) +{ + if (key->temperature == HFC_LOOKUPTAG) + *data = *state; + return (0); +} + +/* + * Identify files already in hot area. 
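+ *
+ * Because b-tree keys are immutable, "refining" an entry whose
+ * temperature has changed is a delete/insert pair: the record is
+ * deleted under its old key, e.g. (16, fileID), and re-inserted under
+ * the new one, e.g. (48, fileID) (illustrative temperatures), while
+ * the (HFC_LOOKUPTAG, fileID) thread record is updated in place
+ * through update_callback() above.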
+ */ +static int +hotfiles_refine(struct hfsmount *hfsmp, struct proc *p) +{ + BTreeIterator * iterator; + struct mount *mp; + struct vnode *vp; + filefork_t * filefork; + hotfilelist_t *listp; + FSBufferDescriptor record; + HotFileKey * key; + u_int32_t data; + int i; + int error = 0; + + + if ((listp = (hotfilelist_t *)hfsmp->hfc_recdata) == NULL) + return (0); + + mp = HFSTOVFS(hfsmp); + + MALLOC(iterator, BTreeIterator *, sizeof(*iterator), M_TEMP, M_WAITOK); + bzero(iterator, sizeof(*iterator)); + key = (HotFileKey*) &iterator->key; + + record.bufferAddress = &data; + record.itemSize = sizeof(u_int32_t); + record.itemCount = 1; + + hfs_global_shared_lock_acquire(hfsmp); + if (hfsmp->jnl) { + if (journal_start_transaction(hfsmp->jnl) != 0) { + hfs_global_shared_lock_release(hfsmp); + error = EINVAL; + goto out; + } + } + vn_lock(hfsmp->hfc_filevp, LK_EXCLUSIVE | LK_RETRY, p); + filefork = VTOF(hfsmp->hfc_filevp); + + for (i = 0; i < listp->hfl_count; ++i) { + /* + * Check if entry (thread) is already in hot area. + */ + key->keyLength = HFC_KEYLENGTH; + key->temperature = HFC_LOOKUPTAG; + key->fileID = listp->hfl_hotfile[i].hf_fileid; + key->forkType = 0; + (void) BTInvalidateHint(iterator); + if (BTSearchRecord(filefork, iterator, &record, NULL, iterator) != 0) { + continue; /* not in hot area, so skip */ + } + + /* + * Update thread entry with latest temperature. + */ + error = BTUpdateRecord(filefork, iterator, + (IterateCallBackProcPtr)update_callback, + &listp->hfl_hotfile[i].hf_temperature); + if (error) { + printf("hotfiles_refine: BTUpdateRecord failed %d (file %d)\n", error, key->fileID); + error = MacToVFSError(error); + // break; + } + /* + * Re-key entry with latest temperature. + */ + key->keyLength = HFC_KEYLENGTH; + key->temperature = data; + key->fileID = listp->hfl_hotfile[i].hf_fileid; + key->forkType = 0; + /* Pick up record data. */ + (void) BTInvalidateHint(iterator); + (void) BTSearchRecord(filefork, iterator, &record, NULL, iterator); + error = BTDeleteRecord(filefork, iterator); + if (error) { + printf("hotfiles_refine: BTDeleteRecord failed %d (file %d)\n", error, key->fileID); + error = MacToVFSError(error); + break; + } + key->keyLength = HFC_KEYLENGTH; + key->temperature = listp->hfl_hotfile[i].hf_temperature; + key->fileID = listp->hfl_hotfile[i].hf_fileid; + key->forkType = 0; + error = BTInsertRecord(filefork, iterator, &record, sizeof(data)); + if (error) { + printf("hotfiles_refine: BTInsertRecord failed %d (file %d)\n", error, key->fileID); + error = MacToVFSError(error); + break; + } + + /* + * Invalidate this entry in the list. + */ + listp->hfl_hotfile[i].hf_temperature = 0; + listp->hfl_totalblocks -= listp->hfl_hotfile[i].hf_blocks; + + } /* end for */ + + (void) BTFlushPath(filefork); + (void) VOP_UNLOCK(hfsmp->hfc_filevp, 0, p); + + if (hfsmp->jnl) { + journal_end_transaction(hfsmp->jnl); + } + hfs_global_shared_lock_release(hfsmp); +out: + FREE(iterator, M_TEMP); + return (error); +} + +/* + * Move new hot files into hot area. 
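+ *
+ * Adoption is deliberately batched so a single sync does not stall:
+ * each pass relocates at most HFC_BLKSPERSYNC (300) blocks or
+ * HFC_FILESPERSYNC (50) files, whichever limit is reached first, and
+ * hfs_hotfilesync() re-enters here on later syncs for as long as
+ * hfc_stage remains HFC_ADOPTION.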
+ */
+static int
+hotfiles_adopt(struct hfsmount *hfsmp, struct proc *p)
+{
+	BTreeIterator * iterator;
+	struct mount *mp;
+	struct vnode *vp;
+	filefork_t * filefork;
+	hotfilelist_t  *listp;
+	FSBufferDescriptor  record;
+	HotFileKey * key;
+	u_int32_t  data;
+	enum hfc_stage stage;
+	int  fileblocks;
+	int  blksmoved;
+	int  i;
+	int  last;
+	int  error = 0;
+	int  startedtrans = 0;
+	int  acquiredlock = 0;
+
+	if ((listp = (hotfilelist_t  *)hfsmp->hfc_recdata) == NULL)
+		return (0);
+
+	if (hfsmp->hfc_stage != HFC_ADOPTION) {
+		return (EBUSY);
+	}
+	stage = hfsmp->hfc_stage;
+	hfsmp->hfc_stage = HFC_BUSY;
+
+	mp = HFSTOVFS(hfsmp);
+	blksmoved = 0;
+	last = listp->hfl_next + HFC_FILESPERSYNC;
+	if (last > listp->hfl_count)
+		last = listp->hfl_count;
+
+	MALLOC(iterator, BTreeIterator *, sizeof(*iterator), M_TEMP, M_WAITOK);
+	bzero(iterator, sizeof(*iterator));
+	key = (HotFileKey*) &iterator->key;
+	key->keyLength = HFC_KEYLENGTH;
+
+	record.bufferAddress = &data;
+	record.itemSize = sizeof(u_int32_t);
+	record.itemCount = 1;
+
+	vn_lock(hfsmp->hfc_filevp, LK_EXCLUSIVE | LK_RETRY, p);
+	filefork = VTOF(hfsmp->hfc_filevp);
+
+	for (i = listp->hfl_next; (i < last) && (blksmoved < HFC_BLKSPERSYNC); ++i) {
+		/*
+		 * Skip invalid entries (already in hot area).
+		 */
+		if (listp->hfl_hotfile[i].hf_temperature == 0) {
+			listp->hfl_next++;
+			continue;
+		}
+		/*
+		 * Acquire a vnode for this file.
+		 */
+		error = VFS_VGET(mp, &listp->hfl_hotfile[i].hf_fileid, &vp);
+		if (error) {
+			if (error == ENOENT) {
+				error = 0;
+				listp->hfl_next++;
+				continue;  /* stale entry, go to next */
+			}
+			break;
+		}
+		if (vp->v_type != VREG && vp->v_type != VLNK) {
+			printf("hotfiles_adopt: huh, not a file %d (%d)\n", listp->hfl_hotfile[i].hf_fileid, VTOC(vp)->c_cnid);
+			vput(vp);
+			listp->hfl_hotfile[i].hf_temperature = 0;
+			listp->hfl_next++;
+			continue;  /* stale entry, go to next */
+		}
+		if (hotextents(hfsmp, &VTOF(vp)->ff_extents[0])) {
+			vput(vp);
+			listp->hfl_hotfile[i].hf_temperature = 0;
+			listp->hfl_next++;
+			listp->hfl_totalblocks -= listp->hfl_hotfile[i].hf_blocks;
+			continue;  /* stale entry, go to next */
+		}
+		fileblocks = VTOF(vp)->ff_blocks;
+		if (fileblocks > hfsmp->hfs_hotfile_freeblks) {
+			vput(vp);
+			listp->hfl_next++;
+			listp->hfl_totalblocks -= fileblocks;
+			continue;  /* entry too big, go to next */
+		}
+
+		if ((blksmoved > 0) &&
+		    (blksmoved + fileblocks) > HFC_BLKSPERSYNC) {
+			vput(vp);
+			break;
+		}
+		/* Start a new transaction. */
+		hfs_global_shared_lock_acquire(hfsmp);
+		acquiredlock = 1;
+		if (hfsmp->jnl) {
+			if (journal_start_transaction(hfsmp->jnl) != 0) {
+				error = EINVAL;
+				vput(vp);
+				break;
+			}
+			startedtrans = 1;
+		}
+
+		/*
+		 * Pick up the record data (the first four bytes of the
+		 * file's name) while we still hold our reference on the
+		 * vnode; it is released right after the relocation.
+		 */
+		if (VTOC(vp)->c_desc.cd_nameptr)
+			data = *(u_int32_t *)(VTOC(vp)->c_desc.cd_nameptr);
+		else
+			data = 0x3f3f3f3f;
+
+		error = hfs_relocate(vp, hfsmp->hfs_hotfile_start, p->p_ucred, p);
+		vput(vp);
+		if (error)
+			break;
+
+		/* Keep hot file free space current.
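+		 * (hfs_hotfile_freeblks is the number of allocation blocks
+		 * still unclaimed in the hot file band; adoption stops once
+		 * it reaches zero.)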
+		 */
+		hfsmp->hfs_hotfile_freeblks -= fileblocks;
+		listp->hfl_totalblocks -= fileblocks;
+
+		/* Insert hot file entry (data was captured before the vput above) */
+		key->keyLength = HFC_KEYLENGTH;
+		key->temperature = listp->hfl_hotfile[i].hf_temperature;
+		key->fileID = listp->hfl_hotfile[i].hf_fileid;
+		key->forkType = 0;
+
+		error = BTInsertRecord(filefork, iterator, &record, sizeof(data));
+		if (error) {
+			printf("hotfiles_adopt: BTInsertRecord failed %d (fileid %d)\n", error, key->fileID);
+			error = MacToVFSError(error);
+			stage = HFC_IDLE;
+			break;
+		}
+
+		/* Insert thread record */
+		key->keyLength = HFC_KEYLENGTH;
+		key->temperature = HFC_LOOKUPTAG;
+		key->fileID = listp->hfl_hotfile[i].hf_fileid;
+		key->forkType = 0;
+		data = listp->hfl_hotfile[i].hf_temperature;
+		error = BTInsertRecord(filefork, iterator, &record, sizeof(data));
+		if (error) {
+			printf("hotfiles_adopt: BTInsertRecord failed %d (fileid %d)\n", error, key->fileID);
+			error = MacToVFSError(error);
+			stage = HFC_IDLE;
+			break;
+		}
+		(void) BTFlushPath(filefork);
+
+		/* Transaction complete. */
+		if (startedtrans) {
+			journal_end_transaction(hfsmp->jnl);
+			startedtrans = 0;
+		}
+		hfs_global_shared_lock_release(hfsmp);
+		acquiredlock = 0;
+
+		blksmoved += fileblocks;
+		listp->hfl_next++;
+		if (listp->hfl_next >= listp->hfl_count) {
+			break;
+		}
+		if (hfsmp->hfs_hotfile_freeblks <= 0) {
+#if HFC_VERBOSE
+			printf("hotfiles_adopt: free space exhausted (%d)\n", hfsmp->hfs_hotfile_freeblks);
+#endif
+			break;
+		}
+	} /* end for */
+
+#if HFC_VERBOSE
+	printf("hotfiles_adopt: [%d] adopted %d blocks (%d left)\n", listp->hfl_next, blksmoved, listp->hfl_totalblocks);
+#endif
+	/* Finish any outstanding transactions. */
+	if (startedtrans) {
+		(void) BTFlushPath(filefork);
+		journal_end_transaction(hfsmp->jnl);
+		startedtrans = 0;
+	}
+	if (acquiredlock) {
+		hfs_global_shared_lock_release(hfsmp);
+		acquiredlock = 0;
+	}
+	(void) VOP_UNLOCK(hfsmp->hfc_filevp, 0, p);
+
+	if ((listp->hfl_next >= listp->hfl_count) || (hfsmp->hfs_hotfile_freeblks <= 0)) {
+#if HFC_VERBOSE
+		printf("hotfiles_adopt: all done relocating %d files\n", listp->hfl_count);
+		printf("hotfiles_adopt: %d blocks free in hot file band\n", hfsmp->hfs_hotfile_freeblks);
+#endif
+		stage = HFC_IDLE;
+	}
+	FREE(iterator, M_TEMP);
+
+	if (stage != HFC_ADOPTION && hfsmp->hfc_filevp) {
+		(void) hfc_btree_close(hfsmp, hfsmp->hfc_filevp);
+		hfsmp->hfc_filevp = NULL;
+	}
+	hfsmp->hfc_stage = stage;
+	wakeup((caddr_t)&hfsmp->hfc_stage);
+	return (error);
+}
+
+/*
+ * Reclaim space by evicting the coldest files.
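+ *
+ * Since records are keyed primarily by temperature, iterating from
+ * kBTreeFirstRecord always yields the coldest surviving file, so
+ * eviction simply consumes records from the front of the tree until
+ * enough blocks have been reclaimed or the per-sync limits are hit.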
+ */
+static int
+hotfiles_evict(struct hfsmount *hfsmp, struct proc *p)
+{
+	BTreeIterator * iterator;
+	struct mount *mp;
+	struct vnode *vp;
+	HotFileKey * key;
+	filefork_t * filefork;
+	hotfilelist_t  *listp;
+	enum hfc_stage stage;
+	int  blksmoved;
+	int  filesmoved;
+	int  fileblocks;
+	int  error = 0;
+	int  startedtrans = 0;
+	int  acquiredlock = 0;
+
+	if (hfsmp->hfc_stage != HFC_EVICTION) {
+		return (EBUSY);
+	}
+
+	if ((listp = (hotfilelist_t  *)hfsmp->hfc_recdata) == NULL)
+		return (0);
+
+	stage = hfsmp->hfc_stage;
+	hfsmp->hfc_stage = HFC_BUSY;
+
+	mp = HFSTOVFS(hfsmp);
+	filesmoved = blksmoved = 0;
+
+	MALLOC(iterator, BTreeIterator *, sizeof(*iterator), M_TEMP, M_WAITOK);
+	bzero(iterator, sizeof(*iterator));
+	key = (HotFileKey*) &iterator->key;
+
+	vn_lock(hfsmp->hfc_filevp, LK_EXCLUSIVE | LK_RETRY, p);
+	filefork = VTOF(hfsmp->hfc_filevp);
+
+	while (listp->hfl_reclaimblks > 0 &&
+	       blksmoved < HFC_BLKSPERSYNC &&
+	       filesmoved < HFC_FILESPERSYNC) {
+
+		/*
+		 * Obtain the first record (ie the coldest one).
+		 */
+		if (BTIterateRecord(filefork, kBTreeFirstRecord, iterator, NULL, NULL) != 0) {
+#if HFC_VERBOSE
+			printf("hotfiles_evict: no more records\n");
+#endif
+			error = 0;
+			stage = HFC_ADOPTION;
+			break;
+		}
+		if (key->keyLength != HFC_KEYLENGTH) {
+			printf("hotfiles_evict: invalid key length %d\n", key->keyLength);
+			error = EFTYPE;
+			break;
+		}
+		if (key->temperature == HFC_LOOKUPTAG) {
+#if HFC_VERBOSE
+			printf("hotfiles_evict: ran into thread records\n");
+#endif
+			error = 0;
+			stage = HFC_ADOPTION;
+			break;
+		}
+		/*
+		 * Acquire the vnode for this file.
+		 */
+		error = VFS_VGET(mp, &key->fileID, &vp);
+
+		/* Start a new transaction. */
+		hfs_global_shared_lock_acquire(hfsmp);
+		acquiredlock = 1;
+		if (hfsmp->jnl) {
+			if (journal_start_transaction(hfsmp->jnl) != 0) {
+				if (error == 0)
+					vput(vp);
+				error = EINVAL;
+				break;
+			}
+			startedtrans = 1;
+		}
+		if (error) {
+			if (error == ENOENT) {
+				(void) BTDeleteRecord(filefork, iterator);
+				key->temperature = HFC_LOOKUPTAG;
+				(void) BTDeleteRecord(filefork, iterator);
+				goto next;  /* stale entry, go to next */
+			} else {
+				printf("hotfiles_evict: err %d getting file %d\n",
+				       error, key->fileID);
+			}
+			break;
+		}
+		if (vp->v_type != VREG && vp->v_type != VLNK) {
+			printf("hotfiles_evict: huh, not a file %d\n", key->fileID);
+			vput(vp);
+			(void) BTDeleteRecord(filefork, iterator);
+			key->temperature = HFC_LOOKUPTAG;
+			(void) BTDeleteRecord(filefork, iterator);
+			goto next;  /* invalid entry, go to next */
+		}
+		fileblocks = VTOF(vp)->ff_blocks;
+		if ((blksmoved > 0) &&
+		    (blksmoved + fileblocks) > HFC_BLKSPERSYNC) {
+			vput(vp);
+			break;
+		}
+		/*
+		 * Make sure file is in the hot area.
+		 */
+		if (!hotextents(hfsmp, &VTOF(vp)->ff_extents[0])) {
+#if HFC_VERBOSE
+			printf("hotfiles_evict: file %d isn't hot!\n", key->fileID);
+#endif
+			vput(vp);
+			(void) BTDeleteRecord(filefork, iterator);
+			key->temperature = HFC_LOOKUPTAG;
+			(void) BTDeleteRecord(filefork, iterator);
+			goto next;  /* go to next */
+		}
+
+		/*
+		 * Relocate file out of hot area.
+		 */
+		error = hfs_relocate(vp, HFSTOVCB(hfsmp)->nextAllocation, p->p_ucred, p);
+		if (error) {
+			/* XXX skip to next record here!
*/ + printf("hotfiles_evict: err % relocating file\n", error, key->fileID); + vput(vp); + break; + } + (void) VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p); + + vput(vp); + + hfsmp->hfs_hotfile_freeblks += fileblocks; + listp->hfl_reclaimblks -= fileblocks; + if (listp->hfl_reclaimblks < 0) + listp->hfl_reclaimblks = 0; + blksmoved += fileblocks; + filesmoved++; + + error = BTDeleteRecord(filefork, iterator); + if (error) { + printf("hotfiles_evict: BTDeleteRecord failed %d (fileid %d)\n", error, key->fileID); + error = MacToVFSError(error); + break; + } + key->temperature = HFC_LOOKUPTAG; + error = BTDeleteRecord(filefork, iterator); + if (error) { + printf("hotfiles_evict: BTDeleteRecord thread failed %d (fileid %d)\n", error, key->fileID); + error = MacToVFSError(error); + break; + } +next: + (void) BTFlushPath(filefork); + + /* Transaction complete. */ + if (startedtrans) { + journal_end_transaction(hfsmp->jnl); + startedtrans = 0; + } + hfs_global_shared_lock_release(hfsmp); + aquiredlock = 0; + + } /* end while */ + +#if HFC_VERBOSE + printf("hotfiles_evict: moved %d files (%d blks, %d to go)\n", filesmoved, blksmoved, listp->hfl_reclaimblks); +#endif + /* Finish any outstanding transactions. */ + if (startedtrans) { + (void) BTFlushPath(filefork); + journal_end_transaction(hfsmp->jnl); + startedtrans = 0; + } + if (aquiredlock) { + hfs_global_shared_lock_release(hfsmp); + aquiredlock = 0; + } + (void) VOP_UNLOCK(hfsmp->hfc_filevp, 0, p); + + /* + * Move to next stage when finished. + */ + if (listp->hfl_reclaimblks <= 0) { + stage = HFC_ADOPTION; +#if HFC_VERBOSE + printf("hotfiles_evict: %d blocks free in hot file band\n", hfsmp->hfs_hotfile_freeblks); +#endif + } + FREE(iterator, M_TEMP); + hfsmp->hfc_stage = stage; + wakeup((caddr_t)&hfsmp->hfc_stage); + return (error); +} + +/* + * Age the existing records in the hot files b-tree. 
+ */ +static int +hotfiles_age(struct hfsmount *hfsmp, struct proc *p) +{ + BTreeInfoRec btinfo; + BTreeIterator * iterator; + BTreeIterator * prev_iterator; + FSBufferDescriptor record; + FSBufferDescriptor prev_record; + HotFileKey * key; + HotFileKey * prev_key; + filefork_t * filefork; + u_int32_t data; + u_int32_t prev_data; + u_int32_t newtemp; + int error; + int i; + int numrecs; + int aged = 0; + u_int16_t reclen; + + + MALLOC(iterator, BTreeIterator *, 2 * sizeof(*iterator), M_TEMP, M_WAITOK); + bzero(iterator, 2 * sizeof(*iterator)); + key = (HotFileKey*) &iterator->key; + + prev_iterator = &iterator[1]; + prev_key = (HotFileKey*) &prev_iterator->key; + + record.bufferAddress = &data; + record.itemSize = sizeof(data); + record.itemCount = 1; + prev_record.bufferAddress = &prev_data; + prev_record.itemSize = sizeof(prev_data); + prev_record.itemCount = 1; + + /* + * Capture b-tree changes inside a transaction + */ + hfs_global_shared_lock_acquire(hfsmp); + if (hfsmp->jnl) { + if (journal_start_transaction(hfsmp->jnl) != 0) { + hfs_global_shared_lock_release(hfsmp); + error = EINVAL; + goto out2; + } + } + vn_lock(hfsmp->hfc_filevp, LK_EXCLUSIVE | LK_RETRY, p); + filefork = VTOF(hfsmp->hfc_filevp); + + error = BTGetInformation(filefork, 0, &btinfo); + if (error) { + error = MacToVFSError(error); + goto out; + } + if (btinfo.numRecords < 2) { + error = 0; + goto out; + } + + /* Only want 1st half of leaf records */ + numrecs = (btinfo.numRecords /= 2) - 1; + + error = BTIterateRecord(filefork, kBTreeFirstRecord, iterator, &record, &reclen); + if (error) { + printf("hfs_agehotfiles: BTIterateRecord: %d\n", error); + error = MacToVFSError(error); + goto out; + } + bcopy(iterator, prev_iterator, sizeof(BTreeIterator)); + prev_data = data; + + for (i = 0; i < numrecs; ++i) { + error = BTIterateRecord(filefork, kBTreeNextRecord, iterator, &record, &reclen); + if (error == 0) { + if (key->temperature < prev_key->temperature) { + printf("hfs_agehotfiles: out of order keys!\n"); + error = EFTYPE; + break; + } + if (reclen != sizeof(data)) { + printf("hfs_agehotfiles: invalid record length %d\n", reclen); + error = EFTYPE; + break; + } + if (key->keyLength != HFC_KEYLENGTH) { + printf("hfs_agehotfiles: invalid key length %d\n", key->keyLength); + error = EFTYPE; + break; + } + } else if ((error == fsBTEndOfIterationErr || error == fsBTRecordNotFoundErr) && + (i == (numrecs - 1))) { + error = 0; + } else if (error) { + printf("hfs_agehotfiles: %d of %d BTIterateRecord: %d\n", i, numrecs, error); + error = MacToVFSError(error); + break; + } + if (prev_key->temperature == HFC_LOOKUPTAG) { +#if HFC_VERBOSE + printf("hfs_agehotfiles: ran into thread record\n"); +#endif + error = 0; + break; + } + error = BTDeleteRecord(filefork, prev_iterator); + if (error) { + printf("hfs_agehotfiles: BTDeleteRecord failed %d (file %d)\n", error, prev_key->fileID); + error = MacToVFSError(error); + break; + } + + /* Age by halving the temperature (floor = 4) */ + newtemp = MAX(prev_key->temperature >> 1, 4); + prev_key->temperature = newtemp; + + error = BTInsertRecord(filefork, prev_iterator, &prev_record, sizeof(data)); + if (error) { + printf("hfs_agehotfiles: BTInsertRecord failed %d (file %d)\n", error, prev_key->fileID); + error = MacToVFSError(error); + break; + } + ++aged; + /* + * Update thread entry with latest temperature. 
+		 */
+		prev_key->temperature = HFC_LOOKUPTAG;
+		error = BTUpdateRecord(filefork, prev_iterator,
+				(IterateCallBackProcPtr)update_callback,
+				&newtemp);
+		if (error) {
+			printf("hfs_agehotfiles: %d of %d BTUpdateRecord failed %d (file %d, %d)\n",
+				i, numrecs, error, prev_key->fileID, newtemp);
+			error = MacToVFSError(error);
+			// break;
+		}
+
+		bcopy(iterator, prev_iterator, sizeof(BTreeIterator));
+		prev_data = data;
+
+	} /* end for */
+
+#if HFC_VERBOSE
+	if (error == 0)
+		printf("hfs_agehotfiles: aged %d records out of %d\n", aged, btinfo.numRecords);
+#endif
+	(void) BTFlushPath(filefork);
+out:
+	(void) VOP_UNLOCK(hfsmp->hfc_filevp, 0, p);
+
+	if (hfsmp->jnl) {
+		// hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0);
+		journal_end_transaction(hfsmp->jnl);
+	}
+	hfs_global_shared_lock_release(hfsmp);
+out2:
+	FREE(iterator, M_TEMP);
+	return (error);
+}
+
+/*
+ * Return true if any blocks are contained in the hot file region.
+ */
+static int
+hotextents(struct hfsmount *hfsmp, HFSPlusExtentDescriptor * extents)
+{
+	u_int32_t  b1, b2;
+	int  i;
+	int  inside = 0;
+
+	for (i = 0; i < kHFSPlusExtentDensity; ++i) {
+		b1 = extents[i].startBlock;
+		if (b1 == 0)
+			break;
+		b2 = b1 + extents[i].blockCount - 1;
+		if ((b1 >= hfsmp->hfs_hotfile_start &&
+		     b2 <= hfsmp->hfs_hotfile_end) ||
+		    (b1 < hfsmp->hfs_hotfile_end &&
+		     b2 > hfsmp->hfs_hotfile_end)) {
+			inside = 1;
+			break;
+		}
+	}
+	return (inside);
+}
+
+
+/*
+ *========================================================================
+ *                       HOT FILE B-TREE ROUTINES
+ *========================================================================
+ */
+
+/*
+ * Open the hot files b-tree for writing.
+ *
+ * On successful exit the vnode has a reference but is unlocked.
+ */
+static int
+hfc_btree_open(struct hfsmount *hfsmp, struct vnode **vpp)
+{
+	struct proc *p;
+	struct vnode *vp;
+	struct cat_desc  cdesc = {0};
+	struct cat_attr  cattr;
+	struct cat_fork  cfork;
+	static char filename[] = HFC_FILENAME;
+	int  error;
+	int  retry = 0;
+
+	*vpp = NULL;
+	p = current_proc();
+
+	cdesc.cd_parentcnid = kRootDirID;
+	cdesc.cd_nameptr = filename;
+	cdesc.cd_namelen = strlen(filename);
+
+	/* Lock catalog b-tree */
+	error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
+	if (error)
+		return (error);
+
+	error = cat_lookup(hfsmp, &cdesc, 0, &cdesc, &cattr, &cfork);
+
+	/* Unlock catalog b-tree */
+	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
+
+	if (error) {
+		printf("hfc_btree_open: cat_lookup error %d\n", error);
+		return (error);
+	}
+again:
+	cdesc.cd_flags |= CD_ISMETA;
+	error = hfs_getnewvnode(hfsmp, NULL, &cdesc, 0, &cattr, &cfork, &vp);
+	if (error) {
+		printf("hfc_btree_open: hfs_getnewvnode error %d\n", error);
+		cat_releasedesc(&cdesc);
+		return (error);
+	}
+	if ((vp->v_flag & VSYSTEM) == 0) {
+#if HFC_VERBOSE
+		printf("hfc_btree_open: file has UBC, try again\n");
+#endif
+		vput(vp);
+		vgone(vp);
+		if (retry++ == 0)
+			goto again;
+		else
+			return (EBUSY);
+	}
+
+	/* Open the B-tree file for writing...
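+	 * BTOpenPath() installs hfc_comparekeys() as the key comparator,
+	 * giving records the (temperature, fileID, forkType) ordering
+	 * relied upon throughout this file (see hfc_comparekeys below).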
*/ + error = BTOpenPath(VTOF(vp), (KeyCompareProcPtr) hfc_comparekeys); + if (error) { + printf("hfc_btree_open: BTOpenPath error %d\n", error); + error = MacToVFSError(error); + } else { +#if HFC_VERBOSE + struct BTreeInfoRec btinfo; + + if (BTGetInformation(VTOF(vp), 0, &btinfo) == 0) { + printf("btinfo: nodeSize %d\n", btinfo.nodeSize); + printf("btinfo: maxKeyLength %d\n", btinfo.maxKeyLength); + printf("btinfo: treeDepth %d\n", btinfo.treeDepth); + printf("btinfo: numRecords %d\n", btinfo.numRecords); + printf("btinfo: numNodes %d\n", btinfo.numNodes); + printf("btinfo: numFreeNodes %d\n", btinfo.numFreeNodes); + } +#endif + } + + VOP_UNLOCK(vp, 0, p); /* unlocked with a single reference */ + if (error) + vrele(vp); + else + *vpp = vp; + + if ((vp->v_flag & VSYSTEM) == 0) + panic("hfc_btree_open: not a system file (vp = 0x%08x)", vp); + + if (UBCINFOEXISTS(vp)) + panic("hfc_btree_open: has UBCInfo (vp = 0x%08x)", vp); + + return (error); +} + +/* + * Close the hot files b-tree. + * + * On entry the vnode is not locked but has a reference. + */ +static int +hfc_btree_close(struct hfsmount *hfsmp, struct vnode *vp) +{ + struct proc *p = current_proc(); + int error; + + + if (hfsmp->jnl) { + journal_flush(hfsmp->jnl); + } + + if (vget(vp, LK_EXCLUSIVE, p) == 0) { + (void) VOP_FSYNC(vp, NOCRED, MNT_WAIT, p); + error = BTClosePath(VTOF(vp)); + if (error) + printf("hfc_btree_close: BTClosePath error %d\n", error); + vput(vp); + } + vrele(vp); + vgone(vp); + vp = NULL; + + return (0); +} + +/* + * Create a hot files btree file. + * + */ +static int +hfc_btree_create(struct hfsmount *hfsmp, int nodesize, int entries) +{ + struct proc *p; + struct nameidata nd; + struct vnode *vp; + char path[128]; + int error; + + + if (hfsmp->hfc_filevp) + panic("hfc_btree_create: hfc_filevp exists (vp = 0x%08x)", hfsmp->hfc_filevp); + + p = current_proc(); + snprintf(path, sizeof(path), "%s/%s", + hfsmp->hfs_mp->mnt_stat.f_mntonname, HFC_FILENAME); + NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p); + if ((error = vn_open(&nd, O_CREAT | FWRITE, S_IRUSR | S_IWUSR)) != 0) { + return (error); + } + vp = nd.ni_vp; + + /* Don't use non-regular files or files with links. */ + if (vp->v_type != VREG || VTOC(vp)->c_nlink != 1) { + error = EFTYPE; + goto out; + } + + printf("HFS: created HFBT on %s\n", HFSTOVCB(hfsmp)->vcbVN); + + if (VTOF(vp)->ff_size < nodesize) { + caddr_t buffer; + u_int16_t *index; + u_int16_t offset; + BTNodeDescriptor *ndp; + BTHeaderRec *bthp; + HotFilesInfo *hotfileinfo; + int nodecnt; + int filesize; + int entirespernode; + + /* + * Mark it invisible (truncate will pull these changes). 
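+		 *
+		 * The buffer assembled below is a standard B-tree header
+		 * node: a BTNodeDescriptor followed by the BTHeaderRec,
+		 * kBTreeHeaderUserBytes of user data holding HotFilesInfo,
+		 * and a map record, with the record offsets stored as
+		 * big-endian u_int16_t values at the end of the node.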
+		 */
+		((FndrFileInfo *)&VTOC(vp)->c_finderinfo[0])->fdFlags |=
+			SWAP_BE16 (kIsInvisible + kNameLocked);
+
+		if (kmem_alloc(kernel_map, (vm_offset_t *)&buffer, nodesize)) {
+			error = ENOMEM;
+			goto out;
+		}
+		bzero(buffer, nodesize);
+		index = (u_int16_t *)buffer;
+
+		entirespernode = (nodesize - sizeof(BTNodeDescriptor) - 2) /
+				 (sizeof(HotFileKey) + 6);
+		nodecnt = 2 + howmany(entries * 2, entirespernode);
+		nodecnt = roundup(nodecnt, 8);
+		filesize = nodecnt * nodesize;
+
+		/* FILL IN THE NODE DESCRIPTOR:  */
+		ndp = (BTNodeDescriptor *)buffer;
+		ndp->kind = kBTHeaderNode;
+		ndp->numRecords = SWAP_BE16 (3);
+		offset = sizeof(BTNodeDescriptor);
+		index[(nodesize / 2) - 1] = SWAP_BE16 (offset);
+
+		/* FILL IN THE HEADER RECORD:  */
+		bthp = (BTHeaderRec *)((UInt8 *)buffer + offset);
+		bthp->nodeSize     = SWAP_BE16 (nodesize);
+		bthp->totalNodes   = SWAP_BE32 (filesize / nodesize);
+		bthp->freeNodes    = SWAP_BE32 (nodecnt - 1);
+		bthp->clumpSize    = SWAP_BE32 (filesize);
+		bthp->btreeType    = kUserBTreeType; /* non-metadata */
+		bthp->attributes  |= SWAP_BE32 (kBTBigKeysMask);
+		bthp->maxKeyLength = SWAP_BE16 (HFC_KEYLENGTH);
+		offset += sizeof(BTHeaderRec);
+		index[(nodesize / 2) - 2] = SWAP_BE16 (offset);
+
+		/* FILL IN THE USER RECORD:  */
+		hotfileinfo = (HotFilesInfo *)((UInt8 *)buffer + offset);
+		hotfileinfo->magic       = SWAP_BE32 (HFC_MAGIC);
+		hotfileinfo->version     = SWAP_BE32 (HFC_VERSION);
+		hotfileinfo->duration    = SWAP_BE32 (HFC_DEFAULT_DURATION);
+		hotfileinfo->timebase    = 0;
+		hotfileinfo->timeleft    = 0;
+		hotfileinfo->threshold   = SWAP_BE32 (HFC_MINIMUM_TEMPERATURE);
+		hotfileinfo->maxfileblks = SWAP_BE32 (HFC_MAXIMUM_FILESIZE / HFSTOVCB(hfsmp)->blockSize);
+		hotfileinfo->maxfilecnt  = SWAP_BE32 (HFC_DEFAULT_FILE_COUNT);
+		strcpy(hotfileinfo->tag, hfc_tag);
+		offset += kBTreeHeaderUserBytes;
+		index[(nodesize / 2) - 3] = SWAP_BE16 (offset);
+
+		/* FILL IN THE MAP RECORD (only one node in use). */
+		*((u_int8_t *)buffer + offset) = 0x80;
+		offset += nodesize - sizeof(BTNodeDescriptor) - sizeof(BTHeaderRec)
+			  - kBTreeHeaderUserBytes - (4 * sizeof(int16_t));
+		index[(nodesize / 2) - 4] = SWAP_BE16 (offset);
+
+		vp->v_flag |= VNOFLUSH;
+		error = VOP_TRUNCATE(vp, (off_t)filesize, IO_NDELAY, NOCRED, p);
+		if (error == 0) {
+			struct iovec aiov;
+			struct uio auio;
+
+			auio.uio_iov = &aiov;
+			auio.uio_iovcnt = 1;
+			aiov.iov_base = buffer;
+			aiov.iov_len = filesize;
+			auio.uio_resid = nodesize;
+			auio.uio_offset = (off_t)(0);
+			auio.uio_segflg = UIO_SYSSPACE;
+			auio.uio_rw = UIO_WRITE;
+			auio.uio_procp = (struct proc *)0;
+			error = VOP_WRITE(vp, &auio, 0, kernproc->p_ucred);
+		}
+		kmem_free(kernel_map, (vm_offset_t)buffer, nodesize);
+	}
+out:
+	(void) VOP_UNLOCK(vp, 0, p);
+	(void) vn_close(vp, FWRITE, kernproc->p_ucred, p);
+	vgone(vp);
+	return (error);
+}
+
+/*
+ * Compare two hot file b-tree keys.
+ *
+ * Result:   +n  search key > trial key
+ *            0  search key = trial key
+ *           -n  search key < trial key
+ */
+static int
+hfc_comparekeys(HotFileKey *searchKey, HotFileKey *trialKey)
+{
+	/*
+	 * Compare temperatures first.
+	 */
+	if (searchKey->temperature == trialKey->temperature) {
+		/*
+		 * Temperatures are equal so compare file ids.
+		 */
+		if (searchKey->fileID == trialKey->fileID) {
+			/*
+			 * File ids are equal so compare fork types.
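+			 *
+			 * Net effect (hypothetical keys): (16, 27, 0) sorts
+			 * before (16, 31, 0), which sorts before (24, 12, 0);
+			 * temperature dominates, file ID breaks ties, and
+			 * fork type breaks what remains.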
+			 */
+			if (searchKey->forkType == trialKey->forkType) {
+				return (0);
+			} else if (searchKey->forkType > trialKey->forkType) {
+				return (1);
+			}
+		} else if (searchKey->fileID > trialKey->fileID) {
+			return (1);
+		}
+	} else if (searchKey->temperature > trialKey->temperature) {
+		return (1);
+	}
+
+	return (-1);
+}
+
+
+/*
+ *========================================================================
+ *                  HOT FILE DATA COLLECTING ROUTINES
+ *========================================================================
+ */
+
+/*
+ * Lookup a hot file entry in the tree.
+ */
+static hotfile_entry_t *
+hf_lookup(hotfile_data_t *hotdata, u_int32_t fileid, u_int32_t temperature)
+{
+	hotfile_entry_t *entry = hotdata->rootentry;
+
+	while (entry &&
+	       entry->temperature != temperature &&
+	       entry->fileid != fileid) {
+
+		if (temperature > entry->temperature)
+			entry = entry->right;
+		else if (temperature < entry->temperature)
+			entry = entry->left;
+		else if (fileid > entry->fileid)
+			entry = entry->right;
+		else
+			entry = entry->left;
+	}
+	return (entry);
+}
+
+/*
+ * Insert a hot file entry into the tree.
+ */
+static void
+hf_insert(hotfile_data_t *hotdata, hotfile_entry_t *newentry)
+{
+	hotfile_entry_t *entry = hotdata->rootentry;
+	u_int32_t fileid = newentry->fileid;
+	u_int32_t temperature = newentry->temperature;
+
+	if (entry == NULL) {
+		hotdata->rootentry = newentry;
+		hotdata->coldest = newentry;
+		hotdata->activefiles++;
+		return;
+	}
+
+	while (entry) {
+		if (temperature > entry->temperature) {
+			if (entry->right)
+				entry = entry->right;
+			else {
+				entry->right = newentry;
+				break;
+			}
+		} else if (temperature < entry->temperature) {
+			if (entry->left)
+				entry = entry->left;
+			else {
+				entry->left = newentry;
+				break;
+			}
+		} else if (fileid > entry->fileid) {
+			if (entry->right)
+				entry = entry->right;
+			else {
+				if (entry->fileid != fileid)
+					entry->right = newentry;
+				break;
+			}
+		} else {
+			if (entry->left)
+				entry = entry->left;
+			else {
+				if (entry->fileid != fileid)
+					entry->left = newentry;
+				break;
+			}
+		}
+	}
+
+	hotdata->activefiles++;
+}
+
+/*
+ * Find the coldest entry in the tree.
+ */
+static hotfile_entry_t *
+hf_coldest(hotfile_data_t *hotdata)
+{
+	hotfile_entry_t *entry = hotdata->rootentry;
+
+	if (entry) {
+		while (entry->left)
+			entry = entry->left;
+	}
+	return (entry);
+}
+
+/*
+ * Delete a hot file entry from the tree.
+ */
+static void
+hf_delete(hotfile_data_t *hotdata, u_int32_t fileid, u_int32_t temperature)
+{
+	hotfile_entry_t *entry, *parent, *next;
+
+	parent = NULL;
+	entry = hotdata->rootentry;
+
+	while (entry &&
+	       entry->temperature != temperature &&
+	       entry->fileid != fileid) {
+
+		parent = entry;
+		if (temperature > entry->temperature)
+			entry = entry->right;
+		else if (temperature < entry->temperature)
+			entry = entry->left;
+		else if (fileid > entry->fileid)
+			entry = entry->right;
+		else
+			entry = entry->left;
+	}
+
+	if (entry) {
+		/*
+		 * Reorganize the sub-trees spanning from our entry.
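+		 *
+		 * This is standard unbalanced binary-search-tree removal:
+		 * when a right subtree exists, the old left subtree is
+		 * spliced onto the leftmost node of the right subtree and
+		 * the right subtree takes the deleted entry's place.  E.g.
+		 * deleting temperature 16 from
+		 *
+		 *        16                 24
+		 *       /  \       =>      /
+		 *      8    24            8
+		 *
+		 * (illustrative temperatures only).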
+		 */
+		if ((next = entry->right)) {
+			hotfile_entry_t *pnextl, *psub;
+			/*
+			 * Tree pruning: take the left branch of the
+			 * current entry and place it at the lowest
+			 * left branch of the current right branch
+			 */
+			psub = next;
+
+			/* Walk the Right/Left sub tree from current entry */
+			while ((pnextl = psub->left))
+				psub = pnextl;
+
+			/* Plug the old left tree to the new ->Right leftmost entry */
+			psub->left = entry->left;
+
+		} else /* only left sub-tree, simple case */ {
+			next = entry->left;
+		}
+		/*
+		 * Now, plug the current entry sub tree to
+		 * the good pointer of our parent entry.
+		 */
+		if (parent == NULL)
+			hotdata->rootentry = next;
+		else if (parent->left == entry)
+			parent->left = next;
+		else
+			parent->right = next;
+
+		/* Place entry back on the free-list */
+		entry->left = 0;
+		entry->fileid = 0;
+		entry->temperature = 0;
+
+		entry->right = hotdata->freelist;
+		hotdata->freelist = entry;
+		hotdata->activefiles--;
+
+		if (hotdata->coldest == entry || hotdata->coldest == NULL) {
+			hotdata->coldest = hf_coldest(hotdata);
+		}
+
+	}
+}
+
+/*
+ * Get a free hot file entry.
+ */
+static hotfile_entry_t *
+hf_getnewentry(hotfile_data_t *hotdata)
+{
+	hotfile_entry_t * entry;
+
+	/*
+	 * When the free list is empty then steal the coldest one
+	 */
+	if (hotdata->freelist == NULL) {
+		entry = hf_coldest(hotdata);
+		hf_delete(hotdata, entry->fileid, entry->temperature);
+	}
+	entry = hotdata->freelist;
+	hotdata->freelist = entry->right;
+	entry->right = 0;
+
+	return (entry);
+}
+
+
+/*
+ * Visit the tree in descending order.
+ */
+static void
+hf_sortlist(hotfile_entry_t * root, int *index, hotfilelist_t *sortedlist)
+{
+	if (root) {
+		int i;
+
+		hf_sortlist(root->right, index, sortedlist);
+		i = *index;
+		++(*index);
+		sortedlist->hfl_hotfile[i].hf_fileid = root->fileid;
+		sortedlist->hfl_hotfile[i].hf_temperature = root->temperature;
+		sortedlist->hfl_hotfile[i].hf_blocks = root->blocks;
+		sortedlist->hfl_totalblocks += root->blocks;
+		hf_sortlist(root->left, index, sortedlist);
+	}
+}
+
+/*
+ * Generate a sorted list of hot files.
+ */
+static int
+hf_getsortedlist(hotfile_data_t * hotdata, hotfilelist_t *sortedlist)
+{
+	int index = 0;
+
+	hf_sortlist(hotdata->rootentry, &index, sortedlist);
+
+	sortedlist->hfl_count = hotdata->activefiles;
+
+	return (index);
+}
+
+
+#if HFC_DEBUG
+static void
+hf_maxdepth(hotfile_entry_t * root, int depth, int *maxdepth)
+{
+	if (root) {
+		depth++;
+		if (depth > *maxdepth)
+			*maxdepth = depth;
+		hf_maxdepth(root->left, depth, maxdepth);
+		hf_maxdepth(root->right, depth, maxdepth);
+	}
+}
+
+static void
+hf_printtree(hotfile_entry_t * root)
+{
+	if (root) {
+		hf_printtree(root->left);
+		printf("temperature: % 8d, fileid %d\n", root->temperature, root->fileid);
+		hf_printtree(root->right);
+	}
+}
+#endif
diff --git a/bsd/hfs/hfs_hotfiles.h b/bsd/hfs/hfs_hotfiles.h
new file mode 100644
index 000000000..4c7b7fdf7
--- /dev/null
+++ b/bsd/hfs/hfs_hotfiles.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __HFS_HOTFILES__ +#define __HFS_HOTFILES__ + +#include + +#ifdef KERNEL +#ifdef __APPLE_API_PRIVATE + + +#define HFC_FILENAME ".hotfiles.btree" + + +/* + * Temperature measurement constraints. + */ +#define HFC_DEFAULT_FILE_COUNT 1000 +#define HFC_DEFAULT_DURATION (3600 * 60) +#define HFC_CUMULATIVE_CYCLES 4 +#define HFC_MAXIMUM_FILE_COUNT 5000 +#define HFC_MAXIMUM_FILESIZE (10 * 1024 * 1024) +#define HFC_MINIMUM_TEMPERATURE 16 + + +/* + * Sync constraints. + */ +#define HFC_BLKSPERSYNC 300 +#define HFC_FILESPERSYNC 50 + + +/* + * Hot file clustering stages. + */ +enum hfc_stage { + HFC_DISABLED, + HFC_IDLE, + HFC_BUSY, + HFC_RECORDING, + HFC_EVALUATION, + HFC_EVICTION, + HFC_ADOPTION, +}; + + +/* + * B-tree file key format (on-disk). + */ +struct HotFileKey { + u_int16_t keyLength; /* length of key, excluding this field */ + u_int8_t forkType; /* 0 = data fork, FF = resource fork */ + u_int8_t pad; /* make the other fields align on 32-bit boundary */ + u_int32_t temperature; /* temperature recorded */ + u_int32_t fileID; /* file ID */ +}; +typedef struct HotFileKey HotFileKey; + +#define HFC_LOOKUPTAG 0xFFFFFFFF +#define HFC_KEYLENGTH (sizeof(HotFileKey) - sizeof(u_int16_t)) + +/* + * B-tree header node user info (on-disk). + */ +struct HotFilesInfo { + u_int32_t magic; + u_int32_t version; + u_int32_t duration; /* duration of sample period */ + u_int32_t timebase; /* recording period start time */ + u_int32_t timeleft; /* recording period stop time */ + u_int32_t threshold; + u_int32_t maxfileblks; + u_int32_t maxfilecnt; + u_int8_t tag[32]; +}; +typedef struct HotFilesInfo HotFilesInfo; + +#define HFC_MAGIC 0xFF28FF26 +#define HFC_VERSION 1 + + +struct hfsmount; +struct proc; +struct vnode; + +/* + * Hot File interface functions. + */ +int hfs_hotfilesync (struct hfsmount *, struct proc *); + +int hfs_recording_init(struct hfsmount *, struct proc *); +int hfs_recording_start (struct hfsmount *, struct proc *); +int hfs_recording_stop (struct hfsmount *, struct proc *); +int hfs_recording_suspend (struct hfsmount *, struct proc *); +int hfs_recording_abort (struct hfsmount *, struct proc *); + +int hfs_addhotfile (struct vnode *); +int hfs_removehotfile (struct vnode *); + +#endif /* __APPLE_API_PRIVATE */ +#endif /* KERNEL */ +#endif /* __HFS_HOTFILES__ */ diff --git a/bsd/hfs/hfs_link.c b/bsd/hfs/hfs_link.c index 602e96380..54f31f6f0 100644 --- a/bsd/hfs/hfs_link.c +++ b/bsd/hfs/hfs_link.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -116,6 +116,7 @@ hfs_makelink(struct hfsmount *hfsmp, struct cnode *cp, struct cnode *dcp, struct cat_desc to_desc; int newlink = 0; int retval; + cat_cookie_t cookie = {0}; /* We don't allow link nodes in our Private Meta Data folder! 
*/ @@ -125,10 +126,15 @@ hfs_makelink(struct hfsmount *hfsmp, struct cnode *cp, struct cnode *dcp, if (hfs_freeblks(hfsmp, 0) == 0) return (ENOSPC); + /* Reserve some space in the Catalog file. */ + if ((retval = cat_preflight(hfsmp, (2 * CAT_CREATE)+ CAT_RENAME, &cookie, p))) { + return (retval); + } + /* Lock catalog b-tree */ retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); if (retval) { - return retval; + goto out2; } /* @@ -219,7 +225,8 @@ hfs_makelink(struct hfsmount *hfsmp, struct cnode *cp, struct cnode *dcp, out: /* Unlock catalog b-tree */ (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); - +out2: + cat_postflight(hfsmp, &cookie, p); return (retval); } @@ -235,6 +242,7 @@ out: IN struct componentname *cnp; */ +__private_extern__ int hfs_link(ap) struct vop_link_args /* { @@ -267,7 +275,7 @@ hfs_link(ap) if (VTOVCB(tdvp)->vcbSigWord != kHFSPlusSigWord) return err_link(ap); /* hfs disks don't support hard links */ - if (hfsmp->hfs_private_metadata_dir == 0) + if (hfsmp->hfs_privdir_desc.cd_cnid == 0) return err_link(ap); /* no private metadata dir, no links possible */ if (tdvp != vp && (error = vn_lock(vp, LK_EXCLUSIVE, p))) { @@ -329,13 +337,23 @@ hfs_link(ap) // XXXdbg - need to do this here as well because cp could have changed error = VOP_UPDATE(vp, &tv, &tv, 1); - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); if (hfsmp->jnl) { journal_end_transaction(hfsmp->jnl); } hfs_global_shared_lock_release(hfsmp); + /* free the pathname buffer */ + { + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); + } + + HFS_KNOTE(vp, NOTE_LINK); + HFS_KNOTE(tdvp, NOTE_WRITE); + out1: if (tdvp != vp) VOP_UNLOCK(vp, 0, p); diff --git a/bsd/hfs/hfs_lookup.c b/bsd/hfs/hfs_lookup.c index c4013f4c3..a12e77b22 100644 --- a/bsd/hfs/hfs_lookup.c +++ b/bsd/hfs/hfs_lookup.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -274,16 +274,15 @@ notfound: retval = EJUSTRETURN; goto exit; } - + /* * Insert name into cache (as non-existent) if appropriate. * - * Disable negative caching since HFS is case-insensitive. + * Only done for case-sensitive HFS+ volumes. */ -#if 0 - if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE) + if ((hfsmp->hfs_flags & HFS_CASE_SENSITIVE) && + (cnp->cn_flags & MAKEENTRY) && nameiop != CREATE) cache_enter(dvp, *vpp, cnp); -#endif retval = ENOENT; goto exit; } @@ -456,6 +455,34 @@ found: cache_enter(dvp, *vpp, cnp); } + + // + // have to patch up the resource fork name because + // it won't happen properly in the layers above us. + // + if (wantrsrc) { + if (VTOC(*vpp)->c_vp == NULL) { + if (VNAME(*vpp) == NULL) { + VNAME(*vpp) = add_name(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0); + } + if (VPARENT(*vpp) == NULL) { + vget(dvp, 0, p); + VPARENT(*vpp) = dvp; + } + } else { + if (VNAME(*vpp) == NULL) { + // the +1/-2 thing is to skip the leading "/" on the rsrc fork spec + // and to not count the trailing null byte at the end of the string. 
+ VNAME(*vpp) = add_name(_PATH_RSRCFORKSPEC+1, sizeof(_PATH_RSRCFORKSPEC)-2, 0, 0); + } + if (VPARENT(*vpp) == NULL && *vpp != VTOC(*vpp)->c_vp) { + VPARENT(*vpp) = VTOC(*vpp)->c_vp; + VTOC(*vpp)->c_flag |= C_VPREFHELD; + vget(VTOC(*vpp)->c_vp, 0, p); + } + } + } + exit: cat_releasedesc(&desc); return (retval); @@ -483,6 +510,8 @@ exit: * */ +#define S_IXALL 0000111 + __private_extern__ int hfs_cache_lookup(ap) @@ -495,16 +524,15 @@ hfs_cache_lookup(ap) struct vnode *dvp; struct vnode *vp; struct cnode *cp; + struct cnode *dcp; int lockparent; int error; struct vnode **vpp = ap->a_vpp; struct componentname *cnp = ap->a_cnp; - struct ucred *cred = cnp->cn_cred; int flags = cnp->cn_flags; struct proc *p = cnp->cn_proc; u_long vpid; /* capability number of vnode */ - *vpp = NULL; dvp = ap->a_dvp; lockparent = flags & LOCKPARENT; @@ -514,11 +542,17 @@ hfs_cache_lookup(ap) if (dvp->v_type != VDIR) return (ENOTDIR); if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && - (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) - return (EROFS); - if ((error = VOP_ACCESS(dvp, VEXEC, cred, cnp->cn_proc))) - return (error); + (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) { + error = EROFS; + goto err_exit; + } + dcp = VTOC(dvp); + if (((dcp->c_mode & S_IXALL) != S_IXALL) && (cnp->cn_cred->cr_uid != 0)) { + if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p))) { + goto err_exit; + } + } /* * Lookup an entry in the cache * If the lookup succeeds, the vnode is returned in *vpp, and a status of -1 is @@ -527,14 +561,15 @@ hfs_cache_lookup(ap) * fails, a status of zero is returned. */ error = cache_lookup(dvp, vpp, cnp); - if (error == 0) { /* Unsuccessfull */ - error = hfs_lookup(ap); - return (error); + if (error != -1) { + if (error == 0) { /* Unsuccessfull */ + goto lookup; + } + + if (error == ENOENT) { + goto err_exit; + } } - - if (error == ENOENT) - return (error); - /* We have a name that matched */ vp = *vpp; vpid = vp->v_id; @@ -583,20 +618,51 @@ hfs_cache_lookup(ap) int wantrsrc = 0; cnp->cn_consume = forkcomponent(cnp, &wantrsrc); - - /* Fork names are only for lookups */ - if (cnp->cn_consume && - (cnp->cn_nameiop != LOOKUP && cnp->cn_nameiop != CREATE)) - return (EPERM); - /* - * We only store data forks in the name cache. - */ - if (wantrsrc) - return (hfs_lookup(ap)); + if (cnp->cn_consume) { + flags |= ISLASTCN; + /* Fork names are only for lookups */ + if (cnp->cn_nameiop != LOOKUP && + cnp->cn_nameiop != CREATE) { + error = EPERM; + + goto err_exit; + } + } + + if (wantrsrc) { + /* Use cnode's rsrcfork vnode (if available) */ + if (cp->c_rsrc_vp != NULL) { + *vpp = vp = cp->c_rsrc_vp; + if (VNAME(vp) == NULL) { + // the +1/-2 thing is to skip the leading "/" on the rsrc fork spec + // and to not count the trailing null byte at the end of the string. + VNAME(vp) = add_name(_PATH_RSRCFORKSPEC+1, sizeof(_PATH_RSRCFORKSPEC)-2, 0, 0); + } + if (VPARENT(vp) == NULL) { + vget(cp->c_vp, 0, p); + VPARENT(vp) = cp->c_vp; + } + vpid = vp->v_id; + } else { + goto lookup; + } + } + } + error = vget(vp, 0, p); + if (error == 0) { + if (VTOC(vp) == NULL || vp->v_data != (void *)cp) { + panic("hfs: cache lookup: my cnode disappeared/went bad! 
vp 0x%x 0x%x 0x%x\n", + vp, vp->v_data, cp); + } + if (cnp->cn_nameiop == LOOKUP && + (!(flags & ISLASTCN) || (flags & SHAREDLEAF))) + error = lockmgr(&VTOC(vp)->c_lock, LK_SHARED, NULL, p); + else + error = lockmgr(&VTOC(vp)->c_lock, LK_EXCLUSIVE, NULL, p); + } + if (!lockparent || error || !(flags & ISLASTCN)) { + (void) lockmgr(&dcp->c_lock, LK_RELEASE, NULL, p); } - error = vget(vp, LK_EXCLUSIVE, p); - if (!lockparent || error || !(flags & ISLASTCN)) - VOP_UNLOCK(dvp, 0, p); } /* * Check that the capability number did not change @@ -616,8 +682,12 @@ hfs_cache_lookup(ap) if ((error = vn_lock(dvp, LK_EXCLUSIVE, p))) return (error); - +lookup: return (hfs_lookup(ap)); + +err_exit: + *vpp = NULL; + return (error); } diff --git a/bsd/hfs/hfs_mount.h b/bsd/hfs/hfs_mount.h index 38aac250e..b9b656f6e 100644 --- a/bsd/hfs/hfs_mount.h +++ b/bsd/hfs/hfs_mount.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -64,6 +64,17 @@ struct hfs_mount_args { #define HFSFSMNT_WRAPPER 0x2 /* mount HFS wrapper (if it exists) */ #define HFSFSMNT_EXTENDED_ARGS 0x4 /* indicates new fields after "flags" are valid */ +/* + * Sysctl values for HFS + */ +#define HFS_ENCODINGBIAS 1 /* encoding matching CJK bias */ +#define HFS_EXTEND_FS 2 +#define HFS_ENCODINGHINT 3 /* guess encoding for string */ +#define HFS_ENABLE_JOURNALING 0x082969 +#define HFS_DISABLE_JOURNALING 0x031272 +#define HFS_GET_JOURNAL_INFO 0x6a6e6c69 +#define HFS_SET_PKG_EXTENSIONS 0x121031 + #endif /* __APPLE_API_UNSTABLE */ #endif /* ! _HFS_MOUNT_H_ */ diff --git a/bsd/hfs/hfs_notification.c b/bsd/hfs/hfs_notification.c new file mode 100644 index 000000000..67f245511 --- /dev/null +++ b/bsd/hfs/hfs_notification.c @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "hfs.h" +#include "hfs_catalog.h" +#include "hfs_cnode.h" +#include "hfs_lockf.h" +#include "hfs_dbg.h" +#include "hfs_mount.h" +#include "hfs_quota.h" +#include "hfs_endian.h" + +#include "hfscommon/headers/BTreesInternal.h" +#include "hfscommon/headers/FileMgrInternal.h" + + + +void hfs_generate_volume_notifications(struct hfsmount *hfsmp) { + ExtendedVCB *vcb = HFSTOVCB(hfsmp); + + if (hfsmp->hfs_notification_conditions & VQ_LOWDISK) { + /* Check to see whether the free space is back above the minimal level: */ + if (hfs_freeblks(hfsmp, 1) > hfsmp->hfs_freespace_notify_desiredlevel) { + hfsmp->hfs_notification_conditions &= ~VQ_LOWDISK; + vfs_event_signal(&HFSTOVFS(hfsmp)->mnt_stat.f_fsid, hfsmp->hfs_notification_conditions, NULL); + } + } else { + /* Check to see whether the free space fell below the requested limit: */ + if (hfs_freeblks(hfsmp, 1) < hfsmp->hfs_freespace_notify_warninglimit) { + hfsmp->hfs_notification_conditions |= VQ_LOWDISK; + vfs_event_signal(&HFSTOVFS(hfsmp)->mnt_stat.f_fsid, hfsmp->hfs_notification_conditions, NULL); + } + }; +} diff --git a/bsd/hfs/hfs_quota.c b/bsd/hfs/hfs_quota.c index c486d3a1c..e294f9dc1 100644 --- a/bsd/hfs/hfs_quota.c +++ b/bsd/hfs/hfs_quota.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2002-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -707,11 +707,9 @@ again: for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nextvp) { if (vp->v_mount != mp) goto again; - nextvp = vp->v_mntvnodes.le_next; simple_lock(&vp->v_interlock); simple_unlock(&mntvnode_slock); - error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p); if (error) { simple_lock(&mntvnode_slock); @@ -720,13 +718,11 @@ again: continue; } - // Make sure that this is really an hfs vnode. - // - if ( vp->v_mount != mp - || vp->v_type == VNON - || vp->v_tag != VT_HFS - || VTOC(vp) == NULL) { - + /* Make sure that this is really an hfs vnode. */ + if (vp->v_mount != mp || + vp->v_type == VNON || + vp->v_tag != VT_HFS || + VTOC(vp) == NULL) { vput(vp); simple_lock(&mntvnode_slock); goto again; diff --git a/bsd/hfs/hfs_readwrite.c b/bsd/hfs/hfs_readwrite.c index 8bde675da..10b3a271e 100644 --- a/bsd/hfs/hfs_readwrite.c +++ b/bsd/hfs/hfs_readwrite.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -66,6 +67,10 @@ enum { extern u_int32_t GetLogicalBlockSize(struct vnode *vp); +static int hfs_clonelink(struct vnode *, int, struct ucred *, struct proc *); +static int hfs_clonefile(struct vnode *, int, int, int, struct ucred *, struct proc *); +static int hfs_clonesysfile(struct vnode *, int, int, int, struct ucred *, struct proc *); + /***************************************************************************** * @@ -97,18 +102,16 @@ hfs_read(ap) register struct vnode *vp = ap->a_vp; struct cnode *cp; struct filefork *fp; - struct buf *bp; - daddr_t logBlockNo; - u_long fragSize, moveSize, startOffset, ioxfersize; int devBlockSize = 0; - off_t bytesRemaining; int retval = 0; off_t filesize; off_t filebytes; + off_t start_resid = uio->uio_resid; + /* Preflight checks */ - if (vp->v_type != VREG && vp->v_type != VLNK) - return (EISDIR); /* HFS can only read files */ + if ((vp->v_type != VREG) || !UBCINFOEXISTS(vp)) + return (EPERM); /* can only read regular files */ if (uio->uio_resid == 0) return (0); /* Nothing left to do */ if (uio->uio_offset < 0) @@ -130,105 +133,29 @@ hfs_read(ap) KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_START, (int)uio->uio_offset, uio->uio_resid, (int)filesize, (int)filebytes, 0); - if (UBCISVALID(vp)) { - retval = cluster_read(vp, uio, filesize, devBlockSize, 0); - } else { - - for (retval = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) { - - if ((bytesRemaining = (filesize - uio->uio_offset)) <= 0) - break; - - logBlockNo = (daddr_t)(uio->uio_offset / PAGE_SIZE_64); - startOffset = (u_long) (uio->uio_offset & PAGE_MASK_64); - fragSize = PAGE_SIZE; - - if (((logBlockNo * PAGE_SIZE) + fragSize) < filesize) - ioxfersize = fragSize; - else { - ioxfersize = filesize - (logBlockNo * PAGE_SIZE); - ioxfersize = (ioxfersize + (devBlockSize - 1)) & ~(devBlockSize - 1); - } - moveSize = ioxfersize; - moveSize -= startOffset; - - if (bytesRemaining < moveSize) - moveSize = bytesRemaining; - - if (uio->uio_resid < moveSize) { - moveSize = uio->uio_resid; - }; - if (moveSize == 0) { - break; - }; - - if (( uio->uio_offset + fragSize) >= filesize) { - retval = bread(vp, logBlockNo, ioxfersize, NOCRED, &bp); - - } else if (logBlockNo - 1 == vp->v_lastr && !(vp->v_flag & VRAOFF)) { - daddr_t nextLogBlockNo = logBlockNo + 1; - int nextsize; - - if (((nextLogBlockNo * PAGE_SIZE) + - (daddr_t)fragSize) < filesize) - nextsize = fragSize; - else { - nextsize = filesize - (nextLogBlockNo * PAGE_SIZE); - nextsize = (nextsize + (devBlockSize - 1)) & ~(devBlockSize - 1); - } - retval = breadn(vp, logBlockNo, ioxfersize, &nextLogBlockNo, &nextsize, 1, NOCRED, &bp); - } else { - retval = bread(vp, logBlockNo, ioxfersize, NOCRED, &bp); - }; - - if (retval != E_NONE) { - if (bp) { - brelse(bp); - bp = NULL; - } - break; - }; - vp->v_lastr = logBlockNo; - - /* - * We should only get non-zero b_resid when an I/O retval - * has occurred, which should cause us to break above. - * However, if the short read did not cause an retval, - * then we want to ensure that we do not uiomove bad - * or uninitialized data. - */ - ioxfersize -= bp->b_resid; - - if (ioxfersize < moveSize) { /* XXX PPD This should take the offset into account, too! 
*/ - if (ioxfersize == 0) - break; - moveSize = ioxfersize; - } - if ((startOffset + moveSize) > bp->b_bcount) - panic("hfs_read: bad startOffset or moveSize\n"); - - if ((retval = uiomove((caddr_t)bp->b_data + startOffset, (int)moveSize, uio))) - break; - - if (S_ISREG(cp->c_mode) && - (((startOffset + moveSize) == fragSize) || (uio->uio_offset == filesize))) { - bp->b_flags |= B_AGE; - }; - - brelse(bp); - /* Start of loop resets bp to NULL before reaching outside this block... */ - } - - if (bp != NULL) { - brelse(bp); - } - } + retval = cluster_read(vp, uio, filesize, devBlockSize, 0); cp->c_flag |= C_ACCESS; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_END, (int)uio->uio_offset, uio->uio_resid, (int)filesize, (int)filebytes, 0); + /* + * Keep track blocks read + */ + if (VTOHFS(vp)->hfc_stage == HFC_RECORDING && retval == 0) { + /* + * If this file hasn't been seen since the start of + * the current sampling period then start over. + */ + if (cp->c_atime < VTOHFS(vp)->hfc_timebase) { + fp->ff_bytesread = start_resid - uio->uio_resid; + cp->c_atime = time.tv_sec; + } else { + fp->ff_bytesread += start_resid - uio->uio_resid; + } + } + return (retval); } @@ -256,37 +183,32 @@ hfs_write(ap) struct uio *uio = ap->a_uio; struct cnode *cp; struct filefork *fp; - struct buf *bp; struct proc *p; struct timeval tv; ExtendedVCB *vcb; - int devBlockSize = 0; - daddr_t logBlockNo; - long fragSize; - off_t origFileSize, currOffset, writelimit, bytesToAdd; - off_t actualBytesAdded; - u_long blkoffset, resid, xfersize, clearSize; - int eflags, ioflag; - int retval; + int devBlockSize = 0; + off_t origFileSize, writelimit, bytesToAdd; + off_t actualBytesAdded; + u_long resid; + int eflags, ioflag; + int retval; off_t filebytes; - u_long fileblocks; struct hfsmount *hfsmp; int started_tr = 0, grabbed_lock = 0; - ioflag = ap->a_ioflag; if (uio->uio_offset < 0) return (EINVAL); if (uio->uio_resid == 0) return (E_NONE); - if (vp->v_type != VREG && vp->v_type != VLNK) - return (EISDIR); /* Can only write files */ + if ((vp->v_type != VREG) || !UBCINFOEXISTS(vp)) + return (EPERM); /* Can only write regular files */ + ioflag = ap->a_ioflag; cp = VTOC(vp); fp = VTOF(vp); vcb = VTOVCB(vp); - fileblocks = fp->ff_blocks; - filebytes = (off_t)fileblocks * (off_t)vcb->blockSize; + filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize; if (ioflag & IO_APPEND) uio->uio_offset = fp->ff_size; @@ -297,7 +219,7 @@ hfs_write(ap) if (VTOHFS(vp)->jnl && cp->c_datafork) { struct HFSPlusExtentDescriptor *extd; - extd = &cp->c_datafork->ff_data.cf_extents[0]; + extd = &cp->c_datafork->ff_extents[0]; if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) { return EPERM; } @@ -324,19 +246,6 @@ hfs_write(ap) eflags = kEFDeferMask; /* defer file block allocations */ filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize; - /* - * NOTE: In the following loop there are two positions tracked: - * currOffset is the current I/O starting offset. currOffset - * is never >LEOF; the LEOF is nudged along with currOffset as - * data is zeroed or written. uio->uio_offset is the start of - * the current I/O operation. It may be arbitrarily beyond - * currOffset. 
- * - * The following is true at all times: - * currOffset <= LEOF <= uio->uio_offset <= writelimit - */ - currOffset = MIN(uio->uio_offset, fp->ff_size); - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_START, (int)uio->uio_offset, uio->uio_resid, (int)fp->ff_size, (int)filebytes, 0); retval = 0; @@ -356,6 +265,20 @@ hfs_write(ap) #endif /* QUOTA */ hfsmp = VTOHFS(vp); + +#ifdef HFS_SPARSE_DEV + /* + * When the underlying device is sparse and space + * is low (< 8MB), stop doing delayed allocations + * and begin doing synchronous I/O. + */ + if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && + (hfs_freeblks(hfsmp, 0) < 2048)) { + eflags &= ~kEFDeferMask; + ioflag |= IO_SYNC; + } +#endif /* HFS_SPARSE_DEV */ + if (writelimit > filebytes) { hfs_global_shared_lock_acquire(hfsmp); grabbed_lock = 1; @@ -369,16 +292,19 @@ hfs_write(ap) } while (writelimit > filebytes) { - bytesToAdd = writelimit - filebytes; - if (suser(ap->a_cred, NULL) != 0) + if (ap->a_cred && suser(ap->a_cred, NULL) != 0) eflags |= kEFReserveMask; /* lock extents b-tree (also protects volume bitmap) */ retval = hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_EXCLUSIVE, current_proc()); if (retval != E_NONE) break; - + + /* Files that are changing size are not hot file candidates. */ + if (hfsmp->hfc_stage == HFC_RECORDING) { + fp->ff_bytesread = 0; + } retval = MacToVFSError(ExtendFileC (vcb, (FCB*)fp, bytesToAdd, 0, eflags, &actualBytesAdded)); @@ -394,6 +320,9 @@ hfs_write(ap) // XXXdbg if (started_tr) { + tv = time; + VOP_UPDATE(vp, &tv, &tv, 1); + hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0); journal_end_transaction(hfsmp->jnl); started_tr = 0; @@ -403,7 +332,7 @@ hfs_write(ap) grabbed_lock = 0; } - if (UBCISVALID(vp) && retval == E_NONE) { + if (retval == E_NONE) { off_t filesize; off_t zero_off; off_t tail_off; @@ -427,8 +356,10 @@ hfs_write(ap) of the transfer to see whether is invalid and should be zero-filled as part of the transfer: */ - if (rl_scan(&fp->ff_invalidranges, zero_off, uio->uio_offset - 1, &invalid_range) != RL_NOOVERLAP) - lflag |= IO_HEADZEROFILL; + if (uio->uio_offset > zero_off) { + if (rl_scan(&fp->ff_invalidranges, zero_off, uio->uio_offset - 1, &invalid_range) != RL_NOOVERLAP) + lflag |= IO_HEADZEROFILL; + } } else { off_t eof_page_base = fp->ff_size & ~PAGE_MASK_64; @@ -528,105 +459,10 @@ hfs_write(ap) } if (resid > uio->uio_resid) cp->c_flag |= C_CHANGE | C_UPDATE; - } else { - while (retval == E_NONE && uio->uio_resid > 0) { - logBlockNo = currOffset / PAGE_SIZE; - blkoffset = currOffset & PAGE_MASK; - - if ((filebytes - currOffset) < PAGE_SIZE_64) - fragSize = filebytes - ((off_t)logBlockNo * PAGE_SIZE_64); - else - fragSize = PAGE_SIZE; - xfersize = fragSize - blkoffset; - - /* Make any adjustments for boundary conditions */ - if (currOffset + (off_t)xfersize > writelimit) - xfersize = writelimit - currOffset; - - /* - * There is no need to read into bp if: - * We start on a block boundary and will overwrite the whole block - * - * OR - */ - if ((blkoffset == 0) && (xfersize >= fragSize)) { - bp = getblk(vp, logBlockNo, fragSize, 0, 0, BLK_READ); - retval = 0; - - if (bp->b_blkno == -1) { - brelse(bp); - retval = EIO; /* XXX */ - break; - } - } else { - - if (currOffset == fp->ff_size && blkoffset == 0) { - bp = getblk(vp, logBlockNo, fragSize, 0, 0, BLK_READ); - retval = 0; - if (bp->b_blkno == -1) { - brelse(bp); - retval = EIO; /* XXX */ - break; - } - } else { - /* - * This I/O transfer is not sufficiently aligned, - * so read the affected block into a buffer: - */ - retval = 
bread(vp, logBlockNo, fragSize, ap->a_cred, &bp); - if (retval != E_NONE) { - if (bp) - brelse(bp); - break; - } - } - } - - /* See if we are starting to write within file boundaries: - * If not, then we need to present a "hole" for the area - * between the current EOF and the start of the current - * I/O operation: - * - * Note that currOffset is only less than uio_offset if - * uio_offset > LEOF... - */ - if (uio->uio_offset > currOffset) { - clearSize = MIN(uio->uio_offset - currOffset, xfersize); - bzero(bp->b_data + blkoffset, clearSize); - currOffset += clearSize; - blkoffset += clearSize; - xfersize -= clearSize; - } - - if (xfersize > 0) { - retval = uiomove((caddr_t)bp->b_data + blkoffset, (int)xfersize, uio); - currOffset += xfersize; - } - - if (ioflag & IO_SYNC) { - (void)VOP_BWRITE(bp); - } else if ((xfersize + blkoffset) == fragSize) { - bp->b_flags |= B_AGE; - bawrite(bp); - } else { - bdwrite(bp); - } - - /* Update the EOF if we just extended the file - * (the PEOF has already been moved out and the - * block mapping table has been updated): - */ - if (currOffset > fp->ff_size) { - fp->ff_size = currOffset; - if (UBCISVALID(vp)) - ubc_setsize(vp, fp->ff_size); /* XXX check errors */ - } - if (retval || (resid == 0)) - break; - cp->c_flag |= C_CHANGE | C_UPDATE; - } /* endwhile */ } + HFS_KNOTE(vp, NOTE_WRITE); + ioerr_exit: /* * If we successfully wrote any data, and we are not the superuser @@ -648,6 +484,7 @@ ioerr_exit: tv = time; retval = VOP_UPDATE(vp, &tv, &tv, 1); } + vcb->vcbWrCnt++; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_END, (int)uio->uio_offset, uio->uio_resid, (int)fp->ff_size, (int)filebytes, 0); @@ -656,6 +493,22 @@ ioerr_exit: } +#ifdef HFS_SPARSE_DEV +struct hfs_backingstoreinfo { + int signature; /* == 3419115 */ + int version; /* version of this struct (1) */ + int backingfd; /* disk image file (on backing fs) */ + int bandsize; /* sparse disk image band size */ +}; + +#define HFSIOC_SETBACKINGSTOREINFO _IOW('h', 7, struct hfs_backingstoreinfo) +#define HFSIOC_CLRBACKINGSTOREINFO _IO('h', 8) + +#define HFS_SETBACKINGSTOREINFO IOCBASECMD(HFSIOC_SETBACKINGSTOREINFO) +#define HFS_CLRBACKINGSTOREINFO IOCBASECMD(HFSIOC_CLRBACKINGSTOREINFO) + +#endif /* HFS_SPARSE_DEV */ + /* #% ioctl vp U U U @@ -684,10 +537,127 @@ hfs_ioctl(ap) } */ *ap; { switch (ap->a_command) { - case 1: { + +#ifdef HFS_SPARSE_DEV + case HFS_SETBACKINGSTOREINFO: { + struct hfsmount * hfsmp; + struct vnode * bsfs_rootvp; + struct vnode * di_vp; + struct file * di_fp; + struct hfs_backingstoreinfo *bsdata; + int error = 0; + + hfsmp = VTOHFS(ap->a_vp); + if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) { + return (EALREADY); + } + if (ap->a_p->p_ucred->cr_uid != 0 && + ap->a_p->p_ucred->cr_uid != (HFSTOVFS(hfsmp))->mnt_stat.f_owner) { + return (EACCES); /* must be owner of file system */ + } + bsdata = (struct hfs_backingstoreinfo *)ap->a_data; + if (bsdata == NULL) { + return (EINVAL); + } + if (error = fdgetf(ap->a_p, bsdata->backingfd, &di_fp)) { + return (error); + } + if (fref(di_fp) == -1) { + return (EBADF); + } + if (di_fp->f_type != DTYPE_VNODE) { + frele(di_fp); + return (EINVAL); + } + di_vp = (struct vnode *)di_fp->f_data; + if (ap->a_vp->v_mount == di_vp->v_mount) { + frele(di_fp); + return (EINVAL); + } + + /* + * Obtain the backing fs root vnode and keep a reference + * on it. This reference will be dropped in hfs_unmount. 
+ */ + error = VFS_ROOT(di_vp->v_mount, &bsfs_rootvp); + if (error) { + frele(di_fp); + return (error); + } + VOP_UNLOCK(bsfs_rootvp, 0, ap->a_p); /* Hold on to the reference */ + + hfsmp->hfs_backingfs_rootvp = bsfs_rootvp; + hfsmp->hfs_flags |= HFS_HAS_SPARSE_DEVICE; + hfsmp->hfs_sparsebandblks = bsdata->bandsize / HFSTOVCB(hfsmp)->blockSize; + hfsmp->hfs_sparsebandblks *= 4; + + frele(di_fp); + return (0); + } + case HFS_CLRBACKINGSTOREINFO: { + struct hfsmount * hfsmp; + struct vnode * tmpvp; + + hfsmp = VTOHFS(ap->a_vp); + if (ap->a_p->p_ucred->cr_uid != 0 && + ap->a_p->p_ucred->cr_uid != (HFSTOVFS(hfsmp))->mnt_stat.f_owner) { + return (EACCES); /* must be owner of file system */ + } + if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && + hfsmp->hfs_backingfs_rootvp) { + + hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE; + tmpvp = hfsmp->hfs_backingfs_rootvp; + hfsmp->hfs_backingfs_rootvp = NULLVP; + hfsmp->hfs_sparsebandblks = 0; + vrele(tmpvp); + } + return (0); + } +#endif /* HFS_SPARSE_DEV */ + + case 6: { + int error; + + ap->a_vp->v_flag |= VFULLFSYNC; + error = VOP_FSYNC(ap->a_vp, ap->a_cred, MNT_NOWAIT, ap->a_p); + ap->a_vp->v_flag &= ~VFULLFSYNC; + + return error; + } + case 5: { + register struct vnode *vp; register struct cnode *cp; + struct filefork *fp; + int error; + + vp = ap->a_vp; + cp = VTOC(vp); + fp = VTOF(vp); + + if (vp->v_type != VREG) + return EINVAL; + + VOP_LEASE(vp, ap->a_p, ap->a_cred, LEASE_READ); + error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p); + if (error) + return (error); + + /* + * used by regression test to determine if + * all the dirty pages (via write) have been cleaned + * after a call to 'fsync'. + */ + error = is_file_clean(vp, fp->ff_size); + VOP_UNLOCK(vp, 0, ap->a_p); + + return (error); + } + + case 1: { register struct vnode *vp; register struct radvisory *ra; + register struct cnode *cp; struct filefork *fp; int devBlockSize = 0; int error; @@ -992,6 +962,7 @@ hfs_cmap(ap) struct rl_entry *invalid_range; enum rl_overlaptype overlaptype; int started_tr = 0, grabbed_lock = 0; + struct timeval tv; /* * Check for underlying vnode requests and ensure that logical * to physical mapping is requested. */ if (ap->a_bpn == NULL) return (0); p = current_proc(); + + if (ISSET(VTOC(ap->a_vp)->c_flag, C_NOBLKMAP)) { + /* + * File blocks are getting remapped. Wait until it's finished. + */ + SET(VTOC(ap->a_vp)->c_flag, C_WBLKMAP); + (void) tsleep((caddr_t)VTOC(ap->a_vp), PINOD, "hfs_cmap", 0); + if (ISSET(VTOC(ap->a_vp)->c_flag, C_NOBLKMAP)) + panic("hfs_cmap: no mappable blocks"); + } + retry: if (fp->ff_unallocblocks) { lockExtBtree = 1; @@ -1040,7 +1022,7 @@ hfs_cmap(ap) if (fp->ff_unallocblocks) { SInt64 reqbytes, actbytes; - // + // // Make sure we have a transaction. It's possible // that we came in and fp->ff_unallocblocks was zero // but during the time we blocked acquiring the extents @@ -1052,7 +1034,7 @@ hfs_cmap(ap) (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p); lockExtBtree = 0; } - + goto retry; } @@ -1071,6 +1053,10 @@ hfs_cmap(ap) fp->ff_blocks -= fp->ff_unallocblocks; fp->ff_unallocblocks = 0; + /* Files that are changing size are not hot file candidates.
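The hot-file bookkeeping threaded through the read, write, and block-map paths above follows one small rule set: while the mount is in the HFC_RECORDING stage, reads accumulate into ff_bytesread (starting over when the cnode's atime predates the current sampling period's hfc_timebase), and any change in file size disqualifies the file by zeroing the counter. A minimal user-space sketch of that rule, using simplified stand-in types; the names below are illustrative, not xnu's:

#include <time.h>

struct hf_sample {
	long long bytesread;	/* stands in for fp->ff_bytesread */
	time_t atime;		/* stands in for cp->c_atime */
};

/* A read during the recording stage accumulates bytes; a file not
 * seen since the sampling period began (timebase) starts over. */
static void
hf_record_read(struct hf_sample *s, long long nbytes, time_t timebase)
{
	if (s->atime < timebase)
		s->bytesread = nbytes;		/* first read this period */
	else
		s->bytesread += nbytes;		/* accumulate within the period */
	s->atime = time(NULL);
}

/* Growing or shrinking the file removes it from candidacy. */
static void
hf_note_resize(struct hf_sample *s)
{
	s->bytesread = 0;
}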
*/ + if (hfsmp->hfc_stage == HFC_RECORDING) { + fp->ff_bytesread = 0; + } while (retval == 0 && reqbytes > 0) { retval = MacToVFSError(ExtendFileC(HFSTOVCB(hfsmp), (FCB*)fp, reqbytes, 0, @@ -1090,7 +1076,11 @@ hfs_cmap(ap) if (retval) { (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p); + VTOC(ap->a_vp)->c_flag |= C_MODIFIED; if (started_tr) { + tv = time; + VOP_UPDATE(ap->a_vp, &tv, &tv, 1); + hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0); journal_end_transaction(hfsmp->jnl); } @@ -1099,7 +1089,6 @@ hfs_cmap(ap) } return (retval); } - VTOC(ap->a_vp)->c_flag |= C_MODIFIED; } retval = MacToVFSError( @@ -1115,6 +1104,9 @@ hfs_cmap(ap) // XXXdbg if (started_tr) { + tv = time; + retval = VOP_UPDATE(ap->a_vp, &tv, &tv, 1); + hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0); journal_end_transaction(hfsmp->jnl); started_tr = 0; @@ -1361,21 +1353,7 @@ hfs_strategy(ap) } -/* -# -#% truncate vp L L L -# -vop_truncate { - IN struct vnode *vp; - IN off_t length; - IN int flags; (IO_SYNC) - IN struct ucred *cred; - IN struct proc *p; -}; - * Truncate a cnode to at most length size, freeing (or adding) the - * disk blocks. - */ -int hfs_truncate(ap) +static int do_hfs_truncate(ap) struct vop_truncate_args /* { struct vnode *a_vp; off_t a_length; @@ -1420,6 +1398,11 @@ int hfs_truncate(ap) tv = time; retval = E_NONE; + /* Files that are changing size are not hot file candidates. */ + if (hfsmp->hfc_stage == HFC_RECORDING) { + fp->ff_bytesread = 0; + } + /* * We cannot just check if fp->ff_size == length (as an optimization) * since there may be extra physical blocks that also need truncation. @@ -1447,13 +1430,23 @@ int hfs_truncate(ap) */ if (length > filebytes) { int eflags; + u_long blockHint = 0; /* All or nothing and don't round up to clumpsize. */ eflags = kEFAllMask | kEFNoClumpMask; - if (suser(ap->a_cred, NULL) != 0) + if (ap->a_cred && suser(ap->a_cred, NULL) != 0) eflags |= kEFReserveMask; /* keep a reserve */ + /* + * Allocate Journal and Quota files in metadata zone. + */ + if (filebytes == 0 && + hfsmp->hfs_flags & HFS_METADATA_ZONE && + hfs_virtualmetafile(cp)) { + eflags |= kEFMetadataMask; + blockHint = hfsmp->hfs_metazone_start; + } // XXXdbg hfs_global_shared_lock_acquire(hfsmp); if (hfsmp->jnl) { @@ -1479,7 +1472,7 @@ int hfs_truncate(ap) retval = MacToVFSError(ExtendFileC(VTOVCB(vp), (FCB*)fp, bytesToAdd, - 0, + blockHint, eflags, &actualBytesAdded)); @@ -1495,6 +1488,9 @@ int hfs_truncate(ap) // XXXdbg if (hfsmp->jnl) { + tv = time; + VOP_UPDATE(vp, &tv, &tv, 1); + hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0); journal_end_transaction(hfsmp->jnl); } @@ -1642,6 +1638,9 @@ int hfs_truncate(ap) // XXXdbg if (hfsmp->jnl) { + tv = time; + VOP_UPDATE(vp, &tv, &tv, 1); + hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0); journal_end_transaction(hfsmp->jnl); } @@ -1676,6 +1675,83 @@ Err_Exit: } +/* +# +#% truncate vp L L L +# +vop_truncate { + IN struct vnode *vp; + IN off_t length; + IN int flags; (IO_SYNC) + IN struct ucred *cred; + IN struct proc *p; +}; + * Truncate a cnode to at most length size, freeing (or adding) the + * disk blocks. 
+ */ +int hfs_truncate(ap) + struct vop_truncate_args /* { + struct vnode *a_vp; + off_t a_length; + int a_flags; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct cnode *cp = VTOC(vp); + struct filefork *fp = VTOF(vp); + off_t length; + off_t filebytes; + u_long fileblocks; + int blksize, error; + u_int64_t nsize; + + if (vp->v_type != VREG && vp->v_type != VLNK) + return (EISDIR); /* cannot truncate an HFS directory! */ + + length = ap->a_length; + blksize = VTOVCB(vp)->blockSize; + fileblocks = fp->ff_blocks; + filebytes = (off_t)fileblocks * (off_t)blksize; + + // have to loop truncating or growing files that are + // really big because otherwise transactions can get + // enormous and consume too many kernel resources. + if (length < filebytes && (filebytes - length) > HFS_BIGFILE_SIZE) { + while (filebytes > length) { + if ((filebytes - length) > HFS_BIGFILE_SIZE) { + filebytes -= HFS_BIGFILE_SIZE; + } else { + filebytes = length; + } + + ap->a_length = filebytes; + error = do_hfs_truncate(ap); + if (error) + break; + } + } else if (length > filebytes && (length - filebytes) > HFS_BIGFILE_SIZE) { + while (filebytes < length) { + if ((length - filebytes) > HFS_BIGFILE_SIZE) { + filebytes += HFS_BIGFILE_SIZE; + } else { + filebytes = length; + } + + ap->a_length = filebytes; + error = do_hfs_truncate(ap); + if (error) + break; + } + } else { + error = do_hfs_truncate(ap); + } + + return error; +} + + /* # @@ -1706,6 +1782,7 @@ int hfs_allocate(ap) struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct filefork *fp = VTOF(vp); + ExtendedVCB *vcb = VTOVCB(vp); off_t length = ap->a_length; off_t startingPEOF; off_t moreBytesRequested; @@ -1716,31 +1793,30 @@ int hfs_allocate(ap) struct timeval tv; int retval, retval2; UInt32 blockHint; - UInt32 extendFlags =0; /* For call to ExtendFileC */ + UInt32 extendFlags; /* For call to ExtendFileC */ struct hfsmount *hfsmp; hfsmp = VTOHFS(vp); *(ap->a_bytesallocated) = 0; fileblocks = fp->ff_blocks; - filebytes = (off_t)fileblocks * (off_t)VTOVCB(vp)->blockSize; + filebytes = (off_t)fileblocks * (off_t)vcb->blockSize; if (length < (off_t)0) return (EINVAL); - if (vp->v_type != VREG && vp->v_type != VLNK) + if (vp->v_type != VREG) return (EISDIR); - if ((ap->a_flags & ALLOCATEFROMVOL) && (length <= filebytes)) + if ((ap->a_flags & ALLOCATEFROMVOL) && (length < filebytes)) return (EINVAL); /* Fill in the flags word for the call to Extend the file */ + extendFlags = kEFNoClumpMask; if (ap->a_flags & ALLOCATECONTIG) extendFlags |= kEFContigMask; - if (ap->a_flags & ALLOCATEALL) extendFlags |= kEFAllMask; - - if (suser(ap->a_cred, NULL) != 0) + if (ap->a_cred && suser(ap->a_cred, NULL) != 0) extendFlags |= kEFReserveMask; tv = time; @@ -1767,12 +1843,31 @@ int hfs_allocate(ap) #if QUOTA retval = hfs_chkdq(cp, - (int64_t)(roundup(moreBytesRequested, VTOVCB(vp)->blockSize)), + (int64_t)(roundup(moreBytesRequested, vcb->blockSize)), ap->a_cred, 0); if (retval) return (retval); #endif /* QUOTA */ + /* + * Metadata zone checks. + */ + if (hfsmp->hfs_flags & HFS_METADATA_ZONE) { + /* + * Allocate Journal and Quota files in metadata zone. + */ + if (hfs_virtualmetafile(cp)) { + extendFlags |= kEFMetadataMask; + blockHint = hfsmp->hfs_metazone_start; + } else if ((blockHint >= hfsmp->hfs_metazone_start) && + (blockHint <= hfsmp->hfs_metazone_end)) { + /* + * Move blockHint outside metadata zone.
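The chunked loop in hfs_truncate() above bounds how much any single do_hfs_truncate() transaction has to free or allocate. A standalone sketch of the shrink case; HFS_BIGFILE_SIZE is assumed to be 2 GB here purely for illustration (the real value is defined in the HFS headers):

#include <stdio.h>

#define BIGFILE_STEP (0x80000000LL)	/* assumed 2 GB per-transaction cap */

static void
show_truncate_steps(long long filebytes, long long length)
{
	/* Walk down toward the target so that no single
	 * truncate call frees more than one step's worth. */
	while (filebytes > length) {
		if (filebytes - length > BIGFILE_STEP)
			filebytes -= BIGFILE_STEP;
		else
			filebytes = length;
		printf("do_hfs_truncate to %lld\n", filebytes);
	}
}

int
main(void)
{
	/* A 5 GB file cut to 100 MB truncates in three bounded steps. */
	show_truncate_steps(5LL << 30, 100LL << 20);
	return 0;
}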
+ */ + blockHint = hfsmp->hfs_metazone_end + 1; + } + } + // XXXdbg hfs_global_shared_lock_acquire(hfsmp); if (hfsmp->jnl) { @@ -1792,7 +1887,7 @@ int hfs_allocate(ap) goto Err_Exit; } - retval = MacToVFSError(ExtendFileC(VTOVCB(vp), + retval = MacToVFSError(ExtendFileC(vcb, (FCB*)fp, moreBytesRequested, blockHint, @@ -1800,12 +1895,15 @@ int hfs_allocate(ap) &actualBytesAdded)); *(ap->a_bytesallocated) = actualBytesAdded; - filebytes = (off_t)fp->ff_blocks * (off_t)VTOVCB(vp)->blockSize; + filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize; (void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_RELEASE, ap->a_p); // XXXdbg if (hfsmp->jnl) { + tv = time; + VOP_UPDATE(vp, &tv, &tv, 1); + hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0); journal_end_transaction(hfsmp->jnl); } @@ -1827,7 +1925,7 @@ int hfs_allocate(ap) */ if ((actualBytesAdded != 0) && (moreBytesRequested < actualBytesAdded)) *(ap->a_bytesallocated) = - roundup(moreBytesRequested, (off_t)VTOVCB(vp)->blockSize); + roundup(moreBytesRequested, (off_t)vcb->blockSize); } else { /* Shorten the size of the file */ @@ -1863,14 +1961,17 @@ int hfs_allocate(ap) retval = MacToVFSError( TruncateFileC( - VTOVCB(vp), + vcb, (FCB*)fp, length, false)); (void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_RELEASE, ap->a_p); - filebytes = (off_t)fp->ff_blocks * (off_t)VTOVCB(vp)->blockSize; + filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize; if (hfsmp->jnl) { + tv = time; + VOP_UPDATE(vp, &tv, &tv, 1); + hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0); journal_end_transaction(hfsmp->jnl); } @@ -1925,7 +2026,7 @@ hfs_pagein(ap) int devBlockSize = 0; int error; - if (vp->v_type != VREG && vp->v_type != VLNK) + if (vp->v_type != VREG) panic("hfs_pagein: vp not UBC type\n"); VOP_DEVBLOCKSIZE(VTOC(vp)->c_devvp, &devBlockSize); @@ -1933,6 +2034,25 @@ hfs_pagein(ap) error = cluster_pagein(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset, ap->a_size, (off_t)VTOF(vp)->ff_size, devBlockSize, ap->a_flags); + /* + * Keep track of blocks read + */ + if (VTOHFS(vp)->hfc_stage == HFC_RECORDING && error == 0) { + struct cnode *cp; + + cp = VTOC(vp); + /* + * If this file hasn't been seen since the start of + * the current sampling period then start over. + */ + if (cp->c_atime < VTOHFS(vp)->hfc_timebase) + VTOF(vp)->ff_bytesread = ap->a_size; + else + VTOF(vp)->ff_bytesread += ap->a_size; + + cp->c_flag |= C_ACCESS; + } + return (error); } @@ -1966,10 +2086,18 @@ hfs_pageout(ap) filesize = fp->ff_size; end_of_range = ap->a_f_offset + ap->a_size - 1; + if (cp->c_flag & C_RELOCATING) { + if (end_of_range < (filesize / 2)) { + return (EBUSY); + } + } + if (end_of_range >= filesize) end_of_range = (off_t)(filesize - 1); - if (ap->a_f_offset < filesize) + if (ap->a_f_offset < filesize) { rl_remove(ap->a_f_offset, end_of_range, &fp->ff_invalidranges); + cp->c_flag |= C_MODIFIED; /* leof is dirty */ + } retval = cluster_pageout(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset, ap->a_size, filesize, devBlockSize, ap->a_flags); @@ -2036,3 +2164,459 @@ hfs_bwrite(ap) return (retval); } + +/* + * Relocate a file to a new location on disk + * cnode must be locked on entry + * + * Relocation occurs by cloning the file's data from its + * current set of blocks to a new set of blocks. During + * the relocation all of the blocks (old and new) are + * owned by the file.
+ * + * ----------------- + * |///////////////| + * ----------------- + * 0 N (file offset) + * + * ----------------- ----------------- + * |///////////////| | | STEP 1 (acquire new blocks) + * ----------------- ----------------- + * 0 N N+1 2N + * + * ----------------- ----------------- + * |///////////////| |///////////////| STEP 2 (clone data) + * ----------------- ----------------- + * 0 N N+1 2N + * + * ----------------- + * |///////////////| STEP 3 (head truncate blocks) + * ----------------- + * 0 N + * + * During steps 2 and 3 page-outs to file offsets less + * than or equal to N are suspended. + * + * During step 3 page-ins to the file get suspended. + */ +__private_extern__ +int +hfs_relocate(vp, blockHint, cred, p) + struct vnode *vp; + u_int32_t blockHint; + struct ucred *cred; + struct proc *p; +{ + struct filefork *fp; + struct hfsmount *hfsmp; + ExtendedVCB *vcb; + + u_int32_t headblks; + u_int32_t datablks; + u_int32_t blksize; + u_int32_t realsize; + u_int32_t growsize; + u_int32_t nextallocsave; + u_int32_t sector_a; + u_int32_t sector_b; + int eflags; + u_int32_t oldstart; /* debug only */ + off_t newbytes; + int retval; + + if (vp->v_type != VREG && vp->v_type != VLNK) { + return (EPERM); + } + + hfsmp = VTOHFS(vp); + if (hfsmp->hfs_flags & HFS_FRAGMENTED_FREESPACE) { + return (ENOSPC); + } + + fp = VTOF(vp); + if (fp->ff_unallocblocks) + return (EINVAL); + vcb = VTOVCB(vp); + blksize = vcb->blockSize; + if (blockHint == 0) + blockHint = vcb->nextAllocation; + + if ((fp->ff_size > (u_int64_t)0x7fffffff) || + (vp->v_type == VLNK && fp->ff_size > blksize)) { + return (EFBIG); + } + + headblks = fp->ff_blocks; + datablks = howmany(fp->ff_size, blksize); + growsize = datablks * blksize; + realsize = fp->ff_size; + eflags = kEFContigMask | kEFAllMask | kEFNoClumpMask; + if (blockHint >= hfsmp->hfs_metazone_start && + blockHint <= hfsmp->hfs_metazone_end) + eflags |= kEFMetadataMask; + + hfs_global_shared_lock_acquire(hfsmp); + if (hfsmp->jnl) { + if (journal_start_transaction(hfsmp->jnl) != 0) { + return (EINVAL); + } + } + + /* Lock extents b-tree (also protects volume bitmap) */ + retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE, p); + if (retval) + goto out2; + + retval = MapFileBlockC(vcb, (FCB *)fp, 1, growsize - 1, &sector_a, NULL); + if (retval) { + retval = MacToVFSError(retval); + goto out; + } + + /* + * STEP 1 - acquire new allocation blocks. + */ + nextallocsave = vcb->nextAllocation; + retval = ExtendFileC(vcb, (FCB*)fp, growsize, blockHint, eflags, &newbytes); + if (eflags & kEFMetadataMask) + vcb->nextAllocation = nextallocsave; + + retval = MacToVFSError(retval); + if (retval == 0) { + VTOC(vp)->c_flag |= C_MODIFIED; + if (newbytes < growsize) { + retval = ENOSPC; + goto restore; + } else if (fp->ff_blocks < (headblks + datablks)) { + printf("hfs_relocate: allocation failed"); + retval = ENOSPC; + goto restore; + } + + retval = MapFileBlockC(vcb, (FCB *)fp, 1, growsize, &sector_b, NULL); + if (retval) { + retval = MacToVFSError(retval); + } else if ((sector_a + 1) == sector_b) { + retval = ENOSPC; + goto restore; + } else if ((eflags & kEFMetadataMask) && + ((((u_int64_t)sector_b * hfsmp->hfs_phys_block_size) / blksize) > + hfsmp->hfs_metazone_end)) { + printf("hfs_relocate: didn't move into metadata zone\n"); + retval = ENOSPC; + goto restore; + } + } + if (retval) { + /* + * Check to see if failure is due to excessive fragmentation.
+ */ + if (retval == ENOSPC && + hfs_freeblks(hfsmp, 0) > (datablks * 2)) { + hfsmp->hfs_flags |= HFS_FRAGMENTED_FREESPACE; + } + goto out; + } + + fp->ff_size = fp->ff_blocks * blksize; + if (UBCISVALID(vp)) + (void) ubc_setsize(vp, fp->ff_size); + + /* + * STEP 2 - clone data into the new allocation blocks. + */ + + if (vp->v_type == VLNK) + retval = hfs_clonelink(vp, blksize, cred, p); + else if (vp->v_flag & VSYSTEM) + retval = hfs_clonesysfile(vp, headblks, datablks, blksize, cred, p); + else + retval = hfs_clonefile(vp, headblks, datablks, blksize, cred, p); + + if (retval) + goto restore; + + oldstart = fp->ff_extents[0].startBlock; + + /* + * STEP 3 - switch to clone and remove old blocks. + */ + SET(VTOC(vp)->c_flag, C_NOBLKMAP); /* suspend page-ins */ + + retval = HeadTruncateFile(vcb, (FCB*)fp, headblks); + + CLR(VTOC(vp)->c_flag, C_NOBLKMAP); /* resume page-ins */ + if (ISSET(VTOC(vp)->c_flag, C_WBLKMAP)) + wakeup(VTOC(vp)); + if (retval) + goto restore; + + fp->ff_size = realsize; + if (UBCISVALID(vp)) { + (void) ubc_setsize(vp, realsize); + (void) vinvalbuf(vp, V_SAVE, cred, p, 0, 0); + } + + CLR(VTOC(vp)->c_flag, C_RELOCATING); /* Resume page-outs for this file. */ +out: + (void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_RELEASE, p); + + retval = VOP_FSYNC(vp, cred, MNT_WAIT, p); +out2: + if (hfsmp->jnl) { + if (VTOC(vp)->c_cnid < kHFSFirstUserCatalogNodeID) + (void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH); + else + (void) hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0); + journal_end_transaction(hfsmp->jnl); + } + hfs_global_shared_lock_release(hfsmp); + + return (retval); + +restore: + /* + * Give back any newly allocated space. + */ + if (fp->ff_size != realsize) + fp->ff_size = realsize; + (void) TruncateFileC(vcb, (FCB*)fp, fp->ff_size, false); + if (UBCISVALID(vp)) + (void) ubc_setsize(vp, fp->ff_size); + CLR(VTOC(vp)->c_flag, C_RELOCATING); + goto out; +} + + +/* + * Clone a symlink. + * + */ +static int +hfs_clonelink(struct vnode *vp, int blksize, struct ucred *cred, struct proc *p) +{ + struct buf *head_bp = NULL; + struct buf *tail_bp = NULL; + int error; + + + error = meta_bread(vp, 0, blksize, cred, &head_bp); + if (error) + goto out; + + tail_bp = getblk(vp, 1, blksize, 0, 0, BLK_META); + if (tail_bp == NULL) { + error = EIO; + goto out; + } + bcopy(head_bp->b_data, tail_bp->b_data, blksize); + error = bwrite(tail_bp); +out: + if (head_bp) { + head_bp->b_flags |= B_INVAL; + brelse(head_bp); + } + (void) vinvalbuf(vp, V_SAVE, cred, p, 0, 0); + + return (error); +} + +/* + * Clone a file's data within the file. + * + */ +static int +hfs_clonefile(struct vnode *vp, int blkstart, int blkcnt, int blksize, + struct ucred *cred, struct proc *p) +{ + caddr_t bufp; + size_t writebase; + size_t bufsize; + size_t copysize; + size_t iosize; + size_t filesize; + size_t offset; + struct uio auio; + struct iovec aiov; + int devblocksize; + int didhold; + int error; + + + if ((error = vinvalbuf(vp, V_SAVE, cred, p, 0, 0))) { + printf("hfs_clonefile: vinvalbuf failed - %d\n", error); + return (error); + } + + if (!ubc_clean(vp, 1)) { + printf("hfs_clonefile: not ubc_clean\n"); + return (EIO); /* XXX error code */ + } + + /* + * Suspend page-outs for this file. 
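Stripped of the locking, journaling, and metadata-zone details, the control flow of hfs_relocate() above reduces to the three steps in its diagram plus a single undo path. A condensed sketch; every helper below is a hypothetical stand-in, not an xnu function:

/* Hypothetical stand-ins for the three phases; 0 means success. */
static int extend_with_new_blocks(void) { return 0; }		/* ExtendFileC */
static int clone_data_to_new_blocks(void) { return 0; }	/* hfs_clonefile et al. */
static int head_truncate_old_blocks(void) { return 0; }	/* HeadTruncateFile */
static void give_back_new_blocks(void) { }			/* TruncateFileC */

static int
relocate_sketch(void)
{
	int err;

	if ((err = extend_with_new_blocks()))	/* STEP 1: grow to 2N blocks */
		goto restore;
	if ((err = clone_data_to_new_blocks()))	/* STEP 2: copy 0..N into N+1..2N */
		goto restore;
	if ((err = head_truncate_old_blocks()))	/* STEP 3: drop old blocks 0..N */
		goto restore;
	return 0;
restore:
	give_back_new_blocks();			/* undo STEP 1 on any failure */
	return err;
}

int
main(void)
{
	return relocate_sketch();	/* trivially 0 with the stub phases */
}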
+ */ + SET(VTOC(vp)->c_flag, C_RELOCATING); + + filesize = VTOF(vp)->ff_size; + writebase = blkstart * blksize; + copysize = blkcnt * blksize; + iosize = bufsize = MIN(copysize, 4096 * 16); + offset = 0; + + if (kmem_alloc(kernel_map, (vm_offset_t *)&bufp, bufsize)) { + return (ENOMEM); + } + + VOP_DEVBLOCKSIZE(VTOC(vp)->c_devvp, &devblocksize); + + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_procp = p; + + while (offset < copysize) { + iosize = MIN(copysize - offset, iosize); + + aiov.iov_base = bufp; + aiov.iov_len = iosize; + auio.uio_resid = iosize; + auio.uio_offset = offset; + auio.uio_rw = UIO_READ; + + error = cluster_read(vp, &auio, copysize, devblocksize, 0); + if (error) { + printf("hfs_clonefile: cluster_read failed - %d\n", error); + break; + } + if (auio.uio_resid != 0) { + printf("clonedata: cluster_read: uio_resid = %d\n", (int)auio.uio_resid); + error = EIO; + break; + } + + + aiov.iov_base = bufp; + aiov.iov_len = iosize; + auio.uio_resid = iosize; + auio.uio_offset = writebase + offset; + auio.uio_rw = UIO_WRITE; + + error = cluster_write(vp, &auio, filesize + offset, + filesize + offset + iosize, + auio.uio_offset, 0, devblocksize, 0); + if (error) { + printf("hfs_clonefile: cluster_write failed - %d\n", error); + break; + } + if (auio.uio_resid != 0) { + printf("hfs_clonefile: cluster_write failed - uio_resid not zero\n"); + error = EIO; + break; + } + offset += iosize; + } + if (error == 0) { + /* Clean the pages in VM. */ + didhold = ubc_hold(vp); + if (didhold) + (void) ubc_clean(vp, 1); + + /* + * Clean out all associated buffers. + */ + (void) vinvalbuf(vp, V_SAVE, cred, p, 0, 0); + + if (didhold) + ubc_rele(vp); + } + kmem_free(kernel_map, (vm_offset_t)bufp, bufsize); + + return (error); +} + +/* + * Clone a system (metadata) file. 
+ * + */ +static int +hfs_clonesysfile(struct vnode *vp, int blkstart, int blkcnt, int blksize, + struct ucred *cred, struct proc *p) +{ + caddr_t bufp; + char * offset; + size_t bufsize; + size_t iosize; + struct buf *bp = NULL; + daddr_t blkno; + daddr_t blk; + int breadcnt; + int i; + int error = 0; + + + iosize = GetLogicalBlockSize(vp); + bufsize = MIN(blkcnt * blksize, 1024 * 1024) & ~(iosize - 1); + breadcnt = bufsize / iosize; + + if (kmem_alloc(kernel_map, (vm_offset_t *)&bufp, bufsize)) { + return (ENOMEM); + } + blkstart = (blkstart * blksize) / iosize; + blkcnt = (blkcnt * blksize) / iosize; + blkno = 0; + + while (blkno < blkcnt) { + /* + * Read up to a megabyte + */ + offset = bufp; + for (i = 0, blk = blkno; (i < breadcnt) && (blk < blkcnt); ++i, ++blk) { + error = meta_bread(vp, blk, iosize, cred, &bp); + if (error) { + printf("hfs_clonesysfile: meta_bread error %d\n", error); + goto out; + } + if (bp->b_bcount != iosize) { + printf("hfs_clonesysfile: b_bcount is only %d\n", bp->b_bcount); + goto out; + } + + bcopy(bp->b_data, offset, iosize); + bp->b_flags |= B_INVAL; + brelse(bp); + bp = NULL; + offset += iosize; + } + + /* + * Write up to a megabyte + */ + offset = bufp; + for (i = 0; (i < breadcnt) && (blkno < blkcnt); ++i, ++blkno) { + bp = getblk(vp, blkstart + blkno, iosize, 0, 0, BLK_META); + if (bp == NULL) { + printf("hfs_clonesysfile: getblk failed on blk %d\n", blkstart + blkno); + error = EIO; + goto out; + } + bcopy(offset, bp->b_data, iosize); + error = bwrite(bp); + bp = NULL; + if (error) + goto out; + offset += iosize; + } + } +out: + if (bp) { + brelse(bp); + } + + kmem_free(kernel_map, (vm_offset_t)bufp, bufsize); + + error = VOP_FSYNC(vp, cred, MNT_WAIT, p); + + return (error); +} + diff --git a/bsd/hfs/hfs_search.c b/bsd/hfs/hfs_search.c index 589e04431..008f78fe0 100644 --- a/bsd/hfs/hfs_search.c +++ b/bsd/hfs/hfs_search.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1997-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -102,7 +102,7 @@ static int CheckCriteria( ExtendedVCB *vcb, searchinfospec_t *searchInfo2, Boolean lookForDup ); -static int CheckAccess(ExtendedVCB *vcb, CatalogKey *key, struct proc *p); +static int CheckAccess(ExtendedVCB *vcb, u_long searchBits, CatalogKey *key, struct proc *p); static int InsertMatch(struct vnode *vp, struct uio *a_uio, CatalogRecord *rec, CatalogKey *key, struct attrlist *returnAttrList, @@ -161,6 +161,7 @@ vop_searchfs { }; */ +__private_extern__ int hfs_search( ap ) struct vop_searchfs_args *ap; /* @@ -198,6 +199,7 @@ hfs_search( ap ) BTScanState myBTScanState; void *user_start = NULL; int user_len; + int32_t searchTime; /* XXX Parameter check a_searchattrs? */ @@ -206,10 +208,32 @@ hfs_search( ap ) if (ap->a_options & ~SRCHFS_VALIDOPTIONSMASK) return (EINVAL); + /* SRCHFS_SKIPLINKS requires root access. + * This option cannot be used with either + * the ATTR_CMN_NAME or ATTR_CMN_PAROBJID + * attributes. 
+ */ + if (ap->a_options & SRCHFS_SKIPLINKS) { + attrgroup_t attrs; + + attrs = ap->a_searchattrs->commonattr | ap->a_returnattrs->commonattr; + if (attrs & (ATTR_CMN_NAME | ATTR_CMN_PAROBJID)) + return (EINVAL); + if ((err = suser(p->p_ucred, &p->p_acflag))) + return (err); + } + if (ap->a_uio->uio_resid <= 0) return (EINVAL); isHFSPlus = (vcb->vcbSigWord == kHFSPlusSigWord); + + searchTime = kMaxMicroSecsInKernel; + if (ap->a_timelimit->tv_sec == 0 && + ap->a_timelimit->tv_usec > 0 && + ap->a_timelimit->tv_usec < kMaxMicroSecsInKernel) { + searchTime = ap->a_timelimit->tv_usec; + } /* UnPack the search boundries, searchInfo1, searchInfo2 */ err = UnpackSearchAttributeBlock(ap->a_vp, ap->a_searchattrs, @@ -256,6 +280,10 @@ hfs_search( ap ) /* Starting a new search. */ /* Make sure the on-disk Catalog file is current */ (void) VOP_FSYNC(vcb->catalogRefNum, NOCRED, MNT_WAIT, p); + if (VTOHFS(ap->a_vp)->jnl) { + journal_flush(VTOHFS(ap->a_vp)->jnl); + } + ap->a_options &= ~SRCHFS_START; bzero( (caddr_t)myCatPositionPtr, sizeof( *myCatPositionPtr ) ); err = BTScanInitialize(catalogFCB, 0, 0, 0, kCatSearchBufferSize, &myBTScanState); @@ -289,7 +317,7 @@ hfs_search( ap ) if ( result == E_NONE ) { if (CheckCriteria(vcb, ap->a_options, ap->a_searchattrs, &rec, keyp, &searchInfo1, &searchInfo2, false) && - CheckAccess(vcb, keyp, ap->a_uio->uio_procp)) { + CheckAccess(vcb, ap->a_options, keyp, ap->a_uio->uio_procp)) { result = InsertMatch(ap->a_vp, ap->a_uio, &rec, keyp, ap->a_returnattrs, @@ -340,12 +368,12 @@ hfs_search( ap ) break; /* Resolve any hardlinks */ - if (isHFSPlus) + if (isHFSPlus && (ap->a_options & SRCHFS_SKIPLINKS) == 0) ResolveHardlink(vcb, (HFSPlusCatalogFile *) myCurrentDataPtr); if (CheckCriteria( vcb, ap->a_options, ap->a_searchattrs, myCurrentDataPtr, myCurrentKeyPtr, &searchInfo1, &searchInfo2, true ) - && CheckAccess(vcb, myCurrentKeyPtr, ap->a_uio->uio_procp)) { + && CheckAccess(vcb, ap->a_options, myCurrentKeyPtr, ap->a_uio->uio_procp)) { err = InsertMatch(ap->a_vp, ap->a_uio, myCurrentDataPtr, myCurrentKeyPtr, ap->a_returnattrs, attributesBuffer, variableBuffer, @@ -373,7 +401,7 @@ hfs_search( ap ) timersub(&myCurrentTime, &myBTScanState.startTime, &myElapsedTime); /* Note: assumes kMaxMicroSecsInKernel is less than 1,000,000 */ if (myElapsedTime.tv_sec > 0 - || myElapsedTime.tv_usec >= kMaxMicroSecsInKernel) { + || myElapsedTime.tv_usec >= searchTime) { timerExpired = true; } } @@ -418,7 +446,12 @@ ResolveHardlink(ExtendedVCB *vcb, HFSPlusCatalogFile *recp) && (SWAP_BE32(recp->userInfo.fdCreator) == kHFSPlusCreator) && ((to_bsd_time(recp->createDate) == vcb->vcbCrDate) || (to_bsd_time(recp->createDate) == VCBTOHFS(vcb)->hfs_metadata_createdate))) { + cnid_t saved_cnid; + + /* Export link's cnid (a unique value) instead of inode's cnid */ + saved_cnid = recp->fileID; (void) resolvelink(VCBTOHFS(vcb), recp->bsdInfo.special.iNodeNum, recp); + recp->fileID = saved_cnid; } } @@ -484,12 +517,130 @@ ComparePartialPascalName ( register ConstStr31Param str, register ConstStr31Para } + + +static char *extension_table=NULL; +static int nexts; +static int max_ext_width; + +static int +extension_cmp(void *a, void *b) +{ + return (strlen((char *)a) - strlen((char *)b)); +} + + +// +// This is the API LaunchServices uses to inform the kernel +// of the list of package extensions to ignore. +// +// Internally we keep the list sorted by the length of +// the extension (from longest to shortest).
We sort the +// list of extensions so that we can speed up our searches +// when comparing file names -- we only compare extensions +// that could possibly fit into the file name, not all of +// them (i.e. a short 8 character name can't have an 8 +// character extension). +// +__private_extern__ int +set_package_extensions_table(void *data, int nentries, int maxwidth) +{ + char *new_exts, *ptr; + int error, i, len; + + if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) { + return EINVAL; + } + + MALLOC(new_exts, char *, nentries * maxwidth, M_TEMP, M_WAITOK); + + error = copyin(data, new_exts, nentries * maxwidth); + if (error) { + FREE(new_exts, M_TEMP); + return error; + } + + if (extension_table) { + FREE(extension_table, M_TEMP); + } + extension_table = new_exts; + nexts = nentries; + max_ext_width = maxwidth; + + qsort(extension_table, nexts, maxwidth, extension_cmp); + + return 0; +} + + +static int +is_package_name(char *name, int len) +{ + int i, extlen; + char *ptr, *name_ext; + + if (len <= 3) { + return 0; + } + + name_ext = NULL; + for(ptr=name; *ptr != '\0'; ptr++) { + if (*ptr == '.') { + name_ext = ptr; + } + } + + // if there is no "." extension, it can't match + if (name_ext == NULL) { + return 0; + } + + // advance over the "." + name_ext++; + + // now iterate over all the extensions to see if any match + ptr = &extension_table[0]; + for(i=0; i < nexts; i++, ptr+=max_ext_width) { + extlen = strlen(ptr); + if (strncmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') { + // aha, a match! + return 1; + } + } + + // if we get here, no extension matched + return 0; +} + +// +// Determine if a name is "inappropriate" where the definition +// of "inappropriate" is up to higher level execs. Currently +// that's limited to /System. 
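A user-space rendering of the suffix match that is_package_name() performs above, against the same fixed-width table layout (one slot of max_ext_width bytes per extension). The table contents and slot width below are made up for the example:

#include <stdio.h>
#include <string.h>

#define MAXWIDTH 16	/* assumed slot width (max_ext_width) */

static const char table[][MAXWIDTH] = { "bundle", "app", "pkg" };
static const int nexts = 3;

static int
is_package_name_demo(const char *name)
{
	const char *ext = strrchr(name, '.');	/* last '.' starts the extension */
	int i;

	if (strlen(name) <= 3 || ext == NULL)
		return 0;
	ext++;					/* step over the '.' */
	for (i = 0; i < nexts; i++)
		if (strcmp(ext, table[i]) == 0)	/* whole-extension match only */
			return 1;
	return 0;
}

int
main(void)
{
	printf("%d %d\n", is_package_name_demo("Foo.app"),	/* matches: 1 */
	    is_package_name_demo("Foo.apple"));			/* no match: 0 */
	return 0;
}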
+// +static int +is_inappropriate_name(char *name, int len) +{ + char *bad_names[] = { "System" }; + int bad_len[] = { 6 }; + int i; + + for(i=0; i < sizeof(bad_names) / sizeof(bad_names[0]); i++) { + if (len == bad_len[i] && strcmp(name, bad_names[i]) == 0) { + return 1; + } + } + + // if we get here, no name matched + return 0; +} + + + /* * Check to see if caller has access rights to this item */ static int -CheckAccess(ExtendedVCB *theVCBPtr, CatalogKey *theKeyPtr, struct proc *theProcPtr) +CheckAccess(ExtendedVCB *theVCBPtr, u_long searchBits, CatalogKey *theKeyPtr, struct proc *theProcPtr) { Boolean isHFSPlus; int myErr; @@ -499,6 +650,8 @@ CheckAccess(ExtendedVCB *theVCBPtr, CatalogKey *theKeyPtr, struct proc *theProcP hfsmount_t * my_hfsmountPtr; struct cat_desc my_cat_desc; struct cat_attr my_cat_attr; + struct FndrDirInfo *finder_info; + myResult = 0; /* default to "no access" */ my_cat_desc.cd_nameptr = NULL; @@ -527,10 +680,34 @@ CheckAccess(ExtendedVCB *theVCBPtr, CatalogKey *theKeyPtr, struct proc *theProcP if ( myErr ) goto ExitThisRoutine; /* no access */ + if (searchBits & SRCHFS_SKIPPACKAGES) { + if (is_package_name(my_cat_desc.cd_nameptr, my_cat_desc.cd_namelen)) { + myResult = 0; + goto ExitThisRoutine; + } + } + + if (searchBits & SRCHFS_SKIPINAPPROPRIATE) { + if ( my_cat_desc.cd_parentcnid == kRootDirID + && is_inappropriate_name(my_cat_desc.cd_nameptr, my_cat_desc.cd_namelen)) { + myResult = 0; + goto ExitThisRoutine; + } + } + + finder_info = (struct FndrDirInfo *)&my_cat_attr.ca_finderinfo[0]; + if ( (searchBits & SRCHFS_SKIPINVISIBLE) + && (SWAP_BE16(finder_info->frFlags) & kIsInvisible)) { + + myResult = 0; + goto ExitThisRoutine; + } + myNodeID = my_cat_desc.cd_parentcnid; /* move up the hierarchy */ myPerms = DerivePermissionSummary(my_cat_attr.ca_uid, my_cat_attr.ca_gid, my_cat_attr.ca_mode, my_hfsmountPtr->hfs_mp, theProcPtr->p_ucred, theProcPtr ); + cat_releasedesc( &my_cat_desc ); if ( (myPerms & X_OK) == 0 ) @@ -574,7 +751,29 @@ CheckCriteria( ExtendedVCB *vcb, break; case kHFSFileRecord: + if ( (searchBits & SRCHFS_MATCHFILES) == 0 ) { /* If we are NOT searching files */ + matched = false; + goto TestDone; + } + break; + case kHFSPlusFileRecord: + /* Check if hardlink links should be skipped. */ + if (searchBits & SRCHFS_SKIPLINKS) { + cnid_t parid = key->hfsPlus.parentID; + HFSPlusCatalogFile *filep = (HFSPlusCatalogFile *)rec; + + if ((SWAP_BE32(filep->userInfo.fdType) == kHardLinkFileType) && + (SWAP_BE32(filep->userInfo.fdCreator) == kHFSPlusCreator)) { + return (false); /* skip over link records */ + } else if ((parid == VCBTOHFS(vcb)->hfs_privdir_desc.cd_cnid) && + (filep->bsdInfo.special.linkCount == 0)) { + return (false); /* skip over unlinked files */ + } + } else if (key->hfsPlus.parentID == VCBTOHFS(vcb)->hfs_privdir_desc.cd_cnid) { + return (false); /* skip over private files */ + } + if ( (searchBits & SRCHFS_MATCHFILES) == 0 ) { /* If we are NOT searching files */ matched = false; goto TestDone; @@ -636,6 +835,42 @@ CheckCriteria( ExtendedVCB *vcb, /* Convert catalog record into cat_attr format. 
*/ cat_convertattr(VCBTOHFS(vcb), rec, &c_attr, &datafork, &rsrcfork); + if (searchBits & SRCHFS_SKIPINVISIBLE) { + int flags; + + switch (rec->recordType) { + case kHFSFolderRecord: + case kHFSPlusFolderRecord: { + struct FndrDirInfo *finder_info; + + finder_info = (struct FndrDirInfo *)&c_attr.ca_finderinfo[0]; + flags = SWAP_BE16(finder_info->frFlags); + break; + } + + case kHFSFileRecord: + case kHFSPlusFileRecord: { + struct FndrFileInfo *finder_info; + + finder_info = (struct FndrFileInfo *)&c_attr.ca_finderinfo[0]; + flags = SWAP_BE16(finder_info->fdFlags); + break; + } + + default: { + flags = kIsInvisible; + break; + } + } + + if (flags & kIsInvisible) { + matched = false; + goto TestDone; + } + } + + + /* Now that we have a record worth searching, see if it matches the search attributes */ if (rec->recordType == kHFSFileRecord || rec->recordType == kHFSPlusFileRecord) { @@ -862,7 +1097,7 @@ InsertMatch( struct vnode *root_vp, struct uio *a_uio, CatalogRecord *rec, u_long packedBufferSize; ExtendedVCB *vcb = VTOVCB(root_vp); Boolean isHFSPlus = vcb->vcbSigWord == kHFSPlusSigWord; - u_long privateDir = VTOHFS(root_vp)->hfs_private_metadata_dir; + u_long privateDir = VTOHFS(root_vp)->hfs_privdir_desc.cd_cnid; struct attrblock attrblk; struct cat_desc c_desc = {0}; struct cat_attr c_attr = {0}; @@ -899,19 +1134,13 @@ InsertMatch( struct vnode *root_vp, struct uio *a_uio, CatalogRecord *rec, c_desc.cd_parentcnid = key->hfs.parentID; } - /* hide open files that have been deleted */ - if ((privateDir != 0) && (c_desc.cd_parentcnid == privateDir)) { - err = 0; - goto exit; - } - attrblk.ab_attrlist = returnAttrList; attrblk.ab_attrbufpp = &rovingAttributesBuffer; attrblk.ab_varbufpp = &rovingVariableBuffer; attrblk.ab_flags = 0; attrblk.ab_blocksize = 0; - hfs_packattrblk(&attrblk, VTOHFS(root_vp), NULL, &c_desc, &c_attr, &datafork, &rsrcfork); + hfs_packattrblk(&attrblk, VTOHFS(root_vp), NULL, &c_desc, &c_attr, &datafork, &rsrcfork, a_uio->uio_procp); packedBufferSize = (char*)rovingVariableBuffer - (char*)attributesBuffer; @@ -1014,12 +1243,8 @@ UnpackSearchAttributeBlock( struct vnode *vp, struct attrlist *alist, searchinfo ++((struct timespec *)attributeBuffer); } if ( a & ATTR_CMN_FNDRINFO ) { - bcopy( attributeBuffer, searchInfo->finderInfo, sizeof(u_long) * 8 ); - (u_long *)attributeBuffer += 8; - } - if ( a & ATTR_CMN_BKUPTIME ) { - searchInfo->lastBackupDate = *((struct timespec *)attributeBuffer); - ++((struct timespec *)attributeBuffer); + bcopy( attributeBuffer, searchInfo->finderInfo, sizeof(u_long) * 8 ); + (u_long *)attributeBuffer += 8; } if ( a & ATTR_CMN_OWNERID ) { searchInfo->uid = *((uid_t *)attributeBuffer); diff --git a/bsd/hfs/hfs_vfsops.c b/bsd/hfs/hfs_vfsops.c index 513f49ca6..d05facee6 100644 --- a/bsd/hfs/hfs_vfsops.c +++ b/bsd/hfs/hfs_vfsops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -74,11 +74,14 @@ #include #include #include +#include #include #include #include #include #include +#include +#include // XXXdbg #include @@ -118,11 +121,14 @@ static int hfs_mountfs __P((struct vnode *devvp, struct mount *mp, struct proc * struct hfs_mount_args *args)); static int hfs_statfs __P((struct mount *mp, register struct statfs *sbp, struct proc *p)); +static int hfs_flushfiles __P((struct mount *, int, struct proc *)); +static int hfs_extendfs __P((struct mount *, u_int64_t, struct proc *)); /* * Called by vfs_mountroot when mounting HFS Plus as root. */ +__private_extern__ int hfs_mountroot() { @@ -146,9 +152,13 @@ hfs_mountroot() } if ((error = hfs_mountfs(rootvp, mp, p, NULL))) { mp->mnt_vfc->vfc_refcount--; + + if (mp->mnt_kern_flag & MNTK_IO_XINFO) + FREE(mp->mnt_xinfo_ptr, M_TEMP); vfs_unbusy(mp, p); + vrele(rootvp); /* release the reference from bdevvp() */ - _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); return (error); } simple_lock(&mountlist_slock); @@ -208,7 +218,8 @@ hfs_mount(mp, path, data, ndp, p) if (mp->mnt_flag & MNT_UPDATE) { hfsmp = VFSTOHFS(mp); - if ((hfsmp->hfs_fs_ronly == 0) && (mp->mnt_flag & MNT_RDONLY)) { + if (((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) && + (mp->mnt_flag & MNT_RDONLY)) { /* use VFS_SYNC to push out System (btree) files */ retval = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p); @@ -221,7 +232,7 @@ hfs_mount(mp, path, data, ndp, p) if ((retval = hfs_flushfiles(mp, flags, p))) goto error_exit; - hfsmp->hfs_fs_ronly = 1; + hfsmp->hfs_flags |= HFS_READ_ONLY; retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0); /* also get the volume bitmap blocks */ @@ -229,16 +240,30 @@ hfs_mount(mp, path, data, ndp, p) retval = VOP_FSYNC(hfsmp->hfs_devvp, NOCRED, MNT_WAIT, p); if (retval) { - hfsmp->hfs_fs_ronly = 0; + hfsmp->hfs_flags &= ~HFS_READ_ONLY; goto error_exit; } + + if (hfsmp->jnl) { + hfs_global_exclusive_lock_acquire(hfsmp); + + journal_close(hfsmp->jnl); + hfsmp->jnl = NULL; + + // Note: we explicitly don't want to shutdown + // access to the jvp because we may need + // it later if we go back to being read-write. + + hfs_global_exclusive_lock_release(hfsmp); + } } if ((mp->mnt_flag & MNT_RELOAD) && (retval = hfs_reload(mp, ndp->ni_cnd.cn_cred, p))) goto error_exit; - if (hfsmp->hfs_fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) { + if ((hfsmp->hfs_flags & HFS_READ_ONLY) && + (mp->mnt_kern_flag & MNTK_WANTRDWR)) { /* * If upgrade to read-write by non-root, then verify * that user has necessary permissions on the device. 
@@ -257,16 +282,61 @@ hfs_mount(mp, path, data, ndp, p) if (retval != E_NONE) goto error_exit; - /* only change hfs_fs_ronly after a successfull write */ - hfsmp->hfs_fs_ronly = 0; + // If the journal was shut down previously because we were + // asked to be read-only, let's start it back up again now + + if ( (HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask) + && hfsmp->jnl == NULL + && hfsmp->jvp != NULL) { + int flags; + + if (hfsmp->hfs_flags & HFS_NEED_JNL_RESET) { + flags = JOURNAL_RESET; + } else { + flags = 0; + } + + hfs_global_exclusive_lock_acquire(hfsmp); + + hfsmp->jnl = journal_open(hfsmp->jvp, + (hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset, + hfsmp->jnl_size, + hfsmp->hfs_devvp, + hfsmp->hfs_phys_block_size, + flags, + 0, + hfs_sync_metadata, hfsmp->hfs_mp); + + hfs_global_exclusive_lock_release(hfsmp); + + if (hfsmp->jnl == NULL) { + retval = EINVAL; + goto error_exit; + } else { + hfsmp->hfs_flags &= ~HFS_NEED_JNL_RESET; + } + + } + + /* Only clear HFS_READ_ONLY after a successful write */ + hfsmp->hfs_flags &= ~HFS_READ_ONLY; } - if ((hfsmp->hfs_fs_ronly == 0) && + if (((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) && (HFSTOVCB(hfsmp)->vcbSigWord == kHFSPlusSigWord)) { /* setup private/hidden directory for unlinked files */ - hfsmp->hfs_private_metadata_dir = FindMetaDataDirectory(HFSTOVCB(hfsmp)); + FindMetaDataDirectory(HFSTOVCB(hfsmp)); if (hfsmp->jnl) hfs_remove_orphans(hfsmp); + + /* + * Allow hot file clustering if conditions allow. + */ + if ((hfsmp->hfs_flags & HFS_METADATA_ZONE) && + (mp->mnt_flag & MNT_RDONLY) && + (mp->mnt_kern_flag & MNTK_WANTRDWR)) { + (void) hfs_recording_init(hfsmp, p); + } } if (args.fspec == 0) { @@ -374,15 +444,22 @@ hfs_changefs(mp, args, p) hfsmp = VFSTOHFS(mp); vcb = HFSTOVCB(hfsmp); - permswitch = (((hfsmp->hfs_unknownpermissions != 0) && ((mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) == 0)) || - ((hfsmp->hfs_unknownpermissions == 0) && ((mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) != 0))); + permswitch = (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) && + ((mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) == 0)) || + (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) == 0) && + (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS))); + /* The root filesystem must operate with actual permissions: */ if (permswitch && (mp->mnt_flag & MNT_ROOTFS) && (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS)) { mp->mnt_flag &= ~MNT_UNKNOWNPERMISSIONS; /* Just say "No". */ return EINVAL; - }; - hfsmp->hfs_unknownpermissions = ((mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) != 0); - namefix = permfix = 0; + } + if (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) + hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS; + else + hfsmp->hfs_flags &= ~HFS_UNKNOWN_PERMS; + + namefix = permfix = 0; /* Change the timezone (Note: this affects all hfs volumes and hfs+ volume create dates) */ if (args->hfs_timezone.tz_minuteswest != VNOVAL) { @@ -413,7 +490,7 @@ hfs_changefs(mp, args, p) /* Change the hfs encoding value (hfs only) */ if ((HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) && - (hfsmp->hfs_encoding != (u_long)VNOVAL) && + (args->hfs_encoding != (u_long)VNOVAL) && (hfsmp->hfs_encoding != args->hfs_encoding)) { retval = hfs_getconverter(args->hfs_encoding, &get_unicode_func, &get_hfsname_func); @@ -482,6 +559,7 @@ loop: continue; } + /* Get the real uid/gid and perm mask from disk.
*/ if (permswitch || permfix) { cp->c_uid = cnattr.ca_uid; cp->c_gid = cnattr.ca_gid; @@ -614,7 +692,6 @@ loop: return (error); } - /* update cnode's catalog descriptor */ (void) replace_desc(cp, &desc); } @@ -640,8 +717,10 @@ loop: vhp = (HFSPlusVolumeHeader *) (bp->b_data + HFS_PRI_OFFSET(sectorsize)); /* Do a quick sanity check */ - if (SWAP_BE16(vhp->signature) != kHFSPlusSigWord || - SWAP_BE16(vhp->version) != kHFSPlusVersion || + if ((SWAP_BE16(vhp->signature) != kHFSPlusSigWord && + SWAP_BE16(vhp->signature) != kHFSXSigWord) || + (SWAP_BE16(vhp->version) != kHFSPlusVersion && + SWAP_BE16(vhp->version) != kHFSXVersion) || SWAP_BE32(vhp->blockSize) != vcb->blockSize) { brelse(bp); return (EIO); @@ -723,8 +802,11 @@ loop: cat_releasedesc(&cndesc); /* Re-establish private/hidden directory for unlinked files */ - hfsmp->hfs_private_metadata_dir = FindMetaDataDirectory(vcb); + FindMetaDataDirectory(vcb); + /* In case any volume information changed to trigger a notification */ + hfs_generate_volume_notifications(hfsmp); + return (0); } @@ -924,8 +1006,6 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p, MALLOC(hfsmp, struct hfsmount *, sizeof(struct hfsmount), M_HFSMNT, M_WAITOK); bzero(hfsmp, sizeof(struct hfsmount)); - - simple_lock_init(&hfsmp->hfs_renamelock); /* * Init the volume information structure @@ -937,9 +1017,11 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p, hfsmp->hfs_devvp = devvp; hfsmp->hfs_phys_block_size = blksize; hfsmp->hfs_phys_block_count = blkcnt; - hfsmp->hfs_media_writeable = 1; - hfsmp->hfs_fs_ronly = ronly; - hfsmp->hfs_unknownpermissions = ((mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) != 0); + hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA; + if (ronly) + hfsmp->hfs_flags |= HFS_READ_ONLY; + if (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) + hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS; for (i = 0; i < MAXQUOTAS; i++) hfsmp->hfs_qfiles[i].qf_vp = NULLVP; @@ -974,9 +1056,9 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p, /* Find out if disk media is writable. */ if (VOP_IOCTL(devvp, DKIOCISWRITABLE, (caddr_t)&iswritable, 0, cred, p) == 0) { if (iswritable) - hfsmp->hfs_media_writeable = 1; + hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA; else - hfsmp->hfs_media_writeable = 0; + hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA; } /* Mount a standard HFS disk */ @@ -1104,8 +1186,43 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p, if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) { mp->mnt_flag |= MNT_JOURNALED; } else { - retval = EINVAL; - goto error_exit; + // if the journal failed to open, then set the lastMountedVersion + // to be "FSK!" which fsck_hfs will see and force the fsck instead + // of just bailing out because the volume is journaled. + if (ronly != 0 || devvp == rootvp) { + HFSPlusVolumeHeader *vhp; + + hfsmp->hfs_flags |= HFS_NEED_JNL_RESET; + + if (mdb_offset == 0) { + mdb_offset = (embeddedOffset / blksize) + HFS_PRI_SECTOR(blksize); + } + + bp = NULL; + retval = meta_bread(devvp, mdb_offset, blksize, cred, &bp); + if (retval == 0) { + vhp = (HFSPlusVolumeHeader *)(bp->b_data + HFS_PRI_OFFSET(blksize)); + + if (SWAP_BE16(vhp->signature) == kHFSPlusSigWord || SWAP_BE16(vhp->signature) == kHFSXSigWord) { + vhp->lastMountedVersion = SWAP_BE32('FSK!'); + bwrite(bp); + } else { + brelse(bp); + } + bp = NULL; + } else if (bp) { + brelse(bp); + } + } + + // if this isn't the root device just bail out. 
+ // if it is the root device we just continue on + // in the hopes that fsck_hfs will be able to + // fix any damage that exists on the volume. + if (devvp != rootvp) { + retval = EINVAL; + goto error_exit; + } } } // XXXdbg @@ -1134,6 +1251,15 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p, hfsmp->hfs_phys_block_count *= hfsmp->hfs_phys_block_size / blksize; hfsmp->hfs_phys_block_size = blksize; + if (hfsmp->jnl) { + // close and re-open this with the new block size + journal_close(hfsmp->jnl); + hfsmp->jnl = NULL; + if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) { + mp->mnt_flag |= MNT_JOURNALED; + } + } + /* Try again with a smaller block size... */ retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args); } @@ -1150,6 +1276,45 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p, mp->mnt_maxsymlinklen = 0; devvp->v_specflags |= SI_MOUNTEDON; + if (args) { + /* + * Set the free space warning levels for a non-root volume: + * + * Set the lower freespace limit (the level that will trigger a warning) + * to 5% of the volume size or 250MB, whichever is less, and the desired + * level (which will cancel the alert request) to 1/2 above that limit. + * Start looking for free space to drop below this level and generate a + * warning immediately if needed: + */ + hfsmp->hfs_freespace_notify_warninglimit = + MIN(HFS_LOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize, + (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKTRIGGERFRACTION); + hfsmp->hfs_freespace_notify_desiredlevel = + MIN(HFS_LOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize, + (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKSHUTOFFFRACTION); + } else { + /* + * Set the free space warning levels for the root volume: + * + * Set the lower freespace limit (the level that will trigger a warning) + * to 1% of the volume size or 50MB, whichever is less, and the desired + * level (which will cancel the alert request) to 2% or 75MB, whichever is less. 
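A worked example of the warning-level arithmetic above. The 250 MB trigger and 5% fraction are assumed from the comment (the actual HFS_LOWDISK* constants are defined elsewhere in the headers), and the volume geometry is hypothetical:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int
main(void)
{
	long long blocksize = 4096;				/* 4 KB allocation blocks */
	long long totalblocks = (80LL << 30) / blocksize;	/* 80 GB volume */
	long long triggerbytes = 250LL << 20;			/* assumed 250 MB level */
	int triggerpct = 5;					/* assumed 5% fraction */

	/* Warn when free space drops below min(250 MB, 5% of the volume);
	 * on a volume this large the absolute 250 MB cap is what wins. */
	long long warnblocks = MIN(triggerbytes / blocksize,
	    (totalblocks / 100) * triggerpct);

	printf("warn below %lld blocks (%lld MB)\n",
	    warnblocks, (warnblocks * blocksize) >> 20);
	return 0;
}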
+ */ + hfsmp->hfs_freespace_notify_warninglimit = + MIN(HFS_ROOTLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize, + (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKTRIGGERFRACTION); + hfsmp->hfs_freespace_notify_desiredlevel = + MIN(HFS_ROOTLOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize, + (HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKSHUTOFFFRACTION); + }; + + /* + * Start looking for free space to drop below this level and generate a + * warning immediately if needed: + */ + hfsmp->hfs_notification_conditions = 0; + hfs_generate_volume_notifications(hfsmp); + if (ronly == 0) { (void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0); } @@ -1214,13 +1379,16 @@ hfs_unmount(mp, mntflags, p) if ((retval = hfs_flushfiles(mp, flags, p)) && !force) return (retval); + if (hfsmp->hfs_flags & HFS_METADATA_ZONE) + (void) hfs_recording_suspend(hfsmp, p); + /* * Flush out the b-trees, volume bitmap and Volume Header */ - if (hfsmp->hfs_fs_ronly == 0) { + if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) { hfs_global_shared_lock_acquire(hfsmp); grabbed_lock = 1; - if (hfsmp->jnl) { + if (hfsmp->jnl) { journal_start_transaction(hfsmp->jnl); started_tr = 1; } @@ -1242,18 +1410,27 @@ hfs_unmount(mp, mntflags, p) } } + if (hfsmp->hfc_filevp && (hfsmp->hfc_filevp->v_flag & VSYSTEM)) { + retval = VOP_FSYNC(hfsmp->hfc_filevp, NOCRED, MNT_WAIT, p); + if (retval && !force) + goto err_exit; + } + if (retval = VOP_FSYNC(hfsmp->hfs_devvp, NOCRED, MNT_WAIT, p)) { if (!force) goto err_exit; } - + +#if 0 /* See if this volume is damaged, is so do not unmount cleanly */ if (HFSTOVCB(hfsmp)->vcbFlags & kHFS_DamagedVolume) { HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask; } else { HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask; } - +#else + HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask; +#endif retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1); if (retval) { HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask; @@ -1280,26 +1457,45 @@ hfs_unmount(mp, mntflags, p) */ (void) hfsUnmount(hfsmp, p); + /* + * Last chance to dump unreferenced system files. + */ + (void) vflush(mp, NULLVP, FORCECLOSE); + if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) (void) hfs_relconverter(hfsmp->hfs_encoding); // XXXdbg if (hfsmp->jnl) { journal_close(hfsmp->jnl); + hfsmp->jnl = NULL; } if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) { - retval = VOP_CLOSE(hfsmp->jvp, hfsmp->hfs_fs_ronly ? FREAD : FREAD|FWRITE, + retval = VOP_CLOSE(hfsmp->jvp, + hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, NOCRED, p); vrele(hfsmp->jvp); - hfsmp->jvp = NULL; + hfsmp->jvp = NULL; } // XXXdbg +#ifdef HFS_SPARSE_DEV + /* Drop our reference on the backing fs (if any). */ + if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && hfsmp->hfs_backingfs_rootvp) { + struct vnode * tmpvp; + + hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE; + tmpvp = hfsmp->hfs_backingfs_rootvp; + hfsmp->hfs_backingfs_rootvp = NULLVP; + vrele(tmpvp); + } +#endif /* HFS_SPARSE_DEV */ + hfsmp->hfs_devvp->v_specflags &= ~SI_MOUNTEDON; retval = VOP_CLOSE(hfsmp->hfs_devvp, - hfsmp->hfs_fs_ronly ? FREAD : FREAD|FWRITE, - NOCRED, p); + hfsmp->hfs_flags & HFS_READ_ONLY ? 
FREAD : FREAD|FWRITE, + NOCRED, p); if (retval && !force) return(retval); @@ -1551,9 +1747,8 @@ hfs_sync(mp, waitfor, cred, p) return (0); hfsmp = VFSTOHFS(mp); - if (hfsmp->hfs_fs_ronly != 0) { - panic("update: rofs mod"); - }; + if (hfsmp->hfs_flags & HFS_READ_ONLY) + return (EROFS); #if 0 // XXXdbg first go through and flush out any modified @@ -1590,12 +1785,7 @@ loop: // restart our whole search if this guy is locked // or being reclaimed. - // XXXdbg - at some point this should go away or we - // need to change all file systems to have - // this same code. vget() should never return - // success if either of these conditions is - // true. - if (vp->v_tag != VT_HFS || cp == NULL) { + if (vp->v_tag != VT_HFS || cp == NULL || vp->v_flag & (VXLOCK|VORECLAIM)) { simple_unlock(&vp->v_interlock); continue; } @@ -1619,9 +1809,15 @@ loop: } didhold = ubc_hold(vp); + + // mark the cnode so that fsync won't flush + // the journal since we're going to do that... + cp->c_flag |= C_FROMSYNC; if ((error = VOP_FSYNC(vp, cred, waitfor, p))) { allerror = error; }; + cp->c_flag &= ~C_FROMSYNC; + VOP_UNLOCK(vp, 0, p); if (didhold) ubc_rele(vp); @@ -1675,6 +1871,8 @@ loop: #if QUOTA hfs_qsync(mp); #endif /* QUOTA */ + + hfs_hotfilesync(hfsmp, p); /* * Write back modified superblock. */ @@ -1731,7 +1929,7 @@ hfs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp) * Get the export permission structure for this tuple. */ np = vfs_export_lookup(mp, &VFSTOHFS(mp)->hfs_export, nam); - if (np == NULL) { + if (nam && (np == NULL)) { return EACCES; }; @@ -1755,9 +1953,23 @@ hfs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp) return (ESTALE); }; + if (VNAME(nvp) == NULL) { + struct cnode *cp = VTOC(nvp); + + if (nvp == cp->c_rsrc_vp) { + // the +1/-2 thing is to skip the leading "/" on the rsrc fork spec + // and to not count the trailing null byte at the end of the string. + VNAME(nvp) = add_name(_PATH_RSRCFORKSPEC+1, sizeof(_PATH_RSRCFORKSPEC)-2, 0, 0); + } else { + VNAME(nvp) = add_name(cp->c_desc.cd_nameptr, cp->c_desc.cd_namelen, 0, 0); + } + } + *vpp = nvp; - *exflagsp = np->netc_exflags; - *credanonp = &np->netc_anon; + if (np) { + *exflagsp = np->netc_exflags; + *credanonp = &np->netc_anon; + } return (0); } @@ -1782,7 +1994,7 @@ hfs_vptofh(vp, fhp) hfsfhp = (struct hfsfid *)fhp; hfsfhp->hfsfid_len = sizeof(struct hfsfid); hfsfhp->hfsfid_pad = 0; - hfsfhp->hfsfid_cnid = cp->c_cnid; + hfsfhp->hfsfid_cnid = cp->c_fileid; hfsfhp->hfsfid_gen = cp->c_itime; return (0); @@ -1807,6 +2019,8 @@ hfs_init(vfsp) dqinit(); #endif /* QUOTA */ + BTReserveSetup(); + /* * Allocate Catalog Iterator cache... 
*/ @@ -1815,6 +2029,31 @@ hfs_init(vfsp) return (0); } +static int +hfs_getmountpoint(vp, hfsmpp) + struct vnode *vp; + struct hfsmount **hfsmpp; +{ + struct hfsmount * hfsmp; + + if (vp == NULL) + return (EINVAL); + + if ((vp->v_flag & VROOT) == 0) + return (EINVAL); + + if (strcmp(vp->v_mount->mnt_stat.f_fstypename, "hfs") != 0) + return (EINVAL); + + hfsmp = VTOHFS(vp); + + if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) + return (EINVAL); + + *hfsmpp = hfsmp; + + return (0); +} // XXXdbg #include @@ -1833,17 +2072,68 @@ hfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) size_t newlen; struct proc *p; { - extern u_int32_t hfs_encodingbias; + extern u_int32_t hfs_getencodingbias(void); + extern void hfs_setencodingbias(u_int32_t); + + int error; + struct sysctl_req *req; + struct vfsidctl vc; + struct mount *mp; + struct hfsmount *hfsmp; + struct vfsquery vq; /* all sysctl names at this level are terminal */ - if (name[0] == HFS_ENCODINGBIAS) - return (sysctl_int(oldp, oldlenp, newp, newlen, - &hfs_encodingbias)); - else if (name[0] == 0x082969) { + if (name[0] == HFS_ENCODINGBIAS) { + u_int32_t bias; + + bias = hfs_getencodingbias(); + error = sysctl_int(oldp, oldlenp, newp, newlen, &bias); + if (error == 0 && newp) + hfs_setencodingbias(bias); + return (error); + + } else if (name[0] == HFS_EXTEND_FS) { + u_int64_t newsize; + + if (newp == NULL) + return (EINVAL); + if ((error = hfs_getmountpoint(p->p_fd->fd_cdir, &hfsmp))) + return (error); + error = sysctl_quad(oldp, oldlenp, newp, newlen, &newsize); + if (error) + return (error); + + error = hfs_extendfs(HFSTOVFS(hfsmp), newsize, p); + return (error); + + } else if (name[0] == HFS_ENCODINGHINT) { + size_t bufsize; + size_t bytes; + u_int32_t hint; + u_int16_t *unicode_name; + char *filename; + + bufsize = MAX(newlen * 3, MAXPATHLEN); + MALLOC(filename, char *, newlen, M_TEMP, M_WAITOK); + MALLOC(unicode_name, u_int16_t *, bufsize, M_TEMP, M_WAITOK); + + error = copyin(newp, (caddr_t)filename, newlen); + if (error == 0) { + error = utf8_decodestr(filename, newlen - 1, unicode_name, + &bytes, bufsize, 0, UTF_DECOMPOSED); + if (error == 0) { + hint = hfs_pickencoding(unicode_name, bytes / 2); + error = sysctl_int(oldp, oldlenp, NULL, NULL, &hint); + } + } + FREE(unicode_name, M_TEMP); + FREE(filename, M_TEMP); + return (error); + + } else if (name[0] == HFS_ENABLE_JOURNALING) { // make the file system journaled... struct vnode *vp = p->p_fd->fd_cdir, *jvp; - struct hfsmount *hfsmp; ExtendedVCB *vcb; int retval; struct cat_attr jnl_attr, jinfo_attr; @@ -1851,11 +2141,12 @@ hfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) void *jnl = NULL; /* Only root can enable journaling */ - if (current_proc()->p_ucred->cr_uid != 0) { + if (current_proc()->p_ucred->cr_uid != 0) { return (EPERM); } + hfsmp = VTOHFS(vp); - if (hfsmp->hfs_fs_ronly) { + if (hfsmp->hfs_flags & HFS_READ_ONLY) { return EROFS; } if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) { @@ -1893,7 +2184,7 @@ hfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) jnl = journal_create(jvp, (off_t)name[2] * (off_t)HFSTOVCB(hfsmp)->blockSize + HFSTOVCB(hfsmp)->hfsPlusIOPosOffset, - (off_t)name[3], + (off_t)((unsigned)name[3]), hfsmp->hfs_devvp, hfsmp->hfs_phys_block_size, 0, @@ -1903,7 +2194,7 @@ hfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) if (jnl == NULL) { printf("hfs: FAILED to create the journal!\n"); if (jvp && jvp != hfsmp->hfs_devvp) { - VOP_CLOSE(jvp, hfsmp->hfs_fs_ronly ? 
FREAD : FREAD|FWRITE, FSCRED, p); + VOP_CLOSE(jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, FSCRED, p); } jvp = NULL; @@ -1919,6 +2210,7 @@ hfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) // save this off for the hack-y check in hfs_remove() hfsmp->jnl_start = (u_int32_t)name[2]; + hfsmp->jnl_size = (off_t)((unsigned)name[3]); hfsmp->hfs_jnlinfoblkid = jinfo_attr.ca_fileid; hfsmp->hfs_jnlfileid = jnl_attr.ca_fileid; @@ -1928,21 +2220,18 @@ hfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1); return 0; - } else if (name[0] == 0x031272) { + } else if (name[0] == HFS_DISABLE_JOURNALING) { // clear the journaling bit struct vnode *vp = p->p_fd->fd_cdir; - struct hfsmount *hfsmp; void *jnl; int retval; /* Only root can disable journaling */ - if (current_proc()->p_ucred->cr_uid != 0) { + if (current_proc()->p_ucred->cr_uid != 0) { return (EPERM); } + hfsmp = VTOHFS(vp); - if (hfsmp->jnl == NULL) { - return EINVAL; - } printf("hfs: disabling journaling for mount @ 0x%x\n", vp->v_mount); @@ -1955,7 +2244,7 @@ hfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) journal_close(jnl); if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) { - VOP_CLOSE(hfsmp->jvp, hfsmp->hfs_fs_ronly ? FREAD : FREAD|FWRITE, FSCRED, p); + VOP_CLOSE(hfsmp->jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, FSCRED, p); } hfsmp->jnl = NULL; hfsmp->jvp = NULL; @@ -1970,7 +2259,45 @@ hfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1); return 0; - } + } else if (name[0] == HFS_GET_JOURNAL_INFO) { + struct vnode *vp = p->p_fd->fd_cdir; + off_t jnl_start, jnl_size; + + hfsmp = VTOHFS(vp); + if (hfsmp->jnl == NULL) { + jnl_start = 0; + jnl_size = 0; + } else { + jnl_start = (off_t)(hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset; + jnl_size = (off_t)hfsmp->jnl_size; + } + + if ((error = copyout((caddr_t)&jnl_start, (void *)name[1], sizeof(off_t))) != 0) { + return error; + } + if ((error = copyout((caddr_t)&jnl_size, (void *)name[2], sizeof(off_t))) != 0) { + return error; + } + + return 0; + } else if (name[0] == HFS_SET_PKG_EXTENSIONS) { + + return set_package_extensions_table((void *)name[1], name[2], name[3]); + + } else if (name[0] == VFS_CTL_QUERY) { + req = oldp; /* we're new style vfs sysctl. */ + + error = SYSCTL_IN(req, &vc, sizeof(vc)); + if (error) return (error); + + mp = vfs_getvfs(&vc.vc_fsid); + if (mp == NULL) return (ENOENT); + + hfsmp = VFSTOHFS(mp); + bzero(&vq, sizeof(vq)); + vq.vq_flags = hfsmp->hfs_notification_conditions; + return SYSCTL_OUT(req, &vq, sizeof(vq));; + }; return (EOPNOTSUPP); } @@ -1998,37 +2325,150 @@ hfs_vget(mp, ino, vpp) return (hfs_getcnode(VFSTOHFS(mp), cnid, NULL, 0, NULL, NULL, vpp)); } +/* + * Check to see if a given vnode is only referenced for events: + * [ entered with vp->v_interlock locked ] + */ +static int +hfs_evtonly(struct vnode *vp) +{ + int ubc_refcount; + + ubc_refcount = UBCINFOEXISTS(vp) ? 
1 : 0; + return (vp->v_usecount == (ubc_refcount + EVTONLYREFS(vp))); +} + +/* + * Check to see if all non-system vnodes for a given mountpoint are events-only + */ +static int +hfs_flush_evtonly(struct mount *mp, int flags, int dispose, struct proc *p) +{ + struct vnode *vp, *nvp; + int busy = 0; + + simple_lock(&mntvnode_slock); +loop: + for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) { + if (vp->v_mount != mp) goto loop; + nvp = vp->v_mntvnodes.le_next; + + simple_lock(&vp->v_interlock); + /* + * Skip over a vnodes marked VSYSTEM or VNOFLUSH. + */ + if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH))) { + simple_unlock(&vp->v_interlock); + continue; + }; + /* + * Skip over a vnodes marked VSWAP. + */ + if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) { + simple_unlock(&vp->v_interlock); + continue; + } + if (hfs_evtonly(vp)) { + if (dispose) { + /* "dispose" implies "forcibly", a la "FORCECLOSE": */ + simple_unlock(&mntvnode_slock); + vgonel(vp, p); + simple_lock(&mntvnode_slock); + } else { + simple_unlock(&vp->v_interlock); + }; + continue; + }; + + simple_unlock(&vp->v_interlock); + ++busy; + /* If asked to dispose, keep trying. If only checking, the answer is now known. */ + if (dispose) { + continue; + } else { + break; + }; + } + simple_unlock(&mntvnode_slock); + + return (busy == 0); +} + /* * Flush out all the files in a filesystem. */ -int +static int hfs_flushfiles(struct mount *mp, int flags, struct proc *p) { - register struct hfsmount *hfsmp; + struct hfsmount *hfsmp; + struct vnode *skipvp = NULLVP; + struct vnode *rsrcvp; + int quotafilecnt; int i; int error; -#if QUOTA hfsmp = VFSTOHFS(mp); +#if QUOTA + /* + * The open quota files have an indirect reference on + * the root directory vnode. We must account for this + * extra reference when doing the intial vflush. + */ + quotafilecnt = 0; + if (mp->mnt_flag & MNT_QUOTA) { + + /* Find out how many quota files we have open. */ + for (i = 0; i < MAXQUOTAS; i++) { + if (hfsmp->hfs_qfiles[i].qf_vp != NULLVP) + ++quotafilecnt; + } + + /* Obtain the root vnode so we can skip over it. */ + if (hfs_chashget(hfsmp->hfs_raw_dev, kRootDirID, 0, + &skipvp, &rsrcvp) == NULL) { + skipvp = NULLVP; + } + } +#endif /* QUOTA */ + + error = vflush(mp, skipvp, SKIPSYSTEM | SKIPSWAP | flags); + /* + * If the vflush() call failed solely because there are + * some event-only vnodes in the list, then forcibly get + * rid of those vnodes before the final vflush() pass. + */ + if ((error == EBUSY) && hfs_flush_evtonly(mp, SKIPSYSTEM | SKIPSWAP, 0, p)) { + (void) hfs_flush_evtonly(mp, SKIPSYSTEM | SKIPSWAP, 1, p); + }; + error = vflush(mp, skipvp, SKIPSYSTEM | flags); + +#if QUOTA if (mp->mnt_flag & MNT_QUOTA) { - if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) + if (skipvp) { + /* + * See if there are additional references on the + * root vp besides the ones obtained from the open + * quota files and the hfs_chashget call above. + */ + if ((error == 0) && + (skipvp->v_usecount > (1 + quotafilecnt))) { + error = EBUSY; /* root directory is still open */ + } + vput(skipvp); + } + if (error && (flags & FORCECLOSE) == 0) return (error); + for (i = 0; i < MAXQUOTAS; i++) { if (hfsmp->hfs_qfiles[i].qf_vp == NULLVP) continue; hfs_quotaoff(p, mp, i); } - /* - * Here we fall through to vflush again to ensure - * that we have gotten rid of all the system vnodes. 
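
The hfs_evtonly() predicate above packs its rule into a single comparison: a vnode counts as "events-only" when every usecount reference is accounted for by UBC or by O_EVTONLY opens, and only such vnodes may be forcibly disposed of by the second flush pass. A standalone toy model of the same test (simplified types, not kernel code):

    #include <stdio.h>

    struct vn {
        int usecount;
        int has_ubcinfo;
        int evtonly_refs;
    };

    /* Mirror of the hfs_evtonly() rule in the hunk above. */
    static int evtonly(const struct vn *vp)
    {
        int ubc_ref = vp->has_ubcinfo ? 1 : 0;
        return vp->usecount == ubc_ref + vp->evtonly_refs;
    }

    int main(void)
    {
        struct vn a = { 2, 1, 1 };  /* UBC + one O_EVTONLY open: flushable */
        struct vn b = { 3, 1, 1 };  /* one real open on top: busy */

        printf("a evtonly=%d  b evtonly=%d\n", evtonly(&a), evtonly(&b));
        return 0;
    }
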
- */ + error = vflush(mp, NULLVP, SKIPSYSTEM | flags); } #endif /* QUOTA */ - error = vflush(mp, NULLVP, (SKIPSYSTEM | SKIPSWAP | flags)); - error = vflush(mp, NULLVP, (SKIPSYSTEM | flags)); - return (error); } @@ -2056,8 +2496,8 @@ hfs_setencodingbits(struct hfsmount *hfsmp, u_int32_t encoding) break; } - if (index < 128) { - HFSTOVCB(hfsmp)->encodingsBitmap |= (1 << index); + if (index < 64) { + HFSTOVCB(hfsmp)->encodingsBitmap |= (u_int64_t)(1ULL << index); HFSTOVCB(hfsmp)->vcbFlags |= 0xFF00; } } @@ -2209,7 +2649,14 @@ hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush) return (retval); } - +/* + * Flush any dirty in-memory mount data to the on-disk + * volume header. + * + * Note: the on-disk volume signature is intentionally + * not flushed since the on-disk "H+" and "HX" signatures + * are always stored in-memory as "H+". + */ __private_extern__ int hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush) @@ -2223,7 +2670,12 @@ hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush) int sectorsize; int priIDSector; int critical = 0; + u_int16_t signature; + u_int16_t version; + if (hfsmp->hfs_flags & HFS_READ_ONLY) { + return(0); + } if (vcb->vcbSigWord == kHFSSigWord) return hfs_flushMDB(hfsmp, waitfor, altflush); @@ -2252,6 +2704,7 @@ hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush) } hfs_global_shared_lock_release(hfsmp); + printf("HFS: err %d reading VH blk (%s)\n", retval, vcb->vcbVN); return (retval); } @@ -2261,6 +2714,24 @@ hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush) volumeHeader = (HFSPlusVolumeHeader *)((char *)bp->b_data + HFS_PRI_OFFSET(sectorsize)); + /* + * Sanity check what we just read. + */ + signature = SWAP_BE16 (volumeHeader->signature); + version = SWAP_BE16 (volumeHeader->version); + if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) || + (version < kHFSPlusVersion) || (version > 100) || + (SWAP_BE32 (volumeHeader->blockSize) != vcb->blockSize)) { +#if 1 + panic("HFS: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d", + vcb->vcbVN, signature, version, + SWAP_BE32 (volumeHeader->blockSize)); +#endif + printf("HFS: corrupt VH blk (%s)\n", vcb->vcbVN); + brelse(bp); + return (EIO); + } + /* * For embedded HFS+ volumes, update create date if it changed * (ie from a setattrlist call) @@ -2303,28 +2774,6 @@ hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush) } } -// XXXdbg - only monkey around with the volume signature on non-root volumes -// -#if 0 - if (hfsmp->jnl && - hfsmp->hfs_fs_ronly == 0 && - (HFSTOVFS(hfsmp)->mnt_flag & MNT_ROOTFS) == 0) { - - int old_sig = volumeHeader->signature; - - if (vcb->vcbAtrb & kHFSVolumeUnmountedMask) { - volumeHeader->signature = kHFSPlusSigWord; - } else { - volumeHeader->signature = kHFSJSigWord; - } - - if (old_sig != volumeHeader->signature) { - altflush = 1; - } - } -#endif -// XXXdbg - /* Note: only update the lower 16 bits worth of attributes */ volumeHeader->attributes = SWAP_BE32 ((SWAP_BE32 (volumeHeader->attributes) & 0xFFFF0000) + (UInt16) vcb->vcbAtrb); volumeHeader->journalInfoBlock = SWAP_BE32(vcb->vcbJinfoBlock); @@ -2436,6 +2885,251 @@ hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush) } +/* + * Extend a file system. 
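
The hfs_setencodingbits() change above fixes a real overflow: encodingsBitmap is 64 bits wide, but the old code shifted a plain 32-bit int (undefined behavior for shift counts of 32 or more) and tested against a bound of 128. A small sketch of the failure mode and the fix:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int index = 40;
        uint64_t bits;

        /* bits = 1 << index;  -- undefined: the constant 1 is a 32-bit
         * int, and shifting a 32-bit value by 40 is not allowed; many
         * CPUs quietly wrap the shift count and set the wrong bit. */

        bits = 1ULL << index;   /* 64-bit shift, as the patch now does */
        printf("0x%016llx\n", (unsigned long long)bits);
        return 0;
    }
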
+ */ +static int +hfs_extendfs(struct mount *mp, u_int64_t newsize, struct proc *p) +{ + struct vnode *vp; + struct vnode *devvp; + struct buf *bp; + struct hfsmount *hfsmp; + struct filefork *fp = NULL; + ExtendedVCB *vcb; + struct cat_fork forkdata; + u_int64_t oldsize; + u_int64_t newblkcnt; + u_int32_t addblks; + u_int64_t sectorcnt; + u_int32_t sectorsize; + daddr_t prev_alt_sector; + daddr_t bitmapblks; + int error; + + hfsmp = VFSTOHFS(mp); + devvp = hfsmp->hfs_devvp; + vcb = HFSTOVCB(hfsmp); + + /* + * - HFS Plus file systems only. + * - Journaling must be enabled. + * - No embedded volumes. + */ + if ((vcb->vcbSigWord == kHFSSigWord) || + (hfsmp->jnl == NULL) || + (vcb->hfsPlusIOPosOffset != 0)) { + return (EPERM); + } + /* + * If extending file system by non-root, then verify + * ownership and check permissions. + */ + if (p->p_ucred->cr_uid != 0) { + error = hfs_root(mp, &vp); + if (error) + return (error); + error = hfs_owner_rights(hfsmp, VTOC(vp)->c_uid, p->p_ucred, p, 0); + if (error == 0) { + error = hfs_write_access(vp, p->p_ucred, p, false); + } + vput(vp); + if (error) + return (error); + + vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); + error = VOP_ACCESS(devvp, VREAD | VWRITE, p->p_ucred, p); + VOP_UNLOCK(devvp, 0, p); + if (error) + return (error); + } + if (VOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&sectorsize, 0, FSCRED, p)) { + return (ENXIO); + } + if (sectorsize != hfsmp->hfs_phys_block_size) { + return (ENXIO); + } + if (VOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&sectorcnt, 0, FSCRED, p)) { + return (ENXIO); + } + if ((sectorsize * sectorcnt) < newsize) { + printf("hfs_extendfs: not enough space on device\n"); + return (ENOSPC); + } + oldsize = (u_int64_t)hfsmp->hfs_phys_block_count * + (u_int64_t)hfsmp->hfs_phys_block_size; + + /* + * Validate new size. + */ + if ((newsize <= oldsize) || (newsize % vcb->blockSize)) { + printf("hfs_extendfs: invalid size\n"); + return (EINVAL); + } + newblkcnt = newsize / vcb->blockSize; + if (newblkcnt > (u_int64_t)0xFFFFFFFF) + return (EOVERFLOW); + + addblks = newblkcnt - vcb->totalBlocks; + + printf("hfs_extendfs: growing %s by %d blocks\n", vcb->vcbVN, addblks); + /* + * Enclose changes inside a transaction. + */ + hfs_global_shared_lock_acquire(hfsmp); + if (journal_start_transaction(hfsmp->jnl) != 0) { + hfs_global_shared_lock_release(hfsmp); + return (EINVAL); + } + + /* + * Remember the location of existing alternate VH. + */ + prev_alt_sector = (vcb->hfsPlusIOPosOffset / sectorsize) + + HFS_ALT_SECTOR(sectorsize, hfsmp->hfs_phys_block_count); + + vp = vcb->allocationsRefNum; + error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + if (error) { + goto out2; + } + fp = VTOF(vp); + bcopy(&fp->ff_data, &forkdata, sizeof(forkdata)); + + /* + * Calculate additional space required (if any) by allocation bitmap. + */ + bitmapblks = roundup(newblkcnt / 8, vcb->vcbVBMIOSize) / vcb->blockSize; + if (bitmapblks > fp->ff_blocks) + bitmapblks -= fp->ff_blocks; + else + bitmapblks = 0; + + if (bitmapblks > 0) { + daddr_t blkno; + daddr_t blkcnt; + + /* + * Add a new extent to the allocation bitmap file. + */ + error = AddFileExtent(vcb, fp, vcb->totalBlocks, bitmapblks); + if (error) { + printf("hfs_extendfs: error %d adding extents\n", error); + goto out; + } + blkcnt = bitmapblks; + blkno = fp->ff_blocks; + fp->ff_blocks += bitmapblks; + fp->ff_size += (u_int64_t)bitmapblks * (u_int64_t)vcb->blockSize; + VTOC(vp)->c_blocks = fp->ff_blocks; + /* + * Zero out the new bitmap blocks.
+ */ + { + + bp = NULL; + while (blkcnt > 0) { + error = meta_bread(vp, blkno, vcb->blockSize, NOCRED, &bp); + if (error) { + if (bp) { + brelse(bp); + } + break; + } + bzero((char *)bp->b_data, vcb->blockSize); + bp->b_flags |= B_AGE; + error = bwrite(bp); + if (error) + break; + --blkcnt; + ++blkno; + } + } + if (error) { + printf("hfs_extendfs: error %d clearing blocks\n", error); + goto out; + } + /* + * Mark the new bitmap space as allocated. + */ + error = BlockMarkAllocated(vcb, vcb->totalBlocks, bitmapblks); + if (error) { + printf("hfs_extendfs: error %d setting bitmap\n", error); + goto out; + } + } + /* + * Mark the new alternate VH as allocated. + */ + if (vcb->blockSize == 512) + error = BlockMarkAllocated(vcb, vcb->totalBlocks + addblks - 2, 2); + else + error = BlockMarkAllocated(vcb, vcb->totalBlocks + addblks - 1, 1); + if (error) { + printf("hfs_extendfs: error %d setting bitmap (VH)\n", error); + goto out; + } + /* + * Mark the old alternate VH as free. + */ + if (vcb->blockSize == 512) + (void) BlockMarkFree(vcb, vcb->totalBlocks - 2, 2); + else + (void) BlockMarkFree(vcb, vcb->totalBlocks - 1, 1); + + /* + * Adjust file system variables for new space. + */ + vcb->totalBlocks += addblks; + vcb->freeBlocks += addblks - bitmapblks; + hfsmp->hfs_phys_block_count = newsize / sectorsize; + + MarkVCBDirty(vcb); + error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH); + if (error) { + printf("hfs_extendfs: couldn't flush volume headers (%d)", error); + /* + * Restore to old state. + */ + fp->ff_size -= (u_int64_t)bitmapblks * (u_int64_t)vcb->blockSize; + vcb->totalBlocks -= addblks; + vcb->freeBlocks -= addblks - bitmapblks; + hfsmp->hfs_phys_block_count = oldsize / sectorsize; + MarkVCBDirty(vcb); + if (vcb->blockSize == 512) + (void) BlockMarkAllocated(vcb, vcb->totalBlocks - 2, 2); + else + (void) BlockMarkAllocated(vcb, vcb->totalBlocks - 1, 1); + goto out; + } + /* + * Invalidate the old alternate volume header. + */ + bp = NULL; + if (meta_bread(hfsmp->hfs_devvp, prev_alt_sector, sectorsize, + NOCRED, &bp) == 0) { + journal_modify_block_start(hfsmp->jnl, bp); + bzero(bp->b_data + HFS_ALT_OFFSET(sectorsize), kMDBSize); + journal_modify_block_end(hfsmp->jnl, bp); + } else if (bp) { + brelse(bp); + } +out: + if (error && fp) { + /* Restore allocation fork. */ + bcopy(&forkdata, &fp->ff_data, sizeof(forkdata)); + VTOC(vp)->c_blocks = fp->ff_blocks; + + } + VOP_UNLOCK(vp, 0, p); +out2: + journal_end_transaction(hfsmp->jnl); + hfs_global_shared_lock_release(hfsmp); + + return (error); +} + + /* * hfs vfs operations. */ diff --git a/bsd/hfs/hfs_vfsutils.c b/bsd/hfs/hfs_vfsutils.c index 63aa2a90e..98a559393 100644 --- a/bsd/hfs/hfs_vfsutils.c +++ b/bsd/hfs/hfs_vfsutils.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -54,12 +54,16 @@ extern int count_lock_queue __P((void)); -extern uid_t console_user; static void ReleaseMetaFileVNode(struct vnode *vp); static int hfs_late_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, void *_args); +static void hfs_metadatazone_init(struct hfsmount *); +static u_int32_t hfs_hotfile_freeblocks(struct hfsmount *); + + + u_int32_t GetLogicalBlockSize(struct vnode *vp); /* BTree accessor routines */ @@ -86,6 +90,7 @@ char hfs_vbmname[] = "Volume Bitmap"; char hfs_privdirname[] = "\xE2\x90\x80\xE2\x90\x80\xE2\x90\x80\xE2\x90\x80HFS+ Private Data"; +__private_extern__ OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb, struct proc *p) { @@ -102,9 +107,11 @@ OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb, return (EINVAL); /* don't mount a writeable volume if its dirty, it must be cleaned by fsck_hfs */ - if ((hfsmp->hfs_fs_ronly == 0) && ((SWAP_BE16(mdb->drAtrb) & kHFSVolumeUnmountedMask) == 0)) + if (((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) && + ((SWAP_BE16(mdb->drAtrb) & kHFSVolumeUnmountedMask) == 0)) { return (EINVAL); - + } + hfsmp->hfs_flags |= HFS_STANDARD; /* * The MDB seems OK: transfer info from it into VCB * Note - the VCB starts out clear (all zeros) @@ -130,7 +137,7 @@ OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb, vcb->vcbFilCnt = SWAP_BE32 (mdb->drFilCnt); vcb->vcbDirCnt = SWAP_BE32 (mdb->drDirCnt); bcopy(mdb->drFndrInfo, vcb->vcbFndrInfo, sizeof(vcb->vcbFndrInfo)); - if (!hfsmp->hfs_fs_ronly) + if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) vcb->vcbWrCnt++; /* Compensate for write of MDB on last flush */ /* convert hfs encoded name into UTF-8 string */ @@ -149,6 +156,7 @@ OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb, bzero(&cndesc, sizeof(cndesc)); cndesc.cd_parentcnid = kRootParID; + cndesc.cd_flags |= CD_ISMETA; bzero(&cnattr, sizeof(cnattr)); cnattr.ca_nlink = 1; cnattr.ca_mode = S_IFREG; @@ -163,6 +171,7 @@ OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb, fork.cf_size = SWAP_BE32(mdb->drXTFlSize); fork.cf_blocks = fork.cf_size / vcb->blockSize; fork.cf_clump = SWAP_BE32(mdb->drXTClpSiz); + fork.cf_vblocks = 0; fork.cf_extents[0].startBlock = SWAP_BE16(mdb->drXTExtRec[0].startBlock); fork.cf_extents[0].blockCount = SWAP_BE16(mdb->drXTExtRec[0].blockCount); fork.cf_extents[1].startBlock = SWAP_BE16(mdb->drXTExtRec[1].startBlock); @@ -175,9 +184,7 @@ OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb, &vcb->extentsRefNum); if (error) goto MtVolErr; error = MacToVFSError(BTOpenPath(VTOF(vcb->extentsRefNum), - (KeyCompareProcPtr)CompareExtentKeys, - GetBTreeBlock, ReleaseBTreeBlock, - ExtendBTreeFile, SetBTreeBlockSize)); + (KeyCompareProcPtr)CompareExtentKeys)); if (error) { VOP_UNLOCK(vcb->extentsRefNum, 0, p); goto MtVolErr; @@ -192,6 +199,7 @@ OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb, fork.cf_size = SWAP_BE32(mdb->drCTFlSize); fork.cf_blocks = fork.cf_size / vcb->blockSize; fork.cf_clump = SWAP_BE32(mdb->drCTClpSiz); + fork.cf_vblocks = 0; fork.cf_extents[0].startBlock = SWAP_BE16(mdb->drCTExtRec[0].startBlock); fork.cf_extents[0].blockCount = SWAP_BE16(mdb->drCTExtRec[0].blockCount); fork.cf_extents[1].startBlock = SWAP_BE16(mdb->drCTExtRec[1].startBlock); @@ -207,9 +215,7 @@ OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb, goto MtVolErr; } error = 
MacToVFSError(BTOpenPath(VTOF(vcb->catalogRefNum), - (KeyCompareProcPtr)CompareCatalogKeys, - GetBTreeBlock, ReleaseBTreeBlock, - ExtendBTreeFile, SetBTreeBlockSize)); + (KeyCompareProcPtr)CompareCatalogKeys)); if (error) { VOP_UNLOCK(vcb->catalogRefNum, 0, p); VOP_UNLOCK(vcb->extentsRefNum, 0, p); @@ -249,38 +255,57 @@ CmdDone: // //******************************************************************************* +__private_extern__ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, off_t embeddedOffset, u_int64_t disksize, struct proc *p, void *args) { register ExtendedVCB *vcb; struct cat_desc cndesc; struct cat_attr cnattr; + struct cat_fork cfork; UInt32 blockSize; + u_int64_t volumesize; + struct BTreeInfoRec btinfo; + u_int16_t signature; + u_int16_t version; + int i; OSErr retval; - // XXXdbg - added the kHFSJSigWord case - if ((SWAP_BE16(vhp->signature) != kHFSPlusSigWord && - SWAP_BE16(vhp->signature) != kHFSJSigWord) || - SWAP_BE16(vhp->version) != kHFSPlusVersion) { - // XXXdbg - printf("hfs: mount: sig 0x%x and version 0x%x are not HFS or HFS+.\n", - vhp->signature, vhp->version); + signature = SWAP_BE16(vhp->signature); + version = SWAP_BE16(vhp->version); + + if (signature == kHFSPlusSigWord) { + if (version != kHFSPlusVersion) { + printf("hfs_mount: invalid HFS+ version: %d\n", version); + return (EINVAL); + } + } else if (signature == kHFSXSigWord) { + if (version != kHFSXVersion) { + printf("hfs_mount: invalid HFSX version: %d\n", version); + return (EINVAL); + } + /* The in-memory signature is always 'H+'. */ + signature = kHFSPlusSigWord; + hfsmp->hfs_flags |= HFS_X; + } else { + printf("hfs_mount: invalid HFS+ sig 0x%04x\n", signature); return (EINVAL); } /* Block size must be at least 512 and a power of 2 */ blockSize = SWAP_BE32(vhp->blockSize); - if (blockSize < 512 || (blockSize & (blockSize-1)) != 0) + if (blockSize < 512 || !powerof2(blockSize)) return (EINVAL); /* don't mount a writable volume if its dirty, it must be cleaned by fsck_hfs */ - if (hfsmp->hfs_fs_ronly == 0 && hfsmp->jnl == NULL && (SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) == 0) + if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0 && hfsmp->jnl == NULL && + (SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) == 0) return (EINVAL); /* Make sure we can live with the physical block size. 
*/ if ((disksize & (hfsmp->hfs_phys_block_size - 1)) || (embeddedOffset & (hfsmp->hfs_phys_block_size - 1)) || - (SWAP_BE32(vhp->blockSize) < hfsmp->hfs_phys_block_size)) { + (blockSize < hfsmp->hfs_phys_block_size)) { return (ENXIO); } /* @@ -289,13 +314,7 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, */ vcb = HFSTOVCB(hfsmp); - vcb->vcbSigWord = SWAP_BE16(vhp->signature); - - // XXXdbg - remap this in case we've mounted a dirty journaled volume - if (vcb->vcbSigWord == kHFSJSigWord) { - vcb->vcbSigWord = kHFSPlusSigWord; - } - + vcb->vcbSigWord = signature; vcb->vcbJinfoBlock = SWAP_BE32(vhp->journalInfoBlock); vcb->vcbLsMod = to_bsd_time(SWAP_BE32(vhp->modifyDate)); vcb->vcbAtrb = (UInt16)SWAP_BE32(vhp->attributes); @@ -310,7 +329,7 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo)); vcb->vcbAlBlSt = 0; /* hfs+ allocation blocks start at first block of volume */ - if (!hfsmp->hfs_fs_ronly) + if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) vcb->vcbWrCnt++; /* compensate for write of Volume Header on last flush */ VCB_LOCK_INIT(vcb); @@ -319,7 +338,7 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, vcb->nextAllocation = SWAP_BE32(vhp->nextAllocation); vcb->totalBlocks = SWAP_BE32(vhp->totalBlocks); vcb->freeBlocks = SWAP_BE32(vhp->freeBlocks); - vcb->blockSize = SWAP_BE32(vhp->blockSize); + vcb->blockSize = blockSize; vcb->encodingsBitmap = SWAP_BE64(vhp->encodingsBitmap); vcb->localCreateDate = SWAP_BE32(vhp->createDate); @@ -338,6 +357,7 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, bzero(&cndesc, sizeof(cndesc)); cndesc.cd_parentcnid = kRootParID; + cndesc.cd_flags |= CD_ISMETA; bzero(&cnattr, sizeof(cnattr)); cnattr.ca_nlink = 1; cnattr.ca_mode = S_IFREG; @@ -349,19 +369,23 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, cndesc.cd_namelen = strlen(hfs_extname); cndesc.cd_cnid = cnattr.ca_fileid = kHFSExtentsFileID; - SWAP_HFS_PLUS_FORK_DATA (&vhp->extentsFile); - cnattr.ca_blocks = vhp->extentsFile.totalBlocks; - - retval = hfs_getnewvnode(hfsmp, NULL, &cndesc, 0, &cnattr, - (struct cat_fork *)&vhp->extentsFile, + cfork.cf_size = SWAP_BE64 (vhp->extentsFile.logicalSize); + cfork.cf_clump = SWAP_BE32 (vhp->extentsFile.clumpSize); + cfork.cf_blocks = SWAP_BE32 (vhp->extentsFile.totalBlocks); + cfork.cf_vblocks = 0; + cnattr.ca_blocks = cfork.cf_blocks; + for (i = 0; i < kHFSPlusExtentDensity; i++) { + cfork.cf_extents[i].startBlock = + SWAP_BE32 (vhp->extentsFile.extents[i].startBlock); + cfork.cf_extents[i].blockCount = + SWAP_BE32 (vhp->extentsFile.extents[i].blockCount); + } + retval = hfs_getnewvnode(hfsmp, NULL, &cndesc, 0, &cnattr, &cfork, &vcb->extentsRefNum); - SWAP_HFS_PLUS_FORK_DATA (&vhp->extentsFile); if (retval) goto ErrorExit; retval = MacToVFSError(BTOpenPath(VTOF(vcb->extentsRefNum), - (KeyCompareProcPtr) CompareExtentKeysPlus, - GetBTreeBlock, ReleaseBTreeBlock, - ExtendBTreeFile, SetBTreeBlockSize)); + (KeyCompareProcPtr) CompareExtentKeysPlus)); if (retval) { VOP_UNLOCK(vcb->extentsRefNum, 0, p); goto ErrorExit; @@ -374,26 +398,39 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, cndesc.cd_namelen = strlen(hfs_catname); cndesc.cd_cnid = cnattr.ca_fileid = kHFSCatalogFileID; - SWAP_HFS_PLUS_FORK_DATA(&vhp->catalogFile); - cnattr.ca_blocks = vhp->catalogFile.totalBlocks; - - retval = hfs_getnewvnode(hfsmp, 
NULL, &cndesc, 0, &cnattr, - (struct cat_fork *)&vhp->catalogFile, + cfork.cf_size = SWAP_BE64 (vhp->catalogFile.logicalSize); + cfork.cf_clump = SWAP_BE32 (vhp->catalogFile.clumpSize); + cfork.cf_blocks = SWAP_BE32 (vhp->catalogFile.totalBlocks); + cfork.cf_vblocks = 0; + cnattr.ca_blocks = cfork.cf_blocks; + for (i = 0; i < kHFSPlusExtentDensity; i++) { + cfork.cf_extents[i].startBlock = + SWAP_BE32 (vhp->catalogFile.extents[i].startBlock); + cfork.cf_extents[i].blockCount = + SWAP_BE32 (vhp->catalogFile.extents[i].blockCount); + } + retval = hfs_getnewvnode(hfsmp, NULL, &cndesc, 0, &cnattr, &cfork, &vcb->catalogRefNum); - SWAP_HFS_PLUS_FORK_DATA(&vhp->catalogFile); if (retval) { VOP_UNLOCK(vcb->extentsRefNum, 0, p); goto ErrorExit; } retval = MacToVFSError(BTOpenPath(VTOF(vcb->catalogRefNum), - (KeyCompareProcPtr) CompareExtendedCatalogKeys, - GetBTreeBlock, ReleaseBTreeBlock, - ExtendBTreeFile, SetBTreeBlockSize)); + (KeyCompareProcPtr) CompareExtendedCatalogKeys)); if (retval) { VOP_UNLOCK(vcb->catalogRefNum, 0, p); VOP_UNLOCK(vcb->extentsRefNum, 0, p); goto ErrorExit; } + if ((hfsmp->hfs_flags & HFS_X) && + BTGetInformation(VTOF(vcb->catalogRefNum), 0, &btinfo) == 0) { + if (btinfo.keyCompareType == kHFSBinaryCompare) { + hfsmp->hfs_flags |= HFS_CASE_SENSITIVE; + /* Install a case-sensitive key compare */ + (void) BTOpenPath(VTOF(vcb->catalogRefNum), + (KeyCompareProcPtr)cat_binarykeycompare); + } + } /* * Set up Allocation file vnode @@ -402,13 +439,19 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, cndesc.cd_namelen = strlen(hfs_vbmname); cndesc.cd_cnid = cnattr.ca_fileid = kHFSAllocationFileID; - SWAP_HFS_PLUS_FORK_DATA(&vhp->allocationFile); - cnattr.ca_blocks = vhp->allocationFile.totalBlocks; - - retval = hfs_getnewvnode(hfsmp, NULL, &cndesc, 0, &cnattr, - (struct cat_fork *)&vhp->allocationFile, + cfork.cf_size = SWAP_BE64 (vhp->allocationFile.logicalSize); + cfork.cf_clump = SWAP_BE32 (vhp->allocationFile.clumpSize); + cfork.cf_blocks = SWAP_BE32 (vhp->allocationFile.totalBlocks); + cfork.cf_vblocks = 0; + cnattr.ca_blocks = cfork.cf_blocks; + for (i = 0; i < kHFSPlusExtentDensity; i++) { + cfork.cf_extents[i].startBlock = + SWAP_BE32 (vhp->allocationFile.extents[i].startBlock); + cfork.cf_extents[i].blockCount = + SWAP_BE32 (vhp->allocationFile.extents[i].blockCount); + } + retval = hfs_getnewvnode(hfsmp, NULL, &cndesc, 0, &cnattr, &cfork, &vcb->allocationsRefNum); - SWAP_HFS_PLUS_FORK_DATA(&vhp->allocationFile); if (retval) { VOP_UNLOCK(vcb->catalogRefNum, 0, p); VOP_UNLOCK(vcb->extentsRefNum, 0, p); @@ -430,7 +473,7 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, /* mark the volume dirty (clear clean unmount bit) */ vcb->vcbAtrb &= ~kHFSVolumeUnmountedMask; - if (hfsmp->jnl && hfsmp->hfs_fs_ronly == 0) { + if (hfsmp->jnl && (hfsmp->hfs_flags & HFS_READ_ONLY) == 0) { hfs_flushvolumeheader(hfsmp, TRUE, TRUE); } @@ -441,17 +484,6 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, VOP_UNLOCK(vcb->catalogRefNum, 0, p); VOP_UNLOCK(vcb->extentsRefNum, 0, p); - /* setup private/hidden directory for unlinked files */ - hfsmp->hfs_private_metadata_dir = FindMetaDataDirectory(vcb); - if (hfsmp->jnl && (hfsmp->hfs_fs_ronly == 0)) - hfs_remove_orphans(hfsmp); - - if ( !(vcb->vcbAtrb & kHFSVolumeHardwareLockMask) ) // if the disk is not write protected - { - MarkVCBDirty( vcb ); // mark VCB dirty so it will be written - } - - // // Check if we need to do late journal initialization. 
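
The BTGetInformation() hunk above is where HFSX case sensitivity takes effect: if the catalog's key-compare type is kHFSBinaryCompare, a binary key compare is installed in place of the default case-folding one. A rough ASCII-only illustration of the behavioral difference (real HFS+ folding is Unicode-based, so this is only an analogy):

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    int main(void)
    {
        const char *a = "README", *b = "readme";

        printf("binary compare:  same name? %s\n",
            strcmp(a, b) == 0 ? "yes" : "no");      /* no  (HFSX case-sensitive) */
        printf("folding compare: same name? %s\n",
            strcasecmp(a, b) == 0 ? "yes" : "no");  /* yes (default HFS+) */
        return 0;
    }
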
This only // happens if a previous version of MacOS X (or 9) touched the disk. @@ -482,6 +514,40 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, } } + /* + * Establish a metadata allocation zone. + */ + hfs_metadatazone_init(hfsmp); + + /* + * Make any metadata zone adjustments. + */ + if (hfsmp->hfs_flags & HFS_METADATA_ZONE) { + /* Keep the roving allocator out of the metadata zone. */ + if (vcb->nextAllocation >= hfsmp->hfs_metazone_start && + vcb->nextAllocation <= hfsmp->hfs_metazone_end) { + vcb->nextAllocation = hfsmp->hfs_metazone_end + 1; + } + } + + /* setup private/hidden directory for unlinked files */ + FindMetaDataDirectory(vcb); + if (hfsmp->jnl && ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0)) + hfs_remove_orphans(hfsmp); + + if ( !(vcb->vcbAtrb & kHFSVolumeHardwareLockMask) ) // if the disk is not write protected + { + MarkVCBDirty( vcb ); // mark VCB dirty so it will be written + } + + + /* + * Allow hot file clustering if conditions allow. + */ + if ((hfsmp->hfs_flags & HFS_METADATA_ZONE) && + ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0)) { + (void) hfs_recording_init(hfsmp, p); + } return (0); @@ -527,13 +593,20 @@ static void ReleaseMetaFileVNode(struct vnode *vp) * *************************************************************/ -short hfsUnmount( register struct hfsmount *hfsmp, struct proc *p) +__private_extern__ +int +hfsUnmount( register struct hfsmount *hfsmp, struct proc *p) { ExtendedVCB *vcb = HFSTOVCB(hfsmp); int retval = E_NONE; InvalidateCatalogCache( vcb ); + if (hfsmp->hfc_filevp) { + ReleaseMetaFileVNode(hfsmp->hfc_filevp); + hfsmp->hfc_filevp = NULL; + } + if (vcb->vcbSigWord == kHFSPlusSigWord) ReleaseMetaFileVNode(vcb->allocationsRefNum); @@ -545,16 +618,11 @@ short hfsUnmount( register struct hfsmount *hfsmp, struct proc *p) /* - * Some 3rd party kexts link against hfs_getcatalog so keep a stub for now. + * Test is fork has overflow extents. */ -short -hfs_getcatalog(void *p1, u_long p2, void *p3, short p4, void *p5) -{ - return ENOENT; -} - - -int overflow_extents(struct filefork *fp) +__private_extern__ +int +overflow_extents(struct filefork *fp) { u_long blocks; @@ -583,7 +651,10 @@ int overflow_extents(struct filefork *fp) } -/* __private_extern__ */ +/* + * Lock/Unlock a metadata file. 
+ */ +__private_extern__ int hfs_metafilelocking(struct hfsmount *hfsmp, u_long fileID, u_int flags, struct proc *p) { @@ -610,19 +681,19 @@ hfs_metafilelocking(struct hfsmount *hfsmp, u_long fileID, u_int flags, struct p panic("hfs_lockmetafile: invalid fileID"); } - /* Release, if necesary any locked buffer caches */ - if ((flags & LK_TYPE_MASK) == LK_RELEASE) { + if ((flags & LK_TYPE_MASK) != LK_RELEASE) { + flags |= LK_RETRY; + } else if (hfsmp->jnl == NULL) { struct timeval tv = time; u_int32_t lastfsync = tv.tv_sec; (void) BTGetLastSync((FCB*)VTOF(vp), &lastfsync); numOfLockedBuffs = count_lock_queue(); - if ((numOfLockedBuffs > kMaxLockedMetaBuffers) || ((numOfLockedBuffs>1) && ((tv.tv_sec - lastfsync) > kMaxSecsForFsync))) { + if ((numOfLockedBuffs > kMaxLockedMetaBuffers) || + ((numOfLockedBuffs > 1) && ((tv.tv_sec - lastfsync) > kMaxSecsForFsync))) { hfs_btsync(vp, HFS_SYNCTRANS); } - } else { - flags |= LK_RETRY; } retval = lockmgr(&VTOC(vp)->c_lock, flags, &vp->v_interlock, p); @@ -645,7 +716,7 @@ void RequireFileLock(FileReference vp, int shareable) void * self; pid = current_proc()->p_pid; - self = (void *) current_thread(); + self = (void *) current_act(); lkp = &VTOC(vp)->c_lock; simple_lock(&lkp->lk_interlock); @@ -680,13 +751,11 @@ void RequireFileLock(FileReference vp, int shareable) * There are three ways to qualify for ownership rights on an object: * * 1. (a) Your UID matches the cnode's UID. - * (b) The object in question is owned by "unknown" and - * your UID matches the console user's UID. + * (b) The object in question is owned by "unknown" * 2. (a) Permissions on the filesystem are being ignored and * your UID matches the replacement UID. * (b) Permissions on the filesystem are being ignored and - * the replacement UID is "unknown" and - * your UID matches the console user UID. + * the replacement UID is "unknown". * 3. You are root. * */ @@ -695,11 +764,10 @@ hfs_owner_rights(struct hfsmount *hfsmp, uid_t cnode_uid, struct ucred *cred, struct proc *p, int invokesuperuserstatus) { if ((cred->cr_uid == cnode_uid) || /* [1a] */ - ((cnode_uid == UNKNOWNUID) && (cred->cr_uid == console_user)) || /* [1b] */ + (cnode_uid == UNKNOWNUID) || /* [1b] */ ((HFSTOVFS(hfsmp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) && /* [2] */ ((cred->cr_uid == hfsmp->hfs_uid) || /* [2a] */ - ((hfsmp->hfs_uid == UNKNOWNUID) && /* [2b] */ - (cred->cr_uid == console_user)))) || + (hfsmp->hfs_uid == UNKNOWNUID))) || /* [2b] */ (invokesuperuserstatus && (suser(cred, &p->p_acflag) == 0))) { /* [3] */ return (0); } else { @@ -755,8 +823,9 @@ unsigned long BestBlockSizeFit(unsigned long allocationBlockSize, * To make the HFS Plus filesystem follow UFS unlink semantics, a remove * of an active vnode is translated to a move/rename so the file appears * deleted. The destination folder for these move/renames is setup here - * and a reference to it is place in hfsmp->hfs_private_metadata_dir. + * and a reference to it is place in hfsmp->hfs_privdir_desc. 
*/ +__private_extern__ u_long FindMetaDataDirectory(ExtendedVCB *vcb) { @@ -765,7 +834,9 @@ FindMetaDataDirectory(ExtendedVCB *vcb) struct cnode * dcp = NULL; struct FndrDirInfo * fndrinfo; struct cat_desc out_desc = {0}; + struct proc *p = current_proc(); struct timeval tv; + cat_cookie_t cookie; int error; if (vcb->vcbSigWord != kHFSPlusSigWord) @@ -781,28 +852,52 @@ FindMetaDataDirectory(ExtendedVCB *vcb) } /* Lock catalog b-tree */ - error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, current_proc()); - if (error) + if (hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p) != 0) return (0); error = cat_lookup(hfsmp, &hfsmp->hfs_privdir_desc, 0, NULL, &hfsmp->hfs_privdir_attr, NULL); + /* Unlock catalog b-tree */ + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); + if (error == 0) { - /* Unlock catalog b-tree */ - (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, current_proc()); hfsmp->hfs_metadata_createdate = hfsmp->hfs_privdir_attr.ca_itime; + hfsmp->hfs_privdir_desc.cd_cnid = hfsmp->hfs_privdir_attr.ca_fileid; + /* + * Clear the system immutable flag if set... + */ + if ((hfsmp->hfs_privdir_attr.ca_flags & SF_IMMUTABLE) && + (hfsmp->hfs_flags & HFS_READ_ONLY) == 0) { + hfsmp->hfs_privdir_attr.ca_flags &= ~SF_IMMUTABLE; + + hfs_global_shared_lock_acquire(hfsmp); + if (hfsmp->jnl) { + if ((error = journal_start_transaction(hfsmp->jnl)) != 0) { + hfs_global_shared_lock_release(hfsmp); + return (hfsmp->hfs_privdir_attr.ca_fileid); + } + } + if (hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p) == 0) { + (void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc, + &hfsmp->hfs_privdir_attr, NULL, NULL); + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); + } + if (hfsmp->jnl) { + journal_end_transaction(hfsmp->jnl); + } + hfs_global_shared_lock_release(hfsmp); + } return (hfsmp->hfs_privdir_attr.ca_fileid); - } else if (hfsmp->hfs_fs_ronly) { - /* Unlock catalog b-tree */ - (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, current_proc()); + + } else if (hfsmp->hfs_flags & HFS_READ_ONLY) { + return (0); } /* Setup the default attributes */ bzero(&hfsmp->hfs_privdir_attr, sizeof(struct cat_attr)); hfsmp->hfs_privdir_attr.ca_mode = S_IFDIR; - hfsmp->hfs_privdir_attr.ca_flags = SF_IMMUTABLE; hfsmp->hfs_privdir_attr.ca_nlink = 2; hfsmp->hfs_privdir_attr.ca_itime = vcb->vcbCrDate; hfsmp->hfs_privdir_attr.ca_mtime = time.tv_sec; @@ -821,12 +916,24 @@ FindMetaDataDirectory(ExtendedVCB *vcb) return (0); } } + /* Reserve some space in the Catalog file. 
*/ + if (cat_preflight(hfsmp, CAT_CREATE, &cookie, p) != 0) { + if (hfsmp->jnl) { + journal_end_transaction(hfsmp->jnl); + } + hfs_global_shared_lock_release(hfsmp); + return (0); + } - error = cat_create(hfsmp, &hfsmp->hfs_privdir_desc, - &hfsmp->hfs_privdir_attr, &out_desc); + if (hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p) == 0) { + error = cat_create(hfsmp, &hfsmp->hfs_privdir_desc, + &hfsmp->hfs_privdir_attr, &out_desc); - /* Unlock catalog b-tree */ - (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, current_proc()); + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); + } + + cat_postflight(hfsmp, &cookie, p); + if (error) { if (hfsmp->jnl) { journal_end_transaction(hfsmp->jnl); @@ -896,7 +1003,7 @@ GetFileInfo(ExtendedVCB *vcb, u_int32_t dirid, char *name, if (error == 0) { return (fattr->ca_fileid); - } else if (hfsmp->hfs_fs_ronly) { + } else if (hfsmp->hfs_flags & HFS_READ_ONLY) { return (0); } } @@ -916,15 +1023,20 @@ hfs_remove_orphans(struct hfsmount * hfsmp) struct FSBufferDescriptor btdata; struct HFSPlusCatalogFile filerec; struct HFSPlusCatalogKey * keyp; + struct proc *p = current_proc(); FCB *fcb; ExtendedVCB *vcb; char filename[32]; char tempname[32]; size_t namelen; + cat_cookie_t cookie = {0}; int catlock = 0; - int result, started_tr = 0; + int catreserve = 0; + int started_tr = 0; + int shared_lock = 0; + int result; - if (hfsmp->hfs_orphans_cleaned) + if (hfsmp->hfs_flags & HFS_CLEANED_ORPHANS) return; vcb = HFSTOVCB(hfsmp); @@ -937,38 +1049,34 @@ hfs_remove_orphans(struct hfsmount * hfsmp) MALLOC(iterator, struct BTreeIterator *, sizeof(*iterator), M_TEMP, M_WAITOK); bzero(iterator, sizeof(*iterator)); keyp = (HFSPlusCatalogKey*)&iterator->key; - keyp->parentID = hfsmp->hfs_private_metadata_dir; - - // XXXdbg - hfs_global_shared_lock_acquire(hfsmp); - if (hfsmp->jnl) { - if (journal_start_transaction(hfsmp->jnl) != 0) { - hfs_global_shared_lock_release(hfsmp); - return; - } - started_tr = 1; - } + keyp->parentID = hfsmp->hfs_privdir_desc.cd_cnid; - /* Lock catalog b-tree */ - result = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, current_proc()); + result = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); if (result) goto exit; - catlock = 1; - /* * Position the iterator at the folder thread record. * (i.e. one record before first child) */ result = BTSearchRecord(fcb, iterator, NULL, NULL, iterator); + + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); if (result) goto exit; /* Visit all the children in the HFS+ private directory. */ for (;;) { + result = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); + if (result) + goto exit; + result = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL); + + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); if (result) break; - if (keyp->parentID != hfsmp->hfs_private_metadata_dir) + + if (keyp->parentID != hfsmp->hfs_privdir_desc.cd_cnid) break; if (filerec.recordType != kHFSPlusFileRecord) continue; @@ -982,46 +1090,92 @@ hfs_remove_orphans(struct hfsmount * hfsmp) * Delete all files named "tempxxx", where * xxx is the file's cnid in decimal. * - * Delete all files named "iNodexxx", that - * have a link count of zero. 
*/ if (bcmp(tempname, filename, namelen) == 0) { - struct filefork fork = {0}; - struct cnode cnode = {0}; + struct filefork dfork = {0}; + struct filefork rfork = {0}; + struct cnode cnode = {0}; + + // XXXdbg + hfs_global_shared_lock_acquire(hfsmp); + shared_lock = 1; + if (hfsmp->jnl) { + if (journal_start_transaction(hfsmp->jnl) != 0) { + goto exit; + } + started_tr = 1; + } + + /* + * Reserve some space in the Catalog file. + */ + if (cat_preflight(hfsmp, CAT_DELETE, &cookie, p) != 0) { + goto exit; + } + catreserve = 1; - // XXXdebug - //printf("hfs_remove_orphans: removing %s\n", filename); + /* Lock catalog b-tree */ + if (hfs_metafilelocking(hfsmp, kHFSCatalogFileID, + LK_EXCLUSIVE, p) != 0) { + goto exit; + } + catlock = 1; /* Build a fake cnode */ - cnode.c_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir; + cat_convertattr(hfsmp, (CatalogRecord *)&filerec, &cnode.c_attr, + &dfork.ff_data, &rfork.ff_data); + cnode.c_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid; cnode.c_desc.cd_nameptr = filename; cnode.c_desc.cd_namelen = namelen; - cnode.c_desc.cd_cnid = filerec.fileID; - cnode.c_attr.ca_fileid = filerec.fileID; - cnode.c_blocks = filerec.dataFork.totalBlocks + - filerec.resourceFork.totalBlocks; + cnode.c_desc.cd_cnid = cnode.c_attr.ca_fileid; + cnode.c_blocks = dfork.ff_blocks + rfork.ff_blocks; /* Position iterator at previous entry */ if (BTIterateRecord(fcb, kBTreePrevRecord, iterator, - NULL, NULL) != 0) + NULL, NULL) != 0) { break; - + } + /* Truncate the file to zero (both forks) */ - if (filerec.dataFork.totalBlocks > 0) { - fork.ff_cp = &cnode; - cnode.c_datafork = &fork; - bcopy(&filerec.dataFork, &fork.ff_data, sizeof(struct cat_fork)); - if (TruncateFileC(vcb, (FCB*)&fork, 0, false) != 0) { - printf("error truncting data fork!\n"); - break; + if (dfork.ff_blocks > 0) { + u_int64_t fsize; + + dfork.ff_cp = &cnode; + cnode.c_datafork = &dfork; + cnode.c_rsrcfork = NULL; + fsize = (u_int64_t)dfork.ff_blocks * (u_int64_t)HFSTOVCB(hfsmp)->blockSize; + while (fsize > 0) { + if (fsize > HFS_BIGFILE_SIZE) { + fsize -= HFS_BIGFILE_SIZE; + } else { + fsize = 0; + } + + if (TruncateFileC(vcb, (FCB*)&dfork, fsize, false) != 0) { + printf("error truncting data fork!\n"); + break; + } + + // + // if we're iteratively truncating this file down, + // then end the transaction and start a new one so + // that no one transaction gets too big.
+ // + if (fsize > 0 && started_tr) { + journal_end_transaction(hfsmp->jnl); + if (journal_start_transaction(hfsmp->jnl) != 0) { + started_tr = 0; + break; + } + } + } + } - if (filerec.resourceFork.totalBlocks > 0) { - fork.ff_cp = &cnode; + + if (rfork.ff_blocks > 0) { + rfork.ff_cp = &cnode; cnode.c_datafork = NULL; - cnode.c_rsrcfork = &fork; - bcopy(&filerec.resourceFork, &fork.ff_data, sizeof(struct cat_fork)); - if (TruncateFileC(vcb, (FCB*)&fork, 0, false) != 0) { + cnode.c_rsrcfork = &rfork; + if (TruncateFileC(vcb, (FCB*)&rfork, 0, false) != 0) { printf("error truncting rsrc fork!\n"); break; } @@ -1038,21 +1192,37 @@ (void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc, &hfsmp->hfs_privdir_attr, NULL, NULL); hfs_volupdate(hfsmp, VOL_RMFILE, 0); - } - } + + /* Drop locks and end the transaction */ + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); + cat_postflight(hfsmp, &cookie, p); + catlock = catreserve = 0; + if (started_tr) { + journal_end_transaction(hfsmp->jnl); + started_tr = 0; + } + hfs_global_shared_lock_release(hfsmp); + shared_lock = 0; + + } /* end if */ + } /* end for */ exit: - /* Unlock catalog b-tree */ - if (catlock) - (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, current_proc()); - + if (catlock) { + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); + } + if (catreserve) { + cat_postflight(hfsmp, &cookie, p); + } if (started_tr) { journal_end_transaction(hfsmp->jnl); } - hfs_global_shared_lock_release(hfsmp); + if (shared_lock) { + hfs_global_shared_lock_release(hfsmp); + } FREE(iterator, M_TEMP); - hfsmp->hfs_orphans_cleaned = 1; + hfsmp->hfs_flags |= HFS_CLEANED_ORPHANS; } @@ -1111,8 +1281,47 @@ hfs_freeblks(struct hfsmount * hfsmp, int wantreserve) else freeblks = 0; } + if (freeblks > vcb->loanedBlocks) + freeblks -= vcb->loanedBlocks; + else + freeblks = 0; + +#ifdef HFS_SPARSE_DEV + /* + * When the underlying device is sparse, check the + * available space on the backing store volume. + */ + if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && hfsmp->hfs_backingfs_rootvp) { + struct statfs statbuf; /* 272 bytes */ + u_int32_t vfreeblks; + u_int32_t loanedblks; + struct mount * backingfs_mp; + + backingfs_mp = hfsmp->hfs_backingfs_rootvp->v_mount; + + if (VFS_STATFS(backingfs_mp, &statbuf, current_proc()) == 0) { + vfreeblks = statbuf.f_bavail; + /* Normalize block count if needed. */ + if (statbuf.f_bsize != vcb->blockSize) { + vfreeblks = ((u_int64_t)vfreeblks * (u_int64_t)statbuf.f_bsize) / vcb->blockSize; + } + if (vfreeblks > hfsmp->hfs_sparsebandblks) + vfreeblks -= hfsmp->hfs_sparsebandblks; + else + vfreeblks = 0; + + /* Take into account any delayed allocations.
*/ + loanedblks = 2 * vcb->loanedBlocks; + if (vfreeblks > loanedblks) + vfreeblks -= loanedblks; + else + vfreeblks = 0; + + freeblks = MIN(vfreeblks, freeblks); + } + } +#endif /* HFS_SPARSE_DEV */ - freeblks -= vcb->loanedBlocks; return (freeblks); } @@ -1127,9 +1336,8 @@ short MacToVFSError(OSErr err) switch (err) { case dskFulErr: /* -34 */ + case btNoSpaceAvail: /* -32733 */ return ENOSPC; - case btNoSpaceAvail: /* -32733 */ - return EFBIG; case fxOvFlErr: /* -32750 */ return EOVERFLOW; @@ -1184,7 +1392,7 @@ hfs_getnamehint(struct cnode *dcp, int index) void *self; if (index > 0) { - self = current_thread(); + self = current_act(); SLIST_FOREACH(entry, &dcp->c_indexlist, hi_link) { if ((entry->hi_index == index) && (entry->hi_thread == self)) @@ -1211,7 +1419,7 @@ hfs_savenamehint(struct cnode *dcp, int index, const char * namehint) MALLOC(entry, struct hfs_index *, len + sizeof(struct hfs_index), M_TEMP, M_WAITOK); entry->hi_index = index; - entry->hi_thread = current_thread(); + entry->hi_thread = current_act(); bcopy(namehint, entry->hi_name, len + 1); SLIST_INSERT_HEAD(&dcp->c_indexlist, entry, hi_link); } @@ -1229,7 +1437,7 @@ hfs_relnamehint(struct cnode *dcp, int index) void *self; if (index > 0) { - self = current_thread(); + self = current_act(); SLIST_FOREACH(entry, &dcp->c_indexlist, hi_link) { if ((entry->hi_index == index) && (entry->hi_thread == self)) { @@ -1341,6 +1549,7 @@ hfs_early_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, // save this off for the hack-y check in hfs_remove() hfsmp->jnl_start = jibp->offset / SWAP_BE32(vhp->blockSize); + hfsmp->jnl_size = jibp->size; if (jibp->flags & kJIJournalNeedInitMask) { printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n", @@ -1358,6 +1567,8 @@ hfs_early_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, // we'd just re-init it on the next mount. jibp->flags &= ~kJIJournalNeedInitMask; jibp->flags = SWAP_BE32(jibp->flags); + jibp->offset = SWAP_BE64(jibp->offset); + jibp->size = SWAP_BE64(jibp->size); bwrite(jinfo_bp); jinfo_bp = NULL; jibp = NULL; @@ -1382,6 +1593,9 @@ hfs_early_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, if (hfsmp->jnl && mdbp) { // reload the mdb because it could have changed // if the journal had to be replayed. + if (mdb_offset == 0) { + mdb_offset = (embeddedOffset / blksize) + HFS_PRI_SECTOR(blksize); + } retval = meta_bread(devvp, mdb_offset, blksize, cred, &bp); if (retval) { brelse(bp); @@ -1401,9 +1615,7 @@ hfs_early_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, // if we expected the journal to be there and we couldn't // create it or open it then we have to bail out. if (hfsmp->jnl == NULL) { - hfsmp->jnl_start = 0; - - printf("hfs: failed to open/create the journal (retval %d).\n", retval); + printf("hfs: early jnl init: failed to open/create the journal (retval %d).\n", retval); return EINVAL; } @@ -1524,6 +1736,7 @@ hfs_late_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, void *_a // save this off for the hack-y check in hfs_remove() hfsmp->jnl_start = jibp->offset / SWAP_BE32(vhp->blockSize); + hfsmp->jnl_size = jibp->size; if (jibp->flags & kJIJournalNeedInitMask) { printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n", @@ -1585,11 +1798,226 @@ hfs_late_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, void *_a // if we expected the journal to be there and we couldn't // create it or open it then we have to bail out. 
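
In the hfs_freeblks() sparse-device branch above, the backing store reports free space in its own f_bsize units, so the count is rescaled to HFS+ allocation blocks with a 64-bit multiply-then-divide before the MIN(). A standalone re-derivation with made-up sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t vfreeblks = 1000000;   /* backing-store free blocks */
        uint32_t f_bsize   = 512;       /* backing-store block size  */
        uint32_t blockSize = 4096;      /* HFS+ allocation block size */

        /* Widen to 64 bits first so the product cannot overflow. */
        vfreeblks = (uint32_t)(((uint64_t)vfreeblks * f_bsize) / blockSize);
        printf("normalized free blocks: %u\n", vfreeblks);   /* 125000 */
        return 0;
    }
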
if (hfsmp->jnl == NULL) { - hfsmp->jnl_start = 0; - - printf("hfs: failed to open/create the journal (retval %d).\n", retval); + printf("hfs: late jnl init: failed to open/create the journal (retval %d).\n", retval); return EINVAL; } return 0; } + +/* + * Calculate the allocation zone for metadata. + * + * This zone includes the following: + * Allocation Bitmap file + * Overflow Extents file + * Journal file + * Quota files + * Clustered Hot files + * Catalog file + * + * METADATA ALLOCATION ZONE + * ____________________________________________________________________________ + * | | | | | | | + * | BM | JF | OEF | CATALOG |---> | HOT FILES | + * |____|____|_____|_______________|______________________________|___________| + * + * <------------------------------- N * 128 MB -------------------------------> + * + */ +#define GIGABYTE (u_int64_t)(1024*1024*1024) + +#define OVERFLOW_DEFAULT_SIZE (4*1024*1024) +#define OVERFLOW_MAXIMUM_SIZE (128*1024*1024) +#define JOURNAL_DEFAULT_SIZE (8*1024*1024) +#define JOURNAL_MAXIMUM_SIZE (512*1024*1024) +#define HOTBAND_MINIMUM_SIZE (10*1024*1024) +#define HOTBAND_MAXIMUM_SIZE (512*1024*1024) + +static void +hfs_metadatazone_init(struct hfsmount *hfsmp) +{ + ExtendedVCB *vcb; + struct BTreeInfoRec btinfo; + u_int64_t fs_size; + u_int64_t zonesize; + u_int64_t temp; + u_int64_t filesize; + u_int32_t blk; + int items; + + vcb = HFSTOVCB(hfsmp); + fs_size = (u_int64_t)vcb->blockSize * (u_int64_t)vcb->totalBlocks; + + /* + * For volumes less than 10 GB, don't bother. + */ + if (fs_size < ((u_int64_t)10 * GIGABYTE)) + return; + /* + * Skip non-journaled volumes as well. + */ + if (hfsmp->jnl == NULL) + return; + + /* + * Start with allocation bitmap (a fixed size). + */ + zonesize = roundup(vcb->totalBlocks / 8, vcb->vcbVBMIOSize); + + /* + * Overflow Extents file gets 4 MB per 100 GB. + */ + items = fs_size / ((u_int64_t)100 * GIGABYTE); + filesize = (u_int64_t)(items + 1) * OVERFLOW_DEFAULT_SIZE; + if (filesize > OVERFLOW_MAXIMUM_SIZE) + filesize = OVERFLOW_MAXIMUM_SIZE; + zonesize += filesize; + hfsmp->hfs_overflow_maxblks = filesize / vcb->blockSize; + + /* + * Plan for at least 8 MB of journal for each + * 100 GB of disk space (up to a 512 MB). + */ + items = fs_size / ((u_int64_t)100 * GIGABYTE); + filesize = (u_int64_t)(items + 1) * JOURNAL_DEFAULT_SIZE; + if (filesize > JOURNAL_MAXIMUM_SIZE) + filesize = JOURNAL_MAXIMUM_SIZE; + zonesize += filesize; + + /* + * Catalog file gets 10 MB per 1 GB. + * + * How about considering the current catalog size (used nodes * node size) + * and the current file data size to help estimate the required + * catalog size. + */ + filesize = MIN((fs_size / 1024) * 10, GIGABYTE); + hfsmp->hfs_catalog_maxblks = filesize / vcb->blockSize; + zonesize += filesize; + + /* + * Add space for hot file region. + * + * ...for now, use 5 MB per 1 GB (0.5 %) + */ + filesize = (fs_size / 1024) * 5; + if (filesize > HOTBAND_MAXIMUM_SIZE) + filesize = HOTBAND_MAXIMUM_SIZE; + else if (filesize < HOTBAND_MINIMUM_SIZE) + filesize = HOTBAND_MINIMUM_SIZE; + /* + * Calculate user quota file requirements. + */ + items = QF_USERS_PER_GB * (fs_size / GIGABYTE); + if (items < QF_MIN_USERS) + items = QF_MIN_USERS; + else if (items > QF_MAX_USERS) + items = QF_MAX_USERS; + if (!powerof2(items)) { + int x = items; + items = 4; + while (x>>1 != 1) { + x = x >> 1; + items = items << 1; + } + } + filesize += (items + 1) * sizeof(struct dqblk); + /* + * Calculate group quota file requirements. 
+ * + */ + items = QF_GROUPS_PER_GB * (fs_size / GIGABYTE); + if (items < QF_MIN_GROUPS) + items = QF_MIN_GROUPS; + else if (items > QF_MAX_GROUPS) + items = QF_MAX_GROUPS; + if (!powerof2(items)) { + int x = items; + items = 4; + while (x>>1 != 1) { + x = x >> 1; + items = items << 1; + } + } + filesize += (items + 1) * sizeof(struct dqblk); + hfsmp->hfs_hotfile_maxblks = filesize / vcb->blockSize; + zonesize += filesize; + + /* + * Round up entire zone to a bitmap block's worth. + * The extra space goes to the catalog file and hot file area. + */ + temp = zonesize; + zonesize = roundup(zonesize, vcb->vcbVBMIOSize * 8 * vcb->blockSize); + temp = zonesize - temp; /* temp has extra space */ + filesize += temp / 3; + hfsmp->hfs_catalog_maxblks += (temp - (temp / 3)) / vcb->blockSize; + + /* Convert to allocation blocks. */ + blk = zonesize / vcb->blockSize; + + /* The default metadata zone location is at the start of volume. */ + hfsmp->hfs_metazone_start = 1; + hfsmp->hfs_metazone_end = blk - 1; + + /* The default hotfile area is at the end of the zone. */ + hfsmp->hfs_hotfile_start = blk - (filesize / vcb->blockSize); + hfsmp->hfs_hotfile_end = hfsmp->hfs_metazone_end; + hfsmp->hfs_hotfile_freeblks = hfs_hotfile_freeblocks(hfsmp); +#if 0 + printf("HFS: metadata zone is %d to %d\n", hfsmp->hfs_metazone_start, hfsmp->hfs_metazone_end); + printf("HFS: hot file band is %d to %d\n", hfsmp->hfs_hotfile_start, hfsmp->hfs_hotfile_end); + printf("HFS: hot file band free blocks = %d\n", hfsmp->hfs_hotfile_freeblks); +#endif + hfsmp->hfs_flags |= HFS_METADATA_ZONE; +} + + +static u_int32_t +hfs_hotfile_freeblocks(struct hfsmount *hfsmp) +{ + ExtendedVCB *vcb = HFSTOVCB(hfsmp); + int freeblocks; + + freeblocks = MetaZoneFreeBlocks(vcb); + /* Minus Extents overflow file reserve. */ + freeblocks -= + hfsmp->hfs_overflow_maxblks - VTOF(vcb->extentsRefNum)->ff_blocks; + /* Minus catalog file reserve. */ + freeblocks -= + hfsmp->hfs_catalog_maxblks - VTOF(vcb->catalogRefNum)->ff_blocks; + if (freeblocks < 0) + freeblocks = 0; + + return MIN(freeblocks, hfsmp->hfs_hotfile_maxblks); +} + +/* + * Determine if a file is a "virtual" metadata file. + * This includes journal and quota files. + */ +__private_extern__ +int +hfs_virtualmetafile(struct cnode *cp) +{ + char * filename; + + + if (cp->c_parentcnid != kHFSRootFolderID) + return (0); + + filename = cp->c_desc.cd_nameptr; + if (filename == NULL) + return (0); + + if ((strcmp(filename, ".journal") == 0) || + (strcmp(filename, ".journal_info_block") == 0) || + (strcmp(filename, ".quota.user") == 0) || + (strcmp(filename, ".quota.group") == 0) || + (strcmp(filename, ".hotfiles.btree") == 0)) + return (1); + + return (0); +} + diff --git a/bsd/hfs/hfs_vnops.c b/bsd/hfs/hfs_vnops.c index b53d8eb2f..1c6444b2d 100644 --- a/bsd/hfs/hfs_vnops.c +++ b/bsd/hfs/hfs_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
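
The sizing arithmetic in hfs_metadatazone_init() above can be re-run outside the kernel. A simplified standalone sketch (quota-file terms, clamps, and bitmap-block rounding omitted; the scaling constants are copied from the patch) for a hypothetical 100 GB volume with 4 KB allocation blocks:

    #include <stdint.h>
    #include <stdio.h>

    #define GIGABYTE ((uint64_t)1024*1024*1024)

    int main(void)
    {
        uint64_t fs_size = 100 * GIGABYTE;            /* hypothetical volume */
        uint64_t blksz   = 4096;
        uint64_t items   = fs_size / (100 * GIGABYTE);
        uint64_t zone = 0, catalog, hotband;

        zone += (fs_size / blksz) / 8;                /* allocation bitmap */
        zone += (items + 1) * (4*1024*1024);          /* overflow extents: 4 MB per 100 GB */
        zone += (items + 1) * (8*1024*1024);          /* journal: 8 MB per 100 GB */

        catalog = (fs_size / 1024) * 10;              /* catalog: 10 MB per GB, capped at 1 GB */
        if (catalog > GIGABYTE)
            catalog = GIGABYTE;
        zone += catalog;

        hotband = (fs_size / 1024) * 5;               /* hot file band: ~0.5% of volume */
        zone += hotband;

        printf("metadata zone ~= %llu MB\n",
            (unsigned long long)(zone / (1024*1024)));   /* ~1527 MB */
        return 0;
    }
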
* * @APPLE_LICENSE_HEADER_START@ * @@ -35,6 +35,8 @@ #include #include #include +#include +#include #include #include @@ -58,14 +60,16 @@ #define MAKE_DELETED_NAME(NAME,FID) \ (void) sprintf((NAME), "%s%d", HFS_DELETE_PREFIX, (FID)) +#define KNDETACH_VNLOCKED 0x00000001 -extern uid_t console_user; +#define CARBON_TEMP_DIR_NAME "Cleanup At Startup" -extern unsigned long strtoul(const char *, char **, int); /* Global vfs data structures for hfs */ +extern unsigned long strtoul(const char *, char **, int); + extern int groupmember(gid_t gid, struct ucred *cred); static int hfs_makenode(int mode, struct vnode *dvp, struct vnode **vpp, @@ -76,6 +80,19 @@ static int hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, static int hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p); +static int hfs_removedir(struct vnode *, struct vnode *, struct componentname *, + int); + +static int hfs_removefile(struct vnode *, struct vnode *, struct componentname *, + int); + +/* Options for hfs_removedir and hfs_removefile */ +#define HFSRM_PARENT_LOCKED 0x01 +#define HFSRM_SKIP_RESERVE 0x02 +#define HFSRM_SAVE_NAME 0x04 +#define HFSRM_RENAMEOPTS 0x07 + + int hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags); int hfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred, @@ -200,6 +217,8 @@ hfs_open(ap) } */ *ap; { struct vnode *vp = ap->a_vp; + struct filefork *fp = VTOF(vp); + struct timeval tv; /* * Files marked append-only must be opened for appending. @@ -208,6 +227,36 @@ hfs_open(ap) (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE) return (EPERM); + if (ap->a_mode & O_EVTONLY) { + if (vp->v_type == VREG) { + ++VTOF(vp)->ff_evtonly_refs; + } else { + ++VTOC(vp)->c_evtonly_refs; + }; + }; + + /* + * On the first (non-busy) open of a fragmented + * file attempt to de-frag it (if its less than 20MB). + */ + if ((VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) || + !UBCISVALID(vp) || ubc_isinuse(vp, 1)) { + return (0); + } + fp = VTOF(vp); + if (fp->ff_blocks && + fp->ff_extents[7].blockCount != 0 && + fp->ff_size <= (20 * 1024 * 1024)) { + /* + * Wait until system bootup is done (3 min). + */ + microuptime(&tv); + if (tv.tv_sec < (60 * 3)) { + return (0); + } + (void) hfs_relocate(vp, VTOVCB(vp)->nextAllocation + 4096, ap->a_cred, ap->a_p); + } + return (0); } @@ -252,6 +301,14 @@ hfs_close(ap) } simple_unlock(&vp->v_interlock); + if (ap->a_fflag & O_EVTONLY) { + if (vp->v_type == VREG) { + --VTOF(vp)->ff_evtonly_refs; + } else { + --VTOC(vp)->c_evtonly_refs; + }; + }; + /* * VOP_CLOSE can be called with vp locked (from vclean). * We check for this case using VOP_ISLOCKED and bail. @@ -263,7 +320,9 @@ hfs_close(ap) leof = fp->ff_size; - if ((fp->ff_blocks > 0) && !ISSET(cp->c_flag, C_DELETED)) { + if ((fp->ff_blocks > 0) && + !ISSET(cp->c_flag, C_DELETED) && + ((VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) == 0)) { enum vtype our_type = vp->v_type; u_long our_id = vp->v_id; int was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA); @@ -336,6 +395,8 @@ hfs_close(ap) } VOP_UNLOCK(vp, 0, p); } + if ((vp->v_flag & VSYSTEM) && (vp->v_usecount == 1)) + vgone(vp); return (0); } @@ -378,7 +439,7 @@ hfs_access(ap) case VDIR: case VLNK: case VREG: - if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) + if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) return (EROFS); #if QUOTA if ((error = hfs_getinoquota(cp))) @@ -386,20 +447,20 @@ hfs_access(ap) #endif /* QUOTA */ break; } + /* If immutable bit set, nobody gets to write it. 
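
The new defragment-on-open logic in hfs_open() above fires only under narrow conditions: the open is the first non-busy one, the file has spilled into its eighth inline extent slot, it is at most 20 MB, and the system has been up at least three minutes. A toy restatement of the trigger (fields flattened into plain parameters; the limits are copied from the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* extent7_count models fp->ff_extents[7].blockCount: nonzero means
     * the fork likely has overflow extents, i.e. it is fragmented. */
    static int should_defrag(uint32_t blocks, uint32_t extent7_count,
        uint64_t size, long uptime_sec)
    {
        return blocks != 0 && extent7_count != 0 &&
            size <= 20 * 1024 * 1024 && uptime_sec >= 60 * 3;
    }

    int main(void)
    {
        printf("%d\n", should_defrag(100, 3, 5 << 20, 600));  /* 1: relocate */
        printf("%d\n", should_defrag(100, 0, 5 << 20, 600));  /* 0: contiguous */
        return 0;
    }
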
*/ + if (cp->c_flags & IMMUTABLE) + return (EPERM); } - /* If immutable bit set, nobody gets to write it. */ - if ((mode & VWRITE) && (cp->c_flags & IMMUTABLE)) - return (EPERM); /* Otherwise, user id 0 always gets access. */ - if (ap->a_cred->cr_uid == 0) + if (cred->cr_uid == 0) return (0); mask = 0; /* Otherwise, check the owner. */ - if (hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, ap->a_p, false) == 0) { + if ( (cp->c_uid == cred->cr_uid) || (cp->c_uid == UNKNOWNUID) ) { if (mode & VEXEC) mask |= S_IXUSR; if (mode & VREAD) @@ -466,6 +527,8 @@ hfs_getattr(ap) CTIMES(cp, &tv, &tv); vap->va_type = vp->v_type; + vap->va_mode = cp->c_mode; + vap->va_nlink = cp->c_nlink; /* * [2856576] Since we are dynamically changing the owner, also * effectively turn off the set-user-id and set-group-id bits, @@ -473,9 +536,12 @@ * a security hole where set-user-id programs run as whoever is * logged on (or root if nobody is logged in yet!) */ - vap->va_mode = (cp->c_uid == UNKNOWNUID) ? cp->c_mode & ~(S_ISUID | S_ISGID) : cp->c_mode; - vap->va_nlink = cp->c_nlink; - vap->va_uid = (cp->c_uid == UNKNOWNUID) ? console_user : cp->c_uid; + if (cp->c_uid == UNKNOWNUID) { + vap->va_mode &= ~(S_ISUID | S_ISGID); + vap->va_uid = ap->a_cred->cr_uid; + } else { + vap->va_uid = cp->c_uid; + } vap->va_gid = cp->c_gid; vap->va_fsid = cp->c_dev; /* @@ -502,7 +568,6 @@ hfs_getattr(ap) vap->va_rdev = 0; vap->va_blocksize = VTOVFS(vp)->mnt_stat.f_iosize; vap->va_filerev = 0; - vap->va_spare = 0; if (vp->v_type == VDIR) { vap->va_size = cp->c_nlink * AVERAGE_HFSDIRENTRY_SIZE; vap->va_bytes = 0; @@ -555,8 +620,19 @@ hfs_setattr(ap) return (EINVAL); } + // XXXdbg + // don't allow people to set the attributes of symlinks + // (nfs has a bad habit of doing this and it can cause + // problems for journaling). + // + if (vp->v_type == VLNK) { + return 0; + } + + + if (vap->va_flags != VNOVAL) { - if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) + if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) return (EROFS); if ((error = hfs_chflags(vp, vap->va_flags, cred, p))) return (error); @@ -571,7 +647,7 @@ hfs_setattr(ap) if (VTOHFS(vp)->jnl && cp->c_datafork) { struct HFSPlusExtentDescriptor *extd; - extd = &cp->c_datafork->ff_data.cf_extents[0]; + extd = &cp->c_datafork->ff_extents[0]; if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) { return EPERM; } @@ -581,7 +657,7 @@ * Go through the fields and update iff not VNOVAL.
*/ if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { - if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) + if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) return (EROFS); if ((error = hfs_chown(vp, vap->va_uid, vap->va_gid, cred, p))) return (error); @@ -597,7 +673,7 @@ return (EISDIR); case VLNK: case VREG: - if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) + if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) return (EROFS); break; default: @@ -608,7 +684,7 @@ } cp = VTOC(vp); if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { - if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) + if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) return (EROFS); if (((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) && ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || @@ -640,10 +716,11 @@ } error = 0; if (vap->va_mode != (mode_t)VNOVAL) { - if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) + if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) return (EROFS); error = hfs_chmod(vp, (int)vap->va_mode, cred, p); } + HFS_KNOTE(vp, NOTE_ATTRIB); return (error); } @@ -652,6 +729,7 @@ * Change the mode on a file. * cnode must be locked before calling. */ +__private_extern__ int hfs_chmod(vp, mode, cred, p) register struct vnode *vp; @@ -669,7 +747,7 @@ hfs_chmod(vp, mode, cred, p) if (VTOHFS(vp)->jnl && cp && cp->c_datafork) { struct HFSPlusExtentDescriptor *extd; - extd = &cp->c_datafork->ff_data.cf_extents[0]; + extd = &cp->c_datafork->ff_extents[0]; if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) { return EPERM; } @@ -695,6 +773,7 @@ hfs_chmod(vp, mode, cred, p) } +__private_extern__ int hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags) { @@ -712,9 +791,9 @@ hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean c case VDIR: case VLNK: case VREG: - if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) + if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) return (EROFS); - break; + break; default: break; } @@ -747,6 +826,7 @@ hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean c * Change the flags on a file or directory. * cnode must be locked before calling. */ +__private_extern__ int hfs_chflags(vp, flags, cred, p) register struct vnode *vp; @@ -789,6 +869,7 @@ hfs_chflags(vp, flags, cred, p) * Perform chown operation on cnode cp; * cnode must be locked prior to call. */ +__private_extern__ int hfs_chown(vp, uid, gid, cred, p) register struct vnode *vp; @@ -934,14 +1015,13 @@ hfs_exchange(ap) { struct vnode *from_vp = ap->a_fvp; struct vnode *to_vp = ap->a_tvp; - struct vnode *from_rvp = NULL; - struct vnode *to_rvp = NULL; struct cnode *from_cp = VTOC(from_vp); struct cnode *to_cp = VTOC(to_vp); struct hfsmount *hfsmp = VTOHFS(from_vp); struct cat_desc tempdesc; struct cat_attr tempattr; int error = 0, started_tr = 0, grabbed_lock = 0; + cat_cookie_t cookie = {0}; /* The files must be on the same volume.
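As background for the UF_NODUMP handling this patch adds elsewhere (the "Cleanup At Startup" logic), that flag is an ordinary BSD file flag settable from userspace with chflags(2); a small illustrative sketch, path hypothetical:

    #include <sys/stat.h>
    #include <unistd.h>
    #include <stdio.h>

    int
    main(void)
    {
        const char *path = "/tmp/scratch-file";   /* hypothetical */
        struct stat sb;

        if (stat(path, &sb) != 0) {
            perror("stat");
            return (1);
        }
        /* Preserve the existing flags and add UF_NODUMP ("do not dump"). */
        if (chflags(path, sb.st_flags | UF_NODUMP) != 0) {
            perror("chflags");
            return (1);
        }
        return (0);
    }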
*/ if (from_vp->v_mount != to_vp->v_mount) @@ -958,45 +1038,20 @@ hfs_exchange(ap) struct HFSPlusExtentDescriptor *extd; if (from_cp->c_datafork) { - extd = &from_cp->c_datafork->ff_data.cf_extents[0]; + extd = &from_cp->c_datafork->ff_extents[0]; if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) { return EPERM; } } if (to_cp->c_datafork) { - extd = &to_cp->c_datafork->ff_data.cf_extents[0]; + extd = &to_cp->c_datafork->ff_extents[0]; if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) { return EPERM; } } } - from_rvp = from_cp->c_rsrc_vp; - to_rvp = to_cp->c_rsrc_vp; - - /* If one of the resource forks is open then get the other one. */ - if (from_rvp || to_rvp) { - error = hfs_vgetrsrc(hfsmp, from_vp, &from_rvp, ap->a_p); - if (error) - return (error); - error = hfs_vgetrsrc(hfsmp, to_vp, &to_rvp, ap->a_p); - if (error) { - vrele(from_rvp); - return (error); - } - } - - /* Ignore any errors, we are doing a 'best effort' on flushing */ - if (from_vp) - (void) vinvalbuf(from_vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0); - if (to_vp) - (void) vinvalbuf(to_vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0); - if (from_rvp) - (void) vinvalbuf(from_rvp, V_SAVE, ap->a_cred, ap->a_p, 0, 0); - if (to_rvp) - (void) vinvalbuf(to_rvp, V_SAVE, ap->a_cred, ap->a_p, 0, 0); - // XXXdbg hfs_global_shared_lock_acquire(hfsmp); grabbed_lock = 1; @@ -1007,6 +1062,13 @@ hfs_exchange(ap) started_tr = 1; } + /* + * Reserve some space in the Catalog file. + */ + if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, ap->a_p))) { + goto Err_Exit; + } + /* Lock catalog b-tree */ error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, ap->a_p); if (error) goto Err_Exit; @@ -1091,18 +1153,17 @@ hfs_exchange(ap) from_cp->c_flags &= ~UF_NODUMP; from_cp->c_flag |= C_CHANGE; } - if ((to_cp->c_flags & UF_NODUMP) && (to_cp->c_parentcnid != from_cp->c_parentcnid)) { to_cp->c_flags &= ~UF_NODUMP; to_cp->c_flag |= C_CHANGE; } + HFS_KNOTE(from_vp, NOTE_ATTRIB); + HFS_KNOTE(to_vp, NOTE_ATTRIB); + Err_Exit: - if (to_rvp) - vrele(to_rvp); - if (from_rvp) - vrele(from_rvp); + cat_postflight(hfsmp, &cookie, ap->a_p); // XXXdbg if (started_tr) { @@ -1161,12 +1222,7 @@ hfs_fsync(ap) if (vp->v_flag & VSYSTEM) { if (VTOF(vp)->fcbBTCBPtr != NULL) { // XXXdbg - if (hfsmp->jnl) { - if (BTIsDirty(VTOF(vp))) { - panic("hfs: system file vp 0x%x has dirty blocks (jnl 0x%x)\n", - vp, hfsmp->jnl); - } - } else { + if (hfsmp->jnl == NULL) { BTFlushPath(VTOF(vp)); } } @@ -1311,6 +1367,18 @@ metasync: !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) { hfs_metasync(VTOHFS(vp), cp->c_hint, ap->a_p); } + + // make sure that we've really been called from the user + // fsync() and if so push out any pending transactions + // that this file might be a part of (and get them on + // stable storage).
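From userspace, the VFULLFSYNC branch that follows is reached via fcntl(F_FULLFSYNC) (assuming the standard Mac OS X fcntl path, which sets the flag around VOP_FSYNC); a minimal sketch, path hypothetical:

    #include <fcntl.h>
    #include <unistd.h>
    #include <stdio.h>

    int
    main(void)
    {
        int fd = open("/tmp/important-data", O_WRONLY);   /* hypothetical */

        if (fd < 0) {
            perror("open");
            return (1);
        }
        /*
         * Unlike plain fsync(2), F_FULLFSYNC also asks the storage driver
         * to flush its write cache (the DKIOCSYNCHRONIZECACHE ioctl in
         * the non-journaled case below).
         */
        if (fcntl(fd, F_FULLFSYNC) != 0)
            perror("fcntl(F_FULLFSYNC)");
        close(fd);
        return (0);
    }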
+ if (vp->v_flag & VFULLFSYNC) { + if (hfsmp->jnl) { + journal_flush(hfsmp->jnl); + } else { + VOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NOCRED, ap->a_p); + } + } } return (retval); @@ -1443,13 +1511,25 @@ hfs_rmdir(ap) struct componentname *a_cnp; } */ *ap; { - struct vnode *vp = ap->a_vp; - struct vnode *dvp = ap->a_dvp; - struct proc *p = ap->a_cnp->cn_proc; + return (hfs_removedir(ap->a_dvp, ap->a_vp, ap->a_cnp, 0)); +} + +/* + * hfs_removedir + */ +static int +hfs_removedir(dvp, vp, cnp, options) + struct vnode *dvp; + struct vnode *vp; + struct componentname *cnp; + int options; +{ + struct proc *p = cnp->cn_proc; struct cnode *cp; struct cnode *dcp; struct hfsmount * hfsmp; struct timeval tv; + cat_cookie_t cookie = {0}; int error = 0, started_tr = 0, grabbed_lock = 0; cp = VTOC(vp); @@ -1465,7 +1545,6 @@ hfs_rmdir(ap) #if QUOTA (void)hfs_getinoquota(cp); #endif - // XXXdbg hfs_global_shared_lock_acquire(hfsmp); grabbed_lock = 1; @@ -1476,6 +1555,15 @@ hfs_rmdir(ap) started_tr = 1; } + if (!(options & HFSRM_SKIP_RESERVE)) { + /* + * Reserve some space in the Catalog file. + */ + if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) { + goto out; + } + } + /* * Verify the directory is empty (and valid). * (Rmdir ".." won't be valid since @@ -1520,16 +1608,22 @@ hfs_rmdir(ap) dcp->c_flag |= C_CHANGE | C_UPDATE; tv = time; (void) VOP_UPDATE(dvp, &tv, &tv, 0); + HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID)); cp->c_mode = 0; /* Makes the vnode go away...see inactive */ cp->c_flag |= C_NOEXISTS; out: - if (dvp) + if (!(options & HFSRM_PARENT_LOCKED)) { vput(dvp); + } + HFS_KNOTE(vp, NOTE_DELETE); vput(vp); + if (!(options & HFSRM_SKIP_RESERVE)) { + cat_postflight(hfsmp, &cookie, p); + } // XXXdbg if (started_tr) { journal_end_transaction(hfsmp->jnl); @@ -1561,23 +1655,42 @@ hfs_remove(ap) struct componentname *a_cnp; } */ *ap; { - struct vnode *vp = ap->a_vp; - struct vnode *dvp = ap->a_dvp; + return (hfs_removefile(ap->a_dvp, ap->a_vp, ap->a_cnp, 0)); +} + + + +/* + * hfs_removefile + * + * Similar to hfs_remove except there are additional options. + */ +static int +hfs_removefile(dvp, vp, cnp, options) + struct vnode *dvp; + struct vnode *vp; + struct componentname *cnp; + int options; +{ struct vnode *rvp = NULL; struct cnode *cp; struct cnode *dcp; struct hfsmount *hfsmp; - struct proc *p = current_proc(); + struct proc *p = cnp->cn_proc; int dataforkbusy = 0; int rsrcforkbusy = 0; int truncated = 0; struct timeval tv; + cat_cookie_t cookie = {0}; int error = 0; int started_tr = 0, grabbed_lock = 0; + int refcount, isbigfile = 0; - /* Redirect directories to rmdir */ - if (vp->v_type == VDIR) - return (hfs_rmdir(ap)); + /* Directories should call hfs_rmdir! */ + if (vp->v_type == VDIR) { + error = EISDIR; + goto out; + } cp = VTOC(vp); dcp = VTOC(dvp); @@ -1610,7 +1723,7 @@ hfs_remove(ap) if (hfsmp->jnl && cp->c_datafork) { struct HFSPlusExtentDescriptor *extd; - extd = &cp->c_datafork->ff_data.cf_extents[0]; + extd = &cp->c_datafork->ff_extents[0]; if (extd->startBlock == HFSTOVCB(hfsmp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) { error = EPERM; goto out; @@ -1624,18 +1737,27 @@ hfs_remove(ap) * vnode (vp). And we took a ref on the resource vnode (rvp). * Hence set 1 in the tookref parameter of ubc_isinuse(). 
*/ - if (UBCISVALID(vp) && ubc_isinuse(vp, 1)) + if (VTOC(vp)->c_flag & C_VPREFHELD) { + refcount = 2; + } else { + refcount = 1; + } + if (UBCISVALID(vp) && ubc_isinuse(vp, refcount)) dataforkbusy = 1; if (rvp && UBCISVALID(rvp) && ubc_isinuse(rvp, 1)) rsrcforkbusy = 1; + // need this to check if we have to break the deletion + // into multiple pieces + isbigfile = (VTOC(vp)->c_datafork->ff_size >= HFS_BIGFILE_SIZE); + /* * Carbon semantics prohibit deleting busy files. * (enforced when NODELETEBUSY is requested) */ if ((dataforkbusy || rsrcforkbusy) && - ((ap->a_cnp->cn_flags & NODELETEBUSY) || - (hfsmp->hfs_private_metadata_dir == 0))) { + ((cnp->cn_flags & NODELETEBUSY) || + (hfsmp->hfs_privdir_desc.cd_cnid == 0))) { error = EBUSY; goto out; } @@ -1654,6 +1776,15 @@ hfs_remove(ap) started_tr = 1; } + if (!(options & HFSRM_SKIP_RESERVE)) { + /* + * Reserve some space in the Catalog file. + */ + if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) { + goto out; + } + } + /* Remove our entry from the namei cache. */ cache_purge(vp); @@ -1695,7 +1826,7 @@ hfs_remove(ap) if ((cp->c_flag & C_HARDLINK) == 0) { int mode = cp->c_mode; - if (!dataforkbusy && cp->c_datafork->ff_blocks != 0) { + if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) { cp->c_mode = 0; /* Suppress VOP_UPDATES */ error = VOP_TRUNCATE(vp, (off_t)0, IO_NDELAY, NOCRED, p); cp->c_mode = mode; @@ -1722,16 +1853,16 @@ hfs_remove(ap) if (cp->c_flag & C_HARDLINK) { struct cat_desc desc; - if ((ap->a_cnp->cn_flags & HASBUF) == 0 || - ap->a_cnp->cn_nameptr[0] == '\0') { + if ((cnp->cn_flags & HASBUF) == 0 || + cnp->cn_nameptr[0] == '\0') { error = ENOENT; /* name missing! */ goto out; } /* Setup a descriptor for the link */ bzero(&desc, sizeof(desc)); - desc.cd_nameptr = ap->a_cnp->cn_nameptr; - desc.cd_namelen = ap->a_cnp->cn_namelen; + desc.cd_nameptr = cnp->cn_nameptr; + desc.cd_namelen = cnp->cn_namelen; desc.cd_parentcnid = dcp->c_cnid; /* XXX - if cnid is out of sync then the wrong thread rec will get deleted. */ desc.cd_cnid = cp->c_cnid; @@ -1760,7 +1891,7 @@ hfs_remove(ap) bzero(&from_desc, sizeof(from_desc)); from_desc.cd_nameptr = inodename; from_desc.cd_namelen = strlen(inodename); - from_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir; + from_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid; from_desc.cd_flags = 0; from_desc.cd_cnid = cp->c_fileid; @@ -1768,7 +1899,7 @@ hfs_remove(ap) bzero(&to_desc, sizeof(to_desc)); to_desc.cd_nameptr = delname; to_desc.cd_namelen = strlen(delname); - to_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir; + to_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid; to_desc.cd_flags = 0; to_desc.cd_cnid = cp->c_fileid; @@ -1780,10 +1911,6 @@ hfs_remove(ap) /* Unlock the Catalog */ (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); - /* All done with component name... 
*/ - if ((ap->a_cnp->cn_flags & (HASBUF | SAVENAME)) == (HASBUF | SAVENAME)) - FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); - if (error != 0) goto out; @@ -1793,7 +1920,7 @@ hfs_remove(ap) hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID)); - } else if (dataforkbusy || rsrcforkbusy) { + } else if (dataforkbusy || rsrcforkbusy || isbigfile) { char delname[32]; struct cat_desc to_desc; struct cat_desc todir_desc; @@ -1808,7 +1935,7 @@ hfs_remove(ap) bzero(&to_desc, sizeof(to_desc)); to_desc.cd_nameptr = delname; to_desc.cd_namelen = strlen(delname); - to_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir; + to_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid; to_desc.cd_flags = 0; to_desc.cd_cnid = cp->c_cnid; @@ -1839,9 +1966,14 @@ hfs_remove(ap) } else /* Not busy */ { if (cp->c_blocks > 0) { - printf("hfs_remove: attempting to delete a non-empty file!"); +#if 0 + panic("hfs_remove: attempting to delete a non-empty file!"); +#else + printf("hfs_remove: attempting to delete a non-empty file %s\n", + cp->c_desc.cd_nameptr); error = EBUSY; goto out; +#endif } /* Lock catalog b-tree */ @@ -1852,10 +1984,10 @@ hfs_remove(ap) error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr); if (error && error != ENXIO && error != ENOENT && truncated) { - if ((cp->c_datafork && cp->c_datafork->ff_data.cf_size != 0) || - (cp->c_rsrcfork && cp->c_rsrcfork->ff_data.cf_size != 0)) { + if ((cp->c_datafork && cp->c_datafork->ff_size != 0) || + (cp->c_rsrcfork && cp->c_rsrcfork->ff_size != 0)) { panic("hfs: remove: couldn't delete a truncated file! (%d, data sz %lld; rsrc sz %lld)", - error, cp->c_datafork->ff_data.cf_size, cp->c_rsrcfork->ff_data.cf_size); + error, cp->c_datafork->ff_size, cp->c_rsrcfork->ff_size); } else { printf("hfs: remove: strangely enough, deleting truncated file %s (%d) got err %d\n", cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error); @@ -1871,6 +2003,7 @@ hfs_remove(ap) #endif /* QUOTA */ cp->c_mode = 0; + truncated = 0; // because the catalog entry is gone cp->c_flag |= C_CHANGE | C_NOEXISTS; --cp->c_nlink; hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID)); @@ -1894,39 +2027,29 @@ hfs_remove(ap) dcp->c_flag |= C_CHANGE | C_UPDATE; tv = time; (void) VOP_UPDATE(dvp, &tv, &tv, 0); + HFS_KNOTE(dvp, NOTE_WRITE); - // XXXdbg - if (started_tr) { - journal_end_transaction(hfsmp->jnl); - } - if (grabbed_lock) { - hfs_global_shared_lock_release(hfsmp); +out: + /* All done with component name... 
*/ + if ((options & HFSRM_SAVE_NAME) == 0 && + (cnp != 0) && + (cnp->cn_flags & (HASBUF | SAVENAME)) == (HASBUF | SAVENAME)) { + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); } - if (rvp) - vrele(rvp); - VOP_UNLOCK(vp, 0, p); - // XXXdbg - try to prevent the lost ubc_info panic - if ((cp->c_flag & C_HARDLINK) == 0 || cp->c_nlink == 0) { - (void) ubc_uncache(vp); + if (!(options & HFSRM_SKIP_RESERVE)) { + cat_postflight(hfsmp, &cookie, p); } - vrele(vp); - vput(dvp); - - return (0); -out: - if (rvp) - vrele(rvp); - /* Commit the truncation to the catalog record */ if (truncated) { - cp->c_flag |= C_CHANGE | C_UPDATE; - tv = time; - (void) VOP_UPDATE(vp, &tv, &tv, 0); + cp->c_flag |= C_CHANGE | C_UPDATE | C_FORCEUPDATE; + tv = time; + (void) VOP_UPDATE(vp, &tv, &tv, 0); } - vput(vp); - vput(dvp); // XXXdbg if (started_tr) { @@ -1936,6 +2059,26 @@ out: hfs_global_shared_lock_release(hfsmp); } + HFS_KNOTE(vp, NOTE_DELETE); + if (rvp) { + HFS_KNOTE(rvp, NOTE_DELETE); + vrele(rvp); + }; + + if (error) { + vput(vp); + } else { + VOP_UNLOCK(vp, 0, p); + // XXXdbg - try to prevent the lost ubc_info panic + if ((cp->c_flag & C_HARDLINK) == 0 || cp->c_nlink == 0) { + (void) ubc_uncache(vp); + } + vrele(vp); + } + if (!(options & HFSRM_PARENT_LOCKED)) { + vput(dvp); + } + return (error); } @@ -1950,7 +2093,7 @@ replace_desc(struct cnode *cp, struct cat_desc *cdp) cp->c_desc.cd_nameptr = 0; cp->c_desc.cd_namelen = 0; cp->c_desc.cd_flags &= ~CD_HASBUF; - FREE(name, M_TEMP); + remove_name(name); } bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc)); @@ -1963,19 +2106,11 @@ replace_desc(struct cnode *cp, struct cat_desc *cdp) /* # -#% rename fdvp U U U -#% rename fvp U U U -#% rename tdvp L U U -#% rename tvp X U U +#% rename fdvp U U U +#% rename fvp U U U +#% rename tdvp L U U +#% rename tvp X U U # - vop_rename { - IN WILLRELE struct vnode *fdvp; - IN WILLRELE struct vnode *fvp; - IN struct componentname *fcnp; - IN WILLRELE struct vnode *tdvp; - IN WILLRELE struct vnode *tvp; - IN struct componentname *tcnp; - }; */ /* * Rename a cnode. @@ -2014,53 +2149,114 @@ hfs_rename(ap) struct cat_desc from_desc; struct cat_desc to_desc; struct cat_desc out_desc; - struct hfsmount *hfsmp; + struct hfsmount *hfsmp = NULL; struct timeval tv; - int fdvp_locked, fvp_locked, tdvp_locked; + cat_cookie_t cookie = {0}; + int fdvp_locked, fvp_locked, tdvp_locked, tvp_locked; int tvp_deleted; int started_tr = 0, grabbed_lock = 0; int error = 0; - hfsmp = VTOHFS(tdvp); /* Establish our vnode lock state. */ tdvp_locked = 1; + tvp_locked = (tvp != 0); fdvp_locked = 0; fvp_locked = 0; tvp_deleted = 0; + /* + * Check for cross-device rename. + */ + if ((fvp->v_mount != tdvp->v_mount) || + (tvp && (fvp->v_mount != tvp->v_mount))) { + error = EXDEV; + goto out; + } + /* * When fvp matches tvp they must be case variants * or hard links. * - * For the hardlink case there can be an extra ref on fvp. + * In some cases tvp will be locked; in other cases + * it will be unlocked with no reference. Normalize the + * state here (unlocked with a reference) so that + * we can exit in a known state.
*/ if (fvp == tvp) { - if (VOP_ISLOCKED(fvp) && - (VTOC(fvp)->c_lock.lk_lockholder == p->p_pid) && - (VTOC(fvp)->c_lock.lk_lockthread == current_thread())) { - fvp_locked = 1; - vrele(fvp); /* drop the extra ref */ + if (VOP_ISLOCKED(tvp) && + (VTOC(tvp)->c_lock.lk_lockholder == p->p_pid) && + (VTOC(tvp)->c_lock.lk_lockthread == current_thread())) { + vput(tvp); } tvp = NULL; + tvp_locked = 0; + /* - * If this a hard link and its not a case - * variant then keep tvp around for removal. + * If this is a hard link with different parents + * and it's not a case variant then keep tvp + * around for removal. */ if ((VTOC(fvp)->c_flag & C_HARDLINK) && ((fdvp != tdvp) || (hfs_namecmp(fcnp->cn_nameptr, fcnp->cn_namelen, tcnp->cn_nameptr, tcnp->cn_namelen) != 0))) { tvp = fvp; + vref(tvp); } } /* - * Check for cross-device rename. + * The following edge case is caught here: + * (to cannot be a descendant of from) + * + * o fdvp + * / + * / + * o fvp + * \ + * \ + * o tdvp + * / + * / + * o tvp */ - if ((fvp->v_mount != tdvp->v_mount) || - (tvp && (fvp->v_mount != tvp->v_mount))) { - error = EXDEV; + if (tdcp->c_parentcnid == VTOC(fvp)->c_cnid) { + error = EINVAL; + goto out; + } + + /* + * The following two edge cases are caught here: + * (note tvp is not empty) + * + * o tdvp o tdvp + * / / + * / / + * o tvp tvp o fdvp + * \ \ + * \ \ + * o fdvp o fvp + * / + * / + * o fvp */ + if (tvp && (tvp->v_type == VDIR) && (VTOC(tvp)->c_entries != 0)) { + error = ENOTEMPTY; + goto out; + } + + /* + * The following edge case is caught here: + * (the from child and parent are the same) + * + * o tdvp + * / + * / + * fdvp o fvp + */ + if (fdvp == fvp) { + error = EINVAL; goto out; } @@ -2073,16 +2269,7 @@ hfs_rename(ap) goto out; } - /* - * Be sure we are not renaming ".", "..", or an alias of ".". - */ - if ((fvp->v_type == VDIR) && - (((fcnp->cn_namelen == 1) && (fcnp->cn_nameptr[0] == '.')) || - (fdvp == fvp) || - (fcnp->cn_flags&ISDOTDOT))) { - error = EINVAL; - goto out; - } + hfsmp = VTOHFS(tdvp); /* * If the destination parent directory is "sticky", then the @@ -2090,146 +2277,123 @@ hfs_rename(ap) * user must own the parent directory, or the destination of * the rename, otherwise the destination may not be changed * (except by root). This implements append-only directories. * - * Note that checks for immutable, write access, and a non-empty - * target are done by the call to VOP_REMOVE. + * Note that checks for immutable and write access are done + * by the call to VOP_REMOVE. */ if (tvp && (tdcp->c_mode & S_ISTXT) && (tcnp->cn_cred->cr_uid != 0) && (tcnp->cn_cred->cr_uid != tdcp->c_uid) && (hfs_owner_rights(hfsmp, VTOC(tvp)->c_uid, tcnp->cn_cred, p, false)) ) { - error = EPERM; - goto out; + error = EPERM; + goto out; } +#if QUOTA + if (tvp) + (void)hfs_getinoquota(VTOC(tvp)); +#endif + /* - * All done with preflighting. - * - * We now break the call into two transactions: - * 1 - Remove the destionation (if any) using VOP_REMOVE, - * which in itself is a complete transaction. - * - * 2 - Rename source to destination. - * - * Since all the preflighting is done, we assume that a - * rename failure is unlikely once part 1 is complete. - * Breaking rename into two transactions buys us a much - * simpler implementation with respect to the locking - * protocol. There are only 3 vnodes to worry about - * locking in the correct order (instead of 4). + * Lock all the vnodes before starting a journal transaction. */ /* - * Part 1 - If the destination exists then it needs to be removed. + * Simple case (same parent) - just lock child (fvp).
*/ - if (tvp) { - /* - * VOP_REMOVE will vput tdvp so we better bump its - * ref count and relockit, always set tvp to NULL - * afterwards to indicate that we're done with it. - */ - VREF(tdvp); - - if (tvp == fvp) { - if (fvp_locked) { - VREF(fvp); - } else { - error = vget(fvp, LK_EXCLUSIVE | LK_RETRY, p); - if (error) - goto out; - fvp_locked = 1; - } - } else { - cache_purge(tvp); - } - - /* Clear SAVENAME to keep VOP_REMOVE from smashing tcnp. */ - tcnp->cn_flags &= ~SAVENAME; - - if (tvp->v_type == VDIR) - error = VOP_RMDIR(tdvp, tvp, tcnp); - else - error = VOP_REMOVE(tdvp, tvp, tcnp); - - /* Get lock states back in sync. */ - tdvp_locked = 0; - if (tvp == fvp) - fvp_locked = 0; - tvp = NULL; /* all done with tvp */ - tvp_deleted = 1; - - if (error) - goto out; /* couldn't remove destination! */ + if (fdvp == tdvp) { + if (error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p)) + goto out; + fvp_locked = 1; + goto vnlocked; } - /* - * All done with tvp. - * - * For POSIX compliance, if tvp was removed the only - * error we can return from this point on is EIO. - */ /* - * Part 2 - rename source to destination + * If fdvp is the parent of tdvp then we'll need to + * drop tdvp's lock before acquiring a lock on fdvp. + * + * fdvp + * o + * / \ + * / \ + * tdvp o o fvp + * \ + * \ + * o tvp + * + * + * If the parent directories are unrelated then we'll + * need to acquire their vnode locks in vnode address + * order. Otherwise we can race with another rename + * call that involves the same vnodes except that to + * and from are switched and potentially deadlock. + * [i.e. rename("a/b", "c/d") vs rename("c/d", "a/b")] + * + * If it's not either of the two above cases then we + * can safely lock fdvp and fvp. */ + if ((VTOC(fdvp)->c_cnid == VTOC(tdvp)->c_parentcnid) || + ((VTOC(tdvp)->c_cnid != VTOC(fdvp)->c_parentcnid) && + (fdvp < tdvp))) { - /* - * Lock the vnodes before starting a journal transaction. - */ - if (fdvp != tdvp) { - /* - * fvp is a child and must be locked last. - */ - if (fvp_locked) { - VOP_UNLOCK(fvp, 0, p); - fvp_locked = 0; + /* Drop locks on tvp and tdvp */ + if (tvp_locked) { + VOP_UNLOCK(tvp, 0, p); + tvp_locked = 0; } + VOP_UNLOCK(tdvp, 0, p); + tdvp_locked = 0; + + /* Acquire locks in correct order */ + if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p))) + goto out; + fdvp_locked = 1; + if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p))) + goto out; + tdvp_locked = 1; + /* - * If fdvp is the parent of tdvp then it needs to be locked first. + * Now that the parents are locked only one thread + * can continue. So the lock order of the children + * doesn't really matter. */ - if ((VTOC(fdvp)->c_cnid == VTOC(tdvp)->c_parentcnid)) { - if (tdvp_locked) { - VOP_UNLOCK(tdvp, 0, p); - tdvp_locked = 0; - } - if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p))) - goto out; - fdvp_locked = 1; - if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p))) + if (tvp == fvp) { + if ((error = vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p))) goto out; - tdvp_locked = 1; - - } else /* Lock tdvp then fdvp */ { - if (!tdvp_locked) { - if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p))) + tvp_locked = 1; + } else { + if (tvp) { + if ((error = vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p))) goto out; - tdvp_locked = 1; + tvp_locked = 1; } - if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p))) + if ((error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p))) goto out; - fdvp_locked = 1; + fvp_locked = 1; } - } else if (!tdvp_locked) { - /* - * fvp is a child and must be locked last.
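The address-ordered acquisition described in the comment above generalizes beyond vnodes; a small pthreads sketch of the same deadlock-avoidance rule (illustrative only, not kernel code):

    #include <pthread.h>

    /*
     * Take two mutexes in a globally agreed (address) order, so that
     * concurrent rename("a/b","c/d") and rename("c/d","a/b") style
     * callers cannot each hold one lock while waiting for the other.
     */
    static void
    lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        if (a == b) {
            pthread_mutex_lock(a);
            return;
        }
        if (a > b) {                    /* normalize: lower address first */
            pthread_mutex_t *tmp = a;
            a = b;
            b = tmp;
        }
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
    }

    static void
    unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        pthread_mutex_unlock(a);        /* release order is unimportant */
        if (a != b)
            pthread_mutex_unlock(b);
    }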
- */ - if (fvp_locked) { - VOP_UNLOCK(fvp, 0, p); - fvp_locked = 0; - } - if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p))) - goto out; - tdvp_locked = 1; - } - /* Now its safe to lock fvp */ - if (!fvp_locked) { + } else /* OK to lock fdvp and fvp */ { + if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p))) + goto out; + fdvp_locked = 1; if (error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p)) goto out; - fvp_locked = 1; + if (tvp == fvp) + tvp_locked = 1; + else + fvp_locked = 1; } +vnlocked: fdcp = VTOC(fdvp); fcp = VTOC(fvp); + /* + * While fvp is still locked, purge it from the name cache and + * grab its c_cnid value. Note that the removal of tvp (below) + * can drop fvp's lock when fvp == tvp. + */ + cache_purge(fvp); + /* * When a file moves out of "Cleanup At Startup" * we can drop its NODUMP status. @@ -2238,24 +2402,13 @@ hfs_rename(ap) (fvp->v_type == VREG) && (fdvp != tdvp) && (fdcp->c_desc.cd_nameptr != NULL) && - (strcmp(fdcp->c_desc.cd_nameptr, "Cleanup At Startup") == 0)) { + (strcmp(fdcp->c_desc.cd_nameptr, CARBON_TEMP_DIR_NAME) == 0)) { fcp->c_flags &= ~UF_NODUMP; fcp->c_flag |= C_CHANGE; tv = time; (void) VOP_UPDATE(fvp, &tv, &tv, 0); } - hfs_global_shared_lock_acquire(hfsmp); - grabbed_lock = 1; - if (hfsmp->jnl) { - if ((error = journal_start_transaction(hfsmp->jnl)) != 0) { - goto out; - } - started_tr = 1; - } - - cache_purge(fvp); - bzero(&from_desc, sizeof(from_desc)); from_desc.cd_nameptr = fcnp->cn_nameptr; from_desc.cd_namelen = fcnp->cn_namelen; @@ -2270,6 +2423,52 @@ hfs_rename(ap) to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED); to_desc.cd_cnid = fcp->c_cnid; + hfs_global_shared_lock_acquire(hfsmp); + grabbed_lock = 1; + if (hfsmp->jnl) { + if ((error = journal_start_transaction(hfsmp->jnl)) != 0) { + goto out; + } + started_tr = 1; + } + + /* + * Reserve some space in the Catalog file. + */ + if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) { + goto out; + } + + /* + * If the destination exists then it needs to be removed. + */ + + if (tvp) { + if (tvp != fvp) + cache_purge(tvp); + /* + * Note that hfs_removedir and hfs_removefile + * will keep tdvp locked with a reference. + * But tvp will lose its lock and reference. + */ + if (tvp->v_type == VDIR) + error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_RENAMEOPTS); + else + error = hfs_removefile(tdvp, tvp, tcnp, HFSRM_RENAMEOPTS); + + if (tvp == fvp) + fvp_locked = 0; + tvp = NULL; + tvp_locked = 0; + tvp_deleted = 1; + if (error) + goto out; + } + + /* + * All done with tvp and fvp + */ + /* Lock catalog b-tree */ error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); if (error) @@ -2279,22 +2478,23 @@ hfs_rename(ap) /* Unlock catalog b-tree */ (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); - if (error) + + if (error) { goto out; + } /* Update cnode's catalog descriptor */ - replace_desc(fcp, &out_desc); + if (fvp_locked) { + replace_desc(fcp, &out_desc); + fcp->c_parentcnid = tdcp->c_cnid; + fcp->c_hint = 0; + } hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_RMDIR : VOL_RMFILE, (fdcp->c_cnid == kHFSRootFolderID)); hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_MKDIR : VOL_MKFILE, (tdcp->c_cnid == kHFSRootFolderID)); - VOP_UNLOCK(fvp, 0, p); - fcp = NULL; - fvp_locked = 0; - /* All done with fvp. */ - /* Update both parent directories.
*/ tv = time; if (fdvp != tdvp) { @@ -2312,6 +2512,9 @@ hfs_rename(ap) (void) VOP_UPDATE(tdvp, &tv, &tv, 0); out: + if (hfsmp) { + cat_postflight(hfsmp, &cookie, p); + } if (started_tr) { journal_end_transaction(hfsmp->jnl); } @@ -2319,6 +2522,14 @@ out: hfs_global_shared_lock_release(hfsmp); } + /* Note that if hfs_removedir or hfs_removefile was invoked above they will already have + generated a NOTE_WRITE for tdvp and a NOTE_DELETE for tvp. + */ + if (error == 0) { + HFS_KNOTE(fvp, NOTE_RENAME); + HFS_KNOTE(fdvp, NOTE_WRITE); + if (tdvp != fdvp) HFS_KNOTE(tdvp, NOTE_WRITE); + }; if (fvp_locked) { VOP_UNLOCK(fvp, 0, p); } @@ -2328,18 +2539,18 @@ out: if (tdvp_locked) { VOP_UNLOCK(tdvp, 0, p); } - if (tvp && (tvp != fvp)) { - if (tvp != tdvp) - VOP_UNLOCK(tvp, 0, p); - vrele(tvp); + if (tvp_locked) { + VOP_UNLOCK(tvp, 0, p); } vrele(fvp); vrele(fdvp); + if (tvp) + vrele(tvp); vrele(tdvp); /* After tvp is removed the only acceptable error is EIO */ - if ((error == ENOSPC) && tvp_deleted) + if (error && tvp_deleted) error = EIO; return (error); @@ -2441,7 +2652,6 @@ hfs_symlink(ap) vp = *vpp; len = strlen(ap->a_target); fp = VTOF(vp); - fp->ff_clumpsize = VTOVCB(vp)->blockSize; #if QUOTA (void)hfs_getinoquota(VTOC(vp)); @@ -2570,6 +2780,10 @@ hfs_readdir(ap) int eofflag = 0; void *user_start = NULL; int user_len; + + int ncookies=0; + u_long *cookies=NULL; + u_long *cookiep=NULL; /* We assume it's all one big buffer... */ if (uio->uio_iovcnt > 1 || uio->uio_resid < AVERAGE_HFSDIRENTRY_SIZE) @@ -2602,7 +2816,6 @@ hfs_readdir(ap) } } - /* Create the entries for . and .. */ if (uio->uio_offset < sizeof(rootdots)) { caddr_t dep; @@ -2627,10 +2840,58 @@ hfs_readdir(ap) goto Exit; } + if (ap->a_ncookies != NULL) { + /* + * These cookies are handles that allow NFS to restart + * scanning through a directory. If a directory is large + * enough, NFS will issue a successive readdir() with a + * uio->uio_offset that is equal to one of these cookies. + * + * The cookies that we generate are synthesized byte-offsets. + * The offset is where the dirent would be if the + * directory were an array of packed dirent structs. It is + * synthetic because that's not how directories are stored in + * HFS but other code expects that the cookie is a byte offset. + * + * We have to pre-allocate the cookies because cat_getdirentries() + * is the only one that can properly synthesize the offsets (since + * it may have to skip over entries and only it knows the true + * virtual offset of any particular directory entry). So we allocate + * a cookie table here and pass it in to cat_getdirentries(). + * + * Note that the handling of "." and ".." is mostly done here but + * cat_getdirentries() is aware of it. + * + * Only the NFS server uses cookies so fortunately this code is + * not executed unless the NFS server is issuing the readdir + * request. + * + * Also note that the NFS server is the one responsible for + * freeing the cookies even though we allocated them. Ick. + * + * We allocate a reasonable number of entries for the size of + * the buffer that we're going to fill in. cat_getdirentries() + * is smart enough to not overflow if there's more room in the + * buffer but not enough room in the cookie table.
+ */ + if (uio->uio_segflg != UIO_SYSSPACE) + panic("hfs_readdir: unexpected uio from NFS server"); + + ncookies = uio->uio_iov->iov_len / (AVERAGE_HFSDIRENTRY_SIZE/2); + MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP, M_WAITOK); + + *ap->a_ncookies = ncookies; + *ap->a_cookies = cookies; + } /* If there are no children then we're done */ if (cp->c_entries == 0) { eofflag = 1; retval = 0; + if (cookies) { + cookies[0] = 0; + cookies[1] = sizeof(struct hfsdotentry); + } goto Exit; } @@ -2638,7 +2899,7 @@ hfs_readdir(ap) retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p); if (retval) goto Exit; - retval = cat_getdirentries(hfsmp, &cp->c_desc, uio, &eofflag); + retval = cat_getdirentries(hfsmp, &cp->c_desc, cp->c_entries, uio, &eofflag, cookies, ncookies); /* Unlock catalog b-tree */ (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); @@ -2654,38 +2915,6 @@ hfs_readdir(ap) } cp->c_flag |= C_ACCESS; - /* Bake any cookies */ - if (!retval && ap->a_ncookies != NULL) { - struct dirent* dpStart; - struct dirent* dpEnd; - struct dirent* dp; - int ncookies; - u_long *cookies; - u_long *cookiep; - - /* - * Only the NFS server uses cookies, and it loads the - * directory block into system space, so we can just look at - * it directly. - */ - if (uio->uio_segflg != UIO_SYSSPACE) - panic("hfs_readdir: unexpected uio from NFS server"); - dpStart = (struct dirent *)(uio->uio_iov->iov_base - (uio->uio_offset - off)); - dpEnd = (struct dirent *) uio->uio_iov->iov_base; - for (dp = dpStart, ncookies = 0; - dp < dpEnd && dp->d_reclen != 0; - dp = (struct dirent *)((caddr_t)dp + dp->d_reclen)) - ncookies++; - MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP, M_WAITOK); - for (dp = dpStart, cookiep = cookies; - dp < dpEnd; - dp = (struct dirent *)((caddr_t) dp + dp->d_reclen)) { - off += dp->d_reclen; - *cookiep++ = (u_long) off; - } - *ap->a_ncookies = ncookies; - *ap->a_cookies = cookies; - } Exit:; if (hfsmp->jnl && user_start) { @@ -2761,40 +2990,28 @@ hfs_readlink(ap) } } retval = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio); - - return (retval); -} - - -/* - * hfs abort op, called after namei() when a CREATE/DELETE isn't actually - * done. If a buffer has been saved in anticipation of a CREATE, delete it. -#% abortop dvp = = = -# - vop_abortop { - IN struct vnode *dvp; - IN struct componentname *cnp; - - */ - -/* ARGSUSED */ - -static int -hfs_abortop(ap) - struct vop_abortop_args /* { - struct vnode *a_dvp; - struct componentname *a_cnp; - } */ *ap; -{ - if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) { - FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); - ap->a_cnp->cn_flags &= ~HASBUF; +#if 1 + /* + * Keep track of blocks read + */ + if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (retval == 0)) { + + /* + * If this file hasn't been seen since the start of + * the current sampling period then start over. + */ + if (cp->c_atime < VTOHFS(vp)->hfc_timebase) + VTOF(vp)->ff_bytesread = fp->ff_size; + else + VTOF(vp)->ff_bytesread += fp->ff_size; + + // if (VTOF(vp)->ff_bytesread > fp->ff_size) + // cp->c_flag |= C_ACCESS; } - - return (0); +#endif + return (retval); } - /* * Lock a cnode. If it's already locked, set the WANT bit and sleep.
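A toy illustration of the synthesized byte-offset cookies described in the readdir comment above: each cookie is the offset the scan would resume at if the directory were a packed array of dirents (names and sizes invented):

    #include <stdio.h>

    struct toy_dirent {
        unsigned short d_reclen;        /* record length in bytes */
        const char *d_name;
    };

    int
    main(void)
    {
        struct toy_dirent dir[] = {
            { 16, "a" }, { 24, "longer-name" }, { 16, "b" }
        };
        unsigned long cookie = 0;
        int i;

        for (i = 0; i < 3; i++) {
            /* cookie for entry i = synthetic offset just past it */
            cookie += dir[i].d_reclen;
            printf("%-12s cookie=%lu\n", dir[i].d_name, cookie);
        }
        return (0);
    }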
#% lock vp U L U @@ -2816,9 +3033,6 @@ hfs_lock(ap) struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); - if (cp == NULL) - panic("hfs_lock: cnode in vnode is null\n"); - return (lockmgr(&cp->c_lock, ap->a_flags, &vp->v_interlock, ap->a_p)); } @@ -2842,10 +3056,12 @@ hfs_unlock(ap) { struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); - - if (cp == NULL) - panic("hfs_unlock: cnode in vnode is null\n"); - +#if 0 + if (!lockstatus(&cp->c_lock)) { + printf("hfs_unlock: vnode %s wasn't locked!\n", + cp->c_desc.cd_nameptr ? cp->c_desc.cd_nameptr : ""); + } +#endif return (lockmgr(&cp->c_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock, ap->a_p)); } @@ -2929,6 +3145,9 @@ hfs_pathconf(ap) case _PC_PATH_MAX: *ap->a_retval = PATH_MAX; /* 1024 */ break; + case _PC_PIPE_BUF: + *ap->a_retval = PIPE_BUF; + break; case _PC_CHOWN_RESTRICTED: *ap->a_retval = 1; break; @@ -2939,7 +3158,10 @@ *ap->a_retval = kHFSPlusMaxFileNameChars; break; case _PC_CASE_SENSITIVE: - *ap->a_retval = 0; + if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE) + *ap->a_retval = 1; + else + *ap->a_retval = 0; break; case _PC_CASE_PRESERVING: *ap->a_retval = 1; @@ -3015,12 +3237,16 @@ hfs_advlock(ap) return (EINVAL); } - if (start < 0) - return (EINVAL); if (fl->l_len == 0) end = -1; - else + else if (fl->l_len > 0) end = start + fl->l_len - 1; + else { /* l_len is negative */ + end = start - 1; + start += fl->l_len; + } + if (start < 0) + return (EINVAL); /* * Create the hfslockf structure @@ -3098,14 +3324,14 @@ hfs_update(ap) hfsmp = VTOHFS(vp); /* XXX do we really want to clear the system cnode flags here???? */ - if ((vp->v_flag & VSYSTEM) || - (VTOVFS(vp)->mnt_flag & MNT_RDONLY) || + if (((vp->v_flag & VSYSTEM) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))|| + (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) { cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE); return (0); } - updateflag = cp->c_flag & (C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE); + updateflag = cp->c_flag & (C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_FORCEUPDATE); /* Nothing to update. */ if (updateflag == 0) { @@ -3117,23 +3343,15 @@ hfs_update(ap) } if (updateflag & C_ACCESS) { /* - * If only the access time is changing then defer - * updating it on-disk util later (in hfs_inactive). - * If it was recently updated then skip the update. + * When the access time is the only thing changing + * then make sure it's sufficiently newer before + * committing it to disk. */ - if (updateflag == C_ACCESS) { - cp->c_flag &= ~C_ACCESS; - - /* Its going to disk or its sufficiently newer... */ - if ((cp->c_flag & C_ATIMEMOD) || - (ap->a_access->tv_sec > (cp->c_atime + ATIME_ACCURACY))) { - cp->c_atime = ap->a_access->tv_sec; - cp->c_flag |= C_ATIMEMOD; - } + if ((updateflag == C_ACCESS) && + (ap->a_access->tv_sec < (cp->c_atime + ATIME_ONDISK_ACCURACY))) { return (0); - } else { - cp->c_atime = ap->a_access->tv_sec; } + cp->c_atime = ap->a_access->tv_sec; } if (updateflag & C_UPDATE) { cp->c_mtime = ap->a_modify->tv_sec; @@ -3163,15 +3381,21 @@ hfs_update(ap) * gets written to disk. * * Deleted files can defer meta data updates until inactive. + * + * If we're ever called with the C_FORCEUPDATE flag though + * we have to do the update.
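A compact restatement of the access-time rule above, as a standalone helper (the threshold constant is a stand-in; the real value is ATIME_ONDISK_ACCURACY from the HFS headers):

    #include <time.h>

    #define ATIME_THRESHOLD 300         /* stand-in value, seconds */

    /*
     * An access-time-only change is worth a catalog write only when the
     * new atime is sufficiently newer than the value already on disk.
     */
    static int
    atime_commit_needed(time_t ondisk_atime, time_t new_atime, int atime_only)
    {
        if (atime_only && new_atime < ondisk_atime + ATIME_THRESHOLD)
            return (0);                 /* defer: not sufficiently newer */
        return (1);
    }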
*/ - if (ISSET(cp->c_flag, C_DELETED) || + if (ISSET(cp->c_flag, C_FORCEUPDATE) == 0 && + (ISSET(cp->c_flag, C_DELETED) || (dataforkp && cp->c_datafork->ff_unallocblocks) || - (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks)) { + (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks))) { if (updateflag & (C_CHANGE | C_UPDATE)) hfs_volupdate(hfsmp, VOL_UPDATE, 0); cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE); cp->c_flag |= C_MODIFIED; + HFS_KNOTE(vp, NOTE_ATTRIB); + return (0); } @@ -3195,6 +3419,19 @@ hfs_update(ap) bcopy(dataforkp, &datafork, sizeof(datafork)); datafork.cf_size = CIRCLEQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start; dataforkp = &datafork; + } else if (dataforkp && (cp->c_datafork->ff_unallocblocks != 0)) { + // always make sure the block count and the size + // of the file match the number of blocks actually + // allocated to the file on disk + bcopy(dataforkp, &datafork, sizeof(datafork)); + // make sure that we don't assign a negative block count + if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) { + panic("hfs: ff_blocks %d is less than unalloc blocks %d\n", + cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks); + } + datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks); + datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize; + dataforkp = &datafork; } /* @@ -3217,18 +3454,20 @@ hfs_update(ap) /* Unlock the Catalog b-tree file. */ (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); - if (updateflag & (C_CHANGE | C_UPDATE)) + if (updateflag & (C_CHANGE | C_UPDATE | C_FORCEUPDATE)) hfs_volupdate(hfsmp, VOL_UPDATE, 0); + /* After the updates are finished, clear the flags */ + cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_FORCEUPDATE); + // XXXdbg if (hfsmp->jnl) { journal_end_transaction(hfsmp->jnl); } hfs_global_shared_lock_release(hfsmp); - /* After the updates are finished, clear the flags */ - cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_ATIMEMOD); - + HFS_KNOTE(vp, NOTE_ATTRIB); + return (error); } @@ -3253,6 +3492,7 @@ hfs_makenode(mode, dvp, vpp, cnp) struct proc *p; struct cat_desc in_desc, out_desc; struct cat_attr attr; + cat_cookie_t cookie = {0}; int error, started_tr = 0, grabbed_lock = 0; enum vtype vnodetype; @@ -3339,6 +3579,17 @@ hfs_makenode(mode, dvp, vpp, cnp) started_tr = 1; } + /* + * Reserve some space in the Catalog file. + * + * (we also add CAT_DELETE since our getnewvnode + * request can cause an hfs_inactive call to + * delete an unlinked file) + */ + if ((error = cat_preflight(hfsmp, CAT_CREATE | CAT_DELETE, &cookie, p))) { + goto exit; + } + /* Lock catalog b-tree */ error = hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_EXCLUSIVE, p); if (error) @@ -3358,6 +3609,11 @@ hfs_makenode(mode, dvp, vpp, cnp) dcp->c_flag |= C_CHANGE | C_UPDATE; tv = time; (void) VOP_UPDATE(dvp, &tv, &tv, 0); + if (vnodetype == VDIR) { + HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); + } else { + HFS_KNOTE(dvp, NOTE_WRITE); + }; hfs_volupdate(hfsmp, vnodetype == VDIR ? 
VOL_MKDIR : VOL_MKFILE, (dcp->c_cnid == kHFSRootFolderID)); @@ -3388,6 +3644,8 @@ hfs_makenode(mode, dvp, vpp, cnp) if (error) goto exit; + // XXXdbg + cache_enter(dvp, tvp, cnp); #if QUOTA cp = VTOC(tvp); @@ -3398,16 +3656,15 @@ hfs_makenode(mode, dvp, vpp, cnp) */ if ((error = hfs_getinoquota(cp)) || (error = hfs_chkiq(cp, 1, cnp->cn_cred, FORCE))) { - if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) { - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); - cnp->cn_flags &= ~HASBUF; - } if (tvp->v_type == VDIR) VOP_RMDIR(dvp,tvp, cnp); else VOP_REMOVE(dvp,tvp, cnp); - return (error); + // because VOP_RMDIR and VOP_REMOVE already + // have done the vput() + dvp = NULL; + goto exit; } #endif /* QUOTA */ @@ -3432,17 +3689,22 @@ hfs_makenode(mode, dvp, vpp, cnp) exit: cat_releasedesc(&out_desc); - if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + cat_postflight(hfsmp, &cookie, p); + if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) { + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); + } /* * Check if a file is located in the "Cleanup At Startup" * directory. If it is then tag it as NODUMP so that we * can be lazy about zero filling data holes. */ - if ((error == 0) && (vnodetype == VREG) && + if ((error == 0) && dvp && (vnodetype == VREG) && (dcp->c_desc.cd_nameptr != NULL) && - (strcmp(dcp->c_desc.cd_nameptr, "Cleanup At Startup") == 0)) { + (strcmp(dcp->c_desc.cd_nameptr, CARBON_TEMP_DIR_NAME) == 0)) { struct vnode *ddvp; cnid_t parid; @@ -3463,11 +3725,9 @@ exit: vput(ddvp); } } - if (dvp) vput(dvp); - // XXXdbg if (started_tr) { journal_end_transaction(hfsmp->jnl); started_tr = 0; @@ -3527,6 +3787,158 @@ hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, stru } +static void +filt_hfsdetach(struct knote *kn) +{ + struct vnode *vp; + int result; + struct proc *p = current_proc(); + + vp = (struct vnode *)kn->kn_hook; + if (1) { /* ! KNDETACH_VNLOCKED */ + result = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + if (result) return; + }; + + result = KNOTE_DETACH(&VTOC(vp)->c_knotes, kn); + + if (1) { /* ! KNDETACH_VNLOCKED */ + VOP_UNLOCK(vp, 0, p); + }; +} + +/*ARGSUSED*/ +static int +filt_hfsread(struct knote *kn, long hint) +{ + struct vnode *vp = (struct vnode *)kn->kn_fp->f_data; + + if (hint == NOTE_REVOKE) { + /* + * filesystem is gone, so set the EOF flag and schedule + * the knote for deletion. + */ + kn->kn_flags |= (EV_EOF | EV_ONESHOT); + return (1); + } + + kn->kn_data = VTOF(vp)->ff_size - kn->kn_fp->f_offset; + return (kn->kn_data != 0); +} + +/*ARGSUSED*/ +static int +filt_hfswrite(struct knote *kn, long hint) +{ + if (hint == NOTE_REVOKE) { + /* + * filesystem is gone, so set the EOF flag and schedule + * the knote for deletion. 
+ */ + kn->kn_flags |= (EV_EOF | EV_ONESHOT); + } + + kn->kn_data = 0; + return (1); +} + +static int +filt_hfsvnode(struct knote *kn, long hint) +{ + + if (kn->kn_sfflags & hint) + kn->kn_fflags |= hint; + if (hint == NOTE_REVOKE) { + kn->kn_flags |= EV_EOF; + return (1); + } + return (kn->kn_fflags != 0); +} + +static struct filterops hfsread_filtops = + { 1, NULL, filt_hfsdetach, filt_hfsread }; +static struct filterops hfswrite_filtops = + { 1, NULL, filt_hfsdetach, filt_hfswrite }; +static struct filterops hfsvnode_filtops = + { 1, NULL, filt_hfsdetach, filt_hfsvnode }; + +/* + # + #% kqfilt_add vp L L L + # + vop_kqfilt_add + IN struct vnode *vp; + IN struct knote *kn; + IN struct proc *p; + */ +static int +hfs_kqfilt_add(ap) + struct vop_kqfilt_add_args /* { + struct vnode *a_vp; + struct knote *a_kn; + struct proc *p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct knote *kn = ap->a_kn; + + switch (kn->kn_filter) { + case EVFILT_READ: + if (vp->v_type == VREG) { + kn->kn_fop = &hfsread_filtops; + } else { + return EINVAL; + }; + break; + case EVFILT_WRITE: + if (vp->v_type == VREG) { + kn->kn_fop = &hfswrite_filtops; + } else { + return EINVAL; + }; + break; + case EVFILT_VNODE: + kn->kn_fop = &hfsvnode_filtops; + break; + default: + return (1); + } + + kn->kn_hook = (caddr_t)vp; + + /* simple_lock(&vp->v_pollinfo.vpi_lock); */ + KNOTE_ATTACH(&VTOC(vp)->c_knotes, kn); + /* simple_unlock(&vp->v_pollinfo.vpi_lock); */ + + return (0); +} + +/* + # + #% kqfilt_remove vp L L L + # + vop_kqfilt_remove + IN struct vnode *vp; + IN uintptr_t ident; + IN struct proc *p; + */ +static int +hfs_kqfilt_remove(ap) + struct vop_kqfilt_remove_args /* { + struct vnode *a_vp; + uintptr_t ident; + struct proc *p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + uintptr_t ident = ap->a_ident; + int result; + + result = ENOTSUP; /* XXX */ + + return (result); +} + /* * Wrapper for special device reads */ @@ -3656,6 +4068,43 @@ hfsfifo_close(ap) simple_unlock(&vp->v_interlock); return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap)); } + +/* + * kqfilt_add wrapper for fifos. + * + * Fall through to hfs kqfilt_add routines if needed + */ +int +hfsfifo_kqfilt_add(ap) + struct vop_kqfilt_add_args *ap; +{ + extern int (**fifo_vnodeop_p)(void *); + int error; + + error = VOCALL(fifo_vnodeop_p, VOFFSET(vop_kqfilt_add), ap); + if (error) + error = hfs_kqfilt_add(ap); + return (error); +} + +/* + * kqfilt_remove wrapper for fifos. 
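The userspace counterpart of the kqfilt_add/kqfilt_remove plumbing above uses the standard kqueue(2) API; the HFS_KNOTE() calls sprinkled through this patch are what fire these events (the watched path below is hypothetical):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <stdio.h>

    int
    main(void)
    {
        struct kevent ev;
        int kq, fd;

        fd = open("/tmp/watched-file", O_EVTONLY);   /* hypothetical */
        kq = kqueue();
        if (fd < 0 || kq < 0)
            return (1);
        EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
            NOTE_DELETE | NOTE_WRITE | NOTE_RENAME | NOTE_ATTRIB, 0, NULL);
        if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
            return (1);
        for (;;) {
            /* Blocks until the filesystem posts one of the requested notes. */
            if (kevent(kq, NULL, 0, &ev, 1, NULL) < 1)
                break;
            printf("vnode event fflags=0x%x\n", (unsigned)ev.fflags);
        }
        close(kq);
        close(fd);
        return (0);
    }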
+ * + * Fall through to hfs kqfilt_remove routines if needed + */ +int +hfsfifo_kqfilt_remove(ap) + struct vop_kqfilt_remove_args *ap; +{ + extern int (**fifo_vnodeop_p)(void *); + int error; + + error = VOCALL(fifo_vnodeop_p, VOFFSET(vop_kqfilt_remove), ap); + if (error) + error = hfs_kqfilt_remove(ap); + return (error); +} + #endif /* FIFO */ @@ -3706,6 +4155,7 @@ struct vnodeopv_entry_desc hfs_vnodeop_entries[] = { { &vop_write_desc, (VOPFUNC)hfs_write }, /* write */ { &vop_ioctl_desc, (VOPFUNC)hfs_ioctl }, /* ioctl */ { &vop_select_desc, (VOPFUNC)hfs_select }, /* select */ + { &vop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */ { &vop_exchange_desc, (VOPFUNC)hfs_exchange }, /* exchange */ { &vop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */ @@ -3722,7 +4172,7 @@ struct vnodeopv_entry_desc hfs_vnodeop_entries[] = { { &vop_readdir_desc, (VOPFUNC)hfs_readdir }, /* readdir */ { &vop_readdirattr_desc, (VOPFUNC)hfs_readdirattr }, /* readdirattr */ { &vop_readlink_desc, (VOPFUNC)hfs_readlink }, /* readlink */ - { &vop_abortop_desc, (VOPFUNC)hfs_abortop }, /* abortop */ + { &vop_abortop_desc, (VOPFUNC)nop_abortop }, /* abortop */ { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */ { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */ { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */ @@ -3745,6 +4195,8 @@ struct vnodeopv_entry_desc hfs_vnodeop_entries[] = { { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */ { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */ { &vop_cmap_desc, (VOPFUNC)hfs_cmap }, /* cmap */ + { &vop_kqfilt_add_desc, (VOPFUNC)hfs_kqfilt_add }, /* kqfilt_add */ + { &vop_kqfilt_remove_desc, (VOPFUNC)hfs_kqfilt_remove }, /* kqfilt_remove */ { NULL, (VOPFUNC)NULL } }; @@ -3776,6 +4228,7 @@ struct vnodeopv_entry_desc hfs_specop_entries[] = { { &vop_rename_desc, (VOPFUNC)spec_rename }, /* rename */ { &vop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */ { &vop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */ + { &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist }, { &vop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */ { &vop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */ { &vop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */ @@ -3834,6 +4287,7 @@ struct vnodeopv_entry_desc hfs_fifoop_entries[] = { { &vop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */ { &vop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */ { &vop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */ + { &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist }, { &vop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */ { &vop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */ { &vop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */ @@ -3861,6 +4315,8 @@ struct vnodeopv_entry_desc hfs_fifoop_entries[] = { { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */ { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */ { &vop_cmap_desc, (VOPFUNC)hfs_cmap }, /* cmap */ + { &vop_kqfilt_add_desc, (VOPFUNC)hfsfifo_kqfilt_add }, /* kqfilt_add */ + { &vop_kqfilt_remove_desc, (VOPFUNC)hfsfifo_kqfilt_remove }, /* kqfilt_remove */ { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } }; struct vnodeopv_desc hfs_fifoop_opv_desc = diff --git a/bsd/hfs/hfscommon/BTree/BTree.c b/bsd/hfs/hfscommon/BTree/BTree.c index 0061a3900..d61c73936 100644 --- a/bsd/hfs/hfscommon/BTree/BTree.c +++ b/bsd/hfs/hfscommon/BTree/BTree.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -155,6 +155,12 @@ */ #define kNumLeafRecSlack 10 +/* BTree accessor routines */ +extern OSStatus GetBTreeBlock(FileReference vp, UInt32 blockNum, GetBlockOptions options, BlockDescriptor *block); +extern OSStatus SetBTreeBlockSize(FileReference vp, ByteCount blockSize, ItemCount minBlockCount); +extern OSStatus ExtendBTreeFile(FileReference vp, FSSize minEOF, FSSize maxEOF); +extern OSStatus ReleaseBTreeBlock(FileReference vp, BlockDescPtr blockPtr, ReleaseBlockOptions options); + //////////////////////////////////// Globals //////////////////////////////////// @@ -171,9 +177,6 @@ Function: Create BTree control block for a file, if necessary. Validates the Input: filePtr - pointer to file to open as a B-tree keyCompareProc - pointer to client's KeyCompare function - getBlockProc - pointer to client's GetBlock function - releaseBlockProc - pointer to client's ReleaseBlock function - setEndOfForkProc - pointer to client's SetEOF function Result: noErr - success paramErr - required ptr was nil @@ -182,12 +185,7 @@ Result: noErr - success != noErr - failure -------------------------------------------------------------------------------*/ -OSStatus BTOpenPath (FCB *filePtr, - KeyCompareProcPtr keyCompareProc, - GetBlockProcPtr getBlockProc, - ReleaseBlockProcPtr releaseBlockProc, - SetEndOfForkProcPtr setEndOfForkProc, - SetBlockSizeProcPtr setBlockSizeProc ) +OSStatus BTOpenPath(FCB *filePtr, KeyCompareProcPtr keyCompareProc) { OSStatus err; BTreeControlBlockPtr btreePtr; @@ -196,21 +194,22 @@ OSStatus BTOpenPath (FCB *filePtr, ////////////////////// Preliminary Error Checking /////////////////////////// - if ( filePtr == nil || - getBlockProc == nil || - releaseBlockProc == nil || - setEndOfForkProc == nil || - setBlockSizeProc == nil ) + if ( filePtr == nil ) { return paramErr; } - if ( filePtr->fcbBTCBPtr != nil ) // already has a BTreeCB + /* + * Subsequent opens allow key compare proc to be changed. + */ + if ( filePtr->fcbBTCBPtr != nil && keyCompareProc != nil) { + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + btreePtr->keyCompareProc = keyCompareProc; return noErr; + } - // is file large enough to contain header node? if ( filePtr->fcbEOF < kMinNodeSize ) - return fsBTInvalidFileErr; //€€ or E_BadHeader? + return fsBTInvalidFileErr; //////////////////////// Allocate Control Block ///////////////////////////// @@ -222,9 +221,9 @@ OSStatus BTOpenPath (FCB *filePtr, return memFullErr; } - btreePtr->getBlockProc = getBlockProc; - btreePtr->releaseBlockProc = releaseBlockProc; - btreePtr->setEndOfForkProc = setEndOfForkProc; + btreePtr->getBlockProc = GetBTreeBlock; + btreePtr->releaseBlockProc = ReleaseBTreeBlock; + btreePtr->setEndOfForkProc = ExtendBTreeFile; btreePtr->keyCompareProc = keyCompareProc; /////////////////////////// Read Header Node //////////////////////////////// @@ -236,15 +235,20 @@ OSStatus BTOpenPath (FCB *filePtr, /* The minimum node size is the physical block size */ nodeRec.blockSize = VTOHFS(btreePtr->fileRefNum)->hfs_phys_block_size; + /* Start with the allocation block size for regular files. 
*/ + if (FTOC(filePtr)->c_fileid >= kHFSFirstUserCatalogNodeID) + { + nodeRec.blockSize = FCBTOVCB(filePtr)->blockSize; + } REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false); // it is now safe to call M_ExitOnError (err) - err = setBlockSizeProc (btreePtr->fileRefNum, nodeRec.blockSize, 1); + err = SetBTreeBlockSize (btreePtr->fileRefNum, nodeRec.blockSize, 1); M_ExitOnError (err); - err = getBlockProc (btreePtr->fileRefNum, + err = GetBTreeBlock(btreePtr->fileRefNum, kHeaderNodeNum, kGetBlock, &nodeRec ); @@ -278,9 +282,12 @@ OSStatus BTOpenPath (FCB *filePtr, btreePtr->maxKeyLength = header->maxKeyLength; btreePtr->totalNodes = header->totalNodes; btreePtr->freeNodes = header->freeNodes; - // ignore header->clumpSize; //€€ rename this field? + if (FTOC(filePtr)->c_fileid >= kHFSFirstUserCatalogNodeID) + filePtr->ff_clumpsize = header->clumpSize; btreePtr->btreeType = header->btreeType; + btreePtr->keyCompareType = header->keyCompareType; + btreePtr->attributes = header->attributes; if ( btreePtr->maxKeyLength > 40 ) @@ -304,7 +311,7 @@ OSStatus BTOpenPath (FCB *filePtr, * we cannot mount using the current physical block size. */ if (btreePtr->leafRecords > 0 || - VTOHFS(btreePtr->fileRefNum)->hfs_media_writeable) + VTOHFS(btreePtr->fileRefNum)->hfs_flags & HFS_WRITEABLE_MEDIA) { err = fsBTBadNodeSize; goto ErrorExit; @@ -321,14 +328,14 @@ OSStatus BTOpenPath (FCB *filePtr, } else { - err = setBlockSizeProc (btreePtr->fileRefNum, btreePtr->nodeSize, 32); //€€ we should try and get this down to 8 + err = SetBTreeBlockSize (btreePtr->fileRefNum, btreePtr->nodeSize, 32); M_ExitOnError (err); /* * Need to use kTrashBlock option to force the * buffer cache to read the entire node */ - err = releaseBlockProc(btreePtr->fileRefNum, &nodeRec, kTrashBlock); + err = ReleaseBTreeBlock(btreePtr->fileRefNum, &nodeRec, kTrashBlock); ++btreePtr->numReleaseNodes; M_ExitOnError (err); @@ -1255,7 +1262,7 @@ OSStatus BTInsertRecord (FCB *filePtr, case fsBTEmptyErr: // if tree empty add 1st leaf node - if (btreePtr->freeNodes == 0) + if (BTAvailableNodes(btreePtr) == 0) { err = ExtendBTree (btreePtr, btreePtr->totalNodes + 1); M_ExitOnError (err); @@ -1317,10 +1324,10 @@ OSStatus BTInsertRecord (FCB *filePtr, /////////////////////// Extend File If Necessary //////////////////////////// - nodesNeeded = btreePtr->treeDepth + 1 - btreePtr->freeNodes; //€€ math limit + nodesNeeded = (SInt32)btreePtr->treeDepth + 1 - BTAvailableNodes(btreePtr); if (nodesNeeded > 0) { - nodesNeeded += btreePtr->totalNodes; + nodesNeeded += (SInt32)btreePtr->totalNodes; if (nodesNeeded > CalcMapBits (btreePtr)) // we'll need to add a map node too! ++nodesNeeded; @@ -1469,10 +1476,10 @@ OSStatus BTReplaceRecord (FCB *filePtr, //////////////////////////// Make Some Room ///////////////////////////////// - nodesNeeded = btreePtr->treeDepth + 1 - btreePtr->freeNodes; //€€ math limit + nodesNeeded = (SInt32)btreePtr->treeDepth + 1 - BTAvailableNodes(btreePtr); if (nodesNeeded > 0) { - nodesNeeded += btreePtr->totalNodes; + nodesNeeded += (SInt32)btreePtr->totalNodes; if (nodesNeeded > CalcMapBits (btreePtr)) // we'll need to add a map node too! 
++nodesNeeded; @@ -1480,7 +1487,6 @@ OSStatus BTReplaceRecord (FCB *filePtr, M_ExitOnError (err); } - // XXXdbg ModifyBlockStart(btreePtr->fileRefNum, &nodeRec); @@ -1643,6 +1649,7 @@ OSStatus BTDeleteRecord (FCB *filePtr, BTreeControlBlockPtr btreePtr; TreePathTable treePathTable; BlockDescriptor nodeRec; + SInt32 nodesNeeded; UInt32 nodeNum; UInt16 index; @@ -1673,6 +1680,19 @@ OSStatus BTDeleteRecord (FCB *filePtr, M_ExitOnError (err); // record must exist for Delete + /////////////////////// Extend File If Necessary //////////////////////////// + + nodesNeeded = (SInt32)btreePtr->treeDepth + 1 - BTAvailableNodes(btreePtr); + if ((btreePtr->attributes & kBTVariableIndexKeysMask) && (nodesNeeded > 0)) + { + nodesNeeded += (SInt32)btreePtr->totalNodes; + if (nodesNeeded > CalcMapBits (btreePtr)) + ++nodesNeeded; + + err = ExtendBTree (btreePtr, nodesNeeded); + M_ExitOnError (err); + } + ///////////////////////////// Delete Record ///////////////////////////////// err = DeleteTree (btreePtr, treePathTable, &nodeRec, index, 1); @@ -1728,8 +1748,7 @@ OSStatus BTGetInformation (FCB *filePtr, info->numNodes = btreePtr->totalNodes; info->numFreeNodes = btreePtr->freeNodes; info->lastfsync = btreePtr->lastfsync; - info->reserved = 0; - + info->keyCompareType = btreePtr->keyCompareType; return noErr; } @@ -1940,25 +1959,10 @@ OSStatus BTSetLastSync (FCB *filePtr, } -/*------------------------------------------------------------------------------- -Routine: BTCheckFreeSpace - -Function: Makes sure there is enough free space so that a tree operation - will succeed. - -Input: fcb - pointer file control block - -Output: none - -Result: noErr - success - --------------------------------------------------------------------------------*/ - __private_extern__ -OSStatus BTCheckFreeSpace (FCB *filePtr) +OSStatus BTHasContiguousNodes (FCB *filePtr) { BTreeControlBlockPtr btreePtr; - int nodesNeeded, err = noErr; M_ReturnErrorIf (filePtr == nil, paramErr); @@ -1969,33 +1973,85 @@ OSStatus BTCheckFreeSpace (FCB *filePtr) M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr); - // XXXdbg this is highly conservative but so much better than - // winding up with turds on your disk. - // - nodesNeeded = (btreePtr->treeDepth + 1) * 10; + return NodesAreContiguous(FCBTOVCB(filePtr), filePtr, btreePtr->nodeSize); +} + + +/*------------------------------------------------------------------------------- +Routine: BTGetUserData + +Function: Read the user data area of the b-tree header node. 
+ +-------------------------------------------------------------------------------*/ +__private_extern__ +OSStatus +BTGetUserData(FCB *filePtr, void * dataPtr, int dataSize) +{ + BTreeControlBlockPtr btreePtr; + BlockDescriptor node; + char * offset; + OSStatus err; + + if (dataSize > kBTreeHeaderUserBytes) + return (EINVAL); + node.buffer = nil; + node.blockHeader = nil; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + if (btreePtr == nil) + return (fsBTInvalidFileErr); + + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false); + + err = GetNode(btreePtr, kHeaderNodeNum, &node); + if (err) + return (err); - if (btreePtr->freeNodes < nodesNeeded) { - err = ExtendBTree(btreePtr, nodesNeeded + btreePtr->totalNodes - btreePtr->freeNodes); - } + offset = (char *)node.buffer + sizeof(BTNodeDescriptor) + sizeof(BTHeaderRec); + bcopy(offset, dataPtr, dataSize); - return err; + (void) ReleaseNode(btreePtr, &node); + + return (0); } +/*------------------------------------------------------------------------------- +Routine: BTSetUserData + +Function: Write the user data area of the b-tree header node. +-------------------------------------------------------------------------------*/ __private_extern__ -OSStatus BTHasContiguousNodes (FCB *filePtr) +OSStatus +BTSetUserData(FCB *filePtr, void * dataPtr, int dataSize) { - BTreeControlBlockPtr btreePtr; - int nodesNeeded, err = noErr; - + BTreeControlBlockPtr btreePtr; + BlockDescriptor node; + char * offset; + OSStatus err; - M_ReturnErrorIf (filePtr == nil, paramErr); + if (dataSize > kBTreeHeaderUserBytes) + return (EINVAL); + node.buffer = nil; + node.blockHeader = nil; btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + if (btreePtr == nil) + return (fsBTInvalidFileErr); + + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false); + + err = GetNode(btreePtr, kHeaderNodeNum, &node); + if (err) + return (err); - REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true); + ModifyBlockStart(btreePtr->fileRefNum, &node); - M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr); + offset = (char *)node.buffer + sizeof(BTNodeDescriptor) + sizeof(BTHeaderRec); + bcopy(dataPtr, offset, dataSize); - return NodesAreContiguous(FCBTOVCB(filePtr), filePtr, btreePtr->nodeSize); + err = UpdateNode (btreePtr, &node, 0, 0); + + return (err); } + diff --git a/bsd/hfs/hfscommon/BTree/BTreeAllocate.c b/bsd/hfs/hfscommon/BTree/BTreeAllocate.c index 557cd19cf..e08170ee1 100644 --- a/bsd/hfs/hfscommon/BTree/BTreeAllocate.c +++ b/bsd/hfs/hfscommon/BTree/BTreeAllocate.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -193,6 +193,10 @@ OSStatus AllocateNode (BTreeControlBlockPtr btreePtr, UInt32 *nodeNum) --btreePtr->freeNodes; btreePtr->flags |= kBTHeaderDirty; + + /* Account for allocations from node reserve */ + BTUpdateReserve(btreePtr, 1); + *nodeNum = nodeNumber; return noErr; diff --git a/bsd/hfs/hfscommon/BTree/BTreeMiscOps.c b/bsd/hfs/hfscommon/BTree/BTreeMiscOps.c index bd4282a12..156d84020 100644 --- a/bsd/hfs/hfscommon/BTree/BTreeMiscOps.c +++ b/bsd/hfs/hfscommon/BTree/BTreeMiscOps.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -176,7 +176,7 @@ OSStatus VerifyHeader (FCB *filePtr, forkSize = (UInt64)totalNodes * (UInt64)header->nodeSize; - if ( forkSize != filePtr->fcbEOF ) + if ( forkSize > filePtr->fcbEOF ) return fsBTInvalidHeaderErr; if ( header->freeNodes >= totalNodes ) diff --git a/bsd/hfs/hfscommon/BTree/BTreeNodeReserve.c b/bsd/hfs/hfscommon/BTree/BTreeNodeReserve.c new file mode 100644 index 000000000..c619d78a7 --- /dev/null +++ b/bsd/hfs/hfscommon/BTree/BTreeNodeReserve.c @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include "../headers/BTreesPrivate.h" +#include "sys/malloc.h" + + +/* + * B-tree Node Reserve + * + * BTReserveSpace + * BTReleaseReserve + * BTUpdateReserve + * BTAvailableNodes + * + * Each kernel thread can have its own reserve of b-tree + * nodes. This reserve info is kept in a hash table. + * + * Don't forget to call BTReleaseReserve when you're finished + * or you will leave stale node reserves in the hash. + */ + + +/* + * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT! + * + * It must remain equal in size to the opaque cat_cookie_t + * struct (in hfs_catalog.h). 
+ */ +struct nreserve { + LIST_ENTRY(nreserve) nr_hash; /* hash chain */ + int nr_nodecnt; /* count of nodes held in reserve */ + int nr_newnodes; /* nodes that were allocated */ + struct vnode *nr_btvp; /* b-tree file vnode */ + void *nr_tag; /* unique tag (per thread) */ +}; + +#define NR_GET_TAG() (current_act()) + +#define NR_CACHE 17 + +#define NR_HASH(btvp, tag) \ + (&nr_hashtbl[((((int)(btvp)) >> 8) ^ ((int)(tag) >> 4)) & nr_hashmask]) + +LIST_HEAD(nodereserve, nreserve) *nr_hashtbl; + +u_long nr_hashmask; + + +/* Internal Node Reserve Hash Routines (private) */ +static void nr_insert (struct vnode *, struct nreserve *nrp, int); +static void nr_delete (struct vnode *, struct nreserve *nrp, int *); +static int nr_lookup (struct vnode *); +static void nr_update (struct vnode *, int); + + +/* + * BTReserveSetup - initialize the node reserve hash table + */ +__private_extern__ +void +BTReserveSetup() +{ + if (sizeof(struct nreserve) != sizeof(cat_cookie_t)) + panic("BTReserveSetup: nreserve size != opaque struct size"); + + nr_hashtbl = hashinit(NR_CACHE, M_HFSMNT, &nr_hashmask); +} + + +/* + * BTAvailableNodes - obtain the actual available nodes (for current thread) + * + */ +__private_extern__ +SInt32 +BTAvailableNodes(BTreeControlBlock *btree) +{ + SInt32 availNodes; + + availNodes = (SInt32)btree->freeNodes - (SInt32)btree->reservedNodes; + + return (availNodes + nr_lookup(btree->fileRefNum)); +} + + +/* + * BTReserveSpace - obtain a node reserve (for current thread) + * + * Used by the Catalog Layer (hfs_catalog.c) to reserve space. + */ +__private_extern__ +int +BTReserveSpace(FCB *file, int operations, void* data) +{ + BTreeControlBlock *btree; + int rsrvNodes, availNodes, totalNodes; + int height; + int inserts, deletes; + int err = 0; + + btree = (BTreeControlBlockPtr)file->fcbBTCBPtr; + + REQUIRE_FILE_LOCK(btree->fileRefNum, true); + + /* + * The node reserve is based on the number of b-tree + * operations (insert/deletes) and the height of the + * tree. + */ + height = btree->treeDepth; + inserts = operations & 0xffff; + deletes = operations >> 16; + + rsrvNodes = 1; /* allow for at least one root split */ + if (deletes) + rsrvNodes += (deletes * (height - 1)) - 1; + if (inserts) + rsrvNodes += (inserts * height) + 1; + + availNodes = btree->freeNodes - btree->reservedNodes; + + if (rsrvNodes > availNodes) { + totalNodes = rsrvNodes + btree->totalNodes - availNodes; + + /* See if we also need a map node */ + if (totalNodes > CalcMapBits(btree)) + ++totalNodes; + if ((err = ExtendBTree(btree, totalNodes))) + return (err); + } + + btree->reservedNodes += rsrvNodes; + nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes); + return (0); +} + + +/* + * BTReleaseReserve - release the node reserve held by current thread + * + * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space. + */ +__private_extern__ +int +BTReleaseReserve(FCB *file, void* data) +{ + BTreeControlBlock *btree; + int nodecnt; + + btree = (BTreeControlBlockPtr)file->fcbBTCBPtr; + + REQUIRE_FILE_LOCK(btree->fileRefNum, true); + + nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt); + + if (nodecnt) + btree->reservedNodes -= nodecnt; + + return (0); +} + +/* + * BTUpdateReserve - update a node reserve for allocations that occurred. 
+ */ +__private_extern__ +void +BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes) +{ + nr_update(btreePtr->fileRefNum, nodes); +} + + +/*----------------------------------------------------------------------------*/ +/* Node Reserve Hash Functions (private) */ + + +int nrinserts = 0; +int nrdeletes = 0; + +/* + * Insert a new node reserve. + */ +static void +nr_insert(struct vnode * btvp, struct nreserve *nrp, int nodecnt) +{ + struct nodereserve *nrhead; + struct nreserve *tmp_nrp; + void * tag = NR_GET_TAG(); + + /* + * Check the cache - there may already be a reserve + */ + nrhead = NR_HASH(btvp, tag); + for (tmp_nrp = nrhead->lh_first; tmp_nrp; + tmp_nrp = tmp_nrp->nr_hash.le_next) { + if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) { + nrp->nr_tag = 0; + return; + } + } + + nrp->nr_nodecnt = nodecnt; + nrp->nr_newnodes = 0; + nrp->nr_btvp = btvp; + nrp->nr_tag = tag; + LIST_INSERT_HEAD(nrhead, nrp, nr_hash); + ++nrinserts; +} + +/* + * Delete a node reserve. + */ +static void +nr_delete(struct vnode * btvp, struct nreserve *nrp, int *nodecnt) +{ + void * tag = NR_GET_TAG(); + + if (nrp->nr_tag) { + if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp)) + panic("nr_delete: invalid NR (%08x)", nrp); + LIST_REMOVE(nrp, nr_hash); + *nodecnt = nrp->nr_nodecnt; + bzero(nrp, sizeof(struct nreserve)); + ++nrdeletes; + } else { + *nodecnt = 0; + } +} + +/* + * Lookup a node reserve. + */ +static int +nr_lookup(struct vnode * btvp) +{ + struct nodereserve *nrhead; + struct nreserve *nrp; + void* tag = NR_GET_TAG(); + + nrhead = NR_HASH(btvp, tag); + for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) { + if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) + return (nrp->nr_nodecnt - nrp->nr_newnodes); + } + return (0); +} + +/* + * Update a node reserve for any allocations that occurred. 
+ */ +static void +nr_update(struct vnode * btvp, int nodecnt) +{ + struct nodereserve *nrhead; + struct nreserve *nrp; + void* tag = NR_GET_TAG(); + + nrhead = NR_HASH(btvp, tag); + for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) { + if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) { + nrp->nr_newnodes += nodecnt; + break; + } + } +} diff --git a/bsd/hfs/hfscommon/BTree/BTreeScanner.c b/bsd/hfs/hfscommon/BTree/BTreeScanner.c index a406754bf..9e66851e9 100644 --- a/bsd/hfs/hfscommon/BTree/BTreeScanner.c +++ b/bsd/hfs/hfscommon/BTree/BTreeScanner.c @@ -25,6 +25,7 @@ * @(#)BTreeScanner.c */ #include +#include "../../hfs_endian.h" #include "../headers/BTreeScanner.h" @@ -182,6 +183,23 @@ static int FindNextLeafNode( BTScanState *scanState, Boolean avoidIO ) (u_int8_t *) scanState->currentNodePtr += scanState->btcb->nodeSize; } +#if BYTE_ORDER == LITTLE_ENDIAN + { + BlockDescriptor block; + FileReference fref; + + /* Fake a BlockDescriptor */ + block.buffer = scanState->currentNodePtr; + block.blockSize = scanState->btcb->nodeSize; + block.blockReadFromDisk = 1; + block.isModified = 0; + + fref = scanState->btcb->fileRefNum; + + SWAP_BT_NODE(&block, ISHFSPLUS(VTOVCB(fref)), VTOC(fref)->c_fileid, 0); + } +#endif + // Make sure this is a valid node if ( CheckNode( scanState->btcb, scanState->currentNodePtr ) != noErr ) { diff --git a/bsd/hfs/hfscommon/Catalog/CatalogIterators.c b/bsd/hfs/hfscommon/Catalog/CatalogIterators.c index c3a385c90..867b489cb 100644 --- a/bsd/hfs/hfscommon/Catalog/CatalogIterators.c +++ b/bsd/hfs/hfscommon/Catalog/CatalogIterators.c @@ -369,7 +369,7 @@ GetCatalogIterator(ExtendedVCB *volume, HFSCatalogNodeID folderID, UInt32 offset bestIterator->volume = volume; // update the iterator's volume bestIterator->folderID = folderID; // ... and folderID - bestIterator->currentIndex = 0xFFFFFFFF; // ... and offspring index marker + bestIterator->currentIndex = 0xFFFF; // ... and offspring index marker bestIterator->currentOffset = 0xFFFFFFFF; bestIterator->nextOffset = 0xFFFFFFFF; diff --git a/bsd/hfs/hfscommon/Catalog/FileIDsServices.c b/bsd/hfs/hfscommon/Catalog/FileIDsServices.c index 2481b7463..0731463ae 100644 --- a/bsd/hfs/hfscommon/Catalog/FileIDsServices.c +++ b/bsd/hfs/hfscommon/Catalog/FileIDsServices.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -68,9 +68,6 @@ OSErr ExchangeFileIDs( ExtendedVCB *vcb, ConstUTF8Param srcName, ConstUTF8Param err = BuildCatalogKeyUTF8(vcb, destID, destName, kUndefinedStrLen, &destKey, NULL); ReturnIfError(err); - err = BTCheckFreeSpace(GetFileControlBlock(vcb->extentsRefNum)); - ReturnIfError(err); - if ( isHFSPlus ) { //-- Step 1: Check the catalog nodes for extents diff --git a/bsd/hfs/hfscommon/Misc/FileExtentMapping.c b/bsd/hfs/hfscommon/Misc/FileExtentMapping.c index 7a38d6d71..57068f546 100644 --- a/bsd/hfs/hfscommon/Misc/FileExtentMapping.c +++ b/bsd/hfs/hfscommon/Misc/FileExtentMapping.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -250,15 +250,12 @@ enum kPreviousRecord = -1 }; -void HFSToHFSPlusExtents( - const HFSExtentRecord oldExtents, - HFSPlusExtentRecord newExtents); -OSErr HFSPlusToHFSExtents( +static OSErr HFSPlusToHFSExtents( const HFSPlusExtentRecord oldExtents, HFSExtentRecord newExtents); -OSErr FindExtentRecord( +static OSErr FindExtentRecord( const ExtendedVCB *vcb, UInt8 forkType, UInt32 fileID, @@ -268,7 +265,7 @@ OSErr FindExtentRecord( HFSPlusExtentRecord foundData, UInt32 *foundHint); -OSErr DeleteExtentRecord( +static OSErr DeleteExtentRecord( const ExtendedVCB *vcb, UInt8 forkType, UInt32 fileID, @@ -281,7 +278,7 @@ static OSErr CreateExtentRecord( UInt32 *hint); -OSErr GetFCBExtentRecord( +static OSErr GetFCBExtentRecord( const FCB *fcb, HFSPlusExtentRecord extents); @@ -359,7 +356,7 @@ static Boolean ExtentsAreIntegral( // fourth entry will be zeroes. // foundHint The BTree hint to find the node again //_________________________________________________________________________________ -OSErr FindExtentRecord( +static OSErr FindExtentRecord( const ExtendedVCB *vcb, UInt8 forkType, UInt32 fileID, @@ -376,7 +373,8 @@ OSErr FindExtentRecord( UInt16 btRecordSize; err = noErr; - *foundHint = 0; + if (foundHint) + *foundHint = 0; fcb = GetFileControlBlock(vcb->extentsRefNum); MALLOC(btIterator, BTreeIterator *, sizeof(*btIterator), M_TEMP, M_WAITOK); @@ -416,14 +414,15 @@ OSErr FindExtentRecord( if (err == noErr) { UInt16 i; - // Copy the found key back for the caller - foundKey->keyLength = kHFSPlusExtentKeyMaximumLength; - foundKey->forkType = extentKeyPtr->forkType; - foundKey->pad = 0; - foundKey->fileID = extentKeyPtr->fileID; - foundKey->startBlock = extentKeyPtr->startBlock; - - // Copy the found data back for the caller + // Copy the found key back for the caller + if (foundKey) { + foundKey->keyLength = kHFSPlusExtentKeyMaximumLength; + foundKey->forkType = extentKeyPtr->forkType; + foundKey->pad = 0; + foundKey->fileID = extentKeyPtr->fileID; + foundKey->startBlock = extentKeyPtr->startBlock; + } + // Copy the found data back for the caller foundData[0].startBlock = extentData[0].startBlock; foundData[0].blockCount = extentData[0].blockCount; foundData[1].startBlock = extentData[1].startBlock; @@ -471,14 +470,16 @@ OSErr FindExtentRecord( } if (err == noErr) { - // Copy the found key back for the caller - BlockMoveData(extentKeyPtr, foundKey, sizeof(HFSPlusExtentKey)); - // Copy the found data back for the caller + // Copy the found key back for the caller + if (foundKey) + BlockMoveData(extentKeyPtr, foundKey, sizeof(HFSPlusExtentKey)); + // Copy the found data back for the caller BlockMoveData(&extentData, foundData, sizeof(HFSPlusExtentRecord)); } } - - *foundHint = btIterator->hint.nodeNum; + + if (foundHint) + *foundHint = btIterator->hint.nodeNum; FREE(btIterator, M_TEMP); return err; } @@ -499,11 +500,6 @@ static OSErr CreateExtentRecord( err = noErr; *hint = 0; - // XXXdbg - preflight that there's enough space - err = BTCheckFreeSpace(GetFileControlBlock(vcb->extentsRefNum)); - if (err) - return err; - MALLOC(btIterator, BTreeIterator *, sizeof(*btIterator), M_TEMP, M_WAITOK); bzero(btIterator, sizeof(*btIterator)); @@ -546,7 +542,7 @@ static OSErr CreateExtentRecord( } -OSErr DeleteExtentRecord( +static OSErr DeleteExtentRecord( const ExtendedVCB *vcb, UInt8 forkType, UInt32 fileID, @@ -557,11 +553,6 @@ OSErr DeleteExtentRecord( err = noErr; - // XXXdbg - preflight that there's enough space - err = 
BTCheckFreeSpace(GetFileControlBlock(vcb->extentsRefNum)); - if (err) - return err; - MALLOC(btIterator, BTreeIterator *, sizeof(*btIterator), M_TEMP, M_WAITOK); bzero(btIterator, sizeof(*btIterator)); @@ -616,6 +607,7 @@ OSErr DeleteExtentRecord( // Called By: Log2Phys (read/write in place), Cache (map a file block). //_________________________________________________________________________________ +__private_extern__ OSErr MapFileBlockC ( ExtendedVCB *vcb, // volume that file resides on FCB *fcb, // FCB of file @@ -685,12 +677,14 @@ OSErr MapFileBlockC ( // Determine the number of contiguous bytes until the end of the extent // (or the amount they asked for, whichever comes first). // - tmpOff = dataEnd - offset; - if (tmpOff > (off_t)(numberOfBytes)) - *availableBytes = numberOfBytes; // more there than they asked for, so pin the output - else - *availableBytes = tmpOff; - + if (availableBytes) + { + tmpOff = dataEnd - offset; + if (tmpOff > (off_t)(numberOfBytes)) + *availableBytes = numberOfBytes; // more there than they asked for, so pin the output + else + *availableBytes = tmpOff; + } return noErr; } @@ -827,6 +821,7 @@ static OSErr DeallocateFork( // Function: Flushes the extent file for a specified volume //_________________________________________________________________________________ +__private_extern__ OSErr FlushExtentFile( ExtendedVCB *vcb ) { FCB * fcb; @@ -856,6 +851,7 @@ OSErr FlushExtentFile( ExtendedVCB *vcb ) // an HFS volume. //_________________________________________________________________________________ +__private_extern__ SInt32 CompareExtentKeys( const HFSExtentKey *searchKey, const HFSExtentKey *trialKey ) { SInt32 result; // ± 1 @@ -919,6 +915,7 @@ SInt32 CompareExtentKeys( const HFSExtentKey *searchKey, const HFSExtentKey *tri // an HFS volume. //_________________________________________________________________________________ +__private_extern__ SInt32 CompareExtentKeysPlus( const HFSPlusExtentKey *searchKey, const HFSPlusExtentKey *trialKey ) { SInt32 result; // ± 1 @@ -973,6 +970,72 @@ SInt32 CompareExtentKeysPlus( const HFSPlusExtentKey *searchKey, const HFSPlusEx return( result ); } +/* + * Add a file extent to a file. + * + * Used by hfs_extendfs to extend the volume allocation bitmap file. + * + */ +__private_extern__ +int +AddFileExtent(ExtendedVCB *vcb, FCB *fcb, UInt32 startBlock, UInt32 blockCount) +{ + HFSPlusExtentKey foundKey; + HFSPlusExtentRecord foundData; + UInt32 foundIndex; + UInt32 hint; + UInt32 nextBlock; + SInt64 peof; + int i; + int error; + + peof = (SInt64)(fcb->ff_blocks + blockCount) * (SInt64)vcb->blockSize; + + error = SearchExtentFile(vcb, fcb, peof-1, &foundKey, foundData, &foundIndex, &hint, &nextBlock); + if (error != fxRangeErr) + return (EBUSY); + + /* + * Add new extent. See if there is room in the current record. + */ + if (foundData[foundIndex].blockCount != 0) + ++foundIndex; + if (foundIndex == kHFSPlusExtentDensity) { + /* + * Existing record is full so create a new one. + */ + foundKey.keyLength = kHFSPlusExtentKeyMaximumLength; + foundKey.forkType = kDataForkType; + foundKey.pad = 0; + foundKey.fileID = FTOC(fcb)->c_fileid; + foundKey.startBlock = nextBlock; + + foundData[0].startBlock = startBlock; + foundData[0].blockCount = blockCount; + + /* zero out remaining extents. 
*/ + for (i = 1; i < kHFSPlusExtentDensity; ++i) { + foundData[i].startBlock = 0; + foundData[i].blockCount = 0; + } + + foundIndex = 0; + + error = CreateExtentRecord(vcb, &foundKey, foundData, &hint); + if (error == fxOvFlErr) + error = dskFulErr; + } else { + /* + * Add a new extent into existing record. + */ + foundData[foundIndex].startBlock = startBlock; + foundData[foundIndex].blockCount = blockCount; + error = UpdateExtentRecord(vcb, fcb, &foundKey, foundData, hint); + } + (void) FlushExtentFile(vcb); + + return (error); +} //_________________________________________________________________________________ @@ -1000,6 +1063,7 @@ SInt32 CompareExtentKeysPlus( const HFSPlusExtentKey *searchKey, const HFSPlusEx // Note: ExtendFile updates the PEOF in the FCB. //_________________________________________________________________________________ +__private_extern__ OSErr ExtendFileC ( ExtendedVCB *vcb, // volume that file resides on FCB *fcb, // FCB of file to truncate @@ -1021,6 +1085,7 @@ OSErr ExtendFileC ( Boolean allOrNothing; Boolean forceContig; Boolean wantContig; + Boolean useMetaZone; Boolean needsFlush; UInt32 actualStartBlock; UInt32 actualNumBlocks; @@ -1055,7 +1120,7 @@ OSErr ExtendFileC ( // Determine how many blocks need to be allocated. // Round up the number of desired bytes to add. // - blocksToAdd = FileBytesToBlocks(bytesToAdd, volumeBlockSize); + blocksToAdd = howmany(bytesToAdd, volumeBlockSize); bytesToAdd = (SInt64)((SInt64)blocksToAdd * (SInt64)volumeBlockSize); /* @@ -1070,7 +1135,7 @@ OSErr ExtendFileC ( FTOC(fcb)->c_blocks += blocksToAdd; fcb->ff_blocks += blocksToAdd; - FTOC(fcb)->c_flag |= C_MODIFIED; + FTOC(fcb)->c_flag |= C_MODIFIED | C_FORCEUPDATE; *actualBytesAdded = bytesToAdd; return (0); } @@ -1092,11 +1157,11 @@ OSErr ExtendFileC ( // then set the maximum number of bytes to the requested number of bytes // rounded up to a multiple of the clump size. // - if ((fcb->fcbClmpSize > volumeBlockSize) + if ((vcb->vcbClpSiz > volumeBlockSize) && (bytesToAdd < (SInt64)HFS_MAX_DEFERED_ALLOC) && (flags & kEFNoClumpMask) == 0) { - maximumBytes = (SInt64)FileBytesToBlocks(bytesToAdd, fcb->fcbClmpSize); - maximumBytes *= fcb->fcbClmpSize; + maximumBytes = (SInt64)howmany(bytesToAdd, vcb->vcbClpSiz); + maximumBytes *= vcb->vcbClpSiz; } else { maximumBytes = bytesToAdd; } @@ -1131,7 +1196,7 @@ OSErr ExtendFileC ( // Enough blocks are already allocated. Just update the FCB to reflect the new length. fcb->ff_blocks = peof / volumeBlockSize; FTOC(fcb)->c_blocks += (bytesToAdd / volumeBlockSize); - FTOC(fcb)->c_flag |= C_MODIFIED; + FTOC(fcb)->c_flag |= C_MODIFIED | C_FORCEUPDATE; goto Exit; } if (err != fxRangeErr) // Any real error? 
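The hunks above replace the removed FileBytesToBlocks() with the standard BSD howmany() macro from <sys/param.h>, which rounds an integer division up; ExtendFileC then multiplies the rounded block count back into bytesToAdd so later arithmetic stays block-aligned. A minimal user-space sketch of that rounding (the byte count and block size below are made-up values, not from the patch):

#include <stdio.h>
#include <sys/param.h>   /* howmany(x, y): rounds x/y up to the next integer */

int main(void)
{
    long long bytesToAdd = 100000LL;   /* hypothetical request */
    long volumeBlockSize = 4096;       /* hypothetical allocation block size */

    /* Round up to whole allocation blocks, then back to bytes, as ExtendFileC does. */
    long blocksToAdd = howmany(bytesToAdd, volumeBlockSize);
    long long bytesRounded = (long long)blocksToAdd * volumeBlockSize;

    printf("%lld bytes -> %ld blocks = %lld bytes\n",
           bytesToAdd, blocksToAdd, bytesRounded);
    return 0;   /* prints: 100000 bytes -> 25 blocks = 102400 bytes */
}

Unlike the deleted routine, whose header warned that it squeezed a 64-by-32 division into a 32-bit result, the macro itself performs the arithmetic in the operands' own types.
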
@@ -1158,6 +1223,7 @@ OSErr ExtendFileC ( // else, keep getting bits and pieces (non-contig) err = noErr; wantContig = true; + useMetaZone = flags & kEFMetadataMask; vcb->vcbFreeExtCnt = 0; /* For now, force rebuild of free extent list */ do { if (blockHint != 0) @@ -1183,16 +1249,18 @@ OSErr ExtendFileC ( err = BlockAllocate( vcb, startBlock, - MIN(bytesToAdd, availbytes), - MIN(maximumBytes, availbytes), + howmany(MIN(bytesToAdd, availbytes), volumeBlockSize), + howmany(MIN(maximumBytes, availbytes), volumeBlockSize), wantContig, + useMetaZone, &actualStartBlock, &actualNumBlocks); } } } else { - err = BlockAllocate(vcb, startBlock, bytesToAdd, maximumBytes, - wantContig, &actualStartBlock, &actualNumBlocks); + err = BlockAllocate(vcb, startBlock, howmany(bytesToAdd, volumeBlockSize), + howmany(maximumBytes, volumeBlockSize), wantContig, useMetaZone, + &actualStartBlock, &actualNumBlocks); } if (err == dskFulErr) { if (forceContig) @@ -1205,8 +1273,20 @@ OSErr ExtendFileC ( } if (actualNumBlocks != 0) err = noErr; + if (useMetaZone == 0) { + /* Couldn't get anything so dip into metadata zone */ + err = noErr; + useMetaZone = 1; + continue; + } } if (err == noErr) { + if (actualNumBlocks != 0) { + // this catalog entry *must* get forced to disk when + // hfs_update() is called + FTOC(fcb)->c_flag |= C_FORCEUPDATE; + } + // Add the new extent to the existing extent record, or create a new one. if ((actualStartBlock == startBlock) && (blockHint == 0)) { // We grew the file's last extent, so just adjust the number of blocks. @@ -1284,7 +1364,7 @@ OSErr ExtendFileC ( } fcb->ff_blocks += (bytesThisExtent / volumeBlockSize); FTOC(fcb)->c_blocks += (bytesThisExtent / volumeBlockSize); - FTOC(fcb)->c_flag |= C_MODIFIED; + FTOC(fcb)->c_flag |= C_MODIFIED | C_FORCEUPDATE; // If contiguous allocation was requested, then we've already got one contiguous // chunk. If we didn't get all we wanted, then adjust the error to disk full. @@ -1298,6 +1378,13 @@ OSErr ExtendFileC ( ErrorExit: Exit: + if (VCBTOHFS(vcb)->hfs_flags & HFS_METADATA_ZONE) { + /* Keep the roving allocator out of the metadata zone. */ + if (vcb->nextAllocation >= VCBTOHFS(vcb)->hfs_metazone_start && + vcb->nextAllocation <= VCBTOHFS(vcb)->hfs_metazone_end) { + vcb->nextAllocation = VCBTOHFS(vcb)->hfs_metazone_end + 1; + } + } *actualBytesAdded = (SInt64)(fcb->ff_blocks - prevblocks) * (SInt64)volumeBlockSize; if (needsFlush) @@ -1335,6 +1422,7 @@ Overflow: // Note: TruncateFile updates the PEOF in the FCB. //_________________________________________________________________________________ +__private_extern__ OSErr TruncateFileC ( ExtendedVCB *vcb, // volume that file resides on FCB *fcb, // FCB of file to truncate @@ -1378,7 +1466,7 @@ OSErr TruncateFileC ( // two gigabytes or more, then round down by one allocation block (??? really? // shouldn't that be an error?). // - nextBlock = FileBytesToBlocks(peof, vcb->blockSize); // number of allocation blocks to remain in file + nextBlock = howmany(peof, vcb->blockSize); // number of allocation blocks to remain in file peof = (SInt64)((SInt64)nextBlock * (SInt64)vcb->blockSize); // number of bytes in those blocks if ((vcb->vcbSigWord == kHFSSigWord) && (peof >= kTwoGigabytes)) { #if DEBUG_BUILD @@ -1391,10 +1479,16 @@ OSErr TruncateFileC ( // // Update FCB's length // + /* + * XXX Any errors could cause ff_blocks and c_blocks to get out of sync... 
+ */ numBlocks = peof / vcb->blockSize; FTOC(fcb)->c_blocks -= (fcb->ff_blocks - numBlocks); fcb->ff_blocks = numBlocks; - FTOC(fcb)->c_flag |= C_MODIFIED; + + // this catalog entry is modified and *must* get forced + // to disk when hfs_update() is called + FTOC(fcb)->c_flag |= C_MODIFIED | C_FORCEUPDATE; // // If the new PEOF is 0, then truncateToExtent has no meaning (we should always deallocate @@ -1502,6 +1596,147 @@ ErrorExit: } +/* + * HFS Plus only + * + */ +__private_extern__ +OSErr HeadTruncateFile ( + ExtendedVCB *vcb, + FCB *fcb, + UInt32 headblks) +{ + HFSPlusExtentRecord extents; + HFSPlusExtentRecord tailExtents; + HFSCatalogNodeID fileID; + UInt8 forkType; + UInt32 blkcnt; + UInt32 startblk; + UInt32 blksfreed; + int i, j; + int error; + + + if (vcb->vcbSigWord != kHFSPlusSigWord) + return (-1); + + forkType = FORK_IS_RSRC(fcb) ? kResourceForkType : kDataForkType; + fileID = FTOC(fcb)->c_fileid; + bzero(tailExtents, sizeof(tailExtents)); + + blksfreed = 0; + startblk = 0; + + /* + * Process catalog resident extents + */ + for (i = 0, j = 0; i < kHFSPlusExtentDensity; ++i) { + blkcnt = fcb->fcbExtents[i].blockCount; + if (blkcnt == 0) + break; /* end of extents */ + + if (blksfreed < headblks) { + error = BlockDeallocate(vcb, fcb->fcbExtents[i].startBlock, blkcnt); + /* + * Any errors after the first BlockDeallocate + * must be ignored so we can put the file in + * a known state. + */ + if (error) { + if (i == 0) + goto ErrorExit; /* uh oh */ + else { + printf("HeadTruncateFile: problems deallocating %s (%d)\n", + FTOC(fcb)->c_desc.cd_nameptr ? FTOC(fcb)->c_desc.cd_nameptr : "", error); + error = 0; + } + } + + blksfreed += blkcnt; + fcb->fcbExtents[i].startBlock = 0; + fcb->fcbExtents[i].blockCount = 0; + } else { + tailExtents[j].startBlock = fcb->fcbExtents[i].startBlock; + tailExtents[j].blockCount = blkcnt; + ++j; + } + startblk += blkcnt; + } + + if (blkcnt == 0) + goto CopyExtents; + + /* + * Process overflow extents + */ + for (;;) { + UInt32 extblks; + + error = FindExtentRecord(vcb, forkType, fileID, startblk, false, NULL, extents, NULL); + if (error) { + /* + * Any errors after the first BlockDeallocate + * must be ignored so we can put the file in + * a known state. + */ + if (error != btNotFound) + printf("HeadTruncateFile: problems finding extents %s (%d)\n", + FTOC(fcb)->c_desc.cd_nameptr ? FTOC(fcb)->c_desc.cd_nameptr : "", error); + error = 0; + break; + } + + for(i = 0, extblks = 0; i < kHFSPlusExtentDensity; ++i) { + blkcnt = extents[i].blockCount; + if (blkcnt == 0) + break; /* end of extents */ + + if (blksfreed < headblks) { + error = BlockDeallocate(vcb, extents[i].startBlock, blkcnt); + if (error) { + printf("HeadTruncateFile: problems deallocating %s (%d)\n", + FTOC(fcb)->c_desc.cd_nameptr ? FTOC(fcb)->c_desc.cd_nameptr : "", error); + error = 0; + } + blksfreed += blkcnt; + } else { + tailExtents[j].startBlock = extents[i].startBlock; + tailExtents[j].blockCount = blkcnt; + ++j; + } + extblks += blkcnt; + } + + error = DeleteExtentRecord(vcb, forkType, fileID, startblk); + if (error) { + printf("HeadTruncateFile: problems deallocating %s (%d)\n", + FTOC(fcb)->c_desc.cd_nameptr ? 
FTOC(fcb)->c_desc.cd_nameptr : "", error); + error = 0; + } + + if (blkcnt == 0) + break; /* all done */ + + startblk += extblks; + } + +CopyExtents: + if (blksfreed) { + bcopy(tailExtents, fcb->fcbExtents, sizeof(tailExtents)); + blkcnt = fcb->ff_blocks - headblks; + FTOC(fcb)->c_blocks -= blkcnt; + fcb->ff_blocks = blkcnt; + + FTOC(fcb)->c_flag |= C_CHANGE | C_FORCEUPDATE; + + (void) FlushExtentFile(vcb); + } + +ErrorExit: + return MacToVFSError(error); +} + + //_________________________________________________________________________________ // Routine: SearchExtentRecord (was XRSearch) @@ -1749,11 +1984,6 @@ static OSErr UpdateExtentRecord ( // btFCB = GetFileControlBlock(vcb->extentsRefNum); - // XXXdbg - preflight that there's enough space - err = BTCheckFreeSpace(btFCB); - if (err) - return err; - MALLOC(btIterator, BTreeIterator *, sizeof(*btIterator), M_TEMP, M_WAITOK); bzero(btIterator, sizeof(*btIterator)); @@ -1811,31 +2041,8 @@ static OSErr UpdateExtentRecord ( -void HFSToHFSPlusExtents( - const HFSExtentRecord oldExtents, - HFSPlusExtentRecord newExtents) -{ - UInt32 i; - - // copy the first 3 extents - newExtents[0].startBlock = oldExtents[0].startBlock; - newExtents[0].blockCount = oldExtents[0].blockCount; - newExtents[1].startBlock = oldExtents[1].startBlock; - newExtents[1].blockCount = oldExtents[1].blockCount; - newExtents[2].startBlock = oldExtents[2].startBlock; - newExtents[2].blockCount = oldExtents[2].blockCount; - - // zero out the remaining ones - for (i = 3; i < kHFSPlusExtentDensity; ++i) - { - newExtents[i].startBlock = 0; - newExtents[i].blockCount = 0; - } -} - - -OSErr HFSPlusToHFSExtents( +static OSErr HFSPlusToHFSExtents( const HFSPlusExtentRecord oldExtents, HFSExtentRecord newExtents) { @@ -1864,7 +2071,7 @@ OSErr HFSPlusToHFSExtents( -OSErr GetFCBExtentRecord( +static OSErr GetFCBExtentRecord( const FCB *fcb, HFSPlusExtentRecord extents) { @@ -1923,6 +2130,7 @@ static Boolean ExtentsAreIntegral( // Called by BTOpenPath during volume mount //_________________________________________________________________________________ +__private_extern__ Boolean NodesAreContiguous( ExtendedVCB *vcb, FCB *fcb, diff --git a/bsd/hfs/hfscommon/Misc/VolumeAllocation.c b/bsd/hfs/hfscommon/Misc/VolumeAllocation.c index 8908358e5..8893ad1b4 100644 --- a/bsd/hfs/hfscommon/Misc/VolumeAllocation.c +++ b/bsd/hfs/hfscommon/Misc/VolumeAllocation.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -121,6 +121,7 @@ static OSErr BlockAllocateAny( UInt32 startingBlock, UInt32 endingBlock, UInt32 maxBlocks, + Boolean useMetaZone, UInt32 *actualStartBlock, UInt32 *actualNumBlocks); @@ -129,6 +130,7 @@ static OSErr BlockAllocateContig( UInt32 startingBlock, UInt32 minBlocks, UInt32 maxBlocks, + Boolean useMetaZone, UInt32 *actualStartBlock, UInt32 *actualNumBlocks); @@ -138,19 +140,10 @@ static OSErr BlockFindContiguous( UInt32 endingBlock, UInt32 minBlocks, UInt32 maxBlocks, + Boolean useMetaZone, UInt32 *actualStartBlock, UInt32 *actualNumBlocks); -static OSErr BlockMarkAllocated( - ExtendedVCB *vcb, - UInt32 startingBlock, - UInt32 numBlocks); - -static OSErr BlockMarkFree( - ExtendedVCB *vcb, - UInt32 startingBlock, - UInt32 numBlocks); - static OSErr BlockAllocateKnown( ExtendedVCB *vcb, UInt32 maxBlocks, @@ -175,19 +168,17 @@ static OSErr BlockAllocateKnown( ; the volume's allocation block pointer will be used as a starting ; point. 
; -; All requests will be rounded up to the next highest clump size, as -; indicated in the file's FCB. -; ; Input Arguments: ; vcb - Pointer to ExtendedVCB for the volume to allocate space on ; fcb - Pointer to FCB for the file for which storage is being allocated ; startingBlock - Preferred starting allocation block, 0 = no preference ; forceContiguous - Force contiguous flag - if bit 0 set (NE), allocation is contiguous ; or an error is returned -; bytesRequested - Number of bytes requested. If the allocation is non-contiguous, +; useMetaZone - OK to dip into metadata allocation zone +; minBlocks - Number of blocks requested. If the allocation is non-contiguous, ; less than this may actually be allocated -; bytesMaximum - The maximum number of bytes to allocate. If there is additional free -; space after bytesRequested, then up to bytesMaximum bytes should really +; maxBlocks - The maximum number of blocks to allocate. If there is additional free +; space after minBlocks, then up to maxBlocks blocks should really ; be allocated. (Used by ExtendFileC to round up allocations to a multiple ; of the file's clump size.) ; @@ -201,21 +192,22 @@ static OSErr BlockAllocateKnown( ;________________________________________________________________________________ */ +__private_extern__ OSErr BlockAllocate ( ExtendedVCB *vcb, /* which volume to allocate space on */ UInt32 startingBlock, /* preferred starting block, or 0 for no preference */ - SInt64 bytesRequested, /* desired number of BYTES to allocate */ - SInt64 bytesMaximum, /* maximum number of bytes to allocate */ + UInt32 minBlocks, /* desired number of blocks to allocate */ + UInt32 maxBlocks, /* maximum number of blocks to allocate */ Boolean forceContiguous, /* non-zero to force contiguous allocation and to force */ - /* bytesRequested bytes to actually be allocated */ + /* minBlocks blocks to actually be allocated */ + + Boolean useMetaZone, UInt32 *actualStartBlock, /* actual first block of allocation */ UInt32 *actualNumBlocks) /* number of blocks actually allocated; if forceContiguous */ - /* was zero, then this may represent fewer than bytesRequested */ - /* bytes */ + /* was zero, then this may represent fewer than minBlocks */ { + UInt32 freeBlocks; OSErr err; - UInt32 minBlocks; // minimum number of allocation blocks requested - UInt32 maxBlocks; // number of allocation blocks requested, rounded to clump size Boolean updateAllocPtr = false; // true if nextAllocation needs to be updated // @@ -223,25 +215,29 @@ OSErr BlockAllocate ( // *actualStartBlock = 0; *actualNumBlocks = 0; - - // - // Compute the number of allocation blocks requested, and maximum - // - minBlocks = FileBytesToBlocks(bytesRequested, vcb->blockSize); - maxBlocks = FileBytesToBlocks(bytesMaximum, vcb->blockSize); + freeBlocks = hfs_freeblks(VCBTOHFS(vcb), 0); // // If the disk is already full, don't bother. // - if (hfs_freeblks(VCBTOHFS(vcb), 0) == 0) { + if (freeBlocks == 0) { err = dskFulErr; goto Exit; } - if (forceContiguous && hfs_freeblks(VCBTOHFS(vcb), 0) < minBlocks) { + if (forceContiguous && freeBlocks < minBlocks) { err = dskFulErr; goto Exit; } - + /* + * Clip if necessary so we don't over-subscribe the free blocks. + */ + if (minBlocks > freeBlocks) { + minBlocks = freeBlocks; + } + if (maxBlocks > freeBlocks) { + maxBlocks = freeBlocks; + } + // // If caller didn't specify a starting block number, then use the volume's // next block to allocate from. 
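With the revised interface above, callers convert bytes to allocation blocks themselves before calling BlockAllocate(). A minimal sketch of such a call, assuming vcb, bytesRequested, and bytesMaximum are in scope (these names are illustrative, not part of the patch):

    UInt32 startBlock, numBlocks;
    OSErr  err;
    /* Hypothetical byte request converted to allocation blocks up front. */
    UInt32 minBlocks = howmany(bytesRequested, vcb->blockSize);
    UInt32 maxBlocks = howmany(bytesMaximum, vcb->blockSize);

    err = BlockAllocate(vcb, 0,        /* 0 = no preferred starting block */
                        minBlocks, maxBlocks,
                        false,         /* forceContiguous: fragments are fine */
                        false,         /* useMetaZone: stay out of the metadata zone */
                        &startBlock, &numBlocks);
    /* With forceContiguous false, noErr may still return fewer than minBlocks in numBlocks. */
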
@@ -252,19 +248,27 @@ OSErr BlockAllocate ( VCB_UNLOCK(vcb); updateAllocPtr = true; } + if (startingBlock >= vcb->totalBlocks) { + startingBlock = 0; /* overflow so start at beginning */ + } // // If the request must be contiguous, then find a sequence of free blocks // that is long enough. Otherwise, find the first free block. // if (forceContiguous) { - err = BlockAllocateContig(vcb, startingBlock, minBlocks, maxBlocks, actualStartBlock, actualNumBlocks); + err = BlockAllocateContig(vcb, startingBlock, minBlocks, maxBlocks, + useMetaZone, actualStartBlock, actualNumBlocks); /* * If we allocated from a new position then * also update the roving allocator. */ - if ((err == noErr) && (*actualStartBlock > startingBlock)) - vcb->nextAllocation = *actualStartBlock; + if ((err == noErr) && + (*actualStartBlock > startingBlock) && + ((*actualStartBlock < VCBTOHFS(vcb)->hfs_metazone_start) || + (*actualStartBlock > VCBTOHFS(vcb)->hfs_metazone_end))) { + vcb->nextAllocation = *actualStartBlock; /* XXX */ + } } else { /* * Scan the bitmap once, gather the N largest free extents, then */ err = BlockAllocateKnown(vcb, maxBlocks, actualStartBlock, actualNumBlocks); if (err == dskFulErr) - err = BlockAllocateAny(vcb, startingBlock, vcb->totalBlocks, maxBlocks, actualStartBlock, actualNumBlocks); + err = BlockAllocateAny(vcb, startingBlock, vcb->totalBlocks, + maxBlocks, useMetaZone, actualStartBlock, + actualNumBlocks); if (err == dskFulErr) - err = BlockAllocateAny(vcb, 0, startingBlock, maxBlocks, actualStartBlock, actualNumBlocks); + err = BlockAllocateAny(vcb, 1, startingBlock, maxBlocks, + useMetaZone, actualStartBlock, + actualNumBlocks); } if (err == noErr) { @@ -291,13 +299,16 @@ OSErr BlockAllocate ( // VCB_LOCK(vcb); - if (updateAllocPtr) + if (updateAllocPtr && + ((*actualStartBlock < VCBTOHFS(vcb)->hfs_metazone_start) || + (*actualStartBlock > VCBTOHFS(vcb)->hfs_metazone_end))) { vcb->nextAllocation = *actualStartBlock; - + } // // Update the number of free blocks on the volume // vcb->freeBlocks -= *actualNumBlocks; + hfs_generate_volume_notifications(VCBTOHFS(vcb)); VCB_UNLOCK(vcb); MarkVCBDirty(vcb); @@ -329,6 +340,7 @@ Exit: ;________________________________________________________________________________ */ +__private_extern__ OSErr BlockDeallocate ( ExtendedVCB *vcb, // Which volume to deallocate space on UInt32 firstBlock, // First block in range to deallocate @@ -356,44 +368,98 @@ OSErr BlockDeallocate ( // VCB_LOCK(vcb); vcb->freeBlocks += numBlocks; + hfs_generate_volume_notifications(VCBTOHFS(vcb)); if (vcb->nextAllocation == (firstBlock + numBlocks)) vcb->nextAllocation -= numBlocks; VCB_UNLOCK(vcb); MarkVCBDirty(vcb); - + Exit: return err; } -/* -;_______________________________________________________________________ -; -; Routine: FileBytesToBlocks -; -; Function: Divide numerator by denominator, rounding up the result if there -; was a remainder. This is frequently used for computing the number -; of whole and/or partial blocks used by some count of bytes. -; Actuall divides a 64 bit by a 32 bit into a 32bit result -; -; CAREFULL!!! THIS CAN CAUSE OVERFLOW....USER BEWARE!!! 
-;_______________________________________________________________________ -*/ -UInt32 FileBytesToBlocks( - SInt64 numerator, - UInt32 denominator) +UInt8 freebitcount[16] = { + 4, 3, 3, 2, 3, 2, 2, 1, /* 0 1 2 3 4 5 6 7 */ + 3, 2, 2, 1, 2, 1, 1, 0, /* 8 9 A B C D E F */ +}; + +__private_extern__ +UInt32 +MetaZoneFreeBlocks(ExtendedVCB *vcb) { - UInt32 quotient; + UInt32 freeblocks; + UInt32 *currCache; + UInt32 blockRef; + UInt32 bit; + UInt32 lastbit; + int bytesleft; + int bytesperblock; + UInt8 byte; + UInt8 *buffer; + blockRef = 0; + bytesleft = freeblocks = 0; + bit = VCBTOHFS(vcb)->hfs_metazone_start; + if (bit == 1) + bit = 0; - quotient = (UInt32)(numerator / denominator); - if (quotient * denominator != numerator) - quotient++; - - return quotient; + lastbit = VCBTOHFS(vcb)->hfs_metazone_end; + bytesperblock = vcb->vcbVBMIOSize; + + /* + * Count all the bits from bit to lastbit. + */ + while (bit < lastbit) { + /* + * Get next bitmap block. + */ + if (bytesleft == 0) { + if (blockRef) { + (void) ReleaseBitmapBlock(vcb, blockRef, false); + blockRef = 0; + } + if (ReadBitmapBlock(vcb, bit, &currCache, &blockRef) != 0) { + return (0); + } + buffer = (UInt8 *)currCache; + bytesleft = bytesperblock; + } + byte = *buffer++; + freeblocks += freebitcount[byte & 0x0F]; + freeblocks += freebitcount[(byte >> 4) & 0x0F]; + bit += kBitsPerByte; + --bytesleft; + } + if (blockRef) + (void) ReleaseBitmapBlock(vcb, blockRef, false); + + return (freeblocks); } +/* + * Obtain the next allocation block (bit) that's + * outside the metadata allocation zone. + */ +static UInt32 NextBitmapBlock( + ExtendedVCB *vcb, + UInt32 bit) +{ + struct hfsmount *hfsmp = VCBTOHFS(vcb); + + if ((hfsmp->hfs_flags & HFS_METADATA_ZONE) == 0) + return (bit); + /* + * Skip over metadata allocation zone. + */ + if ((bit >= hfsmp->hfs_metazone_start) && + (bit <= hfsmp->hfs_metazone_end)) { + bit = hfsmp->hfs_metazone_end + 1; + } + return (bit); +} + /* ;_______________________________________________________________________ @@ -476,6 +542,12 @@ static OSErr ReleaseBitmapBlock( Boolean dirty) { struct buf *bp = (struct buf *)blockRef; + + if (blockRef == 0) { + if (dirty) + panic("ReleaseBitmapBlock: missing bp"); + return (0); + } if (bp) { if (dirty) { @@ -511,6 +583,7 @@ Inputs: startingBlock Preferred first block for allocation minBlocks Minimum number of contiguous blocks to allocate maxBlocks Maximum number of contiguous blocks to allocate + useMetaZone Outputs: actualStartBlock First block of range allocated, or 0 if error @@ -522,6 +595,7 @@ static OSErr BlockAllocateContig( UInt32 startingBlock, UInt32 minBlocks, UInt32 maxBlocks, + Boolean useMetaZone, UInt32 *actualStartBlock, UInt32 *actualNumBlocks) { @@ -541,18 +615,22 @@ static OSErr BlockAllocateContig( * with the free extent cache, this can lead to duplicate entries * in the cache, causing the same blocks to be allocated twice. */ - err = BlockFindContiguous(vcb, startingBlock, vcb->totalBlocks, minBlocks, maxBlocks, - actualStartBlock, actualNumBlocks); + err = BlockFindContiguous(vcb, startingBlock, vcb->totalBlocks, minBlocks, + maxBlocks, useMetaZone, actualStartBlock, actualNumBlocks); if (err == dskFulErr && startingBlock != 0) { /* * Constrain the endingBlock so we don't bother looking for ranges * that would overlap those found in the previous call. 
*/ - err = BlockFindContiguous(vcb, 0, startingBlock, minBlocks, maxBlocks, - actualStartBlock, actualNumBlocks); + err = BlockFindContiguous(vcb, 1, startingBlock, minBlocks, maxBlocks, + useMetaZone, actualStartBlock, actualNumBlocks); } if (err != noErr) goto Exit; + // sanity check + if ((*actualStartBlock + *actualNumBlocks) > vcb->totalBlocks) + panic("BlockAllocateContig: allocation overflow on \"%s\"", vcb->vcbVN); + // // Now mark those blocks allocated. // @@ -582,6 +660,7 @@ Inputs: startingBlock Preferred first block for allocation endingBlock Last block to check + 1 maxBlocks Maximum number of contiguous blocks to allocate + useMetaZone Outputs: actualStartBlock First block of range allocated, or 0 if error @@ -593,6 +672,7 @@ static OSErr BlockAllocateAny( UInt32 startingBlock, register UInt32 endingBlock, UInt32 maxBlocks, + Boolean useMetaZone, UInt32 *actualStartBlock, UInt32 *actualNumBlocks) { @@ -601,8 +681,8 @@ static OSErr BlockAllocateAny( register UInt32 currentWord; // Pointer to current word within bitmap block register UInt32 bitMask; // Word with given bits already set (ready to OR in) register UInt32 wordsLeft; // Number of words left in this bitmap block - UInt32 *buffer = NULL; - UInt32 *currCache = NULL; + UInt32 *buffer = NULL; + UInt32 *currCache = NULL; UInt32 blockRef; UInt32 bitsPerBlock; UInt32 wordsPerBlock; @@ -614,12 +694,18 @@ static OSErr BlockAllocateAny( maxBlocks = endingBlock - startingBlock; } + /* + * Skip over metadata blocks. + */ + if (!useMetaZone) + startingBlock = NextBitmapBlock(vcb, startingBlock); + // // Pre-read the first bitmap block // - err = ReadBitmapBlock(vcb, startingBlock, &currCache, &blockRef); + err = ReadBitmapBlock(vcb, startingBlock, &currCache, &blockRef); if (err != noErr) goto Exit; - buffer = currCache; + buffer = currCache; // // Set up the current position within the block @@ -644,7 +730,7 @@ static OSErr BlockAllocateAny( while (block < endingBlock) { if ((currentWord & bitMask) == 0) break; - + // Next bit ++block; bitMask >>= 1; @@ -652,27 +738,36 @@ static OSErr BlockAllocateAny( // Next word bitMask = kHighBitInWordMask; ++buffer; - + if (--wordsLeft == 0) { // Next block - buffer = currCache = NULL; + buffer = currCache = NULL; err = ReleaseBitmapBlock(vcb, blockRef, false); if (err != noErr) goto Exit; - err = ReadBitmapBlock(vcb, block, &currCache, &blockRef); + /* + * Skip over metadata blocks. + */ + if (!useMetaZone) { + block = NextBitmapBlock(vcb, block); + if (block >= endingBlock) { + err = dskFulErr; + goto Exit; + } + } + err = ReadBitmapBlock(vcb, block, &currCache, &blockRef); if (err != noErr) goto Exit; - buffer = currCache; + buffer = currCache; wordsLeft = wordsPerBlock; } - currentWord = SWAP_BE32 (*buffer); } } // Did we get to the end of the bitmap before finding a free block? // If so, then couldn't allocate anything. - if (block == endingBlock) { + if (block >= endingBlock) { err = dskFulErr; goto Exit; } @@ -717,13 +812,25 @@ static OSErr BlockAllocateAny( if (--wordsLeft == 0) { // Next block - buffer = currCache = NULL; + buffer = currCache = NULL; err = ReleaseBitmapBlock(vcb, blockRef, true); if (err != noErr) goto Exit; - err = ReadBitmapBlock(vcb, block, &currCache, &blockRef); + /* + * Skip over metadata blocks. 
+ */ + if (!useMetaZone) { + UInt32 nextBlock; + + nextBlock = NextBitmapBlock(vcb, block); + if (nextBlock != block) { + goto Exit; /* allocation gap, so stop */ + } + } + + err = ReadBitmapBlock(vcb, block, &currCache, &blockRef); if (err != noErr) goto Exit; - buffer = currCache; + buffer = currCache; // XXXdbg if (hfsmp->jnl) { @@ -741,6 +848,10 @@ static OSErr BlockAllocateAny( Exit: if (err == noErr) { *actualNumBlocks = block - *actualStartBlock; + + // sanity check + if ((*actualStartBlock + *actualNumBlocks) > vcb->totalBlocks) + panic("BlockAllocateAny: allocation overflow on \"%s\"", vcb->vcbVN); } else { *actualStartBlock = 0; @@ -828,6 +939,10 @@ static OSErr BlockAllocateKnown( vcb->vcbFreeExt[i-1].blockCount = newBlockCount; } + // sanity check + if ((*actualStartBlock + *actualNumBlocks) > vcb->totalBlocks) + panic("BlockAllocateKnown: allocation overflow on \"%s\"", vcb->vcbVN); + // // Now mark the found extent in the bitmap // @@ -851,7 +966,8 @@ Inputs: numBlocks Number of blocks to mark as allocated _______________________________________________________________________ */ -static OSErr BlockMarkAllocated( +__private_extern__ +OSErr BlockMarkAllocated( ExtendedVCB *vcb, UInt32 startingBlock, register UInt32 numBlocks) @@ -869,6 +985,7 @@ static OSErr BlockMarkAllocated( // XXXdbg struct hfsmount *hfsmp = VCBTOHFS(vcb); + // // Pre-read the bitmap block containing the first word of allocation // @@ -1018,7 +1135,8 @@ Inputs: numBlocks Number of blocks to mark as freed _______________________________________________________________________ */ -static OSErr BlockMarkFree( +__private_extern__ +OSErr BlockMarkFree( ExtendedVCB *vcb, UInt32 startingBlock, register UInt32 numBlocks) @@ -1036,6 +1154,12 @@ static OSErr BlockMarkFree( // XXXdbg struct hfsmount *hfsmp = VCBTOHFS(vcb); + if (startingBlock + numBlocks > vcb->totalBlocks) { + panic("hfs: block mark free: trying to free non-existent blocks (%d %d %d)\n", + startingBlock, numBlocks, vcb->totalBlocks); + } + + // // Pre-read the bitmap block containing the first word of allocation // @@ -1075,11 +1199,9 @@ static OSErr BlockMarkFree( numBits = numBlocks; // entire allocation is inside this one word bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits)); // turn off bits after last } -#if DEBUG_BUILD if ((*currentWord & SWAP_BE32 (bitMask)) != SWAP_BE32 (bitMask)) { - panic("BlockMarkFree: blocks not allocated!"); + goto Corruption; } -#endif *currentWord &= SWAP_BE32 (~bitMask); // clear the bits in the bitmap numBlocks -= numBits; // adjust number of blocks left to free @@ -1112,12 +1234,9 @@ static OSErr BlockMarkFree( currentWord = buffer; wordsLeft = wordsPerBlock; } - -#if DEBUG_BUILD if (*currentWord != SWAP_BE32 (kAllBitsSetInWord)) { - panic("BlockMarkFree: blocks not allocated!"); + goto Corruption; } -#endif *currentWord = 0; // clear the entire word numBlocks -= kBitsPerWord; @@ -1151,11 +1270,9 @@ static OSErr BlockMarkFree( currentWord = buffer; wordsLeft = wordsPerBlock; } -#if DEBUG_BUILD if ((*currentWord & SWAP_BE32 (bitMask)) != SWAP_BE32 (bitMask)) { - panic("BlockMarkFree: blocks not allocated!"); + goto Corruption; } -#endif *currentWord &= SWAP_BE32 (~bitMask); // clear the bits in the bitmap // No need to update currentWord or wordsLeft @@ -1167,6 +1284,17 @@ Exit: (void)ReleaseBitmapBlock(vcb, blockRef, true); return err; + +Corruption: +#if DEBUG_BUILD + panic("BlockMarkFree: blocks not allocated!"); +#else + printf("hfs: WARNING - blocks on volume %s not allocated!\n", vcb->vcbVN); + 
vcb->vcbAtrb |= kHFSVolumeInconsistentMask; + MarkVCBDirty(vcb); + err = EIO; + goto Exit; +#endif } @@ -1185,6 +1313,7 @@ Inputs: endingBlock Last possible block in range + 1 minBlocks Minimum number of blocks needed. Must be > 0. maxBlocks Maximum (ideal) number of blocks desired + useMetaZone OK to dip into metadata allocation zone Outputs: actualStartBlock First block of range found, or 0 if error @@ -1202,6 +1331,7 @@ static OSErr BlockFindContiguous( UInt32 endingBlock, UInt32 minBlocks, UInt32 maxBlocks, + Boolean useMetaZone, UInt32 *actualStartBlock, UInt32 *actualNumBlocks) { @@ -1228,7 +1358,13 @@ static OSErr BlockFindContiguous( stopBlock = endingBlock - minBlocks + 1; currentBlock = startingBlock; firstBlock = 0; - + + /* + * Skip over metadata blocks. + */ + if (!useMetaZone) + currentBlock = NextBitmapBlock(vcb, currentBlock); + // // Pre-read the first bitmap block. // @@ -1240,7 +1376,7 @@ static OSErr BlockFindContiguous( // wordsPerBlock = vcb->vcbVBMIOSize / kBytesPerWord; - wordsLeft = (startingBlock / kBitsPerWord) & (wordsPerBlock-1); // Current index into buffer + wordsLeft = (currentBlock / kBitsPerWord) & (wordsPerBlock-1); // Current index into buffer currentWord = buffer + wordsLeft; wordsLeft = wordsPerBlock - wordsLeft; @@ -1287,6 +1423,15 @@ static OSErr BlockFindContiguous( err = ReleaseBitmapBlock(vcb, blockRef, false); if (err != noErr) goto ErrorExit; + /* + * Skip over metadata blocks. + */ + if (!useMetaZone) { + currentBlock = NextBitmapBlock(vcb, currentBlock); + if (currentBlock >= stopBlock) + break; + } + err = ReadBitmapBlock(vcb, currentBlock, &buffer, &blockRef); if ( err != noErr ) goto ErrorExit; @@ -1363,6 +1508,18 @@ FoundUnused: err = ReleaseBitmapBlock(vcb, blockRef, false); if (err != noErr) goto ErrorExit; + /* + * Skip over metadata blocks. + */ + if (!useMetaZone) { + UInt32 nextBlock; + + nextBlock = NextBitmapBlock(vcb, currentBlock); + if (nextBlock != currentBlock) { + break; /* allocation gap, so stop */ + } + } + err = ReadBitmapBlock(vcb, currentBlock, &buffer, &blockRef); if ( err != noErr ) goto ErrorExit; diff --git a/bsd/hfs/hfscommon/headers/BTreesInternal.h b/bsd/hfs/hfscommon/headers/BTreesInternal.h index 1657de34d..00c9bfabe 100644 --- a/bsd/hfs/hfscommon/headers/BTreesInternal.h +++ b/bsd/hfs/hfscommon/headers/BTreesInternal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -220,6 +220,8 @@ enum BTreeTypes{ kReservedBTreeType = 255 // }; +#define kBTreeHeaderUserBytes 128 + typedef BTreeKey *BTreeKeyPtr; @@ -236,7 +238,8 @@ struct BTreeInfoRec{ ItemCount numRecords; ItemCount numNodes; ItemCount numFreeNodes; - UInt32 reserved; + UInt8 keyCompareType; + UInt8 reserved[3]; }; typedef struct BTreeInfoRec BTreeInfoRec; typedef BTreeInfoRec *BTreeInfoPtr; @@ -282,12 +285,8 @@ typedef BTreeIterator *BTreeIteratorPtr; typedef SInt32 (* IterateCallBackProcPtr)(BTreeKeyPtr key, void * record, UInt16 recordLen, void * state); -extern OSStatus BTOpenPath (FCB *filePtr, - KeyCompareProcPtr keyCompareProc, - GetBlockProcPtr getBlockProc, - ReleaseBlockProcPtr releaseBlockProc, - SetEndOfForkProcPtr setEndOfForkProc, - SetBlockSizeProcPtr setBlockSizeProc ); + +extern OSStatus BTOpenPath(FCB *filePtr, KeyCompareProcPtr keyCompareProc); extern OSStatus BTClosePath (FCB *filePtr ); @@ -342,10 +341,20 @@ extern OSStatus BTGetLastSync (FCB *filePtr, extern OSStatus BTSetLastSync (FCB *filePtr, UInt32 lastfsync ); -extern OSStatus BTCheckFreeSpace (FCB *filePtr); - extern OSStatus BTHasContiguousNodes(FCB *filePtr); +extern OSStatus BTGetUserData(FCB *filePtr, void * dataPtr, int dataSize); + +extern OSStatus BTSetUserData(FCB *filePtr, void * dataPtr, int dataSize); + +/* B-tree node reserve routines. */ +extern void BTReserveSetup(void); + +extern int BTReserveSpace(FCB *file, int operations, void * data); + +extern int BTReleaseReserve(FCB *file, void * data); + + #endif /* __APPLE_API_PRIVATE */ #endif /* KERNEL */ #endif // __BTREESINTERNAL__ diff --git a/bsd/hfs/hfscommon/headers/BTreesPrivate.h b/bsd/hfs/hfscommon/headers/BTreesPrivate.h index d9beee4ac..836a39895 100644 --- a/bsd/hfs/hfscommon/headers/BTreesPrivate.h +++ b/bsd/hfs/hfscommon/headers/BTreesPrivate.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -188,7 +188,7 @@ typedef struct BTreeControlBlock { // fields specific to BTree CBs - UInt8 reserved1; // keep for alignment with old style fields + UInt8 keyCompareType; /* Key string Comparison Type */ UInt8 btreeType; UInt16 treeDepth; FileReference fileRefNum; // refNum of btree file @@ -224,7 +224,7 @@ typedef struct BTreeControlBlock { // fields specific to BTree CBs UInt32 numHintChecks; UInt32 numPossibleHints; // Looks like a formatted hint UInt32 numValidHints; // Hint used to find correct record. 
- + UInt32 reservedNodes; } BTreeControlBlock, *BTreeControlBlockPtr; @@ -317,6 +317,10 @@ OSStatus ExtendBTree (BTreeControlBlockPtr btreePtr, UInt32 CalcMapBits (BTreeControlBlockPtr btreePtr); +SInt32 BTAvailableNodes (BTreeControlBlock *btree); + +void BTUpdateReserve (BTreeControlBlockPtr btreePtr, + int nodes); //////////////////////////////// Misc Operations //////////////////////////////// diff --git a/bsd/hfs/hfscommon/headers/CatalogPrivate.h b/bsd/hfs/hfscommon/headers/CatalogPrivate.h index 9c51d65f1..f5ae41fe8 100644 --- a/bsd/hfs/hfscommon/headers/CatalogPrivate.h +++ b/bsd/hfs/hfscommon/headers/CatalogPrivate.h @@ -188,6 +188,8 @@ extern OSErr CreateFileThreadID( FIDParam *filePB, WDCBRecPtr *wdcbPtr ); extern OSErr ExchangeFiles( FIDParam *filePB, WDCBRecPtr *wdcbPtr ); #endif +extern void UpdateCatalogName( ConstStr31Param srcName, Str31 destName ); + // Catalog Iterator Routines diff --git a/bsd/hfs/hfscommon/headers/FileMgrInternal.h b/bsd/hfs/hfscommon/headers/FileMgrInternal.h index 2ed9ad24f..83bd5b902 100644 --- a/bsd/hfs/hfscommon/headers/FileMgrInternal.h +++ b/bsd/hfs/hfscommon/headers/FileMgrInternal.h @@ -118,6 +118,7 @@ enum { kEFReserveMask = 0x04, /* keep block reserve */ kEFDeferMask = 0x08, /* defer file block allocations */ kEFNoClumpMask = 0x10, /* don't round up to clump size */ + kEFMetadataMask = 0x20, /* metadata allocation */ kTFTrunExtBit = 0, /* truncate to the extent containing new PEOF*/ kTFTrunExtMask = 1 @@ -289,9 +290,10 @@ ReplaceBTreeRecord (FileReference refNum, EXTERN_API_C( OSErr ) BlockAllocate (ExtendedVCB * vcb, UInt32 startingBlock, - SInt64 bytesRequested, - SInt64 bytesMaximum, + UInt32 minBlocks, + UInt32 maxBlocks, Boolean forceContiguous, + Boolean useMetaZone, UInt32 * startBlock, UInt32 * actualBlocks); @@ -300,10 +302,19 @@ BlockDeallocate (ExtendedVCB * vcb, UInt32 firstBlock, UInt32 numBlocks); +EXTERN_API_C( OSErr ) +BlockMarkAllocated(ExtendedVCB *vcb, UInt32 startingBlock, UInt32 numBlocks); + +EXTERN_API_C( OSErr ) +BlockMarkFree( ExtendedVCB *vcb, UInt32 startingBlock, UInt32 numBlocks); + EXTERN_API_C( UInt32 ) FileBytesToBlocks (SInt64 numerator, UInt32 denominator); +EXTERN_API_C( UInt32 ) +MetaZoneFreeBlocks(ExtendedVCB *vcb); + /* File Extent Mapping routines*/ EXTERN_API_C( OSErr ) FlushExtentFile (ExtendedVCB * vcb); @@ -338,6 +349,9 @@ MapFileBlockC (ExtendedVCB * vcb, daddr_t * startBlock, size_t * availableBytes); +EXTERN_API_C( int ) +AddFileExtent (ExtendedVCB *vcb, FCB *fcb, UInt32 startBlock, UInt32 blockCount); + #if TARGET_API_MACOS_X EXTERN_API_C( Boolean ) NodesAreContiguous (ExtendedVCB * vcb, diff --git a/bsd/i386/ucontext.h b/bsd/i386/ucontext.h index bd806e483..4af2eb278 100644 --- a/bsd/i386/ucontext.h +++ b/bsd/i386/ucontext.h @@ -43,6 +43,13 @@ struct mcontext { typedef struct mcontext * mcontext_t; +struct mcontext64 { + struct sigcontext sc; +}; +#define I386_MCONTEXT64_SIZE sizeof(struct mcontext64) + +typedef struct mcontext64 * mcontext64_t; + #endif /* __APPLE_API_UNSTABLE */ #endif /* _I386_UCONTEXT_H_ */ diff --git a/bsd/i386/vmparam.h b/bsd/i386/vmparam.h index b8603cf7f..35c21945f 100644 --- a/bsd/i386/vmparam.h +++ b/bsd/i386/vmparam.h @@ -28,7 +28,7 @@ #include -#define USRSTACK 0xc0000000 +#define USRSTACK 0xbfff9000 /* * Virtual memory related constants, all in bytes @@ -40,10 +40,10 @@ #define MAXDSIZ (RLIM_INFINITY) /* max data size */ #endif #ifndef DFLSSIZ -#define DFLSSIZ (512*1024) /* initial stack size limit */ +#define DFLSSIZ (8*1024*1024 - 7*4*1024) /* initial stack size 
limit */ #endif #ifndef MAXSSIZ -#define MAXSSIZ (64*1024*1024) /* max stack size */ +#define MAXSSIZ (64*1024*1024 - 7*4*1024) /* max stack size */ #endif #ifndef DFLCSIZ #define DFLCSIZ (0) /* initial core size limit */ diff --git a/bsd/if/ppc/if_en.c b/bsd/if/ppc/if_en.c deleted file mode 100644 index 3df54955d..000000000 --- a/bsd/if/ppc/if_en.c +++ /dev/null @@ -1,1132 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1997 Apple Computer, Inc. - * - * ethernet driver for mace on-board ethernet - * - * HISTORY - * - * Dieter Siegmund (dieter@next.com) Thu Feb 27 18:25:33 PST 1997 - * - ripped off code from MK/LINUX, turned it into a polled-mode - * driver for the PCI (8500) class machines - * - * Dieter Siegmund (dieter@next.com) Fri Mar 21 12:41:29 PST 1997 - * - reworked to support a BSD-style interface, and to support kdb polled - * interface and interrupt-driven interface concurrently - * - * Justin Walker (justin@apple.com) Tue May 20 10:29:29 PDT 1997 - * - Added multicast support - * - * Dieter Siegmund (dieter@next.com) Thu May 29 15:02:29 PDT 1997 - * - fixed problem with sending arp packets for ip address 0.0.0.0 - * - use kdp_register_send_receive() instead of defining - * en_send_pkt/en_recv_pkt routines to avoid name space - * collisions with IOEthernetDebugger and allow these routines to be - * overridden by a driverkit-style driver - * - * Dieter Siegmund (dieter@apple.com) Tue Jun 24 18:29:15 PDT 1997 - * - don't let the adapter auto-strip 802.3 receive frames, it messes - * up the frame size logic - * - * Dieter Siegmund (dieter@apple.com) Tue Aug 5 16:24:52 PDT 1997 - * - handle multicast address deletion correctly - */ -#ifdef MACE_DEBUG -/* - * Caveat: MACE_DEBUG delimits some code that is getting kind of - * stale. Before blindly turning on MACE_DEBUG for your - * testing, take a look at the code enabled by it to check - * that it is reasonably sane. 
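The vmparam.h deltas above are easy to misread, so here is the arithmetic spelled out: USRSTACK drops by exactly seven 4 KB pages (0xc0000000 - 0x7000 = 0xbfff9000), MAXSSIZ shrinks by the same seven pages so the lowest address a full-size stack can reach is unchanged, and the default soft limit DFLSSIZ is raised from 512 KB to 8 MB minus those same pages. A quick check using only values from the diff:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned long old_top = 0xc0000000UL;    /* old USRSTACK */
    unsigned long new_top = 0xbfff9000UL;    /* new USRSTACK */
    unsigned long pages7  = 7UL * 4 * 1024;  /* 7*4*1024 == 0x7000 */

    assert(old_top - new_top == pages7);     /* stack top moved down 7 pages */

    /* MAXSSIZ shrank by the same amount, so the stack floor is fixed. */
    assert(new_top - (64UL * 1024 * 1024 - pages7) ==
           old_top - 64UL * 1024 * 1024);    /* both 0xbc000000 */

    printf("stack floor: 0x%lx\n", old_top - 64UL * 1024 * 1024);
    return 0;
}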
- */ -#endif - -#include -#include - -#define RECEIVE_INT DBDMA_INT_ALWAYS - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "if_en.h" -#include "mace.h" - -extern int kdp_flag; - -#if NBPFILTER > 0 -#include -#endif - -static void polled_send_pkt(char * data, int len); -static void polled_receive_pkt(char *data, int *len, int timeout_ms); -void mace_dbdma_rx_intr(int unit, void *, void *); -void mace_dbdma_tx_intr(int, void *, void *); -void mace_pci_intr(int, void *); -void mace_service_queue(struct ifnet * ifp); - -#ifdef MACE_DEBUG -static int mace_watchdog(); -#endif - -static __inline__ vm_offset_t -KVTOPHYS(vm_offset_t v) -{ - return (v); -} - -typedef int (*funcptr)(char *, int, void *); - -#ifdef MACE_DEBUG -static int -macAddrsEqual(unsigned char * one, unsigned char * two) -{ - int i; - - for (i = 0; i < NUM_EN_ADDR_BYTES; i++) - if (*one++ != *two++) - return 0; - return 1; -} -#endif - -static __inline__ int -isprint(unsigned char c) -{ - return (c >= 0x20 && c <= 0x7e); -} - -static void -printEtherHeader(enet_addr_t * dh, enet_addr_t * sh, u_short etype) -{ - u_char * dhost = dh->ether_addr_octet; - u_char * shost = sh->ether_addr_octet; - - printf("Dst: %x:%x:%x:%x:%x:%x Src: %x:%x:%x:%x:%x:%x Type: 0x%x\n", - dhost[0], dhost[1], dhost[2], dhost[3], dhost[4], dhost[5], - shost[0], shost[1], shost[2], shost[3], shost[4], shost[5], - etype); -} - -static void -printData(u_char * data_p, int n_bytes) -{ -#define CHARS_PER_LINE 16 - char line_buf[CHARS_PER_LINE + 1]; - int line_pos; - int offset; - - for (line_pos = 0, offset = 0; offset < n_bytes; offset++, data_p++) { - if (line_pos == 0) { - printf("%04d ", offset); - } - - line_buf[line_pos] = isprint(*data_p) ? *data_p : '.'; - printf(" %02x", *data_p); - line_pos++; - if (line_pos == CHARS_PER_LINE) { - line_buf[CHARS_PER_LINE] = '\0'; - printf(" %s\n", line_buf); - line_pos = 0; - } - } - if (line_pos) { /* need to finish up the line */ - for (; line_pos < CHARS_PER_LINE; line_pos++) { - printf(" "); - line_buf[line_pos] = ' '; - } - line_buf[CHARS_PER_LINE] = '\0'; - printf(" %s\n", line_buf); - } -} - -static void -printEtherPacket(enet_addr_t * dhost, enet_addr_t * shost, u_short type, - u_char * data_p, int n_bytes) -{ - printEtherHeader(dhost, shost, type); - printData(data_p, n_bytes); -} - -void -printContiguousEtherPacket(u_char * data_p, int n_bytes) -{ - printEtherPacket((enet_addr_t *)data_p, - (enet_addr_t *)(data_p + NUM_EN_ADDR_BYTES), - *((u_short *)(data_p + (NUM_EN_ADDR_BYTES * 2))), - data_p, n_bytes); -} - -mace_t mace; - -#define MACE_DMA_AREA_SIZE (ETHER_RX_NUM_DBDMA_BUFS * ETHERNET_BUF_SIZE + PG_SIZE) -static unsigned long mace_rx_dma_area[(MACE_DMA_AREA_SIZE + sizeof(long))/sizeof(long)]; - -static unsigned long mace_tx_dma_area[(ETHERNET_BUF_SIZE + PG_SIZE + sizeof(long))/sizeof(long)]; - -/* - * mace_get_hwid - * - * This function computes the Ethernet Hardware address - * from PROM. (Its best not to ask how this is done.) 
- */ - -unsigned char -mace_swapbits(unsigned char bits) -{ - unsigned char mask = 0x1, i, newbits = 0; - - for (i = 0x80; i; mask <<= 1, i >>=1) { - if (bits & mask) - newbits |= i; - } - - return newbits; -} - -void -mace_get_hwid(unsigned char *hwid_addr, mace_t * m) -{ - int i; - - for (i = 0; i < NUM_EN_ADDR_BYTES; i++, hwid_addr += 16) { - m->macaddr[i] = mace_swapbits(*hwid_addr); - } -} - -/* - * mace_reset - * - * Reset the board.. - */ - -void -mace_reset() -{ - dbdma_reset(DBDMA_ETHERNET_RV); - dbdma_reset(DBDMA_ETHERNET_TX); -} - - -/* - * mace_geteh: - * - * This function gets the ethernet address (array of 6 unsigned - * bytes) from the MACE board registers. - * - */ - -void -mace_geteh(char *ep) -{ - int i; - unsigned char ep_temp; - - mace.ereg->iac = IAC_PHYADDR; eieio(); - - for (i = 0; i < ETHER_ADD_SIZE; i++) { - ep_temp = mace.ereg->padr; eieio(); - *ep++ = ep_temp; - } -} - -/* - * mace_seteh: - * - * This function sets the ethernet address (array of 6 unsigned - * bytes) on the MACE board. - */ - -static void -mace_seteh(char *ep) -{ - int i; - unsigned char status; - - if (mace.chip_id != MACE_REVISION_A2) { - mace.ereg->iac = IAC_ADDRCHG|IAC_PHYADDR; eieio(); - - while ((status = mace.ereg->iac)) { - if ((status & IAC_ADDRCHG) == 0) { - eieio(); - break; - } - eieio(); - } - } - else { - /* start to load the address.. */ - mace.ereg->iac = IAC_PHYADDR; eieio(); - } - - for (i = 0; i < NUM_EN_ADDR_BYTES; i++) { - mace.ereg->padr = *(ep+i); eieio(); - } - return; -} - -/* - * mace_setup_dbdma - * - * Setup various dbdma pointers. - */ - -void -mace_setup_dbdma() -{ - mace_t * m = &mace; - int i; - dbdma_command_t * d; - vm_offset_t address; - dbdma_regmap_t * regmap; - -#define ALIGN_MASK 0xfffffffcUL - if (m->rv_dma_area == 0) { - m->rv_dma_area = (unsigned char *) - ((((unsigned long)mace_rx_dma_area) + 3) & ALIGN_MASK); - m->rv_dma = dbdma_alloc(ETHER_RX_NUM_DBDMA_BUFS + 2); - m->tx_dma = dbdma_alloc(TX_NUM_DBDMA); - m->tx_dma_area = (unsigned char *) - ((((unsigned long)mace_tx_dma_area) + 3) & ALIGN_MASK); - } - - /* set up a ring of buffers */ - d = m->rv_dma; - for (i = 0; i < ETHER_RX_NUM_DBDMA_BUFS; i++, d++) { - address = (vm_offset_t) KVTOPHYS((vm_offset_t)&m->rv_dma_area[i*ETHERNET_BUF_SIZE]); - DBDMA_BUILD(d, DBDMA_CMD_IN_LAST, 0, ETHERNET_BUF_SIZE, - address, RECEIVE_INT, - DBDMA_WAIT_NEVER, - DBDMA_BRANCH_NEVER); - } - - /* stop when we hit the end of the list */ - DBDMA_BUILD(d, DBDMA_CMD_STOP, 0, 0, 0, RECEIVE_INT, - DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); - d++; - - /* branch to command at "address" ie. element 0 of the "array" */ - DBDMA_BUILD(d, DBDMA_CMD_NOP, 0, 0, 0, DBDMA_INT_NEVER, - DBDMA_WAIT_NEVER, DBDMA_BRANCH_ALWAYS); - address = (vm_offset_t) KVTOPHYS((vm_offset_t)m->rv_dma); - dbdma_st4_endian(&d->d_cmddep, address); - - m->rv_head = 0; - m->rv_tail = ETHER_RX_NUM_DBDMA_BUFS; /* always contains DBDMA_CMD_STOP */ - - /* stop/init/restart dma channel */ - dbdma_reset(DBDMA_ETHERNET_RV); - dbdma_reset(DBDMA_ETHERNET_TX); - - /* Set the wait value.. 
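mace_setup_dbdma() above builds the receive channel as a ring: ETHER_RX_NUM_DBDMA_BUFS input descriptors, one DBDMA_CMD_STOP slot, and a NOP that branches back to slot 0, with rv_tail always naming the STOP slot. The toy model below mirrors the head/tail rotation that mace_recv_pkt performs later in this file (the consumed head becomes the new STOP, the old tail is re-armed, and the head advances modulo NBUFS+1); it is an illustration of the invariant, not driver code:

#include <stdio.h>

#define NBUFS 32    /* mirrors ETHER_RX_NUM_DBDMA_BUFS */

int main(void)
{
    int rv_head = 0;
    int rv_tail = NBUFS;    /* slot NBUFS starts out holding the STOP */
    int i;

    for (i = 0; i < 5; i++) {                  /* consume five packets */
        rv_tail = rv_head;                     /* head becomes the new STOP */
        rv_head = (rv_head + 1) % (NBUFS + 1); /* advance over NBUFS+1 slots */
        printf("head=%d tail=%d\n", rv_head, rv_tail);
    }
    return 0;
}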
*/ - regmap = DBDMA_REGMAP(DBDMA_ETHERNET_RV); - dbdma_st4_endian(®map->d_wait, DBDMA_SET_CNTRL(0x00)); - - /* Set the tx wait value */ - regmap = DBDMA_REGMAP(DBDMA_ETHERNET_TX); - dbdma_st4_endian(®map->d_wait, DBDMA_SET_CNTRL(0x20)); - - flush_cache_v((vm_offset_t)m->rv_dma, - sizeof(dbdma_command_t) * (ETHER_RX_NUM_DBDMA_BUFS + 2)); - /* start receiving */ - dbdma_start(DBDMA_ETHERNET_RV, m->rv_dma); -} - -#ifdef MACE_DEBUG -static unsigned char testBuffer[PG_SIZE * 4]; -static unsigned char testMsg[] = "mace ethernet interface test"; - -static void -send_test_packet() -{ - unsigned char * tp; - - bzero(testBuffer, sizeof(testBuffer)); - - tp = testBuffer; - - /* send self-addressed packet */ - bcopy(&mace.macaddr[0], tp, NUM_EN_ADDR_BYTES); - tp += NUM_EN_ADDR_BYTES; - bcopy(&mace.macaddr[0], tp, NUM_EN_ADDR_BYTES); - tp += NUM_EN_ADDR_BYTES; - *tp++ = 0; - *tp++ = 0; - bcopy(testMsg, tp, sizeof(testMsg)); - polled_send_pkt(testBuffer, 80); - return; -} -#endif - -/* - * Function: init_mace - * - * Purpose: - * Called early on, initializes the adapter and readies it for - * kdb kernel debugging. - */ -void -init_mace() -{ - unsigned char status; - mace_t * m = &mace; - struct mace_board * ereg; - int mpc = 0; - - /* - * Only use in-kernel driver for early debugging (bootargs: kdp=1 or kdp=3) - */ - if ( (kdp_flag & 1) == 0 ) - { - return; - } - - bzero(&mace, sizeof(mace)); - - /* get the ethernet registers' mapped address */ - ereg = m->ereg - = (struct mace_board *) POWERMAC_IO(PCI_ETHERNET_BASE_PHYS); - mace_get_hwid((unsigned char *)POWERMAC_IO(PCI_ETHERNET_ADDR_PHYS), m); - - /* Reset the board & AMIC.. */ - mace_reset(); - - /* grab the MACE chip rev */ - m->chip_id = (ereg->chipid2 << 8 | ereg->chipid1); - - /* don't auto-strip for 802.3 */ - m->ereg->rcvfc &= ~(RCVFC_ASTRPRCV); - - /* set the ethernet address */ - mace_seteh(mace.macaddr); - { - unsigned char macaddr[NUM_EN_ADDR_BYTES]; - mace_geteh(macaddr); - printf("mace ethernet [%02x:%02x:%02x:%02x:%02x:%02x]\n", - macaddr[0], macaddr[1], macaddr[2], - macaddr[3], macaddr[4], macaddr[5]); - } - - /* Now clear the Multicast filter */ - if (m->chip_id != MACE_REVISION_A2) { - ereg->iac = IAC_ADDRCHG|IAC_LOGADDR; eieio(); - - while ((status = ereg->iac)) { - if ((status & IAC_ADDRCHG) == 0) - break; - eieio(); - } - eieio(); - } - else { - ereg->iac = IAC_LOGADDR; eieio(); - } - { - int i; - - for (i=0; i < 8; i++) - { ereg->ladrf = 0; - eieio(); - } - } - - /* register interrupt routines */ - mace_setup_dbdma(); - - /* Start the chip... 
*/ - m->ereg->maccc = MACCC_ENXMT|MACCC_ENRCV; eieio(); - { - volatile char ch = mace.ereg->ir; eieio(); - } - - delay(500); /* paranoia */ - mace.ereg->imr = 0xfe; eieio(); - - /* register our debugger routines */ - kdp_register_send_receive((kdp_send_t)polled_send_pkt, - (kdp_receive_t)polled_receive_pkt); - -#if 0 - printf("Testing 1 2 3\n"); - send_test_packet(); - printf("Testing 1 2 3\n"); - send_test_packet(); - printf("Testing 1 2 3\n"); - send_test_packet(); - do { - static unsigned char buf[ETHERNET_BUF_SIZE]; - int len; - int nmpc = mace.ereg->mpc; eieio(); - - if (nmpc > mpc) { - mpc = nmpc; - printf("mpc %d\n", mpc); - } - polled_receive_pkt(buf, &len, 100); - if (len > 0) { - printf("rx %d\n", len); - printContiguousEtherPacket(buf, len); - } - } while(1); -#endif - - return; -} - -#ifdef MACE_DEBUG -static void -txstatus(char * msg) -{ - volatile dbdma_regmap_t * dmap = DBDMA_REGMAP(DBDMA_ETHERNET_TX); - volatile unsigned long status; - volatile unsigned long intr; - volatile unsigned long branch; - volatile unsigned long wait; - - status = dbdma_ld4_endian(&dmap->d_status); eieio(); - intr = dbdma_ld4_endian(&dmap->d_intselect); eieio(); - branch = dbdma_ld4_endian(&dmap->d_branch); eieio(); - wait = dbdma_ld4_endian(&dmap->d_wait); eieio(); - printf("(%s s=0x%x i=0x%x b=0x%x w=0x%x)", msg, status, intr, branch, - wait); - return; -} -#endif - -static void -tx_dbdma(char * data, int len) -{ - unsigned long count; - dbdma_command_t * d; - unsigned long page; - - d = mace.tx_dma; - page = ((unsigned long) data) & PG_MASK; - if ((page + len) <= PG_SIZE) { /* one piece dma */ - DBDMA_BUILD(d, DBDMA_CMD_OUT_LAST, DBDMA_KEY_STREAM0, - len, - (vm_offset_t) KVTOPHYS((vm_offset_t) data), - DBDMA_INT_NEVER, - DBDMA_WAIT_IF_FALSE, DBDMA_BRANCH_NEVER); - } - else { /* two piece dma */ - count = PG_SIZE - page; - DBDMA_BUILD(d, DBDMA_CMD_OUT_MORE, DBDMA_KEY_STREAM0, - count, - (vm_offset_t)KVTOPHYS((vm_offset_t) data), - DBDMA_INT_NEVER, - DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); - d++; - DBDMA_BUILD(d, DBDMA_CMD_OUT_LAST, DBDMA_KEY_STREAM0, - len - count, (vm_offset_t) - KVTOPHYS((vm_offset_t)((unsigned char *)data + count)), - DBDMA_INT_NEVER, - DBDMA_WAIT_IF_FALSE, DBDMA_BRANCH_NEVER); - } - d++; - DBDMA_BUILD(d, DBDMA_CMD_LOAD_QUAD, DBDMA_KEY_SYSTEM, - 1, KVTOPHYS((vm_offset_t) &mace.ereg->xmtfs),DBDMA_INT_NEVER, - DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); - d++; - DBDMA_BUILD(d, DBDMA_CMD_LOAD_QUAD, DBDMA_KEY_SYSTEM, - 1, KVTOPHYS((vm_offset_t) &mace.ereg->ir), DBDMA_INT_ALWAYS, - DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); - d++; - DBDMA_BUILD(d, DBDMA_CMD_STOP, 0, 0, 0, 0, 0, 0); - flush_cache_v((vm_offset_t)mace.tx_dma, sizeof(dbdma_command_t) * TX_NUM_DBDMA); - dbdma_start(DBDMA_ETHERNET_TX, mace.tx_dma); - return; - -} - -static void -waitForDBDMADone(char * msg) -{ - { - /* wait for tx dma completion */ - volatile dbdma_regmap_t * dmap = DBDMA_REGMAP(DBDMA_ETHERNET_TX); - int i; - volatile unsigned long val; - - i = 0; - do { - val = dbdma_ld4_endian(&dmap->d_status); eieio(); - delay(50); - i++; - } while ((i < 100000) && (val & DBDMA_CNTRL_ACTIVE)); - if (i == 100000) - printf("mace(%s): tx_dbdma poll timed out 0x%x", msg, val); - } -} - -void -mace_service_queue(struct ifnet * ifp) -{ - unsigned char * buf_p; - struct mbuf * m; - struct mbuf * mp; - int len; - - if (mace.tx_busy) { /* transmit in progress? 
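tx_dbdma() above emits two DMA commands whenever the transmit buffer straddles a 4 KB page, since virtual-to-physical contiguity is only guaranteed within a page. The decision reduces to the page-offset arithmetic below, with PG_SIZE and PG_MASK as defined in if_en.h:

#include <stdio.h>

#define PG_SIZE 0x1000UL
#define PG_MASK (PG_SIZE - 1UL)

int main(void)
{
    unsigned long data = 0x12340f80UL;    /* example buffer address */
    unsigned long len  = 512;
    unsigned long page = data & PG_MASK;  /* offset within its page */

    if (page + len <= PG_SIZE) {
        printf("one piece: %lu bytes\n", len);
    } else {
        unsigned long count = PG_SIZE - page;   /* bytes left in the first page */
        printf("two pieces: %lu then %lu bytes\n", count, len - count);
    }
    return 0;
}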
*/ - return; - } - - IF_DEQUEUE(&(ifp->if_snd), m); - if (m == 0) { - return; - } - - len = m->m_pkthdr.len; - - if (len > ETHERMAXPACKET) { - printf("mace_start: packet too big (%d), dropping\n", len); - m_freem(m); - return; - - } - buf_p = mace.tx_dma_area; - if (m->m_nextpkt) { - printf("mace: sending more than one mbuf\n"); - } - for (mp = m; mp; mp = mp->m_next) { - if (mp->m_len == 0) - continue; - bcopy(mtod(mp, caddr_t), buf_p, min(mp->m_len, len)); - len -= mp->m_len; - buf_p += mp->m_len; - } - m_freem(m); - -#if NBPFILTER > 0 - if (ifp->if_bpf) - BPF_TAP(ifp->if_bpf, mace.tx_dma_area, m->m_pkthdr.len); -#endif - -#if 0 - printf("tx packet %d\n", m->m_pkthdr.len); - printContiguousEtherPacket(mace.tx_dma_area, m->m_pkthdr.len); -#endif - - /* fill in the dbdma records and kick off the dma */ - tx_dbdma(mace.tx_dma_area, m->m_pkthdr.len); - mace.tx_busy = 1; - return; -} - -#ifdef MACE_DEBUG -static int -mace_watchdog() -{ - struct ifnet * ifp = &mace.en_arpcom.ac_if; - int s; - - mace.txwatchdog++; - s = splnet(); - if (mace.rxintr == 0) { - printf("rx is hung up\n"); - rx_intr(); - } - mace.rxintr = 0; -#if 0 - if (mace.txintr == 0 && ifp->if_snd.ifq_head) { - if (mace.tx_busy) - dbdma_stop(DBDMA_ETHERNET_TX); - mace.tx_busy = 0; - mace_service_queue(ifp); - } - mace.txintr = 0; -#endif - timeout(mace_watchdog, 0, 10*hz); /* just in case we drop an interrupt */ - return (0); -} -#endif /* MACE_DEBUG */ - -static int -mace_start(struct ifnet * ifp) -{ -// int i = mace.tx_busy; - -// printf("mace_start %s\n", mace.tx_busy ? "(txBusy)" : ""); - mace_service_queue(ifp); - -// if (mace.tx_busy && !i) -// printf("(txStarted)\n"); - return 0; -} - -int -mace_recv_pkt(funcptr pktfunc, void * p) -{ - vm_offset_t address; - struct mace_board * board; - long bytes; - int done = 0; - int doContinue = 0; - mace_t * m; - unsigned long resid; - unsigned short status; - int tail; - - m = &mace; - board = m->ereg; - - /* remember where the tail was */ - tail = m->rv_tail; - for (done = 0; (done == 0) && (m->rv_head != tail);) { - dbdma_command_t * dmaHead; - - dmaHead = &m->rv_dma[m->rv_head]; - resid = dbdma_ld4_endian(&dmaHead->d_status_resid); - status = (resid >> 16); - bytes = resid & 0xffff; - bytes = ETHERNET_BUF_SIZE - bytes - 8; /* strip off FCS/CRC */ - - if ((status & DBDMA_ETHERNET_EOP) == 0) { - /* no packets are ready yet */ - break; - } - doContinue = 1; - /* if the packet is good, pass it up */ - if (bytes >= (ETHER_MIN_PACKET - 4)) { - char * dmaPacket; - dmaPacket = &m->rv_dma_area[m->rv_head * ETHERNET_BUF_SIZE]; - done = (*pktfunc)(dmaPacket, bytes, p); - } - /* mark the head as the new tail in the dma channel command list */ - DBDMA_BUILD(dmaHead, DBDMA_CMD_STOP, 0, 0, 0, RECEIVE_INT, - DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); - flush_cache_v((vm_offset_t)dmaHead, sizeof(*dmaHead)); - eieio(); - - /* make the tail an available dma'able entry */ - { - dbdma_command_t * dmaTail; - dmaTail = &m->rv_dma[m->rv_tail]; - address = KVTOPHYS((vm_offset_t) - &m->rv_dma_area[m->rv_tail*ETHERNET_BUF_SIZE]); - // this command is live so write it carefully - DBDMA_ST4_ENDIAN(&dmaTail->d_address, address); - dmaTail->d_status_resid = 0; - dmaTail->d_cmddep = 0; - eieio(); - DBDMA_ST4_ENDIAN(&dmaTail->d_cmd_count, - ((DBDMA_CMD_IN_LAST) << 28) | ((0) << 24) | - ((RECEIVE_INT) << 20) | - ((DBDMA_BRANCH_NEVER) << 18) | ((DBDMA_WAIT_NEVER) << 16) | - (ETHERNET_BUF_SIZE)); - eieio(); - flush_cache_v((vm_offset_t)dmaTail, sizeof(*dmaTail)); - } - /* head becomes the tail */ - m->rv_tail = 
m->rv_head; - - /* advance the head */ - m->rv_head++; - if (m->rv_head == (ETHER_RX_NUM_DBDMA_BUFS + 1)) - m->rv_head = 0; - } - if (doContinue) { - sync(); - dbdma_continue(DBDMA_ETHERNET_RV); - } - return (done); -} - -/* kdb handle buffer routines */ -struct kdbCopy { - int * len; - char * data; -}; - -static int -kdb_copy(char * pktBuf, int len, void * p) -{ - struct kdbCopy * cp = (struct kdbCopy *)p; - - bcopy(pktBuf, cp->data, len); - *cp->len = len; - return (1); /* signal that we're done */ -} - -/* kdb debugger routines */ -static void -polled_send_pkt(char * data, int len) -{ - waitForDBDMADone("mace: polled_send_pkt start"); - tx_dbdma(data, len); - waitForDBDMADone("mace: polled_send_pkt end"); - return; -} - -static void -polled_receive_pkt(char *data, int *len, int timeout_ms) -{ - struct kdbCopy cp; - - cp.len = len; - cp.data = data; - - timeout_ms *= 1000; - *len = 0; - while (mace_recv_pkt(kdb_copy, (void *)&cp) == 0) { - if (timeout_ms <= 0) - break; - delay(50); - timeout_ms -= 50; - } - return; -} - -/* Bump to force ethernet data to be 4-byte aligned - * (since the ethernet header is 14 bytes, and the 802.3 header is - * 22 = 14+8 bytes). This assumes that m_data is word-aligned - * (which it is). - */ -#define ETHER_DATA_ALIGN 2 - -/* - * Function: rxpkt - * - * Purpose: - * Called from within mace_recv_pkt to deal with a packet of data. - * rxpkt() allocates an mbuf(+cluser) and passes it up to the stacks. - * Returns: - * 0 if the packet was copied to an mbuf, 1 otherwise - */ -static int -rxpkt(char * data, int len, void * p) -{ - struct ether_header * eh_p = (struct ether_header *)data; - struct ifnet * ifp = &mace.en_arpcom.ac_if; - struct mbuf * m; - - int interesting; - - mace.rxintr++; - - /* mcast, bcast -- we're interested in either */ - interesting = eh_p->ether_dhost[0] & 1; - -#if NBPFILTER > 0 - /* - * Check if there's a bpf filter listening on this interface. - * If so, hand off the raw packet to bpf_tap(). - */ - if (ifp->if_bpf) { - BPF_TAP(ifp->if_bpf, data, len); - - /* - * Keep the packet if it's a broadcast or has our - * physical ethernet address (or if we support - * multicast and it's one). - */ - if ((interesting == 0) && bcmp(eh_p->ether_dhost, mace.macaddr, - sizeof(eh_p->ether_dhost)) != 0) { - return (1); - } - } -#endif - - /* - * We "know" a full-sized packet fits in one cluster. Set up the - * packet header, and if the length is sufficient, attempt to allocate - * a cluster. If that fails, fall back to the old way (m_devget()). - * Here, we take the simple approach of cluster vs. single mbuf. - */ - MGETHDR(m, M_DONTWAIT, MT_DATA); - if (m == 0) { -#ifdef MACE_DEBUG - printf("mget failed\n"); -#endif - return (1); - } - - if (len > (MHLEN - ETHER_DATA_ALIGN)) - { MCLGET(m, M_DONTWAIT); - if (m->m_flags&M_EXT) /* MCLGET succeeded */ - { m->m_data += ETHER_DATA_ALIGN; - bcopy(data, mtod(m, caddr_t), (unsigned)len); - } else - { -#ifdef MACE_DEBUG - printf("no clusters\n"); -#endif - m_free(m); - m = (struct mbuf *)m_devget(data, len, 0, ifp, 0); - if (m == 0) - return (1); - } - } else - { m->m_data += ETHER_DATA_ALIGN; - bcopy(data, mtod(m, caddr_t), (unsigned)len); - } - - /* - * Current code up the line assumes that the media header's been - * stripped, but we'd like to preserve it, just in case someone - * wants to peek. 
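The ETHER_DATA_ALIGN bump above works because both framings leave the payload two bytes short of a 4-byte boundary: a 14-byte Ethernet header (or a 22-byte 802.3 header) placed at word-aligned m_data puts the payload at offset 14 (or 22), and shifting everything by 2 lands it on a multiple of 4:

#include <assert.h>

#define ETHER_DATA_ALIGN 2

int main(void)
{
    assert((14 + ETHER_DATA_ALIGN) % 4 == 0);   /* Ethernet: 14 + 2 == 16 */
    assert((22 + ETHER_DATA_ALIGN) % 4 == 0);   /* 802.3:    22 + 2 == 24 */
    return 0;
}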
- */ - m->m_pkthdr.len = len; - m->m_len = len; - m->m_pkthdr.rcvif = ifp; - m->m_data += sizeof(*eh_p); - m->m_len -= sizeof (*eh_p); - m->m_pkthdr.len -= sizeof(*eh_p); - ether_input(ifp, eh_p, m); - - return (0); -} - - -static void -rx_intr() -{ - mace_recv_pkt(rxpkt, 0); -} - -void -mace_dbdma_rx_intr(int unit, void *ignored, void * arp) -{ - if (!mace.ready) - return; - - thread_call_func((thread_call_func_t)rx_intr, 0, TRUE); -} - - -int -mace_ioctl(struct ifnet * ifp,u_long cmd, caddr_t data) -{ - struct arpcom * ar; - unsigned error = 0; - struct ifaddr * ifa = (struct ifaddr *)data; - struct ifreq * ifr = (struct ifreq *)data; - struct sockaddr_in * sin; - - sin = (struct sockaddr_in *)(&((struct ifreq *)data)->ifr_addr); - ar = (struct arpcom *)ifp; - - switch (cmd) { - case SIOCAUTOADDR: - error = in_bootp(ifp, sin, &mace.en_arpcom.ac_enaddr); - break; - - case SIOCSIFADDR: -#if NeXT - ifp->if_flags |= (IFF_UP | IFF_RUNNING); -#else - ifp->if_flags |= IFF_UP; -#endif - switch (ifa->ifa_addr->sa_family) { - case AF_INET: - /* - * See if another station has *our* IP address. - * i.e.: There is an address conflict! If a - * conflict exists, a message is sent to the - * console. - */ - if (IA_SIN(ifa)->sin_addr.s_addr != 0) { /* don't bother for 0.0.0.0 */ - ar->ac_ipaddr = IA_SIN(ifa)->sin_addr; - arpwhohas(ar, &IA_SIN(ifa)->sin_addr); - } - break; - default: - break; - } - break; - - case SIOCSIFFLAGS: - /* - * If interface is marked down and it is running, then stop it - */ - if ((ifp->if_flags & IFF_UP) == 0 && - (ifp->if_flags & IFF_RUNNING) != 0) { - /* - * If interface is marked down and it is running, then - * stop it. - */ - ifp->if_flags &= ~IFF_RUNNING; - } else if ((ifp->if_flags & IFF_UP) != 0 && - (ifp->if_flags & IFF_RUNNING) == 0) { - /* - * If interface is marked up and it is stopped, then - * start it. - */ - ifp->if_flags |= IFF_RUNNING; - } - - /* - * If the state of the promiscuous bit changes, the - * interface must be reset to effect the change. 
- */ - if (((ifp->if_flags ^ mace.promisc) & IFF_PROMISC) && - (ifp->if_flags & IFF_RUNNING)) { - mace.promisc = ifp->if_flags & IFF_PROMISC; - mace_sync_promisc(ifp); - } - - break; - - case SIOCADDMULTI: - if ((error = ether_addmulti(ifr, ar)) == ENETRESET) - { if ((error = mace_addmulti(ifr, ar)) != 0) - { error = 0; - mace_sync_mcast(ifp); - } - } - break; - - case SIOCDELMULTI: - { - struct ether_addr enaddr[2]; /* [0] - addrlo, [1] - addrhi */ - - if ((error = ether_delmulti(ifr, ar, enaddr)) == ENETRESET) { - if ((error = mace_delmulti(ifr, ar, enaddr)) != 0) { - error = 0; - mace_sync_mcast(ifp); - } - } - } - break; - - default: - error = EINVAL; - break; - } - return (error); -} - -void -mace_init() -{ - struct ifnet * ifp = &mace.en_arpcom.ac_if; - - /* - * Only use in-kernel driver for early debugging (bootargs: kdp=1|3) - */ - if ( (kdp_flag & 1) == 0 ) - { - return; - } - - mace.tx_busy = 0; - mace.txintr = 0; - mace.promisc = 0; - - bzero((caddr_t)ifp, sizeof(struct ifnet)); - bcopy(&mace.macaddr, &mace.en_arpcom.ac_enaddr, NUM_EN_ADDR_BYTES); - - ifp->if_name = "en"; - ifp->if_unit = 0; - ifp->if_private = 0; - ifp->if_ioctl = mace_ioctl; - ifp->if_start = mace_start; - ifp->if_flags = - IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST; -#if NBPFILTER > 0 - bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header)); -#endif - if_attach(ifp); - ether_ifattach(ifp); - - mace.rxintr = 0; - - /* wire in the interrupt routines */ - pmac_register_int(PMAC_DMA_ETHERNET_RX, SPLNET, - mace_dbdma_rx_intr, 0); - pmac_register_int(PMAC_DMA_ETHERNET_TX, SPLNET, - mace_dbdma_tx_intr, 0); - -// pmac_register_int(PMAC_DEV_ETHERNET, SPLNET, mace_pci_intr); - mace.ready = 1; -#ifdef MACE_DEBUG - timeout(mace_watchdog, 0, 10*hz); /* just in case we drop an interrupt */ -#endif - return; -} - -/* - * mace_pci_intr - * - * Service MACE interrupt - */ - -void -mace_pci_intr(int device, void *ssp) -{ - unsigned char ir, retry, frame, packet, length; - - ir = mace.ereg->ir; eieio(); /* Clear Interrupt */ - packet = mace.ereg->mpc; eieio(); - length = mace.ereg->rntpc; eieio(); - - printf("(txI)"); - - if (ir & IR_XMTINT) { - retry = mace.ereg->xmtrc; eieio(); /* Grab transmit retry count */ - frame = mace.ereg->xmtfs; eieio(); -// if (mace.ready) -// mace_dbdma_tx_intr(device, ssp); - } - return; -} - -static void -tx_intr() -{ - mace.txintr++; - mace.tx_busy = 0; - mace_service_queue(&mace.en_arpcom.ac_if); -} - -/* - * mace_dbdma_tx_intr - * - * DBDMA interrupt routine - */ -void -mace_dbdma_tx_intr(int unit, void *ignored, void * arg) -{ - if (!mace.ready) - return; - - thread_call_func((thread_call_func_t)tx_intr, 0, TRUE); - return; -} diff --git a/bsd/if/ppc/if_en.h b/bsd/if/ppc/if_en.h deleted file mode 100644 index 7654c2933..000000000 --- a/bsd/if/ppc/if_en.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. 
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * MacOSX Mace driver - * Defines and device state - * Dieter Siegmund (dieter@next.com) Thu Feb 27 18:25:33 PST 1997 - * - ripped off code from MK/LINUX - */ - -#define PG_SIZE 0x1000UL -#define PG_MASK (PG_SIZE - 1UL) - -#define ETHERMTU 1500 -#define ETHER_RX_NUM_DBDMA_BUFS 32 -#define ETHERNET_BUF_SIZE (ETHERMTU + 36) -#define ETHER_MIN_PACKET 64 -#define TX_NUM_DBDMA 6 - -#define DBDMA_ETHERNET_EOP 0x40 - -typedef struct mace_s { - struct arpcom en_arpcom; - struct mace_board * ereg; /* ethernet register set address */ - unsigned char macaddr[NUM_EN_ADDR_BYTES]; /* mac address */ - int chip_id; - dbdma_command_t *rv_dma; - dbdma_command_t *tx_dma; - unsigned char *rv_dma_area; - unsigned char *tx_dma_area; - unsigned char multi_mask[8]; /* Multicast mask */ - unsigned char multi_use[64]; /* Per-mask-bit use count */ - int rv_tail; - int rv_head; - int tx_busy; - int txintr; - int rxintr; - int txwatchdog; - int ready; - int promisc; /* IFF_PROMISC state */ -} mace_t; - diff --git a/bsd/if/ppc/mace.c b/bsd/if/ppc/mace.c deleted file mode 100644 index 40921aeff..000000000 --- a/bsd/if/ppc/mace.c +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * MACE Device-dependent code (some still lives in if_en.c): - * - * MACE Multicast Address scheme - - * Compute Enet CRC for each Mcast address; take high 6 bits of 32-bit - * crc, giving a "bit index" into a 64-bit register. On packet receipt, - * if corresponding bit is set, accept packet. - * We keep track of requests in a per-hash-value table (16-bit counters - * should be sufficient). Since we're hashing, we only care about the - * hash value of each address. - * - * Apple Confidential - * - * (C) COPYRIGHT Apple Computer, Inc., 1994-1997 - * All Rights Reserved - * - * Justin C. 
Walker - */ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "if_en.h" -#include "mace.h" - -extern mace_t mace; - -#define ENET_CRCPOLY 0x04c11db7 - -/* Real fast bit-reversal algorithm, 6-bit values */ -int reverse6[] = -{ 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38, - 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c, - 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a, - 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e, - 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39, - 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d, - 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b, - 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f -}; - -unsigned int crc416(current, nxtval) -register unsigned int current; -register unsigned short nxtval; -{ register unsigned int counter; - register int highCRCBitSet, lowDataBitSet; - - /* Swap bytes */ - nxtval = ((nxtval & 0x00FF) << 8) | (nxtval >> 8); - - /* Compute bit-by-bit */ - for (counter = 0; counter != 16; ++counter) - { /* is high CRC bit set? */ - if ((current & 0x80000000) == NULL) - highCRCBitSet = 0; - else - highCRCBitSet = 1; - - current = current << 1; - - if ((nxtval & 0x0001) == NULL) - lowDataBitSet = 0; - else - lowDataBitSet = 1; - - nxtval = nxtval >> 1; - - /* do the XOR */ - if (highCRCBitSet ^ lowDataBitSet) - current = current ^ ENET_CRCPOLY; - } - return current; -} - -unsigned int mace_crc(unsigned short *address) -{ register unsigned int newcrc; - - newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */ - newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */ - newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */ - - return(newcrc); -} - -/* - * Add requested mcast addr to Mace's filter. Assume that the first - * address in the arpcom ac_multiaddrs list is the one we're interested in. - */ -int -mace_addmulti(register struct ifreq *ifr, register struct arpcom *ar) -{ register unsigned char *addr; - unsigned int crc; - unsigned char mask; - - addr = ar->ac_multiaddrs->enm_addrlo; - - crc = mace_crc((unsigned short *)addr)&0x3f; /* Big-endian alert! */ - crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ - if (mace.multi_use[crc]++) - return(0); /* This bit is already set */ - mask = crc % 8; - mask = (unsigned char)1 << mask; - mace.multi_mask[crc/8] |= mask; - return(1); -} - -int -mace_delmulti(register struct ifreq *ifr, register struct arpcom *ar, - struct ether_addr * enaddr) -{ register unsigned char *addr; - unsigned int crc; - unsigned char mask; - - addr = (char *)enaddr; /* XXX assumes addrlo == addrhi */ - - /* Now, delete the address from the filter copy, as indicated */ - crc = mace_crc((unsigned short *)addr)&0x3f; /* Big-endian alert! */ - crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ - if (mace.multi_use[crc] == 0) - return(EINVAL); /* That bit wasn't in use! */ - - if (--mace.multi_use[crc]) - return(0); /* That bit is still in use */ - - mask = crc % 8; - mask = ((unsigned char)1 << mask) ^ 0xff; /* To turn off bit */ - mace.multi_mask[crc/8] &= mask; - return(1); -} - -/* - * Sync the adapter with the software copy of the multicast mask - * (logical address filter). - * If we want all m-cast addresses, we just blast 1's into the filter. - * When we reverse this, we can use the current state of the (software) - * filter, which should have been kept up to date. 
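For reference, the multicast path above hashes each address with the Ethernet CRC polynomial: crc416() folds in one byte-swapped 16-bit chunk at a time, mace_crc() chains it over the three chunks of a 48-bit address, and six bits of the result (the code takes the low six and bit-reverses them with reverse6[]) pick one of 64 filter positions, byte crc/8 and bit crc%8 of the ladrf register. The standalone sketch below reproduces that computation for the IPv4 all-hosts group 01:00:5e:00:00:01; it also rewrites the original's `== NULL` comparisons, which are really tests against zero, as ordinary bit tests:

#include <stdio.h>

#define ENET_CRCPOLY 0x04c11db7

static unsigned int crc416(unsigned int current, unsigned short nxtval)
{
    unsigned int counter;

    /* Swap bytes, then fold in 16 bits LSB-first, as the driver does. */
    nxtval = (unsigned short)(((nxtval & 0x00ff) << 8) | (nxtval >> 8));
    for (counter = 0; counter != 16; ++counter) {
        int hiCRC = (current & 0x80000000) != 0;
        int loBit = (nxtval & 0x0001) != 0;

        current <<= 1;
        nxtval  >>= 1;
        if (hiCRC ^ loBit)
            current ^= ENET_CRCPOLY;
    }
    return current;
}

int main(void)
{
    /* 01:00:5e:00:00:01 as three big-endian 16-bit chunks. */
    unsigned short addr[3] = { 0x0100, 0x5e00, 0x0001 };
    unsigned int crc, idx, rev = 0, i;

    crc = crc416(0xffffffff, addr[0]);   /* address bits 47..32 */
    crc = crc416(crc, addr[1]);          /* address bits 31..16 */
    crc = crc416(crc, addr[2]);          /* address bits 15..0  */

    idx = crc & 0x3f;                    /* six bits, as in mace_addmulti */
    for (i = 0; i < 6; i++)              /* same effect as the reverse6[] table */
        rev = (rev << 1) | ((idx >> i) & 1);

    printf("ladrf byte %u, bit mask 0x%02x\n", rev / 8, 1u << (rev % 8));
    return 0;
}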
- */ -void -mace_sync_mcast(register struct ifnet * ifp) -{ register unsigned long temp, temp1; - register int i; - register char *p; - register struct mace_board *ereg = mace.ereg; - - temp = ereg->maccc; - - /* - * Have to deal with early rev of chip for updating LAF - * Don't know if any MacOSX systems still run this rev. - */ - if (mace.chip_id == MACERevA2) - { /* First, turn off receiver */ - temp1 = temp&~MACCC_ENRCV; - ereg->maccc = temp1; - eieio(); - - /* Then, check FIFO - frame being received will complete */ - temp1 = ereg->fifofc; - - mace.ereg->iac = IAC_LOGADDR; - eieio(); - } else - { ereg->iac = IAC_ADDRCHG|IAC_LOGADDR; - eieio(); - - while (temp1 = ereg->iac) - { eieio(); - if ((temp1&IAC_ADDRCHG) == 0) - break; - } - } - - if (ifp->if_flags & IFF_ALLMULTI) /* Then want ALL m-cast pkts */ - { /* set mask to all 1's */ - for (i=0;i<8;i++) - { ereg->ladrf = 0xff; - eieio(); - } - } else - { - /* Assuming everything is big-endian */ - for (i=0, p = &mace.multi_mask[0];i<8;i++) - { ereg->ladrf = *p++; - eieio(); - } - } - - ereg->maccc = temp; /* Reset config ctrlr */ - eieio(); - -} - -void -mace_sync_promisc(register struct ifnet *ifp) -{ - register u_long o_maccc, n_maccc; - register struct mace_board *ereg = mace.ereg; - - /* - * Save current state and disable receive. - */ - o_maccc = ereg->maccc; - n_maccc = o_maccc & ~MACCC_ENRCV; - ereg->maccc = n_maccc; - eieio(); - - /* - * Calculate new desired state - */ - if (ifp->if_flags & IFF_PROMISC) { - /* set PROMISC bit */ - o_maccc |= MACCC_PROM; - } else { - /* clear PROMISC bit */ - o_maccc &= ~MACCC_PROM; - } - - /* - * Note that the "old" mode includes the new promiscuous state now. - */ - ereg->maccc = o_maccc; - eieio(); -} diff --git a/bsd/if/ppc/mace.h b/bsd/if/ppc/mace.h deleted file mode 100644 index bb82bb51e..000000000 --- a/bsd/if/ppc/mace.h +++ /dev/null @@ -1,371 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 - * All Rights Reserved - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose and without fee is hereby granted, - * provided that the above copyright notice appears in all copies and - * that both the copyright notice and this permission notice appear in - * supporting documentation. 
- * - * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, - * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION - * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - * - */ -/* - * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 - * All Rights Reserved - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose and without fee is hereby granted, - * provided that the above copyright notice appears in all copies and - * that both the copyright notice and this permission notice appear in - * supporting documentation. - * - * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, - * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION - * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -/* - * MKLINUX-1.0DR2 - */ -/* - * PMach Operating System - * Copyright (c) 1995 Santa Clara University - * All Rights Reserved. - */ -/* - * Mach Operating System - * Copyright (c) 1991,1990,1989 Carnegie Mellon University - * All Rights Reserved. - * - * Permission to use, copy, modify and distribute this software and its - * documentation is hereby granted, provided that both the copyright - * notice and this permission notice appear in all copies of the - * software, derivative works or modified versions, and any portions - * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" - * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR - * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * - * Carnegie Mellon requests users of this software to return to - * - * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU - * School of Computer Science - * Carnegie Mellon University - * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon - * the rights to redistribute these changes. - */ -/* - * File: if_3c501.h - * Author: Philippe Bernadat - * Date: 1989 - * Copyright (c) 1989 OSF Research Institute - * - * 3COM Etherlink 3C501 Mach Ethernet drvier - */ -/* - Copyright 1990 by Open Software Foundation, -Cambridge, MA. - - All Rights Reserved - - Permission to use, copy, modify, and distribute this software and -its documentation for any purpose and without fee is hereby granted, -provided that the above copyright notice appears in all copies and -that both the copyright notice and this permission notice appear in -supporting documentation, and that the name of OSF or Open Software -Foundation not be used in advertising or publicity pertaining to -distribution of the software without specific, written prior -permission. 
- - OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE -INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, -IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, -NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION -WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ - -#ifdef KERNEL -#include -#endif - - -#define ENETPAD(n) char n[15] - -/* 0x50f0a000 */ -struct mace_board { - volatile unsigned char rcvfifo; /* 00 receive fifo */ - ENETPAD(epad0); - volatile unsigned char xmtfifo; /* 01 transmit fifo */ - ENETPAD(epad1); - volatile unsigned char xmtfc; /* 02 transmit frame control */ - ENETPAD(epad2); - volatile unsigned char xmtfs; /* 03 transmit frame status */ - ENETPAD(epad3); - volatile unsigned char xmtrc; /* 04 transmit retry count */ - ENETPAD(epad4); - volatile unsigned char rcvfc; /* 05 receive frame control -- 4 bytes */ - ENETPAD(epad5); - volatile unsigned char rcvfs; /* 06 receive frame status */ - ENETPAD(epad6); - volatile unsigned char fifofc; /* 07 fifo frame count */ - ENETPAD(epad7); - volatile unsigned char ir; /* 08 interrupt */ - ENETPAD(epad8); - volatile unsigned char imr; /* 09 interrupt mask */ - ENETPAD(epad9); - volatile unsigned char pr; /* 10 poll */ - ENETPAD(epad10); - volatile unsigned char biucc; /* 11 bus interface unit configuration control */ - ENETPAD(epad11); - volatile unsigned char fifocc; /* 12 fifo configuration control */ - ENETPAD(epad12); - volatile unsigned char maccc; /* 13 media access control configuration control */ - ENETPAD(epad13); - volatile unsigned char plscc; /* 14 physical layer signalling configuration control */ - ENETPAD(epad14); - volatile unsigned char phycc; /* 15 physical layer configuration control */ - ENETPAD(epad15); - volatile unsigned char chipid1; /* 16 chip identification LSB */ - ENETPAD(epad16); - volatile unsigned char chipid2; /* 17 chip identification MSB */ - ENETPAD(epad17); - volatile unsigned char iac; /* 18 internal address configuration */ - ENETPAD(epad18); - volatile unsigned char res1; /* 19 */ - ENETPAD(epad19); - volatile unsigned char ladrf; /* 20 logical address filter -- 8 bytes */ - ENETPAD(epad20); - volatile unsigned char padr; /* 21 physical address -- 6 bytes */ - ENETPAD(epad21); - volatile unsigned char res2; /* 22 */ - ENETPAD(epad22); - volatile unsigned char res3; /* 23 */ - ENETPAD(epad23); - volatile unsigned char mpc; /* 24 missed packet count */ - ENETPAD(epad24); - volatile unsigned char res4; /* 25 */ - ENETPAD(epad25); - volatile unsigned char rntpc; /* 26 runt packet count */ - ENETPAD(epad26); - volatile unsigned char rcvcc; /* 27 receive collision count */ - ENETPAD(epad27); - volatile unsigned char res5; /* 28 */ - ENETPAD(epad28); - volatile unsigned char utr; /* 29 user test */ - ENETPAD(epad29); - volatile unsigned char res6; /* 30 */ - ENETPAD(epad30); - volatile unsigned char res7; /* 31 */ - }; - -/* - * Chip Revisions.. 
- */ - -#define MACE_REVISION_B0 0x0940 -#define MACE_REVISION_A2 0x0941 - -/* xmtfc */ -#define XMTFC_DRTRY 0X80 -#define XMTFC_DXMTFCS 0x08 -#define XMTFC_APADXNT 0x01 - -/* xmtfs */ -#define XMTFS_XNTSV 0x80 -#define XMTFS_XMTFS 0x40 -#define XMTFS_LCOL 0x20 -#define XMTFS_MORE 0x10 -#define XMTFS_ONE 0x08 -#define XMTFS_DEFER 0x04 -#define XMTFS_LCAR 0x02 -#define XMTFS_RTRY 0x01 - -/* xmtrc */ -#define XMTRC_EXDEF 0x80 - -/* rcvfc */ -#define RCVFC_LLRCV 0x08 -#define RCVFC_M_R 0x04 -#define RCVFC_ASTRPRCV 0x01 - -/* rcvfs */ -#define RCVFS_OFLO 0x80 -#define RCVFS_CLSN 0x40 -#define RCVFS_FRAM 0x20 -#define RCVFS_FCS 0x10 -#define RCVFS_REVCNT 0x0f - -/* fifofc */ -#define FIFOCC_XFW_8 0x00 -#define FIFOCC_XFW_16 0x40 -#define FIFOCC_XFW_32 0x80 -#define FIFOCC_XFW_XX 0xc0 -#define FIFOCC_RFW_16 0x00 -#define FIFOCC_RFW_32 0x10 -#define FIFOCC_RFW_64 0x20 -#define FIFOCC_RFW_XX 0x30 -#define FIFOCC_XFWU 0x08 -#define FIFOCC_RFWU 0x04 -#define FIFOCC_XBRST 0x02 -#define FIFOCC_RBRST 0x01 - - -/* ir */ -#define IR_JAB 0x80 -#define IR_BABL 0x40 -#define IR_CERR 0x20 -#define IR_RCVCCO 0x10 -#define IR_RNTPCO 0x08 -#define IR_MPCO 0x04 -#define IR_RCVINT 0x02 -#define IR_XMTINT 0x01 - -/* imr */ -#define IMR_MJAB 0x80 -#define IMR_MBABL 0x40 -#define IMR_MCERR 0x20 -#define IMR_MRCVCCO 0x10 -#define IMR_MRNTPCO 0x08 -#define IMR_MMPCO 0x04 -#define IMR_MRCVINT 0x02 -#define IMR_MXMTINT 0x01 - -/* pr */ -#define PR_XMTSV 0x80 -#define PR_TDTREQ 0x40 -#define PR_RDTREQ 0x20 - -/* biucc */ -#define BIUCC_BSWP 0x40 -#define BIUCC_XMTSP04 0x00 -#define BIUCC_XMTSP16 0x10 -#define BIUCC_XMTSP64 0x20 -#define BIUCC_XMTSP112 0x30 -#define BIUCC_SWRST 0x01 - -/* fifocc */ -#define FIFOCC_XMTFW08W 0x00 -#define FIFOCC_XMTFW16W 0x40 -#define FIFOCC_XMTFW32W 0x80 - -#define FIFOCC_RCVFW16 0x00 -#define FIFOCC_RCVFW32 0x10 -#define FIFOCC_RCVFW64 0x20 - -#define FIFOCC_XMTFWU 0x08 -#define FIFOCC_RCVFWU 0x04 -#define FIFOCC_XMTBRST 0x02 -#define FIFOCC_RCVBRST 0x01 - -/* maccc */ -#define MACCC_PROM 0x80 -#define MACCC_DXMT2PD 0x40 -#define MACCC_EMBA 0x20 -#define MACCC_DRCVPA 0x08 -#define MACCC_DRCVBC 0x04 -#define MACCC_ENXMT 0x02 -#define MACCC_ENRCV 0x01 - -/* plscc */ -#define PLSCC_XMTSEL 0x08 -#define PLSCC_AUI 0x00 -#define PLSCC_TENBASE 0x02 -#define PLSCC_DAI 0x04 -#define PLSCC_GPSI 0x06 -#define PLSCC_ENPLSIO 0x01 - -/* phycc */ -#define PHYCC_LNKFL 0x80 -#define PHYCC_DLNKTST 0x40 -#define PHYCC_REVPOL 0x20 -#define PHYCC_DAPC 0x10 -#define PHYCC_LRT 0x08 -#define PHYCC_ASEL 0x04 -#define PHYCC_RWAKE 0x02 -#define PHYCC_AWAKE 0x01 - -/* iac */ -#define IAC_ADDRCHG 0x80 -#define IAC_PHYADDR 0x04 -#define IAC_LOGADDR 0x02 - -/* utr */ -#define UTR_RTRE 0x80 -#define UTR_RTRD 0x40 -#define UTR_RPA 0x20 -#define UTR_FCOLL 0x10 -#define UTR_RCVFCSE 0x08 - -#define UTR_NOLOOP 0x00 -#define UTR_EXTLOOP 0x02 -#define UTR_INLOOP 0x04 -#define UTR_INLOOP_M 0x06 - -#define ENET_PHYADDR_LEN 6 -#define ENET_HEADER 14 - -#define BFRSIZ 2048 -#define ETHER_ADD_SIZE 6 /* size of a MAC address */ -#define DSF_LOCK 1 -#define DSF_RUNNING 2 -#define MOD_ENAL 1 -#define MOD_PROM 2 - -/* - * MACE Chip revision codes - */ -#define MACERevA2 0x0941 -#define MACERevB0 0x0940 - -#ifdef KERNEL -int mace_delmulti __P((register struct ifreq *, register struct arpcom *, - struct ether_addr *)); -int mace_addmulti __P((register struct ifreq *, register struct arpcom *)); -void mace_sync_mcast __P((register struct ifnet *)); -void mace_sync_promisc __P((register struct ifnet *)); -#endif /* KERNEL */ - diff --git 
a/bsd/isofs/cd9660/cd9660_bmap.c b/bsd/isofs/cd9660/cd9660_bmap.c index b0d7d4601..e07162197 100644 --- a/bsd/isofs/cd9660/cd9660_bmap.c +++ b/bsd/isofs/cd9660/cd9660_bmap.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -102,6 +102,17 @@ cd9660_bmap(ap) if (ap->a_bnp == NULL) return (0); + /* + * Associated files have an Apple Double header + */ + if ((ip->i_flag & ISO_ASSOCIATED) && (lblkno > (ADH_BLKS - 1))) { + lblkno -= ADH_BLKS; + *ap->a_bnp = (ip->iso_start + lblkno); + if (ap->a_runp) + *ap->a_runp = 0; + return (0); + } + /* * Compute the requested block number */ @@ -137,7 +148,7 @@ cd9660_blktooff(ap) } */ *ap; { register struct iso_node *ip; - register struct iso_mnt *imp; + register struct iso_mnt *imp; if (ap->a_vp == NULL) return (EINVAL); @@ -185,6 +196,7 @@ struct vop_cmap_args /* { struct iso_node *ip = VTOI(ap->a_vp); size_t cbytes; int devBlockSize = 0; + off_t offset = ap->a_foffset; /* * Check for underlying vnode requests and ensure that logical @@ -195,15 +207,29 @@ struct vop_cmap_args /* { VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); - *ap->a_bpn = (daddr_t)(ip->iso_start + lblkno(ip->i_mnt, ap->a_foffset)); + /* + * Associated files have an Apple Double header + */ + if (ip->i_flag & ISO_ASSOCIATED) { + if (offset < ADH_SIZE) { + if (ap->a_run) + *ap->a_run = 0; + *ap->a_bpn = -1; + goto out; + } else { + offset -= ADH_SIZE; + } + } + + *ap->a_bpn = (daddr_t)(ip->iso_start + lblkno(ip->i_mnt, offset)); /* * Determine maximum number of contiguous bytes following the * requested offset. */ if (ap->a_run) { - if (ip->i_size > ap->a_foffset) - cbytes = ip->i_size - ap->a_foffset; + if (ip->i_size > offset) + cbytes = ip->i_size - offset; else cbytes = 0; @@ -211,9 +237,9 @@ struct vop_cmap_args /* { *ap->a_run = MIN(cbytes, ap->a_size); }; - +out: if (ap->a_poff) - *(int *)ap->a_poff = (long)ap->a_foffset & (devBlockSize - 1); + *(int *)ap->a_poff = (long)offset & (devBlockSize - 1); return (0); } diff --git a/bsd/isofs/cd9660/cd9660_lookup.c b/bsd/isofs/cd9660/cd9660_lookup.c index ba3050e95..ae81d2216 100644 --- a/bsd/isofs/cd9660/cd9660_lookup.c +++ b/bsd/isofs/cd9660/cd9660_lookup.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
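The cd9660_bmap/cd9660_cmap hunks above make ISO 9660 "associated" files (the hidden companions that carry Macintosh resource-fork data) present a synthesized AppleDouble header: offsets inside the header are not backed by the image at all (cmap reports a block number of -1), and every later offset maps to the on-disc extent shifted back by the header size, ADH_SIZE bytes or ADH_BLKS blocks. The lookup change that follows routes names carrying a "._" prefix to these associated entries. A minimal model of the translation, with ADH_SIZE assumed to be 1024 for illustration (the real constant lives in iso.h):

#include <stdio.h>

#define ADH_SIZE 1024    /* assumed AppleDouble header size */

static long map_associated(long iso_start_byte, long foffset)
{
    if (foffset < ADH_SIZE)
        return -1;       /* header range: synthesized, not on disc */
    return iso_start_byte + (foffset - ADH_SIZE);
}

int main(void)
{
    printf("%ld\n", map_associated(409600L, 0L));             /* -1 */
    printf("%ld\n", map_associated(409600L, (long)ADH_SIZE)); /* 409600 */
    return 0;
}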
* * @APPLE_LICENSE_HEADER_START@ * @@ -64,12 +64,7 @@ * from: @(#)ufs_lookup.c 7.33 (Berkeley) 5/19/91 * * @(#)cd9660_lookup.c 8.5 (Berkeley) 12/5/94 - - - - * HISTORY - * 22-Jan-98 radar 1669467 - ISO 9660 CD support - jwc - + * */ #include @@ -79,7 +74,6 @@ #include #include #include -#include #include #include @@ -137,7 +131,7 @@ cd9660_lookup(ap) struct buf *bp; /* a buffer of directory entries */ struct iso_directory_record *ep = NULL;/* the current directory entry */ int entryoffsetinblock; /* offset of ep in bp's buffer */ - int saveoffset = 0; /* offset of last directory entry in dir */ + int saveoffset = 0; /* offset of last directory entry in dir */ int numdirpasses; /* strategy for directory search */ doff_t endsearch; /* offset to end directory search */ struct vnode *pdp; /* saved dp during symlink work */ @@ -145,23 +139,22 @@ cd9660_lookup(ap) u_long bmask; /* block offset mask */ int lockparent; /* 1 => lockparent flag is set */ int wantparent; /* 1 => wantparent or lockparent flag */ - int wantrsrc; /* 1 => looking for resource fork */ + int wantassoc; int error; ino_t ino = 0; int reclen; u_short namelen; + int isoflags; char altname[ISO_RRIP_NAMEMAX]; int res; int len; char *name; struct vnode **vpp = ap->a_vpp; struct componentname *cnp = ap->a_cnp; - struct ucred *cred = cnp->cn_cred; int flags = cnp->cn_flags; int nameiop = cnp->cn_nameiop; struct proc *p = cnp->cn_proc; int devBlockSize=0; - long rsrcsize; size_t altlen; bp = NULL; @@ -171,45 +164,29 @@ cd9660_lookup(ap) imp = dp->i_mnt; lockparent = flags & LOCKPARENT; wantparent = flags & (LOCKPARENT|WANTPARENT); - wantrsrc = 0; + wantassoc = 0; + /* * Check accessiblity of directory. */ if (vdp->v_type != VDIR) return (ENOTDIR); - if ( (error = VOP_ACCESS(vdp, VEXEC, cred, p)) ) + if ( (error = VOP_ACCESS(vdp, VEXEC, cnp->cn_cred, p)) ) return (error); - /* - * Determine if we're looking for a resource fork - * note: this could cause a read off the end of the - * component name buffer in some rare cases. - */ - if ((flags & ISLASTCN) == 0 && - bcmp(&cnp->cn_nameptr[cnp->cn_namelen], - _PATH_RSRCFORKSPEC, sizeof(_PATH_RSRCFORKSPEC) - 1) == 0) { - flags |= ISLASTCN; - cnp->cn_consume = sizeof(_PATH_RSRCFORKSPEC) - 1; - wantrsrc = 1; - } /* * We now have a segment name to search for, and a directory to search. * * Before tediously performing a linear scan of the directory, * check the name cache to see if the directory/name pair * we are looking for is known already. - * Note: resource forks are never in the name cache */ - if ((error = cache_lookup(vdp, vpp, cnp)) && !wantrsrc) { + if ((error = cache_lookup(vdp, vpp, cnp))) { int vpid; /* capability number of vnode */ if (error == ENOENT) return (error); -#ifdef PARANOID - if ((vdp->v_flag & VROOT) && (flags & ISDOTDOT)) - panic("cd9660_lookup: .. through root"); -#endif /* * Get the next vnode in the path. * See comment below starting `Step through' for @@ -253,8 +230,15 @@ cd9660_lookup(ap) len = cnp->cn_namelen; name = cnp->cn_nameptr; altname[0] = '\0'; - rsrcsize = 0; - + /* + * A "._" prefix means, we are looking for an associated file + */ + if (imp->iso_ftype != ISO_FTYPE_RRIP && + *name == ASSOCCHAR1 && *(name+1) == ASSOCCHAR2) { + wantassoc = 1; + len -= 2; + name += 2; + } /* * Decode search name into UCS-2 (Unicode) */ @@ -281,7 +265,7 @@ cd9660_lookup(ap) * profiling time and hence has been removed in the interest * of simplicity. 
*/ - bmask = imp->im_bmask; + bmask = imp->im_sector_size - 1; if (nameiop != LOOKUP || dp->i_diroff == 0 || dp->i_diroff > dp->i_size) { entryoffsetinblock = 0; @@ -291,7 +275,7 @@ cd9660_lookup(ap) dp->i_offset = dp->i_diroff; if ((entryoffsetinblock = dp->i_offset & bmask) && - (error = VOP_BLKATOFF(vdp, (off_t)dp->i_offset, NULL, &bp))) + (error = VOP_BLKATOFF(vdp, SECTOFF(imp, dp->i_offset), NULL, &bp))) return (error); numdirpasses = 2; iso_nchstats.ncs_2passes++; @@ -308,7 +292,7 @@ searchloop: if ((dp->i_offset & bmask) == 0) { if (bp != NULL) brelse(bp); - if ( (error = VOP_BLKATOFF(vdp, (off_t)dp->i_offset, NULL, &bp)) ) + if ( (error = VOP_BLKATOFF(vdp, SECTOFF(imp,dp->i_offset), NULL, &bp)) ) return (error); entryoffsetinblock = 0; } @@ -322,38 +306,29 @@ searchloop: if (reclen == 0) { /* skip to next block, if any */ dp->i_offset = - (dp->i_offset & ~bmask) + imp->logical_block_size; + (dp->i_offset & ~bmask) + imp->im_sector_size; continue; } - if (reclen < ISO_DIRECTORY_RECORD_SIZE) + if (reclen < ISO_DIRECTORY_RECORD_SIZE) { /* illegal entry, stop */ break; - - if (entryoffsetinblock + reclen > imp->logical_block_size) - /* entries are not allowed to cross boundaries */ + } + if (entryoffsetinblock + reclen > imp->im_sector_size) { + /* entries are not allowed to cross sector boundaries */ break; - + } namelen = isonum_711(ep->name_len); + isoflags = isonum_711(ep->flags); if (reclen < ISO_DIRECTORY_RECORD_SIZE + namelen) /* illegal entry, stop */ break; - - /* remember the size of resource forks (associated files) */ - if ((isonum_711(ep->flags) & (directoryBit | associatedBit)) == associatedBit) { - if (namelen < sizeof(altname) && ino == 0) { - rsrcsize = isonum_733(ep->size); - bcopy(ep->name, altname, namelen); - altname[namelen] = '\0'; - altlen = namelen; - } - } /* * Check for a name match. */ if (imp->iso_ftype == ISO_FTYPE_RRIP) { - if ( isonum_711(ep->flags) & directoryBit ) + if (isoflags & directoryBit) ino = isodirino(ep, imp); else ino = (bp->b_blkno << imp->im_bshift) + entryoffsetinblock; @@ -364,7 +339,7 @@ searchloop: goto found; ino = 0; } else { - if ((!(isonum_711(ep->flags) & associatedBit)) == !wantrsrc) { + if ((!(isoflags & associatedBit)) == !wantassoc) { if ((len == 1 && *name == '.') || (flags & ISDOTDOT)) { @@ -382,14 +357,14 @@ searchloop: goto notfound; } else if (imp->iso_ftype != ISO_FTYPE_JOLIET && !(res = isofncmp(name,len, ep->name,namelen))) { - if ( isonum_711(ep->flags) & directoryBit ) + if ( isoflags & directoryBit ) ino = isodirino(ep, imp); else ino = (bp->b_blkno << imp->im_bshift) + entryoffsetinblock; saveoffset = dp->i_offset; } else if (imp->iso_ftype == ISO_FTYPE_JOLIET && !(res = ucsfncmp((u_int16_t*)name, len, (u_int16_t*) ep->name, namelen))) { - if ( isonum_711(ep->flags) & directoryBit ) + if ( isoflags & directoryBit ) ino = isodirino(ep, imp); else ino = (bp->b_blkno << imp->im_bshift) + entryoffsetinblock; @@ -416,7 +391,7 @@ foundino: lblkno(imp, saveoffset)) { if (bp != NULL) brelse(bp); - if ( (error = VOP_BLKATOFF(vdp, (off_t)saveoffset, NULL, &bp)) ) + if ( (error = VOP_BLKATOFF(vdp, SECTOFF(imp, saveoffset), NULL, &bp)) ) return (error); } entryoffsetinblock = saveoffset & bmask; @@ -443,7 +418,7 @@ notfound: /* * Insert name into cache (as non-existent) if appropriate. 
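/*
 * Editor's aside, not part of the patch: the sector math above in
 * numbers, assuming the default 2048-byte CD sector.  Directory entries
 * may straddle a logical block but never a sector, so the lookup loop
 * now reads whole sectors:
 *
 *   im_sector_size = 2048          so  bmask = 0x7ff
 *   dp->i_offset   = 5000
 *   SECTOFF(imp, 5000) = (5000 / 2048) * 2048 = 4096   buffer to read
 *   entryoffsetinblock = 5000 & 0x7ff         =  904   offset within it
 */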
*/ - if ((cnp->cn_flags & MAKEENTRY) && !wantrsrc) + if (cnp->cn_flags & MAKEENTRY) cache_enter(vdp, *vpp, cnp); if (nameiop == CREATE || nameiop == RENAME) { /* @@ -452,11 +427,7 @@ notfound: */ return (EROFS); } - - if (wantrsrc) - return (ENOTDIR); - else - return (ENOENT); + return (ENOENT); found: if (numdirpasses == 2) @@ -519,10 +490,6 @@ found: dp->i_ino != ino, ep, p); /* save parent inode number */ VTOI(tdp)->i_parent = VTOI(pdp)->i_number; - if (!wantrsrc && (tdp->v_type == VREG) && (rsrcsize > 0)) { - if (bcmp(ep->name, altname, altlen) == 0) - VTOI(tdp)->i_rsrcsize = rsrcsize; - } brelse(bp); if (error) return (error); @@ -534,7 +501,7 @@ found: /* * Insert name into cache if appropriate. */ - if ((cnp->cn_flags & MAKEENTRY) && !wantrsrc) + if (cnp->cn_flags & MAKEENTRY) cache_enter(vdp, *vpp, cnp); return (0); @@ -565,7 +532,11 @@ cd9660_blkatoff(ap) imp = ip->i_mnt; lbn = lblkno(imp, ap->a_offset); bsize = blksize(imp, ip, lbn); - + if ((bsize != imp->im_sector_size) && + (ap->a_offset & (imp->im_sector_size - 1)) == 0) { + bsize = imp->im_sector_size; + } + if ( (error = bread(ap->a_vp, lbn, bsize, NOCRED, &bp)) ) { brelse(bp); *ap->a_bpp = NULL; diff --git a/bsd/isofs/cd9660/cd9660_mount.h b/bsd/isofs/cd9660/cd9660_mount.h index 8be4d3706..bf848a2ba 100644 --- a/bsd/isofs/cd9660/cd9660_mount.h +++ b/bsd/isofs/cd9660/cd9660_mount.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -71,16 +71,20 @@ /* * Arguments to mount ISO 9660 filesystems. */ +struct CDTOC; struct iso_args { char *fspec; /* block special device to mount */ struct export_args export; /* network export info */ int flags; /* mounting flags, see below */ int ssector; /* starting sector, 0 for 1st session */ + int toc_length; /* Size of *toc, including the toc.length field */ + struct CDTOC *toc; }; #define ISOFSMNT_NORRIP 0x00000001 /* disable Rock Ridge Ext.*/ #define ISOFSMNT_GENS 0x00000002 /* enable generation numbers */ #define ISOFSMNT_EXTATT 0x00000004 /* enable extended attributes */ #define ISOFSMNT_NOJOLIET 0x00000008 /* disable Joliet Ext.*/ +#define ISOFSMNT_TOC 0x00000010 /* iso_args.toc is valid */ #endif /* __APPLE_API_UNSTABLE */ #endif /* __ISOFS_CD9660_CD9660_MOUNT_H__ */ diff --git a/bsd/isofs/cd9660/cd9660_node.c b/bsd/isofs/cd9660/cd9660_node.c index b9b0cdcce..956074d1d 100644 --- a/bsd/isofs/cd9660/cd9660_node.c +++ b/bsd/isofs/cd9660/cd9660_node.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -82,6 +82,7 @@ #include #include #include +#include #include #include @@ -101,11 +102,9 @@ u_long idvhash; #define DNOHASH(device, inum) (((device) + ((inum)>>12)) & idvhash) #endif -/* defined in bsd/ufs/ufs/ufs_inode.c */ +/* defined in bsd/vfs/vfs_subr.c */ extern int prtactive; /* 1 => print out reclaim of active vnodes */ -extern void cache_purge (struct vnode *vp); - extern u_char isonullname[]; /* * Initialize hash links for inodes and dnodes. 
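/*
 * Editor's sketch, not part of the patch: how a userland mount tool
 * might hand the kernel a CD table of contents through the iso_args
 * fields added above.  The header path, the function name, and how the
 * caller obtained the CDTOC are assumptions for illustration; only the
 * iso_args fields and ISOFSMNT_TOC come from the patch.
 */
#include <string.h>
#include <sys/mount.h>
#include <isofs/cd9660/cd9660_mount.h>   /* assumed install path */

static int
mount_cd9660_with_toc(const char *dev, const char *dir,
                      struct CDTOC *toc, int toc_length)
{
	struct iso_args args;

	memset(&args, 0, sizeof(args));
	args.fspec = (char *)dev;       /* block device, e.g. /dev/disk1s0 */
	args.ssector = 0;               /* use the first session */
	if (toc != NULL) {
		args.flags |= ISOFSMNT_TOC;   /* iso_mountfs will copyin the TOC */
		args.toc = toc;
		args.toc_length = toc_length; /* includes toc->length itself */
	}
	return mount("cd9660", dir, MNT_RDONLY, &args);
}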
@@ -315,6 +314,8 @@ cd9660_reclaim(ap) } if (ip->i_namep != isonullname) FREE(ip->i_namep, M_TEMP); + if (ip->i_riff != NULL) + FREE(ip->i_riff, M_TEMP); FREE_ZONE(vp->v_data, sizeof(struct iso_node), M_ISOFSNODE); vp->v_data = NULL; return (0); diff --git a/bsd/isofs/cd9660/cd9660_node.h b/bsd/isofs/cd9660/cd9660_node.h index 57318b873..76d9f44fd 100644 --- a/bsd/isofs/cd9660/cd9660_node.h +++ b/bsd/isofs/cd9660/cd9660_node.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -114,6 +114,7 @@ struct iso_node { struct iso_node *i_next, **i_prev; /* hash chain */ struct vnode *i_vnode; /* vnode associated with this inode */ struct vnode *i_devvp; /* vnode for block I/O */ + u_int32_t i_flag; /* flags, see below */ dev_t i_dev; /* device where inode resides */ ino_t i_number; /* the identity of the inode */ /* we use the actual starting block of the file */ @@ -140,11 +141,15 @@ struct iso_node { u_int16_t i_FinderFlags; /* MacOS finder flags */ u_int16_t i_entries; /* count of directory entries */ + + struct riff_header *i_riff; }; #define i_forw i_chain[0] #define i_back i_chain[1] +/* These flags are kept in i_flag. */ +#define ISO_ASSOCIATED 0x0001 /* node is an associated file. */ /* defines VTOI and ITOV macros */ #undef VTOI @@ -162,13 +167,13 @@ int cd9660_close __P((struct vop_close_args *)); int cd9660_access __P((struct vop_access_args *)); int cd9660_getattr __P((struct vop_getattr_args *)); int cd9660_read __P((struct vop_read_args *)); +int cd9660_xa_read __P((struct vop_read_args *)); int cd9660_ioctl __P((struct vop_ioctl_args *)); int cd9660_select __P((struct vop_select_args *)); int cd9660_mmap __P((struct vop_mmap_args *)); int cd9660_seek __P((struct vop_seek_args *)); int cd9660_readdir __P((struct vop_readdir_args *)); int cd9660_readlink __P((struct vop_readlink_args *)); -int cd9660_abortop __P((struct vop_abortop_args *)); int cd9660_inactive __P((struct vop_inactive_args *)); int cd9660_reclaim __P((struct vop_reclaim_args *)); int cd9660_bmap __P((struct vop_bmap_args *)); diff --git a/bsd/isofs/cd9660/cd9660_rrip.c b/bsd/isofs/cd9660/cd9660_rrip.c index 25b2b6979..68855255a 100644 --- a/bsd/isofs/cd9660/cd9660_rrip.c +++ b/bsd/isofs/cd9660/cd9660_rrip.c @@ -300,7 +300,8 @@ cd9660_rrip_defname(isodir,ana) switch (*isodir->name) { default: isofntrans(isodir->name, isonum_711(isodir->name_len), - ana->outbuf, ana->outlen, 1); + ana->outbuf, ana->outlen, 1, + isonum_711(isodir->flags) & associatedBit); break; case 0: *ana->outlen = 1; diff --git a/bsd/isofs/cd9660/cd9660_util.c b/bsd/isofs/cd9660/cd9660_util.c index dc724462a..db3f5adfd 100644 --- a/bsd/isofs/cd9660/cd9660_util.c +++ b/bsd/isofs/cd9660/cd9660_util.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -219,14 +219,24 @@ ucsfncmp(fn, fnlen, ucsfn, ucslen) * translate a filename */ void -isofntrans(infn, infnlen, outfn, outfnlen, original) +isofntrans(infn, infnlen, outfn, outfnlen, original, assoc) u_char *infn, *outfn; int infnlen; u_short *outfnlen; int original; + int assoc; { int fnidx = 0; + /* + * Add a "._" prefix for associated files + */ + if (assoc) { + *outfn++ = ASSOCCHAR1; + *outfn++ = ASSOCCHAR2; + fnidx += 2; + infnlen +=2; + } for (; fnidx < infnlen; fnidx++) { char c = *infn++; @@ -259,12 +269,13 @@ isofntrans(infn, infnlen, outfn, outfnlen, original) * translate a UCS-2 filename to UTF-8 */ void -ucsfntrans(infn, infnlen, outfn, outfnlen, dir) +ucsfntrans(infn, infnlen, outfn, outfnlen, dir, assoc) u_int16_t *infn; int infnlen; u_char *outfn; u_short *outfnlen; int dir; + int assoc; { if (infnlen == 1) { strcpy(outfn, ".."); @@ -281,6 +292,13 @@ ucsfntrans(infn, infnlen, outfn, outfnlen, dir) fnidx = infnlen/2; flags = 0; + /* + * Add a "._" prefix for associated files + */ + if (assoc) { + *outfn++ = ASSOCCHAR1; + *outfn++ = ASSOCCHAR2; + } if (!dir) { /* strip file version number */ for (fnidx--; fnidx > 0; fnidx--) { @@ -301,7 +319,7 @@ ucsfntrans(infn, infnlen, outfn, outfnlen, dir) flags |= UTF_REVERSE_ENDIAN; (void) utf8_encodestr(infn, fnidx * 2, outfn, &outbytes, ISO_JOLIET_NAMEMAX, 0, flags); - *outfnlen = outbytes; + *outfnlen = assoc ? outbytes + 2 : outbytes; } } @@ -317,6 +335,7 @@ isochildcount(vdp, dircnt, filcnt) { struct iso_node *dp; struct buf *bp = NULL; + struct iso_mnt *imp; struct iso_directory_record *ep; u_long bmask; int error = 0; @@ -327,8 +346,9 @@ isochildcount(vdp, dircnt, filcnt) long diroffset; dp = VTOI(vdp); - bmask = dp->i_mnt->im_bmask; - logblksize = dp->i_mnt->logical_block_size; + imp = dp->i_mnt; + bmask = imp->im_sector_size - 1; + logblksize = imp->im_sector_size; blkoffset = diroffset = 0; dirs = files = 0; @@ -340,7 +360,7 @@ isochildcount(vdp, dircnt, filcnt) if ((diroffset & bmask) == 0) { if (bp != NULL) brelse(bp); - if ( (error = VOP_BLKATOFF(vdp, diroffset, NULL, &bp)) ) + if ( (error = VOP_BLKATOFF(vdp, SECTOFF(imp, diroffset), NULL, &bp)) ) break; blkoffset = 0; } @@ -363,6 +383,15 @@ isochildcount(vdp, dircnt, filcnt) break; } + /* + * Some poorly mastered discs have an incorrect directory + * file size. If the '.' entry has a better size (bigger) + * then use that instead. + */ + if ((diroffset == 0) && (isonum_733(ep->size) > dp->i_size)) { + dp->i_size = isonum_733(ep->size); + } + if ( isonum_711(ep->flags) & directoryBit ) dirs++; else if ((isonum_711(ep->flags) & associatedBit) == 0) @@ -666,16 +695,42 @@ packvolattr (struct attrlist *alist, }; if (a & ATTR_VOL_ENCODINGSUSED) *((unsigned long long *)attrbufptr)++ = (unsigned long long)0; if (a & ATTR_VOL_CAPABILITIES) { - ((vol_capabilities_attr_t *)attrbufptr)->capabilities[VOL_CAPABILITIES_FORMAT] = VOL_CAP_FMT_PERSISTENTOBJECTIDS; + ((vol_capabilities_attr_t *)attrbufptr)->capabilities[VOL_CAPABILITIES_FORMAT] = + (imp->iso_ftype == ISO_FTYPE_RRIP ? VOL_CAP_FMT_SYMBOLICLINKS : 0) | + (imp->iso_ftype == ISO_FTYPE_RRIP ? VOL_CAP_FMT_HARDLINKS : 0) | + (imp->iso_ftype == ISO_FTYPE_RRIP || imp->iso_ftype == ISO_FTYPE_JOLIET + ? 
VOL_CAP_FMT_CASE_SENSITIVE : 0) | + VOL_CAP_FMT_CASE_PRESERVING | + VOL_CAP_FMT_FAST_STATFS; ((vol_capabilities_attr_t *)attrbufptr)->capabilities[VOL_CAPABILITIES_INTERFACES] = - VOL_CAP_INT_ATTRLIST | VOL_CAP_INT_NFSEXPORT; + VOL_CAP_INT_ATTRLIST | + VOL_CAP_INT_NFSEXPORT; ((vol_capabilities_attr_t *)attrbufptr)->capabilities[VOL_CAPABILITIES_RESERVED1] = 0; ((vol_capabilities_attr_t *)attrbufptr)->capabilities[VOL_CAPABILITIES_RESERVED2] = 0; ((vol_capabilities_attr_t *)attrbufptr)->valid[VOL_CAPABILITIES_FORMAT] = - VOL_CAP_FMT_PERSISTENTOBJECTIDS | VOL_CAP_FMT_SYMBOLICLINKS | VOL_CAP_FMT_HARDLINKS; + VOL_CAP_FMT_PERSISTENTOBJECTIDS | + VOL_CAP_FMT_SYMBOLICLINKS | + VOL_CAP_FMT_HARDLINKS | + VOL_CAP_FMT_JOURNAL | + VOL_CAP_FMT_JOURNAL_ACTIVE | + VOL_CAP_FMT_NO_ROOT_TIMES | + VOL_CAP_FMT_SPARSE_FILES | + VOL_CAP_FMT_ZERO_RUNS | + VOL_CAP_FMT_CASE_SENSITIVE | + VOL_CAP_FMT_CASE_PRESERVING | + VOL_CAP_FMT_FAST_STATFS; ((vol_capabilities_attr_t *)attrbufptr)->valid[VOL_CAPABILITIES_INTERFACES] = - VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_ATTRLIST | VOL_CAP_INT_NFSEXPORT; + VOL_CAP_INT_SEARCHFS | + VOL_CAP_INT_ATTRLIST | + VOL_CAP_INT_NFSEXPORT | + VOL_CAP_INT_READDIRATTR | + VOL_CAP_INT_EXCHANGEDATA | + VOL_CAP_INT_COPYFILE | + VOL_CAP_INT_ALLOCATE | + VOL_CAP_INT_VOL_RENAME | + VOL_CAP_INT_ADVLOCK | + VOL_CAP_INT_FLOCK; ((vol_capabilities_attr_t *)attrbufptr)->valid[VOL_CAPABILITIES_RESERVED1] = 0; ((vol_capabilities_attr_t *)attrbufptr)->valid[VOL_CAPABILITIES_RESERVED2] = 0; diff --git a/bsd/isofs/cd9660/cd9660_vfsops.c b/bsd/isofs/cd9660/cd9660_vfsops.c index 929eb5446..cabf82778 100644 --- a/bsd/isofs/cd9660/cd9660_vfsops.c +++ b/bsd/isofs/cd9660/cd9660_vfsops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -75,7 +75,7 @@ #include #include #include -#include +#include #include #include #include @@ -88,6 +88,38 @@ #include #include +/* + * Minutes, Seconds, Frames (M:S:F) + */ +struct CDMSF { + u_char minute; + u_char second; + u_char frame; +}; + +/* + * Table Of Contents + */ +struct CDTOC_Desc { + u_char session; + u_char ctrl_adr; /* typed to be machine and compiler independent */ + u_char tno; + u_char point; + struct CDMSF address; + u_char zero; + struct CDMSF p; +}; + +struct CDTOC { + u_short length; /* in native cpu endian */ + u_char first_session; + u_char last_session; + struct CDTOC_Desc trackdesc[1]; +}; + +#define MSF_TO_LBA(msf) \ + (((((msf).minute * 60UL) + (msf).second) * 75UL) + (msf).frame - 150) + u_char isonullname[] = "\0"; extern int enodev (); @@ -162,8 +194,14 @@ cd9660_mountroot() LIST_INIT(&mp->mnt_vnodelist); args.flags = ISOFSMNT_ROOT; args.ssector = 0; + args.fspec = 0; + args.toc_length = 0; + args.toc = 0; if ((error = iso_mountfs(rootvp, mp, p, &args))) { vrele(rootvp); /* release the reference from bdevvp() */ + + if (mp->mnt_kern_flag & MNTK_IO_XINFO) + FREE(mp->mnt_xinfo_ptr, M_TEMP); FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); return (error); } @@ -246,8 +284,8 @@ cd9660_mount(mp, path, data, ndp, p) return (error); } - /* Set the mount flag to indicate that we support volfs */ - mp->mnt_flag |= MNT_DOVOLFS; + /* Indicate that we don't support volfs */ + mp->mnt_flag &= ~MNT_DOVOLFS; (void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size); bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size); @@ -257,6 +295,119 @@ cd9660_mount(mp, path, data, ndp, p) return (0); } +/* + * Find the BSD device for the physical disk corresponding to the + * mount point's device. We use this physical device to read whole + * (2352 byte) sectors from the CD to get the content for the video + * files (tracks). + * + * The "path" argument is the path to the block device that the volume + * is being mounted on (args.fspec). It should be of the form: + * /dev/disk1s0 + * where the last "s0" part is stripped off to determine the physical + * device's path. It is assumed to be in user memory. + */ +static struct vnode * +cd9660_phys_device(char *path, struct proc *p) +{ + int err; + char *whole_path = NULL; // path to "whole" device + char *s, *saved; + struct nameidata nd; + struct vnode *result; + size_t actual_size; + + if (path == NULL) + return NULL; + + result = NULL; + + /* Make a copy of the mount from name, then remove trailing "s...". */ + MALLOC(whole_path, char *, MNAMELEN, M_ISOFSMNT, M_WAITOK); + copyinstr(path, whole_path, MNAMELEN-1, &actual_size); + + /* + * I would use strrchr or rindex here, but those are declared __private_extern__, + * and can't be used across component boundaries at this time. + */ + for (s=whole_path, saved=NULL; *s; ++s) + if (*s == 's') + saved = s; + *saved = '\0'; + + /* Lookup the "whole" device. */ + NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, whole_path, p); + err = namei(&nd); + if (err) { + printf("isofs: Cannot find physical device: %s\n", whole_path); + goto done; + } + + /* Open the "whole" device. */ + err = VOP_OPEN(nd.ni_vp, FREAD, FSCRED, p); + if (err) { + vrele(nd.ni_vp); + printf("isofs: Cannot open physical device: %s\n", whole_path); + goto done; + } + + result = nd.ni_vp; + +done: + FREE(whole_path, M_ISOFSMNT); + return result; +} + + +/* + * See if the given CD-ROM XA disc appears to be a Video CD + * (version < 2.0; so, not SVCD). 
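/*
 * Editor's sketch, not part of the patch: MSF_TO_LBA as a self-contained
 * function, for checking TOC math outside the kernel.  CDs count 75
 * frames per second, and the first data sector sits after a 2-second
 * (150-frame) lead-in, so M:S:F 00:02:00 is logical block 0.
 */
#include <stdio.h>

static unsigned long
msf_to_lba(unsigned char minute, unsigned char second, unsigned char frame)
{
	return ((minute * 60UL + second) * 75UL) + frame - 150;
}

int
main(void)
{
	printf("%lu\n", msf_to_lba(0, 2, 0));   /* 0: first data sector */
	printf("%lu\n", msf_to_lba(0, 4, 32));  /* 182 */
	return 0;
}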
If so, fill in the extent + * information for the MPEGAV directory, set the VCD flag, + * and return true. + */ +static int +cd9660_find_video_dir(struct iso_mnt *isomp) +{ + int result, err; + struct vnode *rootvp = NULL; + struct vnode *videovp = NULL; + struct componentname cn; + char dirname[] = "MPEGAV"; + + result = 0; /* Assume not a video CD */ + + err = cd9660_root(isomp->im_mountp, &rootvp); + if (err) { + printf("cd9660_find_video_dir: cd9660_root failed (%d)\n", err); + return 0; /* couldn't find video dir */ + } + + cn.cn_nameiop = LOOKUP; + cn.cn_flags = LOCKPARENT|ISLASTCN; + cn.cn_proc = current_proc(); + cn.cn_cred = cn.cn_proc->p_ucred; + cn.cn_pnbuf = dirname; + cn.cn_pnlen = sizeof(dirname)-1; + cn.cn_nameptr = cn.cn_pnbuf; + cn.cn_namelen = cn.cn_pnlen; + + err = VOP_LOOKUP(rootvp, &videovp, &cn); + if (err == 0) { + struct iso_node *ip = VTOI(videovp); + result = 1; /* Looks like video CD */ + isomp->video_dir_start = ip->iso_start; + isomp->video_dir_end = ip->iso_start + (ip->i_size >> isomp->im_bshift); + isomp->im_flags2 |= IMF2_IS_VCD; + } + + if (videovp != NULL) + vput(videovp); + if (rootvp != NULL) + vput(rootvp); + + return result; +} + /* * Common code for mount and mountroot */ @@ -336,6 +487,16 @@ iso_mountfs(devvp, mp, p, argp) printf("cd9660_vfsops.c: iso_mountfs: " "Invalid ID in volume desciptor.\n"); #endif + /* There should be a primary volume descriptor followed by any + * secondary volume descriptors, then an end volume descriptor. + * Some discs are mastered without an end volume descriptor or + * they have the type field set and the volume descriptor ID is + * not set. If we at least found a primary volume descriptor, + * mount the disc. + */ + if (pri != NULL) + break; + error = EINVAL; goto out; } @@ -405,6 +566,7 @@ iso_mountfs(devvp, mp, p, argp) MALLOC(isomp, struct iso_mnt *, sizeof *isomp, M_ISOFSMNT, M_WAITOK); bzero((caddr_t)isomp, sizeof *isomp); + isomp->im_sector_size = ISO_DEFAULT_BLOCK_SIZE; isomp->logical_block_size = logical_block_size; isomp->volume_space_size = isonum_733 (pri->volume_space_size); /* @@ -444,8 +606,9 @@ iso_mountfs(devvp, mp, p, argp) /* See if this is a CD-XA volume */ if (bcmp( pri->CDXASignature, ISO_XA_ID, - sizeof(pri->CDXASignature) ) == 0 ) + sizeof(pri->CDXASignature) ) == 0 ) { isomp->im_flags2 |= IMF2_IS_CDXA; + } isomp->im_bmask = logical_block_size - 1; isomp->im_bshift = 0; @@ -467,6 +630,20 @@ iso_mountfs(devvp, mp, p, argp) isomp->im_devvp = devvp; devvp->v_specflags |= SI_MOUNTEDON; + + /* + * If the logical block size is not 2K then we must + * set the block device's physical block size to this + * disc's logical block size. + * + */ + if (logical_block_size != iso_bsize) { + iso_bsize = logical_block_size; + if ((error = VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, + (caddr_t)&iso_bsize, FWRITE, p->p_ucred, p))) + goto out; + devvp->v_specsize = iso_bsize; + } /* Check the Rock Ridge Extention support */ if (!(argp->flags & ISOFSMNT_NORRIP)) { @@ -523,13 +700,13 @@ skipRRIP: /* * On Joliet CDs use the UCS-2 volume identifier. * - * This name can have up to 15 UCS-2 chars and is - * terminated with 0x0000 or padded with 0x0020. + * This name can have up to 16 UCS-2 chars. 
*/ convflags = UTF_DECOMPOSED; if (BYTE_ORDER != BIG_ENDIAN) convflags |= UTF_REVERSE_ENDIAN; - for (i = 0, uchp = (u_int16_t *)sup->volume_id; i < 15 && uchp[i]; ++i); + uchp = (u_int16_t *)sup->volume_id; + for (i = 0; i < 16 && uchp[i]; ++i); if ((utf8_encodestr((u_int16_t *)sup->volume_id, (i * 2), vol_id, &convbytes, sizeof(vol_id), 0, convflags) == 0) && convbytes && (vol_id[0] != ' ')) { @@ -539,7 +716,7 @@ skipRRIP: strp = vol_id + convbytes - 1; while (strp > vol_id && *strp == ' ') *strp-- = '\0'; - bcopy(vol_id, isomp->volume_id, convbytes); + bcopy(vol_id, isomp->volume_id, convbytes + 1); } rootp = (struct iso_directory_record *) @@ -556,6 +733,19 @@ skipRRIP: supbp = NULL; } + /* If there was a TOC in the arguments, copy it in. */ + if (argp->flags & ISOFSMNT_TOC) { + MALLOC(isomp->toc, struct CDTOC *, argp->toc_length, M_ISOFSMNT, M_WAITOK); + if ((error = copyin(argp->toc, isomp->toc, argp->toc_length))) + goto out; + } + + /* See if this could be a Video CD */ + if ((isomp->im_flags2 & IMF2_IS_CDXA) && cd9660_find_video_dir(isomp)) { + /* Get the 2352-bytes-per-block device. */ + isomp->phys_devvp = cd9660_phys_device(argp->fspec, p); + } + return (0); out: if (bp) @@ -567,6 +757,8 @@ out: if (needclose) (void)VOP_CLOSE(devvp, FREAD, NOCRED, p); if (isomp) { + if (isomp->toc) + FREE((caddr_t)isomp->toc, M_ISOFSMNT); FREE((caddr_t)isomp, M_ISOFSMNT); mp->mnt_data = (qaddr_t)0; } @@ -630,6 +822,17 @@ cd9660_unmount(mp, mntflags, p) return(error); vrele(isomp->im_devvp); + + if (isomp->phys_devvp) { + error = VOP_CLOSE(isomp->phys_devvp, FREAD, FSCRED, p); + if (error && !force) + return error; + vrele(isomp->phys_devvp); + } + + if (isomp->toc) + FREE((caddr_t)isomp->toc, M_ISOFSMNT); + FREE((caddr_t)isomp, M_ISOFSMNT); mp->mnt_data = (qaddr_t)0; mp->mnt_flag &= ~MNT_LOCAL; @@ -767,7 +970,7 @@ cd9660_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp) * Get the export permission structure for this tuple. */ np = vfs_export_lookup(mp, &imp->im_export, nam); - if (np == NULL) + if (nam && (np == NULL)) return (EACCES); if ( (error = VFS_VGET(mp, &ifhp->ifid_ino, &nvp)) ) { @@ -781,11 +984,99 @@ cd9660_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp) return (ESTALE); } *vpp = nvp; - *exflagsp = np->netc_exflags; - *credanonp = &np->netc_anon; + if (np) { + *exflagsp = np->netc_exflags; + *credanonp = &np->netc_anon; + } return (0); } +/* + * Scan the TOC for the track which contains the given sector. + * + * If there is no matching track, or no TOC, then return -1. + */ +static int +cd9660_track_for_sector(struct CDTOC *toc, u_int sector) +{ + int i, tracks, result; + + if (toc == NULL) + return -1; + + tracks = toc->length / sizeof(struct CDTOC_Desc); + + result = -1; /* Sentinel in case we don't find the right track. */ + for (i=0; itrackdesc[i].point < 100 && MSF_TO_LBA(toc->trackdesc[i].p) <= sector) { + result = toc->trackdesc[i].point; + } + } + + return result; +} + +/* + * Determine whether the given node is really a video CD video + * file. Return non-zero if it appears to be a video file. + */ +static int +cd9660_is_video_file(struct iso_node *ip, struct iso_mnt *imp) +{ + int lbn; + int track; + + /* Check whether this could really be a Video CD at all */ + if (((imp->im_flags2 & IMF2_IS_VCD) == 0) || + imp->phys_devvp == NULL || + imp->toc == NULL) + { + return 0; /* Doesn't even look like VCD... */ + } + + /* Make sure it is a file */ + if ((ip->inode.iso_mode & S_IFMT) != S_IFREG) + return 0; /* Not even a file... */ + + /* + * And in the right directory. 
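/*
 * Editor's aside, not part of the patch: how the TOC scan above behaves
 * on a hypothetical two-track Video CD whose data track 2 starts at
 * M:S:F 00:04:32, i.e. sector 182.  The scan keeps the last track whose
 * start is at or below the target, assuming the descriptors appear in
 * ascending order:
 *
 *   cd9660_track_for_sector(toc, 181)  returns 1
 *   cd9660_track_for_sector(toc, 182)  returns 2
 *
 * cd9660_is_video_file (continued below) then requires AVSEQnn.DAT to
 * start in track nn + 1, so AVSEQ01.DAT must begin at sector 182 or
 * later in this example.
 */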
This assumes the same inode + * number convention that cd9660_vget_internal uses (that + * part of the inode number is the block containing the + * file's directory entry). + */ + lbn = lblkno(imp, ip->i_number); + if (lbn < imp->video_dir_start || lbn >= imp->video_dir_end) + return 0; /* Not in the correct directory */ + + /* + * If we get here, the file should be a video file, but + * do a couple of extra sanity checks just to be sure. + * First, verify the form of the name + */ + if (strlen(ip->i_namep) != 11 || /* Wrong length? */ + bcmp(ip->i_namep+7, ".DAT", 4) || /* Wrong extension? */ + (bcmp(ip->i_namep, "AVSEQ", 5) && /* Wrong beginning? */ + bcmp(ip->i_namep, "MUSIC", 5))) + { + return 0; /* Invalid name format */ + } + + /* + * Verify that AVSEQnn.DAT is in track #(nn+1). This would + * not be appropriate for Super Video CD, which allows + * multiple sessions, so the track numbers might not + * match up like this. + */ + track = (ip->i_namep[5] - '0') * 10 + ip->i_namep[6] - '0'; + if (track != (cd9660_track_for_sector(imp->toc, ip->iso_start) - 1)) + { + return 0; /* Wrong number in name */ + } + + /* It must be a video file if we got here. */ + return 1; +} + int cd9660_vget(mp, ino, vpp) struct mount *mp; @@ -936,15 +1227,31 @@ cd9660_vget_internal(mp, ino, vpp, relocated, isodir, p) * go get apple extensions to ISO directory record or use * defaults when there are no apple extensions. */ - if ( (isonum_711( isodir->flags ) & directoryBit) == 0 ) { + if ( ((isonum_711( isodir->flags ) & directoryBit) == 0) && + (imp->iso_ftype != ISO_FTYPE_RRIP) ) { /* This is an ISO directory record for a file */ - DRGetTypeCreatorAndFlags( imp, isodir, &ip->i_FileType, - &ip->i_Creator, &ip->i_FinderFlags ); + DRGetTypeCreatorAndFlags(imp, isodir, &ip->i_FileType, + &ip->i_Creator, &ip->i_FinderFlags); + + if (isonum_711(isodir->flags) & associatedBit) + ip->i_flag |= ISO_ASSOCIATED; + } + + /* + * Shadow the ISO 9660 invisible state to the FinderInfo + */ + if (isonum_711(isodir->flags) & existenceBit) { + ip->i_FinderFlags |= fInvisibleBit; } ip->iso_extent = isonum_733(isodir->extent); ip->i_size = isonum_733(isodir->size); ip->iso_start = isonum_711(isodir->ext_attr_length) + ip->iso_extent; + /* + * account for AppleDouble header + */ + if (ip->i_flag & ISO_ASSOCIATED) + ip->i_size += ADH_SIZE; /* * if we have a valid name, fill in i_namep with UTF-8 name @@ -965,13 +1272,13 @@ cd9660_vget_internal(mp, ino, vpp, relocated, isodir, p) case ISO_FTYPE_JOLIET: ucsfntrans((u_int16_t *)isodir->name, namelen, utf8namep, &namelen, - isonum_711(isodir->flags) & directoryBit); + isonum_711(isodir->flags) & directoryBit, ip->i_flag & ISO_ASSOCIATED); break; default: isofntrans (isodir->name, namelen, utf8namep, &namelen, - imp->iso_ftype == ISO_FTYPE_9660); + imp->iso_ftype == ISO_FTYPE_9660, ip->i_flag & ISO_ASSOCIATED); } utf8namep[namelen] = '\0'; @@ -1005,6 +1312,22 @@ cd9660_vget_internal(mp, ino, vpp, relocated, isodir, p) break; } + /* + * See if this is a Video CD file. If so, we must adjust the + * length to account for larger sectors plus the RIFF header. + * We also must substitute the VOP_READ and VOP_PAGEIN functions. + * + * The cd9660_is_video_file routine assumes that the inode has + * been completely set up; it refers to several fields. + * + * This must be done before we release bp, because isodir + * points into bp's data. 
+ */ + if (cd9660_is_video_file(ip, imp)) + { + cd9660_xa_init(vp, isodir); + } + if (bp != 0) brelse(bp); @@ -1158,8 +1481,14 @@ DRGetTypeCreatorAndFlags( struct iso_mnt * theMountPointPtr, myPtr += 14;/* add in CD-XA fixed record offset (tnx, Phillips) */ myNewAppleExtPtr = (NewAppleExtension *) myPtr; - /* calculate the "real" end of the directory record information */ + /* + * Calculate the "real" end of the directory record information. + * + * Note: We always read the first 4 bytes of the System-Use data, so + * adjust myPtr down so we don't read off the end of the directory! + */ myPtr = ((char *) theDirRecPtr) + (isonum_711(theDirRecPtr->length)); + myPtr -= sizeof(NewAppleExtension) - 1; while( (char *) myNewAppleExtPtr < myPtr ) /* end of directory buffer */ { /* @@ -1169,8 +1498,8 @@ DRGetTypeCreatorAndFlags( struct iso_mnt * theMountPointPtr, * struct OptionalSystemUse * { * byte Signature[2]; - * byte systemUseID; * byte OSULength; + * byte systemUseID; * byte fileType[4]; # only if HFS * byte fileCreator[4]; # only if HFS * byte finderFlags[2]; # only if HFS diff --git a/bsd/isofs/cd9660/cd9660_vnops.c b/bsd/isofs/cd9660/cd9660_vnops.c index aa94424e6..f0b62e53e 100644 --- a/bsd/isofs/cd9660/cd9660_vnops.c +++ b/bsd/isofs/cd9660/cd9660_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -91,8 +91,8 @@ #include #include #include - #include +#include #include #include @@ -308,9 +308,52 @@ cd9660_read(ap) imp = ip->i_mnt; VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); - if (UBCISVALID(vp)) - error = cluster_read(vp, uio, (off_t)ip->i_size, devBlockSize, 0); - else { + if (UBCISVALID(vp)) { + /* + * Copy any part of the Apple Double header. 
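/*
 * Editor's aside, not part of the patch: the file layout an associated
 * ("._") file presents once ADH_SIZE (4096) has been added to i_size in
 * cd9660_vget_internal:
 *
 *   0    .. 81          AppleDouble header, synthesized on the fly
 *   82   .. 4095        filler with no resource data (the pagein path
 *                       below zero-fills this range)
 *   4096 .. i_size-1    resource fork, read from the media starting at
 *                       iso_start
 *
 * This is why cd9660_bmap subtracts ADH_BLKS blocks and cd9660_cmap
 * subtracts ADH_SIZE bytes before mapping an offset to a device block,
 * and why offsets below ADH_SIZE are never sent to the device.  The 82
 * assumes sizeof(apple_double_header_t) under mac68k alignment; see the
 * layout note in iso.h below.
 */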
+ */ + if ((ip->i_flag & ISO_ASSOCIATED) && (uio->uio_offset < ADH_SIZE)) { + apple_double_header_t header; + int bytes; + + if (uio->uio_offset < sizeof(apple_double_header_t)) { + header.magic = APPLEDOUBLE_MAGIC; + header.version = APPLEDOUBLE_VERSION; + header.count = 2; + header.entries[0].entryID = APPLEDOUBLE_FINDERINFO; + header.entries[0].offset = offsetof(apple_double_header_t, finfo); + header.entries[0].length = 32; + header.entries[1].entryID = APPLEDOUBLE_RESFORK; + header.entries[1].offset = ADH_SIZE; + header.entries[1].length = ip->i_size - ADH_SIZE; + header.finfo.fdType = ip->i_FileType; + header.finfo.fdCreator = ip->i_Creator; + header.finfo.fdFlags = ip->i_FinderFlags; + header.finfo.fdLocation.v = -1; + header.finfo.fdLocation.h = -1; + header.finfo.fdReserved = 0; + + bytes = min(uio->uio_resid, sizeof(apple_double_header_t) - uio->uio_offset); + error = uiomove(((char *) &header) + uio->uio_offset, bytes, uio); + if (error) + return error; + } + if (uio->uio_resid && uio->uio_offset < ADH_SIZE) { + caddr_t buffer; + + if (kmem_alloc(kernel_map, (vm_offset_t *)&buffer, ADH_SIZE)) { + return (ENOMEM); + } + bytes = min(uio->uio_resid, ADH_SIZE - uio->uio_offset); + error = uiomove(((char *) buffer) + uio->uio_offset, bytes, uio); + kmem_free(kernel_map, (vm_offset_t)buffer, ADH_SIZE); + if (error) + return error; + } + } + if (uio->uio_resid > 0) + error = cluster_read(vp, uio, (off_t)ip->i_size, devBlockSize, 0); + } else { do { lbn = lblkno(imp, uio->uio_offset); @@ -363,7 +406,6 @@ cd9660_ioctl(ap) struct proc *a_p; } */ *ap; { - printf("You did ioctl for isofs !!\n"); return (ENOTTY); } @@ -505,6 +547,10 @@ iso_shipdir(idp) /* * Vnode op for readdir + * + * Note that directories are sector aligned (2K) and + * that an entry can cross a logical block but not + * a sector. */ int cd9660_readdir(ap) @@ -536,7 +582,7 @@ cd9660_readdir(ap) dp = VTOI(vdp); imp = dp->i_mnt; - bmask = imp->im_bmask; + bmask = imp->im_sector_size - 1; MALLOC(idp, struct isoreaddir *, sizeof(*idp), M_TEMP, M_WAITOK); idp->saveent.d_namlen = 0; @@ -550,7 +596,7 @@ cd9660_readdir(ap) idp->curroff = uio->uio_offset; if ((entryoffsetinblock = idp->curroff & bmask) && - (error = VOP_BLKATOFF(vdp, (off_t)idp->curroff, NULL, &bp))) { + (error = VOP_BLKATOFF(vdp, SECTOFF(imp, idp->curroff), NULL, &bp))) { FREE(idp, M_TEMP); return (error); } @@ -565,7 +611,7 @@ cd9660_readdir(ap) if ((idp->curroff & bmask) == 0) { if (bp != NULL) brelse(bp); - if ( (error = VOP_BLKATOFF(vdp, (off_t)idp->curroff, NULL, &bp)) ) + if ((error = VOP_BLKATOFF(vdp, SECTOFF(imp, idp->curroff), NULL, &bp))) break; entryoffsetinblock = 0; } @@ -579,7 +625,7 @@ cd9660_readdir(ap) if (reclen == 0) { /* skip to next block, if any */ idp->curroff = - (idp->curroff & ~bmask) + imp->logical_block_size; + (idp->curroff & ~bmask) + imp->im_sector_size; continue; } @@ -589,7 +635,7 @@ cd9660_readdir(ap) break; } - if (entryoffsetinblock + reclen > imp->logical_block_size) { + if (entryoffsetinblock + reclen > imp->im_sector_size) { error = EINVAL; /* illegal directory, so stop looking */ break; @@ -603,17 +649,20 @@ cd9660_readdir(ap) break; } - /* skip over associated files (Mac OS resource fork) */ - if (isonum_711(ep->flags) & associatedBit) { - idp->curroff += reclen; - entryoffsetinblock += reclen; - continue; + /* + * Some poorly mastered discs have an incorrect directory + * file size. If the '.' entry has a better size (bigger) + * then use that instead. 
+ */ + if ((uio->uio_offset == 0) && (isonum_733(ep->size) > endsearch)) { + dp->i_size = endsearch = isonum_733(ep->size); } if ( isonum_711(ep->flags) & directoryBit ) idp->current.d_fileno = isodirino(ep, imp); else { - idp->current.d_fileno = (bp->b_blkno << imp->im_bshift) + entryoffsetinblock; + idp->current.d_fileno = (bp->b_blkno << imp->im_bshift) + + entryoffsetinblock; } idp->curroff += reclen; @@ -630,7 +679,8 @@ cd9660_readdir(ap) case ISO_FTYPE_JOLIET: ucsfntrans((u_int16_t *)ep->name, idp->current.d_namlen, idp->current.d_name, &namelen, - isonum_711(ep->flags) & directoryBit); + isonum_711(ep->flags) & directoryBit, + isonum_711(ep->flags) & associatedBit); idp->current.d_namlen = (u_char)namelen; if (idp->current.d_namlen) error = iso_uiodir(idp,&idp->current,idp->curroff); @@ -650,7 +700,8 @@ cd9660_readdir(ap) default: isofntrans(ep->name,idp->current.d_namlen, idp->current.d_name, &namelen, - imp->iso_ftype == ISO_FTYPE_9660); + imp->iso_ftype == ISO_FTYPE_9660, + isonum_711(ep->flags) & associatedBit); idp->current.d_namlen = (u_char)namelen; if (imp->iso_ftype == ISO_FTYPE_DEFAULT) error = iso_shipdir(idp); @@ -829,22 +880,6 @@ cd9660_readlink(ap) return (0); } -/* - * Ufs abort op, called after namei() when a CREATE/DELETE isn't actually - * done. If a buffer has been saved in anticipation of a CREATE, delete it. - */ -int -cd9660_abortop(ap) - struct vop_abortop_args /* { - struct vnode *a_dvp; - struct componentname *a_cnp; - } */ *ap; -{ - if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) - FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); - return (0); -} - /* * Lock an inode. */ @@ -1017,22 +1052,65 @@ cd9660_pagein(ap) { struct vnode *vp = ap->a_vp; upl_t pl = ap->a_pl; - size_t size= ap->a_size; + size_t size = ap->a_size; off_t f_offset = ap->a_f_offset; vm_offset_t pl_offset = ap->a_pl_offset; int flags = ap->a_flags; register struct iso_node *ip = VTOI(vp); - int devBlockSize=0, error; + int error = 0; - /* check pageouts are for reg file only and ubc info is present*/ - if (UBCINVALID(vp)) - panic("cd9660_pagein: Not a VREG"); - UBCINFOCHECK("cd9660_pagein", vp); + /* + * Copy the Apple Double header. 
+ */ + if ((ip->i_flag & ISO_ASSOCIATED) && (f_offset == 0) && (size == ADH_SIZE)) { + apple_double_header_t header; + kern_return_t kret; + vm_offset_t ioaddr; + + kret = ubc_upl_map(pl, &ioaddr); + if (kret != KERN_SUCCESS) + panic("cd9660_xa_pagein: ubc_upl_map error = %d", kret); + ioaddr += pl_offset; + bzero((caddr_t)ioaddr, ADH_SIZE); + + header.magic = APPLEDOUBLE_MAGIC; + header.version = APPLEDOUBLE_VERSION; + header.count = 2; + header.entries[0].entryID = APPLEDOUBLE_FINDERINFO; + header.entries[0].offset = offsetof(apple_double_header_t, finfo); + header.entries[0].length = 32; + header.entries[1].entryID = APPLEDOUBLE_RESFORK; + header.entries[1].offset = ADH_SIZE; + header.entries[1].length = ip->i_size - ADH_SIZE; + header.finfo.fdType = ip->i_FileType; + header.finfo.fdCreator = ip->i_Creator; + header.finfo.fdFlags = ip->i_FinderFlags; + header.finfo.fdLocation.v = -1; + header.finfo.fdLocation.h = -1; + header.finfo.fdReserved = 0; + + bcopy((caddr_t)&header, (caddr_t)ioaddr, sizeof(apple_double_header_t)); + + kret = ubc_upl_unmap(pl); + if (kret != KERN_SUCCESS) + panic("cd9660_xa_pagein: ubc_upl_unmap error = %d", kret); + + if ((flags & UPL_NOCOMMIT) == 0) { + ubc_upl_commit_range(pl, pl_offset, size, UPL_COMMIT_FREE_ON_EMPTY); + } + } else { + int devBlockSize = 0; - VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); + /* check pageouts are for reg file only and ubc info is present*/ + if (UBCINVALID(vp)) + panic("cd9660_pagein: Not a VREG"); + UBCINFOCHECK("cd9660_pagein", vp); + + VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); - error = cluster_pagein(vp, pl, pl_offset, f_offset, size, + error = cluster_pagein(vp, pl, pl_offset, f_offset, size, (off_t)ip->i_size, devBlockSize, flags); + } return (error); } @@ -1168,6 +1246,277 @@ cd9660_getattrlist(ap) return error; } +/* + * Make a RIFF file header for a CD-ROM XA media file. + */ +__private_extern__ void +cd9660_xa_init(struct vnode *vp, struct iso_directory_record *isodir) +{ + u_long sectors; + struct iso_node *ip = VTOI(vp); + struct riff_header *header; + u_char name_len; + char *cdxa; + + MALLOC(header, struct riff_header *, sizeof(struct riff_header), M_TEMP, M_WAITOK); + + sectors = ip->i_size / 2048; + + strncpy(header->riff, "RIFF", 4); + header->fileSize = NXSwapHostLongToLittle(sectors * CDXA_SECTOR_SIZE + sizeof(struct riff_header) - 8); + strncpy(header->cdxa, "CDXA", 4); + strncpy(header->fmt, "fmt ", 4); + header->fmtSize = NXSwapHostLongToLittle(16); + strncpy(header->data, "data", 4); + header->dataSize = NXSwapHostLongToLittle(sectors * CDXA_SECTOR_SIZE); + + /* + * Copy the CD-ROM XA extended directory information into the header. As far as + * I can tell, it's always 14 bytes in the directory record, but allocated 16 bytes + * in the header (the last two being zeroed pad bytes). + */ + name_len = isonum_711(isodir->name_len); + cdxa = &isodir->name[name_len]; + if ((name_len & 0x01) == 0) + ++cdxa; /* Skip pad byte */ + bcopy(cdxa, header->fmtData, 14); + header->fmtData[14] = 0; + header->fmtData[15] = 0; + + /* + * Point this i-node to the "whole sector" device instead of the normal + * device. This allows cd9660_strategy to be ignorant of the block + * (sector) size. + */ + vrele(ip->i_devvp); + ip->i_devvp = ip->i_mnt->phys_devvp; + VREF(ip->i_devvp); + + ip->i_size = sectors * CDXA_SECTOR_SIZE + sizeof(struct riff_header); + ip->i_riff = header; + vp->v_op = cd9660_cdxaop_p; +} + +/* + * Helper routine for VOP_READ and VOP_PAGEIN of CD-ROM XA multimedia files. 
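/*
 * Editor's aside, not part of the patch: cd9660_xa_init's size math in
 * numbers.  A video file recorded as 1000 logical 2048-byte sectors
 * (directory size 2048000) is re-exposed as raw 2352-byte sectors plus
 * the RIFF header, which packs to 44 bytes with no padding:
 *
 *   sectors  = 2048000 / 2048                             = 1000
 *   dataSize = 1000 * CDXA_SECTOR_SIZE                    = 2352000
 *   fileSize = dataSize + sizeof(struct riff_header) - 8  = 2352036
 *   i_size   = dataSize + sizeof(struct riff_header)      = 2352044
 */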
+ * This routine determines the physical location of the file, then reads + * sectors directly from the device into a buffer. It also handles inserting + * the RIFF header at the beginning of the file. + * + * Exactly one of buffer or uio must be non-zero. It will either bcopy to + * buffer, or uiomove via uio. + * + * XXX Should this code be using breadn and vp->v_lastr to support single-block + * read-ahead? Should we try more aggressive read-ahead like cluster_io does? + * + * XXX This could be made to do larger I/O to the device (reading all the + * whole sectors directly into the buffer). That would make the code more + * complex, and the current code only adds 2.5% overhead compared to reading + * from the device directly (at least on my test machine). + */ +static int +cd9660_xa_read_common( + struct vnode *vp, + off_t offset, + size_t amount, + caddr_t buffer, + struct uio *uio) +{ + struct iso_node *ip = VTOI(vp); + struct buf *bp; + off_t diff; /* number of bytes from offset to file's EOF */ + daddr_t block; /* physical disk block containing offset */ + off_t sect_off; /* starting offset into current sector */ + u_int count; /* number of bytes to transfer in current block */ + int error=0; + + /* + * Copy any part of the RIFF header. + */ + if (offset < sizeof(struct riff_header)) { + char *p; + + p = ((char *) ip->i_riff) + offset; + count = min(amount, sizeof(struct riff_header) - offset); + if (buffer) { + bcopy(p, buffer, count); + buffer += count; + } else { + error = uiomove(p, count, uio); + } + amount -= count; + offset += count; + } + if (error) + return error; + + /* + * Loop over (possibly partial) blocks to transfer. + */ + while (error == 0 && amount > 0) { + /* + * Determine number of bytes until EOF. If we've hit + * EOF then return. + */ + diff = ip->i_size - offset; + if (diff <= 0) + return 0; + + /* Get a block from the underlying device */ + block = ip->iso_start + (offset - sizeof(struct riff_header))/CDXA_SECTOR_SIZE; + error = bread(ip->i_devvp, block, CDXA_SECTOR_SIZE, NOCRED, &bp); + if (error) { + brelse(bp); + return error; + } + if (bp->b_resid) { + printf("isofs: cd9660_xa_read_common: bread didn't read full sector\n"); + return EIO; + } + + /* Figure out which part of the block to copy, and copy it */ + sect_off = (offset - sizeof(struct riff_header)) % CDXA_SECTOR_SIZE; + count = min(CDXA_SECTOR_SIZE-sect_off, amount); + if (diff < count) /* Pin transfer amount to EOF */ + count = diff; + + if (buffer) { + bcopy(bp->b_data+sect_off, buffer, count); + buffer += count; + } else { + error = uiomove(bp->b_data+sect_off, count, uio); + } + amount -= count; + offset += count; + + /* + * If we copied through the end of the block, or the end of file, then + * age the device block. This is optimized for sequential access. + */ + if (sect_off+count == CDXA_SECTOR_SIZE || offset == (off_t)ip->i_size) + bp->b_flags |= B_AGE; + brelse(bp); + } + + return error; +} + +/* + * Read from a CD-ROM XA multimedia file. + * + * This uses the same common routine as pagein for doing the actual read + * from the device. + * + * This routine doesn't do any caching beyond what the block device does. + * Even then, cd9660_xa_read_common ages the blocks once we read up to + * the end. + * + * We don't even take advantage if the file has been memory mapped and has + * valid pages already (in which case we could just uiomove from the page + * to the caller). Since we're a read-only filesystem, there can't be + * any cache coherency problems. 
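/*
 * Editor's aside, not part of the patch: the block arithmetic in
 * cd9660_xa_read_common above, for a read at file offset 5000 once the
 * 44-byte RIFF header has been copied:
 *
 *   block    = iso_start + (5000 - 44) / CDXA_SECTOR_SIZE
 *            = iso_start + 4956 / 2352  = iso_start + 2
 *   sect_off = (5000 - 44) % 2352       = 252
 *   count    = min(2352 - 252, amount)  capped again at EOF
 *
 * so the transfer starts at byte 252 of the third raw sector.
 */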
Multimedia files are expected to be + * large and streamed anyway, so caching file contents probably isn't + * important. + */ +int +cd9660_xa_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + register struct uio *uio = ap->a_uio; + register struct iso_node *ip = VTOI(vp); + off_t offset = uio->uio_offset; + size_t size = uio->uio_resid; + + /* Check for some obvious parameter problems */ + if (offset < 0) + return EINVAL; + if (size == 0) + return 0; + if (offset >= ip->i_size) + return 0; + + /* Pin the size of the read to the file's EOF */ + if (offset + size > ip->i_size) + size = ip->i_size - offset; + + return cd9660_xa_read_common(vp, offset, size, NULL, uio); +} + +/* + * Page in from a CD-ROM XA media file. + * + * Since our device block size isn't a power of two, we can't use + * cluster_pagein. Instead, we have to map the page and read into it. + */ +static int +cd9660_xa_pagein(ap) + struct vop_pagein_args /* { + struct vnode *a_vp, + upl_t a_pl, + vm_offset_t a_pl_offset, + off_t a_f_offset, + size_t a_size, + struct ucred *a_cred, + int a_flags + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + upl_t pl = ap->a_pl; + size_t size= ap->a_size; + off_t f_offset = ap->a_f_offset; + vm_offset_t pl_offset = ap->a_pl_offset; + int flags = ap->a_flags; + register struct iso_node *ip = VTOI(vp); + int error; + kern_return_t kret; + vm_offset_t ioaddr; + + /* check pageins are for reg file only and ubc info is present*/ + if (UBCINVALID(vp)) + panic("cd9660_xa_pagein: Not a VREG"); + UBCINFOCHECK("cd9660_xa_pagein", vp); + + if (size <= 0) + panic("cd9660_xa_pagein: size = %d", size); + + kret = ubc_upl_map(pl, &ioaddr); + if (kret != KERN_SUCCESS) + panic("cd9660_xa_pagein: ubc_upl_map error = %d", kret); + + ioaddr += pl_offset; + + /* Make sure pagein doesn't extend past EOF */ + if (f_offset + size > ip->i_size) + size = ip->i_size - f_offset; /* pin size to EOF */ + + /* Read the data in using the underlying device */ + error = cd9660_xa_read_common(vp, f_offset, size, (caddr_t)ioaddr, NULL); + + /* Zero fill part of page past EOF */ + if (ap->a_size > size) + bzero((caddr_t)ioaddr+size, ap->a_size-size); + + kret = ubc_upl_unmap(pl); + if (kret != KERN_SUCCESS) + panic("cd9660_xa_pagein: ubc_upl_unmap error = %d", kret); + + if ((flags & UPL_NOCOMMIT) == 0) + { + if (error) + ubc_upl_abort_range(pl, pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY); + else + ubc_upl_commit_range(pl, pl_offset, ap->a_size, UPL_COMMIT_FREE_ON_EMPTY); + } + + return error; +} + /* * Global vfs data structures for isofs */ @@ -1244,7 +1593,7 @@ struct vnodeopv_entry_desc cd9660_vnodeop_entries[] = { { &vop_symlink_desc, (VOPFUNC)cd9660_symlink }, /* symlink */ { &vop_readdir_desc, (VOPFUNC)cd9660_readdir }, /* readdir */ { &vop_readlink_desc, (VOPFUNC)cd9660_readlink },/* readlink */ - { &vop_abortop_desc, (VOPFUNC)cd9660_abortop }, /* abortop */ + { &vop_abortop_desc, (VOPFUNC)nop_abortop }, /* abortop */ { &vop_inactive_desc, (VOPFUNC)cd9660_inactive },/* inactive */ { &vop_reclaim_desc, (VOPFUNC)cd9660_reclaim }, /* reclaim */ { &vop_lock_desc, (VOPFUNC)cd9660_lock }, /* lock */ @@ -1272,6 +1621,65 @@ struct vnodeopv_entry_desc cd9660_vnodeop_entries[] = { struct vnodeopv_desc cd9660_vnodeop_opv_desc = { &cd9660_vnodeop_p, cd9660_vnodeop_entries }; +/* + * The VOP table for CD-ROM XA (media) files is almost the same + * as for ordinary files, except for read, and pagein. 
+ * Note that cd9660_xa_read doesn't use cluster I/O, so cmap + * isn't needed, and isn't implemented. Similarly, it doesn't + * do bread() on CD XA vnodes, so bmap, blktooff, offtoblk + * aren't needed. + */ +int (**cd9660_cdxaop_p)(void *); +struct vnodeopv_entry_desc cd9660_cdxaop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)cd9660_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)cd9660_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)cd9660_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)cd9660_open }, /* open */ + { &vop_close_desc, (VOPFUNC)cd9660_close }, /* close */ + { &vop_access_desc, (VOPFUNC)cd9660_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)cd9660_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)cd9660_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)cd9660_xa_read }, /* read */ + { &vop_write_desc, (VOPFUNC)cd9660_write }, /* write */ + { &vop_lease_desc, (VOPFUNC)cd9660_lease_check },/* lease */ + { &vop_ioctl_desc, (VOPFUNC)cd9660_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)cd9660_select }, /* select */ + { &vop_mmap_desc, (VOPFUNC)cd9660_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)cd9660_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)cd9660_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)cd9660_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)cd9660_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)cd9660_rename }, /* rename */ + { &vop_copyfile_desc, (VOPFUNC)cd9660_copyfile },/* copyfile */ + { &vop_mkdir_desc, (VOPFUNC)cd9660_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)cd9660_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)cd9660_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)cd9660_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)cd9660_readlink },/* readlink */ + { &vop_inactive_desc, (VOPFUNC)cd9660_inactive },/* inactive */ + { &vop_reclaim_desc, (VOPFUNC)cd9660_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)cd9660_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)cd9660_unlock }, /* unlock */ + { &vop_strategy_desc, (VOPFUNC)cd9660_strategy },/* strategy */ + { &vop_print_desc, (VOPFUNC)cd9660_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)cd9660_islocked },/* islocked */ + { &vop_pathconf_desc, (VOPFUNC)cd9660_pathconf },/* pathconf */ + { &vop_advlock_desc, (VOPFUNC)cd9660_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)cd9660_blkatoff },/* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)cd9660_valloc }, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)cd9660_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)cd9660_truncate },/* truncate */ + { &vop_update_desc, (VOPFUNC)cd9660_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)vn_bwrite }, + { &vop_pagein_desc, (VOPFUNC)cd9660_xa_pagein }, /* Pagein */ + { &vop_pageout_desc, (VOPFUNC)cd9660_pageout }, /* Pageout */ + { &vop_getattrlist_desc, (VOPFUNC)cd9660_getattrlist }, /* getattrlist */ + { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } +}; +struct vnodeopv_desc cd9660_cdxaop_opv_desc = + { &cd9660_cdxaop_p, cd9660_cdxaop_entries }; + /* * Special device vnode ops */ diff --git a/bsd/isofs/cd9660/iso.h b/bsd/isofs/cd9660/iso.h index 5cbf9b001..60faf67aa 100644 --- a/bsd/isofs/cd9660/iso.h +++ b/bsd/isofs/cd9660/iso.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -261,6 +261,7 @@ struct iso_mnt { int logical_block_size; int im_bshift; int im_bmask; + int im_sector_size; int volume_space_size; struct netexport im_export; @@ -276,6 +277,10 @@ struct iso_mnt { struct timespec creation_date; /* needed for getattrlist */ struct timespec modification_date; /* needed for getattrlist */ u_char volume_id[32]; /* name of volume */ + struct vnode *phys_devvp; /* device for 2352-byte blocks */ + struct CDTOC *toc; /* the TOC, or NULL for none */ + int video_dir_start; /* start sector of the "MPEGAV" dir */ + int video_dir_end; /* sector following end of "MPEGAV" dir */ }; /* bit settings for iso_mnt.im_flags2 */ @@ -286,6 +291,9 @@ struct iso_mnt { */ #define IMF2_IS_CDXA 0x00000001 +/* CD is Video CD (version < 2.0) */ +#define IMF2_IS_VCD 0x00000002 + #define VFSTOISOFS(mp) ((struct iso_mnt *)((mp)->mnt_data)) #define blkoff(imp, loc) ((loc) & (imp)->im_bmask) @@ -293,6 +301,10 @@ struct iso_mnt { #define lblkno(imp, loc) ((loc) >> (imp)->im_bshift) #define blksize(imp, ip, lbn) ((imp)->logical_block_size) +#define SECTOFF(imp, off) \ + (off_t)(((off) / (imp)->im_sector_size) * (imp)->im_sector_size) + + int cd9660_mount __P((struct mount *, char *, caddr_t, struct nameidata *, struct proc *)); int cd9660_start __P((struct mount *, int, struct proc *)); @@ -316,6 +328,7 @@ extern int (**cd9660_specop_p)(void *); #if FIFO extern int (**cd9660_fifoop_p)(void *); #endif +extern int (**cd9660_cdxaop_p)(void *); static __inline int isonum_711(p) @@ -389,8 +402,8 @@ isonum_733(p) int isofncmp __P((u_char *, int, u_char *, int)); int ucsfncmp __P((u_int16_t *, int, u_int16_t *, int)); -void isofntrans __P((u_char *, int, u_char *, u_short *, int)); -void ucsfntrans __P((u_int16_t *, int, u_char *, u_short *, int)); +void isofntrans __P((u_char *, int, u_char *, u_short *, int, int)); +void ucsfntrans __P((u_int16_t *, int, u_char *, u_short *, int, int)); ino_t isodirino __P((struct iso_directory_record *, struct iso_mnt *)); int attrcalcsize __P((struct attrlist *attrlist)); void packattrblk __P((struct attrlist *alist, struct vnode *vp, @@ -398,9 +411,68 @@ void packattrblk __P((struct attrlist *alist, struct vnode *vp, /* - * Associated files have a leading '='. + * Associated files have a leading "._". + */ +#define ASSOCCHAR1 '.' +#define ASSOCCHAR2 '_' + +/* + * This header is prepended on media tracks, such as Video CD MPEG files. + */ +struct riff_header { + char riff[4]; // "RIFF" + u_int32_t fileSize; // little endian file size, not including this field or sig + char cdxa[4]; // "CDXA" + char fmt[4]; // "fmt " + u_int32_t fmtSize; // always 16 (XXX this is an assumption) + char fmtData[16]; // CDXA extension of ISO directory entry, padded to 16 bytes + char data[4]; // "data" + u_int32_t dataSize; // number of sectors * 2352, little endian +}; + +#define CDXA_SECTOR_SIZE 2352 + + +/* + * AppleDouble constants + */ +#define APPLEDOUBLE_MAGIC 0x00051607 +#define APPLEDOUBLE_VERSION 0x00020000 + +#define APPLEDOUBLE_DATAFORK 1 +#define APPLEDOUBLE_RESFORK 2 +#define APPLEDOUBLE_FINDERINFO 9 + +/* + * Note that 68k alignment is needed to make sure that the first + * AppleDoubleEntry (after the numEntries below) is *immediately* + * after the numEntries, and not padded by 2 bytes. + * + * Consult RFC 1740 for details on AppleSingle/AppleDouble formats. 
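/*
 * Editor's aside, not part of the patch: the offsets the mac68k pragma
 * buys, assuming struct finder_info is the usual 32 bytes (matching the
 * entries[0].length of 32 used when the header is built):
 *
 *   magic    at  0      count       at 24
 *   version  at  4      entries[0]  at 26
 *   filler   at  8      entries[1]  at 38
 *                       finfo       at 50  (0x32, the offset RFC 1740
 *                                           gives for Finder info)
 *
 * With natural alignment a two-byte pad would follow count and push
 * entries[0] to 28, breaking the on-disk format, which is exactly what
 * the comment above warns about.
 */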
*/ -#define ASSOCCHAR '=' +#pragma options align=mac68k + +struct apple_double_entry { + u_int32_t entryID; + u_int32_t offset; + u_int32_t length; +}; +typedef struct apple_double_entry apple_double_entry_t; + +struct apple_double_header { + u_int32_t magic; + u_int32_t version; + u_int8_t filler[16]; + u_int16_t count; + apple_double_entry_t entries[2]; /* FinderInfo + ResourceFork */ + struct finder_info finfo; +}; +typedef struct apple_double_header apple_double_header_t; + +#define ADH_SIZE 4096 +#define ADH_BLKS 2 + +#pragma options align=reset #endif /* __APPLE_API_PRIVATE */ #endif /* ! _ISO_H_ */ diff --git a/bsd/kern/bsd_init.c b/bsd/kern/bsd_init.c index 7b4a41efb..cc54ea10b 100644 --- a/bsd/kern/bsd_init.c +++ b/bsd/kern/bsd_init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -69,13 +69,6 @@ * All rights reserved. The CMU software License Agreement specifies * the terms and conditions for use and redistribution. */ -/* - * HISTORY - * 16-Apr-98 A. Ramesh at Apple - * Created for Apple Core from DR2 init_main.c. - */ - -#include #include #include @@ -88,7 +81,10 @@ #include #include #include -#include +#include +#include + +#include #include #include @@ -103,11 +99,11 @@ #include #include -#include +#include /* for ux_exception_port */ #include #include -#include +#include /* for pseudo_inits */ #include #include @@ -120,11 +116,12 @@ #include #include -extern shared_region_mapping_t system_shared_region; extern int app_profile; /* on/off switch for pre-heat cache */ char copyright[] = -"Copyright (c) 1982, 1986, 1989, 1991, 1993\n\tThe Regents of the University of California. All rights reserved.\n\n"; +"Copyright (c) 1982, 1986, 1989, 1991, 1993\n\t" +"The Regents of the University of California. " +"All rights reserved.\n\n"; extern void ux_handler(); @@ -155,9 +152,12 @@ dev_t dumpdev; /* device to take dumps on */ long dumplo; /* offset into dumpdev */ long hostid; char hostname[MAXHOSTNAMELEN]; -int hostnamelen; +int hostnamelen; char domainname[MAXDOMNAMELEN]; -int domainnamelen; +int domainnamelen; +char classichandler[32] = {0}; +long classichandler_fsid = -1L; +long classichandler_fileid = -1L; char rootdevice[16]; /* hfs device names have at least 9 chars */ struct timeval boottime; /* GRODY! This has to go... */ @@ -170,7 +170,7 @@ int lbolt; /* awoken once a second */ struct vnode *rootvp; int boothowto = RB_DEBUG; -#define BSD_PAGABLE_MAP_SIZE (4 * 512 * 1024) +#define BSD_PAGABLE_MAP_SIZE (16 * 512 * 1024) vm_map_t bsd_pageable_map; vm_map_t mb_map; semaphore_t execve_semaphore; @@ -183,8 +183,8 @@ extern task_t bsd_init_task; extern char init_task_failure_data[]; extern void time_zone_slock_init(void); -funnel_t * kernel_flock; -funnel_t * network_flock; +funnel_t *kernel_flock; +funnel_t *network_flock; int disable_funnel = 0; /* disables split funnel */ int enable_funnel = 0; /* disables split funnel */ @@ -194,13 +194,10 @@ int enable_funnel = 0; /* disables split funnel */ * soon as a stack and segmentation * have been established. 
* Functions: - * clear and free user core * turn on clock * hand craft 0th process * call all initialization routines - * fork - process 0 to schedule - * - process 1 execute bootstrap - * - process 2 to page out + * hand craft 1st user process */ /* @@ -247,19 +244,8 @@ bsd_init() extern void uthread_zone_init(); - -#if 1 /* split funnel is enabled by default */ PE_parse_boot_arg("dfnl", &disable_funnel); -#else - /* split funnel is disabled befault */ - disable_funnel = 1; - PE_parse_boot_arg("efnl", &enable_funnel); - if (enable_funnel) { - /* enable only if efnl is set in bootarg */ - disable_funnel = 0; - } -#endif kernel_flock = funnel_alloc(KERNEL_FUNNEL); if (kernel_flock == (funnel_t *)0 ) { @@ -339,11 +325,23 @@ bsd_init() p->p_cred = &cred0; p->p_ucred = crget(); p->p_ucred->cr_ngroups = 1; /* group 0 */ + + TAILQ_INIT(&p->aio_activeq); + TAILQ_INIT(&p->aio_doneq); + p->aio_active_count = 0; + p->aio_done_count = 0; + + /* Set the audit info for this process */ + audit_proc_init(p); /* Create the file descriptor table. */ filedesc0.fd_refcnt = 1+1; /* +1 so shutdown will not _FREE_ZONE */ p->p_fd = &filedesc0; filedesc0.fd_cmask = cmask; + filedesc0.fd_knlistsize = -1; + filedesc0.fd_knlist = NULL; + filedesc0.fd_knhash = NULL; + filedesc0.fd_knhashmask = 0; /* Create the limits structures. */ p->p_limit = &limit0; @@ -352,6 +350,7 @@ bsd_init() limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY; limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE; limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = MAXUPRC; + limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc; limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack; limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data; limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core; @@ -404,6 +403,18 @@ bsd_init() /* Initialize syslog */ log_init(); + /* + * Initializes security event auditing. + * XXX: Should/could this occur later? + */ + audit_init(); + + /* Initialize kqueues */ + knote_init(); + + /* Initialize for async IO */ + aio_init(); + /* POSIX Shm and Sem */ pshm_cache_init(); psem_cache_init(); @@ -431,7 +442,7 @@ bsd_init() /* kick off timeout driven events by calling first time */ thread_wakeup(&lbolt); - timeout(lightning_bolt,0,hz); + timeout((void (*)(void *))lightning_bolt, 0, hz); bsd_autoconf(); @@ -459,7 +470,7 @@ bsd_init() * read the time after clock_initialize_calendar() * and before nfs mount */ - microtime(&time); + microtime((struct timeval *)&time); bsd_hardclockinit = -1; /* start ticking */ @@ -507,13 +518,11 @@ bsd_init() devfs_kernel_mount("/dev"); } -#endif DEVFS +#endif /* DEVFS */ /* Initialize signal state for process 0. */ siginit(p); - /* printf("Launching user process\n"); */ - bsd_utaskbootstrap(); /* invoke post-root-mount hook */ @@ -531,6 +540,7 @@ bsdinit_task(void) struct uthread *ut; kern_return_t kr; thread_act_t th_act; + shared_region_mapping_t system_region; proc_name("init", p); @@ -560,8 +570,14 @@ bsdinit_task(void) bsd_hardclockinit = 1; /* Start bsd hardclock */ bsd_init_task = get_threadtask(th_act); init_task_failure_data[0] = 0; - shared_region_mapping_ref(system_shared_region); - vm_set_shared_region(get_threadtask(th_act), system_shared_region); + system_region = lookup_default_shared_region(ENV_DEFAULT_ROOT, + machine_slot[cpu_number()].cpu_type); + if (system_region == NULL) { + shared_file_boot_time_init(ENV_DEFAULT_ROOT, + machine_slot[cpu_number()].cpu_type); + } else { + vm_set_shared_region(get_threadtask(th_act), system_region); + } load_init_program(p); /* turn on app-profiling i.e. 
pre-heating */ app_profile = 1; @@ -602,7 +618,7 @@ bsd_autoconf() } -#include // for MAXPARTITIONS +#include /* for MAXPARTITIONS */ setconf() { @@ -649,11 +665,7 @@ bsd_utaskbootstrap() ut = (struct uthread *)get_bsdthread_info(th_act); ut->uu_sigmask = 0; - thread_hold(th_act); - (void)thread_stop(getshuttle_thread(th_act)); act_set_astbsd(th_act); - thread_release(th_act); - thread_unstop(getshuttle_thread(th_act)); (void) thread_resume(th_act); } @@ -675,6 +687,7 @@ parse_bsd_args() else strcat(init_args,"-s"); } + if (PE_parse_boot_arg("-b", namep)) { boothowto |= RB_NOBOOTRC; len = strlen(init_args); @@ -708,6 +721,14 @@ parse_bsd_args() strcat(init_args,"-x"); } + if (PE_parse_boot_arg("-d", namep)) { + len = strlen(init_args); + if(len != 0) + strcat(init_args," -d"); + else + strcat(init_args,"-d"); + } + PE_parse_boot_arg("srv", &srv); PE_parse_boot_arg("ncl", &ncl); PE_parse_boot_arg("nbuf", &nbuf); @@ -720,7 +741,6 @@ thread_funnel_switch( int oldfnl, int newfnl) { - thread_t cur_thread; boolean_t funnel_state_prev; int curfnl; funnel_t * curflock; @@ -748,8 +768,6 @@ thread_funnel_switch( panic("thread_funnel_switch: no funnel held"); } - cur_thread = current_thread(); - if ((oldfnl == NETWORK_FUNNEL) && (curflock != network_flock)) panic("thread_funnel_switch: network funnel not held"); diff --git a/bsd/kern/bsd_stubs.c b/bsd/kern/bsd_stubs.c index eebece1cb..f7c4940ca 100644 --- a/bsd/kern/bsd_stubs.c +++ b/bsd/kern/bsd_stubs.c @@ -43,7 +43,7 @@ kmem_mb_alloc(vm_map_t mbmap, int size) if (kernel_memory_allocate(mbmap, &addr, size, 0, KMA_NOPAGEWAIT|KMA_KOBJECT) == KERN_SUCCESS) - return((void *)addr); + return(addr); else return(0); @@ -101,6 +101,9 @@ bdevsw_isfree(int index) sizeof(struct bdevsw)) == 0) break; } + } else { + /* NB: Not used below unless index is in range */ + devsw = &bdevsw[index]; } if ((index < 0) || (index >= nblkdev) || @@ -123,7 +126,7 @@ bdevsw_add(int index, struct bdevsw * bsw) struct bdevsw *devsw; if (index == -1) { - devsw = bdevsw; + devsw = &bdevsw[1]; /* Start at slot 1 - this is a hack to fix the index=1 hack */ /* yes, start at 1 to avoid collision with volfs (Radar 2842228) */ for(index=1; index < nblkdev; index++, devsw++) { if(memcmp((char *)devsw, diff --git a/bsd/kern/init_sysent.c b/bsd/kern/init_sysent.c index b0cc649eb..4f7d1f650 100644 --- a/bsd/kern/init_sysent.c +++ b/bsd/kern/init_sysent.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995-1999, 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1995-1999, 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -127,6 +127,9 @@ int setpriority(); int socket(); int connect(); int getpriority(); +#ifdef __ppc__ +int osigreturn(); +#endif int sigreturn(); int bind(); int setsockopt(); @@ -302,6 +305,9 @@ int sem_getvalue(); int sem_init(); int sem_destroy(); +int fmod_watch_enable(); +int fmod_watch(); + int issetugid(); int utrace(); int pread(); @@ -314,10 +320,49 @@ int sigwait(); int pthread_sigmask(); int __disable_threadsignal(); +int nfsclnt(); +int fhopen(); + +int aio_cancel(); +int aio_error(); +int aio_fsync(); +int aio_read(); +int aio_return(); +int aio_suspend(); +int aio_write(); +int lio_listio(); + +int kqueue(); +int kqueue_portset_np(); +int kqueue_from_portset_np(); +int kevent(); + +int audit(); +int auditon(); +int auditsvc(); +int getauid(); +int setauid(); +int getaudit(); +int setaudit(); +int getaudit_addr(); +int setaudit_addr(); +int auditctl(); + /* * System call switch table. */ +/* + * N.B. 
+ * The argument count numbers in this table are actually + * the number of UInt32 words that comprise the arguments + * not the number of arguments + * + * This value is not currently used on PPC but Intel Darwin + * does use it and will not work correctly if the values + * are wrong + */ + struct sysent sysent[] = { syss(nosys,0), /* 0 = indir */ syss(exit,1), /* 1 = exit */ @@ -396,7 +441,7 @@ struct sysent sysent[] = { syss(sstk,1), /* 70 = sstk */ compat(smmap,6), /* 71 = old mmap */ syss(ovadvise,1), /* 72 = old vadvise */ - syss(munmap,2), /* 73 = munmap */ + sysnofnl(munmap,2), /* 73 = munmap */ syss(mprotect,3), /* 74 = mprotect */ syss(madvise,3), /* 75 = madvise */ syss(nosys,0), /* 76 was obsolete vhangup */ @@ -407,7 +452,7 @@ struct sysent sysent[] = { sysp(getpgrp,0), /* 81 = getpgrp */ sysp(setpgid,2), /* 82 = setpgid */ syss(setitimer,3), /* 83 = setitimer */ - compat(wait,0), /* 84 = old wait */ + compat(wait,1), /* 84 = old wait */ syss(swapon,1), /* 85 = swapon */ syss(getitimer,2), /* 86 = getitimer */ compat(gethostname,2), /* 87 = old gethostname */ @@ -426,7 +471,11 @@ struct sysent sysent[] = { sysp(getpriority,2), /* 100 = getpriority */ comaptnet(send,4), /* 101 = old send */ comaptnet(recv,4), /* 102 = old recv */ - syss(sigreturn,1), /* 103 = sigreturn */ +#ifdef __ppc__ + syss(osigreturn,1), /* 103 = sigreturn ; compat for jaguar*/ +#else + syss(sigreturn,1), /* 103 = sigreturn */ +#endif sysnets(bind,3), /* 104 = bind */ sysnets(setsockopt,5), /* 105 = setsockopt */ sysnets(listen,2), /* 106 = listen */ @@ -439,6 +488,18 @@ struct sysent sysent[] = { comaptnet(recvmsg,3), /* 113 = recvmsg */ comaptnet(sendmsg,3), /* 114 = sendmsg */ syss(nosys,0), /* 115 = old vtrace */ + +/* + * N.B. + * The argument count numbers in this table are actually + * the number of UInt32 words that comprise the arguments + * not the number of arguments + * + * This value is not currently used on PPC but Intel Darwin + * does use it and will not work correctly if the values + * are wrong + */ + #ifdef __ppc__ sysnofnl(ppc_gettimeofday,2), /* 116 = gettimeofday */ #else @@ -481,11 +542,11 @@ struct sysent sysent[] = { syss(getpgid,1), /* 151 = getpgid */ sysp(setprivexec,1),/* 152 = setprivexec */ #ifdef DOUBLE_ALIGN_PARAMS + syss(pread,6), /* 153 = pread */ + syss(pwrite,6), /* 154 = pwrite */ +#else syss(pread,5), /* 153 = pread */ syss(pwrite,5), /* 154 = pwrite */ -#else - syss(pread,4), /* 153 = pread */ - syss(pwrite,4), /* 154 = pwrite */ #endif syss(nfssvc,2), /* 155 = nfs_svc */ compat(getdirentries,4), /* 156 = old getdirentries */ @@ -499,7 +560,7 @@ struct sysent sysent[] = { syss(nosys,0), /* 164 */ #if QUOTA syss(quotactl, 4), /* 165 = quotactl */ -#else QUOTA +#else /* QUOTA */ syss(nosys, 0), /* 165 = not configured */ #endif /* QUOTA */ syss(nosys,0), /* 166 was exportfs */ @@ -516,11 +577,15 @@ struct sysent sysent[] = { syss(nosys,0), /* 177 */ syss(nosys,0), /* 178 */ syss(nosys,0), /* 179 */ - syss(kdebug_trace,6), /* 180 */ + sysnofnl(kdebug_trace,6), /* 180 */ syss(setgid,1), /* 181 */ syss(setegid,1), /* 182 */ syss(seteuid,1), /* 183 */ +#ifdef __ppc__ + syss(sigreturn, 2), /* 184 = nosys */ +#else syss(nosys,0), /* 184 = nosys */ +#endif syss(nosys,0), /* 185 = nosys */ syss(nosys,0), /* 186 = nosys */ syss(nosys,0), /* 187 = nosys */ @@ -529,6 +594,18 @@ struct sysent sysent[] = { syss(lstat,2), /* 190 = lstat */ syss(pathconf,2), /* 191 = pathconf */ syss(fpathconf,2), /* 192 = fpathconf */ + +/* + * N.B. 
+ * The argument count numbers in this table are actually + * the number of UInt32 words that comprise the arguments + * not the number of arguments + * + * This value is not currently used on PPC but Intel Darwin + * does use it and will not work correctly if the values + * are wrong + */ + #if COMPAT_GETFSSTAT syss(getfsstat,3), /* 193 = getfsstat */ #else @@ -568,8 +645,8 @@ struct sysent sysent[] = { sysnets(ATPgetreq,3), /* 211 = ATPgetreq*/ sysnets(ATPgetrsp,2), /* 212 = ATPgetrsp*/ syss(nosys,0), /* 213 = Reserved for AppleTalk */ - syss(nosys,0), /* 214 = Reserved for AppleTalk */ - syss(nosys,0), /* 215 = Reserved for AppleTalk */ + syss(kqueue_from_portset_np,1), /* 214 = kqueue_from_portset_np */ + syss(kqueue_portset_np,1), /* 215 = kqueue_portset_np */ #else syss(nosys,0), /* 206 = Reserved for AppleTalk */ syss(nosys,0), /* 207 = Reserved for AppleTalk */ @@ -592,6 +669,18 @@ struct sysent sysent[] = { * We expect all filesystems to recognize the call and report that it is * not supported or to actually implement it. */ + +/* + * N.B. + * The argument count numbers in this table are actually + * the number of UInt32 words that comprise the arguments + * not the number of arguments + * + * This value is not currently used on PPC but Intel Darwin + * does use it and will not work correctly if the values + * are wrong + */ + syss(nosys,3), /* 216 = HFS make complex file call (multipel forks */ syss(nosys,2), /* 217 = HFS statv extended stat call for HFS */ syss(nosys,2), /* 218 = HFS lstatv extended lstat call for HFS */ @@ -607,7 +696,7 @@ struct sysent sysent[] = { #endif /* __APPLE_API_OBSOLETE */ syss(searchfs,6), /* 225 = HFS searchfs to implement catalog searching */ syss(delete,1), /* 226 = private delete (Carbon semantics) */ - syss(copyfile,4), /* 227 = copyfile - orignally for AFP */ + syss(copyfile,6), /* 227 = copyfile - orignally for AFP */ syss(nosys,0), /* 228 */ syss(nosys,0), /* 229 */ syss(nosys,0), /* 230 */ @@ -627,8 +716,8 @@ struct sysent sysent[] = { syss(nosys,0), /* 244 */ syss(nosys,0), /* 245 */ syss(nosys,0), /* 246 */ - syss(nosys,0), /* 247 */ - syss(nosys,0), /* 248 */ + syss(nfsclnt,2), /* 247 = nfsclnt*/ + syss(fhopen,2), /* 248 = fhopen */ syss(nosys,0), /* 249 */ syss(minherit,3), /* 250 = minherit */ syss(semsys,5), /* 251 = semsys */ @@ -669,8 +758,8 @@ struct sysent sysent[] = { syss(nosys,0), /* 286 */ syss(nosys,0), /* 287 */ syss(nosys,0), /* 288 */ - syss(nosys,0), /* 289 */ - syss(nosys,0), /* 290 */ + syss(fmod_watch_enable, 1), /* 289 = fmod_watching */ + syss(fmod_watch, 4), /* 290 = fmod_watch */ syss(nosys,0), /* 291 */ syss(nosys,0), /* 292 */ syss(nosys,0), /* 293 */ @@ -693,14 +782,14 @@ struct sysent sysent[] = { syss(getsid,1), /* 310 = getsid */ syss(nosys,0), /* 311 */ syss(nosys,0), /* 312 */ - syss(nosys,0), /* 313 */ - syss(nosys,0), /* 314 */ - syss(nosys,0), /* 315 */ - syss(nosys,0), /* 316 */ - syss(nosys,0), /* 317 */ - syss(nosys,0), /* 318 */ - syss(nosys,0), /* 319 */ - syss(nosys,0), /* 320 */ + sysnofnl(aio_fsync,1), /* 313 = aio_fsync */ + sysnofnl(aio_return,1), /* 314 = aio_return */ + sysnofnl(aio_suspend,3), /* 315 = aio_suspend */ + sysnofnl(aio_cancel,2), /* 316 = aio_cancel */ + sysnofnl(aio_error,1), /* 317 = aio_error */ + sysnofnl(aio_read,1), /* 318 = aio_read */ + sysnofnl(aio_write,1), /* 319 = aio_write */ + sysnofnl(lio_listio,4), /* 320 = lio_listio */ syss(nosys,0), /* 321 */ syss(nosys,0), /* 322 */ syss(nosys,0), /* 323 */ @@ -729,6 +818,38 @@ struct sysent sysent[] = { syss(nosys,0), /* 346 
*/ syss(nosys,0), /* 347 */ syss(nosys,0), /* 348 */ - syss(nosys,0) /* 349 */ + syss(nosys,0), /* 349 */ + syss(audit,2), /* 350 */ + syss(auditon,3), /* 351 */ + syss(auditsvc,2), /* 352 */ + syss(getauid,1), /* 353 */ + syss(setauid,1), /* 354 */ + syss(getaudit,1), /* 355 */ + syss(setaudit,1), /* 356 */ + syss(getaudit_addr,2), /* 357 */ + syss(setaudit_addr,2), /* 358 */ + syss(auditctl,1), /* 359 */ + syss(nosys,0), /* 360 */ + syss(nosys,0), /* 361 */ + syss(kqueue,0), /* 362 = kqueue */ + syss(kevent,6), /* 363 = kevent */ + syss(nosys,0), /* 364 */ + syss(nosys,0), /* 365 */ + syss(nosys,0), /* 366 */ + syss(nosys,0), /* 367 */ + syss(nosys,0), /* 368 */ + syss(nosys,0) /* 369 */ + +/* + * N.B. + * The argument count numbers in this table are actually + * the number of UInt32 words that comprise the arguments + * not the number of arguments + * + * This value is not currently used on PPC but Intel Darwin + * does use it and will not work correctly if the values + * are wrong + */ + }; int nsysent = sizeof(sysent) / sizeof(sysent[0]); diff --git a/bsd/kern/kdebug.c b/bsd/kern/kdebug.c index cee34d326..f9e4fdd5e 100644 --- a/bsd/kern/kdebug.c +++ b/bsd/kern/kdebug.c @@ -28,6 +28,7 @@ #define HZ 100 #include #include +#include #include #include @@ -46,7 +47,7 @@ unsigned int kdebug_enable = 0; /* track timestamps for security server's entropy needs */ -mach_timespec_t * kd_entropy_buffer = 0; +uint64_t * kd_entropy_buffer = 0; unsigned int kd_entropy_bufsize = 0; unsigned int kd_entropy_count = 0; unsigned int kd_entropy_indx = 0; @@ -97,7 +98,8 @@ struct kdebug_args { /* task to string structure */ struct tts { - task_t *task; + task_t *task; /* from procs task */ + pid_t pid; /* from procs p_pid */ char task_comm[20]; /* from procs p_comm */ }; @@ -159,7 +161,7 @@ unsigned int debugid, arg1, arg2, arg3, arg4, arg5; { if (kd_entropy_indx < kd_entropy_count) { - ml_get_timebase((unsigned long long *) &kd_entropy_buffer [ kd_entropy_indx]); + kd_entropy_buffer [ kd_entropy_indx] = mach_absolute_time(); kd_entropy_indx++; } @@ -231,23 +233,17 @@ unsigned int debugid, arg1, arg2, arg3, arg4, arg5; kd->arg2 = arg2; kd->arg3 = arg3; kd->arg4 = arg4; - kd->arg5 = (int)current_thread(); + kd->arg5 = (int)current_act(); if (cpu_number()) kd->arg5 |= KDBG_CPU_MASK; - ml_get_timebase((unsigned long long *)&kd->timestamp); + now = kd->timestamp = mach_absolute_time(); /* Watch for out of order timestamps */ - now = (((unsigned long long)kd->timestamp.tv_sec) << 32) | - (unsigned long long)((unsigned int)(kd->timestamp.tv_nsec)); if (now < kd_prev_timebase) { - /* timestamps are out of order -- adjust */ - kd_prev_timebase++; - tsp = (mach_timespec_t *)&kd_prev_timebase; - kd->timestamp.tv_sec = tsp->tv_sec; - kd->timestamp.tv_nsec = tsp->tv_nsec; + kd->timestamp = ++kd_prev_timebase; } else { @@ -353,19 +349,14 @@ unsigned int debugid, arg1, arg2, arg3, arg4, arg5; kd->arg3 = arg3; kd->arg4 = arg4; kd->arg5 = arg5; - ml_get_timebase((unsigned long long *)&kd->timestamp); + now = kd->timestamp = mach_absolute_time(); /* Watch for out of order timestamps */ - now = (((unsigned long long)kd->timestamp.tv_sec) << 32) | - (unsigned long long)((unsigned int)(kd->timestamp.tv_nsec)); if (now < kd_prev_timebase) { /* timestamps are out of order -- adjust */ - kd_prev_timebase++; - tsp = (mach_timespec_t *)&kd_prev_timebase; - kd->timestamp.tv_sec = tsp->tv_sec; - kd->timestamp.tv_nsec = tsp->tv_nsec; + kd->timestamp = ++kd_prev_timebase; } else { @@ -421,11 +412,11 @@ kdbg_reinit() kdebug_nolog = 1; 
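
The kdebug hunks above drop the two-word mach_timespec_t timestamp in favor of a single 64-bit value from mach_absolute_time(), so the out-of-order check collapses to one compare plus an increment. A minimal sketch of that pattern, assuming one caller at a time (only mach_absolute_time() is the real call; the other names are hypothetical, and the kernel relies on the trace subsystem's own serialization rather than this sketch being thread-safe):

    #include <stdint.h>
    #include <mach/mach_time.h>

    static uint64_t prev_timebase;      /* plays the role of kd_prev_timebase */

    /*
     * Hand out a trace timestamp that never runs backwards: if the clock
     * read is older than the last stamp issued (possible when another CPU
     * issued that stamp), bump the previous stamp by one and use that.
     */
    static uint64_t
    next_trace_timestamp(void)
    {
            uint64_t now = mach_absolute_time();

            if (now < prev_timebase)
                    now = ++prev_timebase;  /* out of order -- adjust */
            else
                    prev_timebase = now;
            return (now);
    }
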
if ((kdebug_flags & KDBG_INIT) && (kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer) - kmem_free(kernel_map, (char *)kd_buffer, kd_bufsize); + kmem_free(kernel_map, (vm_offset_t)kd_buffer, kd_bufsize); if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) { - kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize); + kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize); kdebug_flags &= ~KDBG_MAPINIT; kd_mapsize = 0; kd_mapptr = (kd_threadmap *) 0; @@ -437,6 +428,17 @@ kdbg_reinit() return(ret); } +void kdbg_trace_data(struct proc *proc, long *arg_pid) +{ + if (!proc) + *arg_pid = 0; + else + *arg_pid = proc->p_pid; + + return; +} + + void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4) { int i; @@ -484,11 +486,20 @@ kdbg_resolve_map(thread_act_t th_act, krt_t *t) if(t->count < t->maxcount) { mapptr=&t->map[t->count]; - mapptr->thread = (unsigned int)getshuttle_thread(th_act); - mapptr->valid = 1; + mapptr->thread = (unsigned int)th_act; (void) strncpy (mapptr->command, t->atts->task_comm, sizeof(t->atts->task_comm)-1); mapptr->command[sizeof(t->atts->task_comm)-1] = '\0'; + + /* + Some kernel threads have no associated pid. + We still need to mark the entry as valid. + */ + if (t->atts->pid) + mapptr->valid = t->atts->pid; + else + mapptr->valid = 1; + t->count++; } } @@ -527,14 +538,20 @@ void kdbg_mapinit() kd_mapsize = kd_mapcount * sizeof(kd_threadmap); if((kmem_alloc(kernel_map, & kd_maptomem, (vm_size_t)kd_mapsize) == KERN_SUCCESS)) + { kd_mapptr = (kd_threadmap *) kd_maptomem; + bzero(kd_mapptr, kd_mapsize); + } else kd_mapptr = (kd_threadmap *) 0; tts_mapsize = tts_count * sizeof(struct tts); if((kmem_alloc(kernel_map, & tts_maptomem, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) + { tts_mapptr = (struct tts *) tts_maptomem; + bzero(tts_mapptr, tts_mapsize); + } else tts_mapptr = (struct tts *) 0; @@ -553,6 +570,7 @@ void kdbg_mapinit() if (task_reference_try(p->task)) { tts_mapptr[i].task = p->task; + tts_mapptr[i].pid = p->p_pid; (void)strncpy(&tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm) - 1); i++; } @@ -573,9 +591,9 @@ void kdbg_mapinit() { akrt.atts = &tts_mapptr[i]; task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt); - task_deallocate(tts_mapptr[i].task); + task_deallocate((task_t) tts_mapptr[i].task); } - kmem_free(kernel_map, (char *)tts_mapptr, tts_mapsize); + kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize); } } @@ -591,14 +609,14 @@ int x; kdebug_flags &= (unsigned int)~KDBG_CKTYPES; kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK); kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE); - kmem_free(kernel_map, (char *)kd_buffer, kd_bufsize); + kmem_free(kernel_map, (vm_offset_t)kd_buffer, kd_bufsize); kd_buffer = (kd_buf *)0; kd_bufsize = 0; kd_prev_timebase = 0LL; /* Clean up the thread map buffer */ kdebug_flags &= ~KDBG_MAPINIT; - kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize); + kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize); kd_mapptr = (kd_threadmap *) 0; kd_mapsize = 0; kd_mapcount = 0; @@ -819,7 +837,7 @@ kdbg_readmap(kd_threadmap *buffer, size_t *number) if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) { - kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize); + kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize); kdebug_flags &= ~KDBG_MAPINIT; kd_mapsize = 0; kd_mapptr = (kd_threadmap *) 0; @@ -848,11 +866,11 @@ kdbg_getentropy (mach_timespec_t * buffer, size_t *number, int ms_timeout) if (kmem_alloc(kernel_map, 
&kd_entropy_buftomem, (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) { - kd_entropy_buffer = (mach_timespec_t *)kd_entropy_buftomem; + kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem; } else { - kd_entropy_buffer = (mach_timespec_t *) 0; + kd_entropy_buffer = (uint64_t *) 0; kd_entropy_count = 0; kd_entropy_indx = 0; return (EINVAL); @@ -885,8 +903,8 @@ kdbg_getentropy (mach_timespec_t * buffer, size_t *number, int ms_timeout) kd_entropy_count = 0; kd_entropy_indx = 0; kd_entropy_buftomem = 0; - kmem_free(kernel_map, (char *)kd_entropy_buffer, kd_entropy_bufsize); - kd_entropy_buffer = (mach_timespec_t *) 0; + kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize); + kd_entropy_buffer = (uint64_t *) 0; return(ret); } @@ -1025,9 +1043,9 @@ struct proc *p, *curproc; kdbg_mapinit(); break; case KERN_KDSETBUF: - /* We allow a maximum buffer size of 25% of memory */ + /* We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller */ /* 'value' is the desired number of trace entries */ - max_entries = (mem_size/4) / sizeof(kd_buf); + max_entries = (sane_size/4) / sizeof(kd_buf); if (value <= max_entries) nkdbufs = value; else @@ -1204,3 +1222,10 @@ kd_buf * my_kd_bufptr; } /* end if count */ return (EINVAL); } + +unsigned char *getProcName(struct proc *proc); +unsigned char *getProcName(struct proc *proc) { + + return (unsigned char *) &proc->p_comm; /* Return pointer to the proc name */ + +} diff --git a/bsd/kern/kern_aio.c b/bsd/kern/kern_aio.c new file mode 100644 index 000000000..06942bcb4 --- /dev/null +++ b/bsd/kern/kern_aio.c @@ -0,0 +1,2180 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +/* + * todo: + * 1) ramesh is looking into how to replace taking a reference on + * the user's map (vm_map_reference()) since it is believed that + * would not hold the process for us. + * 2) david is looking into a way for us to set the priority of the + * worker threads to match that of the user's thread when the + * async IO was queued. + */ + + +/* + * This file contains support for the POSIX 1003.1B AIO/LIO facility. 
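
For orientation, this is the user-level API the new file implements: a minimal sketch of one asynchronous read using the standard <aio.h> calls, polling with aio_error() and reaping the result with aio_return(). The helper name and the 1 ms poll interval are illustrative only; aio_suspend(), later in this file, is the blocking alternative to the polling loop.

    #include <aio.h>
    #include <errno.h>
    #include <string.h>
    #include <unistd.h>

    /* Queue one async read, poll until it finishes, reap the byte count. */
    static ssize_t
    read_async(int fd, char *buf, size_t len)
    {
            struct aiocb cb;
            int err;

            memset(&cb, 0, sizeof(cb));
            cb.aio_fildes = fd;       /* checked in-kernel by aio_validate() */
            cb.aio_buf    = buf;
            cb.aio_nbytes = len;
            cb.aio_offset = 0;

            if (aio_read(&cb) != 0)   /* queues the request, returns at once */
                    return (-1);

            while ((err = aio_error(&cb)) == EINPROGRESS)
                    usleep(1000);     /* poll; aio_suspend() would block */

            return ((err == 0) ? aio_return(&cb) : -1);
    }
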
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+
+#include
+#define AIO_work_queued 1
+#define AIO_worker_wake 2
+#define AIO_completion_sig 3
+#define AIO_completion_cleanup_wait 4
+#define AIO_completion_cleanup_wake 5
+#define AIO_completion_suspend_wake 6
+#define AIO_fsync_delay 7
+#define AIO_cancel 10
+#define AIO_cancel_async_workq 11
+#define AIO_cancel_sync_workq 12
+#define AIO_cancel_activeq 13
+#define AIO_cancel_doneq 14
+#define AIO_fsync 20
+#define AIO_read 30
+#define AIO_write 40
+#define AIO_listio 50
+#define AIO_error 60
+#define AIO_error_val 61
+#define AIO_error_activeq 62
+#define AIO_error_workq 63
+#define AIO_return 70
+#define AIO_return_val 71
+#define AIO_return_activeq 72
+#define AIO_return_workq 73
+#define AIO_exec 80
+#define AIO_exit 90
+#define AIO_exit_sleep 91
+#define AIO_close 100
+#define AIO_close_sleep 101
+#define AIO_suspend 110
+#define AIO_suspend_sleep 111
+#define AIO_worker_thread 120
+
+#if 0
+#undef KERNEL_DEBUG
+#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
+#endif
+
+/*
+ * aio requests queue up on the aio_async_workq or lio_sync_workq (for
+ * lio_listio LIO_WAIT). Requests then move to the per process aio_activeq
+ * (proc.aio_activeq) when one of our worker threads starts the IO.
+ * And finally, requests move to the per process aio_doneq (proc.aio_doneq)
+ * when the IO request completes. The request remains on aio_doneq until
+ * the user process calls aio_return or the process exits; either way, that
+ * is our trigger to release aio resources.
+ */
+struct aio_anchor_cb
+{
+	int aio_async_workq_count; 	/* entries on aio_async_workq */
+	int lio_sync_workq_count; 	/* entries on lio_sync_workq */
+	int aio_active_count; 	/* entries on all active queues (proc.aio_activeq) */
+	int aio_done_count; 	/* entries on all done queues (proc.aio_doneq) */
+	TAILQ_HEAD( , aio_workq_entry ) aio_async_workq;
+	TAILQ_HEAD( , aio_workq_entry ) lio_sync_workq;
+};
+typedef struct aio_anchor_cb aio_anchor_cb;
+
+
+/*
+ * Notes on aio sleep / wake channels.
+ * We currently pick a couple of fields within the proc structure that give
+ * us sleep channels that do not collide with any other kernel routines.
+ * At this time, for binary compatibility reasons, we cannot create new proc fields.
+ */
+#define AIO_SUSPEND_SLEEP_CHAN p_estcpu
+#define AIO_CLEANUP_SLEEP_CHAN p_pctcpu
+
+
+/*
+ * async IO locking macros used to protect critical sections.
+ */ +#define AIO_LOCK usimple_lock( &aio_lock ) +#define AIO_UNLOCK usimple_unlock( &aio_lock ) + + +/* + * LOCAL PROTOTYPES + */ +static int aio_active_requests_for_process( struct proc *procp ); +static boolean_t aio_delay_fsync_request( aio_workq_entry *entryp ); +static int aio_free_request( aio_workq_entry *entryp, vm_map_t the_map ); +static int aio_get_all_queues_count( void ); +static int aio_get_process_count( struct proc *procp ); +static aio_workq_entry * aio_get_some_work( void ); +static boolean_t aio_last_group_io( aio_workq_entry *entryp ); +static void aio_mark_requests( aio_workq_entry *entryp ); +static int aio_queue_async_request( struct proc *procp, + struct aiocb *aiocbp, + int kindOfIO ); +static int aio_validate( aio_workq_entry *entryp ); +static void aio_work_thread( void ); +static int do_aio_cancel( struct proc *p, + int fd, + struct aiocb *aiocbp, + boolean_t wait_for_completion, + boolean_t disable_notification ); +static void do_aio_completion( aio_workq_entry *entryp ); +static int do_aio_fsync( aio_workq_entry *entryp ); +static int do_aio_read( aio_workq_entry *entryp ); +static int do_aio_write( aio_workq_entry *entryp ); +static boolean_t is_already_queued( struct proc *procp, + struct aiocb *aiocbp ); +static int lio_create_async_entry( struct proc *procp, + struct aiocb *aiocbp, + struct sigevent *sigp, + long group_tag, + aio_workq_entry **entrypp ); +static int lio_create_sync_entry( struct proc *procp, + struct aiocb *aiocbp, + long group_tag, + aio_workq_entry **entrypp ); + +/* + * EXTERNAL PROTOTYPES + */ + +/* in ...bsd/kern/sys_generic.c */ +extern struct file* holdfp( struct filedesc* fdp, int fd, int flag ); +extern int dofileread( struct proc *p, struct file *fp, int fd, + void *buf, size_t nbyte, off_t offset, + int flags, int *retval ); +extern int dofilewrite( struct proc *p, struct file *fp, int fd, + const void *buf, size_t nbyte, off_t offset, + int flags, int *retval ); +extern vm_map_t vm_map_switch( vm_map_t map ); + + +/* + * aio external global variables. + */ +extern int aio_max_requests; /* AIO_MAX - configurable */ +extern int aio_max_requests_per_process; /* AIO_PROCESS_MAX - configurable */ +extern int aio_worker_threads; /* AIO_THREAD_COUNT - configurable */ + + +/* + * aio static variables. + */ +static aio_anchor_cb aio_anchor; +static simple_lock_data_t aio_lock; +static struct zone *aio_workq_zonep; + + +/* + * syscall input parameters + */ +#ifndef _SYS_SYSPROTO_H_ + +struct aio_cancel_args { + int fd; + struct aiocb *aiocbp; +}; + +struct aio_error_args { + struct aiocb *aiocbp; +}; + +struct aio_fsync_args { + int op; + struct aiocb *aiocbp; +}; + +struct aio_read_args { + struct aiocb *aiocbp; +}; + +struct aio_return_args { + struct aiocb *aiocbp; +}; + +struct aio_suspend_args { + struct aiocb *const *aiocblist; + int nent; + const struct timespec *timeoutp; +}; + +struct aio_write_args { + struct aiocb *aiocbp; +}; + +struct lio_listio_args { + int mode; + struct aiocb *const *aiocblist; + int nent; + struct sigevent *sigp; +}; + +#endif /* _SYS_SYSPROTO_H_ */ + + +/* + * aio_cancel - attempt to cancel one or more async IO requests currently + * outstanding against file descriptor uap->fd. If uap->aiocbp is not + * NULL then only one specific IO is cancelled (if possible). If uap->aiocbp + * is NULL then all outstanding async IO request for the given file + * descriptor are cancelled (if possible). 
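
The lifecycle just described (aio_async_workq, then the per-process aio_activeq, then aio_doneq) is ordinary TAILQ manipulation performed under AIO_LOCK. A free-standing sketch with hypothetical names; the real queues link through aio_workq_link and also maintain the counters in aio_anchor_cb:

    #include <sys/queue.h>

    struct request {
            TAILQ_ENTRY(request) link;      /* stands in for aio_workq_link */
            int result;
    };
    TAILQ_HEAD(reqq, request);

    static struct reqq workq   = TAILQ_HEAD_INITIALIZER(workq);   /* ~aio_async_workq */
    static struct reqq activeq = TAILQ_HEAD_INITIALIZER(activeq); /* ~proc.aio_activeq */
    static struct reqq doneq   = TAILQ_HEAD_INITIALIZER(doneq);   /* ~proc.aio_doneq */

    /* One lifecycle step: a worker claims the oldest queued request.
     * Locking omitted here; in kern_aio.c every such move sits between
     * AIO_LOCK and AIO_UNLOCK. */
    static struct request *
    claim_next(void)
    {
            struct request *r = TAILQ_FIRST(&workq);

            if (r != NULL) {
                    TAILQ_REMOVE(&workq, r, link);
                    TAILQ_INSERT_TAIL(&activeq, r, link);
            }
            return (r);
    }

    /* The IO is finished: park it until the "aio_return" step frees it. */
    static void
    finish(struct request *r)
    {
            TAILQ_REMOVE(&activeq, r, link);
            TAILQ_INSERT_TAIL(&doneq, r, link);
    }
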
+ */
+
+int
+aio_cancel( struct proc *p, struct aio_cancel_args *uap, int *retval )
+{
+	struct aiocb my_aiocb;
+	int result;
+	boolean_t funnel_state;
+
+	KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel)) | DBG_FUNC_START,
+		     (int)p, (int)uap->aiocbp, 0, 0, 0 );
+
+	/* quick check to see if there are any async IO requests queued up */
+	AIO_LOCK;
+	result = aio_get_all_queues_count( );
+	AIO_UNLOCK;
+	if ( result < 1 ) {
+		result = EBADF;
+		goto ExitRoutine;
+	}
+
+	*retval = -1;
+	if ( uap->aiocbp != NULL ) {
+		result = copyin( uap->aiocbp, &my_aiocb, sizeof(my_aiocb) );
+		if ( result != 0 ) {
+			result = EAGAIN;
+			goto ExitRoutine;
+		}
+
+		/* NOTE - POSIX standard says a mismatch between the file */
+		/* descriptor passed in and the file descriptor embedded in */
+		/* the aiocb causes unspecified results. We return EBADF in */
+		/* that situation. */
+		if ( uap->fd != my_aiocb.aio_fildes ) {
+			result = EBADF;
+			goto ExitRoutine;
+		}
+	}
+
+	/* current BSD code assumes funnel lock is held */
+	funnel_state = thread_funnel_set( kernel_flock, TRUE );
+	result = do_aio_cancel( p, uap->fd, uap->aiocbp, FALSE, FALSE );
+	(void) thread_funnel_set( kernel_flock, funnel_state );
+
+	if ( result != -1 ) {
+		*retval = result;
+		result = 0;
+		goto ExitRoutine;
+	}
+
+	result = EBADF;
+
+ExitRoutine:
+	KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel)) | DBG_FUNC_END,
+		     (int)p, (int)uap->aiocbp, result, 0, 0 );
+
+	return( result );
+
+} /* aio_cancel */
+
+
+/*
+ * _aio_close - internal function used to clean up async IO requests for
+ * a file descriptor that is closing.
+ * NOTE - kernel funnel lock is held when we get called.
+ * THIS MAY BLOCK.
+ */
+
+__private_extern__ void
+_aio_close( struct proc *p, int fd )
+{
+	int error, count;
+
+	/* quick check to see if there are any async IO requests queued up */
+	AIO_LOCK;
+	count = aio_get_all_queues_count( );
+	AIO_UNLOCK;
+	if ( count < 1 )
+		return;
+
+	KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_close)) | DBG_FUNC_START,
+		     (int)p, fd, 0, 0, 0 );
+
+	/* cancel all async IO requests on our todo queues for this file descriptor */
+	error = do_aio_cancel( p, fd, NULL, TRUE, FALSE );
+	if ( error == AIO_NOTCANCELED ) {
+		/*
+		 * AIO_NOTCANCELED is returned when we find an aio request for this process
+		 * and file descriptor on the active async IO queue. Active requests cannot
+		 * be cancelled so we must wait for them to complete. We will get a special
+		 * wake up call on our channel used to sleep for ALL active requests to
+		 * complete. This sleep channel (proc.AIO_CLEANUP_SLEEP_CHAN) is only used
+		 * when we must wait for all active aio requests.
+		 */
+
+		KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_close_sleep)) | DBG_FUNC_NONE,
+			     (int)p, fd, 0, 0, 0 );
+
+		tsleep( &p->AIO_CLEANUP_SLEEP_CHAN, PRIBIO, "aio_close", 0 );
+	}
+
+	KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_close)) | DBG_FUNC_END,
+		     (int)p, fd, 0, 0, 0 );
+
+	return;
+
+} /* _aio_close */
+
+
+/*
+ * aio_error - return the error status associated with the async IO
+ * request referred to by uap->aiocbp. The error status is the errno
+ * value that would be set by the corresponding IO request (read, write,
+ * fdatasync, or sync).
+ */ + +int +aio_error( struct proc *p, struct aio_error_args *uap, int *retval ) +{ + aio_workq_entry *entryp; + int error; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_error)) | DBG_FUNC_START, + (int)p, (int)uap->aiocbp, 0, 0, 0 ); + + AIO_LOCK; + + /* quick check to see if there are any async IO requests queued up */ + if ( aio_get_all_queues_count( ) < 1 ) { + error = EINVAL; + goto ExitRoutine; + } + + /* look for a match on our queue of async IO requests that have completed */ + TAILQ_FOREACH( entryp, &p->aio_doneq, aio_workq_link ) { + if ( entryp->uaiocbp == uap->aiocbp ) { + *retval = entryp->errorval; + error = 0; + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_error_val)) | DBG_FUNC_NONE, + (int)p, (int)uap->aiocbp, *retval, 0, 0 ); + goto ExitRoutine; + } + } + + /* look for a match on our queue of active async IO requests */ + TAILQ_FOREACH( entryp, &p->aio_activeq, aio_workq_link ) { + if ( entryp->uaiocbp == uap->aiocbp ) { + *retval = EINPROGRESS; + error = 0; + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_error_activeq)) | DBG_FUNC_NONE, + (int)p, (int)uap->aiocbp, *retval, 0, 0 ); + goto ExitRoutine; + } + } + + /* look for a match on our queue of todo work */ + TAILQ_FOREACH( entryp, &aio_anchor.aio_async_workq, aio_workq_link ) { + if ( p == entryp->procp && entryp->uaiocbp == uap->aiocbp ) { + *retval = EINPROGRESS; + error = 0; + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_error_workq)) | DBG_FUNC_NONE, + (int)p, (int)uap->aiocbp, *retval, 0, 0 ); + goto ExitRoutine; + } + } + error = EINVAL; + +ExitRoutine: + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_error)) | DBG_FUNC_END, + (int)p, (int)uap->aiocbp, error, 0, 0 ); + AIO_UNLOCK; + + return( error ); + +} /* aio_error */ + + +/* + * aio_fsync - asynchronously force all IO operations associated + * with the file indicated by the file descriptor (uap->aiocbp->aio_fildes) and + * queued at the time of the call to the synchronized completion state. + * NOTE - we do not support op O_DSYNC at this point since we do not support the + * fdatasync() call. + */ + +int +aio_fsync( struct proc *p, struct aio_fsync_args *uap, int *retval ) +{ + int error; + int fsync_kind; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_fsync)) | DBG_FUNC_START, + (int)p, (int)uap->aiocbp, uap->op, 0, 0 ); + + *retval = 0; + if ( uap->op == O_SYNC ) + fsync_kind = AIO_FSYNC; +#if 0 // we don't support fdatasync() call yet + else if ( uap->op == O_DSYNC ) + fsync_kind = AIO_DSYNC; +#endif + else { + *retval = -1; + error = EINVAL; + goto ExitRoutine; + } + + error = aio_queue_async_request( p, uap->aiocbp, fsync_kind ); + if ( error != 0 ) + *retval = -1; + +ExitRoutine: + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_fsync)) | DBG_FUNC_END, + (int)p, (int)uap->aiocbp, error, 0, 0 ); + + return( error ); + +} /* aio_fsync */ + + +/* aio_read - asynchronously read uap->aiocbp->aio_nbytes bytes from the + * file descriptor (uap->aiocbp->aio_fildes) into the buffer + * (uap->aiocbp->aio_buf). 
+ */
+
+int
+aio_read( struct proc *p, struct aio_read_args *uap, int *retval )
+{
+	int error;
+
+	KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_read)) | DBG_FUNC_START,
+		     (int)p, (int)uap->aiocbp, 0, 0, 0 );
+
+	*retval = 0;
+
+	error = aio_queue_async_request( p, uap->aiocbp, AIO_READ );
+	if ( error != 0 )
+		*retval = -1;
+
+	KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_read)) | DBG_FUNC_END,
+		     (int)p, (int)uap->aiocbp, error, 0, 0 );
+
+	return( error );
+
+} /* aio_read */
+
+
+/*
+ * aio_return - return the return status associated with the async IO
+ * request referred to by uap->aiocbp. The return status is the value
+ * that would be returned by the corresponding IO request (read, write,
+ * fdatasync, or sync). This is where we release kernel resources
+ * held for the async IO call associated with the given aiocb pointer.
+ */
+
+int
+aio_return( struct proc *p, struct aio_return_args *uap, register_t *retval )
+{
+	aio_workq_entry *entryp;
+	int error;
+	boolean_t lock_held;
+
+	KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_return)) | DBG_FUNC_START,
+		     (int)p, (int)uap->aiocbp, 0, 0, 0 );
+
+	AIO_LOCK;
+	lock_held = TRUE;
+	*retval = 0;
+
+	/* quick check to see if there are any async IO requests queued up */
+	if ( aio_get_all_queues_count( ) < 1 ) {
+		error = EINVAL;
+		goto ExitRoutine;
+	}
+
+	/* look for a match on our queue of async IO requests that have completed */
+	TAILQ_FOREACH( entryp, &p->aio_doneq, aio_workq_link ) {
+		if ( entryp->uaiocbp == uap->aiocbp ) {
+			TAILQ_REMOVE( &p->aio_doneq, entryp, aio_workq_link );
+			aio_anchor.aio_done_count--;
+			p->aio_done_count--;
+
+			*retval = entryp->returnval;
+
+			/* we cannot free requests that are still completing */
+			if ( (entryp->flags & AIO_COMPLETION) == 0 ) {
+				vm_map_t my_map;
+
+				my_map = entryp->aio_map;
+				entryp->aio_map = VM_MAP_NULL;
+				AIO_UNLOCK;
+				lock_held = FALSE;
+				aio_free_request( entryp, my_map );
+			}
+			else
+				/* tell completion code to free this request */
+				entryp->flags |= AIO_DO_FREE;
+			error = 0;
+			KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_return_val)) | DBG_FUNC_NONE,
+				     (int)p, (int)uap->aiocbp, *retval, 0, 0 );
+			goto ExitRoutine;
+		}
+	}
+
+	/* look for a match on our queue of active async IO requests */
+	TAILQ_FOREACH( entryp, &p->aio_activeq, aio_workq_link ) {
+		if ( entryp->uaiocbp == uap->aiocbp ) {
+			error = EINPROGRESS;
+			KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_return_activeq)) | DBG_FUNC_NONE,
+				     (int)p, (int)uap->aiocbp, *retval, 0, 0 );
+			goto ExitRoutine;
+		}
+	}
+
+	/* look for a match on our queue of todo work */
+	TAILQ_FOREACH( entryp, &aio_anchor.aio_async_workq, aio_workq_link ) {
+		if ( p == entryp->procp && entryp->uaiocbp == uap->aiocbp ) {
+			error = EINPROGRESS;
+			KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_return_workq)) | DBG_FUNC_NONE,
+				     (int)p, (int)uap->aiocbp, *retval, 0, 0 );
+			goto ExitRoutine;
+		}
+	}
+	error = EINVAL;
+
+ExitRoutine:
+	if ( lock_held )
+		AIO_UNLOCK;
+	KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_return)) | DBG_FUNC_END,
+		     (int)p, (int)uap->aiocbp, error, 0, 0 );
+
+	return( error );
+
+} /* aio_return */
+
+
+/*
+ * _aio_exec - internal function used to clean up async IO requests for
+ * a process that is going away due to exec(). We cancel any async IOs
+ * we can and wait for those already active. We also disable signaling
+ * for cancelled or active aio requests that complete.
+ * NOTE - kernel funnel lock is held when we get called.
+ * This routine MAY block!
+ */ + +__private_extern__ void +_aio_exec( struct proc *p ) +{ + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_exec)) | DBG_FUNC_START, + (int)p, 0, 0, 0, 0 ); + + _aio_exit( p ); + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_exec)) | DBG_FUNC_END, + (int)p, 0, 0, 0, 0 ); + + return; + +} /* _aio_exec */ + + +/* + * _aio_exit - internal function used to clean up async IO requests for + * a process that is terminating (via exit() or exec() ). We cancel any async IOs + * we can and wait for those already active. We also disable signaling + * for cancelled or active aio requests that complete. This routine MAY block! + * NOTE - kernel funnel lock is held when we get called. + */ + +__private_extern__ void +_aio_exit( struct proc *p ) +{ + int error, count; + aio_workq_entry *entryp; + + /* quick check to see if there are any async IO requests queued up */ + AIO_LOCK; + count = aio_get_all_queues_count( ); + AIO_UNLOCK; + if ( count < 1 ) { + return; + } + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_exit)) | DBG_FUNC_START, + (int)p, 0, 0, 0, 0 ); + + /* + * cancel async IO requests on the todo work queue and wait for those + * already active to complete. + */ + error = do_aio_cancel( p, 0, NULL, TRUE, TRUE ); + if ( error == AIO_NOTCANCELED ) { + /* + * AIO_NOTCANCELED is returned when we find an aio request for this process + * on the active async IO queue. Active requests cannot be cancelled so we + * must wait for them to complete. We will get a special wake up call on + * our channel used to sleep for ALL active requests to complete. This sleep + * channel (proc.AIO_CLEANUP_SLEEP_CHAN) is only used when we must wait for all + * active aio requests. + */ + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_exit_sleep)) | DBG_FUNC_NONE, + (int)p, 0, 0, 0, 0 ); + + tsleep( &p->AIO_CLEANUP_SLEEP_CHAN, PRIBIO, "aio_exit", 0 ); + } + + /* release all aio resources used by this process */ + AIO_LOCK; + entryp = TAILQ_FIRST( &p->aio_doneq ); + while ( entryp != NULL ) { + aio_workq_entry *next_entryp; + + next_entryp = TAILQ_NEXT( entryp, aio_workq_link ); + TAILQ_REMOVE( &p->aio_doneq, entryp, aio_workq_link ); + aio_anchor.aio_done_count--; + p->aio_done_count--; + + /* we cannot free requests that are still completing */ + if ( (entryp->flags & AIO_COMPLETION) == 0 ) { + vm_map_t my_map; + + my_map = entryp->aio_map; + entryp->aio_map = VM_MAP_NULL; + AIO_UNLOCK; + aio_free_request( entryp, my_map ); + + /* need to start over since aio_doneq may have been */ + /* changed while we were away. */ + AIO_LOCK; + entryp = TAILQ_FIRST( &p->aio_doneq ); + continue; + } + else + /* tell completion code to free this request */ + entryp->flags |= AIO_DO_FREE; + entryp = next_entryp; + } + AIO_UNLOCK; + +ExitRoutine: + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_exit)) | DBG_FUNC_END, + (int)p, 0, 0, 0, 0 ); + + return; + +} /* _aio_exit */ + + +/* + * do_aio_cancel - cancel async IO requests (if possible). We get called by + * aio_cancel, close, and at exit. + * There are three modes of operation: 1) cancel all async IOs for a process - + * fd is 0 and aiocbp is NULL 2) cancel all async IOs for file descriptor - fd + * is > 0 and aiocbp is NULL 3) cancel one async IO associated with the given + * aiocbp. + * Returns -1 if no matches were found, AIO_CANCELED when we cancelled all + * target async IO requests, AIO_NOTCANCELED if we could not cancel all + * target async IO requests, and AIO_ALLDONE if all target async IO requests + * were already complete. 
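
Seen from user space, the three do_aio_cancel() results surface directly as the return value of aio_cancel(2). A hedged example of handling each outcome; the constants are standard, while the helper name and the polling wait are illustrative:

    #include <aio.h>
    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Try to cancel one outstanding request on fd (passing a NULL aiocb to
     * aio_cancel() would instead target every request queued against fd). */
    static void
    cancel_one(int fd, struct aiocb *cb)
    {
            switch (aio_cancel(fd, cb)) {
            case AIO_CANCELED:              /* pulled off the todo queue */
                    break;
            case AIO_NOTCANCELED:           /* a worker thread already owns it */
                    while (aio_error(cb) == EINPROGRESS)
                            usleep(1000);
                    break;
            case AIO_ALLDONE:               /* finished before we asked */
                    break;
            default:
                    perror("aio_cancel");
                    return;
            }
            (void)aio_return(cb);           /* reap: releases kernel resources */
    }
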
+ * WARNING - do not deference aiocbp in this routine, it may point to user + * land data that has not been copied in (when called from aio_cancel() ) + * NOTE - kernel funnel lock is held when we get called. + */ + +static int +do_aio_cancel( struct proc *p, int fd, struct aiocb *aiocbp, + boolean_t wait_for_completion, boolean_t disable_notification ) +{ + aio_workq_entry *entryp; + int result; + + result = -1; + + /* look for a match on our queue of async todo work. */ + AIO_LOCK; + entryp = TAILQ_FIRST( &aio_anchor.aio_async_workq ); + while ( entryp != NULL ) { + aio_workq_entry *next_entryp; + + next_entryp = TAILQ_NEXT( entryp, aio_workq_link ); + if ( p == entryp->procp ) { + if ( (aiocbp == NULL && fd == 0) || + (aiocbp != NULL && entryp->uaiocbp == aiocbp) || + (aiocbp == NULL && fd == entryp->aiocb.aio_fildes) ) { + /* we found a match so we remove the entry from the */ + /* todo work queue and place it on the done queue */ + TAILQ_REMOVE( &aio_anchor.aio_async_workq, entryp, aio_workq_link ); + aio_anchor.aio_async_workq_count--; + entryp->errorval = ECANCELED; + entryp->returnval = -1; + if ( disable_notification ) + entryp->flags |= AIO_DISABLE; /* flag for special completion processing */ + result = AIO_CANCELED; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_async_workq)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, fd, 0, 0 ); + + TAILQ_INSERT_TAIL( &p->aio_doneq, entryp, aio_workq_link ); + aio_anchor.aio_done_count++; + p->aio_done_count++; + entryp->flags |= AIO_COMPLETION; + AIO_UNLOCK; + + /* do completion processing for this request */ + do_aio_completion( entryp ); + + AIO_LOCK; + entryp->flags &= ~AIO_COMPLETION; + if ( (entryp->flags & AIO_DO_FREE) != 0 ) { + vm_map_t my_map; + + my_map = entryp->aio_map; + entryp->aio_map = VM_MAP_NULL; + AIO_UNLOCK; + aio_free_request( entryp, my_map ); + } + else + AIO_UNLOCK; + + if ( aiocbp != NULL ) { + return( result ); + } + + /* need to start over since aio_async_workq may have been */ + /* changed while we were away doing completion processing. */ + AIO_LOCK; + entryp = TAILQ_FIRST( &aio_anchor.aio_async_workq ); + continue; + } + } + entryp = next_entryp; + } /* while... */ + + /* + * look for a match on our queue of synchronous todo work. This will + * be a rare occurrence but could happen if a process is terminated while + * processing a lio_listio call. + */ + entryp = TAILQ_FIRST( &aio_anchor.lio_sync_workq ); + while ( entryp != NULL ) { + aio_workq_entry *next_entryp; + + next_entryp = TAILQ_NEXT( entryp, aio_workq_link ); + if ( p == entryp->procp ) { + if ( (aiocbp == NULL && fd == 0) || + (aiocbp != NULL && entryp->uaiocbp == aiocbp) || + (aiocbp == NULL && fd == entryp->aiocb.aio_fildes) ) { + /* we found a match so we remove the entry from the */ + /* todo work queue and place it on the done queue */ + TAILQ_REMOVE( &aio_anchor.lio_sync_workq, entryp, aio_workq_link ); + aio_anchor.lio_sync_workq_count--; + entryp->errorval = ECANCELED; + entryp->returnval = -1; + if ( disable_notification ) + entryp->flags |= AIO_DISABLE; /* flag for special completion processing */ + result = AIO_CANCELED; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_sync_workq)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, fd, 0, 0 ); + + TAILQ_INSERT_TAIL( &p->aio_doneq, entryp, aio_workq_link ); + aio_anchor.aio_done_count++; + p->aio_done_count++; + if ( aiocbp != NULL ) { + AIO_UNLOCK; + return( result ); + } + } + } + entryp = next_entryp; + } /* while... 
*/ + + /* + * look for a match on our queue of active async IO requests and + * return AIO_NOTCANCELED result. + */ + TAILQ_FOREACH( entryp, &p->aio_activeq, aio_workq_link ) { + if ( (aiocbp == NULL && fd == 0) || + (aiocbp != NULL && entryp->uaiocbp == aiocbp) || + (aiocbp == NULL && fd == entryp->aiocb.aio_fildes) ) { + result = AIO_NOTCANCELED; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_activeq)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, fd, 0, 0 ); + + if ( wait_for_completion ) + entryp->flags |= AIO_WAITING; /* flag for special completion processing */ + if ( disable_notification ) + entryp->flags |= AIO_DISABLE; /* flag for special completion processing */ + if ( aiocbp != NULL ) { + AIO_UNLOCK; + return( result ); + } + } + } + + /* + * if we didn't find any matches on the todo or active queues then look for a + * match on our queue of async IO requests that have completed and if found + * return AIO_ALLDONE result. + */ + if ( result == -1 ) { + TAILQ_FOREACH( entryp, &p->aio_doneq, aio_workq_link ) { + if ( (aiocbp == NULL && fd == 0) || + (aiocbp != NULL && entryp->uaiocbp == aiocbp) || + (aiocbp == NULL && fd == entryp->aiocb.aio_fildes) ) { + result = AIO_ALLDONE; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_doneq)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, fd, 0, 0 ); + + if ( aiocbp != NULL ) { + AIO_UNLOCK; + return( result ); + } + } + } + } + AIO_UNLOCK; + + return( result ); + +} /* do_aio_cancel */ + + +/* + * aio_suspend - suspend the calling thread until at least one of the async + * IO operations referenced by uap->aiocblist has completed, until a signal + * interrupts the function, or uap->timeoutp time interval (optional) has + * passed. + * Returns 0 if one or more async IOs have completed else -1 and errno is + * set appropriately - EAGAIN if timeout elapses or EINTR if an interrupt + * woke us up. 
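
A user-level sketch matching those semantics: aio_suspend() returns 0 once any listed request has completed, and otherwise fails with errno set to EAGAIN (timeout) or EINTR (signal). The helper name and the half-second timeout are illustrative:

    #include <aio.h>
    #include <errno.h>
    #include <time.h>

    /* Wait up to half a second for either of two queued requests to finish. */
    static int
    wait_for_either(const struct aiocb *a, const struct aiocb *b)
    {
            const struct aiocb *list[2] = { a, b };  /* NULL entries are legal */
            struct timespec ts = { 0, 500 * 1000 * 1000 };

            return (aio_suspend(list, 2, &ts));
    }
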
+ */ + +int +aio_suspend( struct proc *p, struct aio_suspend_args *uap, int *retval ) +{ + int error; + int i, count; + uint64_t abstime; + struct timespec ts; + struct timeval tv; + aio_workq_entry *entryp; + struct aiocb * *aiocbpp; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend)) | DBG_FUNC_START, + (int)p, uap->nent, 0, 0, 0 ); + + *retval = -1; + abstime = 0; + aiocbpp = NULL; + + /* quick check to see if there are any async IO requests queued up */ + AIO_LOCK; + count = aio_get_all_queues_count( ); + AIO_UNLOCK; + if ( count < 1 ) { + error = EINVAL; + goto ExitThisRoutine; + } + + if ( uap->nent < 1 || uap->nent > AIO_LISTIO_MAX ) { + error = EINVAL; + goto ExitThisRoutine; + } + + if ( uap->timeoutp != NULL ) { + error = copyin( (void *)uap->timeoutp, &ts, sizeof(ts) ); + if ( error != 0 ) { + error = EAGAIN; + goto ExitThisRoutine; + } + + if ( ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000 ) { + error = EINVAL; + goto ExitThisRoutine; + } + + nanoseconds_to_absolutetime( (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, + &abstime ); + clock_absolutetime_interval_to_deadline( abstime, &abstime ); + } + + MALLOC( aiocbpp, void *, (uap->nent * sizeof(struct aiocb *)), M_TEMP, M_WAITOK ); + if ( aiocbpp == NULL ) { + error = EAGAIN; + goto ExitThisRoutine; + } + + /* check list of aio requests to see if any have completed */ + for ( i = 0; i < uap->nent; i++ ) { + struct aiocb *aiocbp; + + /* copyin in aiocb pointer from list */ + error = copyin( (void *)(uap->aiocblist + i), (aiocbpp + i), sizeof(aiocbp) ); + if ( error != 0 ) { + error = EAGAIN; + goto ExitThisRoutine; + } + + /* NULL elements are legal so check for 'em */ + aiocbp = *(aiocbpp + i); + if ( aiocbp == NULL ) + continue; + + /* return immediately if any aio request in the list is done */ + AIO_LOCK; + TAILQ_FOREACH( entryp, &p->aio_doneq, aio_workq_link ) { + if ( entryp->uaiocbp == aiocbp ) { + *retval = 0; + error = 0; + AIO_UNLOCK; + goto ExitThisRoutine; + } + } + AIO_UNLOCK; + } /* for ( ; i < uap->nent; ) */ + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend_sleep)) | DBG_FUNC_NONE, + (int)p, uap->nent, 0, 0, 0 ); + + /* + * wait for an async IO to complete or a signal fires or timeout expires. + * we return EAGAIN (35) for timeout expiration and EINTR (4) when a signal + * interrupts us. If an async IO completes before a signal fires or our + * timeout expires, we get a wakeup call from aio_work_thread(). We do not + * use tsleep() here in order to avoid getting kernel funnel lock. + */ + assert_wait( (event_t) &p->AIO_SUSPEND_SLEEP_CHAN, THREAD_ABORTSAFE ); + if ( abstime > 0 ) { + thread_set_timer_deadline( abstime ); + } + error = thread_block( THREAD_CONTINUE_NULL ); + if ( error == THREAD_AWAKENED ) { + /* got our wakeup call from aio_work_thread() */ + if ( abstime > 0 ) { + thread_cancel_timer(); + } + *retval = 0; + error = 0; + } + else if ( error == THREAD_TIMED_OUT ) { + /* our timeout expired */ + error = EAGAIN; + } + else { + /* we were interrupted */ + if ( abstime > 0 ) { + thread_cancel_timer(); + } + error = EINTR; + } + +ExitThisRoutine: + if ( aiocbpp != NULL ) + FREE( aiocbpp, M_TEMP ); + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend)) | DBG_FUNC_END, + (int)p, uap->nent, error, 0, 0 ); + + return( error ); + +} /* aio_suspend */ + + +/* aio_write - asynchronously write uap->aiocbp->aio_nbytes bytes to the + * file descriptor (uap->aiocbp->aio_fildes) from the buffer + * (uap->aiocbp->aio_buf). 
+ */ + +int +aio_write( struct proc *p, struct aio_write_args *uap, int *retval ) +{ + int error; + + *retval = 0; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_write)) | DBG_FUNC_START, + (int)p, (int)uap->aiocbp, 0, 0, 0 ); + + error = aio_queue_async_request( p, uap->aiocbp, AIO_WRITE ); + if ( error != 0 ) + *retval = -1; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_write)) | DBG_FUNC_END, + (int)p, (int)uap->aiocbp, error, 0, 0 ); + + return( error ); + +} /* aio_write */ + + +/* + * lio_listio - initiate a list of IO requests. We process the list of aiocbs + * either synchronously (mode == LIO_WAIT) or asynchronously (mode == LIO_NOWAIT). + * The caller gets error and return status for each aiocb in the list via aio_error + * and aio_return. We must keep completed requests until released by the + * aio_return call. + */ + +int +lio_listio( struct proc *p, struct lio_listio_args *uap, int *retval ) +{ + int i; + int call_result; + int result; + long group_tag; + aio_workq_entry * *entryp_listp; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_listio)) | DBG_FUNC_START, + (int)p, uap->nent, uap->mode, 0, 0 ); + + entryp_listp = NULL; + call_result = -1; + *retval = -1; + if ( !(uap->mode == LIO_NOWAIT || uap->mode == LIO_WAIT) ) { + call_result = EINVAL; + goto ExitRoutine; + } + + if ( uap->nent < 1 || uap->nent > AIO_LISTIO_MAX ) { + call_result = EINVAL; + goto ExitRoutine; + } + + /* + * we use group_tag to mark IO requests for delayed completion processing + * which means we wait until all IO requests in the group have completed + * before we either return to the caller when mode is LIO_WAIT or signal + * user when mode is LIO_NOWAIT. + */ + group_tag = random(); + + /* + * allocate a list of aio_workq_entry pointers that we will use to queue + * up all our requests at once while holding our lock. + */ + MALLOC( entryp_listp, void *, (uap->nent * sizeof(struct aiocb *)), M_TEMP, M_WAITOK ); + if ( entryp_listp == NULL ) { + call_result = EAGAIN; + goto ExitRoutine; + } + + /* process list of aio requests */ + for ( i = 0; i < uap->nent; i++ ) { + struct aiocb *my_aiocbp; + + *(entryp_listp + i) = NULL; + + /* copyin in aiocb pointer from list */ + result = copyin( (void *)(uap->aiocblist + i), &my_aiocbp, sizeof(my_aiocbp) ); + if ( result != 0 ) { + call_result = EAGAIN; + continue; + } + + /* NULL elements are legal so check for 'em */ + if ( my_aiocbp == NULL ) + continue; + + if ( uap->mode == LIO_NOWAIT ) + result = lio_create_async_entry( p, my_aiocbp, uap->sigp, + group_tag, (entryp_listp + i) ); + else + result = lio_create_sync_entry( p, my_aiocbp, group_tag, + (entryp_listp + i) ); + + if ( result != 0 && call_result == -1 ) + call_result = result; + } + + /* + * we need to protect this section since we do not want any of these grouped + * IO requests to begin until we have them all on the queue. 
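
For reference, the matching user call: a minimal LIO_WAIT batch of two reads, which exercises exactly this queue-everything-before-starting path. The helper name is illustrative and error handling is trimmed:

    #include <aio.h>
    #include <string.h>

    /* Submit two reads as one batch; LIO_WAIT makes lio_listio() return only
     * after both finish (per-request status still via aio_error/aio_return). */
    static int
    read_two(int fd, char *b1, char *b2, size_t len)
    {
            struct aiocb c1, c2;
            struct aiocb *list[2] = { &c1, &c2 };

            memset(&c1, 0, sizeof(c1));
            c1.aio_fildes = fd;
            c1.aio_buf = b1;
            c1.aio_nbytes = len;
            c1.aio_offset = 0;
            c1.aio_lio_opcode = LIO_READ;

            c2 = c1;                        /* second chunk of the same file */
            c2.aio_buf = b2;
            c2.aio_offset = (off_t)len;

            return (lio_listio(LIO_WAIT, list, 2, NULL));
    }
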
+ */ + AIO_LOCK; + for ( i = 0; i < uap->nent; i++ ) { + aio_workq_entry *entryp; + + /* NULL elements are legal so check for 'em */ + entryp = *(entryp_listp + i); + if ( entryp == NULL ) + continue; + + /* check our aio limits to throttle bad or rude user land behavior */ + if ( aio_get_all_queues_count( ) >= aio_max_requests || + aio_get_process_count( entryp->procp ) >= aio_max_requests_per_process || + is_already_queued( entryp->procp, entryp->uaiocbp ) == TRUE ) { + vm_map_t my_map; + + my_map = entryp->aio_map; + entryp->aio_map = VM_MAP_NULL; + result = EAGAIN; + AIO_UNLOCK; + aio_free_request( entryp, my_map ); + AIO_LOCK; + continue; + } + + /* place the request on the appropriate queue */ + if ( uap->mode == LIO_NOWAIT ) { + TAILQ_INSERT_TAIL( &aio_anchor.aio_async_workq, entryp, aio_workq_link ); + aio_anchor.aio_async_workq_count++; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued)) | DBG_FUNC_NONE, + (int)p, (int)entryp->uaiocbp, 0, 0, 0 ); + } + else { + TAILQ_INSERT_TAIL( &aio_anchor.lio_sync_workq, entryp, aio_workq_link ); + aio_anchor.lio_sync_workq_count++; + } + } + AIO_UNLOCK; + + if ( uap->mode == LIO_NOWAIT ) + /* caller does not want to wait so we'll fire off a worker thread and return */ + wakeup_one( &aio_anchor.aio_async_workq ); + else { + aio_workq_entry *entryp; + int error; + + /* + * mode is LIO_WAIT - handle the IO requests now. + */ + AIO_LOCK; + entryp = TAILQ_FIRST( &aio_anchor.lio_sync_workq ); + while ( entryp != NULL ) { + if ( p == entryp->procp && group_tag == entryp->group_tag ) { + boolean_t funnel_state; + + TAILQ_REMOVE( &aio_anchor.lio_sync_workq, entryp, aio_workq_link ); + aio_anchor.lio_sync_workq_count--; + AIO_UNLOCK; + + // file system IO code path requires kernel funnel lock + funnel_state = thread_funnel_set( kernel_flock, TRUE ); + if ( (entryp->flags & AIO_READ) != 0 ) { + error = do_aio_read( entryp ); + } + else if ( (entryp->flags & AIO_WRITE) != 0 ) { + error = do_aio_write( entryp ); + } + else if ( (entryp->flags & AIO_FSYNC) != 0 ) { + error = do_aio_fsync( entryp ); + } + else { + printf( "%s - unknown aio request - flags 0x%02X \n", + __FUNCTION__, entryp->flags ); + error = EINVAL; + } + entryp->errorval = error; + if ( error != 0 && call_result == -1 ) + call_result = EIO; + (void) thread_funnel_set( kernel_flock, funnel_state ); + + AIO_LOCK; + /* we're done with the IO request so move it on the done queue */ + TAILQ_INSERT_TAIL( &p->aio_doneq, entryp, aio_workq_link ); + aio_anchor.aio_done_count++; + p->aio_done_count++; + + /* need to start over since lio_sync_workq may have been changed while we */ + /* were away doing the IO. */ + entryp = TAILQ_FIRST( &aio_anchor.lio_sync_workq ); + continue; + } /* p == entryp->procp */ + + entryp = TAILQ_NEXT( entryp, aio_workq_link ); + } /* while ( entryp != NULL ) */ + AIO_UNLOCK; + } /* uap->mode == LIO_WAIT */ + + /* call_result == -1 means we had no trouble queueing up requests */ + if ( call_result == -1 ) { + call_result = 0; + *retval = 0; + } + +ExitRoutine: + if ( entryp_listp != NULL ) + FREE( entryp_listp, M_TEMP ); + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_listio)) | DBG_FUNC_END, + (int)p, call_result, 0, 0, 0 ); + + return( call_result ); + +} /* lio_listio */ + + +/* + * aio worker thread. this is where all the real work gets done. + * we get a wake up call on sleep channel &aio_anchor.aio_async_workq + * after new work is queued up. 
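
Stripped of the funnel and address-space switching, the handoff between aio_queue_async_request() and these worker threads is the classic single-waiter wakeup pattern. The pthreads sketch below is purely an analogy for the roles that assert_wait()/thread_block() and wakeup_one() play here; it is not how the kernel code is structured:

    #include <pthread.h>

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  q_cv   = PTHREAD_COND_INITIALIZER; /* ~&aio_anchor.aio_async_workq */
    static int             q_count;                           /* ~aio_async_workq_count */

    /* worker side: sleep until the producer signals the channel */
    static void
    worker_wait(void)
    {
            pthread_mutex_lock(&q_lock);
            while (q_count == 0)
                    pthread_cond_wait(&q_cv, &q_lock);  /* ~assert_wait + thread_block */
            q_count--;                                  /* claim one request */
            pthread_mutex_unlock(&q_lock);
    }

    /* producer side: queue one entry, then rouse exactly one worker */
    static void
    producer_post(void)
    {
            pthread_mutex_lock(&q_lock);
            q_count++;
            pthread_mutex_unlock(&q_lock);
            pthread_cond_signal(&q_cv);                 /* ~wakeup_one() */
    }
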
+ */ + +static void +aio_work_thread( void ) +{ + aio_workq_entry *entryp; + struct uthread *uthread = (struct uthread *)get_bsdthread_info(current_act()); + + for( ;; ) { + entryp = aio_get_some_work(); + if ( entryp == NULL ) { + /* + * aio worker threads wait for some work to get queued up + * by aio_queue_async_request. Once some work gets queued + * it will wake up one of these worker threads just before + * returning to our caller in user land. We do not use + * tsleep() here in order to avoid getting kernel funnel lock. + */ + assert_wait( (event_t) &aio_anchor.aio_async_workq, THREAD_UNINT ); + thread_block( THREAD_CONTINUE_NULL ); + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_worker_wake)) | DBG_FUNC_NONE, + 0, 0, 0, 0, 0 ); + } + else { + int error; + boolean_t funnel_state; + vm_map_t currentmap; + vm_map_t oldmap = VM_MAP_NULL; + task_t oldaiotask = TASK_NULL; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_worker_thread)) | DBG_FUNC_START, + (int)entryp->procp, (int)entryp->uaiocbp, entryp->flags, 0, 0 ); + + /* + * Assume the target's address space identity for the duration + * of the IO. + */ + funnel_state = thread_funnel_set( kernel_flock, TRUE ); + + currentmap = get_task_map( (current_proc())->task ); + if ( currentmap != entryp->aio_map ) { + oldaiotask = uthread->uu_aio_task; + uthread->uu_aio_task = entryp->procp->task; + oldmap = vm_map_switch( entryp->aio_map ); + } + + if ( (entryp->flags & AIO_READ) != 0 ) { + error = do_aio_read( entryp ); + } + else if ( (entryp->flags & AIO_WRITE) != 0 ) { + error = do_aio_write( entryp ); + } + else if ( (entryp->flags & AIO_FSYNC) != 0 ) { + error = do_aio_fsync( entryp ); + } + else { + printf( "%s - unknown aio request - flags 0x%02X \n", + __FUNCTION__, entryp->flags ); + error = EINVAL; + } + entryp->errorval = error; + if ( currentmap != entryp->aio_map ) { + (void) vm_map_switch( oldmap ); + uthread->uu_aio_task = oldaiotask; + } + + /* we're done with the IO request so pop it off the active queue and */ + /* push it on the done queue */ + AIO_LOCK; + TAILQ_REMOVE( &entryp->procp->aio_activeq, entryp, aio_workq_link ); + aio_anchor.aio_active_count--; + entryp->procp->aio_active_count--; + TAILQ_INSERT_TAIL( &entryp->procp->aio_doneq, entryp, aio_workq_link ); + aio_anchor.aio_done_count++; + entryp->procp->aio_done_count++; + entryp->flags |= AIO_COMPLETION; + + /* remove our reference to the user land map. */ + if ( VM_MAP_NULL != entryp->aio_map ) { + vm_map_t my_map; + + my_map = entryp->aio_map; + entryp->aio_map = VM_MAP_NULL; + AIO_UNLOCK; /* must unlock before calling vm_map_deallocate() */ + vm_map_deallocate( my_map ); + } + else { + AIO_UNLOCK; + } + + do_aio_completion( entryp ); + (void) thread_funnel_set( kernel_flock, funnel_state ); + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_worker_thread)) | DBG_FUNC_END, + (int)entryp->procp, (int)entryp->uaiocbp, entryp->errorval, + entryp->returnval, 0 ); + + AIO_LOCK; + entryp->flags &= ~AIO_COMPLETION; + if ( (entryp->flags & AIO_DO_FREE) != 0 ) { + vm_map_t my_map; + + my_map = entryp->aio_map; + entryp->aio_map = VM_MAP_NULL; + AIO_UNLOCK; + aio_free_request( entryp, my_map ); + } + else + AIO_UNLOCK; + } + } /* for ( ;; ) */ + + /* NOT REACHED */ + +} /* aio_work_thread */ + + +/* + * aio_get_some_work - get the next async IO request that is ready to be executed. + * aio_fsync complicates matters a bit since we cannot do the fsync until all async + * IO requests at the time the aio_fsync call came in have completed. 
+ */ + +static aio_workq_entry * +aio_get_some_work( void ) +{ + aio_workq_entry *entryp; + int skip_count = 0; + + /* pop some work off the work queue and add to our active queue */ + AIO_LOCK; + for ( entryp = TAILQ_FIRST( &aio_anchor.aio_async_workq ); + entryp != NULL; + entryp = TAILQ_NEXT( entryp, aio_workq_link ) ) { + + if ( (entryp->flags & AIO_FSYNC) != 0 ) { + /* leave aio_fsync calls on the work queue if there are IO */ + /* requests on the active queue for the same file descriptor. */ + if ( aio_delay_fsync_request( entryp ) ) { + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_fsync_delay)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 ); + continue; + } + } + break; + } + + if ( entryp != NULL ) { + TAILQ_REMOVE( &aio_anchor.aio_async_workq, entryp, aio_workq_link ); + aio_anchor.aio_async_workq_count--; + TAILQ_INSERT_TAIL( &entryp->procp->aio_activeq, entryp, aio_workq_link ); + aio_anchor.aio_active_count++; + entryp->procp->aio_active_count++; + } + AIO_UNLOCK; + + return( entryp ); + +} /* aio_get_some_work */ + + +/* + * aio_delay_fsync_request - look to see if this aio_fsync request should be delayed at + * this time. Delay will happen when there are any active IOs for the same file + * descriptor that were queued at time the aio_sync call was queued. + * NOTE - AIO_LOCK must be held by caller + */ +static boolean_t +aio_delay_fsync_request( aio_workq_entry *entryp ) +{ + aio_workq_entry *my_entryp; + + TAILQ_FOREACH( my_entryp, &entryp->procp->aio_activeq, aio_workq_link ) { + if ( my_entryp->fsyncp != NULL && + entryp->uaiocbp == my_entryp->fsyncp && + entryp->aiocb.aio_fildes == my_entryp->aiocb.aio_fildes ) { + return( TRUE ); + } + } + + return( FALSE ); + +} /* aio_delay_fsync_request */ + + +/* + * aio_queue_async_request - queue up an async IO request on our work queue then + * wake up one of our worker threads to do the actual work. We get a reference + * to our caller's user land map in order to keep it around while we are + * processing the request. + */ + +static int +aio_queue_async_request( struct proc *procp, struct aiocb *aiocbp, int kindOfIO ) +{ + aio_workq_entry *entryp; + int result; + + entryp = (aio_workq_entry *) zalloc( aio_workq_zonep ); + if ( entryp == NULL ) { + result = EAGAIN; + goto error_exit; + } + bzero( entryp, sizeof(*entryp) ); + + /* fill in the rest of the aio_workq_entry */ + entryp->procp = procp; + entryp->uaiocbp = aiocbp; + entryp->flags |= kindOfIO; + entryp->aio_map = VM_MAP_NULL; + result = copyin( aiocbp, &entryp->aiocb, sizeof(entryp->aiocb) ); + if ( result != 0 ) { + result = EAGAIN; + goto error_exit; + } + + /* do some more validation on the aiocb and embedded file descriptor */ + result = aio_validate( entryp ); + if ( result != 0 ) + goto error_exit; + + /* get a reference to the user land map in order to keep it around */ + entryp->aio_map = get_task_map( procp->task ); + vm_map_reference( entryp->aio_map ); + + AIO_LOCK; + + if ( is_already_queued( entryp->procp, entryp->uaiocbp ) == TRUE ) { + AIO_UNLOCK; + result = EAGAIN; + goto error_exit; + } + + /* check our aio limits to throttle bad or rude user land behavior */ + if ( aio_get_all_queues_count( ) >= aio_max_requests || + aio_get_process_count( procp ) >= aio_max_requests_per_process ) { + AIO_UNLOCK; + result = EAGAIN; + goto error_exit; + } + + /* + * aio_fsync calls sync up all async IO requests queued at the time + * the aio_fsync call was made. 
So we mark each currently queued async + * IO with a matching file descriptor as must complete before we do the + * fsync. We set the fsyncp field of each matching async IO + * request with the aiocb pointer passed in on the aio_fsync call to + * know which IOs must complete before we process the aio_fsync call. + */ + if ( (kindOfIO & AIO_FSYNC) != 0 ) + aio_mark_requests( entryp ); + + /* queue up on our aio asynchronous work queue */ + TAILQ_INSERT_TAIL( &aio_anchor.aio_async_workq, entryp, aio_workq_link ); + aio_anchor.aio_async_workq_count++; + + AIO_UNLOCK; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued)) | DBG_FUNC_NONE, + (int)procp, (int)aiocbp, 0, 0, 0 ); + + wakeup_one( &aio_anchor.aio_async_workq ); + + return( 0 ); + +error_exit: + if ( entryp != NULL ) { + /* this entry has not been queued up so no worries about unlocked */ + /* state and aio_map */ + aio_free_request( entryp, entryp->aio_map ); + } + + return( result ); + +} /* aio_queue_async_request */ + + +/* + * lio_create_async_entry - allocate an aio_workq_entry and fill it in. + * If all goes well return 0 and pass the aio_workq_entry pointer back to + * our caller. We get a reference to our caller's user land map in order to keep + * it around while we are processing the request. + * lio_listio calls behave differently at completion they do completion notification + * when all async IO requests have completed. We use group_tag to tag IO requests + * that behave in the delay notification manner. + */ + +static int +lio_create_async_entry( struct proc *procp, struct aiocb *aiocbp, + struct sigevent *sigp, long group_tag, + aio_workq_entry **entrypp ) +{ + aio_workq_entry *entryp; + int result; + + entryp = (aio_workq_entry *) zalloc( aio_workq_zonep ); + if ( entryp == NULL ) { + result = EAGAIN; + goto error_exit; + } + bzero( entryp, sizeof(*entryp) ); + + /* fill in the rest of the aio_workq_entry */ + entryp->procp = procp; + entryp->uaiocbp = aiocbp; + entryp->flags |= AIO_LIO; + entryp->group_tag = group_tag; + entryp->aio_map = VM_MAP_NULL; + result = copyin( aiocbp, &entryp->aiocb, sizeof(entryp->aiocb) ); + if ( result != 0 ) { + result = EAGAIN; + goto error_exit; + } + + /* look for lio_listio LIO_NOP requests and ignore them. */ + /* Not really an error, but we need to free our aio_workq_entry. */ + if ( entryp->aiocb.aio_lio_opcode == LIO_NOP ) { + result = 0; + goto error_exit; + } + + /* use sigevent passed in to lio_listio for each of our calls, but only */ + /* do completion notification after the last request completes. */ + if ( sigp != NULL ) { + result = copyin( sigp, &entryp->aiocb.aio_sigevent, sizeof(entryp->aiocb.aio_sigevent) ); + if ( result != 0 ) { + result = EAGAIN; + goto error_exit; + } + } + + /* do some more validation on the aiocb and embedded file descriptor */ + result = aio_validate( entryp ); + if ( result != 0 ) + goto error_exit; + + /* get a reference to the user land map in order to keep it around */ + entryp->aio_map = get_task_map( procp->task ); + vm_map_reference( entryp->aio_map ); + + *entrypp = entryp; + return( 0 ); + +error_exit: + if ( entryp != NULL ) + zfree( aio_workq_zonep, (vm_offset_t) entryp ); + + return( result ); + +} /* lio_create_async_entry */ + + +/* + * aio_mark_requests - aio_fsync calls synchronize file data for all queued async IO + * requests at the moment the aio_fsync call is queued. We use aio_workq_entry.fsyncp + * to mark each async IO that must complete before the fsync is done. 
We use the uaiocbp + * field from the aio_fsync call as the aio_workq_entry.fsyncp in marked requests. + * NOTE - AIO_LOCK must be held by caller + */ + +static void +aio_mark_requests( aio_workq_entry *entryp ) +{ + aio_workq_entry *my_entryp; + + TAILQ_FOREACH( my_entryp, &entryp->procp->aio_activeq, aio_workq_link ) { + if ( entryp->aiocb.aio_fildes == my_entryp->aiocb.aio_fildes ) { + my_entryp->fsyncp = entryp->uaiocbp; + } + } + + TAILQ_FOREACH( my_entryp, &aio_anchor.aio_async_workq, aio_workq_link ) { + if ( entryp->procp == my_entryp->procp && + entryp->aiocb.aio_fildes == my_entryp->aiocb.aio_fildes ) { + my_entryp->fsyncp = entryp->uaiocbp; + } + } + +} /* aio_mark_requests */ + + +/* + * lio_create_sync_entry - allocate an aio_workq_entry and fill it in. + * If all goes well return 0 and pass the aio_workq_entry pointer back to + * our caller. + * lio_listio calls behave differently at completion they do completion notification + * when all async IO requests have completed. We use group_tag to tag IO requests + * that behave in the delay notification manner. + */ + +static int +lio_create_sync_entry( struct proc *procp, struct aiocb *aiocbp, + long group_tag, aio_workq_entry **entrypp ) +{ + aio_workq_entry *entryp; + int result; + + entryp = (aio_workq_entry *) zalloc( aio_workq_zonep ); + if ( entryp == NULL ) { + result = EAGAIN; + goto error_exit; + } + bzero( entryp, sizeof(*entryp) ); + + /* fill in the rest of the aio_workq_entry */ + entryp->procp = procp; + entryp->uaiocbp = aiocbp; + entryp->flags |= AIO_LIO; + entryp->group_tag = group_tag; + entryp->aio_map = VM_MAP_NULL; + result = copyin( aiocbp, &entryp->aiocb, sizeof(entryp->aiocb) ); + if ( result != 0 ) { + result = EAGAIN; + goto error_exit; + } + + /* look for lio_listio LIO_NOP requests and ignore them. */ + /* Not really an error, but we need to free our aio_workq_entry. */ + if ( entryp->aiocb.aio_lio_opcode == LIO_NOP ) { + result = 0; + goto error_exit; + } + + result = aio_validate( entryp ); + if ( result != 0 ) { + goto error_exit; + } + + *entrypp = entryp; + return( 0 ); + +error_exit: + if ( entryp != NULL ) + zfree( aio_workq_zonep, (vm_offset_t) entryp ); + + return( result ); + +} /* lio_create_sync_entry */ + + +/* + * aio_free_request - remove our reference on the user land map and + * free the work queue entry resources. + * We are not holding the lock here thus aio_map is passed in and + * zeroed while we did have the lock. + */ + +static int +aio_free_request( aio_workq_entry *entryp, vm_map_t the_map ) +{ + /* remove our reference to the user land map. */ + if ( VM_MAP_NULL != the_map ) { + vm_map_deallocate( the_map ); + } + + zfree( aio_workq_zonep, (vm_offset_t) entryp ); + + return( 0 ); + +} /* aio_free_request */ + + +/* aio_validate - validate the aiocb passed in by one of the aio syscalls. 
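+ *
+ * Sketch of an aiocb that passes the checks below (fd, buf and len are
+ * hypothetical; the constraints are the ones enforced in the body):
+ *
+ *	struct aiocb cb;
+ *	bzero(&cb, sizeof(cb));
+ *	cb.aio_fildes = fd;		// must be an open vnode with the
+ *					// matching FREAD / FWRITE access
+ *	cb.aio_buf = buf;		// must be non-NULL for read/write
+ *	cb.aio_nbytes = len;		// 0 <= len <= INT_MAX
+ *	cb.aio_offset = 0;		// must be >= 0
+ *	cb.aio_sigevent.sigev_notify = SIGEV_NONE;
+ *					// or SIGEV_SIGNAL with a signal
+ *					// other than SIGKILL / SIGSTOP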
+ */ + +static int +aio_validate( aio_workq_entry *entryp ) +{ + boolean_t funnel_state; + struct file *fp; + int flag; + int result; + + result = 0; + + if ( (entryp->flags & AIO_LIO) != 0 ) { + if ( entryp->aiocb.aio_lio_opcode == LIO_READ ) + entryp->flags |= AIO_READ; + else if ( entryp->aiocb.aio_lio_opcode == LIO_WRITE ) + entryp->flags |= AIO_WRITE; + else if ( entryp->aiocb.aio_lio_opcode == LIO_NOP ) + return( 0 ); + else + return( EINVAL ); + } + + flag = FREAD; + if ( (entryp->flags & (AIO_WRITE | AIO_FSYNC)) != 0 ) { + flag = FWRITE; + } + + if ( (entryp->flags & (AIO_READ | AIO_WRITE)) != 0 ) { + if ( entryp->aiocb.aio_offset < 0 || + entryp->aiocb.aio_nbytes < 0 || + entryp->aiocb.aio_nbytes > INT_MAX || + entryp->aiocb.aio_buf == NULL ) + return( EINVAL ); + } + + /* validate aiocb.aio_sigevent. at this point we only support sigev_notify + * equal to SIGEV_SIGNAL or SIGEV_NONE. this means sigev_value, + * sigev_notify_function, and sigev_notify_attributes are ignored. + */ + if ( entryp->aiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ) { + int signum; + /* make sure we have a valid signal number */ + signum = entryp->aiocb.aio_sigevent.sigev_signo; + if ( signum <= 0 || signum >= NSIG || + signum == SIGKILL || signum == SIGSTOP ) + return (EINVAL); + } + else if ( entryp->aiocb.aio_sigevent.sigev_notify != SIGEV_NONE ) + return (EINVAL); + + /* validate the file descriptor and that the file was opened + * for the appropriate read / write access. This section requires + * kernel funnel lock. + */ + funnel_state = thread_funnel_set( kernel_flock, TRUE ); + + result = fdgetf( entryp->procp, entryp->aiocb.aio_fildes, &fp ); + if ( result == 0 ) { + if ( (fp->f_flag & flag) == 0 ) { + /* we don't have read or write access */ + result = EBADF; + } + else if ( fp->f_type != DTYPE_VNODE ) { + /* this is not a file */ + result = ESPIPE; + } + } + else { + result = EBADF; + } + + (void) thread_funnel_set( kernel_flock, funnel_state ); + + return( result ); + +} /* aio_validate */ + + +/* + * aio_get_process_count - runs through our queues that hold outstanding + * async IO reqests and totals up number of requests for the given + * process. + * NOTE - caller must hold aio lock! + */ + +static int +aio_get_process_count( struct proc *procp ) +{ + aio_workq_entry *entryp; + int error; + int count; + + /* begin with count of completed async IO requests for this process */ + count = procp->aio_done_count; + + /* add in count of active async IO requests for this process */ + count += procp->aio_active_count; + + /* look for matches on our queue of asynchronous todo work */ + TAILQ_FOREACH( entryp, &aio_anchor.aio_async_workq, aio_workq_link ) { + if ( procp == entryp->procp ) { + count++; + } + } + + /* look for matches on our queue of synchronous todo work */ + TAILQ_FOREACH( entryp, &aio_anchor.lio_sync_workq, aio_workq_link ) { + if ( procp == entryp->procp ) { + count++; + } + } + + return( count ); + +} /* aio_get_process_count */ + + +/* + * aio_get_all_queues_count - get total number of entries on all aio work queues. + * NOTE - caller must hold aio lock! + */ + +static int +aio_get_all_queues_count( void ) +{ + int count; + + count = aio_anchor.aio_async_workq_count; + count += aio_anchor.lio_sync_workq_count; + count += aio_anchor.aio_active_count; + count += aio_anchor.aio_done_count; + + return( count ); + +} /* aio_get_all_queues_count */ + + +/* + * do_aio_completion. Handle async IO completion. 
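+ *
+ * Illustrative user-land setup for the signal path handled first below
+ * (a sketch; SIGUSR1 is an arbitrary choice, cb is a hypothetical aiocb):
+ *
+ *	cb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
+ *	cb.aio_sigevent.sigev_signo = SIGUSR1;
+ *	aio_read(&cb);
+ *		// when the IO completes, psignal() below delivers SIGUSR1;
+ *		// the handler would then call aio_error()/aio_return()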
+ */ + +static void +do_aio_completion( aio_workq_entry *entryp ) +{ + /* signal user land process if appropriate */ + if ( entryp->aiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL && + (entryp->flags & AIO_DISABLE) == 0 ) { + + /* + * if group_tag is non zero then make sure this is the last IO request + * in the group before we signal. + */ + if ( entryp->group_tag == 0 || + (entryp->group_tag != 0 && aio_last_group_io( entryp )) ) { + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_sig)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, + entryp->aiocb.aio_sigevent.sigev_signo, 0, 0 ); + + psignal( entryp->procp, entryp->aiocb.aio_sigevent.sigev_signo ); + return; + } + } + + /* + * need to handle case where a process is trying to exit, exec, or close + * and is currently waiting for active aio requests to complete. If + * AIO_WAITING is set then we need to look to see if there are any + * other requests in the active queue for this process. If there are + * none then wakeup using the AIO_CLEANUP_SLEEP_CHAN tsleep channel. If + * there are some still active then do nothing - we only want to wakeup + * when all active aio requests for the process are complete. + */ + if ( (entryp->flags & AIO_WAITING) != 0 ) { + int active_requests; + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wait)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 ); + + AIO_LOCK; + active_requests = aio_active_requests_for_process( entryp->procp ); + AIO_UNLOCK; + if ( active_requests < 1 ) { + /* no active aio requests for this process, continue exiting */ + + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wake)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 ); + + wakeup_one( &entryp->procp->AIO_CLEANUP_SLEEP_CHAN ); + } + return; + } + + /* + * aio_suspend case when a signal was not requested. In that scenario we + * are sleeping on the AIO_SUSPEND_SLEEP_CHAN channel. + * NOTE - the assumption here is that this wakeup call is inexpensive. + * we really only need to do this when an aio_suspend call is pending. + * If we find the wakeup call should be avoided we could mark the + * async IO requests given in the list provided by aio_suspend and only + * call wakeup for them. If we do mark them we should unmark them after + * the aio_suspend wakes up. + */ + KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_suspend_wake)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 ); + + wakeup_one( &entryp->procp->AIO_SUSPEND_SLEEP_CHAN ); + + return; + +} /* do_aio_completion */ + + +/* + * aio_last_group_io - checks to see if this is the last unfinished IO request + * for the given group_tag. 
Returns TRUE if there are no other active IO
+ * requests for this group or FALSE if there are active IO requests.
+ * NOTE - AIO_LOCK must be held by caller
+ */
+
+static boolean_t
+aio_last_group_io( aio_workq_entry *entryp )
+{
+	aio_workq_entry		*my_entryp;
+
+	/* look for matches on our queue of active async IO requests */
+	TAILQ_FOREACH( my_entryp, &entryp->procp->aio_activeq, aio_workq_link ) {
+		if ( my_entryp->group_tag == entryp->group_tag )
+			return( FALSE );
+	}
+
+	/* look for matches on our queue of asynchronous todo work */
+	TAILQ_FOREACH( my_entryp, &aio_anchor.aio_async_workq, aio_workq_link ) {
+		if ( my_entryp->group_tag == entryp->group_tag )
+			return( FALSE );
+	}
+
+	/* look for matches on our queue of synchronous todo work */
+	TAILQ_FOREACH( my_entryp, &aio_anchor.lio_sync_workq, aio_workq_link ) {
+		if ( my_entryp->group_tag == entryp->group_tag )
+			return( FALSE );
+	}
+
+	return( TRUE );
+
+} /* aio_last_group_io */
+
+
+/*
+ * do_aio_read
+ */
+static int
+do_aio_read( aio_workq_entry *entryp )
+{
+	struct file		*fp;
+	int			error;
+
+	fp = holdfp( entryp->procp->p_fd, entryp->aiocb.aio_fildes, FREAD );
+	if ( fp != NULL ) {
+		error = dofileread( entryp->procp, fp, entryp->aiocb.aio_fildes,
+							(void *)entryp->aiocb.aio_buf,
+							entryp->aiocb.aio_nbytes,
+							entryp->aiocb.aio_offset, FOF_OFFSET,
+							&entryp->returnval );
+		frele( fp );
+	}
+	else
+		error = EBADF;
+
+	return( error );
+
+} /* do_aio_read */
+
+
+/*
+ * do_aio_write
+ */
+static int
+do_aio_write( aio_workq_entry *entryp )
+{
+	struct file		*fp;
+	int			error;
+
+	fp = holdfp( entryp->procp->p_fd, entryp->aiocb.aio_fildes, FWRITE );
+	if ( fp != NULL ) {
+		error = dofilewrite( entryp->procp, fp, entryp->aiocb.aio_fildes,
+							 (const void *)entryp->aiocb.aio_buf,
+							 entryp->aiocb.aio_nbytes,
+							 entryp->aiocb.aio_offset, FOF_OFFSET,
+							 &entryp->returnval );
+		frele( fp );
+	}
+	else
+		error = EBADF;
+
+	return( error );
+
+} /* do_aio_write */
+
+
+/*
+ * aio_active_requests_for_process - return number of active async IO
+ * requests for the given process.
+ * NOTE - caller must hold aio lock!
+ */
+
+static int
+aio_active_requests_for_process( struct proc *procp )
+{
+
+	return( procp->aio_active_count );
+
+} /* aio_active_requests_for_process */
+
+
+/*
+ * do_aio_fsync
+ */
+static int
+do_aio_fsync( aio_workq_entry *entryp )
+{
+	register struct vnode	*vp;
+	struct file		*fp;
+	int			error;
+
+	/*
+	 * NOTE - we will not support AIO_DSYNC until fdatasync() is supported.
+	 * AIO_DSYNC is caught before we queue up a request and flagged as an error.
+	 * The following was shamelessly extracted from fsync() implementation.
+	 */
+	error = getvnode( entryp->procp, entryp->aiocb.aio_fildes, &fp );
+	if ( error == 0 ) {
+		vp = (struct vnode *)fp->f_data;
+		vn_lock( vp, LK_EXCLUSIVE | LK_RETRY, entryp->procp );
+		error = VOP_FSYNC( vp, fp->f_cred, MNT_WAIT, entryp->procp );
+		VOP_UNLOCK( vp, 0, entryp->procp );
+	}
+	if ( error != 0 )
+		entryp->returnval = -1;
+
+	return( error );
+
+} /* do_aio_fsync */
+
+
+/*
+ * is_already_queued - runs through our queues to see if the given
+ * aiocbp / process is there.  Returns TRUE if there is a match
+ * on any of our aio queues.
+ * NOTE - callers must hold aio lock!
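+ *
+ * The user-visible consequence (a sketch; standard POSIX AIO calls, cb is
+ * a hypothetical request): re-submitting an aiocb that is still on one of
+ * these queues fails with EAGAIN, so a caller must reap it first:
+ *
+ *	aio_read(&cb);				// first submission is queued
+ *	while (aio_error(&cb) == EINPROGRESS)
+ *		;				// poll, or use aio_suspend()
+ *	(void) aio_return(&cb);			// reap; cb may now be reused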
+ */ + +static boolean_t +is_already_queued( struct proc *procp, + struct aiocb *aiocbp ) +{ + aio_workq_entry *entryp; + boolean_t result; + + result = FALSE; + + /* look for matches on our queue of async IO requests that have completed */ + TAILQ_FOREACH( entryp, &procp->aio_doneq, aio_workq_link ) { + if ( aiocbp == entryp->uaiocbp ) { + result = TRUE; + goto ExitThisRoutine; + } + } + + /* look for matches on our queue of active async IO requests */ + TAILQ_FOREACH( entryp, &procp->aio_activeq, aio_workq_link ) { + if ( aiocbp == entryp->uaiocbp ) { + result = TRUE; + goto ExitThisRoutine; + } + } + + /* look for matches on our queue of asynchronous todo work */ + TAILQ_FOREACH( entryp, &aio_anchor.aio_async_workq, aio_workq_link ) { + if ( procp == entryp->procp && aiocbp == entryp->uaiocbp ) { + result = TRUE; + goto ExitThisRoutine; + } + } + + /* look for matches on our queue of synchronous todo work */ + TAILQ_FOREACH( entryp, &aio_anchor.lio_sync_workq, aio_workq_link ) { + if ( procp == entryp->procp && aiocbp == entryp->uaiocbp ) { + result = TRUE; + goto ExitThisRoutine; + } + } + +ExitThisRoutine: + return( result ); + +} /* is_already_queued */ + + +/* + * aio initialization + */ +__private_extern__ void +aio_init( void ) +{ + int i; + + simple_lock_init( &aio_lock ); + + AIO_LOCK; + TAILQ_INIT( &aio_anchor.aio_async_workq ); + TAILQ_INIT( &aio_anchor.lio_sync_workq ); + aio_anchor.aio_async_workq_count = 0; + aio_anchor.lio_sync_workq_count = 0; + aio_anchor.aio_active_count = 0; + aio_anchor.aio_done_count = 0; + AIO_UNLOCK; + + i = sizeof( aio_workq_entry ); + aio_workq_zonep = zinit( i, i * aio_max_requests, i * aio_max_requests, "aiowq" ); + + _aio_create_worker_threads( aio_worker_threads ); + + return; + +} /* aio_init */ + + +/* + * aio worker threads created here. + */ +__private_extern__ void +_aio_create_worker_threads( int num ) +{ + int i; + + /* create some worker threads to handle the async IO requests */ + for ( i = 0; i < num; i++ ) { + thread_t myThread; + + myThread = kernel_thread( kernel_task, aio_work_thread ); + if ( THREAD_NULL == myThread ) { + printf( "%s - failed to create a work thread \n", __FUNCTION__ ); + } + } + + return; + +} /* _aio_create_worker_threads */ + +/* + * Return the current activation utask + */ +task_t +get_aiotask(void) +{ + return ((struct uthread *)get_bsdthread_info(current_act()))->uu_aio_task; +} diff --git a/bsd/kern/kern_audit.c b/bsd/kern/kern_audit.c new file mode 100644 index 000000000..ce838a4f4 --- /dev/null +++ b/bsd/kern/kern_audit.c @@ -0,0 +1,1592 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef AUDIT + +/* + * The AUDIT_EXCESSIVELY_VERBOSE define enables a number of + * gratuitously noisy printf's to the console. Due to the + * volume, it should be left off unless you want your system + * to churn a lot whenever the audit record flow gets high. + */ +/* #define AUDIT_EXCESSIVELY_VERBOSE */ +#ifdef AUDIT_EXCESSIVELY_VERBOSE +#define AUDIT_PRINTF(x) printf x +#else +#define AUDIT_PRINTF(X) +#endif + +#if DIAGNOSTIC +#if defined(assert) +#undef assert() +#endif +#define assert(cond) \ + ((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond))) +#else +#include +#endif /* DIAGNOSTIC */ + +/* + * Define the audit control flags. + */ +int audit_enabled; +int audit_suspended; + +/* + * Mutex to protect global variables shared between various threads and + * processes. + */ +static mutex_t *audit_mtx; + +/* + * Queue of audit records ready for delivery to disk. We insert new + * records at the tail, and remove records from the head. + */ +static TAILQ_HEAD(, kaudit_record) audit_q; + +/* + * Condition variable to signal to the worker that it has work to do: + * either new records are in the queue, or a log replacement is taking + * place. + */ +static wait_queue_t audit_wait_queue; + +/* + * When an audit log is rotated, the actual rotation must be performed + * by the audit worker thread, as it may have outstanding writes on the + * current audit log. audit_replacement_vp holds the vnode replacing + * the current vnode. We can't let more than one replacement occur + * at a time, so if more than one thread requests a replacement, only + * one can have the replacement "in progress" at any given moment. If + * a thread tries to replace the audit vnode and discovers a replacement + * is already in progress (i.e., audit_replacement_flag != 0), then it + * will sleep on audit_replacement_cv waiting its turn to perform a + * replacement. When a replacement is completed, this cv is signalled + * by the worker thread so a waiting thread can start another replacement. + * We also store a credential to perform audit log write operations with. + */ +static wait_queue_t audit_replacement_wait_queue; + +static int audit_replacement_flag; +static struct vnode *audit_replacement_vp; +static struct ucred *audit_replacement_cred; + +/* + * Flags to use on audit files when opening and closing. + */ +const static int audit_open_flags = FWRITE | O_APPEND; +const static int audit_close_flags = FWRITE | O_APPEND; + +/* + * XXX: Couldn't find the include file for this, so copied kern_exec.c's + * behavior. 
+ */ +extern task_t kernel_task; + +static void +audit_free(struct kaudit_record *ar) +{ + if (ar->k_ar.ar_arg_upath1 != NULL) { + kmem_free(kernel_map, ar->k_ar.ar_arg_upath1, MAXPATHLEN); + } + if (ar->k_ar.ar_arg_upath2 != NULL) { + kmem_free(kernel_map, ar->k_ar.ar_arg_upath2, MAXPATHLEN); + } + if (ar->k_ar.ar_arg_kpath1 != NULL) { + kmem_free(kernel_map, ar->k_ar.ar_arg_kpath1, MAXPATHLEN); + } + if (ar->k_ar.ar_arg_kpath2 != NULL) { + kmem_free(kernel_map, ar->k_ar.ar_arg_kpath2, MAXPATHLEN); + } + if (ar->k_ar.ar_arg_text != NULL) { + kmem_free(kernel_map, ar->k_ar.ar_arg_text, MAXPATHLEN); + } + if (ar->k_udata != NULL) { + kmem_free(kernel_map, ar->k_udata, ar->k_ulen); + } + kmem_free(kernel_map, ar, sizeof(*ar)); +} + +static int +audit_write(struct vnode *vp, struct kaudit_record *ar, struct ucred *cred, + struct proc *p) +{ + int ret; + struct au_record *bsm; + + /* + * If there is a user audit record attached to the kernel record, + * then write the user record. + */ + /* XXX Need to decide a few things here: IF the user audit + * record is written, but the write of the kernel record fails, + * what to do? Should the kernel record come before or after the + * user record? For now, we write the user record first, and + * we ignore errors. + */ + if (ar->k_udata != NULL) { + vn_rdwr(UIO_WRITE, vp, (void *)ar->k_udata, ar->k_ulen, + (off_t)0, UIO_SYSSPACE, IO_APPEND|IO_UNIT, cred, NULL, p); + } + + /* + * Convert the internal kernel record to BSM format and write it + * out if everything's OK. + */ + ret = kaudit_to_bsm(ar, &bsm); + if (ret == BSM_NOAUDIT) + return (0); + + if (ret == BSM_FAILURE) { + AUDIT_PRINTF(("BSM conversion failure\n")); + return (-1); + } + + /* XXX This function can be called with the kernel funnel held, + * which is not optimal. We should break the write functionality + * away from the BSM record generation and have the BSM generation + * done before this function is called. This function will then + * take the BSM record as a parameter. + */ + ret = (vn_rdwr(UIO_WRITE, vp, (void *)bsm->data, bsm->len, + (off_t)0, UIO_SYSSPACE, IO_APPEND|IO_UNIT, cred, NULL, p)); + + kau_free(bsm); + + return (ret); +} + +static void +audit_worker() +{ + int do_replacement_signal, error, release_funnel; + TAILQ_HEAD(, kaudit_record) ar_worklist; + struct kaudit_record *ar, *ar_start, *ar_stop; + struct vnode *audit_vp, *old_vp; + struct ucred *audit_cred, *old_cred; + struct proc *audit_p; + + AUDIT_PRINTF(("audit_worker starting\n")); + + TAILQ_INIT(&ar_worklist); + audit_cred = NULL; + audit_p = current_proc(); + audit_vp = NULL; + + /* + * XXX: Presumably we can assume Mach threads are started without + * holding the BSD kernel funnel? + */ + thread_funnel_set(kernel_flock, FALSE); + + mutex_lock(audit_mtx); + while (1) { + /* + * First priority: replace the audit log target if requested. + * As we actually close the vnode in the worker thread, we + * need to grab the funnel, which means releasing audit_mtx. + * In case another replacement was scheduled while the mutex + * we released, we loop. + * + * XXX It could well be we should drain existing records + * first to ensure that the timestamps and ordering + * are right. 
+ */ + do_replacement_signal = 0; + while (audit_replacement_flag != 0) { + old_cred = audit_cred; + old_vp = audit_vp; + audit_cred = audit_replacement_cred; + audit_vp = audit_replacement_vp; + audit_replacement_cred = NULL; + audit_replacement_vp = NULL; + audit_replacement_flag = 0; + + audit_enabled = (audit_vp != NULL); + + if (old_vp != NULL || audit_vp != NULL) { + mutex_unlock(audit_mtx); + thread_funnel_set(kernel_flock, TRUE); + release_funnel = 1; + } else + release_funnel = 0; + /* + * XXX: What to do about write failures here? + */ + if (old_vp != NULL) { + AUDIT_PRINTF(("Closing old audit file\n")); + vn_close(old_vp, audit_close_flags, old_cred, + audit_p); + crfree(old_cred); + old_cred = NULL; + old_vp = NULL; + AUDIT_PRINTF(("Audit file closed\n")); + } + if (audit_vp != NULL) { + AUDIT_PRINTF(("Opening new audit file\n")); + } + if (release_funnel) { + thread_funnel_set(kernel_flock, FALSE); + mutex_lock(audit_mtx); + } + do_replacement_signal = 1; + } + /* + * Signal that replacement have occurred to wake up and + * start any other replacements started in parallel. We can + * continue about our business in the mean time. We + * broadcast so that both new replacements can be inserted, + * but also so that the source(s) of replacement can return + * successfully. + */ + if (do_replacement_signal) + wait_queue_wakeup_all(audit_replacement_wait_queue, + 0, THREAD_AWAKENED); + + /* + * Next, check to see if we have any records to drain into + * the vnode. If not, go back to waiting for an event. + */ + if (TAILQ_EMPTY(&audit_q)) { + int ret; + + AUDIT_PRINTF(("audit_worker waiting\n")); + ret = wait_queue_assert_wait(audit_wait_queue, 0, + THREAD_UNINT); + mutex_unlock(audit_mtx); + + assert(ret == THREAD_WAITING); + ret = thread_block(THREAD_CONTINUE_NULL); + assert(ret == THREAD_AWAKENED); + AUDIT_PRINTF(("audit_worker woken up\n")); + AUDIT_PRINTF(("audit_worker: new vp = %p; value of flag %d\n", + audit_replacement_vp, audit_replacement_flag)); + + mutex_lock(audit_mtx); + continue; + } + + /* + * If we have records, but there's no active vnode to + * write to, drain the record queue. Generally, we + * prevent the unnecessary allocation of records + * elsewhere, but we need to allow for races between + * conditional allocation and queueing. Go back to + * waiting when we're done. + * + * XXX: We go out of our way to avoid calling audit_free() + * with the audit_mtx held, to avoid a lock order reversal + * as free() may grab the funnel. This will be fixed at + * some point. + */ + if (audit_vp == NULL) { + while ((ar = TAILQ_FIRST(&audit_q))) { + TAILQ_REMOVE(&audit_q, ar, k_q); + TAILQ_INSERT_TAIL(&ar_worklist, ar, k_q); + } + mutex_unlock(audit_mtx); + while ((ar = TAILQ_FIRST(&ar_worklist))) { + TAILQ_REMOVE(&ar_worklist, ar, k_q); + audit_free(ar); + } + mutex_lock(audit_mtx); + continue; + } + + /* + * We have both records to write, and an active vnode + * to write to. Dequeue a record, and start the write. + * Eventually, it might make sense to dequeue several + * records and perform our own clustering, if the lower + * layers aren't doing it automatically enough. + * + * XXX: We go out of our way to avoid calling audit_free() + * with the audit_mtx held, to avoid a lock order reversal + * as free() may grab the funnel. This will be fixed at + * some point. 
+ */ + while ((ar = TAILQ_FIRST(&audit_q))) { + TAILQ_REMOVE(&audit_q, ar, k_q); + TAILQ_INSERT_TAIL(&ar_worklist, ar, k_q); + } + mutex_unlock(audit_mtx); + release_funnel = 0; + while ((ar = TAILQ_FIRST(&ar_worklist))) { + TAILQ_REMOVE(&ar_worklist, ar, k_q); + if (audit_vp != NULL) { + /* + * XXX: What should happen if there's a write + * error here? + */ + if (!release_funnel) { + thread_funnel_set(kernel_flock, TRUE); + release_funnel = 1; + } + VOP_LEASE(audit_vp, audit_p, audit_cred, + LEASE_WRITE); + error = audit_write(audit_vp, ar, audit_cred, + audit_p); + if (error) + printf("audit_worker: write error %d\n", + error); + } + audit_free(ar); + } + if (release_funnel) + thread_funnel_set(kernel_flock, FALSE); + mutex_lock(audit_mtx); + } +} + +void +audit_init(void) +{ + + /* Verify that the syscall to audit event table is the same + * size as the system call table. + */ + if (nsys_au_event != nsysent) { + printf("Security auditing service initialization failed, "); + printf("audit event table doesn't match syscall table.\n"); + return; + } + + printf("Security auditing service present\n"); + TAILQ_INIT(&audit_q); + audit_enabled = 0; + audit_suspended = 0; + audit_replacement_cred = NULL; + audit_replacement_flag = 0; + audit_replacement_vp = NULL; + audit_mtx = mutex_alloc(ETAP_NO_TRACE); + audit_wait_queue = wait_queue_alloc(SYNC_POLICY_FIFO); + audit_replacement_wait_queue = wait_queue_alloc(SYNC_POLICY_FIFO); + + /* Initialize the BSM audit subsystem. */ + kau_init(); + + kernel_thread(kernel_task, audit_worker); +} + +static void +audit_rotate_vnode(struct ucred *cred, struct vnode *vp) +{ + int ret; + + /* + * If other parallel log replacements have been requested, we wait + * until they've finished before continuing. + */ + mutex_lock(audit_mtx); + while (audit_replacement_flag != 0) { + + AUDIT_PRINTF(("audit_rotate_vnode: sleeping to wait for " + "flag\n")); + ret = wait_queue_assert_wait(audit_replacement_wait_queue, 0, + THREAD_UNINT); + mutex_unlock(audit_mtx); + + assert(ret == THREAD_WAITING); + ret = thread_block(THREAD_CONTINUE_NULL); + assert(ret == THREAD_AWAKENED); + AUDIT_PRINTF(("audit_rotate_vnode: woken up (flag %d)\n", + audit_replacement_flag)); + + mutex_lock(audit_mtx); + } + audit_replacement_cred = cred; + audit_replacement_flag = 1; + audit_replacement_vp = vp; + + /* + * Wake up the audit worker to perform the exchange once we + * release the mutex. + */ + wait_queue_wakeup_one(audit_wait_queue, 0, THREAD_AWAKENED); + + /* + * Wait for the audit_worker to broadcast that a replacement has + * taken place; we know that once this has happened, our vnode + * has been replaced in, so we can return successfully. + */ + AUDIT_PRINTF(("audit_rotate_vnode: waiting for news of " + "replacement\n")); + ret = wait_queue_assert_wait(audit_replacement_wait_queue, 0, + THREAD_UNINT); + mutex_unlock(audit_mtx); + + assert(ret == THREAD_WAITING); + ret = thread_block(THREAD_CONTINUE_NULL); + assert(ret == THREAD_AWAKENED); + AUDIT_PRINTF(("audit_rotate_vnode: change acknowledged by " + "audit_worker (flag " "now %d)\n", audit_replacement_flag)); +} + +/* + * Drain the audit queue and close the log at shutdown. + */ +void +audit_shutdown(void) +{ + + audit_rotate_vnode(NULL, NULL); +} + +static __inline__ struct uthread * +curuthread(void) +{ + + return (get_bsdthread_info(current_act())); +} + +static __inline__ struct kaudit_record * +currecord(void) +{ + + return (curuthread()->uu_ar); +} + +/********************************** + * Begin system calls. 
* + **********************************/ +/* + * System call to allow a user space application to submit a BSM audit + * record to the kernel for inclusion in the audit log. This function + * does little verification on the audit record that is submitted. + * + * XXXAUDIT: Audit preselection for user records does not currently + * work, since we pre-select only based on the AUE_audit event type, + * not the event type submitted as part of the user audit data. + */ +struct audit_args { + void * record; + int length; +}; +/* ARGSUSED */ +int +audit(struct proc *p, struct audit_args *uap, register_t *retval) +{ + register struct pcred *pc = p->p_cred; + int error; + void * rec; + struct kaudit_record *ar; + + ar = currecord(); + + /* XXX: What's the proper error code if a user audit record can't + * be written due to auditing off, or otherwise unavailable? + */ + if (ar == NULL) + return (ENOTSUP); + + error = suser(pc->pc_ucred, &p->p_acflag); + if (error) + return (error); + + if (uap->length > MAX_AUDIT_RECORD_SIZE) + return (EINVAL); + + error = kmem_alloc(kernel_map, (vm_offset_t *)&rec, uap->length); + if (error != KERN_SUCCESS) + return(ENOMEM); + + error = copyin(uap->record, rec, uap->length); + if (error) + goto free_out; + + /* Verify the record */ + if (bsm_rec_verify(rec) == 0) { + error = EINVAL; + goto free_out; + } + + /* Attach the user audit record to the kernel audit record. Because + * this system call is an auditable event, we will write the user + * record along with the record for this audit event. + */ + ar->k_udata = rec; + ar->k_ulen = uap->length; + return (0); + +free_out: + kmem_free(kernel_map, (vm_offset_t)rec, uap->length); + return (error); +} + +/* + * System call to manipulate auditing. + */ +struct auditon_args { + int cmd; + void * data; + int length; +}; +/* ARGSUSED */ +int +auditon(struct proc *p, struct auditon_args *uap, register_t *retval) +{ + register struct pcred *pc = p->p_cred; + int error; + + error = suser(pc->pc_ucred, &p->p_acflag); + if (error) + return (error); + return (ENOSYS); +} + +/* + * System call to pass in file descriptor for audit log. + */ +struct auditsvc_args { + int fd; + int limit; +}; +/* ARGSUSED */ +int +auditsvc(struct proc *p, struct auditsvc_args *uap, register_t *retval) +{ + register struct pcred *pc = p->p_cred; + int error; + + error = suser(pc->pc_ucred, &p->p_acflag); + if (error) + return (error); + return (ENOSYS); +} + +/* + * System calls to manage the user audit information. + * XXXAUDIT May need to lock the proc structure. + */ +struct getauid_args { + au_id_t *auid; +}; +/* ARGSUSED */ +int +getauid(struct proc *p, struct getauid_args *uap, register_t *retval) +{ + register struct pcred *pc = p->p_cred; + int error; + + error = suser(pc->pc_ucred, &p->p_acflag); + if (error) + return (error); + + error = copyout((void *)&p->p_au->ai_auid, (void *)uap->auid, + sizeof(*uap->auid)); + if (error) + return (error); + + return (0); +} + +struct setauid_args { + au_id_t *auid; +}; +/* ARGSUSED */ +int +setauid(struct proc *p, struct setauid_args *uap, register_t *retval) +{ + register struct pcred *pc = p->p_cred; + int error; + + error = suser(pc->pc_ucred, &p->p_acflag); + if (error) + return (error); + + error = copyin((void *)uap->auid, (void *)&p->p_au->ai_auid, + sizeof(p->p_au->ai_auid)); + if (error) + return (error); + + audit_arg_auid(p->p_au->ai_auid); + return (0); +} + +/* + * System calls to get and set process audit information. 
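+ *
+ * Illustrative privileged usage of the pair defined below (a sketch;
+ * error handling elided, the audit ID value is hypothetical):
+ *
+ *	struct auditinfo ai;
+ *	getaudit(&ai);			// copies *p->p_au out to user land
+ *	ai.ai_auid = 501;		// hypothetical audit ID
+ *	setaudit(&ai);			// root only, checked via suser()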
+ */ +struct getaudit_args { + struct auditinfo *auditinfo; +}; +/* ARGSUSED */ +int +getaudit(struct proc *p, struct getaudit_args *uap, register_t *retval) +{ + register struct pcred *pc = p->p_cred; + int error; + + error = suser(pc->pc_ucred, &p->p_acflag); + if (error) + return (error); + error = copyout((void *)p->p_au, (void *)uap->auditinfo, + sizeof(*uap->auditinfo)); + if (error) + return (error); + + return (0); +} + +struct setaudit_args { + struct auditinfo *auditinfo; +}; +/* ARGSUSED */ +int +setaudit(struct proc *p, struct setaudit_args *uap, register_t *retval) +{ + register struct pcred *pc = p->p_cred; + int error; + + error = suser(pc->pc_ucred, &p->p_acflag); + if (error) + return (error); + error = copyin((void *)uap->auditinfo, (void *)p->p_au, + sizeof(*p->p_au)); + if (error) + return (error); + + return (0); +} + +struct getaudit_addr_args { + struct auditinfo_addr *auditinfo_addr; + int length; +}; +/* ARGSUSED */ +int +getaudit_addr(struct proc *p, struct getaudit_addr_args *uap, register_t *retval) +{ + register struct pcred *pc = p->p_cred; + int error; + + error = suser(pc->pc_ucred, &p->p_acflag); + if (error) + return (error); + return (ENOSYS); +} + +struct setaudit_addr_args { + struct auditinfo_addr *auditinfo_addr; + int length; +}; +/* ARGSUSED */ +int +setaudit_addr(struct proc *p, struct setaudit_addr_args *uap, register_t *retval) +{ + register struct pcred *pc = p->p_cred; + int error; + + error = suser(pc->pc_ucred, &p->p_acflag); + if (error) + return (error); + return (ENOSYS); +} + +/* + * Syscall to manage audit files. + * + * XXX: Should generate an audit event. + */ +struct auditctl_args { + char *path; +}; +/* ARGSUSED */ +int +auditctl(struct proc *p, struct auditctl_args *uap) +{ + struct kaudit_record *ar; + struct nameidata nd; + struct ucred *cred; + struct vnode *vp; + int error, flags, ret; + + error = suser(p->p_ucred, &p->p_acflag); + if (error) + return (error); + + vp = NULL; + cred = NULL; + + /* + * If a path is specified, open the replacement vnode, perform + * validity checks, and grab another reference to the current + * credential. + */ + if (uap->path != NULL) { + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, + uap->path, p); + flags = audit_open_flags; + error = vn_open(&nd, flags, 0); + if (error) + goto out; + VOP_UNLOCK(nd.ni_vp, 0, p); + vp = nd.ni_vp; + if (vp->v_type != VREG) { + vn_close(vp, audit_close_flags, p->p_ucred, p); + error = EINVAL; + goto out; + } + cred = p->p_ucred; + crhold(cred); + } + + audit_rotate_vnode(cred, vp); +out: + return (error); +} + +/********************************** + * End of system calls. * + **********************************/ + +/* + * MPSAFE + */ +struct kaudit_record * +audit_new(int event, struct proc *p, struct uthread *uthread) +{ + struct kaudit_record *ar; + int no_record; + + /* + * Eventually, there may be certain classes of events that + * we will audit regardless of the audit state at the time + * the record is created. These events will generally + * correspond to changes in the audit state. The dummy + * code below is from our first prototype, but may also + * be used in the final version (with modified event numbers). + */ +#if 0 + if (event != AUDIT_EVENT_FILESTOP && event != AUDIT_EVENT_FILESTART) { +#endif + mutex_lock(audit_mtx); + no_record = (audit_suspended || !audit_enabled); + mutex_unlock(audit_mtx); + if (no_record) + return (NULL); +#if 0 + } +#endif + + /* + * Eventually, we might want to have global event filtering + * by event type here. 
+ */ + + /* + * XXX: Process-based event preselection should occur here. + * Currently, we only post-select. + */ + + /* + * Initialize the audit record header. + * XXX: Should probably use a zone; whatever we use must be + * safe to call from the non-BSD side of the house. + * XXX: We may want to fail-stop if allocation fails. + */ + (void)kmem_alloc(kernel_map, &ar, sizeof(*ar)); + if (ar == NULL) + return NULL; + + bzero(ar, sizeof(*ar)); + ar->k_ar.ar_magic = AUDIT_RECORD_MAGIC; + ar->k_ar.ar_event = event; + nanotime(&ar->k_ar.ar_starttime); + + /* Export the subject credential. */ + cru2x(p->p_ucred, &ar->k_ar.ar_subj_cred); + ar->k_ar.ar_subj_ruid = p->p_cred->p_ruid; + ar->k_ar.ar_subj_rgid = p->p_cred->p_rgid; + ar->k_ar.ar_subj_egid = p->p_ucred->cr_groups[0]; + ar->k_ar.ar_subj_auid = p->p_au->ai_auid; + ar->k_ar.ar_subj_pid = p->p_pid; + bcopy(p->p_comm, ar->k_ar.ar_subj_comm, MAXCOMLEN); + bcopy(&p->p_au->ai_mask, &ar->k_ar.ar_subj_amask, + sizeof(p->p_au->ai_mask)); + + return (ar); +} + +/* + * MPSAFE + * XXXAUDIT: So far, this is unused, and should probably be GC'd. + */ +void +audit_abort(struct kaudit_record *ar) +{ + + audit_free(ar); +} + +/* + * MPSAFE + */ +void +audit_commit(struct kaudit_record *ar, int error, int retval) +{ + + if (ar == NULL) + return; + + ar->k_ar.ar_errno = error; + ar->k_ar.ar_retval = retval; + + /* + * We might want to do some system-wide post-filtering + * here at some point. + */ + + /* + * Timestamp system call end. + */ + nanotime(&ar->k_ar.ar_endtime); + + /* + * XXXAUDIT: The number of outstanding uncommitted audit records is + * limited by the number of concurrent threads servicing system + * calls in the kernel. However, there is currently no bound on + * the size of the committed records in the audit event queue + * before they are sent to disk. Probably, there should be a fixed + * size bound (perhaps configurable), and if that bound is reached, + * threads should sleep in audit_commit() until there's room. + */ + mutex_lock(audit_mtx); + /* + * Note: it could be that some records initiated while audit was + * enabled should still be committed? + */ + if (audit_suspended || !audit_enabled) { + mutex_unlock(audit_mtx); + audit_free(ar); + return; + } + TAILQ_INSERT_TAIL(&audit_q, ar, k_q); + wait_queue_wakeup_one(audit_wait_queue, 0, THREAD_AWAKENED); + mutex_unlock(audit_mtx); +} + +/* + * Calls to set up and tear down audit structures associated with + * each system call. + */ +void +audit_syscall_enter(unsigned short code, struct proc *proc, + struct uthread *uthread) +{ + int audit_event; + + assert(uthread->uu_ar == NULL); + + audit_event = sys_au_event[code]; + + /* + * Allocate an audit record, if desired, and store in the BSD + * thread for later use. + */ + if (audit_event != AUE_NULL) { +#if 0 + AUDIT_PRINTF(("Allocated record type %d for syscall %d\n", + audit_event, code)); +#endif + if (au_preselect(audit_event, &proc->p_au->ai_mask, + AU_PRS_FAILURE | AU_PRS_SUCCESS)) { + uthread->uu_ar = audit_new(audit_event, proc, uthread); + } else { + uthread->uu_ar = NULL; + } + } +} + +void +audit_syscall_exit(int error, struct proc *proc, struct uthread *uthread) +{ + int retval; + + /* + * Commit the audit record as desired; once we pass the record + * into audit_commit(), the memory is owned by the audit + * subsystem. + * The return value from the system call is stored on the user + * thread. If there was an error, the return value is set to -1, + * imitating the behavior of the cerror routine. 
+ */ + if (error) + retval = -1; + else + retval = uthread->uu_rval[0]; + + audit_commit(uthread->uu_ar, error, retval); + if (uthread->uu_ar != NULL) + AUDIT_PRINTF(("audit record committed by pid %d\n", proc->p_pid)); + uthread->uu_ar = NULL; + +} + +/* + * Calls to manipulate elements of the audit record structure from system + * call code. Macro wrappers will prevent this functions from being + * entered if auditing is disabled, avoiding the function call cost. We + * check the thread audit record pointer anyway, as the audit condition + * could change, and pre-selection may not have allocated an audit + * record for this event. + */ +void +audit_arg_accmode(int accmode) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_accmode = accmode; + ar->k_ar.ar_valid_arg |= ARG_ACCMODE; +} + +void +audit_arg_cmode(int cmode) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_cmode = cmode; + ar->k_ar.ar_valid_arg |= ARG_CMODE; +} + +void +audit_arg_fd(int fd) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_fd = fd; + ar->k_ar.ar_valid_arg |= ARG_FD; +} + +void +audit_arg_fflags(int fflags) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_fflags = fflags; + ar->k_ar.ar_valid_arg |= ARG_FFLAGS; +} + +void +audit_arg_gid(gid_t gid, gid_t egid, gid_t rgid, gid_t sgid) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_gid = gid; + ar->k_ar.ar_arg_egid = egid; + ar->k_ar.ar_arg_rgid = rgid; + ar->k_ar.ar_arg_sgid = sgid; + ar->k_ar.ar_valid_arg |= (ARG_GID | ARG_EGID | ARG_RGID | ARG_SGID); +} + +void +audit_arg_uid(uid_t uid, uid_t euid, uid_t ruid, uid_t suid) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_uid = uid; + ar->k_ar.ar_arg_euid = euid; + ar->k_ar.ar_arg_ruid = ruid; + ar->k_ar.ar_arg_suid = suid; + ar->k_ar.ar_valid_arg |= (ARG_UID | ARG_EUID | ARG_RUID | ARG_SUID); +} + +void +audit_arg_groupset(gid_t *gidset, u_int gidset_size) +{ + int i; + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + for (i = 0; i < gidset_size; i++) + ar->k_ar.ar_arg_groups.gidset[i] = gidset[i]; + ar->k_ar.ar_arg_groups.gidset_size = gidset_size; + ar->k_ar.ar_valid_arg |= ARG_GROUPSET; +} + +void +audit_arg_login(char *login) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + +#if 0 + /* + * XXX: Add strlcpy() to Darwin for improved safety. 
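+ *
+ * For reference, the semantics the #if 0 branch assumes (a sketch of the
+ * usual BSD strlcpy(), not part of this change):
+ *
+ *	size_t
+ *	strlcpy(char *dst, const char *src, size_t siz)
+ *	{
+ *		size_t srclen = strlen(src);
+ *		if (siz != 0) {
+ *			size_t n = (srclen >= siz) ? siz - 1 : srclen;
+ *			memcpy(dst, src, n);
+ *			dst[n] = '\0';		// always NUL-terminates
+ *		}
+ *		return (srclen);		// length it tried to create
+ *	}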
+ */ + strlcpy(ar->k_ar.ar_arg_login, login, MAXLOGNAME); +#else + strcpy(ar->k_ar.ar_arg_login, login); +#endif + + ar->k_ar.ar_valid_arg |= ARG_LOGIN; +} + +void +audit_arg_mask(int mask) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_mask = mask; + ar->k_ar.ar_valid_arg |= ARG_MASK; +} + +void +audit_arg_mode(mode_t mode) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_mode = mode; + ar->k_ar.ar_valid_arg |= ARG_MODE; +} + +void +audit_arg_dev(int dev) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_dev = dev; + ar->k_ar.ar_valid_arg |= ARG_DEV; +} + +void +audit_arg_owner(uid_t uid, gid_t gid) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_uid = uid; + ar->k_ar.ar_arg_gid = gid; + ar->k_ar.ar_valid_arg |= (ARG_UID | ARG_GID); +} + +void +audit_arg_pid(pid_t pid) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_pid = pid; + ar->k_ar.ar_valid_arg |= ARG_PID; +} + +void +audit_arg_signum(u_int signum) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_signum = signum; + ar->k_ar.ar_valid_arg |= ARG_SIGNUM; +} + +void +audit_arg_socket(int sodomain, int sotype, int soprotocol) +{ + + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_sockinfo.sodomain = sodomain; + ar->k_ar.ar_arg_sockinfo.sotype = sotype; + ar->k_ar.ar_arg_sockinfo.soprotocol = soprotocol; + ar->k_ar.ar_valid_arg |= ARG_SOCKINFO; +} + +void +audit_arg_sockaddr(struct proc *p, struct sockaddr *so) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL || p == NULL || so == NULL) + return; + + bcopy(so, &ar->k_ar.ar_arg_sockaddr, sizeof(ar->k_ar.ar_arg_sockaddr)); + switch (so->sa_family) { + case AF_INET: + ar->k_ar.ar_valid_arg |= ARG_SADDRINET; + break; + case AF_INET6: + ar->k_ar.ar_valid_arg |= ARG_SADDRINET6; + break; + case AF_UNIX: + audit_arg_upath(p, ((struct sockaddr_un *)so)->sun_path, + ARG_UPATH1); + ar->k_ar.ar_valid_arg |= ARG_SADDRUNIX; + break; + } +} + +void +audit_arg_auid(uid_t auid) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_auid = auid; + ar->k_ar.ar_valid_arg |= ARG_AUID; +} + +void +audit_arg_text(char *text) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + /* Invalidate the text string */ + ar->k_ar.ar_valid_arg &= (ARG_ALL ^ ARG_TEXT); + if (text == NULL) + return; + + if (ar->k_ar.ar_arg_text == NULL) { + kmem_alloc(kernel_map, &ar->k_ar.ar_arg_text, MAXPATHLEN); + if (ar->k_ar.ar_arg_text == NULL) + return; + } + + strcpy(ar->k_ar.ar_arg_text, text); + ar->k_ar.ar_valid_arg |= ARG_TEXT; +} + +void +audit_arg_cmd(int cmd) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_cmd = cmd; + ar->k_ar.ar_valid_arg |= ARG_CMD; +} + +void +audit_arg_svipc_cmd(int cmd) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + ar->k_ar.ar_arg_svipc_cmd = cmd; + ar->k_ar.ar_valid_arg |= ARG_SVIPC_CMD; +} + +void +audit_arg_svipc_perm(struct ipc_perm *perm) +{ + struct kaudit_record *ar; + + ar = currecord(); + if (ar == NULL) + return; + + bcopy(perm, &ar->k_ar.ar_arg_svipc_perm, + sizeof(ar->k_ar.ar_arg_svipc_perm)); + ar->k_ar.ar_valid_arg |= ARG_SVIPC_PERM; +} + +void 
+audit_arg_svipc_id(int id)
+{
+	struct kaudit_record *ar;
+
+	ar = currecord();
+	if (ar == NULL)
+		return;
+
+	ar->k_ar.ar_arg_svipc_id = id;
+	ar->k_ar.ar_valid_arg |= ARG_SVIPC_ID;
+}
+
+void
+audit_arg_svipc_addr(void * addr)
+{
+	struct kaudit_record *ar;
+
+	ar = currecord();
+	if (ar == NULL)
+		return;
+
+	ar->k_ar.ar_arg_svipc_addr = addr;
+	ar->k_ar.ar_valid_arg |= ARG_SVIPC_ADDR;
+}
+
+/*
+ * Initialize the audit information for a process, presumably the first
+ * process in the system.
+ * XXX It is not clear what the initial values should be for audit ID,
+ * session ID, etc.
+ */
+void
+audit_proc_init(struct proc *p)
+{
+	MALLOC_ZONE(p->p_au, struct auditinfo *, sizeof(*p->p_au),
+	    M_SUBPROC, M_WAITOK);
+
+	bzero((void *)p->p_au, sizeof(*p->p_au));
+}
+
+/*
+ * Copy the audit info from the parent process to the child process when
+ * a fork takes place.
+ * XXX Need to check for failure from the memory allocation, in here
+ * as well as in any functions that use the process auditing info.
+ */
+void
+audit_proc_fork(struct proc *parent, struct proc *child)
+{
+	/* Always set up the audit information pointer as this function
+	 * should only be called when the proc is new.  If proc structures
+	 * are ever cached and reused, then this behavior will leak memory.
+	 */
+	MALLOC_ZONE(child->p_au, struct auditinfo *, sizeof(*child->p_au),
+	    M_SUBPROC, M_WAITOK);
+
+	bcopy(parent->p_au, child->p_au, sizeof(*child->p_au));
+}
+
+/*
+ * Free the auditing structure for the process.
+ */
+void
+audit_proc_free(struct proc *p)
+{
+	FREE_ZONE((void *)p->p_au, sizeof(*p->p_au), M_SUBPROC);
+	p->p_au = NULL;
+}
+
+/*
+ * Store a path as given by the user process for auditing into the audit
+ * record stored on the user thread. This function will allocate the memory to
+ * store the path info if not already available. This memory will be
+ * freed when the audit record is freed.
+ */
+void
+audit_arg_upath(struct proc *p, char *upath, u_int64_t flags)
+{
+	struct kaudit_record *ar;
+	char **pathp;
+
+	if (p == NULL || upath == NULL)
+		return;		/* nothing to do! */
+
+	if ((flags & (ARG_UPATH1 | ARG_UPATH2)) == 0)
+		return;
+
+	ar = currecord();
+	if (ar == NULL)	/* This will be the case for unaudited system calls */
+		return;
+
+	if (flags & ARG_UPATH1) {
+		ar->k_ar.ar_valid_arg &= (ARG_ALL ^ ARG_UPATH1);
+		pathp = &ar->k_ar.ar_arg_upath1;
+	}
+	else {
+		ar->k_ar.ar_valid_arg &= (ARG_ALL ^ ARG_UPATH2);
+		pathp = &ar->k_ar.ar_arg_upath2;
+	}
+
+	if (*pathp == NULL) {
+		kmem_alloc(kernel_map, pathp, MAXPATHLEN);
+		if (*pathp == NULL)
+			return;
+	}
+
+	canon_path(p, upath, *pathp);
+
+	if (flags & ARG_UPATH1)
+		ar->k_ar.ar_valid_arg |= ARG_UPATH1;
+	else
+		ar->k_ar.ar_valid_arg |= ARG_UPATH2;
+}
+
+/*
+ * Function to save the path and vnode attr information into the audit
+ * record.
+ *
+ * It is assumed that the caller will hold any vnode locks necessary to
+ * perform a VOP_GETATTR() on the passed vnode.
+ *
+ * XXX: The attr code is very similar to vfs_vnops.c:vn_stat(), but
+ *      always provides access to the generation number as we need that
+ *      to construct the BSM file ID.
+ * XXX: We should accept the process argument from the caller, since
+ *      it's very likely they already have a reference.
+ * XXX: Error handling in this function is poor.
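+ *
+ * For orientation, the vattr fields captured below feed the BSM file
+ * identity (names on the right are the vnode_au_info members used here):
+ *
+ *	va_mode  -> vn_mode	va_fsid   -> vn_fsid
+ *	va_uid   -> vn_uid	va_fileid -> vn_fileid
+ *	va_gid   -> vn_gid	va_gen    -> vn_gen
+ *	va_rdev  -> vn_dev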
+ */
+void
+audit_arg_vnpath(struct vnode *vp, u_int64_t flags)
+{
+	struct kaudit_record *ar;
+	struct vattr vattr;
+	int error;
+	int len;
+	char **pathp;
+	struct vnode_au_info *vnp;
+	struct proc *p;
+
+	if (vp == NULL)
+		return;
+
+	ar = currecord();
+	if (ar == NULL)	/* This will be the case for unaudited system calls */
+		return;
+
+	if ((flags & (ARG_VNODE1 | ARG_VNODE2)) == 0)
+		return;
+
+	p = current_proc();
+
+	if (flags & ARG_VNODE1) {
+		ar->k_ar.ar_valid_arg &= (ARG_ALL ^ ARG_KPATH1);
+		ar->k_ar.ar_valid_arg &= (ARG_ALL ^ ARG_VNODE1);
+		pathp = &ar->k_ar.ar_arg_kpath1;
+		vnp = &ar->k_ar.ar_arg_vnode1;
+	}
+	else {
+		ar->k_ar.ar_valid_arg &= (ARG_ALL ^ ARG_KPATH2);
+		ar->k_ar.ar_valid_arg &= (ARG_ALL ^ ARG_VNODE2);
+		pathp = &ar->k_ar.ar_arg_kpath2;
+		vnp = &ar->k_ar.ar_arg_vnode2;
+	}
+
+	if (*pathp == NULL) {
+		kmem_alloc(kernel_map, pathp, MAXPATHLEN);
+		if (*pathp == NULL)
+			return;
+	}
+
+	/* Copy the path looked up by the vn_getpath() function */
+	len = MAXPATHLEN;
+	vn_getpath(vp, *pathp, &len);
+	if (flags & ARG_VNODE1)
+		ar->k_ar.ar_valid_arg |= ARG_KPATH1;
+	else
+		ar->k_ar.ar_valid_arg |= ARG_KPATH2;
+
+	/*
+	 * XXX: We'd assert the vnode lock here, only Darwin doesn't
+	 * appear to have vnode locking assertions.
+	 */
+	error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
+	if (error) {
+		/* XXX: How to handle this case? */
+		return;
+	}
+
+	vnp->vn_mode = vattr.va_mode;
+	vnp->vn_uid = vattr.va_uid;
+	vnp->vn_gid = vattr.va_gid;
+	vnp->vn_dev = vattr.va_rdev;
+	vnp->vn_fsid = vattr.va_fsid;
+	vnp->vn_fileid = vattr.va_fileid;
+	vnp->vn_gen = vattr.va_gen;
+	if (flags & ARG_VNODE1)
+		ar->k_ar.ar_valid_arg |= ARG_VNODE1;
+	else
+		ar->k_ar.ar_valid_arg |= ARG_VNODE2;
+
+}
+
+#else /* !AUDIT */
+
+void
+audit_init(void)
+{
+
+}
+
+void
+audit_shutdown(void)
+{
+
+}
+
+int
+audit(struct proc *p, struct audit_args *uap, register_t *retval)
+{
+	return (ENOSYS);
+}
+
+int
+auditon(struct proc *p, struct auditon_args *uap, register_t *retval)
+{
+	return (ENOSYS);
+}
+
+int
+auditsvc(struct proc *p, struct auditsvc_args *uap, register_t *retval)
+{
+	return (ENOSYS);
+}
+
+int
+getauid(struct proc *p, struct getauid_args *uap, register_t *retval)
+{
+	return (ENOSYS);
+}
+
+int
+setauid(struct proc *p, struct setauid_args *uap, register_t *retval)
+{
+	return (ENOSYS);
+}
+
+int
+getaudit(struct proc *p, struct getaudit_args *uap, register_t *retval)
+{
+	return (ENOSYS);
+}
+
+int
+setaudit(struct proc *p, struct setaudit_args *uap, register_t *retval)
+{
+	return (ENOSYS);
+}
+
+int
+getaudit_addr(struct proc *p, struct getaudit_addr_args *uap, register_t *retval)
+{
+	return (ENOSYS);
+}
+
+int
+setaudit_addr(struct proc *p, struct setaudit_addr_args *uap, register_t *retval)
+{
+	return (ENOSYS);
+}
+
+int
+auditctl(struct proc *p, struct auditctl_args *uap, register_t *retval)
+{
+	return (ENOSYS);
+}
+
+void
+audit_proc_init(struct proc *p)
+{
+
+}
+
+void
+audit_proc_fork(struct proc *parent, struct proc *child)
+{
+
+}
+
+void
+audit_proc_free(struct proc *p)
+{
+
+}
+
+#endif /* AUDIT */
diff --git a/bsd/kern/kern_bsm_audit.c b/bsd/kern/kern_bsm_audit.c
new file mode 100644
index 000000000..99132a09c
--- /dev/null
+++ b/bsd/kern/kern_bsm_audit.c
@@ -0,0 +1,756 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+/* The number of BSM records allocated. */
+static int bsm_rec_count = 0;
+
+/*
+ * Records that can be recycled are maintained in the list given below.
+ * The maximum number of elements that can be present in this list is
+ * bounded by MAX_AUDIT_RECORDS. Memory allocated for these records is
+ * never freed.
+ */
+LIST_HEAD(, au_record) bsm_free_q;
+
+/*
+ * Lock for serializing access to the list of audit records.
+ */
+static mutex_t *bsm_audit_mutex;
+
+/*
+ * Initialize the BSM auditing subsystem.
+ */
+void
+kau_init(void)
+{
+	printf("BSM auditing present\n");
+	LIST_INIT(&bsm_free_q);
+	bsm_audit_mutex = mutex_alloc(ETAP_NO_TRACE);
+}
+
+/*
+ * This call reserves memory for the audit record.
+ * Memory must be guaranteed before any auditable event can be
+ * generated.
+ * The au_record structure maintains a reference to the
+ * memory allocated above and also the list of tokens associated
+ * with this record.
+ */
+struct au_record *
+kau_open(void)
+{
+	struct au_record *rec = NULL;
+
+	/*
+	 * Find an unused record, remove it from the free list, mark as used.
+	 */
+	mutex_lock(bsm_audit_mutex);
+	if (!LIST_EMPTY(&bsm_free_q)) {
+		rec = LIST_FIRST(&bsm_free_q);
+		LIST_REMOVE(rec, au_rec_q);
+	}
+	mutex_unlock(bsm_audit_mutex);
+
+	if (rec == NULL) {
+		mutex_lock(bsm_audit_mutex);
+		if (bsm_rec_count >= MAX_AUDIT_RECORDS) {
+			/* XXX We need to increase size of MAX_AUDIT_RECORDS */
+			mutex_unlock(bsm_audit_mutex);
+			return NULL;
+		}
+		mutex_unlock(bsm_audit_mutex);
+
+		/*
+		 * Create a new BSM kernel record.
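+		 * The record and its MAX_AUDIT_RECORD_SIZE data buffer are
+		 * allocated here once and thereafter recycled through
+		 * bsm_free_q by kau_free(), so they are never returned to
+		 * the VM system.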
+		 */
+		kmem_alloc(kernel_map, (vm_offset_t *)&rec, sizeof(*rec));
+		if(rec == NULL) {
+			return NULL;
+		}
+		kmem_alloc(kernel_map, (vm_offset_t *)&rec->data,
+			   MAX_AUDIT_RECORD_SIZE * sizeof(u_char));
+		if((rec->data) == NULL) {
+			kmem_free(kernel_map, (vm_offset_t)rec, sizeof(*rec));
+			return NULL;
+		}
+		mutex_lock(bsm_audit_mutex);
+		bsm_rec_count++;
+		mutex_unlock(bsm_audit_mutex);
+	}
+	memset(rec->data, 0, MAX_AUDIT_RECORD_SIZE);
+
+	TAILQ_INIT(&rec->token_q);
+	rec->len = 0;
+	rec->used = 1;
+
+	return rec;
+}
+
+/*
+ * Store the token with the record descriptor.
+ */
+int kau_write(struct au_record *rec, struct au_token *tok)
+{
+	if(tok == NULL) {
+		return -1; /* Invalid Token */
+	}
+
+	/* Add the token to the tail */
+	/*
+	 * XXX Not locking here -- we should not be writing to
+	 * XXX the same audit record from different threads
+	 */
+	TAILQ_INSERT_TAIL(&rec->token_q, tok, tokens);
+
+	rec->len += tok->len; /* grow record length by token size bytes */
+
+	return 0;
+}
+
+/*
+ * Close out the audit record by adding the header token, identifying
+ * any missing tokens. Write out the tokens to the record memory.
+ */
+int kau_close(struct au_record *rec, struct timespec *ctime, short event)
+{
+	u_char *dptr;
+	size_t tot_rec_size;
+	token_t *cur, *hdr, *trail;
+	int retval = 0;
+
+	tot_rec_size = rec->len + HEADER_SIZE + TRAILER_SIZE;
+	if(tot_rec_size <= MAX_AUDIT_RECORD_SIZE) {
+		/* Create the header token */
+		hdr = kau_to_header32(ctime, tot_rec_size, event, 0);
+
+		if(hdr != NULL) {
+			/* Add to head of list */
+			TAILQ_INSERT_HEAD(&rec->token_q, hdr, tokens);
+
+			trail = au_to_trailer(tot_rec_size);
+			if(trail != NULL) {
+				TAILQ_INSERT_TAIL(&rec->token_q, trail, tokens);
+			}
+		}
+		/* Serialize token data to the record */
+
+		rec->len = tot_rec_size;
+		dptr = rec->data;
+		TAILQ_FOREACH(cur, &rec->token_q, tokens) {
+			memcpy(dptr, cur->t_data, cur->len);
+			dptr += cur->len;
+		}
+	}
+
+	return (retval);
+}
+
+/*
+ * Free a BSM audit record by releasing all the tokens and clearing the
+ * audit record information.
+ */
+void kau_free(struct au_record *rec)
+{
+	struct au_token *tok;
+
+	/* Free the token list */
+	while ((tok = TAILQ_FIRST(&rec->token_q))) {
+		TAILQ_REMOVE(&rec->token_q, tok, tokens);
+		kmem_free(kernel_map, (vm_offset_t)tok->t_data, tok->len);
+		kmem_free(kernel_map, (vm_offset_t)tok, sizeof(struct au_token));
+	}
+
+	rec->used = 0;
+	rec->len = 0;
+
+	mutex_lock(bsm_audit_mutex);
+
+	/* Add the record to the freelist */
+	LIST_INSERT_HEAD(&bsm_free_q, rec, au_rec_q);
+
+	mutex_unlock(bsm_audit_mutex);
+
+}
+
+/*
+ * XXX May want to turn some (or all) of these macros into functions in
+ * order to reduce the generated code size.
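+ * Each macro expands to guarded au_to_path()/au_to_attr32() calls, so a
+ * case in kaudit_to_bsm() can emit the path and attribute tokens for an
+ * argument with a single statement.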
+ */ +#define UPATH1_TOKENS \ + do { \ + if (ar->ar_valid_arg & ARG_UPATH1) { \ + tok = au_to_path(ar->ar_arg_upath1); \ + kau_write(rec, tok); \ + } \ + } while (0) + +#define UPATH2_TOKENS \ + do { \ + if (ar->ar_valid_arg & ARG_UPATH2) { \ + tok = au_to_path(ar->ar_arg_upath2); \ + kau_write(rec, tok); \ + } \ + } while (0) + +#define KPATH1_VNODE1_TOKENS \ + do { \ + if (ar->ar_valid_arg & ARG_KPATH1) { \ + tok = au_to_path(ar->ar_arg_kpath1); \ + kau_write(rec, tok); \ + } \ + if (ar->ar_valid_arg & ARG_VNODE1) { \ + fill_vattr(&vattr, &ar->ar_arg_vnode1); \ + tok = au_to_attr32(&vattr); \ + kau_write(rec, tok); \ + } \ + } while (0) + +#define KPATH1_VNODE1_OR_UPATH1_TOKENS \ + do { \ + if (ar->ar_valid_arg & ARG_KPATH1) { \ + tok = au_to_path(ar->ar_arg_kpath1); \ + kau_write(rec, tok); \ + } else { \ + UPATH1_TOKENS; \ + } \ + if (ar->ar_valid_arg & ARG_VNODE1) { \ + fill_vattr(&vattr, &ar->ar_arg_vnode1); \ + tok = au_to_attr32(&vattr); \ + kau_write(rec, tok); \ + } \ + } while (0) + +#define KPATH2_VNODE2_TOKENS \ + do { \ + if (ar->ar_valid_arg & ARG_KPATH2) { \ + tok = au_to_path(ar->ar_arg_kpath2); \ + kau_write(rec, tok); \ + } \ + if (ar->ar_valid_arg & ARG_VNODE2) { \ + fill_vattr(&vattr, &ar->ar_arg_vnode2); \ + tok = au_to_attr32(&vattr); \ + kau_write(rec, tok); \ + } \ + } while (0) + +#define FD_KPATH1_VNODE1_TOKENS \ + do { \ + if (ar->ar_valid_arg & ARG_KPATH1) { \ + tok = au_to_path(ar->ar_arg_kpath1); \ + kau_write(rec, tok); \ + if (ar->ar_valid_arg & ARG_VNODE1) { \ + fill_vattr(&vattr, &ar->ar_arg_vnode1); \ + tok = au_to_attr32(&vattr); \ + kau_write(rec, tok); \ + } \ + } else { \ + tok = au_to_arg32(1, "no path: fd", ar->ar_arg_fd); \ + kau_write(rec, tok); \ + } \ + } while (0) + +/* + * Convert an internal kernel audit record to a BSM record and return + * a success/failure indicator. The BSM record is passed as an out + * parameter to this function. + * Return conditions: + * BSM_SUCCESS: The BSM record is valid + * BSM_FAILURE: Failure; the BSM record is NULL. + * BSM_NOAUDIT: The event is not auditable for BSM; the BSM record is NULL. + */ +int +kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) +{ + struct au_token *tok, *subj_tok; + struct au_record *rec; + au_tid_t tid; + struct audit_record *ar; + struct vattr vattr; + int sorf; + int ctr; + + *pau = NULL; + if (kar == NULL) + return (BSM_FAILURE); + + ar = &kar->k_ar; + + /* + * Decide whether to create the BSM audit record by checking the + * error value from the system call and using the appropriate + * user audit mask. + */ + if (ar->ar_errno) + sorf = AU_PRS_FAILURE; + else + sorf = AU_PRS_SUCCESS; + + if (au_preselect(ar->ar_event, &ar->ar_subj_amask, sorf) == 0) + return (BSM_NOAUDIT); + + rec = kau_open(); + if (rec == NULL) + return (BSM_FAILURE); + + /* Create the subject token */ + tid.port = ar->ar_subj_term.port; + tid.machine = ar->ar_subj_term.machine; + subj_tok = au_to_subject32(ar->ar_subj_auid, /* audit ID */ + ar->ar_subj_cred.cr_uid, /* eff uid */ + ar->ar_subj_egid, /* eff group id */ + ar->ar_subj_ruid, /* real uid */ + ar->ar_subj_rgid, /* real group id */ + ar->ar_subj_pid, /* process id */ + ar->ar_subj_asid, /* session ID */ + &tid); + + /* The logic inside each case fills in the tokens required for the + * event, except for the header, trailer, and return tokens. The + * header and trailer tokens are added by the kau_close() function. + * The return token is added outside of the switch statement. 
+ */ + switch(ar->ar_event) { + + /* + * Socket-related events. + */ + case AUE_ACCEPT: + case AUE_BIND: + case AUE_CONNECT: + case AUE_RECVFROM: + case AUE_RECVMSG: + case AUE_SENDMSG: + case AUE_SENDTO: + tok = au_to_arg32(1, "fd", ar->ar_arg_fd); + kau_write(rec, tok); + if (ar->ar_valid_arg & ARG_SADDRINET) { + tok = au_to_sock_inet( + (struct sockaddr_in *)&ar->ar_arg_sockaddr); + kau_write(rec, tok); + } + if (ar->ar_valid_arg & ARG_SADDRUNIX) { + tok = au_to_sock_unix( + (struct sockaddr_un *)&ar->ar_arg_sockaddr); + kau_write(rec, tok); + UPATH1_TOKENS; + } + /* XXX Need to handle ARG_SADDRINET6 */ + break; + + case AUE_SOCKET: + case AUE_SOCKETPAIR: + tok = au_to_arg32(1,"domain", ar->ar_arg_sockinfo.sodomain); + kau_write(rec, tok); + tok = au_to_arg32(2,"type", ar->ar_arg_sockinfo.sotype); + kau_write(rec, tok); + tok = au_to_arg32(3,"protocol", ar->ar_arg_sockinfo.soprotocol); + kau_write(rec, tok); + break; + + case AUE_SETSOCKOPT: + case AUE_SHUTDOWN: + tok = au_to_arg32(1, "fd", ar->ar_arg_fd); + kau_write(rec, tok); + break; + + case AUE_SETAUID: + tok = au_to_arg32(2, "setauid", ar->ar_arg_auid); + kau_write(rec, tok); + /* fall through */ + case AUE_ADJTIME: + case AUE_AUDIT: + case AUE_EXIT: + case AUE_GETAUID: + case AUE_GETFSSTAT: + case AUE_PIPE: + case AUE_SETPGRP: + case AUE_SETRLIMIT: + /* Header, subject, and return tokens added at end */ + break; + + case AUE_ACCESS: + case AUE_CHDIR: + case AUE_CHROOT: + case AUE_EXECVE: + case AUE_GETATTRLIST: + case AUE_GETFH: + case AUE_LSTAT: + case AUE_MKFIFO: + case AUE_PATHCONF: + case AUE_READLINK: + case AUE_REVOKE: + case AUE_RMDIR: + case AUE_SEARCHFS: + case AUE_SETATTRLIST: + case AUE_STAT: + case AUE_STATFS: + case AUE_TRUNCATE: + case AUE_UNDELETE: + case AUE_UNLINK: + case AUE_UTIMES: + KPATH1_VNODE1_OR_UPATH1_TOKENS; + break; + + case AUE_CHFLAGS: + tok = au_to_arg32(2, "flags", ar->ar_arg_fflags); + kau_write(rec, tok); + KPATH1_VNODE1_OR_UPATH1_TOKENS; + break; + + case AUE_CHMOD: + tok = au_to_arg32(2, "new file mode", ar->ar_arg_mode); + kau_write(rec, tok); + KPATH1_VNODE1_OR_UPATH1_TOKENS; + break; + + case AUE_CHOWN: + tok = au_to_arg32(2, "new file uid", ar->ar_arg_uid); + kau_write(rec, tok); + tok = au_to_arg32(3, "new file gid", ar->ar_arg_gid); + kau_write(rec, tok); + KPATH1_VNODE1_OR_UPATH1_TOKENS; + break; + + case AUE_EXCHANGEDATA: + KPATH1_VNODE1_OR_UPATH1_TOKENS; + KPATH2_VNODE2_TOKENS; + break; + +/* + * XXXAUDIT: Close is not audited in the kernel yet. 
+	case AUE_CLOSE:
+		tok = au_to_arg32(2, "fd", ar->ar_arg_fd);
+		kau_write(rec, tok);
+		KPATH1_VNODE1_OR_UPATH1_TOKENS;
+		break;
+*/
+	case AUE_FCHMOD:
+		tok = au_to_arg32(2, "new file mode", ar->ar_arg_mode);
+		kau_write(rec, tok);
+		FD_KPATH1_VNODE1_TOKENS;
+		break;
+
+	case AUE_FCHDIR:
+	case AUE_FPATHCONF:
+	case AUE_FSTAT:		/* XXX Need to handle sockets and shm */
+	case AUE_FSTATFS:
+	case AUE_FTRUNCATE:
+	case AUE_FUTIMES:
+	case AUE_GETDIRENTRIES:
+	case AUE_GETDIRENTRIESATTR:
+		FD_KPATH1_VNODE1_TOKENS;
+		break;
+
+	case AUE_FCHOWN:
+		tok = au_to_arg32(2, "new file uid", ar->ar_arg_uid);
+		kau_write(rec, tok);
+		tok = au_to_arg32(3, "new file gid", ar->ar_arg_gid);
+		kau_write(rec, tok);
+		FD_KPATH1_VNODE1_TOKENS;
+		break;
+
+	case AUE_FCNTL:
+		if (ar->ar_arg_cmd == F_GETLK || ar->ar_arg_cmd == F_SETLK ||
+		    ar->ar_arg_cmd == F_SETLKW) {
+			tok = au_to_arg32(2, "cmd", ar->ar_arg_cmd);
+			kau_write(rec, tok);
+			FD_KPATH1_VNODE1_TOKENS;
+		}
+		break;
+
+	case AUE_FCHFLAGS:
+		tok = au_to_arg32(2, "flags", ar->ar_arg_fflags);
+		kau_write(rec, tok);
+		FD_KPATH1_VNODE1_TOKENS;
+		break;
+
+	case AUE_FLOCK:
+		tok = au_to_arg32(2, "operation", ar->ar_arg_cmd);
+		kau_write(rec, tok);
+		FD_KPATH1_VNODE1_TOKENS;
+		break;
+
+	case AUE_LINK:
+	case AUE_RENAME:
+		KPATH1_VNODE1_OR_UPATH1_TOKENS;
+		UPATH2_TOKENS;
+		break;
+
+	case AUE_MKDIR:
+		tok = au_to_arg32(2, "mode", ar->ar_arg_mode);
+		kau_write(rec, tok);
+		KPATH1_VNODE1_OR_UPATH1_TOKENS;
+		break;
+
+	case AUE_MKNOD:
+		tok = au_to_arg32(2, "mode", ar->ar_arg_mode);
+		kau_write(rec, tok);
+		tok = au_to_arg32(3, "dev", ar->ar_arg_dev);
+		kau_write(rec, tok);
+		KPATH1_VNODE1_OR_UPATH1_TOKENS;
+		break;
+
+	case AUE_MOUNT:
+		/* XXX Need to handle NFS mounts */
+		tok = au_to_arg32(3, "flags", ar->ar_arg_fflags);
+		kau_write(rec, tok);
+		if (ar->ar_arg_text != NULL) {
+			tok = au_to_text(ar->ar_arg_text);
+			kau_write(rec, tok);
+		}
+		/* fall through */
+	case AUE_UMOUNT:
+		KPATH1_VNODE1_OR_UPATH1_TOKENS;
+		break;
+
+	case AUE_MSGCTL:
+		ar->ar_event = msgctl_to_event(ar->ar_arg_svipc_cmd);
+		/* Fall through */
+	case AUE_MSGRCV:
+	case AUE_MSGSND:
+		tok = au_to_arg32(1, "msg ID", ar->ar_arg_svipc_id);
+		kau_write(rec, tok);
+		if (ar->ar_errno != EINVAL) {
+			tok = au_to_ipc(AT_IPC_MSG, ar->ar_arg_svipc_id);
+			kau_write(rec, tok);
+		}
+		break;
+
+	case AUE_MSGGET:
+		if (ar->ar_errno == 0) {
+			tok = au_to_ipc(AT_IPC_MSG, ar->ar_arg_svipc_id);
+			kau_write(rec, tok);
+		}
+		break;
+
+	case AUE_OPEN_R:
+	case AUE_OPEN_RC:
+	case AUE_OPEN_RTC:
+	case AUE_OPEN_RT:
+	case AUE_OPEN_RW:
+	case AUE_OPEN_RWC:
+	case AUE_OPEN_RWTC:
+	case AUE_OPEN_RWT:
+	case AUE_OPEN_W:
+	case AUE_OPEN_WC:
+	case AUE_OPEN_WTC:
+	case AUE_OPEN_WT:
+		/* The open syscall always writes an OPEN_R event; convert the
+		 * file flags to the proper type of event.
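+		 * For example, open(path, O_WRONLY | O_CREAT | O_TRUNC) is
+		 * recorded as AUE_OPEN_WTC; see flags_to_openevent() in
+		 * kern_bsm_klib.c.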
+ */ + ar->ar_event = flags_to_openevent(ar->ar_arg_fflags); + UPATH1_TOKENS; /* Save the user space path */ + KPATH1_VNODE1_TOKENS; /* Audit the kernel path as well */ + break; + + case AUE_QUOTACTL: + tok = au_to_arg32(2, "command", ar->ar_arg_cmd); + kau_write(rec, tok); + tok = au_to_arg32(3, "uid", ar->ar_arg_uid); + kau_write(rec, tok); + KPATH1_VNODE1_OR_UPATH1_TOKENS; + break; + + case AUE_SEMCTL: + ar->ar_event = semctl_to_event(ar->ar_arg_svipc_cmd); + /* Fall through */ + case AUE_SEMOP: + tok = au_to_arg32(1, "sem ID", ar->ar_arg_svipc_id); + kau_write(rec, tok); + if (ar->ar_errno != EINVAL) { + tok = au_to_ipc(AT_IPC_SEM, ar->ar_arg_svipc_id); + kau_write(rec, tok); + } + break; + case AUE_SEMGET: + if (ar->ar_errno == 0) { + tok = au_to_ipc(AT_IPC_SEM, ar->ar_arg_svipc_id); + kau_write(rec, tok); + } + break; + case AUE_SETEGID: + tok = au_to_arg32(1, "gid", ar->ar_arg_egid); + kau_write(rec, tok); + break; + case AUE_SETEUID: + tok = au_to_arg32(1, "uid", ar->ar_arg_euid); + kau_write(rec, tok); + break; + case AUE_SETGID: + tok = au_to_arg32(1, "gid", ar->ar_arg_gid); + kau_write(rec, tok); + break; + case AUE_SETUID: + tok = au_to_arg32(1, "uid", ar->ar_arg_uid); + kau_write(rec, tok); + break; + case AUE_SETGROUPS: + if (ar->ar_valid_arg & ARG_GROUPSET) { + for(ctr = 0; ctr < ar->ar_arg_groups.gidset_size; ctr++) + { + tok = au_to_arg32(1, "setgroups", ar->ar_arg_groups.gidset[ctr]); + kau_write(rec, tok); + } + } + break; + case AUE_SHMAT: + tok = au_to_arg32(1, "shmid", ar->ar_arg_svipc_id); + kau_write(rec, tok); + tok = au_to_arg32(2, "shmaddr", (int)ar->ar_arg_svipc_addr); + kau_write(rec, tok); + if (ar->ar_valid_arg & ARG_SVIPC_PERM) { + tok = au_to_ipc(AT_IPC_SHM, ar->ar_arg_svipc_id); + kau_write(rec, tok); + tok = au_to_ipc_perm(&ar->ar_arg_svipc_perm); + kau_write(rec, tok); + } + break; + + case AUE_SHMCTL: + tok = au_to_arg32(1, "shmid", ar->ar_arg_svipc_id); + kau_write(rec, tok); + switch (ar->ar_arg_svipc_cmd) { + case IPC_STAT: + ar->ar_event = AUE_SHMCTL_STAT; + if (ar->ar_valid_arg & ARG_SVIPC_PERM) { + tok = au_to_ipc(AT_IPC_SHM, + ar->ar_arg_svipc_id); + kau_write(rec, tok); + } + break; + case IPC_RMID: + ar->ar_event = AUE_SHMCTL_RMID; + if (ar->ar_valid_arg & ARG_SVIPC_PERM) { + tok = au_to_ipc(AT_IPC_SHM, + ar->ar_arg_svipc_id); + kau_write(rec, tok); + } + break; + case IPC_SET: + ar->ar_event = AUE_SHMCTL_SET; + if (ar->ar_valid_arg & ARG_SVIPC_PERM) { + tok = au_to_ipc(AT_IPC_SHM, + ar->ar_arg_svipc_id); + kau_write(rec, tok); + tok = au_to_ipc_perm(&ar->ar_arg_svipc_perm); + kau_write(rec, tok); + } + break; + default: + break; /* We will audit a bad command */ + } + break; + + case AUE_SHMDT: + tok = au_to_arg32(1, "shmaddr", (int)ar->ar_arg_svipc_addr); + kau_write(rec, tok); + break; + + case AUE_SHMGET: + /* This is unusual; the return value is in an argument token */ + tok = au_to_arg32(0, "shmid", ar->ar_arg_svipc_id); + kau_write(rec, tok); + if (ar->ar_valid_arg & ARG_SVIPC_PERM) { + tok = au_to_ipc(AT_IPC_SHM, ar->ar_arg_svipc_id); + kau_write(rec, tok); + tok = au_to_ipc_perm(&ar->ar_arg_svipc_perm); + kau_write(rec, tok); + } + break; + + case AUE_SYMLINK: + if (ar->ar_valid_arg & ARG_TEXT) { + tok = au_to_text(ar->ar_arg_text); + kau_write(rec, tok); + } + KPATH1_VNODE1_OR_UPATH1_TOKENS; + break; + + case AUE_UMASK: + tok = au_to_arg32(1, "new mask", ar->ar_arg_mask); + kau_write(rec, tok); + tok = au_to_arg32(0, "prev mask", ar->ar_retval); + kau_write(rec, tok); + break; + + default: /* We shouldn't fall through to here. 
*/
+		printf("BSM conversion requested for unknown event %d\n",
+			ar->ar_event);
+		kau_free(rec);
+		return BSM_NOAUDIT;
+	}
+
+	kau_write(rec, subj_tok);
+	tok = au_to_return32((char)ar->ar_errno, ar->ar_retval);
+	kau_write(rec, tok);  /* Every record gets a return token */
+
+	kau_close(rec, &ar->ar_endtime, ar->ar_event);
+
+	*pau = rec;
+	return BSM_SUCCESS;
+}
+
+/*
+ * Verify that a record is a valid BSM record. This verification is
+ * simple now, but may be expanded in the future.
+ * Return 1 if the record is good, 0 otherwise.
+ */
+int
+bsm_rec_verify(caddr_t rec)
+{
+	/*
+	 * Check the token ID of the first token; it has to be a header
+	 * token.
+	 */
+	/* XXXAUDIT There needs to be a token structure to map a token.
+	 * XXXAUDIT We shouldn't be simply looking at the first char.
+	 */
+	if ( ((char)*rec != AU_HEADER_32_TOKEN) &&
+	    ((char)*rec != AU_HEADER_EX_32_TOKEN) &&
+	    ((char)*rec != AU_HEADER_64_TOKEN) &&
+	    ((char)*rec != AU_HEADER_EX_64_TOKEN) ) {
+		return (0);
+	}
+	return (1);
+}
diff --git a/bsd/kern/kern_bsm_klib.c b/bsd/kern/kern_bsm_klib.c
new file mode 100644
index 000000000..ee69ec02e
--- /dev/null
+++ b/bsd/kern/kern_bsm_klib.c
@@ -0,0 +1,756 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * Initialize the system call to audit event mapping table. This table
+ * must be kept in sync with the system call table. This table is meant to
+ * be directly accessed.
+ * XXX This should be improved, though, to make it independent of the syscall
+ * table (but we don't want to traverse a large table for every system call
+ * to find a match). Ultimately, it would be best to place the audit event
+ * number in the system call table.
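+ *
+ * Lookup is by direct index, so sys_au_event[5] is AUE_OPEN_R for open(2).
+ * A bounds-checked sketch of the lookup (illustrative only; the variable
+ * names are assumptions, not part of this table):
+ *
+ *	au_event_t event = (code >= 0 && code < nsys_au_event) ?
+ *	    sys_au_event[code] : AUE_NULL;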
+ */ +au_event_t sys_au_event[] = { + AUE_NULL, /* 0 = indir */ + AUE_EXIT, /* 1 = exit */ + AUE_NULL, /* 2 = fork */ + AUE_NULL, /* 3 = read */ + AUE_NULL, /* 4 = write */ + AUE_OPEN_R, /* 5 = open */ + AUE_NULL, /* 6 = close */ + AUE_NULL, /* 7 = wait4 */ + AUE_NULL, /* 8 = old creat */ + AUE_LINK, /* 9 = link */ + AUE_UNLINK, /* 10 = unlink */ + AUE_NULL, /* 11 was obsolete execv */ + AUE_CHDIR, /* 12 = chdir */ + AUE_FCHDIR, /* 13 = fchdir */ + AUE_MKNOD, /* 14 = mknod */ + AUE_CHMOD, /* 15 = chmod */ + AUE_CHOWN, /* 16 = chown; now 3 args */ + AUE_NULL, /* 17 = old break */ +#if COMPAT_GETFSSTAT + AUE_NULL, /* 18 = ogetfsstat */ +#else + AUE_GETFSSTAT, /* 18 = getfsstat */ +#endif + AUE_NULL, /* 19 = old lseek */ + AUE_NULL, /* 20 = getpid */ + AUE_NULL, /* 21 was obsolete mount */ + AUE_NULL, /* 22 was obsolete umount */ + AUE_SETUID, /* 23 = setuid */ + AUE_NULL, /* 24 = getuid */ + AUE_NULL, /* 25 = geteuid */ + AUE_NULL, /* 26 = ptrace */ + AUE_RECVMSG, /* 27 = recvmsg */ + AUE_SENDMSG, /* 28 = sendmsg */ + AUE_RECVFROM, /* 29 = recvfrom */ + AUE_ACCEPT, /* 30 = accept */ + AUE_NULL, /* 31 = getpeername */ + AUE_NULL, /* 32 = getsockname */ + AUE_ACCESS, /* 33 = access */ + AUE_CHFLAGS, /* 34 = chflags */ + AUE_FCHFLAGS, /* 35 = fchflags */ + AUE_NULL, /* 36 = sync */ + AUE_NULL, /* 37 = kill */ + AUE_NULL, /* 38 = old stat */ + AUE_NULL, /* 39 = getppid */ + AUE_NULL, /* 40 = old lstat */ + AUE_NULL, /* 41 = dup */ + AUE_PIPE, /* 42 = pipe */ + AUE_NULL, /* 43 = getegid */ + AUE_NULL, /* 44 = profil */ + AUE_NULL, /* 45 = ktrace */ + AUE_NULL, /* 46 = sigaction */ + AUE_NULL, /* 47 = getgid */ + AUE_NULL, /* 48 = sigprocmask */ + AUE_NULL, /* 49 = getlogin */ + AUE_NULL, /* 50 = setlogin */ + AUE_NULL, /* 51 = turn acct off/on */ + AUE_NULL, /* 52 = sigpending */ + AUE_NULL, /* 53 = sigaltstack */ + AUE_NULL, /* 54 = ioctl */ + AUE_NULL, /* 55 = reboot */ + AUE_REVOKE, /* 56 = revoke */ + AUE_SYMLINK, /* 57 = symlink */ + AUE_READLINK, /* 58 = readlink */ + AUE_EXECVE, /* 59 = execve */ + AUE_UMASK, /* 60 = umask */ + AUE_CHROOT, /* 61 = chroot */ + AUE_NULL, /* 62 = old fstat */ + AUE_NULL, /* 63 = used internally, reserved */ + AUE_NULL, /* 64 = old getpagesize */ + AUE_NULL, /* 65 = msync */ + AUE_NULL, /* 66 = vfork */ + AUE_NULL, /* 67 was obsolete vread */ + AUE_NULL, /* 68 was obsolete vwrite */ + AUE_NULL, /* 69 = sbrk */ + AUE_NULL, /* 70 = sstk */ + AUE_NULL, /* 71 = old mmap */ + AUE_NULL, /* 72 = old vadvise */ + AUE_NULL, /* 73 = munmap */ + AUE_NULL, /* 74 = mprotect */ + AUE_NULL, /* 75 = madvise */ + AUE_NULL, /* 76 was obsolete vhangup */ + AUE_NULL, /* 77 was obsolete vlimit */ + AUE_NULL, /* 78 = mincore */ + AUE_NULL, /* 79 = getgroups */ + AUE_SETGROUPS, /* 80 = setgroups */ + AUE_NULL, /* 81 = getpgrp */ + AUE_SETPGRP, /* 82 = setpgid */ + AUE_NULL, /* 83 = setitimer */ + AUE_NULL, /* 84 = old wait */ + AUE_NULL, /* 85 = swapon */ + AUE_NULL, /* 86 = getitimer */ + AUE_NULL, /* 87 = old gethostname */ + AUE_NULL, /* 88 = old sethostname */ + AUE_NULL, /* 89 getdtablesize */ + AUE_NULL, /* 90 = dup2 */ + AUE_NULL, /* 91 was obsolete getdopt */ + AUE_FCNTL, /* 92 = fcntl */ + AUE_NULL, /* 93 = select */ + AUE_NULL, /* 94 was obsolete setdopt */ + AUE_NULL, /* 95 = fsync */ + AUE_NULL, /* 96 = setpriority */ + AUE_SOCKET, /* 97 = socket */ + AUE_CONNECT, /* 98 = connect */ + AUE_NULL, /* 99 = accept */ + AUE_NULL, /* 100 = getpriority */ + AUE_NULL, /* 101 = old send */ + AUE_NULL, /* 102 = old recv */ + AUE_NULL, /* 103 = sigreturn */ + AUE_BIND, /* 104 = bind 
*/ + AUE_SETSOCKOPT, /* 105 = setsockopt */ + AUE_NULL, /* 106 = listen */ + AUE_NULL, /* 107 was vtimes */ + AUE_NULL, /* 108 = sigvec */ + AUE_NULL, /* 109 = sigblock */ + AUE_NULL, /* 110 = sigsetmask */ + AUE_NULL, /* 111 = sigpause */ + AUE_NULL, /* 112 = sigstack */ + AUE_NULL, /* 113 = recvmsg */ + AUE_NULL, /* 114 = sendmsg */ + AUE_NULL, /* 115 = old vtrace */ + AUE_NULL, /* 116 = gettimeofday */ + AUE_NULL, /* 117 = getrusage */ + AUE_NULL, /* 118 = getsockopt */ + AUE_NULL, /* 119 = old resuba */ + AUE_NULL, /* 120 = readv */ + AUE_NULL, /* 121 = writev */ + AUE_NULL, /* 122 = settimeofday */ + AUE_FCHOWN, /* 123 = fchown */ + AUE_FCHMOD, /* 124 = fchmod */ + AUE_NULL, /* 125 = recvfrom */ + AUE_NULL, /* 126 = setreuid */ + AUE_NULL, /* 127 = setregid */ + AUE_RENAME, /* 128 = rename */ + AUE_NULL, /* 129 = old truncate */ + AUE_NULL, /* 130 = old ftruncate */ + AUE_FLOCK, /* 131 = flock */ + AUE_MKFIFO, /* 132 = mkfifo */ + AUE_SENDTO, /* 133 = sendto */ + AUE_SHUTDOWN, /* 134 = shutdown */ + AUE_SOCKETPAIR, /* 135 = socketpair */ + AUE_MKDIR, /* 136 = mkdir */ + AUE_RMDIR, /* 137 = rmdir */ + AUE_UTIMES, /* 138 = utimes */ + AUE_FUTIMES, /* 139 = futimes */ + AUE_ADJTIME, /* 140 = adjtime */ + AUE_NULL, /* 141 = getpeername */ + AUE_NULL, /* 142 = old gethostid */ + AUE_NULL, /* 143 = old sethostid */ + AUE_NULL, /* 144 = old getrlimit */ + AUE_NULL, /* 145 = old setrlimit */ + AUE_NULL, /* 146 = old killpg */ + AUE_NULL, /* 147 = setsid */ + AUE_NULL, /* 148 was setquota */ + AUE_NULL, /* 149 was qquota */ + AUE_NULL, /* 150 = getsockname */ + AUE_NULL, /* 151 = getpgid */ + AUE_NULL, /* 152 = setprivexec */ + AUE_NULL, /* 153 = pread */ + AUE_NULL, /* 154 = pwrite */ + AUE_NULL, /* 155 = nfs_svc */ + AUE_NULL, /* 156 = old getdirentries */ + AUE_STATFS, /* 157 = statfs */ + AUE_FSTATFS, /* 158 = fstatfs */ + AUE_UMOUNT, /* 159 = unmount */ + AUE_NULL, /* 160 was async_daemon */ + AUE_GETFH, /* 161 = get file handle */ + AUE_NULL, /* 162 = getdomainname */ + AUE_NULL, /* 163 = setdomainname */ + AUE_NULL, /* 164 */ +#if QUOTA + AUE_QUOTACTL, /* 165 = quotactl */ +#else /* QUOTA */ + AUE_NULL, /* 165 = not configured */ +#endif /* QUOTA */ + AUE_NULL, /* 166 was exportfs */ + AUE_MOUNT, /* 167 = mount */ + AUE_NULL, /* 168 was ustat */ + AUE_NULL, /* 169 = nosys */ + AUE_NULL, /* 170 was table */ + AUE_NULL, /* 171 = old wait3 */ + AUE_NULL, /* 172 was rpause */ + AUE_NULL, /* 173 = nosys */ + AUE_NULL, /* 174 was getdents */ + AUE_NULL, /* 175 was gc_control */ + AUE_NULL, /* 176 = add_profil */ + AUE_NULL, /* 177 */ + AUE_NULL, /* 178 */ + AUE_NULL, /* 179 */ + AUE_NULL, /* 180 */ + AUE_SETGID, /* 181 */ + AUE_SETEGID, /* 182 */ + AUE_SETEUID, /* 183 */ + AUE_NULL, /* 184 = nosys */ + AUE_NULL, /* 185 = nosys */ + AUE_NULL, /* 186 = nosys */ + AUE_NULL, /* 187 = nosys */ + AUE_STAT, /* 188 = stat */ + AUE_FSTAT, /* 189 = fstat */ + AUE_LSTAT, /* 190 = lstat */ + AUE_PATHCONF, /* 191 = pathconf */ + AUE_FPATHCONF, /* 192 = fpathconf */ + +#if COMPAT_GETFSSTAT + AUE_GETFSSTAT, /* 193 = getfsstat */ +#else + AUE_NULL, /* 193 is unused */ +#endif + AUE_NULL, /* 194 = getrlimit */ + AUE_SETRLIMIT, /* 195 = setrlimit */ + AUE_GETDIRENTRIES, /* 196 = getdirentries */ + AUE_NULL, /* 197 = mmap */ + AUE_NULL, /* 198 = __syscall */ + AUE_NULL, /* 199 = lseek */ + AUE_TRUNCATE, /* 200 = truncate */ + AUE_FTRUNCATE, /* 201 = ftruncate */ + AUE_NULL, /* 202 = __sysctl */ + AUE_NULL, /* 203 = mlock */ + AUE_NULL, /* 204 = munlock */ + AUE_UNDELETE, /* 205 = undelete */ + AUE_NULL, /* 206 
= ATsocket */
+	AUE_NULL,	/* 207 = ATgetmsg*/
+	AUE_NULL,	/* 208 = ATputmsg*/
+	AUE_NULL,	/* 209 = ATPsndreq*/
+	AUE_NULL,	/* 210 = ATPsndrsp*/
+	AUE_NULL,	/* 211 = ATPgetreq*/
+	AUE_NULL,	/* 212 = ATPgetrsp*/
+	AUE_NULL,	/* 213 = Reserved for AppleTalk */
+	AUE_NULL,	/* 214 = Reserved for AppleTalk */
+	AUE_NULL,	/* 215 = Reserved for AppleTalk */
+
+	AUE_NULL,	/* 216 = HFS make complex file call (multiple forks) */
+	AUE_NULL,	/* 217 = HFS statv extended stat call for HFS */
+	AUE_NULL,	/* 218 = HFS lstatv extended lstat call for HFS */
+	AUE_NULL,	/* 219 = HFS fstatv extended fstat call for HFS */
+	AUE_GETATTRLIST,/* 220 = HFS getattrlist get attribute list call */
+	AUE_SETATTRLIST,/* 221 = HFS setattrlist set attribute list */
+	AUE_GETDIRENTRIESATTR,/* 222 = HFS getdirentriesattr get directory attributes */
+	AUE_EXCHANGEDATA,/* 223 = HFS exchangedata exchange file contents */
+	AUE_NULL,/* 224 = HFS checkuseraccess check access to a file */
+	AUE_SEARCHFS,	/* 225 = HFS searchfs to implement catalog searching */
+	AUE_NULL,	/* 226 = private delete (Carbon semantics) */
+	AUE_NULL,	/* 227 = copyfile - originally for AFP */
+	AUE_NULL,	/* 228 */
+	AUE_NULL,	/* 229 */
+	AUE_NULL,	/* 230 */
+	AUE_NULL,	/* 231 */
+	AUE_NULL,	/* 232 */
+	AUE_NULL,	/* 233 */
+	AUE_NULL,	/* 234 */
+	AUE_NULL,	/* 235 */
+	AUE_NULL,	/* 236 */
+	AUE_NULL,	/* 237 */
+	AUE_NULL,	/* 238 */
+	AUE_NULL,	/* 239 */
+	AUE_NULL,	/* 240 */
+	AUE_NULL,	/* 241 */
+	AUE_NULL,	/* 242 = fsctl */
+	AUE_NULL,	/* 243 */
+	AUE_NULL,	/* 244 */
+	AUE_NULL,	/* 245 */
+	AUE_NULL,	/* 246 */
+	AUE_NULL,	/* 247 = nfsclnt*/
+	AUE_NULL,	/* 248 = fhopen */
+	AUE_NULL,	/* 249 */
+	AUE_NULL,	/* 250 = minherit */
+	AUE_NULL,	/* 251 = semsys */
+	AUE_NULL,	/* 252 = msgsys */
+	AUE_NULL,	/* 253 = shmsys */
+	AUE_SEMCTL,	/* 254 = semctl */
+	AUE_SEMGET,	/* 255 = semget */
+	AUE_SEMOP,	/* 256 = semop */
+	AUE_NULL,	/* 257 = semconfig */
+	AUE_MSGCTL,	/* 258 = msgctl */
+	AUE_MSGGET,	/* 259 = msgget */
+	AUE_MSGSND,	/* 260 = msgsnd */
+	AUE_MSGRCV,	/* 261 = msgrcv */
+	AUE_SHMAT,	/* 262 = shmat */
+	AUE_SHMCTL,	/* 263 = shmctl */
+	AUE_SHMDT,	/* 264 = shmdt */
+	AUE_SHMGET,	/* 265 = shmget */
+	AUE_NULL,	/* 266 = shm_open */
+	AUE_NULL,	/* 267 = shm_unlink */
+	AUE_NULL,	/* 268 = sem_open */
+	AUE_NULL,	/* 269 = sem_close */
+	AUE_NULL,	/* 270 = sem_unlink */
+	AUE_NULL,	/* 271 = sem_wait */
+	AUE_NULL,	/* 272 = sem_trywait */
+	AUE_NULL,	/* 273 = sem_post */
+	AUE_NULL,	/* 274 = sem_getvalue */
+	AUE_NULL,	/* 275 = sem_init */
+	AUE_NULL,	/* 276 = sem_destroy */
+	AUE_NULL,	/* 277 */
+	AUE_NULL,	/* 278 */
+	AUE_NULL,	/* 279 */
+	AUE_NULL,	/* 280 */
+	AUE_NULL,	/* 281 */
+	AUE_NULL,	/* 282 */
+	AUE_NULL,	/* 283 */
+	AUE_NULL,	/* 284 */
+	AUE_NULL,	/* 285 */
+	AUE_NULL,	/* 286 */
+	AUE_NULL,	/* 287 */
+	AUE_NULL,	/* 288 */
+	AUE_NULL,	/* 289 */
+	AUE_NULL,	/* 290 */
+	AUE_NULL,	/* 291 */
+	AUE_NULL,	/* 292 */
+	AUE_NULL,	/* 293 */
+	AUE_NULL,	/* 294 */
+	AUE_NULL,	/* 295 */
+	AUE_NULL,	/* 296 = load_shared_file */
+	AUE_NULL,	/* 297 = reset_shared_file */
+	AUE_NULL,	/* 298 = new_system_shared_regions */
+	AUE_NULL,	/* 299 */
+	AUE_NULL,	/* 300 */
+	AUE_NULL,	/* 301 */
+	AUE_NULL,	/* 302 */
+	AUE_NULL,	/* 303 */
+	AUE_NULL,	/* 304 */
+	AUE_NULL,	/* 305 */
+	AUE_NULL,	/* 306 */
+	AUE_NULL,	/* 307 */
+	AUE_NULL,	/* 308 */
+	AUE_NULL,	/* 309 */
+	AUE_NULL,	/* 310 = getsid */
+	AUE_NULL,	/* 311 */
+	AUE_NULL,	/* 312 */
+	AUE_NULL,	/* 313 */
+	AUE_NULL,	/* 314 */
+	AUE_NULL,	/* 315 */
+	AUE_NULL,	/* 316 */
+	AUE_NULL,	/* 317 */
+	AUE_NULL,	/* 318 */
+	AUE_NULL,	/* 319 */
+	AUE_NULL,	/* 320 */
+	AUE_NULL,	/* 321 */
+	AUE_NULL,	/* 322 */
+	AUE_NULL,	/* 323 */
+	AUE_NULL,	/* 324 = mlockall*/
+	AUE_NULL,	/* 325 = munlockall*/
+	AUE_NULL,	/* 326 */
+	AUE_NULL,	/* 327 = issetugid */
+	AUE_NULL,	/* 328 */
+	AUE_NULL,	/* 329 */
+	AUE_NULL,	/* 330 */
+	AUE_NULL,	/* 331 */
+	AUE_NULL,	/* 332 */
+	AUE_NULL,	/* 333 */
+	AUE_NULL,	/* 334 */
+	AUE_NULL,	/* 335 = utrace */
+	AUE_NULL,	/* 336 */
+	AUE_NULL,	/* 337 */
+	AUE_NULL,	/* 338 */
+	AUE_NULL,	/* 339 */
+	AUE_NULL,	/* 340 */
+	AUE_NULL,	/* 341 */
+	AUE_NULL,	/* 342 */
+	AUE_NULL,	/* 343 */
+	AUE_NULL,	/* 344 */
+	AUE_NULL,	/* 345 */
+	AUE_NULL,	/* 346 */
+	AUE_NULL,	/* 347 */
+	AUE_NULL,	/* 348 */
+	AUE_NULL,	/* 349 */
+	AUE_AUDIT,	/* 350 */
+	AUE_NULL,	/* 351 */
+	AUE_NULL,	/* 352 */
+	AUE_GETAUID,	/* 353 */
+	AUE_SETAUID,	/* 354 */
+	AUE_NULL,	/* 355 */
+	AUE_NULL,	/* 356 */
+	AUE_NULL,	/* 357 */
+	AUE_NULL,	/* 358 */
+	AUE_NULL,	/* 359 */
+	AUE_NULL,	/* 360 */
+	AUE_NULL,	/* 361 */
+	AUE_NULL,	/* 362 = kqueue */
+	AUE_NULL,	/* 363 = kevent */
+	AUE_NULL,	/* 364 */
+	AUE_NULL,	/* 365 */
+	AUE_NULL,	/* 366 */
+	AUE_NULL,	/* 367 */
+	AUE_NULL,	/* 368 */
+	AUE_NULL	/* 369 */
+};
+int nsys_au_event = sizeof(sys_au_event) / sizeof(sys_au_event[0]);
+
+/*
+ * Check whether an event is auditable by comparing the mask of classes
+ * this event is part of against the given preselection mask, which will
+ * be the process event mask.
+ *
+ * XXX This needs to eventually implement the selection based on the
+ * event->class mapping that is controlled by a configuration file.
+ */
+int au_preselect(au_event_t event, au_mask_t *mask_p, int sorf)
+{
+	au_class_t ae_class;
+	au_class_t effmask = 0;
+
+	if(mask_p == NULL)
+		return (-1);
+
+	/*
+	 * XXX Set the event class using a big ugly switch statement. This
+	 * will change to use the mapping defined by a configuration file.
+	 */
+	switch (event) {
+	case AUE_MMAP:
+	case AUE_PIPE:
+		/* mmap() and pipe() are AU_NULL in some systems; we'll
+		 * place them in AU_IPC for now.
+ */ + ae_class = AU_IPC; break; + case AUE_READLINK: + case AUE_GETDIRENTRIES: + ae_class = AU_FREAD; break; + case AUE_ACCESS: + case AUE_FSTAT: + case AUE_FSTATFS: + case AUE_GETFH: + case AUE_LSTAT: + case AUE_FPATHCONF: + case AUE_PATHCONF: + case AUE_STAT: + case AUE_STATFS: + case AUE_GETATTRLIST: + case AUE_GETFSSTAT: + case AUE_GETDIRENTRIESATTR: + case AUE_SEARCHFS: + ae_class = AU_FACCESS; break; + case AUE_CHMOD: + case AUE_CHOWN: + case AUE_FCHMOD: + case AUE_FCHOWN: + case AUE_FCNTL: + case AUE_FLOCK: + case AUE_UTIMES: + case AUE_CHFLAGS: + case AUE_FCHFLAGS: + case AUE_FUTIMES: + case AUE_SETATTRLIST: + case AUE_TRUNCATE: + case AUE_FTRUNCATE: + case AUE_UNDELETE: + case AUE_EXCHANGEDATA: + ae_class = AU_FMODIFY; break; + case AUE_LINK: + case AUE_MKDIR: + case AUE_MKNOD: + case AUE_SYMLINK: + case AUE_MKFIFO: + ae_class = AU_FCREATE; break; + case AUE_RMDIR: + case AUE_UNLINK: + ae_class = AU_FDELETE; break; + case AUE_CLOSE: + case AUE_MUNMAP: + case AUE_REVOKE: + ae_class = AU_CLOSE; break; + case AUE_CHDIR: + case AUE_CHROOT: + case AUE_EXIT: + case AUE_FCHDIR: + case AUE_FORK: + case AUE_KILL: + case AUE_SETEGID: + case AUE_SETEUID: + case AUE_SETGID: + case AUE_SETGROUPS: + case AUE_SETPGRP: + case AUE_SETUID: + case AUE_VFORK: + case AUE_UMASK: + ae_class = AU_PROCESS; break; + case AUE_ACCEPT: + case AUE_BIND: + case AUE_CONNECT: + case AUE_RECVFROM: + case AUE_RECVMSG: + case AUE_SENDMSG: + case AUE_SENDTO: + case AUE_SETSOCKOPT: + case AUE_SHUTDOWN: + case AUE_SOCKET: + case AUE_SOCKETPAIR: + ae_class = AU_NET; break; + case AUE_MSGCTL: + case AUE_MSGGET: + case AUE_MSGRCV: + case AUE_MSGSND: + case AUE_SEMCTL: + case AUE_SEMGET: + case AUE_SEMOP: + case AUE_SHMAT: + case AUE_SHMCTL: + case AUE_SHMDT: + case AUE_SHMGET: + ae_class = AU_IPC; break; + case AUE_ACCT: + case AUE_ADJTIME: + case AUE_GETAUID: + case AUE_MOUNT: + case AUE_SETAUID: + case AUE_SETRLIMIT: + case AUE_UMOUNT: + ae_class = AU_ADMIN; break; + case AUE_IOCTL: + ae_class = AU_IOCTL; break; + case AUE_EXECVE: + ae_class = AU_PROCESS|AU_EXEC; break; + case AUE_OPEN_R: + ae_class = AU_FREAD; break; + case AUE_OPEN_RC: + ae_class = AU_FREAD|AU_FCREATE; break; + case AUE_OPEN_RTC: + ae_class = AU_FREAD|AU_FCREATE|AU_FDELETE; break; + case AUE_OPEN_RT: + ae_class = AU_FREAD|AU_FDELETE; break; + case AUE_OPEN_RW: + ae_class = AU_FREAD|AU_FWRITE; break; + case AUE_OPEN_RWC: + ae_class = AU_FREAD|AU_FWRITE|AU_FCREATE; break; + case AUE_OPEN_RWTC: + ae_class = AU_FREAD|AU_FWRITE|AU_FCREATE|AU_FDELETE; break; + case AUE_OPEN_RWT: + ae_class = AU_FREAD|AU_FWRITE|AU_FDELETE; break; + case AUE_OPEN_W: + ae_class = AU_FWRITE; break; + case AUE_OPEN_WC: + ae_class = AU_FWRITE|AU_FCREATE; break; + case AUE_OPEN_WTC: + ae_class = AU_FWRITE|AU_FCREATE|AU_FDELETE; break; + case AUE_OPEN_WT: + ae_class = AU_FWRITE|AU_FDELETE; break; + case AUE_RENAME: + ae_class = AU_FCREATE|AU_FDELETE; break; + default: /* Assign the event to all classes */ + ae_class = AU_ALL; break; + } + + /* + * Perform the actual check of the masks against the event. + */ + /* + * XXX Need to compare against the kernel mask??? Or do we not do + * that by default and let the client code just call this function + * with the kernel preselection mask as the mask parameter? 
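+	 *
+	 * Worked example: a successful AUE_OPEN_R maps to ae_class =
+	 * AU_FREAD above, so with am_success = AU_FREAD the intersection
+	 * computed below is non-zero and the event is selected; with
+	 * am_success = AU_NET it would be dropped.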
+	 */
+	if(sorf & AU_PRS_SUCCESS) {
+		effmask |= (mask_p->am_success & ae_class);
+	}
+
+	if(sorf & AU_PRS_FAILURE) {
+		effmask |= (mask_p->am_failure & ae_class);
+	}
+
+	if(effmask)
+		return (1);
+	else
+		return (0);
+}
+
+/*
+ * Convert an open flags specifier into a specific type of open event for
+ * auditing purposes.
+ */
+au_event_t flags_to_openevent(int oflags) {
+
+	/* Need to check only those flags we care about. */
+	oflags = oflags & (O_RDONLY | O_CREAT | O_TRUNC | O_RDWR | O_WRONLY);
+
+	/* These checks determine what flags are on with the condition
+	 * that ONLY that combination is on, and no other flags are on.
+	 */
+	if (!(oflags ^ O_RDONLY))
+		return AUE_OPEN_R;
+	if (!(oflags ^ (O_RDONLY | O_CREAT)))
+		return AUE_OPEN_RC;
+	if (!(oflags ^ (O_RDONLY | O_CREAT | O_TRUNC)))
+		return AUE_OPEN_RTC;
+	if (!(oflags ^ (O_RDONLY | O_TRUNC)))
+		return AUE_OPEN_RT;
+	if (!(oflags ^ O_RDWR))
+		return AUE_OPEN_RW;
+	if (!(oflags ^ (O_RDWR | O_CREAT)))
+		return AUE_OPEN_RWC;
+	if (!(oflags ^ (O_RDWR | O_CREAT | O_TRUNC)))
+		return AUE_OPEN_RWTC;
+	if (!(oflags ^ (O_RDWR | O_TRUNC)))
+		return AUE_OPEN_RWT;
+	if (!(oflags ^ O_WRONLY))
+		return AUE_OPEN_W;
+	if (!(oflags ^ (O_WRONLY | O_CREAT)))
+		return AUE_OPEN_WC;
+	if (!(oflags ^ (O_WRONLY | O_CREAT | O_TRUNC)))
+		return AUE_OPEN_WTC;
+	if (!(oflags ^ (O_WRONLY | O_TRUNC)))
+		return AUE_OPEN_WT;
+
+	return AUE_OPEN_R;
+}
+
+/*
+ * Fill in a vattr struct from kernel audit record fields. This function
+ * would be unnecessary if we stored a vattr in the kernel audit record
+ * directly.
+ */
+void fill_vattr(struct vattr *v, struct vnode_au_info *vn_info)
+{
+	v->va_mode = vn_info->vn_mode;
+	v->va_uid = vn_info->vn_uid;
+	v->va_gid = vn_info->vn_gid;
+	v->va_fsid = vn_info->vn_fsid;
+	v->va_fileid = vn_info->vn_fileid;
+	v->va_rdev = vn_info->vn_dev;
+}
+
+/* Convert a MSGCTL command to a specific event. */
+int msgctl_to_event(int cmd)
+{
+	switch (cmd) {
+	case IPC_RMID:
+		return AUE_MSGCTL_RMID;
+	case IPC_SET:
+		return AUE_MSGCTL_SET;
+	case IPC_STAT:
+		return AUE_MSGCTL_STAT;
+	default:
+		return AUE_MSGCTL;	/* We will audit a bad command */
+	}
+}
+
+/* Convert a SEMCTL command to a specific event. */
+int semctl_to_event(int cmd)
+{
+	switch (cmd) {
+	case GETALL:
+		return AUE_SEMCTL_GETALL;
+	case GETNCNT:
+		return AUE_SEMCTL_GETNCNT;
+	case GETPID:
+		return AUE_SEMCTL_GETPID;
+	case GETVAL:
+		return AUE_SEMCTL_GETVAL;
+	case GETZCNT:
+		return AUE_SEMCTL_GETZCNT;
+	case IPC_RMID:
+		return AUE_SEMCTL_RMID;
+	case IPC_SET:
+		return AUE_SEMCTL_SET;
+	case SETALL:
+		return AUE_SEMCTL_SETALL;
+	case SETVAL:
+		return AUE_SEMCTL_SETVAL;
+	case IPC_STAT:
+		return AUE_SEMCTL_STAT;
+	default:
+		return AUE_SEMCTL;	/* We will audit a bad command */
+	}
+}
+
+/*
+ * Create a canonical path from the given path by prefixing either the
+ * root directory, or the current working directory.
+ * If the process working directory is NULL, we could use 'rootvnode'
+ * to obtain the root directory, but this results in a volfs name
+ * written to the audit log. So we will leave the filename starting
+ * with '/' in the audit log in this case.
+ */
+void canon_path(struct proc *p, char *path, char *cpath)
+{
+	char *bufp;
+	int len;
+	struct vnode *vnp;
+	struct filedesc *fdp;
+
+	fdp = p->p_fd;
+	bufp = path;
+	if (*(path) == '/') {
+		while (*(bufp) == '/')
+			bufp++;		/* skip leading '/'s */
+		/* If no process root, or it is the same as the system root,
+		 * audit the path as passed in with a single '/'.
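+		 * (A process chrooted elsewhere instead has fd_rdir's path
+		 * prepended via vn_getpath() below.)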
+ */ + if ((fdp->fd_rdir == NULL) || + (fdp->fd_rdir == rootvnode)) { + vnp = NULL; + bufp--; /* restore one '/' */ + } else { + vnp = fdp->fd_rdir; /* use process root */ + } + } else { + vnp = fdp->fd_cdir; /* prepend the current dir */ + bufp = path; + } + if (vnp != NULL) { + len = MAXPATHLEN; + vn_getpath(vnp, cpath, &len); + /* The length returned by vn_getpath() is two greater than the + * number of characters in the string. + */ + if (len < MAXPATHLEN) + cpath[len-2] = '/'; + strncpy(cpath + len-1, bufp, MAXPATHLEN - len); + } else { + strncpy(cpath, bufp, MAXPATHLEN); + } +} diff --git a/bsd/kern/kern_bsm_token.c b/bsd/kern/kern_bsm_token.c new file mode 100644 index 000000000..bbd68c77d --- /dev/null +++ b/bsd/kern/kern_bsm_token.c @@ -0,0 +1,1344 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include + +#define GET_TOKEN_AREA(tok, dptr, length) \ + do {\ + kmem_alloc(kernel_map, &tok, sizeof(*tok)); \ + if(tok != NULL)\ + {\ + tok->len = length;\ + kmem_alloc(kernel_map, &tok->t_data, \ + length * sizeof(u_char));\ + if((dptr = tok->t_data) == NULL)\ + {\ + kmem_free(kernel_map, tok, sizeof(*tok));\ + tok = NULL;\ + }\ + else\ + {\ + memset(dptr, 0, length);\ + }\ + }\ + }while(0) + + + +/* + * token ID 1 byte + * argument # 1 byte + * argument value 4 bytes/8 bytes (32-bit/64-bit value) + * text length 2 bytes + * text N bytes + 1 terminating NULL byte + */ +token_t *au_to_arg32(char n, char *text, u_int32_t v) +{ + token_t *t; + u_char *dptr; + u_int16_t textlen; + + if(text == NULL) { + return NULL; + } + + /* Make sure that text is null terminated */ + textlen = strlen(text); + if(text[textlen] != '\0') { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 9 + textlen); + if(t == NULL) { + return NULL; + } + + textlen += 1; + + ADD_U_CHAR(dptr, AU_ARG32_TOKEN); + ADD_U_CHAR(dptr, n); + ADD_U_INT32(dptr, v); + ADD_U_INT16(dptr, textlen); + ADD_STRING(dptr, text, textlen); + + return t; + +} + +token_t *au_to_arg64(char n, char *text, u_int64_t v) +{ + token_t *t; + u_char *dptr; + u_int16_t textlen; + + if(text == NULL) { + return NULL; + } + + /* Make sure that text is null terminated */ + textlen = strlen(text); + if(text[textlen] != '\0') { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 13 + textlen); + if(t == NULL) { + return NULL; + } + + textlen += 1; + + ADD_U_CHAR(dptr, AU_ARG64_TOKEN); + ADD_U_CHAR(dptr, n); + ADD_U_INT64(dptr, v); + ADD_U_INT16(dptr, textlen); + ADD_STRING(dptr, text, textlen); + + return t; + +} + +token_t *au_to_arg(char n, char *text, u_int32_t v) +{ + return au_to_arg32(n, text, v); +} + +/* + * token ID 1 byte + * file access mode 4 bytes + * owner user ID 4 bytes + * owner group ID 4 bytes + * file system ID 4 bytes + * node ID 8 bytes + * device 4 bytes/8 bytes (32-bit/64-bit) + */ +token_t *au_to_attr32(struct vattr *attr) +{ + token_t *t; + u_char *dptr; + + if(attr == NULL) { + return NULL; + } + + + GET_TOKEN_AREA(t, dptr, 29); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_ATTR32_TOKEN); + ADD_U_INT32(dptr, attr->va_mode); + ADD_U_INT32(dptr, attr->va_uid); + ADD_U_INT32(dptr, attr->va_gid); + ADD_U_INT32(dptr, attr->va_fsid); + ADD_U_INT64(dptr, attr->va_fileid); + ADD_U_INT32(dptr, attr->va_rdev); + + return t; +} + +token_t *au_to_attr64(struct vattr *attr) +{ + token_t *t; + u_char *dptr; + + if(attr == NULL) { + return NULL; + } + + + GET_TOKEN_AREA(t, dptr, 33); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_ATTR64_TOKEN); + ADD_U_INT32(dptr, attr->va_mode); + ADD_U_INT32(dptr, attr->va_uid); + ADD_U_INT32(dptr, attr->va_gid); + ADD_U_INT32(dptr, attr->va_fsid); + ADD_U_INT64(dptr, attr->va_fileid); + ADD_U_INT64(dptr, attr->va_rdev); + + return t; +} + +token_t *au_to_attr(struct vattr *attr) +{ + return au_to_attr32(attr); + +} + + +/* + * token ID 1 byte + * how to print 1 byte + * basic unit 1 byte + * unit count 1 byte + * data items (depends on basic unit) + */ +token_t *au_to_data(char unit_print, char unit_type, + char unit_count, char *p) +{ + token_t *t; + u_char *dptr; + size_t datasize, totdata; + + if(p == NULL) { + return NULL; + } + + /* Determine the size of the basic unit */ + switch(unit_type) { + case AUR_BYTE: datasize = AUR_BYTE_SIZE; + break; + + case AUR_SHORT: datasize = AUR_SHORT_SIZE; + break; + + 
case AUR_LONG: datasize = AUR_LONG_SIZE; + break; + + default: return NULL; + } + + totdata = datasize * unit_count; + + GET_TOKEN_AREA(t, dptr, totdata + 4); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_ARB_TOKEN); + ADD_U_CHAR(dptr, unit_print); + ADD_U_CHAR(dptr, unit_type); + ADD_U_CHAR(dptr, unit_count); + ADD_MEM(dptr, p, totdata); + + return t; +} + + +/* + * token ID 1 byte + * status 4 bytes + * return value 4 bytes + */ +token_t *au_to_exit(int retval, int err) +{ + token_t *t; + u_char *dptr; + + GET_TOKEN_AREA(t, dptr, 9); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_EXIT_TOKEN); + ADD_U_INT32(dptr, err); + ADD_U_INT32(dptr, retval); + + return t; +} + +/* + */ +token_t *au_to_groups(int *groups) +{ + return au_to_newgroups(MAX_GROUPS, groups); +} + +/* + * token ID 1 byte + * number groups 2 bytes + * group list count * 4 bytes + */ +token_t *au_to_newgroups(u_int16_t n, gid_t *groups) +{ + token_t *t; + u_char *dptr; + int i; + + if(groups == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, n * 4 + 3); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_NEWGROUPS_TOKEN); + ADD_U_INT16(dptr, n); + for(i = 0; i < n; i++) { + ADD_U_INT32(dptr, groups[i]); + } + + return t; +} + + + + +/* + * token ID 1 byte + * internet address 4 bytes + */ +token_t *au_to_in_addr(struct in_addr *internet_addr) +{ + token_t *t; + u_char *dptr; + + if(internet_addr == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 5); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_IN_ADDR_TOKEN); + ADD_U_INT32(dptr, internet_addr->s_addr); + + return t; +} + +/* + * token ID 1 byte + * address type/length 4 bytes + * Address 16 bytes + */ +token_t *au_to_in_addr_ex(struct in6_addr *internet_addr) +{ + token_t *t; + u_char *dptr; + + if(internet_addr == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 21); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_IN_ADDR_EX_TOKEN); + ADD_U_INT32(dptr, internet_addr->__u6_addr.__u6_addr32[0]); + ADD_U_INT32(dptr, internet_addr->__u6_addr.__u6_addr32[1]); + ADD_U_INT32(dptr, internet_addr->__u6_addr.__u6_addr32[2]); + ADD_U_INT32(dptr, internet_addr->__u6_addr.__u6_addr32[3]); + + return t; +} + +/* + * token ID 1 byte + * ip header 20 bytes + */ +token_t *au_to_ip(struct ip *ip) +{ + token_t *t; + u_char *dptr; + + if(ip == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 21); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_IP_TOKEN); + ADD_MEM(dptr, ip, sizeof(struct ip)); + + return t; +} + +/* + * token ID 1 byte + * object ID type 1 byte + * object ID 4 bytes + */ +token_t *au_to_ipc(char type, int id) +{ + token_t *t; + u_char *dptr; + + + GET_TOKEN_AREA(t, dptr, 6); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_IPC_TOKEN); + ADD_U_CHAR(dptr, type); + ADD_U_INT32(dptr, id); + + return t; +} + +/* + * token ID 1 byte + * owner user ID 4 bytes + * owner group ID 4 bytes + * creator user ID 4 bytes + * creator group ID 4 bytes + * access mode 4 bytes + * slot sequence # 4 bytes + * key 4 bytes + */ +token_t *au_to_ipc_perm(struct ipc_perm *perm) +{ + token_t *t; + u_char *dptr; + + if(perm == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 29); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_IPCPERM_TOKEN); + ADD_U_INT32(dptr, perm->uid); + ADD_U_INT32(dptr, perm->gid); + ADD_U_INT32(dptr, perm->cuid); + ADD_U_INT32(dptr, perm->cgid); + ADD_U_INT32(dptr, perm->mode); + ADD_U_INT32(dptr, perm->seq); + ADD_U_INT32(dptr, perm->key); + + 
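/* 29 bytes total: the token ID byte plus seven 4-byte fields */
+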
return t; +} + + +/* + * token ID 1 byte + * port IP address 2 bytes + */ +token_t *au_to_iport(u_int16_t iport) +{ + token_t *t; + u_char *dptr; + + + GET_TOKEN_AREA(t, dptr, 3); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_IPORT_TOKEN); + ADD_U_INT16(dptr, iport); + + return t; +} + + +/* + * token ID 1 byte + * size 2 bytes + * data size bytes + */ +token_t *au_to_opaque(char *data, u_int16_t bytes) +{ + token_t *t; + u_char *dptr; + + if((data == NULL) || (bytes <= 0)) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, bytes + 3); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_OPAQUE_TOKEN); + ADD_U_INT16(dptr, bytes); + ADD_MEM(dptr, data, bytes); + + return t; +} + +#ifdef KERNEL +/* + * Kernel version of the add file token function, where the time value + * is passed in as an additional parameter. + * token ID 1 byte + * seconds of time 4 bytes + * milliseconds of time 4 bytes + * file name len 2 bytes + * file pathname N bytes + 1 terminating NULL byte + */ +token_t *kau_to_file(char *file, struct timeval *tv) +{ + token_t *t; + u_char *dptr; + u_int16_t filelen; + u_int32_t timems = tv->tv_usec/1000; /* We need time in ms */ + + if(file == NULL) { + return NULL; + } + /* Make sure that text is null terminated */ + filelen = strlen(file); + if(file[filelen] != '\0') { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, filelen + 12); + if(t == NULL) { + return NULL; + } + + filelen += 1; + + ADD_U_CHAR(dptr, AU_FILE_TOKEN); + + /* Add the timestamp */ + ADD_U_INT32(dptr, tv->tv_sec); + ADD_U_INT32(dptr, timems); + + ADD_U_INT16(dptr, filelen); + ADD_STRING(dptr, file, filelen); + + return t; + +} +#endif + +/* + * token ID 1 byte + * text length 2 bytes + * text N bytes + 1 terminating NULL byte + */ +token_t *au_to_text(char *text) +{ + token_t *t; + u_char *dptr; + u_int16_t textlen; + + if(text == NULL) { + return NULL; + } + /* Make sure that text is null terminated */ + textlen = strlen(text); + if(text[textlen] != '\0') { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, textlen + 4); + if(t == NULL) { + return NULL; + } + + textlen += 1; + + ADD_U_CHAR(dptr, AU_TEXT_TOKEN); + ADD_U_INT16(dptr, textlen); + ADD_STRING(dptr, text, textlen); + + return t; +} + +/* + * token ID 1 byte + * path length 2 bytes + * path N bytes + 1 terminating NULL byte + */ +token_t *au_to_path(char *text) +{ + token_t *t; + u_char *dptr; + u_int16_t textlen; + + if(text == NULL) { + return NULL; + } + /* Make sure that text is null terminated */ + textlen = strlen(text); + if(text[textlen] != '\0') { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, textlen + 4); + if(t == NULL) { + return NULL; + } + + textlen += 1; + + ADD_U_CHAR(dptr, AU_PATH_TOKEN); + ADD_U_INT16(dptr, textlen); + ADD_STRING(dptr, text, textlen); + + return t; +} + +/* + * token ID 1 byte + * audit ID 4 bytes + * effective user ID 4 bytes + * effective group ID 4 bytes + * real user ID 4 bytes + * real group ID 4 bytes + * process ID 4 bytes + * session ID 4 bytes + * terminal ID + * port ID 4 bytes/8 bytes (32-bit/64-bit value) + * machine address 4 bytes + */ +token_t *au_to_process32(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid) +{ + token_t *t; + u_char *dptr; + + if(tid == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 37); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_PROCESS_32_TOKEN); + ADD_U_INT32(dptr, auid); + ADD_U_INT32(dptr, euid); + ADD_U_INT32(dptr, egid); + ADD_U_INT32(dptr, ruid); + ADD_U_INT32(dptr, rgid); + 
ADD_U_INT32(dptr, pid); + ADD_U_INT32(dptr, sid); + ADD_U_INT32(dptr, tid->port); + ADD_U_INT32(dptr, tid->machine); + + return t; +} + +token_t *au_to_process64(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid) +{ + token_t *t; + u_char *dptr; + + if(tid == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 41); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_PROCESS_64_TOKEN); + ADD_U_INT32(dptr, auid); + ADD_U_INT32(dptr, euid); + ADD_U_INT32(dptr, egid); + ADD_U_INT32(dptr, ruid); + ADD_U_INT32(dptr, rgid); + ADD_U_INT32(dptr, pid); + ADD_U_INT32(dptr, sid); + ADD_U_INT64(dptr, tid->port); + ADD_U_INT32(dptr, tid->machine); + + return t; +} + +token_t *au_to_process(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid) +{ + return au_to_process32(auid, euid, egid, ruid, rgid, pid, + sid, tid); +} + + +/* + * token ID 1 byte + * audit ID 4 bytes + * effective user ID 4 bytes + * effective group ID 4 bytes + * real user ID 4 bytes + * real group ID 4 bytes + * process ID 4 bytes + * session ID 4 bytes + * terminal ID + * port ID 4 bytes/8 bytes (32-bit/64-bit value) + * address type-len 4 bytes + * machine address 16 bytes + */ +token_t *au_to_process32_ex(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid) +{ + token_t *t; + u_char *dptr; + + if(tid == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 53); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_PROCESS_32_EX_TOKEN); + ADD_U_INT32(dptr, auid); + ADD_U_INT32(dptr, euid); + ADD_U_INT32(dptr, egid); + ADD_U_INT32(dptr, ruid); + ADD_U_INT32(dptr, rgid); + ADD_U_INT32(dptr, pid); + ADD_U_INT32(dptr, sid); + ADD_U_INT32(dptr, tid->at_port); + ADD_U_INT32(dptr, tid->at_type); + ADD_U_INT32(dptr, tid->at_addr[0]); + ADD_U_INT32(dptr, tid->at_addr[1]); + ADD_U_INT32(dptr, tid->at_addr[2]); + ADD_U_INT32(dptr, tid->at_addr[3]); + + return t; +} + +token_t *au_to_process64_ex(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid) +{ + token_t *t; + u_char *dptr; + + if(tid == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 57); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_PROCESS_64_EX_TOKEN); + ADD_U_INT32(dptr, auid); + ADD_U_INT32(dptr, euid); + ADD_U_INT32(dptr, egid); + ADD_U_INT32(dptr, ruid); + ADD_U_INT32(dptr, rgid); + ADD_U_INT32(dptr, pid); + ADD_U_INT32(dptr, sid); + ADD_U_INT64(dptr, tid->at_port); + ADD_U_INT32(dptr, tid->at_type); + ADD_U_INT32(dptr, tid->at_addr[0]); + ADD_U_INT32(dptr, tid->at_addr[1]); + ADD_U_INT32(dptr, tid->at_addr[2]); + ADD_U_INT32(dptr, tid->at_addr[3]); + + return t; +} + +token_t *au_to_process_ex(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid) +{ + return au_to_process32_ex(auid, euid, egid, ruid, rgid, + pid, sid, tid); +} + +/* + * token ID 1 byte + * error status 1 byte + * return value 4 bytes/8 bytes (32-bit/64-bit value) + */ +token_t *au_to_return32(char status, u_int32_t ret) +{ + token_t *t; + u_char *dptr; + + + GET_TOKEN_AREA(t, dptr, 6); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_RETURN_32_TOKEN); + ADD_U_CHAR(dptr, status); + ADD_U_INT32(dptr, ret); + + return t; +} + +token_t *au_to_return64(char status, u_int64_t ret) +{ + token_t *t; + u_char *dptr; + + + GET_TOKEN_AREA(t, dptr, 10); + if(t == NULL) { + return NULL; + } 
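+
+	/* 10 bytes: token ID, one status byte, and the 64-bit return value */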
+ + ADD_U_CHAR(dptr, AU_RETURN_64_TOKEN); + ADD_U_CHAR(dptr, status); + ADD_U_INT64(dptr, ret); + + return t; +} + +token_t *au_to_return(char status, u_int32_t ret) +{ + return au_to_return32(status, ret); +} + +/* + * token ID 1 byte + * sequence number 4 bytes + */ +token_t *au_to_seq(long audit_count) +{ + token_t *t; + u_char *dptr; + + + GET_TOKEN_AREA(t, dptr, 5); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_SEQ_TOKEN); + ADD_U_INT32(dptr, audit_count); + + return t; +} + +/* + * token ID 1 byte + * socket type 2 bytes + * remote port 2 bytes + * remote Internet address 4 bytes + */ +token_t *au_to_socket(struct socket *so) +{ + return au_to_socket_ex_32(so); +} + +/* + * token ID 1 byte + * socket type 2 bytes + * local port 2 bytes + * address type/length 4 bytes + * local Internet address 4 bytes/16 bytes (IPv4/IPv6 address) + * remote port 4 bytes + * address type/length 4 bytes + * remote Internet address 4 bytes/16 bytes (IPv4/IPv6 address) + */ +token_t *au_to_socket_ex_32(struct socket *so) +{ + return NULL; +} +token_t *au_to_socket_ex_128(struct socket *so) +{ + return NULL; +} + +/* + * token ID 1 byte + * socket family 2 bytes + * local port 2 bytes + * socket address 4 bytes + */ +token_t *au_to_sock_inet32(struct sockaddr_in *so) +{ + token_t *t; + u_char *dptr; + + if(so == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 9); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_SOCK_INET_32_TOKEN); + /* In Darwin, sin_family is one octet, but BSM defines the token + * to store two. So we copy in a 0 first. + */ + ADD_U_CHAR(dptr, 0); + ADD_U_CHAR(dptr, so->sin_family); + ADD_U_INT16(dptr, so->sin_port); + ADD_U_INT32(dptr, so->sin_addr.s_addr); + + return t; + +} + +token_t *au_to_sock_inet128(struct sockaddr_in6 *so) +{ + token_t *t; + u_char *dptr; + + if(so == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 21); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_SOCK_INET_128_TOKEN); + /* In Darwin, sin_family is one octet, but BSM defines the token + * to store two. So we copy in a 0 first. 
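+	 * (au_to_sock_inet32() above handles the IPv4 token the same way.)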
+ */ + ADD_U_CHAR(dptr, 0); + ADD_U_CHAR(dptr, so->sin6_family); + ADD_U_INT16(dptr, so->sin6_port); + ADD_U_INT32(dptr, so->sin6_addr.__u6_addr.__u6_addr32[0]); + ADD_U_INT32(dptr, so->sin6_addr.__u6_addr.__u6_addr32[1]); + ADD_U_INT32(dptr, so->sin6_addr.__u6_addr.__u6_addr32[2]); + ADD_U_INT32(dptr, so->sin6_addr.__u6_addr.__u6_addr32[3]); + + return t; + + + +} + +/* + * token ID 1 byte + * socket family 2 bytes + * path 104 bytes + */ +token_t *au_to_sock_unix(struct sockaddr_un *so) +{ + token_t *t; + u_char *dptr; + + if(so == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 107); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_SOCK_UNIX_TOKEN); + /* BSM token has two bytes for family */ + ADD_U_CHAR(dptr, 0); + ADD_U_CHAR(dptr, so->sun_family); + ADD_STRING(dptr, so->sun_path, strlen(so->sun_path)); + + return t; + +} + +token_t *au_to_sock_inet(struct sockaddr_in *so) +{ + return au_to_sock_inet32(so); +} + +/* + * token ID 1 byte + * audit ID 4 bytes + * effective user ID 4 bytes + * effective group ID 4 bytes + * real user ID 4 bytes + * real group ID 4 bytes + * process ID 4 bytes + * session ID 4 bytes + * terminal ID + * port ID 4 bytes/8 bytes (32-bit/64-bit value) + * machine address 4 bytes + */ +token_t *au_to_subject32(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid) +{ + token_t *t; + u_char *dptr; + + if(tid == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 37); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_SUBJECT_32_TOKEN); + ADD_U_INT32(dptr, auid); + ADD_U_INT32(dptr, euid); + ADD_U_INT32(dptr, egid); + ADD_U_INT32(dptr, ruid); + ADD_U_INT32(dptr, rgid); + ADD_U_INT32(dptr, pid); + ADD_U_INT32(dptr, sid); + ADD_U_INT32(dptr, tid->port); + ADD_U_INT32(dptr, tid->machine); + + return t; +} + +token_t *au_to_subject64(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid) +{ + token_t *t; + u_char *dptr; + + if(tid == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 41); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_SUBJECT_64_TOKEN); + ADD_U_INT32(dptr, auid); + ADD_U_INT32(dptr, euid); + ADD_U_INT32(dptr, egid); + ADD_U_INT32(dptr, ruid); + ADD_U_INT32(dptr, rgid); + ADD_U_INT32(dptr, pid); + ADD_U_INT32(dptr, sid); + ADD_U_INT64(dptr, tid->port); + ADD_U_INT32(dptr, tid->machine); + + return t; +} + +token_t *au_to_subject(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid) +{ + return au_to_subject32(auid, euid, egid, ruid, rgid, + pid, sid, tid); + +} + +/* + * token ID 1 byte + * audit ID 4 bytes + * effective user ID 4 bytes + * effective group ID 4 bytes + * real user ID 4 bytes + * real group ID 4 bytes + * process ID 4 bytes + * session ID 4 bytes + * terminal ID + * port ID 4 bytes/8 bytes (32-bit/64-bit value) + * address type/length 4 bytes + * machine address 16 bytes + */ +token_t *au_to_subject32_ex(au_id_t auid, uid_t euid, + gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid) +{ + token_t *t; + u_char *dptr; + + if(tid == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 53); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_SUBJECT_32_EX_TOKEN); + ADD_U_INT32(dptr, auid); + ADD_U_INT32(dptr, euid); + ADD_U_INT32(dptr, egid); + ADD_U_INT32(dptr, ruid); + ADD_U_INT32(dptr, rgid); + ADD_U_INT32(dptr, pid); + ADD_U_INT32(dptr, sid); + ADD_U_INT32(dptr, tid->at_port); + 
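/* what follows is the extended terminal ID: an address type/length word and 16 bytes of address; presumably only at_addr[0] is meaningful for an IPv4 terminal */ +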
ADD_U_INT32(dptr, tid->at_type); + ADD_U_INT32(dptr, tid->at_addr[0]); + ADD_U_INT32(dptr, tid->at_addr[1]); + ADD_U_INT32(dptr, tid->at_addr[2]); + ADD_U_INT32(dptr, tid->at_addr[3]); + + return t; +} + +token_t *au_to_subject64_ex(au_id_t auid, uid_t euid, + gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid) +{ + token_t *t; + u_char *dptr; + + if(tid == NULL) { + return NULL; + } + + GET_TOKEN_AREA(t, dptr, 57); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_SUBJECT_64_EX_TOKEN); + ADD_U_INT32(dptr, auid); + ADD_U_INT32(dptr, euid); + ADD_U_INT32(dptr, egid); + ADD_U_INT32(dptr, ruid); + ADD_U_INT32(dptr, rgid); + ADD_U_INT32(dptr, pid); + ADD_U_INT32(dptr, sid); + ADD_U_INT64(dptr, tid->at_port); + ADD_U_INT32(dptr, tid->at_type); + ADD_U_INT32(dptr, tid->at_addr[0]); + ADD_U_INT32(dptr, tid->at_addr[1]); + ADD_U_INT32(dptr, tid->at_addr[2]); + ADD_U_INT32(dptr, tid->at_addr[3]); + + return t; +} + +token_t *au_to_subject_ex(au_id_t auid, uid_t euid, + gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid) +{ + return au_to_subject32_ex(auid, euid, egid, ruid, rgid, + pid, sid, tid); + +} + +/* + * token ID 1 byte + * count 4 bytes + * text count null-terminated strings + */ +token_t *au_to_exec_args(const char **args) +{ + token_t *t; + u_char *dptr; + const char *nextarg; + int i, count = 0; + size_t totlen = 0; + + if(args == NULL) { + return NULL; + } + + nextarg = *args; + + while(nextarg != NULL) { + int nextlen; + + nextlen = strlen(nextarg); + if(nextarg[nextlen] != '\0') { + return NULL; + } + + totlen += nextlen + 1; + count++; + nextarg = *(args + count); + } + + + GET_TOKEN_AREA(t, dptr, 5 + totlen); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_EXEC_ARG_TOKEN); + ADD_U_INT32(dptr, count); + + for(i =0; i< count; i++) { + nextarg = *(args + i); + ADD_MEM(dptr, nextarg, strlen(nextarg) + 1); + } + + return t; +} + + +/* + * token ID 1 byte + * count 4 bytes + * text count null-terminated strings + */ +token_t *au_to_exec_env(const char **env) +{ + token_t *t; + u_char *dptr; + int i, count = 0; + size_t totlen = 0; + const char *nextenv; + + if(env == NULL) { + return NULL; + } + + nextenv = *env; + + while(nextenv != NULL) { + int nextlen; + + nextlen = strlen(nextenv); + if(nextenv[nextlen] != '\0') { + return NULL; + } + + totlen += nextlen + 1; + count++; + nextenv = *(env + count); + } + + + GET_TOKEN_AREA(t, dptr, 5 + totlen); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_EXEC_ENV_TOKEN); + ADD_U_INT32(dptr, count); + + for(i =0; i< count; i++) { + nextenv = *(env + i); + ADD_MEM(dptr, nextenv, strlen(nextenv) + 1); + } + + return t; +} + + +#ifdef KERNEL +/* + * Kernel version of the BSM header token functions. These versions take + * a timespec struct as an additional parameter in order to obtain the + * create time value for the BSM audit record. 
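+ * A hypothetical call site (a sketch, not code in this change): + * + * struct timespec ts; + * nanotime(&ts); + * t = kau_to_header32(&ts, rec_size, e_type, e_mod); + * + * The token layout is: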
+ * token ID 1 byte + * record byte count 4 bytes + * version # 1 byte [2] + * event type 2 bytes + * event modifier 2 bytes + * seconds of time 4 bytes/8 bytes (32-bit/64-bit value) + * milliseconds of time 4 bytes/8 bytes (32-bit/64-bit value) + */ +token_t *kau_to_header32(struct timespec *ctime, int rec_size, + au_event_t e_type, au_emod_t e_mod) +{ + token_t *t; + u_char *dptr; + u_int32_t timems = ctime->tv_nsec/1000000; /* We need time in ms */ + + GET_TOKEN_AREA(t, dptr, 18); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_HEADER_32_TOKEN); + ADD_U_INT32(dptr, rec_size); + ADD_U_CHAR(dptr, HEADER_VERSION); + ADD_U_INT16(dptr, e_type); + ADD_U_INT16(dptr, e_mod); + + /* Add the timestamp */ + ADD_U_INT32(dptr, ctime->tv_sec); + ADD_U_INT32(dptr, timems); + + return t; +} + +token_t *kau_to_header64(struct timespec *ctime, int rec_size, + au_event_t e_type, au_emod_t e_mod) +{ + token_t *t; + u_char *dptr; + u_int32_t timems = ctime->tv_nsec/1000000; /* We need time in ms */ + + GET_TOKEN_AREA(t, dptr, 26); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_HEADER_64_TOKEN); + ADD_U_INT32(dptr, rec_size); + ADD_U_CHAR(dptr, HEADER_VERSION); + ADD_U_INT16(dptr, e_type); + ADD_U_INT16(dptr, e_mod); + + /* Add the timestamp */ + ADD_U_INT32(dptr, ctime->tv_sec); + ADD_U_INT32(dptr, timems); + + return t; +} + +token_t *kau_to_header(struct timespec *ctime, int rec_size, + au_event_t e_type, au_emod_t e_mod) +{ + return kau_to_header32(ctime, rec_size, e_type, e_mod); +} + +#endif + +/* + * token ID 1 byte + * trailer magic number 2 bytes + * record byte count 4 bytes + */ +token_t *au_to_trailer(int rec_size) +{ + token_t *t; + u_char *dptr; + u_int16_t magic = TRAILER_PAD_MAGIC; + + + GET_TOKEN_AREA(t, dptr, 7); + if(t == NULL) { + return NULL; + } + + ADD_U_CHAR(dptr, AU_TRAILER_TOKEN); + ADD_U_INT16(dptr, magic); + ADD_U_INT32(dptr, rec_size); + + return t; + +} + diff --git a/bsd/kern/kern_clock.c b/bsd/kern/kern_clock.c index 25c65d595..340880695 100644 --- a/bsd/kern/kern_clock.c +++ b/bsd/kern/kern_clock.c @@ -121,6 +121,7 @@ bsd_hardclock(usermode, pc, numticks) register struct proc *p; register thread_t thread; int nusecs = numticks * tick; + struct timeval tv; if (!bsd_hardclockinit) return; @@ -128,13 +129,14 @@ bsd_hardclock(usermode, pc, numticks) /* * Increment the time-of-day. */ - microtime(&time); + microtime(&tv); + time = tv; if (bsd_hardclockinit < 0) { return; } - thread = current_thread(); + thread = current_act(); /* * Charge the time out based on the mode the cpu is in. 
* Here again we fudge for the lack of proper interval timers @@ -160,7 +162,7 @@ bsd_hardclock(usermode, pc, numticks) extern void psignal_vtalarm(struct proc *); /* does psignal(p, SIGVTALRM) in a thread context */ - thread_call_func(psignal_vtalarm, p, FALSE); + thread_call_func((thread_call_func_t)psignal_vtalarm, p, FALSE); } } @@ -183,7 +185,7 @@ bsd_hardclock(usermode, pc, numticks) extern void psignal_xcpu(struct proc *); /* does psignal(p, SIGXCPU) in a thread context */ - thread_call_func(psignal_xcpu, p, FALSE); + thread_call_func((thread_call_func_t)psignal_xcpu, p, FALSE); if (p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur < p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_max) @@ -195,7 +197,7 @@ bsd_hardclock(usermode, pc, numticks) extern void psignal_sigprof(struct proc *); /* does psignal(p, SIGPROF) in a thread context */ - thread_call_func(psignal_sigprof, p, FALSE); + thread_call_func((thread_call_func_t)psignal_sigprof, p, FALSE); } } } diff --git a/bsd/kern/kern_control.c b/bsd/kern/kern_control.c index 7319540f5..49f5e3b52 100644 --- a/bsd/kern/kern_control.c +++ b/bsd/kern/kern_control.c @@ -168,6 +168,13 @@ ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p) if (ctl == NULL) return(EADDRNOTAVAIL); + if (ctl->flags & CTL_FLAG_PRIVILEGED) { + if (p == 0) + return(EINVAL); + if (error = suser(p->p_ucred, &p->p_acflag)) + return error; + } + if (ctl->skt != NULL) return(EBUSY); @@ -179,13 +186,6 @@ ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p) ctl->skt = so; - if (ctl->flags & CTL_FLAG_PRIVILEGED) { - if (p == 0) - return(EPERM); - if (error = suser(p->p_ucred, &p->p_acflag)) - return error; - } - if (ctl->connect) error = (*ctl->connect)(ctl, ctl->userdata); if (error) { @@ -284,7 +284,8 @@ ctl_enqueuedata(void *ctlref, void *data, size_t len, u_int32_t flags) } bcopy(data, mtod(m, void *), len); - + m->m_pkthdr.len = m->m_len = len; + sbappend(&so->so_rcv, m); if ((flags & CTL_DATA_NOWAKEUP) == 0) sorwakeup(so); diff --git a/bsd/kern/kern_core.c b/bsd/kern/kern_core.c index e4482b90e..052a0d537 100644 --- a/bsd/kern/kern_core.c +++ b/bsd/kern/kern_core.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -28,9 +28,6 @@ * * This file contains machine independent code for performing core dumps. * - * HISTORY - * 16-Feb-91 Mike DeMoney (mike@next.com) - * Massaged into MI form from m68k/core.c. 
*/ #include @@ -68,9 +65,10 @@ typedef struct { mythread_state_flavor_t thread_flavor_array[]={ {PPC_THREAD_STATE , PPC_THREAD_STATE_COUNT}, {PPC_FLOAT_STATE, PPC_FLOAT_STATE_COUNT}, - {PPC_EXCEPTION_STATE, PPC_EXCEPTION_STATE_COUNT} + {PPC_EXCEPTION_STATE, PPC_EXCEPTION_STATE_COUNT}, + {PPC_VECTOR_STATE, PPC_VECTOR_STATE_COUNT} }; -int mynum_flavors=3; +int mynum_flavors=4; #elif defined (__i386__) mythread_state_flavor_t thread_flavor_array [] = { {i386_THREAD_STATE, i386_THREAD_STATE_COUNT}, @@ -97,6 +95,7 @@ typedef struct { int tstate_size; } tir_t; +void collectth_state(thread_act_t th_act, tir_t *t) { vm_offset_t header; @@ -172,6 +171,7 @@ coredump(p) tir_t tir1; struct vnode * vp; extern boolean_t coredumpok(vm_map_t map, vm_offset_t va); /* temp fix */ + extern task_t current_task(); /* XXX */ if (pcred->p_svuid != pcred->p_ruid || pcred->p_svgid != pcred->p_rgid) return (EFAULT); @@ -185,8 +185,8 @@ coredump(p) (void) task_suspend(task); sprintf(core_name, "/cores/core.%d", p->p_pid); - NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, core_name, p); - if(error = vn_open(&nd, O_CREAT | FWRITE, S_IRUSR )) + NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, core_name, p); + if(error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW, S_IRUSR )) return (error); vp = nd.ni_vp; @@ -215,18 +215,8 @@ coredump(p) * nflavors here is really the number of ints in flavors * to meet the thread_getstatus() calling convention */ -#if 0 - nflavors = sizeof(flavors)/sizeof(int); - if (thread_getstatus(current_thread(), THREAD_STATE_FLAVOR_LIST, - (thread_state_t)(flavors), - &nflavors) != KERN_SUCCESS) - panic("core flavor list"); - /* now convert to number of flavors */ - nflavors /= sizeof(mythread_state_flavor_t)/sizeof(int); -#else nflavors = mynum_flavors; bcopy(thread_flavor_array,flavors,sizeof(thread_flavor_array)); -#endif tstate_size = 0; for (i = 0; i < nflavors; i++) tstate_size += sizeof(mythread_state_flavor_t) + @@ -255,9 +245,10 @@ coredump(p) mh->sizeofcmds = command_size; hoffset = sizeof(struct mach_header); /* offset into header */ - foffset = round_page(header_size); /* offset into file */ + foffset = round_page_32(header_size); /* offset into file */ vmoffset = VM_MIN_ADDRESS; /* offset into VM */ - /* We use to check for an error, here, now we try and get + /* + * We used to check for an error here; now we try and get * as much as we can */ while (segment_count > 0){ @@ -314,7 +305,9 @@ coredump(p) * Note: if we can't read, then we end up with * a hole in the file. */ - if ((maxprot & VM_PROT_READ) == VM_PROT_READ && vbr.user_tag != VM_MEMORY_IOKIT && coredumpok(map,vmoffset)) { + if ((maxprot & VM_PROT_READ) == VM_PROT_READ + && vbr.user_tag != VM_MEMORY_IOKIT + && coredumpok(map,vmoffset)) { error = vn_rdwr(UIO_WRITE, vp, (caddr_t)vmoffset, size, foffset, UIO_USERSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p); } @@ -325,44 +318,12 @@ coredump(p) segment_count--; }
- */ - for (i = 0; i < nflavors; i++) { - *(mythread_state_flavor_t *)(header+hoffset) = - flavors[i]; - hoffset += sizeof(mythread_state_flavor_t); - thread_getstatus(thread, flavors[i].flavor, - (thread_state_t *)(header+hoffset), - &flavors[i].count); - hoffset += flavors[i].count*sizeof(int); - } - thread = (thread_t) queue_next(&thread->thread_list); - thread_count--; - } - task_unlock(task); -#else /* /* 0 ][ */ tir1.header = header; tir1.hoffset = hoffset; tir1.flavors = flavors; tir1.tstate_size = tstate_size; task_act_iterate_wth_args(task, collectth_state,&tir1); -#endif /* 0 ] */ /* * Write out the Mach header at the beginning of the * file. @@ -375,4 +336,5 @@ out: error1 = vn_close(vp, FWRITE, cred, p); if (error == 0) error = error1; + return (error); } diff --git a/bsd/kern/kern_descrip.c b/bsd/kern/kern_descrip.c index b1f7469d4..1f6089b4a 100644 --- a/bsd/kern/kern_descrip.c +++ b/bsd/kern/kern_descrip.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -79,6 +79,8 @@ #include #include #include +#include +#include #include @@ -247,11 +249,14 @@ fcntl(p, uap, retval) daddr_t lbn, bn; int devBlockSize = 0; + AUDIT_ARG(fd, uap->fd); + AUDIT_ARG(cmd, uap->cmd); if ((u_int)fd >= fdp->fd_nfiles || (fp = fdp->fd_ofiles[fd]) == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) return (EBADF); pop = &fdp->fd_ofileflags[fd]; + switch (uap->cmd) { case F_DUPFD: @@ -325,6 +330,7 @@ fcntl(p, uap, retval) if (fp->f_type != DTYPE_VNODE) return (EBADF); vp = (struct vnode *)fp->f_data; + AUDIT_ARG(vnpath, vp, ARG_VNODE1); /* Copy in the lock structure */ error = copyin((caddr_t)uap->arg, (caddr_t)&fl, sizeof (fl)); @@ -358,6 +364,7 @@ fcntl(p, uap, retval) if (fp->f_type != DTYPE_VNODE) return (EBADF); vp = (struct vnode *)fp->f_data; + AUDIT_ARG(vnpath, vp, ARG_VNODE1); /* Copy in the lock structure */ error = copyin((caddr_t)uap->arg, (caddr_t)&fl, sizeof (fl)); @@ -510,6 +517,18 @@ fcntl(p, uap, retval) return(error); return (VOP_IOCTL(vp, 1, (caddr_t)&ra_struct, 0, fp->f_cred, p)); + case F_CHKCLEAN: + /* + * used by regression test to determine if + * all the dirty pages (via write) have been cleaned + * after a call to 'fsync'.
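+ * A hypothetical test sequence: write(2) dirty data, call fsync(2), + * then fcntl(fd, F_CHKCLEAN, 0); the result is whatever the + * filesystem's VOP_IOCTL (command 5, used below) reports.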
+ */ + if (fp->f_type != DTYPE_VNODE) + return (EBADF); + vp = (struct vnode *)fp->f_data; + + return (VOP_IOCTL(vp, 5, 0, 0, fp->f_cred, p)); + case F_READBOOTSTRAP: case F_WRITEBOOTSTRAP: if (fp->f_type != DTYPE_VNODE) @@ -550,10 +569,12 @@ fcntl(p, uap, retval) error = vn_lock(vp, LK_EXCLUSIVE|LK_RETRY, p); if (error) return (error); - if (VOP_OFFTOBLK(vp, fp->f_offset, &lbn)) - panic("fcntl LOG2PHYS OFFTOBLK"); - if (VOP_BLKTOOFF(vp, lbn, &offset)) - panic("fcntl LOG2PHYS BLKTOOFF1"); + error = VOP_OFFTOBLK(vp, fp->f_offset, &lbn); + if (error) + return (error); + error = VOP_BLKTOOFF(vp, lbn, &offset); + if (error) + return (error); error = VOP_BMAP(vp, lbn, &devvp, &bn, 0); VOP_DEVBLOCKSIZE(devvp, &devBlockSize); VOP_UNLOCK(vp, 0, p); @@ -568,6 +589,32 @@ fcntl(p, uap, retval) } return (error); + case F_GETPATH: { + char *pathbuf; + int len; + extern int vn_getpath(struct vnode *vp, char *pathbuf, int *len); + + if (fp->f_type != DTYPE_VNODE) + return (EBADF); + vp = (struct vnode *)fp->f_data; + + len = MAXPATHLEN; + MALLOC(pathbuf, char *, len, M_TEMP, M_WAITOK); + error = vn_getpath(vp, pathbuf, &len); + if (error == 0) + error = copyout((caddr_t)pathbuf, (caddr_t)uap->arg, len); + FREE(pathbuf, M_TEMP); + return error; + } + + case F_FULLFSYNC: { + if (fp->f_type != DTYPE_VNODE) + return (EBADF); + vp = (struct vnode *)fp->f_data; + + return (VOP_IOCTL(vp, 6, (caddr_t)NULL, 0, fp->f_cred, p)); + } + default: return (EINVAL); } @@ -620,6 +667,16 @@ close(p, uap, retval) (fp = fdp->fd_ofiles[fd]) == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) return (EBADF); + + /* Keep people from using the filedesc while we are closing it */ + fdp->fd_ofileflags[fd] |= UF_RESERVED; + + /* cancel all async IO requests that can be cancelled. */ + _aio_close( p, fd ); + + if (fd < fdp->fd_knlistsize) + knote_fdclose(p, fd); + _fdrelse(fdp, fd); return (closef(fp, p)); } @@ -644,6 +701,7 @@ fstat(p, uap, retval) struct stat ub; int error; + AUDIT_ARG(fd, uap->fd); if ((u_int)fd >= fdp->fd_nfiles || (fp = fdp->fd_ofiles[fd]) == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) @@ -652,6 +710,9 @@ fstat(p, uap, retval) case DTYPE_VNODE: error = vn_stat((struct vnode *)fp->f_data, &ub, p); + if (error == 0) { + AUDIT_ARG(vnpath, (struct vnode *)fp->f_data, ARG_VNODE1); + } break; case DTYPE_SOCKET: @@ -661,6 +722,11 @@ fstat(p, uap, retval) case DTYPE_PSXSHM: error = pshm_stat((void *)fp->f_data, &ub); break; + + case DTYPE_KQUEUE: + error = kqueue_stat(fp, &ub, p); + break; + default: panic("fstat"); /*NOTREACHED*/ @@ -736,6 +802,7 @@ fpathconf(p, uap, retval) struct file *fp; struct vnode *vp; + AUDIT_ARG(fd, uap->fd); if ((u_int)fd >= fdp->fd_nfiles || (fp = fdp->fd_ofiles[fd]) == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) @@ -750,6 +817,8 @@ fpathconf(p, uap, retval) case DTYPE_VNODE: vp = (struct vnode *)fp->f_data; + AUDIT_ARG(vnpath, vp, ARG_VNODE1); + return (VOP_PATHCONF(vp, uap->name, retval)); default: @@ -923,11 +992,6 @@ falloc(p, resultfp, resultfd) nfiles++; MALLOC_ZONE(fp, struct file *, sizeof(struct file), M_FILE, M_WAITOK); bzero(fp, sizeof(struct file)); - if (fq = p->p_fd->fd_ofiles[0]) { - LIST_INSERT_AFTER(fq, fp, f_list); - } else { - LIST_INSERT_HEAD(&filehead, fp, f_list); - } p->p_fd->fd_ofiles[i] = fp; fp->f_count = 1; fp->f_cred = p->p_ucred; @@ -936,6 +1000,11 @@ falloc(p, resultfp, resultfd) *resultfp = fp; if (resultfd) *resultfd = i; + if (fq = p->p_fd->fd_ofiles[0]) { + LIST_INSERT_AFTER(fq, fp, f_list); + } else { + LIST_INSERT_HEAD(&filehead, fp, f_list); + } 
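+ /* + * Note: the new file is placed on the global filehead list only + * after it is fully initialized, presumably so that a partially + * set up struct file is never visible there (a reading of the + * intent of this reordering). + */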
return (0); } @@ -976,6 +1045,9 @@ fdexec(p) if ((*flags & (UF_RESERVED|UF_EXCLOSE)) == UF_EXCLOSE) { register struct file *fp = *fpp; + if (i < fdp->fd_knlistsize) + knote_fdclose(p, i); + *fpp = NULL; *flags = 0; if (i == fdp->fd_lastfile && i > 0) fdp->fd_lastfile--; @@ -1037,6 +1109,26 @@ fdcopy(p) (void) memcpy(newfdp->fd_ofileflags, fdp->fd_ofileflags, i * sizeof *fdp->fd_ofileflags); + /* + * kq descriptors cannot be copied. + */ + if (newfdp->fd_knlistsize != -1) { + fpp = &newfdp->fd_ofiles[newfdp->fd_lastfile]; + for (i = newfdp->fd_lastfile; i >= 0; i--, fpp--) { + if (*fpp != NULL && (*fpp)->f_type == DTYPE_KQUEUE) { + *fpp = NULL; + if (i < newfdp->fd_freefile) + newfdp->fd_freefile = i; + } + if (*fpp == NULL && i == newfdp->fd_lastfile && i > 0) + newfdp->fd_lastfile--; + } + newfdp->fd_knlist = NULL; + newfdp->fd_knlistsize = -1; + newfdp->fd_knhash = NULL; + newfdp->fd_knhashmask = 0; + } + fpp = newfdp->fd_ofiles; flags = newfdp->fd_ofileflags; for (i = newfdp->fd_lastfile; i-- >= 0; fpp++, flags++) @@ -1060,31 +1152,69 @@ fdfree(p) struct proc *p; { struct filedesc *fdp; - struct file **fpp; + struct file *fp; int i; struct vnode *tvp; + /* Certain daemons might not have file descriptors */ if ((fdp = p->p_fd) == NULL) return; + if (--fdp->fd_refcnt > 0) return; - p->p_fd = NULL; + + /* Last reference: the structure can't change out from under us */ if (fdp->fd_nfiles > 0) { - fpp = fdp->fd_ofiles; - for (i = fdp->fd_lastfile; i-- >= 0; fpp++) - if (*fpp) - (void) closef(*fpp, p); + for (i = fdp->fd_lastfile; i >= 0; i--) +#if 1 /* WORKAROUND */ + /* + * Merlot: need to remove the bogus f_data check + * from the following "if" statement. It's there + * because of the network/kernel funnel race on a + * close of a socket vs. fdfree on exit. See + * Radar rdar://problem/3365650 for details, but + * the short version is: the comment before the "if" + * above is wrong under certain circumstances. + * + * We have to do this twice, in case knote_fdclose() + * results in a block. + * + * This works because an fdfree() will set all fields + * in the struct file to -1. + */ + if ((fp = fdp->fd_ofiles[i]) != NULL && + fp->f_data != (caddr_t)-1) { + if (i < fdp->fd_knlistsize) + knote_fdclose(p, i); + if (fp->f_data != (caddr_t)-1) + (void) closef(fp, p); + } +#else /* !WORKAROUND */ + if ((fp = fdp->fd_ofiles[i]) != NULL) { + if (i < fdp->fd_knlistsize) + knote_fdclose(p, i); + (void) closef(fp, p); + } +#endif /* !WORKAROUND */ FREE_ZONE(fdp->fd_ofiles, fdp->fd_nfiles * OFILESIZE, M_OFILETABL); } + tvp = fdp->fd_cdir; fdp->fd_cdir = NULL; vrele(tvp); + if (fdp->fd_rdir) { tvp = fdp->fd_rdir; fdp->fd_rdir = NULL; vrele(tvp); } + + if (fdp->fd_knlist) + FREE(fdp->fd_knlist, M_KQUEUE); + if (fdp->fd_knhash) + FREE(fdp->fd_knhash, M_KQUEUE); + FREE_ZONE(fdp, sizeof *fdp, M_FILEDESC); } @@ -1175,6 +1305,7 @@ flock(p, uap, retval) struct vnode *vp; struct flock lf; + AUDIT_ARG(fd, uap->fd); if ((u_int)fd >= fdp->fd_nfiles || (fp = fdp->fd_ofiles[fd]) == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) @@ -1182,6 +1313,7 @@ flock(p, uap, retval) if (fp->f_type != DTYPE_VNODE) return (EOPNOTSUPP); vp = (struct vnode *)fp->f_data; + AUDIT_ARG(vnpath, vp, ARG_VNODE1); lf.l_whence = SEEK_SET; lf.l_start = 0; lf.l_len = 0; diff --git a/bsd/kern/kern_event.c b/bsd/kern/kern_event.c index f298f525b..0b1425c07 100644 --- a/bsd/kern/kern_event.c +++ b/bsd/kern/kern_event.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -23,10 +23,1091 @@ * @APPLE_LICENSE_HEADER_END@ * */ +/*- + * Copyright (c) 1999,2000,2001 Jonathan Lemon + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ /* * @(#)kern_event.c 1.0 (3/31/2000) */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); + +static int kqueue_scan(struct file *fp, int maxevents, + struct kevent *ulistp, const struct timespec *timeout, + register_t *retval, struct proc *p); +static void kqueue_wakeup(struct kqueue *kq); + +static int kqueue_read __P((struct file *fp, struct uio *uio, + struct ucred *cred, int flags, struct proc *p)); +static int kqueue_write __P((struct file *fp, struct uio *uio, + struct ucred *cred, int flags, struct proc *p)); +static int kqueue_ioctl __P((struct file *fp, u_long com, caddr_t data, + struct proc *p)); +static int kqueue_select __P((struct file *fp, int which, void *wql, + struct proc *p)); +static int kqueue_close __P((struct file *fp, struct proc *p)); +static int kqueue_kqfilter __P((struct file *fp, struct knote *kn, struct proc *p)); + +static struct fileops kqueueops = { + kqueue_read, + kqueue_write, + kqueue_ioctl, + kqueue_select, + kqueue_close, + kqueue_kqfilter +}; + +static void knote_fdpattach(struct knote *kn, struct filedesc *fdp); +static void knote_drop(struct knote *kn, struct proc *p); +static void knote_enqueue(struct knote *kn); +static void knote_dequeue(struct knote *kn); +static struct knote *knote_alloc(void); +static void knote_free(struct knote *kn); + +static int filt_fileattach(struct knote *kn); +static struct filterops file_filtops = + { 1, filt_fileattach, NULL, NULL }; + +static void filt_kqdetach(struct knote *kn); +static int filt_kqueue(struct knote *kn, long hint); +static struct filterops kqread_filtops = + { 1, NULL, filt_kqdetach, filt_kqueue }; + +/* + * JMM - placeholder for not-yet-implemented filters + */ +static int filt_badattach(struct knote *kn); +static struct filterops bad_filtops = + { 0, filt_badattach, 0 
, 0 }; + +static int filt_procattach(struct knote *kn); +static void filt_procdetach(struct knote *kn); +static int filt_proc(struct knote *kn, long hint); + +static struct filterops proc_filtops = + { 0, filt_procattach, filt_procdetach, filt_proc }; + +extern struct filterops fs_filtops; + +extern struct filterops sig_filtops; + +#if 0 +/* JMM - We don't implement these now */ +static void filt_timerexpire(void *knx); +static int filt_timerattach(struct knote *kn); +static void filt_timerdetach(struct knote *kn); +static int filt_timer(struct knote *kn, long hint); + +static struct filterops timer_filtops = + { 0, filt_timerattach, filt_timerdetach, filt_timer }; + +static int kq_ncallouts = 0; +static int kq_calloutmax = (4 * 1024); + +SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW, + &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue"); +#endif /* 0 */ + +static zone_t knote_zone; + +#define KNOTE_ACTIVATE(kn) do { \ + kn->kn_status |= KN_ACTIVE; \ + if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \ + knote_enqueue(kn); \ +} while(0) + +#define KN_HASHSIZE 64 /* XXX should be tunable */ +#define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) + +#if 0 +extern struct filterops aio_filtops; +#endif + +/* + * Table for all system-defined filters. + */ +static struct filterops *sysfilt_ops[] = { + &file_filtops, /* EVFILT_READ */ + &file_filtops, /* EVFILT_WRITE */ +#if 0 + &aio_filtops, /* EVFILT_AIO */ +#else + &bad_filtops, /* EVFILT_AIO */ +#endif + &file_filtops, /* EVFILT_VNODE */ + &proc_filtops, /* EVFILT_PROC */ + &sig_filtops, /* EVFILT_SIGNAL */ +#if 0 + &timer_filtops, /* EVFILT_TIMER */ +#else + &bad_filtops, /* EVFILT_TIMER */ +#endif + &bad_filtops, /* EVFILT_MACHPORT */ + &fs_filtops /* EVFILT_FS */ +}; + +static int +filt_fileattach(struct knote *kn) +{ + + return (fo_kqfilter(kn->kn_fp, kn, current_proc())); +} + +static void +filt_kqdetach(struct knote *kn) +{ + struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data; + + if (kq->kq_state & KQ_SEL) + return; + + KNOTE_DETACH(&kq->kq_sel.si_note, kn); +} + +/*ARGSUSED*/ +static int +filt_kqueue(struct knote *kn, long hint) +{ + struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data; + + kn->kn_data = kq->kq_count; + return (kn->kn_data > 0); +} + +static int +filt_procattach(struct knote *kn) +{ + struct proc *p; + + p = pfind(kn->kn_id); + if (p == NULL) + return (ESRCH); + if (! PRISON_CHECK(current_proc(), p)) + return (EACCES); + + kn->kn_ptr.p_proc = p; + kn->kn_flags |= EV_CLEAR; /* automatically set */ + + /* + * internal flag indicating registration done by kernel + */ + if (kn->kn_flags & EV_FLAG1) { + kn->kn_data = kn->kn_sdata; /* ppid */ + kn->kn_fflags = NOTE_CHILD; + kn->kn_flags &= ~EV_FLAG1; + } + + /* XXX lock the proc here while adding to the list? */ + KNOTE_ATTACH(&p->p_klist, kn); + + return (0); +} + +/* + * The knote may be attached to a different process, which may exit, + * leaving nothing for the knote to be attached to. So when the process + * exits, the knote is marked as DETACHED and also flagged as ONESHOT so + * it will be deleted when read out. However, as part of the knote deletion, + * this routine is called, so a check is needed to avoid actually performing + * a detach, because the original process does not exist any more. + */ +static void +filt_procdetach(struct knote *kn) +{ + struct proc *p = kn->kn_ptr.p_proc; + + if (kn->kn_status & KN_DETACHED) + return; + + /* XXX locking? this might modify another process.
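+ * (These paths are serialized by the BSD funnel today; a per-proc + * klist lock would be the cleaner answer -- an observation, not + * current behavior.)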
*/ + KNOTE_DETACH(&p->p_klist, kn); +} + +static int +filt_proc(struct knote *kn, long hint) +{ + u_int event; + + /* + * mask off extra data + */ + event = (u_int)hint & NOTE_PCTRLMASK; + + /* + * if the user is interested in this event, record it. + */ + if (kn->kn_sfflags & event) + kn->kn_fflags |= event; + + /* + * process is gone, so flag the event as finished. + */ + if (event == NOTE_EXIT) { + kn->kn_status |= KN_DETACHED; + kn->kn_flags |= (EV_EOF | EV_ONESHOT); + return (1); + } + + /* + * process forked, and user wants to track the new process, + * so attach a new knote to it, and immediately report an + * event with the parent's pid. + */ + if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) { + struct kevent kev; + int error; + + /* + * register knote with new process. + */ + kev.ident = hint & NOTE_PDATAMASK; /* pid */ + kev.filter = kn->kn_filter; + kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; + kev.fflags = kn->kn_sfflags; + kev.data = kn->kn_id; /* parent */ + kev.udata = kn->kn_kevent.udata; /* preserve udata */ + error = kqueue_register(kn->kn_kq, &kev, NULL); + if (error) + kn->kn_fflags |= NOTE_TRACKERR; + } + + return (kn->kn_fflags != 0); +} + +#if 0 +static void +filt_timerexpire(void *knx) +{ + struct knote *kn = knx; + struct callout *calloutp; + struct timeval tv; + int tticks; + + kn->kn_data++; + KNOTE_ACTIVATE(kn); + + if ((kn->kn_flags & EV_ONESHOT) == 0) { + tv.tv_sec = kn->kn_sdata / 1000; + tv.tv_usec = (kn->kn_sdata % 1000) * 1000; + tticks = tvtohz(&tv); + calloutp = (struct callout *)kn->kn_hook; + callout_reset(calloutp, tticks, filt_timerexpire, kn); + } +} + +/* + * data contains amount of time to sleep, in milliseconds + */ +static int +filt_timerattach(struct knote *kn) +{ + struct callout *calloutp; + struct timeval tv; + int tticks; + + if (kq_ncallouts >= kq_calloutmax) + return (ENOMEM); + kq_ncallouts++; + + tv.tv_sec = kn->kn_sdata / 1000; + tv.tv_usec = (kn->kn_sdata % 1000) * 1000; + tticks = tvtohz(&tv); + + kn->kn_flags |= EV_CLEAR; /* automatically set */ + MALLOC(calloutp, struct callout *, sizeof(*calloutp), + M_KQUEUE, M_WAITOK); + callout_init(calloutp); + callout_reset(calloutp, tticks, filt_timerexpire, kn); + kn->kn_hook = (caddr_t)calloutp; + + return (0); +} + +static void +filt_timerdetach(struct knote *kn) +{ + struct callout *calloutp; + + calloutp = (struct callout *)kn->kn_hook; + callout_stop(calloutp); + FREE(calloutp, M_KQUEUE); + kq_ncallouts--; +} + +static int +filt_timer(struct knote *kn, long hint) +{ + + return (kn->kn_data != 0); +} +#endif /* 0 */ + +/* + * JMM - placeholder for not-yet-implemented filters + */ +static int +filt_badattach(struct knote *kn) +{ + return(EOPNOTSUPP); +} + +#ifndef _SYS_SYSPROTO_H_ +struct kqueue_args { + int dummy; +}; +#endif + +int +kqueue(struct proc *p, struct kqueue_args *uap, register_t *retval) +{ + struct filedesc *fdp = p->p_fd; + struct kqueue *kq; + struct file *fp; + int fd, error; + + error = falloc(p, &fp, &fd); + if (error) + return (error); + fp->f_flag = FREAD | FWRITE; + fp->f_type = DTYPE_KQUEUE; + fp->f_ops = &kqueueops; + kq = (struct kqueue *)_MALLOC(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO); + TAILQ_INIT(&kq->kq_head); + fp->f_data = (caddr_t)kq; + *retval = fd; + if (fdp->fd_knlistsize < 0) + fdp->fd_knlistsize = 0; /* this process has a kq */ + kq->kq_fdp = fdp; + return (error); +} + +#ifndef _SYS_SYSPROTO_H_ +struct kqueue_portset_np_args { + int fd; +}; +#endif +int +kqueue_portset_np(struct proc *p, struct 
kqueue_portset_np_args *uap, register_t *retval) +{ + /* JMM - Placeholder for now */ + return (EOPNOTSUPP); +} + +#ifndef _SYS_SYSPROTO_H_ +struct kqueue_from_portset_np_args { + int fd; +}; +#endif +int +kqueue_from_portset_np(struct proc *p, struct kqueue_from_portset_np_args *uap, register_t *retval) +{ + /* JMM - Placeholder for now */ + return (EOPNOTSUPP); +} + +#if !0 +/* JMM - We don't implement this yet */ +#define fhold(fp) +#define fdrop(fp, p) +#endif /* !0 */ + +#ifndef _SYS_SYSPROTO_H_ +struct kevent_args { + int fd; + const struct kevent *changelist; + int nchanges; + struct kevent *eventlist; + int nevents; + const struct timespec *timeout; +}; +#endif +int +kevent(struct proc *p, struct kevent_args *uap, register_t *retval) +{ + struct filedesc* fdp = p->p_fd; + struct kqueue *kq; + struct file *fp = NULL; + struct timespec ts; + int i, nerrors, error; + + if (uap->timeout != NULL) { + error = copyin((caddr_t)uap->timeout, (caddr_t)&ts, sizeof(ts)); + if (error) + goto done; + uap->timeout = &ts; + } + + if (((u_int)uap->fd) >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[uap->fd]) == NULL || + (fp->f_type != DTYPE_KQUEUE)) + return (EBADF); + + fhold(fp); + + kq = (struct kqueue *)fp->f_data; + nerrors = 0; + + while (uap->nchanges > 0) { + int i; + int n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges; + struct kevent kq_kev[n]; + + error = copyin((caddr_t)uap->changelist, (caddr_t)kq_kev, + n * sizeof(struct kevent)); + if (error) + goto done; + for (i = 0; i < n; i++) { + struct kevent *kevp = &kq_kev[i]; + + kevp->flags &= ~EV_SYSFLAGS; + error = kqueue_register(kq, kevp, p); + if (error) { + if (uap->nevents != 0) { + kevp->flags = EV_ERROR; + kevp->data = error; + (void) copyout((caddr_t)kevp, + (caddr_t)uap->eventlist, + sizeof(*kevp)); + uap->eventlist++; + uap->nevents--; + nerrors++; + } else { + goto done; + } + } + } + uap->nchanges -= n; + uap->changelist += n; + } + if (nerrors) { + *retval = nerrors; + error = 0; + goto done; + } + + error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, retval, p); +done: + if (fp != NULL) + fdrop(fp, p); + return (error); +} + +int +kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p) +{ + struct filedesc *fdp = kq->kq_fdp; + struct filterops *fops; + struct file *fp = NULL; + struct knote *kn = NULL; + int s, error = 0; + + if (kev->filter < 0) { + if (kev->filter + EVFILT_SYSCOUNT < 0) + return (EINVAL); + fops = sysfilt_ops[~kev->filter]; /* to 0-base index */ + } else { + /* + * XXX + * filter attach routine is responsible for ensuring that + * the identifier can be attached to it.
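+ * (A positive filter value would name a user-defined filter; none + * are supported here, so such requests are rejected below.)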
+ */ + printf("unknown filter: %d\n", kev->filter); + return (EINVAL); + } + + if (fops->f_isfd) { + /* validate descriptor */ + if ((u_int)kev->ident >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[kev->ident]) == NULL) + return (EBADF); + fhold(fp); + + if (kev->ident < fdp->fd_knlistsize) { + SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link) + if (kq == kn->kn_kq && + kev->filter == kn->kn_filter) + break; + } + } else { + if (fdp->fd_knhashmask != 0) { + struct klist *list; + + list = &fdp->fd_knhash[ + KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)]; + SLIST_FOREACH(kn, list, kn_link) + if (kev->ident == kn->kn_id && + kq == kn->kn_kq && + kev->filter == kn->kn_filter) + break; + } + } + + if (kn == NULL && ((kev->flags & EV_ADD) == 0)) { + error = ENOENT; + goto done; + } + + /* + * kn now contains the matching knote, or NULL if no match + */ + if (kev->flags & EV_ADD) { + + if (kn == NULL) { + kn = knote_alloc(); + if (kn == NULL) { + error = ENOMEM; + goto done; + } + kn->kn_fp = fp; + kn->kn_kq = kq; + kn->kn_fop = fops; + + /* + * apply reference count to knote structure, and + * do not release it at the end of this routine. + */ + fp = NULL; + + kn->kn_sfflags = kev->fflags; + kn->kn_sdata = kev->data; + kev->fflags = 0; + kev->data = 0; + kn->kn_kevent = *kev; + + knote_fdpattach(kn, fdp); + if ((error = fops->f_attach(kn)) != 0) { + knote_drop(kn, p); + goto done; + } + } else { + /* + * The user may change some filter values after the + * initial EV_ADD, but doing so will not reset any + * filter which have already been triggered. + */ + kn->kn_sfflags = kev->fflags; + kn->kn_sdata = kev->data; + kn->kn_kevent.udata = kev->udata; + } + + s = splhigh(); + if (kn->kn_fop->f_event(kn, 0)) + KNOTE_ACTIVATE(kn); + splx(s); + + } else if (kev->flags & EV_DELETE) { + kn->kn_fop->f_detach(kn); + knote_drop(kn, p); + goto done; + } + + if ((kev->flags & EV_DISABLE) && + ((kn->kn_status & KN_DISABLED) == 0)) { + s = splhigh(); + kn->kn_status |= KN_DISABLED; + splx(s); + } + + if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) { + s = splhigh(); + kn->kn_status &= ~KN_DISABLED; + if ((kn->kn_status & KN_ACTIVE) && + ((kn->kn_status & KN_QUEUED) == 0)) + knote_enqueue(kn); + splx(s); + } + +done: + if (fp != NULL) + fdrop(fp, p); + return (error); +} + +static int +kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp, + const struct timespec *tsp, register_t *retval, struct proc *p) +{ + struct kqueue *kq = (struct kqueue *)fp->f_data; + struct timeval atv, rtv, ttv; + int s, count, timeout, error = 0; + struct knote marker; + + count = maxevents; + if (count == 0) + goto done; + + if (tsp != NULL) { + TIMESPEC_TO_TIMEVAL(&atv, tsp); + if (itimerfix(&atv)) { + error = EINVAL; + goto done; + } + if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) + timeout = -1; + else + timeout = atv.tv_sec > 24 * 60 * 60 ? + 24 * 60 * 60 * hz : tvtohz(&atv); + getmicrouptime(&rtv); + timevaladd(&atv, &rtv); + } else { + atv.tv_sec = 0; + atv.tv_usec = 0; + timeout = 0; + } + goto start; + +retry: + if (atv.tv_sec || atv.tv_usec) { + getmicrouptime(&rtv); + if (timevalcmp(&rtv, &atv, >=)) + goto done; + ttv = atv; + timevalsub(&ttv, &rtv); + timeout = ttv.tv_sec > 24 * 60 * 60 ? 
+ 24 * 60 * 60 * hz : tvtohz(&ttv); + } + +start: + s = splhigh(); + if (kq->kq_count == 0) { + if (timeout < 0) { + error = EWOULDBLOCK; + } else { + kq->kq_state |= KQ_SLEEP; + error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout); + } + splx(s); + if (error == 0) + goto retry; + /* don't restart after signals... */ + if (error == ERESTART) + error = EINTR; + else if (error == EWOULDBLOCK) + error = 0; + goto done; + } + + /* JMM - This marker trick doesn't work with multiple threads */ + TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe); + while (count) { + int maxkev = (count > KQ_NEVENTS) ? KQ_NEVENTS : count; + struct kevent kq_kev[maxkev]; + struct kevent *kevp = kq_kev; + struct knote *kn; + int nkev = 0; + + while (nkev < maxkev) { + kn = TAILQ_FIRST(&kq->kq_head); + TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); + if (kn == &marker) { + if (count == maxevents) + goto retry; + break; + } else if (kn->kn_status & KN_DISABLED) { + kn->kn_status &= ~KN_QUEUED; + kq->kq_count--; + continue; + } else if ((kn->kn_flags & EV_ONESHOT) == 0 && + kn->kn_fop->f_event(kn, 0) == 0) { + kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); + kq->kq_count--; + continue; + } + + *kevp = kn->kn_kevent; + kevp++; + nkev++; + count--; + + if (kn->kn_flags & EV_ONESHOT) { + kn->kn_status &= ~KN_QUEUED; + kq->kq_count--; + splx(s); + kn->kn_fop->f_detach(kn); + knote_drop(kn, p); + s = splhigh(); + } else if (kn->kn_flags & EV_CLEAR) { + kn->kn_data = 0; + kn->kn_fflags = 0; + kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); + kq->kq_count--; + } else { + TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); + } + } + splx(s); + error = copyout((caddr_t)kq_kev, (caddr_t)ulistp, + sizeof(struct kevent) * nkev); + if (kn == &marker) + goto done; + ulistp += nkev; + s = splhigh(); + if (error) + break; + } + TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe); + splx(s); +done: + *retval = maxevents - count; + return (error); +} + +/* + * XXX + * This could be expanded to call kqueue_scan, if desired. 
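+ * As it stands, read(2) and write(2) on a kqueue descriptor simply + * fail with ENXIO below; kevent(2) is the only way to collect events.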
+ */ +/*ARGSUSED*/ +static int +kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, + int flags, struct proc *p) +{ + return (ENXIO); +} + +/*ARGSUSED*/ +static int +kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, + int flags, struct proc *p) +{ + return (ENXIO); +} + +/*ARGSUSED*/ +static int +kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p) +{ + return (ENOTTY); +} + +/*ARGSUSED*/ +static int +kqueue_select(struct file *fp, int which, void *wql, struct proc *p) +{ + struct kqueue *kq = (struct kqueue *)fp->f_data; + int retnum = 0; + int s = splnet(); + + if (which == FREAD) { + if (kq->kq_count) { + retnum = 1; + } else { + selrecord(p, &kq->kq_sel, wql); + kq->kq_state |= KQ_SEL; + } + } + splx(s); + return (retnum); +} + +/*ARGSUSED*/ +static int +kqueue_close(struct file *fp, struct proc *p) +{ + struct kqueue *kq = (struct kqueue *)fp->f_data; + struct filedesc *fdp = p->p_fd; + struct knote **knp, *kn, *kn0; + int i; + + for (i = 0; i < fdp->fd_knlistsize; i++) { + knp = &SLIST_FIRST(&fdp->fd_knlist[i]); + kn = *knp; + while (kn != NULL) { + kn0 = SLIST_NEXT(kn, kn_link); + if (kq == kn->kn_kq) { + kn->kn_fop->f_detach(kn); + fdrop(kn->kn_fp, p); + knote_free(kn); + *knp = kn0; + } else { + knp = &SLIST_NEXT(kn, kn_link); + } + kn = kn0; + } + } + if (fdp->fd_knhashmask != 0) { + for (i = 0; i < fdp->fd_knhashmask + 1; i++) { + knp = &SLIST_FIRST(&fdp->fd_knhash[i]); + kn = *knp; + while (kn != NULL) { + kn0 = SLIST_NEXT(kn, kn_link); + if (kq == kn->kn_kq) { + kn->kn_fop->f_detach(kn); + /* XXX non-fd release of kn->kn_ptr */ + knote_free(kn); + *knp = kn0; + } else { + knp = &SLIST_NEXT(kn, kn_link); + } + kn = kn0; + } + } + } + _FREE(kq, M_KQUEUE); + fp->f_data = NULL; + + return (0); +} + +/*ARGSUSED*/ +static int +kqueue_kqfilter(struct file *fp, struct knote *kn, struct proc *p) +{ + struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data; + + if (kn->kn_filter != EVFILT_READ || (kq->kq_state & KQ_SEL)) + return (1); + + kn->kn_fop = &kqread_filtops; + KNOTE_ATTACH(&kq->kq_sel.si_note, kn); + return (0); +} + +/*ARGSUSED*/ +int +kqueue_stat(struct file *fp, struct stat *st, struct proc *p) +{ + struct kqueue *kq = (struct kqueue *)fp->f_data; + + bzero((void *)st, sizeof(*st)); + st->st_size = kq->kq_count; + st->st_blksize = sizeof(struct kevent); + st->st_mode = S_IFIFO; + return (0); +} + +static void +kqueue_wakeup(struct kqueue *kq) +{ + + if (kq->kq_state & KQ_SLEEP) { + kq->kq_state &= ~KQ_SLEEP; + wakeup(kq); + } + if (kq->kq_state & KQ_SEL) { + // kq->kq_state &= ~KQ_SEL; /* remove for now */ + selwakeup(&kq->kq_sel); + } else + KNOTE(&kq->kq_sel.si_note, 0); +} + +void +klist_init(struct klist *list) +{ + SLIST_INIT(list); +} + +/* + * walk down a list of knotes, activating them if their event has triggered. + */ +void +knote(struct klist *list, long hint) +{ + struct knote *kn; + + SLIST_FOREACH(kn, list, kn_selnext) + if (kn->kn_fop->f_event(kn, hint)) + KNOTE_ACTIVATE(kn); +} + +/* + * attach a knote to the specified list. Return true if this is the first entry. + */ +int +knote_attach(struct klist *list, struct knote *kn) +{ + int ret = SLIST_EMPTY(list); + SLIST_INSERT_HEAD(list, kn, kn_selnext); + return ret; +} + +/* + * detach a knote from the specified list. Return true if that was the last entry. 
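+ * + * A caller sketch (sb here is a sockbuf; SB_KNOTE is the flag the + * socket filters use -- illustrative, not part of this file): + * + * if (knote_detach(&sb->sb_sel.si_note, kn)) + * sb->sb_flags &= ~SB_KNOTE;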
+ */ +int +knote_detach(struct klist *list, struct knote *kn) +{ + SLIST_REMOVE(list, kn, knote, kn_selnext); + return SLIST_EMPTY(list); +} + +/* + * remove all knotes from a specified klist + */ +void +knote_remove(struct proc *p, struct klist *list) +{ + struct knote *kn; + + while ((kn = SLIST_FIRST(list)) != NULL) { + kn->kn_fop->f_detach(kn); + knote_drop(kn, p); + } +} + +/* + * remove all knotes referencing a specified fd + */ +void +knote_fdclose(struct proc *p, int fd) +{ + struct filedesc *fdp = p->p_fd; + struct klist *list = &fdp->fd_knlist[fd]; + + knote_remove(p, list); +} + +static void +knote_fdpattach(struct knote *kn, struct filedesc *fdp) +{ + struct klist *list; + int size; + + if (! kn->kn_fop->f_isfd) { + if (fdp->fd_knhashmask == 0) + fdp->fd_knhash = hashinit(KN_HASHSIZE, M_KQUEUE, + &fdp->fd_knhashmask); + list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)]; + goto done; + } + + if (fdp->fd_knlistsize <= kn->kn_id) { + size = fdp->fd_knlistsize; + while (size <= kn->kn_id) + size += KQEXTENT; + MALLOC(list, struct klist *, + size * sizeof(struct klist *), M_KQUEUE, M_WAITOK); + bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list, + fdp->fd_knlistsize * sizeof(struct klist *)); + bzero((caddr_t)list + + fdp->fd_knlistsize * sizeof(struct klist *), + (size - fdp->fd_knlistsize) * sizeof(struct klist *)); + if (fdp->fd_knlist != NULL) + FREE(fdp->fd_knlist, M_KQUEUE); + fdp->fd_knlistsize = size; + fdp->fd_knlist = list; + } + list = &fdp->fd_knlist[kn->kn_id]; +done: + SLIST_INSERT_HEAD(list, kn, kn_link); + kn->kn_status = 0; +} + +/* + * should be called at spl == 0, since we don't want to hold spl + * while calling fdrop and free. + */ +static void +knote_drop(struct knote *kn, struct proc *p) +{ + struct filedesc *fdp = p->p_fd; + struct klist *list; + + if (kn->kn_fop->f_isfd) + list = &fdp->fd_knlist[kn->kn_id]; + else + list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)]; + + SLIST_REMOVE(list, kn, knote, kn_link); + if (kn->kn_status & KN_QUEUED) + knote_dequeue(kn); + if (kn->kn_fop->f_isfd) + fdrop(kn->kn_fp, p); + knote_free(kn); +} + + +static void +knote_enqueue(struct knote *kn) +{ + struct kqueue *kq = kn->kn_kq; + int s = splhigh(); + + KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); + + TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); + kn->kn_status |= KN_QUEUED; + kq->kq_count++; + splx(s); + kqueue_wakeup(kq); +} + +static void +knote_dequeue(struct knote *kn) +{ + struct kqueue *kq = kn->kn_kq; + int s = splhigh(); + + KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); + + TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); + kn->kn_status &= ~KN_QUEUED; + kq->kq_count--; + splx(s); +} + +void +knote_init(void) +{ + knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote), 8192, "knote zone"); +} +SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL) + +static struct knote * +knote_alloc(void) +{ + return ((struct knote *)zalloc(knote_zone)); +} + +static void +knote_free(struct knote *kn) +{ + zfree(knote_zone, (vm_offset_t)kn); +} + #include #include #include @@ -77,6 +1158,10 @@ int kev_attach(struct socket *so, int proto, struct proc *p) int error; struct kern_event_pcb *ev_pcb; + error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE); + if (error) + return error; + ev_pcb = _MALLOC(sizeof(struct kern_event_pcb), M_PCB, M_WAITOK); if (ev_pcb == 0) return ENOBUFS; @@ -86,9 +1171,6 @@ int kev_attach(struct socket *so, int proto, struct proc *p) so->so_pcb = (caddr_t) ev_pcb; 
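+ /* + * With soreserve() done first (above), a reservation failure can + * no longer leave this pcb linked on kern_event_head with + * unreserved buffers (a reading of the intent of this reordering). + */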
LIST_INSERT_HEAD(&kern_event_head, ev_pcb, ev_link); - error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE); - if (error) - return error; return 0; } @@ -98,9 +1180,11 @@ int kev_detach(struct socket *so) { struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb; - LIST_REMOVE(ev_pcb, ev_link); - if (ev_pcb) - FREE(ev_pcb, M_PCB); + if (ev_pcb != 0) { + LIST_REMOVE(ev_pcb, ev_link); + FREE(ev_pcb, M_PCB); + so->so_pcb = 0; + } return 0; } diff --git a/bsd/kern/kern_exec.c b/bsd/kern/kern_exec.c index c5f9ce416..c39e7ecc2 100644 --- a/bsd/kern/kern_exec.c +++ b/bsd/kern/kern_exec.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -89,13 +89,18 @@ #include #include #include +#include #include #include #include +#include #include #include + +extern vm_map_t vm_map_switch(vm_map_t map); /* XXX */ + #include #include @@ -134,6 +139,45 @@ execv(p, args, retval) return (execve(p, args, retval)); } +extern char classichandler[32]; +extern long classichandler_fsid; +extern long classichandler_fileid; + +/* + * Helper routine to get rid of a loop in execve. Given a pointer to + * something for the arg list (which might be in kernel space or in user + * space), copy it into the kernel buffer at the currentWritePt. This code + * does the proper thing to get the data transferred. + * bytesWritten, currentWritePt, and bytesLeft are kept up-to-date. + */ + +static int copyArgument(char *argument, int pointerInKernel, + int *bytesWritten,char **currentWritePt, + int *bytesLeft){ + int error = 0; + do { + size_t len = 0; + if (*bytesLeft <= 0) { + error = E2BIG; + break; + } + if (pointerInKernel == UIO_SYSSPACE) { + error = copystr(argument, *currentWritePt, (unsigned)*bytesLeft, &len); + } else { + /* + * pointer in kernel == UIO_USERSPACE + * Copy in from user space. + */ + error = copyinstr((caddr_t)argument, *currentWritePt, (unsigned)*bytesLeft, + &len); + } + *currentWritePt += len; + *bytesWritten += len; + *bytesLeft -= len; + } while (error == ENAMETOOLONG); + return error; +} + /* ARGSUSED */ int execve(p, uap, retval) @@ -143,12 +187,14 @@ execve(p, uap, retval) { register struct ucred *cred = p->p_ucred; register struct filedesc *fdp = p->p_fd; - register nc; - register char *cp; + int nc; + char *cp; int na, ne, ucp, ap, cc; unsigned len; - int indir; - char *sharg; + int executingInterpreter=0; + + int executingClassic=0; + char binaryWithClassicName[sizeof(p->p_comm)] = {0}; char *execnamep; struct vnode *vp; struct vattr vattr; @@ -157,6 +203,10 @@ execve(p, uap, retval) struct nameidata nd; struct ps_strings ps; #define SHSIZE 512 + /* Argument(s) to an interpreter. If we're executing a shell + * script, the name (#!/bin/csh) is allowed to be followed by + * arguments. cfarg holds these arguments. + */ char cfarg[SHSIZE]; boolean_t is_fat; kern_return_t ret; @@ -169,7 +219,10 @@ execve(p, uap, retval) vm_map_t old_map; vm_map_t map; int i; - boolean_t new_shared_regions = FALSE; + boolean_t clean_regions = FALSE; + shared_region_mapping_t shared_region = NULL; + shared_region_mapping_t initial_region = NULL; + union { /* #! and name of interpreter */ char ex_shell[SHSIZE]; @@ -193,6 +246,12 @@ execve(p, uap, retval) unsigned long arch_size = 0; char *ws_cache_name = NULL; /* used for pre-heat */ + /* + * XXXAUDIT: Currently, we only audit the pathname of the binary. + * There may also be poor interaction with dyld. 
+ */ + + cfarg[0] = '\0'; /* initialize to null value. */ task = current_task(); thr_act = current_act(); uthread = get_bsdthread_info(thr_act); @@ -214,7 +273,7 @@ execve(p, uap, retval) if (error) return(error); - savedpath = execargs; + savedpath = (char *)execargs; /* * To support new app package launching for Mac OS X, the dyld @@ -229,16 +288,26 @@ execve(p, uap, retval) * absolute pathname. This might be unacceptable for dyld. */ /* XXX We could optimize to avoid copyinstr in the namei() */ + + /* + * XXXAUDIT: Note: the double copyin introduces an audit + * race. To correct this race, we must use a single + * copyin(). + */ - error = copyinstr(uap->fname, savedpath, MAXPATHLEN, &savedpathlen); - if (error) - return (error); + error = copyinstr(uap->fname, savedpath, + MAXPATHLEN, (size_t *)&savedpathlen); + if (error) { + execargs_free(execargs); + return(error); + } /* * copyinstr will put in savedpathlen, the count of * characters (including NULL) in the path. + * No app profiles under chroot */ - if(app_profile != 0) { + if((fdp->fd_rdir == NULLVP) && (app_profile != 0)) { /* grab the name of the file out of its path */ /* we will need this for lookup within the */ @@ -253,13 +322,14 @@ execve(p, uap, retval) } ws_cache_name++; } - + /* Save the name aside for future use */ execargsp = (vm_offset_t *)((char *)(execargs) + savedpathlen); - NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | SAVENAME, + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | SAVENAME | AUDITVNPATH1, UIO_USERSPACE, uap->fname, p); - if ((error = namei(&nd))) + error = namei(&nd); + if (error) goto bad1; vp = nd.ni_vp; VOP_LEASE(vp, p, p->p_ucred, LEASE_READ); @@ -273,7 +343,6 @@ execve(p, uap, retval) goto bad; } - indir = 0; if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED)) origvattr.va_mode &= ~(VSUID | VSGID); @@ -317,27 +386,46 @@ again: #endif /* lint */ mach_header = &exdata.mach_header; fat_header = &exdata.fat_header; - if (mach_header->magic == MH_MAGIC) + if ((mach_header->magic == MH_CIGAM) && + (classichandler[0] == 0)) { + error = EBADARCH; + goto bad; + } else if ((mach_header->magic == MH_MAGIC) || + (mach_header->magic == MH_CIGAM)) { is_fat = FALSE; - else if (fat_header->magic == FAT_MAGIC || - fat_header->magic == FAT_CIGAM) + } else if ((fat_header->magic == FAT_MAGIC) || + (fat_header->magic == FAT_CIGAM)) { is_fat = TRUE; - else if (mach_header->magic == MH_CIGAM) { - error = EBADARCH; - goto bad; } else { + /* If we've already redirected once from an interpreted file + * to an interpreter, don't permit the second time. + */ if (exdata.ex_shell[0] != '#' || exdata.ex_shell[1] != '!' || - indir) { + executingInterpreter) { error = ENOEXEC; goto bad; } + if (executingClassic == 1) { + error = EBADARCH; + goto bad; + } cp = &exdata.ex_shell[2]; /* skip "#!" */ while (cp < &exdata.ex_shell[SHSIZE]) { - if (*cp == '\t') + if (*cp == '\t') /* convert all tabs to spaces */ *cp = ' '; - else if (*cp == '\n') { - *cp = '\0'; + else if (*cp == '\n' || *cp == '#') { + *cp = '\0'; /* truncate the line at newline or comment */ + + /* go back and remove the spaces before the \n or # */ + /* todo: do we have to do this if we fix the passing of args to shells? */ + if ( cp != &exdata.ex_shell[2] ) { + do { + if ( *(cp-1) != ' ') + break; + *(--cp) = '\0'; + } while ( cp != &exdata.ex_shell[2] ); + } break; } cp++; @@ -369,14 +457,15 @@ again: * savedpathlen. +1 for NULL.
*/ savedpathlen = (cpnospace - execnamep + 1); - error = copystr(execnamep, savedpath, savedpathlen, &savedpathlen); + error = copystr(execnamep, savedpath, + savedpathlen, (size_t *)&savedpathlen); if (error) goto bad; /* Save the name aside for future use */ execargsp = (vm_offset_t *)((char *)(execargs) + savedpathlen); - indir = 1; + executingInterpreter= 1; vput(vp); nd.ni_cnd.cn_nameiop = LOOKUP; nd.ni_cnd.cn_flags = (nd.ni_cnd.cn_flags & HASBUF) | @@ -413,56 +502,7 @@ again: /* * Copy arguments into file in argdev area. */ - if (uap->argp) for (;;) { - ap = NULL; - sharg = NULL; - if (indir && na == 0) { - sharg = nd.ni_cnd.cn_nameptr; - ap = (int)sharg; - uap->argp++; /* ignore argv[0] */ - } else if (indir && (na == 1 && cfarg[0])) { - sharg = cfarg; - ap = (int)sharg; - } else if (indir && (na == 1 || (na == 2 && cfarg[0]))) - ap = (int)uap->fname; - else if (uap->argp) { - ap = fuword((caddr_t)uap->argp); - uap->argp++; - } - if (ap == NULL && uap->envp) { - uap->argp = NULL; - if ((ap = fuword((caddr_t)uap->envp)) != NULL) - uap->envp++, ne++; - } - if (ap == NULL) - break; - na++; - if (ap == -1) { - error = EFAULT; - break; - } - do { - if (nc >= (NCARGS - savedpathlen - 2*NBPW -1)) { - error = E2BIG; - break; - } - if (sharg) { - error = copystr(sharg, cp, (unsigned)cc, &len); - sharg += len; - } else { - error = copyinstr((caddr_t)ap, cp, (unsigned)cc, - &len); - ap += len; - } - cp += len; - nc += len; - cc -= len; - } while (error == ENAMETOOLONG); - if (error) { - goto bad; - } - } - nc = (nc + NBPW-1) & ~(NBPW-1); + /* * If we have a fat file, find "our" executable. @@ -471,7 +511,8 @@ again: /* * Look up our architecture in the fat file. */ - lret = fatfile_getarch(vp, (vm_offset_t)fat_header, &fat_arch); + lret = fatfile_getarch_affinity(vp,(vm_offset_t)fat_header, &fat_arch, + (p->p_flag & P_AFFINITY)); if (lret != LOAD_SUCCESS) { error = load_return_to_errno(lret); goto bad; @@ -493,7 +534,8 @@ again: } /* Is what we found a Mach-O executable */ - if (mach_header->magic != MH_MAGIC) { + if ((mach_header->magic != MH_MAGIC) && + (mach_header->magic != MH_CIGAM)) { error = ENOEXEC; goto bad; } @@ -508,10 +550,168 @@ again: arch_size = (u_long)vattr.va_size; } + if ( ! check_cpu_subtype(mach_header->cpusubtype) ) { + error = EBADARCH; + goto bad; + } + + if (mach_header->magic == MH_CIGAM) { + + int classicBinaryLen = nd.ni_cnd.cn_namelen; + if (classicBinaryLen > MAXCOMLEN) + classicBinaryLen = MAXCOMLEN; + bcopy((caddr_t)nd.ni_cnd.cn_nameptr, + (caddr_t)binaryWithClassicName, + (unsigned)classicBinaryLen); + binaryWithClassicName[classicBinaryLen] = '\0'; + executingClassic = 1; + + vput(vp); /* cleanup? */ + nd.ni_cnd.cn_nameiop = LOOKUP; + + nd.ni_cnd.cn_flags = (nd.ni_cnd.cn_flags & HASBUF) | + /* (FOLLOW | LOCKLEAF | SAVENAME) */ + (LOCKLEAF | SAVENAME); + nd.ni_segflg = UIO_SYSSPACE; + + nd.ni_dirp = classichandler; + if ((error = namei(&nd)) != 0) { + error = EBADARCH; + goto bad1; + } + vp = nd.ni_vp; + + VOP_LEASE(vp,p,cred,LEASE_READ); + if ((error = VOP_GETATTR(vp,&vattr,p->p_ucred,p))) { + goto bad; + } + goto again; + } + + if (uap->argp != NULL) { + /* geez -- why would argp ever be NULL, and why would we proceed? */ + + /* First, handle any argument massaging */ + if (executingInterpreter && executingClassic) { + error = copyArgument(classichandler,UIO_SYSSPACE,&nc,&cp,&cc); + na++; + if (error) goto bad; + + /* Now name the interpreter. 
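+ * The argv being assembled in this branch is, in order (a sketch + * of the intended layout): + * + * argv[0] classichandler + * argv[1] savedpath (the interpreter) + * argv[2] cfarg, if any + * argv[3] the original executable path + * + * followed by the caller's remaining arguments.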
*/ + error = copyArgument(savedpath,UIO_SYSSPACE,&nc,&cp,&cc); + na++; + if (error) goto bad; + + /* + * if we're running an interpreter, as we'd be passing the + * command line executable as an argument to the interpreter already. + * Doing "execve("myShellScript","bogusName",arg1,arg2,...) + * probably shouldn't ever let bogusName be seen by the shell + * script. + */ + + if (cfarg[0]) { + error = copyArgument(cfarg,UIO_SYSSPACE,&nc,&cp,&cc); + na++; + if (error) goto bad; + } + + char* originalExecutable = uap->fname; + error = copyArgument(originalExecutable,UIO_USERSPACE,&nc,&cp,&cc); + na++; + /* remove argv[0] b/c we've already placed it at */ + /* this point */ + uap->argp++; + if (error) goto bad; + + /* and continue with rest of the arguments. */ + } else if (executingClassic) { + error = copyArgument(classichandler,UIO_SYSSPACE,&nc,&cp,&cc); + na++; + if (error) goto bad; + + char* originalExecutable = uap->fname; + error = copyArgument(originalExecutable,UIO_USERSPACE,&nc,&cp,&cc); + if (error) goto bad; + uap->argp++; + na++; + + /* and rest of arguments continue as before. */ + } else if (executingInterpreter) { + char *actualExecutable = nd.ni_cnd.cn_nameptr; + error = copyArgument(actualExecutable,UIO_SYSSPACE,&nc,&cp,&cc); + na++; + /* remove argv[0] b/c we just placed it in the arg list. */ + uap->argp++; + if (error) goto bad; + /* Copy the argument in the interpreter first line if there + * was one. + */ + if (cfarg[0]) { + error = copyArgument(cfarg,UIO_SYSSPACE,&nc,&cp,&cc); + na++; + if (error) goto bad; + } + + /* copy the name of the file being interpreted, gotten from + * the structures passed in to execve. + */ + error = copyArgument(uap->fname,UIO_USERSPACE,&nc,&cp,&cc); + na++; + } + /* Now, get rest of arguments */ + while (uap->argp != NULL) { + char* userArgument = (char*)fuword((caddr_t) uap->argp); + uap->argp++; + if (userArgument == NULL) { + break; + } else if ((int)userArgument == -1) { + /* Um... why would it be -1? */ + error = EFAULT; + goto bad; + } + error = copyArgument(userArgument, UIO_USERSPACE,&nc,&cp,&cc); + if (error) goto bad; + na++; + } + /* Now, get the environment */ + while (uap->envp != NULL) { + char *userEnv = (char*) fuword((caddr_t) uap->envp); + uap->envp++; + if (userEnv == NULL) { + break; + } else if ((int)userEnv == -1) { + error = EFAULT; + goto bad; + } + error = copyArgument(userEnv,UIO_USERSPACE,&nc,&cp,&cc); + if (error) goto bad; + na++; + ne++; + } + } + + /* make sure there are nulls are the end!! */ + { + int cnt = 3; + char *mp = cp; + + while ( cnt-- ) + *mp++ = '\0'; + } + + /* and round up count of bytes written to next word. */ + nc = (nc + NBPW-1) & ~(NBPW-1); + + if (vattr.va_fsid == classichandler_fsid && + vattr.va_fileid == classichandler_fileid) { + executingClassic = 1; + } + if (vfexec) { kern_return_t result; - result = task_create_local(task, FALSE, FALSE, &new_task); + result = task_create_internal(task, FALSE, &new_task); if (result != KERN_SUCCESS) printf("execve: task_create failed. Code: 0x%x\n", result); p->task = new_task; @@ -526,35 +726,58 @@ again: uthread = get_bsdthread_info(thr_act); } else { map = VM_MAP_NULL; - } /* * Load the Mach-O file. 
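Once the argument and environment strings are in, the code above pads the block with trailing NULs and rounds the byte count up to a word boundary with (nc + NBPW-1) & ~(NBPW-1). The idiom relies on the word size being a power of two; a self-checking sketch:

    #include <assert.h>

    /* Round n up to the next multiple of w (w a power of two), the same
     * idiom as "nc = (nc + NBPW-1) & ~(NBPW-1)" in the exec path above. */
    static unsigned long
    round_word(unsigned long n, unsigned long w)
    {
        return (n + w - 1) & ~(w - 1);
    }

    int
    main(void)
    {
        assert(round_word(0, 4)  == 0);
        assert(round_word(1, 4)  == 4);
        assert(round_word(4, 4)  == 4);
        assert(round_word(13, 8) == 16);
        return 0;
    }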
*/ - VOP_UNLOCK(vp, 0, p); + VOP_UNLOCK(vp, 0, p); /* XXX */ if(ws_cache_name) { tws_handle_startup_file(task, cred->cr_uid, - ws_cache_name, vp, &new_shared_regions); + ws_cache_name, vp, &clean_regions); } - if (new_shared_regions) { - shared_region_mapping_t new_shared_region; - shared_region_mapping_t old_shared_region; - - if (shared_file_create_system_region(&new_shared_region)) - panic("couldn't create system_shared_region\n"); - - vm_get_shared_region(task, &old_shared_region); - vm_set_shared_region(task, new_shared_region); - shared_region_mapping_dealloc(old_shared_region); + vm_get_shared_region(task, &initial_region); + int parentIsClassic = (p->p_flag & P_CLASSIC); + struct vnode *rootDir = p->p_fd->fd_rdir; + + if ((parentIsClassic && !executingClassic) || + (!parentIsClassic && executingClassic)) { + shared_region = lookup_default_shared_region( + (int)rootDir, + (executingClassic ? + CPU_TYPE_POWERPC : + machine_slot[cpu_number()].cpu_type)); + if (shared_region == NULL) { + shared_region_mapping_t old_region; + shared_region_mapping_t new_region; + vm_get_shared_region(current_task(), &old_region); + /* grrrr... this sets current_task(), not task + * -- they're different (usually) + */ + shared_file_boot_time_init( + (int)rootDir, + (executingClassic ? + CPU_TYPE_POWERPC : + machine_slot[cpu_number()].cpu_type)); + if ( current_task() != task ) { + vm_get_shared_region(current_task(),&new_region); + vm_set_shared_region(task,new_region); + vm_set_shared_region(current_task(),old_region); + } + } else { + vm_set_shared_region(task, shared_region); + } + shared_region_mapping_dealloc(initial_region); } - + lret = load_machfile(vp, mach_header, arch_offset, - arch_size, &load_result, thr_act, map); + arch_size, &load_result, thr_act, map, clean_regions); if (lret != LOAD_SUCCESS) { error = load_return_to_errno(lret); + vrele(vp); + vp = NULL; goto badtoolate; } @@ -587,6 +810,14 @@ again: if (origvattr.va_mode & VSGID) p->p_ucred->cr_gid = origvattr.va_gid; + /* + * Have mach reset the task port. We don't want + * anyone who had the task port before a setuid + * exec to be able to access/control the task + * after. + */ + ipc_task_reset(task); + set_security_token(p); p->p_flag |= P_SUGID; @@ -626,13 +857,17 @@ again: p->p_cred->p_svuid = p->p_ucred->cr_uid; p->p_cred->p_svgid = p->p_ucred->cr_gid; + KNOTE(&p->p_klist, NOTE_EXEC); + if (!vfexec && (p->p_flag & P_TRACED)) psignal(p, SIGTRAP); if (error) { + vrele(vp); + vp = NULL; goto badtoolate; } - VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */ vput(vp); vp = NULL; @@ -652,7 +887,7 @@ again: */ - ucp = p->user_stack; + ucp = (int)p->user_stack; if (vfexec) { old_map = vm_map_switch(get_task_map(task)); } @@ -666,17 +901,26 @@ again: * the "path" at the begining of the execargs buffer. * copy it just before the string area. */ - savedpathlen = (savedpathlen + NBPW-1) & ~(NBPW-1); len = 0; - pathptr = ucp - savedpathlen; + pathptr = ucp - ((savedpathlen + NBPW-1) & ~(NBPW-1)); error = copyoutstr(savedpath, (caddr_t)pathptr, - (unsigned)savedpathlen, &len); + (unsigned)savedpathlen, (size_t *)&len); + savedpathlen = (savedpathlen + NBPW-1) & ~(NBPW-1); + if (error) { if (vfexec) vm_map_switch(old_map); goto badtoolate; } - + + /* + * Record the size of the arguments area so that + * sysctl_procargs() can return the argument area without having + * to parse the arguments. 
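Recording p_argslen and p_argc here lets sysctl_procargs() return the saved argument area directly instead of parsing user memory. From userspace that area is read through sysctl; a sketch assuming this release's KERN_PROCARGS MIB (later kernels add KERN_PROCARGS2, which prefixes an argc word), with the buffer sized from kern.argmax:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(int argc, char *argv[])
    {
        int mib[3], argmax;
        size_t len;
        char *buf;

        if (argc != 2) {
            fprintf(stderr, "usage: %s pid\n", argv[0]);
            return 1;
        }

        /* kern.argmax bounds the argument area, so it bounds the buffer */
        mib[0] = CTL_KERN;
        mib[1] = KERN_ARGMAX;
        len = sizeof(argmax);
        if (sysctl(mib, 2, &argmax, &len, NULL, 0) < 0) {
            perror("sysctl kern.argmax");
            return 1;
        }
        if ((buf = malloc(argmax)) == NULL)
            return 1;

        mib[1] = KERN_PROCARGS;   /* KERN_PROCARGS2 on later systems */
        mib[2] = atoi(argv[1]);
        len = argmax;
        if (sysctl(mib, 3, buf, &len, NULL, 0) < 0) {
            perror("sysctl kern.procargs");
            return 1;
        }
        /* the buffer starts with the saved exec path (savedpath above);
         * the NUL-separated argv strings follow */
        printf("%s\n", buf);
        free(buf);
        return 0;
    }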
+ */ + p->p_argslen = (int)p->user_stack - pathptr; + p->p_argc = na - ne; /* save argc for sysctl_procargs() */ + /* Save a NULL pointer below it */ (void) suword((caddr_t)(pathptr - NBPW), 0); @@ -717,7 +961,7 @@ again: (void) suword((caddr_t)ap, ucp); do { error = copyoutstr(cp, (caddr_t)ucp, - (unsigned)cc, &len); + (unsigned)cc, (size_t *)&len); ucp += len; cp += len; nc += len; @@ -762,9 +1006,16 @@ again: * which specify close-on-exec. */ fdexec(p); + + /* + * need to cancel async IO requests that can be cancelled and wait for those + * already active. MAY BLOCK! + */ + _aio_exec( p ); + /* FIXME: Till vmspace inherit is fixed: */ if (!vfexec && p->vm_shm) - shmexit(p); + shmexec(p); /* Clean up the semaphores */ semexit(p); @@ -772,11 +1023,20 @@ again: * Remember file name for accounting. */ p->p_acflag &= ~AFORK; - if (nd.ni_cnd.cn_namelen > MAXCOMLEN) - nd.ni_cnd.cn_namelen = MAXCOMLEN; - bcopy((caddr_t)nd.ni_cnd.cn_nameptr, (caddr_t)p->p_comm, - (unsigned)nd.ni_cnd.cn_namelen); - p->p_comm[nd.ni_cnd.cn_namelen] = '\0'; + /* If the translated name isn't NULL, then we want to use + * that translated name as the name we show as the "real" name. + * Otherwise, use the name passed into exec. + */ + if (0 != binaryWithClassicName[0]) { + bcopy((caddr_t)binaryWithClassicName, (caddr_t)p->p_comm, + sizeof(binaryWithClassicName)); + } else { + if (nd.ni_cnd.cn_namelen > MAXCOMLEN) + nd.ni_cnd.cn_namelen = MAXCOMLEN; + bcopy((caddr_t)nd.ni_cnd.cn_nameptr, (caddr_t)p->p_comm, + (unsigned)nd.ni_cnd.cn_namelen); + p->p_comm[nd.ni_cnd.cn_namelen] = '\0'; + } { /* This is for kdebug */ @@ -785,14 +1045,29 @@ again: /* Collect the pathname for tracing */ kdbg_trace_string(p, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4); + + if (vfexec) + { + KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_DATA, 2)) | DBG_FUNC_NONE, + p->p_pid ,0,0,0, (unsigned int)thr_act); KERNEL_DEBUG_CONSTANT1((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE, - dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, getshuttle_thread(thr_act)); + dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, (unsigned int)thr_act); + } else + { + KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_DATA, 2)) | DBG_FUNC_NONE, + p->p_pid ,0,0,0,0); KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE, dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0); + } } + if (executingClassic) + p->p_flag |= P_CLASSIC | P_AFFINITY; + else + p->p_flag &= ~P_CLASSIC; + /* * mark as execed, wakeup the process that vforked (if any) and tell * it that it now has it's own resources back @@ -842,11 +1117,12 @@ create_unix_stack(map, user_stack, customstack, p) vm_size_t size; vm_offset_t addr; - p->user_stack = user_stack; + p->user_stack = (caddr_t)user_stack; if (!customstack) { - size = round_page(unix_stack_size(p)); - addr = trunc_page(user_stack - size); - return (vm_allocate(map,&addr, size, FALSE)); + size = round_page_64(unix_stack_size(p)); + addr = trunc_page_32(user_stack - size); + return (vm_allocate(map, &addr, size, + VM_MAKE_TAG(VM_MEMORY_STACK) | FALSE)); } else return(KERN_SUCCESS); } @@ -974,7 +1250,7 @@ load_return_to_errno(load_return_t lrtn) { switch (lrtn) { case LOAD_SUCCESS: - return 0; + return 0; case LOAD_BADARCH: return EBADARCH; case LOAD_BADMACHO: @@ -982,10 +1258,14 @@ load_return_to_errno(load_return_t lrtn) case LOAD_SHLIB: return ESHLIBVERS; case LOAD_NOSPACE: + case LOAD_RESOURCE: return ENOMEM; case LOAD_PROTECT: return EACCES; - case LOAD_RESOURCE: + case LOAD_ENOENT: + return ENOENT; + case LOAD_IOERROR: + return EIO; case 
LOAD_FAILURE: default: return EBADEXEC; @@ -1046,9 +1326,10 @@ execargs_alloc(addrp) } kret = kmem_alloc_pageable(bsd_pageable_map, addrp, NCARGS); - if (kret != KERN_SUCCESS) + if (kret != KERN_SUCCESS) { + semaphore_signal(execve_semaphore); return (ENOMEM); - + } return (0); } @@ -1074,4 +1355,3 @@ execargs_free(addr) return (EINVAL); } } - diff --git a/bsd/kern/kern_exit.c b/bsd/kern/kern_exit.c index 25dc9ef71..c9796b2e9 100644 --- a/bsd/kern/kern_exit.c +++ b/bsd/kern/kern_exit.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -85,6 +85,8 @@ #include #include #include +#include +#include #include #include @@ -97,6 +99,9 @@ extern char init_task_failure_data[]; int exit1 __P((struct proc *, int, int *)); +void proc_prepareexit(struct proc *p); +void vfork_exit(struct proc *p, int rv); +void vproc_exit(struct proc *p); /* * exit -- @@ -134,8 +139,7 @@ exit1(p, rv, retval) int * retval; { register struct proc *q, *nq; - thread_t self = current_thread(); - thread_act_t th_act_self = current_act(); + thread_act_t self = current_act(); struct task *task = p->task; register int i,s; struct uthread *ut; @@ -146,22 +150,23 @@ exit1(p, rv, retval) * right here. */ - ut = get_bsdthread_info(th_act_self); + ut = get_bsdthread_info(self); if (ut->uu_flag & P_VFORK) { - (void)vfork_exit(p, rv); - vfork_return(th_act_self, p->p_pptr, p , retval); + vfork_exit(p, rv); + vfork_return(self, p->p_pptr, p , retval); unix_syscall_return(0); /* NOT REACHED */ } + audit_syscall_exit(0, p, ut); /* Exit is always successfull */ signal_lock(p); while (p->exit_thread != self) { if (sig_try_locked(p) <= 0) { - if (get_threadtask(th_act_self) != task) { + if (get_threadtask(self) != task) { signal_unlock(p); return(0); } signal_unlock(p); - thread_terminate(th_act_self); + thread_terminate(self); thread_funnel_set(kernel_flock, FALSE); thread_exception_return(); /* NOTREACHED */ @@ -179,27 +184,12 @@ exit1(p, rv, retval) s = splsched(); p->p_flag |= P_WEXIT; splx(s); - (void)proc_prepareexit(p); + proc_prepareexit(p); p->p_xstat = rv; /* task terminate will call proc_terminate and that cleans it up */ task_terminate_internal(task); - /* - * we come back and returns to AST which - * should cleanup the rest - */ -#if 0 - if (task == current_task()) { - thread_exception_return(); - /*NOTREACHED*/ - } - - while (task == current_task()) { - thread_terminate_self(); - /*NOTREACHED*/ - } -#endif return(0); } @@ -208,8 +198,12 @@ proc_prepareexit(struct proc *p) { int s; struct uthread *ut; - thread_t self = current_thread(); - thread_act_t th_act_self = current_act(); + exception_data_t code[EXCEPTION_CODE_MAX]; + thread_act_t self = current_act(); + + code[0] = 0xFF000001; /* Set terminate code */ + code[1] = p->p_pid; /* Pass out the pid */ + (void)sys_perf_notify(p->task, &code, 2); /* Notify the perf server */ /* * Remove proc from allproc queue and from pidhash chain. @@ -218,6 +212,7 @@ proc_prepareexit(struct proc *p) * in partially cleaned state. */ LIST_REMOVE(p, p_list); + LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. 
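execargs_alloc() gains a semaphore_signal() on its failure path: the routine first waits on execve_semaphore to reserve one of a fixed number of argument buffers, so returning ENOMEM without signalling used to leak the reservation permanently. The shape of the fix, sketched with a pthread condition variable standing in for the Mach semaphore (an assumption made for portability; the kernel primitive differs):

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    #define NSLOTS 3   /* illustrative cap on concurrent buffers */

    static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  slot_cv   = PTHREAD_COND_INITIALIZER;
    static int slots_free = NSLOTS;

    static void
    slot_release(void)
    {
        pthread_mutex_lock(&slot_lock);
        slots_free++;
        pthread_cond_signal(&slot_cv);
        pthread_mutex_unlock(&slot_lock);
    }

    /* Reserve a slot, then allocate.  The failure path must release the
     * reservation before returning, which is exactly what the added
     * semaphore_signal() call does in execargs_alloc(). */
    static int
    buf_alloc(void **out, size_t size)
    {
        pthread_mutex_lock(&slot_lock);
        while (slots_free == 0)
            pthread_cond_wait(&slot_cv, &slot_lock);
        slots_free--;
        pthread_mutex_unlock(&slot_lock);

        if ((*out = malloc(size)) == NULL) {
            slot_release();        /* the step the pre-fix code skipped */
            return ENOMEM;
        }
        return 0;
    }

    static void
    buf_free(void *p)
    {
        free(p);
        slot_release();
    }

    int
    main(void)
    {
        void *p;
        int err = buf_alloc(&p, 4096);

        if (err == 0)
            buf_free(p);
        return err;
    }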
*/ LIST_REMOVE(p, p_hash); #ifdef PGINPROF @@ -230,7 +225,7 @@ proc_prepareexit(struct proc *p) p->p_flag &= ~(P_TRACED | P_PPWAIT); p->p_sigignore = ~0; p->p_siglist = 0; - ut = get_bsdthread_info(th_act_self); + ut = get_bsdthread_info(self); ut->uu_siglist = 0; untimeout(realitexpire, (caddr_t)p->p_pid); } @@ -239,8 +234,6 @@ void proc_exit(struct proc *p) { register struct proc *q, *nq, *pp; - thread_t self = current_thread(); - thread_act_t th_act_self = current_act(); struct task *task = p->task; register int i,s; boolean_t funnel_state; @@ -260,6 +253,12 @@ proc_exit(struct proc *p) MALLOC_ZONE(p->p_ru, struct rusage *, sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK); + /* + * need to cancel async IO requests that can be cancelled and wait for those + * already active. MAY BLOCK! + */ + _aio_exit( p ); + /* * Close open files and release open-file table. * This may block! @@ -337,9 +336,6 @@ proc_exit(struct proc *p) if (q->p_flag & P_TRACED) { q->p_flag &= ~P_TRACED; if (q->sigwait_thread) { - thread_t sig_shuttle; - - sig_shuttle = (thread_t)getshuttle_thread((thread_act_t)q->sigwait_thread); /* * The sigwait_thread could be stopped at a * breakpoint. Wake it up to kill. @@ -348,7 +344,7 @@ proc_exit(struct proc *p) * the process would result into a deadlock on q->sigwait. */ thread_resume((thread_act_t)q->sigwait_thread); - clear_wait(sig_shuttle, THREAD_INTERRUPTED); + clear_wait(q->sigwait_thread, THREAD_INTERRUPTED); threadsignal((thread_act_t)q->sigwait_thread, SIGKILL, 0); } psignal(q, SIGKILL); @@ -421,6 +417,9 @@ proc_exit(struct proc *p) FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC); p->p_limit = NULL; + /* Free the auditing info */ + audit_proc_free(p); + /* * Finish up by terminating the task * and halt this thread (only if a @@ -430,12 +429,20 @@ proc_exit(struct proc *p) //task->proc = NULL; set_bsdtask_info(task, NULL); + KNOTE(&p->p_klist, NOTE_EXIT); + /* * Notify parent that we're gone. */ if (p->p_pptr->p_flag & P_NOCLDWAIT) { struct proc * pp = p->p_pptr; + /* + * Add child resource usage to parent before giving + * zombie to init + */ + ruadd(&p->p_pptr->p_stats->p_cru, p->p_ru); + proc_reparent(p, initproc); /* If there are no more children wakeup parent */ if (LIST_EMPTY(&pp->p_children)) @@ -452,8 +459,7 @@ proc_exit(struct proc *p) psignal(pp, SIGCHLD); - /* Place onto zombproc. */ - LIST_INSERT_HEAD(&zombproc, p, p_list); + /* mark as a zombie */ p->p_stat = SZOMB; /* and now wakeup the parent */ @@ -540,7 +546,7 @@ wait1continue(result) thread = current_act(); vt = (void *)get_bsduthreadarg(thread); retval = (int *)get_bsduthreadrval(thread); - wait1((struct proc *)p, (struct wait4_args *)vt, retval, 0); + return(wait1((struct proc *)p, (struct wait4_args *)vt, retval, 0)); } int @@ -777,11 +783,11 @@ vfork_exit(p, rv) int rv; { register struct proc *q, *nq; - thread_t self = current_thread(); - thread_act_t th_act_self = current_act(); + thread_act_t self = current_act(); struct task *task = p->task; register int i,s; struct uthread *ut; + exception_data_t code[EXCEPTION_CODE_MAX]; /* * If a thread in this task has already @@ -789,17 +795,17 @@ vfork_exit(p, rv) * right here. 
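With KNOTE(&p->p_klist, NOTE_EXIT) posted here, and the matching NOTE_EXEC hook added in execve(), process life-cycle events surface through the EVFILT_PROC filter described in the kqueue.2 page this release introduces. A userspace watcher:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Watch a pid and report when it calls execve() or exits. */
    int
    main(int argc, char *argv[])
    {
        int kq;
        struct kevent kev;

        if (argc != 2) {
            fprintf(stderr, "usage: %s pid\n", argv[0]);
            return 1;
        }
        if ((kq = kqueue()) < 0) {
            perror("kqueue");
            return 1;
        }
        EV_SET(&kev, atoi(argv[1]), EVFILT_PROC, EV_ADD,
               NOTE_EXEC | NOTE_EXIT, 0, NULL);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0) {
            perror("kevent (register)");
            return 1;
        }
        for (;;) {
            if (kevent(kq, NULL, 0, &kev, 1, NULL) < 1)
                break;
            if (kev.fflags & NOTE_EXEC)
                printf("pid %d exec'd\n", (int)kev.ident);
            if (kev.fflags & NOTE_EXIT) {
                printf("pid %d exited\n", (int)kev.ident);
                break;
            }
        }
        return 0;
    }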
*/ - ut = get_bsdthread_info(th_act_self); + ut = get_bsdthread_info(self); #ifdef FIXME signal_lock(p); while (p->exit_thread != self) { if (sig_try_locked(p) <= 0) { - if (get_threadtask(th_act_self) != task) { + if (get_threadtask(self) != task) { signal_unlock(p); return; } signal_unlock(p); - thread_terminate(th_act_self); + thread_terminate(self); thread_funnel_set(kernel_flock, FALSE); thread_exception_return(); /* NOTREACHED */ @@ -817,6 +823,11 @@ panic("init died\nState at Last Exception:\n\n%s", init_task_failure_data); s = splsched(); p->p_flag |= P_WEXIT; splx(s); + + code[0] = 0xFF000001; /* Set terminate code */ + code[1] = p->p_pid; /* Pass out the pid */ + (void)sys_perf_notify(p->task, &code, 2); /* Notify the perf server */ + /* * Remove proc from allproc queue and from pidhash chain. * Need to do this before we do anything that can block. @@ -824,6 +835,7 @@ panic("init died\nState at Last Exception:\n\n%s", init_task_failure_data); * in partially cleaned state. */ LIST_REMOVE(p, p_list); + LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */ LIST_REMOVE(p, p_hash); /* * If parent is waiting for us to exit or exec, @@ -838,15 +850,13 @@ panic("init died\nState at Last Exception:\n\n%s", init_task_failure_data); p->p_xstat = rv; - (void)vproc_exit(p); + vproc_exit(p); } void vproc_exit(struct proc *p) { register struct proc *q, *nq, *pp; - thread_t self = current_thread(); - thread_act_t th_act_self = current_act(); struct task *task = p->task; register int i,s; boolean_t funnel_state; @@ -924,9 +934,6 @@ vproc_exit(struct proc *p) if (q->p_flag & P_TRACED) { q->p_flag &= ~P_TRACED; if (q->sigwait_thread) { - thread_t sig_shuttle; - - sig_shuttle = (thread_t) getshuttle_thread((thread_act_t)q->sigwait_thread); /* * The sigwait_thread could be stopped at a * breakpoint. Wake it up to kill. @@ -935,7 +942,7 @@ vproc_exit(struct proc *p) * the process would result into a deadlock on q->sigwait. */ thread_resume((thread_act_t)q->sigwait_thread); - clear_wait(sig_shuttle, THREAD_INTERRUPTED); + clear_wait(q->sigwait_thread, THREAD_INTERRUPTED); threadsignal((thread_act_t)q->sigwait_thread, SIGKILL, 0); } psignal(q, SIGKILL); @@ -1029,8 +1036,7 @@ vproc_exit(struct proc *p) } psignal(p->p_pptr, SIGCHLD); - /* Place onto zombproc. */ - LIST_INSERT_HEAD(&zombproc, p, p_list); + /* mark as a zombie */ p->p_stat = SZOMB; /* and now wakeup the parent */ diff --git a/bsd/kern/kern_fork.c b/bsd/kern/kern_fork.c index b67a66906..ba5682953 100644 --- a/bsd/kern/kern_fork.c +++ b/bsd/kern/kern_fork.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
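When P_NOCLDWAIT reparents an exiting child to init, the code above first folds the child's usage into the parent with ruadd(); on the ordinary path the same numbers reach a waiting parent through wait4(). A sketch of that path:

    #include <sys/types.h>
    #include <sys/wait.h>
    #include <sys/time.h>
    #include <sys/resource.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        pid_t pid = fork();
        struct rusage ru;
        int status;

        if (pid == 0) {
            /* child: burn a little CPU so the numbers are nonzero */
            volatile unsigned long i, x = 0;
            for (i = 0; i < 50000000UL; i++)
                x += i;
            _exit(0);
        }
        if (wait4(pid, &status, 0, &ru) != pid) {
            perror("wait4");
            return 1;
        }
        printf("child user time: %ld.%06d s\n",
               (long)ru.ru_utime.tv_sec, (int)ru.ru_utime.tv_usec);
        return 0;
    }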
* * @APPLE_LICENSE_HEADER_START@ * @@ -63,6 +63,7 @@ * @(#)kern_fork.c 8.8 (Berkeley) 2/14/95 */ +#include #include #include #include @@ -74,6 +75,7 @@ #include #include #include +#include #if KTRACE #include #endif @@ -146,6 +148,7 @@ vfork(p, uap, retval) ut = (struct uthread *)get_bsdthread_info(cur_act); if (ut->uu_flag & P_VFORK) { printf("vfork called recursively by %s\n", p->p_comm); + (void)chgproccnt(uid, -1); return (EINVAL); } p->p_flag |= P_VFORK; @@ -204,7 +207,6 @@ vfork_return(th_act, p, p2, retval) { long flags; register uid_t uid; - thread_t newth, self = current_thread(); thread_act_t cur_act = (thread_act_t)current_act(); int s, count; task_t t; @@ -245,12 +247,13 @@ procdup( thread_act_t thread; task_t task; kern_return_t result; + pmap_t pmap; extern task_t kernel_task; if (parent->task == kernel_task) - result = task_create_local(TASK_NULL, FALSE, FALSE, &task); + result = task_create_internal(TASK_NULL, FALSE, &task); else - result = task_create_local(parent->task, TRUE, FALSE, &task); + result = task_create_internal(parent->task, TRUE, &task); if (result != KERN_SUCCESS) printf("fork/procdup: task_create failed. Code: 0x%x\n", result); child->task = task; @@ -258,6 +261,7 @@ procdup( set_bsdtask_info(task, child); if (child->p_nice != 0) resetpriority(child); + result = thread_create(task, &thread); if (result != KERN_SUCCESS) printf("fork/procdup: thread_create failed. Code: 0x%x\n", result); @@ -333,6 +337,8 @@ fork1(p1, flags, retval) } act_deallocate(newth); + KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid); + while (p2->p_flag & P_PPWAIT) tsleep(p1, PWAIT, "ppwait", 0); @@ -464,18 +470,26 @@ again: (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy)); p2->vm_shm = (void *)NULL; /* Make sure it is zero */ + /* + * Copy the audit info. + */ + audit_proc_fork(p1, p2); + /* * Duplicate sub-structures as needed. * Increase reference counts on shared objects. * The p_stats and p_sigacts substructs are set in vm_fork. */ p2->p_flag = P_INMEM; + p2->p_flag |= (p1->p_flag & P_CLASSIC); // copy from parent + p2->p_flag |= (p1->p_flag & P_AFFINITY); // copy from parent if (p1->p_flag & P_PROFIL) startprofclock(p2); bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred)); p2->p_cred->p_refcnt = 1; crhold(p1->p_ucred); lockinit(&p2->p_cred->pc_lock, PLOCK, "proc cred", 0, 0); + klist_init(&p2->p_klist); /* bump references to the text vnode */ p2->p_textvp = p1->p_textvp; @@ -515,6 +529,8 @@ again: if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT) p2->p_flag |= P_CONTROLT; + p2->p_argslen = p1->p_argslen; + p2->p_argc = p1->p_argc; p2->p_xstat = 0; p2->p_ru = NULL; @@ -527,10 +543,13 @@ again: p2->sigwait_thread = NULL; p2->exit_thread = NULL; p2->user_stack = p1->user_stack; - p2->p_xxxsigpending = 0; p2->p_vforkcnt = 0; p2->p_vforkact = 0; TAILQ_INIT(&p2->p_uthlist); + TAILQ_INIT(&p2->aio_activeq); + TAILQ_INIT(&p2->aio_doneq); + p2->aio_active_count = 0; + p2->aio_done_count = 0; #if KTRACE /* @@ -581,7 +600,7 @@ uthread_alloc(task_t task, thread_act_t thr_act ) if (task != kernel_task) { uth = (struct uthread *)ut; - p = get_bsdtask_info(task); + p = (struct proc *) get_bsdtask_info(task); funnel_state = thread_funnel_set(kernel_flock, TRUE); uth_parent = (struct uthread *)get_bsdthread_info(current_act()); @@ -612,6 +631,15 @@ uthread_free(task_t task, void *uthread, void * bsd_info) extern task_t kernel_task; int size; boolean_t funnel_state; + struct nlminfo *nlmp; + + /* + * Per-thread audit state should never last beyond system + * call return. 
Since we don't audit the thread creation/ + * removal, the thread state pointer should never be + * non-NULL when we get here. + */ + assert(uth->uu_ar == NULL); sel = &uth->uu_state.ss_select; /* cleanup the select bit space */ @@ -628,6 +656,11 @@ uthread_free(task_t task, void *uthread, void * bsd_info) sel->wql = 0; } + if ((nlmp = uth->uu_nlminfo)) { + uth->uu_nlminfo = 0; + FREE(nlmp, M_LOCKF); + } + if ((task != kernel_task) && p) { funnel_state = thread_funnel_set(kernel_flock, TRUE); //signal_lock(p); diff --git a/bsd/kern/kern_ktrace.c b/bsd/kern/kern_ktrace.c index 2a4d1c3d3..f813e2b26 100644 --- a/bsd/kern/kern_ktrace.c +++ b/bsd/kern/kern_ktrace.c @@ -419,7 +419,7 @@ utrace(curp, uap, retval) p->p_traceflag |= KTRFAC_ACTIVE; kth = ktrgetheader(KTR_USER); MALLOC(cp, caddr_t, uap->len, M_KTRACE, M_WAITOK); - if (!copyin(uap->addr, cp, uap->len)) { + if (!copyin((caddr_t)uap->addr, cp, uap->len)) { kth->ktr_buf = cp; kth->ktr_len = uap->len; ktrwrite(p->p_tracep, kth, NULL, KERNEL_FUNNEL); @@ -641,7 +641,8 @@ ktrcanset(callp, targetp) target->p_ruid == target->p_svuid && caller->p_rgid == target->p_rgid && /* XXX */ target->p_rgid == target->p_svgid && - (targetp->p_traceflag & KTRFAC_ROOT) == 0) || + (targetp->p_traceflag & KTRFAC_ROOT) == 0 && + (targetp->p_flag & P_SUGID) == 0) || caller->pc_ucred->cr_uid == 0) return (1); diff --git a/bsd/kern/kern_lock.c b/bsd/kern/kern_lock.c index 25f845707..b7a2269fc 100644 --- a/bsd/kern/kern_lock.c +++ b/bsd/kern/kern_lock.c @@ -192,7 +192,7 @@ lockmgr(lkp, flags, interlkp, p) int extflags; void *self; - error = 0; self = current_thread(); + error = 0; self = current_act(); if (p) pid = p->p_pid; else diff --git a/bsd/kern/kern_malloc.c b/bsd/kern/kern_malloc.c index 6af52d4ee..2090235ad 100644 --- a/bsd/kern/kern_malloc.c +++ b/bsd/kern/kern_malloc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -71,6 +71,9 @@ #include #include +#include +#include + #include #include #include @@ -86,6 +89,7 @@ #include #include +#include #include #include @@ -210,12 +214,15 @@ struct kmzones { 0, KMZ_MALLOC, /* 88 M_IP6MISC */ 0, KMZ_MALLOC, /* 89 M_TSEGQ */ 0, KMZ_MALLOC, /* 90 M_IGMP */ - SOS(journal), KMZ_CREATEZONE, /* 91 M_JNL_JNL */ + SOS(journal), KMZ_CREATEZONE, /* 91 M_JNL_JNL */ SOS(transaction), KMZ_CREATEZONE, /* 92 M_JNL_TR */ + SOS(specinfo), KMZ_CREATEZONE, /* 93 M_SPECINFO */ + SOS(kqueue), KMZ_CREATEZONE, /* 94 M_KQUEUE */ #undef SOS #undef SOX }; +extern zone_t kalloc_zone(vm_size_t); /* XXX */ /* * Initialize the kernel memory allocator @@ -277,7 +284,7 @@ struct _mhead { char dat[0]; }; -#define ZEROSIZETOKEN 0xFADEDFAD +#define ZEROSIZETOKEN (void *)0xFADEDFAD void *_MALLOC( size_t size, @@ -307,6 +314,9 @@ void *_MALLOC( mem->hdr.mlen = memsize; + if (flags & M_ZERO) + bzero(mem->hdr.dat, size); + return (mem->hdr.dat); } diff --git a/bsd/kern/kern_mib.c b/bsd/kern/kern_mib.c index b70331969..e67ab0701 100644 --- a/bsd/kern/kern_mib.c +++ b/bsd/kern/kern_mib.c @@ -303,12 +303,6 @@ SYSCTL_NODE(_hw, OID_AUTO, optional, CTLFLAG_RW, NULL, "optional features"); SYSCTL_INT(_hw_optional, OID_AUTO, floatingpoint, CTLFLAG_RD | CTLFLAG_KERN, 0, 1, ""); /* always set */ -/* - * Export of _cpu_capabilities to userspace, consumed by the pthread code - * only. - */ -SYSCTL_INT(_hw, OID_AUTO, _cpu_capabilities, CTLFLAG_RD, &_cpu_capabilities, 0, ""); - /* * Deprecated variables. 
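_MALLOC() above learns an M_ZERO flag and bzero()s the caller-visible bytes when it is set. The same flag-driven zeroing in a userspace allocator; the flag values here are illustrative, not xnu's:

    #include <stdlib.h>
    #include <string.h>
    #include <assert.h>

    #define M_WAITOK 0x0000   /* illustrative flag values only */
    #define M_ZERO   0x0004

    static void *
    xmalloc(size_t size, int flags)
    {
        void *p = malloc(size);

        if (p != NULL && (flags & M_ZERO))
            memset(p, 0, size);   /* same effect as the bzero() in _MALLOC */
        return p;
    }

    int
    main(void)
    {
        char *p = xmalloc(64, M_WAITOK | M_ZERO);

        assert(p != NULL && p[0] == 0 && p[63] == 0);
        free(p);
        return 0;
    }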
These are supported for backwards compatibility * purposes only. The MASKED flag requests that the variables not be @@ -332,7 +326,7 @@ SYSCTL_PROC(_hw, HW_L3CACHESIZE, l3cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | SYSCTL_INT (_hw, HW_TB_FREQ, tbfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED, &gPEClockFrequencyInfo.timebase_frequency_hz, 0, ""); SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED, 0, HW_MACHINE, sysctl_hw_generic, "A", ""); SYSCTL_PROC(_hw, HW_MODEL, model, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED, 0, HW_MODEL, sysctl_hw_generic, "A", ""); -SYSCTL_INT (_hw, HW_PHYSMEM, physmem, CTLFLAG_RD | CTLFLAG_MASKED, &mem_size, 0, ""); +SYSCTL_UINT(_hw, HW_PHYSMEM, physmem, CTLFLAG_RD | CTLFLAG_MASKED, &mem_size, 0, ""); SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED, 0, HW_USERMEM, sysctl_hw_generic, "I", ""); SYSCTL_PROC(_hw, HW_EPOCH, epoch, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED, 0, HW_EPOCH, sysctl_hw_generic, "I", ""); SYSCTL_PROC(_hw, HW_VECTORUNIT, vectorunit, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED, 0, HW_VECTORUNIT, sysctl_hw_generic, "I", ""); diff --git a/bsd/kern/kern_mman.c b/bsd/kern/kern_mman.c index f42e2d135..0c02c2f7e 100644 --- a/bsd/kern/kern_mman.c +++ b/bsd/kern/kern_mman.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -151,6 +151,7 @@ struct osmmap_args { long pos; }; +int osmmap(curp, uap, retval) struct proc *curp; register struct osmmap_args *uap; @@ -246,7 +247,7 @@ mmap(p, uap, retval) /* Adjust size for rounding (on both ends). */ user_size += pageoff; /* low end... */ - user_size = (vm_size_t) round_page(user_size); /* hi end */ + user_size = (vm_size_t) round_page_32(user_size); /* hi end */ /* @@ -280,8 +281,8 @@ mmap(p, uap, retval) * There should really be a pmap call to determine a reasonable * location. */ - else if (addr < round_page(p->p_vmspace->vm_daddr + MAXDSIZ)) - addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ); + else if (addr < round_page_32(p->p_vmspace->vm_daddr + MAXDSIZ)) + addr = round_page_32(p->p_vmspace->vm_daddr + MAXDSIZ); #endif @@ -303,7 +304,7 @@ mmap(p, uap, retval) if (err) return(err); if(fp->f_type == DTYPE_PSXSHM) { - uap->addr = user_addr; + uap->addr = (caddr_t)user_addr; uap->len = user_size; uap->prot = prot; uap->flags = flags; @@ -322,7 +323,7 @@ mmap(p, uap, retval) * SunOS). */ if (vp->v_type == VCHR || vp->v_type == VSTR) { - return(EOPNOTSUPP); + return(ENODEV); } else { /* * Ensure that file and memory protections are @@ -374,7 +375,7 @@ mmap(p, uap, retval) * We bend a little - round the start and end addresses * to the nearest page boundary. 
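hw.physmem moves from SYSCTL_INT to SYSCTL_UINT because mem_size is unsigned and a signed int misreports machines with 2 GB or more. Reading it with the matching type:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    int
    main(void)
    {
        int mib[2] = { CTL_HW, HW_PHYSMEM };
        unsigned int physmem;             /* unsigned, matching SYSCTL_UINT */
        size_t len = sizeof(physmem);

        if (sysctl(mib, 2, &physmem, &len, NULL, 0) < 0) {
            perror("sysctl");
            return 1;
        }
        printf("hw.physmem = %u bytes\n", physmem);
        return 0;
    }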
*/ - user_size = round_page(user_size); + user_size = round_page_32(user_size); if (file_pos & PAGE_MASK_64) return (EINVAL); @@ -383,9 +384,9 @@ mmap(p, uap, retval) if ((flags & MAP_FIXED) == 0) { find_space = TRUE; - user_addr = round_page(user_addr); + user_addr = round_page_32(user_addr); } else { - if (user_addr != trunc_page(user_addr)) + if (user_addr != trunc_page_32(user_addr)) return (EINVAL); find_space = FALSE; (void) vm_deallocate(user_map, user_addr, user_size); @@ -419,9 +420,16 @@ mmap(p, uap, retval) if (result != KERN_SUCCESS) goto out; + result = vm_protect(user_map, user_addr, user_size, TRUE, maxprot); + if (result != KERN_SUCCESS) + goto out; + result = vm_protect(user_map, user_addr, user_size, FALSE, prot); + if (result != KERN_SUCCESS) + goto out; + } else { UBCINFOCHECK("mmap", vp); - pager = ubc_getpager(vp); + pager = (vm_pager_t)ubc_getpager(vp); if (pager == NULL) return (ENOMEM); @@ -461,7 +469,7 @@ mmap(p, uap, retval) ubc_map(vp); } - if (flags & (MAP_SHARED|MAP_INHERIT)) { + if (flags & MAP_SHARED) { result = vm_inherit(user_map, user_addr, user_size, VM_INHERIT_SHARE); if (result != KERN_SUCCESS) { @@ -510,7 +518,7 @@ msync(p, uap, retval) pageoff = (addr & PAGE_MASK); addr -= pageoff; size = uap->len; - size = (vm_size_t) round_page(size); + size = (vm_size_t) round_page_32(size); flags = uap->flags; if (addr + size < addr) @@ -518,6 +526,9 @@ msync(p, uap, retval) user_map = current_map(); + if ((flags & (MS_ASYNC|MS_SYNC)) == (MS_ASYNC|MS_SYNC)) + return (EINVAL); + if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE)) return (EINVAL); @@ -529,7 +540,7 @@ msync(p, uap, retval) * inaccurate results, lets just return error as invalid size * specified */ - return(EINVAL); + return (EINVAL); /* XXX breaks posix apps */ } if (flags & MS_KILLPAGES) @@ -559,10 +570,10 @@ msync(p, uap, retval) } return (0); - } +int mremap() { /* Not yet implemented */ @@ -573,6 +584,7 @@ struct munmap_args { caddr_t addr; int len; }; +int munmap(p, uap, retval) struct proc *p; struct munmap_args *uap; @@ -590,7 +602,7 @@ munmap(p, uap, retval) user_addr -= pageoff; user_size += pageoff; - user_size = round_page(user_size); + user_size = round_page_32(user_size); if (user_addr + user_size < user_addr) return(EINVAL); @@ -654,7 +666,7 @@ mprotect(p, uap, retval) pageoff = (user_addr & PAGE_MASK); user_addr -= pageoff; user_size += pageoff; - user_size = round_page(user_size); + user_size = round_page_32(user_size); if (user_addr + user_size < user_addr) return(EINVAL); @@ -697,7 +709,7 @@ minherit(p, uap, retval) pageoff = (addr & PAGE_MASK); addr -= pageoff; size += pageoff; - size = (vm_size_t) round_page(size); + size = (vm_size_t) round_page_32(size); if (addr + size < addr) return(EINVAL); @@ -747,8 +759,8 @@ madvise(p, uap, retval) * Since this routine is only advisory, we default to conservative * behavior. */ - start = trunc_page((vm_offset_t) uap->addr); - end = round_page((vm_offset_t) uap->addr + uap->len); + start = trunc_page_32((vm_offset_t) uap->addr); + end = round_page_32((vm_offset_t) uap->addr + uap->len); user_map = current_map(); @@ -812,8 +824,8 @@ mincore(p, uap, retval) * Make sure that the addresses presented are valid for user * mode. 
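mmap() and map_fd() round sizes with the explicit 32-bit page macros and still reject file offsets that are not page aligned, so a caller who wants an arbitrary byte window aligns the offset down and compensates in the returned pointer. A sketch; the offsets are arbitrary and the file is assumed to be at least off + len bytes long:

    #include <sys/mman.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Map len bytes of fd starting at off, which need not be aligned. */
    static void *
    map_window(int fd, off_t off, size_t len, size_t *maplen, void **mapbase)
    {
        long pg = getpagesize();
        off_t aligned = off & ~((off_t)pg - 1);   /* trunc_page analogue */
        size_t slop = (size_t)(off - aligned);

        *maplen = len + slop;
        *mapbase = mmap(NULL, *maplen, PROT_READ, MAP_SHARED, fd, aligned);
        if (*mapbase == MAP_FAILED)
            return NULL;
        return (char *)*mapbase + slop;
    }

    int
    main(int argc, char *argv[])
    {
        int fd;
        void *base, *p;
        size_t maplen;

        if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
            return 1;
        if ((p = map_window(fd, 100, 16, &maplen, &base)) == NULL) {
            perror("mmap");
            return 1;
        }
        printf("bytes 100..115: %.16s\n", (char *)p);
        munmap(base, maplen);
        close(fd);
        return 0;
    }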
*/ - first_addr = addr = trunc_page((vm_offset_t) uap->addr); - end = addr + (vm_size_t)round_page(uap->len); + first_addr = addr = trunc_page_32((vm_offset_t) uap->addr); + end = addr + (vm_size_t)round_page_32(uap->len); if (VM_MAX_ADDRESS > 0 && end > VM_MAX_ADDRESS) return (EINVAL); @@ -913,7 +925,7 @@ mlock(p, uap, retval) pageoff = (addr & PAGE_MASK); addr -= pageoff; size += pageoff; - size = (vm_size_t) round_page(size); + size = (vm_size_t) round_page_32(size); /* disable wrap around */ if (addr + size < addr) @@ -962,7 +974,7 @@ munlock(p, uap, retval) pageoff = (addr & PAGE_MASK); addr -= pageoff; size += pageoff; - size = (vm_size_t) round_page(size); + size = (vm_size_t) round_page_32(size); /* disable wrap around */ if (addr + size < addr) @@ -1014,6 +1026,7 @@ munlockall(p, uap) struct obreak_args { char *nsiz; }; +int obreak(p, uap, retval) struct proc *p; struct obreak_args *uap; @@ -1025,6 +1038,7 @@ obreak(p, uap, retval) int both; +int ovadvise() { @@ -1033,12 +1047,11 @@ ovadvise() #endif } /* END DEFUNCT */ -#if 1 -int print_map_addr=0; -#endif /* 1 */ /* CDY need to fix interface to allow user to map above 32 bits */ -kern_return_t map_fd( +/* USV: No! need to obsolete map_fd()! mmap() already supports 64 bits */ +kern_return_t +map_fd( int fd, vm_offset_t offset, vm_offset_t *va, @@ -1058,7 +1071,8 @@ kern_return_t map_fd( return ret; } -kern_return_t map_fd_funneled( +kern_return_t +map_fd_funneled( int fd, vm_object_offset_t offset, vm_offset_t *va, @@ -1075,9 +1089,6 @@ kern_return_t map_fd_funneled( int err=0; vm_map_t my_map; struct proc *p =(struct proc *)current_proc(); -#if 0 - extern int print_map_addr; -#endif /* 0 */ /* * Find the inode; verify that it's a regular file. @@ -1102,7 +1113,7 @@ kern_return_t map_fd_funneled( printf("map_fd: file offset not page aligned(%d : %s)\n",p->p_pid, p->p_comm); return (KERN_INVALID_ARGUMENT); } - map_size = round_page(size); + map_size = round_page_32(size); /* * Allow user to map in a zero length file. @@ -1135,7 +1146,7 @@ kern_return_t map_fd_funneled( vm_map_copy_t tmp; if (copyin(va, &dst_addr, sizeof (dst_addr)) || - trunc_page(dst_addr) != dst_addr) { + trunc_page_32(dst_addr) != dst_addr) { (void) vm_map_remove( my_map, map_addr, map_addr + map_size, diff --git a/bsd/kern/kern_newsysctl.c b/bsd/kern/kern_newsysctl.c index 09da4572b..002a77df4 100644 --- a/bsd/kern/kern_newsysctl.c +++ b/bsd/kern/kern_newsysctl.c @@ -80,6 +80,7 @@ struct sysctl_oid_list sysctl__sysctl_children; */ extern struct sysctl_oid *newsysctl_list[]; +extern struct sysctl_oid *machdep_sysctl_list[]; static void @@ -211,12 +212,13 @@ void sysctl_unregister_set(struct linker_set *lsp) void sysctl_register_fixed() { - int i = 0; + int i; - - while (newsysctl_list[i]) { -/* printf("Registering %d\n", i); */ - sysctl_register_oid(newsysctl_list[i++]); + for (i=0; newsysctl_list[i]; i++) { + sysctl_register_oid(newsysctl_list[i]); + } + for (i=0; machdep_sysctl_list[i]; i++) { + sysctl_register_oid(machdep_sysctl_list[i]); } } @@ -1053,6 +1055,9 @@ userland_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *old return (error); } +/* Non-standard BSDI extension - only present on their 4.3 net-2 releases */ +#define KINFO_BSDI_SYSINFO (101<<8) + /* * Kernel versions of the userland sysctl helper functions. 
* diff --git a/bsd/kern/kern_panicinfo.c b/bsd/kern/kern_panicinfo.c index db92fb59a..b8b08476f 100644 --- a/bsd/kern/kern_panicinfo.c +++ b/bsd/kern/kern_panicinfo.c @@ -192,8 +192,8 @@ sysctl_dopanicinfo(name, namelen, oldp, oldlenp, newp, newlen, p) off_t filesize = 0; size_t len; vm_offset_t image; - vm_offset_t oimage; - vm_size_t osize; + vm_offset_t oimage = NULL; + vm_size_t osize = 0; /* covariable: quiet compiler */ len = strlen(imname); oldstr = image_pathname; diff --git a/bsd/kern/kern_pcsamples.c b/bsd/kern/kern_pcsamples.c index 9a0a3ced5..54d9a18f3 100644 --- a/bsd/kern/kern_pcsamples.c +++ b/bsd/kern/kern_pcsamples.c @@ -184,7 +184,7 @@ int ret=0; pcsample_enable = 0; if (pc_bufsize && pc_buffer) - kmem_free(kernel_map,pc_buffer,pc_bufsize); + kmem_free(kernel_map, (vm_offset_t)pc_buffer, pc_bufsize); ret= pcsamples_bootstrap(); return(ret); @@ -196,7 +196,7 @@ pcsamples_clear() global_state_pid = -1; pcsample_enable = 0; if(pc_bufsize && pc_buffer) - kmem_free(kernel_map,pc_buffer,pc_bufsize); + kmem_free(kernel_map, (vm_offset_t)pc_buffer, pc_bufsize); pc_buffer = (u_long *)0; pc_bufptr = (u_long *)0; pc_buflast = (u_long *)0; diff --git a/bsd/kern/kern_proc.c b/bsd/kern/kern_proc.c index 125fde90a..a2907280c 100644 --- a/bsd/kern/kern_proc.c +++ b/bsd/kern/kern_proc.c @@ -206,6 +206,21 @@ pfind(pid) return (NULL); } +/* + * Locate a zombie by PID + */ +__private_extern__ struct proc * +pzfind(pid) + register pid_t pid; +{ + register struct proc *p; + + for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) + if (p->p_pid == pid) + return (p); + return (NULL); +} + /* * Locate a process group by number */ @@ -441,6 +456,12 @@ pgrpdump() } #endif /* DEBUG */ +int +proc_is_classic(struct proc *p) +{ + return (p->p_flag & P_CLASSIC) ? 1 : 0; +} + struct proc * current_proc_EXTERNAL() { return (current_proc()); diff --git a/bsd/kern/kern_prot.c b/bsd/kern/kern_prot.c index 2cd41c3b1..b01377b48 100644 --- a/bsd/kern/kern_prot.c +++ b/bsd/kern/kern_prot.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -75,6 +75,7 @@ #include #include #include +#include #include #include @@ -279,7 +280,7 @@ setsid(p, uap, retval) register_t *retval; { - if (p->p_pgid == p->p_pid || pgfind(p->p_pid)) { + if (p->p_pgid == p->p_pid || pgfind(p->p_pid) || p->p_flag & P_INVFORK) { return (EPERM); } else { (void)enterpgrp(p, p->p_pid, 1); @@ -329,7 +330,7 @@ setpgid(curp, uap, retval) uap->pgid = targp->p_pid; else if (uap->pgid != targp->p_pid) if ((pgrp = pgfind(uap->pgid)) == 0 || - pgrp->pg_session != curp->p_session) + pgrp->pg_session != curp->p_session) return (EPERM); return (enterpgrp(targp, uap->pgid, 0)); } @@ -369,6 +370,7 @@ setuid(p, uap, retval) int error; uid = uap->uid; + AUDIT_ARG(uid, uid, 0, 0, 0); if (uid != pc->p_ruid && (error = suser(pc->pc_ucred, &p->p_acflag))) return (error); @@ -407,6 +409,7 @@ seteuid(p, uap, retval) int error; euid = uap->euid; + AUDIT_ARG(uid, 0, euid, 0, 0); if (euid != pc->p_ruid && euid != pc->p_svuid && (error = suser(pc->pc_ucred, &p->p_acflag))) return (error); @@ -437,6 +440,7 @@ setgid(p, uap, retval) int error; gid = uap->gid; + AUDIT_ARG(gid, gid, 0, 0, 0); if (gid != pc->p_rgid && (error = suser(pc->pc_ucred, &p->p_acflag))) return (error); pcred_writelock(p); @@ -464,6 +468,7 @@ setegid(p, uap, retval) int error; egid = uap->egid; + AUDIT_ARG(gid, 0, egid, 0, 0); if (egid != pc->p_rgid && egid != pc->p_svgid && (error = suser(pc->pc_ucred, &p->p_acflag))) return (error); @@ -495,16 +500,23 @@ setgroups(p, uap, retval) if (error = suser(pc->pc_ucred, &p->p_acflag)) return (error); ngrp = uap->gidsetsize; - if (ngrp < 1 || ngrp > NGROUPS) + if (ngrp > NGROUPS) return (EINVAL); new = crget(); - error = copyin((caddr_t)uap->gidset, - (caddr_t)new->cr_groups, ngrp * sizeof(gid_t)); - if (error) { - crfree(new); - return (error); + + if ( ngrp < 1 ) { + ngrp = 1; + } + else { + error = copyin((caddr_t)uap->gidset, + (caddr_t)new->cr_groups, ngrp * sizeof(gid_t)); + if (error) { + crfree(new); + return (error); + } } new->cr_ngroups = ngrp; + AUDIT_ARG(groupset, new->cr_groups, ngrp); pcred_writelock(p); old = pc->pc_ucred; new->cr_uid = old->cr_uid; @@ -722,6 +734,32 @@ crdup(cr) return (newcr); } +/* + * compare two cred structs + */ +int +crcmp(cr1, cr2) + struct ucred *cr1; + struct ucred *cr2; +{ + int i; + + if (cr1 == cr2) + return 0; + if (cr1 == NOCRED || cr1 == FSCRED || + cr2 == NOCRED || cr2 == FSCRED) + return 1; + if (cr1->cr_uid != cr2->cr_uid) + return 1; + if (cr1->cr_ngroups != cr2->cr_ngroups) + return 1; + // XXX assumes groups will always be listed in some order + for (i=0; i < cr1->cr_ngroups; i++) + if (cr1->cr_groups[i] != cr2->cr_groups[i]) + return 1; + return (0); +} + /* * Get login name, if available. */ @@ -774,13 +812,40 @@ kern_return_t set_security_token(struct proc * p) { security_token_t sec_token; + audit_token_t audit_token; sec_token.val[0] = p->p_ucred->cr_uid; sec_token.val[1] = p->p_ucred->cr_gid; + audit_token.val[0] = p->p_au->ai_auid; + audit_token.val[1] = p->p_au->ai_asid; + /* use au_tid for now, until au_tid_addr is put to use */ + audit_token.val[2] = p->p_au->ai_termid.port; + audit_token.val[3] = p->p_au->ai_termid.machine; + audit_token.val[4] = 0; + audit_token.val[5] = 0; + audit_token.val[6] = 0; + audit_token.val[7] = 0; return host_security_set_task_token(host_security_self(), p->task, sec_token, + audit_token, (sec_token.val[0]) ? - HOST_PRIV_NULL : + HOST_PRIV_NULL : host_priv_self()); } + + +/* + * Fill in a struct xucred based on a struct ucred. 
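The setgroups() rework above keeps the NGROUPS upper bound, no longer rejects an empty set outright, and records the new set with AUDIT_ARG(groupset, ...). The userspace pairing of getgroups() and setgroups():

    #include <sys/types.h>
    #include <limits.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        gid_t groups[NGROUPS_MAX];
        int i, n;

        if ((n = getgroups(NGROUPS_MAX, groups)) < 0) {
            perror("getgroups");
            return 1;
        }
        for (i = 0; i < n; i++)
            printf("group[%d] = %d\n", i, (int)groups[i]);

        /* setgroups() is the privileged mirror image; as root this
         * replaces the set just printed (and, with this patch, an
         * empty set is tolerated rather than rejected with EINVAL) */
        if (geteuid() == 0 && setgroups(n, groups) < 0)
            perror("setgroups");
        return 0;
    }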
+ */ +__private_extern__ +void +cru2x(struct ucred *cr, struct xucred *xcr) +{ + + bzero(xcr, sizeof(*xcr)); + xcr->cr_version = XUCRED_VERSION; + xcr->cr_uid = cr->cr_uid; + xcr->cr_ngroups = cr->cr_ngroups; + bcopy(cr->cr_groups, xcr->cr_groups, sizeof(xcr->cr_groups)); +} diff --git a/bsd/kern/kern_resource.c b/bsd/kern/kern_resource.c index a592319cb..c59a2cf9c 100644 --- a/bsd/kern/kern_resource.c +++ b/bsd/kern/kern_resource.c @@ -65,6 +65,7 @@ #include #include +#include #include #include #include @@ -88,6 +89,22 @@ int dosetrlimit __P((struct proc *p, u_int which, struct rlimit *limp)); rlim_t maxdmap = MAXDSIZ; /* XXX */ rlim_t maxsmap = MAXSSIZ; /* XXX */ +/* + * Limits on the number of open files per process, and the number + * of child processes per process. + * + * Note: would be in kern/subr_param.c in FreeBSD. + */ +int maxprocperuid = CHILD_MAX; /* max # of procs per user */ +int maxfilesperproc = OPEN_MAX; /* per-proc open files limit */ + +SYSCTL_INT( _kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW, + &maxprocperuid, 0, "Maximum processes allowed per userid" ); + +SYSCTL_INT( _kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW, + &maxfilesperproc, 0, "Maximum files allowed open per process" ); + + /* * Resource controls and accounting. */ @@ -353,14 +370,14 @@ dosetrlimit(p, which, limp) if (limp->rlim_cur > alimp->rlim_cur) { /* grow stack */ - size = round_page(limp->rlim_cur); - size -= round_page(alimp->rlim_cur); + size = round_page_64(limp->rlim_cur); + size -= round_page_64(alimp->rlim_cur); #if STACK_GROWTH_UP /* go to top of current stack */ - addr = trunc_page(p->user_stack + alimp->rlim_cur); + addr = trunc_page((unsigned int)(p->user_stack + alimp->rlim_cur)); #else STACK_GROWTH_UP - addr = trunc_page(p->user_stack - alimp->rlim_cur); + addr = trunc_page_32((unsigned int)(p->user_stack - alimp->rlim_cur)); addr -= size; #endif /* STACK_GROWTH_UP */ if (vm_allocate(current_map(), @@ -373,39 +390,44 @@ dosetrlimit(p, which, limp) break; case RLIMIT_NOFILE: - /* - * Only root can get the maxfiles limits, as it is systemwide resource - */ - if (is_suser()) { + /* + * Only root can set the maxfiles limits, as it is systemwide resource + */ + if ( is_suser() ) { if (limp->rlim_cur > maxfiles) limp->rlim_cur = maxfiles; if (limp->rlim_max > maxfiles) limp->rlim_max = maxfiles; - } else { - if (limp->rlim_cur > OPEN_MAX) - limp->rlim_cur = OPEN_MAX; - if (limp->rlim_max > OPEN_MAX) - limp->rlim_max = OPEN_MAX; + } + else { + if (limp->rlim_cur > maxfilesperproc) + limp->rlim_cur = maxfilesperproc; + if (limp->rlim_max > maxfilesperproc) + limp->rlim_max = maxfilesperproc; } break; case RLIMIT_NPROC: - /* - * Only root can get the maxproc limits, as it is systemwide resource - */ - if (is_suser()) { + /* + * Only root can set to the maxproc limits, as it is + * systemwide resource; all others are limited to + * maxprocperuid (presumably less than maxproc). + */ + if ( is_suser() ) { if (limp->rlim_cur > maxproc) limp->rlim_cur = maxproc; if (limp->rlim_max > maxproc) limp->rlim_max = maxproc; - } else { - if (limp->rlim_cur > CHILD_MAX) - limp->rlim_cur = CHILD_MAX; - if (limp->rlim_max > CHILD_MAX) - limp->rlim_max = CHILD_MAX; + } + else { + if (limp->rlim_cur > maxprocperuid) + limp->rlim_cur = maxprocperuid; + if (limp->rlim_max > maxprocperuid) + limp->rlim_max = maxprocperuid; } break; - } + + } /* switch... 
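For non-root callers, RLIMIT_NOFILE and RLIMIT_NPROC are now clamped to the tunable kern.maxfilesperproc and kern.maxprocperuid sysctls rather than the compile-time OPEN_MAX and CHILD_MAX. A process raises its soft limit toward whatever hard limit the clamp leaves:

    #include <sys/resource.h>
    #include <stdio.h>

    int
    main(void)
    {
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) < 0) {
            perror("getrlimit");
            return 1;
        }
        printf("open files: cur=%llu max=%llu\n",
               (unsigned long long)rl.rlim_cur,
               (unsigned long long)rl.rlim_max);

        rl.rlim_cur = rl.rlim_max;   /* the kernel clamps this for non-root */
        if (setrlimit(RLIMIT_NOFILE, &rl) < 0) {
            perror("setrlimit");
            return 1;
        }
        return 0;
    }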
*/ *alimp = *limp; return (0); } @@ -460,8 +482,8 @@ calcru(p, up, sp, ip) ut.tv_usec = tinfo.user_time.microseconds; st.tv_sec = tinfo.system_time.seconds; st.tv_usec = tinfo.system_time.microseconds; - timeradd(&ut,up,up); - timeradd(&st,up,up); + timeradd(&ut, up, up); + timeradd(&st, sp, sp); task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT; task_info(task, TASK_THREAD_TIMES_INFO, @@ -471,8 +493,8 @@ calcru(p, up, sp, ip) ut.tv_usec = ttimesinfo.user_time.microseconds; st.tv_sec = ttimesinfo.system_time.seconds; st.tv_usec = ttimesinfo.system_time.microseconds; - timeradd(&ut,up,up); - timeradd(&st,up,up); + timeradd(&ut, up, up); + timeradd(&st, sp, sp); } } diff --git a/bsd/kern/kern_shutdown.c b/bsd/kern/kern_shutdown.c index 809b9c6d3..19f68507d 100644 --- a/bsd/kern/kern_shutdown.c +++ b/bsd/kern/kern_shutdown.c @@ -59,6 +59,7 @@ #include #include #include +#include int waittime = -1; @@ -94,6 +95,8 @@ boot(paniced, howto, command) /* handle live procs (deallocate their root and current directories). */ proc_shutdown(); + audit_shutdown(); + sync(p, (void *)NULL, (int *)NULL); /* Release vnodes from the VM object cache */ @@ -208,6 +211,19 @@ proc_shutdown() if (TERM_catch == 0) break; } + if (TERM_catch) { + /* + * log the names of the unresponsive tasks + */ + + for (p = allproc.lh_first; p; p = p->p_list.le_next) { + if (((p->p_flag&P_SYSTEM) == 0) && (p->p_pptr->p_pid != 0) && (p != self)) { + if (p->p_sigcatch & sigmask(SIGTERM)) + printf("%s[%d]: didn't act on SIGTERM\n", p->p_comm, p->p_pid); + } + } + IOSleep(1000 * 5); + } /* * send a SIGKILL to all the procs still hanging around @@ -251,7 +267,7 @@ proc_shutdown() thread_block(THREAD_CONTINUE_NULL); } else { - p->exit_thread = current_thread(); + p->exit_thread = current_act(); printf("."); exit1(p, 1, (int *)NULL); } diff --git a/bsd/kern/kern_sig.c b/bsd/kern/kern_sig.c index 5041944ca..9f6c488fa 100644 --- a/bsd/kern/kern_sig.c +++ b/bsd/kern/kern_sig.c @@ -113,6 +113,13 @@ void psignal_lock __P((struct proc *, int, int)); void psignal_uthread __P((thread_act_t, int)); kern_return_t do_bsdexception(int, int, int); +static int filt_sigattach(struct knote *kn); +static void filt_sigdetach(struct knote *kn); +static int filt_signal(struct knote *kn, long hint); + +struct filterops sig_filtops = + { 0, filt_sigattach, filt_sigdetach, filt_signal }; + #if SIGNAL_DEBUG void ram_printf __P((int)); int ram_debug=0; @@ -290,6 +297,8 @@ sigaction(p, uap, retval) sa->sa_flags |= SA_SIGINFO; if (ps->ps_signodefer & bit) sa->sa_flags |= SA_NODEFER; + if (ps->ps_64regset & bit) + sa->sa_flags |= SA_64REGSET; if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP)) sa->sa_flags |= SA_NOCLDSTOP; if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT)) @@ -427,12 +436,16 @@ setsigvec(p, signum, sa) * Change setting atomically. 
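sig_filtops above wires signal delivery into kqueue: filt_sigattach() forces EV_CLEAR so the count resets once read, and filt_signal() bumps kn_data for each matching delivery. Because the KNOTE in psignal_lock() fires before the ignore check, the filter observes signals even when their disposition is SIG_IGN:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        int kq = kqueue();
        struct kevent kev;

        /* EVFILT_SIGNAL counts deliveries even for an ignored signal */
        signal(SIGUSR1, SIG_IGN);
        EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
        if (kq < 0 || kevent(kq, &kev, 1, NULL, 0, NULL) < 0) {
            perror("kevent");
            return 1;
        }

        kill(getpid(), SIGUSR1);
        kill(getpid(), SIGUSR1);

        if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
            printf("signal %d delivered %d times\n",
                   (int)kev.ident, (int)kev.data);  /* data counts hits */
        return 0;
    }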
*/ ps->ps_sigact[signum] = sa->sa_handler; - ps->ps_trampact[signum] = sa->sa_tramp; + ps->ps_trampact[signum] = (sig_t) sa->sa_tramp; ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask; if (sa->sa_flags & SA_SIGINFO) ps->ps_siginfo |= bit; else ps->ps_siginfo &= ~bit; + if (sa->sa_flags & SA_64REGSET) + ps->ps_64regset |= bit; + else + ps->ps_64regset &= ~bit; if ((sa->sa_flags & SA_RESTART) == 0) ps->ps_sigintr |= bit; else @@ -655,7 +668,6 @@ osigvec(p, uap, retval) register int signum; int bit, error=0; - panic("osigvec: notsupp"); #if 0 signum = uap->signum; if (signum <= 0 || signum >= NSIG || @@ -684,6 +696,8 @@ osigvec(p, uap, retval) sv->sv_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */ error = setsigvec(p, signum, (struct sigaction *)sv); } +#else +error = ENOSYS; #endif return (error); } @@ -814,8 +828,7 @@ __pthread_kill(p, uap, retval) } uth = (struct uthread *)get_bsdthread_info(target_act); - { void *tht = getshuttle_thread(target_act); -} + if (uth->uu_flag & UNO_SIGMASK) { error = ESRCH; goto out; @@ -1048,7 +1061,9 @@ sigaltstack(p, uap, retval) psp->ps_sigstk.ss_flags = ss.ss_flags; return (0); } - if (ss.ss_size < MINSIGSTKSZ) +/* The older stacksize was 8K, enforce that one so no compat problems */ +#define OLDMINSIGSTKSZ 8*1024 + if (ss.ss_size < OLDMINSIGSTKSZ) return (ENOMEM); psp->ps_flags |= SAS_ALTSTACK; psp->ps_sigstk= ss; @@ -1073,8 +1088,16 @@ kill(cp, uap, retval) return (EINVAL); if (uap->pid > 0) { /* kill single process */ - if ((p = pfind(uap->pid)) == NULL) + if ((p = pfind(uap->pid)) == NULL) { + if ((p = pzfind(uap->pid)) != NULL) { + /* + * IEEE Std 1003.1-2001: return success + * when killing a zombie. + */ + return (0); + } return (ESRCH); + } if (!cansignal(cp, pc, p, uap->signum)) return (EPERM); if (uap->signum) @@ -1376,12 +1399,11 @@ get_signalthread(struct proc *p, int signum) sigset_t mask = sigmask(signum); thread_act_t sig_thread_act; struct task * sig_task = p->task; - thread_t sig_thread; kern_return_t kret; if ((p->p_flag & P_INVFORK) && p->p_vforkact) { sig_thread_act = p->p_vforkact; - kret = check_actforsig(sig_task, sig_thread_act, &sig_thread, 1); + kret = check_actforsig(sig_task, sig_thread_act, 1); if (kret == KERN_SUCCESS) return(sig_thread_act); else @@ -1391,11 +1413,11 @@ get_signalthread(struct proc *p, int signum) TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) { if(((uth->uu_flag & UNO_SIGMASK)== 0) && (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) { - if (check_actforsig(p->task, uth->uu_act, NULL, 1) == KERN_SUCCESS) + if (check_actforsig(p->task, uth->uu_act, 1) == KERN_SUCCESS) return(uth->uu_act); } } - if (get_signalact(p->task, &thr_act, NULL, 1) == KERN_SUCCESS) { + if (get_signalact(p->task, &thr_act, 1) == KERN_SUCCESS) { return(thr_act); } @@ -1424,10 +1446,7 @@ psignal_lock(p, signum, withlock) register int s, prop; register sig_t action; thread_act_t sig_thread_act; - thread_t sig_thread; register task_t sig_task; - register thread_t cur_thread; - thread_act_t cur_act; int mask; struct uthread *uth; kern_return_t kret; @@ -1459,6 +1478,10 @@ psignal_lock(p, signum, withlock) return; } + s = splhigh(); + KNOTE(&p->p_klist, NOTE_SIGNAL | signum); + splx(s); + /* * do not send signals to the process that has the thread * doing a reboot(). Not doing so will mark that thread aborted @@ -1477,7 +1500,7 @@ psignal_lock(p, signum, withlock) * Deliver the signal to the first thread in the task. 
This * allows single threaded applications which use signals to * be able to be linked with multithreaded libraries. We have - * an implicit reference to the current_thread, but need + * an implicit reference to the current thread, but need * an explicit one otherwise. The thread reference keeps * the corresponding task data structures around too. This * reference is released by thread_deallocate. @@ -1486,9 +1509,6 @@ psignal_lock(p, signum, withlock) if (((p->p_flag & P_TRACED) == 0) && (p->p_sigignore & mask)) goto psigout; - cur_thread = current_thread(); /* this is a shuttle */ - cur_act = current_act(); - /* If successful return with ast set */ sig_thread_act = get_signalthread(p, signum); @@ -1602,8 +1622,15 @@ psignal_lock(p, signum, withlock) * Wake up the thread, but don't un-suspend it * (except for SIGCONT). */ - if (prop & SA_CONT) - (void) task_resume(sig_task); + if (prop & SA_CONT) { + if (p->p_flag & P_TTYSLEEP) { + p->p_flag &= ~P_TTYSLEEP; + wakeup(&p->p_siglist); + } else { + (void) task_resume(sig_task); + } + p->p_stat = SRUN; + } goto run; } else { /* Default action - varies */ @@ -1726,10 +1753,7 @@ psignal_uthread(thr_act, signum) register int s, prop; register sig_t action; thread_act_t sig_thread_act; - thread_t sig_thread; register task_t sig_task; - register thread_t cur_thread; - thread_act_t cur_act; int mask; struct uthread *uth; kern_return_t kret; @@ -1772,7 +1796,7 @@ psignal_uthread(thr_act, signum) * Deliver the signal to the first thread in the task. This * allows single threaded applications which use signals to * be able to be linked with multithreaded libraries. We have - * an implicit reference to the current_thread, but need + * an implicit reference to the current thread, but need * an explicit one otherwise. The thread reference keeps * the corresponding task data structures around too. This * reference is released by thread_deallocate. @@ -1781,10 +1805,7 @@ psignal_uthread(thr_act, signum) if (((p->p_flag & P_TRACED) == 0) && (p->p_sigignore & mask)) goto puthout; - cur_thread = current_thread(); /* this is a shuttle */ - cur_act = current_act(); - - kret = check_actforsig(sig_task, sig_thread_act, &sig_thread, 1); + kret = check_actforsig(sig_task, sig_thread_act, 1); if (kret != KERN_SUCCESS) { error = EINVAL; @@ -2007,7 +2028,7 @@ __inline__ void sig_lock_to_exit( struct proc *p) { - thread_t self = current_thread(); + thread_t self = current_act(); p->exit_thread = self; (void) task_suspend(p->task); @@ -2017,7 +2038,7 @@ __inline__ int sig_try_locked( struct proc *p) { - thread_t self = current_thread(); + thread_t self = current_act(); while (p->sigwait || p->exit_thread) { if (p->exit_thread) { @@ -2025,7 +2046,7 @@ sig_try_locked( /* * Already exiting - no signals. */ - thread_abort(current_act()); + thread_abort(self); } return(0); } @@ -2064,14 +2085,12 @@ issignal(p) { register int signum, mask, prop, sigbits; task_t task = p->task; - thread_t cur_thread; thread_act_t cur_act; int s; struct uthread * ut; kern_return_t kret; struct proc *pp; - cur_thread = current_thread(); cur_act = current_act(); #if SIGNAL_DEBUG @@ -2133,6 +2152,7 @@ issignal(p) do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum); signal_lock(p); } else { +// panic("Unsupportef gdb option \n");; pp->si_pid = p->p_pid; pp->si_status = p->p_xstat; pp->si_code = CLD_TRAPPED; @@ -2177,7 +2197,7 @@ issignal(p) * clear it, since sig_lock_to_exit will * wait. 
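The kill() change above, via the new pzfind(), makes signalling a zombie succeed as IEEE Std 1003.1-2001 requires; previously the pid left the hash at exit and the pfind()-only lookup produced ESRCH. A demonstration that probes an unreaped child with the null signal (the sleep is a crude way to let the child finish exiting; a real test would synchronize on SIGCHLD):

    #include <sys/types.h>
    #include <sys/wait.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        pid_t pid = fork();

        if (pid == 0)
            _exit(0);              /* child becomes a zombie immediately */
        sleep(1);                  /* let the child finish exiting */
        if (kill(pid, 0) == 0)     /* probe the zombie with the null signal */
            printf("kill(zombie, 0) succeeded (POSIX behavior)\n");
        else
            perror("kill");        /* pre-patch kernels returned ESRCH */
        waitpid(pid, NULL, 0);     /* now reap it */
        return 0;
    }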
*/ - clear_wait(current_thread(), THREAD_INTERRUPTED); + clear_wait(current_act(), THREAD_INTERRUPTED); sig_lock_to_exit(p); /* * Since this thread will be resumed @@ -2194,7 +2214,7 @@ issignal(p) /* * We may have to quit */ - if (thread_should_abort(current_thread())) { + if (thread_should_abort(current_act())) { signal_unlock(p); return(0); } @@ -2314,14 +2334,12 @@ CURSIG(p) { register int signum, mask, prop, sigbits; task_t task = p->task; - thread_t cur_thread; thread_act_t cur_act; int s; struct uthread * ut; int retnum = 0; - cur_thread = current_thread(); cur_act = current_act(); ut = get_bsdthread_info(cur_act); @@ -2584,6 +2602,48 @@ sigexit_locked(p, signum) /* NOTREACHED */ } + +static int +filt_sigattach(struct knote *kn) +{ + struct proc *p = current_proc(); + + kn->kn_ptr.p_proc = p; + kn->kn_flags |= EV_CLEAR; /* automatically set */ + + /* XXX lock the proc here while adding to the list? */ + KNOTE_ATTACH(&p->p_klist, kn); + + return (0); +} + +static void +filt_sigdetach(struct knote *kn) +{ + struct proc *p = kn->kn_ptr.p_proc; + + KNOTE_DETACH(&p->p_klist, kn); +} + +/* + * signal knotes are shared with proc knotes, so we apply a mask to + * the hint in order to differentiate them from process hints. This + * could be avoided by using a signal-specific knote list, but probably + * isn't worth the trouble. + */ +static int +filt_signal(struct knote *kn, long hint) +{ + + if (hint & NOTE_SIGNAL) { + hint &= ~NOTE_SIGNAL; + + if (kn->kn_id == hint) + kn->kn_data++; + } + return (kn->kn_data != 0); +} + void bsd_ast(thread_act_t thr_act) { @@ -2605,7 +2665,7 @@ bsd_ast(thread_act_t thr_act) p->p_flag &= ~P_OWEUPC; } - if (CHECK_SIGNALS(p, current_thread(), ut)) { + if (CHECK_SIGNALS(p, current_act(), ut)) { while (signum = issignal(p)) postsig(signum); } diff --git a/bsd/kern/kern_subr.c b/bsd/kern/kern_subr.c index 5ad9b0eb1..c0bba3e8e 100644 --- a/bsd/kern/kern_subr.c +++ b/bsd/kern/kern_subr.c @@ -68,6 +68,7 @@ #include #include #include +#include #include @@ -81,6 +82,12 @@ uiomove(cp, n, uio) register caddr_t cp; register int n; register struct uio *uio; +{ + return uiomove64((addr64_t)((unsigned int)cp), n, uio); +} + +int +uiomove64(addr64_t cp, int n, struct uio *uio) { register struct iovec *iov; u_int cnt; @@ -110,22 +117,22 @@ uiomove(cp, n, uio) if (uio->uio_rw == UIO_READ) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, - cp, iov->iov_base, cnt, 0,0); + (int)cp, (int)iov->iov_base, cnt, 0,0); - error = copyout(cp, iov->iov_base, cnt); + error = copyout( CAST_DOWN(caddr_t, cp), iov->iov_base, cnt ); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END, - cp, iov->iov_base, cnt, 0,0); + (int)cp, (int)iov->iov_base, cnt, 0,0); } else { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, - iov->iov_base, cp, cnt, 0,0); + (int)iov->iov_base, (int)cp, cnt, 0,0); - error = copyin(iov->iov_base, cp, cnt); + error = copyin(iov->iov_base, CAST_DOWN(caddr_t, cp), cnt); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END, - iov->iov_base, cp, cnt, 0,0); + (int)iov->iov_base, (int)cp, cnt, 0,0); } if (error) return (error); @@ -133,10 +140,10 @@ uiomove(cp, n, uio) case UIO_SYSSPACE: if (uio->uio_rw == UIO_READ) - error = copywithin((caddr_t)cp, iov->iov_base, + error = copywithin(CAST_DOWN(caddr_t, cp), iov->iov_base, cnt); else - error = copywithin(iov->iov_base, (caddr_t)cp, + error = copywithin(iov->iov_base, CAST_DOWN(caddr_t, cp), cnt); break; @@ -144,23 +151,51 @@ uiomove(cp, n, uio) if 
(uio->uio_rw == UIO_READ) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, - cp, iov->iov_base, cnt, 1,0); + (int)cp, (int)iov->iov_base, cnt, 1,0); + + if (error = copypv((addr64_t)cp, (addr64_t)((unsigned int)iov->iov_base), cnt, cppvPsrc | cppvNoRefSrc)) /* Copy physical to virtual */ + error = EFAULT; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END, + (int)cp, (int)iov->iov_base, cnt, 1,0); + } + else + { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, + (int)iov->iov_base, (int)cp, cnt, 1,0); - error = copyp2v(cp, iov->iov_base, cnt); + if (error = copypv((addr64_t)((unsigned int)iov->iov_base), (addr64_t)cp, cnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk)) /* Copy virtual to physical */ + error = EFAULT; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END, + (int)iov->iov_base, (int)cp, cnt, 1,0); + } + if (error) + return (error); + break; + + case UIO_PHYS_SYSSPACE: + if (uio->uio_rw == UIO_READ) + { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, + (int)cp, (int)iov->iov_base, cnt, 2,0); + if (error = copypv((addr64_t)cp, (addr64_t)((unsigned int)iov->iov_base), cnt, cppvKmap | cppvPsrc | cppvNoRefSrc)) /* Copy physical to virtual */ + error = EFAULT; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END, - cp, iov->iov_base, cnt, 1,0); + (int)cp, (int)iov->iov_base, cnt, 2,0); } else { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, - iov->iov_base, cp, cnt, 1,0); + (int)iov->iov_base, (int)cp, cnt, 2,0); - panic("copyv2p not implemented yet\n"); + if (error = copypv((addr64_t)((unsigned int)iov->iov_base), (addr64_t)cp, cnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk)) /* Copy virtual to physical */ + error = EFAULT; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END, - iov->iov_base, cp, cnt, 1,0); + (int)iov->iov_base, (int)cp, cnt, 2,0); } if (error) return (error); diff --git a/bsd/kern/kern_symfile.c b/bsd/kern/kern_symfile.c index e66764491..016fa3195 100644 --- a/bsd/kern/kern_symfile.c +++ b/bsd/kern/kern_symfile.c @@ -110,15 +110,15 @@ static int output_kernel_symbols(struct proc *p) // Dispose of unnecessary gumf, the booter doesn't need to load these rc_mh = IODTGetLoaderInfo("Kernel-__HEADER", (void **)&orig_mh, &orig_mhsize); - if (rc_mh && orig_mh) + if (rc_mh == 0 && orig_mh) IODTFreeLoaderInfo("Kernel-__HEADER", - (void *)orig_mh, round_page(orig_mhsize)); + (void *)orig_mh, round_page_32(orig_mhsize)); rc_sc = IODTGetLoaderInfo("Kernel-__SYMTAB", (void **) &orig_st, &orig_st_size); - if (rc_sc && orig_st) + if (rc_sc == 0 && orig_st) IODTFreeLoaderInfo("Kernel-__SYMTAB", - (void *)orig_st, round_page(orig_st_size)); + (void *)orig_st, round_page_32(orig_st_size)); if (pcred->p_svuid != pcred->p_ruid || pcred->p_svgid != pcred->p_rgid) goto out; @@ -207,7 +207,7 @@ static int output_kernel_symbols(struct proc *p) mh->flags = orig_mh->flags; // Initialise the current file offset and addr - offset = round_page(header_size); + offset = round_page_32(header_size); addr = (caddr_t) const_text->addr; // Load address of __TEXT,__const /* @@ -220,7 +220,7 @@ static int output_kernel_symbols(struct proc *p) sg->vmaddr = (unsigned long) addr; sg->vmsize = const_text->size; sg->fileoff = 0; - sg->filesize = const_text->size + round_page(header_size); + sg->filesize = const_text->size + round_page_32(header_size); sg->maxprot = 0; sg->initprot = 0; sg->flags = 0; @@ -237,7 +237,7 @@ static int 
output_kernel_symbols(struct proc *p) const_text = se; } } - offset = round_page((vm_address_t) offset); + offset = round_page_32((vm_address_t) offset); // Now copy of the __DATA segment load command, the image need // not be stored to disk nobody needs it, yet! @@ -258,7 +258,7 @@ static int output_kernel_symbols(struct proc *p) se->offset = offset; se->nreloc = 0; } - offset = round_page(offset); + offset = round_page_32(offset); /* diff --git a/bsd/kern/kern_synch.c b/bsd/kern/kern_synch.c index 06f1d4591..8bb748231 100644 --- a/bsd/kern/kern_synch.c +++ b/bsd/kern/kern_synch.c @@ -61,21 +61,16 @@ static void _sleep_continue(void) { register struct proc *p; - register thread_t thread = current_thread(); - thread_act_t th_act; + register thread_t self = current_act(); struct uthread * ut; int sig, catch; int error = 0; - th_act = current_act(); - ut = get_bsdthread_info(th_act); + ut = get_bsdthread_info(self); catch = ut->uu_pri & PCATCH; p = current_proc(); -#if FIXME /* [ */ - thread->wait_mesg = NULL; -#endif /* FIXME ] */ - switch (get_thread_waitresult(thread)) { + switch (get_thread_waitresult(self)) { case THREAD_TIMED_OUT: error = EWOULDBLOCK; break; @@ -90,7 +85,7 @@ _sleep_continue(void) /* else fall through */ case THREAD_INTERRUPTED: if (catch) { - if (thread_should_abort(current_thread())) { + if (thread_should_abort(self)) { error = EINTR; } else if (SHOULDissignal(p,ut)) { if (sig = CURSIG(p)) { @@ -99,7 +94,7 @@ _sleep_continue(void) else error = ERESTART; } - if (thread_should_abort(current_thread())) { + if (thread_should_abort(self)) { error = EINTR; } } @@ -109,7 +104,7 @@ _sleep_continue(void) } if (error == EINTR || error == ERESTART) - act_set_astbsd(th_act); + act_set_astbsd(self); if (ut->uu_timo) thread_cancel_timer(); @@ -145,8 +140,7 @@ _sleep( int (*continuation)(int)) { register struct proc *p; - register thread_t thread = current_thread(); - thread_act_t th_act; + register thread_t self = current_act(); struct uthread * ut; int sig, catch = pri & PCATCH; int sigttblock = pri & PTTYBLOCK; @@ -156,8 +150,7 @@ _sleep( s = splhigh(); - th_act = current_act(); - ut = get_bsdthread_info(th_act); + ut = get_bsdthread_info(self); p = current_proc(); #if KTRACE @@ -166,11 +159,11 @@ _sleep( #endif p->p_priority = pri & PRIMASK; - if (chan) - wait_result = assert_wait(chan, - (catch) ? THREAD_ABORTSAFE : THREAD_UNINT); - - if (abstime) + if (chan != NULL) + assert_wait_prim(chan, NULL, abstime, + (catch) ? 
THREAD_ABORTSAFE : THREAD_UNINT); + else + if (abstime != 0) thread_set_timer_deadline(abstime); /* @@ -185,7 +178,8 @@ _sleep( if (catch) { if (SHOULDissignal(p,ut)) { if (sig = CURSIG(p)) { - clear_wait(thread, THREAD_INTERRUPTED); + if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE) + goto block; /* if SIGTTOU or SIGTTIN then block till SIGCONT */ if (sigttblock && ((sig == SIGTTOU) || (sig == SIGTTIN))) { p->p_flag |= P_TTYSLEEP; @@ -206,24 +200,24 @@ _sleep( goto out; } } - if (thread_should_abort(current_thread())) { - clear_wait(thread, THREAD_INTERRUPTED); + if (thread_should_abort(self)) { + if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE) + goto block; error = EINTR; goto out; } - if (get_thread_waitresult(thread) != THREAD_WAITING) { + if (get_thread_waitresult(self) != THREAD_WAITING) { /*already happened */ goto out; } } -#if FIXME /* [ */ - thread->wait_mesg = wmsg; -#endif /* FIXME ] */ +block: + splx(s); p->p_stats->p_ru.ru_nvcsw++; - if (continuation != THREAD_CONTINUE_NULL ) { + if ((thread_continue_t)continuation != THREAD_CONTINUE_NULL ) { ut->uu_continuation = continuation; ut->uu_pri = pri; ut->uu_timo = abstime? 1: 0; @@ -233,9 +227,6 @@ _sleep( wait_result = thread_block(THREAD_CONTINUE_NULL); -#if FIXME /* [ */ - thread->wait_mesg = NULL; -#endif /* FIXME ] */ switch (wait_result) { case THREAD_TIMED_OUT: error = EWOULDBLOCK; @@ -251,7 +242,7 @@ _sleep( /* else fall through */ case THREAD_INTERRUPTED: if (catch) { - if (thread_should_abort(current_thread())) { + if (thread_should_abort(self)) { error = EINTR; } else if (SHOULDissignal(p,ut)) { if (sig = CURSIG(p)) { @@ -260,7 +251,7 @@ _sleep( else error = ERESTART; } - if (thread_should_abort(current_thread())) { + if (thread_should_abort(self)) { error = EINTR; } } @@ -270,7 +261,7 @@ _sleep( } out: if (error == EINTR || error == ERESTART) - act_set_astbsd(th_act); + act_set_astbsd(self); if (abstime) thread_cancel_timer(); (void) splx(s); diff --git a/bsd/kern/kern_sysctl.c b/bsd/kern/kern_sysctl.c index fe123db9b..19eaed4ae 100644 --- a/bsd/kern/kern_sysctl.c +++ b/bsd/kern/kern_sysctl.c @@ -75,11 +75,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -95,9 +97,7 @@ extern vm_map_t bsd_pageable_map; #include #include -#if __ppc__ -#include -#endif +#include sysctlfn kern_sysctl; #ifdef DEBUG @@ -107,18 +107,35 @@ extern sysctlfn vm_sysctl; extern sysctlfn vfs_sysctl; extern sysctlfn net_sysctl; extern sysctlfn cpu_sysctl; +extern int aio_max_requests; +extern int aio_max_requests_per_process; +extern int aio_worker_threads; +extern int maxprocperuid; +extern int maxfilesperproc; int userland_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval); -void -fill_proc(struct proc *p,struct kinfo_proc *kp, int doingzomb); - -void -fill_externproc(struct proc *p, struct extern_proc *exp); - +static int +sysctl_aiomax( void *oldp, size_t *oldlenp, void *newp, size_t newlen ); +static int +sysctl_aioprocmax( void *oldp, size_t *oldlenp, void *newp, size_t newlen ); +static int +sysctl_aiothreads( void *oldp, size_t *oldlenp, void *newp, size_t newlen ); +static void +fill_proc(struct proc *p, struct kinfo_proc *kp); +static int +sysctl_maxfilesperproc( void *oldp, size_t *oldlenp, void *newp, size_t newlen ); +static int +sysctl_maxprocperuid( void *oldp, size_t *oldlenp, void *newp, size_t newlen ); +static int +sysctl_maxproc( void *oldp, size_t 
*oldlenp, void *newp, size_t newlen ); +static int +sysctl_procargs2( int *name, u_int namelen, char *where, size_t *sizep, struct proc *cur_proc); +static int +sysctl_procargsx( int *name, u_int namelen, char *where, size_t *sizep, struct proc *cur_proc, int argc_yes); /* @@ -308,6 +325,10 @@ extern char hostname[MAXHOSTNAMELEN]; /* defined in bsd/kern/init_main.c */ extern int hostnamelen; extern char domainname[MAXHOSTNAMELEN]; extern int domainnamelen; +extern char classichandler[32]; +extern long classichandler_fsid; +extern long classichandler_fileid; + extern long hostid; #ifdef INSECURE int securelevel = -1; @@ -315,6 +336,124 @@ int securelevel = -1; int securelevel; #endif +static int +sysctl_affinity(name, namelen, oldBuf, oldSize, newBuf, newSize, cur_proc) + int *name; + u_int namelen; + char *oldBuf; + size_t *oldSize; + char *newBuf; + size_t newSize; + struct proc *cur_proc; +{ + if (namelen < 1) + return (EOPNOTSUPP); + + if (name[0] == 0 && 1 == namelen) { + return sysctl_rdint(oldBuf, oldSize, newBuf, + (cur_proc->p_flag & P_AFFINITY) ? 1 : 0); + } else if (name[0] == 1 && 2 == namelen) { + if (name[1] == 0) { + cur_proc->p_flag &= ~P_AFFINITY; + } else { + cur_proc->p_flag |= P_AFFINITY; + } + return 0; + } + return (EOPNOTSUPP); +} + +static int +sysctl_classic(name, namelen, oldBuf, oldSize, newBuf, newSize, cur_proc) + int *name; + u_int namelen; + char *oldBuf; + size_t *oldSize; + char *newBuf; + size_t newSize; + struct proc *cur_proc; +{ + int newVal; + int err; + struct proc *p; + + if (namelen != 1) + return (EOPNOTSUPP); + + p = pfind(name[0]); + if (p == NULL) + return (EINVAL); + + if ((p->p_ucred->cr_uid != cur_proc->p_ucred->cr_uid) + && suser(cur_proc->p_ucred, &cur_proc->p_acflag)) + return (EPERM); + + return sysctl_rdint(oldBuf, oldSize, newBuf, + (p->p_flag & P_CLASSIC) ? 
1 : 0); +} + +static int +sysctl_classichandler(name, namelen, oldBuf, oldSize, newBuf, newSize, p) + int *name; + u_int namelen; + char *oldBuf; + size_t *oldSize; + char *newBuf; + size_t newSize; + struct proc *p; +{ + int error; + int len; + struct nameidata nd; + struct vattr vattr; + char handler[sizeof(classichandler)]; + + if ((error = suser(p->p_ucred, &p->p_acflag))) + return (error); + len = strlen(classichandler) + 1; + if (oldBuf && *oldSize < len) + return (ENOMEM); + if (newBuf && newSize >= sizeof(classichandler)) + return (ENAMETOOLONG); + *oldSize = len - 1; + if (newBuf) { + error = copyin(newBuf, handler, newSize); + if (error) + return (error); + handler[newSize] = 0; + + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, + handler, p); + error = namei(&nd); + if (error) + return (error); + /* Check mount point */ + if ((nd.ni_vp->v_mount->mnt_flag & MNT_NOEXEC) || + (nd.ni_vp->v_type != VREG)) { + vput(nd.ni_vp); + return (EACCES); + } + error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p); + if (error) { + vput(nd.ni_vp); + return (error); + } + classichandler_fsid = vattr.va_fsid; + classichandler_fileid = vattr.va_fileid; + vput(nd.ni_vp); + } + if (oldBuf) { + error = copyout(classichandler, oldBuf, len); + if (error) + return (error); + } + if (newBuf) { + strcpy(classichandler, handler); + } + return (error); +} + + extern int get_kernel_symfile( struct proc *, char **); extern int sysctl_dopanicinfo(int *, u_int, void *, size_t *, void *, size_t, struct proc *); @@ -344,9 +483,12 @@ kern_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) || name[0] == KERN_PROF || name[0] == KERN_KDEBUG || name[0] == KERN_PROCARGS + || name[0] == KERN_PROCARGS2 || name[0] == KERN_PCSAMPLES || name[0] == KERN_IPC || name[0] == KERN_SYSV + || name[0] == KERN_AFFINITY + || name[0] == KERN_CLASSIC || name[0] == KERN_PANICINFO) ) return (ENOTDIR); /* overloaded */ @@ -365,11 +507,16 @@ kern_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) error = sysctl_int(oldp, oldlenp, newp, newlen, &desiredvnodes); reset_vmobjectcache(oldval, desiredvnodes); + resize_namecache(desiredvnodes); return(error); case KERN_MAXPROC: - return (sysctl_int(oldp, oldlenp, newp, newlen, &maxproc)); + return (sysctl_maxproc(oldp, oldlenp, newp, newlen)); case KERN_MAXFILES: return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles)); + case KERN_MAXPROCPERUID: + return( sysctl_maxprocperuid( oldp, oldlenp, newp, newlen ) ); + case KERN_MAXFILESPERPROC: + return( sysctl_maxfilesperproc( oldp, oldlenp, newp, newlen ) ); case KERN_ARGMAX: return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX)); case KERN_SECURELVL: @@ -433,6 +580,9 @@ kern_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) case KERN_PROCARGS: /* new one as it does not use kinfo_proc */ return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp, p)); + case KERN_PROCARGS2: + /* new one as it does not use kinfo_proc */ + return (sysctl_procargs2(name + 1, namelen - 1, oldp, oldlenp, p)); case KERN_SYMFILE: error = get_kernel_symfile( p, &str ); if ( error ) @@ -443,6 +593,21 @@ kern_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) case KERN_PANICINFO: return(sysctl_dopanicinfo(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p)); + case KERN_AFFINITY: + return sysctl_affinity(name+1, namelen-1, oldp, oldlenp, + newp, newlen, p); + case KERN_CLASSIC: + return sysctl_classic(name+1, namelen-1, oldp, oldlenp, + newp, newlen, p); + case KERN_CLASSICHANDLER: + return sysctl_classichandler(name+1, namelen-1, oldp, oldlenp, + newp, 
newlen, p); + case KERN_AIOMAX: + return( sysctl_aiomax( oldp, oldlenp, newp, newlen ) ); + case KERN_AIOPROCMAX: + return( sysctl_aioprocmax( oldp, oldlenp, newp, newlen ) ); + case KERN_AIOTHREADS: + return( sysctl_aiothreads( oldp, oldlenp, newp, newlen ) ); default: return (EOPNOTSUPP); } @@ -798,25 +963,28 @@ again: break; case KERN_PROC_TTY: - if ( doingzomb || (p->p_flag & P_CONTROLT) == 0 || + if ((p->p_flag & P_CONTROLT) == 0 || + (p->p_session == NULL) || p->p_session->s_ttyp == NULL || p->p_session->s_ttyp->t_dev != (dev_t)name[1]) continue; break; case KERN_PROC_UID: - if (doingzomb || (p->p_ucred->cr_uid != (uid_t)name[1])) + if ((p->p_ucred == NULL) || + (p->p_ucred->cr_uid != (uid_t)name[1])) continue; break; case KERN_PROC_RUID: - if ( doingzomb || (p->p_cred->p_ruid != (uid_t)name[1])) + if ((p->p_ucred == NULL) || + (p->p_cred->p_ruid != (uid_t)name[1])) continue; break; } if (buflen >= sizeof(struct kinfo_proc)) { bzero(&kproc, sizeof(struct kinfo_proc)); - fill_proc(p, &kproc, doingzomb); + fill_proc(p, &kproc); if (error = copyout((caddr_t)&kproc, &dp->kp_proc, sizeof(struct kinfo_proc))) return (error); @@ -841,56 +1009,49 @@ again: return (0); } -void -fill_proc(p,kp, doingzomb) - register struct proc *p; - register struct kinfo_proc *kp; - int doingzomb; -{ - fill_externproc(p, &kp->kp_proc); - if (!doingzomb) - fill_eproc(p, &kp->kp_eproc); -} /* * Fill in an eproc structure for the specified process. */ -void +static void fill_eproc(p, ep) register struct proc *p; register struct eproc *ep; { register struct tty *tp; - /* - * Skip zombie processes. - */ - if (p->p_stat == SZOMB) - return; - ep->e_paddr = p; - ep->e_sess = p->p_pgrp->pg_session; - ep->e_pcred = *p->p_cred; - ep->e_ucred = *p->p_ucred; + if (p->p_pgrp) { + ep->e_sess = p->p_pgrp->pg_session; + ep->e_pgid = p->p_pgrp->pg_id; + ep->e_jobc = p->p_pgrp->pg_jobc; + if (ep->e_sess && ep->e_sess->s_ttyvp) + ep->e_flag = EPROC_CTTY; + } else { + ep->e_sess = (struct session *)0; + ep->e_pgid = 0; + ep->e_jobc = 0; + } + ep->e_ppid = (p->p_pptr) ? p->p_pptr->p_pid : 0; + if (p->p_cred) { + ep->e_pcred = *p->p_cred; + if (p->p_ucred) + ep->e_ucred = *p->p_ucred; + } if (p->p_stat == SIDL || p->p_stat == SZOMB) { ep->e_vm.vm_tsize = 0; ep->e_vm.vm_dsize = 0; ep->e_vm.vm_ssize = 0; } ep->e_vm.vm_rssize = 0; - if (p->p_pptr) - ep->e_ppid = p->p_pptr->p_pid; - else - ep->e_ppid = 0; - ep->e_pgid = p->p_pgrp->pg_id; - ep->e_jobc = p->p_pgrp->pg_jobc; - if ((p->p_flag & P_CONTROLT) && + + if ((p->p_flag & P_CONTROLT) && (ep->e_sess) && (tp = ep->e_sess->s_ttyp)) { ep->e_tdev = tp->t_dev; ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID; ep->e_tsess = tp->t_session; } else ep->e_tdev = NODEV; - ep->e_flag = ep->e_sess->s_ttyvp ? EPROC_CTTY : 0; + if (SESS_LEADER(p)) ep->e_flag |= EPROC_SLEADER; if (p->p_wmesg) @@ -898,10 +1059,11 @@ fill_eproc(p, ep) ep->e_xsize = ep->e_xrssize = 0; ep->e_xccount = ep->e_xswrss = 0; } + /* * Fill in an eproc structure for the specified process. 
*/ -void +static void fill_externproc(p, exp) register struct proc *p; register struct extern_proc *exp; @@ -954,6 +1116,15 @@ fill_externproc(p, exp) exp->p_ru = p->p_ru ; } +static void +fill_proc(p, kp) + register struct proc *p; + register struct kinfo_proc *kp; +{ + fill_externproc(p, &kp->kp_proc); + fill_eproc(p, &kp->kp_eproc); +} + int kdebug_ops(name, namelen, where, sizep, p) int *name; @@ -1029,10 +1200,8 @@ struct proc *p; } /* - * Returns the top N bytes of the user stack, with - * everything below the first argument character - * zeroed for security reasons. - * Odd data structure is for compatibility. + * Return the top *sizep bytes of the user stack, or the entire area of the + * user stack down through the saved exec_path, whichever is smaller. */ int sysctl_procargs(name, namelen, where, sizep, cur_proc) @@ -1041,6 +1210,29 @@ sysctl_procargs(name, namelen, where, sizep, cur_proc) char *where; size_t *sizep; struct proc *cur_proc; +{ + return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 0); +} + +static int +sysctl_procargs2(name, namelen, where, sizep, cur_proc) + int *name; + u_int namelen; + char *where; + size_t *sizep; + struct proc *cur_proc; +{ + return sysctl_procargsx( name, namelen, where, sizep, cur_proc, 1); +} + +static int +sysctl_procargsx(name, namelen, where, sizep, cur_proc, argc_yes) + int *name; + u_int namelen; + char *where; + size_t *sizep; + struct proc *cur_proc; + int argc_yes; { register struct proc *p; register int needed = 0; @@ -1054,14 +1246,14 @@ sysctl_procargs(name, namelen, where, sizep, cur_proc) caddr_t data; unsigned size; vm_offset_t copy_start, copy_end; - vm_offset_t dealloc_start; /* area to remove from kernel map */ - vm_offset_t dealloc_end; int *ip; kern_return_t ret; int pid; + if (argc_yes) + buflen -= NBPW; /* reserve first word to return argc */ - if ((buflen <= 0) || (buflen > (PAGE_SIZE << 1))) { + if ((buflen <= 0) || (buflen > ARG_MAX)) { return(EINVAL); } arg_size = buflen; @@ -1116,20 +1308,20 @@ sysctl_procargs(name, namelen, where, sizep, cur_proc) goto restart; } - ret = kmem_alloc(kernel_map, ©_start, round_page(arg_size)); + ret = kmem_alloc(kernel_map, ©_start, round_page_32(arg_size)); if (ret != KERN_SUCCESS) { task_deallocate(task); return(ENOMEM); } proc_map = get_task_map(task); - copy_end = round_page(copy_start + arg_size); + copy_end = round_page_32(copy_start + arg_size); - if( vm_map_copyin(proc_map, trunc_page(arg_addr), round_page(arg_size), + if( vm_map_copyin(proc_map, trunc_page(arg_addr), round_page_32(arg_size), FALSE, &tmp) != KERN_SUCCESS) { task_deallocate(task); kmem_free(kernel_map, copy_start, - round_page(arg_size)); + round_page_32(arg_size)); return (EIO); } @@ -1142,61 +1334,94 @@ sysctl_procargs(name, namelen, where, sizep, cur_proc) if( vm_map_copy_overwrite(kernel_map, copy_start, tmp, FALSE) != KERN_SUCCESS) { kmem_free(kernel_map, copy_start, - round_page(arg_size)); + round_page_32(arg_size)); return (EIO); } data = (caddr_t) (copy_end - arg_size); - ip = (int *) copy_end; - size = arg_size; - /* - * Now look down the stack for the bottom of the - * argument list. Since this call is otherwise - * unprotected, we can't let the nosy user see - * anything else on the stack. - * - * The arguments are pushed on the stack by - * execve() as: - * - * .long 0 - * arg 0 (null-terminated) - * arg 1 - * ... 
- arg N - * .long 0 - * - */ + if (buflen > p->p_argslen) { + data = &data[buflen - p->p_argslen]; + size = p->p_argslen; + } else { + size = buflen; + } - ip -= 2; /*skip trailing 0 word and assume at least one - argument. The last word of argN may be just - the trailing 0, in which case we'd stop - there */ - while (*--ip) - if (ip == (int *)data) - break; - /* - * To account for saved path name and not having a null after that - * Run the sweep again. If we have already sweeped entire range skip this - */ - if (ip != (int *)data) { - while (*--ip) - if (ip == (int *)data) - break; - } - - bzero(data, (unsigned) ((int)ip - (int)data)); - - dealloc_start = copy_start; - dealloc_end = copy_end; - - - size = MIN(size, buflen); - error = copyout(data, where, size); - - if (dealloc_start != (vm_offset_t) 0) { - kmem_free(kernel_map, dealloc_start, - dealloc_end - dealloc_start); + if (argc_yes) { + /* Put the process's argc as the first word in the copyout buffer */ + suword(where, p->p_argc); + error = copyout(data, where + NBPW, size); + } else { + error = copyout(data, where, size); + + /* + * Make the old PROCARGS work to return the executable's path, + * but only if there is enough space in the provided buffer. + * + * on entry: data [possibly] points to the beginning of the path + * + * Note: we keep all pointers & sizes aligned to word boundaries + */ + + if ( (! error) && (buflen > p->p_argslen) ) + { + int binPath_sz; + int extraSpaceNeeded, addThis; + char * placeHere; + char * str = (char *) data; + unsigned int max_len = size; + + /* Some apps are really bad about messing up their stacks. + So, we have to be extra careful about getting the length + of the executing binary. If we encounter an error, we bail. + */ + + /* Limit ourselves to PATH_MAX paths */ + if ( max_len > PATH_MAX ) max_len = PATH_MAX; + + binPath_sz = 0; + + while ( (binPath_sz < max_len-1) && (*str++ != 0) ) + binPath_sz++; + + if (binPath_sz < max_len-1) binPath_sz += 1; + + /* Pre-flight the space requirements */ + + /* Account for the padding that fills out binPath to the next word */ + binPath_sz += (binPath_sz & (NBPW-1)) ? (NBPW-(binPath_sz & (NBPW-1))) : 0; + + placeHere = where + size; + + /* Account for the bytes needed to keep placeHere word aligned */ + addThis = ((unsigned long)placeHere & (NBPW-1)) ? (NBPW-((unsigned long)placeHere & (NBPW-1))) : 0; + + /* Add up all the space that is needed */ + extraSpaceNeeded = binPath_sz + addThis + (4 * NBPW); + + /* is there room to tack on argv[0]? */ + if ( (buflen & ~(NBPW-1)) >= ( p->p_argslen + extraSpaceNeeded )) + { + placeHere += addThis; + suword(placeHere, 0); + placeHere += NBPW; + suword(placeHere, 0xBFFF0000); + placeHere += NBPW; + suword(placeHere, 0); + placeHere += NBPW; + error = copyout(data, placeHere, binPath_sz); + if ( ! error ) + { + placeHere += binPath_sz; + suword(placeHere, 0); + size += extraSpaceNeeded; + } + } + } + } + + if (copy_start != (vm_offset_t) 0) { + kmem_free(kernel_map, copy_start, copy_end - copy_start); } if (error) { return(error); @@ -1206,3 +1431,197 @@ sysctl_procargs(name, namelen, where, sizep, cur_proc) *sizep = size; return (0); } + + +/* + * Validate parameters and get old / set new parameters + * for max number of concurrent aio requests. Makes sure + * the system wide limit is greater than the per process + * limit.
+ */ +static int +sysctl_aiomax( void *oldp, size_t *oldlenp, void *newp, size_t newlen ) +{ + int error = 0; + int new_value; + + if ( oldp && *oldlenp < sizeof(int) ) + return (ENOMEM); + if ( newp && newlen != sizeof(int) ) + return (EINVAL); + + *oldlenp = sizeof(int); + if ( oldp ) + error = copyout( &aio_max_requests, oldp, sizeof(int) ); + if ( error == 0 && newp ) + error = copyin( newp, &new_value, sizeof(int) ); + if ( error == 0 && newp ) { + if ( new_value >= aio_max_requests_per_process ) + aio_max_requests = new_value; + else + error = EINVAL; + } + return( error ); + +} /* sysctl_aiomax */ + + +/* + * Validate parameters and get old / set new parameters + * for max number of concurrent aio requests per process. + * Makes sure per process limit is less than the system wide + * limit. + */ +static int +sysctl_aioprocmax( void *oldp, size_t *oldlenp, void *newp, size_t newlen ) +{ + int error = 0; + int new_value = 0; + + if ( oldp && *oldlenp < sizeof(int) ) + return (ENOMEM); + if ( newp && newlen != sizeof(int) ) + return (EINVAL); + + *oldlenp = sizeof(int); + if ( oldp ) + error = copyout( &aio_max_requests_per_process, oldp, sizeof(int) ); + if ( error == 0 && newp ) + error = copyin( newp, &new_value, sizeof(int) ); + if ( error == 0 && newp ) { + if ( new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX ) + aio_max_requests_per_process = new_value; + else + error = EINVAL; + } + return( error ); + +} /* sysctl_aioprocmax */ + + +/* + * Validate parameters and get old / set new parameters + * for max number of async IO worker threads. + * We only allow an increase in the number of worker threads. + */ +static int +sysctl_aiothreads( void *oldp, size_t *oldlenp, void *newp, size_t newlen ) +{ + int error = 0; + int new_value; + + if ( oldp && *oldlenp < sizeof(int) ) + return (ENOMEM); + if ( newp && newlen != sizeof(int) ) + return (EINVAL); + + *oldlenp = sizeof(int); + if ( oldp ) + error = copyout( &aio_worker_threads, oldp, sizeof(int) ); + if ( error == 0 && newp ) + error = copyin( newp, &new_value, sizeof(int) ); + if ( error == 0 && newp ) { + if (new_value > aio_worker_threads ) { + _aio_create_worker_threads( (new_value - aio_worker_threads) ); + aio_worker_threads = new_value; + } + else + error = EINVAL; + } + return( error ); + +} /* sysctl_aiothreads */ + + +/* + * Validate parameters and get old / set new parameters + * for max number of processes per UID. + * Makes sure per UID limit is less than the system wide limit. + */ +static int +sysctl_maxprocperuid( void *oldp, size_t *oldlenp, void *newp, size_t newlen ) +{ + int error = 0; + int new_value; + + if ( oldp != NULL && *oldlenp < sizeof(int) ) + return (ENOMEM); + if ( newp != NULL && newlen != sizeof(int) ) + return (EINVAL); + + *oldlenp = sizeof(int); + if ( oldp != NULL ) + error = copyout( &maxprocperuid, oldp, sizeof(int) ); + if ( error == 0 && newp != NULL ) { + error = copyin( newp, &new_value, sizeof(int) ); + if ( error == 0 && new_value <= maxproc && new_value > 0 ) + maxprocperuid = new_value; + else + error = EINVAL; + } + return( error ); + +} /* sysctl_maxprocperuid */ + + +/* + * Validate parameters and get old / set new parameters + * for max number of files per process. + * Makes sure per process limit is less than the system-wide limit. 
+ */ +static int +sysctl_maxfilesperproc( void *oldp, size_t *oldlenp, void *newp, size_t newlen ) +{ + int error = 0; + int new_value; + + if ( oldp != NULL && *oldlenp < sizeof(int) ) + return (ENOMEM); + if ( newp != NULL && newlen != sizeof(int) ) + return (EINVAL); + + *oldlenp = sizeof(int); + if ( oldp != NULL ) + error = copyout( &maxfilesperproc, oldp, sizeof(int) ); + if ( error == 0 && newp != NULL ) { + error = copyin( newp, &new_value, sizeof(int) ); + if ( error == 0 && new_value < maxfiles && new_value > 0 ) + maxfilesperproc = new_value; + else + error = EINVAL; + } + return( error ); + +} /* sysctl_maxfilesperproc */ + + +/* + * Validate parameters and get old / set new parameters + * for the system-wide limit on the max number of processes. + * Makes sure the system-wide limit is less than the configured hard + * limit set at kernel compilation. + */ +static int +sysctl_maxproc( void *oldp, size_t *oldlenp, void *newp, size_t newlen ) +{ + int error = 0; + int new_value; + + if ( oldp != NULL && *oldlenp < sizeof(int) ) + return (ENOMEM); + if ( newp != NULL && newlen != sizeof(int) ) + return (EINVAL); + + *oldlenp = sizeof(int); + if ( oldp != NULL ) + error = copyout( &maxproc, oldp, sizeof(int) ); + if ( error == 0 && newp != NULL ) { + error = copyin( newp, &new_value, sizeof(int) ); + if ( error == 0 && new_value <= hard_maxproc && new_value > 0 ) + maxproc = new_value; + else + error = EINVAL; + } + return( error ); + +} /* sysctl_maxproc */ diff --git a/bsd/kern/kern_time.c b/bsd/kern/kern_time.c index c31bbb507..78fc31661 100644 --- a/bsd/kern/kern_time.c +++ b/bsd/kern/kern_time.c @@ -102,7 +102,7 @@ gettimeofday(p, uap, retval) /* NOTE THIS implementation is for non ppc architectures only */ if (uap->tp) { - microtime(&atv); + clock_get_calendar_microtime(&atv.tv_sec, &atv.tv_usec); if (error = copyout((caddr_t)&atv, (caddr_t)uap->tp, sizeof (atv))) return(error); @@ -158,21 +158,14 @@ setthetime(tv) struct timeval *tv; { long delta = tv->tv_sec - time.tv_sec; - mach_timespec_t now; - now.tv_sec = tv->tv_sec; - now.tv_nsec = tv->tv_usec * NSEC_PER_USEC; - - clock_set_calendar_value(now); + clock_set_calendar_microtime(tv->tv_sec, tv->tv_usec); boottime.tv_sec += delta; #if NFSCLIENT || NFSSERVER lease_updatetime(delta); #endif } -#define tickadj (40 * NSEC_PER_USEC) /* "standard" skew, ns / 10 ms */ -#define bigadj (1 * NSEC_PER_SEC) /* use 10x skew above bigadj ns */ - struct adjtime_args { struct timeval *delta; struct timeval *olddelta; @@ -185,8 +178,6 @@ adjtime(p, uap, retval) register_t *retval; { struct timeval atv; - int64_t total; - uint32_t delta; int error; if (error = suser(p->p_ucred, &p->p_acflag)) @@ -198,17 +189,9 @@ adjtime(p, uap, retval) /* * Compute the total correction and the rate at which to apply it. */ - total = (int64_t)atv.tv_sec * NSEC_PER_SEC + atv.tv_usec * NSEC_PER_USEC; - if (total > bigadj || total < -bigadj) - delta = 10 * tickadj; - else - delta = tickadj; - - total = clock_set_calendar_adjtime(total, delta); + clock_adjtime(&atv.tv_sec, &atv.tv_usec); if (uap->olddelta) { - atv.tv_sec = total / NSEC_PER_SEC; - atv.tv_usec = (total / NSEC_PER_USEC) % USEC_PER_SEC; (void) copyout((caddr_t)&atv, (caddr_t)uap->olddelta, sizeof (struct timeval)); } @@ -226,6 +209,8 @@ void inittodr(base) time_t base; { + struct timeval tv; + /* * Assertion: * The calendar has already been @@ -234,21 +219,17 @@ inittodr(base) * The value returned by microtime() * is gotten from the calendar. 
*/ - microtime(&time); + microtime(&tv); - /* - * This variable still exists to keep - * 'w' happy. It should only be considered - * an approximation. - */ - boottime.tv_sec = time.tv_sec; + time = tv; + boottime.tv_sec = tv.tv_sec; boottime.tv_usec = 0; /* * If the RTC does not have acceptable value, i.e. time before * the UNIX epoch, set it to the UNIX epoch */ - if (time.tv_sec < 0) { + if (tv.tv_sec < 0) { printf ("WARNING: preposterous time in Real Time Clock"); time.tv_sec = 0; /* the UNIX epoch */ time.tv_usec = 0; @@ -430,10 +411,10 @@ realitexpire( } } - thread_call_func_delayed(realitexpire, pid, tvtoabstime(&p->p_rtime)); - psignal(p, SIGALRM); + thread_call_func_delayed(realitexpire, pid, tvtoabstime(&p->p_rtime)); + (void) thread_funnel_set(kernel_flock, FALSE); } @@ -549,20 +530,14 @@ void microtime( struct timeval *tvp) { - mach_timespec_t now = clock_get_calendar_value(); - - tvp->tv_sec = now.tv_sec; - tvp->tv_usec = now.tv_nsec / NSEC_PER_USEC; + clock_get_calendar_microtime(&tvp->tv_sec, &tvp->tv_usec); } void microuptime( struct timeval *tvp) { - mach_timespec_t now = clock_get_system_value(); - - tvp->tv_sec = now.tv_sec; - tvp->tv_usec = now.tv_nsec / NSEC_PER_USEC; + clock_get_system_microtime(&tvp->tv_sec, &tvp->tv_usec); } /* @@ -572,20 +547,14 @@ void nanotime( struct timespec *tsp) { - mach_timespec_t now = clock_get_calendar_value(); - - tsp->tv_sec = now.tv_sec; - tsp->tv_nsec = now.tv_nsec; + clock_get_calendar_nanotime((uint32_t *)&tsp->tv_sec, &tsp->tv_nsec); } void nanouptime( struct timespec *tsp) { - mach_timespec_t now = clock_get_system_value(); - - tsp->tv_sec = now.tv_sec; - tsp->tv_nsec = now.tv_nsec; + clock_get_system_nanotime((uint32_t *)&tsp->tv_sec, &tsp->tv_nsec); } uint64_t diff --git a/bsd/kern/kern_xxx.c b/bsd/kern/kern_xxx.c index 49d35c157..730ec3194 100644 --- a/bsd/kern/kern_xxx.c +++ b/bsd/kern/kern_xxx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -118,8 +118,7 @@ register_t *retval; name = KERN_HOSTNAME; - return (kern_sysctl(&name, 1, uap->hostname, &uap->len), - 0, 0); + return (kern_sysctl(&name, 1, uap->hostname, &uap->len, 0, 0)); } struct osethostname_args { @@ -204,8 +203,8 @@ register_t *retval; return(error); if (uap->opt & RB_COMMAND) - error = copyinstr(uap->command, - command, sizeof(command), &dummy); + error = copyinstr((void *)uap->command, + (void *)command, sizeof(command), (size_t *)&dummy); if (!error) { SET(p->p_flag, P_REBOOT); /* No more signals for this proc */ boot(RB_BOOT, uap->opt, command); diff --git a/bsd/kern/mach_fat.c b/bsd/kern/mach_fat.c index eca9d60e2..96669065a 100644 --- a/bsd/kern/mach_fat.c +++ b/bsd/kern/mach_fat.c @@ -43,33 +43,36 @@ #include #include +#define CPU_TYPE_NATIVE (machine_slot[cpu_number()].cpu_type) +#define CPU_TYPE_CLASSIC CPU_TYPE_POWERPC /********************************************************************** - * Routine: fatfile_getarch() + * Routine: fatfile_getarch2() * * Function: Locate the architecture-dependant contents of a fat * file that match this CPU. * * Args: vp: The vnode for the fat file. * header: A pointer to the fat file header. + * cpu_type: The required cpu type. * archret (out): Pointer to fat_arch structure to hold * the results. * * Returns: KERN_SUCCESS: Valid architecture found. * KERN_FAILURE: No valid architecture found. 
**********************************************************************/ -load_return_t -fatfile_getarch( - struct vnode *vp, - vm_offset_t data_ptr, - struct fat_arch *archret) +static load_return_t +fatfile_getarch2( + struct vnode *vp, + vm_offset_t data_ptr, + cpu_type_t cpu_type, + struct fat_arch *archret) { /* vm_pager_t pager; */ vm_offset_t addr; vm_size_t size; kern_return_t kret; load_return_t lret; - struct machine_slot *ms; struct fat_arch *arch; struct fat_arch *best_arch; int grade; @@ -107,7 +110,7 @@ fatfile_getarch( /* * Round size of fat_arch structures up to page boundry. */ - size = round_page(end_of_archs); + size = round_page_32(end_of_archs); if (size <= 0) return(LOAD_BADMACHO); @@ -115,7 +118,6 @@ fatfile_getarch( * Scan the fat_arch's looking for the best one. */ addr = data_ptr; - ms = &machine_slot[cpu_number()]; best_arch = NULL; best_grade = 0; arch = (struct fat_arch *) (addr + sizeof(struct fat_header)); @@ -124,7 +126,7 @@ fatfile_getarch( /* * Check to see if right cpu type. */ - if(NXSwapBigIntToHost(arch->cputype) != ms->cpu_type) + if(NXSwapBigIntToHost(arch->cputype) != cpu_type) continue; /* @@ -168,4 +170,54 @@ fatfile_getarch( return(lret); } +extern char classichandler[]; + +load_return_t +fatfile_getarch_affinity( + struct vnode *vp, + vm_offset_t data_ptr, + struct fat_arch *archret, + int affinity) +{ + load_return_t lret; + int handler = (classichandler[0] != 0); + cpu_type_t primary_type, fallback_type; + + if (handler && affinity) { + primary_type = CPU_TYPE_CLASSIC; + fallback_type = CPU_TYPE_NATIVE; + } else { + primary_type = CPU_TYPE_NATIVE; + fallback_type = CPU_TYPE_CLASSIC; + } + lret = fatfile_getarch2(vp, data_ptr, primary_type, archret); + if ((lret != 0) && handler) { + lret = fatfile_getarch2(vp, data_ptr, fallback_type, + archret); + } + return lret; +} + +/********************************************************************** + * Routine: fatfile_getarch() + * + * Function: Locate the architecture-dependant contents of a fat + * file that match this CPU. + * + * Args: vp: The vnode for the fat file. + * header: A pointer to the fat file header. + * archret (out): Pointer to fat_arch structure to hold + * the results. + * + * Returns: KERN_SUCCESS: Valid architecture found. + * KERN_FAILURE: No valid architecture found. 
+ **********************************************************************/ +load_return_t +fatfile_getarch( + struct vnode *vp, + vm_offset_t data_ptr, + struct fat_arch *archret) +{ + return fatfile_getarch2(vp, data_ptr, CPU_TYPE_NATIVE, archret); +} diff --git a/bsd/kern/mach_header.c b/bsd/kern/mach_header.c index 2510daa92..a303b109b 100644 --- a/bsd/kern/mach_header.c +++ b/bsd/kern/mach_header.c @@ -398,7 +398,7 @@ struct segment_command *getfakefvmseg(void) #if DEBUG printf("fake fvm seg __USER/\"%s\" at 0x%x, size 0x%x\n", sp->sectname, sp->addr, sp->size); -#endif DEBUG +#endif /* DEBUG */ } /* diff --git a/bsd/kern/mach_loader.c b/bsd/kern/mach_loader.c index c851ff478..a0289da65 100644 --- a/bsd/kern/mach_loader.c +++ b/bsd/kern/mach_loader.c @@ -47,6 +47,7 @@ #include #include +#include #include #include @@ -77,7 +78,8 @@ parse_machfile( unsigned long file_offset, unsigned long macho_size, int depth, - load_result_t *result + load_result_t *result, + boolean_t clean_regions ), load_segment( struct segment_command *scp, @@ -121,7 +123,8 @@ load_dylinker( vm_map_t map, thread_act_t thr_act, int depth, - load_result_t *result + load_result_t *result, + boolean_t clean_regions ), get_macho_vnode( char *path, @@ -139,7 +142,8 @@ load_machfile( unsigned long macho_size, load_result_t *result, thread_act_t thr_act, - vm_map_t new_map + vm_map_t new_map, + boolean_t clean_regions ) { pmap_t pmap; @@ -149,6 +153,9 @@ load_machfile( kern_return_t kret; load_return_t lret; boolean_t create_map = TRUE; +#ifndef i386 + extern pmap_t pmap_create(vm_size_t size); /* XXX */ +#endif if (new_map != VM_MAP_NULL) { create_map = FALSE; @@ -168,29 +175,30 @@ load_machfile( TRUE); /**** FIXME ****/ } else map = new_map; - + if (!result) result = &myresult; *result = (load_result_t) { 0 }; lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size, - 0, result); + 0, result, clean_regions); if (lret != LOAD_SUCCESS) { - if (create_map) + if (create_map) { vm_map_deallocate(map); /* will lose pmap reference too */ + } return(lret); } + /* * Commit to new map. First make sure that the current * users of the task get done with it, and that we clean * up the old contents of IPC and memory. The task is * guaranteed to be single threaded upon return (us). * - * Swap the new map for the old at the task level and at - * our activation. The latter consumes our new map reference - * but each leaves us responsible for the old_map reference. + * Swap the new map for the old, which consumes our new map + * reference but each leaves us responsible for the old_map reference. * That lets us get off the pmap associated with it, and * then we can release it. 
*/ @@ -198,10 +206,6 @@ load_machfile( task_halt(current_task()); old_map = swap_task_map(current_task(), map); - vm_map_deallocate(old_map); - - old_map = swap_act_map(current_act(), map); - #ifndef i386 pmap_switch(pmap); /* Make sure we are using the new pmap */ #endif @@ -211,7 +215,6 @@ load_machfile( } int dylink_test = 1; -extern vm_offset_t system_shared_region; static load_return_t @@ -223,7 +226,8 @@ parse_machfile( unsigned long file_offset, unsigned long macho_size, int depth, - load_result_t *result + load_result_t *result, + boolean_t clean_regions ) { struct machine_slot *ms; @@ -231,7 +235,7 @@ parse_machfile( struct load_command *lcp, *next; struct dylinker_command *dlp = 0; void * pager; - load_return_t ret; + load_return_t ret = LOAD_SUCCESS; vm_offset_t addr, kl_addr; vm_size_t size,kl_size; int offset; @@ -299,7 +303,7 @@ parse_machfile( /* * Round size of Mach-O commands up to page boundry. */ - size = round_page(sizeof (struct mach_header) + header->sizeofcmds); + size = round_page_32(sizeof (struct mach_header) + header->sizeofcmds); if (size <= 0) return(LOAD_BADMACHO); @@ -313,11 +317,11 @@ parse_machfile( if (addr == NULL) return(LOAD_NOSPACE); - if(error = vn_rdwr(UIO_READ, vp, addr, size, file_offset, + if(error = vn_rdwr(UIO_READ, vp, (caddr_t)addr, size, file_offset, UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) { if (kl_addr ) kfree(kl_addr, kl_size); - return(EIO); + return(LOAD_IOERROR); } /* ubc_map(vp); */ /* NOT HERE */ @@ -376,13 +380,13 @@ parse_machfile( case LC_LOAD_DYLINKER: if (pass != 2) break; - if (depth == 1 || dlp == 0) + if ((depth == 1) && (dlp == 0)) dlp = (struct dylinker_command *)lcp; else ret = LOAD_FAILURE; break; default: - ret = KERN_SUCCESS;/* ignore other stuff */ + ret = LOAD_SUCCESS;/* ignore other stuff */ } if (ret != LOAD_SUCCESS) break; @@ -390,7 +394,7 @@ parse_machfile( if (ret != LOAD_SUCCESS) break; } - if (ret == LOAD_SUCCESS && dlp != 0) { + if ((ret == LOAD_SUCCESS) && (depth == 1)) { vm_offset_t addr; shared_region_mapping_t shared_region; struct shared_region_task_mappings map_info; @@ -408,33 +412,91 @@ RedoLookup: &(map_info.client_base), &(map_info.alternate_base), &(map_info.alternate_next), + &(map_info.fs_base), + &(map_info.system), &(map_info.flags), &next); - if((map_info.self != (vm_offset_t)system_shared_region) && - (map_info.flags & SHARED_REGION_SYSTEM)) { - shared_region_mapping_ref(system_shared_region); - vm_set_shared_region(task, system_shared_region); - shared_region_mapping_dealloc( + if((map_info.flags & SHARED_REGION_FULL) || + (map_info.flags & SHARED_REGION_STALE)) { + shared_region_mapping_t system_region; + system_region = lookup_default_shared_region( + map_info.fs_base, map_info.system); + if((map_info.self != (vm_offset_t)system_region) && + (map_info.flags & SHARED_REGION_SYSTEM)) { + if(system_region == NULL) { + shared_file_boot_time_init( + map_info.fs_base, map_info.system); + } else { + vm_set_shared_region(task, system_region); + } + shared_region_mapping_dealloc( (shared_region_mapping_t)map_info.self); - goto RedoLookup; + goto RedoLookup; + } else if (map_info.flags & SHARED_REGION_SYSTEM) { + shared_region_mapping_dealloc(system_region); + shared_file_boot_time_init( + map_info.fs_base, map_info.system); + shared_region_mapping_dealloc( + (shared_region_mapping_t)map_info.self); + } else { + shared_region_mapping_dealloc(system_region); + } } if (dylink_test) { p->p_flag |= P_NOSHLIB; /* no shlibs in use */ addr = map_info.client_base; - vm_map(map, &addr, map_info.text_size, 0, + 
if(clean_regions) { + vm_map(map, &addr, map_info.text_size, + 0, SHARED_LIB_ALIAS, + map_info.text_region, 0, FALSE, + VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE); + } else { + vm_map(map, &addr, map_info.text_size, 0, (VM_MEMORY_SHARED_PMAP << 24) | SHARED_LIB_ALIAS, map_info.text_region, 0, FALSE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE); + } addr = map_info.client_base + map_info.text_size; vm_map(map, &addr, map_info.data_size, 0, SHARED_LIB_ALIAS, map_info.data_region, 0, TRUE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE); + + while (next) { + /* this should be fleshed out for the general case */ + /* but this is not necessary for now. Indeed we */ + /* are handling the com page inside of the */ + /* shared_region mapping create calls for now for */ + /* simplicity's sake. If more general support is */ + /* needed the code to manipulate the shared range */ + /* chain can be pulled out and moved to the callers*/ + shared_region_mapping_info(next, + &(map_info.text_region), + &(map_info.text_size), + &(map_info.data_region), + &(map_info.data_size), + &(map_info.region_mappings), + &(map_info.client_base), + &(map_info.alternate_base), + &(map_info.alternate_next), + &(map_info.fs_base), + &(map_info.system), + &(map_info.flags), &next); + + addr = map_info.client_base; + vm_map(map, &addr, map_info.text_size, + 0, SHARED_LIB_ALIAS, + map_info.text_region, 0, FALSE, + VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE); + } } - ret = load_dylinker(dlp, map, thr_act, depth, result); + if (dlp != 0) { + ret = load_dylinker(dlp, map, thr_act, + depth, result, clean_regions); + } } if (kl_addr ) kfree(kl_addr, kl_size); @@ -467,9 +529,6 @@ load_segment( caddr_t tmp; vm_prot_t initprot; vm_prot_t maxprot; -#if 1 - extern int print_map_addr; -#endif /* 1 */ /* * Make sure what we get from the file is really ours (as specified * in the fat header) by checking location and size. */ if (scp->fileoff + scp->filesize > macho_size) return (LOAD_BADMACHO); - seg_size = round_page(scp->vmsize); + seg_size = round_page_32(scp->vmsize); if (seg_size == 0) return(KERN_SUCCESS); /* * Round sizes to page size. */ - map_size = round_page(scp->filesize); - map_addr = trunc_page(scp->vmaddr); + map_size = round_page_32(scp->filesize); + map_addr = trunc_page_32(scp->vmaddr); map_offset = pager_offset + scp->fileoff; @@ -504,10 +563,6 @@ load_segment( if (ret != KERN_SUCCESS) return(LOAD_NOSPACE); -#if 1 - if (print_map_addr) - printf("LSegment: Mapped addr= %x; size = %x\n", map_addr, map_size); -#endif /* 1 */ /* * If the file didn't end on a page boundary, * we need to zero the leftover.
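/*
 * A minimal standalone sketch, not kernel code, of the pattern the
 * load_segment() hunks above depend on: the file-backed mapping is
 * rounded out with round_page_32()/trunc_page_32(), so any bytes of the
 * last page beyond scp->filesize must be zeroed rather than left as
 * stale mapping contents. The 4 KB page size and the names
 * round_up_sketch/zero_segment_tail are illustrative assumptions only.
 */
#include <string.h>

#define SKETCH_PAGE_SIZE 4096UL
#define round_up_sketch(x) \
	(((x) + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1))

static void
zero_segment_tail(char *map_addr, unsigned long file_size)
{
	/* Size the mapping the way the loader does: rounded up to a page. */
	unsigned long map_size = round_up_sketch(file_size);

	/* If the file didn't end on a page boundary, clear the leftover. */
	if (map_size > file_size)
		memset(map_addr + file_size, 0, map_size - file_size);
}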
@@ -570,18 +625,16 @@ static load_return_t load_unixthread( struct thread_command *tcp, - thread_act_t thr_act, + thread_act_t thread, load_result_t *result ) { - thread_t thread = current_thread(); load_return_t ret; int customstack =0; if (result->thread_count != 0) return (LOAD_FAILURE); - thread = getshuttle_thread(thr_act); ret = load_threadstack(thread, (unsigned long *)(((vm_offset_t)tcp) + sizeof(struct thread_command)), @@ -620,25 +673,23 @@ static load_return_t load_thread( struct thread_command *tcp, - thread_act_t thr_act, + thread_act_t thread, load_result_t *result ) { - thread_t thread; kern_return_t kret; load_return_t lret; task_t task; int customstack=0; - task = get_threadtask(thr_act); - thread = getshuttle_thread(thr_act); + task = get_threadtask(thread); /* if count is 0; same as thr_act */ if (result->thread_count != 0) { kret = thread_create(task, &thread); if (kret != KERN_SUCCESS) return(LOAD_RESOURCE); - thread_deallocate(thread); + act_deallocate(thread); } lret = load_threadstate(thread, @@ -706,7 +757,7 @@ load_threadstate( total_size -= (size+2)*sizeof(unsigned long); if (total_size < 0) return(LOAD_BADMACHO); - ret = thread_setstatus(getact_thread(thread), flavor, ts, size); + ret = thread_setstatus(thread, flavor, ts, size); if (ret != KERN_SUCCESS) return(LOAD_FAILURE); ts += size; /* ts is a (unsigned long *) */ @@ -783,7 +834,8 @@ load_dylinker( vm_map_t map, thread_act_t thr_act, int depth, - load_result_t *result + load_result_t *result, + boolean_t clean_regions ) { char *name; @@ -798,6 +850,7 @@ load_dylinker( vm_map_copy_t tmp; vm_offset_t dyl_start, map_addr; vm_size_t dyl_length; + extern pmap_t pmap_create(vm_size_t size); /* XXX */ name = (char *)lcp + lcp->name.offset; /* @@ -824,7 +877,7 @@ load_dylinker( ret = parse_machfile(vp, copy_map, thr_act, &header, file_offset, macho_size, - depth, &myresult); + depth, &myresult, clean_regions); if (ret) goto out; @@ -898,7 +951,7 @@ get_macho_vnode( struct proc *p = current_proc(); /* XXXX */ boolean_t is_fat; struct fat_arch fat_arch; - int error = KERN_SUCCESS; + int error = LOAD_SUCCESS; int resid; union { struct mach_header mach_header; @@ -907,6 +960,7 @@ get_macho_vnode( } header; off_t fsize = (off_t)0; struct ucred *cred = p->p_ucred; + int err2; ndp = &nid; atp = &attr; @@ -914,24 +968,31 @@ get_macho_vnode( /* init the namei data to point the file user's program name */ NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p); - if (error = namei(ndp)) + if (error = namei(ndp)) { + if (error == ENOENT) + error = LOAD_ENOENT; + else + error = LOAD_FAILURE; return(error); + } vp = ndp->ni_vp; /* check for regular file */ if (vp->v_type != VREG) { - error = EACCES; + error = LOAD_PROTECT; goto bad1; } /* get attributes */ - if (error = VOP_GETATTR(vp, &attr, cred, p)) + if (error = VOP_GETATTR(vp, &attr, cred, p)) { + error = LOAD_FAILURE; goto bad1; + } /* Check mount point */ if (vp->v_mount->mnt_flag & MNT_NOEXEC) { - error = EACCES; + error = LOAD_PROTECT; goto bad1; } @@ -939,28 +1000,33 @@ get_macho_vnode( atp->va_mode &= ~(VSUID | VSGID); /* check access. 
for root we have to see if any exec bit on */ - if (error = VOP_ACCESS(vp, VEXEC, cred, p)) + if (error = VOP_ACCESS(vp, VEXEC, cred, p)) { + error = LOAD_PROTECT; goto bad1; + } if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) { - error = EACCES; + error = LOAD_PROTECT; goto bad1; } /* hold the vnode for the IO */ if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) { - error = ENOENT; + error = LOAD_ENOENT; goto bad1; } /* try to open it */ if (error = VOP_OPEN(vp, FREAD, cred, p)) { + error = LOAD_PROTECT; ubc_rele(vp); goto bad1; } if(error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0, - UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p)) + UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p)) { + error = LOAD_IOERROR; goto bad2; + } if (header.mach_header.magic == MH_MAGIC) is_fat = FALSE; @@ -979,11 +1045,11 @@ get_macho_vnode( goto bad2; /* Read the Mach-O header out of it */ - error = vn_rdwr(UIO_READ, vp, &header.mach_header, + error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header, sizeof(header.mach_header), fat_arch.offset, UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p); if (error) { - error = LOAD_FAILURE; + error = LOAD_IOERROR; goto bad2; } @@ -1012,7 +1078,7 @@ get_macho_vnode( bad2: VOP_UNLOCK(vp, 0, p); - error = VOP_CLOSE(vp, FREAD, cred, p); + err2 = VOP_CLOSE(vp, FREAD, cred, p); ubc_rele(vp); vrele(vp); return (error); diff --git a/bsd/kern/mach_loader.h b/bsd/kern/mach_loader.h index 4b8a2db1c..3da190d28 100644 --- a/bsd/kern/mach_loader.h +++ b/bsd/kern/mach_loader.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -38,20 +38,19 @@ #define _BSD_KERN_MACH_LOADER_H_ #include - #include typedef int load_return_t; typedef struct _load_result { - vm_offset_t mach_header; - vm_offset_t entry_point; - vm_offset_t user_stack; - int thread_count; - unsigned int - /* boolean_t */ unixproc :1, - dynlinker :1, - customstack :1, + vm_offset_t mach_header; + vm_offset_t entry_point; + vm_offset_t user_stack; + int thread_count; + unsigned int + /* boolean_t */ unixproc :1, + dynlinker :1, + customstack :1, :0; } load_result_t; @@ -62,7 +61,8 @@ load_return_t load_machfile( unsigned long macho_size, load_result_t *result, thread_act_t thr_act, - vm_map_t map); + vm_map_t map, + boolean_t clean_regions); #define LOAD_SUCCESS 0 #define LOAD_BADARCH 1 /* CPU type/subtype not found */ @@ -72,5 +72,7 @@ load_return_t load_machfile( #define LOAD_NOSPACE 5 /* No VM available */ #define LOAD_PROTECT 6 /* protection violation */ #define LOAD_RESOURCE 7 /* resource allocation failure */ +#define LOAD_ENOENT 8 /* resource not found */ +#define LOAD_IOERROR 9 /* IO error */ #endif /* _BSD_KERN_MACH_LOADER_H_ */ diff --git a/bsd/kern/mach_process.c b/bsd/kern/mach_process.c index 583080baf..8e6a05af4 100644 --- a/bsd/kern/mach_process.c +++ b/bsd/kern/mach_process.c @@ -117,7 +117,7 @@ ptrace(p, uap, retval) int *locr0; int error = 0; #if defined(ppc) - struct ppc_thread_state statep; + struct ppc_thread_state64 statep; #elif defined(i386) struct i386_saved_state statep; #else @@ -291,8 +291,8 @@ ptrace(p, uap, retval) goto errorLabel; } #elif defined(ppc) - state_count = PPC_THREAD_STATE_COUNT; - if (thread_getstatus(th_act, PPC_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + state_count = PPC_THREAD_STATE64_COUNT; + if (thread_getstatus(th_act, PPC_THREAD_STATE64, &statep, &state_count) != KERN_SUCCESS) { goto errorLabel; } #else @@ -306,9 
+306,9 @@ ptrace(p, uap, retval) if (!ALIGNED((int)uap->addr, sizeof(int))) return (ERESTART); - statep.srr0 = (int)uap->addr; - state_count = PPC_THREAD_STATE_COUNT; - if (thread_setstatus(th_act, PPC_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + statep.srr0 = (uint64_t)((uint32_t)uap->addr); + state_count = PPC_THREAD_STATE64_COUNT; + if (thread_setstatus(th_act, PPC_THREAD_STATE64, &statep, &state_count) != KERN_SUCCESS) { goto errorLabel; } #undef ALIGNED @@ -324,8 +324,8 @@ ptrace(p, uap, retval) psignal_lock(t, uap->data, 0); } #if defined(ppc) - state_count = PPC_THREAD_STATE_COUNT; - if (thread_getstatus(th_act, PPC_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + state_count = PPC_THREAD_STATE64_COUNT; + if (thread_getstatus(th_act, PPC_THREAD_STATE64, &statep, &state_count) != KERN_SUCCESS) { goto errorLabel; } #endif @@ -349,8 +349,8 @@ ptrace(p, uap, retval) #endif } #if defined (ppc) - state_count = PPC_THREAD_STATE_COUNT; - if (thread_setstatus(th_act, PPC_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + state_count = PPC_THREAD_STATE64_COUNT; + if (thread_setstatus(th_act, PPC_THREAD_STATE64, &statep, &state_count) != KERN_SUCCESS) { goto errorLabel; } #endif @@ -359,7 +359,8 @@ ptrace(p, uap, retval) t->p_stat = SRUN; if (t->sigwait) { wakeup((caddr_t)&(t->sigwait)); - task_release(task); + if ((t->p_flag & P_SIGEXC) == 0) + task_release(task); } break; diff --git a/bsd/kern/netboot.c b/bsd/kern/netboot.c index ce58ad619..9e5c66170 100644 --- a/bsd/kern/netboot.c +++ b/bsd/kern/netboot.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2001-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -348,7 +348,7 @@ netboot_info_init(struct in_addr iaddr) if (PE_parse_boot_arg("vndevice", vndevice) == TRUE) { use_hdix = FALSE; } - _FREE_ZONE(vndevice, MAXPATHLEN, M_NAMEI); + FREE_ZONE(vndevice, MAXPATHLEN, M_NAMEI); info = (struct netboot_info *)kalloc(sizeof(*info)); bzero(info, sizeof(*info)); @@ -412,7 +412,7 @@ netboot_info_init(struct in_addr iaddr) printf("netboot: root path uses unrecognized format\n"); } } - _FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI); + FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI); return (info); } diff --git a/bsd/kern/posix_sem.c b/bsd/kern/posix_sem.c index 56b920a4f..df10fbc31 100644 --- a/bsd/kern/posix_sem.c +++ b/bsd/kern/posix_sem.c @@ -138,8 +138,10 @@ static int psem_select __P((struct file *fp, int which, void *wql, struct proc *p)); static int psem_closefile __P((struct file *fp, struct proc *p)); +static int psem_kqfilter __P((struct file *fp, struct knote *kn, struct proc *p)); + struct fileops psemops = - { psem_read, psem_write, psem_ioctl, psem_select, psem_closefile }; + { psem_read, psem_write, psem_ioctl, psem_select, psem_closefile, psem_kqfilter }; /* * Lookup an entry in the cache @@ -310,7 +312,7 @@ sem_open(p, uap, retval) register struct filedesc *fdp = p->p_fd; register struct file *fp; register struct vnode *vp; - int flags, i; + int i; struct file *nfp; int type, indx, error; struct psemname nd; @@ -334,7 +336,7 @@ sem_open(p, uap, retval) MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); pathlen = MAXPATHLEN; - error = copyinstr(uap->name, pnbuf, + error = copyinstr((void *)uap->name, pnbuf, MAXPATHLEN, &pathlen); if (error) { goto bad; @@ -446,13 +448,13 @@ sem_open(p, uap, retval) pinfo->psem_flags &= ~PSEM_INCREATE; pinfo->psem_usecount++; pnode->pinfo = pinfo; - fp->f_flag = flags & FMASK; + fp->f_flag 
= fmode & FMASK; fp->f_type = DTYPE_PSXSEM; fp->f_ops = &psemops; fp->f_data = (caddr_t)pnode; *fdflags(p, indx) &= ~UF_RESERVED; *retval = indx; - _FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); return (0); bad3: @@ -473,7 +475,7 @@ bad1: fdrelse(p, indx); ffree(nfp); bad: - _FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); return (error); } @@ -553,7 +555,7 @@ sem_unlink(p, uap, retval) MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); pathlen = MAXPATHLEN; - error = copyinstr(uap->name, pnbuf, + error = copyinstr((void *)uap->name, pnbuf, MAXPATHLEN, &pathlen); if (error) { goto bad; @@ -624,7 +626,7 @@ sem_unlink(p, uap, retval) _FREE(pcache, M_SHM); error = 0; bad: - _FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); return (error); } @@ -943,3 +945,13 @@ psem_select(fp, which, wql, p) { return(EOPNOTSUPP); } + +static int +psem_kqfilter(fp, kn, p) + struct file *fp; + struct knote *kn; + struct proc *p; +{ + return (EOPNOTSUPP); +} + diff --git a/bsd/kern/posix_shm.c b/bsd/kern/posix_shm.c index b6bb6fe45..11c319808 100644 --- a/bsd/kern/posix_shm.c +++ b/bsd/kern/posix_shm.c @@ -143,8 +143,10 @@ static int pshm_select __P((struct file *fp, int which, void *wql, struct proc *p)); static int pshm_closefile __P((struct file *fp, struct proc *p)); +static int pshm_kqfilter __P((struct file *fp, struct knote *kn, struct proc *p)); + struct fileops pshmops = - { pshm_read, pshm_write, pshm_ioctl, pshm_select, pshm_closefile }; + { pshm_read, pshm_write, pshm_ioctl, pshm_select, pshm_closefile, pshm_kqfilter }; /* * Lookup an entry in the cache @@ -210,8 +212,8 @@ pshm_cache_add(pshmp, pnp) { register struct pshmcache *pcp; register struct pshmhashhead *pcpp; - register struct pshminfo *dpinfo; - register struct pshmcache *dpcp; + struct pshminfo *dpinfo; + struct pshmcache *dpcp; #if DIAGNOSTIC if (pnp->pshm_namelen > NCHNAMLEN) @@ -337,7 +339,7 @@ shm_open(p, uap, retval) MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); pathlen = MAXPATHLEN; - error = copyinstr(uap->name, pnbuf, + error = copyinstr((void *)uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen); if (error) { goto bad; @@ -384,11 +386,13 @@ shm_open(p, uap, retval) } else incache = 1; fmode = FFLAGS(uap->oflag); - if ((fmode & (FREAD | FWRITE))==0) - return(EINVAL); + if ((fmode & (FREAD | FWRITE))==0) { + error = EINVAL; + goto bad; + } if (error = falloc(p, &nfp, &indx)) - return (error); + goto bad; fp = nfp; cmode &= ALLPERMS; @@ -462,7 +466,7 @@ shm_open(p, uap, retval) fp->f_data = (caddr_t)pnode; *fdflags(p, indx) &= ~UF_RESERVED; *retval = indx; - _FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); return (0); bad3: _FREE(pnode, M_SHM); @@ -474,7 +478,7 @@ bad1: fdrelse(p, indx); ffree(nfp); bad: - _FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); return (error); } @@ -510,7 +514,7 @@ pshm_truncate(p, fp, fd, length, retval) return(EINVAL); } - size = round_page (length); + size = round_page_64(length); kret = vm_allocate(current_map(), &user_addr, size, TRUE); if (kret != KERN_SUCCESS) goto out; @@ -616,8 +620,8 @@ struct mmap_args { int pshm_mmap(struct proc *p, struct mmap_args *uap, register_t *retval, struct file *fp, vm_size_t pageoff) { - vm_offset_t user_addr = uap->addr; - vm_size_t user_size = uap->len ; + vm_offset_t user_addr = (vm_offset_t)uap->addr; + vm_size_t user_size = (vm_size_t)uap->len ; int prot = uap->prot; int flags = 
uap->flags; vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos; @@ -664,9 +668,9 @@ pshm_mmap(struct proc *p, struct mmap_args *uap, register_t *retval, struct file if ((flags & MAP_FIXED) == 0) { find_space = TRUE; - user_addr = round_page(user_addr); + user_addr = round_page_32(user_addr); } else { - if (user_addr != trunc_page(user_addr)) + if (user_addr != trunc_page_32(user_addr)) return (EINVAL); find_space = FALSE; (void) vm_deallocate(user_map, user_addr, user_size); @@ -738,7 +742,7 @@ shm_unlink(p, uap, retval) MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); pathlen = MAXPATHLEN; - error = copyinstr(uap->name, pnbuf, + error = copyinstr((void *)uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen); if (error) { goto bad; @@ -808,7 +812,7 @@ shm_unlink(p, uap, retval) pinfo->pshm_flags |= PSHM_REMOVED; error = 0; bad: - _FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); return (error); out: switch (kret) { @@ -901,3 +905,12 @@ pshm_select(fp, which, wql, p) { return(EOPNOTSUPP); } + +static int +pshm_kqfilter(fp, kn, p) + struct file *fp; + struct knote *kn; + struct proc *p; +{ + return(EOPNOTSUPP); +} diff --git a/bsd/kern/qsort.c b/bsd/kern/qsort.c index d0424e501..eadaa43fc 100644 --- a/bsd/kern/qsort.c +++ b/bsd/kern/qsort.c @@ -129,16 +129,16 @@ qsort(a, n, es, cmp) loop: SWAPINIT(a, es); swap_cnt = 0; if (n < 7) { - for (pm = a + es; pm < (char *) a + n * es; pm += es) + for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es) for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0; pl -= es) swap(pl, pl - es); return; } - pm = a + (n / 2) * es; + pm = (char *)a + (n / 2) * es; if (n > 7) { pl = a; - pn = a + (n - 1) * es; + pn = (char *)a + (n - 1) * es; if (n > 40) { d = (n / 8) * es; pl = med3(pl, pl + d, pl + 2 * d, cmp); @@ -148,9 +148,9 @@ loop: SWAPINIT(a, es); pm = med3(pl, pm, pn, cmp); } swap(a, pm); - pa = pb = a + es; + pa = pb = (char *)a + es; - pc = pd = a + (n - 1) * es; + pc = pd = (char *)a + (n - 1) * es; for (;;) { while (pb <= pc && (r = cmp(pb, a)) <= 0) { if (r == 0) { @@ -176,14 +176,14 @@ loop: SWAPINIT(a, es); pc -= es; } if (swap_cnt == 0) { /* Switch to insertion sort */ - for (pm = a + es; pm < (char *) a + n * es; pm += es) + for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es) for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0; pl -= es) swap(pl, pl - es); return; } - pn = a + n * es; + pn = (char *)a + n * es; r = min(pa - (char *)a, pb - pa); vecswap(a, pb - r, r); r = min(pd - pc, pn - pd - es); diff --git a/bsd/kern/subr_log.c b/bsd/kern/subr_log.c index 6703f0c19..bd3e360a2 100644 --- a/bsd/kern/subr_log.c +++ b/bsd/kern/subr_log.c @@ -259,7 +259,7 @@ klogwakeup() /*ARGSUSED*/ int -logioctl(com, data, flag) +logioctl(dev, com, data, flag) caddr_t data; { long l; diff --git a/bsd/kern/subr_prf.c b/bsd/kern/subr_prf.c index 200115441..1a4051963 100644 --- a/bsd/kern/subr_prf.c +++ b/bsd/kern/subr_prf.c @@ -210,14 +210,13 @@ tprintf(tpr_t tpr, const char *fmt, ...) 
flags |= TOTTY; tp = sess->s_ttyp; } - if (tp != NULL) { - pca.flags = TOTTY; - pca.tty = tp; - - va_start(ap, fmt); - __doprnt(fmt, &ap, putchar, &pca, 10); - va_end(ap); - } + + pca.flags = flags; + pca.tty = tp; + va_start(ap, fmt); + __doprnt(fmt, &ap, putchar, &pca, 10); + va_end(ap); + logwakeup(); } diff --git a/bsd/kern/subr_prof.c b/bsd/kern/subr_prof.c index 4b84e1430..3812d0722 100644 --- a/bsd/kern/subr_prof.c +++ b/bsd/kern/subr_prof.c @@ -75,6 +75,8 @@ #include #include +decl_simple_lock_data(,mcount_lock); + /* * Froms is actually a bunch of unsigned shorts indexing tos */ @@ -121,6 +123,7 @@ kmstartup() p->kcount = (u_short *)cp; cp += p->kcountsize; p->froms = (u_short *)cp; + simple_lock_init(&mcount_lock); } /* @@ -183,7 +186,6 @@ mcount( register struct tostruct *top, *prevtop; struct gmonparam *p = &_gmonparam; register long toindex; - MCOUNT_INIT; /* * check that we are profiling @@ -192,7 +194,7 @@ mcount( if (p->state != GMON_PROF_ON) return; - MCOUNT_ENTER; + usimple_lock(&mcount_lock); /* * check that frompcindex is a reasonable pc value. @@ -275,25 +277,20 @@ mcount( } done: - MCOUNT_EXIT; + usimple_unlock(&mcount_lock); return; overflow: p->state = GMON_PROF_ERROR; - MCOUNT_EXIT; + usimple_unlock(&mcount_lock); printf("mcount: tos overflow\n"); return; } #endif /* GPROF */ -#if NCPUS > 1 #define PROFILE_LOCK(x) simple_lock(x) #define PROFILE_UNLOCK(x) simple_unlock(x) -#else -#define PROFILE_LOCK(x) -#define PROFILE_UNLOCK(x) -#endif struct profil_args { short *bufbase; @@ -319,7 +316,7 @@ profil(p, uap, retval) } /* Block profile interrupts while changing state. */ - s = splstatclock(); + s = ml_set_interrupts_enabled(FALSE); PROFILE_LOCK(&upp->pr_lock); upp->pr_base = (caddr_t)uap->bufbase; upp->pr_size = uap->bufsize; @@ -335,7 +332,7 @@ profil(p, uap, retval) upp->pr_next = 0; PROFILE_UNLOCK(&upp->pr_lock); startprofclock(p); - splx(s); + ml_set_interrupts_enabled(s); return(0); } @@ -356,7 +353,7 @@ add_profil(p, uap, retval) if (upp->pr_scale == 0) return (0); - s = splstatclock(); + s = ml_set_interrupts_enabled(FALSE); upc = (struct uprof *) kalloc(sizeof (struct uprof)); upc->pr_base = (caddr_t)uap->bufbase; upc->pr_size = uap->bufsize; @@ -366,7 +363,7 @@ add_profil(p, uap, retval) upc->pr_next = upp->pr_next; upp->pr_next = upc; PROFILE_UNLOCK(&upp->pr_lock); - splx(s); + ml_set_interrupts_enabled(s); return(0); } diff --git a/bsd/kern/sys_generic.c b/bsd/kern/sys_generic.c index eb721f871..5e2545890 100644 --- a/bsd/kern/sys_generic.c +++ b/bsd/kern/sys_generic.c @@ -87,6 +87,7 @@ #include #include #include +#include #include #include @@ -109,13 +110,10 @@ #if KTRACE #include #endif +#include -static int dofileread __P((struct proc *, struct file *, int, void *, - size_t, off_t, int, int*)); -static int dofilewrite __P((struct proc *, struct file *, int, - const void *, size_t, off_t, int, int*)); -static struct file* +__private_extern__ struct file* holdfp(fdp, fd, flag) struct filedesc* fdp; int fd, flag; @@ -191,13 +189,18 @@ pread(p, uap, retval) uap->offset, FOF_OFFSET, retval); } frele(fp); + + if (!error) + KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE), + uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0); + return(error); } /* * Code common for read and pread */ -int +__private_extern__ int dofileread(p, fp, fd, buf, nbyte, offset, flags, retval) struct proc *p; struct file *fp; @@ -357,10 +360,15 @@ pwrite(p, uap, retval) uap->offset, FOF_OFFSET, retval); } 
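The new pread/pwrite tracepoints that follow emit the 64-bit file offset as two 32-bit kdebug arguments, since each KERNEL_DEBUG_CONSTANT slot is a single machine word on these kernels. A minimal sketch of the split, and of the rejoin a trace consumer would perform, assuming only <stdint.h>; split_offset and join_offset are illustrative names, not xnu functions:

#include <stdint.h>

/* illustrative only, not part of the patch */
static void
split_offset(int64_t off, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(off >> 32);	/* high word: trace argument 3 */
	*lo = (uint32_t)off;		/* low word: trace argument 4 */
}

static int64_t
join_offset(uint32_t hi, uint32_t lo)
{
	/* a trace post-processor reassembles the offset like this */
	return ((int64_t)hi << 32) | (int64_t)lo;
}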
frele(fp); + + if (!error) + KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE), + uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0); + return(error); } -static int +__private_extern__ int dofilewrite(p, fp, fd, buf, nbyte, offset, flags, retval) struct proc *p; struct file *fp; @@ -407,8 +415,9 @@ dofilewrite(p, fp, fd, buf, nbyte, offset, flags, retval) if (auio.uio_resid != cnt && (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) error = 0; - if (error == EPIPE) - psignal(p, SIGPIPE); + /* The socket layer handles SIGPIPE */ + if (error == EPIPE && fp->f_type != DTYPE_SOCKET) + psignal(p, SIGPIPE); } cnt -= auio.uio_resid; #if KTRACE @@ -1031,6 +1040,7 @@ selscan(p, sel, nfd, retval, sel_pass) int nfunnel = 0; int count, nfcount; char * wql_ptr; + struct vnode *vp; /* * Problems when reboot; due to MacOSX signal probs @@ -1072,7 +1082,18 @@ selscan(p, sel, nfd, retval, sel_pass) wql_ptr = (char *)0; else wql_ptr = (wql+ nc * SIZEOF_WAITQUEUE_LINK); - if (fp->f_ops && (fp->f_type != DTYPE_SOCKET) + /* + * Merlot: need to remove the bogus f_data check + * from the following "if" statement. It's there + * because of various problems stemming from + * races due to the split-funnels and lack of real + * referencing on sockets... + */ + if (fp->f_ops && (fp->f_type != DTYPE_SOCKET) + && (fp->f_data != (caddr_t)-1) + && !(fp->f_type == DTYPE_VNODE + && (vp = (struct vnode *)fp->f_data) + && vp->v_type == VFIFO) && fo_select(fp, flag[msk], wql_ptr, p)) { optr[fd/NFDBITS] |= (1 << (fd % NFDBITS)); n++; @@ -1105,8 +1126,13 @@ selscan(p, sel, nfd, retval, sel_pass) wql_ptr = (char *)0; else wql_ptr = (wql+ nc * SIZEOF_WAITQUEUE_LINK); - if (fp->f_ops && (fp->f_type == DTYPE_SOCKET) && - fo_select(fp, flag[msk], wql_ptr, p)) { + if (fp->f_ops + && (fp->f_type == DTYPE_SOCKET + || (fp->f_type == DTYPE_VNODE + && (vp = (struct vnode *)fp->f_data) + && vp != (struct vnode *)-1 + && vp->v_type == VFIFO)) + && fo_select(fp, flag[msk], wql_ptr, p)) { optr[fd/NFDBITS] |= (1 << (fd % NFDBITS)); n++; } @@ -1150,6 +1176,7 @@ selcount(p, ibits, obits, nfd, count, nfcount) static int flag[3] = { FREAD, FWRITE, 0 }; u_int32_t *iptr, *fptr, *fbits; u_int nw; + struct vnode *vp; /* * Problems when reboot; due to MacOSX signal probs @@ -1177,7 +1204,10 @@ selcount(p, ibits, obits, nfd, count, nfcount) *nfcount=0; return(EBADF); } - if (fp->f_type == DTYPE_SOCKET) + if (fp->f_type == DTYPE_SOCKET || + (fp->f_type == DTYPE_VNODE + && (vp = (struct vnode *)fp->f_data) + && vp->v_type == VFIFO)) nfc++; n++; } @@ -1212,7 +1242,7 @@ selrecord(selector, sip, p_wql) } if ((sip->si_flags & SI_INITED) == 0) { - wait_queue_init(&sip->wait_queue, SYNC_POLICY_FIFO); + wait_queue_init(&sip->si_wait_queue, SYNC_POLICY_FIFO); sip->si_flags |= SI_INITED; sip->si_flags &= ~SI_CLEAR; } @@ -1223,8 +1253,8 @@ selrecord(selector, sip, p_wql) sip->si_flags &= ~SI_COLL; sip->si_flags |= SI_RECORDED; - if (!wait_queue_member(&sip->wait_queue, ut->uu_wqsub)) - wait_queue_link_noalloc(&sip->wait_queue, ut->uu_wqsub, (wait_queue_link_t)p_wql); + if (!wait_queue_member(&sip->si_wait_queue, ut->uu_wqsub)) + wait_queue_link_noalloc(&sip->si_wait_queue, ut->uu_wqsub, (wait_queue_link_t)p_wql); return; } @@ -1248,7 +1278,7 @@ selwakeup(sip) } if (sip->si_flags & SI_RECORDED) { - wait_queue_wakeup_all(&sip->wait_queue, &selwait, THREAD_AWAKENED); + wait_queue_wakeup_all(&sip->si_wait_queue, &selwait, THREAD_AWAKENED); sip->si_flags &= ~SI_RECORDED; } @@ 
-1267,7 +1297,7 @@ selthreadclear(sip) sip->si_flags &= ~(SI_RECORDED | SI_COLL); } sip->si_flags |= SI_CLEAR; - wait_queue_unlinkall_nofree(&sip->wait_queue); + wait_queue_unlinkall_nofree(&sip->si_wait_queue); } @@ -1644,7 +1674,7 @@ retry: } if (interval != 0) - clock_absolutetime_interval_to_deadline(interval, &abstime) + clock_absolutetime_interval_to_deadline(interval, &abstime); KERNEL_DEBUG(DBG_MISC_WAIT, 1,&p->p_evlist,0,0,0); error = tsleep1(&p->p_evlist, PSOCK | PCATCH, @@ -1702,8 +1732,10 @@ modwatch(p, uap, retval) return(EBADF); if (fp->f_type != DTYPE_SOCKET) return(EINVAL); // for now must be sock sp = (struct socket *)fp->f_data; - assert(sp != NULL); + /* soo_close sets f_data to 0 before switching funnel */ + if (sp == (struct socket *)0) + return(EBADF); // locate event if possible for (evq = sp->so_evlist.tqh_first; diff --git a/bsd/kern/sys_socket.c b/bsd/kern/sys_socket.c index a215a42bc..c501134a1 100644 --- a/bsd/kern/sys_socket.c +++ b/bsd/kern/sys_socket.c @@ -60,6 +60,7 @@ #include #include #include +#include #include #include #include @@ -80,8 +81,10 @@ int soo_close __P((struct file *fp, struct proc *p)); int soo_select __P((struct file *fp, int which, void * wql, struct proc *p)); +int soo_kqfilter __P((struct file *fp, struct knote *kn, struct proc *p)); + struct fileops socketops = - { soo_read, soo_write, soo_ioctl, soo_select, soo_close }; + { soo_read, soo_write, soo_ioctl, soo_select, soo_close, soo_kqfilter }; /* ARGSUSED */ int @@ -346,6 +349,7 @@ soo_select(fp, which, wql, p) register int s = splnet(); int retnum=0; + if (so == NULL || so == (struct socket*)-1) goto done; switch (which) { @@ -414,14 +418,17 @@ soo_close(fp, p) struct proc *p; { int error = 0; + struct socket *sp; + + sp = (struct socket *)fp->f_data; + fp->f_data = NULL; thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - if (fp->f_data) - error = soclose((struct socket *)fp->f_data); + if (sp) + error = soclose(sp); thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - fp->f_data = 0; return (error); } diff --git a/bsd/kern/syscalls.c b/bsd/kern/syscalls.c index bc52d8b63..c16e2052f 100644 --- a/bsd/kern/syscalls.c +++ b/bsd/kern/syscalls.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
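The soo_close() rework above snapshots fp->f_data and clears it before the funnel switch, so a thread that races the close observes a NULL f_data rather than a half-torn-down socket (modwatch() and soo_select() now check for exactly that). A minimal sketch of the snapshot-and-clear idiom, assuming a simplified struct file; lock_handoff() and do_close() are hypothetical stand-ins for the funnel switch and soclose():

#include <stddef.h>

struct file { void *f_data; };

extern void lock_handoff(void);	/* hypothetical stand-in for the funnel switch */
extern int do_close(void *obj);	/* hypothetical stand-in for soclose() */

static int
close_with_handoff(struct file *fp)
{
	void *obj = fp->f_data;	/* take the only reference... */
	fp->f_data = NULL;	/* ...and unpublish it while still serialized */

	lock_handoff();		/* racers now see NULL, not a dying object */

	return (obj != NULL) ? do_close(obj) : 0;
}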
* * @APPLE_LICENSE_HEADER_START@ * @@ -128,7 +128,11 @@ char *syscallnames[] = { "getpriority", /* 100 = getpriority */ "old_send", /* 101 = old send */ "old_recv", /* 102 = old recv */ +#ifdef __ppc__ + "osigreturn", /* 103 = sigreturn */ +#else "sigreturn", /* 103 = sigreturn */ +#endif "bind", /* 104 = bind */ "setsockopt", /* 105 = setsockopt */ "listen", /* 106 = listen */ @@ -209,7 +213,11 @@ char *syscallnames[] = { "setgid", /* 181 = setgid */ "setegid", /* 182 = setegid */ "seteuid", /* 183 = seteuid */ +#ifdef __ppc__ + "sigreturn", /* 184 = sigreturn */ +#else "#184", /* 184 = nosys */ +#endif "#185", /* 185 = nosys */ "#186", /* 186 = nosys */ "#187", /* 187 = nosys */ @@ -239,8 +247,8 @@ char *syscallnames[] = { "ATPgetreq", /* 211 = ATPgetreq */ "ATPgetrsp", /* 212 = ATPgetrsp */ "#213", /* 213 = Reserved for AppleTalk */ - "#214", /* 214 = Reserved for AppleTalk */ - "#215", /* 215 = Reserved for AppleTalk */ + "kqueue_from_portset_np", /* 214 = kqueue_from_portset_np */ + "kqueue_portset_np", /* 215 = kqueue_portset_np */ "#216", /* 216 = Reserved */ "#217", /* 217 = Reserved */ "#218", /* 218 = Reserved */ @@ -272,8 +280,8 @@ char *syscallnames[] = { "#244", /* 244 = nosys */ "#245", /* 245 = nosys */ "#246", /* 246 = nosys */ - "#247", /* 247 = nosys */ - "#248", /* 248 = nosys */ + "nfsclnt", /* 247 = nfsclnt */ + "fhopen", /* 248 = fhopen */ "#249", /* 249 = nosys */ "minherit", /* 250 = minherit */ "semsys", /* 251 = semsys */ @@ -338,41 +346,61 @@ char *syscallnames[] = { "getsid", /* 310 = getsid */ "#311", /* 311 = setresuid */ "#312", /* 312 = setresgid */ - "#313", /* 313 = obsolete signanosleep */ - "#314", /* 314 = aio_return */ - "#315", /* 315 = aio_suspend */ - "#316", /* 316 = aio_cancel */ - "#317", /* 317 = aio_error */ - "#318", /* 318 = aio_read */ - "#319", /* 319 = aio_write */ - "#320", /* 320 = lio_listio */ + "aio_fsync", /* 313 = aio_fsync */ + "aio_return", /* 314 = aio_return */ + "aio_suspend", /* 315 = aio_suspend */ + "aio_cancel", /* 316 = aio_cancel */ + "aio_error", /* 317 = aio_error */ + "aio_read", /* 318 = aio_read */ + "aio_write", /* 319 = aio_write */ + "lio_listio", /* 320 = lio_listio */ "#321", /* 321 = yield */ "#322", /* 322 = thr_sleep */ "#323", /* 323 = thr_wakeup */ "mlockall", /* 324 = mlockall */ "munlockall", /* 325 = munlockall */ - "#326", /* 326 */ + "#326", /* 326 */ "issetugid", /* 327 = issetugid */ "__pthread_kill", /* 328 = __pthread_kill */ "pthread_sigmask", /* 329 = pthread_sigmask */ - "sigwait", /* 330 = sigwait */ - "#331", /* 331 */ - "#332", /* 332 */ - "#333", /* 333 */ - "#334", /* 334 */ - "utrace", /* 335 = utrace */ - "#336", /* 336 */ - "#337", /* 337 */ - "#338", /* 338 */ - "#339", /* 339 */ - "#340", /* 340 */ - "#341", /* 341 */ - "#342", /* 342 */ - "#343", /* 343 */ - "#344", /* 344 */ - "#345", /* 345 */ - "#346", /* 346 */ - "#347", /* 347 */ - "#348", /* 348 */ - "#349" /* 349 */ + "sigwait", /* 330 = sigwait */ + "#331", /* 331 */ + "#332", /* 332 */ + "#333", /* 333 */ + "#334", /* 334 */ + "utrace", /* 335 = utrace */ + "#336", /* 336 */ + "#337", /* 337 */ + "#338", /* 338 */ + "#339", /* 339 */ + "#340", /* 340 = TBD sigprocmask */ + "#341", /* 341 = TBD sigsuspend */ + "#342", /* 342 = TBD sigaction */ + "#343", /* 343 = TBD sigpending */ + "#344", /* 344 = TBD sigreturn */ + "#345", /* 345 = TBD sigtimedwait */ + "#346", /* 346 = TBD sigwaitinfo */ + "#347", /* 347 */ + "#348", /* 348 */ + "#349" /* 349 */ + "audit", /* 350 */ + "auditon", /* 351 */ + "auditsvc", /* 352 */ + 
"getauid", /* 353 */ + "setauid", /* 354 */ + "getaudit", /* 355 */ + "setaudit", /* 356 */ + "getaudit_addr", /* 357 */ + "setaudit_addr", /* 358 */ + "auditctl", /* 359 */ + "#360", /* 360 */ + "#361", /* 361 */ + "kqueue", /* 362 = kqueue */ + "kevent", /* 363 = kevent */ + "#364", /* 364 */ + "#365", /* 365 */ + "#366", /* 366 */ + "#367", /* 367 */ + "#368", /* 368 */ + "#369" /* 369 */ }; diff --git a/bsd/kern/sysctl_init.c b/bsd/kern/sysctl_init.c index 44e80aeb6..55a510fff 100644 --- a/bsd/kern/sysctl_init.c +++ b/bsd/kern/sysctl_init.c @@ -82,23 +82,29 @@ extern struct sysctl_oid sysctl__hw_l2cachesize_compat; extern struct sysctl_oid sysctl__hw_l3cachesize_compat; extern struct sysctl_oid sysctl__hw_tbfrequency_compat; -extern struct sysctl_oid sysctl__hw__cpu_capabilities; - extern struct sysctl_oid sysctl__kern_sysv_shmmax; extern struct sysctl_oid sysctl__kern_sysv_shmmin; extern struct sysctl_oid sysctl__kern_sysv_shmmni; extern struct sysctl_oid sysctl__kern_sysv_shmseg; extern struct sysctl_oid sysctl__kern_sysv_shmall; +extern struct sysctl_oid sysctl__kern_sysv_semmni; +extern struct sysctl_oid sysctl__kern_sysv_semmns; +extern struct sysctl_oid sysctl__kern_sysv_semmnu; +extern struct sysctl_oid sysctl__kern_sysv_semmsl; +extern struct sysctl_oid sysctl__kern_sysv_semume; + extern struct sysctl_oid sysctl__kern_dummy; extern struct sysctl_oid sysctl__kern_ipc_maxsockbuf; extern struct sysctl_oid sysctl__kern_ipc_nmbclusters; extern struct sysctl_oid sysctl__kern_ipc_sockbuf_waste_factor; extern struct sysctl_oid sysctl__kern_ipc_somaxconn; extern struct sysctl_oid sysctl__kern_ipc_sosendminchain; +extern struct sysctl_oid sysctl__kern_ipc_sorecvmincopy; extern struct sysctl_oid sysctl__kern_ipc_maxsockets; extern struct sysctl_oid sysctl__net_inet_icmp_icmplim; extern struct sysctl_oid sysctl__net_inet_icmp_maskrepl; +extern struct sysctl_oid sysctl__net_inet_icmp_timestamp; extern struct sysctl_oid sysctl__net_inet_icmp_bmcastecho; extern struct sysctl_oid sysctl__net_inet_icmp_log_redirect; extern struct sysctl_oid sysctl__net_inet_icmp_drop_redirect; @@ -123,6 +129,7 @@ extern struct sysctl_oid sysctl__net_inet_ip_keepfaith; extern struct sysctl_oid sysctl__net_inet_ip_maxfragpackets; extern struct sysctl_oid sysctl__net_inet_ip_check_interface; extern struct sysctl_oid sysctl__net_inet_ip_check_route_selfref; +extern struct sysctl_oid sysctl__net_inet_ip_use_route_genid; #if NGIF > 0 extern struct sysctl_oid sysctl__net_inet_ip_gifttl; #endif @@ -181,6 +188,7 @@ extern struct sysctl_oid sysctl__net_inet_tcp_drop_synfin; #if TCPDEBUG extern struct sysctl_oid sysctl__net_inet_tcp_tcpconsdebug; #endif +extern struct sysctl_oid sysctl__net_inet_tcp_sockthreshold; extern struct sysctl_oid sysctl__net_inet_udp_log_in_vain; extern struct sysctl_oid sysctl__net_inet_udp_checksum; extern struct sysctl_oid sysctl__net_inet_udp_maxdgram; @@ -246,6 +254,15 @@ extern struct sysctl_oid sysctl__vfs_nfs_diskless_rootpath; extern struct sysctl_oid sysctl__vfs_nfs_diskless_swappath; extern struct sysctl_oid sysctl__vfs_nfs_nfsstats; #endif +extern struct sysctl_oid sysctl__vfs_generic_nfs_client_initialdowndelay; +extern struct sysctl_oid sysctl__vfs_generic_nfs_client_nextdowndelay; +extern struct sysctl_oid sysctl__vfs_generic_nfs_client; +extern struct sysctl_oid sysctl__vfs_generic_nfs; + +extern struct sysctl_oid sysctl__vfs_generic; +extern struct sysctl_oid sysctl__vfs_generic_vfsidlist; +extern struct sysctl_oid sysctl__vfs_generic_ctlbyfsid; +extern struct sysctl_oid 
sysctl__vfs_generic_noremotehang; extern struct sysctl_oid sysctl__kern_ipc; extern struct sysctl_oid sysctl__kern_sysv; @@ -301,6 +318,7 @@ extern struct sysctl_oid sysctl__net_inet_tcp_stats; extern struct sysctl_oid sysctl__net_inet_udp_stats; extern struct sysctl_oid sysctl__kern; extern struct sysctl_oid sysctl__hw; +extern struct sysctl_oid sysctl__machdep; extern struct sysctl_oid sysctl__net; extern struct sysctl_oid sysctl__debug; extern struct sysctl_oid sysctl__vfs; @@ -336,7 +354,6 @@ extern struct sysctl_oid sysctl__net_inet6_ip6_rtminexpire; extern struct sysctl_oid sysctl__net_inet6_ip6_rtmaxcache; extern struct sysctl_oid sysctl__net_inet6_ip6_temppltime; extern struct sysctl_oid sysctl__net_inet6_ip6_tempvltime; -extern struct sysctl_oid sysctl__net_inet6_ip6_auto_on; #if IPV6FIREWALL extern struct sysctl_oid sysctl__net_inet6_ip6_fw; extern struct sysctl_oid sysctl__net_inet6_ip6_fw_debug; @@ -371,6 +388,7 @@ extern struct sysctl_oid sysctl__net_inet6_ipsec6_esp_randpad; #endif #if IPSEC extern struct sysctl_oid sysctl__net_inet_ipsec; +extern struct sysctl_oid sysctl__net_inet_ipsec_esp_port; extern struct sysctl_oid sysctl__net_inet_ipsec_bypass; extern struct sysctl_oid sysctl__net_inet_ipsec_def_policy; extern struct sysctl_oid sysctl__net_inet_ipsec_esp_randpad; @@ -386,6 +404,7 @@ extern struct sysctl_oid sysctl__net_inet_ipsec_debug; extern struct sysctl_oid sysctl__net_inet_ipsec_stats; extern struct sysctl_oid sysctl__net_key; extern struct sysctl_oid sysctl__net_key_debug; +extern struct sysctl_oid sysctl__net_key_prefered_oldsa; extern struct sysctl_oid sysctl__net_key_spi_trycnt; extern struct sysctl_oid sysctl__net_key_spi_minval; extern struct sysctl_oid sysctl__net_key_spi_maxval; @@ -394,7 +413,9 @@ extern struct sysctl_oid sysctl__net_key_larval_lifetime; extern struct sysctl_oid sysctl__net_key_blockacq_count; extern struct sysctl_oid sysctl__net_key_blockacq_lifetime; extern struct sysctl_oid sysctl__net_key_esp_keymin; +extern struct sysctl_oid sysctl__net_key_esp_auth; extern struct sysctl_oid sysctl__net_key_ah_keymin; +extern struct sysctl_oid sysctl__net_key_natt_keepalive_interval; #endif @@ -402,6 +423,7 @@ struct sysctl_oid *newsysctl_list[] = { &sysctl__kern, &sysctl__hw, + &sysctl__machdep, &sysctl__net, &sysctl__debug, &sysctl__vfs, @@ -423,12 +445,18 @@ struct sysctl_oid *newsysctl_list[] = ,&sysctl__kern_sysv_shmmni ,&sysctl__kern_sysv_shmseg ,&sysctl__kern_sysv_shmall + ,&sysctl__kern_sysv_semmni + ,&sysctl__kern_sysv_semmns + ,&sysctl__kern_sysv_semmnu + ,&sysctl__kern_sysv_semmsl + ,&sysctl__kern_sysv_semume ,&sysctl__kern_dummy ,&sysctl__kern_ipc_maxsockbuf ,&sysctl__kern_ipc_nmbclusters ,&sysctl__kern_ipc_sockbuf_waste_factor ,&sysctl__kern_ipc_somaxconn ,&sysctl__kern_ipc_sosendminchain + ,&sysctl__kern_ipc_sorecvmincopy ,&sysctl__kern_ipc_maxsockets ,&sysctl__hw_machine @@ -471,10 +499,9 @@ struct sysctl_oid *newsysctl_list[] = ,&sysctl__hw_l3cachesize_compat ,&sysctl__hw_tbfrequency_compat - ,&sysctl__hw__cpu_capabilities - ,&sysctl__net_inet_icmp_icmplim ,&sysctl__net_inet_icmp_maskrepl + ,&sysctl__net_inet_icmp_timestamp ,&sysctl__net_inet_icmp_bmcastecho ,&sysctl__net_inet_icmp_drop_redirect ,&sysctl__net_inet_icmp_log_redirect @@ -497,6 +524,7 @@ struct sysctl_oid *newsysctl_list[] = ,&sysctl__net_inet_ip_maxfragpackets ,&sysctl__net_inet_ip_check_interface ,&sysctl__net_inet_ip_check_route_selfref + ,&sysctl__net_inet_ip_use_route_genid #if NGIF > 0 ,&sysctl__net_inet_ip_gifttl #endif @@ -552,6 +580,7 @@ struct sysctl_oid 
*newsysctl_list[] = #if TCPDEBUG ,&sysctl__net_inet_tcp_tcpconsdebug #endif + ,&sysctl__net_inet_tcp_sockthreshold ,&sysctl__net_inet_udp_log_in_vain ,&sysctl__net_inet_udp_checksum ,&sysctl__net_inet_udp_maxdgram @@ -616,6 +645,14 @@ struct sysctl_oid *newsysctl_list[] = ,&sysctl__vfs_nfs_diskless_swappath ,&sysctl__vfs_nfs_nfsstats #endif + ,&sysctl__vfs_generic + ,&sysctl__vfs_generic_vfsidlist + ,&sysctl__vfs_generic_ctlbyfsid + ,&sysctl__vfs_generic_noremotehang + ,&sysctl__vfs_generic_nfs + ,&sysctl__vfs_generic_nfs_client + ,&sysctl__vfs_generic_nfs_client_initialdowndelay + ,&sysctl__vfs_generic_nfs_client_nextdowndelay ,&sysctl__kern_ipc ,&sysctl__kern_sysv ,&sysctl__net_inet @@ -689,7 +726,6 @@ struct sysctl_oid *newsysctl_list[] = ,&sysctl__net_inet6_ip6_rtmaxcache ,&sysctl__net_inet6_ip6_temppltime ,&sysctl__net_inet6_ip6_tempvltime - ,&sysctl__net_inet6_ip6_auto_on ,&sysctl__net_inet6_icmp6_rediraccept ,&sysctl__net_inet6_icmp6_redirtimeout ,&sysctl__net_inet6_icmp6_nd6_prune @@ -725,6 +761,7 @@ struct sysctl_oid *newsysctl_list[] = #if IPSEC ,&sysctl__net_key ,&sysctl__net_key_debug + ,&sysctl__net_key_prefered_oldsa ,&sysctl__net_key_spi_trycnt ,&sysctl__net_key_spi_minval ,&sysctl__net_key_spi_maxval @@ -733,7 +770,9 @@ struct sysctl_oid *newsysctl_list[] = ,&sysctl__net_key_blockacq_count ,&sysctl__net_key_blockacq_lifetime ,&sysctl__net_key_esp_keymin + ,&sysctl__net_key_esp_auth ,&sysctl__net_key_ah_keymin + ,&sysctl__net_key_natt_keepalive_interval ,&sysctl__net_inet_ipsec ,&sysctl__net_inet_ipsec_stats ,&sysctl__net_inet_ipsec_def_policy @@ -748,6 +787,7 @@ struct sysctl_oid *newsysctl_list[] = ,&sysctl__net_inet_ipsec_debug ,&sysctl__net_inet_ipsec_esp_randpad ,&sysctl__net_inet_ipsec_bypass + ,&sysctl__net_inet_ipsec_esp_port #endif ,(struct sysctl_oid *) 0 }; diff --git a/bsd/kern/sysv_msg.c b/bsd/kern/sysv_msg.c index 55080db24..9e7e3b946 100644 --- a/bsd/kern/sysv_msg.c +++ b/bsd/kern/sysv_msg.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -48,6 +48,7 @@ #include #include #include +#include static void msginit __P((void *)); SYSINIT(sysv_msg, SI_SUB_SYSV_MSG, SI_ORDER_FIRST, msginit, NULL) @@ -209,6 +210,8 @@ msgctl(p, uap) printf("call to msgctl(%d, %d, 0x%x)\n", msqid, cmd, user_msqptr); #endif + AUDIT_ARG(svipc_cmd, cmd); + AUDIT_ARG(svipc_id, msqid); msqid = IPCID_TO_IX(msqid); if (msqid < 0 || msqid >= msginfo.msgmni) { @@ -426,6 +429,7 @@ msgget(p, uap) found: /* Construct the unique msqid */ p->p_retval[0] = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm); + AUDIT_ARG(svipc_id, p->p_retval[0]); return(0); } @@ -458,6 +462,7 @@ msgsnd(p, uap) msgflg); #endif + AUDIT_ARG(svipc_id, msqid); msqid = IPCID_TO_IX(msqid); if (msqid < 0 || msqid >= msginfo.msgmni) { @@ -796,6 +801,7 @@ msgrcv(p, uap) msgsz, msgtyp, msgflg); #endif + AUDIT_ARG(svipc_id, msqid); msqid = IPCID_TO_IX(msqid); if (msqid < 0 || msqid >= msginfo.msgmni) { diff --git a/bsd/kern/sysv_sem.c b/bsd/kern/sysv_sem.c index 7270428f8..8c4c3d47c 100644 --- a/bsd/kern/sysv_sem.c +++ b/bsd/kern/sysv_sem.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
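The sysctl_seminfo() handler added below follows a set-once pattern: the current limit is always copied out, but a new value is accepted only while the semaphore tables are still unallocated, otherwise the write fails with EINVAL. A user-space model of that rule, assuming a hypothetical struct limit; set_limit() mirrors the SYSCTL_OUT/SYSCTL_IN sequence but is not kernel code:

#include <errno.h>
#include <stddef.h>

struct limit {
	int value;		/* the tunable, e.g. semmni */
	int initialized;	/* set once the subsystem allocates its tables */
};

/* returns 0 on success, EINVAL if it is too late to change the limit */
static int
set_limit(struct limit *l, const int *newval, int *oldval)
{
	*oldval = l->value;	/* analogous to SYSCTL_OUT */
	if (newval == NULL)
		return 0;	/* read-only query */
	if (l->initialized)
		return EINVAL;	/* tables exist; resizing is unsafe */
	l->value = *newval;	/* analogous to SYSCTL_IN */
	return 0;
}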
* * @APPLE_LICENSE_HEADER_START@ * @@ -43,6 +43,8 @@ #include #include +#include +#include /*#include */ /*#include */ @@ -573,7 +575,7 @@ semundo_adjust(p, supptr, semid, semnum, adjval) /* Didn't find the right entry - create it */ if (adjval == 0) return(0); - if (suptr->un_cnt != seminfo.semume) { + if (suptr->un_cnt != limitseminfo.semume) { sunptr = &suptr->un_ent[suptr->un_cnt]; suptr->un_cnt++; sunptr->un_adjval = adjval; @@ -641,13 +643,15 @@ semctl(p, uap, retval) struct semid_ds sbuf; register struct semid_ds *semaptr; + AUDIT_ARG(svipc_cmd, cmd); + AUDIT_ARG(svipc_id, semid); SUBSYSTEM_LOCK_AQUIRE(p); #ifdef SEM_DEBUG printf("call to semctl(%d, %d, %d, 0x%x)\n", semid, semnum, cmd, arg); #endif semid = IPCID_TO_IX(semid); - if (semid < 0 || semid >= seminfo.semmsl) + if (semid < 0 || semid >= seminfo.semmni) { #ifdef SEM_DEBUG printf("Invalid semid\n"); @@ -864,7 +868,7 @@ semget(p, uap, retval) printf("need to allocate an id for the request\n"); #endif if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) { - if (nsems <= 0 || nsems > seminfo.semmsl) { + if (nsems <= 0 || nsems > limitseminfo.semmsl) { #ifdef SEM_DEBUG printf("nsems out of range (0<%d<=%d)\n", nsems, seminfo.semmsl); @@ -931,6 +935,7 @@ semget(p, uap, retval) found: *retval = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm); + AUDIT_ARG(svipc_id, *retval); #ifdef SEM_DEBUG printf("semget is done, returning %d\n", *retval); #endif @@ -963,6 +968,7 @@ semop(p, uap, retval) int i, j, eval; int do_wakeup, do_undos; + AUDIT_ARG(svipc_id, uap->semid); SUBSYSTEM_LOCK_AQUIRE(p); #ifdef SEM_DEBUG printf("call to semop(%d, 0x%x, %d)\n", semid, sops, nsops); @@ -970,7 +976,7 @@ semop(p, uap, retval) semid = IPCID_TO_IX(semid); /* Convert back to zero origin */ - if (semid < 0 || semid >= seminfo.semmsl) + if (semid < 0 || semid >= seminfo.semmni) UNLOCK_AND_RETURN(EINVAL); semaptr = &sema[semid]; @@ -1366,4 +1372,49 @@ unlock: SUBSYSTEM_LOCK_RELEASE; } +/* (struct sysctl_oid *oidp, void *arg1, int arg2, \ + struct sysctl_req *req) */ +static int +sysctl_seminfo SYSCTL_HANDLER_ARGS +{ + int error = 0; + + error = SYSCTL_OUT(req, arg1, sizeof(int)); + if (error || !req->newptr) + return(error); + + SUBSYSTEM_LOCK_AQUIRE(current_proc()); + /* Set the values only if shared memory is not initialised */ + if ((sem == (struct sem *) 0) && + (sema == (struct semid_ds *) 0) && + (semu == (struct semid_ds *) 0) && + (semu_list == (struct sem_undo *) 0)) { + if (error = SYSCTL_IN(req, arg1, sizeof(int))) { + goto out; + } + } else + error = EINVAL; +out: + SUBSYSTEM_LOCK_RELEASE; + return(error); + +} + +/* SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV"); */ +extern struct sysctl_oid_list sysctl__kern_sysv_children; +SYSCTL_PROC(_kern_sysv, KSYSV_SEMMNI, semmni, CTLTYPE_INT | CTLFLAG_RW, + &limitseminfo.semmni, 0, &sysctl_seminfo ,"I","semmni"); + +SYSCTL_PROC(_kern_sysv, KSYSV_SEMMNS, semmns, CTLTYPE_INT | CTLFLAG_RW, + &limitseminfo.semmns, 0, &sysctl_seminfo ,"I","semmns"); + +SYSCTL_PROC(_kern_sysv, KSYSV_SEMMNU, semmnu, CTLTYPE_INT | CTLFLAG_RW, + &limitseminfo.semmnu, 0, &sysctl_seminfo ,"I","semmnu"); + +SYSCTL_PROC(_kern_sysv, KSYSV_SEMMSL, semmsl, CTLTYPE_INT | CTLFLAG_RW, + &limitseminfo.semmsl, 0, &sysctl_seminfo ,"I","semmsl"); + +SYSCTL_PROC(_kern_sysv, KSYSV_SEMUNE, semume, CTLTYPE_INT | CTLFLAG_RW, + &limitseminfo.semume, 0, &sysctl_seminfo ,"I","semume"); + diff --git a/bsd/kern/sysv_shm.c b/bsd/kern/sysv_shm.c index 0330862b0..bd38b7afd 100644 --- a/bsd/kern/sysv_shm.c +++ b/bsd/kern/sysv_shm.c @@ -1,5 +1,5 @@ /* - 
* Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -65,6 +65,7 @@ #include #include #include +#include #include #include @@ -120,7 +121,7 @@ struct shmmap_state { static void shm_deallocate_segment __P((struct shmid_ds *)); static int shm_find_segment_by_key __P((key_t)); static struct shmid_ds *shm_find_segment_by_shmid __P((int)); -static int shm_delete_mapping __P((struct proc *, struct shmmap_state *)); +static int shm_delete_mapping __P((struct proc *, struct shmmap_state *, int)); #ifdef __APPLE_API_PRIVATE struct shminfo shminfo = { @@ -173,7 +174,7 @@ shm_deallocate_segment(shmseg) char * ptr; shm_handle = shmseg->shm_internal; - size = round_page(shmseg->shm_segsz); + size = round_page_32(shmseg->shm_segsz); mach_destroy_memory_entry(shm_handle->shm_object); FREE((caddr_t)shm_handle, M_SHM); shmseg->shm_internal = NULL; @@ -183,9 +184,10 @@ shm_deallocate_segment(shmseg) } static int -shm_delete_mapping(p, shmmap_s) +shm_delete_mapping(p, shmmap_s, deallocate) struct proc *p; struct shmmap_state *shmmap_s; + int deallocate; { struct shmid_ds *shmseg; int segnum, result; @@ -193,10 +195,12 @@ shm_delete_mapping(p, shmmap_s) segnum = IPCID_TO_IX(shmmap_s->shmid); shmseg = &shmsegs[segnum]; - size = round_page(shmseg->shm_segsz); + size = round_page_32(shmseg->shm_segsz); + if (deallocate) { result = vm_deallocate(current_map(), shmmap_s->va, size); if (result != KERN_SUCCESS) return EINVAL; + } shmmap_s->shmid = -1; shmseg->shm_dtime = time_second; if ((--shmseg->shm_nattch <= 0) && @@ -220,6 +224,7 @@ shmdt(p, uap, retval) struct shmmap_state *shmmap_s; int i; + AUDIT_ARG(svipc_addr, uap->shmaddr); if (!shm_inited) return(EINVAL); shmmap_s = (struct shmmap_state *)p->vm_shm; @@ -231,7 +236,7 @@ shmdt(p, uap, retval) break; if (i == shminfo.shmseg) return EINVAL; - return shm_delete_mapping(p, shmmap_s); + return shm_delete_mapping(p, shmmap_s, 1); } #ifndef _SYS_SYSPROTO_H_ @@ -258,6 +263,8 @@ shmat(p, uap, retval) vm_size_t size; kern_return_t rv; + AUDIT_ARG(svipc_id, uap->shmid); + AUDIT_ARG(svipc_addr, uap->shmaddr); if (!shm_inited) return(EINVAL); shmmap_s = (struct shmmap_state *)p->vm_shm; @@ -271,6 +278,8 @@ shmat(p, uap, retval) shmseg = shm_find_segment_by_shmid(uap->shmid); if (shmseg == NULL) return EINVAL; + + AUDIT_ARG(svipc_perm, &shmseg->shm_perm); error = ipcperm(cred, &shmseg->shm_perm, (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W); if (error) @@ -282,7 +291,7 @@ shmat(p, uap, retval) } if (i >= shminfo.shmseg) return EMFILE; - size = round_page(shmseg->shm_segsz); + size = round_page_32(shmseg->shm_segsz); prot = VM_PROT_READ; if ((uap->shmflg & SHM_RDONLY) == 0) prot |= VM_PROT_WRITE; @@ -296,7 +305,7 @@ shmat(p, uap, retval) else return EINVAL; } else { - attach_va = round_page(uap->shmaddr); + attach_va = round_page_32((unsigned int)uap->shmaddr); } shm_handle = shmseg->shm_internal; @@ -413,11 +422,18 @@ shmctl(p, uap, retval) struct shmid_ds inbuf; struct shmid_ds *shmseg; + AUDIT_ARG(svipc_cmd, uap->cmd); + AUDIT_ARG(svipc_id, uap->shmid); if (!shm_inited) return(EINVAL); shmseg = shm_find_segment_by_shmid(uap->shmid); if (shmseg == NULL) return EINVAL; + /* XXAUDIT: This is the perms BEFORE any change by this call. This + * may not be what is desired. 
+ */ + AUDIT_ARG(svipc_perm, &shmseg->shm_perm); + switch (uap->cmd) { case IPC_STAT: error = ipcperm(cred, &shmseg->shm_perm, IPC_R); @@ -525,7 +541,7 @@ shmget_allocate_segment(p, uap, mode, retval) return EINVAL; if (shm_nused >= shminfo.shmmni) /* any shmids left? */ return ENOSPC; - size = round_page(uap->size); + size = round_page_32(uap->size); if (shm_committed + btoc(size) > shminfo.shmall) return ENOMEM; if (shm_last_free < 0) { @@ -573,6 +589,7 @@ shmget_allocate_segment(p, uap, mode, retval) shmseg->shm_ctime = time_second; shm_committed += btoc(size); shm_nused++; + AUDIT_ARG(svipc_perm, &shmseg->shm_perm); if (shmseg->shm_perm.mode & SHMSEG_WANTED) { /* * Somebody else wanted this key while we were asleep. Wake @@ -582,6 +599,7 @@ shmget_allocate_segment(p, uap, mode, retval) wakeup((caddr_t)shmseg); } *retval = shmid; + AUDIT_ARG(svipc_id, shmid); return 0; out: switch (kret) { @@ -604,6 +622,7 @@ shmget(p, uap, retval) { int segnum, mode, error; + /* Auditing is actually done in shmget_allocate_segment() */ if (!shm_inited) return(EINVAL); @@ -676,7 +695,28 @@ shmexit(p) shmmap_s = (struct shmmap_state *)p->vm_shm; for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) if (shmmap_s->shmid != -1) - shm_delete_mapping(p, shmmap_s); + shm_delete_mapping(p, shmmap_s, 1); + FREE((caddr_t)p->vm_shm, M_SHM); + p->vm_shm = NULL; +} + +/* + * shmexec() is like shmexit(), only it doesn't delete the mappings, + * since the old address space has already been destroyed and the new + * one instantiated. Instead, it just does the housekeeping work we + * need to do to keep the System V shared memory subsystem sane. + */ +__private_extern__ void +shmexec(p) + struct proc *p; +{ + struct shmmap_state *shmmap_s; + int i; + + shmmap_s = (struct shmmap_state *)p->vm_shm; + for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) + if (shmmap_s->shmid != -1) + shm_delete_mapping(p, shmmap_s, 0); FREE((caddr_t)p->vm_shm, M_SHM); p->vm_shm = NULL; } @@ -732,7 +772,7 @@ sysctl_shminfo SYSCTL_HANDLER_ARGS (shminfo.shmmni != -1) && (shminfo.shmseg != -1) && (shminfo.shmall != -1)) { - shminit(); + shminit(NULL); } } return(0); diff --git a/bsd/kern/tty_pty.c b/bsd/kern/tty_pty.c index 34dc94304..d5a53bfbb 100644 --- a/bsd/kern/tty_pty.c +++ b/bsd/kern/tty_pty.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
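shm_delete_mapping() above gains a deallocate flag so one helper serves both shmexit(), where the mappings must still be torn down, and the new shmexec(), where exec has already destroyed the address space and only the bookkeeping remains. A sketch of that shared-helper shape, assuming a hypothetical struct mapping; unmap() stands in for vm_deallocate():

#include <errno.h>

struct mapping { int id; void *va; };

extern int unmap(void *va);	/* hypothetical stand-in for vm_deallocate() */

/* release one segment's bookkeeping; touch the VA range only if asked */
static int
delete_mapping(struct mapping *m, int deallocate)
{
	if (deallocate && unmap(m->va) != 0)
		return EINVAL;	/* leave the slot intact on failure */
	m->id = -1;		/* mark the slot free either way */
	return 0;
}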
* * @APPLE_LICENSE_HEADER_START@ * @@ -84,14 +84,13 @@ #include #define FREE_BSDSTATIC __private_extern__ -#define d_open_t open_close_fcn_t -#define d_close_t open_close_fcn_t #define d_devtotty_t struct tty ** -#define d_ioctl_t ioctl_fcn_t -#define d_read_t read_write_fcn_t -#define d_write_t read_write_fcn_t -#define d_select_t select_fcn_t + +#ifdef d_stop_t +#undef d_stop_t +#endif typedef void d_stop_t __P((struct tty *tp, int rw)); + #endif /* NeXT */ #ifdef notyet @@ -238,7 +237,7 @@ int pty_init(int n_ptys) done: return (0); } -#endif DEVFS +#endif /* DEVFS */ /*ARGSUSED*/ FREE_BSDSTATIC int diff --git a/bsd/kern/ubc_subr.c b/bsd/kern/ubc_subr.c index cd2e0d71c..5fb09e492 100644 --- a/bsd/kern/ubc_subr.c +++ b/bsd/kern/ubc_subr.c @@ -102,7 +102,7 @@ ubc_busy(struct vnode *vp) while (ISSET(uip->ui_flags, UI_BUSY)) { - if (uip->ui_owner == (void *)current_thread()) + if (uip->ui_owner == (void *)current_act()) return (2); SET(uip->ui_flags, UI_WANTED); @@ -111,7 +111,7 @@ ubc_busy(struct vnode *vp) if (!UBCINFOEXISTS(vp)) return (0); } - uip->ui_owner = (void *)current_thread(); + uip->ui_owner = (void *)current_act(); SET(uip->ui_flags, UI_BUSY); @@ -321,7 +321,8 @@ ubc_setsize(struct vnode *vp, off_t nsize) memory_object_control_t control; kern_return_t kret; - assert(nsize >= (off_t)0); + if (nsize < (off_t)0) + return (0); if (UBCINVALID(vp)) return (0); @@ -590,6 +591,9 @@ ubc_getobject(struct vnode *vp, int flags) if (UBCINVALID(vp)) return (0); + if (flags & UBC_FOR_PAGEOUT) + return(vp->v_ubcinfo->ui_control); + if ((recursed = ubc_busy(vp)) == 0) return (0); @@ -747,7 +751,7 @@ ubc_clean(struct vnode *vp, int invalidate) control = uip->ui_control; assert(control); - vp->v_flag &= ~VHASDIRTY; + cluster_release(vp); vp->v_clen = 0; /* Write the dirty data in the file and discard cached pages */ @@ -854,9 +858,28 @@ ubc_hold(struct vnode *vp) int recursed; memory_object_control_t object; +retry: + if (UBCINVALID(vp)) return (0); + ubc_lock(vp); + if (ISSET(vp->v_flag, VUINIT)) { + /* + * other thread is not done initializing this + * yet, wait till it's done and try again + */ + while (ISSET(vp->v_flag, VUINIT)) { + SET(vp->v_flag, VUWANT); /* XXX overloaded! */ + ubc_unlock(vp); + (void) tsleep((caddr_t)vp, PINOD, "ubchold", 0); + ubc_lock(vp); + } + ubc_unlock(vp); + goto retry; + } + ubc_unlock(vp); + if ((recursed = ubc_busy(vp)) == 0) { /* must be invalid or dying vnode */ assert(UBCINVALID(vp) || @@ -972,6 +995,12 @@ ubc_release_named(struct vnode *vp) (uip->ui_refcount == 1) && !uip->ui_mapped) { control = uip->ui_control; assert(control); + + // XXXdbg + if (vp->v_flag & VDELETED) { + ubc_setsize(vp, (off_t)0); + } + CLR(uip->ui_flags, UI_HASOBJREF); kret = memory_object_release_name(control, MEMORY_OBJECT_RESPECT_CACHE); @@ -1102,24 +1131,22 @@ ubc_invalidate(struct vnode *vp, off_t offset, size_t size) * Returns 1 if file is in use by UBC, 0 if not */ int -ubc_isinuse(struct vnode *vp, int tookref) +ubc_isinuse(struct vnode *vp, int busycount) { - int busycount = tookref ? 
2 : 1; - if (!UBCINFOEXISTS(vp)) return (0); - if (tookref == 0) { + if (busycount == 0) { printf("ubc_isinuse: called without a valid reference" ": v_tag = %d\n", vp->v_tag); vprint("ubc_isinuse", vp); return (0); } - if (vp->v_usecount > busycount) + if (vp->v_usecount > busycount+1) return (1); - if ((vp->v_usecount == busycount) + if ((vp->v_usecount == busycount+1) && (vp->v_ubcinfo->ui_mapped == 1)) return (1); else @@ -1166,7 +1193,7 @@ ubc_page_op( struct vnode *vp, off_t f_offset, int ops, - vm_offset_t *phys_entryp, + ppnum_t *phys_entryp, int *flagsp) { memory_object_control_t control; @@ -1182,6 +1209,42 @@ ubc_page_op( flagsp)); } +__private_extern__ kern_return_t +ubc_page_op_with_control( + memory_object_control_t control, + off_t f_offset, + int ops, + ppnum_t *phys_entryp, + int *flagsp) +{ + return (memory_object_page_op(control, + (memory_object_offset_t)f_offset, + ops, + phys_entryp, + flagsp)); +} + +kern_return_t +ubc_range_op( + struct vnode *vp, + off_t f_offset_beg, + off_t f_offset_end, + int ops, + int *range) +{ + memory_object_control_t control; + + control = ubc_getobject(vp, UBC_FLAGS_NONE); + if (control == MEMORY_OBJECT_CONTROL_NULL) + return KERN_INVALID_ARGUMENT; + + return (memory_object_range_op(control, + (memory_object_offset_t)f_offset_beg, + (memory_object_offset_t)f_offset_end, + ops, + range)); +} + kern_return_t ubc_create_upl( struct vnode *vp, @@ -1192,18 +1255,29 @@ ubc_create_upl( int uplflags) { memory_object_control_t control; - int count; - off_t file_offset; - kern_return_t kr; + int count; + int ubcflags; + off_t file_offset; + kern_return_t kr; if (bufsize & 0xfff) return KERN_INVALID_ARGUMENT; - control = ubc_getobject(vp, UBC_FLAGS_NONE); + if (uplflags & UPL_FOR_PAGEOUT) { + uplflags &= ~UPL_FOR_PAGEOUT; + ubcflags = UBC_FOR_PAGEOUT; + } else + ubcflags = UBC_FLAGS_NONE; + + control = ubc_getobject(vp, ubcflags); if (control == MEMORY_OBJECT_CONTROL_NULL) return KERN_INVALID_ARGUMENT; - uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL); + if (uplflags & UPL_WILL_BE_DUMPED) { + uplflags &= ~UPL_WILL_BE_DUMPED; + uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL); + } else + uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL); count = 0; kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, &count, uplflags); diff --git a/bsd/kern/uipc_mbuf.c b/bsd/kern/uipc_mbuf.c index 6a914e689..b539543f2 100644 --- a/bsd/kern/uipc_mbuf.c +++ b/bsd/kern/uipc_mbuf.c @@ -82,10 +82,22 @@ #include #include +#include + #define _MCLREF(p) (++mclrefcnt[mtocl(p)]) #define _MCLUNREF(p) (--mclrefcnt[mtocl(p)] == 0) - -extern kernel_pmap; /* The kernel's pmap */ +#define _M_CLEAR_PKTHDR(mbuf_ptr) (mbuf_ptr)->m_pkthdr.rcvif = NULL; \ + (mbuf_ptr)->m_pkthdr.len = 0; \ + (mbuf_ptr)->m_pkthdr.header = NULL; \ + (mbuf_ptr)->m_pkthdr.csum_flags = 0; \ + (mbuf_ptr)->m_pkthdr.csum_data = 0; \ + (mbuf_ptr)->m_pkthdr.aux = (struct mbuf*)NULL; \ + (mbuf_ptr)->m_pkthdr.reserved1 = NULL; \ + (mbuf_ptr)->m_pkthdr.reserved2 = NULL; + +extern pmap_t kernel_pmap; /* The kernel's pmap */ +/* kernel translator */ +extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); decl_simple_lock_data(, mbuf_slock); struct mbuf *mfree; /* mbuf free list */ @@ -95,6 +107,7 @@ int m_want; /* sleepers on mbufs */ extern int nmbclusters; /* max number of mapped clusters */ short *mclrefcnt; /* mapped cluster reference counts */ int *mcl_paddr; +static ppnum_t mcl_paddr_base; /* Handle returned by IOMapper::iovmAlloc() */ union mcluster *mclfree; /* mapped 
cluster free list */ int max_linkhdr; /* largest link-level header */ int max_protohdr; /* largest protocol header */ @@ -112,9 +125,11 @@ static int m_howmany(); /* The number of cluster mbufs that are allocated, to start. */ #define MINCL max(16, 2) -extern int dlil_input_thread_wakeup; -extern int dlil_expand_mcl; -extern int dlil_initialized; +static int mbuf_expand_thread_wakeup = 0; +static int mbuf_expand_mcl = 0; +static int mbuf_expand_thread_initialized = 0; + +static void mbuf_expand_thread_init(void); #if 0 static int mfree_munge = 0; @@ -168,10 +183,11 @@ mbinit() { int s,m; int initmcl = 32; + int mcl_pages; if (nclpp) return; - nclpp = round_page(MCLBYTES) / MCLBYTES; /* see mbufgc() */ + nclpp = round_page_32(MCLBYTES) / MCLBYTES; /* see mbufgc() */ if (nclpp < 1) nclpp = 1; MBUF_LOCKINIT(); // NETISR_LOCKINIT(); @@ -191,11 +207,14 @@ mbinit() for (m = 0; m < nmbclusters; m++) mclrefcnt[m] = -1; - MALLOC(mcl_paddr, int *, (nmbclusters/(PAGE_SIZE/CLBYTES)) * sizeof (int), - M_TEMP, M_WAITOK); + /* Calculate the number of pages assigned to the cluster pool */ + mcl_pages = nmbclusters/(PAGE_SIZE/CLBYTES); + MALLOC(mcl_paddr, int *, mcl_pages * sizeof(int), M_TEMP, M_WAITOK); if (mcl_paddr == 0) panic("mbinit1"); - bzero((char *)mcl_paddr, (nmbclusters/(PAGE_SIZE/CLBYTES)) * sizeof (int)); + /* Register with the I/O Bus mapper */ + mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages); + bzero((char *)mcl_paddr, mcl_pages * sizeof(int)); embutl = (union mcluster *)((unsigned char *)mbutl + (nmbclusters * MCLBYTES)); @@ -204,6 +223,9 @@ mbinit() if (m_clalloc(max(PAGE_SIZE/CLBYTES, 1) * initmcl, M_WAIT) == 0) goto bad; MBUF_UNLOCK(); + + (void) kernel_thread(kernel_task, mbuf_expand_thread_init); + return; bad: panic("mbinit"); @@ -236,11 +258,11 @@ m_clalloc(ncl, nowait) if (ncl < i) ncl = i; - size = round_page(ncl * MCLBYTES); + size = round_page_32(ncl * MCLBYTES); mcl = (union mcluster *)kmem_mb_alloc(mb_map, size); if (mcl == 0 && ncl > 1) { - size = round_page(MCLBYTES); /* Try for 1 if failed */ + size = round_page_32(MCLBYTES); /* Try for 1 if failed */ mcl = (union mcluster *)kmem_mb_alloc(mb_map, size); } @@ -250,8 +272,19 @@ m_clalloc(ncl, nowait) for (i = 0; i < ncl; i++) { if (++mclrefcnt[mtocl(mcl)] != 0) panic("m_clalloc already there"); - if (((int)mcl & PAGE_MASK) == 0) - mcl_paddr[((char *)mcl - (char *)mbutl)/PAGE_SIZE] = pmap_extract(kernel_pmap, (char *)mcl); + if (((int)mcl & PAGE_MASK) == 0) { + ppnum_t offset = ((char *)mcl - (char *)mbutl)/PAGE_SIZE; + ppnum_t new_page = pmap_find_phys(kernel_pmap, (vm_address_t) mcl); + + /* + * In the case of no mapper being available + * the following code nops and returns the + * input page, if there is a mapper the I/O + * page appropriate is returned. + */ + new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page); + mcl_paddr[offset] = new_page << 12; + } mcl->mcl_next = mclfree; mclfree = mcl++; @@ -268,16 +301,14 @@ out: * pool or if the number of free clusters is less than requested. 
*/ if ((nowait == M_DONTWAIT) && (i > 0 || ncl >= mbstat.m_clfree)) { - dlil_expand_mcl = 1; - if (dlil_initialized) - wakeup((caddr_t)&dlil_input_thread_wakeup); + mbuf_expand_mcl = 1; + if (mbuf_expand_thread_initialized) + wakeup((caddr_t)&mbuf_expand_thread_wakeup); } if (mbstat.m_clfree >= ncl) return 1; - mbstat.m_drops++; - return 0; } @@ -345,37 +376,39 @@ m_retry(canwait, type) break; MBUF_LOCK(); wait = m_want++; - dlil_expand_mcl = 1; + mbuf_expand_mcl = 1; if (wait == 0) mbstat.m_drain++; else mbstat.m_wait++; MBUF_UNLOCK(); - if (dlil_initialized) - wakeup((caddr_t)&dlil_input_thread_wakeup); + if (mbuf_expand_thread_initialized) + wakeup((caddr_t)&mbuf_expand_thread_wakeup); /* - * Grab network funnel because m_reclaim calls into the + * Need to be inside network funnel for m_reclaim because it calls into the * socket domains and tsleep end-up calling splhigh */ fnl = thread_funnel_get(); - if (fnl && (fnl == kernel_flock)) { - fnl_switch = 1; - thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - } else - funnel_state = thread_funnel_set(network_flock, TRUE); - if (wait == 0) { + if (wait == 0 && fnl == network_flock) { m_reclaim(); + } else if (fnl != THR_FUNNEL_NULL) { + /* Sleep with a small timeout as insurance */ + (void) tsleep((caddr_t)&mfree, PZERO-1, "m_retry", hz); } else { - /* Sleep with a small timeout as insurance */ - (void) tsleep((caddr_t)&mfree, PZERO-1, "m_retry", hz); + /* We are called from a non-BSD context: use mach primitives */ + u_int64_t abstime = 0; + + assert_wait((event_t)&mfree, THREAD_UNINT); + clock_interval_to_deadline(hz, NSEC_PER_SEC / hz, &abstime); + thread_set_timer_deadline(abstime); + if (thread_block(THREAD_CONTINUE_NULL) != THREAD_TIMED_OUT) + thread_cancel_timer(); } - if (fnl_switch) - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - else - thread_funnel_set(network_flock, funnel_state); } + if (m == 0) + mbstat.m_drops++; return (m); } @@ -391,14 +424,7 @@ m_retryhdr(canwait, type) if (m = m_retry(canwait, type)) { m->m_flags |= M_PKTHDR; m->m_data = m->m_pktdat; - m->m_pkthdr.rcvif = NULL; - m->m_pkthdr.len = 0; - m->m_pkthdr.header = NULL; - m->m_pkthdr.csum_flags = 0; - m->m_pkthdr.csum_data = 0; - m->m_pkthdr.aux = (struct mbuf *)NULL; - m->m_pkthdr.reserved1 = NULL; - m->m_pkthdr.reserved2 = NULL; + _M_CLEAR_PKTHDR(m); } return (m); } @@ -450,13 +476,7 @@ m_gethdr(nowait, type) m->m_type = type; m->m_data = m->m_pktdat; m->m_flags = M_PKTHDR; - m->m_pkthdr.rcvif = NULL; - m->m_pkthdr.header = NULL; - m->m_pkthdr.csum_flags = 0; - m->m_pkthdr.csum_data = 0; - m->m_pkthdr.aux = (struct mbuf *)NULL; - m->m_pkthdr.reserved1 = NULL; - m->m_pkthdr.reserved2 = NULL; + _M_CLEAR_PKTHDR(m) } else m = m_retryhdr(nowait, type); @@ -564,6 +584,8 @@ m_mclalloc( nowait) ++mclrefcnt[mtocl(p)]; mbstat.m_clfree--; mclfree = ((union mcluster *)p)->mcl_next; + } else { + mbstat.m_drops++; } MBUF_UNLOCK(); @@ -630,14 +652,7 @@ m_getpacket(void) m->m_type = MT_DATA; m->m_data = m->m_ext.ext_buf; m->m_flags = M_PKTHDR | M_EXT; - m->m_pkthdr.len = 0; - m->m_pkthdr.rcvif = NULL; - m->m_pkthdr.header = NULL; - m->m_pkthdr.csum_data = 0; - m->m_pkthdr.csum_flags = 0; - m->m_pkthdr.aux = (struct mbuf *)NULL; - m->m_pkthdr.reserved1 = 0; - m->m_pkthdr.reserved2 = 0; + _M_CLEAR_PKTHDR(m) m->m_ext.ext_free = 0; m->m_ext.ext_size = MCLBYTES; m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = @@ -705,14 +720,7 @@ m_getpackets(int num_needed, int num_with_pkthdrs, int how) m->m_flags = M_EXT; else { m->m_flags = M_PKTHDR | M_EXT; - 
m->m_pkthdr.len = 0; - m->m_pkthdr.rcvif = NULL; - m->m_pkthdr.header = NULL; - m->m_pkthdr.csum_flags = 0; - m->m_pkthdr.csum_data = 0; - m->m_pkthdr.aux = (struct mbuf *)NULL; - m->m_pkthdr.reserved1 = NULL; - m->m_pkthdr.reserved2 = NULL; + _M_CLEAR_PKTHDR(m); num_with_pkthdrs--; } @@ -778,14 +786,7 @@ m_getpackethdrs(int num_needed, int how) m->m_type = MT_DATA; m->m_flags = M_PKTHDR; m->m_data = m->m_pktdat; - m->m_pkthdr.len = 0; - m->m_pkthdr.rcvif = NULL; - m->m_pkthdr.header = NULL; - m->m_pkthdr.csum_flags = 0; - m->m_pkthdr.csum_data = 0; - m->m_pkthdr.aux = (struct mbuf *)NULL; - m->m_pkthdr.reserved1 = NULL; - m->m_pkthdr.reserved2 = NULL; + _M_CLEAR_PKTHDR(m); } else { @@ -835,11 +836,13 @@ m_freem_list(m) if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux) { /* * Treat the current m as the nextpkt and set m - * to the aux data. This lets us free the aux - * data in this loop without having to call - * m_freem recursively, which wouldn't work - * because we've still got the lock. + * to the aux data. Preserve nextpkt in m->m_nextpkt. + * This lets us free the aux data in this loop + * without having to call m_freem recursively, + * which wouldn't work because we've still got + * the lock. */ + m->m_nextpkt = nextpkt; nextpkt = m; m = nextpkt->m_pkthdr.aux; nextpkt->m_pkthdr.aux = NULL; @@ -1154,14 +1157,7 @@ m_copym_with_hdrs(m, off0, len, wait, m_last, m_off) } else { n->m_data = n->m_pktdat; n->m_flags = M_PKTHDR; - n->m_pkthdr.len = 0; - n->m_pkthdr.rcvif = NULL; - n->m_pkthdr.header = NULL; - n->m_pkthdr.csum_flags = 0; - n->m_pkthdr.csum_data = 0; - n->m_pkthdr.aux = (struct mbuf *)NULL; - n->m_pkthdr.reserved1 = NULL; - n->m_pkthdr.reserved2 = NULL; + _M_CLEAR_PKTHDR(n); } } else { MBUF_UNLOCK(); @@ -1810,54 +1806,29 @@ void m_mcheck(struct mbuf *m) panic("mget MCHECK: m_type=%x m=%x", m->m_type, m); } -#if 0 -#include - -static int mhog_num = 0; -static struct mbuf *mhog_chain = 0; -static int mhog_wait = 1; - -static int -sysctl_mhog_num SYSCTL_HANDLER_ARGS +void +mbuf_expand_thread(void) { - int old = mhog_num; - int error; - - error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req); - if (!error && req->newptr) { - int i; - struct mbuf *m; - - if (mhog_chain) { - m_freem(mhog_chain); - mhog_chain = 0; - } - - for (i = 0; i < mhog_num; i++) { - MGETHDR(m, mhog_wait ? M_WAIT : M_DONTWAIT, MT_DATA); - if (m == 0) - break; - - MCLGET(m, mhog_wait ? 
M_WAIT : M_DONTWAIT); - if ((m->m_flags & M_EXT) == 0) { - m_free(m); - m = 0; - break; - } - m->m_next = mhog_chain; - mhog_chain = m; + while (1) { + int expand_mcl; + MBUF_LOCK(); + expand_mcl = mbuf_expand_mcl; + mbuf_expand_mcl = 0; + MBUF_UNLOCK(); + if (expand_mcl) { + caddr_t p; + MCLALLOC(p, M_WAIT); + if (p) MCLFREE(p); } - mhog_num = i; + assert_wait(&mbuf_expand_thread_wakeup, THREAD_UNINT); + (void) thread_block(mbuf_expand_thread); } - - return error; } -SYSCTL_NODE(_kern_ipc, OID_AUTO, mhog, CTLFLAG_RW, 0, "mbuf hog"); - -SYSCTL_PROC(_kern_ipc_mhog, OID_AUTO, cluster, CTLTYPE_INT|CTLFLAG_RW, - &mhog_num, 0, &sysctl_mhog_num, "I", ""); -SYSCTL_INT(_kern_ipc_mhog, OID_AUTO, wait, CTLFLAG_RW, &mhog_wait, - 0, ""); -#endif +void +mbuf_expand_thread_init(void) +{ + mbuf_expand_thread_initialized++; + mbuf_expand_thread(); +} diff --git a/bsd/kern/uipc_mbuf2.c b/bsd/kern/uipc_mbuf2.c index cc6064fb0..fcf8daf18 100644 --- a/bsd/kern/uipc_mbuf2.c +++ b/bsd/kern/uipc_mbuf2.c @@ -382,7 +382,7 @@ m_aux_add(m, af, type) if (n) return n; - MGET(n, M_WAIT, m->m_type); + MGET(n, M_DONTWAIT, m->m_type); if (n == NULL) return NULL; diff --git a/bsd/kern/uipc_socket.c b/bsd/kern/uipc_socket.c index 5572ca926..21595dab8 100644 --- a/bsd/kern/uipc_socket.c +++ b/bsd/kern/uipc_socket.c @@ -62,12 +62,15 @@ #include #include +#include #include +#include #include #include #include #include #include +#include #include #include #include @@ -98,6 +101,19 @@ extern int get_tcp_str_size(); #include +static void filt_sordetach(struct knote *kn); +static int filt_soread(struct knote *kn, long hint); +static void filt_sowdetach(struct knote *kn); +static int filt_sowrite(struct knote *kn, long hint); +static int filt_solisten(struct knote *kn, long hint); + +static struct filterops solisten_filtops = + { 1, NULL, filt_sordetach, filt_solisten }; +static struct filterops soread_filtops = + { 1, NULL, filt_sordetach, filt_soread }; +static struct filterops sowrite_filtops = + { 1, NULL, filt_sowdetach, filt_sowrite }; + int socket_debug = 0; int socket_zone = M_SOCKET; so_gen_t so_gencnt; /* generation count for sockets */ @@ -123,8 +139,11 @@ SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW, &somaxconn, /* Should we get a maximum also ??? 
*/ static int sosendmaxchain = 65536; static int sosendminchain = 16384; +static int sorecvmincopy = 16384; SYSCTL_INT(_kern_ipc, OID_AUTO, sosendminchain, CTLFLAG_RW, &sosendminchain, 0, ""); +SYSCTL_INT(_kern_ipc, OID_AUTO, sorecvmincopy, CTLFLAG_RW, &sorecvmincopy, + 0, ""); void so_cache_timer(); struct mbuf *m_getpackets(int, int, int); @@ -366,7 +385,9 @@ socreate(dom, aso, type, proto) register struct protosw *prp; register struct socket *so; register int error = 0; - +#if TCPDEBUG + extern int tcpconsdebug; +#endif if (proto) prp = pffindproto(dom, proto, type); else @@ -414,6 +435,11 @@ socreate(dom, aso, type, proto) #endif error = (*prp->pr_usrreqs->pru_attach)(so, proto, p); if (error) { + /* + * Warning: + * If so_pcb is not zero, the socket will be leaked, + * so protocol attachment handler must be coded carefully + */ so->so_state |= SS_NOFDREF; sofree(so); return (error); @@ -422,7 +448,12 @@ socreate(dom, aso, type, proto) prp->pr_domain->dom_refs++; so->so_rcv.sb_so = so->so_snd.sb_so = so; TAILQ_INIT(&so->so_evlist); +#if TCPDEBUG + if (tcpconsdebug == 2) + so->so_options |= SO_DEBUG; +#endif #endif + *aso = so; return (0); } @@ -968,7 +999,7 @@ restart: if ((atomic && resid > so->so_snd.sb_hiwat) || clen > so->so_snd.sb_hiwat) snderr(EMSGSIZE); - if (space < resid + clen && uio && + if (space < resid + clen && (atomic || space < so->so_snd.sb_lowat || space < clen)) { if (so->so_state & SS_NBIO) snderr(EWOULDBLOCK); @@ -1209,15 +1240,20 @@ soreceive(so, psa, uio, mp0, controlp, flagsp) struct mbuf **controlp; int *flagsp; { - register struct mbuf *m, **mp; - register struct mbuf *free_list, *ml; + register struct mbuf *m, **mp, *ml; register int flags, len, error, s, offset; struct protosw *pr = so->so_proto; struct mbuf *nextrecord; int moff, type = 0; int orig_resid = uio->uio_resid; struct kextcb *kp; - + volatile struct mbuf *free_list; + volatile int delayed_copy_len; + int can_delay; + int need_event; + struct proc *p = current_proc(); + + KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_START, so, uio->uio_resid, @@ -1231,8 +1267,10 @@ soreceive(so, psa, uio, mp0, controlp, flagsp) error = (*kp->e_soif->sf_soreceive)(so, psa, &uio, mp0, controlp, flagsp, kp); - if (error) + if (error) { + KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,0,0,0,0); return((error == EJUSTRETURN) ? 
0 : error); + } } kp = kp->e_next; } @@ -1256,8 +1294,10 @@ soreceive(so, psa, uio, mp0, controlp, flagsp) (so->so_options & SO_OOBINLINE) == 0 && (so->so_oobmark || (so->so_state & SS_RCVATMARK)))) { m = m_get(M_WAIT, MT_DATA); - if (m == NULL) + if (m == NULL) { + KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, ENOBUFS,0,0,0,0); return (ENOBUFS); + } error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK); if (error) goto bad; @@ -1292,6 +1332,9 @@ nooob: if (so->so_state & SS_ISCONFIRMING && uio->uio_resid) (*pr->pr_usrreqs->pru_rcvd)(so, 0); + + free_list = (struct mbuf *)0; + delayed_copy_len = 0; restart: error = sblock(&so->so_rcv, SBLOCKWAIT(flags)); if (error) { @@ -1314,9 +1357,10 @@ restart: */ if (m == 0 || (((flags & MSG_DONTWAIT) == 0 && so->so_rcv.sb_cc < uio->uio_resid) && - (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || + (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) && m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) { + KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1")); if (so->so_error) { if (m) @@ -1351,6 +1395,7 @@ restart: sbunlock(&so->so_rcv); if (socket_debug) printf("Waiting for socket data\n"); + error = sbwait(&so->so_rcv); if (socket_debug) printf("SORECEIVE - sbwait returned %d\n", error); @@ -1365,7 +1410,16 @@ dontblock: #ifndef __APPLE__ if (uio->uio_procp) uio->uio_procp->p_stats->p_ru.ru_msgrcv++; -#endif +#else /* __APPLE__ */ + /* + * 2207985 + * This should be uio->uio-procp; however, some callers of this + * function use auto variables with stack garbage, and fail to + * fill out the uio structure properly. + */ + if (p) + p->p_stats->p_ru.ru_msgrcv++; +#endif /* __APPLE__ */ nextrecord = m->m_nextpkt; if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) { KASSERT(m->m_type == MT_SONAME, ("receive 1a")); @@ -1417,10 +1471,15 @@ dontblock: moff = 0; offset = 0; - free_list = m; - ml = (struct mbuf *)0; + if (!(flags & MSG_PEEK) && uio->uio_resid > sorecvmincopy) + can_delay = 1; + else + can_delay = 0; + + need_event = 0; - while (m && uio->uio_resid > 0 && error == 0) { + + while (m && (uio->uio_resid - delayed_copy_len) > 0 && error == 0) { if (m->m_type == MT_OOBDATA) { if (type != MT_OOBDATA) break; @@ -1447,7 +1506,7 @@ dontblock: } #endif so->so_state &= ~SS_RCVATMARK; - len = uio->uio_resid; + len = uio->uio_resid - delayed_copy_len; if (so->so_oobmark && len > so->so_oobmark - offset) len = so->so_oobmark - offset; if (len > m->m_len - moff) @@ -1461,13 +1520,48 @@ dontblock: * block interrupts again. */ if (mp == 0) { - splx(s); - error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio); - s = splnet(); - if (error) - goto release; + if (can_delay && len == m->m_len) { + /* + * only delay the copy if we're consuming the + * mbuf and we're NOT in MSG_PEEK mode + * and we have enough data to make it worthwhile + * to drop and retake the funnel... can_delay + * reflects the state of the 2 latter constraints + * moff should always be zero in these cases + */ + delayed_copy_len += len; + } else { + splx(s); + + if (delayed_copy_len) { + error = sodelayed_copy(uio, &free_list, &delayed_copy_len); + + if (error) { + s = splnet(); + goto release; + } + if (m != so->so_rcv.sb_mb) { + /* + * can only get here if MSG_PEEK is not set + * therefore, m should point at the head of the rcv queue... + * if it doesn't, it means something drastically changed + * while we were out from behind the funnel in sodelayed_copy... + * perhaps a RST on the stream... 
in any event, the stream has + * been interrupted... it's probably best just to return + * whatever data we've moved and let the caller sort it out... + */ + break; + } + } + error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio); + + s = splnet(); + if (error) + goto release; + } } else uio->uio_resid -= len; + if (len == m->m_len - moff) { if (m->m_flags & M_EOR) flags |= MSG_EOR; @@ -1477,6 +1571,7 @@ dontblock: } else { nextrecord = m->m_nextpkt; sbfree(&so->so_rcv, m); + if (mp) { *mp = m; mp = &m->m_next; @@ -1484,7 +1579,9 @@ dontblock: *mp = (struct mbuf *)0; } else { m->m_nextpkt = 0; - if (ml != 0) + if (free_list == NULL) + free_list = m; + else ml->m_next = m; ml = m; so->so_rcv.sb_mb = m = m->m_next; @@ -1509,7 +1606,11 @@ dontblock: so->so_oobmark -= len; if (so->so_oobmark == 0) { so->so_state |= SS_RCVATMARK; - postevent(so, 0, EV_OOB); + /* + * delay posting the actual event until after + * any delayed copy processing has finished + */ + need_event = 1; break; } } else { @@ -1521,38 +1622,49 @@ dontblock: if (flags & MSG_EOR) break; /* - * If the MSG_WAITALL flag is set (for non-atomic socket), + * If the MSG_WAITALL or MSG_WAITSTREAM flag is set (for non-atomic socket), * we must not quit until "uio->uio_resid == 0" or an error * termination. If a signal/timeout occurs, return * with a short count but without error. * Keep sockbuf locked against other readers. */ - while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 && + while (flags & (MSG_WAITALL|MSG_WAITSTREAM) && m == 0 && (uio->uio_resid - delayed_copy_len) > 0 && !sosendallatonce(so) && !nextrecord) { if (so->so_error || so->so_state & SS_CANTRCVMORE) - break; + goto release; - if (ml) { - m_freem_list(free_list); + if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) + (*pr->pr_usrreqs->pru_rcvd)(so, flags); + if (sbwait(&so->so_rcv)) { + error = 0; + goto release; } - error = sbwait(&so->so_rcv); - if (error) { - sbunlock(&so->so_rcv); - splx(s); - KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, 0,0,0,0,0); - return (0); + /* + * have to wait until after we get back from the sbwait to do the copy because + * we will drop the funnel if we have enough data that has been delayed... by dropping + * the funnel we open up a window allowing the netisr thread to process the incoming packets + * and to change the state of this socket... we're issuing the sbwait because + * the socket is empty and we're expecting the netisr thread to wake us up when more + * packets arrive... if we allow that processing to happen and then sbwait, we + * could stall forever with packets sitting in the socket if no further packets + * arrive from the remote side. 
+ * + * we want to copy before we've collected all the data to satisfy this request to + * allow the copy to overlap the incoming packet processing on an MP system + */ + if (delayed_copy_len > sorecvmincopy && (delayed_copy_len > (so->so_rcv.sb_hiwat / 2))) { + + error = sodelayed_copy(uio, &free_list, &delayed_copy_len); + + if (error) + goto release; } m = so->so_rcv.sb_mb; if (m) { nextrecord = m->m_nextpkt; - free_list = m; } - ml = (struct mbuf *)0; } } - if (ml) { - m_freem_list(free_list); - } if (m && pr->pr_flags & PR_ATOMIC) { #ifdef __APPLE__ @@ -1576,6 +1688,19 @@ dontblock: #ifdef __APPLE__ if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0) flags |= MSG_HAVEMORE; + + if (delayed_copy_len) { + error = sodelayed_copy(uio, &free_list, &delayed_copy_len); + + if (error) + goto release; + } + if (free_list) { + m_freem_list((struct mbuf *)free_list); + free_list = (struct mbuf *)0; + } + if (need_event) + postevent(so, 0, EV_OOB); #endif if (orig_resid == uio->uio_resid && orig_resid && (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { @@ -1587,6 +1712,12 @@ dontblock: if (flagsp) *flagsp |= flags; release: + if (delayed_copy_len) { + error = sodelayed_copy(uio, &free_list, &delayed_copy_len); + } + if (free_list) { + m_freem_list((struct mbuf *)free_list); + } sbunlock(&so->so_rcv); splx(s); @@ -1600,6 +1731,38 @@ release: return (error); } + +int sodelayed_copy(struct uio *uio, struct mbuf **free_list, int *resid) +{ + int error = 0; + boolean_t dropped_funnel = FALSE; + struct mbuf *m; + + m = *free_list; + + if (*resid >= sorecvmincopy) { + dropped_funnel = TRUE; + + (void)thread_funnel_set(network_flock, FALSE); + } + while (m && error == 0) { + + error = uiomove(mtod(m, caddr_t), (int)m->m_len, uio); + + m = m->m_next; + } + m_freem_list(*free_list); + + *free_list = (struct mbuf *)NULL; + *resid = 0; + + if (dropped_funnel == TRUE) + (void)thread_funnel_set(network_flock, TRUE); + + return (error); +} + + int soshutdown(so, how) register struct socket *so; @@ -1615,8 +1778,10 @@ soshutdown(so, how) while (kp) { if (kp->e_soif && kp->e_soif->sf_soshutdown) { ret = (*kp->e_soif->sf_soshutdown)(so, how, kp); - if (ret) + if (ret) { + KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0,0,0,0,0); return((ret == EJUSTRETURN) ? 
0 : ret); + } } kp = kp->e_next; } @@ -1665,12 +1830,10 @@ sorflush(so) #endif asb = *sb; bzero((caddr_t)sb, sizeof (*sb)); -#ifndef __APPLE__ if (asb.sb_flags & SB_KNOTE) { sb->sb_sel.si_note = asb.sb_sel.si_note; sb->sb_flags = SB_KNOTE; } -#endif splx(s); if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) (*pr->pr_domain->dom_dispose)(asb.sb_mb); @@ -1887,6 +2050,18 @@ sosetopt(so, sopt) break; + case SO_NOADDRERR: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + goto bad; + if (optval) + so->so_flags |= SOF_NOADDRAVAIL; + else + so->so_flags &= ~SOF_NOADDRAVAIL; + + break; + default: error = ENOPROTOOPT; break; @@ -2060,6 +2235,10 @@ integer: optval = (so->so_flags & SOF_NOSIGPIPE); goto integer; + case SO_NOADDRERR: + optval = (so->so_flags & SOF_NOADDRAVAIL); + goto integer; + default: error = ENOPROTOOPT; break; @@ -2297,3 +2476,115 @@ sopoll(struct socket *so, int events, struct ucred *cred, void * wql) splx(s); return (revents); } + + +int +soo_kqfilter(struct file *fp, struct knote *kn, struct proc *p) +{ + struct socket *so = (struct socket *)kn->kn_fp->f_data; + struct sockbuf *sb; + int s; + + switch (kn->kn_filter) { + case EVFILT_READ: + if (so->so_options & SO_ACCEPTCONN) + kn->kn_fop = &solisten_filtops; + else + kn->kn_fop = &soread_filtops; + sb = &so->so_rcv; + break; + case EVFILT_WRITE: + kn->kn_fop = &sowrite_filtops; + sb = &so->so_snd; + break; + default: + return (1); + } + + if (sb->sb_sel.si_flags & SI_INITED) + return (1); + + s = splnet(); + if (KNOTE_ATTACH(&sb->sb_sel.si_note, kn)) + sb->sb_flags |= SB_KNOTE; + splx(s); + return (0); +} + +static void +filt_sordetach(struct knote *kn) +{ + struct socket *so = (struct socket *)kn->kn_fp->f_data; + int s = splnet(); + + if (so->so_rcv.sb_flags & SB_KNOTE && + !(so->so_rcv.sb_sel.si_flags & SI_INITED)) + if (KNOTE_DETACH(&so->so_rcv.sb_sel.si_note, kn)) + so->so_rcv.sb_flags &= ~SB_KNOTE; + splx(s); +} + +/*ARGSUSED*/ +static int +filt_soread(struct knote *kn, long hint) +{ + struct socket *so = (struct socket *)kn->kn_fp->f_data; + + kn->kn_data = so->so_rcv.sb_cc; + if (so->so_state & SS_CANTRCVMORE) { + kn->kn_flags |= EV_EOF; + kn->kn_fflags = so->so_error; + return (1); + } + if (so->so_error) /* temporary udp error */ + return (1); + if (kn->kn_sfflags & NOTE_LOWAT) + return (kn->kn_data >= kn->kn_sdata); + return (kn->kn_data >= so->so_rcv.sb_lowat); +} + +static void +filt_sowdetach(struct knote *kn) +{ + struct socket *so = (struct socket *)kn->kn_fp->f_data; + int s = splnet(); + + if(so->so_snd.sb_flags & SB_KNOTE && + !(so->so_snd.sb_sel.si_flags & SI_INITED)) + if (KNOTE_DETACH(&so->so_snd.sb_sel.si_note, kn)) + so->so_snd.sb_flags &= ~SB_KNOTE; + splx(s); +} + +/*ARGSUSED*/ +static int +filt_sowrite(struct knote *kn, long hint) +{ + struct socket *so = (struct socket *)kn->kn_fp->f_data; + + kn->kn_data = sbspace(&so->so_snd); + if (so->so_state & SS_CANTSENDMORE) { + kn->kn_flags |= EV_EOF; + kn->kn_fflags = so->so_error; + return (1); + } + if (so->so_error) /* temporary udp error */ + return (1); + if (((so->so_state & SS_ISCONNECTED) == 0) && + (so->so_proto->pr_flags & PR_CONNREQUIRED)) + return (0); + if (kn->kn_sfflags & NOTE_LOWAT) + return (kn->kn_data >= kn->kn_sdata); + return (kn->kn_data >= so->so_snd.sb_lowat); +} + +/*ARGSUSED*/ +static int +filt_solisten(struct knote *kn, long hint) +{ + struct socket *so = (struct socket *)kn->kn_fp->f_data; + + kn->kn_data = so->so_qlen; + return (! 
TAILQ_EMPTY(&so->so_comp)); +} + diff --git a/bsd/kern/uipc_socket2.c b/bsd/kern/uipc_socket2.c index 735acaf4c..82e610589 100644 --- a/bsd/kern/uipc_socket2.c +++ b/bsd/kern/uipc_socket2.c @@ -456,6 +456,9 @@ sowakeup(so, sb) } if (sb->sb_flags & SB_UPCALL) (*so->so_upcall)(so, so->so_upcallarg, M_DONTWAIT); + if (sb->sb_flags & SB_KNOTE && + !(sb->sb_sel.si_flags & SI_INITED)) + KNOTE(&sb->sb_sel.si_note, 0); } /* @@ -607,8 +610,10 @@ sbappend(sb, m) kp = sotokextcb(sbtoso(sb)); while (kp) { if (kp->e_sout && kp->e_sout->su_sbappend) { - if ((*kp->e_sout->su_sbappend)(sb, m, kp)) + if ((*kp->e_sout->su_sbappend)(sb, m, kp)) { + KERNEL_DEBUG((DBG_FNC_SBAPPEND | DBG_FUNC_END), sb, sb->sb_cc, kp, 0, 0); return; + } } kp = kp->e_next; } @@ -619,6 +624,7 @@ sbappend(sb, m) do { if (n->m_flags & M_EOR) { sbappendrecord(sb, m); /* XXXXXX!!!! */ + KERNEL_DEBUG((DBG_FNC_SBAPPEND | DBG_FUNC_END), sb, sb->sb_cc, 0, 0, 0); return; } } while (n->m_next && (n = n->m_next)); @@ -945,8 +951,7 @@ sbflush(sb) kp = kp->e_next; } - if (sb->sb_flags & SB_LOCK) - sb_lock(sb); + (void)sblock(sb, M_WAIT); while (sb->sb_mbcnt) { /* * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty: @@ -958,6 +963,9 @@ sbflush(sb) } if (sb->sb_cc || sb->sb_mb || sb->sb_mbcnt) panic("sbflush: cc %ld || mb %p || mbcnt %ld", sb->sb_cc, (void *)sb->sb_mb, sb->sb_mbcnt); + + sbunlock(sb); + postevent(0, sb, EV_RWBYTES); } @@ -986,8 +994,10 @@ sbdrop(sb, len) kp = sotokextcb(sbtoso(sb)); while (kp) { if (kp->e_sout && kp->e_sout->su_sbdrop) { - if ((*kp->e_sout->su_sbdrop)(sb, len, kp)) + if ((*kp->e_sout->su_sbdrop)(sb, len, kp)) { + KERNEL_DEBUG((DBG_FNC_SBDROP | DBG_FUNC_END), sb, len, kp, 0, 0); return; + } } kp = kp->e_next; } @@ -1278,7 +1288,7 @@ int pru_sopoll_notsupp(struct socket *so, int events, int sb_notify(struct sockbuf *sb) { - return ((sb->sb_flags & (SB_WAIT|SB_SEL|SB_ASYNC|SB_UPCALL)) != 0); + return ((sb->sb_flags & (SB_WAIT|SB_SEL|SB_ASYNC|SB_UPCALL|SB_KNOTE)) != 0); } /* diff --git a/bsd/kern/uipc_syscalls.c b/bsd/kern/uipc_syscalls.c index 7e73f9ff1..e2902e519 100644 --- a/bsd/kern/uipc_syscalls.c +++ b/bsd/kern/uipc_syscalls.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -77,6 +77,7 @@ #include #endif #include +#include #include @@ -96,29 +97,29 @@ #endif struct getsockname_args { - int fdes; + int fdes; caddr_t asa; - int *alen; + socklen_t *alen; }; struct getsockopt_args { - int s; - int level; - int name; + int s; + int level; + int name; caddr_t val; - int *avalsize; + socklen_t *avalsize; } ; struct accept_args { - int s; - caddr_t name; - int *anamelen; + int s; + caddr_t name; + socklen_t *anamelen; }; struct getpeername_args { - int fdes; - caddr_t asa; - int *alen; + int fdes; + caddr_t asa; + socklen_t *alen; }; @@ -172,6 +173,7 @@ socket(p, uap, retval) struct file *fp; int fd, error; + AUDIT_ARG(socket, uap->domain, uap->type, uap->protocol); thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); error = falloc(p, &fp, &fd); thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); @@ -196,9 +198,9 @@ socket(p, uap, retval) } struct bind_args { - int s; - caddr_t name; - int namelen; + int s; + caddr_t name; + socklen_t namelen; }; /* ARGSUSED */ @@ -212,13 +214,18 @@ bind(p, uap, retval) struct sockaddr *sa; int error; + AUDIT_ARG(fd, uap->s); error = getsock(p->p_fd, uap->s, &fp); if (error) return (error); error = getsockaddr(&sa, uap->name, uap->namelen); if (error) return (error); - error = sobind((struct socket *)fp->f_data, sa); + AUDIT_ARG(sockaddr, p, sa); + if (fp->f_data != NULL) + error = sobind((struct socket *)fp->f_data, sa); + else + error = EBADF; FREE(sa, M_SONAME); return (error); } @@ -239,10 +246,14 @@ listen(p, uap, retval) struct file *fp; int error; + AUDIT_ARG(fd, uap->s); error = getsock(p->p_fd, uap->s, &fp); if (error) return (error); - return (solisten((struct socket *)fp->f_data, uap->backlog)); + if (fp->f_data != NULL) + return (solisten((struct socket *)fp->f_data, uap->backlog)); + else + return (EBADF); } #ifndef COMPAT_OLDSOCK @@ -267,6 +278,7 @@ accept1(p, uap, retval, compat) short fflag; /* type must match fp->f_flag */ int tmpfd; + AUDIT_ARG(fd, uap->s); if (uap->name) { error = copyin((caddr_t)uap->anamelen, (caddr_t)&namelen, sizeof (namelen)); @@ -278,6 +290,10 @@ accept1(p, uap, retval, compat) return (error); s = splnet(); head = (struct socket *)fp->f_data; + if (head == NULL) { + splx(s); + return (EBADF); + } if ((head->so_options & SO_ACCEPTCONN) == 0) { splx(s); return (EINVAL); @@ -352,6 +368,7 @@ accept1(p, uap, retval, compat) goto gotnoname; return 0; } + AUDIT_ARG(sockaddr, p, sa); if (uap->name) { /* check sa_len before it is destroyed */ if (namelen > sa->sa_len) @@ -395,9 +412,9 @@ oaccept(p, uap, retval) #endif /* COMPAT_OLDSOCK */ struct connect_args { - int s; - caddr_t name; - int namelen; + int s; + caddr_t name; + socklen_t namelen; }; /* ARGSUSED */ int @@ -411,15 +428,19 @@ connect(p, uap, retval) struct sockaddr *sa; int error, s; + AUDIT_ARG(fd, uap->s); error = getsock(p->p_fd, uap->s, &fp); if (error) return (error); so = (struct socket *)fp->f_data; + if (so == NULL) + return (EBADF); if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) return (EALREADY); error = getsockaddr(&sa, uap->name, uap->namelen); if (error) return (error); + AUDIT_ARG(sockaddr, p, sa); error = soconnect(so, sa); if (error) goto bad; @@ -464,6 +485,7 @@ socketpair(p, uap, retval) struct socket *so1, *so2; int fd, error, sv[2]; + AUDIT_ARG(socket, uap->domain, uap->type, uap->protocol); error = socreate(uap->domain, &so1, uap->type, uap->protocol); if (error) return (error); @@ -583,6 +605,7 @@ sendit(p, s, mp, flags, retsize) KERNEL_DEBUG(DBG_FNC_SENDIT | 
DBG_FUNC_END, error,0,0,0,0); return (error); } + AUDIT_ARG(sockaddr, p, to); } else to = 0; if (mp->msg_control) { @@ -628,8 +651,11 @@ sendit(p, s, mp, flags, retsize) #endif len = auio.uio_resid; so = (struct socket *)fp->f_data; - error = so->so_proto->pr_usrreqs->pru_sosend(so, to, &auio, 0, control, - flags); + if (so == NULL) + error = EBADF; + else + error = so->so_proto->pr_usrreqs->pru_sosend(so, to, &auio, 0, control, + flags); if (error) { if (auio.uio_resid != len && (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) @@ -686,6 +712,7 @@ sendto(p, uap, retval) int stat; KERNEL_DEBUG(DBG_FNC_SENDTO | DBG_FUNC_START, 0,0,0,0,0); + AUDIT_ARG(fd, uap->s); msg.msg_name = uap->to; msg.msg_namelen = uap->tolen; @@ -798,6 +825,7 @@ sendmsg(p, uap, retval) int error; KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_START, 0,0,0,0,0); + AUDIT_ARG(fd, uap->s); if (error = copyin(uap->msg, (caddr_t)&msg, sizeof (msg))) { KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_END, error,0,0,0,0); @@ -884,9 +912,13 @@ recvit(p, s, mp, namelenp, retval) #endif len = auio.uio_resid; so = (struct socket *)fp->f_data; - error = so->so_proto->pr_usrreqs->pru_soreceive(so, &fromsa, &auio, - (struct mbuf **)0, mp->msg_control ? &control : (struct mbuf **)0, - &mp->msg_flags); + if (so == NULL) + error = EBADF; + else + error = so->so_proto->pr_usrreqs->pru_soreceive(so, &fromsa, &auio, + (struct mbuf **)0, mp->msg_control ? &control : (struct mbuf **)0, + &mp->msg_flags); + AUDIT_ARG(sockaddr, p, fromsa); if (error) { if (auio.uio_resid != len && (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) @@ -1019,6 +1051,7 @@ recvfrom(p, uap, retval) int error; KERNEL_DEBUG(DBG_FNC_RECVFROM | DBG_FUNC_START, 0,0,0,0,0); + AUDIT_ARG(fd, uap->s); if (uap->fromlenaddr) { error = copyin((caddr_t)uap->fromlenaddr, @@ -1047,7 +1080,7 @@ orecvfrom(p, uap, retval) { uap->flags |= MSG_COMPAT; - return (recvfrom(p, uap)); + return (recvfrom(p, uap, retval)); } #endif @@ -1148,6 +1181,7 @@ recvmsg(p, uap, retval) register int error; KERNEL_DEBUG(DBG_FNC_RECVMSG | DBG_FUNC_START, 0,0,0,0,0); + AUDIT_ARG(fd, uap->s); if (error = copyin((caddr_t)uap->msg, (caddr_t)&msg, sizeof (msg))) { @@ -1203,9 +1237,12 @@ shutdown(p, uap, retval) struct file *fp; int error; + AUDIT_ARG(fd, uap->s); error = getsock(p->p_fd, uap->s, &fp); if (error) return (error); + if (fp->f_data == NULL) + return (EBADF); return (soshutdown((struct socket *)fp->f_data, uap->how)); } @@ -1215,11 +1252,11 @@ shutdown(p, uap, retval) /* ARGSUSED */ struct setsockopt_args { - int s; - int level; - int name; - caddr_t val; - int valsize; + int s; + int level; + int name; + caddr_t val; + socklen_t valsize; }; int @@ -1232,6 +1269,7 @@ setsockopt(p, uap, retval) struct sockopt sopt; int error; + AUDIT_ARG(fd, uap->s); if (uap->val == 0 && uap->valsize != 0) return (EFAULT); if (uap->valsize < 0) @@ -1248,6 +1286,8 @@ setsockopt(p, uap, retval) sopt.sopt_valsize = uap->valsize; sopt.sopt_p = p; + if (fp->f_data == NULL) + return (EBADF); return (sosetopt((struct socket *)fp->f_data, &sopt)); } @@ -1283,6 +1323,8 @@ getsockopt(p, uap, retval) sopt.sopt_valsize = (size_t)valsize; /* checked non-negative above */ sopt.sopt_p = p; + if (fp->f_data == NULL) + return (EBADF); error = sogetopt((struct socket *)fp->f_data, &sopt); if (error == 0) { valsize = sopt.sopt_valsize; @@ -1382,6 +1424,8 @@ getsockname1(p, uap, retval, compat) if (error) return (error); so = (struct socket *)fp->f_data; + if (so == NULL) + return (EBADF); sa = 0; error = 
(*so->so_proto->pr_usrreqs->pru_sockaddr)(so, &sa); if (error) @@ -1450,6 +1494,8 @@ getpeername1(p, uap, retval, compat) if (error) return (error); so = (struct socket *)fp->f_data; + if (so == NULL) + return (EBADF); if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) return (ENOTCONN); error = copyin((caddr_t)uap->alen, (caddr_t)&len, sizeof (len)); @@ -1735,6 +1781,10 @@ sendfile(struct proc *p, struct sendfile_args *uap) if (error) goto done; so = (struct socket *)fp->f_data; + if (so == NULL) { + error = EBADF; + goto done; + } if (so->so_type != SOCK_STREAM) { error = EINVAL; goto done; diff --git a/bsd/kern/uipc_usrreq.c b/bsd/kern/uipc_usrreq.c index d8022230c..10688922f 100644 --- a/bsd/kern/uipc_usrreq.c +++ b/bsd/kern/uipc_usrreq.c @@ -663,6 +663,18 @@ unp_connect(so, nam, p) goto bad; } thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + + /* + * Check if socket was connected while we were trying to + * acquire the funnel. + * XXX - probably shouldn't return an error for SOCK_DGRAM + */ + if ((so->so_state & SS_ISCONNECTED) != 0) { + error = EISCONN; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto bad; + } + if (so->so_proto->pr_flags & PR_CONNREQUIRED) { if ((so2->so_options & SO_ACCEPTCONN) == 0 || (so3 = sonewconn(so2, 0)) == 0) { diff --git a/bsd/man/man2/Makefile b/bsd/man/man2/Makefile index 504f0f10a..986739b08 100644 --- a/bsd/man/man2/Makefile +++ b/bsd/man/man2/Makefile @@ -35,6 +35,7 @@ DATAFILES = \ fpathconf.2 \ fstat.2 \ fstatfs.2 \ + fsctl.2 \ fsync.2 \ ftruncate.2 \ futimes.2 \ @@ -64,6 +65,7 @@ DATAFILES = \ ioctl.2 \ issetugid.2 \ kill.2 \ + kqueue.2 \ ktrace.2 \ lchown.2 \ link.2 \ @@ -105,6 +107,9 @@ DATAFILES = \ rmdir.2 \ sbrk.2 \ select.2 \ + semctl.2 \ + semget.2 \ + semop.2 \ send.2 \ sendmsg.2 \ sendto.2 \ diff --git a/bsd/man/man2/chflags.2 b/bsd/man/man2/chflags.2 index 48a949e8f..70cb5097b 100644 --- a/bsd/man/man2/chflags.2 +++ b/bsd/man/man2/chflags.2 @@ -134,7 +134,7 @@ will fail if: .It Bq Er EBADF The descriptor is not valid. .It Bq Er EINVAL -.Fa Fd +.Fa fd refers to a socket, not to a file. .It Bq Er EPERM The effective user ID does not match the owner of the file and diff --git a/bsd/man/man2/chmod.2 b/bsd/man/man2/chmod.2 index 302be5974..bc34d2417 100644 --- a/bsd/man/man2/chmod.2 +++ b/bsd/man/man2/chmod.2 @@ -161,7 +161,7 @@ will fail if: .It Bq Er EBADF The descriptor is not valid. .It Bq Er EINVAL -.Fa Fd +.Fa fd refers to a socket, not to a file. .It Bq Er EROFS The file resides on a read-only file system. diff --git a/bsd/man/man2/chown.2 b/bsd/man/man2/chown.2 index 3e5f14801..3ce057f3b 100644 --- a/bsd/man/man2/chown.2 +++ b/bsd/man/man2/chown.2 @@ -117,10 +117,10 @@ An I/O error occurred while reading from or writing to the file system. will fail if: .Bl -tag -width Er .It Bq Er EBADF -.Fa Fd +.Fa fd does not refer to a valid descriptor. .It Bq Er EINVAL -.Fa Fd +.Fa fd refers to a socket, not a file. .It Bq Er EPERM The effective user ID is not the super-user. diff --git a/bsd/man/man2/connect.2 b/bsd/man/man2/connect.2 index 6f8d8db18..e06e59fc5 100644 --- a/bsd/man/man2/connect.2 +++ b/bsd/man/man2/connect.2 @@ -69,7 +69,10 @@ only once; datagram sockets may use .Fn connect multiple times to change their association. Datagram sockets may dissolve the association -by connecting to an invalid address, such as a null address. +by connecting to an invalid address, such as a null address +or an address with +the address family set to AF_UNPSEC (the error +EAFNOSUPPORT will be harmlessly returned). 
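For illustration, a minimal sketch of dissolving a datagram socket's association as just described. The standard constant is spelled AF_UNSPEC in <sys/socket.h>, and the EAFNOSUPPORT return is the expected, harmless outcome:

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <errno.h>
    #include <string.h>

    /*
     * Dissolve a datagram socket's peer association by connecting
     * to an address whose family is AF_UNSPEC.
     */
    int
    dissolve_association(int s)
    {
            struct sockaddr sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_family = AF_UNSPEC;
            sa.sa_len = sizeof(sa);         /* BSD sockaddr carries a length */

            if (connect(s, &sa, sizeof(sa)) == -1 && errno != EAFNOSUPPORT)
                    return (-1);            /* a genuine error */
            return (0);                     /* EAFNOSUPPORT is harmless here */
    }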
.Sh RETURN VALUES If the connection or binding succeeds, 0 is returned. Otherwise a -1 is returned, and a more specific error diff --git a/bsd/man/man2/execve.2 b/bsd/man/man2/execve.2 index c5c9838e6..f2bc9dd8c 100644 --- a/bsd/man/man2/execve.2 +++ b/bsd/man/man2/execve.2 @@ -233,10 +233,10 @@ is allowed by the imposed maximum .It Bq Er E2BIG The number of bytes in the new process's argument list is larger than the system-imposed limit. -The limit in the system as released is 20480 bytes -.Pf ( Dv NCARGS -in -.Ao Pa sys/param.h Ac ) . +This limit is specified by the +.Xr sysctl 3 +MIB variable +.Dv KERN_ARGMAX . .It Bq Er EFAULT The new process file is not as long as indicated by the size values in its header. diff --git a/bsd/man/man2/fork.2 b/bsd/man/man2/fork.2 index e9a810626..22fbd9354 100644 --- a/bsd/man/man2/fork.2 +++ b/bsd/man/man2/fork.2 @@ -106,6 +106,6 @@ There is insufficient swap space for the new process. .Xr wait 2 .Sh HISTORY A -.Fn fork 2 +.Fn fork function call appeared in .At v6 . diff --git a/bsd/man/man2/fsctl.2 b/bsd/man/man2/fsctl.2 new file mode 100644 index 000000000..d5fc742b3 --- /dev/null +++ b/bsd/man/man2/fsctl.2 @@ -0,0 +1,135 @@ +.\" +.\" Copyright (c) 2003 Apple Computer, Inc. All rights reserved. +.\" +.\" @APPLE_LICENSE_HEADER_START@ +.\" +.\" The contents of this file constitute Original Code as defined in and +.\" are subject to the Apple Public Source License Version 1.1 (the +.\" "License"). You may not use this file except in compliance with the +.\" License. Please obtain a copy of the License at +.\" http://www.apple.com/publicsource and read it before using this file. +.\" +.\" This Original Code and all software distributed under the License are +.\" distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER +.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +.\" FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the +.\" License for the specific language governing rights and limitations +.\" under the License. +.\" +.\" @APPLE_LICENSE_HEADER_END@ +.\" +.Dd January 14, 2003 +.Dt FSCTL 2 +.Os "Mac OS X" +.Sh NAME +.Nm fsctl +.Nd control filesystems +.Sh SYNOPSIS +.Fd #include +.Fd #include +.Ft int +.Fn fsctl "const char *path" "unsigned long request" "void *data" "unsigned long options" +.Sh DESCRIPTION +The +.Fn fsctl +function manipulates the filesystem controlling mounted volumes. +In particular, many filesystem-specific operating +characteristics of mounted filesystems may be controlled +with +.Fn fsctl +requests. +.Fn fsctl +requests can also be used to extract filesystem-specific +information for a mounted volumes. +.Pp +.Fa path +is the path name of any file within the mounted filesystem. +An fsctl +.Fa request +has encoded in it whether the argument is an +.Dq in +parameter +or +.Dq out +parameter, and the size of the argument +.Fa data +in bytes. +Values for +.Fa request +are entirely filesystem-specific except for the following, defined in +.Ao Pa sys/fsctl.h Ac : +.Bd -literal -offset indent +FSGETMOUNTINFOSIZE /* Return size of mount info data */ +.Ed +.Pp +Macros and defines used in specifying an fsctl +.Fa request +are the same as for +.Fn ioctl +requests and are located in the file +.Ao Pa sys/ioccom.h Ac . +.Fa options +may specify special flags for the processing of the +.Fn fsctl +call. The options are specified by +.Em or Ns 'ing +the option values. 
The only option currently defined is +.Bd -literal -offset indent +#define FSOPT_NOFOLLOW 0x00000001 /* Don't follow symlinks */ +.Ed +.Pp +which is interpreted by the +.Fn fsctl +call to prevent following of symlinks. The +.Fa options +argument is passed to the filesystem, which may define and handle +additional +.Fa options +bit values. +.Sh RETURN VALUES +.Pp +If an error has occurred, a value of -1 is returned and +.Va errno +is set to indicate the error. +.Sh ERRORS +.Fn fsctl +will fail if: +.Bl -tag -width Er +.It Bq Er ENOTDIR +A component of the path prefix is not a directory. +.It Bq Er ENAMETOOLONG +A component of a pathname exceeded +.Dv {NAME_MAX} +characters, or an entire path name exceeded +.Dv {PATH_MAX} +characters. +.It Bq Er ENOENT +The named file does not exist. +.It Bq Er EACCES +Search permission is denied for a component of the path prefix. +.It Bq Er ELOOP +Too many symbolic links were encountered in translating the pathname. +.It Bq Er EFAULT +.Fa path +or +.Em data +points to an invalid address. +.It Bq Er EIO +An +.Tn I/O +error occurred while reading from or writing to the file system. +.It Bq Er EINVAL +.Fa request +or +.Fa data +is not valid. +.El +.Sh SEE ALSO +.Xr ioctl 2 , +.Xr getattrlist 2 , +.Xr setattrlist 2 +.Sh HISTORY +The +.Fn fsctl +function call appeared in Mac OS X version 10.0. diff --git a/bsd/man/man2/fsync.2 b/bsd/man/man2/fsync.2 index 3f4b0d694..7d72c2599 100644 --- a/bsd/man/man2/fsync.2 +++ b/bsd/man/man2/fsync.2 @@ -64,10 +64,10 @@ The fails if: .Bl -tag -width Er .It Bq Er EBADF -.Fa Fd +.Fa fd is not a valid descriptor. .It Bq Er EINVAL -.Fa Fd +.Fa fd refers to a socket, not to a file. .It Bq Er EIO An I/O error occurred while reading from or writing to the file system. diff --git a/bsd/man/man2/getdirentries.2 b/bsd/man/man2/getdirentries.2 index e6cc24a50..798847160 100644 --- a/bsd/man/man2/getdirentries.2 +++ b/bsd/man/man2/getdirentries.2 @@ -40,7 +40,8 @@ .Nm getdirentries .Nd "get directory entries in a filesystem independent format" .Sh SYNOPSIS -.Fd #include +.Fd #include +.Fd #include .Ft int .Fn getdirentries "int fd" "char *buf" "int nbytes" "long *basep" .Sh DESCRIPTION @@ -67,9 +68,10 @@ The data in the buffer is a series of .Em dirent structures each containing the following entries: .Bd -literal -offset indent -unsigned long d_fileno; -unsigned short d_reclen; -unsigned short d_namlen; +u_int32_t d_fileno; /* file number of entry */ +u_int16_t d_reclen; /* length of this record */ +u_int8_t d_type; /* file type, see below */ +u_int8_t d_namlen; /* length of string in d_name */ char d_name[MAXNAMELEN + 1]; /* see below */ .Ed .Pp @@ -81,6 +83,12 @@ Files that are linked by hard links (see .Xr link 2 ) have the same .Fa d_fileno . +Users of +.Fn getdirentries +should skip +entries with +.Fa d_fileno += 0, as such entries represent files which have been deleted but not yet removed from the directory entry. The .Fa d_reclen entry is the length, in bytes, of the directory record. @@ -95,6 +103,20 @@ Thus the actual size of may vary from 1 to .Dv MAXNAMELEN \&+ 1. +.Fa d_type +is a integer representing the type of the directory entry. The following types are defined in +.Aq sys/dirent.h : +.Bd -literal -offset indent +#define DT_UNKNOWN 0 +#define DT_FIFO 1 +#define DT_CHR 2 +#define DT_DIR 4 +#define DT_BLK 6 +#define DT_REG 8 +#define DT_LNK 10 +#define DT_SOCK 12 +#define DT_WHT 14 +.Ed .Pp Entries may be separated by extra space. 
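As a sketch of the read loop these fields imply (assuming the <sys/types.h> and <sys/dirent.h> headers from the synopsis, with error handling kept to a minimum): walk the packed records, skip deleted entries whose d_fileno is 0, and advance by d_reclen, since records are variable length.

    #include <sys/types.h>
    #include <sys/dirent.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    list_dir(const char *path)
    {
            char buf[4096];
            long base;
            int fd, nbytes;

            if ((fd = open(path, O_RDONLY, 0)) == -1)
                    return (-1);
            while ((nbytes = getdirentries(fd, buf, sizeof(buf), &base)) > 0) {
                    char *p = buf;
                    while (p < buf + nbytes) {
                            struct dirent *dp = (struct dirent *)p;
                            /*
                             * d_fileno == 0 marks an entry deleted but not
                             * yet removed from the directory; skip it.
                             */
                            if (dp->d_fileno != 0)
                                    printf("%u %.*s\n",
                                        (unsigned int)dp->d_fileno,
                                        (int)dp->d_namlen, dp->d_name);
                            p += dp->d_reclen;  /* step by record length */
                    }
            }
            close(fd);
            return (nbytes == -1 ? -1 : 0);
    }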
The diff --git a/bsd/man/man2/getfsstat.2 b/bsd/man/man2/getfsstat.2 index c924dd62c..525f6a04f 100644 --- a/bsd/man/man2/getfsstat.2 +++ b/bsd/man/man2/getfsstat.2 @@ -55,12 +55,12 @@ structures defined as follows: .Bd -literal typedef struct { int32_t val[2]; } fsid_t; -#define MFSNAMELEN 16 /* length of fs type name, including nul */ -#define MNAMELEN 32 /* length of buffer for returned name */ +#define MFSNAMELEN 15 /* length of fs type name, not inc. nul */ +#define MNAMELEN 90 /* length of buffer for returned name */ struct statfs { - short f_type; /* type of file system (unused; zero) */ - short f_flags; /* copy of mount flags */ + short f_otype; /* type of file system (reserved: zero) */ + short f_oflags; /* copy of mount flags (reserved: zero) */ long f_bsize; /* fundamental file system block size */ long f_iosize; /* optimal transfer block size */ long f_blocks; /* total data blocks in file system */ @@ -68,12 +68,17 @@ struct statfs { long f_bavail; /* free blocks avail to non-superuser */ long f_files; /* total file nodes in file system */ long f_ffree; /* free file nodes in fs */ - fsid_t f_fsid; /* file system id */ + fsid_t f_fsid; /* file system id (super-user only) */ uid_t f_owner; /* user that mounted the file system */ - long f_spare[4]; /* spare for later */ + short f_reserved1; /* reserved for future use */ + short f_type; /* type of file system (reserved) */ + long f_flags; /* copy of mount flags (reserved) */ + long f_reserved2[2]; /* reserved for future use */ char f_fstypename[MFSNAMELEN]; /* fs type name */ char f_mntonname[MNAMELEN]; /* directory on which mounted */ char f_mntfromname[MNAMELEN]; /* mounted file system */ + char f_reserved3; /* reserved for future use */ + long f_reserved4[4]; /* reserved for future use */ }; .Ed .Pp diff --git a/bsd/man/man2/getsockopt.2 b/bsd/man/man2/getsockopt.2 index cd618692a..b1ced804a 100644 --- a/bsd/man/man2/getsockopt.2 +++ b/bsd/man/man2/getsockopt.2 @@ -294,7 +294,12 @@ If a receive operation has been blocked for this much time without receiving additional data, it returns with a short count or with the error .Er EWOULDBLOCK -if no data were received. +if no data were received. The struct timeval parameter must represent a +positive time interval less than SHRT_MAX * 10 milliseconds (5 minutes +and 28 seconds) otherwise +.Fn setsockopt +returns with the error +.Er EDOM . .Pp .Dv SO_NOSIGPIPE is an option that prevents SIGPIPE from being raised when a write fails on a socket to which there is no reader; @@ -341,6 +346,8 @@ For this error may also be returned if .Fa optlen is not in a valid part of the process address space. +.It Bq Er EDOM +The argument value is out of bounds. .El .Sh SEE ALSO .Xr ioctl 2 , diff --git a/bsd/man/man2/intro.2 b/bsd/man/man2/intro.2 index 3892834cd..e9a29acd5 100644 --- a/bsd/man/man2/intro.2 +++ b/bsd/man/man2/intro.2 @@ -404,6 +404,47 @@ locks was reached. .It Er 78 ENOSYS Em "Function not implemented" . Attempted a system call that is not available on this system. +.It Er 79 EFTYPE Em "Inappropriate file type or format" . +The file was the wrong type for the operation, or a data +file had the wrong format. +.It Er 80 EAUTH Em "Authentication error" . +Attempted to use an invalid authentication ticket to +mount an NFS file system. +.It Er 81 ENEEDAUTH Em "Need authenticator" . +An authentication ticket must be obtained before the +given NFS file system may be mounted. +.It Er 82 EPWROFF Em "Device power is off" . +The device power is off. +.It Er 83 EDEVERR Em "Device error" . 
+A device error has occurred, e.g. a printer running out of paper. +.It Er 84 EOVERFLOW Em "Value too large to be stored in data type" . +A numerical result of the function was too large to be +stored in the caller provided space. +.It Er 85 EBADEXEC Em "Bad executable (or shared library)" . +The executable or shared library being referenced was malformed. +.It Er 86 EBADARCH Em "Bad CPU type in executable" . +The executable in question does not support the current CPU. +.It Er 87 ESHLIBVERS Em "Shared library version mismatch" . +The version of the shared library on the system does not match +the version which was expected. +.It Er 88 EBADMACHO Em "Malformed Mach-o file" . +The Mach object file is malformed. +.It Er 89 ECANCELED Em "Operation canceled" . +The scheduled operation was canceled. +.It Er 90 EIDRM Em "Identifier removed" . +An IPC identifier was removed while the current process +was waiting on it. +.It Er 91 ENOMSG Em "No message of desired type" . +An IPC message queue does not contain a message of the +desired type, or a message catalog does not contain the +requested message. +.It Er 92 EILSEQ Em "Illegal byte sequence" . +While decoding a multibyte character the function came +along an invalid or an incomplete sequence of bytes or +the given wide character is invalid. +.It Er 93 ENOATTR Em "Attribute not found" . +The specified extended attribute does not exist. +.El .Sh DEFINITIONS .Bl -tag -width Ds .It Process ID . @@ -664,8 +705,8 @@ communications protocols. Each protocol set supports addresses of a certain format. An Address Family is the set of addresses for a specific group of protocols. Each socket has an address chosen from the address family in which the socket was created. +.El .Sh SEE ALSO -.Xr intro 3 , .Xr perror 3 .Sh HISTORY An diff --git a/bsd/man/man2/kqueue.2 b/bsd/man/man2/kqueue.2 new file mode 100644 index 000000000..7006995f1 --- /dev/null +++ b/bsd/man/man2/kqueue.2 @@ -0,0 +1,499 @@ +.\" Copyright (c) 2000 Jonathan Lemon +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. 
+.\" +.\" $FreeBSD: src/lib/libc/sys/kqueue.2,v 1.32 2002/12/19 09:40:25 ru Exp $ +.\" +.Dd April 14, 2000 +.Dt KQUEUE 2 +.Os +.Sh NAME +.Nm kqueue , +.Nm kevent +.Nd kernel event notification mechanism +.Sh LIBRARY +.Lb libc +.Sh SYNOPSIS +.In sys/types.h +.In sys/event.h +.In sys/time.h +.Ft int +.Fn kqueue "void" +.Ft int +.Fn kevent "int kq" "const struct kevent *changelist" "int nchanges" "struct kevent *eventlist" "int nevents" "const struct timespec *timeout" +.Fn EV_SET "&kev" ident filter flags fflags data udata +.Sh DESCRIPTION +The +.Fn kqueue +system call +provides a generic method of notifying the user when an kernel +event (kevent) happens or a condition holds, based on the results +of small pieces of kernel code termed filters. +A kevent is identified by an (ident, filter) pair and specifies +the interesting conditions to be notified about for that pair. +An (ident, filter) pair can only appear once is a given kqueue. +Subsequent attempts to register the same pair for a given kqueue +will result in the replacement of the conditions being watched, +not an addition. +.Pp +The filter identified in a kevent is executed upon the initial +registration of that event in order to detect whether a preexisting +condition is present, and is also executed whenever an event is +passed to the filter for evaluation. +If the filter determines that the condition should be reported, +then the kevent is placed on the kqueue for the user to retrieve. +.Pp +The filter is also run when the user attempts to retrieve the kevent +from the kqueue. +If the filter indicates that the condition that triggered +the event no longer holds, the kevent is removed from the kqueue and +is not returned. +.Pp +Multiple events which trigger the filter do not result in multiple +kevents being placed on the kqueue; instead, the filter will aggregate +the events into a single struct kevent. +Calling +.Fn close +on a file descriptor will remove any kevents that reference the descriptor. +.Pp +The +.Fn kqueue +system call +creates a new kernel event queue and returns a descriptor. +The queue is not inherited by a child created with +.Xr fork 2 . +.Pp +The +.Fn kevent +system call +is used to register events with the queue, and return any pending +events to the user. +The +.Fa changelist +argument +is a pointer to an array of +.Va kevent +structures, as defined in +.Aq Pa sys/event.h . +All changes contained in the +.Fa changelist +are applied before any pending events are read from the queue. +The +.Fa nchanges +argument +gives the size of +.Fa changelist . +The +.Fa eventlist +argument +is a pointer to an array of kevent structures. +The +.Fa nevents +argument +determines the size of +.Fa eventlist . +If +.Fa timeout +is a non-NULL pointer, it specifies a maximum interval to wait +for an event, which will be interpreted as a struct timespec. If +.Fa timeout +is a NULL pointer, +.Fn kevent +waits indefinitely. To effect a poll, the +.Fa timeout +argument should be non-NULL, pointing to a zero-valued +.Va timespec +structure. The same array may be used for the +.Fa changelist +and +.Fa eventlist . +.Pp +The +.Fn EV_SET +macro is provided for ease of initializing a +kevent structure. 
+.Pp +The +.Va kevent +structure is defined as: +.Bd -literal +struct kevent { + uintptr_t ident; /* identifier for this event */ + short filter; /* filter for event */ + u_short flags; /* action flags for kqueue */ + u_int fflags; /* filter flag value */ + intptr_t data; /* filter data value */ + void *udata; /* opaque user data identifier */ +}; +.Ed +.Pp +The fields of +.Fa struct kevent +are: +.Bl -tag -width XXXfilter +.It ident +Value used to identify this event. +The exact interpretation is determined by the attached filter, +but often is a file descriptor. +.It filter +Identifies the kernel filter used to process this event. The pre-defined +system filters are described below. +.It flags +Actions to perform on the event. +.It fflags +Filter-specific flags. +.It data +Filter-specific data value. +.It udata +Opaque user-defined value passed through the kernel unchanged. +.El +.Pp +The +.Va flags +field can contain the following values: +.Bl -tag -width XXXEV_ONESHOT +.It EV_ADD +Adds the event to the kqueue. Re-adding an existing event +will modify the parameters of the original event, and not result +in a duplicate entry. Adding an event automatically enables it, +unless overridden by the EV_DISABLE flag. +.It EV_ENABLE +Permit +.Fn kevent +to return the event if it is triggered. +.It EV_DISABLE +Disable the event so +.Fn kevent +will not return it. The filter itself is not disabled. +.It EV_DELETE +Removes the event from the kqueue. Events which are attached to +file descriptors are automatically deleted on the last close of +the descriptor. +.It EV_ONESHOT +Causes the event to return only the first occurrence of the filter +being triggered. After the user retrieves the event from the kqueue, +it is deleted. +.It EV_CLEAR +After the event is retrieved by the user, its state is reset. +This is useful for filters which report state transitions +instead of the current state. Note that some filters may automatically +set this flag internally. +.It EV_EOF +Filters may set this flag to indicate filter-specific EOF condition. +.It EV_ERROR +See +.Sx RETURN VALUES +below. +.El +.Pp +The predefined system filters are listed below. +Arguments may be passed to and from the filter via the +.Va fflags +and +.Va data +fields in the kevent structure. +.Bl -tag -width EVFILT_SIGNAL +.It EVFILT_READ +Takes a file descriptor as the identifier, and returns whenever +there is data available to read. +The behavior of the filter is slightly different depending +on the descriptor type. +.Pp +.Bl -tag -width 2n +.It Sockets +Sockets which have previously been passed to +.Fn listen +return when there is an incoming connection pending. +.Va data +contains the size of the listen backlog. +.Pp +Other socket descriptors return when there is data to be read, +subject to the +.Dv SO_RCVLOWAT +value of the socket buffer. +This may be overridden with a per-filter low water mark at the +time the filter is added by setting the +NOTE_LOWAT +flag in +.Va fflags , +and specifying the new low water mark in +.Va data . +On return, +.Va data +contains the number of bytes of protocol data available to read. +.Pp +If the read direction of the socket has shutdown, then the filter +also sets EV_EOF in +.Va flags , +and returns the socket error (if any) in +.Va fflags . +It is possible for EOF to be returned (indicating the connection is gone) +while there is still data pending in the socket buffer. +.It Vnodes +Returns when the file pointer is not at the end of file. 
+.Va data +contains the offset from current position to end of file, +and may be negative. +.It "Fifos, Pipes" +Returns when the there is data to read; +.Va data +contains the number of bytes available. +.Pp +When the last writer disconnects, the filter will set EV_EOF in +.Va flags . +This may be cleared by passing in EV_CLEAR, at which point the +filter will resume waiting for data to become available before +returning. +.El +.It EVFILT_WRITE +Takes a file descriptor as the identifier, and returns whenever +it is possible to write to the descriptor. For sockets, pipes +and fifos, +.Va data +will contain the amount of space remaining in the write buffer. +The filter will set EV_EOF when the reader disconnects, and for +the fifo case, this may be cleared by use of EV_CLEAR. +Note that this filter is not supported for vnodes. +.Pp +For sockets, the low water mark and socket error handling is +identical to the EVFILT_READ case. +.It EVFILT_AIO +This filter is currently unsupported. +.\"The sigevent portion of the AIO request is filled in, with +.\".Va sigev_notify_kqueue +.\"containing the descriptor of the kqueue that the event should +.\"be attached to, +.\".Va sigev_value +.\"containing the udata value, and +.\".Va sigev_notify +.\"set to SIGEV_KEVENT. +.\"When the +.\".Fn aio_* +.\"system call is made, the event will be registered +.\"with the specified kqueue, and the +.\".Va ident +.\"argument set to the +.\".Fa struct aiocb +.\"returned by the +.\".Fn aio_* +.\"system call. +.\"The filter returns under the same conditions as aio_error. +.\".Pp +.\"Alternatively, a kevent structure may be initialized, with +.\".Va ident +.\"containing the descriptor of the kqueue, and the +.\"address of the kevent structure placed in the +.\".Va aio_lio_opcode +.\"field of the AIO request. However, this approach will not work on +.\"architectures with 64-bit pointers, and should be considered deprecated. +.It EVFILT_VNODE +Takes a file descriptor as the identifier and the events to watch for in +.Va fflags , +and returns when one or more of the requested events occurs on the descriptor. +The events to monitor are: +.Bl -tag -width XXNOTE_RENAME +.It NOTE_DELETE +The +.Fn unlink +system call +was called on the file referenced by the descriptor. +.It NOTE_WRITE +A write occurred on the file referenced by the descriptor. +.It NOTE_EXTEND +The file referenced by the descriptor was extended. +.It NOTE_ATTRIB +The file referenced by the descriptor had its attributes changed. +.It NOTE_LINK +The link count on the file changed. +.It NOTE_RENAME +The file referenced by the descriptor was renamed. +.It NOTE_REVOKE +Access to the file was revoked via +.Xr revoke 2 +or the underlying fileystem was unmounted. +.El +.Pp +On return, +.Va fflags +contains the events which triggered the filter. +.It EVFILT_PROC +Takes the process ID to monitor as the identifier and the events to watch for +in +.Va fflags , +and returns when the process performs one or more of the requested events. +If a process can normally see another process, it can attach an event to it. +The events to monitor are: +.Bl -tag -width XXNOTE_TRACKERR +.It NOTE_EXIT +The process has exited. +.It NOTE_FORK +The process has called +.Fn fork . +.It NOTE_EXEC +The process has executed a new process via +.Xr execve 2 +or similar call. +.It NOTE_TRACK +Follow a process across +.Fn fork +calls. 
The parent process will return with NOTE_TRACK set in the +.Va fflags +field, while the child process will return with NOTE_CHILD set in +.Va fflags +and the parent PID in +.Va data . +.It NOTE_TRACKERR +This flag is returned if the system was unable to attach an event to +the child process, usually due to resource limitations. +.El +.Pp +On return, +.Va fflags +contains the events which triggered the filter. +.It EVFILT_SIGNAL +Takes the signal number to monitor as the identifier and returns +when the given signal is delivered to the process. +This coexists with the +.Fn signal +and +.Fn sigaction +facilities, and has a lower precedence. The filter will record +all attempts to deliver a signal to a process, even if the signal has +been marked as SIG_IGN. Event notification happens after normal +signal delivery processing. +.Va data +returns the number of times the signal has occurred since the last call to +.Fn kevent . +This filter automatically sets the EV_CLEAR flag internally. +.It EVFILT_TIMER +This filter is currently unsupported. +.\"Establishes an arbitrary timer identified by +.\".Va ident . +.\"When adding a timer, +.\".Va data +.\"specifies the timeout period in milliseconds. +.\"The timer will be periodic unless EV_ONESHOT is specified. +.\"On return, +.\".Va data +.\"contains the number of times the timeout has expired since the last call to +.\".Fn kevent . +.\"This filter automatically sets the EV_CLEAR flag internally. +.El +.Sh RETURN VALUES +The +.Fn kqueue +system call +creates a new kernel event queue and returns a file descriptor. +If there was an error creating the kernel event queue, a value of -1 is +returned and errno set. +.Pp +The +.Fn kevent +system call +returns the number of events placed in the +.Fa eventlist , +up to the value given by +.Fa nevents . +If an error occurs while processing an element of the +.Fa changelist +and there is enough room in the +.Fa eventlist , +then the event will be placed in the +.Fa eventlist +with +.Dv EV_ERROR +set in +.Va flags +and the system error in +.Va data . +Otherwise, +.Dv -1 +will be returned, and +.Dv errno +will be set to indicate the error condition. +If the time limit expires, then +.Fn kevent +returns 0. +.Sh ERRORS +The +.Fn kqueue +system call fails if: +.Bl -tag -width Er +.It Bq Er ENOMEM +The kernel failed to allocate enough memory for the kernel queue. +.It Bq Er EMFILE +The per-process descriptor table is full. +.It Bq Er ENFILE +The system file table is full. +.El +.Pp +The +.Fn kevent +system call fails if: +.Bl -tag -width Er +.It Bq Er EACCES +The process does not have permission to register a filter. +.It Bq Er EFAULT +There was an error reading or writing the +.Va kevent +structure. +.It Bq Er EBADF +The specified descriptor is invalid. +.It Bq Er EINTR +A signal was delivered before the timeout expired and before any +events were placed on the kqueue for return. +.It Bq Er EINVAL +The specified time limit or filter is invalid. +.It Bq Er ENOENT +The event could not be found to be modified or deleted. +.It Bq Er ENOMEM +No memory was available to register the event. +.It Bq Er ESRCH +The specified process to attach to does not exist. +.El +.Sh SEE ALSO +.Xr aio_error 2 , +.Xr aio_read 2 , +.Xr aio_return 2 , +.Xr read 2 , +.Xr select 2 , +.Xr sigaction 2 , +.Xr write 2 , +.Xr signal 3 +.Sh HISTORY +The +.Fn kqueue +and +.Fn kevent +system calls first appeared in +.Fx 4.1 . +.Sh AUTHORS +The +.Fn kqueue +system and this manual page were written by +.An Jonathan Lemon Aq jlemon@FreeBSD.org . 
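To make the EVFILT_VNODE description above concrete, a minimal sketch (watch_file is illustrative only) that watches a file for writes, renames, and deletion. EV_CLEAR is set because vnode filters report state transitions rather than a level:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <err.h>

    void
    watch_file(const char *path)
    {
            struct kevent change, event;
            int kq, fd;

            if ((kq = kqueue()) == -1)
                    err(1, "kqueue");
            if ((fd = open(path, O_RDONLY, 0)) == -1)
                    err(1, "open");

            EV_SET(&change, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
                NOTE_WRITE | NOTE_RENAME | NOTE_DELETE, 0, NULL);
            /* nevents == 0: apply the change and return immediately. */
            if (kevent(kq, &change, 1, NULL, 0, NULL) == -1)
                    err(1, "kevent (register)");

            for (;;) {
                    if (kevent(kq, NULL, 0, &event, 1, NULL) == -1)
                            err(1, "kevent (wait)");
                    /* fflags reports which requested events fired. */
                    if (event.fflags & NOTE_WRITE)
                            printf("%s: written\n", path);
                    if (event.fflags & NOTE_RENAME)
                            printf("%s: renamed\n", path);
                    if (event.fflags & NOTE_DELETE) {
                            printf("%s: deleted\n", path);
                            break;
                    }
            }
    }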
+.Sh BUGS +Not all filesystem types support kqueue-style notifications. +And even some that do, like some remote filesystems, may only +support a subset of the notification semantics described +here. diff --git a/bsd/man/man2/mmap.2 b/bsd/man/man2/mmap.2 index 7038690f1..ba4541c78 100644 --- a/bsd/man/man2/mmap.2 +++ b/bsd/man/man2/mmap.2 @@ -125,10 +125,6 @@ Use of this option is discouraged. .It Dv MAP_HASSEMAPHORE Notify the kernel that the region may contain semaphores and that special handling may be necessary. -.It Dv MAP_INHERIT -Permit regions to be inherited across -.Xr exec 2 -system calls. .It Dv MAP_PRIVATE Modifications are private. .It Dv MAP_SHARED diff --git a/bsd/man/man2/mount.2 b/bsd/man/man2/mount.2 index 7d6bac642..aae88f178 100644 --- a/bsd/man/man2/mount.2 +++ b/bsd/man/man2/mount.2 @@ -82,9 +82,6 @@ suppress default semantics which affect filesystem access. .It Dv MNT_RDONLY The filesystem should be treated as read-only; Even the super-user may not write on it. -.It Dv MNT_NOATIME -Do not update the access time on files in the filesystem unless -the modification or status change times are also being updated. .It Dv MNT_NOEXEC Do not allow files to be executed from the filesystem. .It Dv MNT_NOSUID @@ -107,58 +104,20 @@ Some filesystems may not allow all flags to be changed. For example, most filesystems will not allow a change from read-write to read-only. .Pp +The flag +.Dv MNT_RELOAD +causes the vfs subsystem to update its data structures pertaining to +the specified already mounted filesystem. +.Pp The .Fa type argument defines the type of the filesystem. -The types of filesystems known to the system are defined in -.Aq Pa sys/mount.h . +.Pp .Fa Data is a pointer to a structure that contains the type specific arguments to mount. -The currently supported types of filesystems and -their type specific data are: -.Pp -.Dv MOUNT_FFS -.Bd -literal -offset indent -compact -struct ufs_args { - char *fspec; /* block special file to mount */ - struct export_args export; /* network export information */ -}; -.Ed -.Pp -.Dv MOUNT_NFS -.Bd -literal -offset indent -compact -struct nfs_args { - int version; /* args structure version */ - struct sockaddr *addr; /* file server address */ - int addrlen; /* length of address */ - int sotype; /* Socket type */ - int proto; /* and Protocol */ - u_char *fh; /* File handle to be mounted */ - int fhsize; /* Size, in bytes, of fh */ - int flags; /* flags */ - int wsize; /* write size in bytes */ - int rsize; /* read size in bytes */ - int readdirsize; /* readdir size in bytes */ - int timeo; /* initial timeout in .1 secs */ - int retrans; /* times to retry send */ - int maxgrouplist; /* Max. size of group list */ - int readahead; /* # of blocks to readahead */ - int leaseterm; /* Term (sec) of lease */ - int deadthresh; /* Retrans threshold */ - char *hostname; /* server's name */ -}; -.Ed -.Pp -.Dv MOUNT_MFS -.Bd -literal -offset indent -compact -struct mfs_args { - char *fspec; /* name to export for statfs */ - struct export_args export; /* if we can export an MFS */ - caddr_t base; /* base of filesystem in mem */ - u_long size; /* size of filesystem */ -}; -.Ed +The format for these argument structures is described in the +manual page for each filesystem. .Pp The .Fn umount @@ -193,7 +152,8 @@ is set to indicate the error. will fail when one of the following occurs: .Bl -tag -width [ENAMETOOLONG] .It Bq Er EPERM -The caller is not the super-user. 
+The caller is not the super-user, and the device-node and the mountpoint +do not have adequate ownership and permissions. .It Bq Er ENAMETOOLONG A component of a pathname exceeded .Dv {NAME_MAX} @@ -223,78 +183,13 @@ Another process currently holds a reference to points outside the process's allocated address space. .El .Pp -The following errors can occur for a -.Em ufs -filesystem mount: -.Bl -tag -width [ENOTBLK] -.It Bq Er ENODEV -A component of ufs_args -.Ar fspec -does not exist. -.It Bq Er ENOTBLK -.Ar Fspec -is not a block device. -.It Bq Er ENXIO -The major device number of -.Ar fspec -is out of range (this indicates no device driver exists -for the associated hardware). -.It Bq Er EBUSY -.Ar Fspec -is already mounted. -.It Bq Er EMFILE -No space remains in the mount table. -.It Bq Er EINVAL -The super block for the filesystem had a bad magic -number or an out of range block size. -.It Bq Er ENOMEM -Not enough memory was available to read the cylinder -group information for the filesystem. -.It Bq Er EIO -An I/O error occurred while reading the super block or -cylinder group information. -.It Bq Er EFAULT -.Ar Fspec -points outside the process's allocated address space. -.El -.Pp -The following errors can occur for a -.Em nfs -filesystem mount: -.Bl -tag -width [ETIMEDOUT] -.It Bq Er ETIMEDOUT -.Em Nfs -timed out trying to contact the server. -.It Bq Er EFAULT -Some part of the information described by nfs_args -points outside the process's allocated address space. -.El -.Pp -The following errors can occur for a -.Em mfs -filesystem mount: -.Bl -tag -width [EMFILE] -.It Bq Er EMFILE -No space remains in the mount table. -.It Bq Er EINVAL -The super block for the filesystem had a bad magic -number or an out of range block size. -.It Bq Er ENOMEM -Not enough memory was available to read the cylinder -group information for the filesystem. -.It Bq Er EIO -A paging error occurred while reading the super block or -cylinder group information. -.It Bq Er EFAULT -.Em Name -points outside the process's allocated address space. -.El -.Pp .Nm Umount may fail with one of the following errors: .Bl -tag -width [ENAMETOOLONG] .It Bq Er EPERM -The caller is not the super-user. +The caller is not the super-user, and the +.Nm mount() +was not done by the user. .It Bq Er ENOTDIR A component of the path is not a directory. .It Bq Er EINVAL @@ -318,17 +213,9 @@ An I/O error occurred while writing cached filesystem information. .Fa Dir points outside the process's allocated address space. .El -.Pp -A -.Em ufs -or -.Em mfs -mount can also fail if the maximum number of filesystems are currently -mounted. .Sh SEE ALSO .Xr mount 8 , .Xr umount 8 , -.Xr mfs 8 .Sh BUGS Some of the error codes need translation to more obvious messages. .Sh HISTORY diff --git a/bsd/man/man2/msync.2 b/bsd/man/man2/msync.2 index 831b8af34..32ec238f4 100644 --- a/bsd/man/man2/msync.2 +++ b/bsd/man/man2/msync.2 @@ -48,17 +48,9 @@ .Sh DESCRIPTION The .Fn msync -system call -writes any modified pages back to the filesystem and updates -the file modification time. -If -.Fa len -is 0, all modified pages within the region containing -.Fa addr -will be flushed; -if -.Fa len -is non-zero, only those pages containing +system call writes modified whole pages back to the filesystem +and updates the file modification time. 
+Only those pages containing .Fa addr and .Fa len-1 @@ -71,6 +63,10 @@ MS_ASYNC Return immediately MS_SYNC Perform synchronous writes MS_INVALIDATE Invalidate all cached data .Ed +.Pp +The +.Fa MS_ASYNC +flag is not permitted to be combined with other flags. .Sh RETURN VALUES If any errors occur, -1 is returned and errno is set to indicate the error. @@ -84,11 +80,10 @@ will fail if: is not a multiple of the hardware page size. .It Bq Er EINVAL .Fa len -is too large or negative. +is too large, or less than 1. .It Bq Er EINVAL .Fa flags -was both MS_ASYNC and MS_INVALIDATE. -Only one of these flags is allowed. +combined MS_ASYNC with another flag, which is not permitted. .It Bq Er EIO An I/O error occurred while writing to the file system. .El diff --git a/bsd/man/man2/munmap.2 b/bsd/man/man2/munmap.2 index ab097b704..7dd9cdbcf 100644 --- a/bsd/man/man2/munmap.2 +++ b/bsd/man/man2/munmap.2 @@ -70,6 +70,7 @@ parameter was not page aligned, the parameter was negative, or some part of the region being unmapped is not part of the currently valid address space. +.El .Sh "SEE ALSO" .Xr getpagesize 3 , .Xr msync 2 , diff --git a/bsd/man/man2/ptrace.2 b/bsd/man/man2/ptrace.2 index b3590325c..649458fec 100644 --- a/bsd/man/man2/ptrace.2 +++ b/bsd/man/man2/ptrace.2 @@ -370,6 +370,7 @@ on a process in violation of the requirements listed under .Dv PT_ATTACH above. .El +.El .Sh BUGS On the SPARC, the PC is set to the provided PC value for .Dv PT_CONTINUE diff --git a/bsd/man/man2/select.2 b/bsd/man/man2/select.2 index cc00e641d..39fd5d84b 100644 --- a/bsd/man/man2/select.2 +++ b/bsd/man/man2/select.2 @@ -40,6 +40,8 @@ .Nm select .Nd synchronous I/O multiplexing .Sh SYNOPSIS +.Fd #include +.D1 "- or -" .Fd #include .Fd #include .Fd #include diff --git a/bsd/man/man2/semctl.2 b/bsd/man/man2/semctl.2 new file mode 100644 index 000000000..2a7e8eb3b --- /dev/null +++ b/bsd/man/man2/semctl.2 @@ -0,0 +1,202 @@ +.\" +.\" Copyright (c) 1995 David Hovemeyer +.\" +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR +.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +.\" IN NO EVENT SHALL THE DEVELOPERS BE LIABLE FOR ANY DIRECT, INDIRECT, +.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+.\" +.\" $FreeBSD: src/lib/libc/sys/semctl.2,v 1.18 2002/12/19 09:40:25 ru Exp $ +.\" +.Dd September 12, 1995 +.Dt SEMCTL 2 +.Os +.Sh NAME +.Nm semctl +.Nd control operations on a semaphore set +.Sh LIBRARY +.Lb libc +.Sh SYNOPSIS +.In sys/types.h +.In sys/ipc.h +.In sys/sem.h +.Ft int +.Fn semctl "int semid" "int semnum" "int cmd" ... +.Sh DESCRIPTION +The +.Fn semctl +system call +performs the operation indicated by +.Fa cmd +on the semaphore set indicated by +.Fa semid . +A fourth argument, a +.Fa "union semun arg" , +is required for certain values of +.Fa cmd . +For the commands that use the +.Fa arg +argument, +.Fa "union semun" +is defined as follows: +.Bd -literal +.\" +.\" From : +.\" +union semun { + int val; /* value for SETVAL */ + struct semid_ds *buf; /* buffer for IPC_STAT & IPC_SET */ + u_short *array; /* array for GETALL & SETALL */ +}; +.Ed +.Pp +Commands are performed as follows: +.\" +.\" This section based on Stevens, _Advanced Programming in the UNIX +.\" Environment_. +.\" +.Bl -tag -width IPC_RMIDXXX +.It Dv IPC_STAT +Fetch the semaphore set's +.Fa "struct semid_ds" , +storing it in the memory pointed to by +.Fa arg.buf . +.It Dv IPC_SET +Changes the +.Fa sem_perm.uid , +.Fa sem_perm.gid , +and +.Fa sem_perm.mode +members of the semaphore set's +.Fa "struct semid_ds" +to match those of the struct pointed to by +.Fa arg.buf . +The calling process's effective uid must +match either +.Fa sem_perm.uid +or +.Fa sem_perm.cuid , +or it must have superuser privileges. +.It IPC_RMID +Immediately removes the semaphore set from the system. The calling +process's effective uid must equal the semaphore set's +.Fa sem_perm.uid +or +.Fa sem_perm.cuid , +or the process must have superuser privileges. +.It Dv GETVAL +Return the value of semaphore number +.Fa semnum . +.It Dv SETVAL +Set the value of semaphore number +.Fa semnum +to +.Fa arg.val . +Outstanding adjust on exit values for this semaphore in any process +are cleared. +.It Dv GETPID +Return the pid of the last process to perform an operation on +semaphore number +.Fa semnum . +.It Dv GETNCNT +Return the number of processes waiting for semaphore number +.Fa semnum Ns 's +value to become greater than its current value. +.It Dv GETZCNT +Return the number of processes waiting for semaphore number +.Fa semnum Ns 's +value to become 0. +.It Dv GETALL +Fetch the value of all of the semaphores in the set into the +array pointed to by +.Fa arg.array . +.It Dv SETALL +Set the values of all of the semaphores in the set to the values +in the array pointed to by +.Fa arg.array . +Outstanding adjust on exit values for all semaphores in this set, +in any process are cleared. +.El +.Pp +The +.Vt "struct semid_ds" +is defined as follows: +.Bd -literal +.\" +.\" Taken straight from . +.\" +struct semid_ds { + struct ipc_perm sem_perm; /* operation permission struct */ + struct sem *sem_base; /* pointer to first semaphore in set */ + u_short sem_nsems; /* number of sems in set */ + time_t sem_otime; /* last operation time */ + long sem_pad1; /* SVABI/386 says I need this here */ + time_t sem_ctime; /* last change time */ + /* Times measured in secs since */ + /* 00:00:00 GMT, Jan. 1, 1970 */ + long sem_pad2; /* SVABI/386 says I need this here */ + long sem_pad3[4]; /* SVABI/386 says I need this here */ +}; +.Ed +.Sh RETURN VALUES +On success, when +.Fa cmd +is one of +.Dv GETVAL , GETPID , GETNCNT +or +.Dv GETZCNT , +.Fn semctl +returns the corresponding value; otherwise, 0 is returned. 
+On failure, -1 is returned, and +.Va errno +is set to indicate the error. +.Sh ERRORS +The +.Fn semctl +system call +will fail if: +.Bl -tag -width Er +.It Bq Er EINVAL +No semaphore set corresponds to +.Fa semid . +.It Bq Er EINVAL +The +.Fa semnum +argument +is not in the range of valid semaphores for given semaphore set. +.It Bq Er EPERM +The calling process's effective uid does not match the uid of +the semaphore set's owner or creator. +.It Bq Er EACCES +Permission denied due to mismatch between operation and mode of +semaphore set. +.It Bq Er ERANGE +.Dv SETVAL +or +.Dv SETALL +attempted to set a semaphore outside the allowable range +.Bq 0 .. Dv SEMVMX . +.El +.Sh SEE ALSO +.Xr semget 2 , +.Xr semop 2 +.Sh BUGS +.Dv SETALL +may update some semaphore elements before returning an error. diff --git a/bsd/man/man2/semget.2 b/bsd/man/man2/semget.2 new file mode 100644 index 000000000..47ef04913 --- /dev/null +++ b/bsd/man/man2/semget.2 @@ -0,0 +1,146 @@ +.\" +.\" Copyright (c) 1995 David Hovemeyer +.\" +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR +.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +.\" IN NO EVENT SHALL THE DEVELOPERS BE LIABLE FOR ANY DIRECT, INDIRECT, +.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +.\" +.\" $FreeBSD: src/lib/libc/sys/semget.2,v 1.14 2002/12/19 09:40:25 ru Exp $ +.\" +.Dd September 12, 1995 +.Dt SEMGET 2 +.Os +.Sh NAME +.Nm semget +.Nd obtain a semaphore id +.Sh LIBRARY +.Lb libc +.Sh SYNOPSIS +.In sys/types.h +.In sys/ipc.h +.In sys/sem.h +.Ft int +.Fn semget "key_t key" "int nsems" "int flag" +.Sh DESCRIPTION +Based on the values of +.Fa key +and +.Fa flag , +.Fn semget +returns the identifier of a newly created or previously existing +set of semaphores. +.\" +.\" This is copied verbatim from the shmget manpage. Perhaps +.\" it should go in a common manpage, such as .Xr ipc 2 +.\" +The key +is analogous to a filename: it provides a handle that names an +IPC object. There are three ways to specify a key: +.Bl -bullet +.It +IPC_PRIVATE may be specified, in which case a new IPC object +will be created. +.It +An integer constant may be specified. If no IPC object corresponding +to +.Fa key +is specified and the IPC_CREAT bit is set in +.Fa flag , +a new one will be created. +.It +The +.Xr ftok 3 +function +may be used to generate a key from a pathname. +.El +.\" +.\" Likewise for this section, except SHM_* becomes SEM_*. 
+.\" +.Pp +The mode of a newly created IPC object is determined by +.Em OR Ns 'ing +the following constants into the +.Fa flag +argument: +.Bl -tag -width XSEM_WXX6XXX +.It Dv SEM_R +Read access for user. +.It Dv SEM_A +Alter access for user. +.It Dv ( SEM_R>>3 ) +Read access for group. +.It Dv ( SEM_A>>3 ) +Alter access for group. +.It Dv ( SEM_R>>6 ) +Read access for other. +.It Dv ( SEM_A>>6 ) +Alter access for other. +.El +.Pp +If a new set of semaphores is being created, +.Fa nsems +is used to indicate the number of semaphores the set should contain. +Otherwise, +.Fa nsems +may be specified as 0. +.Sh RETURN VALUES +The +.Fn semget +system call +returns the id of a semaphore set if successful; otherwise, -1 +is returned and +.Va errno +is set to indicate the error. +.Sh ERRORS +The +.Fn semget +system call +will fail if: +.Bl -tag -width Er +.\" ipcperm could fail (we're opening to read and write, as it were) +.It Bq Er EACCES +Access permission failure. +.\" +.\" sysv_sem.c is quite explicit about these, so I'm pretty sure +.\" this is accurate +.\" +.It Bq Er EEXIST +IPC_CREAT and IPC_EXCL were specified, and a semaphore set +corresponding to +.Fa key +already exists. +.It Bq Er EINVAL +The number of semaphores requested exceeds the system imposed maximum +per set. +.It Bq Er ENOSPC +Insufficiently many semaphores are available. +.It Bq Er ENOSPC +The kernel could not allocate a +.Fa "struct semid_ds" . +.It Bq Er ENOENT +No semaphore set was found corresponding to +.Fa key , +and IPC_CREAT was not specified. +.El +.Sh SEE ALSO +.Xr semctl 2 , +.Xr semop 2 , +.Xr ftok 3 diff --git a/bsd/man/man2/semop.2 b/bsd/man/man2/semop.2 new file mode 100644 index 000000000..94896e750 --- /dev/null +++ b/bsd/man/man2/semop.2 @@ -0,0 +1,289 @@ +.\" +.\" Copyright (c) 1995 David Hovemeyer +.\" +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR +.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +.\" IN NO EVENT SHALL THE DEVELOPERS BE LIABLE FOR ANY DIRECT, INDIRECT, +.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+.\" +.\" $FreeBSD: src/lib/libc/sys/semop.2,v 1.18 2003/01/25 21:27:37 alfred Exp $ +.\" +.Dd September 22, 1995 +.Dt SEMOP 2 +.Os +.Sh NAME +.Nm semop +.Nd atomic array of operations on a semaphore set +.Sh LIBRARY +.Lb libc +.Sh SYNOPSIS +.In sys/types.h +.In sys/ipc.h +.In sys/sem.h +.Ft int +.Fn semop "int semid" "struct sembuf *array" "size_t nops" +.Sh DESCRIPTION +The +.Fn semop +system call +atomically performs the array of operations indicated by +.Fa array +on the semaphore set indicated by +.Fa semid . +The length of +.Fa array +is indicated by +.Fa nops . +Each operation is encoded in a +.Vt "struct sembuf" , +which is defined as follows: +.Bd -literal +.\" +.\" From +.\" +struct sembuf { + u_short sem_num; /* semaphore # */ + short sem_op; /* semaphore operation */ + short sem_flg; /* operation flags */ +}; +.Ed +.Pp +For each element in +.Fa array , +.Va sem_op +and +.Va sem_flg +determine an operation to be performed on semaphore number +.Va sem_num +in the set. +The values +.Dv SEM_UNDO +and +.Dv IPC_NOWAIT +may be +.Em OR Ns 'ed +into the +.Va sem_flg +member in order to modify the behavior of the given operation. +.Pp +The operation performed depends as follows on the value of +.Va sem_op : +.\" +.\" This section is based on the description of semop() in +.\" Stevens, _Advanced Programming in the UNIX Environment_, +.\" and the semop(2) description in The Open Group Unix2 specification. +.\" +.Bl -bullet +.It +When +.Va sem_op +is positive and the process has alter permission, +the semaphore's value is incremented by +.Va sem_op Ns 's +value. +If +.Dv SEM_UNDO +is specified, the semaphore's adjust on exit value is decremented by +.Va sem_op Ns 's +value. +A positive value for +.Va sem_op +generally corresponds to a process releasing a resource +associated with the semaphore. +.It +The behavior when +.Va sem_op +is negative and the process has alter permission, +depends on the current value of the semaphore: +.Bl -bullet +.It +If the current value of the semaphore is greater than or equal to +the absolute value of +.Va sem_op , +then the value is decremented by the absolute value of +.Va sem_op . +If +.Dv SEM_UNDO +is specified, the semaphore's adjust on exit +value is incremented by the absolute value of +.Va sem_op . +.It +If the current value of the semaphore is less than the absolute value of +.Va sem_op , +one of the following happens: +.\" XXX a *second* sublist? +.Bl -bullet +.It +If +.Dv IPC_NOWAIT +was specified, then +.Fn semop +returns immediately with a return value of +.Er EAGAIN . +.It +Otherwise, the calling process is put to sleep until one of the following +conditions is satisfied: +.\" XXX We already have two sublists, why not a third? +.Bl -bullet +.It +Some other process removes the semaphore with the +.Dv IPC_RMID +option of +.Xr semctl 2 . +In this case, +.Fn semop +returns immediately with a return value of +.Er EIDRM . +.It +The process receives a signal that is to be caught. +In this case, the process will resume execution as defined by +.Xr sigaction 2 . +.It +The semaphore's +value is greater than or equal to the absolute value of +.Va sem_op . +When this condition becomes true, the semaphore's value is decremented +by the absolute value of +.Va sem_op , +the semaphore's adjust on exit value is incremented by the +absolute value of +.Va sem_op . +.El +.El +.El +.Pp +A negative value for +.Va sem_op +generally means that a process is waiting for a resource to become +available. 
+.It +When +.Va sem_op +is zero and the process has read permission, +one of the following will occur: +.Bl -bullet +.It +If the current value of the semaphore is equal to zero +then +.Fn semop +can return immediately. +.It +If +.Dv IPC_NOWAIT +was specified, then +.Fn semop +returns immediately with a return value of +.Er EAGAIN . +.It +Otherwise, the calling process is put to sleep until one of the following +conditions is satisfied: +.\" XXX Another nested sublists +.Bl -bullet +.It +Some other process removes the semaphore with the +.Dv IPC_RMID +option of +.Xr semctl 2 . +In this case, +.Fn semop +returns immediately with a return value of +.Er EIDRM . +.It +The process receives a signal that is to be caught. +In this case, the process will resume execution as defined by +.Xr sigaction 2 . +.It +The semaphore's value becomes zero. +.El +.El +.El +.Pp +For each semaphore a process has in use, the kernel maintains an +.Dq "adjust on exit" +value, as alluded to earlier. +When a process +exits, either voluntarily or involuntarily, the adjust on exit value +for each semaphore is added to the semaphore's value. +This can +be used to insure that a resource is released if a process terminates +unexpectedly. +.Sh RETURN VALUES +.Rv -std semop +.Sh ERRORS +The +.Fn semop +system call will fail if: +.Bl -tag -width Er +.It Bq Er EINVAL +No semaphore set corresponds to +.Fa semid , +or the process would exceed the system-defined limit for the number of +per-process +.Dv SEM_UNDO +structures. +.It Bq Er EACCES +Permission denied due to mismatch between operation and mode of +semaphore set. +.It Bq Er EAGAIN +The semaphore's value would have resulted in the process being put to sleep +and +.Dv IPC_NOWAIT +was specified. +.It Bq Er E2BIG +Too many operations were specified. +.Bq Dv SEMOPM +.It Bq Er EFBIG +.\" +.\" I'd have thought this would be EINVAL, but the source says +.\" EFBIG. +.\" +.Va sem_num +was not in the range of valid semaphores for the set. +.It Bq Er EIDRM +The semaphore set was removed from the system. +.It Bq Er EINTR +The +.Fn semop +system call was interrupted by a signal. +.It Bq Er ENOSPC +The system +.Dv SEM_UNDO +pool +.Bq Dv SEMMNU +is full. +.It Bq Er ERANGE +The requested operation would cause either +the semaphore's current value +.Bq Dv SEMVMX +or its adjust on exit value +.Bq Dv SEMAEM +to exceed the system-imposed limits. +.El +.Sh SEE ALSO +.Xr semctl 2 , +.Xr semget 2 , +.Xr sigaction 2 +.Sh BUGS +The +.Fn semop +system call +may block waiting for memory even if +.Dv IPC_NOWAIT +was specified. diff --git a/bsd/man/man2/setpgid.2 b/bsd/man/man2/setpgid.2 index 6b2cdbd94..7767b23fc 100644 --- a/bsd/man/man2/setpgid.2 +++ b/bsd/man/man2/setpgid.2 @@ -69,7 +69,7 @@ indicates the reason. .Fn Setpgid will fail and the process group will not be altered if: .Bl -tag -width Er -.It Bq Er EACCESS +.It Bq Er EACCES The value of the .Fa pid argument matches the process ID of a child process of the calling process, diff --git a/bsd/man/man2/shmat.2 b/bsd/man/man2/shmat.2 index 7ed9d0449..be77bdce0 100644 --- a/bsd/man/man2/shmat.2 +++ b/bsd/man/man2/shmat.2 @@ -62,11 +62,9 @@ the system will round the address down to a multiple of SHMLBA bytes (SHMLBA is defined in .Aq Pa sys/shm.h ). - A shared memory segment can be mapped read-only by specifying the SHM_RDONLY flag in .Fa shmflg . - .Fn shmdt unmaps the shared memory segment that is currently mapped at .Fa shmaddr @@ -91,7 +89,7 @@ is set to indicate the error. 
.Fn shmat will fail if: .Bl -tag -width Er -.It Bq Er EACCESS +.It Bq Er EACCES The calling process has no permission to access this shared memory segment. .It Bq Er ENOMEM There is not enough available data space for the calling process to @@ -99,19 +97,18 @@ map the shared memory segment. .It Bq Er EINVAL .Fa shmid is not a valid shared memory identifier. - .Fa shmaddr specifies an illegal address. .It Bq Er EMFILE The number of shared memory segments has reached the system-wide limit. .El - .Fn shmdt will fail if: .Bl -tag -width Er .It Bq Er EINVAL .Fa shmaddr is not the start address of a mapped shared memory segment. +.El .Sh SEE ALSO .Xr shmctl 2 , .Xr shmget 2 , diff --git a/bsd/man/man2/shmctl.2 b/bsd/man/man2/shmctl.2 index 036db38b3..6efae862d 100644 --- a/bsd/man/man2/shmctl.2 +++ b/bsd/man/man2/shmctl.2 @@ -48,13 +48,11 @@ The system call performs some control operations on the shared memory area specified by .Fa shmid . - Each shared memory segment has a data structure associated with it, parts of which may be altered by .Fn shmctl and parts of which determine the actions of .Fn shmctl . - This structure is defined as follows in .Aq Pa sys/shm.h : .Bd -literal @@ -92,7 +90,6 @@ struct ipc_perm { key_t key; /* user specified msg/sem/shm key */ }; .Ed - The operation to be performed by .Fn shmctl is specified in @@ -120,7 +117,6 @@ has an effective user id equal to either or .Va shm_perm.uid in the data structure associated with the shared memory segment. - .It Dv IPC_RMID Remove the shared memory segment specified by .Fa shmid @@ -131,7 +127,6 @@ or .Va shm_perm.uid values in the data structure associated with the queue can do this. .El - The read and write permissions on a shared memory identifier are determined by the .Va shm_perm.mode @@ -164,18 +159,16 @@ the effective uid match either the or .Va shm_perm.cuid fields of the data structure associated with the shared memory segment. - An attempt is made to increase the value of .Va shm_qbytes through IPC_SET but the caller is not the super-user. -.It Bq Er EACCESS +.It Bq Er EACCES The command is IPC_STAT and the caller has no read permission for this shared memory segment. .It Bq Er EINVAL .Fa shmid is not a valid shared memory segment identifier. - .Va cmd is not a valid command. .It Bq Er EFAULT diff --git a/bsd/man/man2/sigaction.2 b/bsd/man/man2/sigaction.2 index 24435274a..42e779002 100644 --- a/bsd/man/man2/sigaction.2 +++ b/bsd/man/man2/sigaction.2 @@ -1,5 +1,3 @@ -.\" $NetBSD: sigaction.2,v 1.7 1995/10/12 15:41:16 jtc Exp $ -.\" .\" Copyright (c) 1980, 1990, 1993 .\" The Regents of the University of California. All rights reserved. .\" @@ -31,7 +29,8 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. 
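Taken together, the shmat(2)/shmdt(2) and shmctl(2) pages above amount to the following hedged lifecycle sketch (segment size and permissions are arbitrary choices, not values from the patch):
.Bd -literal
/* Sketch: create, attach, use, detach, and remove a shared memory segment. */
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	char *p;

	if (shmid < 0)
		return 1;
	p = shmat(shmid, NULL, 0);	/* let the kernel pick the address */
	if (p == (char *)-1)
		return 1;
	strcpy(p, "hello");		/* ordinary memory once attached */
	printf("%s\n", p);
	shmdt(p);			/* unmap the segment */
	(void)shmctl(shmid, IPC_RMID, NULL);	/* then remove it */
	return 0;
}
.Ed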
.\" -.\" @(#)sigaction.2 8.2 (Berkeley) 4/3/94 +.\" From: @(#)sigaction.2 8.2 (Berkeley) 4/3/94 +.\" $FreeBSD: src/lib/libc/sys/sigaction.2,v 1.48 2003/03/24 16:07:19 charnier Exp $ .\" .Dd April 3, 1994 .Dt SIGACTION 2 @@ -39,24 +38,36 @@ .Sh NAME .Nm sigaction .Nd software signal facilities +.Sh LIBRARY +.Lb libc .Sh SYNOPSIS -.Fd #include +.In signal.h .Bd -literal -struct sigaction { - void (*sa_handler)(); - sigset_t sa_mask; - int sa_flags; +struct sigaction { + union { + void (*__sa_handler)(int); + void (*__sa_sigaction)(int, struct __siginfo *, void *); + } __sigaction_u; /* signal handler */ + int sa_flags; /* see signal options below */ + sigset_t sa_mask; /* signal mask to apply */ }; + +#define sa_handler __sigaction_u.__sa_handler +#define sa_sigaction __sigaction_u.__sa_sigaction .Ed .Ft int -.Fn sigaction "int sig" "const struct sigaction *act" "struct sigaction *oact" +.Fo sigaction +.Fa "int sig" +.Fa "const struct sigaction * restrict act" +.Fa "struct sigaction * restrict oact" +.Fc .Sh DESCRIPTION The system defines a set of signals that may be delivered to a process. Signal delivery resembles the occurrence of a hardware interrupt: -the signal is blocked from further occurrence, the current process +the signal is normally blocked from further occurrence, the current process context is saved, and a new one is built. A process may specify a .Em handler -to which a signal is delivered, or specify that a signal is to be +to which a signal is delivered, or specify that a signal is to be .Em ignored . A process may also specify that a default action is to be taken by the system when a signal occurs. @@ -71,11 +82,11 @@ of the process. This may be changed, on a per-handler basis, so that signals are taken on a special .Em "signal stack" . .Pp -Signal routines execute with the signal that caused their +Signal routines normally execute with the signal that caused their invocation .Em blocked , but other signals may yet occur. -A global +A global .Em "signal mask" defines the set of signals currently blocked from delivery to a process. The signal mask for a process is initialized @@ -99,10 +110,10 @@ appearing to interrupt the handlers for the previous signals before their first instructions. The set of pending signals is returned by the .Xr sigpending 2 -function. +system call. When a caught signal is delivered, the current state of the process is saved, -a new signal mask is calculated (as described below), +a new signal mask is calculated (as described below), and the signal handler is invoked. The call to the handler is arranged so that if the signal handling routine returns normally the process will resume execution in the context @@ -113,31 +124,34 @@ must arrange to restore the previous context itself. When a signal is delivered to a process a new signal mask is installed for the duration of the process' signal handler (or until a -.Xr sigprocmask -call is made). +.Xr sigprocmask 2 +system call is made). This mask is formed by taking the union of the current signal mask set, -the signal to be delivered, and +the signal to be delivered, and the signal mask associated with the handler to be invoked. .Pp -.Fn Sigaction -assigns an action for a specific signal. +The +.Fn sigaction +system call +assigns an action for a signal specified by +.Fa sig . If .Fa act is non-zero, it specifies an action -.Pf ( Dv SIG_DFL , +.Dv ( SIG_DFL , .Dv SIG_IGN , or a handler routine) and mask to be used when delivering the specified signal. 
-If +If .Fa oact is non-zero, the previous handling information for the signal is returned to the user. .Pp -Once a signal handler is installed, it remains installed +Once a signal handler is installed, it normally remains installed until another .Fn sigaction -call is made, or an +system call is made, or an .Xr execve 2 is performed. A signal-specific default action may be reset by @@ -163,10 +177,11 @@ current and pending instances of the signal are ignored and discarded. .Pp Options may be specified by setting -.Em sa_flags . -If the -.Dv SA_NOCLDSTOP -bit is set when installing a catching function +.Va sa_flags . +The meaning of the various bits is as follows: +.Bl -tag -offset indent -width SA_RESETHANDXX +.It Dv SA_NOCLDSTOP +If this bit is set when installing a catching function for the .Dv SIGCHLD signal, @@ -174,40 +189,56 @@ the .Dv SIGCHLD signal will be generated only when a child process exits, not when a child process stops. -Further, if the -.Dv SA_ONSTACK -bit is set in -.Em sa_flags , -the system will deliver the signal to the process on a +.It Dv SA_NOCLDWAIT +If this bit is set when calling +.Fn sigaction +for the +.Dv SIGCHLD +signal, the system will not create zombie processes when children of +the calling process exit. If the calling process subsequently issues +a +.Xr wait 2 +(or equivalent), it blocks until all of the calling process's child +processes terminate, and then returns a value of -1 with errno set to +.Er ECHILD . +.It Dv SA_ONSTACK +If this bit is set, the system will deliver the signal to the process +on a .Em "signal stack" , specified with -.Xr sigstack 2 . -.Pp -Finally, the -.Dv SA_SIGINFO -option causes the 2nd argument for the signal handler to be a pointer -to a -.Em siginfo_t -as described in -.Pa . -The -.Em siginfo_t -is a part of -.St -p1003.1b . -and provides much more information about the causes and -attributes of the signal that is being delivered. +.Xr sigaltstack 2 . +.It Dv SA_NODEFER +If this bit is set, further occurrences of the delivered signal are +not masked during the execution of the handler. +.It Dv SA_RESETHAND +If this bit is set, the handler is reset back to +.Dv SIG_DFL +at the moment the signal is delivered. +.It Dv SA_RESTART +See paragraph below. +.It Dv SA_SIGINFO +If this bit is set, the handler function is assumed to be pointed to by the +.Dv sa_sigaction +member of struct sigaction and should match the prototype shown above or as +below in +.Sx EXAMPLES . +This bit should not be set when assigning +.Dv SIG_DFL +or +.Dv SIG_IGN . +.El .Pp If a signal is caught during the system calls listed below, the call may be forced to terminate with the error -.Dv EINTR , +.Er EINTR , the call may return with a data transfer shorter than requested, or the call may be restarted. Restart of pending calls is requested by setting the .Dv SA_RESTART bit in -.Ar sa_flags . +.Va sa_flags . The affected system calls include .Xr open 2 , .Xr read 2 , @@ -233,8 +264,9 @@ or all signals, the signal mask, the signal stack, and the restart/interrupt flags are inherited by the child. .Pp -.Xr Execve 2 -reinstates the default +The +.Xr execve 2 +system call reinstates the default action for all signals which were caught and resets all signals to be caught on the user stack. 
Ignored signals remain ignored; @@ -245,13 +277,13 @@ The following is a list of all signals with names as in the include file .Aq Pa signal.h : .Bl -column SIGVTALARMXX "create core imagexxx" -.It Sy " NAME " " Default Action " " Description" +.It Sy "NAME Default Action Description" .It Dv SIGHUP No " terminate process" " terminal line hangup" .It Dv SIGINT No " terminate process" " interrupt program" .It Dv SIGQUIT No " create core image" " quit program" .It Dv SIGILL No " create core image" " illegal instruction" .It Dv SIGTRAP No " create core image" " trace trap" -.It Dv SIGABRT No " create core image" Xr abort 2 +.It Dv SIGABRT No " create core image" Ta Xr abort 3 call (formerly .Dv SIGIOT ) .It Dv SIGEMT No " create core image" " emulate instruction executed" @@ -259,7 +291,7 @@ call (formerly .It Dv SIGKILL No " terminate process" " kill program" .It Dv SIGBUS No " create core image" " bus error" .It Dv SIGSEGV No " create core image" " segmentation violation" -.It Dv SIGSYS No " create core image" " system call given invalid argument" +.It Dv SIGSYS No " create core image" " non-existent system call invoked" .It Dv SIGPIPE No " terminate process" " write on a pipe with no reader" .It Dv SIGALRM No " terminate process" " real-time timer expired" .It Dv SIGTERM No " terminate process" " software termination signal" @@ -287,62 +319,232 @@ is possible on a descriptor (see .It Dv SIGUSR2 No " terminate process" " User defined signal 2" .El .Sh NOTE -The mask specified in +The +.Fa sa_mask +field specified in .Fa act is not allowed to block .Dv SIGKILL or .Dv SIGSTOP . -This is done silently by the system. -.Sh RETURN VALUES -A 0 value indicated that the call succeeded. A \-1 return value -indicates an error occurred and +Any attempt to do so will be silently ignored. +.Pp +The following functions are either reentrant or not interruptible +by signals and are async-signal safe. +Therefore applications may +invoke them, without restriction, from signal-catching functions: +.Pp +Base Interfaces: +.Pp +.Fn _exit , +.Fn access , +.Fn alarm , +.Fn cfgetispeed , +.Fn cfgetospeed , +.Fn cfsetispeed , +.Fn cfsetospeed , +.Fn chdir , +.Fn chmod , +.Fn chown , +.Fn close , +.Fn creat , +.Fn dup , +.Fn dup2 , +.Fn execle , +.Fn execve , +.Fn fcntl , +.Fn fork , +.Fn fpathconf , +.Fn fstat , +.Fn fsync , +.Fn getegid , +.Fn geteuid , +.Fn getgid , +.Fn getgroups , +.Fn getpgrp , +.Fn getpid , +.Fn getppid , +.Fn getuid , +.Fn kill , +.Fn link , +.Fn lseek , +.Fn mkdir , +.Fn mkfifo , +.Fn open , +.Fn pathconf , +.Fn pause , +.Fn pipe , +.Fn raise , +.Fn read , +.Fn rename , +.Fn rmdir , +.Fn setgid , +.Fn setpgid , +.Fn setsid , +.Fn setuid , +.Fn sigaction , +.Fn sigaddset , +.Fn sigdelset , +.Fn sigemptyset , +.Fn sigfillset , +.Fn sigismember , +.Fn signal , +.Fn sigpending , +.Fn sigprocmask , +.Fn sigsuspend , +.Fn sleep , +.Fn stat , +.Fn sysconf , +.Fn tcdrain , +.Fn tcflow , +.Fn tcflush , +.Fn tcgetattr , +.Fn tcgetpgrp , +.Fn tcsendbreak , +.Fn tcsetattr , +.Fn tcsetpgrp , +.Fn time , +.Fn times , +.Fn umask , +.Fn uname , +.Fn unlink , +.Fn utime , +.Fn wait , +.Fn waitpid , +.Fn write . +.Pp +Realtime Interfaces: +.Pp +.Fn aio_error , +.Fn clock_gettime , +.Fn sigpause , +.Fn timer_getoverrun , +.Fn aio_return , +.Fn fdatasync , +.Fn sigqueue , +.Fn timer_gettime , +.Fn aio_suspend , +.Fn sem_post , +.Fn sigset , +.Fn timer_settime . +.Pp +ANSI C Interfaces: +.Pp +.Fn strcpy , +.Fn strcat , +.Fn strncpy , +.Fn strncat , +and perhaps some others. 
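The practical upshot of the lists above is a handler like the following sketch, which touches only async-signal-safe interfaces and preserves errno (advice spelled out just below):
.Bd -literal
/* Sketch: a handler restricted to async-signal-safe work, saving errno. */
#include <errno.h>
#include <signal.h>
#include <unistd.h>

static void
on_sigchld(int sig)
{
	int saved_errno = errno;	/* copy errno, restore on return */
	static const char msg[] = "child changed state\n";

	(void)sig;
	/* write(2) appears on the base list of safe interfaces above. */
	(void)write(STDERR_FILENO, msg, sizeof(msg) - 1);
	errno = saved_errno;
}
.Ed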
+.Pp
+Extension Interfaces:
+.Pp
+.Fn strlcpy ,
+.Fn strlcat .
+.Pp
+All functions not in the above lists are considered to be unsafe
+with respect to signals.  That is to say, the behaviour of such
+functions when called from a signal handler is undefined.
+In general though, signal handlers should do little more than set a
+flag; most other actions are not safe.
+.Pp
+Also, it is good practice to make a copy of the global variable
 .Va errno
-is set to indicated the reason.
-.Sh EXAMPLE
-The handler routine can be declared:
-.Bd -literal -offset indent
-void handler(sig, sip, scp)
-int sig;
-siginfo_t *sip;
-struct sigcontext *scp;
-.Ed
+and restore it before returning from the signal handler.
+This protects against the side effect of
+.Va errno
+being set by functions called from inside the signal handler.
+.Sh RETURN VALUES
+.Rv -std sigaction
+.Sh EXAMPLES
+There are three possible prototypes the handler may match:
+.Bl -tag -offset indent -width short
+.It ANSI C:
+.Ft void
+.Fn handler int ;
+.It Traditional BSD style:
+.Ft void
+.Fn handler int "int code" "struct sigcontext *scp" ;
+.It POSIX SA_SIGINFO:
+.Ft void
+.Fn handler int "siginfo_t *info" "ucontext_t *uap" ;
+.El
+.Pp
+The handler function should match the SA_SIGINFO prototype if the
+SA_SIGINFO bit is set in flags.
+It then should be pointed to by the
+.Dv sa_sigaction
+member of
+.Dv struct sigaction .
+Note that you should not assign SIG_DFL or SIG_IGN this way.
+.Pp
+If the SA_SIGINFO flag is not set, the handler function should match
+either the ANSI C or traditional
+.Bx
+prototype and be pointed to by
+the
+.Dv sa_handler
+member of
+.Dv struct sigaction .
+In practice,
+.Fx
+always sends the three arguments of the latter and since the ANSI C
+prototype is a subset, both will work.
+The
+.Dv sa_handler
+member declaration in
+.Fx
+include files is that of ANSI C (as required by POSIX),
+so a function pointer of a
+.Bx Ns -style
+function needs to be cast to
+compile without warning.
+The traditional
+.Bx
+style is not portable and since its capabilities
+are a full subset of a SA_SIGINFO handler,
+its use is deprecated.
 .Pp
-Here
+The
 .Fa sig
-is the signal number, into which the hardware faults and traps are
-mapped.
-If the
-.Dv SA_SIGINFO
-option is set,
-.Fa sip
-is a pointer to a
-.Dv siginfo_t
-as described in
-.Pa .
-If
-.Dv SA_SIGINFO
-is not set, this is NULL.
-.Fa Scp
-is a pointer to the
-.Fa sigcontext
-structure (defined in
-.Aq Pa signal.h ) ,
-used to restore the context from before the signal.
+argument is the signal number, one of the
+.Dv SIG...
+values from <signal.h>.
+.Pp
+The
+.Fa code
+argument of the
+.Bx Ns -style
+handler and the
+.Dv si_code
+member of the
+.Dv info
+argument to a SA_SIGINFO handler contain a numeric code explaining the
+cause of the signal, usually one of the
+.Dv SI_...
+values from <sys/signal.h>
+or codes specific to a signal, i.e. one of the
+.Dv FPE_...
+values for SIGFPE.
+.Pp
+The
+.Fa uap
+argument to a POSIX SA_SIGINFO handler points to an instance of
+ucontext_t.
 .Sh ERRORS
-.Fn Sigaction
+The
+.Fn sigaction
+system call
 will fail and no new signal handler will be installed if one
 of the following occurs:
 .Bl -tag -width Er
 .It Bq Er EFAULT
 Either
 .Fa act
-or
+or
 .Fa oact
 points to memory that is not a valid part of the process
 address space.
 .It Bq Er EINVAL
-.Fa Sig
+The
+.Fa sig
+argument
 is not a valid signal number.
 .It Bq Er EINVAL
 An attempt is made to ignore or supply a handler for
@@ -352,9 +554,9 @@ or
 .El
 .Sh STANDARDS
 The
-.Nm sigaction
-function is defined by
-.St -p1003.1-88 .
+.Fn sigaction
+system call is expected to conform to
+.St -p1003.1-90 .
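A hedged sketch of the SA_SIGINFO variant described in EXAMPLES; the third parameter is declared void * so that the function matches the sa_sigaction member added to struct sigaction earlier in this patch:
.Bd -literal
/* Sketch: an SA_SIGINFO handler matching the POSIX prototype above. */
#include <signal.h>

static void
on_sigfpe(int sig, siginfo_t *info, void *uap)
{
	/* info->si_code holds e.g. one of the FPE_... values for SIGFPE. */
	(void)sig;
	(void)info;
	(void)uap;	/* actually a ucontext_t * under SA_SIGINFO */
}

int
main(void)
{
	struct sigaction act;

	act.sa_sigaction = on_sigfpe;	/* sa_sigaction, not sa_handler */
	sigemptyset(&act.sa_mask);
	act.sa_flags = SA_SIGINFO;	/* request the three-argument form */
	return sigaction(SIGFPE, &act, NULL) < 0;
}
.Ed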
The .Dv SA_ONSTACK and @@ -375,21 +577,36 @@ as are the signals, and .Dv SIGINFO . Those signals are available on most -.Tn BSD Ns \-derived +.Bx Ns \-derived systems. +The +.Dv SA_NODEFER +and +.Dv SA_RESETHAND +flags are intended for backwards compatibility with other operating +systems. The +.Dv SA_NOCLDSTOP , +and +.Dv SA_NOCLDWAIT +.\" and +.\" SA_SIGINFO +flags are featuring options commonly found in other operating systems. .Sh SEE ALSO .Xr kill 1 , -.Xr ptrace 2 , .Xr kill 2 , -.Xr sigaction 2 , -.Xr sigprocmask 2 , -.Xr sigsuspend 2 , +.Xr ptrace 2 , +.Xr sigaltstack 2 , .Xr sigblock 2 , -.Xr sigsetmask 2 , .Xr sigpause 2 , -.Xr sigstack 2 , -.Xr sigvec 3 , +.Xr sigpending 2 , +.Xr sigprocmask 2 , +.Xr sigsetmask 2 , +.Xr sigsuspend 2 , +.Xr sigvec 2 , +.Xr wait 2 , +.Xr fpsetmask 3 , .Xr setjmp 3 , .Xr siginterrupt 3 , .Xr sigsetops 3 , +.Xr ucontext 3 , .Xr tty 4 diff --git a/bsd/man/man2/socket.2 b/bsd/man/man2/socket.2 index 364ab4a3c..3083d3983 100644 --- a/bsd/man/man2/socket.2 +++ b/bsd/man/man2/socket.2 @@ -221,7 +221,7 @@ within this domain. The per-process descriptor table is full. .It Bq Er ENFILE The system file table is full. -.It Bq Er EACCESS +.It Bq Er EACCES Permission to create a socket of the specified type and/or protocol is denied. .It Bq Er ENOBUFS diff --git a/bsd/man/man2/statfs.2 b/bsd/man/man2/statfs.2 index f29355d91..1a534dd9b 100644 --- a/bsd/man/man2/statfs.2 +++ b/bsd/man/man2/statfs.2 @@ -56,12 +56,12 @@ is a pointer to a statfs structure defined as follows: .Bd -literal typedef struct { int32_t val[2]; } fsid_t; -#define MFSNAMELEN 16 /* length of fs type name, including nul */ -#define MNAMELEN 32 /* length of buffer for returned name */ +#define MFSNAMELEN 15 /* length of fs type name, not inc. nul */ +#define MNAMELEN 90 /* length of buffer for returned name */ struct statfs { - short f_type; /* type of file system (unused; zero) */ - short f_flags; /* copy of mount flags */ + short f_otype; /* type of file system (reserved: zero) */ + short f_oflags; /* copy of mount flags (reserved: zero) */ long f_bsize; /* fundamental file system block size */ long f_iosize; /* optimal transfer block size */ long f_blocks; /* total data blocks in file system */ @@ -71,10 +71,15 @@ struct statfs { long f_ffree; /* free file nodes in fs */ fsid_t f_fsid; /* file system id (super-user only) */ uid_t f_owner; /* user that mounted the file system */ - long f_spare[4]; /* spare for later */ + short f_reserved1; /* reserved for future use */ + short f_type; /* type of file system (reserved) */ + long f_flags; /* copy of mount flags (reserved) */ + long f_reserved2[2]; /* reserved for future use */ char f_fstypename[MFSNAMELEN]; /* fs type name */ char f_mntonname[MNAMELEN]; /* directory on which mounted */ char f_mntfromname[MNAMELEN]; /* mounted file system */ + char f_reserved3; /* reserved for future use */ + long f_reserved4[4]; /* reserved for future use */ }; .Ed .Pp diff --git a/bsd/man/man2/wait.2 b/bsd/man/man2/wait.2 index d72a93dce..054a0d0ac 100644 --- a/bsd/man/man2/wait.2 +++ b/bsd/man/man2/wait.2 @@ -158,7 +158,7 @@ One of the first three macros will evaluate to a non-zero (true) value: True if the process terminated normally by a call to .Xr _exit 2 or -.Xr exit 2 . +.Xr exit 3 . .It Fn WIFSIGNALED status True if the process terminated due to receipt of a signal. .It Fn WIFSTOPPED status @@ -180,7 +180,7 @@ is true, evaluates to the low-order 8 bits of the argument passed to .Xr _exit 2 or -.Xr exit 2 +.Xr exit 3 by the child. 
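The WIFEXITED/WEXITSTATUS pairing reads naturally in code; a minimal sketch, with the remaining macros described next handling the signal case:
.Bd -literal
/* Sketch: reap a child and decode its status with the macros above. */
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0)
		_exit(7);		/* child: low-order 8 bits reported */
	if (pid < 0 || waitpid(pid, &status, 0) < 0)
		return 1;
	if (WIFEXITED(status))		/* normal _exit(2)/exit(3) */
		printf("exit status %d\n", WEXITSTATUS(status));
	else if (WIFSIGNALED(status))
		printf("killed by signal %d\n", WTERMSIG(status));
	return 0;
}
.Ed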
.It Fn WTERMSIG status If @@ -293,7 +293,7 @@ and the ability to restart a pending .Fn wait call are extensions to the POSIX interface. .Sh SEE ALSO -.Xr exit 2 , +.Xr exit 3 , .Xr sigaction 2 .Sh HISTORY A diff --git a/bsd/man/man4/Makefile b/bsd/man/man4/Makefile index 855721d5e..031eb7e86 100644 --- a/bsd/man/man4/Makefile +++ b/bsd/man/man4/Makefile @@ -29,7 +29,6 @@ DATAFILES = \ pty.4 \ random.4 \ route.4 \ - scsi.4 \ stderr.4 \ stdin.4 \ stdout.4 \ diff --git a/bsd/man/man4/icmp.4 b/bsd/man/man4/icmp.4 index 054b77018..65015098d 100644 --- a/bsd/man/man4/icmp.4 +++ b/bsd/man/man4/icmp.4 @@ -94,7 +94,7 @@ sockets can be opened with the .Dv SOCK_DGRAM socket type without requiring root privileges. The synopsis is the following: .Pp -.Fn socket AF_INET SOCK_DGRAM IPPROTO_IP +.Fn socket AF_INET SOCK_DGRAM IPPROTO_ICMP .Pp This can be used by non root privileged processes to send .Tn ICMP diff --git a/bsd/man/man4/scsi.4 b/bsd/man/man4/scsi.4 deleted file mode 100644 index 85c6cb914..000000000 --- a/bsd/man/man4/scsi.4 +++ /dev/null @@ -1,156 +0,0 @@ -.\" $OpenBSD: scsi.4,v 1.1 1996/08/04 20:28:20 tholo Exp $ -.\" -.Dd August 4, 1996 -.Dt SD 4 -.Os OpenBSD -.Sh NAME -.Nm scsi -.Nd scsi system -.Sh SYNOPSIS -.Nm scsibus* at aha? -.Nm scsibus* at ncr? -.Nm device cd* at scsibus? target ? lun ? -.Nm device ch* at scsibus? target ? lun ? -.Nm device sd* at scsibus? target ? lun ? -.Nm device st* at scsibus? target ? lun ? -.Nm device ss* at scsibus? target ? lun ? -.Nm device su* at scsibus? target ? lun ? -.Nm device uk* at scsibus? target ? lun ? -.Sh DESCRIPTION -The -.Em scsi -system provides a uniform and modular system for the implementation -of drivers to control various scsi devices, and to utilize different -scsi host adapters through host adapter drivers. When the system probes the -.Em SCSI -busses, it attaches any devices it finds to the appropriate -drivers. If no driver seems appropriate, then it attaches the device to the -uk (unknown) driver so that user level scsi ioctls may -still be performed against the device. -.Sh KERNEL CONFIGURATION -The option SCSIDEBUG enables the debug ioctl. -.Pp -All devices and the SCSI busses support boot time allocation so that -an upper number of devices and controllers does not need to be configured; -.Em "device sd* at scsibus? target ? lun ?" -will suffice for any number of disk drivers. -.Pp -The devices are either -.Em wired -so they appear as a particular device unit or -.Em counted -so that they appear as the next available unused unit. -.Pp -To configure a driver in the kernel without wiring down the device use a -config line similar to -.Em "device ch* at scsibus? target ? lun ?" -to include the changer driver. -.Pp -To wire down a unit use a config line similar to -.Em "device ch1 at scsibus0 target 4 lun 0" -to assign changer 1 as the changer with SCSI ID 4, -SCSI logical unit 0 on SCSI bus 0. -Individual scsibuses can be wired down to specific controllers with -a config line similar to -.Em "scsibus0 at ahc0" -which assigns scsi bus 0 to the first unit using the ahc driver. -For controllers supporting more than one bus, -the particular bus can be specified as in -.Em "scsibus3 at ahc1 bus 1" -which assigns scsibus 1 to the second bus probed on the ahc1 device. -.Pp -When you have a mixture of wired down and counted devices then the -counting begins with the first non-wired down unit for a particular -type. That is, if you have a disk wired down as -.Em "disk sd1 at scsibus? target ? lun ?" 
, -then the first non-wired disk shall come on line as -.Em sd2 . -.Sh IOCTLS -There are a number of ioctls that work on any -.Em SCSI -device. They are defined in -.Em sys/scsiio.h -and can be applied against any scsi device that permits them. -For the tape, it must be applied against the control -device. See the manual page for each device type for more information about -how generic scsi ioctls may be applied to a specific device. -.Bl -tag -width DIOCSDINFO____ -.It Dv SCIOCRESET* -reset a device. -.It Dv SCIOCDEBUG -Turn on debugging.. All scsi operations originating from this device's driver -will be traced to the console, along with other information. Debugging is -controlled by four bits, described in the header file. If no debugging is -configured into the kernel, debugging will have no effect. -.Em SCSI -debugging is controlled by the configuration option -.Em SCSIDEBUG. -.It Dv SCIOCCOMMAND -Take a scsi command and data from a user process and apply them to the scsi -device. Return all status information and return data to the process. The -ioctl will return a successful status even if the device rejected the -command. As all status is returned to the user, it is up to the user -process to examine this information to decide the success of the command. -.It Dv SCIOCREPROBE -Ask the system to probe the scsi busses for any new devices. If it finds -any, they will be attached to the appropriate drivers. The search can be -narrowed to a specific bus, target or lun. The new device may or may not -be related to the device on which the ioctl was performed. -.It Dv SCIOCIDENTIFY -Ask the driver what it's bus, target and lun are. -.It Dv SCIOCDECONFIG -Ask the device to disappear. This may not happen if the device is in use. -.El -.Sh NOTES -the generic scsi part of the system is still being mapped out. -Watch this space for changes. -.Pp - A device by the name of su (scsi_user) -(e.g su0-0-0) will map bus, target and lun to minor numbers. It has not -yet decided yet whether this device will be able to open a device that is -already controlled by an explicit driver. -.Sh ADAPTERS -The system allows common device drivers to work through many different -types of adapters. The adapters take requests from the upper layers and do -all IO between the -.Em SCSI -bus and the system. The maximum size of a transfer is governed by the -adapter. Most adapters can transfer 64KB in a single operation, however -many can transfer larger amounts. -.Sh TARGET MODE -Some adapters support -.Em target mode -in which the system is capable of operating as a device, responding to -operations initiated by another system. Target mode will be supported for -some adapters, but is not yet complete for this version of the scsi system. -.Sh DIAGNOSTICS -When the kernel is compiled with option SCSIDEBUG, the SCIOCDEBUG ioctl -can be used to enable various amounts of tracing information on any -specific device. Devices not being traced will not produce trace information. -The four bits that make up the debug level, each control certain types -of debugging information. -.Bl -tag -width "Bit 0" -.It Dv Bit 0 -shows all scsi bus operations including scsi commands, -error information and the first 48 bytes of any data transferred. -.It Dv Bit 1 -shows routines called. -.It Dv Bit 2 -shows information about what branches are taken and often some -of the return values of functions. -.It Dv Bit 3 -shows more detailed information including DMA scatter-gather logs. 
-.El
-.Sh SEE ALSO
-.Xr ch 4 ,
-.Xr cd 4 ,
-.Xr sd 4 ,
-.Xr ss 4 ,
-.Xr st 4 ,
-.Xr su 4
-and
-.Xr uk 4
-.Sh HISTORY
-This
-.Nm
-system appeared in MACH 2.5 at TRW.
diff --git a/bsd/man/man5/core.5 b/bsd/man/man5/core.5
index 097ce4f34..6e6ea7a2d 100644
--- a/bsd/man/man5/core.5
+++ b/bsd/man/man5/core.5
@@ -41,17 +41,25 @@ Files which would be larger than the limit are not created.
 The core file consists of the
 .Pa Xr Mach-O 5
 header as described in the
-.Aq Pa sys/loader.h
+.Aq Pa mach-o/loader.h
 file.
 The remainder of the core file consists
 of various sections described in the
 .Xr Mach-O 5
 header.
+.Sh NOTE
+Core dumps are disabled by default under Darwin/Mac OS X. To re-enable core dumps, a
+privileged user must edit
+.Pa /etc/hostconfig
+to contain the line:
+.Bd -literal
+COREDUMPS=-YES-
+.Ed
 .Sh SEE ALSO
 .Xr gdb 1 ,
 .Xr setrlimit 2 ,
 .Xr sigaction 2 ,
-.Xr Mach-O 5,
+.Xr Mach-O 5 ,
 .Xr sysctl 8
 .Sh HISTORY
 A
diff --git a/bsd/man/man5/dir.5 b/bsd/man/man5/dir.5
index d852d0c86..b33c223c0 100644
--- a/bsd/man/man5/dir.5
+++ b/bsd/man/man5/dir.5
@@ -82,64 +82,120 @@ and
 .Xr mount 8 . )
 .Pp
 The directory entry format is defined in the file
+.Aq sys/dirent.h
+and further in the file
 .Aq dirent.h :
 .Bd -literal
-#ifndef _DIRENT_H_
-#define _DIRENT_H_
-
+/*** Excerpt from <sys/dirent.h> ***/
 /*
-* A directory entry has a struct dirent at the front of it, containing its
-* inode number, the length of the entry, and the length of the name
-* contained in the entry. These are followed by the name padded to a 4
-* byte boundary with null bytes. All names are guaranteed null terminated.
-* The maximum length of a name in a directory is MAXNAMLEN.
-*/
+ * The dirent structure defines the format of directory entries returned by
+ * the getdirentries(2) system call.
+ *
+ * A directory entry has a struct dirent at the front of it, containing its
+ * inode number, the length of the entry, and the length of the name
+ * contained in the entry. These are followed by the name padded to a 4
+ * byte boundary with null bytes. All names are guaranteed null terminated.
+ * The maximum length of a name in a directory is MAXNAMLEN.
+ */
+
+#ifndef _SYS_DIRENT_H
+#define _SYS_DIRENT_H

 struct dirent {
-	u_long	d_fileno;		/* file number of entry */
-	u_short	d_reclen;		/* length of this record */
-	u_short	d_namlen;		/* length of string in d_name */
+	u_int32_t d_fileno;		/* file number of entry */
+	u_int16_t d_reclen;		/* length of this record */
+	u_int8_t  d_type;		/* file type, see below */
+	u_int8_t  d_namlen;		/* length of string in d_name */
 #ifdef _POSIX_SOURCE
-	char	d_name[MAXNAMLEN + 1];	/* maximum name length */
+	char	d_name[255 + 1];	/* name must be no longer than this */
 #else
 #define	MAXNAMLEN	255
-	char	d_name[MAXNAMLEN + 1];	/* maximum name length */
+	char	d_name[MAXNAMLEN + 1];	/* name must be no longer than this */
 #endif
-
 };

+/*
+ * File types
+ */
+#define	DT_UNKNOWN	 0
+#define	DT_FIFO		 1
+#define	DT_CHR		 2
+#define	DT_DIR		 4
+#define	DT_BLK		 6
+#define	DT_REG		 8
+#define	DT_LNK		10
+#define	DT_SOCK		12
+#define	DT_WHT		14
+
+#endif /* !_SYS_DIRENT_H */
+
+.Ed
+-----------------------------------------
+.Bd -literal
+/*** Excerpt from <dirent.h> ***/
+
+#ifndef _DIRENT_H
+#define _DIRENT_H
+
 #ifdef _POSIX_SOURCE
-typedef void *	DIR;
+typedef void *	DIR;
 #else
-#define	d_ino	d_fileno	/* backward compatibility */
+#define	d_ino	d_fileno	/* backward compatibility */

 /* definitions for library routines operating on directories. */
-#define	DIRBLKSIZ	1024
+#define	DIRBLKSIZ	1024
+
+struct _telldir;		/* see telldir.h */

 /* structure describing an open directory. */
 typedef struct _dirdesc {
-	int	dd_fd;		/* file descriptor associated with directory */
-	long	dd_loc;		/* offset in current buffer */
-	long	dd_size;	/* amount of data returned by getdirentries */
-	char	*dd_buf;	/* data buffer */
-	int	dd_len;		/* size of data buffer */
-	long	dd_seek;	/* magic cookie returned by getdirentries */
+	int	dd_fd;		/* file descriptor associated with directory */
+	long	dd_loc;		/* offset in current buffer */
+	long	dd_size;	/* amount of data returned by getdirentries */
+	char	*dd_buf;	/* data buffer */
+	int	dd_len;		/* size of data buffer */
+	long	dd_seek;	/* magic cookie returned by getdirentries */
+	long	dd_rewind;	/* magic cookie for rewinding */
+	int	dd_flags;	/* flags for readdir */
+	pthread_mutex_t	dd_lock; /* for thread locking */
+	struct _telldir	*dd_td;	/* telldir position recording */
 } DIR;

-#define	dirfd(dirp)	((dirp)->dd_fd)
+#define	dirfd(dirp)	((dirp)->dd_fd)

-#ifndef NULL
-#define	NULL	0
-#endif
+/* flags for opendir2 */
+#define	DTF_HIDEW	0x0001	/* hide whiteout entries */
+#define	DTF_NODUP	0x0002	/* don't return duplicate names */
+#define	DTF_REWIND	0x0004	/* rewind after reading union stack */
+#define	__DTF_READALL	0x0008	/* everything has been read */
-#endif /* _POSIX_SOURCE */
-#ifndef _KERNEL
-#include <sys/dirent.h>
-#endif /* !_KERNEL */
+#ifndef NULL
+#define	NULL	0
+#endif
+
+#endif /* _POSIX_SOURCE */
 #endif /* !_DIRENT_H_ */
 .Ed
diff --git a/bsd/man/man9/Makefile b/bsd/man/man9/Makefile
index 1e5e8000b..407b7d235 100644
--- a/bsd/man/man9/Makefile
+++ b/bsd/man/man9/Makefile
@@ -11,6 +11,7 @@ DATAFILES = \
	fetch.9 \
	store.9 \
	style.9 \
+	intro.9

 INSTALL_MAN_LIST = ${DATAFILES}
diff --git a/bsd/man/man9/intro.9 b/bsd/man/man9/intro.9
new file mode 100644
index 000000000..a6a501d29
--- /dev/null
+++ b/bsd/man/man9/intro.9
@@ -0,0 +1,109 @@
+.\" Copyright (c) 1983, 1991, 1993
+.\"	The Regents of the University of California.  All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\"    notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\"    notice, this list of conditions and the following disclaimer in the
+.\"    documentation and/or other materials provided with the distribution.
+.\" 3. All advertising materials mentioning features or use of this software
+.\"    must display the following acknowledgement:
+.\"	This product includes software developed by the University of
+.\"	California, Berkeley and its contributors.
+.\" 4. Neither the name of the University nor the names of its contributors
+.\"    may be used to endorse or promote products derived from this software
+.\"    without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
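Returning to the dirent format shown in dir.5 above, a minimal consumer through the readdir(3) interface declared in <dirent.h> (the directory path is a placeholder):
.Bd -literal
/* Sketch: walk a directory and print each entry's name and d_type. */
#include <sys/types.h>
#include <dirent.h>
#include <stdio.h>

int
main(void)
{
	DIR *dirp = opendir("/tmp");	/* placeholder directory */
	struct dirent *dp;

	if (dirp == NULL)
		return 1;
	while ((dp = readdir(dirp)) != NULL)
		printf("%-20s type %u\n", dp->d_name, (unsigned)dp->d_type);
	closedir(dirp);
	return 0;
}
.Ed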
+.\" +.\" $FreeBSD: src/share/man/man9/intro.9,v 1.15 2001/07/14 19:41:16 schweikh Exp $ +.\" +.Dd December 13, 1995 +.Dt INTRO 9 +.Os +.Sh NAME +.Nm intro +.Nd "introduction to system kernel interfaces" +.Sh DESCRIPTION +This section contains information about the interfaces and +subroutines in the kernel. +.Sh PROTOTYPES ANSI-C AND ALL THAT +Yes please. +.Pp +We would like all code to be fully prototyped. +.Pp +If your code compiles cleanly with +.Nm cc +.Ar -Wall +we would feel happy about it. +It is important to understand that this isn't a question of just shutting up +.Nm cc , +it is a question about avoiding the things it complains about. +To put it bluntly, don't hide the problem by casting and other +obfuscating practices, solve the problem. +.Sh INDENTATION AND STYLE +Believe it or not, there actually exists a guide for indentation and style. +It isn't generally applied though. +.Pp +We would appreciate if people would pay attention to it, and at least not +violate it blatantly. +.Pp +We don't mind it too badly if you have your own style, but please make +sure we can read it too. +.Pp +Please take time to read +.Xr style 9 +for more information. +.Sh NAMING THINGS +Some general rules exist: +.Bl -enum +.It +If a function is meant as a debugging aid in DDB, it should be enclosed +in +.Bd -literal -offset indent +#ifdef DDB + +#endif /* DDB */ +.Ed +.Pp +And the name of the procedure should start with the prefix +.Li DDB_ +to clearly identify the procedure as a debugger routine. +.El +.Sh SCOPE OF SYMBOLS +It is important to carefully consider the scope of symbols in the kernel. +The default is to make everything static, unless some reason requires +the opposite. +.Pp +There are several reasons for this policy, +the main one is that the kernel is one monolithic name-space, +and pollution is not a good idea here either. +.Pp +For device drivers and other modules that don't add new internal interfaces +to the kernel, the entire source should be in one file if possible. +That way all symbols can be made static. +.Pp +If for some reason a module is split over multiple source files, then try +to split the module along some major fault-line and consider using the +number of global symbols as your guide. +The fewer the better. +.Sh SEE ALSO +.Xr style 9 +.Sh HISTORY +The +.Nm +section manual page appeared in +.Fx 2.2 . diff --git a/bsd/miscfs/devfs/devfs_tree.c b/bsd/miscfs/devfs/devfs_tree.c index b380cb3aa..082fa947e 100644 --- a/bsd/miscfs/devfs/devfs_tree.c +++ b/bsd/miscfs/devfs/devfs_tree.c @@ -99,7 +99,7 @@ struct devfs_stats devfs_stats; /* hold stats */ #ifdef HIDDEN_MOUNTPOINT static struct mount *devfs_hidden_mount; -#endif HIDDEN_MOINTPOINT +#endif /* HIDDEN_MOINTPOINT */ static int devfs_ready = 0; @@ -137,7 +137,7 @@ devfs_sinit(void) devfs_mount(devfs_hidden_mount,"dummy",NULL,NULL,NULL); dev_root->de_dnp->dn_dvm = (struct devfsmount *)devfs_hidden_mount->mnt_data; -#endif HIDDEN_MOUNTPOINT +#endif /* HIDDEN_MOUNTPOINT */ devfs_ready = 1; return (0); } @@ -287,7 +287,7 @@ dev_finddir(char * orig_path, /* find this dir (err if not dir) */ return 0; } } -#endif 0 +#endif /***********************************************************************\ * Given a starting node (0 for root) and a pathname, return the node * * for the end item on the path. It MUST BE A DIRECTORY. 
If the 'CREATE' * @@ -338,6 +338,7 @@ dev_finddir(char * path, scan++; strncpy(component, start, scan - start); + component[ scan - start ] = '\0'; if (*scan == '/') scan++; @@ -670,14 +671,14 @@ devfs_dn_free(devnode_t * dnp) if (dnp->dn_vn == NULL) { #if 0 printf("devfs_dn_free: free'ing %x\n", (unsigned int)dnp); -#endif 0 +#endif devnode_free(dnp); /* no accesses/references */ } else { #if 0 printf("devfs_dn_free: marking %x for deletion\n", (unsigned int)dnp); -#endif 0 +#endif dnp->dn_delete = TRUE; } } diff --git a/bsd/miscfs/devfs/devfs_vfsops.c b/bsd/miscfs/devfs/devfs_vfsops.c index 84cc758ae..0d73574f0 100644 --- a/bsd/miscfs/devfs/devfs_vfsops.c +++ b/bsd/miscfs/devfs/devfs_vfsops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -90,7 +90,6 @@ devfs_init(struct vfsconf *vfsp) if (devfs_sinit()) return (EOPNOTSUPP); - printf("devfs enabled\n"); devfs_make_node(makedev(0, 0), DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0622, "console"); devfs_make_node(makedev(2, 0), DEVFS_CHAR, @@ -379,7 +378,8 @@ devfs_kernel_mount(char * mntname) /* * Allocate and initialize the filesystem. */ - mp = _MALLOC_ZONE((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); + MALLOC_ZONE(mp, struct mount *, (u_long)sizeof(struct mount), + M_MOUNT, M_WAITOK); bzero((char *)mp, (u_long)sizeof(struct mount)); /* Initialize the default IO constraints */ @@ -406,12 +406,15 @@ devfs_kernel_mount(char * mntname) if (error) { printf("devfs_kernel_mount: mount %s failed: %d", mntname, error); mp->mnt_vfc->vfc_refcount--; + + if (mp->mnt_kern_flag & MNTK_IO_XINFO) + FREE(mp->mnt_xinfo_ptr, M_TEMP); vfs_unbusy(mp, procp); - _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + + FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); vput(vp); return (error); } - printf("devfs on %s\n", mntname); simple_lock(&mountlist_slock); CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list); simple_unlock(&mountlist_slock); diff --git a/bsd/miscfs/devfs/devfs_vnops.c b/bsd/miscfs/devfs/devfs_vnops.c index 5db06b9ce..a76b043aa 100644 --- a/bsd/miscfs/devfs/devfs_vnops.c +++ b/bsd/miscfs/devfs/devfs_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -1153,7 +1153,7 @@ devfs_symlink(struct vop_symlink_args *ap) char *a_target; } */ { - struct componentname * cnp = ap->a_cnp; + struct componentname * cnp = ap->a_cnp; struct vnode *vp = NULL; int error = 0; devnode_t * dir_p; @@ -1186,9 +1186,13 @@ devfs_symlink(struct vop_symlink_args *ap) goto failure; vp = *vpp; vput(vp); - failure: - if ((cnp->cn_flags & SAVESTART) == 0) - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); +failure: + if ((cnp->cn_flags & SAVESTART) == 0) { + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); + } vput(ap->a_dvp); return error; } @@ -1239,13 +1243,17 @@ devfs_mknod(ap) dev_p->dn_uid = cnp->cn_cred->cr_uid; dev_p->dn_gid = dir_p->dn_gid; dev_p->dn_mode = vap->va_mode; - failure: +failure: if (*vpp) { vput(*vpp); *vpp = 0; } - if ((cnp->cn_flags & SAVESTART) == 0) - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + if ((cnp->cn_flags & SAVESTART) == 0) { + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); + } vput(dvp); return (error); } @@ -1382,20 +1390,6 @@ devfs_readlink(struct vop_readlink_args *ap) return error; } -static int -devfs_abortop(struct vop_abortop_args *ap) - /*struct vop_abortop_args { - struct vnode *a_dvp; - struct componentname *a_cnp; - } */ -{ - if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) { - FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); - } - return 0; -} - - static int devfs_reclaim(struct vop_reclaim_args *ap) /*struct vop_reclaim_args { @@ -1519,7 +1513,7 @@ static struct vnodeopv_entry_desc devfs_vnodeop_entries[] = { { &vop_symlink_desc, (VOPFUNC)devfs_symlink }, /* symlink */ { &vop_readdir_desc, (VOPFUNC)devfs_readdir }, /* readdir */ { &vop_readlink_desc, (VOPFUNC)devfs_readlink }, /* readlink */ - { &vop_abortop_desc, (VOPFUNC)devfs_abortop }, /* abortop */ + { &vop_abortop_desc, (VOPFUNC)nop_abortop }, /* abortop */ { &vop_inactive_desc, (VOPFUNC)devfs_inactive }, /* inactive */ { &vop_reclaim_desc, (VOPFUNC)devfs_reclaim }, /* reclaim */ { &vop_lock_desc, (VOPFUNC)nop_lock }, /* lock */ diff --git a/bsd/miscfs/devfs/devfsdefs.h b/bsd/miscfs/devfs/devfsdefs.h index 118576cdc..6056ca7d5 100644 --- a/bsd/miscfs/devfs/devfsdefs.h +++ b/bsd/miscfs/devfs/devfsdefs.h @@ -71,7 +71,7 @@ typedef enum { DEV_DIR, DEV_BDEV, DEV_CDEV, - DEV_SLNK, + DEV_SLNK } devfstype_t; extern int (**devfs_vnodeop_p)(void *); /* our own vector array for dirs */ @@ -180,19 +180,7 @@ struct devfsmount #define M_DEVFSNODE M_DEVFS #define M_DEVFSMNT M_DEVFS -static __inline__ void -getnanotime(struct timespec * t_p) -{ - struct timeval tv; - - microtime(&tv); - t_p->tv_sec = tv.tv_sec; - t_p->tv_nsec = tv.tv_usec * 1000; - return; -} - #define VTODN(vp) ((devnode_t *)(vp)->v_data) -extern void cache_purge(struct vnode *vp); /* vfs_cache.c */ static __inline__ int DEVFS_LOCK(struct proc * p) diff --git a/bsd/miscfs/fdesc/fdesc.h b/bsd/miscfs/fdesc/fdesc.h index 6c14f2c75..3c10b81b1 100644 --- a/bsd/miscfs/fdesc/fdesc.h +++ b/bsd/miscfs/fdesc/fdesc.h @@ -85,7 +85,7 @@ typedef enum { Froot, Fdevfd, Fdesc, - Flink, + Flink } fdntype; struct fdescnode { diff --git a/bsd/miscfs/fdesc/fdesc_vfsops.c b/bsd/miscfs/fdesc/fdesc_vfsops.c index 1d26b20e9..47e969141 100644 --- a/bsd/miscfs/fdesc/fdesc_vfsops.c +++ b/bsd/miscfs/fdesc/fdesc_vfsops.c @@ -91,7 +91,7 @@ fdesc_mount(mp, path, data, ndp, p) struct proc *p; { int error = 0; - u_int size; + size_t 
size; struct fdescmount *fmp; struct vnode *rvp; @@ -253,7 +253,7 @@ fdesc_sync(mp, waitfor) struct proc *)))eopnotsupp) #define fdesc_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \ size_t, struct proc *)))eopnotsupp) -#define fdesc_vget ((int (*) __P((struct mount *, ino_t, struct vnode **))) \ +#define fdesc_vget ((int (*) __P((struct mount *, void *, struct vnode **))) \ eopnotsupp) #define fdesc_vptofh ((int (*) __P((struct vnode *, struct fid *)))eopnotsupp) diff --git a/bsd/miscfs/fdesc/fdesc_vnops.c b/bsd/miscfs/fdesc/fdesc_vnops.c index 466921d5b..172eef3f7 100644 --- a/bsd/miscfs/fdesc/fdesc_vnops.c +++ b/bsd/miscfs/fdesc/fdesc_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -823,7 +823,7 @@ fdesc_badop() #define fdesc_mkdir ((int (*) __P((struct vop_mkdir_args *)))eopnotsupp) #define fdesc_rmdir ((int (*) __P((struct vop_rmdir_args *)))eopnotsupp) #define fdesc_symlink ((int (*) __P((struct vop_symlink_args *)))eopnotsupp) -#define fdesc_abortop ((int (*) __P((struct vop_abortop_args *)))nullop) +#define fdesc_abortop ((int (*) __P((struct vop_abortop_args *)))nop_abortop) #define fdesc_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock) #define fdesc_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock) #define fdesc_bmap ((int (*) __P((struct vop_bmap_args *)))fdesc_badop) diff --git a/bsd/miscfs/fifofs/fifo_vnops.c b/bsd/miscfs/fifofs/fifo_vnops.c index 446c8216b..e836f7b3b 100644 --- a/bsd/miscfs/fifofs/fifo_vnops.c +++ b/bsd/miscfs/fifofs/fifo_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -179,22 +179,22 @@ fifo_open(ap) int error; if ((fip = vp->v_fifoinfo) == NULL) { - MALLOC_ZONE(fip, struct fifoinfo *, - sizeof(*fip), M_VNODE, M_WAITOK); + MALLOC(fip, struct fifoinfo *, + sizeof(*fip), M_TEMP, M_WAITOK); vp->v_fifoinfo = fip; thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); if (error = socreate(AF_LOCAL, &rso, SOCK_STREAM, 0)) { thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - _FREE_ZONE(fip, sizeof *fip, M_VNODE); vp->v_fifoinfo = NULL; + FREE(fip, M_TEMP); return (error); } fip->fi_readsock = rso; if (error = socreate(AF_LOCAL, &wso, SOCK_STREAM, 0)) { (void)soclose(rso); thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - _FREE_ZONE(fip, sizeof *fip, M_VNODE); vp->v_fifoinfo = NULL; + FREE(fip, M_TEMP); return (error); } fip->fi_writesock = wso; @@ -202,8 +202,8 @@ fifo_open(ap) (void)soclose(wso); (void)soclose(rso); thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - _FREE_ZONE(fip, sizeof *fip, M_VNODE); vp->v_fifoinfo = NULL; + FREE(fip, M_TEMP); return (error); } wso->so_state |= SS_CANTRCVMORE; @@ -479,8 +479,8 @@ fifo_close(ap) error1 = soclose(fip->fi_readsock); error2 = soclose(fip->fi_writesock); thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - FREE_ZONE(fip, sizeof *fip, M_VNODE); vp->v_fifoinfo = NULL; + FREE(fip, M_TEMP); if (error1) return (error1); return (error2); diff --git a/bsd/miscfs/specfs/spec_vnops.c b/bsd/miscfs/specfs/spec_vnops.c index d5efd260b..1a455e7ca 100644 --- a/bsd/miscfs/specfs/spec_vnops.c +++ b/bsd/miscfs/specfs/spec_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. 
All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -72,7 +72,7 @@ #include #include #include -#include +#include #include #include @@ -275,7 +275,30 @@ spec_open(ap) return (error); error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p); if (!error) { + u_int64_t blkcnt; + u_int32_t blksize; + set_blocksize(vp, dev); + + /* + * Cache the size in bytes of the block device for later + * use by spec_write(). + */ + vp->v_specdevsize = (u_int64_t)0; /* Default: Can't get */ + if (!VOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, NOCRED, p)) { + /* Switch to 512 byte sectors (temporarily) */ + u_int32_t size512 = 512; + + if (!VOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, NOCRED, p)) { + /* Get the number of 512 byte physical blocks. */ + if (!VOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, NOCRED, p)) { + vp->v_specdevsize = blkcnt * (u_int64_t)size512; + } + } + /* If it doesn't set back, we can't recover */ + if (VOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, NOCRED, p)) + error = ENXIO; + } } return(error); } @@ -439,11 +462,35 @@ spec_write(ap) n = min((unsigned)(bsize - on), uio->uio_resid); + /* + * Use getblk() as an optimization IFF: + * + * 1) We are reading exactly a block on a block + * aligned boundary + * 2) We know the size of the device from spec_open + * 3) The read doesn't span the end of the device + * + * Otherwise, we fall back on bread(). + */ + if (n == bsize && + vp->v_specdevsize != (u_int64_t)0 && + (uio->uio_offset + (u_int64_t)n) > vp->v_specdevsize) { + /* reduce the size of the read to what is there */ + n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize; + } + if (n == bsize) bp = getblk(vp, bn, bsize, 0, 0, BLK_WRITE); else error = bread(vp, bn, bsize, NOCRED, &bp); + /* Translate downstream error for upstream, if needed */ + if (!error) { + error = bp->b_error; + if (!error && (bp->b_flags & B_ERROR) != 0) { + error = EIO; + } + } if (error) { brelse(bp); return (error); @@ -595,6 +642,7 @@ spec_strategy(ap) } */ *ap; { struct buf *bp; + extern int hard_throttle_on_root; bp = ap->a_bp; @@ -612,8 +660,11 @@ spec_strategy(ap) code |= DKIO_PAGING; KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE, - bp, bp->b_dev, bp->b_blkno, bp->b_bcount, 0); + (unsigned int)bp, bp->b_dev, bp->b_blkno, bp->b_bcount, 0); } + if ((bp->b_flags & B_PGIN) && (bp->b_vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV)) + hard_throttle_on_root = 1; + (*bdevsw[major(bp->b_dev)].d_strategy)(bp); return (0); } diff --git a/bsd/miscfs/specfs/specdev.h b/bsd/miscfs/specfs/specdev.h index bcaf8d9fd..0a67d1894 100644 --- a/bsd/miscfs/specfs/specdev.h +++ b/bsd/miscfs/specfs/specdev.h @@ -76,7 +76,8 @@ struct specinfo { struct vnode *si_specnext; long si_flags; dev_t si_rdev; - daddr_t si_size; /* block device size in bytes */ + daddr_t si_size; /* device block size in bytes */ + u_int64_t si_devsize; /* actual device size in bytes */ }; /* * Exported shorthand @@ -86,6 +87,7 @@ struct specinfo { #define v_specnext v_specinfo->si_specnext #define v_specflags v_specinfo->si_flags #define v_specsize v_specinfo->si_size +#define v_specdevsize v_specinfo->si_devsize /* * Flags for specinfo diff --git a/bsd/miscfs/synthfs/synthfs_util.c b/bsd/miscfs/synthfs/synthfs_util.c index a712ff879..09e8857ee 100644 --- a/bsd/miscfs/synthfs/synthfs_util.c +++ b/bsd/miscfs/synthfs/synthfs_util.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -194,27 +194,25 @@ int synthfs_move_rename_entry(struct vnode *source_vp, struct vnode *newparent_v struct synthfsnode *source_sp = VTOS(source_vp); struct synthfsnode *parent_sp = VTOS(newparent_vp); char *new_name_ptr; - int result; - - if (parent_sp == source_sp->s_parent) return 0; + int result = 0; /* Unlink the entry from its current place: */ result = synthfs_remove_entry(source_vp); - if (result) return result; + if (result) goto err_exit; /* Change the name as necessary: */ - FREE(source_sp->s_name, M_TEMP); - if (new_name == NULL) { - MALLOC(new_name_ptr, char *, 1, M_TEMP, M_WAITOK); - new_name_ptr[0] = 0; - } else { - MALLOC(new_name_ptr, char *, strlen(new_name) + 1, M_TEMP, M_WAITOK); - strcpy(new_name_ptr, new_name); - }; - source_sp->s_name = new_name_ptr; - + if (new_name) { + FREE(source_sp->s_name, M_TEMP); + MALLOC(new_name_ptr, char *, strlen(new_name) + 1, M_TEMP, M_WAITOK); + strcpy(new_name_ptr, new_name); + source_sp->s_name = new_name_ptr; + }; + /* Insert the entry in its new home: */ - return synthfs_insertnode(source_sp, parent_sp); + result = synthfs_insertnode(source_sp, parent_sp); + +err_exit: + return result; } @@ -320,7 +318,7 @@ long synthfs_adddirentry(u_int32_t fileno, u_int8_t type, const char *name, stru long padtext = 0; unsigned short direntrylength; - namelength = ((name == NULL) ? 0 : strlen(name)); + namelength = ((name == NULL) ? 0 : strlen(name) + 1); padding = (4 - (namelength & 3)) & 3; direntrylength = sizeof(struct synthfs_direntry_head) + namelength + padding; diff --git a/bsd/miscfs/synthfs/synthfs_vfsops.c b/bsd/miscfs/synthfs/synthfs_vfsops.c index 525a0a3ee..600ad66f4 100644 --- a/bsd/miscfs/synthfs/synthfs_vfsops.c +++ b/bsd/miscfs/synthfs/synthfs_vfsops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -43,7 +43,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/bsd/miscfs/synthfs/synthfs_vnops.c b/bsd/miscfs/synthfs/synthfs_vnops.c index 64afea17a..f6f9d782c 100644 --- a/bsd/miscfs/synthfs/synthfs_vnops.c +++ b/bsd/miscfs/synthfs/synthfs_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -65,13 +65,6 @@ #include #endif -/* external routines defined in vfs_cache.c */ -extern void cache_purge (struct vnode *vp); -extern int cache_lookup (struct vnode *dvp, struct vnode **vpp, struct componentname *cnp); -extern void cache_enter (struct vnode *dvp, struct vnode *vpp, struct componentname *cnp); - -//extern void vnode_uncache(struct vnode *); - extern int groupmember(gid_t gid, struct ucred* cred); #define VOPFUNC int (*)(void *) @@ -185,7 +178,7 @@ struct vop_create_args /* { Debugger(debugmsg); #endif - return EOPNOTSUPP; + return err_create(ap); } diff --git a/bsd/miscfs/union/union_vfsops.c b/bsd/miscfs/union/union_vfsops.c index 06578a15a..007989d44 100644 --- a/bsd/miscfs/union/union_vfsops.c +++ b/bsd/miscfs/union/union_vfsops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -226,7 +226,8 @@ union_mount(mp, path, data, ndp, p) mp->mnt_data = (qaddr_t) um; vfs_getnewfsid(mp); - (void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size); + (void) copyinstr(path, mp->mnt_stat.f_mntonname, + MNAMELEN - 1, (size_t *)&size); bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size); switch (um->um_op) { @@ -246,7 +247,7 @@ union_mount(mp, path, data, ndp, p) cp = mp->mnt_stat.f_mntfromname + len; len = MNAMELEN - len; - (void) copyinstr(args.target, cp, len - 1, &size); + (void) copyinstr(args.target, cp, len - 1, (size_t *)&size); bzero(cp + size, len - size); #ifdef UNION_DIAGNOSTIC @@ -507,7 +508,7 @@ int union_init __P((struct vfsconf *)); struct proc *)))eopnotsupp) #define union_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \ size_t, struct proc *)))eopnotsupp) -#define union_vget ((int (*) __P((struct mount *, ino_t, struct vnode **))) \ +#define union_vget ((int (*) __P((struct mount *, void *, struct vnode **))) \ eopnotsupp) #define union_vptofh ((int (*) __P((struct vnode *, struct fid *)))eopnotsupp) diff --git a/bsd/miscfs/union/union_vnops.c b/bsd/miscfs/union/union_vnops.c index e8a9f20a1..2d89cce55 100644 --- a/bsd/miscfs/union/union_vnops.c +++ b/bsd/miscfs/union/union_vnops.c @@ -154,10 +154,10 @@ union_lookup1(udvp, dvpp, vpp, cnp) */ while (dvp != udvp && (dvp->v_type == VDIR) && (mp = dvp->v_mountedhere)) { - - if (vfs_busy(mp, 0, 0, p)) - continue; - + if (vfs_busy(mp, LK_NOWAIT, 0, p)) { + vput(dvp); + return(ENOENT); + } error = VFS_ROOT(mp, &tdvp); vfs_unbusy(mp, p); if (error) { diff --git a/bsd/miscfs/volfs/volfs.h b/bsd/miscfs/volfs/volfs.h index 86d2aa74f..4ae456ca8 100644 --- a/bsd/miscfs/volfs/volfs.h +++ b/bsd/miscfs/volfs/volfs.h @@ -61,6 +61,9 @@ struct volfs_vndata #define ROOT_DIRID 2 +#define MAXPLCENTRIES 250 +#define PLCHASHSIZE 128 + extern int (**volfs_vnodeop_p)(void *); __BEGIN_DECLS diff --git a/bsd/miscfs/volfs/volfs_vfsops.c b/bsd/miscfs/volfs/volfs_vfsops.c index b4aee57e5..4a2f6d964 100644 --- a/bsd/miscfs/volfs/volfs_vfsops.c +++ b/bsd/miscfs/volfs/volfs_vfsops.c @@ -42,7 +42,7 @@ #include #include #include -#include +#include #include #include #include @@ -265,6 +265,8 @@ volfs_mount(mp, path, data, ndp, p) root_vp->v_data = priv_vn_data; priv_mnt_data->volfs_rootvp = root_vp; + + mp->mnt_flag &= ~MNT_RDONLY; return (0); } @@ -403,6 +405,14 @@ volfs_sync(mp, waitfor, cred, p) struct proc *p; { // DBG_VOP(("volfs_sync called\n")); + + /* Release a few entries from the permissions cache to keep them from getting stale. 
+ * Since sync is called at least every 30 seconds or so, releasing 1/20 of the cache + * every time through should free all entries in no less than 10 minutes, which should + * be adequate to prevent pid-wrapping from mis-associating PLC entries: + */ + volfs_PLC_reclaim_entries(MAXPLCENTRIES / 20); + return 0; } /* @@ -462,6 +472,9 @@ volfs_init(vfsp) struct vfsconf *vfsp; { DBG_VOP(("volfs_init called\n")); + + volfs_PLChashinit(); + return (0); } diff --git a/bsd/miscfs/volfs/volfs_vnops.c b/bsd/miscfs/volfs/volfs_vnops.c index 881614d21..dbfb467eb 100644 --- a/bsd/miscfs/volfs/volfs_vnops.c +++ b/bsd/miscfs/volfs/volfs_vnops.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -56,6 +57,8 @@ #include #include #include +#include +#include #include #include @@ -168,14 +171,164 @@ struct vnodeopv_entry_desc volfs_vnodeop_entries[] = { struct vnodeopv_desc volfs_vnodeop_opv_desc = {&volfs_vnodeop_p, volfs_vnodeop_entries}; +static char gDot[] = "."; +static char gDotDot[] = ".."; + +struct finfo { + fsobj_id_t parID; +}; + +struct finfoattrbuf { + unsigned long length; + struct finfo fi; +}; static int validfsnode(struct mount *fsnode); +struct volfs_PLCEntry +{ + LIST_ENTRY(volfs_PLCEntry) vplc_hash_link; /* entry's hash chain */ + TAILQ_ENTRY(volfs_PLCEntry) vplc_lru_link; /* entry's LRU chain link */ + int32_t vplc_fsid; + u_int vplc_item_id; + uid_t vplc_uid; + pid_t vplc_pid; +}; + +#define VOLFSPLCHASH(fsid, inum) ((((unsigned long)fsid) + (unsigned long)(inum)) & volfs_PLCHashMask) + +static struct slock volfs_PLChashtable_slock; +static TAILQ_HEAD(volfs_PLCLRUListHead, volfs_PLCEntry) volfs_PLCLRUList; +static TAILQ_HEAD(volfs_PLCFreeListHead, volfs_PLCEntry) volfs_PLCFreeList; +static LIST_HEAD(, volfs_PLCEntry) *volfs_PLCHashTable; +static u_long volfs_PLCHashMask; /* size of hash table - 1 */ +static u_long volfs_PLCEntryCount; + #if DBG_VOP_TEST_LOCKS static void DbgVopTest (int max, int error, VopDbgStoreRec *VopDbgStore, char *funcname); #endif /* DBG_VOP_TEST_LOCKS */ +/* + * volfs_PLChashinit + */ +__private_extern__ void +volfs_PLChashinit(void) +{ + int i; + + TAILQ_INIT(&volfs_PLCLRUList); + TAILQ_INIT(&volfs_PLCFreeList); + simple_lock_init(&volfs_PLChashtable_slock); +#if MAXPLCENTRIES + volfs_PLCHashTable = hashinit(PLCHASHSIZE, M_TEMP, &volfs_PLCHashMask); + + for (i = 0; i < PLCHASHSIZE; ++i) { + LIST_INIT(&volfs_PLCHashTable[i]); + }; +#endif + volfs_PLCEntryCount = 0; +} + + + +__private_extern__ void +volfs_PLC_reclaim_entries(int entrycount) +{ +#if MAXPLCENTRIES + int i; + struct volfs_PLCEntry *reclaim_target; + + simple_lock(&volfs_PLChashtable_slock); + + for (i = entrycount; i > 0; --i) { + if (TAILQ_EMPTY(&volfs_PLCLRUList)) break; + + /* Pick the next entry to be recycled and free it: */ + reclaim_target = TAILQ_FIRST(&volfs_PLCLRUList); + TAILQ_REMOVE(&volfs_PLCLRUList, reclaim_target, vplc_lru_link); + LIST_REMOVE(reclaim_target, vplc_hash_link); + TAILQ_INSERT_TAIL(&volfs_PLCFreeList, reclaim_target, vplc_lru_link); + }; + + simple_unlock(&volfs_PLChashtable_slock); +#endif +} + + + +#if MAXPLCENTRIES +/* + * volfs_PLCLookup + * + * Look up a PLC entry in the hash + */ +static int +volfs_PLCLookup(int32_t fsid, u_int target_id, uid_t uid, pid_t pid) +{ + struct volfs_PLCEntry *hash_entry; + int result = 0; + + simple_lock(&volfs_PLChashtable_slock); + LIST_FOREACH(hash_entry, &volfs_PLCHashTable[VOLFSPLCHASH(fsid, target_id)], vplc_hash_link) { + if ((hash_entry->vplc_item_id == target_id) && + (hash_entry->vplc_pid 
== pid) && + (hash_entry->vplc_uid == uid) && + (hash_entry->vplc_fsid == fsid)) { + result = 1; +#if 0 + if (hash_entry != TAILQ_LAST(&volfs_PLCLRUList, volfs_PLCLRUListHead)) { + TAILQ_REMOVE(&volfs_PLCLRUList, hash_entry, vplc_lru_link); + TAILQ_INSERT_TAIL(&volfs_PLCLRUList, hash_entry, vplc_lru_link); + }; +#endif + break; + }; + }; + simple_unlock(&volfs_PLChashtable_slock); + return result; +} + + +static void +volfs_PLCEnter(int32_t fsid, u_int target_id, uid_t uid, pid_t pid) +{ + struct volfs_PLCEntry *new_entry; + + simple_lock(&volfs_PLChashtable_slock); + if (!TAILQ_EMPTY(&volfs_PLCFreeList)) { + new_entry = TAILQ_FIRST(&volfs_PLCFreeList); + TAILQ_REMOVE(&volfs_PLCFreeList, new_entry, vplc_lru_link); + } else { + /* + * Allocate up to the predetermined maximum number of new entries: + * [must be done now to avoid blocking in MALLOC() with volfs_PLChashtable_slock held locked] + */ + if (volfs_PLCEntryCount < MAXPLCENTRIES) { + simple_unlock(&volfs_PLChashtable_slock); + new_entry = MALLOC(new_entry, struct volfs_PLCEntry *, sizeof(struct volfs_PLCEntry), M_TEMP, M_WAITOK); + simple_lock(&volfs_PLChashtable_slock); + ++volfs_PLCEntryCount; + } else { + new_entry = TAILQ_FIRST(&volfs_PLCLRUList); + TAILQ_REMOVE(&volfs_PLCLRUList, new_entry, vplc_lru_link); + LIST_REMOVE(new_entry, vplc_hash_link); + }; + }; + + new_entry->vplc_fsid = fsid; + new_entry->vplc_item_id = target_id; + new_entry->vplc_uid = uid; + new_entry->vplc_pid = pid; + + /* Link the new entry on the hash list for the fsid/target_id as well as the tail of the LRU list: */ + LIST_INSERT_HEAD(&volfs_PLCHashTable[VOLFSPLCHASH(fsid, target_id)], new_entry, vplc_hash_link); + TAILQ_INSERT_TAIL(&volfs_PLCLRUList, new_entry, vplc_lru_link); + simple_unlock(&volfs_PLChashtable_slock); +} +#endif + + /* * volfs_reclaim - Reclaim a vnode so that it can be used for other purposes. * @@ -222,7 +375,7 @@ volfs_access(ap) /* * We don't need to check credentials! 
FS is read-only for everyone */ - if (ap->a_mode == VREAD || ap->a_mode == VEXEC) + if ((ap->a_mode & ~(VREAD | VEXEC)) == 0) ret_err = 0; else ret_err = EACCES; @@ -555,18 +708,26 @@ int volfs_lock(ap) struct vop_lock_args /* { struct vnode *a_vp; int a_flags; struct proc *a_p; } */ *ap; -{ +{ int retval; struct volfs_vndata *priv_data; DBG_FUNC_NAME("volfs_lock"); DBG_VOP_LOCKS_DECL(1); +#if 0 + KERNEL_DEBUG((FSDBG_CODE(DBG_FSVN, 0)) | DBG_FUNC_START, + (unsigned int)ap->a_vp, (unsigned int)ap->a_flags, (unsigned int)ap->a_p, 0, 0); +#endif DBG_VOP_PRINT_FUNCNAME();DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP(("\n")); DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_UNLOCKED, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_ZERO); - + priv_data = (struct volfs_vndata *) ap->a_vp->v_data; retval = lockmgr(&priv_data->lock, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p); DBG_VOP_LOCKS_TEST(retval); +#if 0 + KERNEL_DEBUG((FSDBG_CODE(DBG_FSVN, 0)) | DBG_FUNC_END, + (unsigned int)ap->a_vp, (unsigned int)ap->a_flags, (unsigned int)ap->a_p, retval, 0); +#endif return (retval); } @@ -584,6 +745,10 @@ volfs_unlock(ap) struct volfs_vndata *priv_data; DBG_FUNC_NAME("volfs_unlock"); DBG_VOP_LOCKS_DECL(1); +#if 0 + KERNEL_DEBUG((FSDBG_CODE(DBG_FSVN, 4)) | DBG_FUNC_START, + (unsigned int)ap->a_vp, (unsigned int)ap->a_flags, (unsigned int)ap->a_p, 0, 0); +#endif DBG_VOP_PRINT_FUNCNAME();DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP(("\n")); DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_LOCKED, VOPDBG_ZERO); @@ -593,6 +758,10 @@ volfs_unlock(ap) &ap->a_vp->v_interlock, ap->a_p); DBG_VOP_LOCKS_TEST(retval); +#if 0 + KERNEL_DEBUG((FSDBG_CODE(DBG_FSVN, 4)) | DBG_FUNC_END, + (unsigned int)ap->a_vp, (unsigned int)ap->a_flags, (unsigned int)ap->a_p, retval, 0); +#endif return (retval); } @@ -658,6 +827,237 @@ volfs_pathconf(ap) /* NOTREACHED */ } + +/* + * Call VOP_GETATTRLIST on a given vnode + */ +static int +vp_getattrlist(struct vnode *vp, struct attrlist alist, void *attrbufptr, size_t bufsize, unsigned long options, struct proc *p) { + struct iovec iov; + struct uio bufuio; + + iov.iov_base = (char *)attrbufptr; + iov.iov_len = bufsize; + + bufuio.uio_iov = &iov; + bufuio.uio_iovcnt = 1; + bufuio.uio_offset = 0; + bufuio.uio_resid = iov.iov_len; + bufuio.uio_segflg = UIO_SYSSPACE; + bufuio.uio_rw = UIO_READ; + bufuio.uio_procp = p; + + return VOP_GETATTRLIST(vp, &alist, &bufuio, p->p_ucred, p); +} + +/* + * get_parentvp() - internal routine that tries to lookup the parent of vpp. + * On success, *vpp is the parent vp and is returned locked and the original child + * is left unlocked. On failure, the original child will be locked upon return. 
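The volfs_access() change above is easy to misread: the old code tested a_mode for equality, so a combined VREAD|VEXEC request was refused even though each right individually was granted. Masking off the permitted bits accepts any combination of them while still rejecting writes. A minimal user-space sketch of the two predicates, using the historical BSD mode-bit values from sys/vnode.h:

#include <assert.h>

#define VEXEC  000100
#define VWRITE 000200
#define VREAD  000400

/* Old test: only a bare VREAD or a bare VEXEC request passed. */
static int access_old(int mode) { return mode == VREAD || mode == VEXEC; }

/* New test: pass iff no bits outside VREAD|VEXEC are requested. */
static int access_new(int mode) { return (mode & ~(VREAD | VEXEC)) == 0; }

int main(void)
{
	assert(!access_old(VREAD | VEXEC));	/* old code: EACCES, wrong */
	assert(access_new(VREAD | VEXEC));	/* new code: allowed       */
	assert(!access_new(VWRITE));		/* writes still refused    */
	return 0;
}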
+ */ +static int +get_parentvp(struct vnode **vpp, struct mount *mp, struct proc *p) +{ + int result; + struct attrlist alist; + struct finfoattrbuf finfobuf; + struct vnode *child_vp = *vpp; + + alist.bitmapcount = 5; + alist.reserved = 0; + alist.commonattr = ATTR_CMN_PAROBJID; + alist.volattr = 0; + alist.dirattr = 0; + alist.fileattr = 0; + alist.forkattr = 0; + result = vp_getattrlist(child_vp, alist, &finfobuf, sizeof(finfobuf), 0, p); + if (result) + return result; + + /* Release the child vnode before trying to acquire its parent + to avoid vnode deadlock problems with parsing code + coming top-down through the directory hierarchy: */ + VOP_UNLOCK(child_vp, 0, p); + + /* Shift attention to the parent directory vnode: */ + result = VFS_VGET(mp, &finfobuf.fi.parID.fid_objno, vpp); + if (result) { + /* Make sure child_vp is still locked on exit: */ + vn_lock(child_vp, LK_EXCLUSIVE | LK_RETRY, p); + } + + return result; +} + + +/* + * Look up the parent directory of a given vnode. + */ +static int +lookup_parent(u_int id, struct vnode *child_vp, struct vnode **parent_vp, struct proc *p) +{ + struct nameidata nd; + struct componentname *cnp = &nd.ni_cnd; + struct filedesc *fdp = p->p_fd; + int error; + + *parent_vp = NULL; + + /* + * Special case lookups for root's parent directory, + * recognized by its special id of "1": + */ + if (id != 1) { + VREF(child_vp); + nd.ni_startdir = child_vp; + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, (caddr_t)&gDotDot, p); + } else { + struct vnode *root_vp; + + error = VFS_ROOT(child_vp->v_mount, &root_vp); + if (error) return error; + VOP_UNLOCK(root_vp, 0, p); /* Hold on to the reference */ + nd.ni_startdir = root_vp; + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, (caddr_t)&gDot, p); + }; + nd.ni_cnd.cn_cred = nd.ni_cnd.cn_proc->p_ucred; + + /* Since we can't hit any symlinks, use the source path string directly: */ + cnp->cn_pnbuf = nd.ni_dirp; + nd.ni_pathlen = strlen(cnp->cn_pnbuf); + cnp->cn_pnlen = nd.ni_pathlen + 1; + cnp->cn_flags |= (HASBUF | SAVENAME); + + nd.ni_loopcnt = 0; + + if ((nd.ni_rootdir = fdp->fd_rdir) == NULL) nd.ni_rootdir = rootvnode; + cnp->cn_nameptr = cnp->cn_pnbuf; + if (error = lookup(&nd)) { + cnp->cn_pnbuf = NULL; + return (error); + } + /* + * Check for symbolic link + */ + if (cnp->cn_flags & ISSYMLINK) return ENOENT; + if (nd.ni_vp == child_vp) return ELOOP; + + *parent_vp = nd.ni_vp; + return 0; +} + + + +/* + * verify_fullpathaccess(ret_vnode); + */ + +static int +verify_fullpathaccess(u_int id, struct vnode *targetvp, struct proc *p) { + struct vnode *vp, *parent_vp; + struct mount *mp = targetvp->v_mount; + struct attrlist alist; + struct finfoattrbuf finfobuf; + int result; + struct filedesc *fdp = p->p_fd; /* pointer to file descriptor state */ + u_int target_id; + u_long vp_id; + +#if 0 + KERNEL_DEBUG((FSDBG_CODE(DBG_FSVN, 12)) | DBG_FUNC_START, + (unsigned int)targetvp, (unsigned int)mp, (unsigned int)p, 0, 0); +#endif + + vp = targetvp; + vp_id = vp->v_id; + if (vp->v_type != VDIR) { + + /* The target is a file: get the parent directory. 
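The routines above reduce to a simple idea: starting at the target, repeatedly resolve ".." and check traverse permission until a recognized stopping point is reached. A user-space analogue, assuming only POSIX stat(2) and access(2); it ignores the kernel's extra stopping points (the process's working directory and chroot root) and the PLC cache that short-circuits repeat checks:

#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int
check_path_to_root(const char *dir)
{
	char path[4096];
	struct stat cur, up;

	if (strlen(dir) >= sizeof(path) - 4)
		return -1;
	strcpy(path, dir);
	if (stat(path, &cur) != 0)
		return -1;
	for (;;) {
		/* Need search permission on every directory on the way up. */
		if (access(path, X_OK) != 0)
			return -1;
		if (strlen(path) + 4 >= sizeof(path))
			return -1;
		strcat(path, "/..");
		if (stat(path, &up) != 0)
			return -1;
		/* At the root, ".." refers to the same file: done. The
		 * kernel uses the VROOT flag for the same test. */
		if (up.st_dev == cur.st_dev && up.st_ino == cur.st_ino)
			return 0;
		cur = up;
	}
}

int
main(int argc, char **argv)
{
	return (argc > 1 && check_path_to_root(argv[1]) != 0) ? 1 : 0;
}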
*/ + result = get_parentvp(&vp, mp, p); + if (result) goto err_exit; + + /* At this point, targetvp is unlocked (but still referenced), and + vp is the parent directory vnode, held locked */ + }; + + +#if MAXPLCENTRIES + if (volfs_PLCLookup(mp->mnt_stat.f_fsid.val[0], id, p->p_ucred->cr_uid, p->p_pid)) goto lookup_success; +#endif + /* Keep going up until either the process's root or the process's working directory is hit, + either one of which are potential valid starting points for a full pathname: */ + target_id = id; + while (vp != NULL && (!((vp->v_flag & VROOT) || /* Hit "/" */ + (vp == fdp->fd_cdir) || /* Hit process's working directory */ + (vp == fdp->fd_rdir)))) { /* Hit process chroot()-ed root */ + + /* At this point, vp is some directory node and it's always locked */ + /* Unlock the starting directory for namei(), retaining a reference... */ + VOP_UNLOCK(vp, 0, p); + + if (result = lookup_parent(target_id, vp, &parent_vp, p)) { + /* + * If the lookup fails with EACCES and the targetvp is a directory, + * we should try again using get_parentvp(). Without this check, + * directories that you can navigate to but not traverse will + * disappear when clicked in the Finder. + */ + if (result == EACCES && vp == targetvp && vp->v_type == VDIR && (vp->v_flag & VROOT) == 0) { + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + parent_vp = vp; + if (get_parentvp(&parent_vp, mp, p)) { + /* on error, vp is still locked... unlock for lookup_err_exit path */ + VOP_UNLOCK(vp, 0, p); + } else { + /* on success, vp is returned unlocked, parent_vp is returned locked */ + result = 0; + } + }; + if (result) goto lookup_err_exit; + }; + + if (vp != targetvp) { + vrele(vp); /* Completely done with that vp now... */ + }; + + vp = parent_vp; + target_id = 0; /* It's unknown at this point */ + + if (((result = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0) && + ((result = VOP_ACCESS(vp, VREAD, p->p_ucred, p)) != 0)) { + VOP_UNLOCK(vp, 0, p); + goto lookup_err_exit; + }; + }; + +#if MAXPLCENTRIES + volfs_PLCEnter(mp->mnt_stat.f_fsid.val[0], id, p->p_ucred->cr_uid, p->p_pid); +#endif + +lookup_success: + /* Success: the caller has complete access to the initial vnode: */ + result = 0; + + if (vp && vp != targetvp) VOP_UNLOCK(vp, 0, p); + +lookup_err_exit: + if (vp && vp != targetvp) { + vrele(vp); + vn_lock(targetvp, LK_EXCLUSIVE | LK_RETRY, p); + if (vp_id != targetvp->v_id || targetvp->v_type == VBAD) { + result = EAGAIN; /* vnode was recycled */ + } + }; + +err_exit: +#if 0 + KERNEL_DEBUG((FSDBG_CODE(DBG_FSVN, 12)) | DBG_FUNC_END, + (unsigned int)targetvp, (unsigned int)mp, (unsigned int)p, result, 0); +#endif + return result; +}; + + /* * get_fsvnode - internal routine to create a vnode for a file system. 
Called with mount pointer, * id of filesystem to lookup and pointer to vnode pointer to fill in @@ -769,15 +1169,15 @@ search_vnodelist: * to a vnode pointer */ static int -get_filevnode(parent_fs, id, ret_vnode) +get_filevnode(parent_fs, id, ret_vnode, p) struct mount *parent_fs; u_int id; struct vnode **ret_vnode; + struct proc *p; { int retval; - DBG_VOP(("get_filevnode called for ID %d\n", id)); - +again: /* * Special case 2 to mean the root of a file system */ @@ -785,7 +1185,23 @@ get_filevnode(parent_fs, id, ret_vnode) retval = VFS_ROOT(parent_fs, ret_vnode); else retval = VFS_VGET(parent_fs, &id, ret_vnode); + if (retval) goto error; + retval = verify_fullpathaccess(id, *ret_vnode, p); + if (retval) { + /* An error was encountered verifying that the caller has, + in fact, got access all the way from "/" or their working + directory to the specified item... + */ + vput(*ret_vnode); + *ret_vnode = NULL; + /* vnode was recycled during access verification. */ + if (retval == EAGAIN) { + goto again; + } + }; + +error: return (retval); } @@ -799,11 +1215,16 @@ volfs_lookup(ap) char *cnp; long namelen; struct mount *parent_fs; - int unlocked_parent = 0; + int unlocked_parent = 0, isdot_or_dotdot = 0; int ret_err = ENOENT; DBG_FUNC_NAME("volfs_lookup"); DBG_VOP_LOCKS_DECL(2); +#if 0 + KERNEL_DEBUG((FSDBG_CODE(DBG_FSVN, 8)) | DBG_FUNC_START, + (unsigned int)ap->a_dvp, (unsigned int)ap->a_cnp, (unsigned int)p, 0, 0); +#endif + DBG_VOP(("volfs_lookup called, name = %s, namelen = %ld\n", ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen)); DBG_VOP_LOCKS_INIT(0,ap->a_dvp, VOPDBG_LOCKED, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS); @@ -851,14 +1272,16 @@ volfs_lookup(ap) if (namelen == 1) { /* "." requested */ + isdot_or_dotdot = 1; *ap->a_vpp = ap->a_dvp; VREF(*ap->a_vpp); DBG_VOP_LOCKS_TEST(0); - return (0); + ret_err = 0; } else if (cnp[1] == '.' && namelen == 2) { /* ".." 
requested */ + isdot_or_dotdot = 1; ret_err = volfs_root(ap->a_dvp->v_mount, ap->a_vpp); } } @@ -901,14 +1324,22 @@ volfs_lookup(ap) ret_err = get_fsvnode(ap->a_dvp->v_mount, id, ap->a_vpp); else { parent_fs = priv_data->fs_mount; - if (!(ap->a_cnp->cn_flags & LOCKPARENT) || !(ap->a_cnp->cn_flags & ISLASTCN)) { - VOP_UNLOCK(ap->a_dvp, 0, ap->a_cnp->cn_proc); - unlocked_parent = 1; - }; - ret_err = get_filevnode(parent_fs, id, ap->a_vpp); + if (!(ap->a_cnp->cn_flags & LOCKPARENT) || !(ap->a_cnp->cn_flags & ISLASTCN)) { + VOP_UNLOCK(ap->a_dvp, 0, ap->a_cnp->cn_proc); + unlocked_parent = 1; + }; + ret_err = get_filevnode(parent_fs, id, ap->a_vpp, ap->a_cnp->cn_proc); } } + } + if (!isdot_or_dotdot && *ap->a_vpp && VPARENT(*ap->a_vpp) == NULL && ap->a_dvp != *ap->a_vpp) { + if (VPARENT(ap->a_dvp) == *ap->a_vpp) { + panic("volfs: ap->a_dvp 0x%x has parent == a_vpp 0x%x\n", + ap->a_dvp, *ap->a_vpp); + } + vget(ap->a_dvp, 0, ap->a_cnp->cn_proc); + VPARENT(*ap->a_vpp) = ap->a_dvp; } if (!unlocked_parent && (!(ap->a_cnp->cn_flags & LOCKPARENT) || !(ap->a_cnp->cn_flags & ISLASTCN))) { @@ -922,6 +1353,10 @@ Err_Exit: DBG_VOP_UPDATE_VP(1, *ap->a_vpp); DBG_VOP_LOCKS_TEST(ret_err); +#if 0 + KERNEL_DEBUG((FSDBG_CODE(DBG_FSVN, 8)) | DBG_FUNC_START, + (unsigned int)ap->a_dvp, (unsigned int)ap->a_cnp, (unsigned int)p, ret_err, 0); +#endif return (ret_err); } diff --git a/bsd/net/Makefile b/bsd/net/Makefile index 946c7517a..304c2be7d 100644 --- a/bsd/net/Makefile +++ b/bsd/net/Makefile @@ -23,14 +23,14 @@ DATAFILES= \ bpf.h bpf_compat.h bpfdesc.h dlil.h dlil_pvt.h \ etherdefs.h ethernet.h if.h if_arp.h \ if_dl.h if_llc.h if_media.h if_mib.h \ - if_ppp.h if_slvar.h \ + if_slvar.h \ if_types.h if_var.h iso88025.h \ kext_net.h ndrv.h net_osdep.h netisr.h pfkeyv2.h \ - ppp_defs.h radix.h raw_cb.h route.h slcompress.h slip.h + radix.h raw_cb.h route.h slcompress.h slip.h PRIVATE_DATAFILES = \ ndrv_var.h zlib.h if_pppvar.h if_sppp.h ppp_comp.h if_atm.h \ - if_tun.h if_vlan_var.h + if_tun.h if_vlan_var.h if_ppp.h firewire.h ppp_defs.h INSTALL_MI_LIST = ${DATAFILES} diff --git a/bsd/net/bpf.c b/bsd/net/bpf.c index 9f99cabae..93868de98 100644 --- a/bsd/net/bpf.c +++ b/bsd/net/bpf.c @@ -101,8 +101,9 @@ #include #include #include +#include - +#include #include #include @@ -122,6 +123,7 @@ static caddr_t bpf_alloc(); #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) #endif + #define PRINET 26 /* interruptible */ /* @@ -136,7 +138,7 @@ SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW, /* * bpf_iflist is the list of interfaces; each corresponds to an ifnet - * bpf_dtab holds the descriptors, indexed by minor device # + * bpf_dtab holds pointer to the descriptors, indexed by minor device # */ static struct bpf_if *bpf_iflist; #ifdef __APPLE__ @@ -145,10 +147,19 @@ static struct bpf_if *bpf_iflist; * on their system. Our dev_t is an int, so we still store * the bpf_d in a separate table indexed by minor device #. */ -static struct bpf_d bpf_dtab[NBPFILTER]; -static int bpf_dtab_init; -static int nbpfilter = NBPFILTER; -#endif +static struct bpf_d **bpf_dtab = NULL; +static int bpf_dtab_size = 0; +static int nbpfilter = 0; + +/* + * Mark a descriptor free by making it point to itself. + * This is probably cheaper than marking with a constant since + * the address should be in a register anyway. 
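The self-pointer trick the comment above describes is small enough to demonstrate in isolation: a node whose link field points at itself cannot be a member of any well-formed list, so the self-pointer doubles as a "free" flag with no extra storage. A standalone sketch of the same three macros on a generic node type:

#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;
	int payload;
};

#define ISFREE(n)   ((n) == (n)->next)
#define MARKFREE(n) ((n)->next = (n))
#define MARKUSED(n) ((n)->next = NULL)

int main(void)
{
	struct node n = { NULL, 42 };

	MARKFREE(&n);
	assert(ISFREE(&n));	/* next == &n: marked free */
	MARKUSED(&n);
	assert(!ISFREE(&n));	/* next == NULL: in use    */
	return 0;
}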
+ */
+#define D_ISFREE(d) ((d) == (d)->bd_next)
+#define D_MARKFREE(d) ((d)->bd_next = (d))
+#define D_MARKUSED(d) ((d)->bd_next = 0)
+#endif /* __APPLE__ */
 
 static int	bpf_allocbufs __P((struct bpf_d *));
 static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
@@ -165,6 +176,13 @@ static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
 static void	reset_d __P((struct bpf_d *));
 static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));
 
+/*static void *bpf_devfs_token[MAXBPFILTER];*/
+
+static int bpf_devsw_installed;
+
+void bpf_init __P((void *unused));
+
+
 /*
  * Darwin differs from BSD here, the following are static
  * on BSD and not static on Darwin.
@@ -202,6 +220,7 @@ static struct cdevsw bpf_cdevsw = {
 	/* type */	0
 };
 
+#define SOCKADDR_HDR_LEN	offsetof(struct sockaddr, sa_data)
 
 static int
 bpf_movein(uio, linktype, mp, sockp, datlen)
@@ -270,10 +289,17 @@ bpf_movein(uio, linktype, mp, sockp, datlen)
 		hlen = 4;	/* This should match PPP_HDRLEN */
 		break;
 
+	case DLT_APPLE_IP_OVER_IEEE1394:
+		sockp->sa_family = AF_UNSPEC;
+		hlen = sizeof(struct firewire_header);
+		break;
+
 	default:
 		return (EIO);
 	}
-
+	if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) {
+		return (EIO);
+	}
 	len = uio->uio_resid;
 	*datlen = len - hlen;
 	if ((unsigned)len > MCLBYTES)
@@ -340,6 +366,62 @@ int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
 	thread_funnel_set(network_flock, funnel_state);
 	return 0;
 }
+
+/*
+ * Returns 1 on success, 0 on failure
+ */
+static int
+bpf_dtab_grow(int increment)
+{
+	struct bpf_d **new_dtab = NULL;
+
+	new_dtab = (struct bpf_d **)_MALLOC(sizeof(struct bpf_d *) * (bpf_dtab_size + increment), M_DEVBUF, M_WAIT);
+	if (new_dtab == NULL)
+		return 0;
+
+	if (bpf_dtab) {
+		struct bpf_d **old_dtab;
+
+		bcopy(bpf_dtab, new_dtab, sizeof(struct bpf_d *) * bpf_dtab_size);
+		/*
+		 * replace must be atomic with respect to free so bpf_dtab
+		 * is always valid.
+		 */
+		old_dtab = bpf_dtab;
+		bpf_dtab = new_dtab;
+		_FREE(old_dtab, M_DEVBUF);
+	}
+	else bpf_dtab = new_dtab;
+
+	bzero(bpf_dtab + bpf_dtab_size, sizeof(struct bpf_d *) * increment);
+
+	bpf_dtab_size += increment;
+
+	return 1;
+}
+
+static struct bpf_d *
+bpf_make_dev_t(int maj)
+{
+	struct bpf_d *d;
+
+	if (nbpfilter >= bpf_dtab_size && bpf_dtab_grow(NBPFILTER) == 0)
+		return NULL;
+
+	d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF, M_WAIT);
+	if (d != NULL) {
+		int i = nbpfilter++;
+
+		bzero(d, sizeof(struct bpf_d));
+		bpf_dtab[i] = d;
+		D_MARKFREE(bpf_dtab[i]);
+		/*bpf_devfs_token[i] = */devfs_make_node(makedev(maj, i),
+				DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
+				"bpf%d", i);
+	}
+	return d;
+}
+
 #endif
 
 /*
@@ -420,16 +502,6 @@ bpf_detachd(d)
 }
 
 
-#ifdef __APPLE__
-/*
- * Mark a descriptor free by making it point to itself.
- * This is probably cheaper than marking with a constant since
- * the address should be in a register anyway.
- */
-#define D_ISFREE(d) ((d) == (d)->bd_next)
-#define D_MARKFREE(d) ((d)->bd_next = (d))
-#define D_MARKUSED(d) ((d)->bd_next = 0)
-#endif
 /*
  * Open ethernet device.  Returns ENXIO for illegal minor device number,
  * EBUSY if file is open by another process.
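bpf_dtab_grow() above is an instance of a copy-then-publish pattern: the new, larger table is fully built before a single pointer store makes it visible, and the old table is freed only after that store, so a reader that loaded the old pointer just before the swap still sees a valid (if smaller) table rather than freed memory. The funnel serializes writers here; a modern SMP path would need a real barrier or lock. A distilled user-space version of the same ordering, with calloc() standing in for _MALLOC plus the trailing bzero():

#include <stdlib.h>
#include <string.h>

static void **table;		/* stands in for bpf_dtab      */
static int    table_size;	/* stands in for bpf_dtab_size */

/* Returns 1 on success, 0 on failure, like bpf_dtab_grow(). */
static int
table_grow(int increment)
{
	void **old_tab = table;
	void **new_tab = calloc(table_size + increment, sizeof(void *));

	if (new_tab == NULL)
		return 0;
	if (old_tab != NULL)
		memcpy(new_tab, old_tab, table_size * sizeof(void *));
	table = new_tab;	/* the single publishing store       */
	table_size += increment;
	free(old_tab);		/* safe only after the store above   */
	return 1;
}

int
main(void)
{
	return table_grow(16) ? 0 : 1;
}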
@@ -445,12 +517,16 @@ bpfopen(dev, flags, fmt, p) register struct bpf_d *d; #ifdef __APPLE__ + /* new device nodes on demand when opening the last one */ + if (minor(dev) == nbpfilter - 1) + bpf_make_dev_t(major(dev)); + if (minor(dev) >= nbpfilter) return (ENXIO); + + d = bpf_dtab[minor(dev)]; thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - - d = &bpf_dtab[minor(dev)]; #else if (p->p_prison) return (EPERM); @@ -480,7 +556,11 @@ bpfopen(dev, flags, fmt, p) d->bd_bufsize = bpf_bufsize; d->bd_sig = SIGIO; d->bd_seesent = 1; + +#ifdef __APPLE__ thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); +#endif + return (0); } @@ -498,16 +578,33 @@ bpfclose(dev, flags, fmt, p) { register struct bpf_d *d; register int s; - - thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); +#ifdef __APPLE__ + struct bpf_d **bpf_dtab_schk; +#endif #ifndef __APPLE__ funsetown(d->bd_sigio); #endif s = splimp(); #ifdef __APPLE__ - d = &bpf_dtab[minor(dev)]; +again: + d = bpf_dtab[minor(dev)]; + bpf_dtab_schk = bpf_dtab; +#endif + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + +#ifdef __APPLE__ + /* + * If someone grows bpf_dtab[] while we were waiting for the + * funnel, then we will be pointing off into freed memory; + * check to see if this is the case. + */ + if (bpf_dtab_schk != bpf_dtab) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto again; + } #endif + if (d->bd_bif) bpf_detachd(d); splx(s); @@ -585,8 +682,9 @@ bpfread(dev, uio, ioflag) int error; int s; + d = bpf_dtab[minor(dev)]; + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - d = &bpf_dtab[minor(dev)]; /* * Restrict application to use a buffer the same size as @@ -707,6 +805,9 @@ bpf_wakeup(d) #endif } +/* keep in sync with bpf_movein above: */ +#define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header)) + int bpfwrite(dev, uio, ioflag) dev_t dev; @@ -717,11 +818,12 @@ bpfwrite(dev, uio, ioflag) struct ifnet *ifp; struct mbuf *m; int error, s; - static struct sockaddr dst; + char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN]; int datlen; + d = bpf_dtab[minor(dev)]; + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - d = &bpf_dtab[minor(dev)]; if (d->bd_bif == 0) { thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); @@ -734,8 +836,9 @@ bpfwrite(dev, uio, ioflag) thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); return (0); } - - error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); + ((struct sockaddr *)dst_buf)->sa_len = sizeof(dst_buf); + error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, + (struct sockaddr *)dst_buf, &datlen); if (error) { thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); return (error); @@ -746,13 +849,14 @@ bpfwrite(dev, uio, ioflag) return (EMSGSIZE); } - if (d->bd_hdrcmplt) - dst.sa_family = pseudo_AF_HDRCMPLT; + if (d->bd_hdrcmplt) { + ((struct sockaddr *)dst_buf)->sa_family = pseudo_AF_HDRCMPLT; + } s = splnet(); - error = dlil_output(ifp->if_data.default_proto, m, - (caddr_t) 0, &dst, 0); + error = dlil_output(ifptodlt(ifp, PF_INET), m, + (caddr_t) 0, (struct sockaddr *)dst_buf, 0); splx(s); thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); @@ -813,9 +917,9 @@ bpfioctl(dev, cmd, addr, flags, p) register struct bpf_d *d; int s, error = 0; + d = bpf_dtab[minor(dev)]; thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - d = &bpf_dtab[minor(dev)]; switch (cmd) { @@ -1204,12 +1308,12 @@ bpfpoll(dev, events, wql, p) register int s; int revents = 0; + d = bpf_dtab[minor(dev)]; + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); /* * An imitation of the 
FIONREAD ioctl code. */ - d = &bpf_dtab[minor(dev)]; - if (d->bd_bif == NULL) { thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); return (ENXIO); @@ -1284,7 +1388,7 @@ bpf_mcopy(src_arg, dst_arg, len) if (m == 0) panic("bpf_mcopy"); count = min(m->m_len, len); - bcopy(mtod(m, void *), dst, count); + bcopy(mtod((struct mbuf *)m, void *), dst, count); m = m->m_next; dst += count; len -= count; @@ -1475,16 +1579,7 @@ bpfattach(ifp, dlt, hdrlen) */ bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; -#ifdef __APPLE__ - /* - * Mark all the descriptors free if this hasn't been done. - */ - if (!bpf_dtab_init) { - for (i = 0; i < nbpfilter; ++i) - D_MARKFREE(&bpf_dtab[i]); - bpf_dtab_init = 1; - } -#else +#ifndef __APPLE__ if (bootverbose) printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit); #endif @@ -1547,12 +1642,6 @@ bpfdetach(ifp) splx(s); } -static void *bpf_devfs_token[NBPFILTER]; - -static int bpf_devsw_installed; - -void bpf_init __P((void *unused)); - void bpf_init(unused) void *unused; @@ -1569,11 +1658,12 @@ bpf_init(unused) nbpfilter = 0; return; } - for (i = 0 ; i < nbpfilter; i++) { - bpf_devfs_token[i] = devfs_make_node(makedev(maj, i), - DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600, - "bpf%x", i); + if (bpf_dtab_grow(NBPFILTER) == 0) { + printf("bpf_init: failed to allocate bpf_dtab\n"); + return; } + for (i = 0 ; i < NBPFILTER; i++) + bpf_make_dev_t(maj); } #else cdevsw_add(&bpf_cdevsw); diff --git a/bsd/net/bpf.h b/bsd/net/bpf.h index 8e611be77..1545e6074 100644 --- a/bsd/net/bpf.h +++ b/bsd/net/bpf.h @@ -179,6 +179,7 @@ struct bpf_hdr { #define DLT_FDDI 10 /* FDDI */ #define DLT_ATM_RFC1483 11 /* LLC/SNAP encapsulated atm */ #define DLT_RAW 12 /* raw IP */ +#define DLT_APPLE_IP_OVER_IEEE1394 138 /* * These are values from BSD/OS's "bpf.h". diff --git a/bsd/net/dlil.c b/bsd/net/dlil.c index 88afe0401..1b7d0db4e 100644 --- a/bsd/net/dlil.c +++ b/bsd/net/dlil.c @@ -132,6 +132,16 @@ struct if_family_str { }; +struct proto_family_str { + TAILQ_ENTRY(proto_family_str) proto_fam_next; + u_long proto_family; + u_long if_family; + + int (*attach_proto)(struct ifnet *ifp, u_long *dl_tag); + int (*detach_proto)(struct ifnet *ifp, u_long dl_tag); +}; + + struct dlil_stats_str dlil_stats; @@ -147,6 +157,9 @@ TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head; static TAILQ_HEAD(, if_family_str) if_family_head; +static +TAILQ_HEAD(, proto_family_str) proto_family_head; + static ifnet_inited = 0; static u_long dl_tag_nb = 0; static u_long dlil_filters_nb = 0; @@ -154,7 +167,6 @@ static u_long dlil_filters_nb = 0; int dlil_initialized = 0; decl_simple_lock_data(, dlil_input_lock) int dlil_input_thread_wakeup = 0; -int dlil_expand_mcl; static struct mbuf *dlil_input_mbuf_head = NULL; static struct mbuf *dlil_input_mbuf_tail = NULL; #if NLOOP > 1 @@ -162,11 +174,13 @@ static struct mbuf *dlil_input_mbuf_tail = NULL; #endif static struct mbuf *dlil_input_loop_head = NULL; static struct mbuf *dlil_input_loop_tail = NULL; +extern struct ifmultihead ifma_lostlist; static void dlil_input_thread(void); extern void run_netisr(void); extern void bpfdetach(struct ifnet*); +int dlil_expand_mcl; /* * Internal functions. 
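A worked example makes the bif_hdrlen computation in bpfattach() above concrete: the link header is padded so that the network-layer payload following the variable-length bpf header comes out word aligned. The sketch below assumes the standard BPF_WORDALIGN definition from bpf.h and the historical 18-byte struct bpf_hdr (32-bit timeval plus two u_int32s and a u_short):

#include <stdio.h>

#define BPF_ALIGNMENT sizeof(long)
#define BPF_WORDALIGN(x) (((x) + (BPF_ALIGNMENT - 1)) & ~(BPF_ALIGNMENT - 1))

int main(void)
{
	/* e.g. Ethernet: 14-byte link header, SIZEOF_BPF_HDR == 18 */
	unsigned hdrlen = 14, sizeof_bpf_hdr = 18;
	unsigned bif_hdrlen = BPF_WORDALIGN(hdrlen + sizeof_bpf_hdr) - hdrlen;

	/* bif_hdrlen is chosen so bif_hdrlen + hdrlen is word aligned:
	 * here 18 + 14 = 32, so the captured packet's IP header lands
	 * at byte 32, a multiple of both 4 and 8. */
	printf("bif_hdrlen = %u\n", bif_hdrlen);
	return 0;
}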
@@ -185,6 +199,20 @@ struct if_family_str *find_family_module(u_long if_family) return mod; } +static +struct proto_family_str *find_proto_module(u_long proto_family, u_long if_family) +{ + struct proto_family_str *mod = NULL; + + TAILQ_FOREACH(mod, &proto_family_head, proto_fam_next) { + if ((mod->proto_family == (proto_family & 0xffff)) + && (mod->if_family == (if_family & 0xffff))) + break; + } + + return mod; +} + /* * Public functions. @@ -296,6 +324,7 @@ dlil_init() TAILQ_INIT(&dlil_ifnet_head); TAILQ_INIT(&if_family_head); + TAILQ_INIT(&proto_family_head); // create the dl tag array MALLOC(dl_tag_array, void *, sizeof(struct dl_tag_str) * MAX_DL_TAGS, M_NKE, M_WAITOK); @@ -497,13 +526,11 @@ end: return retval; } - void dlil_input_thread_continue(void) { while (1) { struct mbuf *m, *m_loop; - int expand_mcl; usimple_lock(&dlil_input_lock); m = dlil_input_mbuf_head; @@ -514,16 +541,6 @@ dlil_input_thread_continue(void) dlil_input_loop_tail = NULL; usimple_unlock(&dlil_input_lock); - MBUF_LOCK(); - expand_mcl = dlil_expand_mcl; - dlil_expand_mcl = 0; - MBUF_UNLOCK(); - if (expand_mcl) { - caddr_t p; - MCLALLOC(p, M_WAIT); - if (p) MCLFREE(p); - } - /* * NOTE warning %%% attention !!!! * We should think about putting some thread starvation safeguards if @@ -565,17 +582,10 @@ dlil_input_thread_continue(void) void dlil_input_thread(void) { - register thread_t self = current_thread(); - extern void stack_privilege(thread_t thread); + register thread_t self = current_act(); - /* - * Make sure that this thread - * always has a kernel stack, and - * bind it to the master cpu. - */ - stack_privilege(self); - ml_thread_policy(current_thread(), MACHINE_GROUP, - (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR)); + ml_thread_policy(self, MACHINE_GROUP, + (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR)); /* The dlil thread is always funneled */ thread_funnel_set(network_flock, TRUE); @@ -1443,59 +1453,69 @@ dlil_if_attach(struct ifnet *ifp) int dlil_if_detach(struct ifnet *ifp) { - struct if_proto *proto; - struct dlil_filterq_entry *if_filter; - struct if_family_str *if_family; - struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; - int s; - struct kev_msg ev_msg; - boolean_t funnel_state; - - funnel_state = thread_funnel_set(network_flock, TRUE); - s = splnet(); - - if_family = find_family_module(ifp->if_family); - - if (!if_family) { - kprintf("Attempt to detach interface without family module - %s\n", - ifp->if_name); - splx(s); - thread_funnel_set(network_flock, funnel_state); - return ENODEV; - } - - while (if_filter = TAILQ_FIRST(fhead)) - dlil_detach_filter(if_filter->filter_id); - - ifp->refcnt--; - - if (ifp->refcnt == 0) { - /* Let BPF know the interface is detaching. 
*/ - bpfdetach(ifp); + struct if_proto *proto; + struct dlil_filterq_entry *if_filter; + struct if_family_str *if_family; + struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; + struct kev_msg ev_msg; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + if_family = find_family_module(ifp->if_family); + + if (!if_family) { + kprintf("Attempt to detach interface without family module - %s\n", + ifp->if_name); + thread_funnel_set(network_flock, funnel_state); + return ENODEV; + } + + while (if_filter = TAILQ_FIRST(fhead)) + dlil_detach_filter(if_filter->filter_id); + + ifp->refcnt--; + + if (ifp->refcnt > 0) { + dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0); + thread_funnel_set(network_flock, funnel_state); + return DLIL_WAIT_FOR_FREE; + } + + while (ifp->if_multiaddrs.lh_first) { + struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first; + + /* + * When the interface is gone, we will no + * longer be listening on these multicasts. + * Various bits of the stack may be referencing + * these multicasts, so we can't just free them. + * We place them on a list so they may be cleaned + * up later as the other bits of the stack release + * them. + */ + LIST_REMOVE(ifma, ifma_link); + ifma->ifma_ifp = NULL; + LIST_INSERT_HEAD(&ifma_lostlist, ifma, ifma_link); + } + + /* Let BPF know the interface is detaching. */ + bpfdetach(ifp); TAILQ_REMOVE(&ifnet, ifp, if_link); (*if_family->del_if)(ifp); - + if (--if_family->refcnt == 0) { - if (if_family->shutdown) - (*if_family->shutdown)(); - - TAILQ_REMOVE(&if_family_head, if_family, if_fam_next); - FREE(if_family, M_IFADDR); + if (if_family->shutdown) + (*if_family->shutdown)(); + + TAILQ_REMOVE(&if_family_head, if_family, if_fam_next); + FREE(if_family, M_IFADDR); } - - dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0); - splx(s); + + dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0); thread_funnel_set(network_flock, funnel_state); return 0; - } - else - { - dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0); - splx(s); - thread_funnel_set(network_flock, funnel_state); - return DLIL_WAIT_FOR_FREE; - } } @@ -1605,6 +1625,126 @@ int dlil_dereg_if_modules(u_long interface_family) +int +dlil_reg_proto_module(u_long protocol_family, u_long interface_family, + struct dlil_protomod_reg_str *protomod_reg) +{ + struct proto_family_str *proto_family; + int s; + boolean_t funnel_state; + + + funnel_state = thread_funnel_set(network_flock, TRUE); + s = splnet(); + if (find_proto_module(protocol_family, interface_family)) { + splx(s); + thread_funnel_set(network_flock, funnel_state); + return EEXIST; + } + + if (protomod_reg->reserved[0] != 0 || protomod_reg->reserved[1] != 0 + || protomod_reg->reserved[2] != 0 || protomod_reg->reserved[3] !=0) { + splx(s); + thread_funnel_set(network_flock, funnel_state); + return EINVAL; + } + + if (protomod_reg->attach_proto == NULL) { + splx(s); + thread_funnel_set(network_flock, funnel_state); + return EINVAL; + } + + proto_family = (struct proto_family_str *) _MALLOC(sizeof(struct proto_family_str), M_IFADDR, M_WAITOK); + if (!proto_family) { + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ENOMEM; + } + + bzero(proto_family, sizeof(struct proto_family_str)); + proto_family->proto_family = protocol_family; + proto_family->if_family = interface_family & 0xffff; + proto_family->attach_proto = protomod_reg->attach_proto; + proto_family->detach_proto = protomod_reg->detach_proto; + + 
TAILQ_INSERT_TAIL(&proto_family_head, proto_family, proto_fam_next); + splx(s); + thread_funnel_set(network_flock, funnel_state); + return 0; +} + +int dlil_dereg_proto_module(u_long protocol_family, u_long interface_family) +{ + struct proto_family_str *proto_family; + int s, ret = 0; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + s = splnet(); + proto_family = find_proto_module(protocol_family, interface_family); + if (proto_family == 0) { + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ENOENT; + } + + TAILQ_REMOVE(&proto_family_head, proto_family, proto_fam_next); + FREE(proto_family, M_IFADDR); + + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ret; +} + +int dlil_plumb_protocol(u_long protocol_family, struct ifnet *ifp, u_long *dl_tag) +{ + struct proto_family_str *proto_family; + int s, ret = 0; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + s = splnet(); + proto_family = find_proto_module(protocol_family, ifp->if_family); + if (proto_family == 0) { + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ENOENT; + } + + ret = (*proto_family->attach_proto)(ifp, dl_tag); + + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ret; +} + + +int dlil_unplumb_protocol(u_long protocol_family, struct ifnet *ifp) +{ + struct proto_family_str *proto_family; + int s, ret = 0; + u_long tag; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + s = splnet(); + + ret = dlil_find_dltag(ifp->if_family, ifp->if_unit, protocol_family, &tag); + + if (ret == 0) { + proto_family = find_proto_module(protocol_family, ifp->if_family); + if (proto_family && proto_family->detach_proto) + ret = (*proto_family->detach_proto)(ifp, tag); + else + ret = dlil_detach_protocol(tag); + } + + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ret; +} + /* diff --git a/bsd/net/dlil.h b/bsd/net/dlil.h index 5a34f2823..7bcbde123 100644 --- a/bsd/net/dlil.h +++ b/bsd/net/dlil.h @@ -344,6 +344,161 @@ struct dlil_ifmod_reg_str { int dlil_reg_if_modules(u_long interface_family, struct dlil_ifmod_reg_str *ifmod_reg); +struct dlil_protomod_reg_str { + /* + * attach the protocol to the interface and return the dl_tag + */ + int (*attach_proto)(struct ifnet *ifp, u_long *dl_tag); + + /* + * detach the protocol from the interface. + * this is optionnal. If it is NULL, DLIL will use 0 default detach function. + */ + int (*detach_proto)(struct ifnet *ifp, u_long dl_tag); + + /* + * reserved for future use. MUST be NULL. + */ + u_long reserved[4]; +}; + +/* + +Function : dlil_reg_proto_module + + A DLIL protocol module is a piece of code that know how to handle a certain type + of protocol (PF_INET, PF_INET6, ...) for a certain family of interface (APPLE_IF_FAM_ETHERNET, + APPLE_IF_FAM_PPP, ...). + + dlil_reg_proto_module() allows the registration of such a protocol/interface handler before any + interface is attached. + Typically, the attach and detach function of the protocol handler will call + dlil_{attach/detach}_protocol with the parameter specific to the protocol. + + The goal of this modules is to insulate the actual protocol (IP, IPv6) from the DLIL details. + +Parameters : + 'protocol_family' is PF_INET, PF_INET6, ... + 'interface_family' is APPLE_IF_FAM_ETHERNET, APPLE_IF_FAM_PPP, ... + 'protomod_reg' is the protocol registration structure. + 'attach_proto' funtion is mandatory. 
+ 'detach_proto' funtion is optional (DLIL will manage it). + +Return code : + +0 : + + No error. + +ENOMEM: + + No memory can be allocated for internal data structure. + +EEXIST: + + The protocol family has already been registered for this interface family. + +EINVAL: + + The dlil_protomod_reg_str structure contains incorrect values. + +*/ + +int dlil_reg_proto_module(u_long protocol_family, u_long interface_family, + struct dlil_protomod_reg_str *protomod_reg); + +/* + +Function : dlil_dereg_proto_module + + dlil_dereg_proto_module() will unregister the protocol module previously + registered with dlil_dereg_proto_module(). + + There is no restriction when to call it. + Interfaces or protoco can be attached, it will not prevent the deregistration of the module. + +Parameters : + 'protocol_family' is PF_INET, PF_INET6, ... + 'interface_family' is APPLE_IF_FAM_ETHERNET, APPLE_IF_FAM_PPP, ... + +Return code : + +0 : + + No error. + +ENOENT: + + No module was registered.. + +*/ + +int dlil_dereg_proto_module(u_long protocol_family, u_long interface_family); + +/* + +Function : dlil_plumb_protocol + + dlil_plumb_protocol() will plumb a protocol to an actual interface. + This will find a registered protocol module and call its attach function. + The module will typically call dlil_attach_protocol with the appropriate parameters, + and will return the dl_tag of the attachement. + It is up to the caller to handle the dl_tag. + Some protocol (IPv4) will stick it in their internal structure for future use. + Some other protocol (IPv6) can ignore the dl_tag. + +Parameters : + 'protocol_family' is PF_INET, PF_INET6, ... + 'ifp' is the interface to plumb the protocol to. + 'dl_tag' is the tag returned from the succesful attachement. + +Return code : + +0 : + + No error. + +ENOENT: + + No module was registered. + +other: + + Error returned by the attach_proto function + +*/ +int dlil_plumb_protocol(u_long protocol_family, struct ifnet *ifp, u_long *dl_tag); + +/* + +Function : dlil_unplumb_protocol + + dlil_unplumb_protocol() will unplumb a protocol from an interface. + This will find a registered protocol module and call its detach function. + The module will typically call dlil_detach_protocol with the appropriate parameters. + If no module is found, this function will call dlil_detach_protocol directly. + +Parameters : + 'protocol_family' is PF_INET, PF_INET6, ... + 'ifp' is APPLE_IF_FAM_ETHERNET, APPLE_IF_FAM_PPP, ... + +Return code : + +0 : + + No error. + +ENOENT: + + No module was registered. + +other: + + Error returned by the attach_proto function + +*/ +int dlil_unplumb_protocol(u_long protocol_family, struct ifnet *ifp); + int dlil_inject_if_input(struct mbuf *m, char *frame_header, u_long from_id); diff --git a/bsd/net/ether_if_module.c b/bsd/net/ether_if_module.c index 4710f419b..0ce4f1727 100644 --- a/bsd/net/ether_if_module.c +++ b/bsd/net/ether_if_module.c @@ -139,6 +139,10 @@ struct ether_desc_blk_str { static struct ether_desc_blk_str ether_desc_blk[MAX_INTERFACES]; +/* from if_ethersubr.c */ +int ether_resolvemulti __P((struct ifnet *, struct sockaddr **, + struct sockaddr *)); + /* * Release all descriptor entries owned by this dl_tag (there may be several). * Setting the type to 0 releases the entry. 
Eventually we should compact-out @@ -500,6 +504,7 @@ int ether_add_if(struct ifnet *ifp) ifp->if_framer = ether_frameout; ifp->if_demux = ether_demux; ifp->if_event = 0; + ifp->if_resolvemulti = ether_resolvemulti; for (i=0; i < MAX_INTERFACES; i++) if (ether_desc_blk[i].n_count == 0) @@ -605,10 +610,15 @@ ether_ifmod_ioctl(ifp, command, data) } +extern int ether_attach_inet(struct ifnet *ifp, u_long *dl_tag); +extern int ether_detach_inet(struct ifnet *ifp, u_long dl_tag); +extern int ether_attach_inet6(struct ifnet *ifp, u_long *dl_tag); +extern int ether_detach_inet6(struct ifnet *ifp, u_long dl_tag); int ether_family_init() { - int i; + int i, error=0; struct dlil_ifmod_reg_str ifmod_reg; + struct dlil_protomod_reg_str enet_protoreg; /* ethernet family is built-in, called from bsd_init */ thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); @@ -630,7 +640,23 @@ int ether_family_init() for (i=0; i < MAX_INTERFACES; i++) ether_desc_blk[i].n_count = 0; + /* Register protocol registration functions */ + + bzero(&enet_protoreg, sizeof(enet_protoreg)); + enet_protoreg.attach_proto = ether_attach_inet; + enet_protoreg.detach_proto = ether_detach_inet; + + if ((error = dlil_reg_proto_module(PF_INET, APPLE_IF_FAM_ETHERNET, &enet_protoreg)) != 0) + kprintf("dlil_reg_proto_module failed for AF_INET error=%d\n", error); + + + enet_protoreg.attach_proto = ether_attach_inet6; + enet_protoreg.detach_proto = ether_detach_inet6; + + if ((error = dlil_reg_proto_module(PF_INET6, APPLE_IF_FAM_ETHERNET, &enet_protoreg)) != 0) + kprintf("dlil_reg_proto_module failed for AF_INET6 error=%d\n", error); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - return 0; + return (error); } diff --git a/bsd/net/ether_inet6_pr_module.c b/bsd/net/ether_inet6_pr_module.c index 65901c1eb..8d6c582a2 100644 --- a/bsd/net/ether_inet6_pr_module.c +++ b/bsd/net/ether_inet6_pr_module.c @@ -257,7 +257,7 @@ inet6_ether_pre_output(ifp, m0, dst_netaddr, route, type, edst, dl_tag ) if (!nd6_storelladdr(&ac->ac_if, rt, m, dst_netaddr, (u_char *)edst)) { /* this must be impossible, so we bark */ printf("nd6_storelladdr failed\n"); - return(0); + return(EADDRNOTAVAIL); /* dlil_output will free the mbuf */ } *(u_short *)type = htons(ETHERTYPE_IPV6); break; @@ -266,6 +266,7 @@ inet6_ether_pre_output(ifp, m0, dst_netaddr, route, type, edst, dl_tag ) printf("%s%d: can't handle af%d\n", ifp->if_name, ifp->if_unit, dst_netaddr->sa_family); + /* dlil_output will free the mbuf */ return EAFNOSUPPORT; } @@ -372,19 +373,18 @@ ether_inet6_prmod_ioctl(dl_tag, ifp, command, data) -u_long ether_attach_inet6(struct ifnet *ifp) +int ether_attach_inet6(struct ifnet *ifp, u_long *dl_tag) { struct dlil_proto_reg_str reg; struct dlil_demux_desc desc; - u_long ip_dl_tag=0; u_short en_6native=ETHERTYPE_IPV6; int stat; int i; - stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET6, &ip_dl_tag); + stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET6, dl_tag); if (stat == 0) - return ip_dl_tag; + return stat; TAILQ_INIT(&reg.demux_desc_head); desc.type = DLIL_DESC_RAW; @@ -403,23 +403,21 @@ u_long ether_attach_inet6(struct ifnet *ifp) reg.default_proto = 0; reg.protocol_family = PF_INET6; - stat = dlil_attach_protocol(&reg, &ip_dl_tag); + stat = dlil_attach_protocol(&reg, dl_tag); if (stat) { printf("WARNING: ether_attach_inet6 can't attach ip to interface\n"); - return stat; } - return ip_dl_tag; + return stat; } -int ether_detach_inet6(struct ifnet *ifp) +int ether_detach_inet6(struct ifnet *ifp, u_long dl_tag) { - u_long ip_dl_tag = 0; int stat;
- stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET6, &ip_dl_tag); + stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET6, &dl_tag); if (stat == 0) { - stat = dlil_detach_protocol(ip_dl_tag); + stat = dlil_detach_protocol(dl_tag); if (stat) { printf("WARNING: ether_detach_inet6 can't detach ip6 from interface\n"); } diff --git a/bsd/net/ether_inet_pr_module.c b/bsd/net/ether_inet_pr_module.c index f181de956..81ea2564b 100644 --- a/bsd/net/ether_inet_pr_module.c +++ b/bsd/net/ether_inet_pr_module.c @@ -365,19 +365,6 @@ ether_inet_prmod_ioctl(dl_tag, ifp, command, data) if (ifp->if_init) ifp->if_init(ifp->if_softc); /* before arpwhohas */ - // - // See if another station has *our* IP address. - // i.e.: There is an address conflict! If a - // conflict exists, a message is sent to the - // console. - // - if (IA_SIN(ifa)->sin_addr.s_addr != 0) - { - /* don't bother for 0.0.0.0 */ - ac->ac_ipaddr = IA_SIN(ifa)->sin_addr; - arpwhohas(ac, &IA_SIN(ifa)->sin_addr); - } - arp_ifinit(IFP2AC(ifp), ifa); /* @@ -425,22 +412,21 @@ ether_inet_prmod_ioctl(dl_tag, ifp, command, data) -u_long -ether_attach_inet(struct ifnet *ifp) +int +ether_attach_inet(struct ifnet *ifp, u_long *dl_tag) { struct dlil_proto_reg_str reg; struct dlil_demux_desc desc; struct dlil_demux_desc desc2; - u_long ip_dl_tag=0; u_short en_native=ETHERTYPE_IP; u_short arp_native=ETHERTYPE_ARP; int stat; int i; - stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET, &ip_dl_tag); + stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET, dl_tag); if (stat == 0) - return ip_dl_tag; + return (stat); TAILQ_INIT(&reg.demux_desc_head); desc.type = DLIL_DESC_RAW; @@ -463,22 +449,21 @@ ether_attach_inet(struct ifnet *ifp) desc2.native_type = (char *) &arp_native; TAILQ_INSERT_TAIL(&reg.demux_desc_head, &desc2, next); - stat = dlil_attach_protocol(&reg, &ip_dl_tag); + stat = dlil_attach_protocol(&reg, dl_tag); if (stat) { printf("WARNING: ether_attach_inet can't attach ip to interface\n"); return stat; } - return ip_dl_tag; + return (0); } -int ether_detach_inet(struct ifnet *ifp) +int ether_detach_inet(struct ifnet *ifp, u_long dl_tag) { - u_long ip_dl_tag = 0; int stat; - stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET, &ip_dl_tag); + stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET, &dl_tag); if (stat == 0) { - stat = dlil_detach_protocol(ip_dl_tag); + stat = dlil_detach_protocol(dl_tag); if (stat) { printf("WARNING: ether_detach_inet can't detach ip from interface\n"); } diff --git a/bsd/net/ethernet.h b/bsd/net/ethernet.h index 86f3152b9..09c78d9ea 100644 --- a/bsd/net/ethernet.h +++ b/bsd/net/ethernet.h @@ -121,7 +121,7 @@ __BEGIN_DECLS int ether_hostton __P((char *, struct ether_addr *)); int ether_line __P((char *, struct ether_addr *, char *)); -char *ether_ntoa __P((struct ether_addr *)); +char *ether_ntoa __P((const struct ether_addr *)); int ether_ntohost __P((char *, struct ether_addr *)); __END_DECLS #endif /* !KERNEL */ diff --git a/bsd/net/firewire.h b/bsd/net/firewire.h new file mode 100644 index 000000000..c219b80e5 --- /dev/null +++ b/bsd/net/firewire.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License').
You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Fundamental constants relating to the FireWire network device. + */ + +#ifndef _NET_FIREWIRE_H_ +#define _NET_FIREWIRE_H_ + +#include + +/* + * The number of bytes in a FireWire EUI-64. + */ +#define FIREWIRE_EUI64_LEN 8 + +/* + * The number of bytes in the type field. + */ +#define FIREWIRE_TYPE_LEN 2 + +/* + * The length of the header provided by the FireWire network device. + */ +#define FIREWIRE_HDR_LEN (FIREWIRE_EUI64_LEN*2+FIREWIRE_TYPE_LEN) + +/* + * The minimum packet length. + */ +#define FIREWIRE_MIN_LEN 64 + +/* + * The maximum packet length. + */ +#define FIREWIRE_MAX_LEN 4096 + +/* + * A macro to validate a packet length. + */ +#define FIREWIRE_IS_VALID_LEN(foo) \ + ((foo) >= FIREWIRE_MIN_LEN && (foo) <= FIREWIRE_MAX_LEN) + +/* + * Structure of the header provided by the FireWire network device. + * + * The device uses a simplified header with just the non-changing + * EUI-64 addresses and ethernet type specified. + */ +struct firewire_header { + u_char firewire_dhost[FIREWIRE_EUI64_LEN]; + u_char firewire_shost[FIREWIRE_EUI64_LEN]; + u_short firewire_type; /* ethertype */ +}; + +/* + * Format of FireWire EUI-64. + */ +struct firewire_eui64 { + u_char octet[FIREWIRE_EUI64_LEN]; +}; + +/* + * Format of FireWire hardware address. + */ +struct firewire_address { + u_char eui64[FIREWIRE_EUI64_LEN]; + u_char maxRec; + u_char spd; + u_int16_t unicastFifoHi; + u_int32_t unicastFifoLo; +}; + +#define FIREWIRE_ADDR_LEN 16 /* sizeof(struct firewire_address) */ + + +#define FIREWIRE_MTU (FIREWIRE_MAX_LEN - FIREWIRE_HDR_LEN) +#define FIREWIRE_MIN (FIREWIRE_MIN_LEN - FIREWIRE_HDR_LEN) + +#endif /* !_NET_FIREWIRE_H_ */ diff --git a/bsd/net/if.c b/bsd/net/if.c index 1f10ed96f..312e293a3 100644 --- a/bsd/net/if.c +++ b/bsd/net/if.c @@ -107,6 +107,7 @@ MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address"); int ifqmaxlen = IFQ_MAXLEN; struct ifnethead ifnet; /* depend on static init XXX */ +struct ifmultihead ifma_lostlist = LIST_HEAD_INITIALIZER(ifma_lostlist); #if INET6 /* @@ -114,7 +115,6 @@ struct ifnethead ifnet; /* depend on static init XXX */ * should be more generalized?
*/ extern void nd6_setmtu __P((struct ifnet *)); -extern int ip6_auto_on; #endif /* @@ -154,7 +154,9 @@ old_if_attach(ifp) } TAILQ_INSERT_TAIL(&ifnet, ifp, if_link); - ifp->if_index = ++if_index; + /* if the interface is recycled, keep the index */ + if (!((ifp->if_eflags & IFEF_REUSE) && ifp->if_index)) + ifp->if_index = ++if_index; /* * XXX - * The old code would work if the interface passed a pre-existing @@ -226,6 +228,28 @@ old_if_attach(ifp) } } +__private_extern__ int +ifa_foraddr(addr) + unsigned int addr; +{ + register struct ifnet *ifp; + register struct ifaddr *ifa; + register unsigned int addr2; + + + for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) + for (ifa = ifp->if_addrhead.tqh_first; ifa; + ifa = ifa->ifa_link.tqe_next) { + if (ifa->ifa_addr->sa_family != AF_INET) + continue; + addr2 = IA_SIN(ifa)->sin_addr.s_addr; + + if (addr == addr2) + return (1); + } + return (0); +} + /* * Locate an interface based on a complete address. */ @@ -498,10 +522,6 @@ if_route(ifp, flag, fam) pfctlinput(PRC_IFUP, ifa->ifa_addr); rt_ifmsg(ifp); -#if INET6 - if (ip6_auto_on) /* Only if IPv6 is on on configured on on all ifs */ - in6_if_up(ifp); -#endif } /* @@ -1220,41 +1240,65 @@ if_addmulti(ifp, sa, retifma) return 0; } -/* - * Remove a reference to a multicast address on this interface. Yell - * if the request does not match an existing membership. - */ int -if_delmulti(ifp, sa) - struct ifnet *ifp; - struct sockaddr *sa; +if_delmultiaddr(struct ifmultiaddr *ifma) { - struct ifmultiaddr *ifma; - int s; - - for (ifma = ifp->if_multiaddrs.lh_first; ifma; - ifma = ifma->ifma_link.le_next) - if (equal(sa, ifma->ifma_addr)) - break; - if (ifma == 0) - return ENOENT; - + struct sockaddr *sa; + struct ifnet *ifp; + + /* Verify ifma is valid */ + { + struct ifmultiaddr *match = NULL; + for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) { + for (match = ifp->if_multiaddrs.lh_first; match; match = match->ifma_link.le_next) { + if (match->ifma_ifp != ifp) { + printf("if_delmultiaddr: ifma (%x) on ifp (%s) is stale\n", + match, if_name(ifp)); + return (0); /* swallow error? */ + } + if (match == ifma) + break; + } + if (match == ifma) + break; + } + if (match != ifma) { + for (match = ifma_lostlist.lh_first; match; match = match->ifma_link.le_next) { + if (match->ifma_ifp != NULL) { + printf("if_delmultiaddr: item on lost list (%x) contains non-null ifp=%s\n", + match, if_name(match->ifma_ifp)); + return (0); /* swallow error? */ + } + if (match == ifma) + break; + } + } + + if (match != ifma) { + printf("if_delmultiaddr: ifma 0x%X is invalid\n", ifma); + return 0; + } + } + if (ifma->ifma_refcount > 1) { ifma->ifma_refcount--; return 0; } - rt_newmaddrmsg(RTM_DELMADDR, ifma); sa = ifma->ifma_lladdr; - s = splimp(); + + if (sa) /* send a routing msg for network addresses only */ + rt_newmaddrmsg(RTM_DELMADDR, ifma); + + ifp = ifma->ifma_ifp; + LIST_REMOVE(ifma, ifma_link); /* * Make sure the interface driver is notified * in the case of a link layer mcast group being left. */ - if (ifma->ifma_addr->sa_family == AF_LINK && sa == 0) + if (ifp && ifma->ifma_addr->sa_family == AF_LINK && sa == 0) dlil_ioctl(0, ifp, SIOCDELMULTI, 0); - splx(s); FREE(ifma->ifma_addr, M_IFMADDR); FREE(ifma, M_IFMADDR); if (sa == 0) @@ -1271,27 +1315,41 @@ if_delmulti(ifp, sa) * in the record for the link-layer address. (So we don't complain * in that case.)
*/ - for (ifma = ifp->if_multiaddrs.lh_first; ifma; - ifma = ifma->ifma_link.le_next) + if (ifp) + ifma = ifp->if_multiaddrs.lh_first; + else + ifma = ifma_lostlist.lh_first; + for (; ifma; ifma = ifma->ifma_link.le_next) if (equal(sa, ifma->ifma_addr)) break; - if (ifma == 0) - return 0; - - if (ifma->ifma_refcount > 1) { - ifma->ifma_refcount--; + + FREE(sa, M_IFMADDR); + if (ifma == 0) { return 0; } - s = splimp(); - LIST_REMOVE(ifma, ifma_link); - dlil_ioctl(0, ifp, SIOCDELMULTI, (caddr_t) 0); - splx(s); - FREE(ifma->ifma_addr, M_IFMADDR); - FREE(sa, M_IFMADDR); - FREE(ifma, M_IFMADDR); + return if_delmultiaddr(ifma); +} - return 0; +/* + * Remove a reference to a multicast address on this interface. Yell + * if the request does not match an existing membership. + */ +int +if_delmulti(ifp, sa) + struct ifnet *ifp; + struct sockaddr *sa; +{ + struct ifmultiaddr *ifma; + + for (ifma = ifp->if_multiaddrs.lh_first; ifma; + ifma = ifma->ifma_link.le_next) + if (equal(sa, ifma->ifma_addr)) + break; + if (ifma == 0) + return ENOENT; + + return if_delmultiaddr(ifma); } diff --git a/bsd/net/if.h b/bsd/net/if.h index f572ff980..e034a4faa 100644 --- a/bsd/net/if.h +++ b/bsd/net/if.h @@ -120,6 +120,7 @@ /* extended flags definitions: (all bits are reserved for internal/future use) */ #define IFEF_AUTOCONFIGURING 0x1 #define IFEF_DVR_REENTRY_OK 0x20 /* When set, driver may be reentered from its own thread */ +#define IFEF_ACCEPT_RTADVD 0x40 /* set to accept IPv6 router advertisement on the interface */ #define IFEF_INUSE 0x40000000 /* DLIL ifnet recycler, ifnet in use */ #define IFEF_REUSE 0x20000000 /* DLIL ifnet recycler, ifnet is not new */ #endif /* KERNEL_PRIVATE */ diff --git a/bsd/net/if_arp.h b/bsd/net/if_arp.h index 9718e6291..259b8be82 100644 --- a/bsd/net/if_arp.h +++ b/bsd/net/if_arp.h @@ -78,6 +78,8 @@ struct arphdr { #define ARPHRD_ETHER 1 /* ethernet hardware format */ #define ARPHRD_IEEE802 6 /* token-ring hardware format */ #define ARPHRD_FRELAY 15 /* frame relay hardware format */ +#define ARPHRD_IEEE1394 24 /* IEEE1394 hardware address */ +#define ARPHRD_IEEE1394_EUI64 27 /* IEEE1394 EUI-64 */ u_short ar_pro; /* format of protocol address */ u_char ar_hln; /* length of hardware address */ u_char ar_pln; /* length of protocol address */ diff --git a/bsd/net/if_atm.h b/bsd/net/if_atm.h index cccc59b6d..0ab149256 100644 --- a/bsd/net/if_atm.h +++ b/bsd/net/if_atm.h @@ -67,8 +67,6 @@ #define RTALLOC1(A,B) rtalloc1((A),(B),0UL) #endif -#warning if_atm.h is not used by the darwin kernel - /* * pseudo header for packet transmission diff --git a/bsd/net/if_ethersubr.c b/bsd/net/if_ethersubr.c index 7e6c57d76..07c17892f 100644 --- a/bsd/net/if_ethersubr.c +++ b/bsd/net/if_ethersubr.c @@ -102,8 +102,6 @@ extern struct ifqueue pkintrq; #include #endif /* NVLAN > 0 */ -static int ether_resolvemulti __P((struct ifnet *, struct sockaddr **, - struct sockaddr *)); extern u_char etherbroadcastaddr[]; #define senderr(e) do { error = (e); goto bad;} while (0) #define IFP2AC(IFP) ((struct arpcom *)IFP) @@ -132,7 +130,6 @@ ether_ifattach(ifp) ifp->if_addrlen = 6; ifp->if_hdrlen = 14; ifp->if_mtu = ETHERMTU; - ifp->if_resolvemulti = ether_resolvemulti; if (ifp->if_baudrate == 0) ifp->if_baudrate = 10000000; diff --git a/bsd/net/if_faith.c b/bsd/net/if_faith.c index feed430a3..acd20e9db 100644 --- a/bsd/net/if_faith.c +++ b/bsd/net/if_faith.c @@ -178,29 +178,10 @@ int faith_shutdown() return 0; } -void faith_reg_if_mods() -{ - struct dlil_ifmod_reg_str faith_ifmod; - - bzero(&faith_ifmod, 
sizeof(faith_ifmod)); - faith_ifmod.add_if = faith_add_if; - faith_ifmod.del_if = faith_del_if; - faith_ifmod.add_proto = faith_add_proto; - faith_ifmod.del_proto = faith_del_proto; - faith_ifmod.ifmod_ioctl = 0; - faith_ifmod.shutdown = faith_shutdown; - - - if (dlil_reg_if_modules(APPLE_IF_FAM_FAITH, &faith_ifmod)) - panic("Couldn't register faith modules\n"); - -} - -u_long faith_attach_inet(struct ifnet *ifp) +int faith_attach_inet(struct ifnet *ifp, u_long *dl_tag) { struct dlil_proto_reg_str reg; struct dlil_demux_desc desc; - u_long dl_tag=0; short native=0; int stat; int i; @@ -212,7 +193,8 @@ u_long faith_attach_inet(struct ifnet *ifp) kprintf("faith_array for %s%d found dl_tag=%d\n", ifp->if_name, ifp->if_unit, faith_array[i]->dl_tag); #endif - return faith_array[i]->dl_tag; + *dl_tag = faith_array[i]->dl_tag; + return 0; } } @@ -234,14 +216,44 @@ u_long faith_attach_inet(struct ifnet *ifp) reg.default_proto = 0; reg.protocol_family = PF_INET; - stat = dlil_attach_protocol(&reg, &dl_tag); + stat = dlil_attach_protocol(&reg, dl_tag); if (stat) { panic("faith_attach_inet can't attach interface\n"); } - return dl_tag; + return stat; } +void faith_reg_if_mods() +{ + struct dlil_ifmod_reg_str faith_ifmod; + struct dlil_protomod_reg_str faith_protoreg; + int error; + + bzero(&faith_ifmod, sizeof(faith_ifmod)); + faith_ifmod.add_if = faith_add_if; + faith_ifmod.del_if = faith_del_if; + faith_ifmod.add_proto = faith_add_proto; + faith_ifmod.del_proto = faith_del_proto; + faith_ifmod.ifmod_ioctl = 0; + faith_ifmod.shutdown = faith_shutdown; + + + if (dlil_reg_if_modules(APPLE_IF_FAM_FAITH, &faith_ifmod)) + panic("Couldn't register faith modules\n"); + + /* Register protocol registration functions */ + + bzero(&faith_protoreg, sizeof(faith_protoreg)); + faith_protoreg.attach_proto = faith_attach_inet; + faith_protoreg.detach_proto = 0; + + if ((error = dlil_reg_proto_module(PF_INET, APPLE_IF_FAM_FAITH, &faith_protoreg)) != 0) + kprintf("dlil_reg_proto_module failed for AF_INET error=%d\n", error); + + +} + void faithattach(void) { diff --git a/bsd/net/if_gif.c b/bsd/net/if_gif.c index b8b75f002..5b4e12a88 100644 --- a/bsd/net/if_gif.c +++ b/bsd/net/if_gif.c @@ -296,17 +296,55 @@ u_long gif_detach_proto_family(struct ifnet *ifp, int af) return (stat); } +int gif_attach_inet(struct ifnet *ifp, u_long *dl_tag) { + *dl_tag = gif_attach_proto_family(ifp, AF_INET); + return 0; +} + +int gif_detach_inet(struct ifnet *ifp, u_long dl_tag) { + gif_detach_proto_family(ifp, AF_INET); + return 0; +} + +int gif_attach_inet6(struct ifnet *ifp, u_long *dl_tag) { + *dl_tag = gif_attach_proto_family(ifp, AF_INET6); + return 0; +} + +int gif_detach_inet6(struct ifnet *ifp, u_long dl_tag) { + gif_detach_proto_family(ifp, AF_INET6); + return 0; +} #endif /* Function to setup the first gif interface */ void gifattach(void) { + struct dlil_protomod_reg_str gif_protoreg; + int error; + /* Init the list of interfaces */ TAILQ_INIT(&gifs); gif_reg_if_mods(); /* DLIL modules */ + /* Register protocol registration functions */ + + bzero(&gif_protoreg, sizeof(gif_protoreg)); + gif_protoreg.attach_proto = gif_attach_inet; + gif_protoreg.detach_proto = gif_detach_inet; + + if ((error = dlil_reg_proto_module(AF_INET, APPLE_IF_FAM_GIF, &gif_protoreg)) != 0) + printf("dlil_reg_proto_module failed for AF_INET error=%d\n", error); + + gif_protoreg.attach_proto = gif_attach_inet6; + gif_protoreg.detach_proto = gif_detach_inet6; + + if ((error = dlil_reg_proto_module(AF_INET6, APPLE_IF_FAM_GIF, &gif_protoreg)) != 0) +
printf("dlil_reg_proto_module failed for AF_INET6 error=%d\n", error); + + /* Create first device */ gif_create_dev(); } @@ -463,6 +501,7 @@ gif_pre_output(ifp, m0, dst, rt, frame, address, dl_tag) log(LOG_NOTICE, "gif_output: recursively called too many times(%d)\n", called); + m_freem(m); /* free it here not in dlil_output*/ error = EIO; /* is there better errno? */ goto end; } @@ -471,6 +510,7 @@ gif_pre_output(ifp, m0, dst, rt, frame, address, dl_tag) m->m_flags &= ~(M_BCAST|M_MCAST); if (!(ifp->if_flags & IFF_UP) || sc->gif_psrc == NULL || sc->gif_pdst == NULL) { + m_freem(m); /* free it here not in dlil_output */ error = ENETDOWN; goto end; } @@ -518,8 +558,11 @@ gif_pre_output(ifp, m0, dst, rt, frame, address, dl_tag) end: called = 0; /* reset recursion counter */ - if (error) + if (error) { + /* the mbuf was freed either by in_gif_output or in here */ + *m0 = NULL; /* avoid getting dlil_output freeing it */ ifp->if_oerrors++; + } if (error == 0) error = EJUSTRETURN; /* if no error, packet got sent already */ return error; diff --git a/bsd/net/if_llc.h b/bsd/net/if_llc.h index 16b8b3ef9..f43a83e97 100644 --- a/bsd/net/if_llc.h +++ b/bsd/net/if_llc.h @@ -76,7 +76,7 @@ struct llc { struct { u_char control; u_char format_id; - u_char class; + u_char class_id; u_char window_x2; } type_u; struct { @@ -111,7 +111,7 @@ struct llc { #define llc_control llc_un.type_u.control #define llc_control_ext llc_un.type_raw.control_ext #define llc_fid llc_un.type_u.format_id -#define llc_class llc_un.type_u.class +#define llc_class llc_un.type_u.class_id #define llc_window llc_un.type_u.window_x2 #define llc_frmrinfo llc_un.type_frmr.frmrinfo #define llc_frmr_pdu0 llc_un.type_frmr.frmrinfo.rej_pdu0 diff --git a/bsd/net/if_loop.c b/bsd/net/if_loop.c index ca5e52385..ddbc94393 100644 --- a/bsd/net/if_loop.c +++ b/bsd/net/if_loop.c @@ -353,7 +353,7 @@ lo_pre_output(ifp, m, dst, route, frame_type, dst_addr, dl_tag) ifq = &atalkintrq; isr = NETISR_APPLETALK; break; -#endif NETAT +#endif /* NETAT */ default: return (EAFNOSUPPORT); } @@ -498,37 +498,20 @@ int lo_shutdown() return 0; } - -void lo_reg_if_mods() -{ - struct dlil_ifmod_reg_str lo_ifmod; - - bzero(&lo_ifmod, sizeof(lo_ifmod)); - lo_ifmod.add_if = lo_add_if; - lo_ifmod.del_if = lo_del_if; - lo_ifmod.add_proto = lo_add_proto; - lo_ifmod.del_proto = lo_del_proto; - lo_ifmod.ifmod_ioctl = 0; - lo_ifmod.shutdown = lo_shutdown; - - if (dlil_reg_if_modules(APPLE_IF_FAM_LOOPBACK, &lo_ifmod)) - panic("Couldn't register lo modules\n"); -} - - -u_long lo_attach_inet(struct ifnet *ifp) +int lo_attach_inet(struct ifnet *ifp, u_long *dl_tag) { struct dlil_proto_reg_str reg; struct dlil_demux_desc desc; - u_long dl_tag=0; short native=0; - int stat; + int stat =0 ; int i; for (i=0; i < lo_count; i++) { if ((lo_array[i]) && (lo_array[i]->ifp == ifp)) { - if (lo_array[i]->protocol_family == PF_INET) - return lo_array[i]->dl_tag; + if (lo_array[i]->protocol_family == PF_INET) { + *dl_tag = lo_array[i]->dl_tag; + return (0); + } } } @@ -549,27 +532,28 @@ u_long lo_attach_inet(struct ifnet *ifp) reg.default_proto = 0; reg.protocol_family = PF_INET; - stat = dlil_attach_protocol(®, &dl_tag); - if (stat) { - panic("lo_attach_inet can't attach interface\n"); - } + stat = dlil_attach_protocol(®, dl_tag); + + if (stat) + printf("lo_attach_inet: dlil_attach_protocol returned=%d\n", stat); - return dl_tag; + return stat; } -u_long lo_attach_inet6(struct ifnet *ifp) +int lo_attach_inet6(struct ifnet *ifp, u_long *dl_tag) { struct dlil_proto_reg_str reg; struct 
dlil_demux_desc desc; - u_long dl_tag=0; short native=0; int stat; int i; for (i=0; i < lo_count; i++) { if ((lo_array[i]) && (lo_array[i]->ifp == ifp)) { - if (lo_array[i]->protocol_family == PF_INET6) - return lo_array[i]->dl_tag; + if (lo_array[i]->protocol_family == PF_INET6) { + *dl_tag = lo_array[i]->dl_tag; + return (0); + } } } @@ -590,14 +574,47 @@ u_long lo_attach_inet6(struct ifnet *ifp) reg.default_proto = 0; reg.protocol_family = PF_INET6; - stat = dlil_attach_protocol(&reg, &dl_tag); - if (stat) { - panic("lo_attach_inet6 can't attach interface\n"); - } + stat = dlil_attach_protocol(&reg, dl_tag); + + if (stat) + printf("lo_attach_inet6: dlil_attach_protocol returned=%d\n", stat); - return dl_tag; + return stat; } +void lo_reg_if_mods() +{ + struct dlil_ifmod_reg_str lo_ifmod; + struct dlil_protomod_reg_str lo_protoreg; + int error; + + bzero(&lo_ifmod, sizeof(lo_ifmod)); + lo_ifmod.add_if = lo_add_if; + lo_ifmod.del_if = lo_del_if; + lo_ifmod.add_proto = lo_add_proto; + lo_ifmod.del_proto = lo_del_proto; + lo_ifmod.ifmod_ioctl = 0; + lo_ifmod.shutdown = lo_shutdown; + + if (dlil_reg_if_modules(APPLE_IF_FAM_LOOPBACK, &lo_ifmod)) + panic("Couldn't register lo modules\n"); + + /* Register protocol registration functions */ + + bzero(&lo_protoreg, sizeof(lo_protoreg)); + lo_protoreg.attach_proto = lo_attach_inet; + lo_protoreg.detach_proto = NULL; /* no detach function for loopback */ + + if ((error = dlil_reg_proto_module(PF_INET, APPLE_IF_FAM_LOOPBACK, &lo_protoreg)) != 0) + printf("dlil_reg_proto_module failed for AF_INET error=%d\n", error); + + lo_protoreg.attach_proto = lo_attach_inet6; + lo_protoreg.detach_proto = NULL; + + if ((error = dlil_reg_proto_module(PF_INET6, APPLE_IF_FAM_LOOPBACK, &lo_protoreg)) != 0) + printf("dlil_reg_proto_module failed for AF_INET6 error=%d\n", error); + +} int lo_set_bpf_tap(struct ifnet *ifp, int mode, int (*bpf_callback)(struct ifnet *, struct mbuf *)) { diff --git a/bsd/net/if_stf.c b/bsd/net/if_stf.c index 7bdf6faa5..379550287 100644 --- a/bsd/net/if_stf.c +++ b/bsd/net/if_stf.c @@ -145,7 +145,7 @@ static int ip_stf_ttl = 40; extern struct domain inetdomain; struct protosw in_stf_protosw = { SOCK_RAW, &inetdomain, IPPROTO_IPV6, PR_ATOMIC|PR_ADDR, - in_stf_input, rip_output, 0, rip_ctloutput, + in_stf_input, 0, 0, rip_ctloutput, 0, 0, 0, 0, 0, 0, @@ -209,33 +209,17 @@ int stf_shutdown() return 0; } -void stf_reg_if_mods() -{ - struct dlil_ifmod_reg_str stf_ifmod; - - bzero(&stf_ifmod, sizeof(stf_ifmod)); - stf_ifmod.add_if = stf_add_if; - stf_ifmod.del_if = stf_del_if; - stf_ifmod.add_proto = stf_add_proto; - stf_ifmod.del_proto = stf_del_proto; - stf_ifmod.ifmod_ioctl = 0; - stf_ifmod.shutdown = stf_shutdown; - - - if (dlil_reg_if_modules(APPLE_IF_FAM_STF, &stf_ifmod)) - panic("Couldn't register stf modules\n"); - -} - -u_long stf_attach_inet6(struct ifnet *ifp) +int stf_attach_inet6(struct ifnet *ifp, u_long *dl_tag) { struct dlil_proto_reg_str reg; struct dlil_demux_desc desc; short native=0; int stat, i; - if (stf_dl_tag != 0) - return stf_dl_tag; + if (stf_dl_tag != 0) { + *dl_tag = stf_dl_tag; + return 0; + } TAILQ_INIT(&reg.demux_desc_head); desc.type = DLIL_DESC_RAW; @@ -255,21 +239,18 @@ u_long stf_attach_inet6(struct ifnet *ifp) reg.protocol_family = PF_INET6; stat = dlil_attach_protocol(&reg, &stf_dl_tag); - if (stat) { - panic("stf_attach_inet6 can't attach interface\n"); - } + *dl_tag = stf_dl_tag; - return stf_dl_tag; + return stat; } -u_long stf_detach_inet6(struct ifnet *ifp) +int stf_detach_inet6(struct ifnet *ifp, u_long dl_tag) { -
u_long ip_dl_tag = 0; int stat; - stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, AF_INET6, &ip_dl_tag); + stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, AF_INET6, &dl_tag); if (stat == 0) { - stat = dlil_detach_protocol(ip_dl_tag); + stat = dlil_detach_protocol(dl_tag); if (stat) { printf("WARNING: stf_detach can't detach IP AF_INET6 from interface\n"); } @@ -277,6 +258,33 @@ u_long stf_detach_inet6(struct ifnet *ifp) return (stat); } +void stf_reg_if_mods() +{ + struct dlil_ifmod_reg_str stf_ifmod; + struct dlil_protomod_reg_str stf_protoreg; + int error; + + bzero(&stf_ifmod, sizeof(stf_ifmod)); + stf_ifmod.add_if = stf_add_if; + stf_ifmod.del_if = stf_del_if; + stf_ifmod.add_proto = stf_add_proto; + stf_ifmod.del_proto = stf_del_proto; + stf_ifmod.ifmod_ioctl = 0; + stf_ifmod.shutdown = stf_shutdown; + + + if (dlil_reg_if_modules(APPLE_IF_FAM_STF, &stf_ifmod)) + panic("Couldn't register stf modules\n"); + + /* Register protocol registration functions */ + + bzero(&stf_protoreg, sizeof(stf_protoreg)); + stf_protoreg.attach_proto = stf_attach_inet6; + stf_protoreg.detach_proto = stf_detach_inet6; + + if ((error = dlil_reg_proto_module(AF_INET6, APPLE_IF_FAM_STF, &stf_protoreg)) != 0) + kprintf("dlil_reg_proto_module failed for AF_INET6 error=%d\n", error); +} void stfattach(void) @@ -753,6 +761,8 @@ in_stf_input(m, off) ifp->if_ipackets++; ifp->if_ibytes += m->m_pkthdr.len; splx(s); + + return; } /* ARGSUSED */ diff --git a/bsd/net/if_var.h b/bsd/net/if_var.h index 2c69ea514..2b27c7489 100644 --- a/bsd/net/if_var.h +++ b/bsd/net/if_var.h @@ -74,7 +74,8 @@ #define APPLE_IF_FAM_MDECAP 9 #define APPLE_IF_FAM_GIF 10 #define APPLE_IF_FAM_FAITH 11 -#define APPLE_IF_FAM_STF 12 +#define APPLE_IF_FAM_STF 12 +#define APPLE_IF_FAM_FIREWIRE 13 #endif /* @@ -490,6 +491,7 @@ int if_addmulti __P((struct ifnet *, struct sockaddr *, struct ifmultiaddr **)); int if_allmulti __P((struct ifnet *, int)); void if_attach __P((struct ifnet *)); +int if_delmultiaddr __P((struct ifmultiaddr *ifma)); int if_delmulti __P((struct ifnet *, struct sockaddr *)); void if_down __P((struct ifnet *)); void if_route __P((struct ifnet *, int flag, int fam)); diff --git a/bsd/net/ndrv.c b/bsd/net/ndrv.c index 4707ef7c5..de86a5602 100644 --- a/bsd/net/ndrv.c +++ b/bsd/net/ndrv.c @@ -241,6 +241,10 @@ ndrv_attach(struct socket *so, int proto, struct proc *p) #if NDRV_DEBUG kprintf("NDRV attach: %x, %x, %x\n", so, proto, np); #endif + + if ((error = soreserve(so, ndrv_sendspace, ndrv_recvspace))) + return(error); + MALLOC(np, struct ndrv_cb *, sizeof(*np), M_PCB, M_WAITOK); if (np == NULL) return (ENOMEM); @@ -249,8 +253,6 @@ ndrv_attach(struct socket *so, int proto, struct proc *p) #if NDRV_DEBUG kprintf("NDRV attach: %x, %x, %x\n", so, proto, np); #endif - if ((error = soreserve(so, ndrv_sendspace, ndrv_recvspace))) - return(error); TAILQ_INIT(&np->nd_dlist); np->nd_signature = NDRV_SIGNATURE; np->nd_socket = so; @@ -600,7 +602,7 @@ ndrv_do_detach(register struct ndrv_cb *np) struct ndrv_cb* cur_np = NULL; struct socket *so = np->nd_socket; struct ndrv_multicast* next; - int error; + int error = 0; #if NDRV_DEBUG kprintf("NDRV detach: %x, %x\n", so, np); diff --git a/bsd/net/netisr.h b/bsd/net/netisr.h index 47b4b349b..74508a3e8 100644 --- a/bsd/net/netisr.h +++ b/bsd/net/netisr.h @@ -91,4 +91,4 @@ extern int dlil_input_thread_wakeup; #endif /* defined(KERNEL) && !defined(LOCORE) */ #define schednetisr(anisr) { netisr |= 1<<(anisr); setsoftnet(); } -#endif __APPLE_API_PRIVATE +#endif /* __APPLE_API_PRIVATE */ diff
--git a/bsd/net/pfkeyv2.h b/bsd/net/pfkeyv2.h index 14d610ea6..b32b130a9 100644 --- a/bsd/net/pfkeyv2.h +++ b/bsd/net/pfkeyv2.h @@ -128,6 +128,15 @@ struct sadb_sa { u_int32_t sadb_sa_flags; }; +#ifdef __APPLE_API_PRIVATE +struct sadb_sa_2 { + struct sadb_sa sa; + u_int16_t sadb_sa_natt_port; + u_int16_t sadb_reserved0; + u_int32_t sadb_reserved1; +}; +#endif + struct sadb_lifetime { u_int16_t sadb_lifetime_len; u_int16_t sadb_lifetime_exttype; @@ -237,7 +246,7 @@ struct sadb_x_sa2 { u_int8_t sadb_x_sa2_mode; u_int8_t sadb_x_sa2_reserved1; u_int16_t sadb_x_sa2_reserved2; - u_int32_t sadb_x_sa2_reserved3; + u_int32_t sadb_x_sa2_sequence; u_int32_t sadb_x_sa2_reqid; }; @@ -367,6 +376,11 @@ struct sadb_x_ipsecrequest { /* `flags' in sadb_sa structure holds followings */ #define SADB_X_EXT_NONE 0x0000 /* i.e. new format. */ #define SADB_X_EXT_OLD 0x0001 /* old format. */ +#ifdef __APPLE_API_PRIVATE +#define SADB_X_EXT_NATT 0x0002 /* Use UDP encapsulation to traverse NAT */ +#define SADB_X_EXT_NATT_KEEPALIVE 0x0004 /* Local node is behind NAT, send keepalives */ + /* Should only be set for outbound SAs */ +#endif #define SADB_X_EXT_IV4B 0x0010 /* IV length of 4 bytes in use */ #define SADB_X_EXT_DERIV 0x0020 /* DES derived */ diff --git a/bsd/net/route.c b/bsd/net/route.c index ea463b5ef..0cb34d42b 100644 --- a/bsd/net/route.c +++ b/bsd/net/route.c @@ -72,6 +72,8 @@ #include #include +#include + #define SA(p) ((struct sockaddr *)(p)) struct route_cb route_cb; @@ -84,6 +86,10 @@ static void rt_maskedcopy __P((struct sockaddr *, struct sockaddr *, struct sockaddr *)); static void rtable_init __P((void **)); +__private_extern__ u_long route_generation = 0; +extern int use_routegenid; + + static void rtable_init(table) void **table; @@ -130,6 +136,8 @@ rtalloc_ign(ro, ignore) splx(s); } ro->ro_rt = rtalloc1(&ro->ro_dst, 1, ignore); + if (ro->ro_rt) + ro->ro_rt->generation_id = route_generation; } /* @@ -220,11 +228,12 @@ rtfree(rt) { /* * find the tree for that address family + * Note: in the case of igmp packets, there might not be an rnh */ register struct radix_node_head *rnh = rt_tables[rt_key(rt)->sa_family]; - if (rt == 0 || rnh == 0) + if (rt == 0) panic("rtfree"); /* @@ -232,7 +241,7 @@ rtfree(rt) * and there is a close function defined, call the close function */ rt->rt_refcnt--; - if(rnh->rnh_close && rt->rt_refcnt == 0) { + if(rnh && rnh->rnh_close && rt->rt_refcnt == 0) { rnh->rnh_close((struct radix_node *)rt, rnh); } @@ -717,9 +726,7 @@ rtrequest(req, dst, gateway, netmask, flags, ret_nrt) ifaref(ifa); rt->rt_ifa = ifa; rt->rt_ifp = ifa->ifa_ifp; -#ifdef __APPLE__ - rt->rt_dlt = ifa->ifa_dlt; /* dl_tag */ -#endif + /* XXX mtu manipulation will be done in rnh_addaddr -- itojun */ rn = rnh->rnh_addaddr((caddr_t)ndst, (caddr_t)netmask, @@ -956,7 +963,7 @@ rt_setgate(rt0, dst, gate) int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len); register struct rtentry *rt = rt0; struct radix_node_head *rnh = rt_tables[dst->sa_family]; - + extern void kdp_set_gateway_mac (void *gatewaymac); /* * A host route with the destination equal to the gateway * will interfere with keeping LLINFO in the routing @@ -1035,6 +1042,12 @@ rt_setgate(rt0, dst, gate) rt->rt_gwroute = 0; return EDQUOT; /* failure */ } + /* Tell the kernel debugger about the new default gateway */ + if ((AF_INET == rt->rt_gateway->sa_family) && + rt->rt_gwroute && rt->rt_gwroute->rt_gateway && + (AF_LINK == rt->rt_gwroute->rt_gateway->sa_family)) { + kdp_set_gateway_mac(((struct sockaddr_dl 
*)rt0->rt_gwroute->rt_gateway)->sdl_data); + } } /* @@ -1166,6 +1179,8 @@ rtinit(ifa, cmd, flags) * notify any listenning routing agents of the change */ rt_newaddrmsg(cmd, ifa, error, nrt); + if (use_routegenid) + route_generation++; if (rt->rt_refcnt <= 0) { rt->rt_refcnt++; /* need a 1->0 transition to free */ rtfree(rt); @@ -1206,9 +1221,6 @@ rtinit(ifa, cmd, flags) * we are adding. */ rt->rt_ifp = ifa->ifa_ifp; -#ifdef __APPLE__ - rt->rt_dlt = ifa->ifa_dlt; /* dl_tag */ -#endif rt->rt_rmx.rmx_mtu = ifa->ifa_ifp->if_mtu; /*XXX*/ /* * Now ask the protocol to check if it needs @@ -1221,6 +1233,8 @@ rtinit(ifa, cmd, flags) * notify any listenning routing agents of the change */ rt_newaddrmsg(cmd, ifa, error, nrt); + if (use_routegenid) + route_generation++; } return (error); } diff --git a/bsd/net/route.h b/bsd/net/route.h index 8c887994c..2cc379b55 100644 --- a/bsd/net/route.h +++ b/bsd/net/route.h @@ -145,7 +145,7 @@ struct rtentry { struct sockaddr *, struct rtentry *)); /* output routine for this (rt,if) */ struct rtentry *rt_parent; /* cloning parent of this route */ - void *rt_filler2; /* more filler */ + u_long generation_id; /* route generation id */ }; #endif /* __APPLE_API_UNSTABLE */ @@ -240,6 +240,9 @@ struct rt_msghdr { #define RTM_IFINFO 0xe /* iface going up/down etc. */ #define RTM_NEWMADDR 0xf /* mcast group membership being added to if */ #define RTM_DELMADDR 0x10 /* mcast group membership being deleted */ +#ifdef KERNEL_PRIVATE +#define RTM_GET_SILENT 0x11 +#endif /* * Bitmask values for rtm_inits and rmx_locks. diff --git a/bsd/net/rtsock.c b/bsd/net/rtsock.c index ae49c24c5..ed72eaf27 100644 --- a/bsd/net/rtsock.c +++ b/bsd/net/rtsock.c @@ -149,11 +149,12 @@ rts_attach(struct socket *so, int proto, struct proc *p) */ s = splnet(); so->so_pcb = (caddr_t)rp; - error = raw_usrreqs.pru_attach(so, proto, p); + error = raw_attach(so, proto); /* don't use raw_usrreqs.pru_attach, it checks for SS_PRIV */ rp = sotorawcb(so); if (error) { splx(s); FREE(rp, M_PCB); + so->so_pcb = 0; return error; } switch(rp->rcb_proto.sp_protocol) { @@ -311,6 +312,7 @@ route_output(m, so) struct ifnet *ifp = 0; struct ifaddr *ifa = 0; struct proc *curproc = current_proc(); + int sendonlytoself = 0; #define senderr(e) { error = e; goto flush;} if (m == 0 || ((m->m_len < sizeof(long)) && @@ -334,6 +336,26 @@ route_output(m, so) dst = 0; senderr(EPROTONOSUPPORT); } + + /* + * Silent version of RTM_GET for Reachability APIs. We may change + * all RTM_GETs to be silent in the future, so this is private for now.
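
The generation counter added in route.c and route.h above gives holders of cached routes a cheap validity test: rtalloc_ign() stamps every rtentry it returns with the current route_generation, and rtinit() bumps the counter whenever an interface address is added or removed. A minimal sketch of how a cached struct route could be revalidated before use — illustrative only, not code from this patch; my_route_revalidate is a hypothetical helper:

    extern u_long route_generation;
    extern int use_routegenid;

    /* Hypothetical helper: drop a cached route whose generation stamp
     * is stale and look it up again. */
    static void my_route_revalidate(struct route *ro)
    {
        if (ro->ro_rt != NULL && use_routegenid &&
            ro->ro_rt->generation_id != route_generation) {
            rtfree(ro->ro_rt);      /* release the stale entry */
            ro->ro_rt = NULL;
            rtalloc_ign(ro, 0UL);   /* re-lookup; stamps the new generation */
        }
    }
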
+ */ + if (rtm->rtm_type == RTM_GET_SILENT) { + if ((so->so_options & SO_USELOOPBACK) == 0) + senderr(EINVAL); + sendonlytoself = 1; + rtm->rtm_type = RTM_GET; + } + + /* + * Perform permission checking, only privileged sockets + * may perform operations other than RTM_GET + */ + if (rtm->rtm_type != RTM_GET && (so->so_state & SS_PRIV) == 0) { + dst = 0; + senderr(EPERM); + } rtm->rtm_pid = curproc->p_pid; info.rti_addrs = rtm->rtm_addrs; if (rt_xaddrs((caddr_t)(rtm + 1), len + (caddr_t)rtm, &info)) { @@ -566,15 +588,24 @@ flush: m_adj(m, rtm->rtm_msglen - m->m_pkthdr.len); Free(rtm); } - if (rp) - rp->rcb_proto.sp_family = 0; /* Avoid us */ - if (dst) - route_proto.sp_protocol = dst->sa_family; - if (m) - raw_input(m, &route_proto, &route_src, &route_dst); - if (rp) - rp->rcb_proto.sp_family = PF_ROUTE; - } + if (sendonlytoself && m) { + if (sbappendaddr(&so->so_rcv, &route_src, m, (struct mbuf*)0) == 0) { + m_freem(m); + error = ENOBUFS; + } else { + sorwakeup(so); + } + } else { + if (rp) + rp->rcb_proto.sp_family = 0; /* Avoid us */ + if (dst) + route_proto.sp_protocol = dst->sa_family; + if (m) + raw_input(m, &route_proto, &route_src, &route_dst); + if (rp) + rp->rcb_proto.sp_family = PF_ROUTE; + } + } return (error); } diff --git a/bsd/net/zlib.c b/bsd/net/zlib.c index 5d0935e67..0f71337a1 100644 --- a/bsd/net/zlib.c +++ b/bsd/net/zlib.c @@ -52,7 +52,7 @@ subject to change. Applications should only use zlib.h. */ -/* @(#) $Id: zlib.c,v 1.8 2002/03/29 03:16:07 lindak Exp $ */ +/* @(#) $Id: zlib.c,v 1.9 2002/11/28 00:56:55 lindak Exp $ */ #ifndef _Z_UTIL_H #define _Z_UTIL_H @@ -298,7 +298,7 @@ void zcfree OF((voidpf opaque, voidpf ptr)); subject to change. Applications should only use zlib.h. */ -/* @(#) $Id: zlib.c,v 1.8 2002/03/29 03:16:07 lindak Exp $ */ +/* @(#) $Id: zlib.c,v 1.9 2002/11/28 00:56:55 lindak Exp $ */ #ifndef _DEFLATE_H #define _DEFLATE_H @@ -658,7 +658,7 @@ void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len, * */ -/* @(#) $Id: zlib.c,v 1.8 2002/03/29 03:16:07 lindak Exp $ */ +/* @(#) $Id: zlib.c,v 1.9 2002/11/28 00:56:55 lindak Exp $ */ /* #include "deflate.h" */ @@ -2000,7 +2000,7 @@ local block_state deflate_slow(s, flush) * Addison-Wesley, 1983. ISBN 0-201-06672-6. */ -/* @(#) $Id: zlib.c,v 1.8 2002/03/29 03:16:07 lindak Exp $ */ +/* @(#) $Id: zlib.c,v 1.9 2002/11/28 00:56:55 lindak Exp $ */ /* #define GEN_TREES_H */ @@ -2058,31 +2058,31 @@ local const uch bl_order[BL_CODES] #if defined(GEN_TREES_H) || !defined(STDC) /* non ANSI compilers may not accept trees.h */ -local ct_data static_ltree[L_CODES+2]; +local ct_data *static_ltree = Z_NULL; /* The static literal tree. Since the bit lengths are imposed, there is no * need for the L_CODES extra codes used during heap construction. However * The codes 286 and 287 are needed to build a canonical tree (see _tr_init * below). */ -local ct_data static_dtree[D_CODES]; +local ct_data *static_dtree = Z_NULL; /* The static distance tree. (Actually a trivial tree since all codes use * 5 bits.) */ -uch _dist_code[DIST_CODE_LEN]; +uch *_dist_code = Z_NULL; /* Distance codes. The first 256 values correspond to the distances * 3 .. 258, the last 256 values correspond to the top 8 bits of * the 15 bit distances. 
*/ -uch _length_code[MAX_MATCH-MIN_MATCH+1]; +uch *_length_code = Z_NULL; /* length code for each normalized match length (0 == MIN_MATCH) */ -local int base_length[LENGTH_CODES]; +local int *base_length = Z_NULL; /* First normalized length for each code (0 = MIN_MATCH) */ -local int base_dist[D_CODES]; +local int *base_dist = Z_NULL; /* First normalized distance for each code (0 = distance of 1) */ #else @@ -2227,10 +2227,10 @@ struct static_tree_desc_s { }; local static_tree_desc static_l_desc = -{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; +{NULL, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; local static_tree_desc static_d_desc = -{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; +{NULL, extra_dbits, 0, D_CODES, MAX_BITS}; local static_tree_desc static_bl_desc = {(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; @@ -2239,7 +2239,7 @@ local static_tree_desc static_bl_desc = * Local (static) routines in this file. */ -local void tr_static_init OF((void)); +local int tr_static_init OF((z_streamp z)); local void init_block OF((deflate_state *s)); local void pqdownheap OF((deflate_state *s, ct_data *tree, int k)); local void gen_bitlen OF((deflate_state *s, tree_desc *desc)); @@ -2335,10 +2335,22 @@ local void send_bits(s, value, length) #endif /* the arguments must not have side effects */ +typedef struct { + ct_data static_ltree[L_CODES+2]; + ct_data static_dtree[D_CODES]; + uch _dist_code[DIST_CODE_LEN]; + uch _length_code[MAX_MATCH-MIN_MATCH+1]; + int base_length[LENGTH_CODES]; + int base_dist[D_CODES]; +} __used_to_be_static; + +static __used_to_be_static *static_storage = Z_NULL; + /* =========================================================================== * Initialize the various 'constant' tables. */ -local void tr_static_init() +local int tr_static_init( + z_streamp z) { #if defined(GEN_TREES_H) || !defined(STDC) static int static_init_done = 0; @@ -2351,7 +2363,21 @@ local void tr_static_init() /* number of codes at each bit length for an optimal tree */ if (static_init_done) return; - + + /* allocate storage for static structures */ + if (static_storage == Z_NULL) { + static_storage = (__used_to_be_static*)ZALLOC(z, 1, sizeof(__used_to_be_static)); + if (static_storage == Z_NULL) + return Z_MEM_ERROR; + } + + static_ltree = static_storage->static_ltree; + static_dtree = static_storage->static_dtree; + _dist_code = static_storage->_dist_code; + _length_code = static_storage->_length_code; + base_length = static_storage->base_length; + base_dist = static_storage->base_dist; + /* For some embedded targets, global variables are not initialized: */ static_l_desc.static_tree = static_ltree; static_l_desc.extra_bits = extra_lbits; @@ -2485,7 +2511,7 @@ void gen_trees_header() void _tr_init(s) deflate_state *s; { - tr_static_init(); + tr_static_init(s->strm); s->l_desc.dyn_tree = s->dyn_ltree; s->l_desc.stat_desc = &static_l_desc; @@ -4731,7 +4757,7 @@ z_streamp z; /* for messages */ #ifdef BUILDFIXED local int fixed_built = 0; #define FIXEDH 544 /* number of hufts used by fixed tables */ -local inflate_huft fixed_mem[FIXEDH]; +local inflate_huft *fixed_mem = NULL; local uInt fixed_bl; local uInt fixed_bd; local inflate_huft *fixed_tl; @@ -4917,6 +4943,13 @@ z_streamp z; /* for memory allocation */ ZFREE(z, c); return Z_MEM_ERROR; } + + if ((fixed_mem = (inflate_huft*)ZALLOC(z, FIXEDH, sizeof(inflate_huft))) == Z_NULL) + { + ZFREE(z, c); + ZFREE(z, v); + return Z_MEM_ERROR; + } /* literal table */ for (k = 0; k < 144; k++) @@ -5511,7 +5544,7 @@ z_streamp z; * 
For conditions of distribution and use, see copyright notice in zlib.h */ -/* @(#) $Id: zlib.c,v 1.8 2002/03/29 03:16:07 lindak Exp $ */ +/* @(#) $Id: zlib.c,v 1.9 2002/11/28 00:56:55 lindak Exp $ */ /* #include "zutil.h" */ @@ -5741,7 +5774,7 @@ void zcfree (opaque, ptr) * For conditions of distribution and use, see copyright notice in zlib.h */ -/* @(#) $Id: zlib.c,v 1.8 2002/03/29 03:16:07 lindak Exp $ */ +/* @(#) $Id: zlib.c,v 1.9 2002/11/28 00:56:55 lindak Exp $ */ /* #include "zlib.h" */ diff --git a/bsd/netat/adsp_Close.c b/bsd/netat/adsp_Close.c index d2e9bbb75..ea68f9a0f 100644 --- a/bsd/netat/adsp_Close.c +++ b/bsd/netat/adsp_Close.c @@ -176,7 +176,7 @@ void RemoveCCB(sp, pb) /* (CCBPtr sp, DSPPBPtr pb) */ /* * Unlink CCB from list */ - qRemove(AT_ADSP_STREAMS, sp); /* remove sp from active streams queue */ + qRemove((CCB *)AT_ADSP_STREAMS, sp); /* remove sp from active streams queue */ if (pb) { pb->ioResult = 0; diff --git a/bsd/netat/adsp_RxData.c b/bsd/netat/adsp_RxData.c index 96280bb61..2121cb2b4 100644 --- a/bsd/netat/adsp_RxData.c +++ b/bsd/netat/adsp_RxData.c @@ -338,7 +338,7 @@ int RXData(sp, mp, f, len) /* (CCBPtr sp, ADSP_FRAMEPtr f, word len) */ { sp->rData = 1; /* Not empty any more */ - if ((sp->rpb)->ioc == mp) { + if ((sp->rpb)->ioc == (caddr_t)mp) { dPrintf(D_M_ADSP, D_L_TRACE, ("RXData: (pb->ioc == mp) no stored data\n")); KERNEL_DEBUG(DBG_ADSP_RCV, 4, sp, sp->rpb, 0, 0); diff --git a/bsd/netat/adsp_Timer.c b/bsd/netat/adsp_Timer.c index f84909360..7f4472a1d 100644 --- a/bsd/netat/adsp_Timer.c +++ b/bsd/netat/adsp_Timer.c @@ -55,6 +55,8 @@ #include #include +void TimerTick(); + /* * TrashSession * @@ -178,7 +180,7 @@ send: CheckSend(sp); } -void TimerTick_funnel() +void TimerTick_funnel(void *arg) { thread_funnel_set(network_flock, TRUE); TimerTick(); diff --git a/bsd/netat/asp_proto.c b/bsd/netat/asp_proto.c index e582936a8..80291b249 100644 --- a/bsd/netat/asp_proto.c +++ b/bsd/netat/asp_proto.c @@ -84,11 +84,10 @@ void asp_init(); void asp_ack_reply(); void asp_nak_reply(); void asp_clock(); -void asp_clock_funnel(); +void asp_clock_funnel(void *); int asp_open(); int asp_close(); int asp_wput(); -void atp_retry_req(); StaticProc asp_scb_t *asp_find_scb(); StaticProc asp_scb_t *asp_scb_alloc(); @@ -101,7 +100,7 @@ StaticProc void asp_timout(); StaticProc void asp_untimout(); StaticProc void asp_hangup(); StaticProc void asp_send_tickle(); -StaticProc void asp_send_tickle_funnel(); +StaticProc void asp_send_tickle_funnel(void *); StaticProc void asp_accept(); StaticProc int asp_send_req(); @@ -374,7 +373,7 @@ void trace_end(str) dPrintf(D_M_ASP, D_L_TRACE, (" %s: %s\n", str, mbuf_totals())); } -#endif AT_MBUF_TRACE +#endif /* AT_MBUF_TRACE */ /* * the write routine @@ -662,7 +661,7 @@ int asp_wput(gref, m) { struct atp_state *atp = (struct atp_state *)gref->info; if (atp->dflag) - atp = atp->atp_msgq; + atp = (struct atp_state *)atp->atp_msgq; if (gbuf_cont(mioc) == 0) { asp_iocnak(gref, mioc, EINVAL); @@ -782,10 +781,10 @@ asp_send_req(gref, mioc, dest, retry, awp, xo, state, bitmap) */ StaticProc void asp_send_tickle_funnel(scb) - asp_scb_t *scb; + void *scb; { thread_funnel_set(network_flock, TRUE); - asp_send_tickle(scb); + asp_send_tickle((asp_scb_t *)scb); thread_funnel_set(network_flock, FALSE); } @@ -1915,8 +1914,8 @@ asp_putnext(gref, mproto) int ASPputmsg(gref_t *gref, strbuf_t *ctlptr, strbuf_t *datptr, gbuf_t *mreq, int flags, int *errp) { - int s, i, err, len; - gbuf_t *mioc, *mdata, *mx; + int s, i, err, len, offset, remain, size, copy_len; + 
gbuf_t *mioc, *mdata, *mx, *m0; ioc_t *iocbp; strbuf_t ctlbuf; strbuf_t datbuf; @@ -1930,6 +1929,7 @@ int ASPputmsg(gref_t *gref, strbuf_t *ctlptr, strbuf_t *datptr, gbuf_t *mreq, in asp_word_t *awp; union asp_primitives *primitives; unsigned short tid; + caddr_t dataptr; if ((scb = (asp_scb_t *)gref->info) == 0) { dPrintf(D_M_ASP, D_L_ERROR, @@ -1991,46 +1991,77 @@ int ASPputmsg(gref_t *gref, strbuf_t *ctlptr, strbuf_t *datptr, gbuf_t *mreq, in ("ASPputmsg: %s\n", aspCmdStr(Primitive))); /* - * allocate buffer and copy in the data content + * copy in the data content into multiple mbuf clusters if + * required. ATP now expects reply data to be placed in + * standard clusters, not the large external clusters that + * were used previously. */ - len = (Primitive == ASPFUNC_CmdReply) ? 0 : aspCMDsize; + + /* set offset for use by some commands */ + offset = (Primitive == ASPFUNC_CmdReply) ? 0 : aspCMDsize; + size = 0; + if (mreq != NULL) { + /* The data from the in-kernel call for use by AFP is passed + * in as one large external cluster. This needs to be copied + * to a chain of standard clusters. + */ + remain = gbuf_len(mreq); + dataptr = mtod(mreq, caddr_t); + } else { + /* copyin from user space */ + remain = datbuf.len; + dataptr = (caddr_t)datbuf.buf; + } - if (!(mdata = gbuf_alloc_wait(datbuf.len+len, TRUE))) { + /* allocate first buffer */ + if (!(mdata = gbuf_alloc_wait((remain + offset > MCLBYTES ? MCLBYTES : remain + offset), TRUE))) { /* error return should not be possible */ err = ENOBUFS; gbuf_freem(mioc); goto l_err; } - gbuf_wset(mdata, (datbuf.len+len)); + gbuf_wset(mdata, 0); /* init length to zero */ gbuf_cont(mioc) = mdata; - - if (mreq != NULL) { - /* being called from kernel space */ - gbuf_t *tmp = mreq; - unsigned long offset = 0; - - /* copy afp cmd data from the passed in mbufs to mdata. 
I cant - chain mreq to mdata since the rest of this code assumes - just one big mbuf with space in front for the BDS */ - offset = len; - while (tmp != NULL) { - bcopy (gbuf_rptr(tmp), (gbuf_rptr(mdata) + offset), gbuf_len(tmp)); - offset += gbuf_len(tmp); - tmp = gbuf_cont(tmp); /* on to next mbuf in chain */ - } - - /* all data copied out of mreq so free it */ - gbuf_freem(mreq); - } else { - /* being called from user space */ - if ((err = copyin((caddr_t)datbuf.buf, - (caddr_t)(gbuf_rptr(mdata)+len), datbuf.len)) != 0) { - gbuf_freem(mioc); - goto l_err; - } - } - switch (Primitive) { + while (remain) { + if (remain + offset > MCLBYTES) + copy_len = MCLBYTES - offset; + else + copy_len = remain; + remain -= copy_len; + if (mreq != NULL) + bcopy (dataptr, (gbuf_rptr(mdata) + offset), copy_len); + else if ((err = copyin(dataptr, (caddr_t)(gbuf_rptr(mdata) + offset), copy_len)) != 0) { + gbuf_freem(mioc); + goto l_err; + } + gbuf_wset(mdata, (copy_len + offset)); + size += copy_len + offset; + dataptr += copy_len; + offset = 0; + if (remain) { + /* allocate the next mbuf */ + if ((gbuf_cont(mdata) = m_get((M_WAIT), MSG_DATA)) == 0) { + err = ENOBUFS; + gbuf_freem(mioc); + goto l_err; + } + mdata = gbuf_cont(mdata); + MCLGET(mdata, M_WAIT); + if (!(mdata->m_flags & M_EXT)) { + err = ENOBUFS; + gbuf_freem(mioc); + goto l_err; + } + } + } + mdata = gbuf_cont(mioc); /* code further on down expects this to be set */ + mdata->m_pkthdr.len = size; /* set packet hdr len */ + + if (mreq != 0) + gbuf_freem(mreq); + + switch (Primitive) { case ASPFUNC_Command: case ASPFUNC_Write: @@ -2147,16 +2178,20 @@ int ASPputmsg(gref_t *gref, strbuf_t *ctlptr, strbuf_t *datptr, gbuf_t *mreq, in atp->xo = 1; atp->xo_relt = 1; } + /* setup the atpBDS struct - only the length field is used, + * except for the first one which contains the bds count in + * bdsDataSz. + */ atpBDS = (struct atpBDS *)gbuf_wptr(mioc); msize = mdata ? gbuf_msgsize(mdata) : 0; + for (nbds=0; (nbds < ATP_TRESP_MAX) && (msize > 0); nbds++) { len = msize < ATP_DATA_SIZE ?
msize : ATP_DATA_SIZE; msize -= ATP_DATA_SIZE; *(long *)atpBDS[nbds].bdsUserData = 0; UAL_ASSIGN(atpBDS[nbds].bdsBuffAddr, 1); UAS_ASSIGN(atpBDS[nbds].bdsBuffSz, len); } - UAS_ASSIGN(atpBDS[0].bdsDataSz, nbds); + UAS_ASSIGN(atpBDS[0].bdsDataSz, nbds); *(long *)atpBDS[0].bdsUserData = (long)result; *(long *)atp->user_bytes = (long)result; gbuf_winc(mioc,atpBDSsize); diff --git a/bsd/netat/at_aarp.h b/bsd/netat/at_aarp.h index 3f8bc1754..8abfedde2 100644 --- a/bsd/netat/at_aarp.h +++ b/bsd/netat/at_aarp.h @@ -109,7 +109,7 @@ typedef struct { gbuf_t *m; /* ptr to msg blk to be sent out */ at_ifaddr_t *elapp; int error; - void *tmo; + int tmo; } aarp_amt_t; #define AMT_BSIZ 4 /* bucket size */ diff --git a/bsd/netat/at_snmp.h b/bsd/netat/at_snmp.h index fcfd083cc..e38cff4e9 100644 --- a/bsd/netat/at_snmp.h +++ b/bsd/netat/at_snmp.h @@ -215,4 +215,4 @@ typedef struct snmpStats { #define SNMP_TYPE(var,type) ((var & SNMP_OBJ_TYPE_MASK) == type) -#endif _NETAT_AT_SNMP_H_ +#endif /* _NETAT_AT_SNMP_H_ */ diff --git a/bsd/netat/at_var.h b/bsd/netat/at_var.h index 943ee75da..b2581d50a 100644 --- a/bsd/netat/at_var.h +++ b/bsd/netat/at_var.h @@ -305,4 +305,7 @@ struct kev_atalk_data { } node_data; }; +void atalk_post_msg(struct ifnet *ifp, u_long event_code, struct at_addr *address, at_nvestr_t *zone); +void aarp_sched_probe(void *); + #endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/netat/atp.h b/bsd/netat/atp.h index 00de460c5..5e1b33ba0 100644 --- a/bsd/netat/atp.h +++ b/bsd/netat/atp.h @@ -434,7 +434,7 @@ void atp_drop_req(gref_t *, gbuf_t *); void atp_send_rsp(gref_t *, gbuf_t *, int); void atp_wput(gref_t *, gbuf_t *); void atp_rput(gref_t *, gbuf_t *); -void atp_retry_req(gbuf_t *); +void atp_retry_req(void *); void atp_stop(gbuf_t *, int); void atp_cancel_req(gref_t *, unsigned short); int atp_open(gref_t *, int); diff --git a/bsd/netat/atp_read.c b/bsd/netat/atp_read.c index 5013d2a3f..ef1251ab7 100644 --- a/bsd/netat/atp_read.c +++ b/bsd/netat/atp_read.c @@ -53,7 +53,7 @@ static void atp_trans_complete(); void atp_x_done(); -void atp_x_done_funnel(); +void atp_x_done_funnel(void *); extern void atp_req_timeout(); /* @@ -61,9 +61,9 @@ extern void atp_req_timeout(); * Version 1.7 of atp_read.c on 89/02/09 17:53:16 */ -void atp_treq_event(gref) -register gref_t *gref; +void atp_treq_event(void *arg) { + register gref_t *gref = (gref_t *)arg; register gbuf_t *m; register struct atp_state *atp; boolean_t funnel_state; @@ -459,10 +459,10 @@ gbuf_t *m; void atp_x_done_funnel(trp) -register struct atp_trans *trp; +void *trp; { thread_funnel_set(network_flock, TRUE); - atp_x_done(trp); + atp_x_done((struct atp_trans *)trp); (void) thread_funnel_set(network_flock, FALSE); } diff --git a/bsd/netat/atp_write.c b/bsd/netat/atp_write.c index 1eebbd182..551ebfd8f 100644 --- a/bsd/netat/atp_write.c +++ b/bsd/netat/atp_write.c @@ -70,7 +70,7 @@ static int loop_cnt; /* for debugging loops */ static void atp_pack_bdsp(struct atp_trans *, struct atpBDS *); static int atp_unpack_bdsp(struct atp_state *, gbuf_t *, struct atp_rcb *, int, int); -void atp_retry_req(), atp_trp_clock(), asp_clock(), asp_clock_funnel(), atp_trp_clock_funnel();; +void atp_trp_clock(), asp_clock(), asp_clock_funnel(), atp_trp_clock_funnel();; extern struct atp_rcb_qhead atp_need_rel; extern int atp_inited; @@ -455,11 +455,11 @@ void atp_send_replies(atp, rcbp) register struct atp_rcb *rcbp; { register gbuf_t *m; register int i, len; - int s_gen, s, cnt; + int s_gen, s, cnt, err, offset, space; unsigned char *m0_rptr = NULL, *m0_wptr = 
NULL; register at_atp_t *athp; register struct atpBDS *bdsp; - register gbuf_t *m2, *m1, *m0, *m3; + register gbuf_t *m2, *m1, *m0, *mhdr; caddr_t lastPage; gbuf_t *mprev, *mlist = 0; at_socket src_socket = (at_socket)atp->atp_socket_no; @@ -497,109 +497,67 @@ void atp_send_replies(atp, rcbp) m = rcbp->rc_xmt; m0 = gbuf_cont(m); - if (m0) { - m0_rptr = gbuf_rptr(m0); - m0_wptr = gbuf_wptr(m0); - } if (gbuf_len(m) > TOTAL_ATP_HDR_SIZE) bdsp = (struct atpBDS *)(AT_ATP_HDR(m)->data); else bdsp = 0; - + offset = 0; + if (m0) + space = gbuf_msgsize(m0); for (i = 0; i < cnt; i++) { - if (rcbp->rc_snd[i] == 0) { - if ((len = UAS_VALUE(bdsp->bdsBuffSz))) - gbuf_rinc(m0,len); - - } else { - m2 = rc_xmt[i]; - gbuf_rinc(m2,AT_WR_OFFSET); - gbuf_wset(m2,TOTAL_ATP_HDR_SIZE); - *(struct ddp_atp *)(gbuf_rptr(m2))= *(struct ddp_atp *)(gbuf_rptr(m)); - athp = AT_ATP_HDR(m2); - ATP_CLEAR_CONTROL(athp); - athp->cmd = ATP_CMD_TRESP; - athp->bitmap = i; - if (i == (cnt - 1)) - athp->eom = 1; /* for the last fragment */ - if (bdsp) - UAL_UAL(athp->user_bytes, bdsp->bdsUserData); - - if (bdsp) - if (len = UAS_VALUE(bdsp->bdsBuffSz)) { /* copy in data */ - if (m0 && gbuf_len(m0)) { - if ((m1 = gbuf_dupb(m0)) == NULL) { - for (i = 0; i < cnt; i++) - if (rc_xmt[i]) - gbuf_freem(rc_xmt[i]); - gbuf_rptr(m0) = m0_rptr; - gbuf_wset(m0,(m0_wptr-m0_rptr)); - goto nothing_to_send; + if (rcbp->rc_snd[i] == 0) { + if ((len = UAS_VALUE(bdsp->bdsBuffSz))) { + offset += len; + space -= len; } - gbuf_wset(m1,len); - gbuf_rinc(m0,len); - if ((len = gbuf_len(m0)) < 0) { - gbuf_rdec(m0,len); - gbuf_wdec(m1,len); - if (!append_copy((struct mbuf *)m1, - (struct mbuf *)gbuf_cont(m0), FALSE)) { - for (i = 0; i < cnt; i++) - if (rc_xmt[i]) - gbuf_freem(rc_xmt[i]); - gbuf_rptr(m0) = m0_rptr; - gbuf_wset(m0,(m0_wptr-m0_rptr)); - goto nothing_to_send; + } else { + mhdr = rc_xmt[i]; + /* setup header fields */ + gbuf_rinc(mhdr,AT_WR_OFFSET); + gbuf_wset(mhdr,TOTAL_ATP_HDR_SIZE); + *(struct ddp_atp *)(gbuf_rptr(mhdr))= *(struct ddp_atp *)(gbuf_rptr(m)); + athp = AT_ATP_HDR(mhdr); + ATP_CLEAR_CONTROL(athp); + athp->cmd = ATP_CMD_TRESP; + athp->bitmap = i; + if (i == (cnt - 1)) + athp->eom = 1; /* for the last fragment */ + if (bdsp) { + UAL_UAL(athp->user_bytes, bdsp->bdsUserData); + if ((len = UAS_VALUE(bdsp->bdsBuffSz)) && m0 != 0 && space > 0) { + if ((m1 = m_copym(m0, offset, len, M_DONTWAIT)) == 0) { + for (i = 0; i < cnt; i++) + if (rc_xmt[i]) + gbuf_freem(rc_xmt[i]); + goto nothing_to_send; + } + offset += len; + space -= len; + gbuf_cont(mhdr) = m1; } - } else - gbuf_cont(m1) = 0; - gbuf_cont(m2) = m1; + } - /* temp fix for page boundary problem - bug# 2703163 */ - lastPage = (caddr_t)((int)(gbuf_wptr(m1) - 1) & ~PAGE_MASK); /* 4k page of last byte */ - if (lastPage != (caddr_t)((int)(gbuf_rptr(m1)) & ~PAGE_MASK)) { /* 1st byte and last on same page ? 
*/ - if ((m3 = gbuf_dupb(m1)) == NULL) { - for (i = 0; i < cnt; i++) - if (rc_xmt[i]) - gbuf_freem(rc_xmt[i]); - (gbuf_rptr(m0)) = m0_rptr; - gbuf_wset(m0, (m0_wptr - m0_rptr)); - goto nothing_to_send; - } - (gbuf_rptr(m3)) = lastPage; /* new mbuf starts at beginning of page */ - gbuf_wset(m3, (gbuf_wptr(m1) - lastPage)); /* len = remaining data crossing over page boundary */ - gbuf_wset(m1, (lastPage - (gbuf_rptr(m1)))); /* adjust len of m1 */ - (gbuf_cont(m1)) = m3; - (gbuf_cont(m3)) = 0; - } - } + AT_DDP_HDR(mhdr)->src_socket = src_socket; + dPrintf(D_M_ATP_LOW, D_L_OUTPUT, + ("atp_send_replies: %d, socket=%d, size=%d\n", + i, atp->atp_socket_no, gbuf_msgsize(gbuf_cont(mhdr)))); + + if (mlist) + gbuf_next(mprev) = mhdr; + else + mlist = mhdr; + mprev = mhdr; + + rcbp->rc_snd[i] = 0; + rcbp->rc_not_sent_bitmap &= ~atp_mask[i]; + if (rcbp->rc_not_sent_bitmap == 0) + break; + } - - AT_DDP_HDR(m2)->src_socket = src_socket; - dPrintf(D_M_ATP_LOW, D_L_OUTPUT, - ("atp_send_replies: %d, socket=%d, size=%d\n", - i, atp->atp_socket_no, gbuf_msgsize(gbuf_cont(m2)))); - - if (mlist) - gbuf_next(mprev) = m2; - else - mlist = m2; - mprev = m2; - - rcbp->rc_snd[i] = 0; - rcbp->rc_not_sent_bitmap &= ~atp_mask[i]; - if (rcbp->rc_not_sent_bitmap == 0) - break; - } - /* - * on to the next frag - */ - bdsp++; - } - if (m0) { - gbuf_rptr(m0) = m0_rptr; - gbuf_wset(m0,(m0_wptr-m0_rptr)); + /* + * on to the next frag + */ + bdsp++; } - if (mlist) { ATENABLE(s, atp->atp_lock); DDP_OUTPUT(mlist); @@ -706,6 +664,11 @@ atp_pack_bdsp(trp, bdsp) } /* atp_pack_bdsp */ +/* create an mbuf chain with mbuf packet headers for each ATP response packet + * to be sent. m contains the DDP hdr, ATP hdr, and an array of atpBDS structs. + * chained to m is an mbuf that contains the actual data pointed to by the atpBDS + * structs. + */ static int atp_unpack_bdsp(atp, m, rcbp, cnt, wait) struct atp_state *atp; @@ -714,17 +677,19 @@ atp_unpack_bdsp(atp, m, rcbp, cnt, wait) register int cnt, wait; { - register gbuf_t *m2, *m1, *m0, *m3; - caddr_t lastPage; - register at_atp_t *athp; - register int i, len, s_gen; - at_socket src_socket; - struct ddp_atp { + register gbuf_t *m2, *m1, *m0, *mhdr; + caddr_t lastPage; + at_atp_t *athp; + int i, len, s_gen; + at_socket src_socket; + + struct ddp_atp { char ddp_atp_hdr[TOTAL_ATP_HDR_SIZE]; }; - gbuf_t *mprev, *mlist = 0; - gbuf_t *rc_xmt[ATP_TRESP_MAX]; - unsigned char *m0_rptr, *m0_wptr; + gbuf_t *mprev, *mlist = 0; + gbuf_t *rc_xmt[ATP_TRESP_MAX]; + unsigned char *m0_rptr, *m0_wptr; + int err, offset, space; /* * get the user data structure pointer @@ -790,101 +755,70 @@ atp_unpack_bdsp(atp, m, rcbp, cnt, wait) goto l_send; } + /* create an array of mbuf packet headers for the packets to be sent + * to contain the atp and ddp headers with room at the front for the + * datalink header. 
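The rewrite above drops the gbuf_dupb() read/write-pointer juggling and the 4 KB page-boundary workaround (bug #2703163) in favor of m_copym(), which copies len bytes at a running offset without disturbing the source chain. A minimal sketch of that bookkeeping using only standard BSD mbuf calls; the helper name build_frags and the frag_sizes array are hypothetical, and error handling is reduced to unwinding:

#include <sys/param.h>
#include <sys/mbuf.h>

/* Sketch: carve cnt fragments from src at increasing offsets, chaining
 * the resulting packets through m_nextpkt the way mlist/mprev are used
 * in atp_send_replies() and atp_unpack_bdsp() above. */
static struct mbuf *
build_frags(struct mbuf *src, const int *frag_sizes, int cnt)
{
	struct mbuf *head = NULL, *tail = NULL, *frag, *m;
	int i, offset = 0, space = 0;

	for (m = src; m != NULL; m = m->m_next)
		space += m->m_len;		/* bytes available in the chain */

	for (i = 0; i < cnt; i++) {
		int len = frag_sizes[i];

		if (len <= 0 || len > space)
			break;
		/* m_copym() leaves src untouched, so no rptr/wptr rewind
		 * is needed on the error path. */
		if ((frag = m_copym(src, offset, len, M_DONTWAIT)) == NULL) {
			while (head != NULL) {	/* free each packet built so far */
				m = head->m_nextpkt;
				m_freem(head);
				head = m;
			}
			return NULL;
		}
		if (tail != NULL)
			tail->m_nextpkt = frag;
		else
			head = frag;
		tail = frag;
		offset += len;
		space -= len;
	}
	return head;
}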
+ */ for (i = 0; i < cnt; i++) { /* all hdrs, packet data and dst addr storage */ if ((rc_xmt[i] = - gbuf_alloc_wait(AT_WR_OFFSET+TOTAL_ATP_HDR_SIZE, - wait)) == NULL) { - for (cnt = 0; cnt < i; cnt++) - if (rc_xmt[cnt]) - gbuf_freeb(rc_xmt[cnt]); - return 0; + gbuf_alloc_wait(AT_WR_OFFSET+TOTAL_ATP_HDR_SIZE, wait)) == NULL) { + for (cnt = 0; cnt < i; cnt++) + if (rc_xmt[cnt]) + gbuf_freeb(rc_xmt[cnt]); + return 0; } } - if (m0) { - m0_rptr = gbuf_rptr(m0); - m0_wptr = gbuf_wptr(m0); - } - for (i = 0; i < cnt; i++) { - m2 = rc_xmt[i]; - gbuf_rinc(m2,AT_WR_OFFSET); - gbuf_wset(m2,TOTAL_ATP_HDR_SIZE); - *(struct ddp_atp *)(gbuf_rptr(m2))= *(struct ddp_atp *)(gbuf_rptr(m)); - athp = AT_ATP_HDR(m2); + /* run through the atpBDS structs and create an mbuf for the data + * portion of each packet to be sent. these get chained to the mbufs + * containing the ATP and DDP headers. this code assumes that no ATP + * packet is contained in more than 2 mbufs (i.e. crosses an mbuf boundary + * no more than one time). + */ + offset = 0; + if (m0) + space = gbuf_msgsize(m0); + for (i = 0; i < cnt; i++) { /* for each hdr mbuf */ + mhdr = rc_xmt[i]; + /* setup header fields */ + gbuf_rinc(mhdr,AT_WR_OFFSET); + gbuf_wset(mhdr,TOTAL_ATP_HDR_SIZE); + *(struct ddp_atp *)(gbuf_rptr(mhdr))= *(struct ddp_atp *)(gbuf_rptr(m)); + athp = AT_ATP_HDR(mhdr); ATP_CLEAR_CONTROL(athp); athp->cmd = ATP_CMD_TRESP; athp->bitmap = i; if (i == (cnt - 1)) athp->eom = 1; /* for the last fragment */ UAL_UAL(athp->user_bytes, bdsp->bdsUserData); - - if ((len = UAS_VALUE(bdsp->bdsBuffSz))) { /* copy in data */ - if (m0 && gbuf_len(m0)) { - if ((m1 = gbuf_dupb_wait(m0, wait)) == NULL) { + + if ((len = UAS_VALUE(bdsp->bdsBuffSz)) != 0 && m0 != 0 && space > 0) { + if ((m1 = m_copym(m0, offset, len, wait)) == 0) { for (i = 0; i < cnt; i++) if (rc_xmt[i]) gbuf_freem(rc_xmt[i]); - gbuf_rptr(m0) = m0_rptr; - gbuf_wset(m0,(m0_wptr-m0_rptr)); return 0; } - gbuf_wset(m1,len); /* *** m1 is first len bytes of m0? *** */ - gbuf_rinc(m0,len); - if ((len = gbuf_len(m0)) < 0) { - gbuf_rdec(m0,len); - gbuf_wdec(m1,len); - if (!append_copy((struct mbuf *)m1, - (struct mbuf *)gbuf_cont(m0), wait)) { - for (i = 0; i < cnt; i++) - if (rc_xmt[i]) - gbuf_freem(rc_xmt[i]); - gbuf_rptr(m0) = m0_rptr; - gbuf_wset(m0,(m0_wptr-m0_rptr)); - return 0; - } - } else - gbuf_cont(m1) = 0; - gbuf_cont(m2) = m1; - - /* temp fix for page boundary problem - bug# 2703163 */ - lastPage = (caddr_t)((int)(gbuf_wptr(m1) - 1) & ~PAGE_MASK); /* 4k page of last byte */ - if (lastPage != (caddr_t)((int)(gbuf_rptr(m1)) & ~PAGE_MASK)) { /* 1st byte and last on same page ? 
*/ - if ((m3 = gbuf_dupb_wait(m1, wait)) == NULL) { - for (i = 0; i < cnt; i++) - if (rc_xmt[i]) - gbuf_freem(rc_xmt[i]); - (gbuf_rptr(m0)) = m0_rptr; - gbuf_wset(m0, (m0_wptr - m0_rptr)); - return 0; - } - (gbuf_rptr(m3)) = lastPage; /* new mbuf starts at beginning of page */ - gbuf_wset(m3, (gbuf_wptr(m1) - lastPage)); /* len = remaining data crossing over page boundary */ - gbuf_wset(m1, (lastPage - (gbuf_rptr(m1)))); /* adjust len of m1 */ - (gbuf_cont(m1)) = m3; - (gbuf_cont(m3)) = 0; - } - } + gbuf_cont(mhdr) = m1; + space -= len; + offset += len; } - - AT_DDP_HDR(m2)->src_socket = src_socket; + + AT_DDP_HDR(mhdr)->src_socket = src_socket; dPrintf(D_M_ATP_LOW,D_L_INFO, ("atp_unpack_bdsp %d, socket=%d, size=%d, cnt=%d\n", - i,atp->atp_socket_no,gbuf_msgsize(gbuf_cont(m2)),cnt)); + i,atp->atp_socket_no,gbuf_msgsize(gbuf_cont(mhdr)),cnt)); if (mlist) - gbuf_next(mprev) = m2; + gbuf_next(mprev) = mhdr; else - mlist = m2; - mprev = m2; + mlist = mhdr; + mprev = mhdr; /* * on to the next frag */ bdsp++; } - if (m0) { - gbuf_rptr(m0) = m0_rptr; - gbuf_wset(m0,(m0_wptr-m0_rptr)); - } /* * send the message */ @@ -901,6 +835,7 @@ l_send: DDP_OUTPUT(mlist); return 0; + } /* atp_unpack_bdsp */ #define ATP_SOCKET_LAST (DDP_SOCKET_LAST-6) @@ -1325,9 +1260,10 @@ l_retry: } } /* atp_send_req */ -void atp_retry_req(m) - gbuf_t *m; +void atp_retry_req(arg) + void *arg; { + gbuf_t *m = (gbuf_t *)arg; gref_t *gref; boolean_t funnel_state; @@ -1671,7 +1607,7 @@ _ATPsndreq(fd, buf, len, nowait, err, proc) /* * copy out the recv data */ - atp_pack_bdsp(trp, bds); + atp_pack_bdsp(trp, (struct atpBDS *)bds); /* * copyout the result info @@ -1683,6 +1619,14 @@ _ATPsndreq(fd, buf, len, nowait, err, proc) return (int)tid; } /* _ATPsndreq */ + +/* entry point for ATP send response. respbuf contains a DDP hdr, + * ATP hdr, and atpBDS array. The bdsDataSz field of the first atpBDS + * struct contains the number of atpBDS structs in the array. resplen + * contains the len of the data in respbuf and datalen contains the + * len of the data buffer holding the response packets which the atpBDS + * struct entries point to. + */ int _ATPsndrsp(fd, respbuff, resplen, datalen, err, proc) int fd; @@ -1692,15 +1636,18 @@ _ATPsndrsp(fd, respbuff, resplen, datalen, err, proc) int *err; void *proc; { - gref_t *gref; - int s, rc; - long bufaddr; - gbuf_t *m, *mdata; - register short len; - register int size; - register struct atp_state *atp; - register struct atpBDS *bdsp; - register char *buf; + gref_t *gref; + int s, rc; + long bufaddr; + gbuf_t *m, *mdata; + short space; + int size; + struct atp_state *atp; + struct atpBDS *bdsp; + u_int16_t *bufsz; + char *buf; + int bds_cnt, count, len; + caddr_t dataptr; if ((*err = atalk_getref(0, fd, &gref, proc)) != 0) return -1; @@ -1728,33 +1675,68 @@ _ATPsndrsp(fd, respbuff, resplen, datalen, err, proc) gbuf_wset(m,resplen); ((at_ddp_t *)gbuf_rptr(m))->src_node = 0; bdsp = (struct atpBDS *)(gbuf_rptr(m) + TOTAL_ATP_HDR_SIZE); - if ((resplen == TOTAL_ATP_HDR_SIZE) || ((len = UAS_VALUE(bdsp->bdsDataSz)) == 1)) - len = 0; - else - len = 16 * sizeof(gbuf_t); /* - * allocate buffer and copy in the response data + * allocate buffers and copy in the response data. + * note that only the size field of the atpBDS field + * is used internally in the kernel. 
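Before copying anything in, the new _ATPsndrsp() below bounds the descriptor count and cross-checks the per-entry sizes against datalen, so a malformed atpBDS array can no longer read past the caller's buffer. The same checks in isolation, as a sketch; validate_bds is a hypothetical name, and atpBDS, UAS_VALUE(), and ATP_TRESP_MAX come from the netat headers:

#include <sys/errno.h>

/* Sketch of the parameter validation performed in _ATPsndrsp(). */
static int
validate_bds(struct atpBDS *bdsp, int bds_cnt, int datalen)
{
	int count, size = 0;

	if (bds_cnt > ATP_TRESP_MAX)	/* more fragments than ATP permits */
		return EINVAL;
	for (count = 0; count < bds_cnt; count++)
		size += UAS_VALUE(bdsp[count].bdsBuffSz);
	if (size > datalen)		/* claimed sizes overrun the data buffer */
		return EINVAL;
	return 0;
}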
*/ - if ((mdata = gbuf_alloc_wait(datalen+len, TRUE)) == 0) { - gbuf_freem(m); + bds_cnt = get_bds_entries(m); /* count of # entries */ + /* check correctness of parameters */ + if (bds_cnt > ATP_TRESP_MAX) { + gbuf_freem(m); + *err = EINVAL; + return -1; + } + + for (size = 0, count = 0; count < bds_cnt; count++) { + size += UAS_VALUE(bdsp[count].bdsBuffSz); + } + if (size > datalen) { + gbuf_freem(m); + *err = EINVAL; + return -1; + } + + /* get the first mbuf */ + if ((mdata = gbuf_alloc_wait((space = (size > MCLBYTES ? MCLBYTES : size)), TRUE)) == 0) { + gbuf_freem(m); *err = ENOMEM; return -1; } gbuf_cont(m) = mdata; - for (size=0; bdsp < (struct atpBDS *)gbuf_wptr(m); bdsp++) { - if ((bufaddr = UAL_VALUE(bdsp->bdsBuffAddr)) != 0) { - len = UAS_VALUE(bdsp->bdsBuffSz); - buf = (char *)bufaddr; - if ((*err = copyin((caddr_t)buf, - (caddr_t)&gbuf_rptr(mdata)[size], len)) != 0) { + dataptr = mtod(mdata, caddr_t); + for (count = 0; count < bds_cnt; bdsp++, count++) { + if ((bufaddr = UAL_VALUE(bdsp->bdsBuffAddr)) != 0 && + (len = UAS_VALUE(bdsp->bdsBuffSz)) != 0) { + if (len > space) { /* enough room ? */ + gbuf_wset(mdata, dataptr - mtod(mdata, caddr_t)); /* set len of last mbuf */ + /* allocate the next mbuf */ + if ((gbuf_cont(mdata) = m_get((M_WAIT), MSG_DATA)) == 0) { + gbuf_freem(m); + *err = ENOMEM; + return -1; + } + mdata = gbuf_cont(mdata); + MCLGET(mdata, M_WAIT); + if (!(mdata->m_flags & M_EXT)) { + m_freem(m); + *err = ENOMEM; return -1; + } + dataptr = mtod(mdata, caddr_t); + space = MCLBYTES; + } + /* do the copyin */ + if ((*err = copyin((caddr_t)bufaddr, dataptr, len)) != 0) { gbuf_freem(m); return -1; } - size += len; + dataptr += len; + space -= len; } } - gbuf_wset(mdata,size); + gbuf_wset(mdata, dataptr - mtod(mdata, caddr_t)); /* set len of last mbuf */ + gbuf_cont(m)->m_pkthdr.len = size; /* set packet hdr len */ atp_send_rsp(gref, m, TRUE); return 0; @@ -1861,7 +1843,7 @@ _ATPgetrsp(fd, bdsp, err, proc) if ((*err = copyin((caddr_t)bdsp, (caddr_t)bds, sizeof(bds))) != 0) return -1; - atp_pack_bdsp(trp, bds); + atp_pack_bdsp(trp, (struct atpBDS *)bds); tid = (int)trp->tr_tid; atp_free(trp); copyout((caddr_t)bds, (caddr_t)bdsp, sizeof(bds)); diff --git a/bsd/netat/aurp_aurpd.c b/bsd/netat/aurp_aurpd.c index 02a387be2..8d9989e00 100644 --- a/bsd/netat/aurp_aurpd.c +++ b/bsd/netat/aurp_aurpd.c @@ -262,7 +262,7 @@ AURPgetmsg(err) ("AURPgetmsg: soreceive returned %d, aurp_global.event==0x%x\n", *err, events)); /* soreceive() sets *mp to zero! 
at start */ if (p_mbuf) - ip_to_atalk(from, p_mbuf); + ip_to_atalk((struct sockaddr_in *)from, p_mbuf); if (*err || (p_mbuf == NULL)) { /* * An error occurred in soreceive(), diff --git a/bsd/netat/aurp_ri.c b/bsd/netat/aurp_ri.c index 9d64f0a35..87107c91f 100644 --- a/bsd/netat/aurp_ri.c +++ b/bsd/netat/aurp_ri.c @@ -53,6 +53,9 @@ #include #include + +static void AURPsndRIRsp(aurp_state_t *); + /* */ void AURPsndRIAck(state, m, flags) aurp_state_t *state; diff --git a/bsd/netat/ddp_aarp.c b/bsd/netat/ddp_aarp.c index cbb18fc4d..a5c6c4736 100644 --- a/bsd/netat/ddp_aarp.c +++ b/bsd/netat/ddp_aarp.c @@ -81,7 +81,6 @@ aarp_amt_array *aarp_table[IF_TOTAL_MAX]; int aarp_init1(), aarp_init2(); int aarp_send_data(); -int aarp_sched_probe(); StaticProc int aarp_req_cmd_in(); StaticProc int aarp_resp_cmd_in(); @@ -93,7 +92,7 @@ StaticProc aarp_amt_t *aarp_lru_entry(); StaticProc int aarp_glean_info(); StaticProc int aarp_delete_amt_info(); StaticProc void aarp_build_pkt(); -StaticProc int aarp_sched_req(); +StaticProc void aarp_sched_req(void *); StaticProc int aarp_get_rand_node(); StaticProc int aarp_get_next_node(); StaticProc int aarp_get_rand_net(); @@ -767,13 +766,14 @@ register aarp_amt_t *amt_ptr; * ****************************************************************************/ -int aarp_sched_probe() +void aarp_sched_probe(void *arg) { boolean_t funnel_state; funnel_state = thread_funnel_set(network_flock, TRUE); - if (probe_cb.no_of_retries != AARP_MAX_PROBE_RETRIES) { + if (probe_cb.elapp->aa_ifp != 0 && + probe_cb.no_of_retries != AARP_MAX_PROBE_RETRIES) { if (aarp_send_probe() == -1) AARPwakeup(&probe_cb); } else { @@ -782,7 +782,6 @@ int aarp_sched_probe() } (void) thread_funnel_set(network_flock, FALSE); - return(0); } @@ -810,11 +809,12 @@ StaticProc void aarp_build_pkt(pkt, elapp) * ****************************************************************************/ -StaticProc int aarp_sched_req(amt_ptr) - register aarp_amt_t *amt_ptr; +StaticProc void aarp_sched_req(arg) + void *arg; { int s, i; boolean_t funnel_state; + aarp_amt_t *amt_ptr = (aarp_amt_t *)arg; funnel_state = thread_funnel_set(network_flock, TRUE); @@ -824,7 +824,8 @@ StaticProc int aarp_sched_req(amt_ptr) * into one of the amt arrays. 
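aarp_sched_probe() above, like atp_retry_req(), rtmp_dropper(), and zip_getnetinfo_funnel() elsewhere in this patch, is being normalized to the void (*)(void *) shape the kernel timeout() machinery expects, with the network funnel acquired inside the callback. The pattern in the abstract, assuming the xnu funnel primitives from <kern/thread.h>; my_state, my_handler, and my_handler_funnel are hypothetical names:

#include <kern/thread.h>

extern funnel_t *network_flock;		/* xnu's networking funnel */

struct my_state;				/* whatever the callback operates on */
static void my_handler(struct my_state *);	/* real work; funnel held */

/* Matches void (*)(void *), so it can be handed to timeout() directly. */
static void
my_handler_funnel(void *arg)
{
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(network_flock, TRUE);
	my_handler((struct my_state *)arg);
	(void) thread_funnel_set(network_flock, FALSE);
}

/* armed with, e.g.: timeout(my_handler_funnel, (void *)state, 10 * hz); */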
*/ for (i = 0; i < IF_TOTAL_MAX; i++) { - if (aarp_table[i] == NULL || amt_ptr < aarp_table[i] || amt_ptr >= (aarp_table[i] + 1)) + if (aarp_table[i] == NULL || (void *)amt_ptr < (void *)aarp_table[i] || + (void *)amt_ptr >= (void *)(aarp_table[i] + 1)) continue; /* no match - try next entry */ /* @@ -834,13 +835,13 @@ StaticProc int aarp_sched_req(amt_ptr) if (amt_ptr->tmo == 0) { ATENABLE(s, arpinp_lock); (void) thread_funnel_set(network_flock, FALSE); - return(0); + return; } if (amt_ptr->no_of_retries < AARP_MAX_REQ_RETRIES) { ATENABLE(s, arpinp_lock); if (aarp_send_req(amt_ptr) == 0) { (void) thread_funnel_set(network_flock, FALSE); - return(0); + return; } ATDISABLE(s, arpinp_lock); } @@ -850,7 +851,7 @@ StaticProc int aarp_sched_req(amt_ptr) } (void) thread_funnel_set(network_flock, FALSE); - return(0); + return; } diff --git a/bsd/netat/ddp_brt.c b/bsd/netat/ddp_brt.c index 961d535e4..3d4437de3 100644 --- a/bsd/netat/ddp_brt.c +++ b/bsd/netat/ddp_brt.c @@ -71,6 +71,8 @@ ddp_brt_t at_ddp_brt[BRTSIZE]; int ddp_brt_sweep_timer; +void ddp_brt_sweep(); + void ddp_glean(mp, ifID, src_addr) register gbuf_t *mp; register at_ifaddr_t *ifID; diff --git a/bsd/netat/ddp_lap.c b/bsd/netat/ddp_lap.c index 8418e61af..88762a301 100644 --- a/bsd/netat/ddp_lap.c +++ b/bsd/netat/ddp_lap.c @@ -887,10 +887,22 @@ static int elap_online1(elapp) /* Get DDP started */ if ((errno = ddp_add_if(elapp))) return(errno); - + + // check if we still have an interface - can be lost when + // ddp_add_if calls malloc + // need to make check here after ddp_add_if completes because + // lap_online will call ddp_rem_if if we fail here + if (elapp->aa_ifp == 0) + return ENOENT; + /* set up multicast address for cable-wide broadcasts */ (void)at_reg_mcast(elapp, (caddr_t)&elapp->cable_multicast_addr); + // need to check again if interface is present + // can be lost in at_reg_mcast + if (elapp->aa_ifp == 0) + return ENOENT; + elapp->startup_inprogress = TRUE; if (! (elapp->startup_error = re_aarp(elapp))) (void)tsleep(&elapp->startup_inprogress, PSOCK | PCATCH, @@ -1083,8 +1095,6 @@ int ddp_shutdown(count_only) vm_offset_t temp_rcb_data, temp_state_data; int i, s, active_skts = 0; /* count of active pids for non-socketized AppleTalk protocols */ - extern int aarp_sched_probe(); - /* Network is shutting down... send error messages up on each open * socket. @@ -1235,29 +1245,6 @@ int ddp_shutdown(count_only) } ddp_start(); - /* free buffers for large arrays used by atp. - * to prevent a race condition if the funnel is dropped - * while calling kmem_free, the fields are grabbed and - * zeroed first. 
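The elap_online1() hunk above adds a defensive idiom rather than new functionality: ddp_add_if() and at_reg_mcast() can block (or, per the comment, allocate), and the interface can be detached while the thread sleeps, so elapp->aa_ifp is re-read after each such call. Condensed into a sketch, with the unwinding that lap_online performs elided:

#include <sys/errno.h>

/* Sketch of the revalidation idiom used in elap_online1() above. */
static int
online_sketch(at_ifaddr_t *elapp)
{
	int error;

	if ((error = ddp_add_if(elapp)))
		return error;
	if (elapp->aa_ifp == 0)		/* interface vanished while we slept */
		return ENOENT;

	(void)at_reg_mcast(elapp, (caddr_t)&elapp->cable_multicast_addr);
	if (elapp->aa_ifp == 0)		/* check again after the next blocking call */
		return ENOENT;

	return 0;
}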
- */ - if (atp_rcb_data != NULL) { - temp_rcb_data = (vm_offset_t)atp_rcb_data; - atp_rcb_data = NULL; - atp_rcb_free_list = NULL; - } else - temp_rcb_data = NULL; - if (atp_state_data != NULL) { - temp_state_data = (vm_offset_t)atp_state_data; - atp_state_data = NULL; - atp_free_list = NULL; - } else - temp_state_data = NULL; - - if (temp_rcb_data) - kmem_free(kernel_map, temp_rcb_data, sizeof(struct atp_rcb) * NATP_RCB); - if (temp_state_data) - kmem_free(kernel_map, temp_state_data, sizeof(struct atp_state) * NATP_STATE); - splx(s); return(0); } /* ddp_shutdown */ @@ -1364,7 +1351,7 @@ void AARPwakeup(probe_cb) ATDISABLE(s, arpinp_lock); elapp = probe_cb->elapp; - if ( (elapp != NULL) && elapp->startup_inprogress ) { + if ( (elapp != NULL) && elapp->startup_inprogress && elapp->aa_ifp != 0) { ATENABLE(s, arpinp_lock); /* was AARPContinue */ diff --git a/bsd/netat/ddp_r_rtmp.c b/bsd/netat/ddp_r_rtmp.c index a239df24b..ca4120351 100644 --- a/bsd/netat/ddp_r_rtmp.c +++ b/bsd/netat/ddp_r_rtmp.c @@ -86,7 +86,7 @@ gbuf_t *rtmp_prep_new_packet(); void rtmp_timeout(); void rtmp_send_port(); void rtmp_send_port_funnel(); -void rtmp_dropper(); +void rtmp_dropper(void *); void rtmp_shutdown(); static void rtmp_update(); static void rtmp_request(); @@ -1223,7 +1223,7 @@ void rtmp_send_port(ifID) * the actual packet dropping is done in ddp_input */ -void rtmp_dropper() +void rtmp_dropper(void *arg) { boolean_t funnel_state; diff --git a/bsd/netat/ddp_r_zip.c b/bsd/netat/ddp_r_zip.c index 1169c8f40..bbf448a34 100644 --- a/bsd/netat/ddp_r_zip.c +++ b/bsd/netat/ddp_r_zip.c @@ -92,8 +92,8 @@ extern short ErrorZIPoverflow; static int netinfo_reply_pending; static void zip_netinfo_reply(at_x_zip_t *, at_ifaddr_t *); static void zip_getnetinfo(at_ifaddr_t *); -static void zip_getnetinfo_funnel(at_ifaddr_t *); -static void send_phony_reply(gbuf_t *); +static void zip_getnetinfo_funnel(void *); +static void send_phony_reply(void *); /* * zip_send_getnetinfo_reply: we received a GetNetInfo packet, we need to reply @@ -992,9 +992,10 @@ int zip_control (ifID, control) } /* funnel version of zip_getnetinfo */ -static void zip_getnetinfo_funnel(ifID) - register at_ifaddr_t *ifID; +static void zip_getnetinfo_funnel(arg) + void *arg; { + at_ifaddr_t *ifID = (at_ifaddr_t *)arg; thread_funnel_set(network_flock, TRUE); zip_getnetinfo(ifID); thread_funnel_set(network_flock, FALSE); @@ -1261,9 +1262,10 @@ int zip_handle_getmyzone(ifID, m) } static void -send_phony_reply(rm) - gbuf_t *rm; +send_phony_reply(arg) + void *arg; { + gbuf_t *rm = (gbuf_t *)arg; boolean_t funnel_state; funnel_state = thread_funnel_set(network_flock, TRUE); diff --git a/bsd/netat/ddp_usrreq.c b/bsd/netat/ddp_usrreq.c index 215a72510..9cc4e09ee 100644 --- a/bsd/netat/ddp_usrreq.c +++ b/bsd/netat/ddp_usrreq.c @@ -87,12 +87,15 @@ int ddp_pru_attach(struct socket *so, int proto, at_ddp_t *ddp = NULL; struct atpcb *pcb = (struct atpcb *)((so)->so_pcb); + error = soreserve(so, ddp_sendspace, ddp_recvspace); + if (error != 0) + return error; + s = splnet(); error = at_pcballoc(so, &ddp_head); splx(s); if (error) return error; - error = soreserve(so, ddp_sendspace, ddp_recvspace); pcb = (struct atpcb *)((so)->so_pcb); pcb->pid = current_proc()->p_pid; pcb->ddptype = (u_char) proto; /* set in socreate() */ diff --git a/bsd/netat/drv_dep.c b/bsd/netat/drv_dep.c index 8cb5e1483..7b64ba9e6 100644 --- a/bsd/netat/drv_dep.c +++ b/bsd/netat/drv_dep.c @@ -296,7 +296,7 @@ next: m_freem(m); continue; /* was EAFNOSUPPORT */ } -#endif COMMENT +#endif /* COMMENT */ 
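The ddp_usrreq.c change above is purely about ordering: soreserve() is now called before at_pcballoc(), so the one step that can fail happens while there is still nothing to unwind. As a standalone sketch, with names taken from the hunk and attach_sketch hypothetical:

/* Sketch of the pru_attach ordering after the change: reserve socket
 * buffer space first, allocate protocol state second. */
static int
attach_sketch(struct socket *so)
{
	int s, error;

	error = soreserve(so, ddp_sendspace, ddp_recvspace);
	if (error != 0)
		return error;	/* fail early: no pcb exists yet to free */

	s = splnet();
	error = at_pcballoc(so, &ddp_head);
	splx(s);
	return error;		/* on failure the reservation is torn down
				 * with the socket itself */
}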
llc_header = (llc_header_t *)(enet_header+1); diff --git a/bsd/netat/sys_dep.c b/bsd/netat/sys_dep.c index 639ab54da..55b12d8a7 100644 --- a/bsd/netat/sys_dep.c +++ b/bsd/netat/sys_dep.c @@ -63,11 +63,12 @@ int (*sys_ATPgetrsp)() = 0; extern at_state_t at_state; /* global state of AT network */ extern at_ifaddr_t *ifID_home; /* default interface */ +struct ATsocket_args { + int proto; +}; int ATsocket(proc, uap, retval) void *proc; - struct { - int proto; - } *uap; + struct ATsocket_args *uap; int *retval; { int err; @@ -87,14 +88,15 @@ int ATsocket(proc, uap, retval) return err; } +struct ATgetmsg_args { + int fd; + void *ctlptr; + void *datptr; + int *flags; +}; int ATgetmsg(proc, uap, retval) void *proc; - struct { - int fd; - void *ctlptr; - void *datptr; - int *flags; - } *uap; + struct ATgetmsg_args *uap; int *retval; { int err; @@ -116,14 +118,15 @@ int ATgetmsg(proc, uap, retval) return err; } -int ATputmsg(proc, uap, retval) - void *proc; - struct { +struct ATputmsg_args { int fd; void *ctlptr; void *datptr; int flags; - } *uap; +}; +int ATputmsg(proc, uap, retval) + void *proc; + struct ATputmsg_args *uap; int *retval; { int err; @@ -145,14 +148,15 @@ int ATputmsg(proc, uap, retval) return err; } +struct ATPsndreq_args { + int fd; + unsigned char *buf; + int len; + int nowait; +}; int ATPsndreq(proc, uap, retval) void *proc; - struct { - int fd; - unsigned char *buf; - int len; - int nowait; - } *uap; + struct ATPsndreq_args *uap; int *retval; { int err; @@ -174,14 +178,15 @@ int ATPsndreq(proc, uap, retval) return err; } -int ATPsndrsp(proc, uap, retval) - void *proc; - struct { +struct ATPsndrsp_args { int fd; unsigned char *respbuff; int resplen; int datalen; - } *uap; +}; +int ATPsndrsp(proc, uap, retval) + void *proc; + struct ATPsndrsp_args *uap; int *retval; { int err; @@ -203,13 +208,14 @@ int ATPsndrsp(proc, uap, retval) return err; } -int ATPgetreq(proc, uap, retval) - void *proc; - struct { +struct ATPgetreq_args { int fd; unsigned char *buf; int buflen; - } *uap; +}; +int ATPgetreq(proc, uap, retval) + void *proc; + struct ATPgetreq_args *uap; int *retval; { int err; @@ -231,12 +237,13 @@ int ATPgetreq(proc, uap, retval) return err; } -int ATPgetrsp(proc, uap, retval) - void *proc; - struct { +struct ATPgetrsp_args { int fd; unsigned char *bdsp; - } *uap; +}; +int ATPgetrsp(proc, uap, retval) + void *proc; + struct ATPgetrsp_args *uap; int *retval; { int err = 0; @@ -277,9 +284,9 @@ int atalk_openref(gref, retfd, proc) int *retfd; struct proc *proc; { - extern int _ATread(), _ATwrite(),_ATioctl(), _ATselect(), _ATclose(); + extern int _ATread(), _ATwrite(),_ATioctl(), _ATselect(), _ATclose(), _ATkqfilter(); static struct fileops fileops = - {_ATread, _ATwrite, _ATioctl, _ATselect, _ATclose}; + {_ATread, _ATwrite, _ATioctl, _ATselect, _ATclose, _ATkqfilter}; int err, fd; struct file *fp; @@ -324,7 +331,8 @@ struct proc *proc; return EBADF; } } - if ((*grefp = (gref_t *)fp->f_data) == 0) { + *grefp = (gref_t *)fp->f_data; + if (*grefp == 0 || *grefp == (gref_t *)(-1)) { thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); return EBADF; } diff --git a/bsd/netat/sys_glue.c b/bsd/netat/sys_glue.c index c8a0f1149..981282f1e 100644 --- a/bsd/netat/sys_glue.c +++ b/bsd/netat/sys_glue.c @@ -82,7 +82,7 @@ SYSCTL_STRUCT(_net_appletalk, OID_AUTO, debug, CTLFLAG_WR, &dbgBits, dbgBits, "AppleTalk Debug Flags"); volatile int RouterMix = RT_MIX_DEFAULT; /* default for nbr of ppsec */ SYSCTL_INT(_net_appletalk, OID_AUTO, routermix, CTLFLAG_WR, - &RouterMix, 0, "Appletalk RouterMix"); 
+ (int *)&RouterMix, 0, "Appletalk RouterMix"); at_ddp_stats_t at_ddp_stats; /* DDP statistics */ SYSCTL_STRUCT(_net_appletalk, OID_AUTO, ddpstats, CTLFLAG_RD, &at_ddp_stats, at_ddp_stats, "AppleTalk DDP Stats"); @@ -635,6 +635,14 @@ int _ATselect(fp, which, wql, proc) return rc; } +int _ATkqfilter(fp, kn, p) + struct file *fp; + struct knote *kn; + struct proc *p; +{ + return (EOPNOTSUPP); +} + void atalk_putnext(gref, m) gref_t *gref; gbuf_t *m; @@ -925,9 +933,9 @@ int gref_close(gref_t *gref) struct mbuf *m_clattach(extbuf, extfree, extsize, extarg, wait) caddr_t extbuf; - int (*extfree)(); - int extsize; - int extarg; + void (*extfree)(caddr_t , u_int, caddr_t); + u_int extsize; + caddr_t extarg; int wait; { struct mbuf *m; @@ -985,8 +993,9 @@ void atp_delete_free_clusters() */ void m_lgbuf_free(buf, size, arg) - void *buf; - int size, arg; /* not needed, but they're in m_free() */ + caddr_t buf; + u_int size; + caddr_t arg; /* not needed, but they're in m_free() */ { /* FREE(buf, M_MCLUST); - can't free here - called from m_free while under lock */ @@ -1030,7 +1039,7 @@ struct mbuf *m_lgbuf_alloc(size, wait) if (NULL == (m = m_clattach(buf, m_lgbuf_free, size, 0, (wait)? M_WAIT: M_DONTWAIT))) { - m_lgbuf_free(buf); + m_lgbuf_free(buf, 0, 0); return(NULL); } } else { diff --git a/bsd/netat/sysglue.h b/bsd/netat/sysglue.h index 0c7d82bb9..7fb85da1f 100644 --- a/bsd/netat/sysglue.h +++ b/bsd/netat/sysglue.h @@ -103,7 +103,6 @@ typedef struct { * in MacOSX. Need to find a better Error code ###LD */ #define ENOTREADY ESHUTDOWN -#define ENOMSG EOPNOTSUPP #define EPROTO EPROTOTYPE /* T_MPSAFE is used only in atp_open. I suspect it's a diff --git a/bsd/netinet/dhcp_options.c b/bsd/netinet/dhcp_options.c index f6114a058..8dccba9fd 100644 --- a/bsd/netinet/dhcp_options.c +++ b/bsd/netinet/dhcp_options.c @@ -102,7 +102,7 @@ ptrlist_grow(ptrlist_t * list) else if (list->size == list->count) { #ifdef DEBUG printf("doubling %d to %d\n", list->size, list->size * 2); -#endif DEBUG +#endif /* DEBUG */ list->array = my_realloc(list->array, sizeof(*list->array) * list->size, sizeof(*list->array) * list->size * 2); @@ -540,4 +540,4 @@ main() } exit(0); } -#endif TEST_DHCP_OPTIONS +#endif /* TEST_DHCP_OPTIONS */ diff --git a/bsd/netinet/icmp6.h b/bsd/netinet/icmp6.h index 428caa828..0310f21d7 100644 --- a/bsd/netinet/icmp6.h +++ b/bsd/netinet/icmp6.h @@ -658,7 +658,7 @@ struct in6_multi; void icmp6_init __P((void)); void icmp6_paramerror __P((struct mbuf *, int)); void icmp6_error __P((struct mbuf *, int, int, int)); -int icmp6_input __P((struct mbuf **, int *, int)); +int icmp6_input __P((struct mbuf **, int *)); void icmp6_fasttimo __P((void)); void icmp6_reflect __P((struct mbuf *, size_t)); void icmp6_prepare __P((struct mbuf *)); diff --git a/bsd/netinet/icmp_var.h b/bsd/netinet/icmp_var.h index 96a28ce5d..bd03dd9af 100644 --- a/bsd/netinet/icmp_var.h +++ b/bsd/netinet/icmp_var.h @@ -90,13 +90,15 @@ struct icmpstat { #define ICMPCTL_MASKREPL 1 /* allow replies to netmask requests */ #define ICMPCTL_STATS 2 /* statistics (read-only) */ #define ICMPCTL_ICMPLIM 3 -#define ICMPCTL_MAXID 4 +#define ICMPCTL_TIMESTAMP 4 /* allow replies to time stamp requests */ +#define ICMPCTL_MAXID 5 #define ICMPCTL_NAMES { \ { 0, 0 }, \ { "maskrepl", CTLTYPE_INT }, \ { "stats", CTLTYPE_STRUCT }, \ { "icmplim", CTLTYPE_INT }, \ + { "icmptimestamp", CTLTYPE_INT }, \ } #endif /* __APPLE_API_UNSTABLE */ diff --git a/bsd/netinet/if_ether.c b/bsd/netinet/if_ether.c index c5a79397d..a93a69c8c 100644 --- 
a/bsd/netinet/if_ether.c +++ b/bsd/netinet/if_ether.c @@ -405,7 +405,7 @@ arprequest(ac, sip, tip, enaddr) (void)memcpy(ea->arp_tpa, tip, sizeof(ea->arp_tpa)); sa.sa_family = AF_UNSPEC; sa.sa_len = sizeof(sa); - dlil_output(((struct ifnet *)ac)->if_data.default_proto, m, 0, &sa, 0); + dlil_output(ifptodlt(((struct ifnet *)ac), PF_INET), m, 0, &sa, 0); } /* diff --git a/bsd/netinet/igmp.c b/bsd/netinet/igmp.c index 58abc8d35..022ca8c77 100644 --- a/bsd/netinet/igmp.c +++ b/bsd/netinet/igmp.c @@ -144,31 +144,35 @@ static struct router_info * find_rti(ifp) struct ifnet *ifp; { - register struct router_info *rti = Head; - + register struct router_info *rti = Head; + + #if IGMP_DEBUG printf("[igmp.c, _find_rti] --> entering \n"); #endif - while (rti) { - if (rti->rti_ifp == ifp) { + while (rti) { + if (rti->rti_ifp == ifp) { #if IGMP_DEBUG printf("[igmp.c, _find_rti] --> found old entry \n"); #endif - return rti; - } - rti = rti->rti_next; - } - + return rti; + } + rti = rti->rti_next; + } + MALLOC(rti, struct router_info *, sizeof *rti, M_IGMP, M_NOWAIT); - rti->rti_ifp = ifp; - rti->rti_type = IGMP_V2_ROUTER; - rti->rti_time = 0; - rti->rti_next = Head; - Head = rti; + if (rti != NULL) + { + rti->rti_ifp = ifp; + rti->rti_type = IGMP_V2_ROUTER; + rti->rti_time = 0; + rti->rti_next = Head; + Head = rti; + } #if IGMP_DEBUG - printf("[igmp.c, _find_rti] --> created an entry \n"); + if (rti) printf("[igmp.c, _find_rti] --> created an entry \n"); #endif - return rti; + return rti; } void @@ -227,6 +231,10 @@ igmp_input(m, iphlen) if (timer == 0) timer = 1; rti = find_rti(ifp); + if (rti == NULL) { + m_freem(m); + return; + } /* * In the IGMPv2 specification, there are 3 states and a flag. @@ -364,7 +372,7 @@ igmp_input(m, iphlen) rip_input(m, iphlen); } -void +int igmp_joingroup(inm) struct in_multi *inm; { @@ -376,12 +384,14 @@ igmp_joingroup(inm) inm->inm_state = IGMP_OTHERMEMBER; } else { inm->inm_rti = find_rti(inm->inm_ifp); + if (inm->inm_rti == NULL) { splx(s); return ENOMEM; } igmp_sendpkt(inm, inm->inm_rti->rti_type, 0); inm->inm_timer = IGMP_RANDOM_DELAY( IGMP_MAX_HOST_REPORT_DELAY*PR_FASTHZ); inm->inm_state = IGMP_IREPORTEDLAST; igmp_timers_are_running = 1; } splx(s); + return 0; } diff --git a/bsd/netinet/igmp_var.h b/bsd/netinet/igmp_var.h index e73a9c76e..1c30da4cd 100644 --- a/bsd/netinet/igmp_var.h +++ b/bsd/netinet/igmp_var.h @@ -114,7 +114,7 @@ struct igmpstat { void igmp_init __P((void)); void igmp_input __P((struct mbuf *, int)); -void igmp_joingroup __P((struct in_multi *)); +int igmp_joingroup __P((struct in_multi *)); void igmp_leavegroup __P((struct in_multi *)); void igmp_fasttimo __P((void)); void igmp_slowtimo __P((void)); diff --git a/bsd/netinet/in.c b/bsd/netinet/in.c index b26199f21..ac87e8ac5 100644 --- a/bsd/netinet/in.c +++ b/bsd/netinet/in.c @@ -105,11 +105,6 @@ SYSCTL_INT(_net_inet_ip, OID_AUTO, subnets_are_local, CTLFLAG_RW, struct in_multihead in_multihead; /* XXX BSS initialization */ extern void arp_rtrequest(); -extern int ether_detach_inet(struct ifnet *ifp); - -#if INET6 -extern int ip6_auto_on; -#endif /* * Return 1 if an internet address is for a ``local'' host @@ -340,24 +335,15 @@ in_control(so, cmd, data, ifp, p) * Temorary code for protocol attachment XXX */ - if (ifp->if_type == IFT_ETHER) - dl_tag = ether_attach_inet(ifp); - - if (ifp->if_type == IFT_LOOP) - dl_tag = lo_attach_inet(ifp); -#if NFAITH - /* Is this right? */ - if (ifp && ifp->if_type == IFT_FAITH) - dl_tag = faith_attach_inet(ifp); -#endif -#if NGIF - /* Is this right? 
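find_rti() can now return NULL because its MALLOC() uses M_NOWAIT, and the two call sites above show the two unwinding styles: igmp_input() drops the packet, while igmp_joingroup() turns the failure into ENOMEM for in_addmulti() to roll back. The input-side pattern in isolation; input_sketch is a hypothetical name:

#include <sys/mbuf.h>

/* Sketch of the caller-side NULL handling the igmp.c hunks add. */
static void
input_sketch(struct mbuf *m, struct ifnet *ifp)
{
	struct router_info *rti;

	rti = find_rti(ifp);
	if (rti == NULL) {	/* MALLOC(..., M_NOWAIT) failed inside find_rti() */
		m_freem(m);	/* drop the report rather than dereference NULL */
		return;
	}
	/* ... continue IGMP processing with a valid rti ... */
}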
*/ - if (ifp && ifp->if_type == IFT_GIF) - dl_tag = gif_attach_proto_family(ifp, PF_INET); -#endif + /* Generic protocol plumbing */ + + if (error = dlil_plumb_protocol(PF_INET, ifp, &dl_tag)) { + kprintf("in.c: warning can't plumb proto if=%s%d type %d error=%d\n", + ifp->if_name, ifp->if_unit, ifp->if_type, error); + error = 0; /* discard error, can be called with unsupported interfaces */ + } /* End of temp code */ - ifa->ifa_dlt = dl_tag; ifa->ifa_addr = (struct sockaddr *)&ia->ia_addr; ifa->ifa_dstaddr = (struct sockaddr *)&ia->ia_dstaddr; ifa->ifa_netmask = (struct sockaddr *)&ia->ia_sockmask; @@ -379,8 +365,6 @@ return error; if (ifp == 0) return (EADDRNOTAVAIL); - if (strcmp(ifp->if_name, "en")) - return ENODEV; break; case SIOCSIFBRDADDR: @@ -523,11 +507,9 @@ in_control(so, cmd, data, ifp, p) (struct sockaddr_in *) &ifr->ifr_addr, 1)); case SIOCPROTOATTACH: - ether_attach_inet(ifp); -#if INET6 - if (ip6_auto_on) /* FreeBSD compat mode: Acquire linklocal addresses for IPv6 for if */ - in6_if_up(ifp); -#endif + error = dlil_plumb_protocol(PF_INET, ifp, &dl_tag); + if (error) + return(error); break; case SIOCPROTODETACH: @@ -535,17 +517,10 @@ in_control(so, cmd, data, ifp, p) TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) if (ifa->ifa_addr->sa_family == AF_INET) return EBUSY; - error = ether_detach_inet(ifp); + + error = dlil_unplumb_protocol(PF_INET, ifp); if (error) return(error); -#if INET6 - if (ip6_auto_on) { /* if we linked ipv6 addresses to v4, remove them now */ - in6_purgeif(ifp); - error = ether_detach_inet6(ifp); - if (error) - return(error); - } -#endif break; @@ -1062,7 +1037,6 @@ in_ifinit(ifp, ia, sin, scrub) register u_long i = ntohl(sin->sin_addr.s_addr); struct sockaddr_in oldaddr; int s = splimp(), flags = RTF_UP, error; - u_long dl_tag; oldaddr = ia->ia_addr; ia->ia_addr = *sin; @@ -1243,7 +1217,13 @@ in_addmulti(ap, ifp) /* * Let IGMP know that we have joined a new IP multicast group. */ - igmp_joingroup(inm); + error = igmp_joingroup(inm); + if (error) { + if_delmultiaddr(ifma); + LIST_REMOVE(inm, inm_link); + _FREE(inm, M_IPMADDR); + inm = NULL; + } splx(s); return (inm); } @@ -1260,7 +1240,7 @@ in_delmulti(inm) /* We intentionally do this a bit differently than BSD */ - if (ifma->ifma_refcount == 1) { + if (ifma && ifma->ifma_refcount == 1) { /* * No remaining claims to this record; let IGMP know that * we are leaving the multicast group. @@ -1271,6 +1251,7 @@ in_delmulti(inm) FREE(inm, M_IPMADDR); } /* XXX - should be separate API for when we have an ifma? 
*/ - if_delmulti(ifma->ifma_ifp, ifma->ifma_addr); + if (ifma) + if_delmultiaddr(ifma); splx(s); } diff --git a/bsd/netinet/in.h b/bsd/netinet/in.h index 1634fe316..ebb2bb195 100644 --- a/bsd/netinet/in.h +++ b/bsd/netinet/in.h @@ -363,6 +363,7 @@ struct ip_opts { #ifdef __APPLE__ #define IP_STRIPHDR 23 /* bool: drop receive of raw IP header */ #endif +#define IP_RECVTTL 24 /* bool; receive reception TTL w/dgram */ #define IP_FW_ADD 40 /* add a firewall rule to chain */ diff --git a/bsd/netinet/in_bootp.c b/bsd/netinet/in_bootp.c index 041d9e5ca..6a3b034e8 100644 --- a/bsd/netinet/in_bootp.c +++ b/bsd/netinet/in_bootp.c @@ -71,9 +71,9 @@ #ifdef BOOTP_DEBUG #define dprintf(x) printf x; -#else BOOTP_DEBUG +#else /* !BOOTP_DEBUG */ #define dprintf(x) -#endif BOOTP_DEBUG +#endif /* BOOTP_DEBUG */ /* ip address formatting macros */ #define IP_FORMAT "%d.%d.%d.%d" @@ -228,7 +228,7 @@ link_print(struct sockaddr_dl * dl_p) " slen %d addr ", dl_p->sdl_len, dl_p->sdl_index, dl_p->sdl_family, dl_p->sdl_type, dl_p->sdl_nlen, dl_p->sdl_alen, dl_p->sdl_slen); -#endif 0 +#endif for (i = 0; i < dl_p->sdl_alen; i++) printf("%s%x", i ? ":" : "", (link_address(dl_p))[i]); @@ -272,7 +272,7 @@ send_bootp_request(struct ifnet * ifp, struct socket * so, sin.sin_addr.s_addr = INADDR_BROADCAST; m = ip_pkt_to_mbuf((caddr_t)pkt, sizeof(*pkt)); - return (dlil_output(ifp->if_data.default_proto, m, 0, (struct sockaddr *)&sin, 0)); + return (dlil_output(ifptodlt(ifp, PF_INET), m, 0, (struct sockaddr *)&sin, 0)); } /* @@ -451,7 +451,7 @@ bootp_loop(struct socket * so, struct ifnet * ifp, int max_try, #ifdef BOOTP_DEBUG print_reply_short(reply, n); -#endif BOOTP_DEBUG +#endif /* BOOTP_DEBUG */ (void)dhcpol_parse_packet(&options, (struct dhcp *)reply, n, NULL); rating = rate_packet(reply, n, &options); diff --git a/bsd/netinet/in_pcb.c b/bsd/netinet/in_pcb.c index 3e1a98d30..ef52ab61f 100644 --- a/bsd/netinet/in_pcb.c +++ b/bsd/netinet/in_pcb.c @@ -105,6 +105,8 @@ extern int ipsec_bypass; #endif +extern u_long route_generation; + #define DBG_FNC_PCB_LOOKUP NETDBG_CODE(DBG_NETTCP, (6 << 8)) #define DBG_FNC_PCB_HLOOKUP NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1)) @@ -451,12 +453,16 @@ in_pcbladdr(inp, nam, plocal_sin) /* * If route is known or can be allocated now, * our src addr is taken from the i/f, else punt. + * Note that we should check the address family of the cached + * destination, in case of sharing the cache with IPv6. 
*/ ro = &inp->inp_route; if (ro->ro_rt && - (satosin(&ro->ro_dst)->sin_addr.s_addr != + (ro->ro_dst.sa_family != AF_INET || + satosin(&ro->ro_dst)->sin_addr.s_addr != sin->sin_addr.s_addr || - inp->inp_socket->so_options & SO_DONTROUTE)) { + inp->inp_socket->so_options & SO_DONTROUTE || + ro->ro_rt->generation_id != route_generation)) { rtfree(ro->ro_rt); ro->ro_rt = (struct rtentry *)0; } @@ -464,6 +470,7 @@ in_pcbladdr(inp, nam, plocal_sin) (ro->ro_rt == (struct rtentry *)0 || ro->ro_rt->rt_ifp == (struct ifnet *)0)) { /* No route yet, so try to acquire one */ + bzero(&ro->ro_dst, sizeof(struct sockaddr_in)); ro->ro_dst.sa_family = AF_INET; ro->ro_dst.sa_len = sizeof(struct sockaddr_in); ((struct sockaddr_in *) &ro->ro_dst)->sin_addr = @@ -557,6 +564,7 @@ in_pcbconnect(inp, nam, p) return (error); } inp->inp_laddr = ifaddr->sin_addr; + inp->inp_flags |= INP_INADDR_ANY; } inp->inp_faddr = sin->sin_addr; inp->inp_fport = sin->sin_port; @@ -614,8 +622,10 @@ in_pcbdetach(inp) rt->rt_gateway, rt_mask(rt), rt->rt_flags, (struct rtentry **)0); } - else + else { rtfree(rt); + inp->inp_route.ro_rt = 0; + } } ip_freemoptions(inp->inp_moptions); inp->inp_vflag = 0; @@ -1115,7 +1125,7 @@ in_pcbremlists(inp) LIST_REMOVE(inp, inp_hash); LIST_REMOVE(inp, inp_portlist); - if (LIST_FIRST(&phd->phd_pcblist) == NULL) { + if (phd != NULL && (LIST_FIRST(&phd->phd_pcblist) == NULL)) { LIST_REMOVE(phd, phd_hash); FREE(phd, M_PCB); } diff --git a/bsd/netinet/in_pcb.h b/bsd/netinet/in_pcb.h index c01f8d8b6..1a8377df6 100644 --- a/bsd/netinet/in_pcb.h +++ b/bsd/netinet/in_pcb.h @@ -103,8 +103,8 @@ struct icmp6_filter; struct inpcb { LIST_ENTRY(inpcb) inp_hash; /* hash list */ - struct in_addr inp_faddr; /* foreign host table entry */ - struct in_addr inp_laddr; /* local host table entry */ + struct in_addr reserved1; /* APPLE reserved: inp_faddr defined in protcol indep. part */ + struct in_addr reserved2; /* APPLE reserved */ u_short inp_fport; /* foreign port */ u_short inp_lport; /* local port */ LIST_ENTRY(inpcb) inp_list; /* list for all PCBs of this proto */ @@ -273,7 +273,10 @@ struct inpcbinfo { /* XXX documentation, prefixes */ #ifdef __APPLE__ #define INP_STRIPHDR 0x200 /* Strip headers in raw_ip, for OT support */ #endif -#define INP_FAITH 0x400 /* accept FAITH'ed connections */ +#define INP_FAITH 0x400 /* accept FAITH'ed connections */ +#define INP_INADDR_ANY 0x800 /* local address wasn't specified */ + +#define INP_RECVTTL 0x1000 #define IN6P_IPV6_V6ONLY 0x008000 /* restrict AF_INET6 socket for v6 */ @@ -290,7 +293,7 @@ struct inpcbinfo { /* XXX documentation, prefixes */ INP_RECVIF|\ IN6P_PKTINFO|IN6P_HOPLIMIT|IN6P_HOPOPTS|\ IN6P_DSTOPTS|IN6P_RTHDR|IN6P_RTHDRDSTOPTS|\ - IN6P_AUTOFLOWLABEL) + IN6P_AUTOFLOWLABEL|INP_RECVTTL) #define INP_UNMAPPABLEOPTS (IN6P_HOPOPTS|IN6P_DSTOPTS|IN6P_RTHDR|\ IN6P_AUTOFLOWLABEL) diff --git a/bsd/netinet/in_rmx.c b/bsd/netinet/in_rmx.c index bd8e20902..b3143e11d 100644 --- a/bsd/netinet/in_rmx.c +++ b/bsd/netinet/in_rmx.c @@ -223,6 +223,9 @@ SYSCTL_INT(_net_inet_ip, OID_AUTO, check_route_selfref, CTLFLAG_RW, &check_routeselfref , 0, ""); #endif +__private_extern__ int use_routegenid = 1; +SYSCTL_INT(_net_inet_ip, OID_AUTO, use_route_genid, CTLFLAG_RW, + &use_routegenid , 0, ""); /* * On last reference drop, mark the route as belong to us so that it can be diff --git a/bsd/netinet/in_var.h b/bsd/netinet/in_var.h index 16baa2c15..4b62eda82 100644 --- a/bsd/netinet/in_var.h +++ b/bsd/netinet/in_var.h @@ -108,7 +108,7 @@ struct in_aliasreq { * Event data, internet style. 
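in_pcbladdr() above now applies a four-part validity test to a cached route, and the same test reappears in ip_rtaddr(), ip_forward(), and ip_output() later in this patch: the route must be up, of the right address family (the cache can be shared with IPv6), for the same destination, and minted under the current route_generation (the new net.inet.ip.use_route_genid knob governs whether generations advance). A hypothetical helper consolidating the test; the generation_id field on struct rtentry is assumed from the surrounding code:

#include <net/route.h>
#include <netinet/in.h>

extern u_long route_generation;

/* Sketch: invalidate a cached IPv4 route unless it is still usable. */
static void
revalidate_route(struct route *ro, struct in_addr dst)
{
	if (ro->ro_rt != NULL &&
	    ((ro->ro_rt->rt_flags & RTF_UP) == 0 ||
	     ro->ro_dst.sa_family != AF_INET ||
	     ((struct sockaddr_in *)&ro->ro_dst)->sin_addr.s_addr != dst.s_addr ||
	     ro->ro_rt->generation_id != route_generation)) {
		rtfree(ro->ro_rt);	/* stale: force the caller to rtalloc() anew */
		ro->ro_rt = NULL;
	}
}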
*/ struct kev_in_data { - struct net_event_data link_data; + struct net_event_data link_data; struct in_addr ia_addr; u_long ia_net; /* network number of interface */ u_long ia_netmask; /* mask of net part */ diff --git a/bsd/netinet/ip_divert.c b/bsd/netinet/ip_divert.c index 512c65ca7..e68def21d 100644 --- a/bsd/netinet/ip_divert.c +++ b/bsd/netinet/ip_divert.c @@ -554,7 +554,9 @@ div_pcblist SYSCTL_HANDLER_ARGS return error; } +#ifndef __APPLE__ #warning Fix SYSCTL net_inet_divert +#endif #if 0 SYSCTL_DECL(_net_inet_divert); SYSCTL_PROC(_net_inet_divert, OID_AUTO, pcblist, CTLFLAG_RD, 0, 0, diff --git a/bsd/netinet/ip_flow.c b/bsd/netinet/ip_flow.c index 70233a1d5..66e8459e8 100644 --- a/bsd/netinet/ip_flow.c +++ b/bsd/netinet/ip_flow.c @@ -197,7 +197,7 @@ ipflow_fastforward( dst = &ipf->ipf_ro.ro_dst; #ifdef __APPLE__ /* Not sure the rt_dlt is valid here !! XXX */ - if ((error = dlil_output((u_long)rt->rt_dlt, m, (caddr_t) rt, dst, 0)) != 0) { + if ((error = dlil_output(ifptodlt(rt->rt_ifp, PF_INET), m, (caddr_t) rt, dst, 0)) != 0) { #else if ((error = (*rt->rt_ifp->if_output)(rt->rt_ifp, m, dst, rt)) != 0) { diff --git a/bsd/netinet/ip_icmp.c b/bsd/netinet/ip_icmp.c index fb9424877..5ed68b7f5 100644 --- a/bsd/netinet/ip_icmp.c +++ b/bsd/netinet/ip_icmp.c @@ -102,6 +102,10 @@ static int icmpmaskrepl = 0; SYSCTL_INT(_net_inet_icmp, ICMPCTL_MASKREPL, maskrepl, CTLFLAG_RW, &icmpmaskrepl, 0, ""); +static int icmptimestamp = 0; +SYSCTL_INT(_net_inet_icmp, ICMPCTL_TIMESTAMP, timestamp, CTLFLAG_RW, + &icmptimestamp, 0, ""); + static int drop_redirect = 0; SYSCTL_INT(_net_inet_icmp, OID_AUTO, drop_redirect, CTLFLAG_RW, &drop_redirect, 0, ""); @@ -117,7 +121,7 @@ SYSCTL_INT(_net_inet_icmp, OID_AUTO, log_redirect, CTLFLAG_RW, * variable content is -1 and read-only. */ -static int icmplim = 100; +static int icmplim = 250; SYSCTL_INT(_net_inet_icmp, ICMPCTL_ICMPLIM, icmplim, CTLFLAG_RW, &icmplim, 0, ""); #else @@ -483,6 +487,10 @@ icmp_input(m, hlen) goto reflect; case ICMP_TSTAMP: + + if (icmptimestamp == 0) + break; + if (!icmpbmcastecho && (m->m_flags & (M_MCAST | M_BCAST)) != 0) { icmpstat.icps_bmcasttstamp++; @@ -1011,6 +1019,7 @@ icmp_dgram_ctloutput(struct socket *so, struct sockopt *sopt) case IP_FAITH: #endif case IP_STRIPHDR: + case IP_RECVTTL: error = rip_ctloutput(so, sopt); break; diff --git a/bsd/netinet/ip_input.c b/bsd/netinet/ip_input.c index bd5edb4fb..424c4445d 100644 --- a/bsd/netinet/ip_input.c +++ b/bsd/netinet/ip_input.c @@ -286,6 +286,9 @@ void ipintr __P((void)); extern u_short ip_id; #endif +extern u_long route_generation; +extern int apple_hwcksum_rx; + /* * IP initialization: fill in IP protocol switch table. * All protocols not implemented in kernel go to raw IP protocol handler. 
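The ip_icmp.c hunk above makes timestamp replies opt-in: ICMP_TSTAMP requests are dropped unless the new net.inet.icmp.timestamp sysctl (backed by icmptimestamp, default 0) is set, since timestamp replies disclose clock information. From user space the knob would be flipped like this; a sketch, with the OID string following from the SYSCTL_INT declaration above:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int on = 1;

	/* Enable ICMP timestamp replies; requires privilege. */
	if (sysctlbyname("net.inet.icmp.timestamp", NULL, NULL,
	    &on, sizeof(on)) == -1) {
		perror("sysctlbyname net.inet.icmp.timestamp");
		return 1;
	}
	return 0;
}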
@@ -363,7 +366,7 @@ ip_input(struct mbuf *m) u_int16_t divert_cookie; /* firewall cookie */ struct in_addr pkt_dst; #if IPDIVERT - u_int32_t divert_info = 0; /* packet divert/tee info */ + u_int16_t divert_info = 0; /* packet divert/tee info */ #endif struct ip_fw_chain *rule = NULL; @@ -450,11 +453,9 @@ ip_input(struct mbuf *m) goto bad; } } - if (m->m_pkthdr.rcvif->if_hwassist == 0) - m->m_pkthdr.csum_flags = 0; - - if ((m->m_pkthdr.csum_flags & CSUM_TCP_SUM16) && ip->ip_p != IPPROTO_TCP) - m->m_pkthdr.csum_flags = 0; + if ((m->m_pkthdr.rcvif->if_hwassist == 0) || (apple_hwcksum_rx == 0) || + ((m->m_pkthdr.csum_flags & CSUM_TCP_SUM16) && ip->ip_p != IPPROTO_TCP)) + m->m_pkthdr.csum_flags = 0; /* invalidate HW generated checksum flags */ if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) { sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID); @@ -658,7 +659,7 @@ pass: * ether_output() with the loopback into the stack for * SIMPLEX interfaces handled by ether_output(). */ - if (ia->ia_ifp == m->m_pkthdr.rcvif && + if ((!checkif || ia->ia_ifp == m->m_pkthdr.rcvif) && ia->ia_ifp && ia->ia_ifp->if_flags & IFF_BROADCAST) { if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == pkt_dst.s_addr) @@ -685,7 +686,7 @@ pass: } /* - * The process-level routing demon needs to receive + * The process-level routing daemon needs to receive * all multicast IGMP packets, whether or not this * host belongs to their destination groups. */ @@ -836,6 +837,9 @@ found: goto bad; } m->m_flags |= M_FRAG; + } else { + /* Clear the flag in case packet comes from loopback */ + m->m_flags &= ~M_FRAG; } ip->ip_off <<= 3; @@ -1567,7 +1571,8 @@ ip_rtaddr(dst) sin = (struct sockaddr_in *) &ipforward_rt.ro_dst; - if (ipforward_rt.ro_rt == 0 || dst.s_addr != sin->sin_addr.s_addr) { + if (ipforward_rt.ro_rt == 0 || dst.s_addr != sin->sin_addr.s_addr || + ipforward_rt.ro_rt->generation_id != route_generation) { if (ipforward_rt.ro_rt) { rtfree(ipforward_rt.ro_rt); ipforward_rt.ro_rt = 0; @@ -1769,7 +1774,8 @@ ip_forward(m, srcrt) sin = (struct sockaddr_in *)&ipforward_rt.ro_dst; if ((rt = ipforward_rt.ro_rt) == 0 || - ip->ip_dst.s_addr != sin->sin_addr.s_addr) { + ip->ip_dst.s_addr != sin->sin_addr.s_addr || + ipforward_rt.ro_rt->generation_id != route_generation) { if (ipforward_rt.ro_rt) { rtfree(ipforward_rt.ro_rt); ipforward_rt.ro_rt = 0; @@ -2033,6 +2039,10 @@ makedummy: if (*mp) mp = &(*mp)->m_next; } + if (inp->inp_flags & INP_RECVTTL) { + *mp = sbcreatecontrol((caddr_t)&ip->ip_ttl, sizeof(ip->ip_ttl), IP_RECVTTL, IPPROTO_IP); + if (*mp) mp = &(*mp)->m_next; + } } int diff --git a/bsd/netinet/ip_output.c b/bsd/netinet/ip_output.c index e365ce7f1..39daf8099 100644 --- a/bsd/netinet/ip_output.c +++ b/bsd/netinet/ip_output.c @@ -87,6 +87,7 @@ #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIP, 1) #define DBG_LAYER_END NETDBG_CODE(DBG_NETIP, 3) #define DBG_FNC_IP_OUTPUT NETDBG_CODE(DBG_NETIP, (1 << 8) | 1) +#define DBG_FNC_IPSEC4_OUTPUT NETDBG_CODE(DBG_NETIP, (2 << 8) | 1) #if vax @@ -134,6 +135,9 @@ static int ip_pcbopts __P((int, struct mbuf **, struct mbuf *)); static int ip_setmoptions __P((struct sockopt *, struct ip_moptions **)); +int ip_createmoptions(struct ip_moptions **imop); +int ip_addmembership(struct ip_moptions *imo, struct ip_mreq *mreq); +int ip_dropmembership(struct ip_moptions *imo, struct ip_mreq *mreq); int ip_optcopy __P((struct ip *, struct ip *)); extern int (*fr_checkp) __P((struct ip *, int, struct ifnet *, int, struct mbuf **)); #ifdef __APPLE__ @@ -144,6 +148,7 @@ static u_long lo_dl_tag = 0; void 
in_delayed_cksum(struct mbuf *m); extern int apple_hwcksum_tx; +extern u_long route_generation; extern struct protosw inetsw[]; @@ -169,12 +174,11 @@ ip_output(m0, opt, ro, flags, imo) struct ip_moptions *imo; { struct ip *ip, *mhip; - struct ifnet *ifp; - u_long dl_tag; + struct ifnet *ifp = NULL; struct mbuf *m = m0; int hlen = sizeof (struct ip); int len, off, error = 0; - struct sockaddr_in *dst; + struct sockaddr_in *dst = NULL; struct in_ifaddr *ia = NULL; int isbroadcast, sw_csum; #if IPSEC @@ -216,10 +220,10 @@ ip_output(m0, opt, ro, flags, imo) imo = NULL ; dst = ((struct dn_pkt *)m)->dn_dst ; ifp = ((struct dn_pkt *)m)->ifp ; - flags = ((struct dn_pkt *)m)->flags ; + flags = ((struct dn_pkt *)m)->flags; m0 = m = m->m_next ; #if IPSEC - if (ipsec_bypass == 0) { + if (ipsec_bypass == 0 && (flags & IP_NOIPSEC) == 0) { so = ipsec_getsocket(m); (void)ipsec_setsocket(m, NULL); } @@ -233,7 +237,7 @@ ip_output(m0, opt, ro, flags, imo) rule = NULL ; #endif #if IPSEC - if (ipsec_bypass == 0) { + if (ipsec_bypass == 0 && (flags & IP_NOIPSEC) == 0) { so = ipsec_getsocket(m); (void)ipsec_setsocket(m, NULL); } @@ -271,17 +275,24 @@ ip_output(m0, opt, ro, flags, imo) ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len); dst = (struct sockaddr_in *)&ro->ro_dst; + /* * If there is a cached route, * check that it is to the same destination * and is still up. If not, free it and try again. + * The address family should also be checked in case of sharing the + * cache with IPv6. */ + if (ro->ro_rt && ((ro->ro_rt->rt_flags & RTF_UP) == 0 || - dst->sin_addr.s_addr != ip->ip_dst.s_addr)) { + dst->sin_family != AF_INET || + dst->sin_addr.s_addr != ip->ip_dst.s_addr || + ro->ro_rt->generation_id != route_generation) ) { rtfree(ro->ro_rt); ro->ro_rt = (struct rtentry *)0; } if (ro->ro_rt == 0) { + bzero(dst, sizeof(*dst)); dst->sin_family = AF_INET; dst->sin_len = sizeof(*dst); dst->sin_addr = ip->ip_dst; @@ -300,7 +311,6 @@ ip_output(m0, opt, ro, flags, imo) goto bad; } ifp = ia->ia_ifp; - dl_tag = ia->ia_ifa.ifa_dlt; ip->ip_ttl = 1; isbroadcast = in_broadcast(dst->sin_addr, ifp); } else { @@ -322,7 +332,6 @@ ip_output(m0, opt, ro, flags, imo) } ia = ifatoia(ro->ro_rt->rt_ifa); ifp = ro->ro_rt->rt_ifp; - dl_tag = ro->ro_rt->rt_dlt; ro->ro_rt->rt_use++; if (ro->ro_rt->rt_flags & RTF_GATEWAY) dst = (struct sockaddr_in *)ro->ro_rt->rt_gateway; @@ -345,16 +354,16 @@ ip_output(m0, opt, ro, flags, imo) * See if the caller provided any multicast options */ if (imo != NULL) { - ip->ip_ttl = imo->imo_multicast_ttl; + if ((flags & IP_RAWOUTPUT) == 0) ip->ip_ttl = imo->imo_multicast_ttl; if (imo->imo_multicast_ifp != NULL) { ifp = imo->imo_multicast_ifp; - dl_tag = ifp->if_data.default_proto; } - if (imo->imo_multicast_vif != -1) + if (imo->imo_multicast_vif != -1 && + ((flags & IP_RAWOUTPUT) == 0 || ip->ip_src.s_addr == INADDR_ANY)) ip->ip_src.s_addr = - ip_mcast_src(imo->imo_multicast_vif); + ip_mcast_src(imo->imo_multicast_vif); } else - ip->ip_ttl = IP_DEFAULT_MULTICAST_TTL; + if ((flags & IP_RAWOUTPUT) == 0) ip->ip_ttl = IP_DEFAULT_MULTICAST_TTL; /* * Confirm that the outgoing interface supports multicast. 
*/ @@ -375,8 +384,13 @@ ip_output(m0, opt, ro, flags, imo) TAILQ_FOREACH(ia1, &in_ifaddrhead, ia_link) if (ia1->ia_ifp == ifp) { ip->ip_src = IA_SIN(ia1)->sin_addr; + break; } + if (ip->ip_src.s_addr == INADDR_ANY) { + error = ENETUNREACH; + goto bad; + } } IN_LOOKUP_MULTI(ip->ip_dst, ifp, inm); @@ -499,9 +513,11 @@ sendit: #if IPSEC /* temporary for testing only: bypass ipsec alltogether */ - if (ipsec_bypass != 0) + if (ipsec_bypass != 0 || (flags & IP_NOIPSEC) != 0) goto skip_ipsec; + KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_START, 0,0,0,0,0); + /* get SP for this packet */ if (so == NULL) sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND, flags, &error); @@ -510,6 +526,7 @@ sendit: if (sp == NULL) { ipsecstat.out_inval++; + KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); goto bad; } @@ -522,17 +539,20 @@ sendit: * This packet is just discarded. */ ipsecstat.out_polvio++; + KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END, 1,0,0,0,0); goto bad; case IPSEC_POLICY_BYPASS: case IPSEC_POLICY_NONE: /* no need to do IPsec. */ + KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END, 2,0,0,0,0); goto skip_ipsec; case IPSEC_POLICY_IPSEC: if (sp->req == NULL) { /* acquire a policy */ error = key_spdacquire(sp); + KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END, 3,0,0,0,0); goto bad; } break; @@ -568,7 +588,8 @@ sendit: error = ipsec4_output(&state, sp, flags); - m = state.m; + m0 = m = state.m; + if (flags & IP_ROUTETOIF) { /* * if we have tunnel mode SA, we may need to ignore @@ -580,6 +601,7 @@ sendit: } } else ro = state.ro; + dst = (struct sockaddr_in *)state.dst; if (error) { /* mbuf is already reclaimed in ipsec4_output. */ @@ -599,33 +621,48 @@ sendit: error = 0; break; } + KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END, 4,0,0,0,0); goto bad; } } /* be sure to update variables that are affected by ipsec4_output() */ ip = mtod(m, struct ip *); + #ifdef _IP_VHL hlen = IP_VHL_HL(ip->ip_vhl) << 2; #else hlen = ip->ip_hl << 2; #endif + /* Check that there wasn't a route change and src is still valid */ + + if (ro->ro_rt->generation_id != route_generation) { + if (ifa_foraddr(ip->ip_src.s_addr) == NULL && ((flags & (IP_ROUTETOIF | IP_FORWARDING)) == 0)) { + error = EADDRNOTAVAIL; + KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END, 5,0,0,0,0); + goto bad; + } + rtfree(ro->ro_rt); + ro->ro_rt = NULL; + } + if (ro->ro_rt == NULL) { if ((flags & IP_ROUTETOIF) == 0) { printf("ip_output: " "can't update route after IPsec processing\n"); - error = EHOSTUNREACH; /*XXX*/ + error = EHOSTUNREACH; /*XXX*/ + KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END, 6,0,0,0,0); goto bad; } } else { ia = ifatoia(ro->ro_rt->rt_ifa); ifp = ro->ro_rt->rt_ifp; - dl_tag = ia->ia_ifa.ifa_dlt; } /* make it flipped, again. */ NTOHS(ip->ip_len); NTOHS(ip->ip_off); + KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END, 7,0xff,0xff,0xff,0xff); skip_ipsec: #endif /*IPSEC*/ @@ -641,7 +678,7 @@ skip_ipsec: if ((error = (*fr_checkp)(ip, hlen, ifp, 1, &m1)) || !m1) goto done; - ip = mtod(m = m1, struct ip *); + ip = mtod(m0 = m = m1, struct ip *); } /* @@ -666,6 +703,7 @@ skip_ipsec: * unsupported rules), but better play safe and drop * packets in case of doubt. 
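The KERNEL_DEBUG calls threaded through the IPsec branch above follow the kdebug convention used elsewhere in ip_output.c: one START event on entry and one END event per exit path, with a small code in the first argument so a trace shows which branch fired. Schematically, assuming the kdebug macros available in this file; DBG_FNC_MY_FUNC and do_work are hypothetical:

/* Sketch of kdebug bracketing in the style of DBG_FNC_IPSEC4_OUTPUT. */
#define DBG_FNC_MY_FUNC NETDBG_CODE(DBG_NETIP, (9 << 8) | 1)

static int do_work(void);	/* stand-in for the traced body */

static int
traced_sketch(void)
{
	int error;

	KERNEL_DEBUG(DBG_FNC_MY_FUNC | DBG_FUNC_START, 0, 0, 0, 0, 0);
	error = do_work();
	/* distinct arg1 values distinguish exit paths in the trace */
	KERNEL_DEBUG(DBG_FNC_MY_FUNC | DBG_FUNC_END, error ? 1 : 0, 0, 0, 0, 0);
	return error;
}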
*/ + m0 = m; if ( (off & IP_FW_PORT_DENY_FLAG) || m == NULL) { if (m) m_freem(m); @@ -718,7 +756,7 @@ skip_ipsec: /* If 'tee', continue with original packet */ if (clone != NULL) { - m = clone; + m0 = m = clone; ip = mtod(m, struct ip *); goto pass; } @@ -778,7 +816,7 @@ skip_ipsec: if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; - m0->m_pkthdr.csum_data = 0xffff; + m->m_pkthdr.csum_data = 0xffff; } m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; @@ -806,7 +844,6 @@ skip_ipsec: ia = ifatoia(ro_fwd->ro_rt->rt_ifa); ifp = ro_fwd->ro_rt->rt_ifp; - dl_tag = ro_fwd->ro_rt->rt_dlt; ro_fwd->ro_rt->rt_use++; if (ro_fwd->ro_rt->rt_flags & RTF_GATEWAY) dst = (struct sockaddr_in *)ro_fwd->ro_rt->rt_gateway; @@ -895,11 +932,11 @@ pass: #if IPSEC /* clean ipsec history once it goes out of the node */ - if (ipsec_bypass == 0) + if (ipsec_bypass == 0 && (flags & IP_NOIPSEC) == 0) ipsec_delaux(m); #endif #if __APPLE__ - error = dlil_output(dl_tag, m, (void *) ro->ro_rt, + error = dlil_output(ifptodlt(ifp, PF_INET), m, (void *) ro->ro_rt, (struct sockaddr *)dst, 0); #else error = (*ifp->if_output)(ifp, m, @@ -1032,7 +1069,7 @@ sendorfree: m->m_nextpkt = 0; #if IPSEC /* clean ipsec history once it goes out of the node */ - if (ipsec_bypass == 0) + if (ipsec_bypass == 0 && (flags & IP_NOIPSEC) == 0) ipsec_delaux(m); #endif if (error == 0) { @@ -1045,7 +1082,7 @@ sendorfree: #endif #if __APPLE__ - error = dlil_output(dl_tag, m, (void *) ro->ro_rt, + error = dlil_output(ifptodlt(ifp, PF_INET), m, (void *) ro->ro_rt, (struct sockaddr *)dst, 0); #else error = (*ifp->if_output)(ifp, m, @@ -1060,7 +1097,7 @@ sendorfree: } done: #if IPSEC - if (ipsec_bypass == 0) { + if (ipsec_bypass == 0 && (flags & IP_NOIPSEC) == 0) { if (ro == &iproute && ro->ro_rt) { rtfree(ro->ro_rt); ro->ro_rt = NULL; @@ -1255,6 +1292,7 @@ ip_ctloutput(so, sopt) case IP_RECVRETOPTS: case IP_RECVDSTADDR: case IP_RECVIF: + case IP_RECVTTL: #if defined(NFAITH) && NFAITH > 0 case IP_FAITH: #endif @@ -1293,6 +1331,10 @@ ip_ctloutput(so, sopt) OPTSET(INP_RECVIF); break; + case IP_RECVTTL: + OPTSET(INP_RECVTTL); + break; + #if defined(NFAITH) && NFAITH > 0 case IP_FAITH: OPTSET(INP_FAITH); @@ -1391,6 +1433,7 @@ ip_ctloutput(so, sopt) case IP_RECVRETOPTS: case IP_RECVDSTADDR: case IP_RECVIF: + case IP_RECVTTL: case IP_PORTRANGE: #if defined(NFAITH) && NFAITH > 0 case IP_FAITH: @@ -1423,6 +1466,10 @@ ip_ctloutput(so, sopt) optval = OPTBIT(INP_RECVIF); break; + case IP_RECVTTL: + optval = OPTBIT(INP_RECVTTL); + break; + case IP_PORTRANGE: if (inp->inp_flags & INP_HIGHPORT) optval = IP_PORTRANGE_HIGH; @@ -1632,8 +1679,6 @@ ip_setmoptions(sopt, imop) struct ip_mreq mreq; struct ifnet *ifp = NULL; struct ip_moptions *imo = *imop; - struct route ro; - struct sockaddr_in *dst; int ifindex; int s; @@ -1642,18 +1687,10 @@ ip_setmoptions(sopt, imop) * No multicast option buffer attached to the pcb; * allocate one and initialize to default values. 
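IP_RECVTTL, added in this patch, follows the IP_RECVIF model: setting the boolean option flags the pcb with INP_RECVTTL, and ip_input() then attaches the datagram's TTL as ancillary data (see the sbcreatecontrol() call earlier). A user-space sketch of consuming it; the port number is arbitrary and error checks are abbreviated:

#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <string.h>
#include <stdio.h>

int
main(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, 0), on = 1;
	char data[2048], cbuf[256];
	struct sockaddr_in sin;
	struct iovec iov = { data, sizeof(data) };
	struct msghdr msg;
	struct cmsghdr *cm;

	setsockopt(s, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));

	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(9999);		/* arbitrary test port */
	bind(s, (struct sockaddr *)&sin, sizeof(sin));

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	/* the kernel posts the TTL as an IPPROTO_IP/IP_RECVTTL cmsg */
	if (recvmsg(s, &msg, 0) >= 0)
		for (cm = CMSG_FIRSTHDR(&msg); cm != NULL; cm = CMSG_NXTHDR(&msg, cm))
			if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVTTL)
				printf("ttl=%u\n", *(unsigned char *)CMSG_DATA(cm));
	return 0;
}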
*/ - imo = (struct ip_moptions*) _MALLOC(sizeof(*imo), M_IPMOPTS, - M_WAITOK); - - if (imo == NULL) - return (ENOBUFS); - *imop = imo; - imo->imo_multicast_ifp = NULL; - imo->imo_multicast_addr.s_addr = INADDR_ANY; - imo->imo_multicast_vif = -1; - imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL; - imo->imo_multicast_loop = IP_DEFAULT_MULTICAST_LOOP; - imo->imo_num_memberships = 0; + error = ip_createmoptions(imop); + if (error != 0) + return error; + imo = *imop; } switch (sopt->sopt_name) { @@ -1766,78 +1803,8 @@ ip_setmoptions(sopt, imop) error = sooptcopyin(sopt, &mreq, sizeof mreq, sizeof mreq); if (error) break; - - if (!IN_MULTICAST(ntohl(mreq.imr_multiaddr.s_addr))) { - error = EINVAL; - break; - } - s = splimp(); - /* - * If no interface address was provided, use the interface of - * the route to the given multicast address. - */ - if (mreq.imr_interface.s_addr == INADDR_ANY) { - bzero((caddr_t)&ro, sizeof(ro)); - dst = (struct sockaddr_in *)&ro.ro_dst; - dst->sin_len = sizeof(*dst); - dst->sin_family = AF_INET; - dst->sin_addr = mreq.imr_multiaddr; - rtalloc(&ro); - if (ro.ro_rt != NULL) { - ifp = ro.ro_rt->rt_ifp; - rtfree(ro.ro_rt); - } - else { - /* If there's no default route, try using loopback */ - mreq.imr_interface.s_addr = INADDR_LOOPBACK; - } - } - if (ifp == NULL) { - ifp = ip_multicast_if(&mreq.imr_interface, NULL); - } - - /* - * See if we found an interface, and confirm that it - * supports multicast. - */ - if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) { - error = EADDRNOTAVAIL; - splx(s); - break; - } - /* - * See if the membership already exists or if all the - * membership slots are full. - */ - for (i = 0; i < imo->imo_num_memberships; ++i) { - if (imo->imo_membership[i]->inm_ifp == ifp && - imo->imo_membership[i]->inm_addr.s_addr - == mreq.imr_multiaddr.s_addr) - break; - } - if (i < imo->imo_num_memberships) { - error = EADDRINUSE; - splx(s); - break; - } - if (i == IP_MAX_MEMBERSHIPS) { - error = ETOOMANYREFS; - splx(s); - break; - } - /* - * Everything looks good; add a new record to the multicast - * address list for the given interface. - */ - if ((imo->imo_membership[i] = - in_addmulti(&mreq.imr_multiaddr, ifp)) == NULL) { - error = ENOBUFS; - splx(s); - break; - } - ++imo->imo_num_memberships; - splx(s); + error = ip_addmembership(imo, &mreq); break; case IP_DROP_MEMBERSHIP: @@ -1848,54 +1815,8 @@ ip_setmoptions(sopt, imop) error = sooptcopyin(sopt, &mreq, sizeof mreq, sizeof mreq); if (error) break; - - if (!IN_MULTICAST(ntohl(mreq.imr_multiaddr.s_addr))) { - error = EINVAL; - break; - } - - s = splimp(); - /* - * If an interface address was specified, get a pointer - * to its ifnet structure. - */ - if (mreq.imr_interface.s_addr == INADDR_ANY) - ifp = NULL; - else { - ifp = ip_multicast_if(&mreq.imr_interface, NULL); - if (ifp == NULL) { - error = EADDRNOTAVAIL; - splx(s); - break; - } - } - /* - * Find the membership in the membership array. - */ - for (i = 0; i < imo->imo_num_memberships; ++i) { - if ((ifp == NULL || - imo->imo_membership[i]->inm_ifp == ifp) && - imo->imo_membership[i]->inm_addr.s_addr == - mreq.imr_multiaddr.s_addr) - break; - } - if (i == imo->imo_num_memberships) { - error = EADDRNOTAVAIL; - splx(s); - break; - } - /* - * Give up the multicast address record to which the - * membership points. - */ - in_delmulti(imo->imo_membership[i]); - /* - * Remove the gap in the membership array. 
- */ - for (++i; i < imo->imo_num_memberships; ++i) - imo->imo_membership[i-1] = imo->imo_membership[i]; - --imo->imo_num_memberships; - splx(s); + + error = ip_dropmembership(imo, &mreq); break; default: @@ -1918,6 +1839,184 @@ ip_setmoptions(sopt, imop) return (error); } +/* + * Allocate and initialize an IP multicast options structure with default values. + */ +__private_extern__ int +ip_createmoptions( + struct ip_moptions **imop) +{ + struct ip_moptions *imo; + imo = (struct ip_moptions*) _MALLOC(sizeof(*imo), M_IPMOPTS, + M_WAITOK); + + if (imo == NULL) + return (ENOBUFS); + *imop = imo; + imo->imo_multicast_ifp = NULL; + imo->imo_multicast_addr.s_addr = INADDR_ANY; + imo->imo_multicast_vif = -1; + imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL; + imo->imo_multicast_loop = IP_DEFAULT_MULTICAST_LOOP; + imo->imo_num_memberships = 0; + + return 0; +} + +/* + * Add membership to an IPv4 multicast group. + */ +__private_extern__ int +ip_addmembership( + struct ip_moptions *imo, + struct ip_mreq *mreq) +{ + struct route ro; + struct sockaddr_in *dst; + struct ifnet *ifp = NULL; + int error = 0; + int s = 0; + int i; + + if (!IN_MULTICAST(ntohl(mreq->imr_multiaddr.s_addr))) { + error = EINVAL; + return error; + } + s = splimp(); + /* + * If no interface address was provided, use the interface of + * the route to the given multicast address. + */ + if (mreq->imr_interface.s_addr == INADDR_ANY) { + bzero((caddr_t)&ro, sizeof(ro)); + dst = (struct sockaddr_in *)&ro.ro_dst; + dst->sin_len = sizeof(*dst); + dst->sin_family = AF_INET; + dst->sin_addr = mreq->imr_multiaddr; + rtalloc(&ro); + if (ro.ro_rt != NULL) { + ifp = ro.ro_rt->rt_ifp; + rtfree(ro.ro_rt); + } + else { + /* If there's no default route, try using loopback */ + mreq->imr_interface.s_addr = INADDR_LOOPBACK; + } + } + + if (ifp == NULL) { + ifp = ip_multicast_if(&mreq->imr_interface, NULL); + } + + /* + * See if we found an interface, and confirm that it + * supports multicast. + */ + if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) { + error = EADDRNOTAVAIL; + splx(s); + return error; + } + /* + * See if the membership already exists or if all the + * membership slots are full. + */ + for (i = 0; i < imo->imo_num_memberships; ++i) { + if (imo->imo_membership[i]->inm_ifp == ifp && + imo->imo_membership[i]->inm_addr.s_addr + == mreq->imr_multiaddr.s_addr) + break; + } + if (i < imo->imo_num_memberships) { + error = EADDRINUSE; + splx(s); + return error; + } + if (i == IP_MAX_MEMBERSHIPS) { + error = ETOOMANYREFS; + splx(s); + return error; + } + /* + * Everything looks good; add a new record to the multicast + * address list for the given interface. + */ + if ((imo->imo_membership[i] = + in_addmulti(&mreq->imr_multiaddr, ifp)) == NULL) { + error = ENOBUFS; + splx(s); + return error; + } + ++imo->imo_num_memberships; + splx(s); + + return error; +} + +/* + * Drop membership in an IPv4 multicast group. + */ +__private_extern__ int +ip_dropmembership( + struct ip_moptions *imo, + struct ip_mreq *mreq) +{ + int error = 0; + int s = 0; + struct ifnet* ifp = NULL; + int i; + + if (!IN_MULTICAST(ntohl(mreq->imr_multiaddr.s_addr))) { + error = EINVAL; + return error; + } + + s = splimp(); + /* + * If an interface address was specified, get a pointer + * to its ifnet structure. + */ + if (mreq->imr_interface.s_addr == INADDR_ANY) + ifp = NULL; + else { + ifp = ip_multicast_if(&mreq->imr_interface, NULL); + if (ifp == NULL) { + error = EADDRNOTAVAIL; + splx(s); + return error; + } + } + /* + * Find the membership in the membership array.
+ */ + for (i = 0; i < imo->imo_num_memberships; ++i) { + if ((ifp == NULL || + imo->imo_membership[i]->inm_ifp == ifp) && + imo->imo_membership[i]->inm_addr.s_addr == + mreq->imr_multiaddr.s_addr) + break; + } + if (i == imo->imo_num_memberships) { + error = EADDRNOTAVAIL; + splx(s); + return error; + } + /* + * Give up the multicast address record to which the + * membership points. + */ + in_delmulti(imo->imo_membership[i]); + /* + * Remove the gap in the membership array. + */ + for (++i; i < imo->imo_num_memberships; ++i) + imo->imo_membership[i-1] = imo->imo_membership[i]; + --imo->imo_num_memberships; + splx(s); + + return error; +} + /* * Return the IP multicast options in response to user getsockopt(). */ diff --git a/bsd/netinet/ip_var.h b/bsd/netinet/ip_var.h index 635f33548..422c95c1a 100644 --- a/bsd/netinet/ip_var.h +++ b/bsd/netinet/ip_var.h @@ -172,8 +172,9 @@ struct ip_linklocal_stat { /* flags passed to ip_output as last parameter */ #define IP_FORWARDING 0x1 /* most of ip header exists */ #define IP_RAWOUTPUT 0x2 /* raw ip header exists */ -#define IP_ROUTETOIF SO_DONTROUTE /* bypass routing tables */ -#define IP_ALLOWBROADCAST SO_BROADCAST /* can send broadcast packets */ +#define IP_NOIPSEC 0x4 /* No IPSec processing */ +#define IP_ROUTETOIF SO_DONTROUTE /* bypass routing tables (0x0010) */ +#define IP_ALLOWBROADCAST SO_BROADCAST /* can send broadcast packets (0x0020) */ struct ip; struct inpcb; diff --git a/bsd/netinet/raw_ip.c b/bsd/netinet/raw_ip.c index f524dd41a..ab01abe0f 100644 --- a/bsd/netinet/raw_ip.c +++ b/bsd/netinet/raw_ip.c @@ -300,6 +300,23 @@ rip_output(m, so, dst) inp->inp_moptions)); } +int +load_ipfw() +{ + kern_return_t err; + + /* Load the kext by the identifier */ + err = kmod_load_extension("com.apple.nke.IPFirewall"); + if (err) return err; + + if (ip_fw_ctl_ptr == NULL) { + /* Wait for the kext to finish loading */ + err = tsleep(&ip_fw_ctl_ptr, PWAIT | PCATCH, "load_ipfw_kext", 5 * 60 /* 5 seconds */); + } + + return err == 0 && ip_fw_ctl_ptr == NULL ? -1 : err; +} + /* * Raw IP socket option processing. 
*/ @@ -334,9 +351,11 @@ rip_ctloutput(so, sopt) case IP_OLD_FW_ADD: case IP_OLD_FW_GET: if (ip_fw_ctl_ptr == 0) - error = ENOPROTOOPT; - else + error = load_ipfw(); + if (ip_fw_ctl_ptr && error == 0) error = ip_fw_ctl_ptr(sopt); + else + error = ENOPROTOOPT; break; #if DUMMYNET @@ -401,9 +420,11 @@ rip_ctloutput(so, sopt) case IP_OLD_FW_ZERO: case IP_OLD_FW_RESETLOG: if (ip_fw_ctl_ptr == 0) - error = ENOPROTOOPT; - else + error = load_ipfw(); + if (ip_fw_ctl_ptr && error == 0) error = ip_fw_ctl_ptr(sopt); + else + error = ENOPROTOOPT; break; #if DUMMYNET diff --git a/bsd/netinet/tcp.h b/bsd/netinet/tcp.h index 85ea2021a..0b536e84c 100644 --- a/bsd/netinet/tcp.h +++ b/bsd/netinet/tcp.h @@ -158,5 +158,6 @@ struct tcphdr { #define TCP_MAXSEG 0x02 /* set maximum segment size */ #define TCP_NOPUSH 0x04 /* don't push last block of write */ #define TCP_NOOPT 0x08 /* don't use TCP options */ +#define TCP_KEEPALIVE 0x10 /* idle time used when SO_KEEPALIVE is enabled */ #endif diff --git a/bsd/netinet/tcp_debug.c b/bsd/netinet/tcp_debug.c index 48608fc80..914ef69c5 100644 --- a/bsd/netinet/tcp_debug.c +++ b/bsd/netinet/tcp_debug.c @@ -75,6 +75,7 @@ #include #include #include +#include #include #include @@ -91,7 +92,9 @@ #include #if TCPDEBUG -static int tcpconsdebug = 0; +__private_extern__ int tcpconsdebug = 0; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcpconsdebug, CTLFLAG_RW, + &tcpconsdebug, 0, "Turn tcp debugging on or off"); #endif static struct tcp_debug tcp_debug[TCP_NDEBUG]; @@ -186,7 +189,7 @@ tcp_trace(act, ostate, tp, ipgen, th, req) if (tcpconsdebug == 0) return; if (tp) - printf("%p %s:", tp, tcpstates[ostate]); + printf("%x %s:", tp, tcpstates[ostate]); else printf("???????? "); printf("%s ", tanames[act]); diff --git a/bsd/netinet/tcp_input.c b/bsd/netinet/tcp_input.c index 5ecafc97c..0b54573e1 100644 --- a/bsd/netinet/tcp_input.c +++ b/bsd/netinet/tcp_input.c @@ -157,7 +157,7 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_lq_overflow, CTLFLAG_RW, "Listen Queue Overflow"); #if TCP_DROP_SYNFIN -static int drop_synfin = 0; +static int drop_synfin = 1; SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW, &drop_synfin, 0, "Drop TCP packets with SYN+FIN set"); #endif @@ -365,9 +365,9 @@ present: */ #if INET6 int -tcp6_input(mp, offp, proto) +tcp6_input(mp, offp) struct mbuf **mp; - int *offp, proto; + int *offp; { register struct mbuf *m = *mp; struct in6_ifaddr *ia6; @@ -800,6 +800,7 @@ findpcb: #if INET6 struct inpcb *oinp = sotoinpcb(so); #endif /* INET6 */ + int ogencnt = so->so_gencnt; #if !IPSEC /* @@ -879,6 +880,12 @@ findpcb: if (!so2) goto drop; } + /* + * Make sure the listening socket did not get closed during socket allocation; + * not only is this incorrect, but it is known to cause panics. + */ + if (so->so_gencnt != ogencnt) + goto drop; #if IPSEC oso = so; #endif @@ -1000,7 +1007,7 @@ findpcb: */ tp->t_rcvtime = 0; if (TCPS_HAVEESTABLISHED(tp->t_state)) - tp->t_timer[TCPT_KEEP] = tcp_keepidle; + tp->t_timer[TCPT_KEEP] = TCP_KEEPIDLE(tp); /* * Process options if not in LISTEN state, @@ -1499,7 +1506,7 @@ findpcb: thflags &= ~TH_SYN; } else { tp->t_state = TCPS_ESTABLISHED; - tp->t_timer[TCPT_KEEP] = tcp_keepidle; + tp->t_timer[TCPT_KEEP] = TCP_KEEPIDLE(tp); } } else { /* @@ -1527,7 +1534,7 @@ findpcb: tp->t_flags &= ~TF_NEEDFIN; } else { tp->t_state = TCPS_ESTABLISHED; - tp->t_timer[TCPT_KEEP] = tcp_keepidle; + tp->t_timer[TCPT_KEEP] = TCP_KEEPIDLE(tp); } tp->t_flags |= TF_NEEDSYN; } else @@ -1598,6 +1605,16 @@ trimthenstep6: goto drop; } break; /* continue normal processing */ + +
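The TCP_KEEPALIVE socket option added to tcp.h above gives an application a per-connection override of the global keepalive idle time; the setsockopt handler in tcp_usrreq.c later in this patch converts the value from seconds to slow-timer ticks via PR_SLOWHZ. A minimal userland sketch of how it might be used (set_keepalive_idle is an illustrative helper, not part of this patch; error handling is abbreviated):

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    /* Enable keepalive on s and start probing after idle_secs of idle time. */
    static int
    set_keepalive_idle(int s, int idle_secs)
    {
        int on = 1;

        if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof (on)) < 0)
            return (-1);
        /* Per-socket idle override, in seconds; the stack falls back to
         * the net.inet.tcp.keepidle default when this is left unset. */
        return (setsockopt(s, IPPROTO_TCP, TCP_KEEPALIVE, &idle_secs,
            sizeof (idle_secs)));
    }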
/* Received a SYN while the connection is already established. * This is the "half open connection and other anomalies" case described * in RFC793 page 34; send an ACK so the remote end resets the connection * or recovers by adjusting its sequence numbering */ + case TCPS_ESTABLISHED: + if (thflags & TH_SYN) + goto dropafterack; + break; } /* @@ -1918,7 +1935,7 @@ trimthenstep6: tp->t_flags &= ~TF_NEEDFIN; } else { tp->t_state = TCPS_ESTABLISHED; - tp->t_timer[TCPT_KEEP] = tcp_keepidle; + tp->t_timer[TCPT_KEEP] = TCP_KEEPIDLE(tp); } /* * If segment contains data or ACK, will call tcp_reass() @@ -2992,21 +3009,16 @@ tcp_mss(tp, offer) (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)) mss -= TCPOLEN_CC_APPA; -#if (MCLBYTES & (MCLBYTES - 1)) == 0 - if (mss > MCLBYTES) - mss &= ~(MCLBYTES-1); -#else - if (mss > MCLBYTES) - mss = mss / MCLBYTES * MCLBYTES; -#endif /* - * If there's a pipesize, change the socket buffer - * to that size. Make the socket buffers an integral + * If there's a pipesize (i.e. loopback), change the socket + * buffer to that size only if it's bigger than the current + * sockbuf size. Make the socket buffers an integral * number of mss units; if the mss is larger than * the socket buffer, decrease the mss. */ #if RTV_SPIPE - if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0) + bufsize = rt->rt_rmx.rmx_sendpipe; + if (bufsize < so->so_snd.sb_hiwat) #endif bufsize = so->so_snd.sb_hiwat; if (bufsize < mss) @@ -3020,7 +3032,8 @@ tcp_mss(tp, offer) tp->t_maxseg = mss; #if RTV_RPIPE - if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0) + bufsize = rt->rt_rmx.rmx_recvpipe; + if (bufsize < so->so_rcv.sb_hiwat) #endif bufsize = so->so_rcv.sb_hiwat; if (bufsize > mss) { diff --git a/bsd/netinet/tcp_output.c b/bsd/netinet/tcp_output.c index 2bfc95262..2f5bab119 100644 --- a/bsd/netinet/tcp_output.c +++ b/bsd/netinet/tcp_output.c @@ -133,6 +133,8 @@ extern int ipsec_bypass; #endif extern int slowlink_wsize; /* window correction for slow links */ +extern u_long route_generation; + /* * Tcp output routine: figure out what should be sent and send it. @@ -157,35 +159,15 @@ tcp_output(tp) int maxburst = TCP_MAXBURST; struct rmxp_tao *taop; struct rmxp_tao tao_noncached; -#if INET6 - int isipv6; -#endif - int last_off; + int last_off = 0; int m_off; struct mbuf *m_last = 0; struct mbuf *m_head = 0; - - - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0); #if INET6 - if (isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)) { - - KERNEL_DEBUG(DBG_LAYER_BEG, - ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport), - (((tp->t_inpcb->in6p_laddr.s6_addr16[0] & 0xffff) << 16) | - (tp->t_inpcb->in6p_faddr.s6_addr16[0] & 0xffff)), - 0,0,0); - } - else + int isipv6 = tp->t_inpcb->inp_vflag & INP_IPV6 ; #endif - { - KERNEL_DEBUG(DBG_LAYER_BEG, - ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport), - (((tp->t_inpcb->inp_laddr.s_addr & 0xffff) << 16) | - (tp->t_inpcb->inp_faddr.s_addr & 0xffff)), - 0,0,0); - } + /* * Determine length of data that should be transmitted, * and flags that will be used.
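The route_generation extern introduced here drives the revalidation logic in the next hunk (and the matching checks added to tcp_close, tcp_rtlookup, and udp_output later in this patch): every cached route records the value of a global generation counter, and bumping that counter lazily invalidates all cached routes, forcing a fresh lookup and a check that the local source address is still valid. A minimal sketch of the idea, with the staleness test written out as a helper (illustrative only; the patch open-codes the comparison):

    #include <sys/types.h>
    #include <net/route.h>

    extern u_long route_generation;    /* bumped on routing/address changes */

    /* A cached route is stale once the global counter has moved past the
     * generation it was created under (generation_id is the rtentry field
     * this patch compares); callers rtfree() it and look the route up again. */
    static int
    route_is_stale(struct rtentry *rt)
    {
        return (rt != NULL && rt->generation_id != route_generation);
    }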
@@ -220,7 +202,68 @@ tcp_output(tp) else tp->snd_cwnd = tp->t_maxseg * ss_fltsz; } + again: + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0); + +#if INET6 + if (isipv6) { + + KERNEL_DEBUG(DBG_LAYER_BEG, + ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport), + (((tp->t_inpcb->in6p_laddr.s6_addr16[0] & 0xffff) << 16) | + (tp->t_inpcb->in6p_faddr.s6_addr16[0] & 0xffff)), + sendalot,0,0); + } + else +#endif + + { + KERNEL_DEBUG(DBG_LAYER_BEG, + ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport), + (((tp->t_inpcb->inp_laddr.s_addr & 0xffff) << 16) | + (tp->t_inpcb->inp_faddr.s_addr & 0xffff)), + sendalot,0,0); + /* + * If the route generation id changed, we need to check that our + * local (source) IP address is still valid. If it isn't, either + * return an error or silently do nothing (assuming the address will + * come back before the TCP connection times out). + */ + + if (tp->t_inpcb->inp_route.ro_rt != NULL && + (tp->t_inpcb->inp_route.ro_rt->generation_id != route_generation)) { + /* check that the source address is still valid */ + if (ifa_foraddr(tp->t_inpcb->inp_laddr.s_addr) == NULL) { + if (tp->t_state >= TCPS_CLOSE_WAIT) { + tcp_close(tp); + return(EADDRNOTAVAIL); + } + + /* Set the retransmit timer if it wasn't set; + * reset the persist timer and shift register, as the + * advertised peer window may not be valid anymore + */ + + if (!tp->t_timer[TCPT_REXMT]) { + tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; + if (tp->t_timer[TCPT_PERSIST]) { + tp->t_timer[TCPT_PERSIST] = 0; + tp->t_rxtshift = 0; + } + } + + if (so->so_flags & SOF_NOADDRAVAIL) + return(EADDRNOTAVAIL); + else + return(0); /* silently ignore and keep data in socket */ + } + else { /* Clear the cached route, will be reacquired later */ + rtfree(tp->t_inpcb->inp_route.ro_rt); + tp->t_inpcb->inp_route.ro_rt = (struct rtentry *)0; + } + } + } sendalot = 0; off = tp->snd_nxt - tp->snd_una; win = min(tp->snd_wnd, tp->snd_cwnd); @@ -678,6 +721,12 @@ send: m->m_data += max_linkhdr; m->m_len = hdrlen; } + /* make sure we still have data left to be sent at this point */ + if (so->so_snd.sb_mb == NULL || off == -1) { + if (m != NULL) m_freem(m); + error = 0; /* should we return an error? */ + goto out; + } m_copydata(so->so_snd.sb_mb, off, (int) len, mtod(m, caddr_t) + hdrlen); m->m_len += len; @@ -704,7 +753,13 @@ send: m_last = NULL; last_off = off + len; m_head = so->so_snd.sb_mb; - + + /* make sure we still have data left to be sent at this point */ + if (m_head == NULL) { + error = 0; /* should we return an error? */ + goto out; + } + /* * m_copym_with_hdrs will always return the last mbuf pointer and the offset into it that * it acted on to fulfill the current request, whether a valid 'hint' was passed in or not @@ -956,7 +1011,7 @@ send: struct rtentry *rt; ip->ip_len = m->m_pkthdr.len; #if INET6 - if (INP_CHECK_SOCKAF(so, AF_INET6)) + if (isipv6) ip->ip_ttl = in6_selecthlim(tp->t_inpcb, tp->t_inpcb->in6p_route.ro_rt ?
tp->t_inpcb->in6p_route.ro_rt->rt_ifp @@ -1060,9 +1115,10 @@ out: tp->rcv_adv = tp->rcv_nxt + win; tp->last_ack_sent = tp->rcv_nxt; tp->t_flags &= ~(TF_ACKNOW|TF_DELACK); + + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); if (sendalot) goto again; - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); return (0); } diff --git a/bsd/netinet/tcp_subr.c b/bsd/netinet/tcp_subr.c index cee36d93b..0ce5d476f 100644 --- a/bsd/netinet/tcp_subr.c +++ b/bsd/netinet/tcp_subr.c @@ -214,6 +214,7 @@ static struct tcpcb dummy_tcb; extern struct inpcbhead time_wait_slots[]; extern int cur_tw_slot; extern u_long *delack_bitmask; +extern u_long route_generation; int get_inpcb_str_size() @@ -702,6 +703,14 @@ tcp_close(tp) callout_stop(tp->tt_keep); callout_stop(tp->tt_2msl); callout_stop(tp->tt_delack); +#else + /* Clear the timers before we delete the PCB. */ + { + int i; + for (i = 0; i < TCPT_NTIMERS; i++) { + tp->t_timer[i] = 0; + } + } #endif KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp,0,0,0,0); @@ -740,11 +749,16 @@ tcp_close(tp) goto no_valid_rt; } else -#endif /* INET6 */ - if ((rt = inp->inp_route.ro_rt) == NULL || +#endif /* INET6 */ + rt = inp->inp_route.ro_rt; + if (rt == NULL || ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr - == INADDR_ANY) + == INADDR_ANY || rt->generation_id != route_generation) { + if (tp->t_state >= TCPS_CLOSE_WAIT) + tp->t_state = TCPS_CLOSING; + goto no_valid_rt; + } if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) { i = tp->t_srtt * @@ -915,7 +929,12 @@ tcp_notify(inp, error) struct inpcb *inp; int error; { - struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb; + struct tcpcb *tp; + + if (inp == NULL) + return; /* pcb is gone already */ + + tp = (struct tcpcb *)inp->inp_ppcb; /* * Ignore some errors if we are hooked up. 
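TCP_KEEPIDLE(tp), used throughout the hunks above, is the macro added to tcp_timer.h in the next hunk. Restated as a function for readability (an illustrative expansion, not code from the patch):

    /* Use the per-socket idle time only when the application has both set
     * TCP_KEEPALIVE (so t_keepidle != 0) and enabled SO_KEEPALIVE; otherwise
     * fall back to the global tcp_keepidle default. Values are in ticks. */
    static int
    tcp_keepidle_for(struct tcpcb *tp)
    {
        if (tp->t_keepidle &&
            (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE))
            return (tp->t_keepidle);
        return (tcp_keepidle);
    }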
@@ -1453,13 +1472,7 @@ tcp_mtudisc(inp, errno) if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC && (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC) mss -= TCPOLEN_CC_APPA; -#if (MCLBYTES & (MCLBYTES - 1)) == 0 - if (mss > MCLBYTES) - mss &= ~(MCLBYTES-1); -#else - if (mss > MCLBYTES) - mss = mss / MCLBYTES * MCLBYTES; -#endif + if (so->so_snd.sb_hiwat < mss) mss = so->so_snd.sb_hiwat; @@ -1489,7 +1502,7 @@ tcp_rtlookup(inp) if (ro == NULL) return (NULL); rt = ro->ro_rt; - if (rt == NULL || !(rt->rt_flags & RTF_UP)) { + if (rt == NULL || !(rt->rt_flags & RTF_UP) || rt->generation_id != route_generation) { /* No route yet, so try to acquire one */ if (inp->inp_faddr.s_addr != INADDR_ANY) { ro->ro_dst.sa_family = AF_INET; diff --git a/bsd/netinet/tcp_timer.c b/bsd/netinet/tcp_timer.c index 3d92a0d82..3d340b369 100644 --- a/bsd/netinet/tcp_timer.c +++ b/bsd/netinet/tcp_timer.c @@ -93,6 +93,13 @@ #define DBG_FNC_TCP_FAST NETDBG_CODE(DBG_NETTCP, (5 << 8)) #define DBG_FNC_TCP_SLOW NETDBG_CODE(DBG_NETTCP, (5 << 8) | 1) +/* + * NOTE - WARNING + * + * + * + * + */ static int sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS { @@ -360,6 +367,10 @@ tcp_timers(tp, timer) struct socket *so_tmp; struct tcptemp *t_template; +#if TCPDEBUG + int ostate; +#endif + #if INET6 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0; #endif /* INET6 */ @@ -537,7 +548,7 @@ tcp_timers(tp, timer) if ((always_keepalive || tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) && tp->t_state <= TCPS_CLOSING) { - if (tp->t_rcvtime >= tcp_keepidle + tcp_maxidle) + if (tp->t_rcvtime >= TCP_KEEPIDLE(tp) + tcp_maxidle) goto dropit; /* * Send a packet designed to force a response @@ -561,7 +572,7 @@ tcp_timers(tp, timer) } tp->t_timer[TCPT_KEEP] = tcp_keepintvl; } else - tp->t_timer[TCPT_KEEP] = tcp_keepidle; + tp->t_timer[TCPT_KEEP] = TCP_KEEPIDLE(tp); break; #if TCPDEBUG diff --git a/bsd/netinet/tcp_timer.h b/bsd/netinet/tcp_timer.h index dea6ce3c0..30f028838 100644 --- a/bsd/netinet/tcp_timer.h +++ b/bsd/netinet/tcp_timer.h @@ -155,6 +155,11 @@ static char *tcptimers[] = } while(0) #ifdef KERNEL + +#define TCP_KEEPIDLE(tp) \ + (tp->t_keepidle && (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ? 
\ + tp->t_keepidle : tcp_keepidle) + extern int tcp_keepinit; /* time to establish connection */ extern int tcp_keepidle; /* time before keepalive probes begin */ extern int tcp_keepintvl; /* time between keepalive probes */ diff --git a/bsd/netinet/tcp_usrreq.c b/bsd/netinet/tcp_usrreq.c index cc9202e54..e14fc8ca8 100644 --- a/bsd/netinet/tcp_usrreq.c +++ b/bsd/netinet/tcp_usrreq.c @@ -262,7 +262,7 @@ tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p) } inp->inp_vflag &= ~INP_IPV4; inp->inp_vflag |= INP_IPV6; - if (ip6_mapped_addr_on && (inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) { + if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) { if (IN6_IS_ADDR_UNSPECIFIED(&sin6p->sin6_addr)) inp->inp_vflag |= INP_IPV4; else if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) { @@ -313,8 +313,7 @@ tcp6_usr_listen(struct socket *so, struct proc *p) COMMON_START(); if (inp->inp_lport == 0) { inp->inp_vflag &= ~INP_IPV4; - if (ip6_mapped_addr_on && - (inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) + if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) inp->inp_vflag |= INP_IPV4; error = in6_pcbbind(inp, (struct sockaddr *)0, p); } @@ -387,9 +386,8 @@ tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p) if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) { struct sockaddr_in sin; - if (!ip6_mapped_addr_on || - (inp->inp_flags & IN6P_IPV6_V6ONLY)) - return(EINVAL); + if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) + return (EINVAL); in6_sin6_2_sin(&sin, sin6p); inp->inp_vflag |= INP_IPV4; @@ -993,6 +991,17 @@ tcp_ctloutput(so, sopt) error = EINVAL; break; + case TCP_KEEPALIVE: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + break; + if (optval < 0) + error = EINVAL; + else + tp->t_keepidle = optval * PR_SLOWHZ; + break; + default: error = ENOPROTOOPT; break; @@ -1007,6 +1016,9 @@ tcp_ctloutput(so, sopt) case TCP_MAXSEG: optval = tp->t_maxseg; break; + case TCP_KEEPALIVE: + optval = tp->t_keepidle / PR_SLOWHZ; + break; case TCP_NOOPT: optval = tp->t_flags & TF_NOOPT; break; @@ -1037,6 +1049,11 @@ u_long tcp_recvspace = 1024*16; SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW, &tcp_recvspace , 0, "Maximum incoming TCP datagram size"); +__private_extern__ int tcp_sockthreshold = 256; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, sockthreshold, CTLFLAG_RW, + &tcp_sockthreshold , 0, "TCP Socket size increased if less than threshold"); + +#define TCP_INCREASED_SPACE 65535 /* Automatically increase tcp send/rcv space to this value */ /* * Attach TCP protocol to socket, allocating * internet protocol control block, tcp control block, @@ -1054,15 +1071,28 @@ tcp_attach(so, p) int isipv6 = INP_CHECK_SOCKAF(so, AF_INET6) != NULL; #endif - if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { - error = soreserve(so, tcp_sendspace, tcp_recvspace); - if (error) - return (error); - } error = in_pcballoc(so, &tcbinfo, p); if (error) return (error); + inp = sotoinpcb(so); + + if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { + /* + * The goal is to let clients have large send/rcv default windows (TCP_INCREASED_SPACE) + * while not hogging mbuf space for servers. This is done by watching a threshold + * of tcpcbs in use and bumping the default send and rcvspace only if under that threshold. + * The theory is that busy servers have many more active tcpcbs and don't want the potential + * memory penalty of much larger socket buffers. The sysctl allows fine-tuning of that threshold value.
*/ + + if (inp->inp_pcbinfo->ipi_count < tcp_sockthreshold) + error = soreserve(so, MAX(TCP_INCREASED_SPACE, tcp_sendspace), MAX(TCP_INCREASED_SPACE,tcp_recvspace)); + else + error = soreserve(so, tcp_sendspace, tcp_recvspace); + if (error) + return (error); + } + #if INET6 if (isipv6) { inp->inp_vflag |= INP_IPV6; diff --git a/bsd/netinet/tcp_var.h b/bsd/netinet/tcp_var.h index d48a74f15..afd802f61 100644 --- a/bsd/netinet/tcp_var.h +++ b/bsd/netinet/tcp_var.h @@ -101,6 +101,7 @@ delack_bitmask[((hash_elem) >> 5)] |= 1 << ((hash_elem) & 0x1F) * Tcp control block, one per tcp; fields: * Organized for 16 byte cacheline efficiency. */ +#if KERNEL struct tcpcb { struct tsegqe_head t_segq; int t_dupacks; /* consecutive dup acks recd */ @@ -197,8 +198,119 @@ struct tcpcb { u_long snd_cwnd_prev; /* cwnd prior to retransmit */ u_long snd_ssthresh_prev; /* ssthresh prior to retransmit */ u_long t_badrxtwin; /* window for retransmit recovery */ + + int t_keepidle; /* keepalive idle timer (override global if > 0) */ +}; +#else + +#define tcpcb otcpcb + +#endif + + +/* + * Jaguar-compatible TCP control block, for xtcpcb. + * Does not include the newly added fields. + */ +struct otcpcb { + struct tsegqe_head t_segq; + int t_dupacks; /* consecutive dup acks recd */ + struct tcptemp *unused; /* unused now: was t_template */ + + int t_timer[TCPT_NTIMERS]; /* tcp timers */ + + struct inpcb *t_inpcb; /* back pointer to internet pcb */ + int t_state; /* state of this connection */ + u_int t_flags; +#define TF_ACKNOW 0x00001 /* ack peer immediately */ +#define TF_DELACK 0x00002 /* ack, but try to delay it */ +#define TF_NODELAY 0x00004 /* don't delay packets to coalesce */ +#define TF_NOOPT 0x00008 /* don't use tcp options */ +#define TF_SENTFIN 0x00010 /* have sent FIN */ +#define TF_REQ_SCALE 0x00020 /* have/will request window scaling */ +#define TF_RCVD_SCALE 0x00040 /* other side has requested scaling */ +#define TF_REQ_TSTMP 0x00080 /* have/will request timestamps */ +#define TF_RCVD_TSTMP 0x00100 /* a timestamp was received in SYN */ +#define TF_SACK_PERMIT 0x00200 /* other side said I could SACK */ +#define TF_NEEDSYN 0x00400 /* send SYN (implicit state) */ +#define TF_NEEDFIN 0x00800 /* send FIN (implicit state) */ +#define TF_NOPUSH 0x01000 /* don't push */ +#define TF_REQ_CC 0x02000 /* have/will request CC */ +#define TF_RCVD_CC 0x04000 /* a CC was received in SYN */ +#define TF_SENDCCNEW 0x08000 /* send CCnew instead of CC in SYN */ +#define TF_MORETOCOME 0x10000 /* More data to be appended to sock */ +#define TF_LQ_OVERFLOW 0x20000 /* listen queue overflow */ +#define TF_RXWIN0SENT 0x40000 /* sent a receiver win 0 in response */ +#define TF_SLOWLINK 0x80000 /* route is on a modem-speed link */ + + int t_force; /* 1 if forcing out a byte */ + + tcp_seq snd_una; /* send unacknowledged */ + tcp_seq snd_max; /* highest sequence number sent; + * used to recognize retransmits + */ + tcp_seq snd_nxt; /* send next */ + tcp_seq snd_up; /* send urgent pointer */ + + tcp_seq snd_wl1; /* window update seg seq number */ + tcp_seq snd_wl2; /* window update seg ack number */ + tcp_seq iss; /* initial send sequence number */ + tcp_seq irs; /* initial receive sequence number */ + + tcp_seq rcv_nxt; /* receive next */ + tcp_seq rcv_adv; /* advertised window */ + u_long rcv_wnd; /* receive window */ + tcp_seq rcv_up; /* receive urgent pointer */ + + u_long snd_wnd; /* send window */ + u_long snd_cwnd; /* congestion-controlled window */ + u_long snd_ssthresh; /* snd_cwnd size threshold + * for slow start exponential to +
* linear switch + */ + u_int t_maxopd; /* mss plus options */ + + u_long t_rcvtime; /* inactivity time */ + u_long t_starttime; /* time connection was established */ + int t_rtttime; /* round trip time */ + tcp_seq t_rtseq; /* sequence number being timed */ + + int t_rxtcur; /* current retransmit value (ticks) */ + u_int t_maxseg; /* maximum segment size */ + int t_srtt; /* smoothed round-trip time */ + int t_rttvar; /* variance in round-trip time */ + + int t_rxtshift; /* log(2) of rexmt exp. backoff */ + u_int t_rttmin; /* minimum rtt allowed */ + u_long t_rttupdated; /* number of times rtt sampled */ + u_long max_sndwnd; /* largest window peer has offered */ + + int t_softerror; /* possible error not yet reported */ +/* out-of-band data */ + char t_oobflags; /* have some */ + char t_iobc; /* input character */ +#define TCPOOB_HAVEDATA 0x01 +#define TCPOOB_HADDATA 0x02 +/* RFC 1323 variables */ + u_char snd_scale; /* window scaling for send window */ + u_char rcv_scale; /* window scaling for recv window */ + u_char request_r_scale; /* pending window scaling */ + u_char requested_s_scale; + u_long ts_recent; /* timestamp echo data */ + + u_long ts_recent_age; /* when last updated */ + tcp_seq last_ack_sent; +/* RFC 1644 variables */ + tcp_cc cc_send; /* send connection count */ + tcp_cc cc_recv; /* receive connection count */ + tcp_seq snd_recover; /* for use in fast recovery */ +/* experimental */ + u_long snd_cwnd_prev; /* cwnd prior to retransmit */ + u_long snd_ssthresh_prev; /* ssthresh prior to retransmit */ + u_long t_badrxtwin; /* window for retransmit recovery */ }; + /* * Structure to hold TCP options that are only used during segment * processing (in tcp_input), but not held in the tcpcb. @@ -356,7 +468,11 @@ struct tcpstat { struct xtcpcb { size_t xt_len; struct inpcb xt_inp; - struct tcpcb xt_tp; +#if KERNEL + struct otcpcb xt_tp; +#else + struct tcpcb xt_tp; +#endif struct xsocket xt_socket; u_quad_t xt_alignment_hack; }; diff --git a/bsd/netinet/udp_usrreq.c b/bsd/netinet/udp_usrreq.c index 107b135ee..9aafb73fb 100644 --- a/bsd/netinet/udp_usrreq.c +++ b/bsd/netinet/udp_usrreq.c @@ -104,8 +104,6 @@ extern int ipsec_bypass; #define DBG_FNC_UDP_INPUT NETDBG_CODE(DBG_NETUDP, (5 << 8)) #define DBG_FNC_UDP_OUTPUT NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1) - -#define __STDC__ 1 /* * UDP protocol implementation. * Per RFC 768, August, 1980. @@ -135,6 +133,8 @@ struct inpcbinfo udbinfo; #endif extern int apple_hwcksum_rx; +extern int esp_udp_encap_port; +extern u_long route_generation; struct udpstat udpstat; /* from udp_var.h */ SYSCTL_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RD, @@ -429,6 +429,53 @@ doudpcksum: udp_append(last, ip, m, iphlen + sizeof(struct udphdr)); return; } + + /* + * UDP to port 4500 with a payload where the first four bytes are + * not zero is a UDP encapsulated IPSec packet. Packets where + * the payload is one byte and that byte is 0xFF are NAT keepalive + * packets. Decapsulate the ESP packet and carry on with IPSec input + * or discard the NAT keep-alive. + */ + if (ipsec_bypass == 0 && (esp_udp_encap_port & 0xFFFF) != 0 && + uh->uh_dport == ntohs((u_short)esp_udp_encap_port)) { + int payload_len = len - sizeof(struct udphdr) > 4 ? 
4 : len - sizeof(struct udphdr); + if (m->m_len < iphlen + sizeof(struct udphdr) + payload_len) { + if ((m = m_pullup(m, iphlen + sizeof(struct udphdr) + payload_len)) == 0) { + udpstat.udps_hdrops++; + KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0); + return; + } + ip = mtod(m, struct ip *); + uh = (struct udphdr *)((caddr_t)ip + iphlen); + } + /* Check for NAT keepalive packet */ + if (payload_len == 1 && *(u_int8_t*)((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) { + m_freem(m); + KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0); + return; + } + else if (payload_len == 4 && *(u_int32_t*)((caddr_t)uh + sizeof(struct udphdr)) != 0) { + /* UDP encapsulated IPSec packet to pass through NAT */ + size_t stripsiz; + + stripsiz = sizeof(struct udphdr); + + ip = mtod(m, struct ip *); + ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen); + m->m_data += stripsiz; + m->m_len -= stripsiz; + m->m_pkthdr.len -= stripsiz; + ip = mtod(m, struct ip *); + ip->ip_len = ip->ip_len - stripsiz; + ip->ip_p = IPPROTO_ESP; + + KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0); + esp4_input(m, iphlen); + return; + } + } + /* * Locate pcb for datagram. */ @@ -757,6 +804,24 @@ udp_output(inp, m, addr, control, p) goto release; } + /* If there was a routing change, discard cached route and check + * that we have a valid source address. + * Reacquire a new source address if INADDR_ANY was specified + */ + + if (inp->inp_route.ro_rt && inp->inp_route.ro_rt->generation_id != route_generation) { + if (ifa_foraddr(inp->inp_laddr.s_addr) == NULL) { /* src address is gone */ + if (inp->inp_flags & INP_INADDR_ANY) + inp->inp_faddr.s_addr = INADDR_ANY; /* new src will be set later */ + else { + error = EADDRNOTAVAIL; + goto release; + } + } + rtfree(inp->inp_route.ro_rt); + inp->inp_route.ro_rt = (struct rtentry *)0; + } + if (addr) { laddr = inp->inp_laddr; if (inp->inp_faddr.s_addr != INADDR_ANY) { @@ -778,6 +843,8 @@ udp_output(inp, m, addr, control, p) goto release; } } + + /* * Calculate data length and get a mbuf * for UDP and IP headers. @@ -785,9 +852,7 @@ udp_output(inp, m, addr, control, p) M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT); if (m == 0) { error = ENOBUFS; - if (addr) - splx(s); - goto release; + goto abort; } /* @@ -825,7 +890,7 @@ udp_output(inp, m, addr, control, p) #if IPSEC if (ipsec_bypass == 0 && ipsec_setsocket(m, inp->inp_socket) != 0) { error = ENOBUFS; - goto release; + goto abort; } #endif /*IPSEC*/ error = ip_output(m, inp->inp_options, &inp->inp_route, @@ -840,6 +905,13 @@ udp_output(inp, m, addr, control, p) KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0); return (error); +abort: + if (addr) { + in_pcbdisconnect(inp); + inp->inp_laddr = laddr; /* XXX rehash? 
*/ + splx(s); + } + release: m_freem(m); KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0); diff --git a/bsd/netinet6/Makefile b/bsd/netinet6/Makefile index 8de432c3c..37a4991a7 100644 --- a/bsd/netinet6/Makefile +++ b/bsd/netinet6/Makefile @@ -27,7 +27,7 @@ DATAFILES = \ esp.h in6.h in6_prefix.h \ ipcomp.h mld6_var.h raw_ip6.h esp6.h \ in6_gif.h in6_var.h ip6_mroute.h ipcomp6.h \ - nd6.h scope6_var.h + nd6.h scope6_var.h ip6_fw.h diff --git a/bsd/netinet6/ah6.h b/bsd/netinet6/ah6.h index 02edd317f..8ac8dd613 100644 --- a/bsd/netinet6/ah6.h +++ b/bsd/netinet6/ah6.h @@ -42,7 +42,7 @@ #ifdef __APPLE_API_PRIVATE struct secasvar; -extern int ah6_input __P((struct mbuf **, int *, int)); +extern int ah6_input __P((struct mbuf **, int *)); extern int ah6_output __P((struct mbuf *, u_char *, struct mbuf *, struct ipsecrequest *)); extern int ah6_calccksum __P((struct mbuf *, caddr_t, size_t, diff --git a/bsd/netinet6/ah_input.c b/bsd/netinet6/ah_input.c index 7f72ff119..e055cd53b 100644 --- a/bsd/netinet6/ah_input.c +++ b/bsd/netinet6/ah_input.c @@ -1,5 +1,5 @@ -/* $FreeBSD: src/sys/netinet6/ah_input.c,v 1.1.2.4 2001/07/03 11:01:49 ume Exp $ */ -/* $KAME: ah_input.c,v 1.59 2001/05/16 04:01:27 jinmei Exp $ */ +/* $FreeBSD: src/sys/netinet6/ah_input.c,v 1.1.2.6 2002/04/28 05:40:26 suz Exp $ */ +/* $KAME: ah_input.c,v 1.67 2002/01/07 11:39:56 kjc Exp $ */ /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. @@ -417,14 +417,6 @@ ah4_input(struct mbuf *m, int off) goto fail; } -#if 0 /* XXX should we call ipfw rather than ipsec_in_reject? */ - /* drop it if it does not match the default policy */ - if (ipsec4_in_reject(m, NULL)) { - ipsecstat.in_polvio++; - goto fail; - } -#endif - #if 1 /* * Should the inner packet be considered authentic? @@ -505,9 +497,9 @@ ah4_input(struct mbuf *m, int off) goto fail; } m_adj(n, stripsiz); - m_cat(m, n); /* m_cat does not update m_pkthdr.len */ m->m_pkthdr.len += n->m_pkthdr.len; + m_cat(m, n); } #endif @@ -567,9 +559,9 @@ fail: #if INET6 int -ah6_input(mp, offp, proto) +ah6_input(mp, offp) struct mbuf **mp; - int *offp, proto; + int *offp; { struct mbuf *m = *mp; int off = *offp; @@ -842,14 +834,6 @@ ah6_input(mp, offp, proto) goto fail; } -#if 0 /* XXX should we call ipfw rather than ipsec_in_reject? */ - /* drop it if it does not match the default policy */ - if (ipsec6_in_reject(m, NULL)) { - ipsec6stat.in_polvio++; - goto fail; - } -#endif - #if 1 /* * should the inner packet be considered authentic? @@ -874,7 +858,7 @@ ah6_input(mp, offp, proto) } IF_ENQUEUE(&ip6intrq, m); m = NULL; - schednetisr(NETISR_IPV6); /*can be skipped but to make sure*/ + schednetisr(NETISR_IPV6); /* can be skipped but to make sure */ splx(s); nxt = IPPROTO_DONE; } else { @@ -924,9 +908,9 @@ ah6_input(mp, offp, proto) goto fail; } m_adj(n, stripsiz); - m_cat(m, n); /* m_cat does not update m_pkthdr.len */ m->m_pkthdr.len += n->m_pkthdr.len; + m_cat(m, n); } #endif ip6 = mtod(m, struct ip6_hdr *); @@ -975,7 +959,7 @@ ah6_ctlinput(cmd, sa, d) struct mbuf *m; struct ip6ctlparam *ip6cp = NULL; int off; - struct sockaddr_in6 sa6_src, sa6_dst; + struct sockaddr_in6 *sa6_src, *sa6_dst; if (sa->sa_family != AF_INET6 || sa->sa_len != sizeof(struct sockaddr_in6)) @@ -1021,9 +1005,11 @@ ah6_ctlinput(cmd, sa, d) * Check to see if we have a valid SA corresponding to * the address in the ICMP message payload. 
*/ + sa6_src = ip6cp->ip6c_src; + sa6_dst = (struct sockaddr_in6 *)sa; sav = key_allocsa(AF_INET6, - (caddr_t)&sa6_src.sin6_addr, - (caddr_t)&sa6_dst.sin6_addr, + (caddr_t)&sa6_src->sin6_addr, + (caddr_t)&sa6_dst->sin6_addr, IPPROTO_AH, ahp->ah_spi); if (sav) { if (sav->state == SADB_SASTATE_MATURE || diff --git a/bsd/netinet6/dest6.c b/bsd/netinet6/dest6.c index c5b713b14..8127ebeae 100644 --- a/bsd/netinet6/dest6.c +++ b/bsd/netinet6/dest6.c @@ -54,9 +54,9 @@ * Destination options header processing. */ int -dest6_input(mp, offp, proto) +dest6_input(mp, offp) struct mbuf **mp; - int *offp, proto; + int *offp; { struct mbuf *m = *mp; int off = *offp, dstoptlen, optlen; diff --git a/bsd/netinet6/esp6.h b/bsd/netinet6/esp6.h index 06dc625a9..74b5acc91 100644 --- a/bsd/netinet6/esp6.h +++ b/bsd/netinet6/esp6.h @@ -42,7 +42,7 @@ #ifdef __APPLE_API_PRIVATE extern int esp6_output __P((struct mbuf *, u_char *, struct mbuf *, struct ipsecrequest *)); -extern int esp6_input __P((struct mbuf **, int *, int)); +extern int esp6_input __P((struct mbuf **, int *)); extern void esp6_ctlinput __P((int, struct sockaddr *, void *)); #endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/netinet6/esp_core.c b/bsd/netinet6/esp_core.c index 0cdede849..7b8b124c6 100644 --- a/bsd/netinet6/esp_core.c +++ b/bsd/netinet6/esp_core.c @@ -1,4 +1,4 @@ -/* $FreeBSD: src/sys/netinet6/esp_core.c,v 1.1.2.2 2001/07/03 11:01:49 ume Exp $ */ +/* $FreeBSD: src/sys/netinet6/esp_core.c,v 1.1.2.4 2002/03/26 10:12:29 ume Exp $ */ /* $KAME: esp_core.c,v 1.50 2000/11/02 12:27:38 itojun Exp $ */ /* @@ -77,6 +77,11 @@ #include +#include +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3) +#define DBG_FNC_ESPAUTH NETDBG_CODE(DBG_NETIPSEC, (8 << 8)) + static int esp_null_mature __P((struct secasvar *)); static int esp_null_decrypt __P((struct mbuf *, size_t, struct secasvar *, const struct esp_algorithm *, int)); @@ -219,6 +224,8 @@ esp_schedule(algo, sav) sav->schedlen = (*algo->schedlen)(algo); if (sav->schedlen < 0) return EINVAL; + +//#### that malloc should be replaced by a saved buffer... sav->sched = _MALLOC(sav->schedlen, M_SECA, M_DONTWAIT); if (!sav->sched) { sav->schedlen = 0; @@ -229,6 +236,7 @@ esp_schedule(algo, sav) if (error) { ipseclog((LOG_ERR, "esp_schedule %s: error %d\n", algo->name, error)); + bzero(sav->sched, sav->schedlen); FREE(sav->sched, M_SECA); sav->sched = NULL; sav->schedlen = 0; @@ -470,13 +478,13 @@ esp_blowfish_blockdecrypt(algo, sav, s, d) u_int8_t *s; u_int8_t *d; { - /* HOLY COW! BF_encrypt() takes values in host byteorder */ + /* HOLY COW! 
BF_decrypt() takes values in host byteorder */ BF_LONG t[2]; bcopy(s, t, sizeof(t)); t[0] = ntohl(t[0]); t[1] = ntohl(t[1]); - BF_encrypt(t, (BF_KEY *)sav->sched, BF_DECRYPT); + BF_decrypt(t, (BF_KEY *)sav->sched); t[0] = htonl(t[0]); t[1] = htonl(t[1]); bcopy(t, d, sizeof(t)); @@ -496,7 +504,7 @@ esp_blowfish_blockencrypt(algo, sav, s, d) bcopy(s, t, sizeof(t)); t[0] = ntohl(t[0]); t[1] = ntohl(t[1]); - BF_encrypt(t, (BF_KEY *)sav->sched, BF_ENCRYPT); + BF_encrypt(t, (BF_KEY *)sav->sched); t[0] = htonl(t[0]); t[1] = htonl(t[1]); bcopy(t, d, sizeof(t)); @@ -592,9 +600,8 @@ esp_3des_blockdecrypt(algo, sav, s, d) /* assumption: d has a good alignment */ p = (des_key_schedule *)sav->sched; bcopy(s, d, sizeof(DES_LONG) * 2); - des_ecb_encrypt((des_cblock *)d, (des_cblock *)d, p[2], DES_DECRYPT); - des_ecb_encrypt((des_cblock *)d, (des_cblock *)d, p[1], DES_ENCRYPT); - des_ecb_encrypt((des_cblock *)d, (des_cblock *)d, p[0], DES_DECRYPT); + des_ecb3_encrypt((des_cblock *)d, (des_cblock *)d, + p[0], p[1], p[2], DES_DECRYPT); return 0; } @@ -610,9 +617,8 @@ esp_3des_blockencrypt(algo, sav, s, d) /* assumption: d has a good alignment */ p = (des_key_schedule *)sav->sched; bcopy(s, d, sizeof(DES_LONG) * 2); - des_ecb_encrypt((des_cblock *)d, (des_cblock *)d, p[0], DES_ENCRYPT); - des_ecb_encrypt((des_cblock *)d, (des_cblock *)d, p[1], DES_DECRYPT); - des_ecb_encrypt((des_cblock *)d, (des_cblock *)d, p[2], DES_ENCRYPT); + des_ecb3_encrypt((des_cblock *)d, (des_cblock *)d, + p[0], p[1], p[2], DES_ENCRYPT); return 0; } @@ -637,8 +643,8 @@ esp_cbc_decrypt(m, off, sav, algo, ivlen) { struct mbuf *s; struct mbuf *d, *d0, *dp; - int soff, doff; /*offset from the head of chain, to head of this mbuf */ - int sn, dn; /*offset from the head of the mbuf, to meat */ + int soff, doff; /* offset from the head of chain, to head of this mbuf */ + int sn, dn; /* offset from the head of the mbuf, to meat */ size_t ivoff, bodyoff; u_int8_t iv[MAXIVLEN], *ivp; u_int8_t sbuf[MAXIVLEN], *sp; @@ -841,8 +847,8 @@ esp_cbc_encrypt(m, off, plen, sav, algo, ivlen) { struct mbuf *s; struct mbuf *d, *d0, *dp; - int soff, doff; /*offset from the head of chain, to head of this mbuf */ - int sn, dn; /*offset from the head of the mbuf, to meat */ + int soff, doff; /* offset from the head of chain, to head of this mbuf */ + int sn, dn; /* offset from the head of the mbuf, to meat */ size_t ivoff, bodyoff; u_int8_t iv[MAXIVLEN], *ivp; u_int8_t sbuf[MAXIVLEN], *sp; @@ -1067,16 +1073,20 @@ esp_auth(m0, skip, length, sav, sum) "esp_auth: mbuf length < skip + length\n")); return EINVAL; } + + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_START, skip,length,0,0,0); /* * length of esp part (excluding authentication data) must be 4n, * since nexthdr must be at offset 4n+3. 
*/ if (length % 4) { ipseclog((LOG_ERR, "esp_auth: length is not multiple of 4\n")); + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 1,0,0,0,0); return EINVAL; } if (!sav) { ipseclog((LOG_DEBUG, "esp_auth: NULL SA passed\n")); + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 2,0,0,0,0); return EINVAL; } algo = ah_algorithm_lookup(sav->alg_auth); @@ -1084,6 +1094,7 @@ esp_auth(m0, skip, length, sav, sum) ipseclog((LOG_ERR, "esp_auth: bad ESP auth algorithm passed: %d\n", sav->alg_auth)); + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 3,0,0,0,0); return EINVAL; } @@ -1095,6 +1106,7 @@ esp_auth(m0, skip, length, sav, sum) ipseclog((LOG_DEBUG, "esp_auth: AH_MAXSUMSIZE is too small: siz=%lu\n", (u_long)siz)); + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 4,0,0,0,0); return EINVAL; } @@ -1113,8 +1125,10 @@ esp_auth(m0, skip, length, sav, sum) } error = (*algo->init)(&s, sav); - if (error) + if (error) { + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 5,0,0,0,0); return error; + } while (0 < length) { if (!m) @@ -1134,5 +1148,6 @@ esp_auth(m0, skip, length, sav, sum) (*algo->result)(&s, sumbuf); bcopy(sumbuf, sum, siz); /*XXX*/ + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 6,0,0,0,0); return 0; } diff --git a/bsd/netinet6/esp_input.c b/bsd/netinet6/esp_input.c index 549b37d41..463a4182d 100644 --- a/bsd/netinet6/esp_input.c +++ b/bsd/netinet6/esp_input.c @@ -89,6 +89,11 @@ #include +#include +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3) +#define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8)) +#define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8)) #define IPLEN_FLIPPED #if INET @@ -116,6 +121,7 @@ esp4_input(m, off) size_t esplen; int s; + KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0,0,0,0,0); /* sanity check for alignment. */ if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) { ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem " @@ -308,14 +314,17 @@ noreplaycheck: */ if (!algo->decrypt) panic("internal error: no decrypt function"); + KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0,0,0,0,0); if ((*algo->decrypt)(m, off, sav, algo, ivlen)) { /* m is already freed */ m = NULL; ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n", ipsec_logsastr(sav))); ipsecstat.in_inval++; + KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0); goto bad; } + KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2,0,0,0,0); ipsecstat.in_esphist[sav->alg_enc]++; m->m_flags |= M_DECRYPTED; @@ -378,20 +387,15 @@ noreplaycheck: goto bad; } -#if 0 /* XXX should call ipfw rather than ipsec_in_reject, shouldn't it ? */ - /* drop it if it does not match the default policy */ - if (ipsec4_in_reject(m, NULL)) { - ipsecstat.in_polvio++; - goto bad; - } -#endif - key_sa_recordxfer(sav, m); if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 || ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) { ipsecstat.in_nomem++; goto bad; } + + /* Clear the csum flags, they can't be valid for the inner headers */ + m->m_pkthdr.csum_flags = 0; s = splimp(); if (IF_QFULL(&ipintrq)) { @@ -404,6 +408,7 @@ noreplaycheck: schednetisr(NETISR_IP); /*can be skipped but to make sure*/ splx(s); nxt = IPPROTO_DONE; + KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2,0,0,0,0); } else { /* * strip off ESP header and IV. @@ -433,6 +438,17 @@ noreplaycheck: ipsecstat.in_nomem++; goto bad; } + + /* + * Set the csum valid flag, if we authenticated the + * packet, the payload shouldn't be corrupt unless + * it was corrupted before being signed on the other + * side. 
+ */ + if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) { + m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR; + m->m_pkthdr.csum_data = 0xFFFF; + } if (nxt != IPPROTO_DONE) { if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 && @@ -440,6 +456,7 @@ noreplaycheck: ipsecstat.in_polvio++; goto bad; } + KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3,0,0,0,0); (*ip_protox[nxt]->pr_input)(m, off); } else m_freem(m); @@ -462,15 +479,16 @@ bad: } if (m) m_freem(m); + KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4,0,0,0,0); return; } #endif /* INET */ #if INET6 int -esp6_input(mp, offp, proto) +esp6_input(mp, offp) struct mbuf **mp; - int *offp, proto; + int *offp; { struct mbuf *m = *mp; int off = *offp; @@ -752,14 +770,6 @@ noreplaycheck: goto bad; } -#if 0 /* XXX should call ipfw rather than ipsec_in_reject, shouldn't it ? */ - /* drop it if it does not match the default policy */ - if (ipsec6_in_reject(m, NULL)) { - ipsec6stat.in_polvio++; - goto bad; - } -#endif - key_sa_recordxfer(sav, m); if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 || ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) { @@ -814,9 +824,9 @@ noreplaycheck: goto bad; } m_adj(n, stripsiz); - m_cat(m, n); /* m_cat does not update m_pkthdr.len */ m->m_pkthdr.len += n->m_pkthdr.len; + m_cat(m, n); } #ifndef PULLDOWN_TEST @@ -855,10 +865,10 @@ noreplaycheck: m_freem(m); } else { m_copydata(m, 0, maxlen, mtod(n, caddr_t)); - m_adj(m, maxlen); n->m_len = maxlen; n->m_pkthdr.len = m->m_pkthdr.len; n->m_next = m; + m_adj(m, maxlen); m->m_flags &= ~M_PKTHDR; } m = n; @@ -910,7 +920,7 @@ esp6_ctlinput(cmd, sa, d) struct ip6_hdr *ip6; struct mbuf *m; int off; - struct sockaddr_in6 sa6_src, sa6_dst; + struct sockaddr_in6 *sa6_src, *sa6_dst; if (sa->sa_family != AF_INET6 || sa->sa_len != sizeof(struct sockaddr_in6)) @@ -974,10 +984,12 @@ esp6_ctlinput(cmd, sa, d) * Check to see if we have a valid SA corresponding to * the address in the ICMP message payload. */ + sa6_src = ip6cp->ip6c_src; + sa6_dst = (struct sockaddr_in6 *)sa; sav = key_allocsa(AF_INET6, - (caddr_t)&sa6_src.sin6_addr, - (caddr_t)&sa6_dst, IPPROTO_ESP, - espp->esp_spi); + (caddr_t)&sa6_src->sin6_addr, + (caddr_t)&sa6_dst->sin6_addr, + IPPROTO_ESP, espp->esp_spi); if (sav) { if (sav->state == SADB_SASTATE_MATURE || sav->state == SADB_SASTATE_DYING) diff --git a/bsd/netinet6/esp_output.c b/bsd/netinet6/esp_output.c index 9ada8fc7c..b3dc4d22c 100644 --- a/bsd/netinet6/esp_output.c +++ b/bsd/netinet6/esp_output.c @@ -1,5 +1,5 @@ -/* $FreeBSD: src/sys/netinet6/esp_output.c,v 1.1.2.2 2001/07/03 11:01:50 ume Exp $ */ -/* $KAME: esp_output.c,v 1.43 2001/03/01 07:10:45 itojun Exp $ */ +/* $FreeBSD: src/sys/netinet6/esp_output.c,v 1.1.2.3 2002/04/28 05:40:26 suz Exp $ */ +/* $KAME: esp_output.c,v 1.44 2001/07/26 06:53:15 jinmei Exp $ */ /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. @@ -56,6 +56,7 @@ #include #include #include +#include /* for nat traversal */ #if INET6 #include @@ -80,9 +81,18 @@ #include +#include +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3) +#define DBG_FNC_ESPOUT NETDBG_CODE(DBG_NETIPSEC, (4 << 8)) +#define DBG_FNC_ENCRYPT NETDBG_CODE(DBG_NETIPSEC, (5 << 8)) + static int esp_output __P((struct mbuf *, u_char *, struct mbuf *, struct ipsecrequest *, int)); +extern int esp_udp_encap_port; +extern u_int32_t natt_now; + /* * compute ESP header size. 
*/ @@ -96,6 +106,7 @@ esp_hdrsiz(isr) size_t ivlen; size_t authlen; size_t hdrsiz; + size_t maxpad; /* sanity check */ if (isr == NULL) @@ -120,16 +131,15 @@ esp_hdrsiz(isr) if (ivlen < 0) goto estimate; - /* - * XXX - * right now we don't calcurate the padding size. simply - * treat the padding size as constant, for simplicity. - * - * XXX variable size padding support - */ + if (algo->padbound) + maxpad = algo->padbound; + else + maxpad = 4; + maxpad += 1; /* maximum 'extendsiz' is padbound + 1, see esp_output */ + if (sav->flags & SADB_X_EXT_OLD) { /* RFC 1827 */ - hdrsiz = sizeof(struct esp) + ivlen + 9; + hdrsiz = sizeof(struct esp) + ivlen + maxpad; } else { /* RFC 2406 */ aalgo = ah_algorithm_lookup(sav->alg_auth); @@ -137,21 +147,28 @@ esp_hdrsiz(isr) authlen = (aalgo->sumsiz)(sav); else authlen = 0; - hdrsiz = sizeof(struct newesp) + ivlen + 9 + authlen; + hdrsiz = sizeof(struct newesp) + ivlen + maxpad + authlen; } + + /* + * If the security association indicates that NATT is required, + * add the size of the NATT encapsulation header: + */ + if ((sav->flags & SADB_X_EXT_NATT) != 0) hdrsiz += sizeof(struct udphdr) + 4; return hdrsiz; estimate: /* * ASSUMING: - * sizeof(struct newesp) > sizeof(struct esp). + * sizeof(struct newesp) > sizeof(struct esp). (8) * esp_max_ivlen() = max ivlen for CBC mode - * 9 = (maximum padding length without random padding length) + * 17 = (maximum padding length without random padding length) * + (Pad Length field) + (Next Header field). * 16 = maximum ICV we support. + * sizeof(struct udphdr) in case NAT traversal is used */ - return sizeof(struct newesp) + esp_max_ivlen() + 9 + 16; + return sizeof(struct newesp) + esp_max_ivlen() + 17 + 16 + sizeof(struct udphdr); } /* @@ -197,7 +214,11 @@ esp_output(m, nexthdrp, md, isr, af) size_t extendsiz; int error = 0; struct ipsecstat *stat; + struct udphdr *udp = NULL; + int udp_encapsulate = (sav->flags & SADB_X_EXT_NATT && af == AF_INET && + (esp_udp_encap_port & 0xFFFF) != 0); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen,0,0,0,0); switch (af) { #if INET case AF_INET: @@ -213,6 +234,7 @@ esp_output(m, nexthdrp, md, isr, af) #endif default: ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af)); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1,0,0,0,0); return 0; /* no change at all */ } @@ -246,6 +268,7 @@ esp_output(m, nexthdrp, md, isr, af) panic("esp_output: should not reach here"); } m_freem(m); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2,0,0,0,0); return EINVAL; } @@ -254,6 +277,7 @@ esp_output(m, nexthdrp, md, isr, af) ipseclog((LOG_ERR, "esp_output: unsupported algorithm: " "SPI=%u\n", (u_int32_t)ntohl(sav->spi))); m_freem(m); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3,0,0,0,0); return EINVAL; } spi = sav->spi; @@ -276,9 +300,9 @@ esp_output(m, nexthdrp, md, isr, af) #if INET6 struct ip6_hdr *ip6 = NULL; #endif - size_t esplen; /*sizeof(struct esp/newesp)*/ - size_t esphlen; /*sizeof(struct esp/newesp) + ivlen*/ - size_t hlen = 0; /*ip header len*/ + size_t esplen; /* sizeof(struct esp/newesp) */ + size_t esphlen; /* sizeof(struct esp/newesp) + ivlen */ + size_t hlen = 0; /* ip header len */ if (sav->flags & SADB_X_EXT_OLD) { /* RFC 1827 */ @@ -298,6 +322,7 @@ esp_output(m, nexthdrp, md, isr, af) ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n", afnumber)); m_freem(m); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4,0,0,0,0); return EINVAL; } @@ -334,11 +359,16 @@ esp_output(m, nexthdrp, md, isr, af) mprev->m_next = md; espoff = m->m_pkthdr.len - plen; + + if 
(udp_encapsulate) { + esphlen += sizeof(struct udphdr); + espoff += sizeof(struct udphdr); + } /* * grow the mbuf to accommodate ESP header. * before: IP ... payload - * after: IP ... ESP IV payload + * after: IP ... [UDP] ESP IV payload */ if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT) != 0) { MGET(n, M_DONTWAIT, MT_DATA); @@ -351,16 +381,25 @@ mprev->m_next = n; n->m_next = md; m->m_pkthdr.len += esphlen; - esp = mtod(n, struct esp *); + if (udp_encapsulate) { + udp = mtod(n, struct udphdr *); + esp = (struct esp *)((caddr_t)udp + sizeof(struct udphdr)); + } else { + esp = mtod(n, struct esp *); + } } else { md->m_len += esphlen; md->m_data -= esphlen; m->m_pkthdr.len += esphlen; esp = mtod(md, struct esp *); + if (udp_encapsulate) { + udp = mtod(md, struct udphdr *); + esp = (struct esp *)((caddr_t)udp + sizeof(struct udphdr)); + } else { + esp = mtod(md, struct esp *); + } } - nxt = *nexthdrp; - *nexthdrp = IPPROTO_ESP; switch (af) { #if INET case AF_INET: @@ -397,6 +436,7 @@ esp_output(m, nexthdrp, md, isr, af) ipsec_logsastr(sav))); stat->out_inval++; m_freem(m); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 5,0,0,0,0); return EINVAL; } } @@ -523,6 +563,22 @@ esp_output(m, nexthdrp, md, isr, af) extend[i] = (i + 1) & 0xff; break; } + + nxt = *nexthdrp; + if (udp_encapsulate) { + *nexthdrp = IPPROTO_UDP; + + /* Fill out the UDP header */ + udp->uh_sport = ntohs((u_short)esp_udp_encap_port); + udp->uh_dport = ntohs(sav->remote_ike_port); +// udp->uh_ulen set later, after all length tweaks are complete + udp->uh_sum = 0; + + /* Update last sent so we know if we need to send keepalive */ + sav->natt_last_activity = natt_now; + } else { + *nexthdrp = IPPROTO_ESP; + } /* initialize esp trailer. */ esptail = (struct esptail *) @@ -571,13 +627,16 @@ esp_output(m, nexthdrp, md, isr, af) */ if (!algo->encrypt) panic("internal error: no encrypt function"); + KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_START, 0,0,0,0,0); if ((*algo->encrypt)(m, espoff, plen + extendsiz, sav, algo, ivlen)) { /* m is already freed */ ipseclog((LOG_ERR, "packet encryption failure\n")); stat->out_inval++; error = EINVAL; + KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1,error,0,0,0); goto fail; } + KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 2,0,0,0,0); /* * calculate ICV if required.
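With udp_encapsulate set, the mbuf chain built above carries the ESP payload inside a UDP datagram so it can traverse NAT (the receive side of this is the port-4500 handling added to udp_input earlier in the patch). A descriptive sketch of the resulting wire layout; the code builds this in place in the mbuf chain rather than through such a struct:

    #include <netinet/udp.h>
    #include <netinet6/esp.h>

    /* IP | UDP | ESP | IV | payload | padding | pad len | next hdr | ICV.
     * uh_sport is esp_udp_encap_port (4500 by convention), uh_dport is
     * sav->remote_ike_port, uh_sum is 0, and uh_ulen is filled in after
     * all length adjustments, as the following hunk shows. */
    struct esp_udp_encap {
        struct udphdr udp;    /* inserted between the IP and ESP headers */
        struct newesp esp;    /* SPI + sequence number */
        /* IV, encrypted payload, ESP trailer, and optional ICV follow */
    };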
@@ -618,7 +677,7 @@ esp_output(m, nexthdrp, md, isr, af) while (n->m_next) n = n->m_next; - if (!(n->m_flags & M_EXT) && siz < M_TRAILINGSPACE(n)) { /*XXX*/ + if (!(n->m_flags & M_EXT) && siz < M_TRAILINGSPACE(n)) { /* XXX */ n->m_len += siz; m->m_pkthdr.len += siz; p = mtod(n, u_char *) + n->m_len - siz; @@ -666,6 +725,13 @@ esp_output(m, nexthdrp, md, isr, af) #endif } } + + if (udp_encapsulate) { + struct ip *ip; + ip = mtod(m, struct ip *); + udp->uh_ulen = htons(ntohs(ip->ip_len) - (IP_VHL_HL(ip->ip_vhl) << 2)); + } + noantireplay: if (!m) { @@ -675,10 +741,12 @@ noantireplay: stat->out_success++; stat->out_esphist[sav->alg_enc]++; key_sa_recordxfer(sav, m); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 6,0,0,0,0); return 0; fail: #if 1 + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 7,error,0,0,0); return error; #else panic("something bad in esp_output"); diff --git a/bsd/netinet6/frag6.c b/bsd/netinet6/frag6.c index 80b021c27..01888c6cb 100644 --- a/bsd/netinet6/frag6.c +++ b/bsd/netinet6/frag6.c @@ -128,9 +128,9 @@ frag6_init() * Fragment input */ int -frag6_input(mp, offp, proto) +frag6_input(mp, offp) struct mbuf **mp; - int *offp, proto; + int *offp; { struct mbuf *m = *mp, *t; struct ip6_hdr *ip6; diff --git a/bsd/netinet6/icmp6.c b/bsd/netinet6/icmp6.c index 82d2d1bea..654dc8b15 100644 --- a/bsd/netinet6/icmp6.c +++ b/bsd/netinet6/icmp6.c @@ -374,7 +374,7 @@ icmp6_error(m, type, code, param) m->m_pkthdr.rcvif = NULL; icmp6stat.icp6s_outhist[type]++; - icmp6_reflect(m, sizeof(struct ip6_hdr)); /*header order: IPv6 - ICMPv6*/ + icmp6_reflect(m, sizeof(struct ip6_hdr)); /* header order: IPv6 - ICMPv6 */ return; @@ -389,9 +389,9 @@ icmp6_error(m, type, code, param) * Process a received ICMP6 message. */ int -icmp6_input(mp, offp, proto) +icmp6_input(mp, offp) struct mbuf **mp; - int *offp, proto; + int *offp; { struct mbuf *m = *mp, *n; struct ip6_hdr *ip6, *nip6; @@ -402,7 +402,7 @@ icmp6_input(mp, offp, proto) #ifndef PULLDOWN_TEST IP6_EXTHDR_CHECK(m, off, sizeof(struct icmp6_hdr), IPPROTO_DONE); - /* m might change if M_LOOP. So, call mtod after this */ + /* m might change if M_LOOP. So, call mtod after this */ #endif /* @@ -706,9 +706,9 @@ icmp6_input(mp, offp, proto) bcopy(icmp6, nicmp6, sizeof(struct icmp6_hdr)); p = (u_char *)(nicmp6 + 1); bzero(p, 4); - bcopy(hostname, p + 4, maxhlen); /*meaningless TTL*/ + bcopy(hostname, p + 4, maxhlen); /* meaningless TTL */ noff = sizeof(struct ip6_hdr); - M_COPY_PKTHDR(n, m); /* just for recvif */ + M_COPY_PKTHDR(n, m); /* just for rcvif */ n->m_pkthdr.len = n->m_len = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr) + 4 + maxhlen; nicmp6->icmp6_type = ICMP6_WRUREPLY; @@ -859,7 +859,7 @@ icmp6_input(mp, offp, proto) static int icmp6_notify_error(m, off, icmp6len, code) struct mbuf *m; - int off, icmp6len; + int off, icmp6len, code; { struct icmp6_hdr *icmp6; struct ip6_hdr *eip6; @@ -899,7 +899,7 @@ icmp6_notify_error(m, off, icmp6len, code) struct ip6_rthdr0 *rth0; int rthlen; - while (1) { /* XXX: should avoid inf. loop explicitly? */ + while (1) { /* XXX: should avoid infinite loop explicitly? */ struct ip6_ext *eh; switch (nxt) { @@ -1013,7 +1013,7 @@ icmp6_notify_error(m, off, icmp6len, code) default: /* * This case includes ESP and the No Next - * Header. In such cases going to the notify + * Header. In such cases going to the notify * label does not have any meaning * (i.e. 
ctlfunc will be NULL), but we go * anyway since we might have to update @@ -1562,7 +1562,7 @@ ni6_nametodns(name, namelen, old) } panic("should not reach here"); - /*NOTREACHED*/ + /* NOTREACHED */ fail: if (m) @@ -1713,7 +1713,7 @@ ni6_addrs(ni6, m, ifpp, subj) /* * check if anycast is okay. - * XXX: just experimental. not in the spec. + * XXX: just experimental. not in the spec. */ if ((ifa6->ia6_flags & IN6_IFF_ANYCAST) != 0 && (niflags & NI_NODEADDR_FLAG_ANYCAST) == 0) @@ -2114,7 +2114,7 @@ icmp6_reflect(m, off) if (ia == NULL && IN6_IS_ADDR_LINKLOCAL(&t) && (m->m_flags & M_LOOP)) { /* * This is the case if the dst is our link-local address - * and the sender is also ourseleves. + * and the sender is also ourselves. */ src = &t; } @@ -2125,7 +2125,7 @@ icmp6_reflect(m, off) /* * This case matches to multicasts, our anycast, or unicasts - * that we do not own. Select a source address based on the + * that we do not own. Select a source address based on the * source address of the erroneous packet. */ bzero(&ro, sizeof(ro)); @@ -2361,7 +2361,7 @@ icmp6_redirect_input(m, off) nd6_cache_lladdr(ifp, &redtgt6, lladdr, lladdrlen, ND_REDIRECT, is_onlink ? ND_REDIRECT_ONLINK : ND_REDIRECT_ROUTER); - if (!is_onlink) { /* better router case. perform rtredirect. */ + if (!is_onlink) { /* better router case. perform rtredirect. */ /* perform rtredirect */ struct sockaddr_in6 sdst; struct sockaddr_in6 sgw; @@ -2423,14 +2423,14 @@ icmp6_redirect_output(m0, rt) icmp6_errcount(&icmp6stat.icp6s_outerrhist, ND_REDIRECT, 0); - /* if we are not router, we don't send icmp6 redirect */ - if (!ip6_forwarding || ip6_accept_rtadv) - goto fail; - /* sanity check */ if (!m0 || !rt || !(rt->rt_flags & RTF_UP) || !(ifp = rt->rt_ifp)) goto fail; + /* if we are not router, we don't send icmp6 redirect */ + if (!ip6_forwarding || ip6_accept_rtadv || (ifp->if_eflags & IFEF_ACCEPT_RTADVD)) + goto fail; + /* * Address check: * the source address must identify a neighbor, and @@ -2549,7 +2549,7 @@ icmp6_redirect_output(m0, rt) if (!rt_router) goto nolladdropt; len = sizeof(*nd_opt) + ifp->if_addrlen; - len = (len + 7) & ~7; /*round by 8*/ + len = (len + 7) & ~7; /* round by 8 */ /* safety check */ if (len + (p - (u_char *)ip6) > maxlen) goto nolladdropt; @@ -2808,8 +2808,8 @@ ppsratecheck(lasttime, curpps, maxpps) timersub(&tv, lasttime, &delta); /* - * check for 0,0 is so that the message will be seen at least once. - * if more than one second have passed since the last update of + * Check for 0,0 so that the message will be seen at least once. + * If more than one second has passed since the last update of * lasttime, reset the counter. * * we do increment *curpps even in *curpps < maxpps case, as some may @@ -2827,7 +2827,7 @@ ppsratecheck(lasttime, curpps, maxpps) else rv = 0; -#if 1 /*DIAGNOSTIC?*/ +#if 1 /* DIAGNOSTIC? 
*/ /* be careful about wrap-around */ if (*curpps + 1 > *curpps) *curpps = *curpps + 1; @@ -2862,7 +2862,7 @@ icmp6_ratelimit(dst, type, code) { int ret; - ret = 0; /*okay to send*/ + ret = 0; /* okay to send */ /* PPS limit */ if (!ppsratecheck(&icmp6errppslim_last, &icmp6errpps_count, diff --git a/bsd/netinet6/in6.c b/bsd/netinet6/in6.c index 35ffb4a8f..f3648e697 100644 --- a/bsd/netinet6/in6.c +++ b/bsd/netinet6/in6.c @@ -182,7 +182,6 @@ in6_ifloop_request(int cmd, struct ifaddr *ifa) */ if (cmd == RTM_ADD && nrt && ifa != nrt->rt_ifa) { rtsetifa(nrt, ifa); - nrt->rt_dlt = ifa->ifa_dlt; } /* @@ -249,7 +248,7 @@ in6_ifremloop(struct ifaddr *ifa) */ /* - * Delete the entry only if exact one ifa exists. More than one ifa + * Delete the entry only if exactly one ifa exists. More than one ifa * can exist if we assign a same single address to multiple * (probably p2p) interfaces. * XXX: we should avoid such a configuration in IPv6... @@ -265,9 +264,9 @@ in6_ifremloop(struct ifaddr *ifa) if (ia_count == 1) { /* * Before deleting, check if a corresponding loopbacked host - * route surely exists. With this check, we can avoid to + * route surely exists. With this check, we can avoid to * delete an interface direct route whose destination is same - * as the address being removed. This can happen when remofing + * as the address being removed. This can happen when removing * a subnet-router anycast address on an interface attached * to a shared medium. */ @@ -398,7 +397,7 @@ in6_control(so, cmd, data, ifp, p) case SIOCSIFINFO_FLAGS: if (!privileged) return(EPERM); - /*fall through*/ + /* fall through */ case OSIOCGIFINFO_IN6: case SIOCGIFINFO_IN6: case SIOCGDRLST_IN6: @@ -421,7 +420,7 @@ in6_control(so, cmd, data, ifp, p) return(EOPNOTSUPP); } - switch(cmd) { + switch (cmd) { case SIOCSSCOPE6: if (!privileged) return(EPERM); @@ -440,7 +439,7 @@ in6_control(so, cmd, data, ifp, p) case SIOCDLIFADDR: if (!privileged) return(EPERM); - /*fall through*/ + /* fall through */ case SIOCGLIFADDR: return in6_lifaddr_ioctl(so, cmd, data, ifp, p); } @@ -449,35 +448,92 @@ in6_control(so, cmd, data, ifp, p) switch (cmd) { - case SIOCPROTOATTACH: - in6_if_up(ifp); - break; - case SIOCPROTODETACH: - in6_purgeif(ifp); + case SIOCAUTOCONF_START: + ifp->if_eflags |= IFEF_ACCEPT_RTADVD; + return (0); + + case SIOCAUTOCONF_STOP: + { + struct ifaddr *ifa, *nifa = NULL; + + ifp->if_eflags &= ~IFEF_ACCEPT_RTADVD; + + /* nuke prefix list. this may try to remove some of ifaddrs as well */ + in6_purgeprefix(ifp); + + /* remove autoconfigured addresses from the interface */ + + for (ifa = TAILQ_FIRST(&ifp->if_addrlist); ifa != NULL; ifa = nifa) + { + nifa = TAILQ_NEXT(ifa, ifa_list); + if (ifa->ifa_addr == NULL || ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_AUTOCONF) + in6_purgeaddr(ifa); + } + return (0); + } + + + case SIOCLL_START: + + /* NOTE: All the interface specific DLIL attachments should be done here + * They are currently done in in6_ifattach() for the interfaces that need it. + */ + + if (ifp->if_type == IFT_PPP && ifra->ifra_addr.sin6_family == AF_INET6 && + ifra->ifra_dstaddr.sin6_family == AF_INET6) + in6_if_up(ifp, ifra); /* PPP may provide LinkLocal addresses */ + else + in6_if_up(ifp, 0); + + return(0); + + case SIOCLL_STOP: + { + struct ifaddr *ifa, *nifa = NULL; + + /* remove link-local addresses from the interface */ + + for (ifa = TAILQ_FIRST(&ifp->if_addrlist); ifa != NULL; ifa = nifa) + { + nifa = TAILQ_NEXT(ifa, ifa_list); + if (ifa->ifa_addr == NULL || ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (IN6_IS_ADDR_LINKLOCAL(IFA_IN6(ifa))) + in6_purgeaddr(ifa); + } + return (0); + } + + + case SIOCPROTOATTACH_IN6: + switch (ifp->if_type) { - case IFT_ETHER: - error = ether_detach_inet6(ifp); - break; - case IFT_GIF: - error = gif_detach_proto_family(ifp, PF_INET6); - break; - case IFT_STF: - error = stf_detach_inet6(ifp); - break; - case IFT_LOOP: /* do not detach loopback */ - break; - default: - printf("SIOCPROTODETACH: %s%d unknown type, can't detach\n", - ifp->if_name, ifp->if_unit); - return(ENOENT); +#if IFT_BRIDGE /* OpenBSD 2.8 */ + /* some of the interfaces are inherently not IPv6 capable */ + case IFT_BRIDGE: + return (0); +#endif + default: + + if ((error = dlil_plumb_protocol(PF_INET6, ifp, &dl_tag))) + printf("SIOCPROTOATTACH_IN6: %s error=%d\n", + if_name(ifp), error); break; + } - if (error) { - printf("SIOCPROTODETACH: %s%d ether_detach_inet6 error=%x\n", - ifp->if_name, ifp->if_unit, error); - return(error); - } - break; + return (error); + + + case SIOCPROTODETACH_IN6: + + in6_purgeif(ifp); /* Cleanup interface routes and addresses */ + + if ((error = dlil_unplumb_protocol(PF_INET6, ifp))) + printf("SIOCPROTODETACH_IN6: %s error=%d\n", + if_name(ifp), error); + return(error); } #endif @@ -648,10 +704,17 @@ in6_control(so, cmd, data, ifp, p) { int i, error = 0; struct nd_prefix pr0, *pr; - + if (dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET6, &dl_tag) == EPROTONOSUPPORT) { - in6_if_up(ifp); /* no dl_tag, the interface is not "up" for IPv6 yet */ + /* Address is added without previous IPv6 configurator support (gif, stf etc...) */ + if ((error = dlil_plumb_protocol(PF_INET6, ifp, &dl_tag))) { + printf("SIOCAIFADDR_IN6: %s can't plumb protocol error=%d\n", + if_name(ifp), error); + return (error); + } + in6_if_up(ifp, NULL); } + /* * first, make or update the interface address structure, @@ -685,10 +748,11 @@ in6_control(so, cmd, data, ifp, p) ifra->ifra_prefixmask.sin6_addr.s6_addr32[i]; } /* - * XXX: since we don't have enough APIs, we just set inifinity - * to lifetimes. They can be overridden by later advertised - * RAs (when accept_rtadv is non 0), but we'd rather intend - * such a behavior. + * XXX: since we don't have an API to set prefix (not address) + * lifetimes, we just use the same lifetimes as addresses.
+ * The (temporarily) installed lifetimes can be overridden by + * later advertised RAs (when accept_rtadv is non 0), which is + * an intended behavior. */ pr0.ndpr_raf_onlink = 1; /* should be configurable? */ pr0.ndpr_raf_auto = @@ -745,12 +809,9 @@ in6_control(so, cmd, data, ifp, p) * other addresses detached. */ pfxlist_onlink_check(); + in6_post_msg(ifp, KEV_INET6_NEW_USER_ADDR, ia); } - dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET6, &dl_tag); - ia->ia_ifa.ifa_dlt = dl_tag; - - in6_post_msg(ifp, KEV_INET6_NEW_USER_ADDR, ia); break; } @@ -803,9 +864,7 @@ in6_control(so, cmd, data, ifp, p) default: #ifdef __APPLE__ - error = dlil_ioctl(0, ifp, cmd, (caddr_t)data); - if (error == EOPNOTSUPP) - error = 0; + error = dlil_ioctl(PF_INET6, ifp, cmd, (caddr_t)data); return error; #else @@ -870,7 +929,7 @@ in6_update_ifa(ifp, ifra, ia) } else { /* - * In this case, ia must not be NULL. We just use its prefix + * In this case, ia must not be NULL. We just use its prefix * length. */ plen = in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL); @@ -1226,7 +1285,7 @@ in6_purgeaddr(ifa) struct in6_ifaddr *ia = (struct in6_ifaddr *) ifa; /* stop DAD processing */ - nd6_dad_stoptimer(ifa); + nd6_dad_stop(ifa); /* * delete route to the destination of the address being purged. @@ -1402,7 +1461,7 @@ in6_lifaddr_ioctl(so, cmd, data, ifp, p) /* address must be specified on GET with IFLR_PREFIX */ if ((iflr->flags & IFLR_PREFIX) == 0) break; - /*FALLTHROUGH*/ + /* FALLTHROUGH */ case SIOCALIFADDR: case SIOCDLIFADDR: /* address must be specified on ADD and DELETE */ @@ -1418,10 +1477,10 @@ in6_lifaddr_ioctl(so, cmd, data, ifp, p) if (sa->sa_len && sa->sa_len != sizeof(struct sockaddr_in6)) return EINVAL; break; - default: /*shouldn't happen*/ + default: /* shouldn't happen */ #if 0 panic("invalid cmd to in6_lifaddr_ioctl"); - /*NOTREACHED*/ + /* NOTREACHED */ #else return EOPNOTSUPP; #endif @@ -1523,7 +1582,7 @@ in6_lifaddr_ioctl(so, cmd, data, ifp, p) } else { if (cmd == SIOCGLIFADDR) { /* on getting an address, take the 1st match */ - cmp = 0; /*XXX*/ + cmp = 0; /* XXX */ } else { /* on deleting an address, do exact match */ in6_len2mask(&mask, 128); @@ -1596,7 +1655,7 @@ in6_lifaddr_ioctl(so, cmd, data, ifp, p) in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL); - iflr->flags = ia->ia6_flags; /*XXX*/ + iflr->flags = ia->ia6_flags; /* XXX */ return 0; } else { @@ -1626,7 +1685,7 @@ in6_lifaddr_ioctl(so, cmd, data, ifp, p) } } - return EOPNOTSUPP; /*just for safety*/ + return EOPNOTSUPP; /* just for safety */ } /* @@ -1663,9 +1722,7 @@ in6_ifinit(ifp, ia, sin6, newhost) if (ifacount <= 1 && #ifdef __APPLE__ - (error = dlil_ioctl(0, ifp, SIOCSIFADDR, (caddr_t)ia))) { - if (error == EOPNOTSUPP) - error = 0; + (error = dlil_ioctl(PF_INET6, ifp, SIOCSIFADDR, (caddr_t)ia))) { if (error) { splx(s); return(error); @@ -1703,7 +1760,7 @@ in6_ifinit(ifp, ia, sin6, newhost) ia->ia_ifa.ifa_flags |= RTF_CLONING; } - /* Add ownaddr as loopback rtentry, if necessary(ex. on p2p link). */ + /* Add ownaddr as loopback rtentry, if necessary (ex. on p2p link). */ if (newhost) { /* set the rtrequest function to create llinfo */ ia->ia_ifa.ifa_rtrequest = nd6_rtrequest; @@ -1786,7 +1843,7 @@ in6_delmulti(in6m) struct ifmultiaddr *ifma = in6m->in6m_ifma; int s = splnet(); - if (ifma->ifma_refcount == 1) { + if (ifma && ifma->ifma_refcount == 1) { /* * No remaining claims to this record; let MLD6 know * that we are leaving the multicast group. 
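The in6_delmulti() changes here (the NULL-ifma guard just above and the if_delmultiaddr() release in the next hunk) make sure the MLD leave and the free of the membership record happen only when the last claim on it goes away, and tolerate a record that never acquired a link-layer address. A simplified, compilable model of that last-reference pattern; the membership type and the mld_leave()/llmulti_detach() helpers are hypothetical stand-ins for in6_leavegroup() and if_delmultiaddr():

#include <stdio.h>
#include <stdlib.h>

struct membership {
	int refcount;
};

static void mld_leave(struct membership *m)      { (void)m; printf("leave group\n"); }
static void llmulti_detach(struct membership *m) { (void)m; printf("detach ll addr\n"); }

static void
membership_release(struct membership *m)
{
	if (m == NULL)		/* mirrors the new NULL-ifma guard */
		return;
	if (m->refcount == 1) {	/* last claim: tell MLD, then free */
		mld_leave(m);
		llmulti_detach(m);
		free(m);
		return;
	}
	m->refcount--;
}

int
main(void)
{
	struct membership *m = malloc(sizeof(*m));
	m->refcount = 2;
	membership_release(m);	/* drops to 1, no leave yet */
	membership_release(m);	/* last claim: leave, detach, free */
	return 0;
}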
@@ -1797,7 +1854,8 @@ in6_delmulti(in6m) FREE(in6m, M_IPMADDR); } /* XXX - should be separate API for when we have an ifma? */ - if_delmulti(ifma->ifma_ifp, ifma->ifma_addr); + if (ifma) + if_delmultiaddr(ifma); splx(s); } @@ -1864,8 +1922,8 @@ ip6_sprintf(addr) static char ip6buf[8][48]; int i; char *cp; - u_short *a = (u_short *)addr; - u_char *d; + const u_short *a = (const u_short *)addr; + const u_char *d; int dcolon = 0; ip6round = (ip6round + 1) & 7; @@ -1894,7 +1952,7 @@ ip6_sprintf(addr) a++; continue; } - d = (u_char *)a; + d = (const u_char *)a; *cp++ = digits[*d >> 4]; *cp++ = digits[*d++ & 0xf]; *cp++ = digits[*d >> 4]; @@ -2310,7 +2368,7 @@ in6_ifawithifp(ifp, dst) int dst_scope = in6_addrscope(dst), blen = -1, tlen; struct ifaddr *ifa; struct in6_ifaddr *besta = 0; - struct in6_ifaddr *dep[2]; /*last-resort: deprecated*/ + struct in6_ifaddr *dep[2]; /* last-resort: deprecated */ dep[0] = dep[1] = NULL; @@ -2389,8 +2447,9 @@ extern int in6_init2done; * perform DAD when interface becomes IFF_UP. */ void -in6_if_up(ifp) +in6_if_up(ifp, ifra) struct ifnet *ifp; + struct in6_aliasreq *ifra; { struct ifaddr *ifa; struct in6_ifaddr *ia; @@ -2402,7 +2461,7 @@ in6_if_up(ifp) /* * special cases, like 6to4, are handled in in6_ifattach */ - in6_ifattach(ifp, NULL); + in6_ifattach(ifp, NULL, ifra); dad_delay = 0; TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) @@ -2473,7 +2532,7 @@ in6_setmaxmtu() } /* - * Convert sockaddr_in6 to sockaddr_in. Original sockaddr_in6 must be + * Convert sockaddr_in6 to sockaddr_in. Original sockaddr_in6 must be * v4 mapped addr or v4 compat addr */ void diff --git a/bsd/netinet6/in6.h b/bsd/netinet6/in6.h index 64356fa23..d9df90b9b 100644 --- a/bsd/netinet6/in6.h +++ b/bsd/netinet6/in6.h @@ -418,10 +418,11 @@ struct route_in6 { #define IPV6_CHECKSUM 26 /* int; checksum offset for raw socket */ #define IPV6_V6ONLY 27 /* bool; only bind INET6 at wildcard bind */ -#ifndef _KERNEL +#ifndef KERNEL #define IPV6_BINDV6ONLY IPV6_V6ONLY #endif + #if 1 /*IPSEC*/ #define IPV6_IPSEC_POLICY 28 /* struct; get/set security policy */ #endif @@ -587,13 +588,14 @@ struct in6_pktinfo { struct cmsghdr; struct mbuf; struct ifnet; +struct in6_aliasreq; int in6_cksum __P((struct mbuf *, u_int8_t, u_int32_t, u_int32_t)); int in6_localaddr __P((struct in6_addr *)); int in6_addrscope __P((struct in6_addr *)); struct in6_ifaddr *in6_ifawithscope __P((struct ifnet *, struct in6_addr *)); struct in6_ifaddr *in6_ifawithifp __P((struct ifnet *, struct in6_addr *)); -extern void in6_if_up __P((struct ifnet *)); +extern void in6_if_up __P((struct ifnet *, struct in6_aliasreq *)); struct sockaddr; void in6_sin6_2_sin __P((struct sockaddr_in *sin, diff --git a/bsd/netinet6/in6_gif.c b/bsd/netinet6/in6_gif.c index 8d963768c..2886c784b 100644 --- a/bsd/netinet6/in6_gif.c +++ b/bsd/netinet6/in6_gif.c @@ -282,53 +282,45 @@ int in6_gif_input(mp, offp) } /* - * we know that we are in IFF_UP, outer address available, and outer family - * matched the physical addr family. see gif_encapcheck(). + * validate outer address. 
*/ -int -gif_encapcheck6(m, off, proto, arg) - const struct mbuf *m; - int off; - int proto; - void *arg; -{ - struct ip6_hdr ip6; +static int +gif_validate6(ip6, sc, ifp) + const struct ip6_hdr *ip6; struct gif_softc *sc; + struct ifnet *ifp; +{ struct sockaddr_in6 *src, *dst; - int addrmatch; - /* sanity check done in caller */ - sc = (struct gif_softc *)arg; src = (struct sockaddr_in6 *)sc->gif_psrc; dst = (struct sockaddr_in6 *)sc->gif_pdst; - /* LINTED const cast */ - m_copydata((struct mbuf *)m, 0, sizeof(ip6), (caddr_t)&ip6); - - /* check for address match */ - addrmatch = 0; - if (IN6_ARE_ADDR_EQUAL(&src->sin6_addr, &ip6.ip6_dst)) - addrmatch |= 1; - if (IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6.ip6_src)) - addrmatch |= 2; - if (addrmatch != 3) + /* + * Check for address match. Note that the check is for an incoming + * packet. We should compare the *source* address in our configuration + * and the *destination* address of the packet, and vice versa. + */ + if (!IN6_ARE_ADDR_EQUAL(&src->sin6_addr, &ip6->ip6_dst) || + !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_src)) return 0; /* martian filters on outer source - done in ip6_input */ /* ingress filters on outer source */ - if ((sc->gif_if.if_flags & IFF_LINK2) == 0 && - (m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.rcvif) { + if ((sc->gif_if.if_flags & IFF_LINK2) == 0 && ifp) { struct sockaddr_in6 sin6; struct rtentry *rt; bzero(&sin6, sizeof(sin6)); sin6.sin6_family = AF_INET6; sin6.sin6_len = sizeof(struct sockaddr_in6); - sin6.sin6_addr = ip6.ip6_src; - /* XXX scopeid */ + sin6.sin6_addr = ip6->ip6_src; +#ifndef SCOPEDROUTING + sin6.sin6_scope_id = 0; /* XXX */ +#endif + rt = rtalloc1((struct sockaddr *)&sin6, 0, 0UL); - if (!rt || rt->rt_ifp != m->m_pkthdr.rcvif) { + if (!rt || rt->rt_ifp != ifp) { #if 0 log(LOG_WARNING, "%s: packet from %s dropped " "due to ingress filter\n", if_name(&sc->gif_if), @@ -343,3 +335,29 @@ gif_encapcheck6(m, off, proto, arg) return 128 * 2; } + +/* + * we know that we are in IFF_UP, outer address available, and outer family + * matched the physical addr family. see gif_encapcheck(). + * sanity check for arg should have been done in the caller. + */ +int +gif_encapcheck6(m, off, proto, arg) + const struct mbuf *m; + int off; + int proto; + void *arg; +{ + struct ip6_hdr ip6; + struct gif_softc *sc; + struct ifnet *ifp; + + /* sanity check done in caller */ + sc = (struct gif_softc *)arg; + + /* LINTED const cast */ + m_copydata(m, 0, sizeof(ip6), (caddr_t)&ip6); + ifp = ((m->m_flags & M_PKTHDR) != 0) ? m->m_pkthdr.rcvif : NULL; + + return gif_validate6(&ip6, sc, ifp); +} diff --git a/bsd/netinet6/in6_ifattach.c b/bsd/netinet6/in6_ifattach.c index 63c5d5710..8fe4ea5eb 100644 --- a/bsd/netinet6/in6_ifattach.c +++ b/bsd/netinet6/in6_ifattach.c @@ -1,4 +1,5 @@ -/* $KAME: in6_ifattach.c,v 1.41 2000/03/16 07:05:34 jinmei Exp $ */ +/* $FreeBSD: src/sys/netinet6/in6_ifattach.c,v 1.8 2002/04/19 04:46:22 suz Exp $ */ +/* $KAME: in6_ifattach.c,v 1.118 2001/05/24 07:44:00 itojun Exp $ */ /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. 
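The in6_gif.c refactor above splits the outer-address and ingress checks out of gif_encapcheck6() into gif_validate6(), which takes the receiving ifnet explicitly so the mbuf is only needed by the caller. The heart of the ingress filter is a reverse-path test: a route lookup on the outer source address must lead back out the interface the packet arrived on, unless IFF_LINK2 turns the filter off. A hedged sketch of that logic, with a hypothetical lookup callback standing in for the kernel's rtalloc1() and rt->rt_ifp:

#include <stdbool.h>
#include <stddef.h>

struct ifnet;	/* opaque here, as in the kernel */
typedef struct ifnet *(*rp_lookup_t)(const void *src_addr);

/* Accept a tunneled packet only if the route to its outer source
 * points back at the interface it arrived on (reverse-path check),
 * mirroring the rtalloc1()/rt_ifp comparison in gif_validate6(). */
static bool
ingress_filter_ok(const void *outer_src, struct ifnet *rcvif,
    bool link2_set, rp_lookup_t route_lookup_ifp)
{
	struct ifnet *route_ifp;

	if (link2_set || rcvif == NULL)	/* filter disabled or no rcvif */
		return true;
	route_ifp = route_lookup_ifp(outer_src);
	return route_ifp != NULL && route_ifp == rcvif;
}

static char fake_if;	/* stands in for a struct ifnet in this sketch */

static struct ifnet *
no_route(const void *a)
{
	(void)a;
	return NULL;	/* no route back: the filter rejects */
}

int
main(void)
{
	return ingress_filter_ok("2001:db8::1", (struct ifnet *)&fake_if,
	    false, no_route) ? 1 : 0;
}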
@@ -72,9 +73,6 @@ int ip6_auto_linklocal = IP6_AUTO_LINKLOCAL; int ip6_auto_linklocal = 1; /* enable by default */ #endif -#ifndef __APPLE__ -struct callout in6_tmpaddrtimer_ch; -#endif extern struct inpcbinfo udbinfo; extern struct inpcbinfo ripcbinfo; @@ -83,7 +81,7 @@ static int get_rand_ifid __P((struct ifnet *, struct in6_addr *)); static int generate_tmp_ifid __P((u_int8_t *, const u_int8_t *, u_int8_t *)); static int get_hw_ifid __P((struct ifnet *, struct in6_addr *)); static int get_ifid __P((struct ifnet *, struct ifnet *, struct in6_addr *)); -static int in6_ifattach_linklocal __P((struct ifnet *, struct ifnet *)); +static int in6_ifattach_linklocal __P((struct ifnet *, struct ifnet *, struct in6_aliasreq *)); static int in6_ifattach_loopback __P((struct ifnet *)); #define EUI64_GBIT 0x01 @@ -107,7 +105,7 @@ static int in6_ifattach_loopback __P((struct ifnet *)); static int get_rand_ifid(ifp, in6) struct ifnet *ifp; - struct in6_addr *in6; /*upper 64bits are preserved */ + struct in6_addr *in6; /* upper 64bits are preserved */ { MD5_CTX ctxt; u_int8_t digest[16]; @@ -158,8 +156,9 @@ generate_tmp_ifid(seed0, seed1, ret) val32 = random() ^ tv.tv_usec; bcopy(&val32, seed + sizeof(val32) * i, sizeof(val32)); } - } else + } else { bcopy(seed0, seed, 8); + } /* copy the right-most 64-bits of the given address */ /* XXX assumption on the size of IFID */ @@ -229,7 +228,7 @@ generate_tmp_ifid(seed0, seed1, ret) static int get_hw_ifid(ifp, in6) struct ifnet *ifp; - struct in6_addr *in6; /*upper 64bits are preserved */ + struct in6_addr *in6; /* upper 64bits are preserved */ { struct ifaddr *ifa; struct sockaddr_dl *sdl; @@ -362,7 +361,7 @@ found: static int get_ifid(ifp0, altifp, in6) struct ifnet *ifp0; - struct ifnet *altifp; /*secondary EUI64 source*/ + struct ifnet *altifp; /* secondary EUI64 source */ struct in6_addr *in6; { struct ifnet *ifp; @@ -424,50 +423,56 @@ success: } static int -in6_ifattach_linklocal(ifp, altifp) +in6_ifattach_linklocal(ifp, altifp, ifra_passed) struct ifnet *ifp; struct ifnet *altifp; /* secondary EUI64 source */ + struct in6_aliasreq *ifra_passed; { struct in6_ifaddr *ia; struct in6_aliasreq ifra; struct nd_prefix pr0; - int i, error; + int i, dl_tag, error; /* * configure link-local address. */ bzero(&ifra, sizeof(ifra)); + dlil_plumb_protocol(PF_INET6, ifp, &dl_tag); + /* * in6_update_ifa() does not use ifra_name, but we accurately set it * for safety. 
*/ strncpy(ifra.ifra_name, if_name(ifp), sizeof(ifra.ifra_name)); - ifra.ifra_addr.sin6_family = AF_INET6; - ifra.ifra_addr.sin6_len = sizeof(struct sockaddr_in6); - ifra.ifra_addr.sin6_addr.s6_addr16[0] = htons(0xfe80); + if (ifp->if_type == IFT_PPP && ifra_passed != NULL) /* PPP provided both addresses for us */ + bcopy(&ifra_passed->ifra_addr, &(ifra.ifra_addr), sizeof(struct sockaddr_in6)); + else { + ifra.ifra_addr.sin6_family = AF_INET6; + ifra.ifra_addr.sin6_len = sizeof(struct sockaddr_in6); + ifra.ifra_addr.sin6_addr.s6_addr16[0] = htons(0xfe80); #if SCOPEDROUTING - ifra.ifra_addr.sin6_addr.s6_addr16[1] = 0 + ifra.ifra_addr.sin6_addr.s6_addr16[1] = 0 #else - ifra.ifra_addr.sin6_addr.s6_addr16[1] = htons(ifp->if_index); /* XXX */ + ifra.ifra_addr.sin6_addr.s6_addr16[1] = htons(ifp->if_index); /* XXX */ #endif - ifra.ifra_addr.sin6_addr.s6_addr32[1] = 0; - if ((ifp->if_flags & IFF_LOOPBACK) != 0) { - ifra.ifra_addr.sin6_addr.s6_addr32[2] = 0; - ifra.ifra_addr.sin6_addr.s6_addr32[3] = htonl(1); - } else { - if (get_ifid(ifp, altifp, &ifra.ifra_addr.sin6_addr) != 0) { - nd6log((LOG_ERR, - "%s: no ifid available\n", if_name(ifp))); - return -1; + ifra.ifra_addr.sin6_addr.s6_addr32[1] = 0; + if ((ifp->if_flags & IFF_LOOPBACK) != 0) { + ifra.ifra_addr.sin6_addr.s6_addr32[2] = 0; + ifra.ifra_addr.sin6_addr.s6_addr32[3] = htonl(1); + } else { + if (get_ifid(ifp, altifp, &ifra.ifra_addr.sin6_addr) != 0) { + nd6log((LOG_ERR, + " %s: no ifid available\n", if_name(ifp))); + return -1; + } } - } #if SCOPEDROUTING - ifra.ifra_addr.sin6_scope_id = - in6_addr2scopeid(ifp, &ifra.ifra_addr.sin6_addr); + ifra.ifra_addr.sin6_scope_id = + in6_addr2scopeid(ifp, &ifra.ifra_addr.sin6_addr); #endif - + } ifra.ifra_prefixmask.sin6_len = sizeof(struct sockaddr_in6); ifra.ifra_prefixmask.sin6_family = AF_INET6; ifra.ifra_prefixmask.sin6_addr = in6mask64; @@ -481,7 +486,7 @@ in6_ifattach_linklocal(ifp, altifp) /* * Do not let in6_update_ifa() do DAD, since we need a random delay - * before sending an NS at the first time the inteface becomes up. + * before sending an NS at the first time the interface becomes up. * Instead, in6_if_up() will start DAD with a proper random delay. */ ifra.ifra_flags |= IN6_IFF_NODAD; @@ -489,7 +494,8 @@ in6_ifattach_linklocal(ifp, altifp) /* * Now call in6_update_ifa() to do a bunch of procedures to configure * a link-local address. We can set NULL to the 3rd argument, because - * we know there's no other link-local address on the interface. + * we know there's no other link-local address on the interface + * and therefore we are adding one (instead of updating one). */ if ((error = in6_update_ifa(ifp, &ifra, NULL)) != 0) { /* @@ -600,15 +606,15 @@ in6_ifattach_loopback(ifp) ifra.ifra_lifetime.ia6t_vltime = ND6_INFINITE_LIFETIME; ifra.ifra_lifetime.ia6t_pltime = ND6_INFINITE_LIFETIME; - /* we don't need to perfrom DAD on loopback interfaces. */ + /* we don't need to perform DAD on loopback interfaces. */ ifra.ifra_flags |= IN6_IFF_NODAD; /* skip registration to the prefix list. XXX should be temporary. */ ifra.ifra_flags |= IN6_IFF_NOPFX; /* - * We can set NULL to the 3rd arg. See comments in - * in6_ifattach_linklocal(). + * We are sure that this is a newly assigned address, so we can set + * NULL to the 3rd arg. */ if ((error = in6_update_ifa(ifp, &ifra, NULL)) != 0) { log(LOG_ERR, "in6_ifattach_loopback: failed to configure " @@ -647,7 +653,7 @@ in6_nigroup(ifp, name, namelen, in6) while (p && *p && *p != '.' 
&& p - name < namelen) p++; if (p - name > sizeof(n) - 1) - return -1; /*label too long*/ + return -1; /* label too long */ l = p - name; strncpy(n, name, l); n[(int)l] = '\0'; @@ -734,40 +740,15 @@ * XXX multiple link-local address case */ void -in6_ifattach(ifp, altifp) +in6_ifattach(ifp, altifp, ifra) struct ifnet *ifp; struct ifnet *altifp; /* secondary EUI64 source */ + struct in6_aliasreq *ifra; { static size_t if_indexlim = 8; struct in6_ifaddr *ia; struct in6_addr in6; - u_long dl_tag; - - switch (ifp->if_type) { -#if IFT_BRIDGE /*OpenBSD 2.8*/ - /* some of the interfaces are inherently not IPv6 capable */ - case IFT_BRIDGE: - return; -#endif -#ifdef __APPLE__ - case IFT_ETHER: - dl_tag = ether_attach_inet6(ifp); - break; - case IFT_LOOP: - dl_tag = lo_attach_inet6(ifp); -#if NGIF > 0 - case IFT_GIF: - dl_tag = gif_attach_proto_family(ifp, PF_INET6); - break; -#endif -#if NSTF > 0 - case IFT_STF: - dl_tag = stf_attach_inet6(ifp); - break; -#endif -#endif - } /* * We have some arrays that should be indexed by if_index. @@ -810,6 +791,9 @@ in6_ifattach(ifp, altifp) icmp6_ifstatmax = if_indexlim; } + /* initialize NDP variables */ + nd6_ifattach(ifp); + /* initialize scope identifiers */ scope6_ifattach(ifp); @@ -820,8 +804,10 @@ in6_ifattach(ifp, altifp) #if IFT_STF case IFT_STF: /* - * 6to4 interface is a very speical kind of beast. - * no multicast, no linklocal (based on 03 draft). + * 6to4 interface is a very special kind of beast. + * no multicast, no linklocal. RFC2529 specifies how to make + * linklocals for 6to4 interface, but there's no use and + * it is rather harmful to have one. */ goto statinit; #endif @@ -839,9 +825,6 @@ in6_ifattach(ifp, altifp) return; } - /* initialize NDP variables */ - nd6_ifattach(ifp); - /* * assign loopback address for loopback interface. * XXX multiple loopback interface case. @@ -860,9 +843,12 @@ in6_ifattach(ifp, altifp) if (ip6_auto_linklocal) { ia = in6ifa_ifpforlinklocal(ifp, 0); if (ia == NULL) { - if (in6_ifattach_linklocal(ifp, altifp) == 0) { + if (in6_ifattach_linklocal(ifp, altifp, ifra) == 0) { /* linklocal address assigned */ } else { + log(LOG_INFO, "in6_ifattach: " + "%s failed to attach a linklocal address.\n", + if_name(ifp)); /* failed to assign linklocal address. bark? */ } } @@ -886,7 +872,6 @@ statinit: _MALLOC(sizeof(struct icmp6_ifstat), M_IFADDR, M_WAITOK); bzero(icmp6_ifstat[ifp->if_index], sizeof(struct icmp6_ifstat)); } - } /* @@ -969,6 +954,12 @@ in6_ifdetach(ifp) IFAFREE(&oia->ia_ifa); } +#ifndef __APPLE__ + +/* This is a cause for reentrancy, as those multicast addresses are + * freed both when the interface detaches and when the socket closes. + * Let the socket do the cleanup and not force it from the interface level. + */ /* leave from all multicast groups joined */ in6_pcbpurgeif0(LIST_FIRST(udbinfo.listhead), ifp); in6_pcbpurgeif0(LIST_FIRST(ripcbinfo.listhead), ifp); @@ -979,6 +970,7 @@ in6_ifdetach(ifp) in6_delmulti(in6m); in6m = NULL; } +#endif /* __APPLE__ */ /* * remove neighbor management table.
we call it twice just to make diff --git a/bsd/netinet6/in6_ifattach.h b/bsd/netinet6/in6_ifattach.h index de4a6fea4..307bd0b8f 100644 --- a/bsd/netinet6/in6_ifattach.h +++ b/bsd/netinet6/in6_ifattach.h @@ -37,7 +37,7 @@ #ifdef __APPLE_API_PRIVATE void in6_nigroup_attach __P((const char *, int)); void in6_nigroup_detach __P((const char *, int)); -void in6_ifattach __P((struct ifnet *, struct ifnet *)); +void in6_ifattach __P((struct ifnet *, struct ifnet *, struct in6_aliasreq *)); void in6_ifdetach __P((struct ifnet *)); void in6_get_tmpifid __P((struct ifnet *, u_int8_t *, const u_int8_t *, int)); void in6_tmpaddrtimer __P((void *)); diff --git a/bsd/netinet6/in6_pcb.c b/bsd/netinet6/in6_pcb.c index e2e04cd23..709196f22 100644 --- a/bsd/netinet6/in6_pcb.c +++ b/bsd/netinet6/in6_pcb.c @@ -342,7 +342,8 @@ in6_pcbconnect(inp, nam, p) int error; /* - * Call inner routine, to assign local interface address. + * Call inner routine, to assign local interface address. + * in6_pcbladdr() may automatically fill in sin6_scope_id. */ if ((error = in6_pcbladdr(inp, nam, &addr6)) != 0) return(error); @@ -618,12 +619,56 @@ in6_pcbdetach(inp) /* Check and free IPv4 related resources in case of mapped addr */ if (inp->inp_options) (void)m_free(inp->inp_options); - ip_freemoptions(inp->inp_moptions); + ip_freemoptions(inp->inp_moptions); inp->inp_vflag = 0; zfree(ipi->ipi_zone, inp); } +struct sockaddr * +in6_sockaddr(port, addr_p) + in_port_t port; + struct in6_addr *addr_p; +{ + struct sockaddr_in6 *sin6; + + MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6, M_SONAME, M_WAITOK); + bzero(sin6, sizeof *sin6); + sin6->sin6_family = AF_INET6; + sin6->sin6_len = sizeof(*sin6); + sin6->sin6_port = port; + sin6->sin6_addr = *addr_p; + if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) + sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]); + else + sin6->sin6_scope_id = 0; /*XXX*/ + if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) + sin6->sin6_addr.s6_addr16[1] = 0; + + return (struct sockaddr *)sin6; +} + +struct sockaddr * +in6_v4mapsin6_sockaddr(port, addr_p) + in_port_t port; + struct in_addr *addr_p; +{ + struct sockaddr_in sin; + struct sockaddr_in6 *sin6_p; + + bzero(&sin, sizeof sin); + sin.sin_family = AF_INET; + sin.sin_len = sizeof(sin); + sin.sin_port = port; + sin.sin_addr = *addr_p; + + MALLOC(sin6_p, struct sockaddr_in6 *, sizeof *sin6_p, M_SONAME, + M_WAITOK); + in6_sin_2_v4mapsin6(&sin, sin6_p); + + return (struct sockaddr *)sin6_p; +} + /* * The calling convention of in6_setsockaddr() and in6_setpeeraddr() was * modified to match the pru_sockaddr() and pru_peeraddr() entry points @@ -641,34 +686,20 @@ in6_setsockaddr(so, nam) { int s; register struct inpcb *inp; - register struct sockaddr_in6 *sin6; - - /* - * Do the malloc first in case it blocks. 
- */ - MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6, M_SONAME, M_WAITOK); - bzero(sin6, sizeof *sin6); - sin6->sin6_family = AF_INET6; - sin6->sin6_len = sizeof(*sin6); + struct in6_addr addr; + in_port_t port; s = splnet(); inp = sotoinpcb(so); if (!inp) { splx(s); - _FREE(sin6, M_SONAME); return EINVAL; } - sin6->sin6_port = inp->inp_lport; - sin6->sin6_addr = inp->in6p_laddr; + port = inp->inp_lport; + addr = inp->in6p_laddr; splx(s); - if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) - sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]); - else - sin6->sin6_scope_id = 0; /*XXX*/ - if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) - sin6->sin6_addr.s6_addr16[1] = 0; - *nam = (struct sockaddr *)sin6; + *nam = in6_sockaddr(port, &addr); return 0; } @@ -679,34 +710,20 @@ in6_setpeeraddr(so, nam) { int s; struct inpcb *inp; - register struct sockaddr_in6 *sin6; - - /* - * Do the malloc first in case it blocks. - */ - MALLOC(sin6, struct sockaddr_in6 *, sizeof(*sin6), M_SONAME, M_WAITOK); - bzero((caddr_t)sin6, sizeof (*sin6)); - sin6->sin6_family = AF_INET6; - sin6->sin6_len = sizeof(struct sockaddr_in6); + struct in6_addr addr; + in_port_t port; s = splnet(); inp = sotoinpcb(so); if (!inp) { splx(s); - _FREE(sin6, M_SONAME); return EINVAL; } - sin6->sin6_port = inp->inp_fport; - sin6->sin6_addr = inp->in6p_faddr; + port = inp->inp_fport; + addr = inp->in6p_faddr; splx(s); - if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) - sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]); - else - sin6->sin6_scope_id = 0; /*XXX*/ - if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) - sin6->sin6_addr.s6_addr16[1] = 0; - *nam = (struct sockaddr *)sin6; + *nam = in6_sockaddr(port, &addr); return 0; } @@ -723,6 +740,7 @@ in6_mapped_sockaddr(struct socket *so, struct sockaddr **nam) if (error == 0) in6_sin_2_v4mapsin6_in_sock(nam); } else + /* scope issues will be handled in in6_setsockaddr(). */ error = in6_setsockaddr(so, nam); return error; @@ -741,6 +759,7 @@ in6_mapped_peeraddr(struct socket *so, struct sockaddr **nam) if (error == 0) in6_sin_2_v4mapsin6_in_sock(nam); } else + /* scope issues will be handled in in6_setpeeraddr(). */ error = in6_setpeeraddr(so, nam); return error; @@ -760,9 +779,11 @@ in6_mapped_peeraddr(struct socket *so, struct sockaddr **nam) void in6_pcbnotify(head, dst, fport_arg, src, lport_arg, cmd, notify) struct inpcbhead *head; - struct sockaddr *dst, *src; + struct sockaddr *dst; + const struct sockaddr *src; u_int fport_arg, lport_arg; int cmd; +// struct inpcb *(*notify) __P((struct inpcb *, int)); void (*notify) __P((struct inpcb *, int)); { struct inpcb *inp, *ninp; @@ -781,7 +802,7 @@ in6_pcbnotify(head, dst, fport_arg, src, lport_arg, cmd, notify) /* * note that src can be NULL when we get notify by local fragmentation. */ - sa6_src = (src == NULL) ? sa6_any : *(struct sockaddr_in6 *)src; + sa6_src = (src == NULL) ? 
sa6_any : *(const struct sockaddr_in6 *)src; flowinfo = sa6_src.sin6_flowinfo; /* diff --git a/bsd/netinet6/in6_pcb.h b/bsd/netinet6/in6_pcb.h index a3dbf5ba6..b0ebf339e 100644 --- a/bsd/netinet6/in6_pcb.h +++ b/bsd/netinet6/in6_pcb.h @@ -90,9 +90,14 @@ struct inpcb * struct in6_addr *, u_int, struct in6_addr *, u_int, int, struct ifnet *)); void in6_pcbnotify __P((struct inpcbhead *, struct sockaddr *, - u_int, struct sockaddr *, u_int, int, + u_int, const struct sockaddr *, u_int, int, void (*)(struct inpcb *, int))); -void in6_rtchange __P((struct inpcb *, int)); +void + in6_rtchange __P((struct inpcb *, int)); +struct sockaddr * + in6_sockaddr __P((in_port_t port, struct in6_addr *addr_p)); +struct sockaddr * + in6_v4mapsin6_sockaddr __P((in_port_t port, struct in_addr *addr_p)); int in6_setpeeraddr __P((struct socket *so, struct sockaddr **nam)); int in6_setsockaddr __P((struct socket *so, struct sockaddr **nam)); int in6_mapped_sockaddr __P((struct socket *so, struct sockaddr **nam)); diff --git a/bsd/netinet6/in6_proto.c b/bsd/netinet6/in6_proto.c index 3fafd2426..d33c161ce 100644 --- a/bsd/netinet6/in6_proto.c +++ b/bsd/netinet6/in6_proto.c @@ -1,4 +1,4 @@ -/* $FreeBSD: src/sys/netinet6/in6_proto.c,v 1.6.2.7 2001/07/24 19:10:18 brooks Exp $ */ +/* $FreeBSD: src/sys/netinet6/in6_proto.c,v 1.19 2002/10/16 02:25:05 sam Exp $ */ /* $KAME: in6_proto.c,v 1.91 2001/05/27 13:28:35 itojun Exp $ */ /* @@ -141,6 +141,8 @@ extern struct domain inet6domain; extern int in6_inithead __P((void **, int)); void in6_dinit(void); +static int rip6_pr_output(struct mbuf *m, struct socket *so, struct sockaddr_in6 *, struct mbuf *); + struct ip6protosw inet6sw[] = { { 0, &inet6domain, IPPROTO_IPV6, 0, 0, 0, 0, 0, @@ -165,13 +167,13 @@ struct ip6protosw inet6sw[] = { 0, &tcp6_usrreqs, }, { SOCK_RAW, &inet6domain, IPPROTO_RAW, PR_ATOMIC|PR_ADDR, - rip6_input, rip6_output, rip6_ctlinput, rip6_ctloutput, + rip6_input, rip6_pr_output, rip6_ctlinput, rip6_ctloutput, 0, 0, 0, 0, 0, 0, &rip6_usrreqs }, { SOCK_RAW, &inet6domain, IPPROTO_ICMPV6, PR_ATOMIC|PR_ADDR|PR_LASTHDR, - icmp6_input, rip6_output, rip6_ctlinput, rip6_ctloutput, + icmp6_input, rip6_pr_output, rip6_ctlinput, rip6_ctloutput, 0, icmp6_init, icmp6_fasttimo, 0, 0, 0, &rip6_usrreqs @@ -220,27 +222,27 @@ struct ip6protosw inet6sw[] = { #endif /* IPSEC */ #if INET { SOCK_RAW, &inet6domain, IPPROTO_IPV4, PR_ATOMIC|PR_ADDR|PR_LASTHDR, - encap6_input, rip6_output, 0, rip6_ctloutput, + encap6_input, rip6_pr_output, 0, rip6_ctloutput, 0, encap_init, 0, 0, 0, 0, &rip6_usrreqs }, #endif /*INET*/ { SOCK_RAW, &inet6domain, IPPROTO_IPV6, PR_ATOMIC|PR_ADDR|PR_LASTHDR, - encap6_input, rip6_output, 0, rip6_ctloutput, + encap6_input, rip6_pr_output, 0, rip6_ctloutput, 0, encap_init, 0, 0, 0, 0, &rip6_usrreqs }, { SOCK_RAW, &inet6domain, IPPROTO_PIM, PR_ATOMIC|PR_ADDR|PR_LASTHDR, - pim6_input, rip6_output, 0, rip6_ctloutput, + pim6_input, rip6_pr_output, 0, rip6_ctloutput, 0, 0, 0, 0, 0, 0, &rip6_usrreqs }, /* raw wildcard */ { SOCK_RAW, &inet6domain, 0, PR_ATOMIC|PR_ADDR, - rip6_input, rip6_output, 0, rip6_ctloutput, + rip6_input, rip6_pr_output, 0, rip6_ctloutput, 0, 0, 0, 0, 0, 0, &rip6_usrreqs @@ -252,7 +254,7 @@ int in6_proto_count = (sizeof (inet6sw) / sizeof (struct ip6protosw)); struct domain inet6domain = { AF_INET6, "internet6", in6_dinit, 0, 0, - inet6sw, 0, + (struct protosw *)inet6sw, 0, in6_inithead, offsetof(struct sockaddr_in6, sin6_addr) << 3, sizeof(struct sockaddr_in6) , sizeof(struct sockaddr_in6), 0 }; @@ -278,6 +280,11 @@ in6_dinit() } } +int 
rip6_pr_output(struct mbuf *m, struct socket *so, struct sockaddr_in6 *sin6, struct mbuf *m1) +{ + panic("rip6_pr_output\n"); + return 0; +} /* * Internet configuration info @@ -298,8 +305,7 @@ int ip6_forwarding = IPV6FORWARDING; /* act as router? */ int ip6_sendredirects = IPV6_SENDREDIRECTS; int ip6_defhlim = IPV6_DEFHLIM; int ip6_defmcasthlim = IPV6_DEFAULT_MULTICAST_HOPS; -//int ip6_accept_rtadv = 0; /* "IPV6FORWARDING ? 0 : 1" is dangerous */ -int ip6_accept_rtadv = 1; /* "IPV6FORWARDING ? 0 : 1" is dangerous */ +int ip6_accept_rtadv = 0; /* "IPV6FORWARDING ? 0 : 1" is dangerous */ int ip6_maxfragpackets; /* initialized in frag6.c:frag6_init() */ int ip6_log_interval = 5; int ip6_hdrnestlimit = 50; /* appropriate? */ @@ -310,10 +316,7 @@ int ip6_gif_hlim = 0; int ip6_use_deprecated = 1; /* allow deprecated addr (RFC2462 5.5.4) */ int ip6_rr_prune = 5; /* router renumbering prefix * walk list every 5 sec. */ -int ip6_v6only = 0; -#ifdef __APPLE__ -int ip6_auto_on = 1; /* Start IPv6 per interface triggered by IPv4 address assignment */ -#endif +int ip6_v6only = 0; /* Mapped addresses on by default - Radar 3347718 */ u_int32_t ip6_id = 0UL; int ip6_keepfaith = 0; @@ -342,7 +345,7 @@ u_long rip6_recvspace = RIPV6RCVQ; int icmp6_rediraccept = 1; /* accept and process redirects */ int icmp6_redirtimeout = 10 * 60; /* 10 minutes */ int icmp6errppslim = 100; /* 100pps */ -int icmp6_nodeinfo = 1; /* enable/disable NI response */ +int icmp6_nodeinfo = 3; /* enable/disable NI response */ /* UDP on IP6 parameters */ int udp6_sendspace = 9216; /* really max datagram size */ @@ -475,6 +478,3 @@ SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_MAXNUDHINT, SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_DEBUG, nd6_debug, CTLFLAG_RW, &nd6_debug, 0, ""); -#ifdef __APPLE__ -SYSCTL_INT(_net_inet6_ip6, OID_AUTO, auto_on, CTLFLAG_RW, &ip6_auto_on,0, ""); -#endif diff --git a/bsd/netinet6/in6_src.c b/bsd/netinet6/in6_src.c index 5207bae56..3f9c8e4ff 100644 --- a/bsd/netinet6/in6_src.c +++ b/bsd/netinet6/in6_src.c @@ -237,7 +237,10 @@ in6_selectsrc(dstsock, opts, mopts, ro, laddr, errorp) */ if (ro) { if (ro->ro_rt && - !IN6_ARE_ADDR_EQUAL(&satosin6(&ro->ro_dst)->sin6_addr, dst)) { + (!(ro->ro_rt->rt_flags & RTF_UP) || + satosin6(&ro->ro_dst)->sin6_family != AF_INET6 || + !IN6_ARE_ADDR_EQUAL(&satosin6(&ro->ro_dst)->sin6_addr, + dst))) { rtfree(ro->ro_rt); ro->ro_rt = (struct rtentry *)0; } diff --git a/bsd/netinet6/in6_var.h b/bsd/netinet6/in6_var.h index e33393b75..071ad9226 100644 --- a/bsd/netinet6/in6_var.h +++ b/bsd/netinet6/in6_var.h @@ -85,7 +85,7 @@ * hour rule for hosts). they should never be modified by nd6_timeout or * anywhere else. 
* userland -> kernel: accept pltime/vltime - * kernel -> userland: throuw up everything + * kernel -> userland: throw up everything * in kernel: modify preferred/expire only */ struct in6_addrlifetime { @@ -467,6 +467,20 @@ void in6_post_msg(struct ifnet *, u_long, struct in6_ifaddr *); #define SIOCGETMIFCNT_IN6 _IOWR('u', 107, \ struct sioc_mif_req6) /* get pkt cnt per if */ +#ifdef KERNEL_PRIVATE +/* + * temporary control calls to attach/detach IPv6 to/from an ethernet interface + */ +#define SIOCPROTOATTACH_IN6 _IOWR('i', 110, struct in6_aliasreq) /* attach proto to interface */ +#define SIOCPROTODETACH_IN6 _IOWR('i', 111, struct in6_ifreq) /* detach proto from interface */ + +#define SIOCLL_START _IOWR('i', 130, struct in6_aliasreq) /* start acquiring linklocal on interface */ +#define SIOCLL_STOP _IOWR('i', 131, struct in6_ifreq) /* deconfigure linklocal from interface */ +#define SIOCAUTOCONF_START _IOWR('i', 132, struct in6_ifreq) /* accept rtadvd on this interface */ +#define SIOCAUTOCONF_STOP _IOWR('i', 133, struct in6_ifreq) /* stop accepting rtadv for this interface */ +#endif /* KERNEL_PRIVATE */ + + #define IN6_IFF_ANYCAST 0x01 /* anycast address */ #define IN6_IFF_TENTATIVE 0x02 /* tentative address */ #define IN6_IFF_DUPLICATED 0x04 /* DAD detected duplicate */ @@ -564,7 +578,7 @@ extern LIST_HEAD(in6_multihead, in6_multi) in6_multihead; /* * Structure used by macros below to remember position when stepping through - * all of eht in6_multi records. + * all of the in6_multi records. */ struct in6_multistep { struct in6_ifaddr *i_ia; diff --git a/bsd/netinet6/ip6_forward.c b/bsd/netinet6/ip6_forward.c index 18cde6fcc..d87abb122 100644 --- a/bsd/netinet6/ip6_forward.c +++ b/bsd/netinet6/ip6_forward.c @@ -1,4 +1,4 @@ -/* $FreeBSD: src/sys/netinet6/ip6_forward.c,v 1.4.2.4 2001/07/03 11:01:53 ume Exp $ */ +/* $FreeBSD: src/sys/netinet6/ip6_forward.c,v 1.16 2002/10/16 02:25:05 sam Exp $ */ /* $KAME: ip6_forward.c,v 1.69 2001/05/17 03:48:30 itojun Exp $ */ /* @@ -268,7 +268,7 @@ ip6_forward(m, srcrt) break; default: printf("ip6_output (ipsec): error code %d\n", error); - /*fall through*/ + /* fall through */ case ENOENT: /* don't show these error codes to the user */ break; @@ -344,7 +344,7 @@ ip6_forward(m, srcrt) * for the reason that the destination is beyond the scope of the * source address, discard the packet and return an icmp6 destination * unreachable error with Code 2 (beyond scope of source address). - * [draft-ietf-ipngwg-icmp-v3-00.txt, Section 3.1] + * [draft-ietf-ipngwg-icmp-v3-02.txt, Section 3.1] */ if (in6_addr2scopeid(m->m_pkthdr.rcvif, &ip6->ip6_src) != in6_addr2scopeid(rt->rt_ifp, &ip6->ip6_src)) { @@ -380,7 +380,7 @@ ip6_forward(m, srcrt) #endif mtu = rt->rt_ifp->if_mtu; -#if IPSEC_IPV6FWD +#if IPSEC /* * When we do IPsec tunnel ingress, we need to play * with if_mtu value (decrement IPsec header size @@ -482,11 +482,11 @@ ip6_forward(m, srcrt) #endif { printf("ip6_forward: outgoing interface is loopback. " - "src %s, dst %s, nxt %d, rcvif %s, outif %s\n", - ip6_sprintf(&ip6->ip6_src), - ip6_sprintf(&ip6->ip6_dst), - ip6->ip6_nxt, if_name(m->m_pkthdr.rcvif), - if_name(rt->rt_ifp)); + "src %s, dst %s, nxt %d, rcvif %s, outif %s\n", + ip6_sprintf(&ip6->ip6_src), + ip6_sprintf(&ip6->ip6_dst), + ip6->ip6_nxt, if_name(m->m_pkthdr.rcvif), + if_name(rt->rt_ifp)); } /* we can just use rcvif in forwarding.
*/ diff --git a/bsd/netinet6/ip6_fw.h b/bsd/netinet6/ip6_fw.h index eb41437f4..d2acdf05d 100644 --- a/bsd/netinet6/ip6_fw.h +++ b/bsd/netinet6/ip6_fw.h @@ -42,9 +42,8 @@ #include #include -#ifdef __APPLE_API_PRIVATE -#define IP6_FW_CURRENT_API_VERSION 20 /* Version of this API */ +#define IPV6_FW_CURRENT_API_VERSION 20 /* Version of this API */ /* @@ -212,6 +211,7 @@ struct ip6_fw_chain { * Main firewall chains definitions and global var's definitions. */ #ifdef KERNEL +#ifdef __APPLE_API_PRIVATE #define M_IP6FW M_IPFW @@ -223,14 +223,15 @@ void ip6_fw_init(void); /* Firewall hooks */ struct ip6_hdr; +struct sockopt; typedef int ip6_fw_chk_t __P((struct ip6_hdr**, struct ifnet*, u_short *, struct mbuf**)); -typedef int ip6_fw_ctl_t __P((int, struct mbuf**)); +typedef int ip6_fw_ctl_t __P((struct sockopt *)); extern ip6_fw_chk_t *ip6_fw_chk_ptr; extern ip6_fw_ctl_t *ip6_fw_ctl_ptr; extern int ip6_fw_enable; -#endif /* KERNEL */ #endif /* __APPLE_API_PRIVATE */ +#endif /* KERNEL */ #endif /* _IP6_FW_H */ diff --git a/bsd/netinet6/ip6_input.c b/bsd/netinet6/ip6_input.c index 2d87b6f5c..2dc986a66 100644 --- a/bsd/netinet6/ip6_input.c +++ b/bsd/netinet6/ip6_input.c @@ -199,6 +199,7 @@ ip6_init() #endif nd6_init(); frag6_init(); + icmp6_init(); /* * in many cases, random() here does NOT return random number * as initialization during bootstrap time occur in fixed order. @@ -207,7 +208,7 @@ ip6_init() ip6_flow_seq = random() ^ tv.tv_usec; microtime(&tv); ip6_desync_factor = (random() ^ tv.tv_usec) % MAX_TEMP_DESYNC_FACTOR; - timeout(ip6_init2, (caddr_t)0, 1 * hz); + timeout(ip6_init2, (caddr_t)0, 2 * hz); } static void @@ -222,7 +223,7 @@ ip6_init2(dummy) * to route local address of p2p link to loopback, * assign loopback address first. */ - in6_ifattach(&loif[0], NULL); + in6_ifattach(&loif[0], NULL, NULL); #ifdef __APPLE__ /* nd6_timer_init */ @@ -323,7 +324,7 @@ ip6_input(m) ip6_delaux(m); /* - * mbuf statistics by kazu + * mbuf statistics */ if (m->m_flags & M_EXT) { if (m->m_next) @@ -334,7 +335,7 @@ ip6_input(m) #define M2MMAX (sizeof(ip6stat.ip6s_m2m)/sizeof(ip6stat.ip6s_m2m[0])) if (m->m_next) { if (m->m_flags & M_LOOP) { - ip6stat.ip6s_m2m[loif[0].if_index]++; /*XXX*/ + ip6stat.ip6s_m2m[loif[0].if_index]++; /* XXX */ } else if (m->m_pkthdr.rcvif->if_index < M2MMAX) ip6stat.ip6s_m2m[m->m_pkthdr.rcvif->if_index]++; else @@ -366,7 +367,7 @@ ip6_input(m) n = NULL; } } - if (!n) { + if (n == NULL) { m_freem(m); return; /*ENOBUFS*/ } @@ -433,6 +434,7 @@ ip6_input(m) in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr); goto bad; } + /* * The following check is not documented in specs. A malicious * party may be able to use IPv4 mapped addr to confuse tcp/udp stack @@ -660,7 +662,7 @@ ip6_input(m) && ip6_forward_rt.ro_rt->rt_ifp->if_type == IFT_FAITH) { /* XXX do we need more sanity checks? */ ours = 1; - deliverifp = ip6_forward_rt.ro_rt->rt_ifp; /*faith*/ + deliverifp = ip6_forward_rt.ro_rt->rt_ifp; /* faith */ goto hbhcheck; } } @@ -718,7 +720,7 @@ ip6_input(m) ip6 = mtod(m, struct ip6_hdr *); /* - * if the payload length field is 0 and the next header field + * if the payload length field is 0 and the next header field * indicates Hop-by-Hop Options header, then a Jumbo Payload * option MUST be included. 
*/ @@ -1652,8 +1654,10 @@ ip6_addaux(m) } } else { n = m_aux_add(m, AF_INET6, -1); - n->m_len = sizeof(struct ip6aux); - bzero(mtod(n, caddr_t), n->m_len); + if (n) { + n->m_len = sizeof(struct ip6aux); + bzero(mtod(n, caddr_t), n->m_len); + } } return n; } diff --git a/bsd/netinet6/ip6_mroute.c b/bsd/netinet6/ip6_mroute.c index e4b060f9e..b8633a43b 100644 --- a/bsd/netinet6/ip6_mroute.c +++ b/bsd/netinet6/ip6_mroute.c @@ -1,5 +1,5 @@ -/* $FreeBSD: src/sys/netinet6/ip6_mroute.c,v 1.2.2.4 2001/07/03 11:01:53 ume Exp $ */ -/* $KAME: ip6_mroute.c,v 1.46 2001/04/04 05:17:30 itojun Exp $ */ +/* $FreeBSD: src/sys/netinet6/ip6_mroute.c,v 1.16.2.1 2002/12/18 21:39:40 suz Exp $ */ +/* $KAME: ip6_mroute.c,v 1.58 2001/12/18 02:36:31 itojun Exp $ */ /* * Copyright (C) 1998 WIDE Project. @@ -100,7 +100,7 @@ struct mrt6stat mrt6stat; #define RTE_FOUND 0x2 struct mf6c *mf6ctable[MF6CTBLSIZ]; -u_char nexpire[MF6CTBLSIZ]; +u_char n6expire[MF6CTBLSIZ]; static struct mif6 mif6table[MAXMIFS]; #if MRT6DEBUG u_int mrt6debug = 0; /* debug level */ @@ -144,11 +144,6 @@ static mifi_t nummifs = 0; static mifi_t reg_mif_num = (mifi_t)-1; static struct pim6stat pim6stat; - -/* - * one-back cache used by ipip_input to locate a tunnel's mif - * given a datagram's src ip address. - */ static int pim6; /* @@ -414,7 +409,7 @@ ip6_mrouter_init(so, m, cmd) ip6_mrouter_ver = cmd; bzero((caddr_t)mf6ctable, sizeof(mf6ctable)); - bzero((caddr_t)nexpire, sizeof(nexpire)); + bzero((caddr_t)n6expire, sizeof(n6expire)); pim6 = 0;/* used for stubbing out/in pim stuff */ @@ -685,7 +680,8 @@ add_m6fc(mfccp) if (rt) { #if MRT6DEBUG if (mrt6debug & DEBUG_MFC) - log(LOG_DEBUG,"add_m6fc update o %s g %s p %x\n", + log(LOG_DEBUG, + "add_m6fc no upcall h %d o %s g %s p %x\n", ip6_sprintf(&mfccp->mf6cc_origin.sin6_addr), ip6_sprintf(&mfccp->mf6cc_mcastgrp.sin6_addr), mfccp->mf6cc_parent); @@ -738,7 +734,7 @@ add_m6fc(mfccp) rt->mf6c_wrong_if = 0; rt->mf6c_expire = 0; /* Don't clean this guy up */ - nexpire[hash]--; + n6expire[hash]--; /* free packets Qed at the end of this entry */ for (rte = rt->mf6c_stall; rte != NULL; ) { @@ -785,7 +781,7 @@ add_m6fc(mfccp) rt->mf6c_wrong_if = 0; if (rt->mf6c_expire) - nexpire[hash]--; + n6expire[hash]--; rt->mf6c_expire = 0; } } @@ -1149,7 +1145,7 @@ ip6_mforward(ip6, ifp, m) rt->mf6c_mcastgrp.sin6_len = sizeof(struct sockaddr_in6); rt->mf6c_mcastgrp.sin6_addr = ip6->ip6_dst; rt->mf6c_expire = UPCALL_EXPIRE; - nexpire[hash]++; + n6expire[hash]++; rt->mf6c_parent = MF6C_INCOMPLETE_PARENT; /* link into table */ @@ -1217,7 +1213,7 @@ expire_upcalls(unused) s = splnet(); for (i = 0; i < MF6CTBLSIZ; i++) { - if (nexpire[i] == 0) + if (n6expire[i] == 0) continue; nptr = &mf6ctable[i]; while ((mfc = *nptr) != NULL) { @@ -1247,7 +1243,7 @@ expire_upcalls(unused) rte = n; } while (rte != NULL); mrt6stat.mrt6s_cache_cleanups++; - nexpire[i]--; + n6expire[i]--; *nptr = mfc->mf6c_next; FREE(mfc, M_MRTABLE); @@ -1283,7 +1279,7 @@ ip6_mdq(m, ifp, rt) /* * Macro to send packet on mif. Since RSVP packets don't get counted on * input, they shouldn't get counted on output, so statistics keeping is - * seperate. + * separate. */ #define MC6_SEND(ip6, mifp, m) do { \ @@ -1510,7 +1506,7 @@ phyint_send(ip6, mifp, m) * Put the packet into the sending queue of the outgoing interface * if it would fit in the MTU of the interface. 
*/ - if (mb_copy->m_pkthdr.len < ifp->if_mtu || ifp->if_mtu < IPV6_MMTU) { + if (mb_copy->m_pkthdr.len <= ifp->if_mtu || ifp->if_mtu < IPV6_MMTU) { dst6->sin6_len = sizeof(struct sockaddr_in6); dst6->sin6_family = AF_INET6; dst6->sin6_addr = ip6->ip6_dst; @@ -1617,7 +1613,7 @@ register_send(ip6, mif, m) #if MRT6DEBUG if (mrt6debug) log(LOG_WARNING, - "register_send: ip_mrouter socket queue full\n"); + "register_send: ip6_mrouter socket queue full\n"); #endif ++mrt6stat.mrt6s_upq_sockfull; return ENOBUFS; diff --git a/bsd/netinet6/ip6_output.c b/bsd/netinet6/ip6_output.c index a079aa4b0..3d5eebd78 100644 --- a/bsd/netinet6/ip6_output.c +++ b/bsd/netinet6/ip6_output.c @@ -1,5 +1,5 @@ -/* $FreeBSD: src/sys/netinet6/ip6_output.c,v 1.13.2.10 2001/07/15 18:18:34 ume Exp $ */ -/* $KAME: ip6_output.c,v 1.180 2001/05/21 05:37:50 jinmei Exp $ */ +/* $FreeBSD: src/sys/netinet6/ip6_output.c,v 1.43 2002/10/31 19:45:48 ume Exp $ */ +/* $KAME: ip6_output.c,v 1.279 2002/01/26 06:12:30 jinmei Exp $ */ /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. @@ -82,6 +82,7 @@ #include #include +#include #include #include #include @@ -108,6 +109,8 @@ static MALLOC_DEFINE(M_IPMOPTS, "ip6_moptions", "internet multicast options"); static u_long lo_dl_tag = 0; +extern u_long route_generation; + struct ip6_exthdrs { struct mbuf *ip6e_ip6; @@ -119,7 +122,7 @@ struct ip6_exthdrs { static int ip6_pcbopts __P((struct ip6_pktopts **, struct mbuf *, struct socket *, struct sockopt *sopt)); -static int ip6_setmoptions __P((int, struct ip6_moptions **, struct mbuf *)); +static int ip6_setmoptions __P((int, struct inpcb *, struct mbuf *)); static int ip6_getmoptions __P((int, struct ip6_moptions *, struct mbuf **)); static int ip6_copyexthdr __P((struct mbuf **, caddr_t, int)); static int ip6_insertfraghdr __P((struct mbuf *, struct mbuf *, int, @@ -127,6 +130,10 @@ static int ip6_insertfraghdr __P((struct mbuf *, struct mbuf *, int, static int ip6_insert_jumboopt __P((struct ip6_exthdrs *, u_int32_t)); static int ip6_splithdr __P((struct mbuf *, struct ip6_exthdrs *)); +extern int ip_createmoptions(struct ip_moptions **imop); +extern int ip_addmembership(struct ip_moptions *imo, struct ip_mreq *mreq); +extern int ip_dropmembership(struct ip_moptions *imo, struct ip_mreq *mreq); + /* * IP6 output. The packet in mbuf chain m contains a skeletal IP6 * header (with pri, len, nxt, hlim, src, dst). @@ -314,7 +321,8 @@ ip6_output(m0, opt, ro, flags, im6o, ifpp) /* * we treat dest2 specially. this makes IPsec processing - * much easier. + * much easier. the goal here is to make mprev point the + * mbuf prior to dest2. * * result: IPv6 dest2 payload * m and mprev will point to IPv6 header. @@ -392,7 +400,7 @@ ip6_output(m0, opt, ro, flags, im6o, ifpp) break; default: printf("ip6_output (ipsec): error code %d\n", error); - /*fall through*/ + /* fall through */ case ENOENT: /* don't show these error codes to the user */ error = 0; @@ -468,7 +476,9 @@ skip_ipsec2:; * and is still up. If not, free it and try again. 
*/ if (ro->ro_rt && ((ro->ro_rt->rt_flags & RTF_UP) == 0 || - !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_dst))) { + dst->sin6_family != AF_INET6 || + !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_dst) || + ro->ro_rt->generation_id != route_generation)) { rtfree(ro->ro_rt); ro->ro_rt = (struct rtentry *)0; } @@ -521,7 +531,7 @@ skip_ipsec2:; break; default: printf("ip6_output (ipsec): error code %d\n", error); - /*fall through*/ + /* fall through */ case ENOENT: /* don't show these error codes to the user */ error = 0; @@ -532,7 +542,7 @@ skip_ipsec2:; exthdrs.ip6e_ip6 = m; } -#endif /*IPSEC*/ +#endif /* IPSEC */ if (!IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { /* Unicast */ @@ -785,9 +795,8 @@ skip_ipsec2:; * We eventually have sockaddr_in6 and use the sin6_scope_id * field of the structure here. * We rely on the consistency between two scope zone ids - * of source add destination, which should already be assured - * larger scopes than link will be supported in the near - * future. + * of source and destination, which should already be assured. + * Larger scopes than link will be supported in the future. */ origifp = NULL; if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) @@ -825,7 +834,7 @@ skip_ipsec2:; */ if (ip6_fw_enable && ip6_fw_chk_ptr) { u_short port = 0; - m->m_pkthdr.rcvif = NULL; /*XXX*/ + m->m_pkthdr.rcvif = NULL; /* XXX */ /* If ipfw says divert, we have to just drop packet */ if ((*ip6_fw_chk_ptr)(&ip6, ifp, &port, &m)) { m_freem(m); @@ -963,7 +972,8 @@ skip_ipsec2:; /* * Loop through length of segment after first fragment, - * make new header and copy data of each part and link onto chain. + * make new header and copy data of each part and link onto + * chain. */ m0 = m; for (off = hlen; off < tlen; off += len) { @@ -1240,6 +1250,8 @@ ip6_insertfraghdr(m0, m, hlen, frghdrp) return(0); } +extern int load_ipfw(); + /* * IP6 socket option processing. */ @@ -1255,14 +1267,14 @@ ip6_ctloutput(so, sopt) int optlen; struct proc *p; - if (sopt) { + if (sopt == NULL) + panic("ip6_ctloutput: arg soopt is NULL"); + else { level = sopt->sopt_level; op = sopt->sopt_dir; optname = sopt->sopt_name; optlen = sopt->sopt_valsize; p = sopt->sopt_p; - } else { - panic("ip6_ctloutput: arg soopt is NULL"); } error = optval = 0; @@ -1358,12 +1370,11 @@ do { \ error = EINVAL; break; } - /* - * XXX: BINDV6ONLY should be integrated - * into V6ONLY. 
- */ - OPTSET(IN6P_BINDV6ONLY); OPTSET(IN6P_IPV6_V6ONLY); + if (optval) + in6p->in6p_vflag &= ~INP_IPV4; + else + in6p->in6p_vflag |= INP_IPV4; break; } break; @@ -1430,9 +1441,7 @@ do { \ m->m_len = sopt->sopt_valsize; error = sooptcopyin(sopt, mtod(m, char *), m->m_len, m->m_len); - error = ip6_setmoptions(sopt->sopt_name, - &in6p->in6p_moptions, - m); + error = ip6_setmoptions(sopt->sopt_name, in6p, m); (void)m_free(m); } break; @@ -1474,7 +1483,7 @@ do { \ if ((error = soopt_getm(sopt, &m)) != 0) /* XXX */ break; - if (error = soopt_mcopyin(sopt, m)) /* XXX */ + if ((error = soopt_mcopyin(sopt, m)) != 0) /* XXX */ break; if (m) { req = mtod(m, caddr_t); @@ -1491,21 +1500,12 @@ do { \ case IPV6_FW_DEL: case IPV6_FW_FLUSH: case IPV6_FW_ZERO: - { - struct mbuf *m; - struct mbuf **mp = &m; - - if (ip6_fw_ctl_ptr == NULL) + { + if (ip6_fw_ctl_ptr == NULL && load_ipfw() != 0) return EINVAL; - /* XXX */ - if ((error = soopt_getm(sopt, &m)) != 0) - break; - /* XXX */ - if ((error = soopt_mcopyin(sopt, m)) != 0) - break; - error = (*ip6_fw_ctl_ptr)(optname, mp); - m = *mp; - } + + error = (*ip6_fw_ctl_ptr)(sopt); + } break; default: @@ -1550,8 +1550,7 @@ do { \ break; case IPV6_V6ONLY: - /* XXX: see the setopt case. */ - optval = OPTBIT(IN6P_BINDV6ONLY); + optval = OPTBIT(IN6P_IPV6_V6ONLY); break; case IPV6_PORTRANGE: @@ -1649,20 +1648,12 @@ do { \ #endif /* KAME IPSEC */ case IPV6_FW_GET: - { - struct mbuf *m; - struct mbuf **mp = &m; - - if (ip6_fw_ctl_ptr == NULL) - { + { + if (ip6_fw_ctl_ptr == NULL && load_ipfw() != 0) return EINVAL; + + error = (*ip6_fw_ctl_ptr)(sopt); } - error = (*ip6_fw_ctl_ptr)(optname, mp); - if (error == 0) - error = soopt_mcopyout(sopt, m); /* XXX */ - if (error == 0 && m) - m_freem(m); - } break; default: @@ -1708,7 +1699,8 @@ ip6_pcbopts(pktopt, m, so, sopt) if (!m || m->m_len == 0) { /* - * Only turning off any previous options. + * Only turning off any previous options, regardless of + * whether the opt is just created or given. */ if (opt) FREE(opt, M_IP6OPT); @@ -1720,6 +1712,7 @@ ip6_pcbopts(pktopt, m, so, sopt) priv = 1; if ((error = ip6_setpktoptions(m, opt, priv, 1)) != 0) { ip6_clearpktopts(opt, 1, -1); /* XXX: discard all options */ + FREE(opt, M_IP6OPT); return(error); } *pktopt = opt; @@ -1811,7 +1804,7 @@ ip6_copypktopts(src, canwait) dst = _MALLOC(sizeof(*dst), M_IP6OPT, canwait); if (dst == NULL && canwait == M_NOWAIT) - goto bad; + return (NULL); bzero(dst, sizeof(*dst)); dst->ip6po_hlim = src->ip6po_hlim; @@ -1837,13 +1830,13 @@ ip6_copypktopts(src, canwait) return(dst); bad: - printf("ip6_copypktopts: copy failed"); if (dst->ip6po_pktinfo) FREE(dst->ip6po_pktinfo, M_IP6OPT); if (dst->ip6po_nexthop) FREE(dst->ip6po_nexthop, M_IP6OPT); if (dst->ip6po_hbh) FREE(dst->ip6po_hbh, M_IP6OPT); if (dst->ip6po_dest1) FREE(dst->ip6po_dest1, M_IP6OPT); if (dst->ip6po_dest2) FREE(dst->ip6po_dest2, M_IP6OPT); if (dst->ip6po_rthdr) FREE(dst->ip6po_rthdr, M_IP6OPT); + FREE(dst, M_IP6OPT); return(NULL); } #undef PKTOPT_EXTHDRCPY @@ -1864,16 +1857,18 @@ ip6_freepcbopts(pktopt) * Set the IP6 multicast options in response to user setsockopt(). 
*/ static int -ip6_setmoptions(optname, im6op, m) +ip6_setmoptions(optname, in6p, m) int optname; - struct ip6_moptions **im6op; + struct inpcb* in6p; struct mbuf *m; { int error = 0; u_int loop, ifindex; struct ipv6_mreq *mreq; struct ifnet *ifp; + struct ip6_moptions **im6op = &in6p->in6p_moptions; struct ip6_moptions *im6o = *im6op; + struct ip_moptions *imo; struct route_in6 ro; struct sockaddr_in6 *dst; struct in6_multi_mship *imm; @@ -1895,6 +1890,18 @@ ip6_setmoptions(optname, im6op, m) im6o->im6o_multicast_loop = IPV6_DEFAULT_MULTICAST_LOOP; LIST_INIT(&im6o->im6o_memberships); } + + if (in6p->inp_moptions == NULL) { + /* + * No IPv4 multicast option buffer attached to the pcb; + * call ip_createmoptions to allocate one and initialize + * to default values. + */ + error = ip_createmoptions(&in6p->inp_moptions); + if (error != 0) + return error; + } + imo = in6p->inp_moptions; switch (optname) { @@ -1917,6 +1924,7 @@ ip6_setmoptions(optname, im6op, m) break; } im6o->im6o_multicast_ifp = ifp; + imo->imo_multicast_ifp = ifp; break; case IPV6_MULTICAST_HOPS: @@ -1932,10 +1940,13 @@ ip6_setmoptions(optname, im6op, m) bcopy(mtod(m, u_int *), &optval, sizeof(optval)); if (optval < -1 || optval >= 256) error = EINVAL; - else if (optval == -1) + else if (optval == -1) { im6o->im6o_multicast_hlim = ip6_defmcasthlim; - else + imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL; + } else { im6o->im6o_multicast_hlim = optval; + imo->imo_multicast_ttl = optval; + } break; } @@ -1954,6 +1965,7 @@ ip6_setmoptions(optname, im6op, m) break; } im6o->im6o_multicast_loop = loop; + imo->imo_multicast_loop = loop; break; case IPV6_JOIN_GROUP: @@ -1966,6 +1978,15 @@ ip6_setmoptions(optname, im6op, m) break; } mreq = mtod(m, struct ipv6_mreq *); + /* + * If the interface is specified, validate it. + */ + if (mreq->ipv6mr_interface < 0 + || if_index < mreq->ipv6mr_interface) { + error = ENXIO; /* XXX EINVAL? */ + break; + } + if (IN6_IS_ADDR_UNSPECIFIED(&mreq->ipv6mr_multiaddr)) { /* * We use the unspecified address to specify to accept * all multicast addresses. Only super user is allowed * to do this. */ if (suser(p->p_ucred, &p->p_acflag)) { error = EACCES; break; } + } else if (IN6_IS_ADDR_V4MAPPED(&mreq->ipv6mr_multiaddr)) { + struct ip_mreq v4req; + + v4req.imr_multiaddr.s_addr = mreq->ipv6mr_multiaddr.s6_addr32[3]; + v4req.imr_interface.s_addr = INADDR_ANY; + + /* Find an IPv4 address on the specified interface. */ + if (mreq->ipv6mr_interface != 0) { + struct in_ifaddr *ifa; + + ifp = ifindex2ifnet[mreq->ipv6mr_interface]; + + TAILQ_FOREACH(ifa, &in_ifaddrhead, ia_link) { + if (ifa->ia_ifp == ifp) { + v4req.imr_interface = IA_SIN(ifa)->sin_addr; + break; + } + } + + if (v4req.imr_interface.s_addr == 0) { + /* Interface has no IPv4 address. */ + error = EINVAL; + break; + } + } + + error = ip_addmembership(imo, &v4req); + break; } else if (!IN6_IS_ADDR_MULTICAST(&mreq->ipv6mr_multiaddr)) { error = EINVAL; break; } - - /* - * If the interface is specified, validate it. - */ - if (mreq->ipv6mr_interface < 0 - || if_index < mreq->ipv6mr_interface) { - error = ENXIO; /* XXX EINVAL? */ - break; - } /* * If no interface was explicitly specified, choose an * appropriate one according to the given multicast address.
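The IPV6_JOIN_GROUP arm above now recognizes v4-mapped group addresses and hands them to the IPv4 machinery through ip_addmembership(). A sketch of just the translation step, using only standard netinet types; the helper name is illustrative, not part of the patch:

#include <netinet/in.h>
#include <string.h>

/*
 * Convert a v4-mapped IPv6 multicast request (::ffff:a.b.c.d) into
 * the equivalent IPv4 ip_mreq, mirroring the logic the hunk above
 * adds to ip6_setmoptions().  Returns 0 on success, -1 if the group
 * is not v4-mapped.
 */
static int
mreq6_to_mreq4(const struct ipv6_mreq *m6, struct ip_mreq *m4)
{
	if (!IN6_IS_ADDR_V4MAPPED(&m6->ipv6mr_multiaddr))
		return (-1);
	/* The IPv4 group lives in the low 32 bits of the mapped address. */
	memcpy(&m4->imr_multiaddr.s_addr,
	    &m6->ipv6mr_multiaddr.s6_addr[12], sizeof(struct in_addr));
	/* Interface selection is resolved separately, as in the hunk. */
	m4->imr_interface.s_addr = INADDR_ANY;
	return (0);
}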
@@ -2078,15 +2118,6 @@ ip6_setmoptions(optname, im6op, m) break; } mreq = mtod(m, struct ipv6_mreq *); - if (IN6_IS_ADDR_UNSPECIFIED(&mreq->ipv6mr_multiaddr)) { - if (suser(p->p_ucred, &p->p_acflag)) { - error = EACCES; - break; - } - } else if (!IN6_IS_ADDR_MULTICAST(&mreq->ipv6mr_multiaddr)) { - error = EINVAL; - break; - } /* * If an interface address was specified, get a pointer * to its ifnet structure. @@ -2097,6 +2128,35 @@ ip6_setmoptions(optname, im6op, m) break; } ifp = ifindex2ifnet[mreq->ipv6mr_interface]; + + if (IN6_IS_ADDR_UNSPECIFIED(&mreq->ipv6mr_multiaddr)) { + if (suser(p->p_ucred, &p->p_acflag)) { + error = EACCES; + break; + } + } else if (IN6_IS_ADDR_V4MAPPED(&mreq->ipv6mr_multiaddr)) { + struct ip_mreq v4req; + + v4req.imr_multiaddr.s_addr = mreq->ipv6mr_multiaddr.s6_addr32[3]; + v4req.imr_interface.s_addr = INADDR_ANY; + + if (ifp != NULL) { + struct in_ifaddr *ifa; + + TAILQ_FOREACH(ifa, &in_ifaddrhead, ia_link) { + if (ifa->ia_ifp == ifp) { + v4req.imr_interface = IA_SIN(ifa)->sin_addr; + break; + } + } + } + + error = ip_dropmembership(imo, &v4req); + break; + } else if (!IN6_IS_ADDR_MULTICAST(&mreq->ipv6mr_multiaddr)) { + error = EINVAL; + break; + } /* * Put interface index into the multicast address, * if the address has link-local scope. @@ -2145,6 +2205,14 @@ ip6_setmoptions(optname, im6op, m) FREE(*im6op, M_IPMOPTS); *im6op = NULL; } + if (imo->imo_multicast_ifp == NULL && + imo->imo_multicast_vif == -1 && + imo->imo_multicast_ttl == IP_DEFAULT_MULTICAST_TTL && + imo->imo_multicast_loop == IP_DEFAULT_MULTICAST_LOOP && + imo->imo_num_memberships == 0) { + ip_freemoptions(imo); + in6p->inp_moptions = 0; + } return(error); } @@ -2491,12 +2559,13 @@ ip6_mloopback(ifp, m, dst) if (lo_dl_tag == 0) dlil_find_dltag(APPLE_IF_FAM_LOOPBACK, 0, PF_INET, &lo_dl_tag); - if (lo_dl_tag) - dlil_output(lo_dl_tag, copym, 0, (struct sockaddr *)&dst, 0); - else + if (lo_dl_tag) { + copym->m_pkthdr.rcvif = ifp; + dlil_output(lo_dl_tag, copym, 0, (struct sockaddr *)dst, 0); + } else m_free(copym); #else - (void)if_simloop(ifp, copym, dst->sin6_family, NULL); + (void)if_simloop(ifp, copym, dst->sin6_family, NULL); #endif } diff --git a/bsd/netinet6/ip6_var.h b/bsd/netinet6/ip6_var.h index 65a5047b7..254c8559f 100644 --- a/bsd/netinet6/ip6_var.h +++ b/bsd/netinet6/ip6_var.h @@ -334,10 +334,10 @@ void ip6_clearpktopts __P((struct ip6_pktopts *, int, int)); struct ip6_pktopts *ip6_copypktopts __P((struct ip6_pktopts *, int)); int ip6_optlen __P((struct inpcb *)); -int route6_input __P((struct mbuf **, int *, int)); +int route6_input __P((struct mbuf **, int *)); void frag6_init __P((void)); -int frag6_input __P((struct mbuf **, int *, int)); +int frag6_input __P((struct mbuf **, int *)); void frag6_slowtimo __P((void)); void frag6_drain __P((void)); @@ -349,8 +349,8 @@ int rip6_output __P((struct mbuf *, struct socket *, struct sockaddr_in6 *, stru int rip6_usrreq __P((struct socket *, int, struct mbuf *, struct mbuf *, struct mbuf *, struct proc *)); -int dest6_input __P((struct mbuf **, int *, int)); -int none_input __P((struct mbuf **, int *, int)); +int dest6_input __P((struct mbuf **, int *)); +int none_input __P((struct mbuf **, int *)); #endif /* KERNEL */ #endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/netinet6/ip6protosw.h b/bsd/netinet6/ip6protosw.h index 3f8758f9d..beee88937 100644 --- a/bsd/netinet6/ip6protosw.h +++ b/bsd/netinet6/ip6protosw.h @@ -129,8 +129,7 @@ struct ip6protosw { int (*pr_output) __P((struct mbuf *m, struct socket *so, struct sockaddr_in6 *, struct 
mbuf *)); /* output to protocol (from above) */ - void (*pr_ctlinput)__P((int, struct sockaddr *, struct ip6_hdr *, - struct mbuf *, int)); + void (*pr_ctlinput)__P((int, struct sockaddr *, void *)); /* control input (from below) */ int (*pr_ctloutput)__P((struct socket *, struct sockopt *)); /* control output (from above) */ diff --git a/bsd/netinet6/ipsec.c b/bsd/netinet6/ipsec.c index 5c14febec..86f4639dc 100644 --- a/bsd/netinet6/ipsec.c +++ b/bsd/netinet6/ipsec.c @@ -104,6 +104,14 @@ int ipsec_debug = 1; int ipsec_debug = 0; #endif +#include +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3) +#define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8)) +#define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8)) +#define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8)) + + struct ipsecstat ipsecstat; int ip4_ah_cleartos = 1; int ip4_ah_offsetmask = 0; /* maybe IP_DF? */ @@ -115,7 +123,9 @@ int ip4_ah_net_deflev = IPSEC_LEVEL_USE; struct secpolicy ip4_def_policy; int ip4_ipsec_ecn = 0; /* ECN ignore(-1)/forbidden(0)/allowed(1) */ int ip4_esp_randpad = -1; +int esp_udp_encap_port = 0; static int sysctl_def_policy SYSCTL_HANDLER_ARGS; +extern u_int32_t natt_now; SYSCTL_DECL(_net_inet_ipsec); #if INET6 @@ -151,6 +161,15 @@ SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ESP_RANDPAD, int ipsec_bypass = 1; SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD, &ipsec_bypass,0, ""); +/* + * NAT Traversal requires a UDP port for encapsulation, + * esp_udp_encap_port controls which port is used. Racoon + * must set this port to the port racoon is using locally + * for nat traversal. + */ +SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port, + CTLFLAG_RW, &esp_udp_encap_port, 0, ""); + #if INET6 struct ipsecstat ipsec6stat; int ip6_esp_trans_deflev = IPSEC_LEVEL_USE; @@ -219,6 +238,7 @@ static int ipsec6_encapsulate __P((struct mbuf *, struct secasvar *)); static struct mbuf *ipsec_addaux __P((struct mbuf *)); static struct mbuf *ipsec_findaux __P((struct mbuf *)); static void ipsec_optaux __P((struct mbuf *, struct mbuf *)); +void ipsec_send_natt_keepalive(struct secasvar *sav); static int sysctl_def_policy SYSCTL_HANDLER_ARGS @@ -282,6 +302,8 @@ ipsec4_getpolicybysock(m, dir, so, error) return ipsec4_getpolicybyaddr(m, dir, 0, error); } + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0,0,0,0,0); + switch (so->so_proto->pr_domain->dom_family) { case AF_INET: /* set spidx in pcb */ @@ -296,8 +318,10 @@ ipsec4_getpolicybysock(m, dir, so, error) default: panic("ipsec4_getpolicybysock: unsupported address family\n"); } - if (*error) + if (*error) { + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1,*error,0,0,0); return NULL; + } /* sanity check */ if (pcbsp == NULL) @@ -324,6 +348,7 @@ ipsec4_getpolicybysock(m, dir, so, error) case IPSEC_POLICY_BYPASS: currsp->refcnt++; *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2,*error,0,0,0); return currsp; case IPSEC_POLICY_ENTRUST: @@ -336,6 +361,7 @@ ipsec4_getpolicybysock(m, dir, so, error) printf("DP ipsec4_getpolicybysock called " "to allocate SP:%p\n", kernsp)); *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3,*error,0,0,0); return kernsp; } @@ -349,17 +375,20 @@ ipsec4_getpolicybysock(m, dir, so, error) } ip4_def_policy.refcnt++; *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4,*error,0,0,0); return &ip4_def_policy; case IPSEC_POLICY_IPSEC: currsp->refcnt++; *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5,*error,0,0,0); return 
currsp; default: ipseclog((LOG_ERR, "ipsec4_getpolicybysock: " "Invalid policy for PCB %d\n", currsp->policy)); *error = EINVAL; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6,*error,0,0,0); return NULL; } /* NOTREACHED */ @@ -375,6 +404,7 @@ ipsec4_getpolicybysock(m, dir, so, error) printf("DP ipsec4_getpolicybysock called " "to allocate SP:%p\n", kernsp)); *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7,*error,0,0,0); return kernsp; } @@ -385,6 +415,7 @@ ipsec4_getpolicybysock(m, dir, so, error) "Illegal policy for non-priviliged defined %d\n", currsp->policy)); *error = EINVAL; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8,*error,0,0,0); return NULL; case IPSEC_POLICY_ENTRUST: @@ -397,17 +428,20 @@ ipsec4_getpolicybysock(m, dir, so, error) } ip4_def_policy.refcnt++; *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9,*error,0,0,0); return &ip4_def_policy; case IPSEC_POLICY_IPSEC: currsp->refcnt++; *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10,*error,0,0,0); return currsp; default: ipseclog((LOG_ERR, "ipsec4_getpolicybysock: " "Invalid policy for PCB %d\n", currsp->policy)); *error = EINVAL; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11,*error,0,0,0); return NULL; } /* NOTREACHED */ @@ -442,14 +476,17 @@ ipsec4_getpolicybyaddr(m, dir, flag, error) { struct secpolicyindex spidx; + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0); bzero(&spidx, sizeof(spidx)); /* make a index to look for a policy */ *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m, (flag & IP_FORWARDING) ? 0 : 1); - if (*error != 0) + if (*error != 0) { + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,*error,0,0,0); return NULL; + } sp = key_allocsp(&spidx, dir); } @@ -460,6 +497,7 @@ ipsec4_getpolicybyaddr(m, dir, flag, error) printf("DP ipsec4_getpolicybyaddr called " "to allocate SP:%p\n", sp)); *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,*error,0,0,0); return sp; } @@ -473,6 +511,7 @@ ipsec4_getpolicybyaddr(m, dir, flag, error) } ip4_def_policy.refcnt++; *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3,*error,0,0,0); return &ip4_def_policy; } @@ -803,9 +842,11 @@ ipsec6_setspidx_in6pcb(m, pcb) goto bad; spidx->dir = IPSEC_DIR_INBOUND; - KEYDEBUG(KEYDEBUG_IPSEC_DUMP, - printf("ipsec_setspidx_mbuf: end\n"); - kdebug_secpolicyindex(spidx)); + spidx = &pcb->in6p_sp->sp_out->spidx; + error = ipsec_setspidx(m, spidx, 1); + if (error) + goto bad; + spidx->dir = IPSEC_DIR_OUTBOUND; return 0; @@ -1872,7 +1913,7 @@ ipsec_hdrsiz(sp) size_t siz, clen; KEYDEBUG(KEYDEBUG_IPSEC_DATA, - printf("ipsec_in_reject: using SP\n"); + printf("ipsec_hdrsiz: using SP\n"); kdebug_secpolicy(sp)); /* check policy */ @@ -2105,13 +2146,13 @@ ipsec4_encapsulate(m, sav) ip->ip_off &= htons(~IP_OFFMASK); ip->ip_off &= htons(~IP_MF); switch (ip4_ipsec_dfbit) { - case 0: /*clear DF bit*/ + case 0: /* clear DF bit */ ip->ip_off &= htons(~IP_DF); break; - case 1: /*set DF bit*/ + case 1: /* set DF bit */ ip->ip_off |= htons(IP_DF); break; - default: /*copy DF bit*/ + default: /* copy DF bit */ break; } ip->ip_p = IPPROTO_IPIP; @@ -2381,7 +2422,7 @@ ok: } /* - * shift variable length bunffer to left. + * shift variable length buffer to left. * IN: bitmap: pointer to the buffer * nbit: the number of to shift. * wsize: buffer size (bytes). 
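The comment above documents the helper that slides the anti-replay bitmap. A condensed sketch of the operation it describes, assuming a shift of at most eight bits per call (the kernel helper loops for larger shifts):

/*
 * Shift a wsize-byte anti-replay bitmap left by nbit bits
 * (0 <= nbit <= 8), carrying bits across byte boundaries.
 */
static void
bitmap_shiftl(unsigned char *bitmap, int nbit, int wsize)
{
	int i;
	unsigned char over;

	for (i = 0; i < wsize; i++) {
		/* bits that spill out of the next byte into this one */
		over = (i + 1 < wsize) ? (bitmap[i + 1] >> (8 - nbit)) : 0;
		bitmap[i] = (bitmap[i] << nbit) | over;
	}
}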
@@ -2559,6 +2600,8 @@ ipsec4_output(state, sp, flags) if (!state->dst) panic("state->dst == NULL in ipsec4_output"); + KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0,0,0,0,0); + KEYDEBUG(KEYDEBUG_IPSEC_DATA, printf("ipsec4_output: applyed SP\n"); kdebug_secpolicy(sp)); @@ -2746,11 +2789,13 @@ ipsec4_output(state, sp, flags) ip = mtod(state->m, struct ip *); } + KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0,0,0,0,0); return 0; bad: m_freem(state->m); state->m = NULL; + KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error,0,0,0,0); return error; } #endif @@ -2776,17 +2821,17 @@ ipsec6_output_trans(state, nexthdrp, mprev, sp, flags, tun) struct sockaddr_in6 *sin6; if (!state) - panic("state == NULL in ipsec6_output"); + panic("state == NULL in ipsec6_output_trans"); if (!state->m) - panic("state->m == NULL in ipsec6_output"); + panic("state->m == NULL in ipsec6_output_trans"); if (!nexthdrp) - panic("nexthdrp == NULL in ipsec6_output"); + panic("nexthdrp == NULL in ipsec6_output_trans"); if (!mprev) - panic("mprev == NULL in ipsec6_output"); + panic("mprev == NULL in ipsec6_output_trans"); if (!sp) - panic("sp == NULL in ipsec6_output"); + panic("sp == NULL in ipsec6_output_trans"); if (!tun) - panic("tun == NULL in ipsec6_output"); + panic("tun == NULL in ipsec6_output_trans"); KEYDEBUG(KEYDEBUG_IPSEC_DATA, printf("ipsec6_output_trans: applyed SP\n"); @@ -2947,11 +2992,11 @@ ipsec6_output_tunnel(state, sp, flags) int s; if (!state) - panic("state == NULL in ipsec6_output"); + panic("state == NULL in ipsec6_output_tunnel"); if (!state->m) - panic("state->m == NULL in ipsec6_output"); + panic("state->m == NULL in ipsec6_output_tunnel"); if (!sp) - panic("sp == NULL in ipsec6_output"); + panic("sp == NULL in ipsec6_output_tunnel"); KEYDEBUG(KEYDEBUG_IPSEC_DATA, printf("ipsec6_output_tunnel: applyed SP\n"); @@ -2966,9 +3011,48 @@ ipsec6_output_tunnel(state, sp, flags) break; } - for (/*already initialized*/; isr; isr = isr->next) { - /* When tunnel mode, SA peers must be specified. */ - bcopy(&isr->saidx, &saidx, sizeof(saidx)); + for (/* already initialized */; isr; isr = isr->next) { + if (isr->saidx.mode == IPSEC_MODE_TUNNEL) { + /* When tunnel mode, SA peers must be specified. */ + bcopy(&isr->saidx, &saidx, sizeof(saidx)); + } else { + /* make SA index to look for a proper SA */ + struct sockaddr_in6 *sin6; + + bzero(&saidx, sizeof(saidx)); + saidx.proto = isr->saidx.proto; + saidx.mode = isr->saidx.mode; + saidx.reqid = isr->saidx.reqid; + + ip6 = mtod(state->m, struct ip6_hdr *); + sin6 = (struct sockaddr_in6 *)&saidx.src; + if (sin6->sin6_len == 0) { + sin6->sin6_len = sizeof(*sin6); + sin6->sin6_family = AF_INET6; + sin6->sin6_port = IPSEC_PORT_ANY; + bcopy(&ip6->ip6_src, &sin6->sin6_addr, + sizeof(ip6->ip6_src)); + if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { + /* fix scope id for comparing SPD */ + sin6->sin6_addr.s6_addr16[1] = 0; + sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]); + } + } + sin6 = (struct sockaddr_in6 *)&saidx.dst; + if (sin6->sin6_len == 0) { + sin6->sin6_len = sizeof(*sin6); + sin6->sin6_family = AF_INET6; + sin6->sin6_port = IPSEC_PORT_ANY; + bcopy(&ip6->ip6_dst, &sin6->sin6_addr, + sizeof(ip6->ip6_dst)); + if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) { + /* fix scope id for comparing SPD */ + sin6->sin6_addr.s6_addr16[1] = 0; + sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]); + } + } + } + if (key_checkrequest(isr, &saidx) == ENOENT) { /* * IPsec processing is required, but no SA found. 
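The tunnel-mode SA lookup above normalizes link-local addresses with KAME's embedded-scope convention before comparing against the SPD: the interface index lives in the second 16-bit word of the in-kernel address and is moved out to sin6_scope_id. A simplified sketch of that normalization (the kernel's IN6_IS_SCOPE_LINKLOCAL also covers link-local multicast, which is omitted here):

#include <sys/types.h>
#include <netinet/in.h>
#include <string.h>

/*
 * Extract a KAME-embedded scope: for a link-local address the kernel
 * form keeps the interface index in s6_addr[2..3]; move it into
 * sin6_scope_id and zero it in the address so that plain address
 * comparisons, like the SPD match above, still work.
 */
static void
kame_extract_scope(struct sockaddr_in6 *sin6, const struct in6_addr *a6)
{
	memset(sin6, 0, sizeof(*sin6));
	sin6->sin6_len = sizeof(*sin6);
	sin6->sin6_family = AF_INET6;
	sin6->sin6_addr = *a6;
	if (IN6_IS_ADDR_LINKLOCAL(a6)) {
		sin6->sin6_scope_id =
		    (a6->s6_addr[2] << 8) | a6->s6_addr[3];
		sin6->sin6_addr.s6_addr[2] = 0;
		sin6->sin6_addr.s6_addr[3] = 0;
	}
}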
@@ -3354,6 +3438,14 @@ ipsec6_tunnel_validate(m, off, nxt0, sav) sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst, (struct sockaddr *)&isrc, (struct sockaddr *)&idst); + /* + * When there is no suitable inbound policy for a packet of an ipsec + * tunnel, the kernel never decapsulates it as an ipsec tunnel even + * when the system-wide policy is "none"; it leaves such a packet to + * the generic tunnel module instead. If there is no generic tunnel + * rule either, the packet is rejected and the statistics are counted + * up. + */ if (!sp) return 0; key_freesp(sp); @@ -3577,7 +3669,7 @@ ipsec_addhist(m, proto, spi) if (!n) return ENOBUFS; if (M_TRAILINGSPACE(n) < sizeof(*p)) - return ENOSPC; /*XXX*/ + return ENOSPC; /* XXX */ p = (struct ipsec_history *)(mtod(n, caddr_t) + n->m_len); n->m_len += sizeof(*p); bzero(p, sizeof(*p)); @@ -3620,3 +3712,42 @@ ipsec_clearhist(m) n->m_len = sizeof(struct socket *); ipsec_optaux(m, n); } + +__private_extern__ void +ipsec_send_natt_keepalive( + struct secasvar *sav) +{ + struct mbuf *m; + struct udphdr *uh; + struct ip *ip; + + if ((esp_udp_encap_port & 0xFFFF) == 0 || sav->remote_ike_port == 0) return; + + m = m_gethdr(M_NOWAIT, MT_DATA); + if (m == NULL) return; + + /* + * Create a UDP packet complete with IP header. + * We must do this because UDP output requires + * an inpcb which we don't have. UDP packet + * contains one byte payload. The byte is set + * to 0xFF. + */ + ip = (struct ip*)m_mtod(m); + uh = (struct udphdr*)((char*)m_mtod(m) + sizeof(struct ip)); + m->m_len = sizeof(struct udpiphdr) + 1; + bzero(m_mtod(m), m->m_len); + ip->ip_len = m->m_len; /* host order; ip_output() converts */ + ip->ip_ttl = ip_defttl; + ip->ip_p = IPPROTO_UDP; + ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr; + ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr; + uh->uh_sport = htons((u_short)esp_udp_encap_port); + uh->uh_dport = htons(sav->remote_ike_port); + uh->uh_ulen = htons(1 + sizeof(struct udphdr)); + uh->uh_sum = 0; + *(u_int8_t*)((char*)m_mtod(m) + sizeof(struct ip) + sizeof(struct udphdr)) = 0xFF; + + if (ip_output(m, NULL, &sav->sah->sa_route, IP_NOIPSEC, NULL) == 0) + sav->natt_last_activity = natt_now; +} diff --git a/bsd/netinet6/ipsec.h b/bsd/netinet6/ipsec.h index e4948d4aa..4fcddc80c 100644 --- a/bsd/netinet6/ipsec.h +++ b/bsd/netinet6/ipsec.h @@ -45,10 +45,9 @@ #ifdef __APPLE_API_PRIVATE /* * Security Policy Index - * NOTE: Ensure to be same address family and upper layer protocol. - * NOTE: ul_proto, port number, uid, gid: - * ANY: reserved for waldcard. - * 0 to (~0 - 1): is one of the number of each value. + * Ensure that both address families in the "src" and "dst" are the same. + * When the value of the ul_proto is ICMPv6, the port field in "src" + * specifies ICMPv6 type, and the port field in "dst" specifies ICMPv6 code.
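The keepalive path above stays dormant until esp_udp_encap_port is nonzero; racoon is expected to publish the UDP port it uses for NAT traversal through the new sysctl. A userland sketch (the dotted OID string is inferred from the SYSCTL_INT declaration earlier in this patch, so treat it as an assumption):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int port = 4500;	/* IKE NAT-T port racoon listens on */

	/*
	 * "esp_port" is declared under _net_inet_ipsec above; the
	 * dotted name below is inferred from that declaration.
	 */
	if (sysctlbyname("net.inet.ipsec.esp_port",
	    NULL, NULL, &port, sizeof(port)) < 0) {
		perror("sysctlbyname");
		return 1;
	}
	printf("NAT-T keepalives will use UDP port %d\n", port);
	return 0;
}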
*/ struct secpolicyindex { u_int8_t dir; /* direction of packet flow, see blow */ @@ -224,7 +223,7 @@ struct ipsecstat { #define IPSECCTL_DEF_ESP_NETLEV 4 /* int; ESP tunnel mode */ #define IPSECCTL_DEF_AH_TRANSLEV 5 /* int; AH transport mode */ #define IPSECCTL_DEF_AH_NETLEV 6 /* int; AH tunnel mode */ -#if 0 /*obsolete, do not reuse*/ +#if 0 /* obsolete, do not reuse */ #define IPSECCTL_INBOUND_CALL_IKE 7 #endif #define IPSECCTL_AH_CLEARTOS 8 diff --git a/bsd/netinet6/mld6.c b/bsd/netinet6/mld6.c index 3d90c11e4..7b1b091ce 100644 --- a/bsd/netinet6/mld6.c +++ b/bsd/netinet6/mld6.c @@ -102,6 +102,7 @@ static struct ip6_pktopts ip6_opts; static int mld6_timers_are_running; +static int mld6_init_done = 0; /* XXX: These are necessary for KAME's link-local hack */ static struct in6_addr mld6_all_nodes_linklocal = IN6ADDR_LINKLOCAL_ALLNODES_INIT; static struct in6_addr mld6_all_routers_linklocal = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; @@ -115,6 +116,10 @@ mld6_init() struct ip6_hbh *hbh = (struct ip6_hbh *)hbh_buf; u_int16_t rtalert_code = htons((u_int16_t)IP6OPT_RTALERT_MLD); + if (mld6_init_done) + return; + + mld6_init_done = 1; mld6_timers_are_running = 0; /* ip6h_nxt will be fill in later */ diff --git a/bsd/netinet6/nd6.c b/bsd/netinet6/nd6.c index 60d206276..ffa6af8ec 100644 --- a/bsd/netinet6/nd6.c +++ b/bsd/netinet6/nd6.c @@ -1,4 +1,4 @@ -/* $FreeBSD: src/sys/netinet6/nd6.c,v 1.2.2.9 2001/07/11 09:39:04 ume Exp $ */ +/* $FreeBSD: src/sys/netinet6/nd6.c,v 1.20 2002/08/02 20:49:14 rwatson Exp $ */ /* $KAME: nd6.c,v 1.144 2001/05/24 07:44:00 itojun Exp $ */ /* @@ -200,9 +200,24 @@ nd6_setmtu(ifp) #ifndef MIN #define MIN(a,b) ((a) < (b) ? (a) : (b)) #endif - struct nd_ifinfo *ndi = &nd_ifinfo[ifp->if_index]; - u_long oldmaxmtu = ndi->maxmtu; - u_long oldlinkmtu = ndi->linkmtu; + + struct nd_ifinfo *ndi; + u_long oldmaxmtu, oldlinkmtu, dl_tag; + + /* + * Make sure IPv6 is enabled for the interface first, + * because this can be called directly from SIOCSIFMTU for IPv4 + */ + + if (ifp->if_index >= nd_ifinfo_indexlim) { + if (dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET6, &dl_tag) != EPROTONOSUPPORT) + nd6log((LOG_INFO, "setmtu for ifp=%s but nd6 is not attached\n", if_name(ifp))); + return; /* we're out of bounds for nd_ifinfo */ + } + + ndi = &nd_ifinfo[ifp->if_index]; + oldmaxmtu = ndi->maxmtu; + oldlinkmtu = ndi->linkmtu; switch (ifp->if_type) { case IFT_ARCNET: /* XXX MTU handling needs more work */ @@ -438,7 +453,6 @@ nd6_timer(ignored_arg) timeout(nd6_timer_funneled, (caddr_t)0, nd6_prune * hz); ln = llinfo_nd6.ln_next; - /* XXX BSD/OS separates this code -- itojun */ while (ln && ln != &llinfo_nd6) { struct rtentry *rt; struct sockaddr_in6 *dst; @@ -461,15 +475,24 @@ nd6_timer(ignored_arg) ln = next; continue; } - + /* sanity check */ - if (!rt) - panic("rt=0 in nd6_timer(ln=%p)\n", ln); - if (rt->rt_llinfo && (struct llinfo_nd6 *)rt->rt_llinfo != ln) - panic("rt_llinfo(%p) is not equal to ln(%p)\n", + if (!rt) { + printf("rt=0 in nd6_timer(ln=%p)\n", ln); + ln = next; + continue; + } + if (rt->rt_llinfo && (struct llinfo_nd6 *)rt->rt_llinfo != ln) { + printf("rt_llinfo(%p) is not equal to ln(%p)\n", rt->rt_llinfo, ln); - if (!dst) - panic("dst=0 in nd6_timer(ln=%p)\n", ln); + ln = next; + continue; + } + if (!dst) { + printf("dst=0 in nd6_timer(ln=%p)\n", ln); + ln = next; + continue; + } switch (ln->ln_state) { case ND6_LLINFO_INCOMPLETE: @@ -481,6 +504,7 @@ nd6_timer(ignored_arg) ln, 0); } else { struct mbuf *m = ln->ln_hold; + ln->ln_hold = NULL; if (m) { if (rt->rt_ifp) {
/* @@ -572,7 +596,7 @@ nd6_timer(ignored_arg) /* * If the expiring address is temporary, try * regenerating a new one. This would be useful when - * we suspended a laptop PC, then turned on after a + * we suspended a laptop PC, then turned it on after a * period that could invalidate all temporary * addresses. Although we may have to restart the * loop (see below), it must be after purging the @@ -589,7 +613,8 @@ nd6_timer(ignored_arg) if (regen) goto addrloop; /* XXX: see below */ - } else if (IFA6_IS_DEPRECATED(ia6)) { + } + if (IFA6_IS_DEPRECATED(ia6)) { int oldflags = ia6->ia6_flags; ia6->ia6_flags |= IN6_IFF_DEPRECATED; @@ -610,15 +635,15 @@ nd6_timer(ignored_arg) * has changed while we are still in * the loop. Although the change * would not cause disaster (because - * it's not an addition, but a - * deletion,) we'd rather restart the + * it's not a deletion, but an + * addition,) we'd rather restart the * loop just for safety. Or does this * significantly reduce performance?? */ goto addrloop; } } - } else if (IFA6_IS_DEPRECATED(ia6)) { + } else { /* * A new RA might have made a deprecated address * preferred. @@ -634,14 +659,6 @@ nd6_timer(ignored_arg) * check prefix lifetime. * since pltime is just for autoconf, pltime processing for * prefix is not necessary. - * - * we offset expire time by NDPR_KEEP_EXPIRE, so that we - * can use the old prefix information to validate the - * next prefix information to come. See prelist_update() - * for actual validation. - * - * I don't think such an offset is necessary. - * (jinmei@kame.net, 20010130). */ if (pr->ndpr_expire && pr->ndpr_expire < time_second) { struct nd_prefix *t; @@ -772,7 +789,7 @@ nd6_purge(ifp) if (nd6_defifindex == ifp->if_index) nd6_setdefaultiface(0); - if (!ip6_forwarding && ip6_accept_rtadv) { /* XXX: too restrictive? */ + if (!ip6_forwarding && (ip6_accept_rtadv || (ifp->if_eflags & IFEF_ACCEPT_RTADVD))) { /* refresh default router list */ bzero(&drany, sizeof(drany)); defrouter_delreq(&drany, 0); @@ -848,10 +865,10 @@ nd6_lookup(addr6, create, ifp) return(NULL); /* - * Create a new route. RTF_LLINFO is necessary + * Create a new route. RTF_LLINFO is necessary * to create a Neighbor Cache entry for the * destination in nd6_rtrequest which will be - * called in rtequest via ifa->ifa_rtrequest. + * called in rtrequest via ifa->ifa_rtrequest. */ if ((e = rtrequest(RTM_ADD, (struct sockaddr *)&sin6, ifa->ifa_addr, @@ -877,20 +894,26 @@ nd6_lookup(addr6, create, ifp) rtunref(rt); /* * Validation for the entry. + * Note that the check for rt_llinfo is necessary because a cloned + * route from a parent route that has the L flag (e.g. the default + * route to a p2p interface) may have the flag, too, while the + * destination is not actually a neighbor. * XXX: we can't use rt->rt_ifp to check for the interface, since * it might be the loopback interface if the entry is for our * own address on a non-loopback interface. Instead, we should - * use rt->rt_ifa->ifa_ifp, which would specify the REAL interface. + * use rt->rt_ifa->ifa_ifp, which would specify the REAL + * interface. 
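The validation that the comment above introduces boils down to a handful of predicates on the route entry. A condensed sketch of those checks (the PPP exemption the patch adds is omitted, and the helper name is illustrative):

#include <sys/socket.h>
#include <net/route.h>

/*
 * Sketch: does rt look like a usable IPv6 neighbor-cache entry for
 * interface ifp?  Condensed from the validation in nd6_lookup().
 */
static int
nd6_rt_is_neighbor_cache(struct rtentry *rt, struct ifnet *ifp)
{
	if ((rt->rt_flags & RTF_GATEWAY) != 0)
		return (0);	/* indirect route, not a neighbor */
	if ((rt->rt_flags & RTF_LLINFO) == 0)
		return (0);	/* no link-layer info attached */
	if (rt->rt_gateway->sa_family != AF_LINK)
		return (0);	/* gateway must be a link-level sockaddr */
	if (rt->rt_llinfo == NULL)
		return (0);	/* cloned from an L-flag parent route */
	/* compare against the REAL interface, not rt_ifp (may be lo0) */
	if (ifp != NULL && rt->rt_ifa->ifa_ifp != ifp)
		return (0);
	return (1);
}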
*/ - if ((rt->rt_flags & RTF_GATEWAY) || (rt->rt_flags & RTF_LLINFO) == 0 || - rt->rt_gateway->sa_family != AF_LINK || - (ifp && rt->rt_ifa->ifa_ifp != ifp)) { + if ((ifp->if_type !=IFT_PPP) && ((rt->rt_flags & RTF_GATEWAY) || (rt->rt_flags & RTF_LLINFO) == 0 || + rt->rt_gateway->sa_family != AF_LINK || rt->rt_llinfo == NULL || + + (ifp && rt->rt_ifa->ifa_ifp != ifp))) { if (create) { log(LOG_DEBUG, "nd6_lookup: failed to lookup %s (if = %s)\n", ip6_sprintf(addr6), ifp ? if_name(ifp) : "unspec"); /* xxx more logs... kazu */ } - return(0); + return(NULL); } return(rt); } @@ -943,7 +966,7 @@ nd6_is_addr_neighbor(addr, ifp) * Even if the address matches none of our addresses, it might be * in the neighbor cache. */ - if (nd6_lookup(&addr->sin6_addr, 0, ifp)) + if (nd6_lookup(&addr->sin6_addr, 0, ifp) != NULL) return(1); return(0); @@ -967,13 +990,13 @@ nd6_free(rt) * even though it is not harmful, it was not really necessary. */ - if (!ip6_forwarding && ip6_accept_rtadv) { /* XXX: too restrictive? */ + if (!ip6_forwarding && (ip6_accept_rtadv || (rt->rt_ifp->if_eflags & IFEF_ACCEPT_RTADVD))) { int s; s = splnet(); dr = defrouter_lookup(&((struct sockaddr_in6 *)rt_key(rt))->sin6_addr, rt->rt_ifp); - if (ln->ln_router || dr) { + if (ln && ln->ln_router || dr) { /* * rt6_flush must be called whether or not the neighbor * is in the Default Router List. @@ -992,7 +1015,7 @@ nd6_free(rt) /* * Temporarily fake the state to choose a new default * router and to perform on-link determination of - * prefixes coreectly. + * prefixes correctly. * Below the state will be set correctly, * or the entry itself will be deleted. */ @@ -1027,9 +1050,12 @@ nd6_free(rt) * Before deleting the entry, remember the next entry as the * return value. We need this because pfxlist_onlink_check() above * might have freed other entries (particularly the old next entry) as - * a side effect (XXX). + * a side effect (XXX). */ - next = ln->ln_next; + if (ln) + next = ln->ln_next; + else + next = 0; /* * Detach the route from the routing tree and the list of neighbor @@ -1106,7 +1132,7 @@ nd6_rtrequest(req, rt, sa) struct ifnet *ifp = rt->rt_ifp; struct ifaddr *ifa; - if (rt->rt_flags & RTF_GATEWAY) + if ((rt->rt_flags & RTF_GATEWAY)) return; if (nd6_need_cache(ifp) == 0 && (rt->rt_flags & RTF_HOST) == 0) { @@ -1120,6 +1146,27 @@ nd6_rtrequest(req, rt, sa) return; } + if (req == RTM_RESOLVE && + (nd6_need_cache(ifp) == 0 || /* stf case */ + !nd6_is_addr_neighbor((struct sockaddr_in6 *)rt_key(rt), ifp))) { + /* + * FreeBSD and BSD/OS often make a cloned host route based + * on a less-specific route (e.g. the default route). + * If the less specific route does not have a "gateway" + * (this is the case when the route just goes to a p2p or an + * stf interface), we'll mistakenly make a neighbor cache for + * the host route, and will see strange neighbor solicitation + * for the corresponding destination. In order to avoid the + * confusion, we check if the destination of the route is + * a neighbor in terms of neighbor discovery, and stop the + * process if not. Additionally, we remove the LLINFO flag + * so that ndp(8) will not try to get the neighbor information + * of the destination. + */ + rt->rt_flags &= ~RTF_LLINFO; + return; + } + switch (req) { case RTM_ADD: /* @@ -1132,7 +1179,7 @@ nd6_rtrequest(req, rt, sa) if (rt->rt_flags & (RTF_CLONING | RTF_LLINFO)) { /* * Case 1: This route should come from - * a route to interface. RTF_LLINFO flag is set + * a route to interface. 
RTF_LLINFO flag is set * for a host route whose destination should be * treated as on-link. */ if (ln && ln->ln_expire == 0) { /* kludge for desktops */ #if 0 - printf("nd6_request: time.tv_sec is zero; " + printf("nd6_rtrequest: time.tv_sec is zero; " "treat it as 1\n"); #endif ln->ln_expire = 1; } #endif - if (rt->rt_flags & RTF_CLONING) + if ((rt->rt_flags & RTF_CLONING)) break; } /* @@ -1255,7 +1302,7 @@ nd6_rtrequest(req, rt, sa) SDL(gate)->sdl_alen = ifp->if_addrlen; } if (nd6_useloopback) { - rt->rt_ifp = &loif[0]; /*XXX*/ + rt->rt_ifp = &loif[0]; /* XXX */ /* * Make sure rt_ifa be equal to the ifaddr * corresponding to the address. @@ -1322,6 +1369,7 @@ nd6_rtrequest(req, rt, sa) rt->rt_flags &= ~RTF_LLINFO; if (ln->ln_hold) m_freem(ln->ln_hold); + ln->ln_hold = NULL; Free((caddr_t)ln); } } @@ -1503,7 +1551,7 @@ nd6_ioctl(cmd, data, ifp) /* do we really have to remove addresses as well? */ for (ia = in6_ifaddr; ia; ia = ia_next) { - /* ia might be removed. keep the next ptr. */ + /* ia might be removed. keep the next ptr. */ ia_next = ia->ia_next; if ((ia->ia6_flags & IN6_IFF_AUTOCONF) == 0) @@ -1673,7 +1721,7 @@ fail: * 1 -- y -- (7) * STALE */ - if (lladdr) { /*(3-5) and (7)*/ + if (lladdr) { /* (3-5) and (7) */ /* * Record source link-layer address * XXX is it dependent to ifp->if_type? @@ -1683,17 +1731,17 @@ fail: } if (!is_newentry) { - if ((!olladdr && lladdr) /*(3)*/ - || (olladdr && lladdr && llchange)) { /*(5)*/ + if ((!olladdr && lladdr) /* (3) */ + || (olladdr && lladdr && llchange)) { /* (5) */ do_update = 1; newstate = ND6_LLINFO_STALE; - } else /*(1-2,4)*/ + } else /* (1-2,4) */ do_update = 0; } else { do_update = 1; - if (!lladdr) /*(6)*/ + if (!lladdr) /* (6) */ newstate = ND6_LLINFO_NOSTATE; - else /*(7)*/ + else /* (7) */ newstate = ND6_LLINFO_STALE; } @@ -1762,7 +1810,7 @@ fail: /* * New entry must have is_router flag cleared. */ - if (is_newentry) /*(6-7)*/ + if (is_newentry) /* (6-7) */ ln->ln_router = 0; break; case ND_REDIRECT: @@ -1773,7 +1821,7 @@ fail: */ if (code == ND_REDIRECT_ROUTER) ln->ln_router = 1; - else if (is_newentry) /*(6-7)*/ + else if (is_newentry) /* (6-7) */ ln->ln_router = 0; break; case ND_ROUTER_SOLICIT: @@ -1786,8 +1834,8 @@ fail: /* * Mark an entry with lladdr as a router. */ - if ((!is_newentry && (olladdr || lladdr)) /*(2-5)*/ - || (is_newentry && lladdr)) { /*(7)*/ + if ((!is_newentry && (olladdr || lladdr)) /* (2-5) */ + || (is_newentry && lladdr)) { /* (7) */ ln->ln_router = 1; } break; @@ -1808,13 +1856,12 @@ fail: * for those are not autoconfigured hosts, we explicitly avoid such * cases for safety. */ - if (do_update && ln->ln_router && !ip6_forwarding && ip6_accept_rtadv) + if (do_update && ln->ln_router && !ip6_forwarding && (ip6_accept_rtadv || (ifp->if_eflags & IFEF_ACCEPT_RTADVD))) defrouter_select(); return rt; } - static void nd6_slowtimo(ignored_arg) void *ignored_arg; @@ -1880,7 +1927,7 @@ nd6_output(ifp, origifp, m0, dst, rt0) goto sendpkt; /* - * next hop determination. This routine is derived from ether_outpout. + * next hop determination. This routine is derived from ether_output. */ if (rt) { if ((rt->rt_flags & RTF_UP) == 0) { @@ -1903,9 +1950,8 @@ nd6_output(ifp, origifp, m0, dst, rt0) /* * We skip link-layer address resolution and NUD * if the gateway is not a neighbor from ND point - * of view, regardless the value of the - * nd_ifinfo.flags. - * The second condition is a bit tricky: we skip + * of view, regardless of the value of nd_ifinfo.flags.
+ * The second condition is a bit tricky; we skip * if the gateway is our own address, which is * sometimes used to install a route to a p2p link. */ @@ -1946,7 +1992,7 @@ nd6_output(ifp, origifp, m0, dst, rt0) else { /* * Since nd6_is_addr_neighbor() internally calls nd6_lookup(), - * the condition below is not very efficient. But we believe + * the condition below is not very efficient. But we believe * it is tolerable, because this should be a rare case. */ if (nd6_is_addr_neighbor(dst, ifp) && @@ -1988,7 +2034,7 @@ nd6_output(ifp, origifp, m0, dst, rt0) /* * If the neighbor cache entry has a state other than INCOMPLETE - * (i.e. its link-layer address is already reloved), just + * (i.e. its link-layer address is already resolved), just * send the packet. */ if (ln->ln_state > ND6_LLINFO_INCOMPLETE) @@ -1996,11 +2042,12 @@ nd6_output(ifp, origifp, m0, dst, rt0) /* * There is a neighbor cache entry, but no ethernet address - * response yet. Replace the held mbuf (if any) with this + * response yet. Replace the held mbuf (if any) with this * latest one. * - * XXX Does the code conform to rate-limiting rule? - * (RFC 2461 7.2.2) + * This code conforms to the rate-limiting rule described in Section + * 7.2.2 of RFC 2461, because the timer is set correctly after sending + * an NS below. */ if (ln->ln_state == ND6_LLINFO_NOSTATE) ln->ln_state = ND6_LLINFO_INCOMPLETE; @@ -2023,14 +2070,15 @@ nd6_output(ifp, origifp, m0, dst, rt0) /* Make sure the HW checksum flags are cleaned before sending the packet */ - m->m_pkthdr.rcvif = (struct ifnet *)0; m->m_pkthdr.csum_data = 0; m->m_pkthdr.csum_flags = 0; if ((ifp->if_flags & IFF_LOOPBACK) != 0) { + m->m_pkthdr.rcvif = origifp; /* forwarding rules require the original scope_id */ return (dlil_output(ifptodlt(origifp, PF_INET6), m, (caddr_t)rt, (struct sockaddr *)dst,0)); } + m->m_pkthdr.rcvif = (struct ifnet *)0; return (dlil_output(ifptodlt(ifp, PF_INET6), m, (caddr_t)rt, (struct sockaddr *)dst, 0)); #else if ((ifp->if_flags & IFF_LOOPBACK) != 0) { @@ -2108,27 +2156,23 @@ nd6_storelladdr(ifp, rt, m, dst, desten) *desten = 0; return(1); default: - m_freem(m); - return(0); + return(0); /* caller will free mbuf */ } } if (rt == NULL) { /* this could happen, if we could not allocate memory */ - m_freem(m); - return(0); + return(0); /* caller will free mbuf */ } if (rt->rt_gateway->sa_family != AF_LINK) { printf("nd6_storelladdr: something odd happens\n"); - m_freem(m); - return(0); + return(0); /* caller will free mbuf */ } sdl = SDL(rt->rt_gateway); if (sdl->sdl_alen == 0) { /* this should be impossible, but we bark here for debugging */ printf("nd6_storelladdr: sdl_alen == 0\n"); - m_freem(m); - return(0); + return(0); /* caller will free mbuf */ } bcopy(LLADDR(sdl), desten, sdl->sdl_alen); diff --git a/bsd/netinet6/nd6.h b/bsd/netinet6/nd6.h index a84135d9a..d774afb3a 100644 --- a/bsd/netinet6/nd6.h +++ b/bsd/netinet6/nd6.h @@ -80,7 +80,7 @@ struct nd_ifinfo { int recalctm; /* BaseReacable re-calculation timer */ u_int8_t chlim; /* CurHopLimit */ u_int8_t receivedra; - /* the followings are for privacy extension for addrconf */ + /* the following 3 members are for privacy extension for addrconf */ u_int8_t randomseed0[8]; /* upper 64 bits of MD5 digest */ u_int8_t randomseed1[8]; /* lower 64 bits (usually the EUI64 IFID) */ u_int8_t randomid[8]; /* current random ID */ @@ -205,7 +205,7 @@ TAILQ_HEAD(nd_drhead, nd_defrouter); struct nd_defrouter { TAILQ_ENTRY(nd_defrouter) dr_entry; struct in6_addr rtaddr; - u_char flags; + u_char flags; /* flags on 
RA message */ u_short rtlifetime; u_long expire; u_long advint; /* Mobile IPv6 addition (milliseconds) */ @@ -250,7 +250,7 @@ struct nd_prefix { */ struct inet6_ndpr_msghdr { u_short inpm_msglen; /* to skip over non-understood messages */ - u_char inpm_version; /* future binary compatability */ + u_char inpm_version; /* future binary compatibility */ u_char inpm_type; /* message type */ struct in6_addr inpm_prefix; u_long prm_vltim; @@ -312,7 +312,7 @@ union nd_opts { struct nd_opt_hdr *zero; struct nd_opt_hdr *src_lladdr; struct nd_opt_hdr *tgt_lladdr; - struct nd_opt_prefix_info *pi_beg;/* multiple opts, start */ + struct nd_opt_prefix_info *pi_beg; /* multiple opts, start */ struct nd_opt_rd_hdr *rh; struct nd_opt_mtu *mtu; struct nd_opt_hdr *six; diff --git a/bsd/netinet6/nd6_nbr.c b/bsd/netinet6/nd6_nbr.c index 70fbfef80..ae8185854 100644 --- a/bsd/netinet6/nd6_nbr.c +++ b/bsd/netinet6/nd6_nbr.c @@ -134,11 +134,11 @@ nd6_ns_input(m, off, icmp6len) if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) { /* dst has to be solicited node multicast address. */ if (daddr6.s6_addr16[0] == IPV6_ADDR_INT16_MLL - /*don't check ifindex portion*/ + /* don't check ifindex portion */ && daddr6.s6_addr32[1] == 0 && daddr6.s6_addr32[2] == IPV6_ADDR_INT32_ONE && daddr6.s6_addr8[12] == 0xff) { - ; /*good*/ + ; /* good */ } else { nd6log((LOG_INFO, "nd6_ns_input: bad DAD packet " "(wrong ip6 dst)\n")); @@ -164,7 +164,7 @@ nd6_ns_input(m, off, icmp6len) } if (ndopts.nd_opts_src_lladdr) { - lladdr = (char *)(ndopts.nd_opts_src_lladdr +1); + lladdr = (char *)(ndopts.nd_opts_src_lladdr + 1); lladdrlen = ndopts.nd_opts_src_lladdr->nd_opt_len << 3; } @@ -253,9 +253,9 @@ nd6_ns_input(m, off, icmp6len) } if (IN6_ARE_ADDR_EQUAL(&myaddr6, &saddr6)) { - log(LOG_INFO, - "nd6_ns_input: duplicate IP6 address %s\n", - ip6_sprintf(&saddr6)); + nd6log((LOG_INFO, + "nd6_ns_input: duplicate IP6 address %s\n", + ip6_sprintf(&saddr6))); goto freeit; } @@ -384,7 +384,7 @@ nd6_ns_output(ifp, daddr6, taddr6, ln, dad) icmp6len = sizeof(*nd_ns); m->m_pkthdr.len = m->m_len = sizeof(*ip6) + icmp6len; - m->m_data += max_linkhdr; /*or MH_ALIGN() equivalent?*/ + m->m_data += max_linkhdr; /* or MH_ALIGN() equivalent? */ /* fill neighbor solicitation packet */ ip6 = mtod(m, struct ip6_hdr *); @@ -434,7 +434,7 @@ nd6_ns_output(ifp, daddr6, taddr6, ln, dad) * - saddr6 belongs to the outgoing interface. * Otherwise, we perform a scope-wise match. */ - struct ip6_hdr *hip6; /*hold ip6*/ + struct ip6_hdr *hip6; /* hold ip6 */ struct in6_addr *saddr6; if (ln && ln->ln_hold) { @@ -451,7 +451,10 @@ nd6_ns_output(ifp, daddr6, taddr6, ln, dad) else { ia = in6_ifawithifp(ifp, &ip6->ip6_dst); if (ia == NULL) { - m_freem(m); /*XXX*/ + if (ln && ln->ln_hold) + m_freem(ln->ln_hold); + ln->ln_hold = NULL; + m_freem(m); return; } ip6->ip6_src = ia->ia_addr.sin6_addr; @@ -624,7 +627,7 @@ nd6_na_input(m, off, icmp6len) goto freeit; } - /* Just for safety, maybe unnecessery. */ + /* Just for safety, maybe unnecessary. */ if (ifa) { log(LOG_ERR, "nd6_na_input: duplicate IP6 address %s\n", @@ -769,11 +772,18 @@ nd6_na_input(m, off, icmp6len) int s; in6 = &((struct sockaddr_in6 *)rt_key(rt))->sin6_addr; + + /* + * Lock to protect the default router list. + * XXX: this might be unnecessary, since this function + * is only called under the network software interrupt + * context. However, we keep it just for safety. 
+ */ s = splnet(); dr = defrouter_lookup(in6, rt->rt_ifp); if (dr) defrtrlist_del(dr); - else if (!ip6_forwarding && ip6_accept_rtadv) { + else if (!ip6_forwarding && (ip6_accept_rtadv || (rt->rt_ifp->if_eflags & IFEF_ACCEPT_RTADVD))) { /* * Even if the neighbor is not in the default * router list, the neighbor may be used @@ -791,7 +801,7 @@ nd6_na_input(m, off, icmp6len) ln->ln_asked = 0; if (ln->ln_hold) { /* - * we assume ifp is not a p2p here, so just set the 2nd + * we assume ifp is not a loopback here, so just set the 2nd * argument as the 1st one. */ nd6_output(ifp, ifp, ln->ln_hold, @@ -832,7 +842,7 @@ nd6_na_output(ifp, daddr6, taddr6, flags, tlladdr, sdl0) struct ip6_moptions im6o; int icmp6len; int maxlen; - caddr_t mac; + caddr_t mac = NULL; struct ifnet *outif = NULL; /* estimate the size of message */ @@ -867,7 +877,7 @@ nd6_na_output(ifp, daddr6, taddr6, flags, tlladdr, sdl0) icmp6len = sizeof(*nd_na); m->m_pkthdr.len = m->m_len = sizeof(struct ip6_hdr) + icmp6len; - m->m_data += max_linkhdr; /*or MH_ALIGN() equivalent?*/ + m->m_data += max_linkhdr; /* or MH_ALIGN() equivalent? */ /* fill neighbor advertisement packet */ ip6 = mtod(m, struct ip6_hdr *); @@ -910,7 +920,6 @@ nd6_na_output(ifp, daddr6, taddr6, flags, tlladdr, sdl0) * target lladdr option SHOULD NOT be included. */ if (tlladdr) { - mac = NULL; /* * sdl0 != NULL indicates proxy NA. If we do proxy, use * lladdr in sdl0. If we are not proxying (sending NA for @@ -992,9 +1001,6 @@ struct dadq { int dad_ns_ocount; /* NS sent so far */ int dad_ns_icount; int dad_na_icount; -#if defined(__FreeBSD__) && __FreeBSD__ >= 3 - struct callout_handle dad_timer; -#endif }; static struct dadq_head dadq; @@ -1031,6 +1037,7 @@ nd6_dad_starttimer(dp, ticks) callout_reset(&dp->dad_timer_ch, ticks, (void (*) __P((void *)))nd6_dad_timer, (void *)dp->dad_ifa); } + static void nd6_dad_stoptimer(dp) struct dadq *dp; @@ -1096,9 +1103,6 @@ nd6_dad_start(ifa, tick) return; } bzero(dp, sizeof(*dp)); -#if defined(__FreeBSD__) && __FreeBSD__ >= 3 - callout_init(&dp->dad_timer_ch); -#endif TAILQ_INSERT_TAIL(&dadq, (struct dadq *)dp, dad_list); nd6log((LOG_DEBUG, "%s: starting DAD for %s\n", if_name(ifa->ifa_ifp), @@ -1115,11 +1119,8 @@ nd6_dad_start(ifa, tick) dp->dad_count = ip6_dad_count; dp->dad_ns_icount = dp->dad_na_icount = 0; dp->dad_ns_ocount = dp->dad_ns_tcount = 0; - if (!tick) { + if (tick == NULL) { nd6_dad_ns_output(dp, ifa); -#if defined(__FreeBSD__) && __FreeBSD__ >= 3 - dp->dad_timer = -#endif timeout((void (*) __P((void *)))nd6_dad_timer_funnel, (void *)ifa, nd_ifinfo[ifa->ifa_ifp->if_index].retrans * hz / 1000); } else { @@ -1130,9 +1131,6 @@ nd6_dad_start(ifa, tick) else ntick = *tick + random() % (hz / 2); *tick = ntick; -#if defined(__FreeBSD__) && __FreeBSD__ >= 3 - dp->dad_timer = -#endif timeout((void (*) __P((void *)))nd6_dad_timer_funnel, (void *)ifa, ntick); } @@ -1188,7 +1186,7 @@ nd6_dad_timer(ifa) struct in6_ifaddr *ia = (struct in6_ifaddr *)ifa; struct dadq *dp; - s = splnet(); /*XXX*/ + s = splnet(); /* XXX */ /* Sanity check */ if (ia == NULL) { @@ -1233,9 +1231,6 @@ nd6_dad_timer(ifa) * We have more NS to go. Send NS packet for DAD. */ nd6_dad_ns_output(dp, ifa); -#if defined(__FreeBSD__) && __FreeBSD__ >= 3 - dp->dad_timer = -#endif timeout((void (*) __P((void *)))nd6_dad_timer_funnel, (void *)ifa, nd_ifinfo[ifa->ifa_ifp->if_index].retrans * hz / 1000); } else { @@ -1256,7 +1251,7 @@ nd6_dad_timer(ifa) } if (dp->dad_ns_icount) { -#if 0 /*heuristics*/ +#if 0 /* heuristics */ /* * if * - we have sent many(?) 
DAD NS, and diff --git a/bsd/netinet6/nd6_rtr.c b/bsd/netinet6/nd6_rtr.c index a99c8a5dd..c3fd29bc6 100644 --- a/bsd/netinet6/nd6_rtr.c +++ b/bsd/netinet6/nd6_rtr.c @@ -1,4 +1,4 @@ -/* $FreeBSD: src/sys/netinet6/nd6_rtr.c,v 1.2.2.3 2001/07/03 11:01:54 ume Exp $ */ +/* $FreeBSD: src/sys/netinet6/nd6_rtr.c,v 1.11 2002/04/19 04:46:23 suz Exp $ */ /* $KAME: nd6_rtr.c,v 1.111 2001/04/27 01:37:15 jinmei Exp $ */ /* @@ -126,7 +126,7 @@ nd6_rs_input(m, off, icmp6len) union nd_opts ndopts; /* If I'm not a router, ignore it. */ - if (ip6_accept_rtadv != 0 || ip6_forwarding != 1) + if (ip6_accept_rtadv != 0 || (ifp->if_eflags & IFEF_ACCEPT_RTADVD) || ip6_forwarding != 1) goto freeit; /* Sanity checks */ @@ -215,7 +215,7 @@ nd6_ra_input(m, off, icmp6len) union nd_opts ndopts; struct nd_defrouter *dr; - if (ip6_accept_rtadv == 0) + if (ip6_accept_rtadv == 0 && ((ifp->if_eflags & IFEF_ACCEPT_RTADVD) == 0)) goto freeit; if (ip6->ip6_hlim != 255) { @@ -267,7 +267,7 @@ nd6_ra_input(m, off, icmp6len) dr0.advints_lost = 0; /* Mobile IPv6 */ /* unspecified or not? (RFC 2461 6.3.4) */ if (advreachable) { - NTOHL(advreachable); + advreachable = ntohl(advreachable); if (advreachable <= MAX_REACHABLE_TIME && ndi->basereachable != advreachable) { ndi->basereachable = advreachable; @@ -396,7 +396,7 @@ nd6_ra_input(m, off, icmp6len) skip: /* - * Src linkaddress + * Source link layer address */ { char *lladdr = NULL; @@ -451,7 +451,7 @@ nd6_rtmsg(cmd, rt) info.rti_info[RTAX_GATEWAY] = rt->rt_gateway; info.rti_info[RTAX_NETMASK] = rt_mask(rt); info.rti_info[RTAX_IFP] = - (struct sockaddr *)TAILQ_FIRST(&rt->rt_ifp->if_addrlist); + TAILQ_FIRST(&rt->rt_ifp->if_addrlist)->ifa_addr; info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr; rt_missmsg(cmd, &info, rt->rt_flags, 0); @@ -530,7 +530,7 @@ defrouter_addifreq(ifp) nd6_rtmsg(RTM_ADD, newrt); rtunref(newrt); } - in6_post_msg(ifp, KEV_INET6_DEFROUTER, &def); + in6_post_msg(ifp, KEV_INET6_DEFROUTER, (struct in6_ifaddr *)ifa); } } @@ -598,7 +598,7 @@ defrtrlist_del(dr) * Flush all the routing table entries that use the router * as a next hop. */ - if (!ip6_forwarding && ip6_accept_rtadv) { + if (!ip6_forwarding && (ip6_accept_rtadv || (dr->ifp->if_eflags & IFEF_ACCEPT_RTADVD))) { /* above is a good condition? */ rt6_flush(&dr->rtaddr, dr->ifp); } @@ -1735,6 +1735,7 @@ in6_ifadd(pr, ifid) int in6_tmpifadd(ia0, forcegen) const struct in6_ifaddr *ia0; /* corresponding public address */ + int forcegen; { struct ifnet *ifp = ia0->ia_ifa.ifa_ifp; struct in6_ifaddr *newia; @@ -1831,6 +1832,16 @@ in6_tmpifadd(ia0, forcegen) newia->ia6_ndpr = ia0->ia6_ndpr; newia->ia6_ndpr->ndpr_refcnt++; + /* + * A newly added address might affect the status of other addresses. + * XXX: when the temporary address is generated with a new public + * address, the onlink check is redundant. However, it would be safe + * to do the check explicitly everywhere a new address is generated, + * and, in fact, we surely need the check when we create a new + * temporary address due to deprecation of an old temporary address. 
+ */ + pfxlist_onlink_check(); + return(0); } diff --git a/bsd/netinet6/raw_ip6.c b/bsd/netinet6/raw_ip6.c index f49d75e92..97eca96dc 100644 --- a/bsd/netinet6/raw_ip6.c +++ b/bsd/netinet6/raw_ip6.c @@ -553,7 +553,9 @@ rip6_attach(struct socket *so, int proto, struct proc *p) inp->in6p_hops = -1; /* use kernel default */ inp->in6p_cksum = -1; MALLOC(inp->in6p_icmp6filt, struct icmp6_filter *, - sizeof(struct icmp6_filter), M_PCB, M_NOWAIT); + sizeof(struct icmp6_filter), M_PCB, M_WAITOK); + if (inp->in6p_icmp6filt == NULL) + return (ENOMEM); ICMP6_FILTER_SETPASSALL(inp->in6p_icmp6filt); return 0; } diff --git a/bsd/netinet6/route6.c b/bsd/netinet6/route6.c index 7867df06f..acd4263cb 100644 --- a/bsd/netinet6/route6.c +++ b/bsd/netinet6/route6.c @@ -48,9 +48,9 @@ static int ip6_rthdr0 __P((struct mbuf *, struct ip6_hdr *, struct ip6_rthdr0 *)); int -route6_input(mp, offp, proto) +route6_input(mp, offp) struct mbuf **mp; - int *offp, proto; /* proto is unused */ + int *offp; { struct ip6_hdr *ip6; struct mbuf *m = *mp; diff --git a/bsd/netinet6/scope6.c b/bsd/netinet6/scope6.c index 2a9e9ce0f..ed33f804d 100644 --- a/bsd/netinet6/scope6.c +++ b/bsd/netinet6/scope6.c @@ -1,4 +1,4 @@ -/* $FreeBSD: src/sys/netinet6/scope6.c,v 1.1.2.2 2001/07/03 11:01:55 ume Exp $ */ +/* $FreeBSD: src/sys/netinet6/scope6.c,v 1.3 2002/03/25 10:12:51 ume Exp $ */ /* $KAME: scope6.c,v 1.10 2000/07/24 13:29:31 itojun Exp $ */ /* @@ -222,7 +222,7 @@ struct in6_addr *addr; } } - if (bcmp(&in6addr_loopback, addr, sizeof(addr) - 1) == 0) { + if (bcmp(&in6addr_loopback, addr, sizeof(*addr) - 1) == 0) { if (addr->s6_addr8[15] == 1) /* loopback */ return IPV6_ADDR_SCOPE_NODELOCAL; if (addr->s6_addr8[15] == 0) /* unspecified */ diff --git a/bsd/netinet6/tcp6_var.h b/bsd/netinet6/tcp6_var.h index 8e1fce90a..286307c32 100644 --- a/bsd/netinet6/tcp6_var.h +++ b/bsd/netinet6/tcp6_var.h @@ -80,7 +80,7 @@ extern int tcp_v6mssdflt; /* XXX */ struct ip6_hdr; void tcp6_ctlinput __P((int, struct sockaddr *, void *)); void tcp6_init __P((void)); -int tcp6_input __P((struct mbuf **, int *, int)); +int tcp6_input __P((struct mbuf **, int *)); struct rtentry *tcp_rtlookup6 __P((struct inpcb *)); extern struct pr_usrreqs tcp6_usrreqs; diff --git a/bsd/netinet6/udp6_usrreq.c b/bsd/netinet6/udp6_usrreq.c index a6a5951dc..9bab08c72 100644 --- a/bsd/netinet6/udp6_usrreq.c +++ b/bsd/netinet6/udp6_usrreq.c @@ -142,9 +142,9 @@ in6_mcmatch(in6p, ia6, ifp) } int -udp6_input(mp, offp, proto) +udp6_input(mp, offp) struct mbuf **mp; - int *offp, proto; + int *offp; { struct mbuf *m = *mp; register struct ip6_hdr *ip6; @@ -547,6 +547,8 @@ udp6_attach(struct socket *so, int proto, struct proc *p) return error; inp = (struct inpcb *)so->so_pcb; inp->inp_vflag |= INP_IPV6; + if (ip6_mapped_addr_on) + inp->inp_vflag |= INP_IPV4; inp->in6p_hops = -1; /* use kernel default */ inp->in6p_cksum = -1; /* just to be sure */ /* @@ -635,7 +637,7 @@ udp6_connect(struct socket *so, struct sockaddr *nam, struct proc *p) error = in6_pcbconnect(inp, nam, p); splx(s); if (error == 0) { - if (ip6_mapped_addr_on) { /* should be non mapped addr */ + if (ip6_mapped_addr_on || (inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) { /* should be non mapped addr */ inp->inp_vflag &= ~INP_IPV4; inp->inp_vflag |= INP_IPV6; } @@ -711,7 +713,7 @@ udp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, } } - if (ip6_mapped_addr_on) { + if (ip6_mapped_addr_on || (inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) { int hasv4addr; struct sockaddr_in6 *sin6 = 0; diff --git 
a/bsd/netinet6/udp6_var.h b/bsd/netinet6/udp6_var.h index 9b74ea7c7..417190c62 100644 --- a/bsd/netinet6/udp6_var.h +++ b/bsd/netinet6/udp6_var.h @@ -73,7 +73,7 @@ SYSCTL_DECL(_net_inet6_udp6); extern struct pr_usrreqs udp6_usrreqs; void udp6_ctlinput __P((int, struct sockaddr *, void *)); -int udp6_input __P((struct mbuf **, int *, int)); +int udp6_input __P((struct mbuf **, int *)); int udp6_output __P((struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct proc *p)); diff --git a/bsd/netkey/key.c b/bsd/netkey/key.c index 6d6e92fd1..bb183c5a1 100644 --- a/bsd/netkey/key.c +++ b/bsd/netkey/key.c @@ -1,5 +1,5 @@ -/* $FreeBSD: src/sys/netkey/key.c,v 1.16.2.5 2001/07/03 11:01:58 ume Exp $ */ -/* $KAME: key.c,v 1.187 2001/05/24 07:41:22 itojun Exp $ */ +/* $FreeBSD: src/sys/netkey/key.c,v 1.16.2.13 2002/07/24 18:17:40 ume Exp $ */ +/* $KAME: key.c,v 1.191 2001/06/27 10:46:49 sakane Exp $ */ /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. @@ -48,6 +48,7 @@ #include #include #include +#include #include #include @@ -108,6 +109,8 @@ #define satosin(s) ((struct sockaddr_in *)s) #endif +#define FULLMASK 0xff + /* * Note on SA reference counting: * - SAs that are not in DEAD state will have (total external reference + 1) @@ -128,9 +131,12 @@ static u_int key_int_random = 60; /*interval to initialize randseed,1(m)*/ static u_int key_larval_lifetime = 30; /* interval to expire acquiring, 30(s)*/ static int key_blockacq_count = 10; /* counter for blocking SADB_ACQUIRE.*/ static int key_blockacq_lifetime = 20; /* lifetime for blocking SADB_ACQUIRE.*/ +static int key_preferred_oldsa = 0; /* preferred old sa rather than new sa.*/ +static int natt_keepalive_interval = 29; /* interval between natt keepalives.*/ static u_int32_t acq_seq = 0; static int key_tick_init_random = 0; +__private_extern__ u_int32_t natt_now = 0; static LIST_HEAD(_sptree, secpolicy) sptree[IPSEC_DIR_MAX]; /* SPD */ static LIST_HEAD(_sahtree, secashead) sahtree; /* SAD */ @@ -144,18 +150,17 @@ static LIST_HEAD(_spacqtree, secspacq) spacqtree; /* SP acquiring list */ struct key_cb key_cb; /* search order for SAs */ -static u_int saorder_state_valid[] = { +static const u_int saorder_state_valid_prefer_old[] = { SADB_SASTATE_DYING, SADB_SASTATE_MATURE, - /* - * This order is important because we must select a oldest SA - * for outbound processing. For inbound, This is not important. 
- */ }; -static u_int saorder_state_alive[] = { +static const u_int saorder_state_valid_prefer_new[] = { + SADB_SASTATE_MATURE, SADB_SASTATE_DYING, +}; +static const u_int saorder_state_alive[] = { /* except DEAD */ SADB_SASTATE_MATURE, SADB_SASTATE_DYING, SADB_SASTATE_LARVAL }; -static u_int saorder_state_any[] = { +static const u_int saorder_state_any[] = { SADB_SASTATE_MATURE, SADB_SASTATE_DYING, SADB_SASTATE_LARVAL, SADB_SASTATE_DEAD }; @@ -184,7 +189,7 @@ static const int minsize[] = { }; static const int maxsize[] = { sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */ - sizeof(struct sadb_sa), /* SADB_EXT_SA */ + sizeof(struct sadb_sa_2), /* SADB_EXT_SA */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */ @@ -243,6 +248,10 @@ SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_COUNT, blockacq_count, CTLFLAG_RW, \ SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_LIFETIME, blockacq_lifetime, CTLFLAG_RW, \ &key_blockacq_lifetime, 0, ""); +/* ESP auth */ +SYSCTL_INT(_net_key, KEYCTL_ESP_AUTH, esp_auth, CTLFLAG_RW, \ + &ipsec_esp_auth, 0, ""); + /* minimum ESP key length */ SYSCTL_INT(_net_key, KEYCTL_ESP_KEYMIN, esp_keymin, CTLFLAG_RW, \ &ipsec_esp_keymin, 0, ""); @@ -251,6 +260,14 @@ SYSCTL_INT(_net_key, KEYCTL_ESP_KEYMIN, esp_keymin, CTLFLAG_RW, \ SYSCTL_INT(_net_key, KEYCTL_AH_KEYMIN, ah_keymin, CTLFLAG_RW, \ &ipsec_ah_keymin, 0, ""); +/* preferred old SA rather than new SA */ +SYSCTL_INT(_net_key, KEYCTL_PREFERED_OLDSA, prefered_oldsa, CTLFLAG_RW,\ + &key_preferred_oldsa, 0, ""); + +/* time between NATT keepalives in seconds, 0 disabled */ +SYSCTL_INT(_net_key, KEYCTL_NATT_KEEPALIVE_INTERVAL, natt_keepalive_interval, CTLFLAG_RW,\ + &natt_keepalive_interval, 0, ""); + #ifndef LIST_FOREACH #define LIST_FOREACH(elm, head, field) \ for (elm = LIST_FIRST(head); elm; elm = LIST_NEXT(elm, field)) @@ -271,20 +288,20 @@ do {\ #define KEY_CHKSASTATE(head, sav, name) \ do { \ - if ((head) != (sav)) { \ - printf("%s: state mismatched (TREE=%d SA=%d)\n", \ - (name), (head), (sav)); \ - continue; \ - } \ + if ((head) != (sav)) { \ + ipseclog((LOG_DEBUG, "%s: state mismatched (TREE=%d SA=%d)\n", \ + (name), (head), (sav))); \ + continue; \ + } \ } while (0) #define KEY_CHKSPDIR(head, sp, name) \ do { \ - if ((head) != (sp)) { \ - printf("%s: direction mismatched (TREE=%d SP=%d), " \ - "anyway continue.\n", \ - (name), (head), (sp)); \ - } \ + if ((head) != (sp)) { \ + ipseclog((LOG_DEBUG, "%s: direction mismatched (TREE=%d SP=%d), " \ + "anyway continue.\n", \ + (name), (head), (sp))); \ + } \ } while (0) #if 1 @@ -396,21 +413,22 @@ static struct mbuf *key_setsadbaddr __P((u_int16_t, static struct mbuf *key_setsadbident __P((u_int16_t, u_int16_t, caddr_t, int, u_int64_t)); #endif -static struct mbuf *key_setsadbxsa2(u_int8_t, u_int32_t); +static struct mbuf *key_setsadbxsa2 __P((u_int8_t, u_int32_t, u_int32_t)); static struct mbuf *key_setsadbxpolicy __P((u_int16_t, u_int8_t, u_int32_t)); static void *key_newbuf __P((const void *, u_int)); #if INET6 static int key_ismyaddr6 __P((struct sockaddr_in6 *)); #endif -static int key_cmpsaidx_exactly - __P((struct secasindex *, struct secasindex *)); -static int key_cmpsaidx_withmode - __P((struct secasindex *, struct secasindex *)); -static int key_cmpsaidx_withoutmode2 - __P((struct secasindex *, struct secasindex *)); -static int key_cmpsaidx_withoutmode - __P((struct secasindex *, struct secasindex *)); + +/* flags for key_cmpsaidx() */ +#define CMP_HEAD
1 /* protocol, addresses. */ +#define CMP_MODE_REQID 2 /* additionally HEAD, reqid, mode. */ +#define CMP_REQID 3 /* additionally HEAD, reqid. */ +#define CMP_EXACTLY 4 /* all elements. */ +static int key_cmpsaidx + __P((struct secasindex *, struct secasindex *, int)); + static int key_cmpspidx_exactly __P((struct secpolicyindex *, struct secpolicyindex *)); static int key_cmpspidx_withmask @@ -479,6 +497,7 @@ static void key_sa_chgstate __P((struct secasvar *, u_int8_t)); static struct mbuf *key_alloc_mbuf __P((int)); extern int ipsec_bypass; +void ipsec_send_natt_keepalive(struct secasvar *sav); /* %%% IPsec policy management */ /* @@ -561,6 +580,12 @@ key_gettunnel(osrc, odst, isrc, idst) struct sockaddr *os, *od, *is, *id; struct secpolicyindex spidx; + if (isrc->sa_family != idst->sa_family) { + ipseclog((LOG_ERR, "protocol family mismatched %d != %d.\n", + isrc->sa_family, idst->sa_family)); + return NULL; + } + s = splnet(); /*called from softclock()*/ LIST_FOREACH(sp, &sptree[dir], chain) { if (sp->state == IPSEC_SPSTATE_DEAD) continue; @@ -692,11 +717,9 @@ key_checkrequest(isr, saidx) /* there is no SA */ if ((error = key_acquire(saidx, isr->sp)) != 0) { - /* XXX What I do ? */ -#if IPSEC_DEBUG - printf("key_checkrequest: error %d returned " - "from key_acquire.\n", error); -#endif + /* XXX What should I do ? */ + ipseclog((LOG_DEBUG, "key_checkrequest: error %d returned " + "from key_acquire.\n", error)); return error; } @@ -716,11 +739,13 @@ key_allocsa_policy(saidx) struct secashead *sah; struct secasvar *sav; u_int stateidx, state; + const u_int *saorder_state_valid; + int arraysize; LIST_FOREACH(sah, &sahtree, chain) { if (sah->state == SADB_SASTATE_DEAD) continue; - if (key_cmpsaidx_withmode(&sah->saidx, saidx)) + if (key_cmpsaidx(&sah->saidx, saidx, CMP_MODE_REQID)) goto found; } @@ -728,10 +753,19 @@ key_allocsa_policy(saidx) found: - /* search valid state */ - for (stateidx = 0; - stateidx < _ARRAYLEN(saorder_state_valid); - stateidx++) { + /* + * search a valid state list for an outbound packet. + * This search order is important. + */ + if (key_preferred_oldsa) { + saorder_state_valid = saorder_state_valid_prefer_old; + arraysize = _ARRAYLEN(saorder_state_valid_prefer_old); + } else { + saorder_state_valid = saorder_state_valid_prefer_new; + arraysize = _ARRAYLEN(saorder_state_valid_prefer_new); + } + + for (stateidx = 0; stateidx < arraysize; stateidx++) { state = saorder_state_valid[stateidx]; @@ -755,12 +789,16 @@ key_do_allocsa_policy(sah, state) struct secashead *sah; u_int state; { - struct secasvar *sav, *candidate; + struct secasvar *sav, *nextsav, *candidate, *d; /* initilize */ candidate = NULL; - LIST_FOREACH(sav, &sah->savtree[state], chain) { + for (sav = LIST_FIRST(&sah->savtree[state]); + sav != NULL; + sav = nextsav) { + + nextsav = LIST_NEXT(sav, chain); /* sanity check */ KEY_CHKSASTATE(sav->state, state, "key_do_allocsa_policy"); @@ -778,11 +816,82 @@ key_do_allocsa_policy(sah, state) panic("key_do_allocsa_policy: " "lifetime_current is NULL.\n"); - /* XXX What the best method is to compare ? */ - if (candidate->lft_c->sadb_lifetime_addtime > + /* What is the best method to compare?
*/ + if (key_preferred_oldsa) { + if (candidate->lft_c->sadb_lifetime_addtime > + sav->lft_c->sadb_lifetime_addtime) { + candidate = sav; + } + continue; + /*NOTREACHED*/ + } + + /* prefer new SA rather than old SA */ + if (candidate->lft_c->sadb_lifetime_addtime < sav->lft_c->sadb_lifetime_addtime) { + d = candidate; candidate = sav; - continue; + } else + d = sav; + + /* + * prepared to delete the SA when there is more + * suitable candidate and the lifetime of the SA is not + * permanent. + */ + if (d->lft_c->sadb_lifetime_addtime != 0) { + struct mbuf *m, *result; + + key_sa_chgstate(d, SADB_SASTATE_DEAD); + + m = key_setsadbmsg(SADB_DELETE, 0, + d->sah->saidx.proto, 0, 0, d->refcnt - 1); + if (!m) + goto msgfail; + result = m; + + /* set sadb_address for saidx's. */ + m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, + (struct sockaddr *)&d->sah->saidx.src, + d->sah->saidx.src.ss_len << 3, + IPSEC_ULPROTO_ANY); + if (!m) + goto msgfail; + m_cat(result, m); + + /* set sadb_address for saidx's. */ + m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, + (struct sockaddr *)&d->sah->saidx.dst, + d->sah->saidx.dst.ss_len << 3, + IPSEC_ULPROTO_ANY); + if (!m) + goto msgfail; + m_cat(result, m); + + /* create SA extension */ + m = key_setsadbsa(d); + if (!m) + goto msgfail; + m_cat(result, m); + + if (result->m_len < sizeof(struct sadb_msg)) { + result = m_pullup(result, + sizeof(struct sadb_msg)); + if (result == NULL) + goto msgfail; + } + + result->m_pkthdr.len = 0; + for (m = result; m; m = m->m_next) + result->m_pkthdr.len += m->m_len; + mtod(result, struct sadb_msg *)->sadb_msg_len = + PFKEY_UNIT64(result->m_pkthdr.len); + + if (key_sendup_mbuf(NULL, result, + KEY_SENDUP_REGISTERED)) + goto msgfail; + msgfail: + key_freesav(d); } } @@ -823,11 +932,25 @@ key_allocsa(family, src, dst, proto, spi) struct sockaddr_in sin; struct sockaddr_in6 sin6; int s; + const u_int *saorder_state_valid; + int arraysize; /* sanity check */ if (src == NULL || dst == NULL) panic("key_allocsa: NULL pointer is passed.\n"); + /* + * when both systems employ similar strategy to use a SA. + * the search order is important even in the inbound case. + */ + if (key_preferred_oldsa) { + saorder_state_valid = saorder_state_valid_prefer_old; + arraysize = _ARRAYLEN(saorder_state_valid_prefer_old); + } else { + saorder_state_valid = saorder_state_valid_prefer_new; + arraysize = _ARRAYLEN(saorder_state_valid_prefer_new); + } + /* * searching SAD. * XXX: to be checked internal IP header somewhere. Also when @@ -836,10 +959,11 @@ key_allocsa(family, src, dst, proto, spi) */ s = splnet(); /*called from softclock()*/ LIST_FOREACH(sah, &sahtree, chain) { - /* search valid state */ - for (stateidx = 0; - stateidx < _ARRAYLEN(saorder_state_valid); - stateidx++) { + /* + * search a valid state list for inbound packet. + * the search order is not important. 
+ */ + for (stateidx = 0; stateidx < arraysize; stateidx++) { state = saorder_state_valid[stateidx]; LIST_FOREACH(sav, &sah->savtree[state], chain) { /* sanity check */ @@ -883,8 +1007,9 @@ key_allocsa(family, src, dst, proto, spi) continue; break; default: - printf("key_allocsa: unknown address family=%d.\n", - family); + ipseclog((LOG_DEBUG, "key_allocsa: " + "unknown address family=%d.\n", + family)); continue; } @@ -919,8 +1044,9 @@ key_allocsa(family, src, dst, proto, spi) continue; break; default: - printf("key_allocsa: unknown address family=%d.\n", - family); + ipseclog((LOG_DEBUG, "key_allocsa: " + "unknown address family=%d.\n", + family)); continue; } @@ -1015,10 +1141,8 @@ key_freeso(so) break; #endif /* INET6 */ default: -#if IPSEC_DEBUG - printf("key_freeso: unknown address family=%d.\n", - so->so_proto->pr_domain->dom_family); -#endif + ipseclog((LOG_DEBUG, "key_freeso: unknown address family=%d.\n", + so->so_proto->pr_domain->dom_family)); return; } @@ -1216,9 +1340,7 @@ key_msg2sp(xpl0, len, error) if (len < sizeof(*xpl0)) panic("key_msg2sp: invalid length.\n"); if (len != PFKEY_EXTLEN(xpl0)) { -#if IPSEC_DEBUG - printf("key_msg2sp: Invalid msg length.\n"); -#endif + ipseclog((LOG_DEBUG, "key_msg2sp: Invalid msg length.\n")); *error = EINVAL; return NULL; } @@ -1248,9 +1370,8 @@ key_msg2sp(xpl0, len, error) /* validity check */ if (PFKEY_EXTLEN(xpl0) < sizeof(*xpl0)) { -#if IPSEC_DEBUG - printf("key_msg2sp: Invalid msg length.\n"); -#endif + ipseclog((LOG_DEBUG, + "key_msg2sp: Invalid msg length.\n")); key_freesp(newsp); *error = EINVAL; return NULL; @@ -1263,10 +1384,8 @@ key_msg2sp(xpl0, len, error) /* length check */ if (xisr->sadb_x_ipsecrequest_len < sizeof(*xisr)) { -#if IPSEC_DEBUG - printf("key_msg2sp: " - "invalid ipsecrequest length.\n"); -#endif + ipseclog((LOG_DEBUG, "key_msg2sp: " + "invalid ipsecrequest length.\n")); key_freesp(newsp); *error = EINVAL; return NULL; @@ -1275,9 +1394,8 @@ key_msg2sp(xpl0, len, error) /* allocate request buffer */ KMALLOC(*p_isr, struct ipsecrequest *, sizeof(**p_isr)); if ((*p_isr) == NULL) { -#if IPSEC_DEBUG - printf("key_msg2sp: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, + "key_msg2sp: No more memory.\n")); key_freesp(newsp); *error = ENOBUFS; return NULL; @@ -1293,10 +1411,9 @@ key_msg2sp(xpl0, len, error) case IPPROTO_IPCOMP: break; default: -#if IPSEC_DEBUG - printf("key_msg2sp: invalid proto type=%u\n", - xisr->sadb_x_ipsecrequest_proto); -#endif + ipseclog((LOG_DEBUG, + "key_msg2sp: invalid proto type=%u\n", + xisr->sadb_x_ipsecrequest_proto)); key_freesp(newsp); *error = EPROTONOSUPPORT; return NULL; @@ -1309,10 +1426,9 @@ key_msg2sp(xpl0, len, error) break; case IPSEC_MODE_ANY: default: -#if IPSEC_DEBUG - printf("key_msg2sp: invalid mode=%u\n", - xisr->sadb_x_ipsecrequest_mode); -#endif + ipseclog((LOG_DEBUG, + "key_msg2sp: invalid mode=%u\n", + xisr->sadb_x_ipsecrequest_mode)); key_freesp(newsp); *error = EINVAL; return NULL; @@ -1332,12 +1448,10 @@ key_msg2sp(xpl0, len, error) */ if (xisr->sadb_x_ipsecrequest_reqid > IPSEC_MANUAL_REQID_MAX) { -#if IPSEC_DEBUG - printf("key_msg2sp: reqid=%d " - "range violation, " - "updated by kernel.\n", - xisr->sadb_x_ipsecrequest_reqid); -#endif + ipseclog((LOG_DEBUG, + "key_msg2sp: reqid=%d range " + "violation, updated by kernel.\n", + xisr->sadb_x_ipsecrequest_reqid)); xisr->sadb_x_ipsecrequest_reqid = 0; } @@ -1359,10 +1473,8 @@ key_msg2sp(xpl0, len, error) break; default: -#if IPSEC_DEBUG - printf("key_msg2sp: invalid level=%u\n", - xisr->sadb_x_ipsecrequest_level); 
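The length checks being converted to ipseclog() around this point guard key_msg2sp()'s walk over the variable-length ipsecrequest records packed after an sadb_x_policy header. A minimal standalone sketch of that walk, using a trimmed stand-in for struct sadb_x_ipsecrequest; where the kernel reports EINVAL through *error, this returns -1, and aligned input is assumed:

	#include <stddef.h>

	struct xisr_lite {            /* stand-in for sadb_x_ipsecrequest */
		unsigned short len;   /* total bytes in this record */
		unsigned short proto; /* IPPROTO_ESP, IPPROTO_AH, ... */
	};

	/*
	 * Walk tlen bytes of concatenated records. Mirrors the
	 * "invalid ipsecrequest length" and "becoming tlen < 0"
	 * rejections in key_msg2sp(): a record must be at least a
	 * header long and must not run past the end of the payload.
	 */
	static int
	walk_ipsecrequests(const char *buf, int tlen)
	{
		const struct xisr_lite *xisr = (const struct xisr_lite *)buf;

		while (tlen > 0) {
			if (tlen < (int)sizeof(*xisr) ||
			    xisr->len < sizeof(*xisr))
				return -1;    /* record too short */
			tlen -= xisr->len;
			if (tlen < 0)
				return -1;    /* record overruns the payload */
			xisr = (const struct xisr_lite *)
			    ((const char *)xisr + xisr->len);
		}
		return 0;
	}
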
-#endif + ipseclog((LOG_DEBUG, "key_msg2sp: invalid level=%u\n", + xisr->sadb_x_ipsecrequest_level)); key_freesp(newsp); *error = EINVAL; return NULL; @@ -1378,10 +1490,8 @@ key_msg2sp(xpl0, len, error) /* validity check */ if (paddr->sa_len > sizeof((*p_isr)->saidx.src)) { -#if IPSEC_DEBUG - printf("key_msg2sp: invalid request " - "address length.\n"); -#endif + ipseclog((LOG_DEBUG, "key_msg2sp: invalid request " + "address length.\n")); key_freesp(newsp); *error = EINVAL; return NULL; @@ -1395,10 +1505,8 @@ key_msg2sp(xpl0, len, error) /* validity check */ if (paddr->sa_len > sizeof((*p_isr)->saidx.dst)) { -#if IPSEC_DEBUG - printf("key_msg2sp: invalid request " - "address length.\n"); -#endif + ipseclog((LOG_DEBUG, "key_msg2sp: invalid request " + "address length.\n")); key_freesp(newsp); *error = EINVAL; return NULL; @@ -1416,9 +1524,7 @@ key_msg2sp(xpl0, len, error) /* validity check */ if (tlen < 0) { -#if IPSEC_DEBUG - printf("key_msg2sp: becoming tlen < 0.\n"); -#endif + ipseclog((LOG_DEBUG, "key_msg2sp: becoming tlen < 0.\n")); key_freesp(newsp); *error = EINVAL; return NULL; @@ -1430,9 +1536,7 @@ key_msg2sp(xpl0, len, error) } break; default: -#if IPSEC_DEBUG - printf("key_msg2sp: invalid policy type.\n"); -#endif + ipseclog((LOG_DEBUG, "key_msg2sp: invalid policy type.\n")); key_freesp(newsp); *error = EINVAL; return NULL; @@ -1632,25 +1736,19 @@ key_spdadd(so, m, mhp) if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || mhp->ext[SADB_X_EXT_POLICY] == NULL) { -#if IPSEC_DEBUG - printf("key_spdadd: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { -#if IPSEC_DEBUG - printf("key_spdadd: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL) { if (mhp->extlen[SADB_EXT_LIFETIME_HARD] < sizeof(struct sadb_lifetime)) { -#if IPSEC_DEBUG - printf("key_spdadd: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } lft = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_HARD]; @@ -1676,9 +1774,7 @@ key_spdadd(so, m, mhp) case IPSEC_DIR_OUTBOUND: break; default: -#if IPSEC_DEBUG - printf("key_spdadd: Invalid SP direction.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spdadd: Invalid SP direction.\n")); mhp->msg->sadb_msg_errno = EINVAL; return 0; } @@ -1687,9 +1783,7 @@ key_spdadd(so, m, mhp) /* key_spdadd() accepts DISCARD, NONE and IPSEC. 
*/ if (xpl0->sadb_x_policy_type == IPSEC_POLICY_ENTRUST || xpl0->sadb_x_policy_type == IPSEC_POLICY_BYPASS) { -#if IPSEC_DEBUG - printf("key_spdadd: Invalid policy type.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spdadd: Invalid policy type.\n")); return key_senderror(so, m, EINVAL); } @@ -1697,34 +1791,26 @@ key_spdadd(so, m, mhp) if (mhp->msg->sadb_msg_type != SADB_X_SPDSETIDX && xpl0->sadb_x_policy_type == IPSEC_POLICY_IPSEC && mhp->extlen[SADB_X_EXT_POLICY] <= sizeof(*xpl0)) { -#if IPSEC_DEBUG - printf("key_spdadd: some policy requests part required.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spdadd: some policy requests part required.\n")); return key_senderror(so, m, EINVAL); } /* * checking there is SP already or not. - * If type is SPDUPDATE and no SP found, then error. - * If type is either SPDADD or SPDSETIDX and SP found, then error. + * SPDUPDATE doesn't depend on whether there is a SP or not. + * If the type is either SPDADD or SPDSETIDX AND a SP is found, + * then error. */ newsp = key_getsp(&spidx); if (mhp->msg->sadb_msg_type == SADB_X_SPDUPDATE) { - if (newsp == NULL) { -#if IPSEC_DEBUG - printf("key_spdadd: no SP found.\n"); -#endif - return key_senderror(so, m, ENOENT); + if (newsp) { + newsp->state = IPSEC_SPSTATE_DEAD; + key_freesp(newsp); } - - newsp->state = IPSEC_SPSTATE_DEAD; - key_freesp(newsp); } else { if (newsp != NULL) { key_freesp(newsp); -#if IPSEC_DEBUG - printf("key_spdadd: a SP entry exists already.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spdadd: a SP entry exists already.\n")); return key_senderror(so, m, EEXIST); } } @@ -1865,7 +1951,7 @@ key_getnewspid() /* when requesting to allocate spi ranged */ while (count--) { - newid = (policy_id = (policy_id == ~0 ? 1 : ++policy_id)); + newid = (policy_id = (policy_id == ~0 ? 1 : policy_id + 1)); if ((sp = key_getspbyid(newid)) == NULL) break; @@ -1874,9 +1960,7 @@ key_getnewspid() } if (count == 0 || newid == 0) { -#if IPSEC_DEBUG - printf("key_getnewspid: to allocate policy id is failed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_getnewspid: to allocate policy id is failed.\n")); return 0; } @@ -1913,17 +1997,13 @@ key_spddelete(so, m, mhp) if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || mhp->ext[SADB_X_EXT_POLICY] == NULL) { -#if IPSEC_DEBUG - printf("key_spddelete: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spddelete: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { -#if IPSEC_DEBUG - printf("key_spddelete: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spddelete: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } @@ -1947,17 +2027,13 @@ key_spddelete(so, m, mhp) case IPSEC_DIR_OUTBOUND: break; default: -#if IPSEC_DEBUG - printf("key_spddelete: Invalid SP direction.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spddelete: Invalid SP direction.\n")); return key_senderror(so, m, EINVAL); } /* Is there SP in SPD ? 
*/ if ((sp = key_getsp(&spidx)) == NULL) { -#if IPSEC_DEBUG - printf("key_spddelete: no SP found.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spddelete: no SP found.\n")); return key_senderror(so, m, EINVAL); } @@ -2014,9 +2090,7 @@ key_spddelete2(so, m, mhp) if (mhp->ext[SADB_X_EXT_POLICY] == NULL || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { -#if IPSEC_DEBUG - printf("key_spddelete2: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spddelete2: invalid message is passed.\n")); key_senderror(so, m, EINVAL); return 0; } @@ -2025,9 +2099,7 @@ key_spddelete2(so, m, mhp) /* Is there SP in SPD ? */ if ((sp = key_getspbyid(id)) == NULL) { -#if IPSEC_DEBUG - printf("key_spddelete2: no SP found id:%u.\n", id); -#endif + ipseclog((LOG_DEBUG, "key_spddelete2: no SP found id:%u.\n", id)); key_senderror(so, m, EINVAL); } @@ -2115,9 +2187,7 @@ key_spdget(so, m, mhp) if (mhp->ext[SADB_X_EXT_POLICY] == NULL || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { -#if IPSEC_DEBUG - printf("key_spdget: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spdget: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } @@ -2125,9 +2195,7 @@ key_spdget(so, m, mhp) /* Is there SP in SPD ? */ if ((sp = key_getspbyid(id)) == NULL) { -#if IPSEC_DEBUG - printf("key_spdget: no SP found id:%u.\n", id); -#endif + ipseclog((LOG_DEBUG, "key_spdget: no SP found id:%u.\n", id)); return key_senderror(so, m, ENOENT); } @@ -2248,9 +2316,7 @@ key_spdflush(so, m, mhp) } if (sizeof(struct sadb_msg) > m->m_len + M_TRAILINGSPACE(m)) { -#if IPSEC_DEBUG - printf("key_spdflush: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_spdflush: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } @@ -2646,9 +2712,7 @@ key_newsav(m, mhp, sah, errp) KMALLOC(newsav, struct secasvar *, sizeof(struct secasvar)); if (newsav == NULL) { -#if IPSEC_DEBUG - printf("key_newsa: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_newsa: No more memory.\n")); *errp = ENOBUFS; return NULL; } @@ -2672,9 +2736,7 @@ key_newsav(m, mhp, sah, errp) /* sanity check */ if (mhp->ext[SADB_EXT_SA] == NULL) { KFREE(newsav); -#if IPSEC_DEBUG - printf("key_newsa: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_newsa: invalid message is passed.\n")); *errp = EINVAL; return NULL; } @@ -2790,7 +2852,7 @@ key_getsah(saidx) LIST_FOREACH(sah, &sahtree, chain) { if (sah->state == SADB_SASTATE_DEAD) continue; - if (key_cmpsaidx_withoutmode2(&sah->saidx, saidx)) + if (key_cmpsaidx(&sah->saidx, saidx, CMP_REQID)) return sah; } @@ -2814,9 +2876,7 @@ key_checkspidup(saidx, spi) /* check address family */ if (saidx->src.ss_family != saidx->dst.ss_family) { -#if IPSEC_DEBUG - printf("key_checkspidup: address family mismatched.\n"); -#endif + ipseclog((LOG_DEBUG, "key_checkspidup: address family mismatched.\n")); return NULL; } @@ -2856,12 +2916,9 @@ key_getsavbyspi(sah, spi) /* sanity check */ if (sav->state != state) { -#if IPSEC_DEBUG - printf("key_getsavbyspi: " - "invalid sav->state " - "(queue: %d SA: %d)\n", - state, sav->state); -#endif + ipseclog((LOG_DEBUG, "key_getsavbyspi: " + "invalid sav->state (queue: %d SA: %d)\n", + state, sav->state)); continue; } @@ -2907,6 +2964,8 @@ key_setsaval(sav, m, mhp) sav->lft_c = NULL; sav->lft_h = NULL; sav->lft_s = NULL; + sav->remote_ike_port = 0; + sav->natt_last_activity = natt_now; /* SA */ if (mhp->ext[SADB_EXT_SA] != NULL) { @@ -2921,14 +2980,25 @@ key_setsaval(sav, m, mhp) sav->alg_auth = sa0->sadb_sa_auth; 
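The key_setsaval() hunk just below reads the peer's NAT traversal port out of an extended SADB_EXT_SA payload, and the maxsize[] change earlier sizes that extension as struct sadb_sa_2. A sketch of the layout this implies; only sadb_sa_natt_port is named by the patch, so the trailing padding fields and their names are assumptions, and a platform pfkeyv2.h may already provide the real definition:

	#include <sys/types.h>
	#include <net/pfkeyv2.h>   /* struct sadb_sa */

	/*
	 * Extended SADB_EXT_SA payload: the classic sadb_sa followed by
	 * the peer's UDP port for NAT traversal. When SADB_X_EXT_NATT
	 * is set, key_setsaval() requires the extension to be at least
	 * this large and the port to be non-zero, then stores it in
	 * sav->remote_ike_port.
	 */
	struct sadb_sa_2 {
		struct sadb_sa	sa;
		u_int16_t	sadb_sa_natt_port; /* peer IKE/NAT-T port */
		u_int16_t	sadb_reserved0;    /* field name assumed */
		u_int32_t	sadb_reserved1;    /* field name assumed */
	};
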
sav->alg_enc = sa0->sadb_sa_encrypt; sav->flags = sa0->sadb_sa_flags; + + /* + * Verify that a nat-traversal port was specified if + * the nat-traversal flag is set. + */ + if ((sav->flags & SADB_X_EXT_NATT) != 0) { + if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa_2) || + ((struct sadb_sa_2*)(sa0))->sadb_sa_natt_port == 0) { + error = EINVAL; + goto fail; + } + sav->remote_ike_port = ((struct sadb_sa_2*)(sa0))->sadb_sa_natt_port; + } /* replay window */ if ((sa0->sadb_sa_flags & SADB_X_EXT_OLD) == 0) { sav->replay = keydb_newsecreplay(sa0->sadb_sa_replay); if (sav->replay == NULL) { -#if IPSEC_DEBUG - printf("key_setsaval: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; goto fail; } @@ -2961,17 +3031,13 @@ break; } if (error) { -#if IPSEC_DEBUG - printf("key_setsaval: invalid key_auth values.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setsaval: invalid key_auth values.\n")); goto fail; } sav->key_auth = (struct sadb_key *)key_newbuf(key0, len); if (sav->key_auth == NULL) { -#if IPSEC_DEBUG - printf("key_setsaval: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; goto fail; } @@ -2999,9 +3065,7 @@ } sav->key_enc = (struct sadb_key *)key_newbuf(key0, len); if (sav->key_enc == NULL) { -#if IPSEC_DEBUG - printf("key_setsaval: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; goto fail; } @@ -3017,9 +3081,7 @@ break; } if (error) { -#if IPSEC_DEBUG - printf("key_setsatval: invalid key_enc value.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setsaval: invalid key_enc value.\n")); goto fail; } } @@ -3037,9 +3099,7 @@ break; KMALLOC(sav->iv, caddr_t, sav->ivlen); if (sav->iv == 0) { -#if IPSEC_DEBUG - printf("key_setsaval: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; goto fail; } @@ -3052,9 +3112,7 @@ case SADB_X_SATYPE_IPCOMP: break; default: -#if IPSEC_DEBUG - printf("key_setsaval: invalid SA type.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setsaval: invalid SA type.\n")); error = EINVAL; goto fail; } @@ -3067,9 +3125,7 @@ KMALLOC(sav->lft_c, struct sadb_lifetime *, sizeof(struct sadb_lifetime)); if (sav->lft_c == NULL) { -#if IPSEC_DEBUG - printf("key_setsaval: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; goto fail; } @@ -3097,9 +3153,7 @@ sav->lft_h = (struct sadb_lifetime *)key_newbuf(lft0, sizeof(*lft0)); if (sav->lft_h == NULL) { -#if IPSEC_DEBUG - printf("key_setsaval: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; goto fail; } @@ -3115,9 +3169,7 @@ sav->lft_s = (struct sadb_lifetime *)key_newbuf(lft0, sizeof(*lft0)); if (sav->lft_s == NULL) { -#if IPSEC_DEBUG - printf("key_setsaval: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; goto fail; } @@ -3134,14 +3186,17 @@ sav->replay = NULL; } if (sav->key_auth != NULL) { + bzero(_KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth)); KFREE(sav->key_auth); sav->key_auth = NULL; } if (sav->key_enc != NULL) { + bzero(_KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)); KFREE(sav->key_enc); sav->key_enc = NULL; } if (sav->sched) { 
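Note that the fail path here now scrubs key material (key_auth, key_enc, and, just below, the cipher schedule) before returning it to the allocator. The same pattern in a standalone sketch, with memset/free standing in for the kernel's bzero/KFREE; a hardened version would use an explicit_bzero-style primitive that the optimizer cannot elide:

	#include <stdlib.h>
	#include <string.h>

	/*
	 * Zero a secret buffer before freeing it so key bytes do not
	 * linger in recycled memory, as key_setsaval()'s fail path
	 * does for the authentication and encryption keys.
	 */
	static void
	scrub_free(void *p, size_t len)
	{
		if (p != NULL) {
			memset(p, 0, len);
			free(p);
		}
	}
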
+ bzero(sav->sched, sav->schedlen); KFREE(sav->sched); sav->sched = NULL; } @@ -3185,10 +3240,9 @@ key_mature(sav) case IPPROTO_ESP: case IPPROTO_AH: if (ntohl(sav->spi) >= 0 && ntohl(sav->spi) <= 255) { -#if IPSEC_DEBUG - printf("key_mature: illegal range of SPI %u.\n", - (u_int32_t)ntohl(sav->spi)); -#endif + ipseclog((LOG_DEBUG, + "key_mature: illegal range of SPI %u.\n", + (u_int32_t)ntohl(sav->spi))); return EINVAL; } break; @@ -3200,10 +3254,8 @@ /* check flags */ if ((sav->flags & SADB_X_EXT_OLD) && (sav->flags & SADB_X_EXT_DERIV)) { -#if IPSEC_DEBUG - printf("key_mature: " - "invalid flag (derived) given to old-esp.\n"); -#endif + ipseclog((LOG_DEBUG, "key_mature: " + "invalid flag (derived) given to old-esp.\n")); return EINVAL; } if (sav->alg_auth == SADB_AALG_NONE) @@ -3215,17 +3267,13 @@ case IPPROTO_AH: /* check flags */ if (sav->flags & SADB_X_EXT_DERIV) { -#if IPSEC_DEBUG - printf("key_mature: " - "invalid flag (derived) given to AH SA.\n"); -#endif + ipseclog((LOG_DEBUG, "key_mature: " + "invalid flag (derived) given to AH SA.\n")); return EINVAL; } if (sav->alg_enc != SADB_EALG_NONE) { -#if IPSEC_DEBUG - printf("key_mature: " - "protocol and algorithm mismated.\n"); -#endif + ipseclog((LOG_DEBUG, "key_mature: " + "protocol and algorithm mismatched.\n")); return(EINVAL); } checkmask = 2; @@ -3233,26 +3281,20 @@ break; case IPPROTO_IPCOMP: if (sav->alg_auth != SADB_AALG_NONE) { -#if IPSEC_DEBUG - printf("key_mature: " - "protocol and algorithm mismated.\n"); -#endif + ipseclog((LOG_DEBUG, "key_mature: " + "protocol and algorithm mismatched.\n")); return(EINVAL); } if ((sav->flags & SADB_X_EXT_RAWCPI) == 0 && ntohl(sav->spi) >= 0x10000) { -#if IPSEC_DEBUG - printf("key_mature: invalid cpi for IPComp.\n"); -#endif + ipseclog((LOG_DEBUG, "key_mature: invalid cpi for IPComp.\n")); return(EINVAL); } checkmask = 4; mustmask = 4; break; default: -#if IPSEC_DEBUG - printf("key_mature: Invalid satype.\n"); -#endif + ipseclog((LOG_DEBUG, "key_mature: Invalid satype.\n")); return EPROTONOSUPPORT; } @@ -3263,10 +3305,8 @@ algo = ah_algorithm_lookup(sav->alg_auth); if (!algo) { -#if IPSEC_DEBUG - printf("key_mature: " - "unknown authentication algorithm.\n"); -#endif + ipseclog((LOG_DEBUG,"key_mature: " + "unknown authentication algorithm.\n")); return EINVAL; } @@ -3276,11 +3316,10 @@ else keylen = 0; if (keylen < algo->keymin || algo->keymax < keylen) { -#if IPSEC_DEBUG - printf("key_mature: invalid AH key length %d " - "(%d-%d allowed)\n", keylen, - algo->keymin, algo->keymax); -#endif + ipseclog((LOG_DEBUG, + "key_mature: invalid AH key length %d " + "(%d-%d allowed)\n", + keylen, algo->keymin, algo->keymax)); return EINVAL; } @@ -3293,9 +3332,7 @@ } if ((mustmask & 2) != 0 && mature != SADB_SATYPE_AH) { -#if IPSEC_DEBUG - printf("key_mature: no satisfy algorithm for AH\n"); -#endif + ipseclog((LOG_DEBUG, "key_mature: no satisfy algorithm for AH\n")); return EINVAL; } } @@ -3308,9 +3345,7 @@ algo = esp_algorithm_lookup(sav->alg_enc); if (!algo) { -#if IPSEC_DEBUG - printf("key_mature: unknown encryption algorithm.\n"); -#endif + ipseclog((LOG_DEBUG, "key_mature: unknown encryption algorithm.\n")); return EINVAL; } @@ -3320,11 +3355,10 @@ else keylen = 0; if (keylen < algo->keymin || algo->keymax < keylen) { -#if IPSEC_DEBUG - printf("key_mature: invalid ESP key length %d " - "(%d-%d allowed)\n", keylen, - algo->keymin, algo->keymax); -#endif + 
ipseclog((LOG_DEBUG, + "key_mature: invalid ESP key length %d " + "(%d-%d allowed)\n", + keylen, algo->keymin, algo->keymax)); return EINVAL; } @@ -3337,15 +3371,11 @@ key_mature(sav) } if ((mustmask & 1) != 0 && mature != SADB_SATYPE_ESP) { -#if IPSEC_DEBUG - printf("key_mature: no satisfy algorithm for ESP\n"); -#endif + ipseclog((LOG_DEBUG, "key_mature: no satisfy algorithm for ESP\n")); return EINVAL; } #else /*IPSEC_ESP*/ -#if IPSEC_DEBUG - printf("key_mature: ESP not supported in this configuration\n"); -#endif + ipseclog((LOG_DEBUG, "key_mature: ESP not supported in this configuration\n")); return EINVAL; #endif } @@ -3357,9 +3387,7 @@ key_mature(sav) /* algorithm-dependent check */ algo = ipcomp_algorithm_lookup(sav->alg_enc); if (!algo) { -#if IPSEC_DEBUG - printf("key_mature: unknown compression algorithm.\n"); -#endif + ipseclog((LOG_DEBUG, "key_mature: unknown compression algorithm.\n")); return EINVAL; } } @@ -3408,6 +3436,7 @@ key_setdumpsa(sav, type, satype, seq, pid) case SADB_X_EXT_SA2: m = key_setsadbxsa2(sav->sah->saidx.mode, + sav->replay ? sav->replay->count : 0, sav->sah->saidx.reqid); if (!m) goto fail; @@ -3416,7 +3445,7 @@ key_setdumpsa(sav, type, satype, seq, pid) case SADB_EXT_ADDRESS_SRC: m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, (struct sockaddr *)&sav->sah->saidx.src, - sav->sah->saidx.src.ss_len << 3, IPSEC_ULPROTO_ANY); + FULLMASK, IPSEC_ULPROTO_ANY); if (!m) goto fail; break; @@ -3424,7 +3453,7 @@ key_setdumpsa(sav, type, satype, seq, pid) case SADB_EXT_ADDRESS_DST: m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, (struct sockaddr *)&sav->sah->saidx.dst, - sav->sah->saidx.dst.ss_len << 3, IPSEC_ULPROTO_ANY); + FULLMASK, IPSEC_ULPROTO_ANY); if (!m) goto fail; break; @@ -3626,6 +3655,18 @@ key_setsadbaddr(exttype, saddr, prefixlen, ul_proto) p->sadb_address_len = PFKEY_UNIT64(len); p->sadb_address_exttype = exttype; p->sadb_address_proto = ul_proto; + if (prefixlen == FULLMASK) { + switch (saddr->sa_family) { + case AF_INET: + prefixlen = sizeof(struct in_addr) << 3; + break; + case AF_INET6: + prefixlen = sizeof(struct in6_addr) << 3; + break; + default: + ; /*XXX*/ + } + } p->sadb_address_prefixlen = prefixlen; p->sadb_address_reserved = 0; @@ -3680,9 +3721,9 @@ key_setsadbident(exttype, idtype, string, stringlen, id) * set data into sadb_x_sa2. */ static struct mbuf * -key_setsadbxsa2(mode, reqid) +key_setsadbxsa2(mode, seq, reqid) u_int8_t mode; - u_int32_t reqid; + u_int32_t seq, reqid; { struct mbuf *m; struct sadb_x_sa2 *p; @@ -3704,7 +3745,7 @@ key_setsadbxsa2(mode, reqid) p->sadb_x_sa2_mode = mode; p->sadb_x_sa2_reserved1 = 0; p->sadb_x_sa2_reserved2 = 0; - p->sadb_x_sa2_reserved3 = 0; + p->sadb_x_sa2_sequence = seq; p->sadb_x_sa2_reqid = reqid; return m; @@ -3756,9 +3797,7 @@ key_newbuf(src, len) KMALLOC(new, caddr_t, len); if (new == NULL) { -#if IPSEC_DEBUG - printf("key_newbuf: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_newbuf: No more memory.\n")); return NULL; } bcopy(src, new, len); @@ -3850,96 +3889,21 @@ key_ismyaddr6(sin6) #endif /*INET6*/ /* - * compare two secasindex structure exactly. - * IN: - * saidx0: source, it can be in SAD. - * saidx1: object. 
- * OUT: - * 1 : equal - * 0 : not equal - */ -static int -key_cmpsaidx_exactly(saidx0, saidx1) - struct secasindex *saidx0, *saidx1; -{ - /* sanity */ - if (saidx0 == NULL && saidx1 == NULL) - return 1; - - if (saidx0 == NULL || saidx1 == NULL) - return 0; - - if (saidx0->proto != saidx1->proto - || saidx0->mode != saidx1->mode - || saidx0->reqid != saidx1->reqid) - return 0; - - if (bcmp(&saidx0->src, &saidx1->src, saidx0->src.ss_len) != 0 || - bcmp(&saidx0->dst, &saidx1->dst, saidx0->dst.ss_len) != 0) - return 0; - - return 1; -} - -/* - * compare two secasindex structure with consideration mode. - * don't compare port. - * IN: - * saidx0: source, it is often in SAD. - * saidx1: object, it is often from SPD. - * OUT: - * 1 : equal - * 0 : not equal - */ -static int -key_cmpsaidx_withmode(saidx0, saidx1) - struct secasindex *saidx0, *saidx1; -{ - /* sanity */ - if (saidx0 == NULL && saidx1 == NULL) - return 1; - - if (saidx0 == NULL || saidx1 == NULL) - return 0; - - if (saidx0->proto != saidx1->proto) - return 0; - - /* - * If reqid of SPD is non-zero, unique SA is required. - * The result must be of same reqid in this case. - */ - if (saidx1->reqid != 0 && saidx0->reqid != saidx1->reqid) - return 0; - - if (saidx0->mode != IPSEC_MODE_ANY && saidx0->mode != saidx1->mode) - return 0; - - if (key_sockaddrcmp((struct sockaddr *)&saidx0->src, - (struct sockaddr *)&saidx1->src, 0) != 0) { - return 0; - } - if (key_sockaddrcmp((struct sockaddr *)&saidx0->dst, - (struct sockaddr *)&saidx1->dst, 0) != 0) { - return 0; - } - - return 1; -} - -/* - * compare two secasindex structure without mode, but think reqid. + * compare two secasindex structure. + * flag can specify to compare 2 saidxes. + * compare two secasindex structure without both mode and reqid. * don't compare port. - * IN: - * saidx0: source, it is often in SAD. - * saidx1: object, it is often from user. - * OUT: - * 1 : equal - * 0 : not equal + * IN: + * saidx0: source, it can be in SAD. + * saidx1: object. + * OUT: + * 1 : equal + * 0 : not equal */ static int -key_cmpsaidx_withoutmode2(saidx0, saidx1) +key_cmpsaidx(saidx0, saidx1, flag) struct secasindex *saidx0, *saidx1; + int flag; { /* sanity */ if (saidx0 == NULL && saidx1 == NULL) @@ -3951,56 +3915,41 @@ key_cmpsaidx_withoutmode2(saidx0, saidx1) if (saidx0->proto != saidx1->proto) return 0; - /* - * If reqid of SPD is non-zero, unique SA is required. - * The result must be of same reqid in this case. - */ - if (saidx1->reqid != 0 && saidx0->reqid != saidx1->reqid) - return 0; - - if (key_sockaddrcmp((struct sockaddr *)&saidx0->src, - (struct sockaddr *)&saidx1->src, 0) != 0) { - return 0; - } - if (key_sockaddrcmp((struct sockaddr *)&saidx0->dst, - (struct sockaddr *)&saidx1->dst, 0) != 0) { - return 0; - } - - return 1; -} - -/* - * compare two secasindex structure without both mode and reqid. - * don't compare port. - * IN: - * saidx0: source, it is often in SAD. - * saidx1: object, it is often from user. 
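The four near-duplicate comparators removed here collapse into the single flag-driven key_cmpsaidx(). Its flag semantics in a trimmed, self-contained sketch — secasindex is reduced to the compared fields, and the kernel version additionally treats IPSEC_MODE_ANY as a wildcard for CMP_MODE_REQID and compares addresses with key_sockaddrcmp(), which ignores ports:

	#include <string.h>

	struct saidx_lite {            /* trimmed stand-in for secasindex */
		int  proto, mode, reqid;
		char src[16], dst[16]; /* stand-ins for sockaddr_storage */
	};

	#define CMP_HEAD        1      /* protocol, addresses */
	#define CMP_MODE_REQID  2      /* HEAD + reqid + mode */
	#define CMP_REQID       3      /* HEAD + reqid */
	#define CMP_EXACTLY     4      /* every field, bytewise */

	static int
	cmpsaidx_lite(const struct saidx_lite *a,
	    const struct saidx_lite *b, int flag)
	{
		if (a->proto != b->proto)
			return 0;
		if (flag == CMP_EXACTLY)
			return a->mode == b->mode && a->reqid == b->reqid &&
			    memcmp(a->src, b->src, sizeof(a->src)) == 0 &&
			    memcmp(a->dst, b->dst, sizeof(a->dst)) == 0;
		/* a non-zero reqid on the policy side demands a match */
		if ((flag == CMP_MODE_REQID || flag == CMP_REQID) &&
		    b->reqid != 0 && a->reqid != b->reqid)
			return 0;
		if (flag == CMP_MODE_REQID && a->mode != b->mode)
			return 0;
		return memcmp(a->src, b->src, sizeof(a->src)) == 0 &&
		    memcmp(a->dst, b->dst, sizeof(a->dst)) == 0;
	}
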
- * OUT: - * 1 : equal - * 0 : not equal - */ -static int -key_cmpsaidx_withoutmode(saidx0, saidx1) - struct secasindex *saidx0, *saidx1; -{ - /* sanity */ - if (saidx0 == NULL && saidx1 == NULL) - return 1; + if (flag == CMP_EXACTLY) { + if (saidx0->mode != saidx1->mode) + return 0; + if (saidx0->reqid != saidx1->reqid) + return 0; + if (bcmp(&saidx0->src, &saidx1->src, saidx0->src.ss_len) != 0 || + bcmp(&saidx0->dst, &saidx1->dst, saidx0->dst.ss_len) != 0) + return 0; + } else { - if (saidx0 == NULL || saidx1 == NULL) - return 0; + /* CMP_MODE_REQID, CMP_REQID, CMP_HEAD */ + if (flag == CMP_MODE_REQID + || flag == CMP_REQID) { + /* + * If reqid of SPD is non-zero, unique SA is required. + * The result must be of same reqid in this case. + */ + if (saidx1->reqid != 0 && saidx0->reqid != saidx1->reqid) + return 0; + } - if (saidx0->proto != saidx1->proto) - return 0; + if (flag == CMP_MODE_REQID) { + if (saidx0->mode != IPSEC_MODE_ANY + && saidx0->mode != saidx1->mode) + return 0; + } - if (key_sockaddrcmp((struct sockaddr *)&saidx0->src, - (struct sockaddr *)&saidx1->src, 0) != 0) { - return 0; - } - if (key_sockaddrcmp((struct sockaddr *)&saidx0->dst, - (struct sockaddr *)&saidx1->dst, 0) != 0) { - return 0; + if (key_sockaddrcmp((struct sockaddr *)&saidx0->src, + (struct sockaddr *)&saidx1->src, 0) != 0) { + return 0; + } + if (key_sockaddrcmp((struct sockaddr *)&saidx0->dst, + (struct sockaddr *)&saidx1->dst, 0) != 0) { + return 0; + } } return 1; @@ -4322,7 +4271,23 @@ key_timehandler(void) key_freesav(sav); } } - + + /* + * If this is a NAT traversal SA with no activity, + * we need to send a keep alive. + * + * Performed here, outside of the state loop below, so we will + * only ever send one keepalive per SA header. The first SA on + * the list is the one that will be used for sending + * traffic, so this is the one we use for determining + * when to send the keepalive. + */ + sav = LIST_FIRST(&sah->savtree[SADB_SASTATE_MATURE]); + if (natt_keepalive_interval && sav && (sav->flags & SADB_X_EXT_NATT_KEEPALIVE) != 0 && + (natt_now - sav->natt_last_activity) >= natt_keepalive_interval) { + ipsec_send_natt_keepalive(sav); + } + /* * check MATURE entry to start to send expire message * whether or not. @@ -4339,10 +4304,8 @@ key_timehandler(void) /* sanity check */ if (sav->lft_c == NULL) { -#if IPSEC_DEBUG - printf("key_timehandler: " - "There is no CURRENT time, why?\n"); -#endif + ipseclog((LOG_DEBUG, "key_timehandler: " + "There is no CURRENT time, why?\n")); continue; } @@ -4350,8 +4313,9 @@ if (sav->lft_s->sadb_lifetime_addtime != 0 && tv.tv_sec - sav->created > sav->lft_s->sadb_lifetime_addtime) { /* - * check SA to be used whether or not. - * when SA hasn't been used, delete it. + * check whether the SA has been used; + * when it hasn't been used, delete it, + * since such an SA is unlikely ever to be used. 
*/ if (sav->lft_c->sadb_lifetime_usetime == 0) { key_sa_chgstate(sav, SADB_SASTATE_DEAD); @@ -4367,6 +4331,7 @@ key_timehandler(void) key_expire(sav); } } + /* check SOFT lifetime by bytes */ /* * XXX I don't know the way to delete this SA @@ -4399,10 +4364,8 @@ key_timehandler(void) /* sanity check */ if (sav->lft_c == NULL) { -#if IPSEC_DEBUG - printf("key_timehandler: " - "There is no CURRENT time, why?\n"); -#endif + ipseclog((LOG_DEBUG, "key_timehandler: " + "There is no CURRENT time, why?\n")); continue; } @@ -4446,13 +4409,11 @@ key_timehandler(void) /* sanity check */ if (sav->state != SADB_SASTATE_DEAD) { -#if IPSEC_DEBUG - printf("key_timehandler: " + ipseclog((LOG_DEBUG, "key_timehandler: " "invalid sav->state " "(queue: %d SA: %d): " "kill it anyway\n", - SADB_SASTATE_DEAD, sav->state); -#endif + SADB_SASTATE_DEAD, sav->state)); } /* @@ -4508,6 +4469,8 @@ key_timehandler(void) key_tick_init_random = 0; key_srandom(); } + + natt_now++; #ifndef IPSEC_DEBUG2 /* do exchange to tick time !! */ @@ -4661,16 +4624,12 @@ key_getspi(so, m, mhp) if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) { -#if IPSEC_DEBUG - printf("key_getspi: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_getspi: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { -#if IPSEC_DEBUG - printf("key_getspi: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_getspi: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_X_EXT_SA2] != NULL) { @@ -4686,9 +4645,7 @@ key_getspi(so, m, mhp) /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { -#if IPSEC_DEBUG - printf("key_getspi: invalid satype is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_getspi: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } @@ -4739,9 +4696,7 @@ key_getspi(so, m, mhp) if ((newsah = key_getsah(&saidx)) == NULL) { /* create a new SA index */ if ((newsah = key_newsah(&saidx)) == NULL) { -#if IPSEC_DEBUG - printf("key_getspi: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_getspi: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } } @@ -4878,9 +4833,7 @@ key_do_getnewspi(spirange, saidx) if (min == max) { if (key_checkspidup(saidx, min) != NULL) { -#if IPSEC_DEBUG - printf("key_do_getnewspi: SPI %u exists already.\n", min); -#endif + ipseclog((LOG_DEBUG, "key_do_getnewspi: SPI %u exists already.\n", min)); return 0; } @@ -4902,9 +4855,7 @@ key_do_getnewspi(spirange, saidx) } if (count == 0 || newspi == 0) { -#if IPSEC_DEBUG - printf("key_do_getnewspi: to allocate spi is failed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_do_getnewspi: to allocate spi is failed.\n")); return 0; } } @@ -4951,9 +4902,7 @@ key_update(so, m, mhp) /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { -#if IPSEC_DEBUG - printf("key_update: invalid satype is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_update: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } @@ -4968,17 +4917,13 @@ key_update(so, m, mhp) mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) || (mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL && mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) { -#if IPSEC_DEBUG - printf("key_update: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_update: invalid message is 
passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) || mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { -#if IPSEC_DEBUG - printf("key_update: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_update: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_X_EXT_SA2] != NULL) { @@ -4999,9 +4944,7 @@ key_update(so, m, mhp) /* get a SA header */ if ((sah = key_getsah(&saidx)) == NULL) { -#if IPSEC_DEBUG - printf("key_update: no SA index found.\n"); -#endif + ipseclog((LOG_DEBUG, "key_update: no SA index found.\n")); return key_senderror(so, m, ENOENT); } @@ -5015,45 +4958,40 @@ key_update(so, m, mhp) #if IPSEC_DOSEQCHECK if (mhp->msg->sadb_msg_seq != 0 && (sav = key_getsavbyseq(sah, mhp->msg->sadb_msg_seq)) == NULL) { -#if IPSEC_DEBUG - printf("key_update: no larval SA with sequence %u exists.\n", - mhp->msg->sadb_msg_seq); -#endif + ipseclog((LOG_DEBUG, + "key_update: no larval SA with sequence %u exists.\n", + mhp->msg->sadb_msg_seq)); return key_senderror(so, m, ENOENT); } #else if ((sav = key_getsavbyspi(sah, sa0->sadb_sa_spi)) == NULL) { -#if IPSEC_DEBUG - printf("key_update: no such a SA found (spi:%u)\n", - (u_int32_t)ntohl(sa0->sadb_sa_spi)); -#endif + ipseclog((LOG_DEBUG, + "key_update: no such a SA found (spi:%u)\n", + (u_int32_t)ntohl(sa0->sadb_sa_spi))); return key_senderror(so, m, EINVAL); } #endif /* validity check */ if (sav->sah->saidx.proto != proto) { -#if IPSEC_DEBUG - printf("key_update: protocol mismatched (DB=%u param=%u)\n", - sav->sah->saidx.proto, proto); -#endif + ipseclog((LOG_DEBUG, + "key_update: protocol mismatched (DB=%u param=%u)\n", + sav->sah->saidx.proto, proto)); return key_senderror(so, m, EINVAL); } #if IPSEC_DOSEQCHECK if (sav->spi != sa0->sadb_sa_spi) { -#if IPSEC_DEBUG - printf("key_update: SPI mismatched (DB:%u param:%u)\n", - (u_int32_t)ntohl(sav->spi), - (u_int32_t)ntohl(sa0->sadb_sa_spi)); -#endif + ipseclog((LOG_DEBUG, + "key_update: SPI mismatched (DB:%u param:%u)\n", + (u_int32_t)ntohl(sav->spi), + (u_int32_t)ntohl(sa0->sadb_sa_spi))); return key_senderror(so, m, EINVAL); } #endif if (sav->pid != mhp->msg->sadb_msg_pid) { -#if IPSEC_DEBUG - printf("key_update: pid mismatched (DB:%u param:%u)\n", - sav->pid, mhp->msg->sadb_msg_pid); -#endif + ipseclog((LOG_DEBUG, + "key_update: pid mismatched (DB:%u param:%u)\n", + sav->pid, mhp->msg->sadb_msg_pid)); return key_senderror(so, m, EINVAL); } @@ -5076,9 +5014,7 @@ key_update(so, m, mhp) /* set msg buf from mhp */ n = key_getmsgbuf_x1(m, mhp); if (n == NULL) { -#if IPSEC_DEBUG - printf("key_update: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_update: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } @@ -5161,9 +5097,7 @@ key_add(so, m, mhp) /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { -#if IPSEC_DEBUG - printf("key_add: invalid satype is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_add: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } @@ -5178,18 +5112,14 @@ key_add(so, m, mhp) mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) || (mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL && mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) { -#if IPSEC_DEBUG - printf("key_add: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_add: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_SA] 
< sizeof(struct sadb_sa) || mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { /* XXX need more */ -#if IPSEC_DEBUG - printf("key_add: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_add: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_X_EXT_SA2] != NULL) { @@ -5211,9 +5141,7 @@ key_add(so, m, mhp) if ((newsah = key_getsah(&saidx)) == NULL) { /* create a new SA header */ if ((newsah = key_newsah(&saidx)) == NULL) { -#if IPSEC_DEBUG - printf("key_add: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_add: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } } @@ -5228,9 +5156,7 @@ key_add(so, m, mhp) /* create new SA entry. */ /* We can create new SA only if SPI is differenct. */ if (key_getsavbyspi(newsah, sa0->sadb_sa_spi)) { -#if IPSEC_DEBUG - printf("key_add: SA already exists.\n"); -#endif + ipseclog((LOG_DEBUG, "key_add: SA already exists.\n")); return key_senderror(so, m, EEXIST); } newsav = key_newsav(m, mhp, newsah, &error); @@ -5255,9 +5181,7 @@ key_add(so, m, mhp) /* set msg buf from mhp */ n = key_getmsgbuf_x1(m, mhp); if (n == NULL) { -#if IPSEC_DEBUG - printf("key_update: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_update: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } @@ -5290,9 +5214,7 @@ key_setident(sah, m, mhp) if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL || mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) { -#if IPSEC_DEBUG - printf("key_setident: invalid identity.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setident: invalid identity.\n")); return EINVAL; } @@ -5303,9 +5225,7 @@ key_setident(sah, m, mhp) /* validity check */ if (idsrc->sadb_ident_type != iddst->sadb_ident_type) { -#if IPSEC_DEBUG - printf("key_setident: ident type mismatch.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setident: ident type mismatch.\n")); return EINVAL; } @@ -5323,18 +5243,14 @@ key_setident(sah, m, mhp) /* make structure */ KMALLOC(sah->idents, struct sadb_ident *, idsrclen); if (sah->idents == NULL) { -#if IPSEC_DEBUG - printf("key_setident: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setident: No more memory.\n")); return ENOBUFS; } KMALLOC(sah->identd, struct sadb_ident *, iddstlen); if (sah->identd == NULL) { KFREE(sah->idents); sah->idents = NULL; -#if IPSEC_DEBUG - printf("key_setident: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_setident: No more memory.\n")); return ENOBUFS; } bcopy(idsrc, sah->idents, idsrclen); @@ -5413,25 +5329,19 @@ key_delete(so, m, mhp) /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { -#if IPSEC_DEBUG - printf("key_delete: invalid satype is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_delete: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) { -#if IPSEC_DEBUG - printf("key_delete: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_delete: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { -#if IPSEC_DEBUG - printf("key_delete: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_delete: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } @@ -5441,14 +5351,10 @@ key_delete(so, m, mhp) * that match the 
src/dst. This is used during * IKE INITIAL-CONTACT. */ -#if IPSEC_DEBUG - printf("key_delete: doing delete all.\n"); -#endif + ipseclog((LOG_DEBUG, "key_delete: doing delete all.\n")); return key_delete_all(so, m, mhp, proto); } else if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa)) { -#if IPSEC_DEBUG - printf("key_delete: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_delete: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } @@ -5463,7 +5369,7 @@ key_delete(so, m, mhp) LIST_FOREACH(sah, &sahtree, chain) { if (sah->state == SADB_SASTATE_DEAD) continue; - if (key_cmpsaidx_withoutmode(&sah->saidx, &saidx) == 0) + if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0) continue; /* get a SA with SPI. */ @@ -5472,9 +5378,7 @@ key_delete(so, m, mhp) break; } if (sah == NULL) { -#if IPSEC_DEBUG - printf("key_delete: no SA found.\n"); -#endif + ipseclog((LOG_DEBUG, "key_delete: no SA found.\n")); return key_senderror(so, m, ENOENT); } @@ -5532,7 +5436,7 @@ key_delete_all(so, m, mhp, proto) LIST_FOREACH(sah, &sahtree, chain) { if (sah->state == SADB_SASTATE_DEAD) continue; - if (key_cmpsaidx_withoutmode(&sah->saidx, &saidx) == 0) + if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0) continue; /* Delete all non-LARVAL SAs. */ @@ -5547,12 +5451,10 @@ key_delete_all(so, m, mhp, proto) nextsav = LIST_NEXT(sav, chain); /* sanity check */ if (sav->state != state) { -#if IPSEC_DEBUG - printf("key_delete_all: " + ipseclog((LOG_DEBUG, "key_delete_all: " "invalid sav->state " "(queue: %d SA: %d)\n", - state, sav->state); -#endif + state, sav->state)); continue; } @@ -5617,26 +5519,20 @@ key_get(so, m, mhp) /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { -#if IPSEC_DEBUG - printf("key_get: invalid satype is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_get: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_EXT_SA] == NULL || mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) { -#if IPSEC_DEBUG - printf("key_get: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_get: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) || mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { -#if IPSEC_DEBUG - printf("key_get: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_get: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } @@ -5651,7 +5547,7 @@ key_get(so, m, mhp) LIST_FOREACH(sah, &sahtree, chain) { if (sah->state == SADB_SASTATE_DEAD) continue; - if (key_cmpsaidx_withoutmode(&sah->saidx, &saidx) == 0) + if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0) continue; /* get a SA with SPI. */ @@ -5660,9 +5556,7 @@ key_get(so, m, mhp) break; } if (sah == NULL) { -#if IPSEC_DEBUG - printf("key_get: no SA found.\n"); -#endif + ipseclog((LOG_DEBUG, "key_get: no SA found.\n")); return key_senderror(so, m, ENOENT); } @@ -5672,9 +5566,7 @@ key_get(so, m, mhp) /* map proto to satype */ if ((satype = key_proto2satype(sah->saidx.proto)) == 0) { -#if IPSEC_DEBUG - printf("key_get: there was invalid proto in SAD.\n"); -#endif + ipseclog((LOG_DEBUG, "key_get: there was invalid proto in SAD.\n")); return key_senderror(so, m, EINVAL); } @@ -6022,8 +5914,7 @@ key_acquire(saidx, sp) /* set sadb_address for saidx's. 
*/ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, - (struct sockaddr *)&saidx->src, saidx->src.ss_len << 3, - IPSEC_ULPROTO_ANY); + (struct sockaddr *)&saidx->src, FULLMASK, IPSEC_ULPROTO_ANY); if (!m) { error = ENOBUFS; goto fail; @@ -6031,8 +5922,7 @@ key_acquire(saidx, sp) m_cat(result, m); m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, - (struct sockaddr *)&saidx->dst, saidx->dst.ss_len << 3, - IPSEC_ULPROTO_ANY); + (struct sockaddr *)&saidx->dst, FULLMASK, IPSEC_ULPROTO_ANY); if (!m) { error = ENOBUFS; goto fail; @@ -6154,9 +6044,7 @@ key_newacq(saidx) /* get new entry */ KMALLOC(newacq, struct secacq *, sizeof(struct secacq)); if (newacq == NULL) { -#if IPSEC_DEBUG - printf("key_newacq: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_newacq: No more memory.\n")); return NULL; } bzero(newacq, sizeof(*newacq)); @@ -6178,7 +6066,7 @@ key_getacq(saidx) struct secacq *acq; LIST_FOREACH(acq, &acqtree, chain) { - if (key_cmpsaidx_exactly(saidx, &acq->saidx)) + if (key_cmpsaidx(saidx, &acq->saidx, CMP_EXACTLY)) return acq; } @@ -6210,9 +6098,7 @@ key_newspacq(spidx) /* get new entry */ KMALLOC(acq, struct secspacq *, sizeof(struct secspacq)); if (acq == NULL) { -#if IPSEC_DEBUG - printf("key_newspacq: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_newspacq: No more memory.\n")); return NULL; } bzero(acq, sizeof(*acq)); @@ -6283,9 +6169,7 @@ key_acquire2(so, m, mhp) /* check sequence number */ if (mhp->msg->sadb_msg_seq == 0) { -#if IPSEC_DEBUG - printf("key_acquire2: must specify sequence number.\n"); -#endif + ipseclog((LOG_DEBUG, "key_acquire2: must specify sequence number.\n")); m_freem(m); return 0; } @@ -6314,9 +6198,7 @@ key_acquire2(so, m, mhp) /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { -#if IPSEC_DEBUG - printf("key_acquire2: invalid satype is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_acquire2: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } @@ -6324,18 +6206,14 @@ key_acquire2(so, m, mhp) mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || mhp->ext[SADB_EXT_PROPOSAL] == NULL) { /* error */ -#if IPSEC_DEBUG - printf("key_acquire2: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_acquire2: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_PROPOSAL] < sizeof(struct sadb_prop)) { /* error */ -#if IPSEC_DEBUG - printf("key_acquire2: invalid message is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_acquire2: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } @@ -6349,22 +6227,18 @@ key_acquire2(so, m, mhp) LIST_FOREACH(sah, &sahtree, chain) { if (sah->state == SADB_SASTATE_DEAD) continue; - if (key_cmpsaidx_withmode(&sah->saidx, &saidx)) + if (key_cmpsaidx(&sah->saidx, &saidx, CMP_MODE_REQID)) break; } if (sah != NULL) { -#if IPSEC_DEBUG - printf("key_acquire2: a SA exists already.\n"); -#endif + ipseclog((LOG_DEBUG, "key_acquire2: a SA exists already.\n")); return key_senderror(so, m, EEXIST); } error = key_acquire(&saidx, NULL); if (error != 0) { -#if IPSEC_DEBUG - printf("key_acquire2: error %d returned " - "from key_acquire.\n", mhp->msg->sadb_msg_errno); -#endif + ipseclog((LOG_DEBUG, "key_acquire2: error %d returned " + "from key_acquire.\n", mhp->msg->sadb_msg_errno)); return key_senderror(so, m, error); } @@ -6407,9 +6281,7 @@ key_register(so, m, mhp) /* check whether existing or not 
*/ LIST_FOREACH(reg, ®tree[mhp->msg->sadb_msg_satype], chain) { if (reg->so == so) { -#if IPSEC_DEBUG - printf("key_register: socket exists already.\n"); -#endif + ipseclog((LOG_DEBUG, "key_register: socket exists already.\n")); return key_senderror(so, m, EEXIST); } } @@ -6417,9 +6289,7 @@ key_register(so, m, mhp) /* create regnode */ KMALLOC(newreg, struct secreg *, sizeof(*newreg)); if (newreg == NULL) { -#if IPSEC_DEBUG - printf("key_register: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_register: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } bzero((caddr_t)newreg, sizeof(*newreg)); @@ -6631,7 +6501,9 @@ key_expire(sav) m_cat(result, m); /* create SA extension */ - m = key_setsadbxsa2(sav->sah->saidx.mode, sav->sah->saidx.reqid); + m = key_setsadbxsa2(sav->sah->saidx.mode, + sav->replay ? sav->replay->count : 0, + sav->sah->saidx.reqid); if (!m) { error = ENOBUFS; goto fail; @@ -6662,7 +6534,7 @@ key_expire(sav) /* set sadb_address for source */ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, (struct sockaddr *)&sav->sah->saidx.src, - sav->sah->saidx.src.ss_len << 3, IPSEC_ULPROTO_ANY); + FULLMASK, IPSEC_ULPROTO_ANY); if (!m) { error = ENOBUFS; goto fail; @@ -6672,7 +6544,7 @@ key_expire(sav) /* set sadb_address for destination */ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, (struct sockaddr *)&sav->sah->saidx.dst, - sav->sah->saidx.dst.ss_len << 3, IPSEC_ULPROTO_ANY); + FULLMASK, IPSEC_ULPROTO_ANY); if (!m) { error = ENOBUFS; goto fail; @@ -6699,6 +6571,7 @@ key_expire(sav) mtod(result, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(result->m_pkthdr.len); + splx(s); return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); fail: @@ -6739,9 +6612,7 @@ key_flush(so, m, mhp) /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { -#if IPSEC_DEBUG - printf("key_flush: invalid satype is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_flush: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } @@ -6775,9 +6646,7 @@ key_flush(so, m, mhp) if (m->m_len < sizeof(struct sadb_msg) || sizeof(struct sadb_msg) > m->m_len + M_TRAILINGSPACE(m)) { -#if IPSEC_DEBUG - printf("key_flush: No more memory.\n"); -#endif + ipseclog((LOG_DEBUG, "key_flush: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } @@ -6826,9 +6695,7 @@ key_dump(so, m, mhp) /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { -#if IPSEC_DEBUG - printf("key_dump: invalid satype is passed.\n"); -#endif + ipseclog((LOG_DEBUG, "key_dump: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } @@ -6861,9 +6728,7 @@ key_dump(so, m, mhp) /* map proto to satype */ if ((satype = key_proto2satype(sah->saidx.proto)) == 0) { -#if IPSEC_DEBUG - printf("key_dump: there was invalid proto in SAD.\n"); -#endif + ipseclog((LOG_DEBUG, "key_dump: there was invalid proto in SAD.\n")); return key_senderror(so, m, EINVAL); } @@ -6996,7 +6861,7 @@ key_parse(m, so) #if 0 /*kdebug_sadb assumes msg in linear buffer*/ KEYDEBUG(KEYDEBUG_KEY_DUMP, - printf("key_parse: passed sadb_msg\n"); + ipseclog((LOG_DEBUG, "key_parse: passed sadb_msg\n")); kdebug_sadb(msg)); #endif @@ -7011,29 +6876,24 @@ key_parse(m, so) if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len != m->m_pkthdr.len) { -#if IPSEC_DEBUG - printf("key_parse: invalid message length.\n"); -#endif + ipseclog((LOG_DEBUG, "key_parse: invalid message length.\n")); pfkeystat.out_invlen++; error = EINVAL; goto senderror; } if (msg->sadb_msg_version != PF_KEY_V2) { -#if 
IPSEC_DEBUG - printf("key_parse: PF_KEY version %u is mismatched.\n", - msg->sadb_msg_version); -#endif + ipseclog((LOG_DEBUG, + "key_parse: PF_KEY version %u is mismatched.\n", + msg->sadb_msg_version)); pfkeystat.out_invver++; error = EINVAL; goto senderror; } if (msg->sadb_msg_type > SADB_MAX) { -#if IPSEC_DEBUG - printf("key_parse: invalid type %u is passed.\n", - msg->sadb_msg_type); -#endif + ipseclog((LOG_DEBUG, "key_parse: invalid type %u is passed.\n", + msg->sadb_msg_type)); pfkeystat.out_invmsgtype++; error = EINVAL; goto senderror; @@ -7089,11 +6949,8 @@ key_parse(m, so) case SADB_GET: case SADB_ACQUIRE: case SADB_EXPIRE: -#if IPSEC_DEBUG - printf("key_parse: must specify satype " - "when msg type=%u.\n", - msg->sadb_msg_type); -#endif + ipseclog((LOG_DEBUG, "key_parse: must specify satype " + "when msg type=%u.\n", msg->sadb_msg_type)); pfkeystat.out_invsatype++; error = EINVAL; goto senderror; @@ -7111,10 +6968,8 @@ key_parse(m, so) case SADB_X_SPDSETIDX: case SADB_X_SPDUPDATE: case SADB_X_SPDDELETE2: -#if IPSEC_DEBUG - printf("key_parse: illegal satype=%u\n", - msg->sadb_msg_type); -#endif + ipseclog((LOG_DEBUG, "key_parse: illegal satype=%u\n", + msg->sadb_msg_type)); pfkeystat.out_invsatype++; error = EINVAL; goto senderror; @@ -7124,10 +6979,8 @@ key_parse(m, so) case SADB_SATYPE_OSPFV2: case SADB_SATYPE_RIPV2: case SADB_SATYPE_MIP: -#if IPSEC_DEBUG - printf("key_parse: type %u isn't supported.\n", - msg->sadb_msg_satype); -#endif + ipseclog((LOG_DEBUG, "key_parse: type %u isn't supported.\n", + msg->sadb_msg_satype)); pfkeystat.out_invsatype++; error = EOPNOTSUPP; goto senderror; @@ -7136,10 +6989,8 @@ key_parse(m, so) break; /*FALLTHROUGH*/ default: -#if IPSEC_DEBUG - printf("key_parse: invalid type %u is passed.\n", - msg->sadb_msg_satype); -#endif + ipseclog((LOG_DEBUG, "key_parse: invalid type %u is passed.\n", + msg->sadb_msg_satype)); pfkeystat.out_invsatype++; error = EINVAL; goto senderror; @@ -7156,9 +7007,7 @@ key_parse(m, so) /* check upper layer protocol */ if (src0->sadb_address_proto != dst0->sadb_address_proto) { -#if IPSEC_DEBUG - printf("key_parse: upper layer protocol mismatched.\n"); -#endif + ipseclog((LOG_DEBUG, "key_parse: upper layer protocol mismatched.\n")); pfkeystat.out_invaddr++; error = EINVAL; goto senderror; @@ -7167,18 +7016,15 @@ key_parse(m, so) /* check family */ if (PFKEY_ADDR_SADDR(src0)->sa_family != PFKEY_ADDR_SADDR(dst0)->sa_family) { -#if IPSEC_DEBUG - printf("key_parse: address family mismatched.\n"); -#endif + ipseclog((LOG_DEBUG, "key_parse: address family mismatched.\n")); pfkeystat.out_invaddr++; error = EINVAL; goto senderror; } if (PFKEY_ADDR_SADDR(src0)->sa_len != PFKEY_ADDR_SADDR(dst0)->sa_len) { -#if IPSEC_DEBUG - printf("key_parse: address struct size mismatched.\n"); -#endif + ipseclog((LOG_DEBUG, + "key_parse: address struct size mismatched.\n")); pfkeystat.out_invaddr++; error = EINVAL; goto senderror; @@ -7202,9 +7048,8 @@ key_parse(m, so) } break; default: -#if IPSEC_DEBUG - printf("key_parse: unsupported address family.\n"); -#endif + ipseclog((LOG_DEBUG, + "key_parse: unsupported address family.\n")); pfkeystat.out_invaddr++; error = EAFNOSUPPORT; goto senderror; @@ -7225,9 +7070,8 @@ key_parse(m, so) /* check max prefix length */ if (src0->sadb_address_prefixlen > plen || dst0->sadb_address_prefixlen > plen) { -#if IPSEC_DEBUG - printf("key_parse: illegal prefixlen.\n"); -#endif + ipseclog((LOG_DEBUG, + "key_parse: illegal prefixlen.\n")); pfkeystat.out_invaddr++; error = EINVAL; goto senderror; @@ -7333,21 
+7177,18 @@ key_align(m, mhp) * KEY_AUTH or KEY_ENCRYPT ? */ if (mhp->ext[ext->sadb_ext_type] != NULL) { -#if IPSEC_DEBUG - printf("key_align: duplicate ext_type %u " - "is passed.\n", - ext->sadb_ext_type); -#endif + ipseclog((LOG_DEBUG, + "key_align: duplicate ext_type %u " + "is passed.\n", ext->sadb_ext_type)); m_freem(m); pfkeystat.out_dupext++; return EINVAL; } break; default: -#if IPSEC_DEBUG - printf("key_align: invalid ext_type %u is passed.\n", - ext->sadb_ext_type); -#endif + ipseclog((LOG_DEBUG, + "key_align: invalid ext_type %u is passed.\n", + ext->sadb_ext_type)); m_freem(m); pfkeystat.out_invexttype++; return EINVAL; diff --git a/bsd/netkey/key_debug.c b/bsd/netkey/key_debug.c index 2366598b3..6db61a105 100644 --- a/bsd/netkey/key_debug.c +++ b/bsd/netkey/key_debug.c @@ -1,3 +1,6 @@ +/* $FreeBSD: src/sys/netkey/key_debug.c,v 1.10.2.5 2002/04/28 05:40:28 suz Exp $ */ +/* $KAME: key_debug.c,v 1.26 2001/06/27 10:46:50 sakane Exp $ */ + /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. @@ -379,9 +382,9 @@ kdebug_sadb_x_sa2(ext) printf("sadb_x_sa2{ mode=%u reqid=%u\n", sa2->sadb_x_sa2_mode, sa2->sadb_x_sa2_reqid); - printf(" reserved1=%u reserved2=%u reserved3=%u }\n", - sa2->sadb_x_sa2_reserved1, sa2->sadb_x_sa2_reserved1, - sa2->sadb_x_sa2_reserved1); + printf(" reserved1=%u reserved2=%u sequence=%u }\n", + sa2->sadb_x_sa2_reserved1, sa2->sadb_x_sa2_reserved2, + sa2->sadb_x_sa2_sequence); return; } @@ -671,7 +674,7 @@ void kdebug_sockaddr(addr) struct sockaddr *addr; { - struct sockaddr_in *sin; + struct sockaddr_in *sin4; #ifdef INET6 struct sockaddr_in6 *sin6; #endif @@ -685,9 +688,9 @@ kdebug_sockaddr(addr) switch (addr->sa_family) { case AF_INET: - sin = (struct sockaddr_in *)addr; - printf(" port=%u\n", ntohs(sin->sin_port)); - ipsec_hexdump((caddr_t)&sin->sin_addr, sizeof(sin->sin_addr)); + sin4 = (struct sockaddr_in *)addr; + printf(" port=%u\n", ntohs(sin4->sin_port)); + ipsec_hexdump((caddr_t)&sin4->sin_addr, sizeof(sin4->sin_addr)); break; #ifdef INET6 case AF_INET6: diff --git a/bsd/netkey/key_debug.h b/bsd/netkey/key_debug.h index 5b731f9bf..ebbf17f04 100644 --- a/bsd/netkey/key_debug.h +++ b/bsd/netkey/key_debug.h @@ -54,7 +54,8 @@ #define KEYDEBUG_IPSEC_DATA (KEYDEBUG_IPSEC | KEYDEBUG_DATA) #define KEYDEBUG_IPSEC_DUMP (KEYDEBUG_IPSEC | KEYDEBUG_DUMP) -#define KEYDEBUG(lev,arg) if ((key_debug_level & (lev)) == (lev)) { arg; } +#define KEYDEBUG(lev,arg) \ + do { if ((key_debug_level & (lev)) == (lev)) { arg; } } while (0) struct sadb_msg; struct sadb_ext; diff --git a/bsd/netkey/key_var.h b/bsd/netkey/key_var.h index 6efb8dfe7..aa7d7f677 100644 --- a/bsd/netkey/key_var.h +++ b/bsd/netkey/key_var.h @@ -46,7 +46,9 @@ #define KEYCTL_ESP_KEYMIN 9 #define KEYCTL_ESP_AUTH 10 #define KEYCTL_AH_KEYMIN 11 -#define KEYCTL_MAXID 12 +#define KEYCTL_PREFERED_OLDSA 12 +#define KEYCTL_NATT_KEEPALIVE_INTERVAL 13 +#define KEYCTL_MAXID 14 #define KEYCTL_NAMES { \ { 0, 0 }, \ @@ -58,9 +60,13 @@ { "larval_lifetime", CTLTYPE_INT }, \ { "blockacq_count", CTLTYPE_INT }, \ { "blockacq_lifetime", CTLTYPE_INT }, \ + { "esp_keymin", CTLTYPE_INT }, \ + { "esp_auth", CTLTYPE_INT }, \ + { "ah_keymin", CTLTYPE_INT }, \ + { "prefered_oldsa", CTLTYPE_INT }, \ + { "natt_keepalive_interval", CTLTYPE_INT }, \ } -//#if IPSEC_DEBUG #define KEYCTL_VARS { \ 0, \ &key_debug_level, \ @@ -73,22 +79,9 @@ &key_blockacq_lifetime, \ &ipsec_esp_keymin, \ &ipsec_ah_keymin, \ + &ipsec_prefered_oldsa, \ + &natt_keepalive_interval, \ } -//#else -//#define KEYCTL_VARS { \ -// 0, \ -// 
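/*
 * NOTE (editor's sketch): two small fixes in the key_debug hunks above
 * are easy to miss.  kdebug_sadb_x_sa2() used to print
 * sadb_x_sa2_reserved1 three times; it now prints reserved1, reserved2,
 * and the sequence field.  And KEYDEBUG() gains the standard
 * do { ... } while (0) wrapper, without which a bare if-statement macro
 * breaks under an enclosing if/else.  Hypothetical illustration of the
 * failure with the old form:
 */
extern u_int32_t key_debug_level;
#define KEYDEBUG_OLD(lev,arg) if ((key_debug_level & (lev)) == (lev)) { arg; }

void example(int cond)	/* hypothetical caller, for illustration only */
{
	if (cond)
		KEYDEBUG_OLD(1, printf("traced\n"));
		/* expands to "if (...) { ... } ;" -- that ";" ends the
		 * outer if, so the "else" below no longer parses */
	else
		printf("untraced\n");
}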
0, \ -// &key_spi_trycnt, \ -// &key_spi_minval, \ -// &key_spi_maxval, \ -// &key_int_random, \ -// &key_larval_lifetime, \ -// &key_blockacq_count, \ -// &key_blockacq_lifetime, \ -// &ipsec_esp_keymin, \ -// &ipsec_ah_keymin, \ -//} -//#endif #ifdef KERNEL #define _ARRAYLEN(p) (sizeof(p)/sizeof(p[0])) diff --git a/bsd/netkey/keydb.h b/bsd/netkey/keydb.h index 1d9ef21cb..8c70fa95a 100644 --- a/bsd/netkey/keydb.h +++ b/bsd/netkey/keydb.h @@ -97,6 +97,10 @@ struct secasvar { pid_t pid; /* message's pid */ struct secashead *sah; /* back pointer to the secashead */ + + /* Nat Traversal related bits */ + u_int32_t natt_last_activity; + u_int16_t remote_ike_port; }; /* replay prevention */ diff --git a/bsd/nfs/Makefile b/bsd/nfs/Makefile index 35118f8fd..55a7668eb 100644 --- a/bsd/nfs/Makefile +++ b/bsd/nfs/Makefile @@ -21,6 +21,7 @@ EXPINC_SUBDIRS_I386 = \ DATAFILES = \ krpc.h nfs.h nfsdiskless.h nfsm_subs.h nfsmount.h nfsnode.h \ + nlminfo.h nfs_lock.h \ nfsproto.h nfsrtt.h nfsrvcache.h nqnfs.h rpcv2.h xdr_subs.h diff --git a/bsd/nfs/krpc_subr.c b/bsd/nfs/krpc_subr.c index f1412b99f..8999fd218 100644 --- a/bsd/nfs/krpc_subr.c +++ b/bsd/nfs/krpc_subr.c @@ -240,6 +240,7 @@ krpc_call(sa, prog, vers, func, data, from_p) tv.tv_sec = 1; tv.tv_usec = 0; bzero(&sopt, sizeof sopt); + sopt.sopt_dir = SOPT_SET; sopt.sopt_level = SOL_SOCKET; sopt.sopt_name = SO_RCVTIMEO; sopt.sopt_val = &tv; @@ -358,6 +359,12 @@ krpc_call(sa, prog, vers, func, data, from_p) printf("RPC timeout for server " IP_FORMAT "\n", IP_LIST(&(sin->sin_addr.s_addr))); + /* + * soreceive is now conditionally using this pointer + * if present, it updates per-proc stats + */ + auio.uio_procp = NULL; + /* * Wait for up to timo seconds for a reply. * The socket receive timeout was set to 1 second. diff --git a/bsd/nfs/nfs.h b/bsd/nfs/nfs.h index b8d810f11..9e05d983b 100644 --- a/bsd/nfs/nfs.h +++ b/bsd/nfs/nfs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -89,18 +89,24 @@ #ifndef NFS_MAXATTRTIMO #define NFS_MAXATTRTIMO 60 #endif -#define NFS_WSIZE 8192 /* Def. write data size <= 8192 */ -#define NFS_RSIZE 8192 /* Def. read data size <= 8192 */ +#define NFS_WSIZE 16384 /* Def. write data size <= 16K */ +#define NFS_RSIZE 16384 /* Def. read data size <= 16K */ +#define NFS_DGRAM_WSIZE 8192 /* UDP Def. write data size <= 8K */ +#define NFS_DGRAM_RSIZE 8192 /* UDP Def. read data size <= 8K */ #define NFS_READDIRSIZE 8192 /* Def. readdir size */ -#define NFS_DEFRAHEAD 1 /* Def. read ahead # blocks */ -#define NFS_MAXRAHEAD 4 /* Max. read ahead # blocks */ +#define NFS_DEFRAHEAD 4 /* Def. read ahead # blocks */ +#define NFS_MAXRAHEAD 16 /* Max. read ahead # blocks */ #define NFS_MAXUIDHASH 64 /* Max. # of hashed uid entries/mp */ -#define NFS_MAXASYNCDAEMON 20 /* Max. number async_daemons runnable */ +#define NFS_MAXASYNCDAEMON 32 /* Max. number async_daemons runnable */ #define NFS_MAXGATHERDELAY 100 /* Max. 
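// NOTE (editor's sketch): in the krpc_subr.c hunk above, the added
// "sopt.sopt_dir = SOPT_SET;" line matters because struct sockopt is
// shared by getsockopt- and setsockopt-style requests, and consumers
// may legitimately check the direction field.  The full pattern used
// there, with so and error assumed in scope:
//
//	struct sockopt sopt;
//	struct timeval tv = { 1, 0 };		// 1-second receive timeout
//	bzero(&sopt, sizeof sopt);
//	sopt.sopt_dir = SOPT_SET;		// the line this patch adds
//	sopt.sopt_level = SOL_SOCKET;
//	sopt.sopt_name = SO_RCVTIMEO;
//	sopt.sopt_val = &tv;
//	sopt.sopt_valsize = sizeof tv;
//	error = sosetopt(so, &sopt);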
write gather delay (msec) */ #ifndef NFS_GATHERDELAY #define NFS_GATHERDELAY 10 /* Default write gather delay (msec) */ #endif #define NFS_DIRBLKSIZ 4096 /* Must be a multiple of DIRBLKSIZ */ +#if defined(KERNEL) && !defined(DIRBLKSIZ) +#define DIRBLKSIZ 512 /* XXX we used to use ufs's DIRBLKSIZ */ + /* can't be larger than NFS_FABLKSIZE */ +#endif /* * Oddballs @@ -115,13 +121,13 @@ /* * XXX - * The B_INVAFTERWRITE flag should be set to whatever is required by the + * The NB_INVAFTERWRITE flag should be set to whatever is required by the * buffer cache code to say "Invalidate the block after it is written back". */ #ifdef __FreeBSD__ -#define B_INVAFTERWRITE B_NOCACHE +#define NB_INVAFTERWRITE NB_NOCACHE #else -#define B_INVAFTERWRITE B_INVAL +#define NB_INVAFTERWRITE NB_INVAL #endif /* @@ -132,15 +138,6 @@ #define IO_METASYNC 0 #endif -/* - * Set the attribute timeout based on how recently the file has been modified. - */ -#define NFS_ATTRTIMEO(np) \ - ((((np)->n_flag & NMODIFIED) || \ - (time.tv_sec - (np)->n_mtime) / 10 < NFS_MINATTRTIMO) ? NFS_MINATTRTIMO : \ - ((time.tv_sec - (np)->n_mtime) / 10 > NFS_MAXATTRTIMO ? NFS_MAXATTRTIMO : \ - (time.tv_sec - (np)->n_mtime) / 10)) - /* * Expected allocation sizes for major data structures. If the actual size * of the structure exceeds these sizes, then malloc() will be allocating @@ -202,21 +199,24 @@ struct nfs_args { #define NFSMNT_RESVPORT 0x00008000 /* Allocate a reserved port */ #define NFSMNT_RDIRPLUS 0x00010000 /* Use Readdirplus for V3 */ #define NFSMNT_READDIRSIZE 0x00020000 /* Set readdir size */ -#define NFSMNT_INTERNAL 0xfffc0000 /* Bits set internally */ -#define NFSMNT_HASWRITEVERF 0x00040000 /* Has write verifier for V3 */ -#define NFSMNT_GOTPATHCONF 0x00080000 /* Got the V3 pathconf info */ -#define NFSMNT_GOTFSINFO 0x00100000 /* Got the V3 fsinfo */ -#define NFSMNT_MNTD 0x00200000 /* Mnt server for mnt point */ -#define NFSMNT_DISMINPROG 0x00400000 /* Dismount in progress */ -#define NFSMNT_DISMNT 0x00800000 /* Dismounted */ -#define NFSMNT_SNDLOCK 0x01000000 /* Send socket lock */ -#define NFSMNT_WANTSND 0x02000000 /* Want above */ -#define NFSMNT_RCVLOCK 0x04000000 /* Rcv socket lock */ -#define NFSMNT_WANTRCV 0x08000000 /* Want above */ -#define NFSMNT_WAITAUTH 0x10000000 /* Wait for authentication */ -#define NFSMNT_HASAUTH 0x20000000 /* Has authenticator */ -#define NFSMNT_WANTAUTH 0x40000000 /* Wants an authenticator */ -#define NFSMNT_AUTHERR 0x80000000 /* Authentication error */ +#define NFSMNT_NOLOCKS 0x00040000 /* don't support file locking */ + +#define NFSSTA_TIMEO 0x00010000 /* experienced a timeout. */ +#define NFSSTA_FORCE 0x00020000 /* doing a forced unmount. 
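// NOTE (editor's sketch): the NFSMNT -> NFSSTA rename above is a real
// split, not cosmetics.  NFSMNT_* bits are mount options supplied by
// mount_nfs and stay in nm_flag; the new NFSSTA_* bits are runtime
// state and (per later hunks such as "nmp->nm_state & NFSSTA_GOTFSINFO")
// live in a separate nm_state word, so option bits and state bits no
// longer compete for the same 32 flag positions:
//
//	if (nmp->nm_flag & NFSMNT_NOLOCKS)	// option: no file locking
//		return (EOPNOTSUPP);
//	if (nmp->nm_state & NFSSTA_FORCE)	// state: forced unmount
//		return (EIO);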
*/ +#define NFSSTA_HASWRITEVERF 0x00040000 /* Has write verifier for V3 */ +#define NFSSTA_GOTPATHCONF 0x00080000 /* Got the V3 pathconf info */ +#define NFSSTA_GOTFSINFO 0x00100000 /* Got the V3 fsinfo */ +#define NFSSTA_MNTD 0x00200000 /* Mnt server for mnt point */ +#define NFSSTA_DISMINPROG 0x00400000 /* Dismount in progress */ +#define NFSSTA_DISMNT 0x00800000 /* Dismounted */ +#define NFSSTA_SNDLOCK 0x01000000 /* Send socket lock */ +#define NFSSTA_WANTSND 0x02000000 /* Want above */ +#define NFSSTA_RCVLOCK 0x04000000 /* Rcv socket lock */ +#define NFSSTA_WANTRCV 0x08000000 /* Want above */ +#define NFSSTA_WAITAUTH 0x10000000 /* Wait for authentication */ +#define NFSSTA_HASAUTH 0x20000000 /* Has authenticator */ +#define NFSSTA_WANTAUTH 0x40000000 /* Wants an authenticator */ +#define NFSSTA_AUTHERR 0x80000000 /* Authentication error */ /* * Structures for the nfssvc(2) syscall. Not that anyone but nfsd and mount_nfs @@ -310,6 +310,13 @@ struct nfsstats { #define NFSSVC_AUTHINFAIL 0x080 #define NFSSVC_MNTD 0x100 +/* + * Flags for nfsclnt() system call. + */ +#define NFSCLNT_LOCKDANS 0x200 +#define NFSCLNT_LOCKDFD 0x400 +#define NFSCLNT_LOCKDWAIT 0x800 + /* * fs.nfs sysctl(3) identifiers */ @@ -350,7 +357,8 @@ MALLOC_DECLARE(M_NFSD); MALLOC_DECLARE(M_NFSBIGFH); #endif -struct uio; struct buf; struct vattr; struct nameidata; /* XXX */ +struct uio; struct vattr; struct nameidata; /* XXX */ +struct nfsbuf; #define NFSINT_SIGMASK (sigmask(SIGINT)|sigmask(SIGTERM)|sigmask(SIGKILL)| \ sigmask(SIGHUP)|sigmask(SIGQUIT)) @@ -361,7 +369,7 @@ struct uio; struct buf; struct vattr; struct nameidata; /* XXX */ */ #define NFSIGNORE_SOERROR(s, e) \ ((e) != EINTR && (e) != ERESTART && (e) != EWOULDBLOCK && \ - ((s) & PR_CONNREQUIRED) == 0) + (e) != EIO && ((s) & PR_CONNREQUIRED) == 0) /* * Nfs outstanding request list element @@ -382,6 +390,7 @@ struct nfsreq { u_int32_t r_procnum; /* NFS procedure number */ int r_rtt; /* RTT for rpc */ struct proc *r_procp; /* Proc that did I/O system call */ + long r_lastmsg; /* time of last tprintf */ }; /* @@ -390,14 +399,17 @@ struct nfsreq { extern TAILQ_HEAD(nfs_reqq, nfsreq) nfs_reqq; /* Flag values for r_flags */ -#define R_TIMING 0x01 /* timing request (in mntp) */ -#define R_SENT 0x02 /* request has been sent */ -#define R_SOFTTERM 0x04 /* soft mnt, too many retries */ -#define R_INTR 0x08 /* intr mnt, signal pending */ -#define R_SOCKERR 0x10 /* Fatal error on socket */ -#define R_TPRINTFMSG 0x20 /* Did a tprintf msg. */ -#define R_MUSTRESEND 0x40 /* Must resend request */ -#define R_GETONEREP 0x80 /* Probe for one reply only */ +#define R_TIMING 0x0001 /* timing request (in mntp) */ +#define R_SENT 0x0002 /* request has been sent */ +#define R_SOFTTERM 0x0004 /* soft mnt, too many retries */ +#define R_INTR 0x0008 /* intr mnt, signal pending */ +#define R_SOCKERR 0x0010 /* Fatal error on socket */ +#define R_TPRINTFMSG 0x0020 /* Did a tprintf msg. */ +#define R_MUSTRESEND 0x0040 /* Must resend request */ +#define R_GETONEREP 0x0080 /* Probe for one reply only */ +#define R_BUSY 0x0100 /* Locked. */ +#define R_WAITING 0x0200 /* Someone waiting for lock. */ +#define R_RESENDERR 0x0400 /* resend failed. 
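// NOTE (editor's sketch): widening r_flags from 8 to 16 bits above
// makes room for per-request lock bits, matching the new
// nfs_sndlock(struct nfsreq *) signature further down.  R_BUSY and
// R_WAITING suggest the usual sleep-lock protocol over a flag word --
// a hypothetical model only, assuming tsleep()/wakeup() on the request:
//
//	while (rep->r_flags & R_BUSY) {		// wait for current holder
//		rep->r_flags |= R_WAITING;
//		tsleep(rep, PSOCK, "nfsreq", 0);
//	}
//	rep->r_flags |= R_BUSY;
//	/* ... locked section ... */
//	rep->r_flags &= ~R_BUSY;
//	if (rep->r_flags & R_WAITING) {
//		rep->r_flags &= ~R_WAITING;
//		wakeup(rep);
//	}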
*/ /* * A list of nfssvc_sock structures is maintained with all the sockets @@ -464,7 +476,8 @@ struct nfssvc_sock { struct mbuf *ns_rec; struct mbuf *ns_recend; struct mbuf *ns_frag; - int ns_flag; + short ns_flag; /* modified under kernel funnel */ + short ns_nflag; /* modified under network funnel */ int ns_solock; int ns_cc; int ns_reclen; @@ -475,14 +488,14 @@ struct nfssvc_sock { LIST_HEAD(nfsrvw_delayhash, nfsrv_descript) ns_wdelayhashtbl[NFS_WDELAYHASHSIZ]; }; -/* Bits for "ns_flag" */ -#define SLP_VALID 0x01 -#define SLP_DOREC 0x02 -#define SLP_NEEDQ 0x04 -#define SLP_DISCONN 0x08 -#define SLP_GETSTREAM 0x10 -#define SLP_LASTFRAG 0x20 -#define SLP_ALLFLAGS 0xff +/* Bits for "ns_*flag" */ +#define SLP_VALID 0x01 /* ns_flag */ +#define SLP_DOREC 0x02 /* ns_flag */ +#define SLPN_NEEDQ 0x04 /* ns_nflag */ +#define SLPN_DISCONN 0x08 /* ns_nflag */ +#define SLPN_GETSTREAM 0x10 /* ns_nflag */ +#define SLPN_LASTFRAG 0x20 /* ns_nflag */ +#define SLP_ALLFLAGS 0xff /* ns_flag && ns_nflag */ extern TAILQ_HEAD(nfssvc_sockhead, nfssvc_sock) nfssvc_sockhead; extern int nfssvc_sockhead_flag; @@ -620,8 +633,10 @@ int nfs_send __P((struct socket *, struct mbuf *, struct mbuf *, int nfs_rephead __P((int, struct nfsrv_descript *, struct nfssvc_sock *, int, int, u_quad_t *, struct mbuf **, struct mbuf **, caddr_t *)); -int nfs_sndlock __P((int *, struct nfsreq *)); -void nfs_sndunlock __P((int *flagp)); +int nfs_sndlock __P((struct nfsreq *)); +void nfs_sndunlock __P((struct nfsreq *)); +int nfs_slplock __P((struct nfssvc_sock *, int)); +void nfs_slpunlock __P((struct nfssvc_sock *)); int nfs_disct __P((struct mbuf **, caddr_t *, int, int, caddr_t *)); int nfs_vinvalbuf __P((struct vnode *, int, struct ucred *, struct proc *, int)); @@ -629,8 +644,8 @@ int nfs_readrpc __P((struct vnode *, struct uio *, struct ucred *)); int nfs_writerpc __P((struct vnode *, struct uio *, struct ucred *, int *, int *)); int nfs_readdirrpc __P((struct vnode *, struct uio *, struct ucred *)); -int nfs_asyncio __P((struct buf *, struct ucred *)); -int nfs_doio __P((struct buf *, struct ucred *, struct proc *)); +int nfs_asyncio __P((struct nfsbuf *, struct ucred *)); +int nfs_doio __P((struct nfsbuf *, struct ucred *, struct proc *)); int nfs_readlinkrpc __P((struct vnode *, struct uio *, struct ucred *)); int nfs_sigintr __P((struct nfsmount *, struct nfsreq *, struct proc *)); int nfs_readdirplusrpc __P((struct vnode *, struct uio *, struct ucred *)); @@ -678,11 +693,14 @@ int nfs_bioread __P((struct vnode *, struct uio *, int, struct ucred *, int)); int nfsm_uiotombuf __P((struct uio *, struct mbuf **, int, caddr_t *)); void nfsrv_init __P((int)); +int nfs_commit __P((struct vnode *vp, u_quad_t offset, int cnt, + struct ucred *cred, struct proc *procp)); +int nfs_flushcommits(struct vnode *, struct proc *); void nfs_clearcommit __P((struct mount *)); int nfsrv_errmap __P((struct nfsrv_descript *, int)); void nfsrvw_sort __P((gid_t *, int)); void nfsrv_setcred __P((struct ucred *, struct ucred *)); -int nfs_writebp __P((struct buf *, int)); +int nfs_buf_write __P((struct nfsbuf *)); int nfsrv_object_create __P((struct vnode *)); void nfsrv_wakenfsd __P((struct nfssvc_sock *slp)); int nfsrv_writegather __P((struct nfsrv_descript **, struct nfssvc_sock *, @@ -842,8 +860,8 @@ extern uint nfstracemask; /* 32 bits - trace points over 31 are unconditional */ #else /* NFSDIAG */ - #define NFSTRACE(cnst, fptr) - #define NFSTRACE4(cnst, fptr, a2, a3, a4) +# define NFSTRACE(cnst, fptr) +# define NFSTRACE4(cnst, fptr, a2, a3, a4) 
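/*
 * NOTE (editor's sketch): the nfssvc_sock change above splits the old
 * int ns_flag into two shorts so each half is only written under one
 * funnel, as the new field comments say: SLP_VALID/SLP_DOREC stay in
 * ns_flag (kernel funnel) while the receive-side bits become SLPN_* in
 * ns_nflag (network funnel).  That way the two funnels never do a
 * read-modify-write on the same word, e.g.:
 *
 *	slp->ns_nflag |= SLPN_NEEDQ;	(network funnel held)
 *	slp->ns_flag  |= SLP_DOREC;	(kernel funnel held)
 */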
#endif /* NFSDIAG */ diff --git a/bsd/nfs/nfs_bio.c b/bsd/nfs/nfs_bio.c index 7f41efe13..1b6b078c5 100644 --- a/bsd/nfs/nfs_bio.c +++ b/bsd/nfs/nfs_bio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -66,8 +66,9 @@ #include #include #include -#include +#include #include +#include #include #include #include @@ -98,12 +99,863 @@ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, (A))) | DBG_FUNC_END, \ (int)(B), (int)(C), (int)(D), (int)(E), 0) -static struct buf *nfs_getcacheblk __P((struct vnode *vp, daddr_t bn, int size, - struct proc *p, int operation)); - extern int nfs_numasync; +extern int nfs_ioddelwri; extern struct nfsstats nfsstats; -extern int nbdwrite; + +#define NFSBUFHASH(dvp, lbn) \ + (&nfsbufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & nfsbufhash]) +LIST_HEAD(nfsbufhashhead, nfsbuf) *nfsbufhashtbl; +struct nfsbuffreehead nfsbuffree, nfsbufdelwri; +u_long nfsbufhash; +int nfsbufhashlock, nfsbufcnt, nfsbufmin, nfsbufmax; +int nfsbuffreecnt, nfsbufdelwricnt, nfsneedbuffer; +int nfs_nbdwrite; + +#define NFSBUFWRITE_THROTTLE 9 + +/* + * Initialize nfsbuf lists + */ +void +nfs_nbinit(void) +{ + nfsbufhashlock = 0; + nfsbufhashtbl = hashinit(nbuf, M_TEMP, &nfsbufhash); + TAILQ_INIT(&nfsbuffree); + TAILQ_INIT(&nfsbufdelwri); + nfsbufcnt = nfsbuffreecnt = nfsbufdelwricnt = 0; + nfsbufmin = 128; // XXX tune me! + nfsbufmax = 8192; // XXX tune me! + nfsneedbuffer = 0; + nfs_nbdwrite = 0; +} + +/* + * try to free up some excess, unused nfsbufs + */ +static void +nfs_buf_freeup(void) +{ + struct nfsbuf *fbp; + int cnt; + +#define NFS_BUF_FREEUP() \ + do { \ + /* only call nfs_buf_freeup() if it has work to do */ \ + if ((nfsbuffreecnt > nfsbufcnt/4) && \ + (nfsbufcnt-nfsbuffreecnt/8 > nfsbufmin)) \ + nfs_buf_freeup(); \ + } while (0) + + if (nfsbuffreecnt < nfsbufcnt/4) + return; + cnt = nfsbuffreecnt/8; + if (nfsbufcnt-cnt < nfsbufmin) + return; + + FSDBG(320, -1, nfsbufcnt, nfsbuffreecnt, cnt); + while (cnt-- > 0) { + fbp = TAILQ_FIRST(&nfsbuffree); + if (!fbp) + break; + nfs_buf_remfree(fbp); + /* disassociate buffer from any vnode */ + if (fbp->nb_vp) { + struct vnode *oldvp; + if (fbp->nb_vnbufs.le_next != NFSNOLIST) { + LIST_REMOVE(fbp, nb_vnbufs); + fbp->nb_vnbufs.le_next = NFSNOLIST; + } + oldvp = fbp->nb_vp; + fbp->nb_vp = NULL; + HOLDRELE(oldvp); + } + LIST_REMOVE(fbp, nb_hash); + /* nuke any creds */ + if (fbp->nb_rcred != NOCRED) + crfree(fbp->nb_rcred); + if (fbp->nb_wcred != NOCRED) + crfree(fbp->nb_wcred); + /* if buf was NB_META, dump buffer */ + if (ISSET(fbp->nb_flags, NB_META) && fbp->nb_data) { + FREE(fbp->nb_data, M_TEMP); + } + FREE(fbp, M_TEMP); + nfsbufcnt--; + } + FSDBG(320, -1, nfsbufcnt, nfsbuffreecnt, cnt); +} + +void +nfs_buf_remfree(struct nfsbuf *bp) +{ + if (bp->nb_free.tqe_next == NFSNOLIST) + panic("nfsbuf not on free list"); + if (ISSET(bp->nb_flags, NB_DELWRI)) { + nfsbufdelwricnt--; + TAILQ_REMOVE(&nfsbufdelwri, bp, nb_free); + } else { + nfsbuffreecnt--; + TAILQ_REMOVE(&nfsbuffree, bp, nb_free); + } + bp->nb_free.tqe_next = NFSNOLIST; + NFSBUFCNTCHK(); +} + +/* + * check for existence of nfsbuf in cache + */ +struct nfsbuf * +nfs_buf_incore(struct vnode *vp, daddr_t blkno) +{ + /* Search hash chain */ + struct nfsbuf * bp = NFSBUFHASH(vp, blkno)->lh_first; + for (; bp != NULL; bp = bp->nb_hash.le_next) + if (bp->nb_lblkno == blkno && bp->nb_vp == vp && + !ISSET(bp->nb_flags, NB_INVAL)) { + FSDBG(547, bp, blkno, 
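/*
 * NOTE (editor's sketch): nfs_buf_freeup() above is deliberately lazy.
 * The NFS_BUF_FREEUP() gate only fires once more than a quarter of all
 * nfsbufs sit idle on the free list, and then trims just an eighth of
 * the free list, never letting the pool drop below nfsbufmin.  With the
 * nfs_nbinit() defaults (nfsbufmin = 128): say nfsbufcnt = 1024 and
 * nfsbuffreecnt = 400; 400 > 1024/4, so 400/8 = 50 buffers are torn
 * down, leaving 974 -- still comfortably above the floor.
 */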
bp->nb_flags, bp->nb_vp); + return (bp); + } + return (NULL); +} + +/* + * Check if it's OK to drop a page. + * + * Called by vnode_pager() on pageout request of non-dirty page. + * We need to make sure that it's not part of a delayed write. + * If it is, we can't let the VM drop it because we may need it + * later when/if we need to write the data (again). + */ +int +nfs_buf_page_inval(struct vnode *vp, off_t offset) +{ + struct nfsbuf *bp; + bp = nfs_buf_incore(vp, ubc_offtoblk(vp, offset)); + if (!bp) + return (0); + FSDBG(325, bp, bp->nb_flags, bp->nb_dirtyoff, bp->nb_dirtyend); + if (ISSET(bp->nb_flags, NB_BUSY)) + return (EBUSY); + /* + * If there's a dirty range in the buffer, check to + * see if this page intersects with the dirty range. + * If it does, we can't let the pager drop the page. + */ + if (bp->nb_dirtyend > 0) { + int start = offset - NBOFF(bp); + if (bp->nb_dirtyend <= start || + bp->nb_dirtyoff >= (start + PAGE_SIZE)) + return (0); + return (EBUSY); + } + return (0); +} + +int +nfs_buf_upl_setup(struct nfsbuf *bp) +{ + kern_return_t kret; + upl_t upl; + int s; + + if (ISSET(bp->nb_flags, NB_PAGELIST)) + return (0); + + kret = ubc_create_upl(bp->nb_vp, NBOFF(bp), bp->nb_bufsize, + &upl, NULL, UPL_PRECIOUS); + if (kret == KERN_INVALID_ARGUMENT) { + /* vm object probably doesn't exist any more */ + bp->nb_pagelist = NULL; + return (EINVAL); + } + if (kret != KERN_SUCCESS) { + printf("nfs_buf_upl_setup(): failed to get pagelist %d\n", kret); + bp->nb_pagelist = NULL; + return (EIO); + } + + FSDBG(538, bp, NBOFF(bp), bp->nb_bufsize, bp->nb_vp); + + s = splbio(); + bp->nb_pagelist = upl; + SET(bp->nb_flags, NB_PAGELIST); + splx(s); + return (0); +} + +void +nfs_buf_upl_check(struct nfsbuf *bp) +{ + upl_page_info_t *pl; + off_t filesize, fileoffset; + int i, npages; + + if (!ISSET(bp->nb_flags, NB_PAGELIST)) + return; + + npages = round_page_32(bp->nb_bufsize) / PAGE_SIZE; + filesize = ubc_getsize(bp->nb_vp); + fileoffset = NBOFF(bp); + if (fileoffset < filesize) + SET(bp->nb_flags, NB_CACHE); + else + CLR(bp->nb_flags, NB_CACHE); + + pl = ubc_upl_pageinfo(bp->nb_pagelist); + bp->nb_valid = bp->nb_dirty = 0; + + for (i=0; i < npages; i++, fileoffset += PAGE_SIZE_64) { + /* anything beyond the end of the file is not valid or dirty */ + if (fileoffset >= filesize) + break; + if (!upl_valid_page(pl, i)) { + CLR(bp->nb_flags, NB_CACHE); + continue; + } + NBPGVALID_SET(bp,i); + if (upl_dirty_page(pl, i)) { + NBPGDIRTY_SET(bp, i); + if (!ISSET(bp->nb_flags, NB_WASDIRTY)) + SET(bp->nb_flags, NB_WASDIRTY); + } + } + fileoffset = NBOFF(bp); + if (ISSET(bp->nb_flags, NB_CACHE)) { + bp->nb_validoff = 0; + bp->nb_validend = bp->nb_bufsize; + if (fileoffset + bp->nb_validend > filesize) + bp->nb_validend = filesize - fileoffset; + } else { + bp->nb_validoff = bp->nb_validend = -1; + } + FSDBG(539, bp, fileoffset, bp->nb_valid, bp->nb_dirty); + FSDBG(539, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend); +} + +static int +nfs_buf_map(struct nfsbuf *bp) +{ + kern_return_t kret; + + if (bp->nb_data) + return (0); + if (!ISSET(bp->nb_flags, NB_PAGELIST)) + return (EINVAL); + + kret = ubc_upl_map(bp->nb_pagelist, (vm_address_t *)&(bp->nb_data)); + if (kret != KERN_SUCCESS) + panic("nfs_buf_map: ubc_upl_map() failed with (%d)", kret); + if (bp->nb_data == 0) + panic("ubc_upl_map mapped 0"); + FSDBG(540, bp, bp->nb_flags, NBOFF(bp), bp->nb_data); + return (0); +} + +/* + * check range of pages in nfsbuf's UPL for validity + */ +static int +nfs_buf_upl_valid_range(struct 
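/*
 * NOTE (editor's sketch): nfs_buf_page_inval() above is a plain
 * interval-intersection test.  With start = offset - NBOFF(bp), the
 * page [start, start + PAGE_SIZE) may be dropped only if it misses the
 * dirty byte range [nb_dirtyoff, nb_dirtyend); equivalently the page
 * must be kept (EBUSY) exactly when
 *
 *	nb_dirtyend > start && nb_dirtyoff < start + PAGE_SIZE
 *
 * since letting the VM drop such a page would silently discard
 * delayed-write data.
 */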
nfsbuf *bp, int off, int size) +{ + off_t fileoffset, filesize; + int pg, lastpg; + upl_page_info_t *pl; + + if (!ISSET(bp->nb_flags, NB_PAGELIST)) + return (0); + pl = ubc_upl_pageinfo(bp->nb_pagelist); + + size += off & PAGE_MASK; + off &= ~PAGE_MASK; + fileoffset = NBOFF(bp); + filesize = VTONFS(bp->nb_vp)->n_size; + if ((fileoffset + off + size) > filesize) + size = filesize - (fileoffset + off); + + pg = off/PAGE_SIZE; + lastpg = (off + size - 1)/PAGE_SIZE; + while (pg <= lastpg) { + if (!upl_valid_page(pl, pg)) + return (0); + pg++; + } + return (1); +} + +/* + * normalize an nfsbuf's valid range + * + * the read/write code guarantees that we'll always have a valid + * region that is an integral number of pages. If either end + * of the valid range isn't page-aligned, it gets corrected + * here as we extend the valid range through all of the + * contiguous valid pages. + */ +static void +nfs_buf_normalize_valid_range(struct nfsnode *np, struct nfsbuf *bp) +{ + int pg, npg; + /* pull validoff back to start of contiguous valid page range */ + pg = bp->nb_validoff/PAGE_SIZE; + while (pg >= 0 && NBPGVALID(bp,pg)) + pg--; + bp->nb_validoff = (pg+1) * PAGE_SIZE; + /* push validend forward to end of contiguous valid page range */ + npg = bp->nb_bufsize/PAGE_SIZE; + pg = bp->nb_validend/PAGE_SIZE; + while (pg < npg && NBPGVALID(bp,pg)) + pg++; + bp->nb_validend = pg * PAGE_SIZE; + /* clip to EOF */ + if (NBOFF(bp) + bp->nb_validend > np->n_size) + bp->nb_validend = np->n_size % bp->nb_bufsize; +} + +/* + * try to push out some delayed/uncommitted writes + */ +static void +nfs_buf_delwri_push(void) +{ + struct nfsbuf *bp; + int i; + + if (TAILQ_EMPTY(&nfsbufdelwri)) + return; + + /* first try to tell the nfsiods to do it */ + if (nfs_asyncio(NULL, NULL) == 0) + return; + + /* otherwise, try to do some of the work ourselves */ + i = 0; + while (i < 8 && (bp = TAILQ_FIRST(&nfsbufdelwri)) != NULL) { + struct nfsnode *np = VTONFS(bp->nb_vp); + nfs_buf_remfree(bp); + if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { + /* put buffer at end of delwri list */ + TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free); + nfsbufdelwricnt++; + nfs_flushcommits(np->n_vnode, (struct proc *)0); + } else { + SET(bp->nb_flags, (NB_BUSY | NB_ASYNC)); + nfs_buf_write(bp); + } + i++; + } +} + +/* + * Get an nfs cache block. + * Allocate a new one if the block isn't currently in the cache + * and return the block marked busy. If the calling process is + * interrupted by a signal for an interruptible mount point, return + * NULL. + */ +struct nfsbuf * +nfs_buf_get( + struct vnode *vp, + daddr_t blkno, + int size, + struct proc *p, + int operation) +{ + struct nfsnode *np = VTONFS(vp); + struct nfsbuf *bp; + int i, biosize, bufsize, rv; + struct ucred *cred; + int slpflag = PCATCH; + + FSDBG_TOP(541, vp, blkno, size, operation); + + bufsize = size; + if (bufsize > MAXBSIZE) + panic("nfs_buf_get: buffer larger than MAXBSIZE requested"); + + biosize = vp->v_mount->mnt_stat.f_iosize; + + if (UBCINVALID(vp) || !UBCINFOEXISTS(vp)) + operation = BLK_META; + else if (bufsize < biosize) + /* reg files should always have biosize blocks */ + bufsize = biosize; + + /* if BLK_WRITE, check for too many delayed/uncommitted writes */ + if ((operation == BLK_WRITE) && (nfs_nbdwrite > ((nfsbufcnt*3)/4))) { + FSDBG_TOP(542, vp, blkno, nfs_nbdwrite, ((nfsbufcnt*3)/4)); + + /* poke the delwri list */ + nfs_buf_delwri_push(); + + /* sleep to let other threads run... 
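// NOTE (editor's sketch): a worked example for
// nfs_buf_normalize_valid_range() above, with 4K pages and a 16K
// buffer: say nb_validoff = 5000, nb_validend = 7000, and pages 1 and 2
// are the valid ones.  The backward scan stops at page 0 (invalid), so
// nb_validoff becomes 4096; the forward scan runs through page 2, so
// nb_validend becomes 12288; finally, if NBOFF(bp) + 12288 passes EOF,
// nb_validend is clipped back to n_size % nb_bufsize.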
*/ + tsleep(&nfs_nbdwrite, PCATCH, "nfs_nbdwrite", 1); + FSDBG_BOT(542, vp, blkno, nfs_nbdwrite, ((nfsbufcnt*3)/4)); + } + +loop: + /* + * Obtain a lock to prevent a race condition if the + * MALLOC() below happens to block. + */ + if (nfsbufhashlock) { + while (nfsbufhashlock) { + nfsbufhashlock = -1; + tsleep(&nfsbufhashlock, PCATCH, "nfsbufget", 0); + if (nfs_sigintr(VFSTONFS(vp->v_mount), NULL, p)) + return (NULL); + } + goto loop; + } + nfsbufhashlock = 1; + + /* check for existence of nfsbuf in cache */ + if (bp = nfs_buf_incore(vp, blkno)) { + /* if busy, set wanted and wait */ + if (ISSET(bp->nb_flags, NB_BUSY)) { + FSDBG_TOP(543, vp, blkno, bp, bp->nb_flags); + SET(bp->nb_flags, NB_WANTED); + /* unlock hash */ + if (nfsbufhashlock < 0) { + nfsbufhashlock = 0; + wakeup(&nfsbufhashlock); + } else + nfsbufhashlock = 0; + tsleep(bp, slpflag|(PRIBIO+1), "nfsbufget", (slpflag == PCATCH) ? 0 : 2*hz); + slpflag = 0; + FSDBG_BOT(543, vp, blkno, bp, bp->nb_flags); + if (nfs_sigintr(VFSTONFS(vp->v_mount), NULL, p)) { + FSDBG_BOT(541, vp, blkno, 0, EINTR); + return (NULL); + } + goto loop; + } + if (bp->nb_bufsize != bufsize) + panic("nfsbuf size mismatch"); + SET(bp->nb_flags, (NB_BUSY | NB_CACHE)); + nfs_buf_remfree(bp); + /* additional paranoia: */ + if (ISSET(bp->nb_flags, NB_PAGELIST)) + panic("pagelist buffer was not busy"); + goto buffer_setup; + } + + /* + * where to get a free buffer: + * - alloc new if we haven't reached min bufs + * - free list + * - alloc new if we haven't reached max allowed + * - start clearing out delwri list and try again + */ + + if ((nfsbufcnt > nfsbufmin) && !TAILQ_EMPTY(&nfsbuffree)) { + /* pull an nfsbuf off the free list */ + bp = TAILQ_FIRST(&nfsbuffree); + FSDBG(544, vp, blkno, bp, bp->nb_flags); + nfs_buf_remfree(bp); + if (ISSET(bp->nb_flags, NB_DELWRI)) + panic("nfs_buf_get: delwri"); + SET(bp->nb_flags, NB_BUSY); + /* disassociate buffer from previous vnode */ + if (bp->nb_vp) { + struct vnode *oldvp; + if (bp->nb_vnbufs.le_next != NFSNOLIST) { + LIST_REMOVE(bp, nb_vnbufs); + bp->nb_vnbufs.le_next = NFSNOLIST; + } + oldvp = bp->nb_vp; + bp->nb_vp = NULL; + HOLDRELE(oldvp); + } + LIST_REMOVE(bp, nb_hash); + /* nuke any creds we're holding */ + cred = bp->nb_rcred; + if (cred != NOCRED) { + bp->nb_rcred = NOCRED; + crfree(cred); + } + cred = bp->nb_wcred; + if (cred != NOCRED) { + bp->nb_wcred = NOCRED; + crfree(cred); + } + /* if buf will no longer be NB_META, dump old buffer */ + if ((operation != BLK_META) && + ISSET(bp->nb_flags, NB_META) && bp->nb_data) { + FREE(bp->nb_data, M_TEMP); + bp->nb_data = NULL; + } + /* re-init buf fields */ + bp->nb_error = 0; + bp->nb_validoff = bp->nb_validend = -1; + bp->nb_dirtyoff = bp->nb_dirtyend = 0; + bp->nb_valid = 0; + bp->nb_dirty = 0; + } else if (nfsbufcnt < nfsbufmax) { + /* just alloc a new one */ + MALLOC(bp, struct nfsbuf *, sizeof(struct nfsbuf), M_TEMP, M_WAITOK); + nfsbufcnt++; + NFSBUFCNTCHK(); + /* init nfsbuf */ + bzero(bp, sizeof(*bp)); + bp->nb_free.tqe_next = NFSNOLIST; + bp->nb_validoff = bp->nb_validend = -1; + FSDBG(545, vp, blkno, bp, 0); + } else { + /* too many bufs... 
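// NOTE (editor's sketch): nfs_buf_get()'s buffer hunt below, written
// out as the decision ladder it implements (nfsbufmin = 128,
// nfsbufmax = 8192 from nfs_nbinit()):
//
//	nfsbufcnt <= nfsbufmin              -> MALLOC a fresh nfsbuf
//	free list non-empty (and above min) -> recycle nfsbuffree head
//	nfsbufcnt < nfsbufmax               -> MALLOC a fresh nfsbuf
//	otherwise                           -> nfs_buf_delwri_push(),
//	                                       sleep on nfsneedbuffer,
//	                                       and retry from loop: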
wait for buffers to free up */ + FSDBG_TOP(546, vp, blkno, nfsbufcnt, nfsbufmax); + /* unlock hash */ + if (nfsbufhashlock < 0) { + nfsbufhashlock = 0; + wakeup(&nfsbufhashlock); + } else + nfsbufhashlock = 0; + + /* poke the delwri list */ + nfs_buf_delwri_push(); + + nfsneedbuffer = 1; + tsleep(&nfsneedbuffer, PCATCH, "nfsbufget", 0); + FSDBG_BOT(546, vp, blkno, nfsbufcnt, nfsbufmax); + if (nfs_sigintr(VFSTONFS(vp->v_mount), NULL, p)) { + FSDBG_BOT(541, vp, blkno, 0, EINTR); + return (NULL); + } + goto loop; + } + +setup_nfsbuf: + + /* setup nfsbuf */ + bp->nb_flags = NB_BUSY; + bp->nb_lblkno = blkno; + /* insert buf in hash */ + LIST_INSERT_HEAD(NFSBUFHASH(vp, blkno), bp, nb_hash); + /* associate buffer with new vnode */ + VHOLD(vp); + bp->nb_vp = vp; + LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs); + +buffer_setup: + + switch (operation) { + case BLK_META: + SET(bp->nb_flags, NB_META); + if ((bp->nb_bufsize != bufsize) && bp->nb_data) { + FREE(bp->nb_data, M_TEMP); + bp->nb_data = NULL; + bp->nb_validoff = bp->nb_validend = -1; + bp->nb_dirtyoff = bp->nb_dirtyend = 0; + bp->nb_valid = 0; + bp->nb_dirty = 0; + CLR(bp->nb_flags, NB_CACHE); + } + if (!bp->nb_data) + MALLOC(bp->nb_data, caddr_t, bufsize, M_TEMP, M_WAITOK); + if (!bp->nb_data) + panic("nfs_buf_get: null nb_data"); + bp->nb_bufsize = bufsize; + break; + + case BLK_READ: + case BLK_WRITE: + if (bufsize < PAGE_SIZE) + bufsize = PAGE_SIZE; + bp->nb_bufsize = bufsize; + bp->nb_validoff = bp->nb_validend = -1; + + if (UBCISVALID(vp)) { + /* setup upl */ + if (nfs_buf_upl_setup(bp)) { + /* unable to create upl */ + /* vm object must no longer exist */ + /* cleanup buffer and return NULL */ + LIST_REMOVE(bp, nb_vnbufs); + bp->nb_vnbufs.le_next = NFSNOLIST; + bp->nb_vp = NULL; + HOLDRELE(vp); + if (bp->nb_free.tqe_next != NFSNOLIST) + panic("nfsbuf on freelist"); + TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free); + nfsbuffreecnt++; + FSDBG_BOT(541, vp, blkno, 0x2bc, EIO); + return (NULL); + } + nfs_buf_upl_check(bp); + } + break; + + default: + panic("nfs_buf_get: %d unknown operation", operation); + } + + /* unlock hash */ + if (nfsbufhashlock < 0) { + nfsbufhashlock = 0; + wakeup(&nfsbufhashlock); + } else + nfsbufhashlock = 0; + + FSDBG_BOT(541, vp, blkno, bp, bp->nb_flags); + + return (bp); +} + +void +nfs_buf_release(struct nfsbuf *bp) +{ + struct vnode *vp = bp->nb_vp; + + FSDBG_TOP(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data); + FSDBG(548, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend); + FSDBG(548, bp->nb_valid, 0, bp->nb_dirty, 0); + + if (UBCINFOEXISTS(vp) && bp->nb_bufsize) { + int upl_flags; + upl_t upl; + int i, rv; + + if (!ISSET(bp->nb_flags, NB_PAGELIST) && !ISSET(bp->nb_flags, NB_INVAL)) { + rv = nfs_buf_upl_setup(bp); + if (rv) + printf("nfs_buf_release: upl create failed %d\n", rv); + else + nfs_buf_upl_check(bp); + } + upl = bp->nb_pagelist; + if (!upl) + goto pagelist_cleanup_done; + if (bp->nb_data) { + if (ubc_upl_unmap(upl) != KERN_SUCCESS) + panic("ubc_upl_unmap failed"); + bp->nb_data = NULL; + } + if (bp->nb_flags & (NB_ERROR | NB_INVAL)) { + if (bp->nb_flags & (NB_READ | NB_INVAL)) + upl_flags = UPL_ABORT_DUMP_PAGES; + else + upl_flags = 0; + ubc_upl_abort(upl, upl_flags); + goto pagelist_cleanup_done; + } + for (i=0; i <= (bp->nb_bufsize - 1)/PAGE_SIZE; i++) { + if (!NBPGVALID(bp,i)) + ubc_upl_abort_range(upl, + i*PAGE_SIZE, PAGE_SIZE, + UPL_ABORT_DUMP_PAGES | + UPL_ABORT_FREE_ON_EMPTY); + else { + if (NBPGDIRTY(bp,i)) + upl_flags = UPL_COMMIT_SET_DIRTY; + else + upl_flags = 
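/*
 * NOTE (editor's sketch): on release, the buffer's UPL is resolved page
 * by page rather than wholesale: pages with no NBPGVALID bit are
 * aborted with UPL_ABORT_DUMP_PAGES, valid+dirty pages are committed
 * with UPL_COMMIT_SET_DIRTY so the VM page keeps its dirty state until
 * a real write, and valid+clean pages are committed with
 * UPL_COMMIT_CLEAR_DIRTY; commits are also inactivated and freed on
 * empty.  This per-page accounting is what lets one nfsbuf safely span
 * several VM pages.
 */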
UPL_COMMIT_CLEAR_DIRTY; + ubc_upl_commit_range(upl, + i*PAGE_SIZE, PAGE_SIZE, + upl_flags | + UPL_COMMIT_INACTIVATE | + UPL_COMMIT_FREE_ON_EMPTY); + } + } +pagelist_cleanup_done: + /* was this the last buffer in the file? */ + if (NBOFF(bp) + bp->nb_bufsize > VTONFS(vp)->n_size) { + /* if so, invalidate all pages of last buffer past EOF */ + int biosize = vp->v_mount->mnt_stat.f_iosize; + off_t off, size; + off = trunc_page_64(VTONFS(vp)->n_size) + PAGE_SIZE_64; + size = trunc_page_64(NBOFF(bp) + biosize) - off; + if (size) + ubc_invalidate(vp, off, size); + } + CLR(bp->nb_flags, NB_PAGELIST); + bp->nb_pagelist = NULL; + } + + /* Wake up any processes waiting for any buffer to become free. */ + if (nfsneedbuffer) { + nfsneedbuffer = 0; + wakeup(&nfsneedbuffer); + } + /* Wake up any processes waiting for _this_ buffer to become free. */ + if (ISSET(bp->nb_flags, NB_WANTED)) { + CLR(bp->nb_flags, NB_WANTED); + wakeup(bp); + } + + /* If it's not cacheable, or an error, mark it invalid. */ + if (ISSET(bp->nb_flags, (NB_NOCACHE|NB_ERROR))) + SET(bp->nb_flags, NB_INVAL); + + if ((bp->nb_bufsize <= 0) || ISSET(bp->nb_flags, NB_INVAL)) { + /* If it's invalid or empty, dissociate it from its vnode */ + if (bp->nb_vnbufs.le_next != NFSNOLIST) { + LIST_REMOVE(bp, nb_vnbufs); + bp->nb_vnbufs.le_next = NFSNOLIST; + } + bp->nb_vp = NULL; + HOLDRELE(vp); + /* if this was a delayed write, wakeup anyone */ + /* waiting for delayed writes to complete */ + if (ISSET(bp->nb_flags, NB_DELWRI)) { + CLR(bp->nb_flags, NB_DELWRI); + nfs_nbdwrite--; + NFSBUFCNTCHK(); + wakeup((caddr_t)&nfs_nbdwrite); + } + /* put buffer at head of free list */ + if (bp->nb_free.tqe_next != NFSNOLIST) + panic("nfsbuf on freelist"); + TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free); + nfsbuffreecnt++; + NFS_BUF_FREEUP(); + } else if (ISSET(bp->nb_flags, NB_DELWRI)) { + /* put buffer at end of delwri list */ + if (bp->nb_free.tqe_next != NFSNOLIST) + panic("nfsbuf on freelist"); + TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free); + nfsbufdelwricnt++; + } else { + /* put buffer at end of free list */ + if (bp->nb_free.tqe_next != NFSNOLIST) + panic("nfsbuf on freelist"); + TAILQ_INSERT_TAIL(&nfsbuffree, bp, nb_free); + nfsbuffreecnt++; + NFS_BUF_FREEUP(); + } + + NFSBUFCNTCHK(); + + /* Unlock the buffer. */ + CLR(bp->nb_flags, (NB_ASYNC | NB_BUSY | NB_NOCACHE | NB_STABLE | NB_IOD)); + + FSDBG_BOT(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data); +} + +/* + * Wait for operations on the buffer to complete. + * When they do, extract and return the I/O's error value. + */ +int +nfs_buf_iowait(struct nfsbuf *bp) +{ + FSDBG_TOP(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error); + + while (!ISSET(bp->nb_flags, NB_DONE)) + tsleep(bp, PRIBIO + 1, "nfs_buf_iowait", 0); + + FSDBG_BOT(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error); + + /* check for interruption of I/O, then errors. */ + if (ISSET(bp->nb_flags, NB_EINTR)) { + CLR(bp->nb_flags, NB_EINTR); + return (EINTR); + } else if (ISSET(bp->nb_flags, NB_ERROR)) + return (bp->nb_error ? bp->nb_error : EIO); + return (0); +} + +/* + * Mark I/O complete on a buffer. 
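 * NOTE (editor's sketch): together with nfs_buf_write_delayed() further
 * down, the VTHROTTLED handling in nfs_buf_iodone() below forms a
 * simple hysteresis: writers block once vp->v_numoutput reaches
 * NFSBUFWRITE_THROTTLE (9) and completions only wake them again after
 * draining it to NFSBUFWRITE_THROTTLE / 3 (i.e. 3), so throttled
 * writers restart in a burst instead of ping-ponging awake for every
 * single completed buffer.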
+ */ +void +nfs_buf_iodone(struct nfsbuf *bp) +{ + struct vnode *vp; + + FSDBG_TOP(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error); + + if (ISSET(bp->nb_flags, NB_DONE)) + panic("nfs_buf_iodone already"); + SET(bp->nb_flags, NB_DONE); /* note that it's done */ + /* + * I/O was done, so don't believe + * the DIRTY state from VM anymore + */ + CLR(bp->nb_flags, NB_WASDIRTY); + + if (!ISSET(bp->nb_flags, NB_READ)) { + CLR(bp->nb_flags, NB_WRITEINPROG); + vpwakeup(bp->nb_vp); + } + + /* Wakeup the throttled write operations as needed */ + vp = bp->nb_vp; + if (vp && (vp->v_flag & VTHROTTLED) + && (vp->v_numoutput <= (NFSBUFWRITE_THROTTLE / 3))) { + vp->v_flag &= ~VTHROTTLED; + wakeup((caddr_t)&vp->v_numoutput); + } + + if (ISSET(bp->nb_flags, NB_ASYNC)) /* if async, release it */ + nfs_buf_release(bp); + else { /* or just wakeup the buffer */ + CLR(bp->nb_flags, NB_WANTED); + wakeup(bp); + } + + FSDBG_BOT(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error); +} + +void +nfs_buf_write_delayed(struct nfsbuf *bp) +{ + struct proc *p = current_proc(); + struct vnode *vp = bp->nb_vp; + + FSDBG_TOP(551, bp, NBOFF(bp), bp->nb_flags, 0); + FSDBG(551, bp, bp->nb_dirtyoff, bp->nb_dirtyend, bp->nb_dirty); + + /* + * If the block hasn't been seen before: + * (1) Mark it as having been seen, + * (2) Charge for the write. + * (3) Make sure it's on its vnode's correct block list, + */ + if (!ISSET(bp->nb_flags, NB_DELWRI)) { + SET(bp->nb_flags, NB_DELWRI); + if (p && p->p_stats) + p->p_stats->p_ru.ru_oublock++; /* XXX */ + nfs_nbdwrite++; + NFSBUFCNTCHK(); + /* move to dirty list */ + if (bp->nb_vnbufs.le_next != NFSNOLIST) + LIST_REMOVE(bp, nb_vnbufs); + LIST_INSERT_HEAD(&VTONFS(vp)->n_dirtyblkhd, bp, nb_vnbufs); + } + + /* + * If the vnode has "too many" write operations in progress + * wait for them to finish the IO + */ + while (vp->v_numoutput >= NFSBUFWRITE_THROTTLE) { + vp->v_flag |= VTHROTTLED; + tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "nfs_buf_write_delayed", 0); + } + + /* + * If we have too many delayed write buffers, + * more than we can "safely" handle, just fall back to + * doing the async write + */ + if (nfs_nbdwrite < 0) + panic("nfs_buf_write_delayed: Negative nfs_nbdwrite"); + + if (nfs_nbdwrite > ((nfsbufcnt/4)*3)) { + /* issue async write */ + SET(bp->nb_flags, NB_ASYNC); + nfs_buf_write(bp); + FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, bp->nb_error); + return; + } + + /* Otherwise, the "write" is done, so mark and release the buffer. */ + SET(bp->nb_flags, NB_DONE); + nfs_buf_release(bp); + FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, 0); + return; +} + /* * Vnode op for read using bio @@ -115,33 +967,41 @@ nfs_bioread(vp, uio, ioflag, cred, getpages) register struct uio *uio; int ioflag; struct ucred *cred; - int getpages; + int getpages; // XXX unused! { - register struct nfsnode *np = VTONFS(vp); - register int biosize, i; + struct nfsnode *np = VTONFS(vp); + int biosize, i; off_t diff; - struct buf *bp = 0, *rabp; + struct nfsbuf *bp = 0, *rabp; struct vattr vattr; struct proc *p; struct nfsmount *nmp = VFSTONFS(vp->v_mount); - daddr_t lbn, rabn; + daddr_t lbn, rabn, lastrabn = -1; int bufsize; - int nra, error = 0, n = 0, on = 0, not_readin; + int nra, error = 0, n = 0, on = 0; int operation = (getpages? 
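/*
 * NOTE (editor's sketch): nfs_buf_write_delayed() above degrades
 * gracefully under pressure.  The normal path just marks the buffer
 * NB_DELWRI/NB_DONE, moves it to the vnode's dirty list, and returns;
 * but once delayed writes hold more than 3/4 of all nfsbufs
 * (nfs_nbdwrite > (nfsbufcnt/4)*3) the "delayed" write is converted to
 * an NB_ASYNC nfs_buf_write() on the spot, shedding dirty buffers
 * before nfs_buf_get() would have to stall writers.
 */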
BLK_PAGEIN : BLK_READ); + caddr_t dp; + struct dirent *direntp; + + FSDBG_TOP(514, vp, uio->uio_offset, uio->uio_resid, ioflag); #if DIAGNOSTIC if (uio->uio_rw != UIO_READ) panic("nfs_read mode"); #endif - if (uio->uio_resid == 0) + if (uio->uio_resid == 0) { + FSDBG_BOT(514, vp, 0xd1e0001, 0, 0); return (0); - if (uio->uio_offset < 0) + } + if (uio->uio_offset < 0) { + FSDBG_BOT(514, vp, 0xd1e0002, 0, EINVAL); return (EINVAL); + } p = uio->uio_procp; - if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3) + if ((nmp->nm_flag & NFSMNT_NFSV3) && + !(nmp->nm_state & NFSSTA_GOTFSINFO)) (void)nfs_fsinfo(nmp, vp, cred, p); - /*due to getblk/vm interractions, use vm page size or less values */ - biosize = min(vp->v_mount->mnt_stat.f_iosize, PAGE_SIZE); + biosize = vp->v_mount->mnt_stat.f_iosize; /* * For nfs, cache consistency can only be maintained approximately. * Although RFC1094 does not specify the criteria, the following is @@ -155,7 +1015,7 @@ nfs_bioread(vp, uio, ioflag, cred, getpages) * Then force a getattr rpc to ensure that you have up to date * attributes. * NB: This implies that cache data can be read when up to - * NFS_ATTRTIMEO seconds out of date. If you find that you need current + * NFS_MAXATTRTIMEO seconds out of date. If you find that you need current * attributes this could be forced by setting n_attrstamp to 0 before * the VOP_GETATTR() call. */ @@ -166,24 +1026,35 @@ nfs_bioread(vp, uio, ioflag, cred, getpages) panic("nfs: bioread, not dir"); nfs_invaldir(vp); error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); - if (error) + if (error) { + FSDBG_BOT(514, vp, 0xd1e0003, 0, error); return (error); + } } np->n_attrstamp = 0; error = VOP_GETATTR(vp, &vattr, cred, p); - if (error) + if (error) { + FSDBG_BOT(514, vp, 0xd1e0004, 0, error); return (error); + } np->n_mtime = vattr.va_mtime.tv_sec; } else { error = VOP_GETATTR(vp, &vattr, cred, p); - if (error) + if (error) { + FSDBG_BOT(514, vp, 0xd1e0005, 0, error); return (error); + } if (np->n_mtime != vattr.va_mtime.tv_sec) { - if (vp->v_type == VDIR) + if (vp->v_type == VDIR) { nfs_invaldir(vp); + /* purge name cache entries */ + cache_purge(vp); + } error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); - if (error) + if (error) { + FSDBG_BOT(514, vp, 0xd1e0006, 0, error); return (error); + } np->n_mtime = vattr.va_mtime.tv_sec; } } @@ -198,70 +1069,126 @@ nfs_bioread(vp, uio, ioflag, cred, getpages) do { error = nqnfs_getlease(vp, ND_READ, cred, p); } while (error == NQNFS_EXPIRED); - if (error) + if (error) { + FSDBG_BOT(514, vp, 0xd1e0007, 0, error); return (error); + } if (np->n_lrev != np->n_brev || (np->n_flag & NQNFSNONCACHE) || ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) { if (vp->v_type == VDIR) nfs_invaldir(vp); error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); - if (error) + if (error) { + FSDBG_BOT(514, vp, 0xd1e0008, 0, error); return (error); + } np->n_brev = np->n_lrev; } } else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) { nfs_invaldir(vp); error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); - if (error) + if (error) { + FSDBG_BOT(514, vp, 0xd1e0009, 0, error); return (error); + } } } - if (np->n_flag & NQNFSNONCACHE) { + if ((np->n_flag & NQNFSNONCACHE) || (vp->v_flag & VNOCACHE_DATA)) { + if ((vp->v_flag & VNOCACHE_DATA) && + (np->n_dirtyblkhd.lh_first || np->n_cleanblkhd.lh_first)) { + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error) { + FSDBG_BOT(514, vp, 0xd1e000a, 0, error); + return (error); + } + } switch (vp->v_type) { case VREG: - return (nfs_readrpc(vp, uio, cred)); + error = 
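/*
 * NOTE (editor's sketch): VNOCACHE_DATA joins NQNFSNONCACHE above as a
 * reason to bypass the buffer cache: if the vnode still has cached
 * clean or dirty blocks they are first pushed out with
 * nfs_vinvalbuf(V_SAVE, ...), and VREG/VLNK reads then go straight to
 * nfs_readrpc()/nfs_readlinkrpc() without touching an nfsbuf.
 */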
nfs_readrpc(vp, uio, cred); + FSDBG_BOT(514, vp, uio->uio_offset, uio->uio_resid, error); + return (error); case VLNK: - return (nfs_readlinkrpc(vp, uio, cred)); + error = nfs_readlinkrpc(vp, uio, cred); + FSDBG_BOT(514, vp, uio->uio_offset, uio->uio_resid, error); + return (error); case VDIR: break; default: - printf(" NQNFSNONCACHE: type %x unexpected\n", - vp->v_type); + printf(" NQNFSNONCACHE: type %x unexpected\n", vp->v_type); }; } switch (vp->v_type) { case VREG: - nfsstats.biocache_reads++; lbn = uio->uio_offset / biosize; - on = uio->uio_offset & (biosize - 1); - not_readin = 1; + + /* + * Copy directly from any cached pages without grabbing the bufs. + */ + if (uio->uio_segflg == UIO_USERSPACE) { + int io_resid = uio->uio_resid; + diff = np->n_size - uio->uio_offset; + if (diff < io_resid) + io_resid = diff; + if (io_resid > 0) { + error = cluster_copy_ubc_data(vp, uio, &io_resid, 0); + if (error) { + FSDBG_BOT(514, vp, uio->uio_offset, 0xcacefeed, error); + return (error); + } + } + /* count any biocache reads that we just copied directly */ + if (lbn != uio->uio_offset / biosize) { + nfsstats.biocache_reads += (uio->uio_offset / biosize) - lbn; + FSDBG(514, vp, 0xcacefeed, uio->uio_offset, error); + } + } + + lbn = uio->uio_offset / biosize; + on = uio->uio_offset % biosize; /* * Start the read ahead(s), as required. */ if (nfs_numasync > 0 && nmp->nm_readahead > 0) { - for (nra = 0; nra < nmp->nm_readahead && - (off_t)(lbn + 1 + nra) * biosize < np->n_size; - nra++) { + for (nra = 0; nra < nmp->nm_readahead; nra++) { rabn = lbn + 1 + nra; - if (!incore(vp, rabn)) { - rabp = nfs_getcacheblk(vp, rabn, biosize, p, operation); - if (!rabp) - return (EINTR); - if (!ISSET(rabp->b_flags, (B_CACHE|B_DELWRI))) { - SET(rabp->b_flags, (B_READ | B_ASYNC)); - if (nfs_asyncio(rabp, cred)) { - SET(rabp->b_flags, (B_INVAL|B_ERROR)); - rabp->b_error = EIO; - brelse(rabp); - } - } else - brelse(rabp); + if (rabn <= lastrabn) { + /* we've already (tried to) read this block */ + /* no need to try it again... */ + continue; } - } + lastrabn = rabn; + if ((off_t)rabn * biosize >= np->n_size) + break; + /* check if block exists and is valid. */ + rabp = nfs_buf_incore(vp, rabn); + if (rabp && nfs_buf_upl_valid_range(rabp, 0, rabp->nb_bufsize)) + continue; + rabp = nfs_buf_get(vp, rabn, biosize, p, operation); + if (!rabp) { + FSDBG_BOT(514, vp, 0xd1e000b, 0, EINTR); + return (EINTR); + } + if (!ISSET(rabp->nb_flags, (NB_CACHE|NB_DELWRI))) { + SET(rabp->nb_flags, (NB_READ|NB_ASYNC)); + if (nfs_asyncio(rabp, cred)) { + SET(rabp->nb_flags, (NB_INVAL|NB_ERROR)); + rabp->nb_error = EIO; + nfs_buf_release(rabp); + } + } else + nfs_buf_release(rabp); + } } + if ((uio->uio_resid <= 0) || (uio->uio_offset >= np->n_size)) { + FSDBG_BOT(514, vp, uio->uio_offset, uio->uio_resid, 0xaaaaaaaa); + return (0); + } + + nfsstats.biocache_reads++; + /* * If the block is in the cache and has the required data * in a valid region, just copy it out. 
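/*
 * NOTE (editor's sketch): the hunk below replaces the old all-or-nothing
 * B_CACHE test with per-page bookkeeping.  Worked example with a 16K
 * buffer, 4K pages, and a read of on = 4096, n = 8192 (pages 1-2): if
 * nb_valid has only pages 0 and 3 set, the scan finds firstpg = 1,
 * lastpg = 2, with no dirty page in between; two pages is not more than
 * half of the four-page buffer, so just pages 1-2 are fetched with a
 * single nfs_readrpc() into nb_data + 4096 and then marked NBPGVALID,
 * instead of refetching the whole 16K block.
 */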
@@ -270,84 +1197,162 @@ nfs_bioread(vp, uio, ioflag, cred, getpages) */ again: bufsize = biosize; - if ((off_t)(lbn + 1) * biosize > np->n_size && - (off_t)(lbn + 1) * biosize - np->n_size < biosize) { - bufsize = np->n_size - (off_t)lbn * biosize; - bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); - } - bp = nfs_getcacheblk(vp, lbn, bufsize, p, operation); - if (!bp) - return (EINTR); - - if (!ISSET(bp->b_flags, B_CACHE)) { - SET(bp->b_flags, B_READ); - CLR(bp->b_flags, (B_DONE | B_ERROR | B_INVAL)); - not_readin = 0; - error = nfs_doio(bp, cred, p); - if (error) { - brelse(bp); - return (error); - } - } - if (bufsize > on) { - n = min((unsigned)(bufsize - on), uio->uio_resid); - } else { - n = 0; - } + n = min((unsigned)(bufsize - on), uio->uio_resid); diff = np->n_size - uio->uio_offset; if (diff < n) n = diff; - if (not_readin && n > 0) { - if (on < bp->b_validoff || (on + n) > bp->b_validend) { - SET(bp->b_flags, (B_NOCACHE|B_INVAFTERWRITE)); - if (bp->b_dirtyend > 0) { - if (!ISSET(bp->b_flags, B_DELWRI)) - panic("nfsbioread"); - if (VOP_BWRITE(bp) == EINTR) - return (EINTR); - } else - brelse(bp); + + bp = nfs_buf_get(vp, lbn, bufsize, p, operation); + if (!bp) { + FSDBG_BOT(514, vp, 0xd1e000c, 0, EINTR); + return (EINTR); + } + + /* if any pages are valid... */ + if (bp->nb_valid) { + /* ...check for any invalid pages in the read range */ + int pg, firstpg, lastpg, dirtypg; + dirtypg = firstpg = lastpg = -1; + pg = on/PAGE_SIZE; + while (pg <= (on + n - 1)/PAGE_SIZE) { + if (!NBPGVALID(bp,pg)) { + if (firstpg < 0) + firstpg = pg; + lastpg = pg; + } else if (firstpg >= 0 && dirtypg < 0 && NBPGDIRTY(bp,pg)) + dirtypg = pg; + pg++; + } + + /* if there are no invalid pages, we're all set */ + if (firstpg < 0) { + if (bp->nb_validoff < 0) { + /* valid range isn't set up, so */ + /* set it to what we know is valid */ + bp->nb_validoff = trunc_page_32(on); + bp->nb_validend = round_page_32(on+n); + nfs_buf_normalize_valid_range(np, bp); + } + goto buffer_ready; + } + + /* there are invalid pages in the read range */ + if ((dirtypg > firstpg) && (dirtypg < lastpg)) { + /* there are also dirty page(s) in the range, */ + /* so write the buffer out and try again */ + CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); + SET(bp->nb_flags, NB_ASYNC); + /* + * NFS has embedded ucred so crhold() risks zone corruption + */ + if (bp->nb_wcred == NOCRED) + bp->nb_wcred = crdup(cred); + error = nfs_buf_write(bp); + if (error) { + FSDBG_BOT(514, vp, 0xd1e000d, 0, error); + return (error); + } goto again; } + if (!bp->nb_dirty && bp->nb_dirtyend <= 0 && + (lastpg - firstpg + 1) > (bufsize/PAGE_SIZE)/2) { + /* we need to read in more than half the buffer and the */ + /* buffer's not dirty, so just fetch the whole buffer */ + bp->nb_valid = 0; + } else { + /* read the page range in */ + struct iovec iov; + struct uio auio; + auio.uio_iov = &iov; + auio.uio_iovcnt = 1; + auio.uio_offset = NBOFF(bp) + firstpg * PAGE_SIZE_64; + auio.uio_resid = (lastpg - firstpg + 1) * PAGE_SIZE; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_rw = UIO_READ; + auio.uio_procp = p; + NFS_BUF_MAP(bp); + iov.iov_base = bp->nb_data + firstpg * PAGE_SIZE; + iov.iov_len = auio.uio_resid; + error = nfs_readrpc(vp, &auio, cred); + if (error) { + nfs_buf_release(bp); + FSDBG_BOT(514, vp, 0xd1e000e, 0, error); + return (error); + } + /* Make sure that the valid range is set to cover this read. 
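// NOTE (editor's sketch): nfs_readrpc() can legitimately return short
// at end of file, leaving auio.uio_resid > 0.  The bzero() just below
// zero-fills the unread tail before the pages are marked NBPGVALID, so
// a later uiomove() from this buffer can never expose the stale
// contents of a recycled nfsbuf past EOF.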
*/ + bp->nb_validoff = trunc_page_32(on); + bp->nb_validend = round_page_32(on+n); + nfs_buf_normalize_valid_range(np, bp); + if (auio.uio_resid > 0) { + /* if short read, must have hit EOF, */ + /* so zero the rest of the range */ + bzero(iov.iov_base, auio.uio_resid); + } + /* mark the pages (successfully read) as valid */ + for (pg=firstpg; pg <= lastpg; pg++) + NBPGVALID_SET(bp,pg); + } } + /* if no pages are valid, read the whole block */ + if (!bp->nb_valid) { + SET(bp->nb_flags, NB_READ); + CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); + error = nfs_doio(bp, cred, p); + if (error) { + nfs_buf_release(bp); + FSDBG_BOT(514, vp, 0xd1e000f, 0, error); + return (error); + } + } +buffer_ready: vp->v_lastr = lbn; - diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on); - if (diff < n) - n = diff; + /* validate read range against valid range and clip */ + if (bp->nb_validend > 0) { + diff = (on >= bp->nb_validend) ? 0 : (bp->nb_validend - on); + if (diff < n) + n = diff; + } + if (n > 0) + NFS_BUF_MAP(bp); break; case VLNK: nfsstats.biocache_readlinks++; - bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p, operation); - if (!bp) + bp = nfs_buf_get(vp, (daddr_t)0, NFS_MAXPATHLEN, p, operation); + if (!bp) { + FSDBG_BOT(514, vp, 0xd1e0010, 0, EINTR); return (EINTR); - if (!ISSET(bp->b_flags, B_CACHE)) { - SET(bp->b_flags, B_READ); + } + if (!ISSET(bp->nb_flags, NB_CACHE)) { + SET(bp->nb_flags, NB_READ); error = nfs_doio(bp, cred, p); if (error) { - SET(bp->b_flags, B_ERROR); - brelse(bp); + SET(bp->nb_flags, NB_ERROR); + nfs_buf_release(bp); + FSDBG_BOT(514, vp, 0xd1e0011, 0, error); return (error); } } - n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid); + n = min(uio->uio_resid, bp->nb_validend); on = 0; break; case VDIR: nfsstats.biocache_readdirs++; - if (np->n_direofoffset - && uio->uio_offset >= np->n_direofoffset) { - return (0); + if (np->n_direofoffset && uio->uio_offset >= np->n_direofoffset) { + FSDBG_BOT(514, vp, 0xde0f0001, 0, 0); + return (0); } lbn = uio->uio_offset / NFS_DIRBLKSIZ; on = uio->uio_offset & (NFS_DIRBLKSIZ - 1); - bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, p, operation); - if (!bp) - return (EINTR); - if (!ISSET(bp->b_flags, B_CACHE)) { - SET(bp->b_flags, B_READ); + bp = nfs_buf_get(vp, lbn, NFS_DIRBLKSIZ, p, operation); + if (!bp) { + FSDBG_BOT(514, vp, 0xd1e0012, 0, EINTR); + return (EINTR); + } + if (!ISSET(bp->nb_flags, NB_CACHE)) { + SET(bp->nb_flags, NB_READ); error = nfs_doio(bp, cred, p); if (error) { - brelse(bp); + nfs_buf_release(bp); } while (error == NFSERR_BAD_COOKIE) { nfs_invaldir(vp); @@ -360,20 +1365,23 @@ again: */ for (i = 0; i <= lbn && !error; i++) { if (np->n_direofoffset - && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset) + && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset) { + FSDBG_BOT(514, vp, 0xde0f0002, 0, 0); return (0); - bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, p, - operation); - if (!bp) + } + bp = nfs_buf_get(vp, i, NFS_DIRBLKSIZ, p, operation); + if (!bp) { + FSDBG_BOT(514, vp, 0xd1e0013, 0, EINTR); return (EINTR); - if (!ISSET(bp->b_flags, B_CACHE)) { - SET(bp->b_flags, B_READ); + } + if (!ISSET(bp->nb_flags, NB_CACHE)) { + SET(bp->nb_flags, NB_READ); error = nfs_doio(bp, cred, p); /* - * no error + B_INVAL == directory EOF, + * no error + NB_INVAL == directory EOF, * use the block. */ - if (error == 0 && (bp->b_flags & B_INVAL)) + if (error == 0 && (bp->nb_flags & NB_INVAL)) break; } /* @@ -383,7 +1391,7 @@ again: * block and go for the next one via the for loop. 
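// NOTE (editor's sketch): the 0..lbn walk above is forced by the NFS
// readdir protocol: directory offsets are opaque server cookies, and
// the request for block i needs the cookie returned with block i-1.
// After NFSERR_BAD_COOKIE invalidates everything, the only way back to
// block lbn is to refetch the chain in order, releasing each
// intermediate block as soon as the next cookie is in hand.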
*/ if (error || i < lbn) - brelse(bp); + nfs_buf_release(bp); } } /* @@ -391,8 +1399,10 @@ again: * error. If we hit an error and it wasn't a cookie error, * we give up. */ - if (error) + if (error) { + FSDBG_BOT(514, vp, 0xd1e0014, 0, error); return (error); + } } /* @@ -404,19 +1414,19 @@ again: (np->n_direofoffset == 0 || (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) && !(np->n_flag & NQNFSNONCACHE) && - !incore(vp, lbn + 1)) { - rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, p, + !nfs_buf_incore(vp, lbn + 1)) { + rabp = nfs_buf_get(vp, lbn + 1, NFS_DIRBLKSIZ, p, operation); if (rabp) { - if (!ISSET(rabp->b_flags, (B_CACHE|B_DELWRI))) { - SET(rabp->b_flags, (B_READ | B_ASYNC)); + if (!ISSET(rabp->nb_flags, (NB_CACHE))) { + SET(rabp->nb_flags, (NB_READ | NB_ASYNC)); if (nfs_asyncio(rabp, cred)) { - SET(rabp->b_flags, (B_INVAL|B_ERROR)); - rabp->b_error = EIO; - brelse(rabp); + SET(rabp->nb_flags, (NB_INVAL|NB_ERROR)); + rabp->nb_error = EIO; + nfs_buf_release(rabp); } } else { - brelse(rabp); + nfs_buf_release(rabp); } } } @@ -424,30 +1434,41 @@ again: * Make sure we use a signed variant of min() since * the second term may be negative. */ - n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on); + n = lmin(uio->uio_resid, bp->nb_validend - on); /* - * Unlike VREG files, whos buffer size ( bp->b_bcount ) is - * chopped for the EOF condition, we cannot tell how large - * NFS directories are going to be until we hit EOF. So - * an NFS directory buffer is *not* chopped to its EOF. Now, - * it just so happens that b_resid will effectively chop it - * to EOF. *BUT* this information is lost if the buffer goes - * away and is reconstituted into a B_CACHE state (recovered - * from VM) later. So we keep track of the directory eof - * in np->n_direofoffset and chop it off as an extra step - * right here. + * We keep track of the directory eof in + * np->n_direofoffset and chop it off as an + * extra step right here. */ if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset) n = np->n_direofoffset - uio->uio_offset; + /* + * Make sure that we return an integral number of entries so + * that any subsequent calls will start copying from the start + * of the next entry. + * + * If the current value of n has the last entry cut short, + * set n to copy everything up to the last entry instead. 
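// NOTE (editor's sketch): a concrete run of the d_reclen scan below:
// with n = 100 and three 40-byte dirents starting at on, dp advances
// 40, 80, 120; the loop exits with dp = 120 past the 100-byte window,
// so n is pulled back to (120 - 40) - 0 = 80 and exactly two whole
// entries are copied -- the next read then starts cleanly at the third.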
+ */ + if (n > 0) { + dp = bp->nb_data + on; + while (dp < (bp->nb_data + on + n)) { + direntp = (struct dirent *)dp; + dp += direntp->d_reclen; + } + if (dp > (bp->nb_data + on + n)) + n = (dp - direntp->d_reclen) - (bp->nb_data + on); + } break; default: - printf(" nfs_bioread: type %x unexpected\n",vp->v_type); - break; + printf("nfs_bioread: type %x unexpected\n",vp->v_type); + FSDBG_BOT(514, vp, 0xd1e0015, 0, EINVAL); + return (EINVAL); }; if (n > 0) { - error = uiomove(bp->b_data + on, (int)n, uio); + error = uiomove(bp->nb_data + on, (int)n, uio); } switch (vp->v_type) { case VREG: @@ -457,13 +1478,12 @@ again: break; case VDIR: if (np->n_flag & NQNFSNONCACHE) - SET(bp->b_flags, B_INVAL); + SET(bp->nb_flags, NB_INVAL); break; - default: - printf(" nfs_bioread: type %x unexpected\n",vp->v_type); } - brelse(bp); + nfs_buf_release(bp); } while (error == 0 && uio->uio_resid > 0 && n > 0); + FSDBG_BOT(514, vp, uio->uio_offset, uio->uio_resid, error); return (error); } @@ -480,23 +1500,24 @@ nfs_write(ap) struct ucred *a_cred; } */ *ap; { - register int biosize; - register struct uio *uio = ap->a_uio; + struct uio *uio = ap->a_uio; struct proc *p = uio->uio_procp; - register struct vnode *vp = ap->a_vp; + struct vnode *vp = ap->a_vp; struct nfsnode *np = VTONFS(vp); - register struct ucred *cred = ap->a_cred; + struct ucred *cred = ap->a_cred; int ioflag = ap->a_ioflag; - struct buf *bp; + struct nfsbuf *bp; struct vattr vattr; struct nfsmount *nmp = VFSTONFS(vp->v_mount); daddr_t lbn; - int bufsize; + int biosize, bufsize, writeop; int n, on, error = 0, iomode, must_commit; - off_t boff; + off_t boff, start, end; struct iovec iov; struct uio auio; + FSDBG_TOP(515, vp, uio->uio_offset, uio->uio_resid, ioflag); + #if DIAGNOSTIC if (uio->uio_rw != UIO_WRITE) panic("nfs_write mode"); @@ -507,29 +1528,39 @@ nfs_write(ap) return (EIO); if (np->n_flag & NWRITEERR) { np->n_flag &= ~NWRITEERR; + FSDBG_BOT(515, vp, uio->uio_offset, uio->uio_resid, np->n_error); return (np->n_error); } - if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3) + if ((nmp->nm_flag & NFSMNT_NFSV3) && + !(nmp->nm_state & NFSSTA_GOTFSINFO)) (void)nfs_fsinfo(nmp, vp, cred, p); if (ioflag & (IO_APPEND | IO_SYNC)) { if (np->n_flag & NMODIFIED) { np->n_attrstamp = 0; error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); - if (error) + if (error) { + FSDBG_BOT(515, vp, uio->uio_offset, 0x10bad01, error); return (error); + } } if (ioflag & IO_APPEND) { np->n_attrstamp = 0; error = VOP_GETATTR(vp, &vattr, cred, p); - if (error) + if (error) { + FSDBG_BOT(515, vp, uio->uio_offset, 0x10bad02, error); return (error); + } uio->uio_offset = np->n_size; } } - if (uio->uio_offset < 0) + if (uio->uio_offset < 0) { + FSDBG_BOT(515, vp, uio->uio_offset, 0xbad0ff, EINVAL); return (EINVAL); - if (uio->uio_resid == 0) + } + if (uio->uio_resid == 0) { + FSDBG_BOT(515, vp, uio->uio_offset, uio->uio_resid, 0); return (0); + } /* * Maybe this should be above the vnode op call, but so long as * file servers have no limits, i don't think it matters @@ -537,15 +1568,11 @@ nfs_write(ap) if (p && uio->uio_offset + uio->uio_resid > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) { psignal(p, SIGXFSZ); + FSDBG_BOT(515, vp, uio->uio_offset, 0x2b1f, EFBIG); return (EFBIG); } - /* - * I use nm_rsize, not nm_wsize so that all buffer cache blocks - * will be the same size within a filesystem. nfs_writerpc will - * still use nm_wsize when sizing the rpc's. 
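// NOTE (editor's sketch): both nfs_bioread() and nfs_write() drop the
// old "min(f_iosize, PAGE_SIZE)" clamp (the "getblk/vm interractions"
// workaround being deleted here).  The old buffer layer kept a single
// valid/dirty range per buffer, so buffers were held to one VM page;
// nfsbuf's per-page NBPGVALID/NBPGDIRTY bitmaps let a buffer span the
// full f_iosize, which is what makes the new 16K defaults usable.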
- */ - /*due to getblk/vm interractions, use vm page size or less values */ - biosize = min(vp->v_mount->mnt_stat.f_iosize, PAGE_SIZE); + + biosize = vp->v_mount->mnt_stat.f_iosize; do { /* @@ -556,210 +1583,376 @@ nfs_write(ap) do { error = nqnfs_getlease(vp, ND_WRITE, cred, p); } while (error == NQNFS_EXPIRED); - if (error) + if (error) { + FSDBG_BOT(515, vp, uio->uio_offset, 0x11110001, error); return (error); + } if (np->n_lrev != np->n_brev || (np->n_flag & NQNFSNONCACHE)) { error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); - if (error) + if (error) { + FSDBG_BOT(515, vp, uio->uio_offset, 0x11110002, error); return (error); + } np->n_brev = np->n_lrev; } } - if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) { + if (ISSET(vp->v_flag, VNOCACHE_DATA) && + (np->n_dirtyblkhd.lh_first || np->n_cleanblkhd.lh_first)) { + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error) { + FSDBG_BOT(515, vp, 0, 0, error); + return (error); + } + } + if (((np->n_flag & NQNFSNONCACHE) || + ISSET(vp->v_flag, VNOCACHE_DATA)) && + uio->uio_iovcnt == 1) { iomode = NFSV3WRITE_FILESYNC; error = nfs_writerpc(vp, uio, cred, &iomode, &must_commit); if (must_commit) nfs_clearcommit(vp->v_mount); + FSDBG_BOT(515, vp, uio->uio_offset, uio->uio_resid, error); return (error); } nfsstats.biocache_writes++; lbn = uio->uio_offset / biosize; - on = uio->uio_offset & (biosize-1); + on = uio->uio_offset % biosize; n = min((unsigned)(biosize - on), uio->uio_resid); again: bufsize = biosize; -#if 0 -/* (removed for UBC) */ - if ((lbn + 1) * biosize > np->n_size) { - bufsize = np->n_size - lbn * biosize; - bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); - } -#endif /* * Get a cache block for writing. The range to be written is - * (off..off+len) within the block. We ensure that the block + * (off..off+n) within the block. We ensure that the block * either has no dirty region or that the given range is * contiguous with the existing dirty region. */ - bp = nfs_getcacheblk(vp, lbn, bufsize, p, BLK_WRITE); - if (!bp) + bp = nfs_buf_get(vp, lbn, bufsize, p, BLK_WRITE); + if (!bp) { + FSDBG_BOT(515, vp, uio->uio_offset, uio->uio_resid, EINTR); return (EINTR); + } + /* map the block because we know we're going to write to it */ + NFS_BUF_MAP(bp); + + if (ISSET(vp->v_flag, VNOCACHE_DATA)) + SET(bp->nb_flags, (NB_NOCACHE|NB_INVAL)); + + /* + * NFS has embedded ucred so crhold() risks zone corruption + */ + if (bp->nb_wcred == NOCRED) + bp->nb_wcred = crdup(cred); + + /* + * If there's already a dirty range AND dirty pages in this block we + * need to send a commit AND write the dirty pages before continuing. + * + * If there's already a dirty range OR dirty pages in this block + * and the new write range is not contiguous with the existing range, + * then force the buffer to be written out now. + * (We used to just extend the dirty range to cover the valid, + * but unwritten, data in between also. But writing ranges + * of data that weren't actually written by an application + * risks overwriting some other client's data with stale data + * that's just masquerading as new written data.) 
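The flush-before-dirtying policy spelled out in the comment above reduces to a small predicate; the following distillation is ours, not the patch's (hypothetical helper name):

static int
nfs_write_must_flush_first(struct nfsbuf *bp, int on, int n)
{
	/*
	 * Flush before dirtying unless the new write [on, on+n) touches
	 * or overlaps the existing dirty byte range
	 * [nb_dirtyoff, nb_dirtyend) and no per-page dirty (mmap) bits
	 * are set.
	 */
	return (bp->nb_dirtyend > 0 &&
	    (on > bp->nb_dirtyend || (on + n) < bp->nb_dirtyoff ||
	     bp->nb_dirty));
}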
+ */ + if (bp->nb_dirtyend > 0) { + if (on > bp->nb_dirtyend || (on + n) < bp->nb_dirtyoff || bp->nb_dirty) { + FSDBG(515, vp, uio->uio_offset, bp, 0xd15c001); + /* write/commit buffer "synchronously" */ + /* (NB_STABLE indicates that data writes should be FILESYNC) */ + CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); + SET(bp->nb_flags, (NB_ASYNC | NB_STABLE)); + error = nfs_buf_write(bp); + if (error) { + FSDBG_BOT(515, vp, uio->uio_offset, uio->uio_resid, error); + return (error); + } + goto again; + } + } else if (bp->nb_dirty) { + int firstpg, lastpg; + u_int32_t pagemask; + /* calculate write range pagemask */ + firstpg = on/PAGE_SIZE; + lastpg = (on+n-1)/PAGE_SIZE; + pagemask = ((1 << (lastpg+1)) - 1) & ~((1 << firstpg) - 1); + /* check if there are dirty pages outside the write range */ + if (bp->nb_dirty & ~pagemask) { + FSDBG(515, vp, uio->uio_offset, bp, 0xd15c002); + /* write/commit buffer "synchronously" */ + /* (NB_STABLE indicates that data writes should be FILESYNC) */ + CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); + SET(bp->nb_flags, (NB_ASYNC | NB_STABLE)); + error = nfs_buf_write(bp); + if (error) { + FSDBG_BOT(515, vp, uio->uio_offset, uio->uio_resid, error); + return (error); + } + goto again; + } + /* if the first or last pages are already dirty */ + /* make sure that the dirty range encompasses those pages */ + if (NBPGDIRTY(bp,firstpg) || NBPGDIRTY(bp,lastpg)) { + FSDBG(515, vp, uio->uio_offset, bp, 0xd15c003); + bp->nb_dirtyoff = min(on, firstpg * PAGE_SIZE); + if (NBPGDIRTY(bp,lastpg)) { + bp->nb_dirtyend = (lastpg+1) * PAGE_SIZE; + /* clip to EOF */ + if (NBOFF(bp) + bp->nb_dirtyend > np->n_size) + bp->nb_dirtyend = np->n_size - NBOFF(bp); + } else + bp->nb_dirtyend = on+n; + } + } + /* - * Resize nfsnode *after* we busy the buffer to prevent - * readers from reading garbage. + * Are we extending the size of the file with this write? + * If so, update file size now that we have the block. * If there was a partial buf at the old eof, validate * and zero the new bytes. */ if (uio->uio_offset + n > np->n_size) { - struct buf *bp0 = NULL; - daddr_t bn = np->n_size / biosize; - int off = np->n_size & (biosize - 1); + struct nfsbuf *eofbp = NULL; + daddr_t eofbn = np->n_size / biosize; + int eofoff = np->n_size % biosize; + int neweofoff = (uio->uio_offset + n) % biosize; + + FSDBG(515, 0xb1ffa000, uio->uio_offset + n, eofoff, neweofoff); - if (off && bn < lbn && incore(vp, bn)) - bp0 = nfs_getcacheblk(vp, bn, biosize, p, - BLK_WRITE); + if (eofoff && eofbn < lbn && nfs_buf_incore(vp, eofbn)) + eofbp = nfs_buf_get(vp, eofbn, biosize, p, BLK_WRITE); + + /* if we're extending within the same last block */ + /* and the block is flagged as being cached... */ + if ((lbn == eofbn) && ISSET(bp->nb_flags, NB_CACHE)) { + /* ...check that all pages in buffer are valid */ + int endpg = ((neweofoff ? 
neweofoff : biosize) - 1)/PAGE_SIZE; + u_int32_t pagemask; + /* pagemask only has to extend to last page being written to */ + pagemask = (1 << (endpg+1)) - 1; + FSDBG(515, 0xb1ffa001, bp->nb_valid, pagemask, 0); + if ((bp->nb_valid & pagemask) != pagemask) { + /* zerofill any hole */ + if (on > bp->nb_validend) { + int i; + for (i=bp->nb_validend/PAGE_SIZE; i <= (on - 1)/PAGE_SIZE; i++) + NBPGVALID_SET(bp, i); + NFS_BUF_MAP(bp); + FSDBG(516, bp, bp->nb_validend, on - bp->nb_validend, 0xf01e); + bzero((char *)bp->nb_data + bp->nb_validend, + on - bp->nb_validend); + } + /* zerofill any trailing data in the last page */ + if (neweofoff) { + NFS_BUF_MAP(bp); + FSDBG(516, bp, neweofoff, PAGE_SIZE - (neweofoff & PAGE_MASK), 0xe0f); + bzero((char *)bp->nb_data + neweofoff, + PAGE_SIZE - (neweofoff & PAGE_MASK)); + } + } + } np->n_flag |= NMODIFIED; np->n_size = uio->uio_offset + n; ubc_setsize(vp, (off_t)np->n_size); /* XXX errors */ - if (bp0) { - bzero((char *)bp0->b_data + off, biosize - off); - bp0->b_validend = biosize; - brelse(bp0); + if (eofbp) { + /* + * We may need to zero any previously invalid data + * after the old EOF in the previous EOF buffer. + * + * For the old last page, don't zero bytes if there + * are invalid bytes in that page (i.e. the page isn't + * currently valid). + * For pages after the old last page, zero them and + * mark them as valid. + */ + char *d; + int i; + if (ISSET(vp->v_flag, VNOCACHE_DATA)) + SET(eofbp->nb_flags, (NB_NOCACHE|NB_INVAL)); + NFS_BUF_MAP(eofbp); + FSDBG(516, eofbp, eofoff, biosize - eofoff, 0xe0fff01e); + d = eofbp->nb_data; + i = eofoff/PAGE_SIZE; + while (eofoff < biosize) { + int poff = eofoff & PAGE_MASK; + if (!poff || NBPGVALID(eofbp,i)) { + bzero(d + eofoff, PAGE_SIZE - poff); + NBPGVALID_SET(eofbp, i); + } + if (bp->nb_validend == eofoff) + bp->nb_validend += PAGE_SIZE - poff; + eofoff += PAGE_SIZE - poff; + i++; + } + nfs_buf_release(eofbp); } } - /* - * NFS has embedded ucred so crhold() risks zone corruption - */ - if (bp->b_wcred == NOCRED) - bp->b_wcred = crdup(cred); /* * If dirtyend exceeds file size, chop it down. This should * not occur unless there is a race. */ - if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > - np->n_size) - bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * - DEV_BSIZE; + if (NBOFF(bp) + bp->nb_dirtyend > np->n_size) + bp->nb_dirtyend = np->n_size - NBOFF(bp); /* - * UBC doesn't (yet) handle partial pages so nfs_biowrite was - * hacked to never bdwrite, to start every little write right - * away. Running IE Avie noticed the performance problem, thus - * this code, which permits those delayed writes by ensuring an - * initial read of the entire page. The read may hit eof - * ("short read") but that we will handle. + * UBC doesn't handle partial pages, so we need to make sure + * that any pages left in the page cache are completely valid. + * + * Writes that are smaller than a block are delayed if they + * don't extend to the end of the block. * - * We are quite dependant on the correctness of B_CACHE so check - * that first in case of problems. + * If the block isn't (completely) cached, we may need to read + * in some parts of pages that aren't covered by the write. + * If the write offset (on) isn't page aligned, we'll need to + * read the start of the first page being written to. Likewise, + * if the offset of the end of the write (on+n) isn't page aligned, + * we'll need to read the end of the last page being written to. 
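The page masks used in these checks are easier to read with concrete numbers; a sketch with hypothetical values (PAGE_SIZE = 4096):

/* a write of n = 4096 bytes at on = 6144 spans buffer pages 1 and 2: */
int firstpg = 6144 / 4096;                      /* = 1 */
int lastpg = (6144 + 4096 - 1) / 4096;          /* = 2 */
u_int32_t pagemask = ((1 << (lastpg + 1)) - 1)  /* 0x7 */
    & ~((1 << firstpg) - 1);                    /* & ~0x1 -> 0x6 */
/* so (nb_dirty & ~pagemask) tests for dirty pages outside the write */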
+ * + * Notes: + * We don't want to read anything we're just going to write over. + * We don't want to issue multiple I/Os if we don't have to + * (because they're synchronous rpcs). + * We don't want to read anything we already have modified in the + * page cache. */ - if (!ISSET(bp->b_flags, B_CACHE) && n < PAGE_SIZE) { - boff = (off_t)bp->b_blkno * DEV_BSIZE; - auio.uio_iov = &iov; - auio.uio_iovcnt = 1; - auio.uio_offset = boff; - auio.uio_resid = PAGE_SIZE; - auio.uio_segflg = UIO_SYSSPACE; - auio.uio_rw = UIO_READ; - auio.uio_procp = p; - iov.iov_base = bp->b_data; - iov.iov_len = PAGE_SIZE; - error = nfs_readrpc(vp, &auio, cred); - if (error) { - bp->b_error = error; - SET(bp->b_flags, B_ERROR); - printf("nfs_write: readrpc %d", error); + if (!ISSET(bp->nb_flags, NB_CACHE) && n < biosize) { + int firstpg, lastpg, dirtypg; + int firstpgoff, lastpgoff; + start = end = -1; + firstpg = on/PAGE_SIZE; + firstpgoff = on & PAGE_MASK; + lastpg = (on+n-1)/PAGE_SIZE; + lastpgoff = (on+n) & PAGE_MASK; + if (firstpgoff && !NBPGVALID(bp,firstpg)) { + /* need to read start of first page */ + start = firstpg * PAGE_SIZE; + end = start + firstpgoff; } - if (auio.uio_resid > 0) - bzero(iov.iov_base, auio.uio_resid); - bp->b_validoff = 0; - bp->b_validend = PAGE_SIZE - auio.uio_resid; - if (np->n_size > boff + bp->b_validend) - bp->b_validend = min(np->n_size - boff, - PAGE_SIZE); - bp->b_dirtyoff = 0; - bp->b_dirtyend = 0; - } - - /* - * If the new write will leave a contiguous dirty - * area, just update the b_dirtyoff and b_dirtyend, - * otherwise try to extend the dirty region. - */ - if (bp->b_dirtyend > 0 && - (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) { - off_t start, end; - - boff = (off_t)bp->b_blkno * DEV_BSIZE; - if (on > bp->b_dirtyend) { - start = boff + bp->b_validend; - end = boff + on; - } else { - start = boff + on + n; - end = boff + bp->b_validoff; + if (lastpgoff && !NBPGVALID(bp,lastpg)) { + /* need to read end of last page */ + if (start < 0) + start = (lastpg * PAGE_SIZE) + lastpgoff; + end = (lastpg + 1) * PAGE_SIZE; } - - /* - * It may be that the valid region in the buffer - * covers the region we want, in which case just - * extend the dirty region. Otherwise we try to - * extend the valid region. - */ if (end > start) { + /* need to read the data in range: start...end-1 */ + + /* + * XXX: If we know any of these reads are beyond the + * current EOF (what np->n_size was before we possibly + * just modified it above), we could short-circuit the + * reads and just zero buffer. No need to make a trip + * across the network to read nothing. + */ + + /* first, check for dirty pages in between */ + /* if there are, we'll have to do two reads because */ + /* we don't want to overwrite the dirty pages. */ + for (dirtypg=start/PAGE_SIZE; dirtypg <= (end-1)/PAGE_SIZE; dirtypg++) + if (NBPGDIRTY(bp,dirtypg)) + break; + + /* if start is at beginning of page, try */ + /* to get any preceding pages as well.
*/ + if (!(start & PAGE_MASK)) { + /* stop at next dirty/valid page or start of block */ + for (; start > 0; start-=PAGE_SIZE) + if (NBPGVALID(bp,((start-1)/PAGE_SIZE))) + break; + } + + NFS_BUF_MAP(bp); + /* setup uio for read(s) */ + boff = NBOFF(bp); auio.uio_iov = &iov; auio.uio_iovcnt = 1; - auio.uio_offset = start; - auio.uio_resid = end - start; auio.uio_segflg = UIO_SYSSPACE; auio.uio_rw = UIO_READ; auio.uio_procp = p; - iov.iov_base = bp->b_data + (start - boff); - iov.iov_len = end - start; + + if (dirtypg <= (end-1)/PAGE_SIZE) { + /* there's a dirty page in the way, so just do two reads */ + /* we'll read the preceding data here */ + auio.uio_offset = boff + start; + auio.uio_resid = iov.iov_len = on - start; + iov.iov_base = bp->nb_data + start; + error = nfs_readrpc(vp, &auio, cred); + if (error) { + bp->nb_error = error; + SET(bp->nb_flags, NB_ERROR); + printf("nfs_write: readrpc %d", error); + } + if (auio.uio_resid > 0) { + FSDBG(516, bp, iov.iov_base - bp->nb_data, auio.uio_resid, 0xd00dee01); + bzero(iov.iov_base, auio.uio_resid); + } + /* update validoff/validend if necessary */ + if ((bp->nb_validoff < 0) || (bp->nb_validoff > start)) + bp->nb_validoff = start; + if ((bp->nb_validend < 0) || (bp->nb_validend < on)) + bp->nb_validend = on; + if (np->n_size > boff + bp->nb_validend) + bp->nb_validend = min(np->n_size - (boff + start), biosize); + /* validate any pages before the write offset */ + for (; start < on/PAGE_SIZE; start+=PAGE_SIZE) + NBPGVALID_SET(bp, start/PAGE_SIZE); + /* adjust start to read any trailing data */ + start = on+n; + } + + /* if end is at end of page, try to */ + /* get any following pages as well. */ + if (!(end & PAGE_MASK)) { + /* stop at next valid page or end of block */ + for (; end < bufsize; end+=PAGE_SIZE) + if (NBPGVALID(bp,end/PAGE_SIZE)) + break; + } + + /* now we'll read the (rest of the) data */ + auio.uio_offset = boff + start; + auio.uio_resid = iov.iov_len = end - start; + iov.iov_base = bp->nb_data + start; error = nfs_readrpc(vp, &auio, cred); - /* - * If we couldn't read, do not do a VOP_BWRITE - * as originally coded. That could also error - * and looping back to "again" as it was doing - * could have us stuck trying to write same buf - * again. nfs_write, will get the entire region - * if nfs_readrpc succeeded. If unsuccessful - * we should just error out. Errors like ESTALE - * would keep us looping rather than transient - * errors justifying a retry. We can return here - * instead of altering dirty region later. We - * did not write old dirty region at this point. - */ if (error) { - bp->b_error = error; - SET(bp->b_flags, B_ERROR); - printf("nfs_write: readrpc2 %d", error); - brelse(bp); - return (error); + bp->nb_error = error; + SET(bp->nb_flags, NB_ERROR); + printf("nfs_write: readrpc %d", error); } - /* - * The read worked. - * If there was a short read, just zero fill. 
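The short-read rule applied to every one of these reads, distilled from the surrounding code (a sketch, not the verbatim patch):

error = nfs_readrpc(vp, &auio, cred);
if (error) {
	bp->nb_error = error;
	SET(bp->nb_flags, NB_ERROR);
}
/* nfs_readrpc() may return less than asked for (e.g. the range runs
 * past the server's EOF); zero the unread tail so the buffer never
 * hands stale memory to a later read or mmap. */
if (auio.uio_resid > 0)
	bzero(iov.iov_base, auio.uio_resid);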
- */ - if (auio.uio_resid > 0) + if (auio.uio_resid > 0) { + FSDBG(516, bp, iov.iov_base - bp->nb_data, auio.uio_resid, 0xd00dee02); bzero(iov.iov_base, auio.uio_resid); - if (on > bp->b_dirtyend) - bp->b_validend = on; - else - bp->b_validoff = on + n; + } + /* update validoff/validend if necessary */ + if ((bp->nb_validoff < 0) || (bp->nb_validoff > start)) + bp->nb_validoff = start; + if ((bp->nb_validend < 0) || (bp->nb_validend < end)) + bp->nb_validend = end; + if (np->n_size > boff + bp->nb_validend) + bp->nb_validend = min(np->n_size - (boff + start), biosize); + /* validate any pages before the write offset's page */ + for (; start < trunc_page_32(on); start+=PAGE_SIZE) + NBPGVALID_SET(bp, start/PAGE_SIZE); + /* validate any pages after the range of pages being written to */ + for (; (end - 1) > round_page_32(on+n-1); end-=PAGE_SIZE) + NBPGVALID_SET(bp, (end-1)/PAGE_SIZE); + /* Note: pages being written to will be validated when written */ } - /* - * We now have a valid region which extends up to the - * dirty region which we want. - */ - if (on > bp->b_dirtyend) - bp->b_dirtyend = on; - else - bp->b_dirtyoff = on + n; } - if (ISSET(bp->b_flags, B_ERROR)) { - error = bp->b_error; - brelse(bp); + + if (ISSET(bp->nb_flags, NB_ERROR)) { + error = bp->nb_error; + nfs_buf_release(bp); + FSDBG_BOT(515, vp, uio->uio_offset, uio->uio_resid, error); return (error); } - /* - * NFS has embedded ucred so crhold() risks zone corruption - */ - if (bp->b_wcred == NOCRED) - bp->b_wcred = crdup(cred); + np->n_flag |= NMODIFIED; /* * Check for valid write lease and get one as required. - * In case getblk() and/or bwrite() delayed us. + * In case nfs_buf_get() and/or nfs_buf_write() delayed us. */ if ((nmp->nm_flag & NFSMNT_NQNFS) && NQNFS_CKINVALID(vp, np, ND_WRITE)) { @@ -767,124 +1960,222 @@ again: error = nqnfs_getlease(vp, ND_WRITE, cred, p); } while (error == NQNFS_EXPIRED); if (error) { - brelse(bp); + nfs_buf_release(bp); + FSDBG_BOT(515, vp, uio->uio_offset, 0x11220001, error); return (error); } if (np->n_lrev != np->n_brev || (np->n_flag & NQNFSNONCACHE)) { - brelse(bp); + nfs_buf_release(bp); error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); - if (error) + if (error) { + FSDBG_BOT(515, vp, uio->uio_offset, 0x11220002, error); return (error); + } np->n_brev = np->n_lrev; goto again; } } - error = uiomove((char *)bp->b_data + on, n, uio); + NFS_BUF_MAP(bp); + error = uiomove((char *)bp->nb_data + on, n, uio); if (error) { - SET(bp->b_flags, B_ERROR); - brelse(bp); + SET(bp->nb_flags, NB_ERROR); + nfs_buf_release(bp); + FSDBG_BOT(515, vp, uio->uio_offset, uio->uio_resid, error); return (error); } - if (bp->b_dirtyend > 0) { - bp->b_dirtyoff = min(on, bp->b_dirtyoff); - bp->b_dirtyend = max((on + n), bp->b_dirtyend); + + /* validate any pages written to */ + start = on & ~PAGE_MASK; + for (; start < on+n; start += PAGE_SIZE) { + NBPGVALID_SET(bp, start/PAGE_SIZE); + /* + * This may seem a little weird, but we don't actually set the + * dirty bits for writes. This is because we keep the dirty range + * in the nb_dirtyoff/nb_dirtyend fields. Also, particularly for + * delayed writes, when we give the pages back to the VM we don't + * want to keep them marked dirty, because when we later write the + * buffer we won't be able to tell which pages were written dirty + * and which pages were mmapped and dirtied. 
+ */ + } + if (bp->nb_dirtyend > 0) { + bp->nb_dirtyoff = min(on, bp->nb_dirtyoff); + bp->nb_dirtyend = max((on + n), bp->nb_dirtyend); } else { - bp->b_dirtyoff = on; - bp->b_dirtyend = on + n; + bp->nb_dirtyoff = on; + bp->nb_dirtyend = on + n; } - if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff || - bp->b_validoff > bp->b_dirtyend) { - bp->b_validoff = bp->b_dirtyoff; - bp->b_validend = bp->b_dirtyend; + if (bp->nb_validend <= 0 || bp->nb_validend < bp->nb_dirtyoff || + bp->nb_validoff > bp->nb_dirtyend) { + bp->nb_validoff = bp->nb_dirtyoff; + bp->nb_validend = bp->nb_dirtyend; } else { - bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff); - bp->b_validend = max(bp->b_validend, bp->b_dirtyend); + bp->nb_validoff = min(bp->nb_validoff, bp->nb_dirtyoff); + bp->nb_validend = max(bp->nb_validend, bp->nb_dirtyend); } + if (!ISSET(bp->nb_flags, NB_CACHE)) + nfs_buf_normalize_valid_range(np, bp); /* * Since this block is being modified, it must be written * again and not just committed. */ - CLR(bp->b_flags, B_NEEDCOMMIT); + if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { + np->n_needcommitcnt--; + CHECK_NEEDCOMMITCNT(np); + } + CLR(bp->nb_flags, NB_NEEDCOMMIT); - /* - * If the lease is non-cachable or IO_SYNC do bwrite(). - */ - if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) { - bp->b_proc = p; - error = VOP_BWRITE(bp); - if (error) + if ((np->n_flag & NQNFSNONCACHE) || + (ioflag & IO_SYNC) || (vp->v_flag & VNOCACHE_DATA)) { + bp->nb_proc = p; + error = nfs_buf_write(bp); + if (error) { + FSDBG_BOT(515, vp, uio->uio_offset, + uio->uio_resid, error); return (error); + } if (np->n_flag & NQNFSNONCACHE) { error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); - if (error) + if (error) { + FSDBG_BOT(515, vp, uio->uio_offset, + uio->uio_resid, error); return (error); + } } - } else if ((n + on) == biosize && - (nmp->nm_flag & NFSMNT_NQNFS) == 0) { - bp->b_proc = (struct proc *)0; - SET(bp->b_flags, B_ASYNC); - (void)nfs_writebp(bp, 0); + } else if ((n + on) == biosize && (nmp->nm_flag & NFSMNT_NQNFS) == 0) { + bp->nb_proc = (struct proc *)0; + SET(bp->nb_flags, NB_ASYNC); + nfs_buf_write(bp); } else - bdwrite(bp); + nfs_buf_write_delayed(bp); + + if (np->n_needcommitcnt > (nbuf/16)) + nfs_flushcommits(vp, p); + } while (uio->uio_resid > 0 && n > 0); + + FSDBG_BOT(515, vp, uio->uio_offset, uio->uio_resid, 0); return (0); } - /* - * Get an nfs cache block. - * Allocate a new one if the block isn't currently in the cache - * and return the block marked busy. If the calling process is - * interrupted by a signal for an interruptible mount point, return - * NULL. + * Flush out and invalidate all buffers associated with a vnode. + * Called with the underlying object locked. 
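A dirtied buffer leaves nfs_write() above by one of three routes; this distillation of that dispatch is ours (the enum and helper are hypothetical):

enum nfs_wdispatch { NFS_WR_SYNC, NFS_WR_ASYNC, NFS_WR_DELAYED };

static enum nfs_wdispatch
nfs_write_dispatch(int noncacheable, int ioflag, int on, int n,
    int biosize, int nqnfs)
{
	if (noncacheable || (ioflag & IO_SYNC))
		return (NFS_WR_SYNC);    /* write it now and wait for it */
	if ((n + on) == biosize && !nqnfs)
		return (NFS_WR_ASYNC);   /* block is full: start the write, don't wait */
	return (NFS_WR_DELAYED);         /* partial block: leave it dirty for later */
}

The delayed case is what makes the n_needcommitcnt check above necessary: delayed writes accumulate, so once enough buffers are merely awaiting a commit, nfs_flushcommits() sweeps them.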
*/ -static struct buf * -nfs_getcacheblk(vp, bn, size, p, operation) - struct vnode *vp; - daddr_t bn; - int size; +static int +nfs_vinvalbuf_internal(vp, flags, cred, p, slpflag, slptimeo) + register struct vnode *vp; + int flags; + struct ucred *cred; struct proc *p; - int operation; /* defined in sys/buf.h */ + int slpflag, slptimeo; { - register struct buf *bp; - struct nfsmount *nmp = VFSTONFS(vp->v_mount); - /*due to getblk/vm interractions, use vm page size or less values */ - int biosize = min(vp->v_mount->mnt_stat.f_iosize, PAGE_SIZE); - - if (nbdwrite > ((nbuf/4)*3) && operation == BLK_WRITE) { -#define __BUFFERS_RECLAIMED 2 - struct buf *tbp[__BUFFERS_RECLAIMED]; - int i; - - /* too many delayed writes, try to free up some buffers */ - for (i = 0; i < __BUFFERS_RECLAIMED; i++) - tbp[i] = geteblk(512); - - /* Yield to IO thread */ - (void)tsleep((caddr_t)&nbdwrite, PCATCH, "nbdwrite", 1); + struct nfsbuf *bp; + struct nfsbuf *nbp, *blist; + int s, error = 0; + struct nfsnode *np = VTONFS(vp); - for (i = (__BUFFERS_RECLAIMED - 1); i >= 0; i--) - brelse(tbp[i]); + if (flags & V_SAVE) { + if (error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) + return (error); + if (np->n_dirtyblkhd.lh_first) + panic("nfs_vinvalbuf: dirty bufs (vp 0x%x, bp 0x%x)", + vp, np->n_dirtyblkhd.lh_first); } - if (nmp->nm_flag & NFSMNT_INT) { - bp = getblk(vp, bn, size, PCATCH, 0, operation); - while (bp == (struct buf *)0) { - if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) - return ((struct buf *)0); - bp = getblk(vp, bn, size, 0, 2 * hz, operation); - } - } else - bp = getblk(vp, bn, size, 0, 0, operation); - - if( vp->v_type == VREG) - bp->b_blkno = ((off_t)bn * biosize) / DEV_BSIZE; + for (;;) { + blist = np->n_cleanblkhd.lh_first; + if (!blist) + blist = np->n_dirtyblkhd.lh_first; + if (!blist) + break; - return (bp); + for (bp = blist; bp; bp = nbp) { + nbp = bp->nb_vnbufs.le_next; + s = splbio(); + if (ISSET(bp->nb_flags, NB_BUSY)) { + SET(bp->nb_flags, NB_WANTED); + FSDBG_TOP(556, vp, bp, NBOFF(bp), bp->nb_flags); + error = tsleep((caddr_t)bp, + slpflag | (PRIBIO + 1), "nfs_vinvalbuf", + slptimeo); + FSDBG_BOT(556, vp, bp, NBOFF(bp), bp->nb_flags); + splx(s); + if (error) { + FSDBG(554, vp, bp, -1, error); + return (error); + } + break; + } + FSDBG(554, vp, bp, NBOFF(bp), bp->nb_flags); + nfs_buf_remfree(bp); + SET(bp->nb_flags, NB_BUSY); + splx(s); + if ((flags & V_SAVE) && UBCINFOEXISTS(vp) && (NBOFF(bp) < np->n_size)) { + /* XXX extra paranoia: make sure we're not */ + /* somehow leaving any dirty data around */ + int mustwrite = 0; + int end = (NBOFF(bp) + bp->nb_bufsize >= np->n_size) ? 
+ bp->nb_bufsize : (np->n_size - NBOFF(bp)); + if (!ISSET(bp->nb_flags, NB_PAGELIST)) { + error = nfs_buf_upl_setup(bp); + if (error == EINVAL) { + /* vm object must no longer exist */ + /* hopefully we don't need to do */ + /* anything for this buffer */ + } else if (error) + printf("nfs_vinvalbuf: upl setup failed %d\n", + error); + bp->nb_valid = bp->nb_dirty = 0; + } + nfs_buf_upl_check(bp); + /* check for any dirty data before the EOF */ + if (bp->nb_dirtyend && bp->nb_dirtyoff < end) { + /* clip dirty range to EOF */ + if (bp->nb_dirtyend > end) + bp->nb_dirtyend = end; + mustwrite++; + } + bp->nb_dirty &= (1 << (round_page_32(end)/PAGE_SIZE)) - 1; + if (bp->nb_dirty) + mustwrite++; + if (mustwrite) { + FSDBG(554, vp, bp, 0xd00dee, bp->nb_flags); + if (!ISSET(bp->nb_flags, NB_PAGELIST)) + panic("nfs_vinvalbuf: dirty buffer without upl"); + /* gotta write out dirty data before invalidating */ + /* (NB_STABLE indicates that data writes should be FILESYNC) */ + /* (NB_NOCACHE indicates buffer should be discarded) */ + CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL | NB_ASYNC)); + SET(bp->nb_flags, NB_STABLE | NB_NOCACHE); + /* + * NFS has embedded ucred so crhold() risks zone corruption + */ + if (bp->nb_wcred == NOCRED) + bp->nb_wcred = crdup(cred); + error = nfs_buf_write(bp); + // Note: bp has been released + if (error) { + FSDBG(554, bp, 0xd00dee, 0xbad, error); + np->n_error = error; + np->n_flag |= NWRITEERR; + error = 0; + } + break; + } + } + SET(bp->nb_flags, NB_INVAL); + nfs_buf_release(bp); + } + } + if (np->n_dirtyblkhd.lh_first || np->n_cleanblkhd.lh_first) + panic("nfs_vinvalbuf: flush failed"); + return (0); } + /* * Flush and invalidate all dirty buffers. If another process is already * doing the flush, just wait for completion. @@ -902,7 +2193,9 @@ nfs_vinvalbuf(vp, flags, cred, p, intrflg) int error = 0, slpflag, slptimeo; int didhold = 0; - if ((nmp->nm_flag & NFSMNT_INT) == 0) + FSDBG_TOP(554, vp, flags, intrflg, 0); + + if (nmp && ((nmp->nm_flag & NFSMNT_INT) == 0)) intrflg = 0; if (intrflg) { slpflag = PCATCH; @@ -916,36 +2209,33 @@ nfs_vinvalbuf(vp, flags, cred, p, intrflg) */ while (np->n_flag & NFLUSHINPROG) { np->n_flag |= NFLUSHWANT; - error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval", - slptimeo); - if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) - return (EINTR); + FSDBG_TOP(555, vp, flags, intrflg, np->n_flag); + error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval", slptimeo); + FSDBG_BOT(555, vp, flags, intrflg, np->n_flag); + if (error && (error = nfs_sigintr(VFSTONFS(vp->v_mount), NULL, p))) { + FSDBG_BOT(554, vp, flags, intrflg, error); + return (error); + } } /* * Now, flush as required. */ np->n_flag |= NFLUSHINPROG; - error = vinvalbuf(vp, flags, cred, p, slpflag, 0); + error = nfs_vinvalbuf_internal(vp, flags, cred, p, slpflag, 0); while (error) { - /* we seem to be stuck in a loop here if the thread got aborted. - * nfs_flush will return EINTR. Not sure if that will cause - * other consequences due to EINTR having other meanings in NFS - * To handle, no dirty pages, it seems safe to just return from - * here. But if we did have dirty pages, how would we get them - * written out if thread was aborted? Some other strategy is - * necessary. 
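Stripped of the FSDBG tracing and wakeup bookkeeping, the replacement loop just below answers this concern by re-checking for a pending signal on every failed pass (a distillation, not the verbatim code):

error = nfs_vinvalbuf_internal(vp, flags, cred, p, slpflag, 0);
while (error) {
	/* a pending signal ends the retries instead of spinning forever */
	error = nfs_sigintr(VFSTONFS(vp->v_mount), NULL, p);
	if (error)
		return (error);  /* (the real code clears NFLUSHINPROG
	                          * and wakes any waiters first) */
	error = nfs_vinvalbuf_internal(vp, flags, cred, p, 0, slptimeo);
}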
-- EKN - */ - if ((intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) || - (error == EINTR && current_thread_aborted())) { + FSDBG(554, vp, 0, 0, error); + error = nfs_sigintr(VFSTONFS(vp->v_mount), NULL, p); + if (error) { np->n_flag &= ~NFLUSHINPROG; if (np->n_flag & NFLUSHWANT) { np->n_flag &= ~NFLUSHWANT; wakeup((caddr_t)&np->n_flag); } - return (EINTR); + FSDBG_BOT(554, vp, flags, intrflg, error); + return (error); } - error = vinvalbuf(vp, flags, cred, p, 0, slptimeo); + error = nfs_vinvalbuf_internal(vp, flags, cred, p, 0, slptimeo); } np->n_flag &= ~(NMODIFIED | NFLUSHINPROG); if (np->n_flag & NFLUSHWANT) { @@ -954,9 +2244,12 @@ nfs_vinvalbuf(vp, flags, cred, p, intrflg) } didhold = ubc_hold(vp); if (didhold) { - (void) ubc_clean(vp, 1); /* get the pages out of vm also */ + int rv = ubc_clean(vp, 1); /* get the pages out of vm also */ + if (!rv) + panic("nfs_vinvalbuf(): ubc_clean failed!"); ubc_rele(vp); } + FSDBG_BOT(554, vp, flags, intrflg, 0); return (0); } @@ -967,7 +2260,7 @@ nfs_vinvalbuf(vp, flags, cred, p, intrflg) */ int nfs_asyncio(bp, cred) - register struct buf *bp; + struct nfsbuf *bp; struct ucred *cred; { struct nfsmount *nmp; @@ -975,17 +2268,23 @@ nfs_asyncio(bp, cred) int gotiod; int slpflag = 0; int slptimeo = 0; - int error; + int error, error2; if (nfs_numasync == 0) return (EIO); - - nmp = VFSTONFS(bp->b_vp->v_mount); + + FSDBG_TOP(552, bp, bp ? NBOFF(bp) : 0, bp ? bp->nb_flags : 0, 0); + + nmp = ((bp != NULL) ? VFSTONFS(bp->nb_vp->v_mount) : NULL); again: - if (nmp->nm_flag & NFSMNT_INT) + if (nmp && nmp->nm_flag & NFSMNT_INT) slpflag = PCATCH; gotiod = FALSE; + /* no nfsbuf means tell nfsiod to process delwri list */ + if (!bp) + nfs_ioddelwri = 1; + /* * Find a free iod to process this request. */ @@ -1000,12 +2299,17 @@ again: i, nmp)); nfs_iodwant[i] = (struct proc *)0; nfs_iodmount[i] = nmp; - nmp->nm_bufqiods++; + if (nmp) + nmp->nm_bufqiods++; wakeup((caddr_t)&nfs_iodwant[i]); gotiod = TRUE; break; } + /* if we're just poking the delwri list, we're done */ + if (!bp) + return (0); + /* * If none are free, we may already have an iod working on this mount * point. If so, it will process our request. @@ -1023,19 +2327,31 @@ again: * If we have an iod which can process the request, then queue * the buffer. */ + FSDBG(552, bp, gotiod, i, nmp->nm_bufqiods); if (gotiod) { /* * Ensure that the queue never grows too large. */ while (nmp->nm_bufqlen >= 2*nfs_numasync) { + if (ISSET(bp->nb_flags, NB_IOD)) { + /* An nfsiod is attempting this async operation so */ + /* we must not fall asleep on the bufq because we */ + /* could be waiting on ourself. Just return error */ + /* and we'll do this operation synchronously. */ + goto out; + } + FSDBG(552, bp, nmp->nm_bufqlen, 2*nfs_numasync, -1); NFS_DPF(ASYNCIO, ("nfs_asyncio: waiting for mount %p queue to drain\n", nmp)); nmp->nm_bufqwant = TRUE; error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO, "nfsaio", slptimeo); if (error) { - if (nfs_sigintr(nmp, NULL, bp->b_proc)) - return (EINTR); + error2 = nfs_sigintr(nmp, NULL, bp->nb_proc); + if (error2) { + FSDBG_BOT(552, bp, NBOFF(bp), bp->nb_flags, error2); + return (error2); + } if (slpflag == PCATCH) { slpflag = 0; slptimeo = 2 * hz; @@ -1052,35 +2368,38 @@ again: } } - if (ISSET(bp->b_flags, B_READ)) { - if (bp->b_rcred == NOCRED && cred != NOCRED) { /* * NFS has embedded ucred.
* Can not crhold() here as that causes zone corruption */ - bp->b_rcred = crdup(cred); + bp->nb_rcred = crdup(cred); } } else { - SET(bp->b_flags, B_WRITEINPROG); - if (bp->b_wcred == NOCRED && cred != NOCRED) { + SET(bp->nb_flags, NB_WRITEINPROG); + if (bp->nb_wcred == NOCRED && cred != NOCRED) { /* * NFS has embedded ucred. * Can not crhold() here as that causes zone corruption */ - bp->b_wcred = crdup(cred); + bp->nb_wcred = crdup(cred); } } - TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist); + TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, nb_free); nmp->nm_bufqlen++; + FSDBG_BOT(552, bp, NBOFF(bp), bp->nb_flags, 0); return (0); } +out: /* * All the iods are busy on other mounts, so return EIO to * force the caller to process the i/o synchronously. */ NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n")); + FSDBG_BOT(552, bp, NBOFF(bp), bp->nb_flags, EIO); return (EIO); } @@ -1090,7 +2409,7 @@ again: */ int nfs_doio(bp, cr, p) - register struct buf *bp; + struct nfsbuf *bp; struct ucred *cr; struct proc *p; { @@ -1102,7 +2421,7 @@ nfs_doio(bp, cr, p) struct uio uio; struct iovec io; - vp = bp->b_vp; + vp = bp->nb_vp; np = VTONFS(vp); nmp = VFSTONFS(vp->v_mount); uiop = &uio; @@ -1111,66 +2430,34 @@ nfs_doio(bp, cr, p) uiop->uio_segflg = UIO_SYSSPACE; uiop->uio_procp = p; - /* - * With UBC, getblk() can return a buf with B_DONE set. - * This indicates that the VM has valid data for that page. - * NFS being stateless, this case poses a problem. - * By definition, the NFS server should always be consulted - * for the data in that page. - * So we choose to clear the B_DONE and to do the IO. - * - * XXX revisit this if there is a performance issue. - * XXX In that case, we could play the attribute cache games ... + /* + * we've decided to perform I/O for this block, + * so we couldn't possibly NB_DONE. So, clear it. */ - if (ISSET(bp->b_flags, B_DONE)) { - if (!ISSET(bp->b_flags, B_ASYNC)) + if (ISSET(bp->nb_flags, NB_DONE)) { + if (!ISSET(bp->nb_flags, NB_ASYNC)) panic("nfs_doio: done and not async"); - CLR(bp->b_flags, B_DONE); + CLR(bp->nb_flags, NB_DONE); } - FSDBG_TOP(256, np->n_size, bp->b_blkno * DEV_BSIZE, bp->b_bcount, - bp->b_flags); - FSDBG(257, bp->b_validoff, bp->b_validend, bp->b_dirtyoff, - bp->b_dirtyend); - /* - * Historically, paging was done with physio, but no more. - */ - if (ISSET(bp->b_flags, B_PHYS)) { - /* - * ...though reading /dev/drum still gets us here. 
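The credential hand-off above follows one pattern for both directions; a condensed sketch (ours, not the verbatim patch):

if (ISSET(bp->nb_flags, NB_READ)) {
	if (bp->nb_rcred == NOCRED && cred != NOCRED)
		bp->nb_rcred = crdup(cred);   /* private copy, not crhold() */
} else {
	SET(bp->nb_flags, NB_WRITEINPROG);
	if (bp->nb_wcred == NOCRED && cred != NOCRED)
		bp->nb_wcred = crdup(cred);   /* private copy, not crhold() */
}

crdup() is used instead of a crhold() reference because, per the comment above, the NFS code embeds the ucred and a shared reference risks zone corruption.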
- */ - io.iov_len = uiop->uio_resid = bp->b_bcount; - /* mapping was done by vmapbuf() */ - io.iov_base = bp->b_data; - uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE; - if (ISSET(bp->b_flags, B_READ)) { - uiop->uio_rw = UIO_READ; - nfsstats.read_physios++; - error = nfs_readrpc(vp, uiop, cr); - } else { - int com; - - iomode = NFSV3WRITE_DATASYNC; - uiop->uio_rw = UIO_WRITE; - nfsstats.write_physios++; - error = nfs_writerpc(vp, uiop, cr, &iomode, &com); - } - if (error) { - SET(bp->b_flags, B_ERROR); - bp->b_error = error; - } - } else if (ISSET(bp->b_flags, B_READ)) { - io.iov_len = uiop->uio_resid = bp->b_bcount; - io.iov_base = bp->b_data; + FSDBG_TOP(256, np->n_size, NBOFF(bp), bp->nb_bufsize, bp->nb_flags); + FSDBG(257, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, + bp->nb_dirtyend); + + if (ISSET(bp->nb_flags, NB_READ)) { + if (vp->v_type == VREG) + NFS_BUF_MAP(bp); + io.iov_len = uiop->uio_resid = bp->nb_bufsize; + io.iov_base = bp->nb_data; uiop->uio_rw = UIO_READ; switch (vp->v_type) { case VREG: - uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE; + uiop->uio_offset = NBOFF(bp); nfsstats.read_bios++; error = nfs_readrpc(vp, uiop, cr); - FSDBG(262, np->n_size, bp->b_blkno * DEV_BSIZE, - uiop->uio_resid, error); + FSDBG(262, np->n_size, NBOFF(bp), uiop->uio_resid, error); if (!error) { - bp->b_validoff = 0; + /* update valid range */ + bp->nb_validoff = 0; if (uiop->uio_resid) { /* * If len > 0, there is a hole in the file and @@ -1178,33 +2465,26 @@ nfs_doio(bp, cr, p) * the server yet. * Just zero fill the rest of the valid area. */ - diff = bp->b_bcount - uiop->uio_resid; - len = np->n_size - ((u_quad_t)bp->b_blkno * DEV_BSIZE + - diff); + diff = bp->nb_bufsize - uiop->uio_resid; + len = np->n_size - (NBOFF(bp) + diff); if (len > 0) { len = min(len, uiop->uio_resid); - bzero((char *)bp->b_data + diff, len); - bp->b_validend = diff + len; + bzero((char *)bp->nb_data + diff, len); + bp->nb_validend = diff + len; FSDBG(258, diff, len, 0, 1); } else - bp->b_validend = diff; + bp->nb_validend = diff; } else - bp->b_validend = bp->b_bcount; - - if (bp->b_validend < bp->b_bufsize) { - /* - * we're about to release a partial buffer after a - * read... 
the only way we should get here is if - * this buffer contains the EOF before releasing it, - * we'll zero out to the end of the buffer so that - * if a mmap of this page occurs, we'll see zero's - * even if a ftruncate extends the file in the - * meantime - */ - bzero((caddr_t)(bp->b_data + bp->b_validend), - bp->b_bufsize - bp->b_validend); - FSDBG(258, bp->b_validend, - bp->b_bufsize - bp->b_validend, 0, 2); + bp->nb_validend = bp->nb_bufsize; + bp->nb_valid = (1 << (round_page_32(bp->nb_validend)/PAGE_SIZE)) - 1; + if (bp->nb_validend & PAGE_MASK) { + /* valid range ends in the middle of a page so we */ + /* need to zero-fill any invalid data at the end */ + /* of the last page */ + bzero((caddr_t)(bp->nb_data + bp->nb_validend), + bp->nb_bufsize - bp->nb_validend); + FSDBG(258, bp->nb_validend, + bp->nb_bufsize - bp->nb_validend, 0, 2); } } if (p && (vp->v_flag & VTEXT) && @@ -1222,10 +2502,14 @@ nfs_doio(bp, cr, p) uiop->uio_offset = (off_t)0; nfsstats.readlink_bios++; error = nfs_readlinkrpc(vp, uiop, cr); + if (!error) { + bp->nb_validoff = 0; + bp->nb_validend = uiop->uio_offset; + } break; case VDIR: nfsstats.readdir_bios++; - uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ; + uiop->uio_offset = NBOFF(bp); if (!(nmp->nm_flag & NFSMNT_NFSV3)) nmp->nm_flag &= ~NFSMNT_RDIRPLUS; /* dk@farm.org */ if (nmp->nm_flag & NFSMNT_RDIRPLUS) { @@ -1235,151 +2519,276 @@ nfs_doio(bp, cr, p) } if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0) error = nfs_readdirrpc(vp, uiop, cr); + if (!error) { + bp->nb_validoff = 0; + bp->nb_validend = uiop->uio_offset - NBOFF(bp); + bp->nb_valid = (1 << (round_page_32(bp->nb_validend)/PAGE_SIZE)) - 1; + } break; default: printf("nfs_doio: type %x unexpected\n", vp->v_type); break; }; if (error) { - SET(bp->b_flags, B_ERROR); - bp->b_error = error; + SET(bp->nb_flags, NB_ERROR); + bp->nb_error = error; } + } else { + /* we're doing a write */ + int doff, dend = 0; + + /* We need to make sure the pages are locked before doing I/O. */ + if (!ISSET(bp->nb_flags, NB_META) && UBCISVALID(vp)) { + if (!ISSET(bp->nb_flags, NB_PAGELIST)) { + error = nfs_buf_upl_setup(bp); + if (error) { + printf("nfs_doio: upl create failed %d\n", error); + SET(bp->nb_flags, NB_ERROR); + bp->nb_error = EIO; + return (EIO); + } + nfs_buf_upl_check(bp); + } + } + + if (ISSET(bp->nb_flags, NB_WASDIRTY)) { + FSDBG(256, bp, NBOFF(bp), bp->nb_dirty, 0xd00dee); + /* + * There are pages marked dirty that need to be written out. + * + * We don't want to just combine the write range with the + * range of pages that are dirty because that could cause us + * to write data that wasn't actually written to. + * We also don't want to write data more than once. + * + * If the dirty range just needs to be committed, we do that. + * Otherwise, we write the dirty range and clear the dirty bits + * for any COMPLETE pages covered by that range. + * If there are dirty pages left after that, we write out the + * parts that we haven't written yet. + */ + } + /* - * mapped I/O may have altered any bytes, so we extend - * the dirty zone to the valid zone. For best performance - * a better solution would be to save & restore page dirty bits - * around the uiomove which brings write-data into the buffer. - * Then here we'd check if the page is dirty rather than WASMAPPED - * Also vnode_pager would change - if a page is clean it might - * still need to be written due to DELWRI. + * If NB_NEEDCOMMIT is set, a commit rpc may do the trick. If not + * an actual write will have to be done. 
+ * If NB_WRITEINPROG is already set, then push it with a write anyhow. */ - if (UBCINFOEXISTS(vp) && ubc_issetflags(vp, UI_WASMAPPED)) { - bp->b_dirtyoff = min(bp->b_dirtyoff, bp->b_validoff); - bp->b_dirtyend = max(bp->b_dirtyend, bp->b_validend); + if ((bp->nb_flags & (NB_NEEDCOMMIT | NB_WRITEINPROG)) == NB_NEEDCOMMIT) { + doff = NBOFF(bp) + bp->nb_dirtyoff; + SET(bp->nb_flags, NB_WRITEINPROG); + error = nfs_commit(vp, doff, bp->nb_dirtyend - bp->nb_dirtyoff, + bp->nb_wcred, bp->nb_proc); + CLR(bp->nb_flags, NB_WRITEINPROG); + if (!error) { + bp->nb_dirtyoff = bp->nb_dirtyend = 0; + CLR(bp->nb_flags, NB_NEEDCOMMIT); + np->n_needcommitcnt--; + CHECK_NEEDCOMMITCNT(np); + } else if (error == NFSERR_STALEWRITEVERF) + nfs_clearcommit(vp->v_mount); } - if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size) - bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE; - - if (bp->b_dirtyend > bp->b_dirtyoff) { - io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff; - uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE + - bp->b_dirtyoff; - io.iov_base = (char *)bp->b_data + bp->b_dirtyoff; - uiop->uio_rw = UIO_WRITE; - nfsstats.write_bios++; - if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE)) == - B_ASYNC) + if (!error && bp->nb_dirtyend > 0) { + /* there's a dirty range that needs to be written out */ + u_int32_t pagemask; + int firstpg, lastpg; + + if (NBOFF(bp) + bp->nb_dirtyend > np->n_size) + bp->nb_dirtyend = np->n_size - NBOFF(bp); + + NFS_BUF_MAP(bp); + + doff = bp->nb_dirtyoff; + dend = bp->nb_dirtyend; + + /* if doff page is dirty, move doff to start of page */ + if (NBPGDIRTY(bp,doff/PAGE_SIZE)) + doff -= doff & PAGE_MASK; + /* try to expand write range to include preceding dirty pages */ + if (!(doff & PAGE_MASK)) + while (doff > 0 && NBPGDIRTY(bp,(doff-1)/PAGE_SIZE)) + doff -= PAGE_SIZE; + /* if dend page is dirty, move dend to start of next page */ + if ((dend & PAGE_MASK) && NBPGDIRTY(bp,dend/PAGE_SIZE)) + dend = round_page_32(dend); + /* try to expand write range to include trailing dirty pages */ + if (!(dend & PAGE_MASK)) + while (dend < bp->nb_bufsize && NBPGDIRTY(bp,dend/PAGE_SIZE)) + dend += PAGE_SIZE; + /* make sure to keep dend clipped to EOF */ + if (NBOFF(bp) + dend > np->n_size) + dend = np->n_size - NBOFF(bp); + /* calculate range of complete pages being written */ + firstpg = round_page_32(doff) / PAGE_SIZE; + lastpg = (trunc_page_32(dend) - 1)/ PAGE_SIZE; + /* calculate mask for that page range */ + pagemask = ((1 << (lastpg+1)) - 1) & ~((1 << firstpg) - 1); + + /* compare page mask to nb_dirty; if there are other dirty pages */ + /* then write FILESYNC; otherwise, write UNSTABLE if async and */ + /* not needcommit/nocache/call; otherwise write FILESYNC */ + if (bp->nb_dirty & ~pagemask) + iomode = NFSV3WRITE_FILESYNC; + else if ((bp->nb_flags & (NB_ASYNC | NB_NEEDCOMMIT | NB_NOCACHE | NB_STABLE)) == NB_ASYNC) iomode = NFSV3WRITE_UNSTABLE; else iomode = NFSV3WRITE_FILESYNC; - SET(bp->b_flags, B_WRITEINPROG); + + /* write the dirty range */ + io.iov_len = uiop->uio_resid = dend - doff; + uiop->uio_offset = NBOFF(bp) + doff; + io.iov_base = (char *)bp->nb_data + doff; + uiop->uio_rw = UIO_WRITE; + + nfsstats.write_bios++; + + SET(bp->nb_flags, NB_WRITEINPROG); error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit); - if (!error && iomode == NFSV3WRITE_UNSTABLE) - SET(bp->b_flags, B_NEEDCOMMIT); - else - CLR(bp->b_flags, B_NEEDCOMMIT); - CLR(bp->b_flags, B_WRITEINPROG); + if (must_commit) + nfs_clearcommit(vp->v_mount); + /* clear dirty 
bits for pages we've written */ + if (!error) + bp->nb_dirty &= ~pagemask; + /* set/clear needcommit flag */ + if (!error && iomode == NFSV3WRITE_UNSTABLE) { + if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT)) + np->n_needcommitcnt++; + SET(bp->nb_flags, NB_NEEDCOMMIT); + /* make sure nb_dirtyoff/nb_dirtyend reflect actual range written */ + bp->nb_dirtyoff = doff; + bp->nb_dirtyend = dend; + } else { + if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { + np->n_needcommitcnt--; + CHECK_NEEDCOMMITCNT(np); + } + CLR(bp->nb_flags, NB_NEEDCOMMIT); + } + CLR(bp->nb_flags, NB_WRITEINPROG); /* - * For an interrupted write, the buffer is still valid - * and the write hasn't been pushed to the server yet, - * so we can't set B_ERROR and report the interruption - * by setting B_EINTR. For the B_ASYNC case, B_EINTR - * is not relevant, so the rpc attempt is essentially - * a noop. For the case of a V3 write rpc not being - * committed to stable storage, the block is still - * dirty and requires either a commit rpc or another - * write rpc with iomode == NFSV3WRITE_FILESYNC before - * the block is reused. This is indicated by setting - * the B_DELWRI and B_NEEDCOMMIT flags. + * For an interrupted write, the buffer is still valid and the write + * hasn't been pushed to the server yet, so we can't set NB_ERROR and + * report the interruption by setting NB_EINTR. For the NB_ASYNC case, + * NB_EINTR is not relevant. + * + * For the case of a V3 write rpc not being committed to stable + * storage, the block is still dirty and requires either a commit rpc + * or another write rpc with iomode == NFSV3WRITE_FILESYNC before the + * block is reused. This is indicated by setting the NB_DELWRI and + * NB_NEEDCOMMIT flags. */ - if (error == EINTR || (!error && bp->b_flags & B_NEEDCOMMIT)) { - int s; - - CLR(bp->b_flags, B_INVAL | B_NOCACHE); - if (!ISSET(bp->b_flags, B_DELWRI)) { - SET(bp->b_flags, B_DELWRI); - nbdwrite++; - } - FSDBG(261, bp->b_validoff, bp->b_validend, - bp->b_bufsize, bp->b_bcount); - /* - * Since for the B_ASYNC case, nfs_bwrite() has - * reassigned the buffer to the clean list, we have to - * reassign it back to the dirty one. Ugh. - */ - if (ISSET(bp->b_flags, B_ASYNC)) { - s = splbio(); - reassignbuf(bp, vp); - splx(s); - } else { - SET(bp->b_flags, B_EINTR); - } + if (error == EINTR || (!error && bp->nb_flags & NB_NEEDCOMMIT)) { + CLR(bp->nb_flags, NB_INVAL | NB_NOCACHE); + if (!ISSET(bp->nb_flags, NB_DELWRI)) { + SET(bp->nb_flags, NB_DELWRI); + nfs_nbdwrite++; + NFSBUFCNTCHK(); + } + FSDBG(261, bp->nb_validoff, bp->nb_validend, + bp->nb_bufsize, 0); + /* + * Since for the NB_ASYNC case, nfs_bwrite() has + * reassigned the buffer to the clean list, we have to + * reassign it back to the dirty one. Ugh. + */ + if (ISSET(bp->nb_flags, NB_ASYNC)) { + /* move to dirty list */ + int s = splbio(); + if (bp->nb_vnbufs.le_next != NFSNOLIST) + LIST_REMOVE(bp, nb_vnbufs); + LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); + splx(s); + } else { + SET(bp->nb_flags, NB_EINTR); + } } else { + /* either there's an error or we don't need to commit */ if (error) { - SET(bp->b_flags, B_ERROR); - bp->b_error = np->n_error = error; - np->n_flag |= NWRITEERR; - } - bp->b_dirtyoff = bp->b_dirtyend = 0; - - /* - * validoff and validend represent the real data present - * in this buffer if validoff is non-zero, than we have - * to invalidate the buffer and kill the page when - * biodone is called... 
the same is also true when - * validend doesn't extend all the way to the end of the - * buffer and validend doesn't equate to the current - * EOF... eventually we need to deal with this in a more - * humane way (like keeping the partial buffer without - * making it immediately available to the VM page cache) - */ - if (bp->b_validoff) - SET(bp->b_flags, B_INVAL); - else - if (bp->b_validend < bp->b_bufsize) { - if ((off_t)bp->b_blkno * DEV_BSIZE + - bp->b_validend == np->n_size) { - bzero((caddr_t)(bp->b_data + - bp->b_validend), - bp->b_bufsize - bp->b_validend); - FSDBG(259, bp->b_validend, - bp->b_bufsize - bp->b_validend, 0, - 0); - } else - SET(bp->b_flags, B_INVAL); + SET(bp->nb_flags, NB_ERROR); + bp->nb_error = np->n_error = error; + np->n_flag |= NWRITEERR; } + /* clear the dirty range */ + bp->nb_dirtyoff = bp->nb_dirtyend = 0; } + } + + if (!error && bp->nb_dirty) { + /* there are pages marked dirty that need to be written out */ + int pg, cnt, npages, off, len; + + nfsstats.write_bios++; - } else { - if (bp->b_validoff || - (bp->b_validend < bp->b_bufsize && - (off_t)bp->b_blkno * DEV_BSIZE + bp->b_validend != - np->n_size)) { - SET(bp->b_flags, B_INVAL); + NFS_BUF_MAP(bp); + + /* + * we do these writes synchronously because we can't really + * support the unstable/needcommit method. We could write + * them unstable, clear the dirty bits, and then commit the + * whole block later, but if we need to rewrite the data, we + * won't have any idea which pages were written because that + * info can't be stored in the nb_dirtyoff/nb_dirtyend. We + * also can't leave the dirty bits set because then we wouldn't + * be able to tell if the pages were re-dirtied between the end + * of the write and the commit. + */ + iomode = NFSV3WRITE_FILESYNC; + uiop->uio_rw = UIO_WRITE; + + SET(bp->nb_flags, NB_WRITEINPROG); + npages = bp->nb_bufsize/PAGE_SIZE; + for (pg=0; pg < npages; pg++) { + if (!NBPGDIRTY(bp,pg)) + continue; + cnt = 1; + while (((pg+cnt) < npages) && NBPGDIRTY(bp,pg+cnt)) + cnt++; + /* write cnt pages starting with page pg */ + off = pg * PAGE_SIZE; + len = cnt * PAGE_SIZE; + + /* clip writes to EOF */ + if (NBOFF(bp) + off + len > np->n_size) + len -= (NBOFF(bp) + off + len) - np->n_size; + if (len > 0) { + io.iov_len = uiop->uio_resid = len; + uiop->uio_offset = NBOFF(bp) + off; + io.iov_base = (char *)bp->nb_data + off; + error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit); + if (must_commit) + nfs_clearcommit(vp->v_mount); + if (error) + break; + } + /* clear dirty bits */ + while (cnt--) { + bp->nb_dirty &= ~(1 << pg); + /* leave pg on last page */ + if (cnt) pg++; + } } - if (bp->b_flags & B_INVAL) { - FSDBG(260, bp->b_validoff, bp->b_validend, - bp->b_bufsize, bp->b_bcount); + if (!error) { + if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { + np->n_needcommitcnt--; + CHECK_NEEDCOMMITCNT(np); + } + CLR(bp->nb_flags, NB_NEEDCOMMIT); } - bp->b_resid = 0; - biodone(bp); - FSDBG_BOT(256, bp->b_validoff, bp->b_validend, bp->b_bufsize, + CLR(bp->nb_flags, NB_WRITEINPROG); + FSDBG_BOT(256, bp->nb_validoff, bp->nb_validend, bp->nb_bufsize, np->n_size); - return (0); } - } - bp->b_resid = uiop->uio_resid; - if (must_commit) - nfs_clearcommit(vp->v_mount); - if (bp->b_flags & B_INVAL) { - FSDBG(260, bp->b_validoff, bp->b_validend, bp->b_bufsize, - bp->b_bcount); + if (error) { + SET(bp->nb_flags, NB_ERROR); + bp->nb_error = error; + } } - FSDBG_BOT(256, bp->b_validoff, bp->b_validend, bp->b_bcount, error); - biodone(bp); + FSDBG_BOT(256, bp->nb_validoff, bp->nb_validend,
bp->nb_bufsize, error); + + nfs_buf_iodone(bp); return (error); } diff --git a/bsd/nfs/nfs_boot.c b/bsd/nfs/nfs_boot.c index 1a21bbee0..a0d9245a6 100644 --- a/bsd/nfs/nfs_boot.c +++ b/bsd/nfs/nfs_boot.c @@ -208,7 +208,7 @@ nfs_boot_init(nd, procp) if (netboot_iaddr(&my_ip) == FALSE) { printf("nfs_boot: networking is not initialized\n"); error = ENXIO; - goto failed; + goto failed_noswitch; } /* get the root path information */ @@ -289,9 +289,10 @@ nfs_boot_init(nd, procp) else { error = 0; } -#endif NO_MOUNT_PRIVATE - failed: +#endif /* NO_MOUNT_PRIVATE */ +failed: thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); +failed_noswitch: return (error); } @@ -328,7 +329,7 @@ nfs_boot_getfh(nd, procp, v3) goto failed; } } -#endif NO_MOUNT_PRIVATE +#endif /* NO_MOUNT_PRIVATE */ failed: thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); return (error); diff --git a/bsd/nfs/nfs_lock.c b/bsd/nfs/nfs_lock.c new file mode 100644 index 000000000..4edfce39a --- /dev/null +++ b/bsd/nfs/nfs_lock.c @@ -0,0 +1,512 @@ +/* + * Copyright (c) 2002-2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Berkeley Software Design Inc's name may not be used to endorse or + * promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
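Backing up to the nfs_boot.c hunk above: the new failed_noswitch label exists so the early failure, taken before the function has moved onto the network funnel, does not switch funnels on the way out. A sketch of the resulting shape (ours, inferred from the label placement):

	if (netboot_iaddr(&my_ip) == FALSE) {
		error = ENXIO;
		goto failed_noswitch;	/* never entered the network funnel */
	}
	/* ... work done under the network funnel ... */
failed:
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
failed_noswitch:
	return (error);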
IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from BSDI nfs_lock.c,v 2.4 1998/12/14 23:49:56 jch Exp + */ + +#include +#include +#include +#include +#include /* for hz */ +#include +#include +#include +#include /* for hz */ /* Must come after sys/malloc.h */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#define OFF_MAX QUAD_MAX + +uint64_t nfsadvlocks = 0; +struct timeval nfsadvlock_longest = {0, 0}; +struct timeval nfsadvlocks_time = {0, 0}; + +pid_t nfslockdpid = 0; +struct file *nfslockdfp = 0; +int nfslockdwaiting = 0; +int nfslockdfifowritten = 0; +int nfslockdfifolock = 0; +#define NFSLOCKDFIFOLOCK_LOCKED 1 +#define NFSLOCKDFIFOLOCK_WANT 2 + +/* + * XXX + * We have to let the process know if the call succeeded. I'm using an extra + * field in the uu_nlminfo field in the uthread structure, as it is already for + * lockd stuff. + */ + +/* + * nfs_advlock -- + * NFS advisory byte-level locks. + */ +int +nfs_dolock(struct vop_advlock_args *ap) +/* struct vop_advlock_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + caddr_t a_id; + int a_op; + struct flock *a_fl; + int a_flags; +}; */ +{ + LOCKD_MSG msg; + struct nameidata nd; + struct vnode *vp, *wvp; + struct nfsnode *np; + int error, error1; + struct flock *fl; + int fmode, ioflg; + struct proc *p; + struct uthread *ut; + struct timeval elapsed; + struct nfsmount *nmp; + struct vattr vattr; + off_t start, end; + + ut = get_bsdthread_info(current_act()); + p = current_proc(); + + vp = ap->a_vp; + fl = ap->a_fl; + np = VTONFS(vp); + + nmp = VFSTONFS(vp->v_mount); + if (!nmp) + return (ENXIO); + if (nmp->nm_flag & NFSMNT_NOLOCKS) + return (EOPNOTSUPP); + + /* + * The NLM protocol doesn't allow the server to return an error + * on ranges, so we do it. Pre LFS (Large File Summit) + * standards required EINVAL for the range errors. More recent + * standards use EOVERFLOW, but their EINVAL wording still + * encompasses these errors. + * Any code sensitive to this is either: + * 1) written pre-LFS and so can handle only EINVAL, or + * 2) written post-LFS and thus ought to be tolerant of pre-LFS + * implementations. + * Since returning EOVERFLOW certainly breaks 1), we return EINVAL. + */ + if (fl->l_whence != SEEK_END) { + if ((fl->l_whence != SEEK_CUR && fl->l_whence != SEEK_SET) || + fl->l_start < 0 || + (fl->l_len > 0 && fl->l_len - 1 > OFF_MAX - fl->l_start) || + (fl->l_len < 0 && fl->l_start + fl->l_len < 0)) + return (EINVAL); + } + /* + * If daemon is running take a ref on its fifo + */ + if (!nfslockdfp || !(wvp = (struct vnode *)nfslockdfp->f_data)) { + if (!nfslockdwaiting) + return (EOPNOTSUPP); + /* + * Don't wake lock daemon if it hasn't been started yet and + * this is an unlock request (since we couldn't possibly + * actually have a lock on the file). 
This could be an + * uninformed unlock request due to closef()'s behavior of doing + * unlocks on all files if a process has had a lock on ANY file. + */ + if (!nfslockdfp && (fl->l_type == F_UNLCK)) + return (EINVAL); + /* wake up lock daemon */ + (void)wakeup((void *)&nfslockdwaiting); + /* wait on nfslockdfp for a while to allow daemon to start */ + tsleep((void *)&nfslockdfp, PCATCH | PUSER, "lockd", 60*hz); + /* check for nfslockdfp and f_data */ + if (!nfslockdfp || !(wvp = (struct vnode *)nfslockdfp->f_data)) + return (EOPNOTSUPP); + } + VREF(wvp); + /* + * if there is no nfsowner table yet, allocate one. + */ + if (ut->uu_nlminfo == NULL) { + if (ap->a_op == F_UNLCK) { + vrele(wvp); + return (0); + } + MALLOC(ut->uu_nlminfo, struct nlminfo *, + sizeof(struct nlminfo), M_LOCKF, M_WAITOK | M_ZERO); + ut->uu_nlminfo->pid_start = p->p_stats->p_start; + } + /* + * Fill in the information structure. + */ + msg.lm_version = LOCKD_MSG_VERSION; + msg.lm_msg_ident.pid = p->p_pid; + msg.lm_msg_ident.ut = ut; + msg.lm_msg_ident.pid_start = ut->uu_nlminfo->pid_start; + msg.lm_msg_ident.msg_seq = ++(ut->uu_nlminfo->msg_seq); + + /* + * The NFS Lock Manager protocol doesn't directly handle + * negative lengths or SEEK_END, so we need to normalize + * things here where we have all the info. + * (Note: SEEK_CUR is already adjusted for at this point) + */ + /* Convert the flock structure into a start and end. */ + switch (fl->l_whence) { + case SEEK_SET: + case SEEK_CUR: + /* + * Caller is responsible for adding any necessary offset + * to fl->l_start when SEEK_CUR is used. + */ + start = fl->l_start; + break; + case SEEK_END: + /* need to flush, and refetch attributes to make */ + /* sure we have the correct end of file offset */ + if (np->n_flag & NMODIFIED) { + np->n_attrstamp = 0; + error = nfs_vinvalbuf(vp, V_SAVE, p->p_ucred, p, 1); + if (error) { + vrele(wvp); + return (error); + } + } + np->n_attrstamp = 0; + error = VOP_GETATTR(vp, &vattr, p->p_ucred, p); + if (error) { + vrele(wvp); + return (error); + } + start = np->n_size + fl->l_start; + break; + default: + vrele(wvp); + return (EINVAL); + } + if (fl->l_len == 0) + end = -1; + else if (fl->l_len > 0) + end = start + fl->l_len - 1; + else { /* l_len is negative */ + end = start - 1; + start += fl->l_len; + } + if (start < 0) { + vrele(wvp); + return (EINVAL); + } + + msg.lm_fl = *fl; + msg.lm_fl.l_start = start; + if (end != -1) + msg.lm_fl.l_len = end - start + 1; + + msg.lm_wait = ap->a_flags & F_WAIT; + msg.lm_getlk = ap->a_op == F_GETLK; + + nmp = VFSTONFS(vp->v_mount); + if (!nmp) { + vrele(wvp); + return (ENXIO); + } + + bcopy(mtod(nmp->nm_nam, struct sockaddr *), &msg.lm_addr, + min(sizeof msg.lm_addr, + mtod(nmp->nm_nam, struct sockaddr *)->sa_len)); + msg.lm_fh_len = NFS_ISV3(vp) ? 
VTONFS(vp)->n_fhsize : NFSX_V2FH; + bcopy(VTONFS(vp)->n_fhp, msg.lm_fh, msg.lm_fh_len); + msg.lm_nfsv3 = NFS_ISV3(vp); + cru2x(p->p_ucred, &msg.lm_cred); + + microuptime(&ut->uu_nlminfo->nlm_lockstart); + + fmode = FFLAGS(O_WRONLY); + if ((error = VOP_OPEN(wvp, fmode, kernproc->p_ucred, p))) { + vrele(wvp); + return (error); + } + ++wvp->v_writecount; + +#define IO_NOMACCHECK 0; + ioflg = IO_UNIT | IO_NOMACCHECK; + for (;;) { + VOP_LEASE(wvp, p, kernproc->p_ucred, LEASE_WRITE); + + while (nfslockdfifolock & NFSLOCKDFIFOLOCK_LOCKED) { + nfslockdfifolock |= NFSLOCKDFIFOLOCK_WANT; + if (tsleep((void *)&nfslockdfifolock, PCATCH | PUSER, "lockdfifo", 20*hz)) + break; + } + nfslockdfifolock |= NFSLOCKDFIFOLOCK_LOCKED; + + error = vn_rdwr(UIO_WRITE, wvp, (caddr_t)&msg, sizeof(msg), 0, + UIO_SYSSPACE, ioflg, kernproc->p_ucred, NULL, p); + + nfslockdfifowritten = 1; + + nfslockdfifolock &= ~NFSLOCKDFIFOLOCK_LOCKED; + if (nfslockdfifolock & NFSLOCKDFIFOLOCK_WANT) { + nfslockdfifolock &= ~NFSLOCKDFIFOLOCK_WANT; + wakeup((void *)&nfslockdfifolock); + } + /* wake up lock daemon */ + if (nfslockdwaiting) + (void)wakeup((void *)&nfslockdwaiting); + + if (error && (((ioflg & IO_NDELAY) == 0) || error != EAGAIN)) { + break; + } + /* + * If we're locking a file, wait for an answer. Unlocks succeed + * immediately. + */ + if (fl->l_type == F_UNLCK) + /* + * XXX this isn't exactly correct. The client side + * needs to continue sending it's unlock until + * it gets a response back. + */ + break; + + /* + * retry after 20 seconds if we haven't gotten a response yet. + * This number was picked out of thin air... but is longer + * then even a reasonably loaded system should take (at least + * on a local network). XXX Probably should use a back-off + * scheme. + */ + if ((error = tsleep((void *)ut->uu_nlminfo, + PCATCH | PUSER, "lockd", 20*hz)) != 0) { + if (error == EWOULDBLOCK) { + /* + * We timed out, so we rewrite the request + * to the fifo, but only if it isn't already + * full. + */ + ioflg |= IO_NDELAY; + continue; + } + + break; + } + + if (msg.lm_getlk && ut->uu_nlminfo->retcode == 0) { + if (ut->uu_nlminfo->set_getlk) { + fl->l_pid = ut->uu_nlminfo->getlk_pid; + fl->l_start = ut->uu_nlminfo->getlk_start; + fl->l_len = ut->uu_nlminfo->getlk_len; + fl->l_whence = SEEK_SET; + } else { + fl->l_type = F_UNLCK; + } + } + error = ut->uu_nlminfo->retcode; + break; + } + + /* XXX stats */ + nfsadvlocks++; + microuptime(&elapsed); + timevalsub(&elapsed, &ut->uu_nlminfo->nlm_lockstart); + if (timevalcmp(&elapsed, &nfsadvlock_longest, >)) + nfsadvlock_longest = elapsed; + timevaladd(&nfsadvlocks_time, &elapsed); + timerclear(&ut->uu_nlminfo->nlm_lockstart); + + error1 = vn_close(wvp, FWRITE, kernproc->p_ucred, p); + /* prefer any previous 'error' to our vn_close 'error1'. */ + return (error != 0 ? error : error1); +} + +/* + * nfslockdans -- + * NFS advisory byte-level locks answer from the lock daemon. + */ +int +nfslockdans(struct proc *p, struct lockd_ans *ansp) +{ + struct proc *targetp; + struct uthread *targetut, *uth; + int error; + + /* + * Let root, or someone who once was root (lockd generally + * switches to the daemon uid once it is done setting up) make + * this call. + * + * XXX This authorization check is probably not right. 
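The range handling above is the heart of nfs_dolock(): the three l_whence origins and a possibly negative l_len are folded into the inclusive [start, end] byte range carried by the LOCKD_MSG, because the NLM protocol understands neither SEEK_END nor negative lengths. Below is a minimal, userland-compilable sketch of that conversion; the helper name is hypothetical, any SEEK_CUR offset is assumed to be already folded into l_start, and file_size stands in for the freshly refetched size the kernel uses for SEEK_END.

#include <sys/types.h>
#include <fcntl.h>		/* struct flock */
#include <unistd.h>		/* SEEK_SET, SEEK_CUR, SEEK_END */
#include <errno.h>

/*
 * Hypothetical helper mirroring nfs_dolock()'s normalization of a
 * struct flock into an inclusive [start, end] byte range.
 * end == -1 means "lock extends to end of file".
 */
static int
flock_to_range(const struct flock *fl, off_t file_size,
    off_t *startp, off_t *endp)
{
	off_t start, end;

	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		start = fl->l_start;
		break;
	case SEEK_END:
		start = file_size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (fl->l_len == 0)
		end = -1;			/* lock to EOF */
	else if (fl->l_len > 0)
		end = start + fl->l_len - 1;
	else {					/* negative len counts backward */
		end = start - 1;
		start += fl->l_len;
	}
	if (start < 0)
		return (EINVAL);
	*startp = start;
	*endp = end;
	return (0);
}

Note how l_len == 0 becomes end == -1: the kernel code above only fills in lm_fl.l_len when end != -1, so a zero length still means "to end of file" on the wire.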
+ */ + if ((error = suser(p->p_ucred, &p->p_acflag)) != 0 && + p->p_cred->p_svuid != 0) + return (error); + + /* the version should match, or we're out of sync */ + if (ansp->la_vers != LOCKD_ANS_VERSION) + return (EINVAL); + + /* Find the process & thread */ + if ((targetp = pfind(ansp->la_msg_ident.pid)) == NULL) + return (ESRCH); + targetut = ansp->la_msg_ident.ut; + TAILQ_FOREACH(uth, &targetp->p_uthlist, uu_list) { + if (uth == targetut) + break; + } + /* + * Verify the pid hasn't been reused (if we can), and it isn't waiting + * for an answer from a more recent request. We return an EPIPE if + * the match fails, because we've already used ESRCH above, and this + * is sort of like writing on a pipe after the reader has closed it. + * If only the seq# is off, don't return an error just return. It could + * just be a response to a retransmitted request. + */ + if (uth == NULL || uth != targetut || targetut->uu_nlminfo == NULL) + return (EPIPE); + if (ansp->la_msg_ident.msg_seq != -1) { + if (timevalcmp(&targetut->uu_nlminfo->pid_start, + &ansp->la_msg_ident.pid_start, !=)) + return (EPIPE); + if (targetut->uu_nlminfo->msg_seq != ansp->la_msg_ident.msg_seq) + return (0); + } + + /* Found the thread, so set its return errno and wake it up. */ + + targetut->uu_nlminfo->retcode = ansp->la_errno; + targetut->uu_nlminfo->set_getlk = ansp->la_getlk_set; + targetut->uu_nlminfo->getlk_pid = ansp->la_getlk_pid; + targetut->uu_nlminfo->getlk_start = ansp->la_getlk_start; + targetut->uu_nlminfo->getlk_len = ansp->la_getlk_len; + + (void)wakeup((void *)targetut->uu_nlminfo); + + return (0); +} + +/* + * nfslockdfd -- + * NFS advisory byte-level locks: fifo file# from the lock daemon. + */ +int +nfslockdfd(struct proc *p, int fd) +{ + int error; + struct file *fp, *ofp; + + error = suser(p->p_ucred, &p->p_acflag); + if (error) + return (error); + if (fd < 0) { + fp = 0; + } else { + error = getvnode(p, fd, &fp); + if (error) + return (error); + (void)fref(fp); + } + ofp = nfslockdfp; + nfslockdfp = fp; + if (ofp) + (void)frele(ofp); + nfslockdpid = nfslockdfp ? p->p_pid : 0; + (void)wakeup((void *)&nfslockdfp); + return (0); +} + +/* + * nfslockdwait -- + * lock daemon waiting for lock request + */ +int +nfslockdwait(struct proc *p) +{ + int error; + struct file *fp, *ofp; + + if (p->p_pid != nfslockdpid) { + error = suser(p->p_ucred, &p->p_acflag); + if (error) + return (error); + } + if (nfslockdwaiting) + return (EBUSY); + if (nfslockdfifowritten) { + nfslockdfifowritten = 0; + return (0); + } + + nfslockdwaiting = 1; + tsleep((void *)&nfslockdwaiting, PCATCH | PUSER, "lockd", 0); + nfslockdwaiting = 0; + + return (0); +} diff --git a/bsd/nfs/nfs_lock.h b/bsd/nfs/nfs_lock.h new file mode 100644 index 000000000..f7f77da19 --- /dev/null +++ b/bsd/nfs/nfs_lock.h @@ -0,0 +1,102 @@ +/*- + * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Berkeley Software Design Inc's name may not be used to endorse or + * promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from nfs_lock.h,v 2.2 1998/04/28 19:38:41 don Exp + * $FreeBSD$ + */ + +#include + +#ifdef __APPLE_API_PRIVATE + +/* + * lockd uses the nfsclnt system call for the unique kernel services it needs. + * It passes in a request structure with a version number at the start. + * This prevents libc from needing to change if the information passed + * between lockd and the kernel needs to change. + * + * If a structure changes, you must bump the version number. + */ + +#include + +/* + * The fifo where the kernel writes requests for locks on remote NFS files, + * and where lockd reads these requests. Note this is no longer hardwired + * in the kernel binary - lockd passes the file descriptor down via nfsclnt() + */ +#define _PATH_LCKFIFO "/var/run/nfslockd" + +/* + * This structure is used to uniquely identify the process which originated + * a particular message to lockd. A sequence number is used to differentiate + * multiple messages from the same process. A process start time is used to + * detect the unlikely, but possible, event of the recycling of a pid. + */ +struct lockd_msg_ident { + pid_t pid; /* The process ID. */ + struct timeval pid_start; /* Start time of process id */ + int msg_seq; /* Sequence number of message */ + struct uthread *ut; +}; + +#define LOCKD_MSG_VERSION 2 + +/* + * The structure that the kernel hands us for each lock request. + */ +typedef struct __lock_msg { + int lm_version; /* which version is this */ + struct lockd_msg_ident lm_msg_ident; /* originator of the message */ + struct flock lm_fl; /* The lock request. */ + int lm_wait; /* The F_WAIT flag. */ + int lm_getlk; /* is this a F_GETLK request */ + struct sockaddr_storage lm_addr; /* The address. */ + int lm_nfsv3; /* If NFS version 3. */ + size_t lm_fh_len; /* The file handle length. */ + struct xucred lm_cred; /* user cred for lock req */ + u_int8_t lm_fh[NFS_SMALLFH];/* The file handle. 
*/ +} LOCKD_MSG; + +#define LOCKD_ANS_VERSION 1 + +struct lockd_ans { + int la_vers; + struct lockd_msg_ident la_msg_ident; /* originator of the message */ + int la_errno; + int la_getlk_set; /* use returned getlk values */ + int la_getlk_pid; /* returned pid for F_GETLK */ + off_t la_getlk_start; /* returned starting offset */ + off_t la_getlk_len; /* returned length */ +}; + +#ifdef KERNEL +int nfs_dolock(struct vop_advlock_args *ap); +int nfslockdans(struct proc *p, struct lockd_ans *ansp); +int nfslockdfd(struct proc *p, int fd); +int nfslockdwait(struct proc *p); +#endif +#endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/nfs/nfs_node.c b/bsd/nfs/nfs_node.c index 8a9c0835b..6c354e57e 100644 --- a/bsd/nfs/nfs_node.c +++ b/bsd/nfs/nfs_node.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -77,10 +77,6 @@ #include #include -#ifdef MALLOC_DEFINE -static MALLOC_DEFINE(M_NFSNODE, "NFS node", "NFS vnode private part"); -#endif - LIST_HEAD(nfsnodehashhead, nfsnode) *nfsnodehashtbl; u_long nfsnodehash; @@ -137,19 +133,27 @@ nfs_nget(mntp, fhp, fhsize, npp) register struct vnode *vp; struct vnode *nvp; int error; + struct mount *mp; /* Check for unmount in progress */ - if (mntp->mnt_kern_flag & MNTK_UNMOUNT) { + if (!mntp || (mntp->mnt_kern_flag & MNTK_UNMOUNT)) { *npp = 0; - return (EPERM); + return (!mntp ? ENXIO : EPERM); } nhpp = NFSNOHASH(nfs_hash(fhp, fhsize)); loop: for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) { - if (mntp != NFSTOV(np)->v_mount || np->n_fhsize != fhsize || + mp = (np->n_flag & NINIT) ? np->n_mount : NFSTOV(np)->v_mount; + if (mntp != mp || np->n_fhsize != fhsize || bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize)) continue; + /* if the node is still being initialized, sleep on it */ + if (np->n_flag & NINIT) { + np->n_flag |= NWINIT; + tsleep(np, PINOD, "nfsngt", 0); + goto loop; + } vp = NFSTOV(np); if (vget(vp, LK_EXCLUSIVE, p)) goto loop; @@ -170,29 +174,19 @@ loop: nfs_node_hash_lock = 1; /* - * Do the MALLOC before the getnewvnode since doing so afterward - * might cause a bogus v_data pointer to get dereferenced - * elsewhere if MALLOC should block. + * allocate and initialize nfsnode and stick it in the hash + * before calling getnewvnode(). Anyone finding it in the + * hash before initialization is complete will wait for it. 
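The nfs_nget() hunk that follows implements the scheme this comment describes: the new nfsnode enters the hash while flagged NINIT, and any thread that finds it there sets NWINIT and sleeps until initialization completes. Here is a simplified sketch of that handshake with hypothetical names; it assumes the kernel's tsleep()/wakeup() and PINOD priority, and it elides the hash table and the funnel serialization the real code relies on.

struct node_sketch {
	int flags;
#define	SK_INIT	0x01		/* like NINIT: node still initializing */
#define	SK_WANT	0x02		/* like NWINIT: a finder is sleeping */
};

static void
sketch_find_wait(struct node_sketch *np)
{
	/* finder: nap until the initializer clears SK_INIT */
	while (np->flags & SK_INIT) {
		np->flags |= SK_WANT;
		tsleep(np, PINOD, "nodeinit", 0);
	}
}

static void
sketch_init_done(struct node_sketch *np)
{
	/* initializer: publish completion, wake any finders */
	np->flags &= ~SK_INIT;
	if (np->flags & SK_WANT) {
		np->flags &= ~SK_WANT;
		wakeup(np);
	}
}

The payoff, visible in the hunk, is that getnewvnode() can safely block: concurrent lookups for the same file handle wait on the half-built node instead of racing to create a second one.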
*/ MALLOC_ZONE(np, struct nfsnode *, sizeof *np, M_NFSNODE, M_WAITOK); - - error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, &nvp); - if (error) { - if (nfs_node_hash_lock < 0) - wakeup(&nfs_node_hash_lock); - nfs_node_hash_lock = 0; - *npp = 0; - FREE_ZONE(np, sizeof *np, M_NFSNODE); - return (error); - } - vp = nvp; bzero((caddr_t)np, sizeof *np); - vp->v_data = np; - np->n_vnode = vp; - /* - * Insert the nfsnode in the hash queue for its new file handle - */ - LIST_INSERT_HEAD(nhpp, np, n_hash); + np->n_flag |= NINIT; + np->n_mount = mntp; + lockinit(&np->n_lock, PINOD, "nfsnode", 0, 0); + /* lock the new nfsnode */ + lockmgr(&np->n_lock, LK_EXCLUSIVE, NULL, p); + + /* Insert the nfsnode in the hash queue for its new file handle */ if (fhsize > NFS_SMALLFH) { MALLOC_ZONE(np->n_fhp, nfsfh_t *, fhsize, M_NFSBIGFH, M_WAITOK); @@ -200,16 +194,36 @@ loop: np->n_fhp = &np->n_fh; bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize); np->n_fhsize = fhsize; - *npp = np; + LIST_INSERT_HEAD(nhpp, np, n_hash); + np->n_flag |= NHASHED; + /* release lock on hash table */ if (nfs_node_hash_lock < 0) wakeup(&nfs_node_hash_lock); nfs_node_hash_lock = 0; - /* - * Lock the new nfsnode. - */ - error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + /* now, attempt to get a new vnode */ + error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, &nvp); + if (error) { + LIST_REMOVE(np, n_hash); + np->n_flag &= ~NHASHED; + if (np->n_fhsize > NFS_SMALLFH) + FREE_ZONE((caddr_t)np->n_fhp, np->n_fhsize, M_NFSBIGFH); + FREE_ZONE(np, sizeof *np, M_NFSNODE); + *npp = 0; + return (error); + } + vp = nvp; + vp->v_data = np; + np->n_vnode = vp; + *npp = np; + + /* node is now initialized, check if anyone's waiting for it */ + np->n_flag &= ~NINIT; + if (np->n_flag & NWINIT) { + np->n_flag &= ~NWINIT; + wakeup((caddr_t)np); + } return (error); } @@ -243,35 +257,17 @@ nfs_inactive(ap) #if DIAGNOSTIC kprintf("nfs_inactive removing %s, dvp=%x, a_vp=%x, ap=%x, np=%x, sp=%x\n", &sp->s_name[0], (unsigned)sp->s_dvp, (unsigned)ap->a_vp, (unsigned)ap, (unsigned)np, (unsigned)sp); #endif - /* - * We get a reference (vget) to ensure getnewvnode() - * doesn't recycle vp while we're asleep awaiting I/O. - * Note we don't need the reference unless usecount is - * already zero. In the case of a forcible unmount it - * wont be zero and doing a vget would fail because - * vclean holds VXLOCK. - */ - if (ap->a_vp->v_usecount > 0) { - VREF(ap->a_vp); - } else if (vget(ap->a_vp, 0, ap->a_p)) - panic("nfs_inactive: vget failed"); (void) nfs_vinvalbuf(ap->a_vp, 0, sp->s_cred, p, 1); np->n_size = 0; ubc_setsize(ap->a_vp, (off_t)0); - - /* We have a problem. The dvp could have gone away on us while - * in the unmount path. Thus it appears as VBAD and we cannot - * use it. If we tried locking the parent (future), for silly - * rename files, it is unclear where we would lock. The unmount - * code just pulls unlocked vnodes as it goes thru its list and - * yanks them. Could unmount be smarter to see if a busy reg vnode has - * a parent, and not yank it yet? Put in more passes at unmount - * time? In the meantime, just check if it went away on us. - * Could have gone away during the nfs_vinvalbuf or ubc_setsize - * which block. Or perhaps even before nfs_inactive got called. 
- */ - if ((sp->s_dvp)->v_type != VBAD) - nfs_removeit(sp); /* uses the dvp */ + nfs_removeit(sp); + /* + * remove nfsnode from hash now so we can't accidentally find it + * again if another object gets created with the same filehandle + * before this vnode gets reclaimed + */ + LIST_REMOVE(np, n_hash); + np->n_flag &= ~NHASHED; cred = sp->s_cred; if (cred != NOCRED) { sp->s_cred = NOCRED; @@ -279,10 +275,9 @@ nfs_inactive(ap) } vrele(sp->s_dvp); FREE_ZONE((caddr_t)sp, sizeof (struct sillyrename), M_NFSREQ); - vrele(ap->a_vp); } np->n_flag &= (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NQNFSEVICTED | - NQNFSNONCACHE | NQNFSWRITE); + NQNFSNONCACHE | NQNFSWRITE | NHASHED); VOP_UNLOCK(ap->a_vp, 0, ap->a_p); return (0); } @@ -305,7 +300,10 @@ nfs_reclaim(ap) if (prtactive && vp->v_usecount != 0) vprint("nfs_reclaim: pushing active", vp); - LIST_REMOVE(np, n_hash); + if (np->n_flag & NHASHED) { + LIST_REMOVE(np, n_hash); + np->n_flag &= ~NHASHED; + } /* * In case we block during FREE_ZONEs below, get the entry out @@ -398,22 +396,3 @@ nfs_islocked(ap) return (lockstatus(&VTONFS(ap->a_vp)->n_lock)); } - - -/* - * Nfs abort op, called after namei() when a CREATE/DELETE isn't actually - * done. Currently nothing to do. - */ -/* ARGSUSED */ -int -nfs_abortop(ap) - struct vop_abortop_args /* { - struct vnode *a_dvp; - struct componentname *a_cnp; - } */ *ap; -{ - - if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) - FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); - return (0); -} diff --git a/bsd/nfs/nfs_nqlease.c b/bsd/nfs/nfs_nqlease.c index d390c9e92..f17df2e2b 100644 --- a/bsd/nfs/nfs_nqlease.c +++ b/bsd/nfs/nfs_nqlease.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -429,8 +429,10 @@ nqsrv_instimeq(lp, duration) { register struct nqlease *tlp; time_t newexpiry; + struct timeval now; - newexpiry = time.tv_sec + duration + nqsrv_clockskew; + microtime(&now); + newexpiry = now.tv_sec + duration + nqsrv_clockskew; if (lp->lc_expiry == newexpiry) return; if (lp->lc_timer.cqe_next != 0) { @@ -523,7 +525,7 @@ nqsrv_send_eviction(vp, lp, slp, nam, cred) caddr_t bpos, cp; u_long xid, *tl; int len = 1, ok = 1, i = 0; - int sotype, *solockp; + int sotype, solock; while (ok && (lph->lph_flag & LC_VALID)) { if (nqsrv_cmpnam(slp, nam, lph)) @@ -547,10 +549,7 @@ nqsrv_send_eviction(vp, lp, slp, nam, cred) } else goto nextone; sotype = so->so_type; - if (so->so_proto->pr_flags & PR_CONNREQUIRED) - solockp = &lph->lph_slp->ns_solock; - else - solockp = (int *)0; + solock = (so->so_proto->pr_flags & PR_CONNREQUIRED); nfsm_reqhead((struct vnode *)0, NQNFSPROC_EVICTED, NFSX_V3FH + NFSX_UNSIGNED); fhp = &nfh.fh_generic; @@ -583,15 +582,13 @@ nqsrv_send_eviction(vp, lp, slp, nam, cred) } if (((lph->lph_flag & (LC_UDP | LC_CLTP)) == 0 && (lph->lph_slp->ns_flag & SLP_VALID) == 0) || - (solockp && (*solockp & NFSMNT_SNDLOCK))) + (solock && nfs_slplock(lph->lph_slp, 0) == 0)) { m_freem(m); - else { - if (solockp) - *solockp |= NFSMNT_SNDLOCK; + } else { (void) nfs_send(so, nam2, m, (struct nfsreq *)0); - if (solockp) - nfs_sndunlock(solockp); + if (solock) + nfs_slpunlock(lph->lph_slp); } if (lph->lph_flag & LC_UDP) MFREE(nam2, m); @@ -623,9 +620,11 @@ nqsrv_waitfor_expiry(lp) register int i; struct nqm *lphnext; int len, ok; + struct timeval now; tryagain: - if (time.tv_sec > lp->lc_expiry) + microtime(&now); + if (now.tv_sec > lp->lc_expiry) return; lph = &lp->lc_host; lphnext = lp->lc_morehosts; @@ -669,10 +668,12 @@ nqnfs_serverd() struct nqm *lphnext, *olphnext; struct mbuf *n; int i, len, ok; + struct timeval now; + microtime(&now); for (lp = nqtimerhead.cqh_first; lp != (void *)&nqtimerhead; lp = nextlp) { - if (lp->lc_expiry >= time.tv_sec) + if (lp->lc_expiry >= now.tv_sec) break; nextlp = lp->lc_timer.cqe_next; if (lp->lc_flag & LC_EXPIREDWANTED) { @@ -717,7 +718,7 @@ nqnfs_serverd() nfsrv_slpderef(lph->lph_slp); if (++i == len) { if (olphnext) { - _FREE_ZONE((caddr_t)olphnext, + FREE_ZONE((caddr_t)olphnext, sizeof (struct nqm), M_NQMHOST); olphnext = (struct nqm *)0; @@ -736,7 +737,7 @@ nqnfs_serverd() FREE_ZONE((caddr_t)lp, sizeof (struct nqlease), M_NQLEASE); if (olphnext) - _FREE_ZONE((caddr_t)olphnext, + FREE_ZONE((caddr_t)olphnext, sizeof (struct nqm), M_NQMHOST); nfsstats.srvnqnfs_leases--; } @@ -896,8 +897,9 @@ nqnfs_getlease(vp, rwflag, cred, p) register caddr_t cp; register long t1, t2; register struct nfsnode *np; - struct nfsmount *nmp = VFSTONFS(vp->v_mount); + struct nfsmount *nmp; caddr_t bpos, dpos, cp2; + struct timeval now; time_t reqtime; int error = 0; struct mbuf *mreq, *mrep, *md, *mb, *mb2; @@ -905,6 +907,10 @@ nqnfs_getlease(vp, rwflag, cred, p) u_quad_t frev; u_int64_t xid; + nmp = VFSTONFS(vp->v_mount); + if (!nmp) + return (ENXIO); + nfsstats.rpccnt[NQNFSPROC_GETLEASE]++; mb = mreq = nfsm_reqh(vp, NQNFSPROC_GETLEASE, NFSX_V3FH+2*NFSX_UNSIGNED, &bpos); @@ -912,16 +918,24 @@ nqnfs_getlease(vp, rwflag, cred, p) nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); *tl++ = txdr_unsigned(rwflag); *tl = txdr_unsigned(nmp->nm_leaseterm); - reqtime = time.tv_sec; + microtime(&now); + reqtime = now.tv_sec; nfsm_request(vp, NQNFSPROC_GETLEASE, p, cred, &xid); np = VTONFS(vp); nfsm_dissect(tl, u_long *, 4 * NFSX_UNSIGNED); 
cachable = fxdr_unsigned(int, *tl++); reqtime += fxdr_unsigned(int, *tl++); - if (reqtime > time.tv_sec) { - fxdr_hyper(tl, &frev); - nqnfs_clientlease(nmp, np, rwflag, cachable, reqtime, frev); - nfsm_loadattr(vp, (struct vattr *)0, &xid); + microtime(&now); + if (reqtime > now.tv_sec) { + nmp = VFSTONFS(vp->v_mount); + if (!nmp) { + error = ENXIO; + } else { + fxdr_hyper(tl, &frev); + nqnfs_clientlease(nmp, np, rwflag, cachable, + reqtime, frev); + nfsm_loadattr(vp, (struct vattr *)0, &xid); + } } else error = NQNFS_EXPIRED; nfsm_reqdone; @@ -947,8 +961,12 @@ nqnfs_vacated(vp, cred) struct mbuf *mreq, *mb, *mb2, *mheadend; struct nfsmount *nmp; struct nfsreq myrep; + int connrequired; + int *flagp; nmp = VFSTONFS(vp->v_mount); + if (!nmp) + return (ENXIO); nfsstats.rpccnt[NQNFSPROC_VACATED]++; nfsm_reqhead(vp, NQNFSPROC_VACATED, NFSX_FH(1)); nfsm_fhtom(vp, 1); @@ -968,11 +986,15 @@ nqnfs_vacated(vp, cred) } myrep.r_flags = 0; myrep.r_nmp = nmp; - if (nmp->nm_soflags & PR_CONNREQUIRED) - (void) nfs_sndlock(&nmp->nm_flag, (struct nfsreq *)0); + + connrequired = (nmp->nm_soflags & PR_CONNREQUIRED); + if (connrequired) + (void) nfs_sndlock(&myrep); + (void) nfs_send(nmp->nm_so, nmp->nm_nam, m, &myrep); - if (nmp->nm_soflags & PR_CONNREQUIRED) - nfs_sndunlock(&nmp->nm_flag); + + if (connrequired) + nfs_sndunlock(&myrep); nfsmout: return (error); } @@ -1060,17 +1082,19 @@ nqnfs_clientd(nmp, cred, ncd, flag, argp, p) struct nfsuid *nuidp, *nnuidp; int error = 0, vpid; register struct nfsreq *rp; + struct timeval now; /* * First initialize some variables */ + microtime(&now); /* * If an authorization string is being passed in, get it. */ if ((flag & NFSSVC_GOTAUTH) && - (nmp->nm_flag & (NFSMNT_WAITAUTH | NFSMNT_DISMNT)) == 0) { - if (nmp->nm_flag & NFSMNT_HASAUTH) + (nmp->nm_state & (NFSSTA_WAITAUTH | NFSSTA_DISMNT)) == 0) { + if (nmp->nm_state & NFSSTA_HASAUTH) panic("cld kerb"); if ((flag & NFSSVC_AUTHINFAIL) == 0) { if (ncd->ncd_authlen <= nmp->nm_authlen && @@ -1084,18 +1108,18 @@ nqnfs_clientd(nmp, cred, ncd, flag, argp, p) nmp->nm_key = ncd->ncd_key; #endif } else - nmp->nm_flag |= NFSMNT_AUTHERR; + nmp->nm_state |= NFSSTA_AUTHERR; } else - nmp->nm_flag |= NFSMNT_AUTHERR; - nmp->nm_flag |= NFSMNT_HASAUTH; + nmp->nm_state |= NFSSTA_AUTHERR; + nmp->nm_state |= NFSSTA_HASAUTH; wakeup((caddr_t)&nmp->nm_authlen); } else - nmp->nm_flag |= NFSMNT_WAITAUTH; + nmp->nm_state |= NFSSTA_WAITAUTH; /* * Loop every second updating queue until there is a termination sig. 
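A pattern worth calling out in these nqnfs_clientd() hunks: bits that describe runtime status (NFSSTA_HASAUTH, NFSSTA_WAITAUTH, NFSSTA_AUTHERR, NFSSTA_DISMNT, NFSSTA_DISMINPROG) move out of nm_flag, which also carries the user-chosen NFSMNT_* mount options, into a dedicated nm_state word. A sketch of the split follows; the struct layout and bit values are illustrative, not the real definitions.

struct nfsmount_sketch {
	int nm_flag;		/* NFSMNT_* options, fixed by the mount call */
	int nm_state;		/* NFSSTA_* status, mutated by the kernel */
};

#define	SK_NFSSTA_HASAUTH	0x01	/* auth string is available */
#define	SK_NFSSTA_WAITAUTH	0x02	/* blocked waiting for auth */
#define	SK_NFSSTA_DISMNT	0x04	/* unmount under way */

static int
sketch_clientd_may_run(const struct nfsmount_sketch *nmp)
{
	/* status checks now read nm_state, never nm_flag */
	return ((nmp->nm_state &
	    (SK_NFSSTA_WAITAUTH | SK_NFSSTA_DISMNT)) == 0);
}

Keeping options and status in separate words means a status update can never clobber an option bit, and new status bits no longer compete with the mount API for flag space.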
*/ - while ((nmp->nm_flag & NFSMNT_DISMNT) == 0) { + while ((nmp->nm_state & NFSSTA_DISMNT) == 0) { if (nmp->nm_flag & NFSMNT_NQNFS) { /* * If there are no outstanding requests (and therefore no @@ -1116,10 +1140,10 @@ nqnfs_clientd(nmp, cred, ncd, flag, argp, p) */ np = nmp->nm_timerhead.cqh_first; while (np != (void *)&nmp->nm_timerhead && - (nmp->nm_flag & NFSMNT_DISMINPROG) == 0) { + (nmp->nm_state & NFSSTA_DISMINPROG) == 0) { vp = NFSTOV(np); vpid = vp->v_id; - if (np->n_expiry < time.tv_sec) { + if (np->n_expiry < now.tv_sec) { if (vget(vp, LK_EXCLUSIVE, p) == 0) { nmp->nm_inprog = vp; if (vpid == vp->v_id) { @@ -1144,9 +1168,9 @@ nqnfs_clientd(nmp, cred, ncd, flag, argp, p) vrele(vp); nmp->nm_inprog = NULLVP; } - } else if ((np->n_expiry - NQ_RENEWAL) < time.tv_sec) { + } else if ((np->n_expiry - NQ_RENEWAL) < now.tv_sec) { if ((np->n_flag & (NQNFSWRITE | NQNFSNONCACHE)) - == NQNFSWRITE && vp->v_dirtyblkhd.lh_first && + == NQNFSWRITE && np->n_dirtyblkhd.lh_first && vget(vp, LK_EXCLUSIVE, p) == 0) { nmp->nm_inprog = vp; if (vpid == vp->v_id && @@ -1166,10 +1190,10 @@ nqnfs_clientd(nmp, cred, ncd, flag, argp, p) /* * Get an authorization string, if required. */ - if ((nmp->nm_flag & (NFSMNT_WAITAUTH | NFSMNT_DISMNT | NFSMNT_HASAUTH)) == 0) { + if ((nmp->nm_state & (NFSSTA_WAITAUTH | NFSSTA_DISMNT | NFSSTA_HASAUTH)) == 0) { ncd->ncd_authuid = nmp->nm_authuid; if (copyout((caddr_t)ncd, argp, sizeof (struct nfsd_cargs))) - nmp->nm_flag |= NFSMNT_WAITAUTH; + nmp->nm_state |= NFSSTA_WAITAUTH; else return (ENEEDAUTH); } @@ -1177,8 +1201,8 @@ nqnfs_clientd(nmp, cred, ncd, flag, argp, p) /* * Wait a bit (no pun) and do it again. */ - if ((nmp->nm_flag & NFSMNT_DISMNT) == 0 && - (nmp->nm_flag & (NFSMNT_WAITAUTH | NFSMNT_HASAUTH))) { + if ((nmp->nm_state & NFSSTA_DISMNT) == 0 && + (nmp->nm_state & (NFSSTA_WAITAUTH | NFSSTA_HASAUTH))) { error = tsleep((caddr_t)&nmp->nm_authstr, PSOCK | PCATCH, "nqnfstimr", hz / 3); if (error == EINTR || error == ERESTART) @@ -1193,7 +1217,7 @@ nqnfs_clientd(nmp, cred, ncd, flag, argp, p) nnuidp = nuidp->nu_lru.tqe_next; LIST_REMOVE(nuidp, nu_hash); TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru); - _FREE_ZONE((caddr_t)nuidp, sizeof (struct nfsuid), M_NFSUID); + FREE_ZONE((caddr_t)nuidp, sizeof (struct nfsuid), M_NFSUID); } /* * Loop through outstanding request list and remove dangling @@ -1202,7 +1226,7 @@ nqnfs_clientd(nmp, cred, ncd, flag, argp, p) for (rp = nfs_reqq.tqh_first; rp; rp = rp->r_chain.tqe_next) if (rp->r_nmp == nmp) rp->r_nmp = (struct nfsmount *)0; - _FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT); + FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT); if (error == EWOULDBLOCK) error = 0; return (error); diff --git a/bsd/nfs/nfs_serv.c b/bsd/nfs/nfs_serv.c index 7526e755d..505a9a75a 100644 --- a/bsd/nfs/nfs_serv.c +++ b/bsd/nfs/nfs_serv.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -99,8 +99,6 @@ #include #include -#include - #include #include #include @@ -485,6 +483,7 @@ nfsrv_lookup(nfsd, slp, procp, mrq) nqsrv_getl(ndp->ni_startdir, ND_READ); vrele(ndp->ni_startdir); FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; vp = ndp->ni_vp; bzero((caddr_t)fhp, sizeof(nfh)); fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid; @@ -645,6 +644,7 @@ nfsrv_read(nfsd, slp, procp, mrq) struct vattr va, *vap = &va; off_t off; u_quad_t frev; + int didhold = 0; fhp = &nfh.fh_generic; nfsm_srvmtofh(fhp); @@ -745,6 +745,7 @@ nfsrv_read(nfsd, slp, procp, mrq) uiop->uio_resid = cnt; uiop->uio_rw = UIO_READ; uiop->uio_segflg = UIO_SYSSPACE; + didhold = ubc_hold(vp); error = VOP_READ(vp, uiop, IO_NODELOCKED, cred); off = uiop->uio_offset; FREE((caddr_t)iv2, M_TEMP); @@ -754,17 +755,25 @@ nfsrv_read(nfsd, slp, procp, mrq) * that alone. m_freem(mreq) looks bogus. Taking it out. Should be * mrep or not there at all. Causes panic. ekn */ if (error || (getret = VOP_GETATTR(vp, vap, cred, procp))) { + VOP_UNLOCK(vp, 0, procp); + if (didhold) + ubc_rele(vp); if (!error) error = getret; /* m_freem(mreq);*/ - vput(vp); + vrele(vp); nfsm_reply(NFSX_POSTOPATTR(v3)); nfsm_srvpostop_attr(getret, vap); return (0); } - } else + VOP_UNLOCK(vp, 0, procp); + if (didhold) + ubc_rele(vp); + vrele(vp); + } else { uiop->uio_resid = 0; - vput(vp); + vput(vp); + } nfsm_srvfillattr(vap, fp); len -= uiop->uio_resid; tlen = nfsm_rndup(len); @@ -817,6 +826,7 @@ nfsrv_write(nfsd, slp, procp, mrq) struct uio io, *uiop = &io; off_t off; u_quad_t frev; + int didhold = 0; if (mrep == NULL) { *mrq = NULL; @@ -933,12 +943,16 @@ nfsrv_write(nfsd, slp, procp, mrq) uiop->uio_segflg = UIO_SYSSPACE; uiop->uio_procp = (struct proc *)0; uiop->uio_offset = off; + didhold = ubc_hold(vp); error = VOP_WRITE(vp, uiop, ioflags, cred); nfsstats.srvvop_writes++; FREE((caddr_t)iv, M_TEMP); } aftat_ret = VOP_GETATTR(vp, vap, cred, procp); - vput(vp); + VOP_UNLOCK(vp, 0, procp); + if (didhold) + ubc_rele(vp); + vrele(vp); if (!error) error = aftat_ret; nfsm_reply(NFSX_PREOPATTR(v3) + NFSX_POSTOPORFATTR(v3) + @@ -1003,6 +1017,8 @@ nfsrv_writegather(ndp, slp, procp, mrq) struct vnode *vp; struct uio io, *uiop = &io; u_quad_t frev, cur_usec; + int didhold; + struct timeval now; #ifndef nolint i = 0; @@ -1020,7 +1036,8 @@ nfsrv_writegather(ndp, slp, procp, mrq) LIST_INIT(&nfsd->nd_coalesce); nfsd->nd_mreq = NULL; nfsd->nd_stable = NFSV3WRITE_FILESYNC; - cur_usec = (u_quad_t)time.tv_sec * 1000000 + (u_quad_t)time.tv_usec; + microuptime(&now); + cur_usec = (u_quad_t)now.tv_sec * 1000000 + (u_quad_t)now.tv_usec; nfsd->nd_time = cur_usec + (v3 ? nfsrvw_procrastinate_v3 : nfsrvw_procrastinate); @@ -1136,7 +1153,8 @@ nfsmout: * and generate the associated reply mbuf list(s). 
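The nfsrv_read() and nfsrv_write() hunks above, like the write-gather and commit paths below, all adopt the same shape: take a ubc_hold() reference before the I/O, then replace the old vput() with an explicit unlock/release sequence so the UBC info stays pinned while the vnode lock is dropped. A sketch of the pattern, assuming the surrounding kernel environment (the vnode ops, ubc_hold()/ubc_rele(), and vrele()); the didhold flag matters because ubc_hold() can decline, for example on non-regular files.

static int
sketch_server_read(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, struct proc *p)
{
	int error, didhold;

	didhold = ubc_hold(vp);		/* pin UBC info across the I/O */
	error = VOP_READ(vp, uiop, IO_NODELOCKED, cred);
	VOP_UNLOCK(vp, 0, p);		/* drop the vnode lock first... */
	if (didhold)
		ubc_rele(vp);		/* ...then the UBC reference... */
	vrele(vp);			/* ...and finally the use count */
	return (error);
}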
*/ loop1: - cur_usec = (u_quad_t)time.tv_sec * 1000000 + (u_quad_t)time.tv_usec; + microuptime(&now); + cur_usec = (u_quad_t)now.tv_sec * 1000000 + (u_quad_t)now.tv_usec; s = splsoftclock(); for (nfsd = slp->ns_tq.lh_first; nfsd; nfsd = owp) { owp = nfsd->nd_tq.le_next; @@ -1182,6 +1200,7 @@ loop1: uiop->uio_procp = (struct proc *)0; uiop->uio_offset = nfsd->nd_off; uiop->uio_resid = nfsd->nd_eoff - nfsd->nd_off; + didhold = 0; if (uiop->uio_resid > 0) { mp = mrep; i = 0; @@ -1204,6 +1223,7 @@ loop1: mp = mp->m_next; } if (!error) { + didhold = ubc_hold(vp); error = VOP_WRITE(vp, uiop, ioflags, cred); nfsstats.srvvop_writes++; } @@ -1212,7 +1232,10 @@ loop1: m_freem(mrep); if (vp) { aftat_ret = VOP_GETATTR(vp, &va, cred, procp); - vput(vp); + VOP_UNLOCK(vp, 0, procp); + if (didhold) + ubc_rele(vp); + vrele(vp); } /* @@ -1503,6 +1526,7 @@ nfsrv_create(nfsd, slp, procp, mrq) nfsrv_object_create(nd.ni_vp); FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; if (exclusive_flag) { exclusive_flag = 0; VATTR_NULL(vap); @@ -1519,8 +1543,9 @@ nfsrv_create(nfsd, slp, procp, mrq) if (vap->va_type != VFIFO && (error = suser(cred, (u_short *)0))) { vrele(nd.ni_startdir); - _FREE_ZONE(nd.ni_cnd.cn_pnbuf, + FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); vput(nd.ni_dvp); nfsm_reply(0); @@ -1537,13 +1562,15 @@ nfsrv_create(nfsd, slp, procp, mrq) nd.ni_cnd.cn_proc = procp; nd.ni_cnd.cn_cred = cred; if ((error = lookup(&nd))) { - _FREE_ZONE(nd.ni_cnd.cn_pnbuf, + FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; nfsm_reply(0); } nfsrv_object_create(nd.ni_vp); FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; if (nd.ni_cnd.cn_flags & ISSYMLINK) { vrele(nd.ni_dvp); vput(nd.ni_vp); @@ -1553,8 +1580,9 @@ nfsrv_create(nfsd, slp, procp, mrq) } } else { vrele(nd.ni_startdir); - _FREE_ZONE(nd.ni_cnd.cn_pnbuf, + FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); vput(nd.ni_dvp); error = ENXIO; @@ -1562,7 +1590,8 @@ nfsrv_create(nfsd, slp, procp, mrq) vp = nd.ni_vp; } else { vrele(nd.ni_startdir); - _FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; vp = nd.ni_vp; if (nd.ni_dvp == vp) vrele(nd.ni_dvp); @@ -1614,14 +1643,15 @@ nfsrv_create(nfsd, slp, procp, mrq) nfsm_build(fp, struct nfs_fattr *, NFSX_V2FATTR); nfsm_srvfillattr(vap, fp); } - return (error); + return (0); nfsmout: if (dirp) vrele(dirp); if (nd.ni_cnd.cn_nameiop) { vrele(nd.ni_startdir); - _FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, + FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; } VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); if (nd.ni_dvp == nd.ni_vp) @@ -1685,8 +1715,9 @@ nfsrv_mknod(nfsd, slp, procp, mrq) vtyp = nfsv3tov_type(*tl); if (vtyp != VCHR && vtyp != VBLK && vtyp != VSOCK && vtyp != VFIFO) { vrele(nd.ni_startdir); - _FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, + FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; error = NFSERR_BADTYPE; VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); vput(nd.ni_dvp); @@ -1706,8 +1737,9 @@ nfsrv_mknod(nfsd, slp, procp, mrq) */ if (nd.ni_vp) { vrele(nd.ni_startdir); - _FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, + FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, 
nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; error = EEXIST; VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); vput(nd.ni_dvp); @@ -1721,11 +1753,13 @@ nfsrv_mknod(nfsd, slp, procp, mrq) if (!error) FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; } else { if (vtyp != VFIFO && (error = suser(cred, (u_short *)0))) { vrele(nd.ni_startdir); - _FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, + FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); vput(nd.ni_dvp); goto out; @@ -1741,6 +1775,7 @@ nfsrv_mknod(nfsd, slp, procp, mrq) nd.ni_cnd.cn_cred = procp->p_ucred; error = lookup(&nd); FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; if (error) goto out; if (nd.ni_cnd.cn_flags & ISSYMLINK) { @@ -1774,8 +1809,9 @@ nfsmout: vrele(dirp); if (nd.ni_cnd.cn_nameiop) { vrele(nd.ni_startdir); - _FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, + FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; } VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); if (nd.ni_dvp == nd.ni_vp) @@ -2039,6 +2075,7 @@ out: } vrele(tond.ni_startdir); FREE_ZONE(tond.ni_cnd.cn_pnbuf, tond.ni_cnd.cn_pnlen, M_NAMEI); + tond.ni_cnd.cn_flags &= ~HASBUF; out1: if (fdirp) { fdiraft_ret = VOP_GETATTR(fdirp, &fdiraft, cred, procp); @@ -2050,6 +2087,7 @@ out1: } vrele(fromnd.ni_startdir); FREE_ZONE(fromnd.ni_cnd.cn_pnbuf, fromnd.ni_cnd.cn_pnlen, M_NAMEI); + fromnd.ni_cnd.cn_flags &= ~HASBUF; nfsm_reply(2 * NFSX_WCCDATA(v3)); if (v3) { nfsm_srvwcc_data(fdirfor_ret, &fdirfor, fdiraft_ret, &fdiraft); @@ -2065,11 +2103,13 @@ nfsmout: if (tond.ni_cnd.cn_nameiop) { vrele(tond.ni_startdir); FREE_ZONE(tond.ni_cnd.cn_pnbuf, tond.ni_cnd.cn_pnlen, M_NAMEI); + tond.ni_cnd.cn_flags &= ~HASBUF; } if (fromnd.ni_cnd.cn_nameiop) { vrele(fromnd.ni_startdir); FREE_ZONE(fromnd.ni_cnd.cn_pnbuf, fromnd.ni_cnd.cn_pnlen, M_NAMEI); + fromnd.ni_cnd.cn_flags &= ~HASBUF; VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd); vrele(fromnd.ni_dvp); vrele(fvp); @@ -2249,7 +2289,8 @@ nfsrv_symlink(nfsd, slp, procp, mrq) *(pathcp + len2) = '\0'; if (nd.ni_vp) { vrele(nd.ni_startdir); - _FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); if (nd.ni_dvp == nd.ni_vp) vrele(nd.ni_dvp); @@ -2283,6 +2324,7 @@ nfsrv_symlink(nfsd, slp, procp, mrq) } else vrele(nd.ni_startdir); FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; } out: if (pathcp) @@ -2303,7 +2345,8 @@ out: nfsmout: if (nd.ni_cnd.cn_nameiop) { vrele(nd.ni_startdir); - _FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + nd.ni_cnd.cn_flags &= ~HASBUF; } if (dirp) vrele(dirp); @@ -2550,13 +2593,11 @@ out: * example, client NFS does not { although it is never remote mounted * anyhow } * The alternate call nfsrv_readdirplus() does lookups as well. - * PS: The NFS protocol spec. does not clarify what the "count" byte - * argument is a count of.. just name strings and file id's or the - * entire reply rpc or ... - * I tried just file name and id sizes and it confused the Sun client, - * so I am using the full rpc size now. The "paranoia.." comment refers - * to including the status longwords that are not a part of the dir. - * "entry" structures, but are in the rpc. 
+ * PS: The XNFS protocol spec clearly describes what the "count"s arguments + * are supposed to cover. For readdir, the count is the total number of + * bytes included in everything from the directory's postopattr through + * the EOF flag. For readdirplus, the maxcount is the same, and the + * dircount includes all that except for the entry attributes and handles. */ struct flrep { nfsuint64 fl_off; @@ -2754,13 +2795,14 @@ again: goto again; } - len = 3 * NFSX_UNSIGNED; /* paranoia, probably can be 0 */ nfsm_reply(NFSX_POSTOPATTR(v3) + NFSX_COOKIEVERF(v3) + siz); if (v3) { + len = NFSX_V3POSTOPATTR + NFSX_V3COOKIEVERF + 2 * NFSX_UNSIGNED; nfsm_srvpostop_attr(getret, &at); nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); txdr_hyper(&at.va_filerev, tl); - } + } else + len = 2 * NFSX_UNSIGNED; mp = mp2 = mb; bp = bpos; be = bp + M_TRAILINGSPACE(mp); @@ -3090,7 +3132,7 @@ again: * are calculated conservatively, including all * XDR overheads. */ - len += (7 * NFSX_UNSIGNED + nlen + rem + NFSX_V3FH + + len += (8 * NFSX_UNSIGNED + nlen + rem + NFSX_V3FH + NFSX_V3POSTOPATTR); dirlen += (6 * NFSX_UNSIGNED + nlen + rem); if (len > cnt || dirlen > fullsiz) { @@ -3211,6 +3253,7 @@ nfsrv_commit(nfsd, slp, procp, mrq) char *cp2; struct mbuf *mb, *mb2, *mreq; u_quad_t frev, off; + int didhold; #ifndef nolint cache = 0; @@ -3233,9 +3276,13 @@ nfsrv_commit(nfsd, slp, procp, mrq) return (0); } for_ret = VOP_GETATTR(vp, &bfor, cred, procp); + didhold = ubc_hold(vp); error = VOP_FSYNC(vp, cred, MNT_WAIT, procp); aft_ret = VOP_GETATTR(vp, &aft, cred, procp); - vput(vp); + VOP_UNLOCK(vp, 0, procp); + if (didhold) + ubc_rele(vp); + vrele(vp); nfsm_reply(NFSX_V3WCCDATA + NFSX_V3WRITEVERF); nfsm_srvwcc_data(for_ret, &bfor, aft_ret, &aft); if (!error) { @@ -3316,7 +3363,7 @@ nfsrv_statfs(nfsd, slp, procp, mrq) sfp->sf_afiles.nfsuquad[1] = txdr_unsigned(sf->f_ffree); sfp->sf_invarsec = 0; } else { - sfp->sf_tsize = txdr_unsigned(NFS_MAXDGRAMDATA); + sfp->sf_tsize = txdr_unsigned(NFS_V2MAXDATA); sfp->sf_bsize = txdr_unsigned(sf->f_bsize); sfp->sf_blocks = txdr_unsigned(sf->f_blocks); sfp->sf_bfree = txdr_unsigned(sf->f_bfree); @@ -3343,7 +3390,7 @@ nfsrv_fsinfo(nfsd, slp, procp, mrq) register struct nfsv3_fsinfo *sip; register long t1; caddr_t bpos; - int error = 0, rdonly, cache, getret = 1, pref; + int error = 0, rdonly, cache, getret = 1, pref, max; char *cp2; struct mbuf *mb, *mb2, *mreq; struct vnode *vp; @@ -3372,16 +3419,16 @@ nfsrv_fsinfo(nfsd, slp, procp, mrq) /* * XXX * There should be file system VFS OP(s) to get this information. - * For now, assume ufs. + * For now, assume our usual NFS defaults. 
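In the nfsrv_fsinfo() hunk just below, the advertised maximum read and write transfer sizes (fs_rtmax, fs_wtmax) now track the socket type together with the preferred sizes, rather than always claiming NFS_MAXDATA even over UDP. A sketch of the rule; the constants here are guarded placeholders with classic BSD values, and the kernel's own NFS headers govern the real numbers.

#include <sys/socket.h>		/* SOCK_DGRAM */

#ifndef NFS_MAXDGRAMDATA
#define	NFS_MAXDGRAMDATA 16384	/* placeholder; kernel header governs */
#endif
#ifndef NFS_MAXDATA
#define	NFS_MAXDATA	32768	/* placeholder; kernel header governs */
#endif

static void
sketch_fsinfo_sizes(int so_type, long *maxp, long *prefp)
{
	if (so_type == SOCK_DGRAM)	/* UDP: datagram-limited */
		*maxp = *prefp = NFS_MAXDGRAMDATA;
	else				/* TCP: full-size transfers */
		*maxp = *prefp = NFS_MAXDATA;
}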
*/ if (slp->ns_so->so_type == SOCK_DGRAM) - pref = NFS_MAXDGRAMDATA; + max = pref = NFS_MAXDGRAMDATA; else - pref = NFS_MAXDATA; - sip->fs_rtmax = txdr_unsigned(NFS_MAXDATA); + max = pref = NFS_MAXDATA; + sip->fs_rtmax = txdr_unsigned(max); sip->fs_rtpref = txdr_unsigned(pref); sip->fs_rtmult = txdr_unsigned(NFS_FABLKSIZE); - sip->fs_wtmax = txdr_unsigned(NFS_MAXDATA); + sip->fs_wtmax = txdr_unsigned(max); sip->fs_wtpref = txdr_unsigned(pref); sip->fs_wtmult = txdr_unsigned(NFS_FABLKSIZE); sip->fs_dtpref = txdr_unsigned(pref); @@ -3414,7 +3461,7 @@ nfsrv_pathconf(nfsd, slp, procp, mrq) register long t1; caddr_t bpos; int error = 0, rdonly, cache, getret = 1, linkmax, namemax; - int chownres, notrunc; + int chownres, notrunc, case_sensitive, case_preserving; char *cp2; struct mbuf *mb, *mb2, *mreq; struct vnode *vp; @@ -3441,6 +3488,10 @@ nfsrv_pathconf(nfsd, slp, procp, mrq) error = VOP_PATHCONF(vp, _PC_CHOWN_RESTRICTED, &chownres); if (!error) error = VOP_PATHCONF(vp, _PC_NO_TRUNC, ¬runc); + if (!error) + error = VOP_PATHCONF(vp, _PC_CASE_SENSITIVE, &case_sensitive); + if (!error) + error = VOP_PATHCONF(vp, _PC_CASE_PRESERVING, &case_preserving); getret = VOP_GETATTR(vp, &at, cred, procp); vput(vp); nfsm_reply(NFSX_V3POSTOPATTR + NFSX_V3PATHCONF); @@ -3453,14 +3504,9 @@ nfsrv_pathconf(nfsd, slp, procp, mrq) pc->pc_namemax = txdr_unsigned(namemax); pc->pc_notrunc = txdr_unsigned(notrunc); pc->pc_chownrestricted = txdr_unsigned(chownres); + pc->pc_caseinsensitive = txdr_unsigned(!case_sensitive); + pc->pc_casepreserving = txdr_unsigned(case_preserving); - /* - * These should probably be supported by VOP_PATHCONF(), but - * until msdosfs is exportable (why would you want to?), the - * Unix defaults should be ok. - */ - pc->pc_caseinsensitive = nfs_false; - pc->pc_casepreserving = nfs_true; nfsm_srvdone; } diff --git a/bsd/nfs/nfs_socket.c b/bsd/nfs/nfs_socket.c index f2ee753a3..9b55ac48f 100644 --- a/bsd/nfs/nfs_socket.c +++ b/bsd/nfs/nfs_socket.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -178,12 +178,17 @@ static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, }; int nfsrtton = 0; struct nfsrtt nfsrtt; -static int nfs_msg __P((struct proc *,char *,char *)); +static int nfs_msg __P((struct proc *, const char *, const char *, int)); +static void nfs_up(struct nfsreq *, const char *, int); +static void nfs_down(struct nfsreq *, const char *, int); static int nfs_rcvlock __P((struct nfsreq *)); -static void nfs_rcvunlock __P((int *flagp)); +static void nfs_rcvunlock __P((struct nfsreq *)); static int nfs_receive __P((struct nfsreq *rep, struct mbuf **aname, struct mbuf **mp)); static int nfs_reconnect __P((struct nfsreq *rep)); +static void nfs_repbusy(struct nfsreq *rep); +static struct nfsreq * nfs_repnext(struct nfsreq *rep); +static void nfs_repdequeue(struct nfsreq *rep); #ifndef NFS_NOSERVER static int nfsrv_getstream __P((struct nfssvc_sock *,int)); @@ -428,19 +433,24 @@ nfs_connect(nmp, rep) } splx(s); } + /* + * Always time out on recieve, this allows us to reconnect the + * socket to deal with network changes. 
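Two related tuning changes land in the nfs_connect() hunk that follows. First, every socket now gets a short (2 second) receive timeout, so a blocked soreceive() wakes up periodically and the caller can notice signals or reconnect after a network change; only the send timeout still depends on soft/interruptible mounts. Second, the socket-buffer reservations scale with the mount's readahead instead of a fixed factor of two, clamped to NFS_MAXSOCKBUF. A sketch of the sizing rule; the constants are guarded placeholders and the kernel headers hold the real values.

#ifndef NFS_MAXPKTHDR
#define	NFS_MAXPKTHDR	404		/* classic BSD value; check headers */
#endif
#ifndef NFS_MAXSOCKBUF
#define	NFS_MAXSOCKBUF	(224 * 1024)	/* placeholder cap; check headers */
#endif

static void
sketch_sockbuf_sizes(long wsize, long rsize, int readahead,
    long *sndp, long *rcvp)
{
	long snd, rcv;

	snd = (wsize + NFS_MAXPKTHDR) * 3;
	rcv = (rsize + NFS_MAXPKTHDR) *
	    (readahead > 0 ? readahead + 1 : 2);	/* room for readahead */
	if (snd > NFS_MAXSOCKBUF)
		snd = NFS_MAXSOCKBUF;
	if (rcv > NFS_MAXSOCKBUF)
		rcv = NFS_MAXSOCKBUF;
	*sndp = snd;
	*rcvp = rcv;
}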
+ */ + so->so_rcv.sb_timeo = (2 * hz); if (nmp->nm_flag & (NFSMNT_SOFT | NFSMNT_INT)) { - so->so_rcv.sb_timeo = (5 * hz); so->so_snd.sb_timeo = (5 * hz); } else { - so->so_rcv.sb_timeo = 0; so->so_snd.sb_timeo = 0; } if (nmp->nm_sotype == SOCK_DGRAM) { - sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 2; - rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) * 2; + sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 3; + rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) * + (nmp->nm_readahead > 0 ? nmp->nm_readahead + 1 : 2); } else if (nmp->nm_sotype == SOCK_SEQPACKET) { - sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 2; - rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) * 2; + sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 3; + rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) * + (nmp->nm_readahead > 0 ? nmp->nm_readahead + 1 : 2); } else { if (nmp->nm_sotype != SOCK_STREAM) panic("nfscon sotype"); @@ -450,6 +460,7 @@ nfs_connect(nmp, rep) int val; bzero(&sopt, sizeof sopt); + sopt.sopt_dir = SOPT_SET; sopt.sopt_level = SOL_SOCKET; sopt.sopt_name = SO_KEEPALIVE; sopt.sopt_val = &val; @@ -462,6 +473,7 @@ nfs_connect(nmp, rep) int val; bzero(&sopt, sizeof sopt); + sopt.sopt_dir = SOPT_SET; sopt.sopt_level = IPPROTO_TCP; sopt.sopt_name = TCP_NODELAY; sopt.sopt_val = &val; @@ -470,12 +482,15 @@ nfs_connect(nmp, rep) sosetopt(so, &sopt); } - sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR + sizeof (u_long)) - * 2; - rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR + sizeof (u_long)) - * 2; + sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR + sizeof (u_long)) * 3; + rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR + sizeof (u_long)) * + (nmp->nm_readahead > 0 ? nmp->nm_readahead + 1 : 2); } + if (sndreserve > NFS_MAXSOCKBUF) + sndreserve = NFS_MAXSOCKBUF; + if (rcvreserve > NFS_MAXSOCKBUF) + rcvreserve = NFS_MAXSOCKBUF; error = soreserve(so, sndreserve, rcvreserve); if (error) { goto bad; @@ -492,7 +507,7 @@ nfs_connect(nmp, rep) nmp->nm_sdrtt[3] = 0; nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */ nmp->nm_sent = 0; - FSDBG(529, nmp, nmp->nm_flag, nmp->nm_soflags, nmp->nm_cwnd); + FSDBG(529, nmp, nmp->nm_state, nmp->nm_soflags, nmp->nm_cwnd); nmp->nm_timeouts = 0; return (0); @@ -523,6 +538,9 @@ nfs_reconnect(rep) while ((error = nfs_connect(nmp, rep))) { if (error == EINTR || error == ERESTART) return (EINTR); + if (error == EIO) + return (EIO); + nfs_down(rep, "can not connect", error); (void) tsleep((caddr_t)&lbolt, PSOCK, "nfscon", 0); } @@ -531,7 +549,7 @@ nfs_reconnect(rep) * Loop through outstanding request list and fix up all requests * on old socket. 
*/ - for (rp = nfs_reqq.tqh_first; rp != 0; rp = rp->r_chain.tqe_next) { + TAILQ_FOREACH(rp, &nfs_reqq, r_chain) { if (rp->r_nmp == nmp) rp->r_flags |= R_MUSTRESEND; } @@ -578,15 +596,16 @@ nfs_send(so, nam, top, rep) struct nfsreq *rep; { struct sockaddr *sendnam; - int error, soflags, flags; + int error, error2, soflags, flags; int xidqueued = 0; struct nfsreq *rp; char savenametolog[MNAMELEN]; if (rep) { - if (rep->r_flags & R_SOFTTERM) { + error = nfs_sigintr(rep->r_nmp, rep, rep->r_procp); + if (error) { m_freem(top); - return (EINTR); + return (error); } if ((so = rep->r_nmp->nm_so) == NULL) { rep->r_flags |= R_MUSTRESEND; @@ -595,7 +614,7 @@ nfs_send(so, nam, top, rep) } rep->r_flags &= ~R_MUSTRESEND; soflags = rep->r_nmp->nm_soflags; - for (rp = nfs_reqq.tqh_first; rp; rp = rp->r_chain.tqe_next) + TAILQ_FOREACH(rp, &nfs_reqq, r_chain) if (rp == rep) break; if (rp) @@ -634,8 +653,7 @@ nfs_send(so, nam, top, rep) if (error) { if (rep) { if (xidqueued) { - for (rp = nfs_reqq.tqh_first; rp; - rp = rp->r_chain.tqe_next) + TAILQ_FOREACH(rp, &nfs_reqq, r_chain) if (rp == rep && rp->r_xid == xidqueued) break; if (!rp) @@ -647,9 +665,10 @@ nfs_send(so, nam, top, rep) /* * Deal with errors for the client side. */ - if (rep->r_flags & R_SOFTTERM) - error = EINTR; - else { + error2 = nfs_sigintr(rep->r_nmp, rep, rep->r_procp); + if (error2) { + error = error2; + } else { rep->r_flags |= R_MUSTRESEND; NFS_DPF(DUP, ("nfs_send RESEND error=%d\n", error)); @@ -660,9 +679,10 @@ nfs_send(so, nam, top, rep) /* * Handle any recoverable (soft) socket errors here. (???) */ - if (error != EINTR && error != ERESTART && - error != EWOULDBLOCK && error != EPIPE) + if (error != EINTR && error != ERESTART && error != EIO && + error != EWOULDBLOCK && error != EPIPE) { error = 0; + } } return (error); } @@ -692,7 +712,7 @@ nfs_receive(rep, aname, mp) struct sockaddr *tmp_nam; struct mbuf *mhck; struct sockaddr_in *sin; - int error, sotype, rcvflg; + int error, error2, sotype, rcvflg; struct proc *p = current_proc(); /* XXX */ /* @@ -711,7 +731,7 @@ nfs_receive(rep, aname, mp) * until we have an entire rpc request/reply. */ if (sotype != SOCK_DGRAM) { - error = nfs_sndlock(&rep->r_nmp->nm_flag, rep); + error = nfs_sndlock(rep); if (error) return (error); tryagain: @@ -724,15 +744,17 @@ tryagain: * attempt that has essentially shut down this * mount point. 
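A substitution repeated throughout the nfs_send() and nfs_receive() hunks: direct tests of R_SOFTTERM, which could only yield a hard-coded EINTR, become calls to nfs_sigintr(), which decides whether the request should give up and with what errno. A self-contained sketch of the decision that call encapsulates; the real function inspects the process's pending signal mask and the mount's state, which are reduced to booleans here.

#include <errno.h>

static int
sketch_sigintr(int soft_terminated, int mount_interruptible,
    int signal_pending)
{
	if (soft_terminated)
		return (EINTR);		/* request already marked dead */
	if (mount_interruptible && signal_pending)
		return (EINTR);		/* intr mount plus pending signal */
	return (0);			/* keep waiting */
}

Centralizing the check is what lets these hunks return distinct errors, for instance ENXIO once the mount is gone, instead of a blanket EINTR.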
*/ - if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) { - nfs_sndunlock(&rep->r_nmp->nm_flag); + if ((error = nfs_sigintr(rep->r_nmp, rep, p)) || rep->r_mrep) { + nfs_sndunlock(rep); + if (error) + return (error); return (EINTR); } so = rep->r_nmp->nm_so; if (!so) { error = nfs_reconnect(rep); if (error) { - nfs_sndunlock(&rep->r_nmp->nm_flag); + nfs_sndunlock(rep); return (error); } goto tryagain; @@ -751,13 +773,13 @@ tryagain: if (error) { if (error == EINTR || error == ERESTART || (error = nfs_reconnect(rep))) { - nfs_sndunlock(&rep->r_nmp->nm_flag); + nfs_sndunlock(rep); return (error); } goto tryagain; } } - nfs_sndunlock(&rep->r_nmp->nm_flag); + nfs_sndunlock(rep); if (sotype == SOCK_STREAM) { aio.iov_base = (caddr_t) &len; aio.iov_len = sizeof(u_long); @@ -773,12 +795,13 @@ tryagain: thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); error = soreceive(so, (struct sockaddr **)0, &auio, (struct mbuf **)0, (struct mbuf **)0, &rcvflg); - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); if (!rep->r_nmp) /* if unmounted then bailout */ goto shutout; if (error == EWOULDBLOCK && rep) { - if (rep->r_flags & R_SOFTTERM) - return (EINTR); + error2 = nfs_sigintr(rep->r_nmp, rep, p); + if (error2) + error = error2; } } while (error == EWOULDBLOCK); if (!error && auio.uio_resid > 0) { @@ -844,16 +867,18 @@ tryagain: rcvflg = 0; error = soreceive(so, (struct sockaddr **)0, &auio, mp, &control, &rcvflg); + if (control) + m_freem(control); if (!rep->r_nmp) /* if unmounted then bailout */ { thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); goto shutout; } - if (control) - m_freem(control); if (error == EWOULDBLOCK && rep) { - if (rep->r_flags & R_SOFTTERM) { - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - return (EINTR); + error2 = nfs_sigintr(rep->r_nmp, rep, p); + if (error2) { + thread_funnel_switch(NETWORK_FUNNEL, + KERNEL_FUNNEL); + return (error2); } } } while (error == EWOULDBLOCK || @@ -876,15 +901,29 @@ errout: "receive error %d from nfs server %s\n", error, rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname); - error = nfs_sndlock(&rep->r_nmp->nm_flag, rep); + error = nfs_sndlock(rep); if (!error) error = nfs_reconnect(rep); if (!error) goto tryagain; } } else { - if ((so = rep->r_nmp->nm_so) == NULL) - return (EACCES); + /* + * We could have failed while rebinding the datagram socket + * so we need to attempt to rebind here. + */ + if ((so = rep->r_nmp->nm_so) == NULL) { + error = nfs_sndlock(rep); + if (!error) { + error = nfs_reconnect(rep); + nfs_sndunlock(rep); + } + if (error) + return (error); + if (!rep->r_nmp) /* if unmounted then bailout */ + return (ENXIO); + so = rep->r_nmp->nm_so; + } if (so->so_state & SS_ISCONNECTED) getnam = (struct sockaddr **)0; else @@ -907,18 +946,44 @@ errout: FREE(*getnam, M_SONAME); *aname = mhck; } - if (!rep->r_nmp) /* if unmounted then bailout */ { - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - goto shutout; - } - - if (error == EWOULDBLOCK && - (rep->r_flags & R_SOFTTERM)) { - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - return (EINTR); + if (!rep->r_nmp) /* if unmounted then bailout */ + goto dgramout; + if (error) { + error2 = nfs_sigintr(rep->r_nmp, rep, p); + if (error2) { + error = error2; + goto dgramout; + } + } + /* Reconnect for all errors. We may be receiving + * soft/hard/blocking errors because of a network + * change. + * XXX: we should rate limit or delay this + * to once every N attempts or something. + * although TCP doesn't seem to. 
+ */ + if (error) { + thread_funnel_switch(NETWORK_FUNNEL, + KERNEL_FUNNEL); + error2 = nfs_sndlock(rep); + if (!error2) { + error2 = nfs_reconnect(rep); + if (error2) + error = error2; + else if (!rep->r_nmp) /* if unmounted then bailout */ + error = ENXIO; + else + so = rep->r_nmp->nm_so; + nfs_sndunlock(rep); + } else { + error = error2; + } + thread_funnel_switch(KERNEL_FUNNEL, + NETWORK_FUNNEL); } } while (error == EWOULDBLOCK); +dgramout: thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); len -= auio.uio_resid; } @@ -976,7 +1041,7 @@ nfs_reply(myrep) * would hang trying to nfs_receive an already received reply. */ if (myrep->r_mrep != NULL) { - nfs_rcvunlock(&nmp->nm_flag); + nfs_rcvunlock(myrep); FSDBG(530, myrep->r_xid, myrep, myrep->r_nmp, -1); return (0); } @@ -985,20 +1050,22 @@ nfs_reply(myrep) * is still intact by checks done in nfs_rcvlock. */ error = nfs_receive(myrep, &nam, &mrep); + if (nam) + m_freem(nam); /* * Bailout asap if nfsmount struct gone (unmounted). */ if (!myrep->r_nmp || !nmp->nm_so) { FSDBG(530, myrep->r_xid, myrep, nmp, -2); - return (ECONNABORTED); + return (ENXIO); } if (error) { FSDBG(530, myrep->r_xid, myrep, nmp, error); - nfs_rcvunlock(&nmp->nm_flag); + nfs_rcvunlock(myrep); /* Bailout asap if nfsmount struct gone (unmounted). */ if (!myrep->r_nmp || !nmp->nm_so) - return (ECONNABORTED); + return (ENXIO); /* * Ignore routing errors on connectionless protocols?? @@ -1011,8 +1078,6 @@ nfs_reply(myrep) } return (error); } - if (nam) - m_freem(nam); /* * We assume all is fine, but if we did not have an error @@ -1029,7 +1094,7 @@ nfs_reply(myrep) */ if (!mrep) { FSDBG(530, myrep->r_xid, myrep, nmp, -3); - return (ECONNABORTED); /* sounds good */ + return (ENXIO); /* sounds good */ } /* @@ -1053,8 +1118,8 @@ nfs_reply(myrep) m_freem(mrep); #endif nfsmout: - if (nmp->nm_flag & NFSMNT_RCVLOCK) - nfs_rcvunlock(&nmp->nm_flag); + if (nmp->nm_state & NFSSTA_RCVLOCK) + nfs_rcvunlock(myrep); if (myrep->r_flags & R_GETONEREP) return (0); /* this path used by NQNFS */ continue; @@ -1064,13 +1129,17 @@ nfsmout: * Loop through the request list to match up the reply * Iff no match, just drop the datagram */ - for (rep = nfs_reqq.tqh_first; rep != 0; - rep = rep->r_chain.tqe_next) { + TAILQ_FOREACH(rep, &nfs_reqq, r_chain) { if (rep->r_mrep == NULL && rxid == rep->r_xid) { /* Found it.. */ rep->r_mrep = mrep; rep->r_md = md; rep->r_dpos = dpos; + /* + * If we're tracking the round trip time + * then we update the circular log here + * with the stats from our current request. + */ if (nfsrtton) { struct rttl *rt; @@ -1084,7 +1153,7 @@ nfsmout: rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1]; rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1]; rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid; - rt->tstamp = time; + microtime(&rt->tstamp); // XXX unused if (rep->r_flags & R_TIMING) rt->rtt = rep->r_rtt; else @@ -1105,11 +1174,10 @@ nfsmout: if (nmp->nm_cwnd > NFS_MAXCWND) nmp->nm_cwnd = NFS_MAXCWND; } - if (!(rep->r_flags & R_SENT)) - printf("nfs_reply: unsent xid=%x", - rep->r_xid); - rep->r_flags &= ~R_SENT; - nmp->nm_sent -= NFS_CWNDSCALE; + if (rep->r_flags & R_SENT) { + rep->r_flags &= ~R_SENT; + nmp->nm_sent -= NFS_CWNDSCALE; + } /* * Update rtt using a gain of 0.125 on the mean * and a gain of 0.25 on the deviation. @@ -1137,7 +1205,7 @@ nfsmout: break; } } - nfs_rcvunlock(&nmp->nm_flag); + nfs_rcvunlock(myrep); /* * If not matched to a request, drop it. * If it's mine, get out. 
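When a reply is matched above, the mount's round-trip estimators are updated with a gain of 1/8 on the mean and 1/4 on the deviation, kept in the usual scaled fixed-point form (the mean stored times 8, the deviation times 4). A self-contained sketch of that update:

static void
sketch_rtt_update(int *srtt, int *sdrtt, int measured_rtt)
{
	int delta;

	delta = measured_rtt - (*srtt >> 3);	/* error against the mean */
	if ((*srtt += delta) <= 0)		/* mean += error / 8 */
		*srtt = 1;
	if (delta < 0)
		delta = -delta;
	delta -= (*sdrtt >> 2);
	if ((*sdrtt += delta) <= 0)		/* dev += (|error| - dev) / 4 */
		*sdrtt = 1;
}

The shifts recover the unscaled values, so adding the raw error to the scaled accumulator is exactly a 1/8 (or 1/4) gain; clamping at 1 keeps the retransmit-timeout computation from collapsing to zero.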
@@ -1179,7 +1247,7 @@ nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp, xidp) caddr_t *dposp; u_int64_t *xidp; { - register struct mbuf *m, *mrep; + register struct mbuf *m, *mrep, *m2; register struct nfsreq *rep, *rp; register u_long *tl; register int i; @@ -1196,33 +1264,33 @@ nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp, xidp) u_quad_t frev; char *auth_str, *verf_str; NFSKERBKEY_T key; /* save session key */ + int nmsotype; + struct timeval now; if (xidp) *xidp = 0; - nmp = VFSTONFS(vp->v_mount); + MALLOC_ZONE(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK); - FSDBG_TOP(531, vp, procnum, nmp, rep); - - /* - * make sure if we blocked above, that the file system didn't get - * unmounted leaving nmp bogus value to trip on later and crash. - * Note nfs_unmount will set rep->r_nmp if unmounted volume, but we - * aren't that far yet. SO this is best we can do. I wanted to check - * for vp->v_mount = 0 also below, but that caused reboot crash. - * Something must think it's okay for vp-v_mount=0 during booting. - * Thus the best I can do here is see if we still have a vnode. - */ - if (vp->v_type == VBAD) { - FSDBG_BOT(531, 1, vp, nmp, rep); - _FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); - return (EINVAL); + nmp = VFSTONFS(vp->v_mount); + if (nmp == NULL || + (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_TIMEO)) == + (NFSSTA_FORCE|NFSSTA_TIMEO)) { + FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); + return (ENXIO); } + nmsotype = nmp->nm_sotype; + + FSDBG_TOP(531, vp, procnum, nmp, rep); + rep->r_nmp = nmp; rep->r_vp = vp; rep->r_procp = procp; rep->r_procnum = procnum; + microuptime(&now); + rep->r_lastmsg = now.tv_sec - + ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay)); i = 0; m = mrest; while (m) { @@ -1235,6 +1303,12 @@ nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp, xidp) * Get the RPC header with authorization. */ kerbauth: + nmp = VFSTONFS(vp->v_mount); + if (!nmp) { + FSDBG_BOT(531, error, rep->r_xid, nmp, rep); + FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); + return (ENXIO); + } verf_str = auth_str = (char *)0; if (nmp->nm_flag & NFSMNT_KERB) { verf_str = nickv; @@ -1243,11 +1317,22 @@ kerbauth: bzero((caddr_t)key, sizeof (key)); if (failed_auth || nfs_getnickauth(nmp, cred, &auth_str, &auth_len, verf_str, verf_len)) { + nmp = VFSTONFS(vp->v_mount); + if (!nmp) { + FSDBG_BOT(531, 2, vp, error, rep); + FREE_ZONE((caddr_t)rep, + sizeof (struct nfsreq), M_NFSREQ); + m_freem(mrest); + return (ENXIO); + } error = nfs_getauth(nmp, rep, cred, &auth_str, &auth_len, verf_str, &verf_len, key); + nmp = VFSTONFS(vp->v_mount); + if (!error && !nmp) + error = ENXIO; if (error) { FSDBG_BOT(531, 2, vp, error, rep); - _FREE_ZONE((caddr_t)rep, + FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); m_freem(mrest); return (error); @@ -1271,7 +1356,7 @@ kerbauth: /* * For stream protocols, insert a Sun RPC Record Mark. 
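As the comment above says, stream transports need a Sun RPC record mark: the hunk that follows prepends four big-endian bytes whose top bit flags the final fragment and whose low 31 bits give the fragment length (the kernel sends each request as a single, final fragment). A minimal sketch of the encoding, using the userland htonl() in place of the kernel's:

#include <stdint.h>
#include <arpa/inet.h>		/* htonl(); userland stand-in */

static uint32_t
sketch_rpc_record_mark(uint32_t frag_len, int last_fragment)
{
	/* high bit: last-fragment flag; low 31 bits: fragment length */
	return (htonl((last_fragment ? 0x80000000u : 0u) |
	    (frag_len & 0x7fffffffu)));
}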
*/ - if (nmp->nm_sotype == SOCK_STREAM) { + if (nmsotype == SOCK_STREAM) { M_PREPEND(m, NFSX_UNSIGNED, M_WAIT); *mtod(m, u_long *) = htonl(0x80000000 | (m->m_pkthdr.len - NFSX_UNSIGNED)); @@ -1279,7 +1364,8 @@ kerbauth: rep->r_mreq = m; rep->r_xid = xid; tryagain: - if (nmp->nm_flag & NFSMNT_SOFT) + nmp = VFSTONFS(vp->v_mount); + if (nmp && (nmp->nm_flag & NFSMNT_SOFT)) rep->r_retry = nmp->nm_retry; else rep->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */ @@ -1302,19 +1388,22 @@ tryagain: TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain); /* Get send time for nqnfs */ - reqtime = time.tv_sec; + microtime(&now); + reqtime = now.tv_sec; /* * If backing off another request or avoiding congestion, don't * send this one now but let timer do it. If not timing a request, * do it now. */ - if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM || + if (nmp && nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM || (nmp->nm_flag & NFSMNT_DUMBTIMR) || nmp->nm_sent < nmp->nm_cwnd)) { + int connrequired = (nmp->nm_soflags & PR_CONNREQUIRED); + splx(s); - if (nmp->nm_soflags & PR_CONNREQUIRED) - error = nfs_sndlock(&nmp->nm_flag, rep); + if (connrequired) + error = nfs_sndlock(rep); /* * Set the R_SENT before doing the send in case another thread @@ -1328,13 +1417,15 @@ tryagain: rep->r_flags |= R_SENT; } - m = m_copym(m, 0, M_COPYALL, M_WAIT); - error = nfs_send(nmp->nm_so, nmp->nm_nam, m, rep); - if (nmp->nm_soflags & PR_CONNREQUIRED) - nfs_sndunlock(&nmp->nm_flag); + m2 = m_copym(m, 0, M_COPYALL, M_WAIT); + error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep); + if (connrequired) + nfs_sndunlock(rep); } + nmp = VFSTONFS(vp->v_mount); if (error) { - nmp->nm_sent -= NFS_CWNDSCALE; + if (nmp) + nmp->nm_sent -= NFS_CWNDSCALE; rep->r_flags &= ~R_SENT; } } else { @@ -1351,39 +1442,35 @@ tryagain: /* * RPC done, unlink the request. */ - s = splsoftclock(); - for (rp = nfs_reqq.tqh_first; rp; - rp = rp->r_chain.tqe_next) - if (rp == rep && rp->r_xid == xid) - break; - if (!rp) - panic("nfs_request race, rep %x xid %x", rep, xid); - TAILQ_REMOVE(&nfs_reqq, rep, r_chain); - splx(s); + nfs_repdequeue(rep); + + nmp = VFSTONFS(vp->v_mount); /* * Decrement the outstanding request count. */ if (rep->r_flags & R_SENT) { - FSDBG(531, rep->r_xid, rep, nmp->nm_sent, nmp->nm_cwnd); rep->r_flags &= ~R_SENT; /* paranoia */ - nmp->nm_sent -= NFS_CWNDSCALE; + if (nmp) { + FSDBG(531, rep->r_xid, rep, nmp->nm_sent, nmp->nm_cwnd); + nmp->nm_sent -= NFS_CWNDSCALE; + } } /* * If there was a successful reply and a tprintf msg. * tprintf a response. 
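The hunk below retires the one-shot R_TPRINTFMSG console message in favor of paired notifications: nfs_down() reports "not responding" and nfs_up() reports "is alive again" once a reply arrives. In the timer later in this file, the down message is gated on a few retransmits and throttled by nm_tprintf_delay. A sketch of that throttling predicate; it deliberately simplifies the patch's condition, which also fires on R_RESENDERR.

#include <time.h>

static int
sketch_should_log_down(time_t now, time_t last_msg, int tprintf_delay,
    int rexmits)
{
	/* complain only after a few retransmits, and at most once
	 * per tprintf_delay seconds */
	return (rexmits > 2 && last_msg + tprintf_delay < now);
}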
*/ - if (!error && (rep->r_flags & R_TPRINTFMSG)) - nfs_msg(rep->r_procp, nmp->nm_mountp->mnt_stat.f_mntfromname, - "is alive again"); + nfs_up(rep, "is alive again", error); mrep = rep->r_mrep; md = rep->r_md; dpos = rep->r_dpos; + if (!error && !nmp) + error = ENXIO; if (error) { m_freem(rep->r_mreq); FSDBG_BOT(531, error, rep->r_xid, nmp, rep); - _FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); + FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); return (error); } @@ -1408,7 +1495,7 @@ tryagain: m_freem(mrep); m_freem(rep->r_mreq); FSDBG_BOT(531, error, rep->r_xid, nmp, rep); - _FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); + FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); return (error); } @@ -1433,16 +1520,21 @@ tryagain: error == NFSERR_TRYLATER) { m_freem(mrep); error = 0; - waituntil = time.tv_sec + trylater_delay; + microuptime(&now); + waituntil = now.tv_sec + trylater_delay; NFS_DPF(DUP, ("nfs_request %s flag=%x trylater_cnt=%x waituntil=%lx trylater_delay=%x\n", nmp->nm_mountp->mnt_stat.f_mntfromname, nmp->nm_flag, trylater_cnt, waituntil, trylater_delay)); - while (time.tv_sec < waituntil) + while (now.tv_sec < waituntil) { (void)tsleep((caddr_t)&lbolt, PSOCK, "nqnfstry", 0); - trylater_delay *= nfs_backoff[trylater_cnt]; + microuptime(&now); + } + trylater_delay *= 2; + if (trylater_delay > 60) + trylater_delay = 60; if (trylater_cnt < 7) trylater_cnt++; goto tryagain; @@ -1463,7 +1555,7 @@ tryagain: m_freem(mrep); m_freem(rep->r_mreq); FSDBG_BOT(531, error, rep->r_xid, nmp, rep); - _FREE_ZONE((caddr_t)rep, + FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); return (error); } @@ -1479,7 +1571,8 @@ tryagain: nfsm_dissect(tl, u_long *, 4*NFSX_UNSIGNED); cachable = fxdr_unsigned(int, *tl++); reqtime += fxdr_unsigned(int, *tl++); - if (reqtime > time.tv_sec) { + microtime(&now); + if (reqtime > now.tv_sec) { fxdr_hyper(tl, &frev); nqnfs_clientlease(nmp, np, nqlflag, cachable, reqtime, frev); @@ -1499,7 +1592,7 @@ tryagain: nfsmout: m_freem(rep->r_mreq); FSDBG_BOT(531, error, rep->r_xid, nmp, rep); - _FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); + FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); return (error); } @@ -1670,6 +1763,7 @@ nfs_rephead(siz, nd, slp, err, cache, frev, mrq, mbp, bposp) static void nfs_softterm(struct nfsreq *rep) { + rep->r_flags |= R_SOFTTERM; if (rep->r_flags & R_SENT) { FSDBG(532, rep->r_xid, rep, rep->r_nmp->nm_sent, @@ -1689,6 +1783,63 @@ nfs_timer_funnel(arg) } +/* + * Ensure rep isn't in use by the timer, then dequeue it. + */ +void +nfs_repdequeue(struct nfsreq *rep) +{ + int s; + + while ((rep->r_flags & R_BUSY)) { + rep->r_flags |= R_WAITING; + tsleep(rep, PSOCK, "repdeq", 0); + } + s = splsoftclock(); + TAILQ_REMOVE(&nfs_reqq, rep, r_chain); + splx(s); +} + +/* + * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not + * free()'d out from under it. + */ +void +nfs_repbusy(struct nfsreq *rep) +{ + + if ((rep->r_flags & R_BUSY)) + panic("rep locked"); + rep->r_flags |= R_BUSY; +} + +/* + * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied. + */ +struct nfsreq * +nfs_repnext(struct nfsreq *rep) +{ + struct nfsreq * nextrep; + + if (rep == NULL) + return (NULL); + /* + * We need to get and busy the next req before signalling the + * current one, otherwise wakeup() may block us and we'll race to + * grab the next req. + */ + nextrep = TAILQ_NEXT(rep, r_chain); + if (nextrep != NULL) + nfs_repbusy(nextrep); + /* unbusy and signal. 
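The NFSERR_TRYLATER path above replaces the old table-driven nfs_backoff[] multiplier with straight doubling capped at 60 seconds. The logic, isolated into a pure function:

    /*
     * Delay growth for NFSERR_TRYLATER replies: double and clamp at
     * 60 seconds, replacing the nfs_backoff[] table lookup.
     */
    static int
    next_trylater_delay(int delay)
    {
            delay *= 2;
            if (delay > 60)
                    delay = 60;
            return (delay);
    }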
*/ + rep->r_flags &= ~R_BUSY; + if ((rep->r_flags & R_WAITING)) { + rep->r_flags &= ~R_WAITING; + wakeup(rep); + } + return (nextrep); +} + /* * Nfs timer routine * Scan the nfsreq list and retranmit any requests that have timed out @@ -1699,7 +1850,7 @@ void nfs_timer(arg) void *arg; /* never used */ { - register struct nfsreq *rep, *rp; + register struct nfsreq *rep; register struct mbuf *m; register struct socket *so; register struct nfsmount *nmp; @@ -1715,17 +1866,16 @@ nfs_timer(arg) #endif int flags, rexmit, cwnd, sent; u_long xid; + struct timeval now; s = splnet(); /* * XXX If preemptable threads are implemented the spls used for the * outstanding request queue must be replaced with mutexes. */ -rescan: #ifdef NFSTRACESUSPENDERS if (NFSTRACE_SUSPENDING) { - for (rep = nfs_reqq.tqh_first; rep != 0; - rep = rep->r_chain.tqe_next) + TAILQ_FOREACH(rep, &nfs_reqq, r_chain) if (rep->r_xid == nfstracexid) break; if (!rep) { @@ -1735,7 +1885,11 @@ rescan: } } #endif - for (rep = nfs_reqq.tqh_first; rep != 0; rep = rep->r_chain.tqe_next) { + rep = TAILQ_FIRST(&nfs_reqq); + if (rep != NULL) + nfs_repbusy(rep); + microuptime(&now); + for ( ; rep != NULL ; rep = nfs_repnext(rep)) { #ifdef NFSTRACESUSPENDERS if (rep->r_mrep && !NFSTRACE_SUSPENDING) { nfstracexid = rep->r_xid; @@ -1747,9 +1901,13 @@ rescan: continue; if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) continue; - if (nfs_sigintr(nmp, rep, rep->r_procp)) { - nfs_softterm(rep); + if (nfs_sigintr(nmp, rep, rep->r_procp)) continue; + if (nmp->nm_tprintf_initial_delay != 0 && + (rep->r_rexmit > 2 || (rep->r_flags & R_RESENDERR)) && + rep->r_lastmsg + nmp->nm_tprintf_delay < now.tv_sec) { + rep->r_lastmsg = now.tv_sec; + nfs_down(rep, "not responding", 0); } if (rep->r_rtt >= 0) { rep->r_rtt++; @@ -1768,15 +1926,10 @@ rescan: nmp->nm_timeouts++; } /* - * Check for server not responding + * Check for too many retransmits. This is never true for + * 'hard' mounts because we set r_retry to NFS_MAXREXMIT + 1 + * and never allow r_rexmit to be more than NFS_MAXREXMIT. */ - if ((rep->r_flags & R_TPRINTFMSG) == 0 && - rep->r_rexmit > nmp->nm_deadthresh) { - nfs_msg(rep->r_procp, - nmp->nm_mountp->mnt_stat.f_mntfromname, - "not responding"); - rep->r_flags |= R_TPRINTFMSG; - } if (rep->r_rexmit >= rep->r_retry) { /* too many */ nfsstats.rpctimeouts++; nfs_softterm(rep); @@ -1857,29 +2010,11 @@ rescan: thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); FSDBG(535, xid, error, sent, cwnd); - /* - * This is to fix "nfs_sigintr" DSI panics. - * We may have slept during the send so the current - * place in the request queue may have been released. - * Due to zone_gc it may even be part of an - * unrelated newly allocated data structure. - * Restart the list scan from the top if needed... - */ - for (rp = nfs_reqq.tqh_first; rp; - rp = rp->r_chain.tqe_next) - if (rp == rep && rp->r_xid == xid) - break; - if (!rp) { - if (!error) - goto rescan; - panic("nfs_timer: race error %d xid 0x%x\n", - error, xid); - } if (error) { if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) so->so_error = 0; - rep->r_flags = flags; + rep->r_flags = flags | R_RESENDERR; rep->r_rexmit = rexmit; nmp->nm_cwnd = cwnd; nmp->nm_sent = sent; @@ -1893,8 +2028,9 @@ rescan: /* * Call the nqnfs server timer once a second to handle leases. 
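nfs_repbusy()/nfs_repnext() above implement a hand-over-hand walk of nfs_reqq that replaces nfs_timer()'s old rescan-on-race loop: the successor is marked busy before the current entry is released, so a request can never be freed out from under the timer. A userland model of the discipline, with a mutex and condvar standing in for the kernel's spl levels and tsleep()/wakeup():

    #include <pthread.h>
    #include <stddef.h>

    /* Userland model only: R_BUSY/R_WAITING become plain ints. */
    struct req {
            struct req *next;
            int busy;
            int waiting;
    };

    static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  qcv   = PTHREAD_COND_INITIALIZER;

    /*
     * Hand-over-hand step, like nfs_repnext(): busy the successor
     * *before* releasing the current node, then wake any thread that
     * is blocked in the dequeue path waiting for it.
     */
    static struct req *
    rep_next(struct req *rep)
    {
            struct req *nxt;

            pthread_mutex_lock(&qlock);
            nxt = rep->next;
            if (nxt != NULL)
                    nxt->busy = 1;
            rep->busy = 0;
            if (rep->waiting) {
                    rep->waiting = 0;
                    pthread_cond_broadcast(&qcv);
            }
            pthread_mutex_unlock(&qlock);
            return (nxt);
    }

nfs_repdequeue() is the consumer of that wakeup: it sleeps while R_BUSY is set and only then unlinks the request, and nfs_repbusy() panics if the bit is already set.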
*/ - if (lasttime != time.tv_sec) { - lasttime = time.tv_sec; + microuptime(&now); + if (lasttime != now.tv_sec) { + lasttime = now.tv_sec; nqnfs_serverd(); } @@ -1902,10 +2038,10 @@ rescan: * Scan the write gathering queues for writes that need to be * completed now. */ - cur_usec = (u_quad_t)time.tv_sec * 1000000 + (u_quad_t)time.tv_usec; - for (slp = nfssvc_sockhead.tqh_first; slp != 0; - slp = slp->ns_chain.tqe_next) { - if (slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time<=cur_usec) + cur_usec = (u_quad_t)now.tv_sec * 1000000 + (u_quad_t)now.tv_usec; + TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) { + if (LIST_FIRST(&slp->ns_tq) && + LIST_FIRST(&slp->ns_tq)->nd_time <= cur_usec) nfsrv_wakenfsd(slp); } #endif /* NFS_NOSERVER */ @@ -1917,26 +2053,82 @@ rescan: /* * Test for a termination condition pending on the process. - * This is used for NFSMNT_INT mounts. + * This is used to determine if we need to bail on a mount. + * EIO is returned if there has been a soft timeout. + * EINTR is returned if there is a signal pending that is not being ignored + * and the mount is interruptable, or if we are a thread that is in the process + * of cancellation (also SIGKILL posted). */ int nfs_sigintr(nmp, rep, p) struct nfsmount *nmp; struct nfsreq *rep; - register struct proc *p; + struct proc *p; { + struct uthread *curr_td; + sigset_t pending_sigs; + int context_good = 0; + struct nfsmount *repnmp; + + if (nmp == NULL) + return (ENXIO); + if (rep != NULL) { + repnmp = rep->r_nmp; + /* we've had a forced unmount. */ + if (repnmp == NULL) + return (ENXIO); + /* request has timed out on a 'soft' mount. */ + if (rep->r_flags & R_SOFTTERM) + return (EIO); + /* + * We're in the progress of a force unmount and there's + * been a timeout we're dead and fail IO. + */ + if ((repnmp->nm_state & (NFSSTA_FORCE|NFSSTA_TIMEO)) == + (NFSSTA_FORCE|NFSSTA_TIMEO)) + return (EIO); + /* Someone is unmounting us, go soft and mark it. */ + if ((repnmp->nm_mountp->mnt_kern_flag & MNTK_FRCUNMOUNT)) { + repnmp->nm_flag |= NFSMNT_SOFT; + nmp->nm_state |= NFSSTA_FORCE; + } + /* + * If the mount is hung and we've requested not to hang + * on remote filesystems, then bail now. + */ + if (p != NULL && (p->p_flag & P_NOREMOTEHANG) != 0 && + (repnmp->nm_state & NFSSTA_TIMEO) != 0) + return (EIO); + } + /* XXX: is this valid? this probably should be an assertion. */ + if (p == NULL) + return (0); - struct uthread *ut; - - ut = (struct uthread *)get_bsdthread_info(current_act()); - - if (rep && (rep->r_flags & R_SOFTTERM)) + /* + * XXX: Since nfs doesn't have a good shot at getting the current + * thread we take a guess. (only struct proc * are passed to VOPs) + * What we do is look at the current thread, if it belongs to the + * passed in proc pointer then we have a "good/accurate" context + * and can make an accurate guess as to what to do. + * However if we have a bad context we have to make due with what + * is in the proc struct which may not be as up to date as we'd + * like. + * This is ok because the process will call us with the correct + * context after a short timeout while waiting for a response. + */ + curr_td = (struct uthread *)get_bsdthread_info(current_act()); + if (curr_td->uu_proc == p) + context_good = 1; + if (context_good && current_thread_aborted()) return (EINTR); - if (!(nmp->nm_flag & NFSMNT_INT)) - return (0); - if (p && ut && ut->uu_siglist && - (((ut->uu_siglist & ~ut->uu_sigmask) & ~p->p_sigignore) & - NFSINT_SIGMASK)) + /* mask off thread and process blocked signals. 
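The tail of the new nfs_sigintr() reduces to a small mask computation: take the pending set (thread-level when the calling thread really belongs to the passed-in proc, otherwise the possibly stale process-level set), strip blocked and ignored signals, and intersect with the signals NFS treats as interrupting. A model of just that computation, with plain bitmasks standing in for the kernel's sigset handling; the NFSINT_SIGMASK membership noted in the comment is the classic BSD definition:

    #include <errno.h>

    typedef unsigned long sigmask_t;

    /*
     * Pending signals, minus blocked and ignored ones, intersected with
     * the set NFS treats as interrupting (classically SIGINT, SIGTERM,
     * SIGKILL, SIGHUP and SIGQUIT), honored only on an NFSMNT_INT mount.
     */
    static int
    sigintr_model(sigmask_t pending, sigmask_t blocked, sigmask_t ignored,
        sigmask_t nfs_mask, int intr_mount)
    {
            sigmask_t sigs = (pending & ~blocked) & ~ignored & nfs_mask;

            return ((sigs && intr_mount) ? EINTR : 0);
    }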
*/ + if (context_good) + pending_sigs = curr_td->uu_siglist & ~curr_td->uu_sigmask; + else + pending_sigs = p->p_siglist; + /* mask off process level and NFS ignored signals. */ + pending_sigs &= ~p->p_sigignore & NFSINT_SIGMASK; + if (pending_sigs && (nmp->nm_flag & NFSMNT_INT) != 0) return (EINTR); return (0); } @@ -1948,25 +2140,29 @@ nfs_sigintr(nmp, rep, p) * in progress when a reconnect is necessary. */ int -nfs_sndlock(flagp, rep) - register int *flagp; +nfs_sndlock(rep) struct nfsreq *rep; { + register int *statep; struct proc *p; - int slpflag = 0, slptimeo = 0; + int error, slpflag = 0, slptimeo = 0; - if (rep) { - p = rep->r_procp; - if (rep->r_nmp->nm_flag & NFSMNT_INT) - slpflag = PCATCH; - } else - p = (struct proc *)0; - while (*flagp & NFSMNT_SNDLOCK) { - if (nfs_sigintr(rep->r_nmp, rep, p)) - return (EINTR); - *flagp |= NFSMNT_WANTSND; - (void) tsleep((caddr_t)flagp, slpflag | (PZERO - 1), "nfsndlck", - slptimeo); + if (rep->r_nmp == NULL) + return (ENXIO); + statep = &rep->r_nmp->nm_state; + + p = rep->r_procp; + if (rep->r_nmp->nm_flag & NFSMNT_INT) + slpflag = PCATCH; + while (*statep & NFSSTA_SNDLOCK) { + error = nfs_sigintr(rep->r_nmp, rep, p); + if (error) + return (error); + *statep |= NFSSTA_WANTSND; + if (p != NULL && (p->p_flag & P_NOREMOTEHANG) != 0) + slptimeo = hz; + (void) tsleep((caddr_t)statep, slpflag | (PZERO - 1), + "nfsndlck", slptimeo); if (slpflag == PCATCH) { slpflag = 0; slptimeo = 2 * hz; @@ -1976,9 +2172,9 @@ nfs_sndlock(flagp, rep) * nfs_sigintr and callers expect it in tact. */ if (!rep->r_nmp) - return (ECONNABORTED); /* don't have lock until out of loop */ + return (ENXIO); /* don't have lock until out of loop */ } - *flagp |= NFSMNT_SNDLOCK; + *statep |= NFSSTA_SNDLOCK; return (0); } @@ -1986,16 +2182,20 @@ nfs_sndlock(flagp, rep) * Unlock the stream socket for others. 
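nfs_sndlock() above (with nfs_sndunlock() and nfs_rcvlock() following) is the classic BSD sleep-lock idiom, now carried in nm_state instead of nm_flag: one LOCK bit, one WANT bit, a wakeup on release. The new twist is the hz timeout for P_NOREMOTEHANG processes, which turns the sleep into a once-a-second poll so nfs_sigintr() gets a chance to fail the wait. A userland model with pthreads standing in for tsleep()/wakeup():

    #include <pthread.h>
    #include <time.h>

    /* LOCK/WANT bits as in NFSSTA_SNDLOCK/NFSSTA_WANTSND. */
    enum { SND_LOCKED = 1, SND_WANTED = 2 };

    struct sndlock {
            pthread_mutex_t mtx;
            pthread_cond_t  cv;
            int             state;
    };

    /*
     * Acquire, re-checking for interruption each time around, and
     * sleeping at most one second per wait so the check actually runs
     * for processes that must not hang on a dead server.
     */
    static int
    snd_lock(struct sndlock *l, int (*sigintr)(void))
    {
            struct timespec ts;
            int error;

            pthread_mutex_lock(&l->mtx);
            while (l->state & SND_LOCKED) {
                    if ((error = sigintr()) != 0) {
                            pthread_mutex_unlock(&l->mtx);
                            return (error);
                    }
                    l->state |= SND_WANTED;
                    clock_gettime(CLOCK_REALTIME, &ts);
                    ts.tv_sec += 1;         /* like slptimeo = hz */
                    pthread_cond_timedwait(&l->cv, &l->mtx, &ts);
            }
            l->state |= SND_LOCKED;
            pthread_mutex_unlock(&l->mtx);
            return (0);
    }

    static void
    snd_unlock(struct sndlock *l)
    {
            pthread_mutex_lock(&l->mtx);
            l->state &= ~SND_LOCKED;
            if (l->state & SND_WANTED) {
                    l->state &= ~SND_WANTED;
                    pthread_cond_broadcast(&l->cv);
            }
            pthread_mutex_unlock(&l->mtx);
    }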
*/ void -nfs_sndunlock(flagp) - register int *flagp; +nfs_sndunlock(rep) + struct nfsreq *rep; { + register int *statep; - if ((*flagp & NFSMNT_SNDLOCK) == 0) + if (rep->r_nmp == NULL) + return; + statep = &rep->r_nmp->nm_state; + if ((*statep & NFSSTA_SNDLOCK) == 0) panic("nfs sndunlock"); - *flagp &= ~NFSMNT_SNDLOCK; - if (*flagp & NFSMNT_WANTSND) { - *flagp &= ~NFSMNT_WANTSND; - wakeup((caddr_t)flagp); + *statep &= ~NFSSTA_SNDLOCK; + if (*statep & NFSSTA_WANTSND) { + *statep &= ~NFSSTA_WANTSND; + wakeup((caddr_t)statep); } } @@ -2003,26 +2203,26 @@ static int nfs_rcvlock(rep) register struct nfsreq *rep; { - register int *flagp; - int slpflag, slptimeo = 0; + register int *statep; + int error, slpflag, slptimeo = 0; /* make sure we still have our mountpoint */ if (!rep->r_nmp) { if (rep->r_mrep != NULL) return (EALREADY); - return (ECONNABORTED); + return (ENXIO); } - flagp = &rep->r_nmp->nm_flag; - FSDBG_TOP(534, rep->r_xid, rep, rep->r_nmp, *flagp); - if (*flagp & NFSMNT_INT) + statep = &rep->r_nmp->nm_state; + FSDBG_TOP(534, rep->r_xid, rep, rep->r_nmp, *statep); + if (rep->r_nmp->nm_flag & NFSMNT_INT) slpflag = PCATCH; else slpflag = 0; - while (*flagp & NFSMNT_RCVLOCK) { - if (nfs_sigintr(rep->r_nmp, rep, rep->r_procp)) { + while (*statep & NFSSTA_RCVLOCK) { + if ((error = nfs_sigintr(rep->r_nmp, rep, rep->r_procp))) { FSDBG_BOT(534, rep->r_xid, rep, rep->r_nmp, 0x100); - return (EINTR); + return (error); } else if (rep->r_mrep != NULL) { /* * Don't bother sleeping if reply already arrived @@ -2031,9 +2231,16 @@ nfs_rcvlock(rep) return (EALREADY); } FSDBG(534, rep->r_xid, rep, rep->r_nmp, 0x102); - *flagp |= NFSMNT_WANTRCV; - (void) tsleep((caddr_t)flagp, slpflag | (PZERO - 1), "nfsrcvlk", - slptimeo); + *statep |= NFSSTA_WANTRCV; + /* + * We need to poll if we're P_NOREMOTEHANG so that we + * call nfs_sigintr periodically above. + */ + if (rep->r_procp != NULL && + (rep->r_procp->p_flag & P_NOREMOTEHANG) != 0) + slptimeo = hz; + (void) tsleep((caddr_t)statep, slpflag | (PZERO - 1), + "nfsrcvlk", slptimeo); if (slpflag == PCATCH) { slpflag = 0; slptimeo = 2 * hz; @@ -2044,15 +2251,15 @@ nfs_rcvlock(rep) */ if (!rep->r_nmp) { FSDBG_BOT(534, rep->r_xid, rep, rep->r_nmp, 0x103); - return (ECONNABORTED); /* don't have lock until out of loop */ + return (ENXIO); /* don't have lock until out of loop */ } } /* * nfs_reply will handle it if reply already arrived. * (We may have slept or been preempted while on network funnel). */ - FSDBG_BOT(534, rep->r_xid, rep, rep->r_nmp, *flagp); - *flagp |= NFSMNT_RCVLOCK; + FSDBG_BOT(534, rep->r_xid, rep, rep->r_nmp, *statep); + *statep |= NFSSTA_RCVLOCK; return (0); } @@ -2060,17 +2267,22 @@ nfs_rcvlock(rep) * Unlock the stream socket for others. */ static void -nfs_rcvunlock(flagp) - register int *flagp; +nfs_rcvunlock(rep) + register struct nfsreq *rep; { + register int *statep; + + if (rep->r_nmp == NULL) + return; + statep = &rep->r_nmp->nm_state; - FSDBG(533, flagp, *flagp, 0, 0); - if ((*flagp & NFSMNT_RCVLOCK) == 0) + FSDBG(533, statep, *statep, 0, 0); + if ((*statep & NFSSTA_RCVLOCK) == 0) panic("nfs rcvunlock"); - *flagp &= ~NFSMNT_RCVLOCK; - if (*flagp & NFSMNT_WANTRCV) { - *flagp &= ~NFSMNT_WANTRCV; - wakeup((caddr_t)flagp); + *statep &= ~NFSSTA_RCVLOCK; + if (*statep & NFSSTA_WANTRCV) { + *statep &= ~NFSSTA_WANTRCV; + wakeup((caddr_t)statep); } } @@ -2083,7 +2295,7 @@ nfs_rcvunlock(flagp) * be called with M_WAIT from an nfsd. 
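nfsrv_rcv() now accumulates its socket-state changes in a local ns_nflag and applies them once at the dorecs label, and its stream path feeds nfsrv_getstream() (next hunk), which parses the same record marks the client writes. A sketch of the decode, assuming the full 4-byte mark has been gathered:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    /*
     * Receive-side decode of the record mark; the server then rejects
     * lengths outside [NFS_MINPACKET, NFS_MAXPACKET] with EPERM and
     * tracks the last-fragment bit in SLPN_LASTFRAG.
     */
    static void
    parse_record_mark(const uint8_t *hdr, uint32_t *lenp, int *lastp)
    {
            uint32_t mark;

            memcpy(&mark, hdr, sizeof (mark));
            mark = ntohl(mark);
            *lastp = (mark & 0x80000000u) != 0;
            *lenp = mark & 0x7fffffffu;
    }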
*/ /* - * Needs to eun under network funnel + * Needs to run under network funnel */ void nfsrv_rcv(so, arg, waitflag) @@ -2096,7 +2308,7 @@ nfsrv_rcv(so, arg, waitflag) struct mbuf *mp, *mhck; struct sockaddr *nam=0; struct uio auio; - int flags, error; + int flags, ns_nflag=0, error; struct sockaddr_in *sin; if ((slp->ns_flag & SLP_VALID) == 0) @@ -2106,7 +2318,8 @@ nfsrv_rcv(so, arg, waitflag) * Define this to test for nfsds handling this under heavy load. */ if (waitflag == M_DONTWAIT) { - slp->ns_flag |= SLP_NEEDQ; goto dorecs; + ns_nflag = SLPN_NEEDQ; + goto dorecs; } #endif auio.uio_procp = NULL; @@ -2117,7 +2330,7 @@ nfsrv_rcv(so, arg, waitflag) * the nfs servers are heavily loaded. */ if (slp->ns_rec && waitflag == M_DONTWAIT) { - slp->ns_flag |= SLP_NEEDQ; + ns_nflag = SLPN_NEEDQ; goto dorecs; } @@ -2129,9 +2342,9 @@ nfsrv_rcv(so, arg, waitflag) error = soreceive(so, (struct sockaddr **) 0, &auio, &mp, (struct mbuf **)0, &flags); if (error || mp == (struct mbuf *)0) { if (error == EWOULDBLOCK) - slp->ns_flag |= SLP_NEEDQ; + ns_nflag = SLPN_NEEDQ; else - slp->ns_flag |= SLP_DISCONN; + ns_nflag = SLPN_DISCONN; goto dorecs; } m = mp; @@ -2152,9 +2365,9 @@ nfsrv_rcv(so, arg, waitflag) error = nfsrv_getstream(slp, waitflag); if (error) { if (error == EPERM) - slp->ns_flag |= SLP_DISCONN; + ns_nflag = SLPN_DISCONN; else - slp->ns_flag |= SLP_NEEDQ; + ns_nflag = SLPN_NEEDQ; } } else { do { @@ -2187,7 +2400,7 @@ nfsrv_rcv(so, arg, waitflag) if (error) { if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && error != EWOULDBLOCK) { - slp->ns_flag |= SLP_DISCONN; + ns_nflag = SLPN_DISCONN; goto dorecs; } } @@ -2198,8 +2411,10 @@ nfsrv_rcv(so, arg, waitflag) * Now try and process the request records, non-blocking. */ dorecs: + if (ns_nflag) + slp->ns_nflag |= ns_nflag; if (waitflag == M_DONTWAIT && - (slp->ns_rec || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) { + (slp->ns_rec || (slp->ns_nflag & (SLPN_NEEDQ | SLPN_DISCONN)))) { thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); nfsrv_wakenfsd(slp); thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); @@ -2222,13 +2437,13 @@ nfsrv_getstream(slp, waitflag) struct mbuf *om, *m2, *recm; u_long recmark; - if (slp->ns_flag & SLP_GETSTREAM) + if (slp->ns_nflag & SLPN_GETSTREAM) panic("nfs getstream"); - slp->ns_flag |= SLP_GETSTREAM; + slp->ns_nflag |= SLPN_GETSTREAM; for (;;) { if (slp->ns_reclen == 0) { if (slp->ns_cc < NFSX_UNSIGNED) { - slp->ns_flag &= ~SLP_GETSTREAM; + slp->ns_nflag &= ~SLPN_GETSTREAM; return (0); } m = slp->ns_raw; @@ -2253,11 +2468,11 @@ nfsrv_getstream(slp, waitflag) recmark = ntohl(recmark); slp->ns_reclen = recmark & ~0x80000000; if (recmark & 0x80000000) - slp->ns_flag |= SLP_LASTFRAG; + slp->ns_nflag |= SLPN_LASTFRAG; else - slp->ns_flag &= ~SLP_LASTFRAG; + slp->ns_nflag &= ~SLPN_LASTFRAG; if (slp->ns_reclen < NFS_MINPACKET || slp->ns_reclen > NFS_MAXPACKET) { - slp->ns_flag &= ~SLP_GETSTREAM; + slp->ns_nflag &= ~SLPN_GETSTREAM; return (EPERM); } } @@ -2291,7 +2506,7 @@ nfsrv_getstream(slp, waitflag) m->m_len -= slp->ns_reclen - len; len = slp->ns_reclen; } else { - slp->ns_flag &= ~SLP_GETSTREAM; + slp->ns_nflag &= ~SLPN_GETSTREAM; return (EWOULDBLOCK); } } else if ((len + m->m_len) == slp->ns_reclen) { @@ -2310,7 +2525,7 @@ nfsrv_getstream(slp, waitflag) slp->ns_cc -= len; slp->ns_reclen = 0; } else { - slp->ns_flag &= ~SLP_GETSTREAM; + slp->ns_nflag &= ~SLPN_GETSTREAM; return (0); } @@ -2321,7 +2536,7 @@ nfsrv_getstream(slp, waitflag) while (*mpp) mpp = &((*mpp)->m_next); *mpp = recm; - if (slp->ns_flag & 
SLP_LASTFRAG) { + if (slp->ns_nflag & SLPN_LASTFRAG) { if (slp->ns_recend) slp->ns_recend->m_nextpkt = slp->ns_frag; else @@ -2368,8 +2583,9 @@ nfsrv_dorec(slp, nfsd, ndp) nd->nd_dpos = mtod(m, caddr_t); error = nfs_getreq(nd, nfsd, TRUE); if (error) { - m_freem(nam); - _FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC); + if (nam) + m_freem(nam); + FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC); return (error); } *ndp = nd; @@ -2399,7 +2615,7 @@ nfs_getreq(nd, nfsd, has_header) int error = 0, nqnfs = 0, ticklen; struct mbuf *mrep, *md; register struct nfsuid *nuidp; - struct timeval tvin, tvout; + struct timeval tvin, tvout, now; #if 0 /* until encrypted keys are implemented */ NFSKERBKEYSCHED_T keys; /* stores key schedule */ #endif @@ -2585,7 +2801,8 @@ nfs_getreq(nd, nfsd, has_header) tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec); tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec); - if (nuidp->nu_expire < time.tv_sec || + microtime(&now); + if (nuidp->nu_expire < now.tv_sec || nuidp->nu_timestamp.tv_sec > tvout.tv_sec || (nuidp->nu_timestamp.tv_sec == tvout.tv_sec && nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) { @@ -2637,7 +2854,7 @@ nfsrv_wakenfsd(slp) if ((slp->ns_flag & SLP_VALID) == 0) return; - for (nd = nfsd_head.tqh_first; nd != 0; nd = nd->nfsd_chain.tqe_next) { + TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) { if (nd->nfsd_flag & NFSD_WAITING) { nd->nfsd_flag &= ~NFSD_WAITING; if (nd->nfsd_slp) @@ -2654,9 +2871,10 @@ nfsrv_wakenfsd(slp) #endif /* NFS_NOSERVER */ static int -nfs_msg(p, server, msg) +nfs_msg(p, server, msg, error) struct proc *p; - char *server, *msg; + const char *server, *msg; + int error; { tpr_t tpr; @@ -2664,7 +2882,50 @@ nfs_msg(p, server, msg) tpr = tprintf_open(p); else tpr = NULL; - tprintf(tpr, "nfs server %s: %s\n", server, msg); + if (error) + tprintf(tpr, "nfs server %s: %s, error %d\n", server, msg, + error); + else + tprintf(tpr, "nfs server %s: %s\n", server, msg); tprintf_close(tpr); return (0); } + +static void +nfs_down(rep, msg, error) + struct nfsreq *rep; + const char *msg; + int error; +{ + int dosignal; + + if (rep == NULL || rep->r_nmp == NULL) + return; + if (!(rep->r_nmp->nm_state & NFSSTA_TIMEO)) { + vfs_event_signal(&rep->r_nmp->nm_mountp->mnt_stat.f_fsid, + VQ_NOTRESP, 0); + rep->r_nmp->nm_state |= NFSSTA_TIMEO; + } + rep->r_flags |= R_TPRINTFMSG; + nfs_msg(rep->r_procp, rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname, + msg, error); +} + +static void +nfs_up(rep, msg, error) + struct nfsreq *rep; + const char *msg; + int error; +{ + + if (error != 0 || rep == NULL || rep->r_nmp == NULL) + return; + if ((rep->r_flags & R_TPRINTFMSG) != 0) + nfs_msg(rep->r_procp, + rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0); + if ((rep->r_nmp->nm_state & NFSSTA_TIMEO)) { + rep->r_nmp->nm_state &= ~NFSSTA_TIMEO; + vfs_event_signal(&rep->r_nmp->nm_mountp->mnt_stat.f_fsid, + VQ_NOTRESP, 1); + } +} diff --git a/bsd/nfs/nfs_subs.c b/bsd/nfs/nfs_subs.c index 1e341e73e..a4d2cf088 100644 --- a/bsd/nfs/nfs_subs.c +++ b/bsd/nfs/nfs_subs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -79,6 +79,7 @@ #include #include #include +#include #include #include @@ -109,6 +110,9 @@ #include +SYSCTL_DECL(_vfs_generic); +SYSCTL_NODE(_vfs_generic, OID_AUTO, nfs, CTLFLAG_RW, 0, "nfs hinge"); + #define FSDBG(A, B, C, D, E) \ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, (A))) | DBG_FUNC_NONE, \ (int)(B), (int)(C), (int)(D), (int)(E), 0) @@ -589,15 +593,9 @@ extern nfstype nfsv3_type[9]; extern struct nfsnodehashhead *nfsnodehashtbl; extern u_long nfsnodehash; -struct getfh_args; -extern int getfh(struct proc *, struct getfh_args *, int *); -struct nfssvc_args; -extern int nfssvc(struct proc *, struct nfssvc_args *, int *); LIST_HEAD(nfsnodehashhead, nfsnode); -int nfs_webnamei __P((struct nameidata *, struct vnode *, struct proc *)); - /* * Create the header for an rpc request packet * The hsiz is the size of the rest of the nfs request header. @@ -628,7 +626,7 @@ nfsm_reqh(vp, procid, hsiz, bposp) */ if (vp) { nmp = VFSTONFS(vp->v_mount); - if (nmp->nm_flag & NFSMNT_NQNFS) { + if (nmp && (nmp->nm_flag & NFSMNT_NQNFS)) { nqflag = NQNFS_NEEDLEASE(vp, procid); if (nqflag) { nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED); @@ -696,7 +694,6 @@ nfsm_rpchead(cr, nmflag, procid, auth_type, auth_len, auth_str, verf_len, /* * derive initial xid from system time - * XXX time is invalid if root not yet mounted */ if (!base && (rootvp)) { microtime(&tv); @@ -1182,6 +1179,7 @@ nfs_init(vfsp) nfs_iodwant[i] = (struct proc *)0; nfs_iodmount[i] = (struct nfsmount *)0; } + nfs_nbinit(); /* Init the nfsbuf table */ nfs_nhinit(); /* Init the nfsnode table */ #ifndef NFS_NOSERVER nfsrv_init(0); /* Init server data structures */ @@ -1219,13 +1217,6 @@ nfs_init(vfsp) lease_updatetime = nfs_lease_updatetime; #endif vfsp->vfc_refcount++; /* make us non-unloadable */ - sysent[SYS_nfssvc].sy_narg = 2; - sysent[SYS_nfssvc].sy_call = nfssvc; -#ifndef NFS_NOSERVER - sysent[SYS_getfh].sy_narg = 2; - sysent[SYS_getfh].sy_call = getfh; -#endif - return (0); } @@ -1263,18 +1254,15 @@ nfs_loadattrcache(vpp, mdp, dposp, vaper, dontshrink, xidp) enum vtype vtyp; u_short vmode; struct timespec mtime; + struct timeval now; struct vnode *nvp; int v3; FSDBG_TOP(527, vp, 0, *xidp >> 32, *xidp); - /* - * this routine is a good place to check for VBAD again. We caught - * most of them in nfsm_request, but postprocessing may indirectly get - * here, so check again. - */ - if (vp->v_type == VBAD) { - FSDBG_BOT(527, EINVAL, 1, 0, *xidp); - return (EINVAL); + + if (!VFSTONFS(vp->v_mount)) { + FSDBG_BOT(527, ENXIO, 1, 0, *xidp); + return (ENXIO); } v3 = NFS_ISV3(vp); @@ -1333,7 +1321,7 @@ nfs_loadattrcache(vpp, mdp, dposp, vaper, dontshrink, xidp) * information. */ np = VTONFS(vp); -if (*xidp < np->n_xid) { + if (*xidp < np->n_xid) { /* * We have already updated attributes with a response from * a later request. 
The attributes we have here are probably @@ -1352,12 +1340,6 @@ if (*xidp < np->n_xid) { if (vp->v_type != vtyp) { vp->v_type = vtyp; - if (UBCINFOMISSING(vp) || UBCINFORECLAIMED(vp)) - if ((error = ubc_info_init(vp))) { /* VREG */ - FSDBG_BOT(527, error, 3, 0, *xidp); - return(error); - } - if (vp->v_type == VFIFO) { vp->v_op = fifo_nfsv2nodeop_p; } @@ -1399,7 +1381,7 @@ if (*xidp < np->n_xid) { vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid); vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid); fxdr_hyper(&fp->fa3_size, &vap->va_size); - vap->va_blocksize = NFS_FABLKSIZE; + vap->va_blocksize = 16*1024; fxdr_hyper(&fp->fa3_used, &vap->va_bytes); vap->va_fileid = fxdr_unsigned(int, fp->fa3_fileid.nfsuquad[1]); fxdr_nfsv3time(&fp->fa3_atime, &vap->va_atime); @@ -1422,7 +1404,21 @@ if (*xidp < np->n_xid) { vap->va_filerev = 0; } - np->n_attrstamp = time.tv_sec; + microuptime(&now); + np->n_attrstamp = now.tv_sec; + + if (UBCINFOMISSING(vp) || UBCINFORECLAIMED(vp)) { + if (UBCINFORECLAIMED(vp) && ISSET(vp->v_flag, (VXLOCK|VORECLAIM))) { + // vnode is being vclean'ed, abort + FSDBG_BOT(527, ENXIO, 1, 0, *xidp); + return (ENXIO); + } + if ((error = ubc_info_init(vp))) { /* VREG */ + FSDBG_BOT(527, error, 3, 0, *xidp); + return(error); + } + } + if (vap->va_size != np->n_size) { FSDBG(527, vp, vap->va_size, np->n_size, (vap->va_type == VREG) | @@ -1442,8 +1438,9 @@ if (*xidp < np->n_xid) { dontshrink && np->n_size < ubc_getsize(vp)) { vap->va_size = np->n_size = orig_size; np->n_attrstamp = 0; - } else + } else { ubc_setsize(vp, (off_t)np->n_size); /* XXX */ + } } else np->n_size = vap->va_size; } @@ -1473,8 +1470,25 @@ nfs_getattrcache(vp, vaper) { register struct nfsnode *np = VTONFS(vp); register struct vattr *vap; + struct timeval now, nowup; + int32_t timeo; + + /* Set attribute timeout based on how recently the file has been modified. */ + if ((np)->n_flag & NMODIFIED) + timeo = NFS_MINATTRTIMO; + else { + /* Note that if the client and server clocks are way out of sync, */ + /* timeout will probably get clamped to a min or max value */ + microtime(&now); + timeo = (now.tv_sec - (np)->n_mtime) / 10; + if (timeo < NFS_MINATTRTIMO) + timeo = NFS_MINATTRTIMO; + else if (timeo > NFS_MAXATTRTIMO) + timeo = NFS_MAXATTRTIMO; + } - if ((time.tv_sec - np->n_attrstamp) >= NFS_ATTRTIMEO(np)) { + microuptime(&nowup); + if ((nowup.tv_sec - np->n_attrstamp) >= timeo) { FSDBG(528, vp, 0, 0, 1); nfsstats.attrcache_misses++; return (ENOENT); @@ -1542,10 +1556,15 @@ nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, retdirp, p, kerbflag, pubflag) int error, rdonly, linklen; struct componentname *cnp = &ndp->ni_cnd; int olen = len; + char *tmppn; *retdirp = (struct vnode *)0; - MALLOC_ZONE(cnp->cn_pnbuf, char *, len + 1, M_NAMEI, M_WAITOK); - cnp->cn_pnlen = len + 1; + + if (len > MAXPATHLEN - 1) + return (ENAMETOOLONG); + + MALLOC_ZONE(cnp->cn_pnbuf, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); + cnp->cn_pnlen = MAXPATHLEN; /* * Copy the name from the mbuf list to ndp->ni_pnbuf @@ -1609,14 +1628,16 @@ nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, retdirp, p, kerbflag, pubflag) *retdirp = dp; /* XXX CSM 12/4/97 Revisit when enabling WebNFS */ -/* XXX debo 12/15/97 Need to fix M_NAMEI allocations to use zone protocol */ #ifdef notyet if (pubflag) { /* * Oh joy. For WebNFS, handle those pesky '%' escapes, * and the 'native path' indicator. 
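The nfs_getattrcache() change above replaces the fixed NFS_ATTRTIMEO() with an adaptive timeout: recently modified files get the minimum, otherwise the timeout grows as a tenth of the file's age, clamped at both ends. The computation, isolated; 5 and 60 seconds are illustrative stand-ins for NFS_MINATTRTIMO/NFS_MAXATTRTIMO, whose real values live in the NFS headers:

    /* Illustrative values only. */
    #define ATTR_MIN_TIMEO 5
    #define ATTR_MAX_TIMEO 60

    /*
     * Attribute-cache lifetime: the minimum for files with pending
     * modifications, otherwise a tenth of the file's age, clamped.
     */
    static long
    attr_timeout(long now, long mtime, int modified)
    {
            long timeo;

            if (modified)
                    return (ATTR_MIN_TIMEO);
            timeo = (now - mtime) / 10;
            if (timeo < ATTR_MIN_TIMEO)
                    timeo = ATTR_MIN_TIMEO;
            else if (timeo > ATTR_MAX_TIMEO)
                    timeo = ATTR_MAX_TIMEO;
            return (timeo);
    }

As the in-tree comment notes, badly skewed client and server clocks simply pin the result at one of the clamps.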
*/ - MALLOC(cp, char *, olen + 1, M_NAMEI, M_WAITOK); + + assert(olen <= MAXPATHLEN - 1); + + MALLOC_ZONE(cp, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); fromcp = cnp->cn_pnbuf; tocp = cp; if ((unsigned char)*fromcp >= WEBNFS_SPECCHAR_START) { @@ -1634,7 +1655,7 @@ nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, retdirp, p, kerbflag, pubflag) */ default: error = EIO; - FREE(cp, M_NAMEI); + FREE_ZONE(cp, MAXPATHLEN, M_NAMEI); goto out; } } @@ -1650,15 +1671,20 @@ nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, retdirp, p, kerbflag, pubflag) continue; } else { error = ENOENT; - FREE(cp, M_NAMEI); + FREE_ZONE(cp, MAXPATHLEN, M_NAMEI); goto out; } } else *tocp++ = *fromcp++; } *tocp = '\0'; - FREE(cnp->cn_pnbuf, M_NAMEI); + + tmppn = cnp->cn_pnbuf; + long len = cnp->cn_pnlen; cnp->cn_pnbuf = cp; + cnp->cn_pnlen = MAXPATHLEN; + FREE_ZONE(tmppn, len, M_NAMEI); + } #endif @@ -1714,7 +1740,6 @@ nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, retdirp, p, kerbflag, pubflag) error = EINVAL; break; /* XXX CSM 12/4/97 Revisit when enabling WebNFS */ -/* XXX debo 12/15/97 Need to fix M_NAMEI allocations to use zone protocol */ #ifdef notyet } @@ -1722,8 +1747,9 @@ nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, retdirp, p, kerbflag, pubflag) error = ELOOP; break; } + /* XXX assert(olen <= MAXPATHLEN - 1); */ if (ndp->ni_pathlen > 1) - MALLOC(cp, char *, olen + 1, M_NAMEI, M_WAITOK); + MALLOC_ZONE(cp, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); else cp = cnp->cn_pnbuf; aiov.iov_base = cp; @@ -1737,9 +1763,9 @@ nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, retdirp, p, kerbflag, pubflag) auio.uio_resid = MAXPATHLEN; error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred); if (error) { - badlink: +badlink: if (ndp->ni_pathlen > 1) - FREE(cp, M_NAMEI); + FREE_ZONE(cp, MAXPATHLEN, M_NAMEI); break; } linklen = MAXPATHLEN - auio.uio_resid; @@ -1752,9 +1778,12 @@ nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, retdirp, p, kerbflag, pubflag) goto badlink; } if (ndp->ni_pathlen > 1) { - bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen); - FREE(cnp->cn_pnbuf, M_NAMEI); + long len = cnp->cn_pnlen; + tmppn = cnp->cn_pnbuf; cnp->cn_pnbuf = cp; + cnp->cn_pnlen = olen + 1; + bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen); + FREE_ZONE(tmppn, len, M_NAMEI); } else cnp->cn_pnbuf[linklen] = '\0'; ndp->ni_pathlen += linklen; @@ -1772,7 +1801,11 @@ nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, retdirp, p, kerbflag, pubflag) } } out: - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + tmppn = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmppn, cnp->cn_pnlen, M_NAMEI); + return (error); } @@ -2162,8 +2195,8 @@ nfs_invaldir(vp) /* * The write verifier has changed (probably due to a server reboot), so all - * B_NEEDCOMMIT blocks will have to be written again. Since they are on the - * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT + * NB_NEEDCOMMIT blocks will have to be written again. Since they are on the + * dirty block list as NB_DELWRI, all this takes is clearing the NB_NEEDCOMMIT * flag. Once done the new write verifier can be set for the mount point. 
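The nfs_clearcommit() body just below moves from the generic struct buf lists to the new per-node nfsbuf dirty list and keeps n_needcommitcnt in step. A model of the walk, using a plain singly linked list and stand-in NB_* bits:

    #include <stddef.h>

    /* NB_* stand-ins; the kernel walks np->n_dirtyblkhd, not this list. */
    enum { NB_BUSY = 0x01, NB_DELWRI = 0x02, NB_NEEDCOMMIT = 0x04 };

    struct nbuf {
            struct nbuf *next;
            int flags;
    };

    /*
     * After a write-verifier change every delayed write still marked
     * needs-commit must go to the server again: clear the flag and
     * report how many were cleared so n_needcommitcnt can be adjusted.
     */
    static int
    clear_commit(struct nbuf *dirty)
    {
            struct nbuf *bp;
            int cleared = 0;

            for (bp = dirty; bp != NULL; bp = bp->next) {
                    if ((bp->flags & (NB_BUSY | NB_DELWRI | NB_NEEDCOMMIT)) ==
                        (NB_DELWRI | NB_NEEDCOMMIT)) {
                            bp->flags &= ~NB_NEEDCOMMIT;
                            cleared++;
                    }
            }
            return (cleared);
    }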
*/ void @@ -2171,7 +2204,8 @@ nfs_clearcommit(mp) struct mount *mp; { register struct vnode *vp, *nvp; - register struct buf *bp, *nbp; + register struct nfsbuf *bp, *nbp; + struct nfsnode *np; int s; s = splbio(); @@ -2180,11 +2214,15 @@ loop: if (vp->v_mount != mp) /* Paranoia */ goto loop; nvp = vp->v_mntvnodes.le_next; - for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) { - nbp = bp->b_vnbufs.le_next; - if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT)) - == (B_DELWRI | B_NEEDCOMMIT)) - bp->b_flags &= ~B_NEEDCOMMIT; + np = VTONFS(vp); + for (bp = np->n_dirtyblkhd.lh_first; bp; bp = nbp) { + nbp = bp->nb_vnbufs.le_next; + if ((bp->nb_flags & (NB_BUSY | NB_DELWRI | NB_NEEDCOMMIT)) + == (NB_DELWRI | NB_NEEDCOMMIT)) { + bp->nb_flags &= ~NB_NEEDCOMMIT; + np->n_needcommitcnt--; + CHECK_NEEDCOMMITCNT(np); + } } } splx(s); diff --git a/bsd/nfs/nfs_syscalls.c b/bsd/nfs/nfs_syscalls.c index f29b5c80e..910b82c0a 100644 --- a/bsd/nfs/nfs_syscalls.c +++ b/bsd/nfs/nfs_syscalls.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -76,15 +76,17 @@ #include #include #include +#include #include #include -#include #include #include #include #include #include #include +#include +#include #include #include #include @@ -104,7 +106,7 @@ #include #include #include - +#include /* Global defs. */ extern int (*nfsrv3_procs[NFS_NPROCS]) __P((struct nfsrv_descript *nd, @@ -112,6 +114,7 @@ extern int (*nfsrv3_procs[NFS_NPROCS]) __P((struct nfsrv_descript *nd, struct proc *procp, struct mbuf **mreqp)); extern int nfs_numasync; +extern int nfs_ioddelwri; extern time_t nqnfsstarttime; extern int nqsrv_writeslack; extern int nfsrtton; @@ -179,7 +182,7 @@ getfh(p, uap) error = suser(p->p_ucred, &p->p_acflag); if(error) return (error); - NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, uap->fname, p); + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE, uap->fname, p); error = namei(&nd); if (error) return (error); @@ -195,6 +198,188 @@ getfh(p, uap) } #endif /* NFS_NOSERVER */ + +/* + * syscall for the rpc.lockd to use to translate a NFS file handle into + * an open descriptor. + * + * warning: do not remove the suser() call or this becomes one giant + * security hole. + */ +#ifndef _SYS_SYSPROTO_H_ +struct fhopen_args { + const struct fhandle *u_fhp; + int flags; +}; +#endif +int +fhopen(p, uap, retval) + struct proc *p; + register struct fhopen_args *uap; + register_t *retval; +{ + struct mount *mp; + struct vnode *vp; + struct fhandle fhp; + struct vattr vat; + struct vattr *vap = &vat; + struct flock lf; + struct file *fp; + register struct filedesc *fdp = p->p_fd; + int fmode, mode, error, type; + struct file *nfp; + int indx; + struct ucred *credanon; + int exflags; + struct ucred *cred = p->p_ucred; + int didhold = 0; + extern struct fileops vnops; + + /* + * Must be super user + */ + error = suser(cred, &p->p_acflag); + if (error) + return (error); + + fmode = FFLAGS(uap->flags); + /* why not allow a non-read/write open for our lockd? */ + if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT)) + return (EINVAL); + error = copyin((void*)uap->u_fhp, &fhp, sizeof(fhp)); + if (error) + return (error); + /* find the mount point */ + mp = vfs_getvfs(&fhp.fh_fsid); + if (mp == NULL) + return (ESTALE); + /* now give me my vnode, it gets returned to me locked */ +/* XXX CSM need to split VFS_CHECKEXP out of VFS_FHTOVP? 
*/ + error = VFS_FHTOVP(mp, &fhp.fh_fid, NULL, &vp, &exflags, &credanon); + if (error) + return (error); + /* + * from now on we have to make sure not + * to forget about the vnode + * any error that causes an abort must vput(vp) + * just set error = err and 'goto bad;'. + */ + + /* + * from vn_open + */ + if (vp->v_type == VSOCK) { + error = EOPNOTSUPP; + goto bad; + } + + if (UBCINFOEXISTS(vp) && ((didhold = ubc_hold(vp)) == 0)) { + error = ENOENT; + goto bad; + } + + if (fmode & FREAD && fmode & (FWRITE | O_TRUNC)) { + int err = 0; + if (vp->v_type == VDIR) + err = EISDIR; + else + err = vn_writechk(vp); + if (err && !(error = VOP_ACCESS(vp, VREAD, cred, p))) + error = err; + if (error || (error = VOP_ACCESS(vp, VREAD|VWRITE, cred, p))) + goto bad; + } else if (fmode & FREAD) { + if ((error = VOP_ACCESS(vp, VREAD, cred, p))) + goto bad; + } else if (fmode & (FWRITE | O_TRUNC)) { + if (vp->v_type == VDIR) { + error = EISDIR; + goto bad; + } + if ((error = vn_writechk(vp)) || + (error = VOP_ACCESS(vp, VWRITE, cred, p))) + goto bad; + } + if (fmode & O_TRUNC) { + VOP_UNLOCK(vp, 0, p); /* XXX */ + VOP_LEASE(vp, p, cred, LEASE_WRITE); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */ + VATTR_NULL(vap); + vap->va_size = 0; + error = VOP_SETATTR(vp, vap, cred, p); + if (error) + goto bad; + } + + error = VOP_OPEN(vp, fmode, cred, p); + if (error) + goto bad; + + if (fmode & FWRITE) + if (++vp->v_writecount <= 0) + panic("fhopen: v_writecount"); + /* + * end of vn_open code + */ + + if ((error = falloc(p, &nfp, &indx)) != 0) { + if (fmode & FWRITE) + vp->v_writecount--; + goto bad; + } + fp = nfp; + + /* + * Hold an extra reference to avoid having fp ripped out + * from under us while we block in the lock op + */ + fref(fp); + nfp->f_data = (caddr_t)vp; + nfp->f_flag = fmode & FMASK; + nfp->f_ops = &vnops; + nfp->f_type = DTYPE_VNODE; + if (fmode & (O_EXLOCK | O_SHLOCK)) { + lf.l_whence = SEEK_SET; + lf.l_start = 0; + lf.l_len = 0; + if (fmode & O_EXLOCK) + lf.l_type = F_WRLCK; + else + lf.l_type = F_RDLCK; + type = F_FLOCK; + if ((fmode & FNONBLOCK) == 0) + type |= F_WAIT; + VOP_UNLOCK(vp, 0, p); + if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, + type)) != 0) { + (void) vn_close(vp, fp->f_flag, fp->f_cred, p); + ffree(fp); + fdrelse(p, indx); + /* + * release our private reference + */ + frele(fp); + + return (error); + } + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + fp->f_flag |= FHASLOCK; + } + + VOP_UNLOCK(vp, 0, p); + *fdflags(p, indx) &= ~UF_RESERVED; + frele(fp); + *retval = indx; + return (0); + +bad: + VOP_UNLOCK(vp, 0, p); + if (didhold) + ubc_rele(vp); + vrele(vp); + return (error); +} + /* * Nfs server psuedo system call for the nfsd's * Based on the flag value it either: @@ -224,6 +409,7 @@ nfssvc(p, uap) struct nfssvc_sock *slp; struct nfsuid *nuidp; struct nfsmount *nmp; + struct timeval now; #endif /* NFS_NOSERVER */ int error; @@ -259,13 +445,10 @@ nfssvc(p, uap) if (error) return (error); - /* disable split funnels now */ - thread_funnel_merge(kernel_flock, network_flock); - - if ((nmp->nm_flag & NFSMNT_MNTD) && + if ((nmp->nm_state & NFSSTA_MNTD) && (uap->flag & NFSSVC_GOTAUTH) == 0) return (0); - nmp->nm_flag |= NFSMNT_MNTD; + nmp->nm_state |= NFSSTA_MNTD; error = nqnfs_clientd(nmp, p->p_ucred, &ncd, uap->flag, uap->argp, p); } else if (uap->flag & NFSSVC_ADDSOCK) { @@ -292,9 +475,6 @@ nfssvc(p, uap) if (error) return (error); - /* disable split funnels now */ - thread_funnel_merge(kernel_flock, network_flock); - if ((uap->flag & NFSSVC_AUTHIN) && ((nfsd = 
nsd->nsd_nfsd)) && (nfsd->nfsd_slp->ns_flag & SLP_VALID)) { slp = nfsd->nfsd_slp; @@ -327,7 +507,7 @@ nfssvc(p, uap) nuidp = (struct nfsuid *)0; if ((slp->ns_flag & SLP_VALID) == 0) { if (nuidp) - _FREE_ZONE((caddr_t)nuidp, + FREE_ZONE((caddr_t)nuidp, sizeof (struct nfsuid), M_NFSUID); } else { if (nuidp == (struct nfsuid *)0) { @@ -337,14 +517,15 @@ nfssvc(p, uap) nu_lru); if (nuidp->nu_flag & NU_NAM) m_freem(nuidp->nu_nam); - } + } nuidp->nu_flag = 0; nuidp->nu_cr = nsd->nsd_cr; if (nuidp->nu_cr.cr_ngroups > NGROUPS) nuidp->nu_cr.cr_ngroups = NGROUPS; nuidp->nu_cr.cr_ref = 1; nuidp->nu_timestamp = nsd->nsd_timestamp; - nuidp->nu_expire = time.tv_sec + nsd->nsd_ttl; + microtime(&now); + nuidp->nu_expire = now.tv_sec + nsd->nsd_ttl; /* * and save the session key in nu_key. */ @@ -430,10 +611,13 @@ nfssvc_addsock(fp, mynam, p) } #endif /* ISO */ } + /* reserve buffer space for 2 maximally-sized packets */ + siz = NFS_MAXPACKET; if (so->so_type == SOCK_STREAM) - siz = NFS_MAXPACKET + sizeof (u_long); - else - siz = NFS_MAXPACKET; + siz += sizeof (u_long); + siz *= 2; + if (siz > NFS_MAXSOCKBUF) + siz = NFS_MAXSOCKBUF; error = soreserve(so, siz, siz); if (error) { m_freem(mynam); @@ -451,6 +635,7 @@ nfssvc_addsock(fp, mynam, p) int val; bzero(&sopt, sizeof sopt); + sopt.sopt_dir = SOPT_SET; sopt.sopt_level = SOL_SOCKET; sopt.sopt_name = SO_KEEPALIVE; sopt.sopt_val = &val; @@ -464,6 +649,7 @@ nfssvc_addsock(fp, mynam, p) int val; bzero(&sopt, sizeof sopt); + sopt.sopt_dir = SOPT_SET; sopt.sopt_level = IPPROTO_TCP; sopt.sopt_name = TCP_NODELAY; sopt.sopt_val = &val; @@ -495,8 +681,9 @@ nfssvc_addsock(fp, mynam, p) so->so_upcallarg = (caddr_t)slp; so->so_upcall = nfsrv_rcv; so->so_rcv.sb_flags |= SB_UPCALL; /* required for freebsd merge */ + slp->ns_nflag = SLPN_NEEDQ; thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - slp->ns_flag = (SLP_VALID | SLP_NEEDQ); + slp->ns_flag = SLP_VALID; nfsrv_wakenfsd(slp); splx(s); return (0); @@ -516,14 +703,13 @@ nfssvc_nfsd(nsd, argp, p) register int siz; register struct nfssvc_sock *slp; register struct socket *so; - register int *solockp; struct nfsd *nfsd = nsd->nsd_nfsd; struct nfsrv_descript *nd = NULL; struct mbuf *mreq; int error = 0, cacherep, s, sotype, writes_todo; int procrastinate; u_quad_t cur_usec; - extern void nfs_aio_thread_init(); + struct timeval now; #ifndef nolint cacherep = RC_DOIT; @@ -537,7 +723,6 @@ nfssvc_nfsd(nsd, argp, p) nfsd->nfsd_procp = p; TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain); nfs_numnfsd++; - nfs_aio_thread_init(); } /* * Loop getting rpc requests until SIGKILL. 
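nfssvc_addsock() above now reserves room for two maximally sized packets instead of one, clamped at NFS_MAXSOCKBUF. The sizing rule, isolated; both constants here are placeholders, the real values come from the NFS headers:

    /* Placeholder limits only. */
    #define NFS_MAXPACKET  (1024 * 1024)
    #define NFS_MAXSOCKBUF (4 * 1024 * 1024)

    /*
     * Socket buffer reservation: two maximal packets (plus a record
     * mark each on stream sockets), capped at NFS_MAXSOCKBUF.
     */
    static unsigned long
    sockbuf_size(int is_stream)
    {
            unsigned long siz = NFS_MAXPACKET;

            if (is_stream)
                    siz += sizeof (unsigned long);  /* record mark */
            siz *= 2;                               /* two packets */
            if (siz > NFS_MAXSOCKBUF)
                    siz = NFS_MAXSOCKBUF;
            return (siz);
    }

The same hunk also sets sopt_dir = SOPT_SET before the sosetopt() calls, which the newer socket-option API requires.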
@@ -572,21 +757,23 @@ nfssvc_nfsd(nsd, argp, p) if ((slp = nfsd->nfsd_slp) == (struct nfssvc_sock *)0) continue; if (slp->ns_flag & SLP_VALID) { - if (slp->ns_flag & SLP_DISCONN) + nfs_slplock(slp, 1); + if (slp->ns_nflag & SLPN_DISCONN) { nfsrv_zapsock(slp); - else if (slp->ns_flag & SLP_NEEDQ) { - slp->ns_flag &= ~SLP_NEEDQ; - (void) nfs_sndlock(&slp->ns_solock, - (struct nfsreq *)0); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + } else if (slp->ns_nflag & SLPN_NEEDQ) { thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + slp->ns_nflag &= ~SLPN_NEEDQ; nfsrv_rcv(slp->ns_so, (caddr_t)slp, M_WAIT); - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - nfs_sndunlock(&slp->ns_solock); - } + } else + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); error = nfsrv_dorec(slp, nfsd, &nd); - cur_usec = (u_quad_t)time.tv_sec * 1000000 + - (u_quad_t)time.tv_usec; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + nfs_slpunlock(slp); + microuptime(&now); + cur_usec = (u_quad_t)now.tv_sec * 1000000 + + (u_quad_t)now.tv_usec; if (error && slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time <= cur_usec) { error = 0; @@ -602,7 +789,9 @@ nfssvc_nfsd(nsd, argp, p) } if (error || (slp->ns_flag & SLP_VALID) == 0) { if (nd) { - _FREE_ZONE((caddr_t)nd, + if (nd->nd_nam2) + m_freem(nd->nd_nam2); + FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC); nd = NULL; } @@ -614,12 +803,8 @@ nfssvc_nfsd(nsd, argp, p) splx(s); so = slp->ns_so; sotype = so->so_type; - if (so->so_proto->pr_flags & PR_CONNREQUIRED) - solockp = &slp->ns_solock; - else - solockp = (int *)0; if (nd) { - nd->nd_starttime = time; + microuptime(&nd->nd_starttime); if (nd->nd_nam2) nd->nd_nam = nd->nd_nam2; else @@ -648,9 +833,10 @@ nfssvc_nfsd(nsd, argp, p) * Check for just starting up for NQNFS and send * fake "try again later" replies to the NQNFS clients. 
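A small fix rides along in this hunk: nd->nd_nam is an mbuf, so the nfs_privport check must mtod() it before casting to a sockaddr_in. The policy itself just requires a reserved source port; in plain userland form:

    #include <netinet/in.h>

    #ifndef IPPORT_RESERVED
    #define IPPORT_RESERVED 1024
    #endif

    /*
     * Reserved-port test behind the nfs_privport knob: only requests
     * from ports below 1024 (sent by root on classic Unix) pass.
     */
    static int
    from_priv_port(const struct sockaddr_in *sin)
    {
            return (sin->sin_family == AF_INET &&
                ntohs(sin->sin_port) < IPPORT_RESERVED);
    }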
*/ - if (notstarted && nqnfsstarttime <= time.tv_sec) { + microtime(&now); + if (notstarted && nqnfsstarttime <= now.tv_sec) { if (modify_flag) { - nqnfsstarttime = time.tv_sec + nqsrv_writeslack; + nqnfsstarttime = now.tv_sec + nqsrv_writeslack; modify_flag = 0; } else notstarted = 0; @@ -672,7 +858,7 @@ nfssvc_nfsd(nsd, argp, p) } else if (nfs_privport) { /* Check if source port is privileged */ u_short port; - struct sockaddr *nam = nd->nd_nam; + struct sockaddr *nam = mtod(nd->nd_nam, struct sockaddr*); struct sockaddr_in *sin; sin = (struct sockaddr_in *)nam; @@ -713,8 +899,10 @@ nfssvc_nfsd(nsd, argp, p) if (nd->nd_procnum != NQNFSPROC_VACATED) nfsstats.srv_errs++; nfsrv_updatecache(nd, FALSE, mreq); - if (nd->nd_nam2) + if (nd->nd_nam2) { m_freem(nd->nd_nam2); + nd->nd_nam2 = NULL; + } break; } nfsstats.srvrpccnt[nd->nd_procnum]++; @@ -742,26 +930,31 @@ nfssvc_nfsd(nsd, argp, p) M_PREPEND(m, NFSX_UNSIGNED, M_WAIT); *mtod(m, u_long *) = htonl(0x80000000 | siz); } - if (solockp) - (void) nfs_sndlock(solockp, (struct nfsreq *)0); + if (so->so_proto->pr_flags & PR_CONNREQUIRED) + (void) nfs_slplock(slp, 1); if (slp->ns_flag & SLP_VALID) error = nfs_send(so, nd->nd_nam2, m, NULL); else { error = EPIPE; m_freem(m); } + mreq = NULL; if (nfsrtton) nfsd_rt(sotype, nd, cacherep); - if (nd->nd_nam2) + if (nd->nd_nam2) { MFREE(nd->nd_nam2, m); - if (nd->nd_mrep) + nd->nd_nam2 = NULL; + } + if (nd->nd_mrep) { m_freem(nd->nd_mrep); + nd->nd_mrep = NULL; + } if (error == EPIPE) nfsrv_zapsock(slp); - if (solockp) - nfs_sndunlock(solockp); + if (so->so_proto->pr_flags & PR_CONNREQUIRED) + nfs_slpunlock(slp); if (error == EINTR || error == ERESTART) { - _FREE_ZONE((caddr_t)nd, + FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC); nfsrv_slpderef(slp); s = splnet(); @@ -773,9 +966,14 @@ nfssvc_nfsd(nsd, argp, p) nfsd_rt(sotype, nd, cacherep); m_freem(nd->nd_mrep); m_freem(nd->nd_nam2); + nd->nd_mrep = nd->nd_nam2 = NULL; break; }; if (nd) { + if (nd->nd_mrep) + m_freem(nd->nd_mrep); + if (nd->nd_nam2) + m_freem(nd->nd_nam2); FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC); nd = NULL; } @@ -784,8 +982,9 @@ nfssvc_nfsd(nsd, argp, p) * Check to see if there are outstanding writes that * need to be serviced. */ - cur_usec = (u_quad_t)time.tv_sec * 1000000 + - (u_quad_t)time.tv_usec; + microuptime(&now); + cur_usec = (u_quad_t)now.tv_sec * 1000000 + + (u_quad_t)now.tv_usec; s = splsoftclock(); if (slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time <= cur_usec) { @@ -796,11 +995,14 @@ nfssvc_nfsd(nsd, argp, p) splx(s); } while (writes_todo); s = splnet(); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); if (nfsrv_dorec(slp, nfsd, &nd)) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); nfsd->nfsd_flag &= ~NFSD_REQINPROG; nfsd->nfsd_slp = NULL; nfsrv_slpderef(slp); - } + } else + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); } done: TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain); @@ -819,6 +1021,31 @@ int nfs_defect = 0; SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0, ""); #endif +#ifndef _SYS_SYSPROTO_H_ +struct nfsclnt_args { + int flag; + caddr_t argp; +}; +#endif +int +nfsclnt(struct proc *p, struct nfsclnt_args *uap) +{ + struct lockd_ans la; + int error; + + if (uap->flag == NFSCLNT_LOCKDWAIT) { + return (nfslockdwait(p)); + } + if (uap->flag == NFSCLNT_LOCKDANS) { + error = copyin(uap->argp, &la, sizeof(la)); + return (error != 0 ? 
error : nfslockdans(p, &la)); + } + if (uap->flag == NFSCLNT_LOCKDFD) + return (nfslockdfd(p, (int)uap->argp)); + return EINVAL; +} + + static int nfssvc_iod_continue(int); /* @@ -830,7 +1057,6 @@ static int nfssvc_iod(p) struct proc *p; { - register struct buf *bp; register int i, myiod; struct nfsmount *nmp; int error = 0; @@ -850,8 +1076,7 @@ nfssvc_iod(p) return (EBUSY); nfs_numasync++; - /* stuff myiod into uthread to get off local stack for - continuation */ + /* stuff myiod into uthread to get off local stack for continuation */ ut = (struct uthread *)get_bsdthread_info(current_act()); ut->uu_state.uu_nfs_myiod = myiod; /* squirrel away for continuation */ @@ -867,7 +1092,7 @@ nfssvc_iod(p) static int nfssvc_iod_continue(error) { - register struct buf *bp; + register struct nfsbuf *bp; register int i, myiod; struct nfsmount *nmp; struct uthread *ut; @@ -882,12 +1107,12 @@ nfssvc_iod_continue(error) /* * Just loop around doin our stuff until SIGKILL - * - actually we don't loop with continuations... + * - actually we don't loop with continuations... */ for (;;) { while (((nmp = nfs_iodmount[myiod]) == NULL || nmp->nm_bufq.tqh_first == NULL) - && error == 0) { + && error == 0 && nfs_ioddelwri == 0) { if (nmp) nmp->nm_bufqiods--; nfs_iodwant[myiod] = p; @@ -906,30 +1131,51 @@ nfssvc_iod_continue(error) error = 0; unix_syscall_return(error); } - while ((bp = nmp->nm_bufq.tqh_first) != NULL) { - /* Take one off the front of the list */ - TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist); - nmp->nm_bufqlen--; - if (nmp->nm_bufqwant && nmp->nm_bufqlen < 2 * nfs_numasync) { - nmp->nm_bufqwant = FALSE; - wakeup(&nmp->nm_bufq); - } - if (ISSET(bp->b_flags, B_READ)) - (void) nfs_doio(bp, bp->b_rcred, (struct proc *)0); - else - (void) nfs_doio(bp, bp->b_wcred, (struct proc *)0); + if (nmp != NULL) { + while ((bp = nmp->nm_bufq.tqh_first) != NULL) { + /* Take one off the front of the list */ + TAILQ_REMOVE(&nmp->nm_bufq, bp, nb_free); + bp->nb_free.tqe_next = NFSNOLIST; + nmp->nm_bufqlen--; + if (nmp->nm_bufqwant && nmp->nm_bufqlen < 2 * nfs_numasync) { + nmp->nm_bufqwant = FALSE; + wakeup(&nmp->nm_bufq); + } + if (ISSET(bp->nb_flags, NB_READ)) + (void) nfs_doio(bp, bp->nb_rcred, (struct proc *)0); + else + (void) nfs_doio(bp, bp->nb_wcred, (struct proc *)0); - /* - * If there are more than one iod on this mount, then defect - * so that the iods can be shared out fairly between the mounts - */ - if (nfs_defect && nmp->nm_bufqiods > 1) { - NFS_DPF(ASYNCIO, - ("nfssvc_iod: iod %d defecting from mount %p\n", - myiod, nmp)); - nfs_iodmount[myiod] = NULL; - nmp->nm_bufqiods--; - break; + /* + * If there are more than one iod on this mount, then defect + * so that the iods can be shared out fairly between the mounts + */ + if (nfs_defect && nmp->nm_bufqiods > 1) { + NFS_DPF(ASYNCIO, + ("nfssvc_iod: iod %d defecting from mount %p\n", + myiod, nmp)); + nfs_iodmount[myiod] = NULL; + nmp->nm_bufqiods--; + break; + } + } + } + if (nfs_ioddelwri) { + i = 0; + nfs_ioddelwri = 0; + while (i < 8 && (bp = TAILQ_FIRST(&nfsbufdelwri)) != NULL) { + struct nfsnode *np = VTONFS(bp->nb_vp); + nfs_buf_remfree(bp); + if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { + /* put buffer at end of delwri list */ + TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free); + nfsbufdelwricnt++; + nfs_flushcommits(np->n_vnode, (struct proc *)0); + } else { + SET(bp->nb_flags, (NB_BUSY | NB_ASYNC | NB_IOD)); + nfs_buf_write(bp); + } + i++; } } } @@ -954,6 +1200,7 @@ nfsrv_zapsock(slp) int s; slp->ns_flag &= ~SLP_ALLFLAGS; + slp->ns_nflag &= 
~SLP_ALLFLAGS; fp = slp->ns_fp; if (fp) { slp->ns_fp = (struct file *)0; @@ -962,12 +1209,13 @@ nfsrv_zapsock(slp) so->so_upcall = NULL; so->so_rcv.sb_flags &= ~SB_UPCALL; soshutdown(so, 2); - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - closef(fp, (struct proc *)0); if (slp->ns_nam) MFREE(slp->ns_nam, m); m_freem(slp->ns_raw); m_freem(slp->ns_rec); + slp->ns_nam = slp->ns_raw = slp->ns_rec = NULL; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + closef(fp, (struct proc *)0); for (nuidp = slp->ns_uidlruhead.tqh_first; nuidp != 0; nuidp = nnuidp) { nnuidp = nuidp->nu_lru.tqe_next; @@ -975,14 +1223,14 @@ nfsrv_zapsock(slp) TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp, nu_lru); if (nuidp->nu_flag & NU_NAM) m_freem(nuidp->nu_nam); - _FREE_ZONE((caddr_t)nuidp, + FREE_ZONE((caddr_t)nuidp, sizeof (struct nfsuid), M_NFSUID); } s = splsoftclock(); for (nwp = slp->ns_tq.lh_first; nwp; nwp = nnwp) { nnwp = nwp->nd_tq.le_next; LIST_REMOVE(nwp, nd_tq); - _FREE_ZONE((caddr_t)nwp, sizeof *nwp, M_NFSRVDESC); + FREE_ZONE((caddr_t)nwp, sizeof *nwp, M_NFSRVDESC); } LIST_INIT(&slp->ns_tq); splx(s); @@ -1006,17 +1254,17 @@ nfs_getauth(nmp, rep, cred, auth_str, auth_len, verf_str, verf_len, key) { int error = 0; - while ((nmp->nm_flag & NFSMNT_WAITAUTH) == 0) { - nmp->nm_flag |= NFSMNT_WANTAUTH; + while ((nmp->nm_state & NFSSTA_WAITAUTH) == 0) { + nmp->nm_state |= NFSSTA_WANTAUTH; (void) tsleep((caddr_t)&nmp->nm_authtype, PSOCK, "nfsauth1", 2 * hz); error = nfs_sigintr(nmp, rep, rep->r_procp); if (error) { - nmp->nm_flag &= ~NFSMNT_WANTAUTH; + nmp->nm_state &= ~NFSSTA_WANTAUTH; return (error); } } - nmp->nm_flag &= ~(NFSMNT_WAITAUTH | NFSMNT_WANTAUTH); + nmp->nm_state &= ~(NFSSTA_WAITAUTH | NFSSTA_WANTAUTH); MALLOC(*auth_str, char *, RPCAUTH_MAXSIZ, M_TEMP, M_WAITOK); nmp->nm_authstr = *auth_str; nmp->nm_authlen = RPCAUTH_MAXSIZ; @@ -1028,13 +1276,13 @@ nfs_getauth(nmp, rep, cred, auth_str, auth_len, verf_str, verf_len, key) /* * And wait for mount_nfs to do its stuff. 
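The nfs_getauth() conversion above (WAITAUTH/WANTAUTH/HASAUTH/AUTHERR) is part of the patch-wide split of nmp->nm_flag: user-settable mount options stay in nm_flag, while transient kernel-owned bits move to the new nm_state word, keeping runtime bookkeeping from colliding with option updates. The shape of the split, with representative and purely illustrative bit names and values:

    /* Representative bits only; names and values are illustrative. */
    #define NFSMNT_SOFT  0x0001     /* option: soft mount */
    #define NFSMNT_INT   0x0002     /* option: interruptible */
    #define NFSSTA_TIMEO 0x0001     /* state: server not responding */
    #define NFSSTA_FORCE 0x0002     /* state: force unmount in progress */

    struct nfsmnt_model {
            int nm_flag;    /* mount options, set by the user at mount */
            int nm_state;   /* transient kernel state: socket locks,
                             * auth handshake, timeout/force tracking */
    };

Options are read-mostly after mount time; the state word is the one flipped from interrupt paths, timers, and the unmount code.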
*/ - while ((nmp->nm_flag & NFSMNT_HASAUTH) == 0 && error == 0) { + while ((nmp->nm_state & NFSSTA_HASAUTH) == 0 && error == 0) { (void) tsleep((caddr_t)&nmp->nm_authlen, PSOCK, "nfsauth2", 2 * hz); error = nfs_sigintr(nmp, rep, rep->r_procp); } - if (nmp->nm_flag & NFSMNT_AUTHERR) { - nmp->nm_flag &= ~NFSMNT_AUTHERR; + if (nmp->nm_state & NFSSTA_AUTHERR) { + nmp->nm_state &= ~NFSSTA_AUTHERR; error = EAUTH; } if (error) @@ -1044,10 +1292,10 @@ nfs_getauth(nmp, rep, cred, auth_str, auth_len, verf_str, verf_len, key) *verf_len = nmp->nm_verflen; bcopy((caddr_t)nmp->nm_key, (caddr_t)key, sizeof (key)); } - nmp->nm_flag &= ~NFSMNT_HASAUTH; - nmp->nm_flag |= NFSMNT_WAITAUTH; - if (nmp->nm_flag & NFSMNT_WANTAUTH) { - nmp->nm_flag &= ~NFSMNT_WANTAUTH; + nmp->nm_state &= ~NFSSTA_HASAUTH; + nmp->nm_state |= NFSSTA_WAITAUTH; + if (nmp->nm_state & NFSSTA_WANTAUTH) { + nmp->nm_state &= ~NFSSTA_WANTAUTH; wakeup((caddr_t)&nmp->nm_authtype); } return (error); @@ -1067,7 +1315,7 @@ nfs_getnickauth(nmp, cred, auth_str, auth_len, verf_str, verf_len) { register struct nfsuid *nuidp; register u_long *nickp, *verfp; - struct timeval ktvin, ktvout; + struct timeval ktvin, ktvout, now; #if DIAGNOSTIC if (verf_len < (4 * NFSX_UNSIGNED)) @@ -1078,7 +1326,8 @@ nfs_getnickauth(nmp, cred, auth_str, auth_len, verf_str, verf_len) if (nuidp->nu_cr.cr_uid == cred->cr_uid) break; } - if (!nuidp || nuidp->nu_expire < time.tv_sec) + microtime(&now); + if (!nuidp || nuidp->nu_expire < now.tv_sec) return (EACCES); /* @@ -1098,10 +1347,11 @@ nfs_getnickauth(nmp, cred, auth_str, auth_len, verf_str, verf_len) */ verfp = (u_long *)verf_str; *verfp++ = txdr_unsigned(RPCAKN_NICKNAME); - if (time.tv_sec > nuidp->nu_timestamp.tv_sec || - (time.tv_sec == nuidp->nu_timestamp.tv_sec && - time.tv_usec > nuidp->nu_timestamp.tv_usec)) - nuidp->nu_timestamp = time; + microtime(&now); + if (now.tv_sec > nuidp->nu_timestamp.tv_sec || + (now.tv_sec == nuidp->nu_timestamp.tv_sec && + now.tv_usec > nuidp->nu_timestamp.tv_usec)) + nuidp->nu_timestamp = now; else nuidp->nu_timestamp.tv_usec++; ktvin.tv_sec = txdr_unsigned(nuidp->nu_timestamp.tv_sec); @@ -1138,7 +1388,7 @@ nfs_savenickauth(nmp, cred, len, key, mdp, dposp, mrep) register u_long *tl; register long t1; struct mbuf *md = *mdp; - struct timeval ktvin, ktvout; + struct timeval ktvin, ktvout, now; u_long nick; char *dpos = *dposp, *cp2; int deltasec, error = 0; @@ -1157,7 +1407,8 @@ nfs_savenickauth(nmp, cred, len, key, mdp, dposp, mrep) #endif ktvout.tv_sec = fxdr_unsigned(long, ktvout.tv_sec); ktvout.tv_usec = fxdr_unsigned(long, ktvout.tv_usec); - deltasec = time.tv_sec - ktvout.tv_sec; + microtime(&now); + deltasec = now.tv_sec - ktvout.tv_sec; if (deltasec < 0) deltasec = -deltasec; /* @@ -1177,7 +1428,7 @@ nfs_savenickauth(nmp, cred, len, key, mdp, dposp, mrep) } nuidp->nu_flag = 0; nuidp->nu_cr.cr_uid = cred->cr_uid; - nuidp->nu_expire = time.tv_sec + NFS_KERBTTL; + nuidp->nu_expire = now.tv_sec + NFS_KERBTTL; nuidp->nu_timestamp = ktvout; nuidp->nu_nickname = nick; bcopy(key, nuidp->nu_key, sizeof (key)); @@ -1210,6 +1461,44 @@ nfsrv_slpderef(slp) } } +/* + * Lock a socket against others. 
+ */ +int +nfs_slplock(slp, wait) + register struct nfssvc_sock *slp; + int wait; +{ + int *statep = &slp->ns_solock; + + if (!wait && (*statep & NFSSTA_SNDLOCK)) + return(0); /* already locked, fail */ + while (*statep & NFSSTA_SNDLOCK) { + *statep |= NFSSTA_WANTSND; + (void) tsleep((caddr_t)statep, PZERO - 1, "nfsslplck", 0); + } + *statep |= NFSSTA_SNDLOCK; + return (1); +} + +/* + * Unlock the stream socket for others. + */ +void +nfs_slpunlock(slp) + struct nfssvc_sock *slp; +{ + int *statep = &slp->ns_solock; + + if ((*statep & NFSSTA_SNDLOCK) == 0) + panic("nfs slpunlock"); + *statep &= ~NFSSTA_SNDLOCK; + if (*statep & NFSSTA_WANTSND) { + *statep &= ~NFSSTA_WANTSND; + wakeup((caddr_t)statep); + } +} + /* * Initialize the data structures for the server. * Handshake with any new nfsds starting up to avoid any chance of @@ -1274,6 +1563,7 @@ nfsd_rt(sotype, nd, cacherep) int cacherep; { register struct drt *rt; + struct timeval now; rt = &nfsdrt.drt[nfsdrt.pos]; if (cacherep == RC_DOIT) @@ -1293,9 +1583,10 @@ nfsd_rt(sotype, nd, cacherep) rt->ipadr = mtod(nd->nd_nam, struct sockaddr_in *)->sin_addr.s_addr; else rt->ipadr = INADDR_ANY; - rt->resptime = ((time.tv_sec - nd->nd_starttime.tv_sec) * 1000000) + - (time.tv_usec - nd->nd_starttime.tv_usec); - rt->tstamp = time; + microuptime(&now); + rt->resptime = ((now.tv_sec - nd->nd_starttime.tv_sec) * 1000000) + + (now.tv_usec - nd->nd_starttime.tv_usec); + microtime(&rt->tstamp); // XXX unused nfsdrt.pos = (nfsdrt.pos + 1) % NFSRTTLOGSIZ; } #endif /* NFS_NOSERVER */ diff --git a/bsd/nfs/nfs_vfsops.c b/bsd/nfs/nfs_vfsops.c index 882c56996..fcd38055c 100644 --- a/bsd/nfs/nfs_vfsops.c +++ b/bsd/nfs/nfs_vfsops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -74,7 +74,6 @@ #include #include #include -#include #include #include #include @@ -121,6 +120,18 @@ SYSCTL_INT(_vfs_nfs, OID_AUTO, debug, CTLFLAG_RW, &nfs_debug, 0, ""); #endif #endif +SYSCTL_DECL(_vfs_generic_nfs); +SYSCTL_NODE(_vfs_generic_nfs, OID_AUTO, client, CTLFLAG_RW, 0, + "nfs client hinge"); +/* how long NFS will wait before signalling vfs that it's down. */ +static int nfs_tprintf_initial_delay = NFS_TPRINTF_INITIAL_DELAY; +SYSCTL_INT(_vfs_generic_nfs_client, NFS_TPRINTF_INITIAL_DELAY, + initialdowndelay, CTLFLAG_RW, &nfs_tprintf_initial_delay, 0, ""); +/* how long between console messages "nfs server foo not responding" */ +static int nfs_tprintf_delay = NFS_TPRINTF_DELAY; +SYSCTL_INT(_vfs_generic_nfs_client, NFS_TPRINTF_DELAY, + nextdowndelay, CTLFLAG_RW, &nfs_tprintf_delay, 0, ""); + static int nfs_iosize __P((struct nfsmount *nmp)); static int mountnfs __P((struct nfs_args *,struct mount *, struct mbuf *,char *,char *,struct vnode **)); @@ -141,7 +152,7 @@ static int nfs_vptofh __P(( struct vnode *vp, struct fid *fhp)); static int nfs_fhtovp __P((struct mount *mp, struct fid *fhp, struct mbuf *nam, struct vnode **vpp, int *exflagsp, struct ucred **credanonp)); -static int nfs_vget __P((struct mount *, ino_t, struct vnode **)); +static int nfs_vget __P((struct mount *, void *, struct vnode **)); /* @@ -192,12 +203,15 @@ static int nfs_iosize(nmp) * Calculate the size used for io buffers. Use the larger * of the two sizes to minimise nfs requests but make sure * that it is at least one VM page to avoid wasting buffer - * space. + * space and to allow easy mmapping of I/O buffers. 
+ * The read/write rpc calls handle the splitting up of + * buffers into multiple requests if the buffer size is + * larger than the I/O size. */ iosize = max(nmp->nm_rsize, nmp->nm_wsize); if (iosize < PAGE_SIZE) iosize = PAGE_SIZE; - return (trunc_page(iosize)); + return (trunc_page_32(iosize)); } static void nfs_convert_oargs(args,oargs) @@ -255,7 +269,7 @@ nfs_statfs(mp, sbp, p) return(error); cred = crget(); cred->cr_ngroups = 1; - if (v3 && (nmp->nm_flag & NFSMNT_GOTFSINFO) == 0) + if (v3 && (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) (void)nfs_fsinfo(nmp, vp, cred, p); nfsstats.rpccnt[NFSPROC_FSSTAT]++; nfsm_reqhead(vp, NFSPROC_FSSTAT, NFSX_FH(v3)); @@ -355,7 +369,7 @@ nfs_fsinfo(nmp, vp, cred, p) if (max < nmp->nm_readdirsize) { nmp->nm_readdirsize = max; } - nmp->nm_flag |= NFSMNT_GOTFSINFO; + nmp->nm_state |= NFSSTA_GOTFSINFO; } nfsm_reqdone; return (error); @@ -513,8 +527,12 @@ nfs_mount_diskless(ndmntp, mntname, mntflag, vpp, mpp) if ((error = mountnfs(&args, mp, m, mntname, args.hostname, vpp))) { printf("nfs_mountroot: mount %s failed: %d", mntname, error); mp->mnt_vfc->vfc_refcount--; + + if (mp->mnt_kern_flag & MNTK_IO_XINFO) + FREE(mp->mnt_xinfo_ptr, M_TEMP); vfs_unbusy(mp, procp); - _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + + FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); return (error); } #if 0 /* Causes incorrect reporting of "mounted on" */ @@ -607,9 +625,9 @@ nfs_mount_diskless_private(ndmntp, mntname, mntflag, vpp, mpp) mp = _MALLOC_ZONE((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); bzero((char *)mp, (u_long)sizeof(struct mount)); - /* Initialize the default IO constraints */ - mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS; - mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32; + /* Initialize the default IO constraints */ + mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS; + mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32; lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0); (void)vfs_busy(mp, LK_NOWAIT, 0, procp); @@ -645,8 +663,12 @@ nfs_mount_diskless_private(ndmntp, mntname, mntflag, vpp, mpp) if ((error = mountnfs(&args, mp, m, mntname, args.hostname, &vp))) { printf("nfs_mountroot: mount %s failed: %d", mntname, error); mp->mnt_vfc->vfc_refcount--; + + if (mp->mnt_kern_flag & MNTK_IO_XINFO) + FREE(mp->mnt_xinfo_ptr, M_TEMP); vfs_unbusy(mp, procp); - _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + + FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); return (error); } @@ -679,7 +701,7 @@ nfs_mount(mp, path, data, ndp, p) struct mbuf *nam; struct vnode *vp; char pth[MNAMELEN], hst[MNAMELEN]; - u_int len; + size_t len; u_char nfh[NFSX_V3FHMAX]; error = copyin(data, (caddr_t)&args, sizeof (struct nfs_args)); @@ -750,6 +772,13 @@ mountnfs(argp, mp, nam, pth, hst, vpp) error = NFSERR_NOTSUPP; goto bad2; } + + /* + * Silently clear NFSMNT_NOCONN if it's a TCP mount, it makes + * no sense in that context. 
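/*
 * What the nfs_iosize() change above computes: the larger of the
 * mount's read and write transfer sizes, floored at one VM page and
 * truncated to a page boundary so I/O buffers can be mmapped easily.
 * A self-contained sketch, assuming a 4K page in place of the kernel's
 * PAGE_SIZE/trunc_page_32():
 */
static unsigned int
iosize_for(unsigned int rsize, unsigned int wsize)
{
	const unsigned int pagesize = 4096;	/* assumed page size */
	unsigned int iosize = (rsize > wsize) ? rsize : wsize;

	if (iosize < pagesize)
		iosize = pagesize;		/* at least one page */
	return (iosize & ~(pagesize - 1));	/* trunc_page_32() analog */
}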
+ */ + if (argp->sotype == SOCK_STREAM) + argp->flags &= ~NFSMNT_NOCONN; if (mp->mnt_flag & MNT_UPDATE) { nmp = VFSTONFS(mp); @@ -777,26 +806,30 @@ mountnfs(argp, mp, nam, pth, hst, vpp) mp->mnt_maxsymlinklen = 1; nmp->nm_timeo = NFS_TIMEO; nmp->nm_retry = NFS_RETRANS; - nmp->nm_wsize = NFS_WSIZE; - nmp->nm_rsize = NFS_RSIZE; + if (argp->sotype == SOCK_DGRAM) { + nmp->nm_wsize = NFS_DGRAM_WSIZE; + nmp->nm_rsize = NFS_DGRAM_RSIZE; + } else { + nmp->nm_wsize = NFS_WSIZE; + nmp->nm_rsize = NFS_RSIZE; + } nmp->nm_readdirsize = NFS_READDIRSIZE; nmp->nm_numgrps = NFS_MAXGRPS; nmp->nm_readahead = NFS_DEFRAHEAD; nmp->nm_leaseterm = NQ_DEFLEASE; nmp->nm_deadthresh = NQ_DEADTHRESH; + nmp->nm_tprintf_delay = nfs_tprintf_delay; + if (nmp->nm_tprintf_delay < 0) + nmp->nm_tprintf_delay = 0; + nmp->nm_tprintf_initial_delay = nfs_tprintf_initial_delay; + if (nmp->nm_tprintf_initial_delay < 0) + nmp->nm_tprintf_initial_delay = 0; CIRCLEQ_INIT(&nmp->nm_timerhead); nmp->nm_inprog = NULLVP; bcopy(hst, mp->mnt_stat.f_mntfromname, MNAMELEN); bcopy(pth, mp->mnt_stat.f_mntonname, MNAMELEN); nmp->nm_nam = nam; - /* - * Silently clear NFSMNT_NOCONN if it's a TCP mount, it makes - * no sense in that context. - */ - if (argp->sotype == SOCK_STREAM) - argp->flags &= ~NFSMNT_NOCONN; - if ((argp->flags & NFSMNT_TIMEO) && argp->timeo > 0) { nmp->nm_timeo = (argp->timeo * NFS_HZ + 5) / 10; if (nmp->nm_timeo < NFS_MINTIMEO) @@ -875,13 +908,6 @@ mountnfs(argp, mp, nam, pth, hst, vpp) (error = nfs_connect(nmp, (struct nfsreq *)0))) goto bad; - /* - * This is silly, but it has to be set so that vinifod() works. - * We do not want to do an nfs_statfs() here since we can get - * stuck on a dead server and we are holding a lock on the mount - * point. - */ - mp->mnt_stat.f_iosize = nfs_iosize(nmp); /* * A reference count is needed on the nfsnode representing the * remote root. If this object is not persistent, then backward @@ -906,7 +932,24 @@ mountnfs(argp, mp, nam, pth, hst, vpp) * effect of filling in (*vpp)->v_type with the correct value. */ curproc = current_proc(); - VOP_GETATTR(*vpp, &attrs, curproc->p_ucred, curproc); + error = VOP_GETATTR(*vpp, &attrs, curproc->p_ucred, curproc); + if (error) { + /* + * we got problems... we couldn't get the attributes + * from the NFS server... so the mount fails. + */ + vput(*vpp); + goto bad; + } + + /* + * Set the mount point's block I/O size. + * We really need to do this after we get info back from + * the server about what its preferred I/O sizes are. + */ + if (nmp->nm_flag & NFSMNT_NFSV3) + nfs_fsinfo(nmp, *vpp, curproc->p_ucred, curproc); + mp->mnt_stat.f_iosize = nfs_iosize(nmp); /* * Lose the lock but keep the ref. @@ -916,7 +959,7 @@ mountnfs(argp, mp, nam, pth, hst, vpp) return (0); bad: nfs_disconnect(nmp); - _FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT); + FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT); bad2: m_freem(nam); return (error); @@ -936,9 +979,17 @@ nfs_unmount(mp, mntflags, p) struct vnode *vp; int error, flags = 0; - if (mntflags & MNT_FORCE) - flags |= FORCECLOSE; nmp = VFSTONFS(mp); + /* + * During a force unmount we want to... + * Mark that we are doing a force unmount. + * Make the mountpoint soft. + */ + if (mntflags & MNT_FORCE) { + flags |= FORCECLOSE; + nmp->nm_state |= NFSSTA_FORCE; + nmp->nm_flag |= NFSMNT_SOFT; + } /* * Goes something like this.. * - Call vflush() to clear out vnodes for this file system, @@ -953,7 +1004,7 @@ nfs_unmount(mp, mntflags, p) /* * Must handshake with nqnfs_clientd() if it is active. 
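/*
 * Mount-time defaults as reshaped above, pulled out for clarity: UDP
 * mounts start from the smaller NFS_DGRAM_* transfer sizes, TCP mounts
 * from NFS_WSIZE/NFS_RSIZE, and the sysctl-fed tprintf delays are
 * clamped to non-negative values.  The numeric values below are
 * illustrative guesses, not the header's definitions.
 */
	if (argp->sotype == SOCK_DGRAM) {
		wsize = 8192;		/* e.g. NFS_DGRAM_WSIZE */
		rsize = 8192;		/* e.g. NFS_DGRAM_RSIZE */
	} else {
		wsize = 16384;		/* e.g. NFS_WSIZE */
		rsize = 16384;		/* e.g. NFS_RSIZE */
	}
	tprintf_delay = (nfs_tprintf_delay < 0) ? 0 : nfs_tprintf_delay;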
*/ - nmp->nm_flag |= NFSMNT_DISMINPROG; + nmp->nm_state |= NFSSTA_DISMINPROG; while (nmp->nm_inprog != NULLVP) (void) tsleep((caddr_t)&lbolt, PSOCK, "nfsdism", 0); /* @@ -962,18 +1013,18 @@ nfs_unmount(mp, mntflags, p) * not get EBUSY back. */ error = vflush(mp, vp, SKIPSWAP | flags); - if (mntflags & MNT_FORCE) + if (mntflags & MNT_FORCE) { error = vflush(mp, NULLVP, flags); /* locks vp in the process */ - else { + } else { if (vp->v_usecount > 1) { - nmp->nm_flag &= ~NFSMNT_DISMINPROG; + nmp->nm_state &= ~NFSSTA_DISMINPROG; return (EBUSY); } error = vflush(mp, vp, flags); } if (error) { - nmp->nm_flag &= ~NFSMNT_DISMINPROG; + nmp->nm_state &= ~NFSSTA_DISMINPROG; return (error); } @@ -982,7 +1033,7 @@ nfs_unmount(mp, mntflags, p) * For NQNFS, let the server daemon free the nfsmount structure. */ if (nmp->nm_flag & (NFSMNT_NQNFS | NFSMNT_KERB)) - nmp->nm_flag |= NFSMNT_DISMNT; + nmp->nm_state |= NFSSTA_DISMNT; /* * Release the root vnode reference held by mountnfs() @@ -1018,7 +1069,7 @@ nfs_unmount(mp, mntflags, p) if (hw_atomic_sub(&nfsreqqusers, 1) != 0) nfsatompanic("unmount sub"); #endif - _FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT); + FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT); } return (0); } @@ -1033,13 +1084,18 @@ nfs_root(mp, vpp) { register struct vnode *vp; struct nfsmount *nmp; - int error; + int error, vpid; nmp = VFSTONFS(mp); vp = nmp->nm_dvp; - error = vget(vp, LK_EXCLUSIVE, current_proc()); - if (error) - return (error); + vpid = vp->v_id; + while (error = vget(vp, LK_EXCLUSIVE, current_proc())) { + /* vget may return ENOENT if the dir changes while in vget */ + /* If that happens, try vget again, else return the error */ + if ((error != ENOENT) || (vp->v_id == vpid)) + return (error); + vpid = vp->v_id; + } if (vp->v_type == VNON) vp->v_type = VDIR; vp->v_flag |= VROOT; @@ -1067,17 +1123,15 @@ nfs_sync(mp, waitfor, cred, p) * Force stale buffer cache information to be flushed. */ loop: - for (vp = mp->mnt_vnodelist.lh_first; - vp != NULL; - vp = vp->v_mntvnodes.le_next) { - int didhold = 0; + LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) { + int didhold; /* * If the vnode that we are about to sync is no longer * associated with this mount point, start over. */ if (vp->v_mount != mp) goto loop; - if (VOP_ISLOCKED(vp) || vp->v_dirtyblkhd.lh_first == NULL) + if (VOP_ISLOCKED(vp) || LIST_FIRST(&VTONFS(vp)->n_dirtyblkhd) == NULL) continue; if (vget(vp, LK_EXCLUSIVE, p)) goto loop; @@ -1101,7 +1155,7 @@ loop: static int nfs_vget(mp, ino, vpp) struct mount *mp; - ino_t ino; + void *ino; /* XXX void* or ino_t? */ struct vnode **vpp; { @@ -1175,7 +1229,12 @@ static int nfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, size_t newlen, struct proc *p) { - int rv; + int error; + struct sysctl_req *req; + struct vfsidctl vc; + struct mount *mp; + struct nfsmount *nmp; + struct vfsquery vq; /* * All names at this level are terminal. @@ -1183,6 +1242,24 @@ nfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, if(namelen > 1) return ENOTDIR; /* overloaded */ + /* common code for "new style" VFS_CTL sysctl, get the mount. 
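/*
 * Shape of the "new style" VFS_CTL preamble in the switch that follows:
 * for VFS_CTL_TIMEO/VFS_CTL_QUERY the oldp argument is really a
 * struct sysctl_req, its payload starts with a struct vfsidctl naming
 * the mount, and VCTLTOREQ() strips that header so the per-command code
 * can SYSCTL_IN/SYSCTL_OUT the remainder.  Condensed restatement, error
 * unwinding abbreviated:
 */
	struct sysctl_req *req = oldp;		/* overloaded old pointer */
	struct vfsidctl vc;
	struct mount *mp;

	if ((error = SYSCTL_IN(req, &vc, sizeof(vc))) != 0)
		return (error);			/* couldn't copy in vfsidctl */
	if ((mp = vfs_getvfs(&vc.vc_fsid)) == NULL)
		return (ENOENT);		/* no mount with that fsid */
	VCTLTOREQ(&vc, req);			/* advance req past the header */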
*/ + switch (name[0]) { + case VFS_CTL_TIMEO: + case VFS_CTL_QUERY: + req = oldp; + error = SYSCTL_IN(req, &vc, sizeof(vc)); + if (error) + return (error); + mp = vfs_getvfs(&vc.vc_fsid); + if (mp == NULL) + return (ENOENT); + nmp = VFSTONFS(mp); + if (nmp == NULL) + return (ENOENT); + bzero(&vq, sizeof(vq)); + VCTLTOREQ(&vc, req); + } + switch(name[0]) { case NFS_NFSSTATS: if(!oldp) { @@ -1195,8 +1272,9 @@ nfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, return ENOMEM; } - rv = copyout(&nfsstats, oldp, sizeof nfsstats); - if(rv) return rv; + error = copyout(&nfsstats, oldp, sizeof nfsstats); + if (error) + return (error); if(newp && newlen != sizeof nfsstats) return EINVAL; @@ -1205,9 +1283,30 @@ nfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, return copyin(newp, &nfsstats, sizeof nfsstats); } return 0; - + case VFS_CTL_QUERY: + if ((nmp->nm_state & NFSSTA_TIMEO)) + vq.vq_flags |= VQ_NOTRESP; + error = SYSCTL_OUT(req, &vq, sizeof(vq)); + break; + case VFS_CTL_TIMEO: + if (req->oldptr != NULL) { + error = SYSCTL_OUT(req, &nmp->nm_tprintf_initial_delay, + sizeof(nmp->nm_tprintf_initial_delay)); + if (error) + return (error); + } + if (req->newptr != NULL) { + error = SYSCTL_IN(req, &nmp->nm_tprintf_initial_delay, + sizeof(nmp->nm_tprintf_initial_delay)); + if (error) + return (error); + if (nmp->nm_tprintf_initial_delay < 0) + nmp->nm_tprintf_initial_delay = 0; + } + break; default: - return EOPNOTSUPP; + return (ENOTSUP); } + return (error); } diff --git a/bsd/nfs/nfs_vnops.c b/bsd/nfs/nfs_vnops.c index dcc4d31dc..882ed59fe 100644 --- a/bsd/nfs/nfs_vnops.c +++ b/bsd/nfs/nfs_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -72,7 +72,6 @@ #include #include #include -#include #include #include #include @@ -83,7 +82,6 @@ #include #include -#include #include #include @@ -101,6 +99,7 @@ #include #include #include +#include #include #include #include @@ -128,6 +127,15 @@ #define TRUE 1 #define FALSE 0 +#define NFS_FREE_PNBUF(CNP) \ + do { \ + char *tmp = (CNP)->cn_pnbuf; \ + (CNP)->cn_pnbuf = NULL; \ + (CNP)->cn_flags &= ~HASBUF; \ + FREE_ZONE(tmp, (CNP)->cn_pnlen, M_NAMEI); \ + } while (0) + + static int nfsspec_read __P((struct vop_read_args *)); static int nfsspec_write __P((struct vop_write_args *)); static int nfsfifo_read __P((struct vop_read_args *)); @@ -158,7 +166,6 @@ static int nfs_rmdir __P((struct vop_rmdir_args *)); static int nfs_symlink __P((struct vop_symlink_args *)); static int nfs_readdir __P((struct vop_readdir_args *)); static int nfs_bmap __P((struct vop_bmap_args *)); -static int nfs_strategy __P((struct vop_strategy_args *)); static int nfs_lookitup __P((struct vnode *,char *,int,struct ucred *,struct proc *,struct nfsnode **)); static int nfs_sillyrename __P((struct vnode *,struct vnode *,struct componentname *)); static int nfsspec_access __P((struct vop_access_args *)); @@ -167,7 +174,6 @@ static int nfs_print __P((struct vop_print_args *)); static int nfs_pathconf __P((struct vop_pathconf_args *)); static int nfs_advlock __P((struct vop_advlock_args *)); static int nfs_blkatoff __P((struct vop_blkatoff_args *)); -static int nfs_bwrite __P((struct vop_bwrite_args *)); static int nfs_valloc __P((struct vop_valloc_args *)); static int nfs_vfree __P((struct vop_vfree_args *)); static int nfs_truncate __P((struct vop_truncate_args *)); @@ -209,13 +215,13 @@ static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = { { &vop_symlink_desc, (vop_t *)nfs_symlink }, /* symlink */ { &vop_readdir_desc, (vop_t *)nfs_readdir }, /* readdir */ { &vop_readlink_desc, (vop_t *)nfs_readlink }, /* readlink */ - { &vop_abortop_desc, (vop_t *)nfs_abortop }, /* abortop */ + { &vop_abortop_desc, (vop_t *)nop_abortop }, /* abortop */ { &vop_inactive_desc, (vop_t *)nfs_inactive }, /* inactive */ { &vop_reclaim_desc, (vop_t *)nfs_reclaim }, /* reclaim */ { &vop_lock_desc, (vop_t *)nfs_lock }, /* lock */ { &vop_unlock_desc, (vop_t *)nfs_unlock }, /* unlock */ { &vop_bmap_desc, (vop_t *)nfs_bmap }, /* bmap */ - { &vop_strategy_desc, (vop_t *)nfs_strategy }, /* strategy */ + { &vop_strategy_desc, (vop_t *)err_strategy }, /* strategy */ { &vop_print_desc, (vop_t *)nfs_print }, /* print */ { &vop_islocked_desc, (vop_t *)nfs_islocked }, /* islocked */ { &vop_pathconf_desc, (vop_t *)nfs_pathconf }, /* pathconf */ @@ -226,7 +232,7 @@ static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = { { &vop_vfree_desc, (vop_t *)nfs_vfree }, /* vfree */ { &vop_truncate_desc, (vop_t *)nfs_truncate }, /* truncate */ { &vop_update_desc, (vop_t *)nfs_update }, /* update */ - { &vop_bwrite_desc, (vop_t *)nfs_bwrite }, /* bwrite */ + { &vop_bwrite_desc, (vop_t *)err_bwrite }, /* bwrite */ { &vop_pagein_desc, (vop_t *)nfs_pagein }, /* Pagein */ { &vop_pageout_desc, (vop_t *)nfs_pageout }, /* Pageout */ { &vop_copyfile_desc, (vop_t *)err_copyfile }, /* Copyfile */ @@ -363,8 +369,6 @@ struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc = VNODEOP_SET(fifo_nfsv2nodeop_opv_desc); #endif -static int nfs_commit __P((struct vnode *vp, u_quad_t offset, int cnt, - struct ucred *cred, struct proc *procp)); static int nfs_mknodrpc __P((struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr 
*vap)); @@ -387,6 +391,7 @@ extern nfstype nfsv3_type[9]; struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON]; struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON]; int nfs_numasync = 0; +int nfs_ioddelwri = 0; #define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1)) static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO; @@ -528,30 +533,32 @@ nfs3_access_otw(struct vnode *vp, struct ucred *cred) { const int v3 = 1; - u_int32_t *tl; + u_long *tl; int error = 0, attrflag; struct mbuf *mreq, *mrep, *md, *mb, *mb2; caddr_t bpos, dpos, cp2; - register int32_t t1, t2; + register long t1, t2; register caddr_t cp; u_int32_t rmode; struct nfsnode *np = VTONFS(vp); u_int64_t xid; + struct timeval now; nfsstats.rpccnt[NFSPROC_ACCESS]++; nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED); nfsm_fhtom(vp, v3); - nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); + nfsm_build(tl, u_long *, NFSX_UNSIGNED); *tl = txdr_unsigned(wmode); nfsm_request(vp, NFSPROC_ACCESS, p, cred, &xid); nfsm_postop_attr(vp, attrflag, &xid); if (!error) { - nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); rmode = fxdr_unsigned(u_int32_t, *tl); np->n_mode = rmode; np->n_modeuid = cred->cr_uid; - np->n_modestamp = time_second; + microuptime(&now); + np->n_modestamp = now.tv_sec; } nfsm_reqdone; return error; @@ -577,6 +584,7 @@ nfs_access(ap) u_long mode, wmode; int v3 = NFS_ISV3(vp); struct nfsnode *np = VTONFS(vp); + struct timeval now; /* * For nfs v3, do an access rpc, otherwise you are stuck emulating @@ -615,7 +623,8 @@ nfs_access(ap) * Does our cached result allow us to give a definite yes to * this request? */ - if (time_second < np->n_modestamp + nfsaccess_cache_timeout && + microuptime(&now); + if (now.tv_sec < np->n_modestamp + nfsaccess_cache_timeout && ap->a_cred->cr_uid == np->n_modeuid && (np->n_mode & mode) == mode) { /* nfsstats.accesscache_hits++; */ @@ -708,14 +717,22 @@ nfs_open(ap) error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); if (error) return (error); + /* if directory changed, purge any name cache entries */ + if ((vp->v_type == VDIR) && + (np->n_mtime != vattr.va_mtime.tv_sec)) + cache_purge(vp); np->n_mtime = vattr.va_mtime.tv_sec; } else { error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); if (error) return (error); if (np->n_mtime != vattr.va_mtime.tv_sec) { - if (vp->v_type == VDIR) + if (vp->v_type == VDIR) { np->n_direofoffset = 0; + nfs_invaldir(vp); + /* purge name cache entries */ + cache_purge(vp); + } if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1)) == EINTR) return (error); @@ -771,6 +788,7 @@ nfs_close(ap) { register struct vnode *vp = ap->a_vp; register struct nfsnode *np = VTONFS(vp); + struct nfsmount *nmp; int error = 0; if (vp->v_type == VREG) { @@ -781,8 +799,21 @@ nfs_close(ap) &sp->s_name[0], (unsigned)(sp->s_dvp), (unsigned)vp, (unsigned)ap, (unsigned)np, (unsigned)sp); #endif - if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 && + nmp = VFSTONFS(vp->v_mount); + if (!nmp) + return (ENXIO); + if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && (np->n_flag & NMODIFIED)) { + int getlock = !VOP_ISLOCKED(vp); + if (getlock) { + error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p); + if (!error && !VFSTONFS(vp->v_mount)) { + VOP_UNLOCK(vp, 0, ap->a_p); + error = ENXIO; + } + if (error) + return (error); + } if (NFS_ISV3(vp)) { error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 1); /* @@ -791,9 +822,12 @@ nfs_close(ap) * NMODIFIED is a hint */ /* np->n_flag &= ~NMODIFIED; */ - } else + } else { error = nfs_vinvalbuf(vp, 
V_SAVE, ap->a_cred, ap->a_p, 1); + } np->n_attrstamp = 0; + if (getlock) + VOP_UNLOCK(vp, 0, ap->a_p); } if (np->n_flag & NWRITEERR) { np->n_flag &= ~NWRITEERR; @@ -823,7 +857,7 @@ nfs_getattr(ap) caddr_t bpos, dpos; int error = 0; struct mbuf *mreq, *mrep, *md, *mb, *mb2; - int v3 = NFS_ISV3(vp); + int v3; u_int64_t xid; int avoidfloods; @@ -845,6 +879,12 @@ nfs_getattr(ap) np->n_flag); return (error); } + + if (!VFSTONFS(vp->v_mount)) { + FSDBG_BOT(513, np->n_size, ENXIO, np->n_vattr.va_size, np->n_flag); + return (ENXIO); + } + v3 = NFS_ISV3(vp); error = 0; if (v3 && nfsaccess_cache_timeout > 0) { @@ -878,13 +918,17 @@ tryagain: } if (np->n_mtime != ap->a_vap->va_mtime.tv_sec) { FSDBG(513, -1, np, -1, vp); - if (vp->v_type == VDIR) + if (vp->v_type == VDIR) { nfs_invaldir(vp); + /* purge name cache entries */ + cache_purge(vp); + } error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1); FSDBG(513, -1, np, -2, error); - if (!error) + if (!error) { np->n_mtime = ap->a_vap->va_mtime.tv_sec; + } } } nfsm_reqdone; @@ -973,32 +1017,70 @@ nfs_setattr(ap) } else if (np->n_size > vap->va_size) { /* shrinking? */ daddr_t obn, bn; int biosize; - struct buf *bp; + struct nfsbuf *bp; - biosize = min(vp->v_mount->mnt_stat.f_iosize, - PAGE_SIZE); + biosize = vp->v_mount->mnt_stat.f_iosize; obn = (np->n_size - 1) / biosize; bn = vap->va_size / biosize; for ( ; obn >= bn; obn--) - if (incore(vp, obn)) { - bp = getblk(vp, obn, biosize, 0, - 0, BLK_READ); - FSDBG(512, bp, bp->b_flags, - 0, obn); - SET(bp->b_flags, B_INVAL); - brelse(bp); + if (nfs_buf_incore(vp, obn)) { + bp = nfs_buf_get(vp, obn, biosize, 0, BLK_READ); + if (!bp) + continue; + if (obn == bn) { + int neweofoff, mustwrite; + mustwrite = 0; + neweofoff = vap->va_size - NBOFF(bp); + /* check for any dirty data before the new EOF */ + if (bp->nb_dirtyend && bp->nb_dirtyoff < neweofoff) { + /* clip dirty range to EOF */ + if (bp->nb_dirtyend > neweofoff) + bp->nb_dirtyend = neweofoff; + mustwrite++; + } + bp->nb_dirty &= (1 << round_page_32(neweofoff)/PAGE_SIZE) - 1; + if (bp->nb_dirty) + mustwrite++; + if (mustwrite) { + /* gotta write out dirty data before invalidating */ + /* (NB_STABLE indicates that data writes should be FILESYNC) */ + /* (NB_NOCACHE indicates buffer should be discarded) */ + CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL | NB_ASYNC | NB_READ)); + SET(bp->nb_flags, NB_STABLE | NB_NOCACHE); + /* + * NFS has embedded ucred so crhold() risks zone corruption + */ + if (bp->nb_wcred == NOCRED) + bp->nb_wcred = crdup(ap->a_cred); + error = nfs_buf_write(bp); + // Note: bp has been released + if (error) { + FSDBG(512, bp, 0xd00dee, 0xbad, error); + np->n_error = error; + np->n_flag |= NWRITEERR; + error = 0; + } + bp = NULL; + } } + if (bp) { + FSDBG(512, bp, bp->nb_flags, 0, obn); + SET(bp->nb_flags, NB_INVAL); + nfs_buf_release(bp); + } + } } tsize = np->n_size; np->n_size = np->n_vattr.va_size = vap->va_size; - ubc_setsize(vp, (off_t)vap->va_size); /* XXX */ + ubc_setsize(vp, (off_t)vap->va_size); /* XXX error? 
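/*
 * Arithmetic behind the nfs_setattr() shrink path above: when a
 * truncation lands inside the last affected buffer, the dirty byte
 * range is clipped at the new EOF and the per-page dirty bitmap keeps
 * only pages that still lie below it (nb_dirty has one bit per page,
 * bit 0 first).  Runnable restatement assuming a 4K page:
 */
#include <stdint.h>

static uint32_t
clip_dirty_pages(uint32_t dirty, unsigned int neweofoff)
{
	const unsigned int pagesize = 4096;	/* assumed PAGE_SIZE */
	unsigned int pages = (neweofoff + pagesize - 1) / pagesize;

	/* round_page_32(neweofoff)/PAGE_SIZE pages survive the truncate */
	return (pages >= 32) ? dirty : (dirty & ((1U << pages) - 1));
}
/*
 * If any such dirty data survives (the mustwrite case), it is pushed
 * out with NB_STABLE|NB_NOCACHE before the buffer is invalidated, so
 * nothing written before the truncate is silently dropped.
 */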
*/ }; } else if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) && - (np->n_flag & NMODIFIED) && vp->v_type == VREG && - (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, - ap->a_p, 1)) == EINTR) - return (error); + (np->n_flag & NMODIFIED) && vp->v_type == VREG) { + error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1); + if (error == EINTR) + return (error); + } error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p); FSDBG_BOT(512, np->n_size, vap->va_size, np->n_vattr.va_size, error); if (error && vap->va_size != VNOVAL) { @@ -1033,8 +1115,13 @@ nfs_setattrrpc(vp, vap, cred, procp) u_long *tl; int error = 0, wccflag = NFSV3_WCCRATTR; struct mbuf *mreq, *mrep, *md, *mb, *mb2; - int v3 = NFS_ISV3(vp); + int v3; u_int64_t xid; + struct timeval now; + + if (!VFSTONFS(vp->v_mount)) + return (ENXIO); + v3 = NFS_ISV3(vp); nfsstats.rpccnt[NFSPROC_SETATTR]++; nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3)); @@ -1072,8 +1159,9 @@ nfs_setattrrpc(vp, vap, cred, procp) nfsm_build(tl, u_long *, NFSX_UNSIGNED); *tl = nfs_false; } + microtime(&now); if (vap->va_atime.tv_sec != VNOVAL) { - if (vap->va_atime.tv_sec != time.tv_sec) { + if (vap->va_atime.tv_sec != now.tv_sec) { nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT); txdr_nfsv3time(&vap->va_atime, tl); @@ -1086,7 +1174,7 @@ nfs_setattrrpc(vp, vap, cred, procp) *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE); } if (vap->va_mtime.tv_sec != VNOVAL) { - if (vap->va_mtime.tv_sec != time.tv_sec) { + if (vap->va_mtime.tv_sec != now.tv_sec) { nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT); txdr_nfsv3time(&vap->va_mtime, tl); @@ -1121,7 +1209,7 @@ nfs_setattrrpc(vp, vap, cred, procp) nfsm_request(vp, NFSPROC_SETATTR, procp, cred, &xid); if (v3) { nfsm_wcc_data(vp, wccflag, &xid); - if (!wccflag && vp->v_type != VBAD) /* EINVAL on VBAD node */ + if (!wccflag) VTONFS(vp)->n_attrstamp = 0; } else nfsm_loadattr(vp, (struct vattr *)0, &xid); @@ -1151,7 +1239,6 @@ nfs_lookup(ap) register u_long *tl; register caddr_t cp; register long t1, t2; - struct nfsmount *nmp; caddr_t bpos, dpos, cp2; struct mbuf *mreq, *mrep, *md, *mb, *mb2; long len; @@ -1160,8 +1247,9 @@ nfs_lookup(ap) int lockparent, wantparent, error = 0, attrflag, fhsize; int v3 = NFS_ISV3(dvp); struct proc *p = cnp->cn_proc; - int worldbuildworkaround = 1; + int unlockdvp = 0; u_int64_t xid; + struct vattr vattr; if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) @@ -1169,92 +1257,81 @@ nfs_lookup(ap) *vpp = NULLVP; if (dvp->v_type != VDIR) return (ENOTDIR); + lockparent = flags & LOCKPARENT; wantparent = flags & (LOCKPARENT|WANTPARENT); - nmp = VFSTONFS(dvp->v_mount); np = VTONFS(dvp); - if (worldbuildworkaround) { - /* - * Temporary workaround for world builds to not have dvp go - * VBAD on during server calls in this routine. When - * the real ref counting problem is found take this out. - * Note if this was later and before the nfsm_request - * set up, the workaround did not work (NOTE other difference - * was I only put one VREF in that time. Thus it needs - * to be above the cache_lookup branch or with 2 VREFS. Not - * sure which. Can't play with world builds right now to see - * which. VOP_ACCESS could also make it go to server. 
- EKN - */ - VREF(dvp); /* hang on to this dvp - EKN */ - VREF(dvp); /* hang on tight - EKN */ - } + /* if directory has changed, purge any name cache entries */ + if (!VOP_GETATTR(dvp, &vattr, cnp->cn_cred, p) && + (np->n_mtime != vattr.va_mtime.tv_sec)) + cache_purge(dvp); if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) { - struct vattr vattr; int vpid; - if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p))) { - *vpp = NULLVP; - goto error_return; - } - - /* got to check to make sure the vnode didn't go away if access went to server */ - if ((*vpp)->v_type == VBAD) { - error = EINVAL; - goto error_return; - } - newvp = *vpp; vpid = newvp->v_id; + /* * See the comment starting `Step through' in ufs/ufs_lookup.c * for an explanation of the locking protocol */ + + /* + * Note: we need to make sure to get a lock/ref on newvp + * before we possibly go off to the server in VOP_ACCESS. + */ if (dvp == newvp) { VREF(newvp); error = 0; } else if (flags & ISDOTDOT) { VOP_UNLOCK(dvp, 0, p); error = vget(newvp, LK_EXCLUSIVE, p); - if (!error && lockparent && (flags & ISLASTCN)) + if (!error) error = vn_lock(dvp, LK_EXCLUSIVE, p); } else { error = vget(newvp, LK_EXCLUSIVE, p); - if (!lockparent || error || !(flags & ISLASTCN)) + if (error) VOP_UNLOCK(dvp, 0, p); } - if (!error) { - if (vpid == newvp->v_id) { - if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, p) - && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) { - nfsstats.lookupcache_hits++; - if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) - cnp->cn_flags |= SAVENAME; - error = 0; /* ignore any from VOP_GETATTR */ - goto error_return; - } - cache_purge(newvp); + + if (error) + goto cache_lookup_out; + + if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p))) { + if (dvp == newvp) + vrele(newvp); + else + vput(newvp); + *vpp = NULLVP; + goto error_return; } - vput(newvp); - if (lockparent && dvp != newvp && (flags & ISLASTCN)) - VOP_UNLOCK(dvp, 0, p); + + if ((dvp != newvp) && (!lockparent || !(flags & ISLASTCN))) + VOP_UNLOCK(dvp, 0, p); + + if (vpid == newvp->v_id) { + if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, p) + && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) { + nfsstats.lookupcache_hits++; + if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) + cnp->cn_flags |= SAVENAME; + error = 0; /* ignore any from VOP_GETATTR */ + goto error_return; + } + cache_purge(newvp); } + vput(newvp); + if ((dvp != newvp) && lockparent && (flags & ISLASTCN)) + VOP_UNLOCK(dvp, 0, p); +cache_lookup_out: error = vn_lock(dvp, LK_EXCLUSIVE, p); *vpp = NULLVP; if (error) goto error_return; } - /* - * Got to check to make sure the vnode didn't go away if VOP_GETATTR went to server - * or callers prior to this blocked and had it go VBAD. 
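/*
 * Why the reworked cache-hit path above juggles locks this way:
 *
 *   - A lock/ref on newvp is now taken *before* VOP_ACCESS(), which can
 *     sleep in an rpc; holding the ref (and re-checking v_id afterward)
 *     is what lets the old worldbuild double-VREF workaround go away.
 *   - For ".." the target is the parent of the locked directory, and
 *     taking the parent's lock while holding the child's would invert
 *     the usual top-down order and risk deadlock, so dvp is unlocked
 *     around the vget() and relocked afterwards.
 *   - dvp stays locked past the lookup unless the caller neither asked
 *     to keep the parent locked nor is at the last component.
 */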
- */ - if (dvp->v_type == VBAD) { - error = EINVAL; - goto error_return; - } - error = 0; newvp = NULLVP; nfsstats.lookupcache_misses++; @@ -1304,29 +1381,32 @@ nfs_lookup(ap) goto error_return; } - if (flags & ISDOTDOT) { + if (NFS_CMPFH(np, fhp, fhsize)) { + VREF(dvp); + newvp = dvp; + } else if (flags & ISDOTDOT) { VOP_UNLOCK(dvp, 0, p); error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); if (error) { + m_freem(mrep); vn_lock(dvp, LK_EXCLUSIVE + LK_RETRY, p); goto error_return; } newvp = NFSTOV(np); - if (lockparent && (flags & ISLASTCN) && - (error = vn_lock(dvp, LK_EXCLUSIVE, p))) { + if (!lockparent || !(flags & ISLASTCN)) + unlockdvp = 1; /* keep dvp locked until after postops */ + if (error = vn_lock(dvp, LK_EXCLUSIVE, p)) { + m_freem(mrep); vput(newvp); goto error_return; } - } else if (NFS_CMPFH(np, fhp, fhsize)) { - VREF(dvp); - newvp = dvp; } else { if ((error = nfs_nget(dvp->v_mount, fhp, fhsize, &np))) { m_freem(mrep); goto error_return; } if (!lockparent || !(flags & ISLASTCN)) - VOP_UNLOCK(dvp, 0, p); + unlockdvp = 1; /* keep dvp locked until after postops */ newvp = NFSTOV(np); } if (v3) { @@ -1345,35 +1425,29 @@ nfs_lookup(ap) } *vpp = newvp; nfsm_reqdone; + if (unlockdvp) + VOP_UNLOCK(dvp, 0, p); if (error) { if (newvp != NULLVP) { - vrele(newvp); + if (newvp == dvp) + vrele(newvp); + else + vput(newvp); *vpp = NULLVP; } if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) && (flags & ISLASTCN) && error == ENOENT) { - if (!lockparent) - VOP_UNLOCK(dvp, 0, p); - if (dvp->v_mount->mnt_flag & MNT_RDONLY) + if (dvp->v_mount && (dvp->v_mount->mnt_flag & MNT_RDONLY)) error = EROFS; else error = EJUSTRETURN; + if (!lockparent) + VOP_UNLOCK(dvp, 0, p); } if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) cnp->cn_flags |= SAVENAME; } error_return: - /* - * These "vreles" set dvp refcounts back to where they were - * before we took extra 2 VREFS to avoid VBAD vnode on dvp - * during server calls for world builds. Remove when real - * fix is found. - EKN - */ - if (worldbuildworkaround) { - vrele(dvp); /* end of hanging on tight to dvp - EKN */ - vrele(dvp); /* end of hanging on tight to dvp - EKN */ - } - return (error); } @@ -1432,9 +1506,13 @@ nfs_readlinkrpc(vp, uiop, cred) caddr_t bpos, dpos, cp2; int error = 0, len, attrflag; struct mbuf *mreq, *mrep, *md, *mb, *mb2; - int v3 = NFS_ISV3(vp); + int v3; u_int64_t xid; + if (!VFSTONFS(vp->v_mount)) + return (ENXIO); + v3 = NFS_ISV3(vp); + nfsstats.rpccnt[NFSPROC_READLINK]++; nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3)); nfsm_fhtom(vp, v3); @@ -1474,21 +1552,25 @@ nfs_readrpc(vp, uiop, cred) caddr_t bpos, dpos, cp2; struct mbuf *mreq, *mrep, *md, *mb, *mb2; struct nfsmount *nmp; - int error = 0, len, retlen, tsiz, eof, attrflag; - int v3 = NFS_ISV3(vp); + int error = 0, len, retlen, tsiz, eof = 0, attrflag; + int v3, nmrsize; u_int64_t xid; -#ifndef nolint - eof = 0; -#endif + FSDBG_TOP(536, vp, uiop->uio_offset, uiop->uio_resid, 0); nmp = VFSTONFS(vp->v_mount); + if (!nmp) + return (ENXIO); + v3 = NFS_ISV3(vp); + nmrsize = nmp->nm_rsize; + tsiz = uiop->uio_resid; - if (((u_int64_t)uiop->uio_offset + (unsigned int)tsiz > 0xffffffff) && - !v3) + if (((u_int64_t)uiop->uio_offset + (unsigned int)tsiz > 0xffffffff) && !v3) { + FSDBG_BOT(536, vp, uiop->uio_offset, uiop->uio_resid, EFBIG); return (EFBIG); + } while (tsiz > 0) { nfsstats.rpccnt[NFSPROC_READ]++; - len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz; + len = (tsiz > nmrsize) ? 
nmrsize : tsiz; nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3); nfsm_fhtom(vp, v3); nfsm_build(tl, u_long *, NFSX_UNSIGNED * 3); @@ -1500,6 +1582,7 @@ nfs_readrpc(vp, uiop, cred) *tl++ = txdr_unsigned(len); *tl = 0; } + FSDBG(536, vp, uiop->uio_offset, len, 0); nfsm_request(vp, NFSPROC_READ, uiop->uio_procp, cred, &xid); if (v3) { nfsm_postop_attr(vp, attrflag, &xid); @@ -1511,7 +1594,7 @@ nfs_readrpc(vp, uiop, cred) eof = fxdr_unsigned(int, *(tl + 1)); } else nfsm_loadattr(vp, (struct vattr *)0, &xid); - nfsm_strsiz(retlen, nmp->nm_rsize); + nfsm_strsiz(retlen, nmrsize); nfsm_mtouio(uiop, retlen); m_freem(mrep); tsiz -= retlen; @@ -1522,6 +1605,7 @@ nfs_readrpc(vp, uiop, cred) tsiz = 0; } nfsmout: + FSDBG_BOT(536, vp, eof, uiop->uio_resid, error); return (error); } @@ -1540,20 +1624,32 @@ nfs_writerpc(vp, uiop, cred, iomode, must_commit) register int t1, t2, backup; caddr_t bpos, dpos, cp2; struct mbuf *mreq, *mrep, *md, *mb, *mb2; - struct nfsmount *nmp = VFSTONFS(vp->v_mount); + struct nfsmount *nmp; int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit; - int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC; + int v3, committed = NFSV3WRITE_FILESYNC; u_int64_t xid; #if DIAGNOSTIC if (uiop->uio_iovcnt != 1) panic("nfs_writerpc: iovcnt > 1"); #endif + FSDBG_TOP(537, vp, uiop->uio_offset, uiop->uio_resid, *iomode); + nmp = VFSTONFS(vp->v_mount); + if (!nmp) + return (ENXIO); + v3 = NFS_ISV3(vp); *must_commit = 0; tsiz = uiop->uio_resid; - if (((u_int64_t)uiop->uio_offset + (unsigned int)tsiz > 0xffffffff) && !v3) + if (((u_int64_t)uiop->uio_offset + (unsigned int)tsiz > 0xffffffff) && !v3) { + FSDBG_BOT(537, vp, uiop->uio_offset, uiop->uio_resid, EFBIG); return (EFBIG); + } while (tsiz > 0) { + nmp = VFSTONFS(vp->v_mount); + if (!nmp) { + error = ENXIO; + break; + } nfsstats.rpccnt[NFSPROC_WRITE]++; len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz; nfsm_reqhead(vp, NFSPROC_WRITE, @@ -1571,8 +1667,12 @@ nfs_writerpc(vp, uiop, cred, iomode, must_commit) tl += 2; } *tl = txdr_unsigned(len); + FSDBG(537, vp, uiop->uio_offset, len, 0); nfsm_uiotom(uiop, len); nfsm_request(vp, NFSPROC_WRITE, uiop->uio_procp, cred, &xid); + nmp = VFSTONFS(vp->v_mount); + if (!nmp) + error = ENXIO; if (v3) { wccflag = NFSV3_WCCCHK; nfsm_wcc_data(vp, wccflag, &xid); @@ -1602,10 +1702,10 @@ nfs_writerpc(vp, uiop, cred, iomode, must_commit) else if (committed == NFSV3WRITE_DATASYNC && commit == NFSV3WRITE_UNSTABLE) committed = commit; - if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0) { + if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) { bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF); - nmp->nm_flag |= NFSMNT_HASWRITEVERF; + nmp->nm_state |= NFSSTA_HASWRITEVERF; } else if (bcmp((caddr_t)tl, (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) { *must_commit = 1; @@ -1616,7 +1716,7 @@ nfs_writerpc(vp, uiop, cred, iomode, must_commit) } else nfsm_loadattr(vp, (struct vattr *)0, &xid); - if (wccflag && vp->v_type != VBAD) /* EINVAL set on VBAD node */ + if (wccflag) VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec; m_freem(mrep); /* @@ -1631,16 +1731,12 @@ nfs_writerpc(vp, uiop, cred, iomode, must_commit) tsiz -= len; } nfsmout: - /* EKN - * does it make sense to even say it was committed if we had an error? - * okay well just don't on bad vnodes then. 
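/*
 * NFSv3 write-verifier handling as migrated to nm_state above: the
 * first successful WRITE records the server's verifier; any later reply
 * whose verifier differs means the server rebooted and lost its
 * unstable writes, so *must_commit is raised and the data is rewritten.
 * A minimal sketch, assuming an 8-byte verifier (NFSX_V3WRITEVERF):
 */
#include <string.h>

#define VERFSIZE 8	/* NFSX_V3WRITEVERF */

static int				/* 1 => caller must rewrite/recommit */
check_writeverf(unsigned char saved[VERFSIZE], int *have,
    const unsigned char reply[VERFSIZE])
{
	if (!*have) {
		memcpy(saved, reply, VERFSIZE);	/* first verifier: record it */
		*have = 1;			/* i.e. NFSSTA_HASWRITEVERF */
		return (0);
	}
	if (memcmp(saved, reply, VERFSIZE) != 0) {
		memcpy(saved, reply, VERFSIZE);	/* adopt the new epoch */
		return (1);			/* server restarted */
	}
	return (0);
}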
EINVAL will be - * returned on bad vnodes - */ - if (vp->v_type != VBAD && (vp->v_mount->mnt_flag & MNT_ASYNC)) + if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_ASYNC)) committed = NFSV3WRITE_FILESYNC; *iomode = committed; if (error) uiop->uio_resid = tsiz; + FSDBG_BOT(537, vp, committed, uiop->uio_resid, error); return (error); } @@ -1735,13 +1831,11 @@ nfs_mknodrpc(dvp, vpp, cnp, vap) cache_enter(dvp, newvp, cnp); *vpp = newvp; } - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); - if (dvp->v_type != VBAD) { /* EINVAL set on VBAD vnode */ - VTONFS(dvp)->n_flag |= NMODIFIED; - if (!wccflag) - VTONFS(dvp)->n_attrstamp = 0; - } + VTONFS(dvp)->n_flag |= NMODIFIED; + if (!wccflag) + VTONFS(dvp)->n_attrstamp = 0; vput(dvp); + NFS_FREE_PNBUF(cnp); return (error); } @@ -1874,13 +1968,11 @@ again: cache_enter(dvp, newvp, cnp); *ap->a_vpp = newvp; } - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); - if (dvp->v_type != VBAD) { /* EINVAL set on VBAD vnode */ - VTONFS(dvp)->n_flag |= NMODIFIED; - if (!wccflag) - VTONFS(dvp)->n_attrstamp = 0; - } + VTONFS(dvp)->n_flag |= NMODIFIED; + if (!wccflag) + VTONFS(dvp)->n_attrstamp = 0; vput(dvp); + NFS_FREE_PNBUF(cnp); return (error); } @@ -1924,9 +2016,9 @@ nfs_remove(ap) gofree = (ubc_isinuse(vp, 1)) ? 0 : 1; else { /* dead or dying vnode.With vnode locking panic instead of error */ - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); vput(dvp); vput(vp); + NFS_FREE_PNBUF(cnp); return (EIO); } } else { @@ -1934,6 +2026,13 @@ nfs_remove(ap) if (vp->v_usecount == 1) gofree = 1; } + if ((ap->a_cnp->cn_flags & NODELETEBUSY) && !gofree) { + /* Caller requested Carbon delete semantics, but file is busy */ + vput(dvp); + vput(vp); + NFS_FREE_PNBUF(cnp); + return (EBUSY); + } if (gofree || (np->n_sillyrename && VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 && vattr.va_nlink > 1)) { @@ -1964,15 +2063,23 @@ nfs_remove(ap) */ if (error == ENOENT) error = 0; + if (!error) { + /* + * remove nfsnode from hash now so we can't accidentally find it + * again if another object gets created with the same filehandle + * before this vnode gets reclaimed + */ + LIST_REMOVE(np, n_hash); + np->n_flag &= ~NHASHED; + } } else if (!np->n_sillyrename) { error = nfs_sillyrename(dvp, vp, cnp); } - - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); np->n_attrstamp = 0; vput(dvp); VOP_UNLOCK(vp, 0, cnp->cn_proc); + NFS_FREE_PNBUF(cnp); ubc_uncache(vp); vrele(vp); @@ -2008,9 +2115,13 @@ nfs_removerpc(dvp, name, namelen, cred, proc) caddr_t bpos, dpos, cp2; int error = 0, wccflag = NFSV3_WCCRATTR; struct mbuf *mreq, *mrep, *md, *mb, *mb2; - int v3 = NFS_ISV3(dvp); + int v3; u_int64_t xid; + if (!VFSTONFS(dvp->v_mount)) + return (ENXIO); + v3 = NFS_ISV3(dvp); + nfsstats.rpccnt[NFSPROC_REMOVE]++; nfsm_reqhead(dvp, NFSPROC_REMOVE, NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen)); @@ -2020,11 +2131,9 @@ nfs_removerpc(dvp, name, namelen, cred, proc) if (v3) nfsm_wcc_data(dvp, wccflag, &xid); nfsm_reqdone; - if (dvp->v_type != VBAD) { /* EINVAL set on VBAD vnode */ - VTONFS(dvp)->n_flag |= NMODIFIED; - if (!wccflag) - VTONFS(dvp)->n_attrstamp = 0; - } + VTONFS(dvp)->n_flag |= NMODIFIED; + if (!wccflag) + VTONFS(dvp)->n_attrstamp = 0; return (error); } @@ -2091,6 +2200,7 @@ nfs_rename(ap) if (inuse && !VTONFS(tvp)->n_sillyrename && tvp->v_type != VDIR) { if (error = nfs_sillyrename(tdvp, tvp, tcnp)) { /* sillyrename failed. Instead of pressing on, return error */ + VOP_UNLOCK(tvp, 0, tcnp->cn_proc); goto out; /* should not be ENOENT. 
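/*
 * Removal policy after the nfs_remove() hunks above, in order:
 *
 *   1. NODELETEBUSY set and the file still busy -> EBUSY; Carbon-style
 *      callers want a hard failure rather than a sillyrename.
 *   2. File unused (gofree), or already sillyrenamed with another link
 *      remaining -> issue the real REMOVE rpc, mapping a retry's ENOENT
 *      back to success.  On success the nfsnode is unhashed at once
 *      (LIST_REMOVE plus clearing NHASHED) so a server-recycled
 *      filehandle cannot be matched to this dying vnode before it is
 *      reclaimed.
 *   3. Otherwise -> nfs_sillyrename(), keeping the data reachable for
 *      descriptors that still have the file open.
 */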
*/ } else { /* sillyrename succeeded.*/ @@ -2105,6 +2215,16 @@ nfs_rename(ap) tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred, tcnp->cn_proc); + if (!error && tvp && tvp != fvp && !VTONFS(tvp)->n_sillyrename) { + /* + * remove nfsnode from hash now so we can't accidentally find it + * again if another object gets created with the same filehandle + * before this vnode gets reclaimed + */ + LIST_REMOVE(VTONFS(tvp), n_hash); + VTONFS(tvp)->n_flag &= ~NHASHED; + } + if (fvp->v_type == VDIR) { if (tvp != NULL && tvp->v_type == VDIR) { cache_purge(tdvp); @@ -2172,9 +2292,13 @@ nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, proc) caddr_t bpos, dpos, cp2; int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR; struct mbuf *mreq, *mrep, *md, *mb, *mb2; - int v3 = NFS_ISV3(fdvp); + int v3; u_int64_t xid; + if (!VFSTONFS(fdvp->v_mount)) + return (ENXIO); + v3 = NFS_ISV3(fdvp); + nfsstats.rpccnt[NFSPROC_RENAME]++; nfsm_reqhead(fdvp, NFSPROC_RENAME, (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) + @@ -2191,16 +2315,12 @@ nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, proc) nfsm_wcc_data(tdvp, twccflag, &txid); } nfsm_reqdone; - if (fdvp->v_type != VBAD) { /* EINVAL set on VBAD vnode */ - VTONFS(fdvp)->n_flag |= NMODIFIED; - if (!fwccflag) - VTONFS(fdvp)->n_attrstamp = 0; - } - if (tdvp->v_type != VBAD) { /* EINVAL set on VBAD vnode */ - VTONFS(tdvp)->n_flag |= NMODIFIED; - if (!twccflag) - VTONFS(tdvp)->n_attrstamp = 0; - } + VTONFS(fdvp)->n_flag |= NMODIFIED; + if (!fwccflag) + VTONFS(fdvp)->n_attrstamp = 0; + VTONFS(tdvp)->n_flag |= NMODIFIED; + if (!twccflag) + VTONFS(tdvp)->n_attrstamp = 0; return (error); } @@ -2224,24 +2344,38 @@ nfs_link(ap) caddr_t bpos, dpos, cp2; int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0; struct mbuf *mreq, *mrep, *md, *mb, *mb2; - int v3 = NFS_ISV3(vp); + int v3, didhold; u_int64_t xid; if (vp->v_mount != tdvp->v_mount) { VOP_ABORTOP(vp, cnp); - if (tdvp == vp) - vrele(tdvp); - else - vput(tdvp); + vput(tdvp); return (EXDEV); } + /* need to get vnode lock for vp before calling VOP_FSYNC() */ + if (error = vn_lock(vp, LK_EXCLUSIVE, cnp->cn_proc)) { + VOP_ABORTOP(vp, cnp); + vput(tdvp); + return (error); + } + + if (!VFSTONFS(vp->v_mount)) { + VOP_UNLOCK(vp, 0, cnp->cn_proc); + VOP_ABORTOP(vp, cnp); + vput(tdvp); + return (ENXIO); + } + v3 = NFS_ISV3(vp); + /* * Push all writes to the server, so that the attribute cache * doesn't get "out of sync" with the server. * XXX There should be a better way! */ + didhold = ubc_hold(vp); VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc); + VOP_UNLOCK(vp, 0, cnp->cn_proc); nfsstats.rpccnt[NFSPROC_LINK]++; nfsm_reqhead(vp, NFSPROC_LINK, @@ -2257,14 +2391,16 @@ nfs_link(ap) nfsm_wcc_data(tdvp, wccflag, &txid); } nfsm_reqdone; - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); VTONFS(tdvp)->n_flag |= NMODIFIED; - if (!attrflag && vp->v_type != VBAD) /* EINVAL set on VBAD vnode */ + if (!attrflag) VTONFS(vp)->n_attrstamp = 0; - if (!wccflag && tdvp->v_type != VBAD) /* EINVAL set on VBAD vnode */ + if (!wccflag) VTONFS(tdvp)->n_attrstamp = 0; + if (didhold) + ubc_rele(vp); vput(tdvp); + NFS_FREE_PNBUF(cnp); /* * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 
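/*
 * The "Map EEXIST => 0" kludge above (and the matching ENOENT => 0 in
 * remove/rmdir) copes with retransmitted non-idempotent rpcs: if the
 * first LINK/SYMLINK/MKDIR actually succeeded, the duplicate request's
 * EEXIST really means "already done".  In code form it is just
 *
 *	if (error == EEXIST)
 *		error = 0;
 *
 * The commonly cited real fix is a duplicate-request cache on the
 * server; the client side can only guess.
 */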
*/ @@ -2333,13 +2469,12 @@ nfs_symlink(ap) nfsm_reqdone; if (newvp) vput(newvp); - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); - if (dvp->v_type != VBAD) { /* EINVAL set on VBAD vnode */ - VTONFS(dvp)->n_flag |= NMODIFIED; - if (!wccflag) - VTONFS(dvp)->n_attrstamp = 0; - } + + VTONFS(dvp)->n_flag |= NMODIFIED; + if (!wccflag) + VTONFS(dvp)->n_attrstamp = 0; vput(dvp); + NFS_FREE_PNBUF(cnp); /* * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */ @@ -2409,18 +2544,16 @@ nfs_mkdir(ap) if (v3) nfsm_wcc_data(dvp, wccflag, &dxid); nfsm_reqdone; - if (dvp->v_type != VBAD) { /* EINVAL set on this case */ - VTONFS(dvp)->n_flag |= NMODIFIED; - if (!wccflag) - VTONFS(dvp)->n_attrstamp = 0; - } + VTONFS(dvp)->n_flag |= NMODIFIED; + if (!wccflag) + VTONFS(dvp)->n_attrstamp = 0; /* * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry * if we can succeed in looking up the directory. */ if (error == EEXIST || (!error && !gotvp)) { if (newvp) { - vrele(newvp); + vput(newvp); newvp = (struct vnode *)0; } error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred, @@ -2433,11 +2566,11 @@ nfs_mkdir(ap) } if (error) { if (newvp) - vrele(newvp); + vput(newvp); } else *ap->a_vpp = newvp; - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); vput(dvp); + NFS_FREE_PNBUF(cnp); return (error); } @@ -2473,16 +2606,14 @@ nfs_rmdir(ap) if (v3) nfsm_wcc_data(dvp, wccflag, &xid); nfsm_reqdone; - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); - if (dvp->v_type != VBAD) { /* EINVAL set on this case */ - VTONFS(dvp)->n_flag |= NMODIFIED; - if (!wccflag) - VTONFS(dvp)->n_attrstamp = 0; - } + VTONFS(dvp)->n_flag |= NMODIFIED; + if (!wccflag) + VTONFS(dvp)->n_attrstamp = 0; cache_purge(dvp); cache_purge(vp); vput(vp); vput(dvp); + NFS_FREE_PNBUF(cnp); /* * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. */ @@ -2520,10 +2651,13 @@ nfs_readdir(ap) nfsstats.direofcache_hits++; return (0); } - } else if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 && - np->n_mtime == vattr.va_mtime.tv_sec) { - nfsstats.direofcache_hits++; - return (0); + } else if (!VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp)) { + if (np->n_mtime == vattr.va_mtime.tv_sec) { + nfsstats.direofcache_hits++; + return (0); + } + /* directory changed, purge any name cache entries */ + cache_purge(vp); } } @@ -2558,12 +2692,12 @@ nfs_readdirrpc(vp, uiop, cred) caddr_t bpos, dpos, cp2; struct mbuf *mreq, *mrep, *md, *mb, *mb2; nfsuint64 cookie; - struct nfsmount *nmp = VFSTONFS(vp->v_mount); + struct nfsmount *nmp; struct nfsnode *dnp = VTONFS(vp); u_quad_t fileno; int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1; int attrflag; - int v3 = NFS_ISV3(vp); + int v3, nmreaddirsize; u_int64_t xid; #ifndef nolint @@ -2574,6 +2708,11 @@ nfs_readdirrpc(vp, uiop, cred) (uiop->uio_resid & (NFS_DIRBLKSIZ - 1))) panic("nfs_readdirrpc: bad uio"); #endif + nmp = VFSTONFS(vp->v_mount); + if (!nmp) + return (ENXIO); + v3 = NFS_ISV3(vp); + nmreaddirsize = nmp->nm_readdirsize; /* * If there is no cookie, assume directory was stale. 
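/*
 * A pattern these hunks repeat in nearly every rpc helper (readdirrpc
 * below; also readrpc, writerpc, lookitup, removerpc, renamerpc):
 * VFSTONFS() can now return NULL once a forced unmount has begun, so
 * the mount is revalidated up front and the tunables are snapshotted
 * before anything can block:
 */
	nmp = VFSTONFS(vp->v_mount);
	if (!nmp)
		return (ENXIO);			/* force-unmounted under us */
	nmreaddirsize = nmp->nm_readdirsize;	/* snapshot, don't re-deref */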
@@ -2603,7 +2742,7 @@ nfs_readdirrpc(vp, uiop, cred) nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); *tl++ = cookie.nfsuquad[0]; } - *tl = txdr_unsigned(nmp->nm_readdirsize); + *tl = txdr_unsigned(nmreaddirsize); nfsm_request(vp, NFSPROC_READDIR, uiop->uio_procp, cred, &xid); if (v3) { nfsm_postop_attr(vp, attrflag, &xid); @@ -2746,12 +2885,12 @@ nfs_readdirplusrpc(vp, uiop, cred) struct nameidata nami, *ndp = &nami; struct componentname *cnp = &ndp->ni_cnd; nfsuint64 cookie; - struct nfsmount *nmp = VFSTONFS(vp->v_mount); + struct nfsmount *nmp; struct nfsnode *dnp = VTONFS(vp), *np; nfsfh_t *fhp; u_quad_t fileno; int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i; - int attrflag, fhsize; + int attrflag, fhsize, nmreaddirsize, nmrsize; u_int64_t xid, savexid; #ifndef nolint @@ -2762,6 +2901,12 @@ nfs_readdirplusrpc(vp, uiop, cred) (uiop->uio_resid & (DIRBLKSIZ - 1))) panic("nfs_readdirplusrpc: bad uio"); #endif + nmp = VFSTONFS(vp->v_mount); + if (!nmp) + return (ENXIO); + nmreaddirsize = nmp->nm_readdirsize; + nmrsize = nmp->nm_rsize; + ndp->ni_dvp = vp; newvp = NULLVP; @@ -2788,8 +2933,8 @@ nfs_readdirplusrpc(vp, uiop, cred) *tl++ = cookie.nfsuquad[1]; *tl++ = dnp->n_cookieverf.nfsuquad[0]; *tl++ = dnp->n_cookieverf.nfsuquad[1]; - *tl++ = txdr_unsigned(nmp->nm_readdirsize); - *tl = txdr_unsigned(nmp->nm_rsize); + *tl++ = txdr_unsigned(nmreaddirsize); + *tl = txdr_unsigned(nmrsize); nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_procp, cred, &xid); savexid = xid; @@ -2877,6 +3022,20 @@ nfs_readdirplusrpc(vp, uiop, cred) VREF(vp); newvp = vp; np = dnp; + } else if (!bigenough || + (cnp->cn_namelen == 2 && + cnp->cn_nameptr[1] == '.' && + cnp->cn_nameptr[0] == '.')) { + /* + * don't doit if we can't guarantee + * that this entry is NOT ".." because + * we would have to drop the lock on + * the directory before getting the + * (lock on) the ".." vnode... and we + * don't want to drop the dvp lock in + * the middle of a readdirplus. + */ + doit = 0; } else { if ((error = nfs_nget(vp->v_mount, fhp, fhsize, &np))) @@ -2885,7 +3044,7 @@ nfs_readdirplusrpc(vp, uiop, cred) newvp = NFSTOV(np); } } - if (doit) { + if (doit && bigenough) { dpossav2 = dpos; dpos = dpossav1; mdsav2 = md; @@ -2911,7 +3070,10 @@ nfs_readdirplusrpc(vp, uiop, cred) nfsm_adv(nfsm_rndup(i)); } if (newvp != NULLVP) { - vrele(newvp); + if (newvp == vp) + vrele(newvp); + else + vput(newvp); newvp = NULLVP; } nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); @@ -2970,6 +3132,11 @@ nfsmout: * to create the same funny name between the nfs_lookitup() fails and the * nfs_rename() completes, but... 
*/ + +/* format of "random" names and next name to try */ +/* (note: shouldn't exceed size of sillyrename.s_name) */ +static char sillyrename_name[] = ".nfsAAA%04x4.4"; + static int nfs_sillyrename(dvp, vp, cnp) struct vnode *dvp, *vp; @@ -2980,6 +3147,7 @@ nfs_sillyrename(dvp, vp, cnp) int error; short pid; struct ucred *cred; + int i, j, k; cache_purge(dvp); np = VTONFS(vp); @@ -2995,17 +3163,39 @@ nfs_sillyrename(dvp, vp, cnp) /* Fudge together a funny name */ pid = cnp->cn_proc->p_pid; - sp->s_namlen = sprintf(sp->s_name, ".nfsA%04x4.4", pid); + sp->s_namlen = sprintf(sp->s_name, sillyrename_name, pid); /* Try lookitups until we get one that isn't there */ + i = j = k = 0; while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, cnp->cn_proc, (struct nfsnode **)0) == 0) { - sp->s_name[4]++; - if (sp->s_name[4] > 'z') { - error = EINVAL; - goto bad; + if (sp->s_name[4]++ >= 'z') + sp->s_name[4] = 'A'; + if (++i > ('z' - 'A' + 1)) { + i = 0; + if (sp->s_name[5]++ >= 'z') + sp->s_name[5] = 'A'; + if (++j > ('z' - 'A' + 1)) { + j = 0; + if (sp->s_name[6]++ >= 'z') + sp->s_name[6] = 'A'; + if (++k > ('z' - 'A' + 1)) { + error = EINVAL; + goto bad; + } + } } } + /* make note of next "random" name to try */ + if ((sillyrename_name[4] = (sp->s_name[4] + 1)) > 'z') { + sillyrename_name[4] = 'A'; + if ((sillyrename_name[5] = (sp->s_name[5] + 1)) > 'z') { + sillyrename_name[5] = 'A'; + if ((sillyrename_name[6] = (sp->s_name[6] + 1)) > 'z') + sillyrename_name[6] = 'A'; + } + } + /* now, do the rename */ if ((error = nfs_renameit(dvp, cnp, sp))) goto bad; error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, @@ -3021,7 +3211,7 @@ bad: cred = sp->s_cred; sp->s_cred = NOCRED; crfree(cred); - _FREE_ZONE((caddr_t)sp, sizeof (struct sillyrename), M_NFSREQ); + FREE_ZONE((caddr_t)sp, sizeof (struct sillyrename), M_NFSREQ); return (error); } @@ -3051,9 +3241,13 @@ nfs_lookitup(dvp, name, len, cred, procp, npp) int error = 0, fhlen, attrflag; struct mbuf *mreq, *mrep, *md, *mb, *mb2; nfsfh_t *nfhp; - int v3 = NFS_ISV3(dvp); + int v3; u_int64_t xid; + if (!VFSTONFS(dvp->v_mount)) + return (ENXIO); + v3 = NFS_ISV3(dvp); + nfsstats.rpccnt[NFSPROC_LOOKUP]++; nfsm_reqhead(dvp, NFSPROC_LOOKUP, NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len)); @@ -3065,7 +3259,7 @@ nfs_lookitup(dvp, name, len, cred, procp, npp) if (*npp) { np = *npp; if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) { - _FREE_ZONE((caddr_t)np->n_fhp, + FREE_ZONE((caddr_t)np->n_fhp, np->n_fhsize, M_NFSBIGFH); np->n_fhp = &np->n_fh; } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH) @@ -3115,7 +3309,7 @@ nfs_lookitup(dvp, name, len, cred, procp, npp) /* * Nfs Version 3 commit rpc */ -static int +int nfs_commit(vp, offset, cnt, cred, procp) register struct vnode *vp; u_quad_t offset; @@ -3132,8 +3326,10 @@ nfs_commit(vp, offset, cnt, cred, procp) struct mbuf *mreq, *mrep, *md, *mb, *mb2; u_int64_t xid; - FSDBG(521, vp, offset, cnt, nmp->nm_flag); - if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0) + FSDBG(521, vp, offset, cnt, nmp->nm_state); + if (!nmp) + return (ENXIO); + if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) return (0); nfsstats.rpccnt[NFSPROC_COMMIT]++; nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1)); @@ -3157,15 +3353,6 @@ nfs_commit(vp, offset, cnt, cred, procp) return (error); } -/* - * Kludge City.. 
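/*
 * The sillyrename name generator above, restated as a standalone
 * program: positions 4-6 of ".nfsAAA%04x4.4" are counter characters,
 * each running through the 58 ASCII codes 'A'..'z' (punctuation between
 * 'Z' and 'a' included), giving 58^3 candidates per pid before EINVAL.
 * This is an equivalent odometer-style restatement; the pid value is
 * illustrative.
 */
#include <stdio.h>

int
main(void)
{
	char name[32];
	int tries;

	snprintf(name, sizeof(name), ".nfsAAA%04x4.4", 0x1234);
	for (tries = 0; tries < 5; tries++) {	/* first few candidates */
		printf("%s\n", name);
		if (name[4]++ >= 'z') {		/* wrap lowest digit */
			name[4] = 'A';
			if (name[5]++ >= 'z') {	/* carry into middle digit */
				name[5] = 'A';
				if (name[6]++ >= 'z')
					name[6] = 'A';
			}
		}
	}
	return (0);
}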
- * - make nfs_bmap() essentially a no-op that does no translation - * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc - * (Maybe I could use the process's page mapping, but I was concerned that - * Kernel Write might not be enabled and also figured copyout() would do - * a lot more work than bcopy() and also it currently happens in the - * context of the swapper process (2). - */ static int nfs_bmap(ap) struct vop_bmap_args /* { @@ -3182,9 +3369,12 @@ nfs_bmap(ap) if (ap->a_vpp != NULL) *ap->a_vpp = vp; - if (ap->a_bnp != NULL) + if (ap->a_bnp != NULL) { + if (!vp->v_mount) + return (ENXIO); *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize, devBlockSize); + } if (ap->a_runp != NULL) *ap->a_runp = 0; #ifdef notyet @@ -3194,41 +3384,6 @@ nfs_bmap(ap) return (0); } -/* - * Strategy routine. - * For async requests when nfsiod(s) are running, queue the request by - * calling nfs_asyncio(), otherwise just all nfs_doio() to do the - * request. - */ -static int -nfs_strategy(ap) - struct vop_strategy_args *ap; -{ - register struct buf *bp = ap->a_bp; - struct ucred *cr; - struct proc *p; - int error = 0; - - if (ISSET(bp->b_flags, B_PHYS)) - panic("nfs_strategy: physio"); - if (ISSET(bp->b_flags, B_ASYNC)) - p = (struct proc *)0; - else - p = current_proc(); /* XXX */ - if (ISSET(bp->b_flags, B_READ)) - cr = bp->b_rcred; - else - cr = bp->b_wcred; - /* - * If the op is asynchronous and an i/o daemon is waiting - * queue the request, wake it up and wait for completion - * otherwise just do it ourselves. - */ - if (!ISSET(bp->b_flags, B_ASYNC) || nfs_asyncio(bp, NOCRED)) - error = nfs_doio(bp, cr, p); - return (error); -} - /* * Mmap a file * @@ -3264,270 +3419,284 @@ nfs_fsync(ap) { return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1)); } - -/* - * Flush all the blocks associated with a vnode. - * Walk through the buffer pool and push any dirty pages - * associated with the vnode. - */ -static int -nfs_flush(vp, cred, waitfor, p, commit) - register struct vnode *vp; - struct ucred *cred; - int waitfor; - struct proc *p; - int commit; + +int +nfs_flushcommits(struct vnode *vp, struct proc *p) { - register struct nfsnode *np = VTONFS(vp); - register struct buf *bp; - register int i; - struct buf *nbp; - struct nfsmount *nmp = VFSTONFS(vp->v_mount); - int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos, err; - int passone = 1; + struct nfsnode *np = VTONFS(vp); + struct nfsbuf *bp, *nbp; + int i, s, error = 0, retv, bvecpos, wcred_set; u_quad_t off, endoff, toff; - struct ucred* wcred = NULL; - struct buf **bvec = NULL; -#ifndef NFS_COMMITBVECSIZ + struct ucred* wcred; + struct nfsbuf **bvec = NULL; #define NFS_COMMITBVECSIZ 20 -#endif - struct buf *bvec_on_stack[NFS_COMMITBVECSIZ]; - int bvecsize = 0, bveccount; - kern_return_t kret; - upl_t upl; - - FSDBG_TOP(517, vp, np, waitfor, commit); +#define NFS_MAXCOMMITBVECSIZ 1024 + struct nfsbuf *bvec_on_stack[NFS_COMMITBVECSIZ]; + int bvecsize = NFS_MAXCOMMITBVECSIZ; - if (nmp->nm_flag & NFSMNT_INT) - slpflag = PCATCH; - if (!commit) - passone = 0; + FSDBG_TOP(557, vp, np, 0, 0); /* - * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the + * A nb_flags == (NB_DELWRI | NB_NEEDCOMMIT) block has been written to the * server, but nas not been committed to stable storage on the server - * yet. On the first pass, the byte range is worked out and the commit - * rpc is done. On the second pass, nfs_writebp() is called to do the - * job. + * yet. 
The byte range is worked out for as many nfsbufs as we can handle + * and the commit rpc is done. */ -again: - FSDBG(518, vp->v_dirtyblkhd.lh_first, np->n_flag, 0, 0); - if (vp->v_dirtyblkhd.lh_first) + if (np->n_dirtyblkhd.lh_first) np->n_flag |= NMODIFIED; + off = (u_quad_t)-1; endoff = 0; bvecpos = 0; - if (NFS_ISV3(vp) && commit) { - s = splbio(); - /* - * Count up how many buffers waiting for a commit. - * This is an upper bound - any with dirty pages must be - * written not commited. - */ - bveccount = 0; - for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) { - nbp = bp->b_vnbufs.le_next; - if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT)) - == (B_DELWRI | B_NEEDCOMMIT)) - bveccount++; - FSDBG(519, bp, bp->b_flags, bveccount, 0); - } - /* - * Allocate space to remember the list of bufs to commit. It is - * important to use M_NOWAIT here to avoid a race with nfs_write - * If we can't get memory (for whatever reason), we will end up - * committing the buffers one-by-one in the loop below. - */ - if (bvec != NULL && bvec != bvec_on_stack) - _FREE(bvec, M_TEMP); - if (bveccount > NFS_COMMITBVECSIZ) { - MALLOC(bvec, struct buf **, - bveccount * sizeof(struct buf *), M_TEMP, - M_NOWAIT); - if (bvec == NULL) { - bvec = bvec_on_stack; - bvecsize = NFS_COMMITBVECSIZ; - } else - bvecsize = bveccount; - } else { - bvec = bvec_on_stack; - bvecsize = NFS_COMMITBVECSIZ; - } - FSDBG(519, 0, bvecsize, bveccount, 0); + wcred_set = 0; - for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) { - nbp = bp->b_vnbufs.le_next; + if (!VFSTONFS(vp->v_mount)) { + error = ENXIO; + goto done; + } + if (!NFS_ISV3(vp)) { + error = EINVAL; + goto done; + } + s = splbio(); - FSDBG(520, bp, bp->b_flags, bvecpos, bp->b_bufsize); - FSDBG(520, bp->b_validoff, bp->b_validend, - bp->b_dirtyoff, bp->b_dirtyend); - if (bvecpos >= bvecsize) - break; - if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT)) - != (B_DELWRI | B_NEEDCOMMIT)) - continue; + /* + * Allocate space to remember the list of bufs to commit. It is + * important to use M_NOWAIT here to avoid a race with nfs_write + */ + MALLOC(bvec, struct nfsbuf **, + bvecsize * sizeof(struct nfsbuf *), M_TEMP, + M_NOWAIT); + if (bvec == NULL) { + bvec = bvec_on_stack; + bvecsize = NFS_COMMITBVECSIZ; + } + for (bp = np->n_dirtyblkhd.lh_first; bp && bvecpos < bvecsize; bp = nbp) { + nbp = bp->nb_vnbufs.le_next; - bremfree(bp); - SET(bp->b_flags, B_BUSY); - /* - * we need a upl to see if the page has been - * dirtied (think mmap) since the unstable write, and - * so to prevent vm from paging during our commit rpc - */ - if (ISSET(bp->b_flags, B_PAGELIST)) { - upl = bp->b_pagelist; - } else { - kret = ubc_create_upl(vp, ubc_blktooff(vp, bp->b_lblkno), - bp->b_bufsize, &upl, - NULL, UPL_PRECIOUS); - if (kret != KERN_SUCCESS) - panic("nfs_flush: create upl %d", kret); -#ifdef UBC_DEBUG - upl_ubc_alias_set(upl, current_act(), 1); -#endif /* UBC_DEBUG */ - } - if (upl_dirty_page(ubc_upl_pageinfo(upl), 0)) { - if (!ISSET(bp->b_flags, B_PAGELIST)) { - err = ubc_upl_abort(upl, NULL); - if (err) - printf("nfs_flush: upl abort %d\n", err); - } - /* - * Any/all of it may be modified... - */ - bp->b_dirtyoff = bp->b_validoff; - bp->b_dirtyend = bp->b_validend; - CLR(bp->b_flags, B_NEEDCOMMIT); - /* blocking calls were made, re-evaluate nbp */ - nbp = bp->b_vnbufs.le_next; - brelse(bp); /* XXX may block. Is using nbp ok??? 
*/ - continue; - } - if (!ISSET(bp->b_flags, B_PAGELIST)) { - bp->b_pagelist = upl; - SET(bp->b_flags, B_PAGELIST); - ubc_upl_map(upl, (vm_address_t *)&bp->b_data); + if (((bp->nb_flags & (NB_BUSY | NB_DELWRI | NB_NEEDCOMMIT)) + != (NB_DELWRI | NB_NEEDCOMMIT))) + continue; + + nfs_buf_remfree(bp); + SET(bp->nb_flags, NB_BUSY); + /* + * we need a upl to see if the page has been + * dirtied (think mmap) since the unstable write, and + * also to prevent vm from paging it during our commit rpc + */ + if (!ISSET(bp->nb_flags, NB_PAGELIST)) { + retv = nfs_buf_upl_setup(bp); + if (retv) { + /* unable to create upl */ + /* vm object must no longer exist */ + /* this could be fatal if we need */ + /* to write the data again, we'll see... */ + printf("nfs_flushcommits: upl create failed %d\n", retv); + bp->nb_valid = bp->nb_dirty = 0; } + } + nfs_buf_upl_check(bp); - /* blocking calls were made, re-evaluate nbp */ - nbp = bp->b_vnbufs.le_next; + FSDBG(557, bp, bp->nb_flags, bp->nb_valid, bp->nb_dirty); + FSDBG(557, bp->nb_validoff, bp->nb_validend, + bp->nb_dirtyoff, bp->nb_dirtyend); - /* - * Work out if all buffers are using the same cred - * so we can deal with them all with one commit. - */ - if (wcred == NULL) - wcred = bp->b_wcred; - else if (wcred != bp->b_wcred) - wcred = NOCRED; - SET(bp->b_flags, B_WRITEINPROG); + /* + * We used to check for dirty pages here; if there were any + * we'd abort the commit and force the entire buffer to be + * written again. + * + * Instead of doing that, we now go ahead and commit the dirty + * range, and then leave the buffer around with dirty pages + * that will be written out later. + */ + + /* in case blocking calls were made, re-evaluate nbp */ + nbp = bp->nb_vnbufs.le_next; - /* - * A list of these buffers is kept so that the - * second loop knows which buffers have actually - * been committed. This is necessary, since there - * may be a race between the commit rpc and new - * uncommitted writes on the file. - */ - bvec[bvecpos++] = bp; - toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + - bp->b_dirtyoff; - if (toff < off) - off = toff; - toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff); - if (toff > endoff) - endoff = toff; - } - splx(s); - } - if (bvecpos > 0) { /* - * Commit data on the server, as required. - * If all bufs are using the same wcred, then use that with - * one call for all of them, otherwise commit each one - * separately. + * Work out if all buffers are using the same cred + * so we can deal with them all with one commit. */ - if (wcred != NOCRED) - retv = nfs_commit(vp, off, (int)(endoff - off), - wcred, p); - else { - retv = 0; - for (i = 0; i < bvecpos; i++) { - off_t off, size; - bp = bvec[i]; - FSDBG(522, bp, bp->b_blkno * DEV_BSIZE, - bp->b_dirtyoff, bp->b_dirtyend); - off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + - bp->b_dirtyoff; - size = (u_quad_t)(bp->b_dirtyend - - bp->b_dirtyoff); - retv = nfs_commit(vp, off, (int)size, - bp->b_wcred, p); - if (retv) break; - } + if (wcred_set == 0) { + wcred = bp->nb_wcred; + if (wcred == NOCRED) + panic("nfs: needcommit w/out wcred"); + wcred_set = 1; + } else if ((wcred_set == 1) && crcmp(wcred, bp->nb_wcred)) { + wcred_set = -1; } - - if (retv == NFSERR_STALEWRITEVERF) - nfs_clearcommit(vp->v_mount); + SET(bp->nb_flags, NB_WRITEINPROG); /* - * Now, either mark the blocks I/O done or mark the - * blocks dirty, depending on whether the commit - * succeeded. + * A list of these buffers is kept so that the + * second loop knows which buffers have actually + * been committed. 
This is necessary, since there + * may be a race between the commit rpc and new + * uncommitted writes on the file. */ + bvec[bvecpos++] = bp; + toff = NBOFF(bp) + bp->nb_dirtyoff; + if (toff < off) + off = toff; + toff += (u_quad_t)(bp->nb_dirtyend - bp->nb_dirtyoff); + if (toff > endoff) + endoff = toff; + } + splx(s); + + if (bvecpos == 0) { + error = ENOBUFS; + goto done; + } + + /* + * Commit data on the server, as required. + * If all bufs are using the same wcred, then use that with + * one call for all of them, otherwise commit each one + * separately. + */ + if (wcred_set == 1) + retv = nfs_commit(vp, off, (int)(endoff - off), wcred, p); + else { + retv = 0; + for (i = 0; i < bvecpos; i++) { + off_t off, size; bp = bvec[i]; - FSDBG(523, bp, retv, bp->b_flags, 0); - CLR(bp->b_flags, (B_NEEDCOMMIT | B_WRITEINPROG)); - if (retv) { - brelse(bp); - } else { - int oldflags = bp->b_flags; - - s = splbio(); - vp->v_numoutput++; - SET(bp->b_flags, B_ASYNC); - CLR(bp->b_flags, - (B_READ|B_DONE|B_ERROR|B_DELWRI)); - if (ISSET(oldflags, B_DELWRI)) { - extern int nbdwrite; - nbdwrite--; - wakeup((caddr_t)&nbdwrite); - } - bp->b_dirtyoff = bp->b_dirtyend = 0; - reassignbuf(bp, vp); - splx(s); - biodone(bp); + off = NBOFF(bp) + bp->nb_dirtyoff; + size = (u_quad_t)(bp->nb_dirtyend - bp->nb_dirtyoff); + retv = nfs_commit(vp, off, (int)size, bp->nb_wcred, p); + if (retv) break; + } + } + if (retv == NFSERR_STALEWRITEVERF) + nfs_clearcommit(vp->v_mount); + + /* + * Now, either mark the blocks I/O done or mark the + * blocks dirty, depending on whether the commit + * succeeded. + */ + for (i = 0; i < bvecpos; i++) { + bp = bvec[i]; + FSDBG(557, bp, retv, bp->nb_flags, bp->nb_dirty); + + CLR(bp->nb_flags, (NB_NEEDCOMMIT | NB_WRITEINPROG)); + + np->n_needcommitcnt--; + CHECK_NEEDCOMMITCNT(np); + + if (retv) { + nfs_buf_release(bp); + } else { + s = splbio(); + vp->v_numoutput++; + + if (ISSET(bp->nb_flags, NB_DELWRI)) { + nfs_nbdwrite--; + NFSBUFCNTCHK(); + wakeup((caddr_t)&nfs_nbdwrite); + } + CLR(bp->nb_flags, (NB_READ|NB_DONE|NB_ERROR|NB_DELWRI)); + /* if block still has dirty pages, we don't want it to */ + /* be released in nfs_buf_iodone(). So, don't set NB_ASYNC. */ + if (!bp->nb_dirty) + SET(bp->nb_flags, NB_ASYNC); + + /* move to clean list */ + if (bp->nb_vnbufs.le_next != NFSNOLIST) + LIST_REMOVE(bp, nb_vnbufs); + LIST_INSERT_HEAD(&VTONFS(vp)->n_cleanblkhd, bp, nb_vnbufs); + + bp->nb_dirtyoff = bp->nb_dirtyend = 0; + splx(s); + + nfs_buf_iodone(bp); + if (bp->nb_dirty) { + /* throw it back in as a delayed write buffer */ + CLR(bp->nb_flags, NB_DONE); + nfs_buf_write_delayed(bp); } } + } + +done: + if (bvec != NULL && bvec != bvec_on_stack) + _FREE(bvec, M_TEMP); + FSDBG_BOT(557, vp, np, 0, error); + return (error); +} +/* + * Flush all the blocks associated with a vnode. + * Walk through the buffer pool and push any dirty pages + * associated with the vnode. + */ +static int +nfs_flush(vp, cred, waitfor, p, commit) + register struct vnode *vp; + struct ucred *cred; + int waitfor; + struct proc *p; + int commit; +{ + struct nfsnode *np = VTONFS(vp); + struct nfsbuf *bp, *nbp; + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + int i, s, error = 0, error2, slptimeo = 0, slpflag = 0; + int passone = 1; + + FSDBG_TOP(517, vp, np, waitfor, commit); + + if (!nmp) { + error = ENXIO; + goto done; } + if (nmp->nm_flag & NFSMNT_INT) + slpflag = PCATCH; + if (!commit) + passone = 0; + /* - * Start/do any write(s) that are required. There is a window here - * where B_BUSY protects the buffer. 
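For readers tracing the arithmetic above: the loop folds every eligible buffer's dirty bytes into one [off, endoff) window so that a single COMMIT rpc -- the nfs_commit(vp, off, (int)(endoff - off), wcred, p) call -- can cover all of them when the credentials match, and a stale write verifier from the server (NFSERR_STALEWRITEVERF) then forces the whole set to be rewritten via nfs_clearcommit(). A minimal standalone sketch of just the range coalescing, with hypothetical types that mirror the nfsbuf fields (not code from this patch):

	struct dirty_buf {
		unsigned long long file_off;	/* NBOFF(bp): buffer's offset in the file */
		int dirtyoff;			/* nb_dirtyoff */
		int dirtyend;			/* nb_dirtyend */
	};

	/* Smallest [*offp, *endp) covering every buffer's dirty range. */
	static void
	coalesce_commit_range(const struct dirty_buf *b, int n,
	    unsigned long long *offp, unsigned long long *endp)
	{
		unsigned long long off = ~0ULL, end = 0, t;
		int i;

		for (i = 0; i < n; i++) {
			t = b[i].file_off + b[i].dirtyoff;
			if (t < off)
				off = t;
			t += (unsigned long long)(b[i].dirtyend - b[i].dirtyoff);
			if (t > end)
				end = t;
		}
		*offp = off;	/* COMMIT offset */
		*endp = end;	/* COMMIT length is end - off */
	}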
The vm pages have been freed up, - * yet B_BUSY is set. Don't think you will hit any busy/incore problems - * while we sleep, but not absolutely sure. Keep an eye on it. Otherwise - * we will have to hold vm page across this locked. - EKN */ -loop: - if (current_thread_aborted()) { - error = EINTR; + * On the first pass, commit all the bufs that can be. + * On the second pass, nfs_buf_write() is called to do the job. */ -again: - FSDBG(518, vp->v_dirtyblkhd.lh_first, np->n_flag, 0, 0); - if (vp->v_dirtyblkhd.lh_first) +again: + FSDBG(518, np->n_dirtyblkhd.lh_first, np->n_flag, 0, 0); + if (np->n_dirtyblkhd.lh_first) np->n_flag |= NMODIFIED; + if (!VFSTONFS(vp->v_mount)) { + error = ENXIO; goto done; } + if (NFS_ISV3(vp) && commit) { + /* loop while it looks like there are still buffers to be */ + /* committed and nfs_flushcommits() seems to be handling them. */ + while (np->n_needcommitcnt) + if (nfs_flushcommits(vp, p)) + break; + } + + /* Start/do any write(s) that are required. */ +loop: s = splbio(); - for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) { - nbp = bp->b_vnbufs.le_next; - if (ISSET(bp->b_flags, B_BUSY)) { - FSDBG(524, bp, waitfor, passone, bp->b_flags); + for (bp = np->n_dirtyblkhd.lh_first; bp; bp = nbp) { + nbp = bp->nb_vnbufs.le_next; + if (ISSET(bp->nb_flags, NB_BUSY)) { + FSDBG(524, bp, waitfor, passone, bp->nb_flags); if (waitfor != MNT_WAIT || passone) continue; - SET(bp->b_flags, B_WANTED); + SET(bp->nb_flags, NB_WANTED); error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1), "nfsfsync", slptimeo); splx(s); if (error) { - if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) { - error = EINTR; + error2 = nfs_sigintr(VFSTONFS(vp->v_mount), + (struct nfsreq *)0, p); + if (error2) { + error = error2; goto done; } if (slpflag == PCATCH) { @@ -3537,9 +3706,45 @@ loop: } goto loop; } - if (!ISSET(bp->b_flags, B_DELWRI)) + if (!ISSET(bp->nb_flags, NB_DELWRI)) panic("nfs_fsync: not dirty"); - FSDBG(525, bp, passone, commit, bp->b_flags); - if ((passone || !commit) && ISSET(bp->b_flags, B_NEEDCOMMIT)) + FSDBG(525, bp, passone, commit, bp->nb_flags); + if ((passone || !commit) && ISSET(bp->nb_flags, NB_NEEDCOMMIT)) + continue; + nfs_buf_remfree(bp); + if (ISSET(bp->nb_flags, NB_ERROR)) { + np->n_error = bp->nb_error ? bp->nb_error : EIO; + np->n_flag |= NWRITEERR; + nfs_buf_release(bp); continue; - bremfree(bp); + } if (passone || !commit) - SET(bp->b_flags, B_BUSY|B_ASYNC); - else - SET(bp->b_flags, - B_BUSY|B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT); + SET(bp->nb_flags, NB_BUSY|NB_ASYNC); + else { + /* the NB_STABLE forces this to be written FILESYNC */ + SET(bp->nb_flags, NB_BUSY|NB_ASYNC|NB_STABLE); + } splx(s); - VOP_BWRITE(bp); + nfs_buf_write(bp); goto loop; } splx(s); + if (passone) { passone = 0; goto again; } + if (waitfor == MNT_WAIT) { while (vp->v_numoutput) { vp->v_flag |= VBWAIT; error = tsleep((caddr_t)&vp->v_numoutput, slpflag | (PRIBIO + 1), "nfsfsync", slptimeo); if (error) { - if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) { - error = EINTR; + error2 = nfs_sigintr(VFSTONFS(vp->v_mount), + (struct nfsreq *)0, p); + if (error2) { + error = error2; goto done; } if (slpflag == PCATCH) { @@ -3573,7 +3753,7 @@ loop: } } } - if (vp->v_dirtyblkhd.lh_first && commit) { + if (np->n_dirtyblkhd.lh_first && commit) { goto loop; } } @@ -3584,8 +3764,6 @@ loop: } done: FSDBG_BOT(517, vp, np, error, 0); - if (bvec != NULL && bvec != bvec_on_stack) - _FREE(bvec, M_TEMP); return (error); } @@ -3609,8 +3787,7 @@ nfs_pathconf(ap) } /* - * NFS advisory byte-level locks. - * Currently unsupported.
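The NB_BUSY/NB_WANTED sequence in the flush loop above is the classic BSD buffer sleep protocol: a waiter marks the buffer wanted and sleeps on its address, and the holder is expected to wake the sleepers when it gives the buffer up (the release half presumably lives in nfs_buf_release(), which is not shown in this hunk). A schematic of both halves, using hypothetical stand-ins for tsleep()/wakeup() and flag values that match the nfsbuf definitions later in this patch:

	#define NB_BUSY   0x00000010
	#define NB_WANTED 0x00800000

	struct nbuf { volatile long flags; };

	void sleep_on(void *chan);	/* stand-in for tsleep() */
	void wake_all(void *chan);	/* stand-in for wakeup() */

	void
	buf_wait(struct nbuf *bp)
	{
		while (bp->flags & NB_BUSY) {
			bp->flags |= NB_WANTED;	/* ask to be woken */
			sleep_on(bp);		/* sleep on the buffer's address */
		}
	}

	void
	buf_release(struct nbuf *bp)
	{
		bp->flags &= ~NB_BUSY;
		if (bp->flags & NB_WANTED) {
			bp->flags &= ~NB_WANTED;
			wake_all(bp);		/* wake every buf_wait() sleeper */
		}
	}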
+ * NFS advisory byte-level locks (client) */ static int nfs_advlock(ap) @@ -3622,21 +3799,7 @@ nfs_advlock(ap) int a_flags; } */ *ap; { -#ifdef __FreeBSD__ - register struct nfsnode *np = VTONFS(ap->a_vp); - - /* - * The following kludge is to allow diskless support to work - * until a real NFS lockd is implemented. Basically, just pretend - * that this is a local lock. - */ - return (lf_advlock(ap, &(np->n_lockf), np->n_size)); -#else -#if DIAGNOSTIC - printf("nfs_advlock: pid %d comm %s\n", current_proc()->p_pid, current_proc()->p_comm); -#endif - return (EOPNOTSUPP); -#endif + return (nfs_dolock(ap)); } /* @@ -3756,187 +3919,74 @@ nfs_update(ap) return (EOPNOTSUPP); } -int nfs_aio_threads = 0; /* 1 per nfd (arbitrary) */ -struct slock nfs_aio_slock; -TAILQ_HEAD(bqueues, buf) nfs_aio_bufq; -int nfs_aio_bufq_len = 0; /* diagnostic only */ - -void -nfs_aio_thread() -{ /* see comment below in nfs_bwrite() for some rationale */ - struct buf *bp; - boolean_t funnel_state; - - funnel_state = thread_funnel_set(kernel_flock, TRUE); - for(;;) { - simple_lock(&nfs_aio_slock); - if ((bp = nfs_aio_bufq.tqh_first)) { - TAILQ_REMOVE(&nfs_aio_bufq, bp, b_freelist); - nfs_aio_bufq_len--; - simple_unlock(&nfs_aio_slock); - nfs_writebp(bp, 1); - } else { /* nothing to do - goodnight */ - assert_wait(&nfs_aio_bufq, THREAD_UNINT); - simple_unlock(&nfs_aio_slock); - (void)tsleep((caddr_t)0, PRIBIO+1, "nfs_aio_bufq", 0); - } - } - (void) thread_funnel_set(kernel_flock, FALSE); -} - - -void -nfs_aio_thread_init() -{ - if (nfs_aio_threads++ == 0) { - simple_lock_init(&nfs_aio_slock); - TAILQ_INIT(&nfs_aio_bufq); - } - kernel_thread(kernel_task, nfs_aio_thread); -} - - /* - * Just call nfs_writebp() with the force argument set to 1. - */ -static int -nfs_bwrite(ap) - struct vop_bwrite_args /* { - struct vnode *a_bp; - } */ *ap; -{ - extern void wakeup_one(caddr_t chan); - - /* - * nfs_writebp will issue a synchronous rpc to if B_ASYNC then - * to avoid distributed deadlocks we handoff the write to the - * nfs_aio threads. Doing so allows us to complete the - * current request, rather than blocking on a server which may - * be ourself (or blocked on ourself). - * - * Note the loopback deadlocks happened when the thread - * invoking us was nfsd, and also when it was the pagedaemon. - * - * This solution has one known problem. If *ALL* buffers get - * on the nfs_aio queue then no forward progress can be made - * until one of those writes complete. And if the current - * nfs_aio writes-in-progress block due to a non-responsive server we - * are in a deadlock circle. Probably the cure is to limit the - * async write concurrency in getnewbuf as in FreeBSD 3.2. - */ - if (nfs_aio_threads && ISSET(ap->a_bp->b_flags, B_ASYNC)) { - simple_lock(&nfs_aio_slock); - nfs_aio_bufq_len++; - TAILQ_INSERT_TAIL(&nfs_aio_bufq, ap->a_bp, b_freelist); - simple_unlock(&nfs_aio_slock); - wakeup_one((caddr_t)&nfs_aio_bufq); - return (0); - } - return (nfs_writebp(ap->a_bp, 1)); -} - -/* - * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless - * the force flag is one and it also handles the B_NEEDCOMMIT flag. 
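Since nfs_advlock() now hands everything to nfs_dolock() instead of returning EOPNOTSUPP, ordinary POSIX byte-range locks work on NFS files from user space. For reference, the standard call this change enables (plain POSIX usage, not code from this patch):

	#include <fcntl.h>
	#include <unistd.h>

	/* Take an exclusive lock on the first 100 bytes of fd,
	 * sleeping until the (possibly remote) lock is granted. */
	int
	lock_first_100(int fd)
	{
		struct flock fl;

		fl.l_type = F_WRLCK;	/* F_RDLCK for a shared lock */
		fl.l_whence = SEEK_SET;
		fl.l_start = 0;
		fl.l_len = 100;
		return fcntl(fd, F_SETLKW, &fl);	/* F_SETLK fails instead of waiting */
	}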
+ * write (or commit) the given NFS buffer */ int -nfs_writebp(bp, force) - register struct buf *bp; - int force; +nfs_buf_write(struct nfsbuf *bp) { int s; - register int oldflags = bp->b_flags, retv = 1; + int oldflags = bp->nb_flags, rv = 0; off_t off; - upl_t upl; - kern_return_t kret; - struct vnode *vp = bp->b_vp; - upl_page_info_t *pl; + struct vnode *vp = bp->nb_vp; + struct ucred *cr; + struct proc *p = current_proc(); + + FSDBG_TOP(553, bp, NBOFF(bp), bp->nb_flags, 0); - if(!ISSET(bp->b_flags, B_BUSY)) - panic("nfs_writebp: buffer is not busy???"); + if (!ISSET(bp->nb_flags, NB_BUSY)) + panic("nfs_buf_write: buffer is not busy???"); s = splbio(); - CLR(bp->b_flags, (B_READ|B_DONE|B_ERROR|B_DELWRI)); - if (ISSET(oldflags, B_DELWRI)) { - extern int nbdwrite; - nbdwrite--; - wakeup((caddr_t)&nbdwrite); + CLR(bp->nb_flags, (NB_READ|NB_DONE|NB_ERROR|NB_DELWRI)); + if (ISSET(oldflags, NB_DELWRI)) { + nfs_nbdwrite--; + NFSBUFCNTCHK(); + wakeup((caddr_t)&nfs_nbdwrite); } - if (ISSET(oldflags, (B_ASYNC|B_DELWRI))) { - reassignbuf(bp, vp); + /* move to clean list */ + if (ISSET(oldflags, (NB_ASYNC|NB_DELWRI))) { + if (bp->nb_vnbufs.le_next != NFSNOLIST) + LIST_REMOVE(bp, nb_vnbufs); + LIST_INSERT_HEAD(&VTONFS(vp)->n_cleanblkhd, bp, nb_vnbufs); } vp->v_numoutput++; - current_proc()->p_stats->p_ru.ru_oublock++; + if (p && p->p_stats) + p->p_stats->p_ru.ru_oublock++; splx(s); - - /* - * Since the B_BUSY flag is set, we need to lock the page before doing - * nfs_commit. Otherwise we may block and get a busy incore pages - * during a vm pageout. Move the existing code up before the commit. - */ - if (!ISSET(bp->b_flags, B_META) && UBCISVALID(vp) && - !ISSET(bp->b_flags, B_PAGELIST)) { - kret = ubc_create_upl(vp, ubc_blktooff(vp, bp->b_lblkno), - bp->b_bufsize, &upl, &pl, UPL_PRECIOUS); - if (kret != KERN_SUCCESS) - panic("nfs_writebp: ubc_create_upl %d", kret); -#ifdef UBC_DEBUG - upl_ubc_alias_set(upl, current_act(), 2); -#endif /* UBC_DEBUG */ - s = splbio(); - bp->b_pagelist = upl; - SET(bp->b_flags, B_PAGELIST); - splx(s); - - kret = ubc_upl_map(upl, (vm_address_t *)&(bp->b_data)); - if (kret != KERN_SUCCESS) - panic("nfs_writebp: ubc_upl_map %d", kret); - if(bp->b_data == 0) - panic("nfs_writebp: ubc_upl_map mapped 0"); - if (!upl_page_present(pl, 0)) /* even more paranoia */ - panic("nfs_writebp: nopage"); - } /* - * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not - * an actual write will have to be scheduled via. VOP_STRATEGY(). - * If B_WRITEINPROG is already set, then push it with a write anyhow. + * For async requests when nfsiod(s) are running, queue the request by + * calling nfs_asyncio(), otherwise just call nfs_doio() to do the request.
*/ - if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) { - off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff; - SET(bp->b_flags, B_WRITEINPROG); - retv = nfs_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff, - bp->b_wcred, bp->b_proc); - CLR(bp->b_flags, B_WRITEINPROG); - if (!retv) { - bp->b_dirtyoff = bp->b_dirtyend = 0; - CLR(bp->b_flags, B_NEEDCOMMIT); - biodone(bp); /* on B_ASYNC will brelse the buffer */ - - } else if (retv == NFSERR_STALEWRITEVERF) - nfs_clearcommit(vp->v_mount); - } - if (retv) { - if (force) - SET(bp->b_flags, B_WRITEINPROG); - VOP_STRATEGY(bp); - } - - if( (oldflags & B_ASYNC) == 0) { - int rtval = biowait(bp); - - if (oldflags & B_DELWRI) { + if (ISSET(bp->nb_flags, NB_ASYNC)) + p = (struct proc *)0; + if (ISSET(bp->nb_flags, NB_READ)) + cr = bp->nb_rcred; + else + cr = bp->nb_wcred; + if (!ISSET(bp->nb_flags, NB_ASYNC) || nfs_asyncio(bp, NOCRED)) + rv = nfs_doio(bp, cr, p); + + if ((oldflags & NB_ASYNC) == 0) { + rv = nfs_buf_iowait(bp); + /* move to clean list */ + if (oldflags & NB_DELWRI) { s = splbio(); - reassignbuf(bp, vp); + if (bp->nb_vnbufs.le_next != NFSNOLIST) + LIST_REMOVE(bp, nb_vnbufs); + LIST_INSERT_HEAD(&VTONFS(vp)->n_cleanblkhd, bp, nb_vnbufs); splx(s); } - brelse(bp); - return (rtval); + FSDBG_BOT(553, bp, NBOFF(bp), bp->nb_flags, rv); + nfs_buf_release(bp); + return (rv); } - return (0); + FSDBG_BOT(553, bp, NBOFF(bp), bp->nb_flags, rv); + return (rv); } /* @@ -3967,7 +4017,7 @@ nfsspec_access(ap) * unless the file is a socket, fifo, or a block or character * device resident on the filesystem. */ - if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { + if ((mode & VWRITE) && vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY)) { switch (vp->v_type) { case VREG: case VDIR: case VLNK: return (EROFS); @@ -4015,13 +4065,15 @@ nfsspec_read(ap) } */ *ap; { register struct nfsnode *np = VTONFS(ap->a_vp); + struct timeval now; /* * Set access flag. */ np->n_flag |= NACC; - np->n_atim.tv_sec = time.tv_sec; - np->n_atim.tv_nsec = time.tv_usec * 1000; + microtime(&now); + np->n_atim.tv_sec = now.tv_sec; + np->n_atim.tv_nsec = now.tv_usec * 1000; return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap)); } @@ -4038,13 +4090,15 @@ nfsspec_write(ap) } */ *ap; { register struct nfsnode *np = VTONFS(ap->a_vp); + struct timeval now; /* * Set update flag. */ np->n_flag |= NUPD; - np->n_mtim.tv_sec = time.tv_sec; - np->n_mtim.tv_nsec = time.tv_usec * 1000; + microtime(&now); + np->n_mtim.tv_sec = now.tv_sec; + np->n_mtim.tv_nsec = now.tv_usec * 1000; return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap)); } @@ -4068,7 +4122,7 @@ nfsspec_close(ap) if (np->n_flag & (NACC | NUPD)) { np->n_flag |= NCHG; - if (vp->v_usecount == 1 && + if (vp->v_usecount == 1 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { VATTR_NULL(&vattr); if (np->n_flag & NACC) @@ -4095,13 +4149,15 @@ nfsfifo_read(ap) { extern vop_t **fifo_vnodeop_p; register struct nfsnode *np = VTONFS(ap->a_vp); + struct timeval now; /* * Set access flag. */ np->n_flag |= NACC; - np->n_atim.tv_sec = time.tv_sec; - np->n_atim.tv_nsec = time.tv_usec * 1000; + microtime(&now); + np->n_atim.tv_sec = now.tv_sec; + np->n_atim.tv_nsec = now.tv_usec * 1000; return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap)); } @@ -4119,13 +4175,15 @@ nfsfifo_write(ap) { extern vop_t **fifo_vnodeop_p; register struct nfsnode *np = VTONFS(ap->a_vp); + struct timeval now; /* * Set update flag. 
*/ np->n_flag |= NUPD; - np->n_mtim.tv_sec = time.tv_sec; - np->n_mtim.tv_nsec = time.tv_usec * 1000; + microtime(&now); + np->n_mtim.tv_sec = now.tv_sec; + np->n_mtim.tv_nsec = now.tv_usec * 1000; return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap)); } @@ -4146,19 +4204,21 @@ nfsfifo_close(ap) register struct vnode *vp = ap->a_vp; register struct nfsnode *np = VTONFS(vp); struct vattr vattr; + struct timeval now; extern vop_t **fifo_vnodeop_p; if (np->n_flag & (NACC | NUPD)) { + microtime(&now); if (np->n_flag & NACC) { - np->n_atim.tv_sec = time.tv_sec; - np->n_atim.tv_nsec = time.tv_usec * 1000; + np->n_atim.tv_sec = now.tv_sec; + np->n_atim.tv_nsec = now.tv_usec * 1000; } if (np->n_flag & NUPD) { - np->n_mtim.tv_sec = time.tv_sec; - np->n_mtim.tv_nsec = time.tv_usec * 1000; + np->n_mtim.tv_sec = now.tv_sec; + np->n_mtim.tv_nsec = now.tv_usec * 1000; } np->n_flag |= NCHG; - if (vp->v_usecount == 1 && + if (vp->v_usecount == 1 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { VATTR_NULL(&vattr); if (np->n_flag & NACC) @@ -4194,7 +4254,6 @@ nfs_select(ap) return (1); } -/* XXX Eliminate use of struct bp here */ /* * Vnode op for pagein using getblk_pages * derived from nfs_bioread() @@ -4219,21 +4278,20 @@ nfs_pagein(ap) vm_offset_t pl_offset = ap->a_pl_offset; int flags = ap->a_flags; struct ucred *cred; - register struct nfsnode *np = VTONFS(vp); - register int biosize; - register int iosize; - register int xsize; + struct nfsnode *np = VTONFS(vp); + int biosize, xsize, iosize; struct vattr vattr; struct proc *p = current_proc(); - struct nfsmount *nmp = VFSTONFS(vp->v_mount); + struct nfsmount *nmp; int error = 0; vm_offset_t ioaddr; struct uio auio; struct iovec aiov; struct uio * uio = &auio; int nofreeupl = flags & UPL_NOCOMMIT; + upl_page_info_t *plinfo; - FSDBG(322, f_offset, size, pl, pl_offset); + FSDBG(322, vp, f_offset, size, flags); if (pl == (upl_t)NULL) panic("nfs_pagein: no upl"); @@ -4251,8 +4309,7 @@ nfs_pagein(ap) (void) ubc_upl_abort(pl, NULL); return (EINVAL); } - if (f_offset < 0 || f_offset >= np->n_size || - (f_offset & PAGE_MASK_64)) { + if (f_offset < 0 || f_offset >= np->n_size || (f_offset & PAGE_MASK_64)) { if (!nofreeupl) ubc_upl_abort_range(pl, pl_offset, size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); @@ -4267,27 +4324,38 @@ nfs_pagein(ap) auio.uio_rw = UIO_READ; auio.uio_procp = NULL; - if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3) + nmp = VFSTONFS(vp->v_mount); + if (!nmp) { + if (!nofreeupl) + ubc_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); + return (ENXIO); + } + if ((nmp->nm_flag & NFSMNT_NFSV3) && !(nmp->nm_state & NFSSTA_GOTFSINFO)) (void)nfs_fsinfo(nmp, vp, cred, p); - biosize = min(vp->v_mount->mnt_stat.f_iosize, size); - - if (biosize & PAGE_MASK) - panic("nfs_pagein(%x): biosize not page aligned", biosize); + biosize = vp->v_mount->mnt_stat.f_iosize; + plinfo = ubc_upl_pageinfo(pl); ubc_upl_map(pl, &ioaddr); ioaddr += pl_offset; xsize = size; do { + /* + * It would be nice to be able to issue all these requests + * in parallel instead of waiting for each one to complete + * before sending the next one. + * XXX Should we align these requests to block boundaries? 
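+	 *
+	 * For reference, the shape of this sequential loop in isolation
+	 * (a sketch with a hypothetical read_rpc() helper, not from this
+	 * patch):
+	 *
+	 *	xsize = size;
+	 *	while (xsize > 0) {
+	 *		iosize = min(biosize, xsize);
+	 *		if ((error = read_rpc(ioaddr, offset, iosize)))
+	 *			break;
+	 *		ioaddr += iosize;
+	 *		offset += iosize;
+	 *		xsize -= iosize;
+	 *	}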
+ */ iosize = min(biosize, xsize); uio->uio_resid = iosize; - auio.uio_iov = &aiov; - auio.uio_iovcnt = 1; aiov.iov_len = iosize; aiov.iov_base = (caddr_t)ioaddr; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; FSDBG(322, uio->uio_offset, uio->uio_resid, ioaddr, xsize); -#warning our nfs_pagein does not support NQNFS +// XXX #warning our nfs_pagein does not support NQNFS /* * With UBC we get here only when the file data is not in the VM * page cache, so go ahead and read in. @@ -4319,7 +4387,8 @@ nfs_pagein(ap) } else FSDBG(322, uio->uio_offset, uio->uio_resid, error, -1); - if (p && (vp->v_flag & VTEXT) && + nmp = VFSTONFS(vp->v_mount); + if (p && (vp->v_flag & VTEXT) && nmp && ((nmp->nm_flag & NFSMNT_NQNFS && NQNFS_CKINVALID(vp, np, ND_READ) && np->n_lrev != np->n_brev) || @@ -4372,11 +4441,10 @@ nfs_pageout(ap) vm_offset_t pl_offset = ap->a_pl_offset; int flags = ap->a_flags; int ioflag = ap->a_flags; - register int biosize; struct proc *p = current_proc(); struct nfsnode *np = VTONFS(vp); register struct ucred *cred; - struct buf *bp; + struct nfsbuf *bp; struct nfsmount *nmp = VFSTONFS(vp->v_mount); daddr_t lbn; int n = 0, on, error = 0, iomode, must_commit, s; @@ -4384,10 +4452,8 @@ nfs_pageout(ap) vm_offset_t ioaddr; struct uio auio; struct iovec aiov; - struct uio * uio = &auio; int nofreeupl = flags & UPL_NOCOMMIT; - int iosize; - int pgsize; + int biosize, iosize, pgsize, xsize; FSDBG(323, f_offset, size, pl, pl_offset); @@ -4397,7 +4463,7 @@ nfs_pageout(ap) if (UBCINVALID(vp)) { printf("nfs_pageout: invalid vnode 0x%x", (int)vp); if (!nofreeupl) - (void) ubc_upl_abort(pl, NULL); + ubc_upl_abort(pl, 0); return (EIO); } UBCINFOCHECK("nfs_pageout", vp); @@ -4405,42 +4471,90 @@ nfs_pageout(ap) if (size <= 0) { printf("nfs_pageout: invalid size %d", size); if (!nofreeupl) - (void) ubc_upl_abort(pl, NULL); + ubc_upl_abort(pl, 0); return (EINVAL); } - /* - * I use nm_rsize, not nm_wsize so that all buffer cache blocks - * will be the same size within a filesystem. nfs_writerpc will - * still use nm_wsize when sizing the rpc's. - */ - biosize = min(vp->v_mount->mnt_stat.f_iosize, size); - - if (biosize & PAGE_MASK) - panic("nfs_pageout(%x): biosize not page aligned", biosize); + if (!nmp) { + if (!nofreeupl) + ubc_upl_abort(pl, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY); + return (ENXIO); + } + biosize = vp->v_mount->mnt_stat.f_iosize; /* - * Check to see whether the buffer is incore - * If incore and not busy invalidate it from the cache - * we should not find it BUSY, since we always do a - * vm_fault_list_request in 'getblk' before returning - * which would block on the page busy status + * Check to see whether the buffer is incore. + * If incore and not busy, invalidate it from the cache. */ - lbn = f_offset / PAGE_SIZE; /* to match the size getblk uses */ - - for (iosize = size; iosize > 0; iosize -= PAGE_SIZE, lbn++) { + for (iosize = 0; iosize < size; iosize += xsize) { + off = f_offset + iosize; + /* need make sure we do things on block boundaries */ + xsize = biosize - (off % biosize); + if (off + xsize > f_offset + size) + xsize = f_offset + size - off; + lbn = ubc_offtoblk(vp, off); s = splbio(); - if (bp = incore(vp, lbn)) { - FSDBG(323, lbn*PAGE_SIZE, 1, bp, bp->b_flags); - if (ISSET(bp->b_flags, B_BUSY)) { + if (bp = nfs_buf_incore(vp, lbn)) { + FSDBG(323, off, 1, bp, bp->nb_flags); + if (ISSET(bp->nb_flags, NB_BUSY)) { /* no panic. 
just tell vm we are busy */ if (!nofreeupl) - (void) ubc_upl_abort(pl, NULL); - return(EBUSY); + ubc_upl_abort(pl, 0); + return (EBUSY); + } + if (bp->nb_dirtyend > 0) { + /* + * if there's a dirty range in the buffer, check to + * see if it extends beyond the pageout region + * + * if the dirty region lies completely within the + * pageout region, we just invalidate the buffer + * because it's all being written out now anyway. + * + * if any of the dirty region lies outside the + * pageout region, we'll try to clip the dirty + * region to eliminate the portion that's being + * paged out. If that's not possible, because + * the dirty region extends before and after the + * pageout region, then we'll just return EBUSY. + */ + off_t boff, start, end; + boff = NBOFF(bp); + start = off; + end = off + xsize; + /* clip end to EOF */ + if (end > np->n_size) + end = np->n_size; + start -= boff; + end -= boff; + if ((bp->nb_dirtyoff < start) && + (bp->nb_dirtyend > end)) { + /* not gonna be able to clip the dirty region */ + FSDBG(323, vp, bp, 0xd00deebc, EBUSY); + if (!nofreeupl) + ubc_upl_abort(pl, 0); + return (EBUSY); + } + if ((bp->nb_dirtyoff < start) || + (bp->nb_dirtyend > end)) { + /* clip dirty region, if necessary */ + if (bp->nb_dirtyoff < start) + bp->nb_dirtyend = min(bp->nb_dirtyend, start); + if (bp->nb_dirtyend > end) + bp->nb_dirtyoff = max(bp->nb_dirtyoff, end); + FSDBG(323, bp, bp->nb_dirtyoff, bp->nb_dirtyend, 0xd00dee00); + /* we're leaving this block dirty */ + continue; + } + } + nfs_buf_remfree(bp); + SET(bp->nb_flags, (NB_BUSY | NB_INVAL)); + if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { + CLR(bp->nb_flags, NB_NEEDCOMMIT); + np->n_needcommitcnt--; + CHECK_NEEDCOMMITCNT(np); } - bremfree(bp); - SET(bp->b_flags, (B_BUSY | B_INVAL)); - brelse(bp); + nfs_buf_release(bp); } splx(s); } @@ -4456,11 +4570,12 @@ nfs_pageout(ap) UPL_ABORT_FREE_ON_EMPTY); return (np->n_error); } - if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3) + if ((nmp->nm_flag & NFSMNT_NFSV3) && + !(nmp->nm_state & NFSSTA_GOTFSINFO)) (void)nfs_fsinfo(nmp, vp, cred, p); if (f_offset < 0 || f_offset >= np->n_size || - f_offset & PAGE_MASK_64 || size & PAGE_MASK) { + f_offset & PAGE_MASK_64 || size & PAGE_MASK_64) { if (!nofreeupl) ubc_upl_abort_range(pl, pl_offset, size, UPL_ABORT_FREE_ON_EMPTY); @@ -4468,30 +4583,21 @@ nfs_pageout(ap) } ubc_upl_map(pl, &ioaddr); + ioaddr += pl_offset; if (f_offset + size > np->n_size) - iosize = np->n_size - f_offset; + xsize = np->n_size - f_offset; else - iosize = size; - - pgsize = (iosize + (PAGE_SIZE - 1)) & ~PAGE_MASK; + xsize = size; + pgsize = round_page_64(xsize); if (size > pgsize) { if (!nofreeupl) ubc_upl_abort_range(pl, pl_offset + pgsize, size - pgsize, UPL_ABORT_FREE_ON_EMPTY); } - auio.uio_iov = &aiov; - auio.uio_iovcnt = 1; - auio.uio_offset = f_offset; - auio.uio_segflg = UIO_SYSSPACE; - auio.uio_rw = UIO_READ; - auio.uio_resid = iosize; - auio.uio_procp = NULL; - aiov.iov_len = iosize; - aiov.iov_base = (caddr_t)ioaddr + pl_offset; /* * check for partial page and clear the * contents past end of the file before @@ -4499,45 +4605,47 @@ nfs_pageout(ap) */ if (f_offset < np->n_size && f_offset + size > np->n_size) { size_t io = np->n_size - f_offset; - - bzero((caddr_t)(ioaddr + pl_offset + io), size - io); - + bzero((caddr_t)(ioaddr + io), size - io); FSDBG(321, np->n_size, f_offset, f_offset + io, size - io); } + auio.uio_offset = f_offset; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_rw = UIO_READ; + auio.uio_procp = NULL; + do { -#warning 
our nfs_pageout does not support NQNFS + /* + * It would be nice to be able to issue all these requests + * in parallel instead of waiting for each one to complete + * before sending the next one. + * XXX Should we align these requests to block boundaries? + */ + iosize = min(biosize, xsize); + auio.uio_resid = iosize; + aiov.iov_len = iosize; + aiov.iov_base = (caddr_t)ioaddr; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + + FSDBG(323, auio.uio_offset, auio.uio_resid, ioaddr, xsize); +// XXX #warning our nfs_pageout does not support NQNFS nfsstats.pageouts++; - lbn = uio->uio_offset / biosize; - on = uio->uio_offset & (biosize-1); - n = min((unsigned)(biosize - on), uio->uio_resid); -again: -#if 0 - /* (removed for UBC) */ - bufsize = biosize; - if ((off_t)(lbn + 1) * biosize > np->n_size) { - bufsize = np->n_size - (off_t)lbn * biosize; - bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); - } -#endif + vp->v_numoutput++; /* NMODIFIED would be set here if doing unstable writes */ iomode = NFSV3WRITE_FILESYNC; - error = nfs_writerpc(vp, uio, cred, &iomode, &must_commit); + error = nfs_writerpc(vp, &auio, cred, &iomode, &must_commit); if (must_commit) nfs_clearcommit(vp->v_mount); vpwakeup(vp); - if (error) goto cleanup; - - if (n > 0) { - uio->uio_resid -= n; - uio->uio_offset += n; - uio->uio_iov->iov_base += n; - uio->uio_iov->iov_len -= n; - } - } while (uio->uio_resid > 0 && n > 0); + /* Note: no need to check uio_resid, because */ + /* it'll only be set if there was an error. */ + ioaddr += iosize; + xsize -= iosize; + } while (xsize > 0); cleanup: ubc_upl_unmap(pl); @@ -4619,9 +4727,12 @@ nfs_blktooff(ap) int biosize; register struct vnode *vp = ap->a_vp; - biosize = min(vp->v_mount->mnt_stat.f_iosize, PAGE_SIZE); /* nfs_bio.c */ + if (!vp->v_mount) + return (ENXIO); + + biosize = vp->v_mount->mnt_stat.f_iosize; - *ap->a_offset = (off_t)ap->a_lblkno * biosize; + *ap->a_offset = (off_t)ap->a_lblkno * biosize; return (0); } @@ -4637,9 +4748,12 @@ nfs_offtoblk(ap) int biosize; register struct vnode *vp = ap->a_vp; - biosize = min(vp->v_mount->mnt_stat.f_iosize, PAGE_SIZE); /* nfs_bio.c */ + if (!vp->v_mount) + return (ENXIO); + + biosize = vp->v_mount->mnt_stat.f_iosize; - *ap->a_lblkno = (daddr_t)(ap->a_offset / biosize); + *ap->a_lblkno = (daddr_t)(ap->a_offset / biosize); return (0); } diff --git a/bsd/nfs/nfsm_subs.h b/bsd/nfs/nfsm_subs.h index 5156591a9..f2648d604 100644 --- a/bsd/nfs/nfsm_subs.h +++ b/bsd/nfs/nfsm_subs.h @@ -334,7 +334,12 @@ struct mbuf *nfsm_rpchead __P((struct ucred *cr, int nmflag, int procid, */ #define nfsm_request(v, t, p, c, x) \ { \ - int nfsv3 = (VFSTONFS((v)->v_mount))->nm_flag & NFSMNT_NFSV3; \ + int nfsv3; \ + if (!VFSTONFS((v)->v_mount)) { \ + error = ENXIO; \ + goto nfsmout; \ + } \ + nfsv3 = (VFSTONFS((v)->v_mount))->nm_flag & NFSMNT_NFSV3; \ if ((error = nfs_request((v), mreq, (t), (p), \ (c), &mrep, &md, &dpos, (x)))) { \ if (error & NFSERR_RETERR) \ @@ -342,11 +347,6 @@ struct mbuf *nfsm_rpchead __P((struct ucred *cr, int nmflag, int procid, else \ goto nfsmout; \ } \ - else if ((v)->v_type==VBAD) { \ - error = EINVAL; \ - if (!nfsv3) \ - goto nfsmout; \ - } \ } #define nfsm_strtom(a,s,m) \ @@ -446,7 +446,9 @@ struct mbuf *nfsm_rpchead __P((struct ucred *cr, int nmflag, int procid, nfsm_srvpostopattr(nfsd, (r), (a), &mb, &bpos) #define nfsm_srvsattr(a) \ - { nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + { \ + struct timeval now; \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ if (*tl == nfs_true) { \ nfsm_dissect(tl, u_long *, 
NFSX_UNSIGNED); \ (a)->va_mode = nfstov_mode(*tl); \ @@ -467,14 +469,15 @@ struct mbuf *nfsm_rpchead __P((struct ucred *cr, int nmflag, int procid, fxdr_hyper(tl, &(a)->va_size); \ } \ nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + microtime(&now); \ switch (fxdr_unsigned(int, *tl)) { \ case NFSV3SATTRTIME_TOCLIENT: \ nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); \ fxdr_nfsv3time(tl, &(a)->va_atime); \ break; \ case NFSV3SATTRTIME_TOSERVER: \ - (a)->va_atime.tv_sec = time.tv_sec; \ - (a)->va_atime.tv_nsec = time.tv_usec * 1000; \ + (a)->va_atime.tv_sec = now.tv_sec; \ + (a)->va_atime.tv_nsec = now.tv_usec * 1000; \ break; \ }; \ nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ @@ -484,8 +487,8 @@ struct mbuf *nfsm_rpchead __P((struct ucred *cr, int nmflag, int procid, fxdr_nfsv3time(tl, &(a)->va_mtime); \ break; \ case NFSV3SATTRTIME_TOSERVER: \ - (a)->va_mtime.tv_sec = time.tv_sec; \ - (a)->va_mtime.tv_nsec = time.tv_usec * 1000; \ + (a)->va_mtime.tv_sec = now.tv_sec; \ + (a)->va_mtime.tv_nsec = now.tv_usec * 1000; \ break; \ }; } diff --git a/bsd/nfs/nfsmount.h b/bsd/nfs/nfsmount.h index efa40d2f8..ae0f636c9 100644 --- a/bsd/nfs/nfsmount.h +++ b/bsd/nfs/nfsmount.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -76,6 +76,7 @@ */ struct nfsmount { int nm_flag; /* Flags for soft/hard... */ + int nm_state; /* Internal state flags */ struct mount *nm_mountp; /* Vfs structure for this filesystem */ int nm_numgrps; /* Max. size of groupslist */ struct vnode *nm_dvp; /* root directory vnode pointer */ @@ -110,17 +111,28 @@ struct nfsmount { int nm_numuids; /* Number of nfsuid mappings */ TAILQ_HEAD(, nfsuid) nm_uidlruhead; /* Lists of nfsuid mappings */ LIST_HEAD(, nfsuid) nm_uidhashtbl[NFS_MUIDHASHSIZ]; - TAILQ_HEAD(, buf) nm_bufq; /* async io buffer queue */ + TAILQ_HEAD(, nfsbuf) nm_bufq; /* async io buffer queue */ short nm_bufqlen; /* number of buffers in queue */ short nm_bufqwant; /* process wants to add to the queue */ int nm_bufqiods; /* number of iods processing queue */ + int nm_tprintf_initial_delay; /* delay first "server down" */ + int nm_tprintf_delay; /* delay between "server down" */ }; + #if defined(KERNEL) /* * Convert mount ptr to nfsmount ptr. */ -#define VFSTONFS(mp) ((struct nfsmount *)((mp)->mnt_data)) +#define VFSTONFS(mp) ((mp) ? ((struct nfsmount *)((mp)->mnt_data)) : NULL) + +#ifndef NFS_TPRINTF_INITIAL_DELAY +#define NFS_TPRINTF_INITIAL_DELAY 12 +#endif + +#ifndef NFS_TPRINTF_DELAY +#define NFS_TPRINTF_DELAY 30 +#endif #endif /* KERNEL */ diff --git a/bsd/nfs/nfsnode.h b/bsd/nfs/nfsnode.h index cd3a8d080..6462f92b9 100644 --- a/bsd/nfs/nfsnode.h +++ b/bsd/nfs/nfsnode.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -102,6 +102,96 @@ struct nfsdmap { nfsuint64 ndm_cookies[NFSNUMCOOKIES]; }; +/* + * The nfsbuf is the nfs equivalent to a struct buf. + */ +struct nfsbuf { + LIST_ENTRY(nfsbuf) nb_hash; /* hash chain */ + LIST_ENTRY(nfsbuf) nb_vnbufs; /* vnode's nfsbuf chain */ + TAILQ_ENTRY(nfsbuf) nb_free; /* free list position if not active. */ + volatile long nb_flags; /* NB_* flags. */ + long nb_bufsize; /* buffer size */ + daddr_t nb_lblkno; /* logical block number. */ + int nb_error; /* errno value. 
*/ + u_int32_t nb_valid; /* valid pages in buf */ + u_int32_t nb_dirty; /* dirty pages in buf */ + int nb_validoff; /* offset in buffer of valid region. */ + int nb_validend; /* offset of end of valid region. */ + int nb_dirtyoff; /* offset in buffer of dirty region. */ + int nb_dirtyend; /* offset of end of dirty region. */ + caddr_t nb_data; /* mapped buffer */ + struct vnode * nb_vp; /* device vnode */ + struct proc * nb_proc; /* associated proc; NULL if kernel. */ + struct ucred * nb_rcred; /* read credentials reference */ + struct ucred * nb_wcred; /* write credentials reference */ + void * nb_pagelist; /* upl */ +}; + +/* + * These flags are kept in nb_flags and they're (purposefully) + * very similar to the B_* flags for struct buf. + */ +#define NB_NEEDCOMMIT 0x00000002 /* Append-write in progress. */ +#define NB_ASYNC 0x00000004 /* Start I/O, do not wait. */ +#define NB_BUSY 0x00000010 /* I/O in progress. */ +#define NB_CACHE 0x00000020 /* Bread found us in the cache. */ +#define NB_STABLE 0x00000040 /* write FILESYNC not UNSTABLE. */ +#define NB_DELWRI 0x00000080 /* Delay I/O until buffer reused. */ +#define NB_DONE 0x00000200 /* I/O completed. */ +#define NB_EINTR 0x00000400 /* I/O was interrupted */ +#define NB_ERROR 0x00000800 /* I/O error occurred. */ +#define NB_WASDIRTY 0x00001000 /* page was found dirty in the VM cache */ +#define NB_INVAL 0x00002000 /* Does not contain valid info. */ +#define NB_NOCACHE 0x00008000 /* Do not cache block after use. */ +#define NB_READ 0x00100000 /* Read buffer. */ +#define NB_PAGELIST 0x00400000 /* Buffer describes pagelist I/O. */ +#define NB_WANTED 0x00800000 /* Process wants this buffer. */ +#define NB_WRITE 0x00000000 /* Write buffer (pseudo flag). */ +#define NB_WRITEINPROG 0x01000000 /* Write in progress. */ +#define NB_META 0x40000000 /* buffer contains meta-data. */ +#define NB_IOD 0x80000000 /* buffer being handled by nfsiod. */ + + +#define NBOFF(BP) ((off_t)(BP)->nb_lblkno * (off_t)(BP)->nb_bufsize) +#define NBPGVALID(BP,P) (((BP)->nb_valid >> (P)) & 0x1) +#define NBPGDIRTY(BP,P) (((BP)->nb_dirty >> (P)) & 0x1) +#define NBPGVALID_SET(BP,P) ((BP)->nb_valid |= (1 << (P))) +#define NBPGDIRTY_SET(BP,P) ((BP)->nb_dirty |= (1 << (P))) + +#define NFS_BUF_MAP(BP) \ + do { \ + if (!(BP)->nb_data && nfs_buf_map(BP)) \ + panic("nfs_buf_map failed"); \ + } while (0) + +LIST_HEAD(nfsbuflists, nfsbuf); +TAILQ_HEAD(nfsbuffreehead, nfsbuf); + +#define NFSNOLIST ((struct nfsbuf *)0xdeadbeef) + +extern int nfsbufhashlock, nfsbufcnt, nfsbufmin, nfsbufmax; +extern int nfsbuffreecnt, nfsbufdelwricnt, nfsneedbuffer; +extern int nfs_nbdwrite; +extern struct nfsbuffreehead nfsbuffree, nfsbufdelwri; + +#define NFSBUFCNTCHK() \ + do { \ + if ( (nfsbufcnt < 0) || \ + (nfsbufcnt > nfsbufmax) || \ + (nfsbuffreecnt < 0) || \ + (nfsbuffreecnt > nfsbufmax) || \ + (nfsbuffreecnt > nfsbufcnt) || \ + (nfsbufdelwricnt < 0) || \ + (nfsbufdelwricnt > nfsbufmax) || \ + (nfsbufdelwricnt > nfsbufcnt) || \ + (nfs_nbdwrite < 0) || \ + (nfs_nbdwrite > nfsbufcnt) || \ + 0) \ + panic("nfsbuf count error: max %d cnt %d free %d delwr %d bdw %d\n", \ + nfsbufmax, nfsbufcnt, nfsbuffreecnt, \ + nfsbufdelwricnt, nfs_nbdwrite); \ + } while (0) + /* * The nfsnode is the nfs equivalent to ufs's inode. Any similarity * is purely coincidental. @@ -131,7 +221,10 @@ struct nfsnode { time_t n_ctime; /* Prev create time. 
*/ time_t n_expiry; /* Lease expiry time */ nfsfh_t *n_fhp; /* NFS File Handle */ - struct vnode *n_vnode; /* associated vnode */ + union { + struct vnode *n_vp; /* associated vnode */ + struct mount *n_mp; /* associated mount (NINIT) */ + } n_un0; struct lockf *n_lockf; /* Locking record of file */ int n_error; /* Save write error value */ union { @@ -150,8 +243,21 @@ struct nfsnode { short n_flag; /* Flag for locking.. */ nfsfh_t n_fh; /* Small File Handle */ u_int64_t n_xid; /* last xid to loadattr */ + struct nfsbuflists n_cleanblkhd; /* clean blocklist head */ + struct nfsbuflists n_dirtyblkhd; /* dirty blocklist head */ + int n_needcommitcnt;/* # bufs that need committing */ }; +#define CHECK_NEEDCOMMITCNT(np) \ + do { \ + if ((np)->n_needcommitcnt < 0) { \ + printf("nfs: n_needcommitcnt negative\n"); \ + (np)->n_needcommitcnt = 0; \ + } \ + } while (0) + +#define n_vnode n_un0.n_vp +#define n_mount n_un0.n_mp #define n_atim n_un1.nf_atim #define n_mtim n_un2.nf_mtim #define n_sillyrename n_un3.nf_silly @@ -172,8 +278,9 @@ struct nfsnode { #define NACC 0x0100 /* Special file accessed */ #define NUPD 0x0200 /* Special file updated */ #define NCHG 0x0400 /* Special file times changed */ -#define NLOCKED 0x0800 /* node is locked */ -#define NWANTED 0x0100 /* someone wants to lock */ +#define NHASHED 0x1000 /* someone wants to lock */ +#define NINIT 0x2000 /* node is being initialized */ +#define NWINIT 0x4000 /* someone waiting for init to complete */ /* * Convert between nfsnode pointers and vnode pointers @@ -204,7 +311,6 @@ int nfs_write __P((struct vop_write_args *)); int nqnfs_vop_lease_check __P((struct vop_lease_args *)); #define nfs_revoke vop_revoke #define nfs_seek ((int (*) __P((struct vop_seek_args *)))nullop) -int nfs_abortop __P((struct vop_abortop_args *)); int nfs_inactive __P((struct vop_inactive_args *)); int nfs_reclaim __P((struct vop_reclaim_args *)); int nfs_lock __P((struct vop_lock_args *)); @@ -222,6 +328,18 @@ void nfs_invaldir __P((struct vnode *)); #define nqnfs_lease_updatetime lease_updatetime +/* nfsbuf functions */ +void nfs_nbinit(void); +void nfs_buf_remfree(struct nfsbuf *); +struct nfsbuf * nfs_buf_incore(struct vnode *, daddr_t); +struct nfsbuf * nfs_buf_get(struct vnode *, daddr_t, int, struct proc *, int); +int nfs_buf_upl_setup(struct nfsbuf *bp); +void nfs_buf_upl_check(struct nfsbuf *bp); +void nfs_buf_release(struct nfsbuf *); +int nfs_buf_iowait(struct nfsbuf *); +void nfs_buf_iodone(struct nfsbuf *); +void nfs_buf_write_delayed(struct nfsbuf *); + #endif /* KERNEL */ #endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/nfs/nfsproto.h b/bsd/nfs/nfsproto.h index 8b077777d..297a6b40e 100644 --- a/bsd/nfs/nfsproto.h +++ b/bsd/nfs/nfsproto.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -88,12 +88,13 @@ #define NFS_VER3 3 #define NFS_V2MAXDATA 8192 #define NFS_MAXDGRAMDATA 16384 -#define NFS_MAXDATA 32768 +#define NFS_MAXDATA (60*1024) // XXX not ready for 64K-128K #define NFS_MAXPATHLEN 1024 #define NFS_MAXNAMLEN 255 #define NFS_MAXPKTHDR 404 #define NFS_MAXPACKET (NFS_MAXPKTHDR + NFS_MAXDATA) #define NFS_MINPACKET 20 +#define NFS_MAXSOCKBUF (224*1024) #define NFS_FABLKSIZE 512 /* Size in bytes of a block wrt fa_blocks */ /* Stat numbers for rpc returns (version 2 and 3) */ diff --git a/bsd/nfs/nlminfo.h b/bsd/nfs/nlminfo.h new file mode 100644 index 000000000..d149664da --- /dev/null +++ b/bsd/nfs/nlminfo.h @@ -0,0 +1,52 @@ +/*- + * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Berkeley Software Design Inc's name may not be used to endorse or + * promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from BSDI nlminfo.h,v 2.1 1998/03/18 01:30:38 don Exp + * $FreeBSD: src/sys/nfsclient/nlminfo.h,v 1.1 2001/04/17 20:45:22 alfred Exp $ + */ + +#include + +#ifdef __APPLE_API_PRIVATE + +/* + * Misc NLM information, some needed for the master lockd process, and some + * needed by every process doing nlm based locking. 
+ */ +struct nlminfo { + /* these are used by any process doing nlm locking */ + int msg_seq; /* sequence counter for lock requests */ + int retcode; /* return code for lock requests */ + int set_getlk; + int getlk_pid; + off_t getlk_start; + off_t getlk_len; + struct timeval pid_start; /* process starting time */ + struct timeval nlm_lockstart; /* XXX debug */ +}; + +extern void nlminfo_release(struct proc *p); +#endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/ppc/param.h b/bsd/ppc/param.h index ac6477555..673862a1b 100644 --- a/bsd/ppc/param.h +++ b/bsd/ppc/param.h @@ -113,7 +113,7 @@ #define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE/DEV_BSIZE)) /* from machdep/ppc/proc_reg.h */ -#if __BIG_ENDIAN__ +#ifdef __BIG_ENDIAN__ #define ENDIAN_MASK(val,size) (1 << (size-1 - val)) #else #error code not ported to little endian targets yet diff --git a/bsd/ppc/ucontext.h b/bsd/ppc/ucontext.h index 5d93537b3..fb311c5b6 100644 --- a/bsd/ppc/ucontext.h +++ b/bsd/ppc/ucontext.h @@ -40,4 +40,14 @@ struct mcontext { typedef struct mcontext * mcontext_t; +struct mcontext64 { + ppc_exception_state_t es; + ppc_thread_state64_t ss; + ppc_float_state_t fs; + ppc_vector_state_t vs; +}; +#define PPC_MCONTEXT64_SIZE (PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int) + +typedef struct mcontext64 * mcontext64_t; + #endif /* _PPC_UCONTEXT_H_ */ diff --git a/bsd/ppc/vmparam.h b/bsd/ppc/vmparam.h index 790696cfe..a0b782e89 100644 --- a/bsd/ppc/vmparam.h +++ b/bsd/ppc/vmparam.h @@ -40,7 +40,7 @@ #define MAXDSIZ (RLIM_INFINITY) /* max data size */ #endif #ifndef DFLSSIZ -#define DFLSSIZ (512*1024) /* initial stack size limit */ +#define DFLSSIZ (8*1024*1024) /* initial stack size limit */ #endif #ifndef MAXSSIZ #define MAXSSIZ (64*1024*1024) /* max stack size */ diff --git a/bsd/sys/Makefile b/bsd/sys/Makefile index e73e39434..a3a4ba1c4 100644 --- a/bsd/sys/Makefile +++ b/bsd/sys/Makefile @@ -20,10 +20,11 @@ EXPINC_SUBDIRS_PPC = \ EXPINC_SUBDIRS_I386 = \ DATAFILES = \ - appleapiopts.h \ - acct.h attr.h buf.h callout.h cdefs.h clist.h conf.h \ + appleapiopts.h acct.h aio.h attr.h \ + audit.h bsm_kevents.h bsm_token.h bsm_uevents.h \ + buf.h callout.h cdefs.h clist.h conf.h \ dir.h dirent.h disk.h disklabel.h disktab.h dkstat.h dmap.h domain.h \ - errno.h ev.h exec.h fcntl.h file.h filedesc.h filio.h gmon.h ioccom.h ioctl.h \ + errno.h ev.h event.h exec.h fcntl.h file.h filedesc.h filio.h gmon.h ioccom.h ioctl.h \ ioctl_compat.h ipc.h kernel.h kern_event.h ktrace.h loadable_fs.h lock.h lockf.h mach_swapon.h malloc.h \ kdebug.h linker_set.h md5.h kern_control.h \ mbuf.h mman.h mount.h msgbuf.h mtio.h namei.h netport.h param.h paths.h \ @@ -32,7 +33,7 @@ DATAFILES = \ syscall.h sysctl.h syslimits.h syslog.h systm.h sys_domain.h termios.h time.h \ timeb.h times.h tprintf.h trace.h tty.h ttychars.h ttycom.h \ ttydefaults.h ttydev.h types.h ubc.h ucontext.h ucred.h uio.h un.h unistd.h unpcb.h \ - user.h utfconv.h utsname.h ux_exception.h vadvise.h vcmd.h version.h vlimit.h \ + user.h utfconv.h utsname.h ux_exception.h vadvise.h vcmd.h version.h \ vm.h vmmeter.h vmparam.h vnioctl.h vnode.h vnode_if.h vstat.h wait.h INSTALL_MI_LIST = ${DATAFILES} diff --git a/bsd/sys/aio.h b/bsd/sys/aio.h new file mode 100644 index 000000000..a38d52eb7 --- /dev/null +++ b/bsd/sys/aio.h @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. 
+ * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: sys/aio.h + * Author: Umesh Vaishampayan [umeshv@apple.com] + * 05-Feb-2003 umeshv Created. + * + * Header file for POSIX Asynchronous IO APIs + * + */ + +#ifndef _SYS_AIO_H_ +#define _SYS_AIO_H_ + +#include + +struct aiocb { + int aio_fildes; /* File descriptor */ + off_t aio_offset; /* File offset */ + volatile void *aio_buf; /* Location of buffer */ + size_t aio_nbytes; /* Length of transfer */ + int aio_reqprio; /* Request priority offset */ + struct sigevent aio_sigevent; /* Signal number and value */ + int aio_lio_opcode; /* Operation to be performed */ +}; + +/* + * aio_cancel() return values + */ + +/* + * none of the requested operations could be canceled since they are + * already complete. + */ +#define AIO_ALLDONE 0x1 + +/* all requested operations have been canceled */ +#define AIO_CANCELED 0x2 + +/* + * some of the requested operations could not be canceled since + * they are in progress + */ +#define AIO_NOTCANCELED 0x4 + + +/* + * lio_listio operation options + */ + +#define LIO_NOP 0x0 /* option indicating that no transfer is requested */ +#define LIO_READ 0x1 /* option requesting a read */ +#define LIO_WRITE 0x2 /* option requesting a write */ + +/* + * lio_listio() modes + */ + +/* + * A lio_listio() synchronization operation indicating + * that the calling thread is to continue execution while + * the lio_listio() operation is being performed, and no + * notification is given when the operation is complete + */ +#define LIO_NOWAIT 0x1 + +/* + * A lio_listio() synchronization operation indicating + * that the calling thread is to suspend until the + * lio_listio() operation is complete. + */ +#define LIO_WAIT 0x2 + +/* + * Maximum number of operations in single lio_listio call + */ +#define AIO_LISTIO_MAX 16 + +/* + * aio_fsync() options + */ + +#define O_SYNC 0x0 /* queued IO is completed as if by fsync() */ +#if 0 /* O_DSYNC - NOT SUPPORTED */ +#define O_DSYNC 0x1 /* queued async IO is completed as if by fdatasync() */ +#endif + +#ifndef KERNEL +/* + * Prototypes + */ + +/* + * Attempt to cancel one or more asynchronous I/O requests currently outstanding + * against file descriptor fd. The aiocbp argument points to the asynchronous I/O + * control block for a particular request to be canceled. If aiocbp is NULL, then + * all outstanding cancelable asynchronous I/O requests against fd shall be canceled.
+ */ +int aio_cancel( int fd, + struct aiocb * aiocbp ); + +/* + * Return the error status associated with the aiocb structure referenced by the + * aiocbp argument. The error status for an asynchronous I/O operation is the errno + * value that would be set by the corresponding read(), write(), or fsync() + * operation. If the operation has not yet completed, then the error status shall + * be equal to [EINPROGRESS]. + */ +int aio_error( const struct aiocb * aiocbp ); + +/* + * Asynchronously force all I/O operations associated with the file indicated by + * the file descriptor aio_fildes member of the aiocb structure referenced by the + * aiocbp argument and queued at the time of the call to aio_fsync() to the + * synchronized I/O completion state. The function call shall return when the + * synchronization request has been initiated or queued. op O_SYNC is the only + * supported operation at this time. + * The aiocbp argument refers to an asynchronous I/O control block. The aiocbp + * value may be used as an argument to aio_error() and aio_return() in order to + * determine the error status and return status, respectively, of the asynchronous + * operation while it is proceeding. When the request is queued, the error status + * for the operation is [EINPROGRESS]. When all data has been successfully + * transferred, the error status shall be reset to reflect the success or failure + * of the operation. + */ +int aio_fsync( int op, + struct aiocb * aiocbp ); + +/* + * Read aiocbp->aio_nbytes from the file associated with aiocbp->aio_fildes into + * the buffer pointed to by aiocbp->aio_buf. The function call shall return when + * the read request has been initiated or queued. + * The aiocbp value may be used as an argument to aio_error() and aio_return() in + * order to determine the error status and return status, respectively, of the + * asynchronous operation while it is proceeding. If an error condition is + * encountered during queuing, the function call shall return without having + * initiated or queued the request. The requested operation takes place at the + * absolute position in the file as given by aio_offset, as if lseek() were called + * immediately prior to the operation with an offset equal to aio_offset and a + * whence equal to SEEK_SET. After a successful call to enqueue an asynchronous + * I/O operation, the value of the file offset for the file is unspecified. + */ +int aio_read( struct aiocb * aiocbp ); + +/* + * Return the return status associated with the aiocb structure referenced by + * the aiocbp argument. The return status for an asynchronous I/O operation is + * the value that would be returned by the corresponding read(), write(), or + * fsync() function call. If the error status for the operation is equal to + * [EINPROGRESS], then the return status for the operation is undefined. The + * aio_return() function may be called exactly once to retrieve the return status + * of a given asynchronous operation; thereafter, if the same aiocb structure + * is used in a call to aio_return() or aio_error(), an error may be returned. + * When the aiocb structure referred to by aiocbp is used to submit another + * asynchronous operation, then aio_return() may be successfully used to + * retrieve the return status of that operation.
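+ *
+ * For illustration (standard POSIX usage; not part of the original
+ * header), the common submit/poll/reap pattern with the calls above,
+ * given an open file descriptor fd:
+ *
+ *	struct aiocb cb;
+ *	char buf[4096];
+ *	memset(&cb, 0, sizeof(cb));
+ *	cb.aio_fildes = fd;
+ *	cb.aio_buf = buf;
+ *	cb.aio_nbytes = sizeof(buf);
+ *	cb.aio_offset = 0;
+ *	if (aio_read(&cb) == 0) {
+ *		while (aio_error(&cb) == EINPROGRESS)
+ *			;			(or block in aio_suspend())
+ *		n = aio_return(&cb);		(bytes read, or -1 on failure)
+ *	}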
+ */ +ssize_t aio_return( struct aiocb * aiocbp ); + +/* + * Suspend the calling thread until at least one of the asynchronous I/O + * operations referenced by the aiocblist argument has completed, until a signal + * interrupts the function, or, if timeout is not NULL, until the time + * interval specified by timeout has passed. If any of the aiocb structures + * in the aiocblist correspond to completed asynchronous I/O operations (that is, + * the error status for the operation is not equal to [EINPROGRESS]) at the + * time of the call, the function shall return without suspending the calling + * thread. The aiocblist argument is an array of pointers to asynchronous I/O + * control blocks. The nent argument indicates the number of elements in the + * array. Each aiocb structure pointed to has been used in initiating an + * asynchronous I/O request via aio_read(), aio_write(), or lio_listio(). This + * array may contain NULL pointers, which are ignored. + */ +int aio_suspend( const struct aiocb *const aiocblist[], + int nent, + const struct timespec * timeoutp ); + +/* + * Write aiocbp->aio_nbytes to the file associated with aiocbp->aio_fildes from + * the buffer pointed to by aiocbp->aio_buf. The function shall return when the + * write request has been initiated or, at a minimum, queued. + * The aiocbp argument may be used as an argument to aio_error() and aio_return() + * in order to determine the error status and return status, respectively, of the + * asynchronous operation while it is proceeding. + */ +int aio_write( struct aiocb * aiocbp ); + +/* + * Initiate a list of I/O requests with a single function call. The mode + * argument takes one of the values LIO_WAIT or LIO_NOWAIT and determines whether + * the function returns when the I/O operations have been completed, or as soon + * as the operations have been queued. If the mode argument is LIO_WAIT, the + * function shall wait until all I/O is complete and the sig argument shall be + * ignored. + * If the mode argument is LIO_NOWAIT, the function shall return immediately, and + * asynchronous notification shall occur, according to the sig argument, when all + * the I/O operations complete. If sig is NULL, then no asynchronous notification + * shall occur. + */ +int lio_listio( int mode, + struct aiocb *const aiocblist[], + int nent, + struct sigevent *sigp ); +#endif /* KERNEL */ +#endif /* _SYS_AIO_H_ */ diff --git a/bsd/sys/aio_kern.h b/bsd/sys/aio_kern.h new file mode 100644 index 000000000..03555f39a --- /dev/null +++ b/bsd/sys/aio_kern.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * File: sys/aio_kern.h
+ * Author: Jerry Cottingham [jerryc@apple.com]
+ *
+ * Header file for the kernel-only portion of the POSIX Asynchronous IO APIs
+ *
+ */
+
+#include
+
+#ifndef _SYS_AIO_KERN_H_
+#define _SYS_AIO_KERN_H_
+
+#ifdef KERNEL
+
+struct aio_workq_entry
+{
+ TAILQ_ENTRY( aio_workq_entry ) aio_workq_link;
+ struct proc *procp; /* user proc that queued this request */
+ struct aiocb *uaiocbp; /* pointer passed in from user land */
+ struct aiocb *fsyncp; /* not NULL means this request must complete */
+ /* before an aio_fsync call can proceed. */
+ vm_map_t aio_map; /* user land map we have a reference to */
+ ssize_t returnval; /* return value from read / write request */
+ int errorval; /* error value from read / write request */
+ int flags;
+ long group_tag; /* identifier used to group IO requests */
+ struct aiocb aiocb; /* copy of aiocb from user land */
+};
+typedef struct aio_workq_entry aio_workq_entry;
+
+/*
+ * definitions for aio_workq_entry.flags
+ */
+#define AIO_READ 0x00000001
+#define AIO_WRITE 0x00000002
+#define AIO_FSYNC 0x00000004 /* aio_fsync with op = O_SYNC */
+#define AIO_DSYNC 0x00000008 /* aio_fsync with op = O_DSYNC (not supported yet) */
+#define AIO_LIO 0x00000010 /* lio_listio generated IO */
+#define AIO_DO_FREE 0x00000800 /* entry needs to be freed */
+#define AIO_COMPLETION 0x00001000 /* entry is in completion processing (not freeable yet) */
+#define AIO_DISABLE 0x00002000 /* process is trying to exit or exec and we need */
+ /* to disable normal completion notification */
+#define AIO_WAITING 0x00004000 /* process is trying to exit, exec, or close and is */
+ /* waiting for one or more active IO requests to */
+ /* complete */
+
+
+__private_extern__ void _aio_close( struct proc *p, int fd );
+__private_extern__ void _aio_exit( struct proc *p );
+__private_extern__ void _aio_exec( struct proc *p );
+__private_extern__ void _aio_create_worker_threads( int num );
+
+#endif /* KERNEL */
+
+#endif /* _SYS_AIO_KERN_H_ */
diff --git a/bsd/sys/attr.h b/bsd/sys/attr.h
index a4b0b3dae..0c6f6290e 100644
--- a/bsd/sys/attr.h
+++ b/bsd/sys/attr.h
@@ -113,14 +113,129 @@ typedef struct vol_capabilities_attr {
 vol_capabilities_set_t valid;
 } vol_capabilities_attr_t;
+/*
+ * VOL_CAP_FMT_PERSISTENTOBJECTIDS: When set, the volume has object IDs
+ * that are persistent (retain their values even when the volume is
+ * unmounted and remounted), and a file or directory can be looked up
+ * by ID. Volumes that support VolFS and can support Carbon File ID
+ * references should set this bit.
+ *
+ * VOL_CAP_FMT_SYMBOLICLINKS: When set, the volume supports symbolic
+ * links. The symlink(), readlink(), and lstat() calls all operate on
+ * symbolic links.
+ *
+ * VOL_CAP_FMT_HARDLINKS: When set, the volume supports hard links.
+ * The link() call creates hard links.
+ *
+ * VOL_CAP_FMT_JOURNAL: When set, the volume is capable of supporting
+ * a journal used to speed recovery in case of unplanned shutdown
+ * (such as a power outage or crash). This bit does not necessarily
+ * mean the volume is actively using a journal for recovery.
+ *
+ * VOL_CAP_FMT_JOURNAL_ACTIVE: When set, the volume is currently using
+ * a journal for use in speeding recovery after an unplanned shutdown.
+ * This bit can be set only if VOL_CAP_FMT_JOURNAL is also set.
+ * + * VOL_CAP_FMT_NO_ROOT_TIMES: When set, the volume format does not + * store reliable times for the root directory, so you should not + * depend on them to detect changes, etc. + * + * VOL_CAP_FMT_SPARSE_FILES: When set, the volume supports sparse files. + * That is, files which can have "holes" that have never been written + * to, and are not allocated on disk. Sparse files may have an + * allocated size that is less than the file's logical length. + * + * VOL_CAP_FMT_ZERO_RUNS: For security reasons, parts of a file (runs) + * that have never been written to must appear to contain zeroes. When + * this bit is set, the volume keeps track of allocated but unwritten + * runs of a file so that it can substitute zeroes without actually + * writing zeroes to the media. This provides performance similar to + * sparse files, but not the space savings. + * + * VOL_CAP_FMT_CASE_SENSITIVE: When set, file and directory names are + * case sensitive (upper and lower case are different). When clear, + * an upper case character is equivalent to a lower case character, + * and you can't have two names that differ solely in the case of + * the characters. + * + * VOL_CAP_FMT_CASE_PRESERVING: When set, file and directory names + * preserve the difference between upper and lower case. If clear, + * the volume may change the case of some characters (typically + * making them all upper or all lower case). A volume that sets + * VOL_CAP_FMT_CASE_SENSITIVE should also set VOL_CAP_FMT_CASE_PRESERVING. + * + * VOL_CAP_FMT_FAST_STATFS: This bit is used as a hint to upper layers + * (especially Carbon) that statfs() is fast enough that its results + * need not be cached by those upper layers. A volume that caches + * the statfs information in its in-memory structures should set this bit. + * A volume that must always read from disk or always perform a network + * transaction should not set this bit. + */ #define VOL_CAP_FMT_PERSISTENTOBJECTIDS 0x00000001 #define VOL_CAP_FMT_SYMBOLICLINKS 0x00000002 #define VOL_CAP_FMT_HARDLINKS 0x00000004 +#define VOL_CAP_FMT_JOURNAL 0x00000008 +#define VOL_CAP_FMT_JOURNAL_ACTIVE 0x00000010 +#define VOL_CAP_FMT_NO_ROOT_TIMES 0x00000020 +#define VOL_CAP_FMT_SPARSE_FILES 0x00000040 +#define VOL_CAP_FMT_ZERO_RUNS 0x00000080 +#define VOL_CAP_FMT_CASE_SENSITIVE 0x00000100 +#define VOL_CAP_FMT_CASE_PRESERVING 0x00000200 +#define VOL_CAP_FMT_FAST_STATFS 0x00000400 + +/* + * VOL_CAP_INT_SEARCHFS: When set, the volume implements the + * searchfs() system call (the VOP_SEARCHFS vnode operation). + * + * VOL_CAP_INT_ATTRLIST: When set, the volume implements the + * getattrlist() and setattrlist() system calls (VOP_GETATTRLIST + * and VOP_SETATTRLIST vnode operations) for the volume, files, + * and directories. The volume may or may not implement the + * readdirattr() system call. XXX Is there any minimum set + * of attributes that should be supported? To determine the + * set of supported attributes, get the ATTR_VOL_ATTRIBUTES + * attribute of the volume. + * + * VOL_CAP_INT_NFSEXPORT: When set, the volume implements exporting + * of NFS volumes. + * + * VOL_CAP_INT_READDIRATTR: When set, the volume implements the + * readdirattr() system call (VOP_READDIRATTR vnode operation). + * + * VOL_CAP_INT_EXCHANGEDATA: When set, the volume implements the + * exchangedata() system call (VOP_EXCHANGE vnode operation). + * + * VOL_CAP_INT_COPYFILE: When set, the volume implements the + * VOP_COPYFILE vnode operation. (XXX There should be a copyfile() + * system call in .) 
+ * + * VOL_CAP_INT_ALLOCATE: When set, the volume implements the + * VOP_ALLOCATE vnode operation, which means it implements the + * F_PREALLOCATE selector of fcntl(2). + * + * VOL_CAP_INT_VOL_RENAME: When set, the volume implements the + * ATTR_VOL_NAME attribute for both getattrlist() and setattrlist(). + * The volume can be renamed by setting ATTR_VOL_NAME with setattrlist(). + * + * VOL_CAP_INT_ADVLOCK: When set, the volume implements POSIX style + * byte range locks via VOP_ADVLOCK (accessible from fcntl(2)). + * + * VOL_CAP_INT_FLOCK: When set, the volume implements whole-file flock(2) + * style locks via VOP_ADVLOCK. This includes the O_EXLOCK and O_SHLOCK + * flags of the open(2) call. + * + */ #define VOL_CAP_INT_SEARCHFS 0x00000001 #define VOL_CAP_INT_ATTRLIST 0x00000002 #define VOL_CAP_INT_NFSEXPORT 0x00000004 #define VOL_CAP_INT_READDIRATTR 0x00000008 +#define VOL_CAP_INT_EXCHANGEDATA 0x00000010 +#define VOL_CAP_INT_COPYFILE 0x00000020 +#define VOL_CAP_INT_ALLOCATE 0x00000040 +#define VOL_CAP_INT_VOL_RENAME 0x00000080 +#define VOL_CAP_INT_ADVLOCK 0x00000100 +#define VOL_CAP_INT_FLOCK 0x00000200 typedef struct vol_attributes_attr { attribute_set_t validattr; @@ -218,8 +333,13 @@ typedef struct vol_attributes_attr { #define SRCHFS_MATCHPARTIALNAMES 0x00000002 #define SRCHFS_MATCHDIRS 0x00000004 #define SRCHFS_MATCHFILES 0x00000008 +#define SRCHFS_SKIPLINKS 0x00000010 +#define SRCHFS_SKIPINVISIBLE 0x00000020 +#define SRCHFS_SKIPPACKAGES 0x00000040 +#define SRCHFS_SKIPINAPPROPRIATE 0x00000080 + #define SRCHFS_NEGATEPARAMS 0x80000000 -#define SRCHFS_VALIDOPTIONSMASK 0x8000000F +#define SRCHFS_VALIDOPTIONSMASK 0x800000FF struct fssearchblock { struct attrlist *returnattrs; diff --git a/bsd/sys/audit.h b/bsd/sys/audit.h new file mode 100644 index 000000000..39ab86fdb --- /dev/null +++ b/bsd/sys/audit.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _SYS_AUDIT_H +#define _SYS_AUDIT_H + +#include +#include +#include +#include +#include + +#define AUDIT_RECORD_MAGIC 0x828a0f1b +#define MAX_AUDIT_RECORDS 20 +#define MAX_AUDIT_RECORD_SIZE 4096 + +/* + * Define the masks for the classes of audit events. 
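+ * Each process carries an au_mask_t (struct au_mask, defined later in
+ * this header) whose am_success and am_failure fields are bitwise ORs of
+ * these class masks; an event is preselected for audit when its class
+ * bit appears in the applicable field. As a purely illustrative sketch
+ * (hypothetical values, not mandated by this header), a mask recording
+ * login/logout and administrative activity, whether it succeeds or
+ * fails, could be built as:
+ *
+ *     au_mask_t m;
+ *     m.am_success = m.am_failure = AU_LOGIN | AU_ADMIN;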
+ */ +#define AU_NULL 0x00000000 +#define AU_FREAD 0x00000001 +#define AU_FWRITE 0x00000002 +#define AU_FACCESS 0x00000004 +#define AU_FMODIFY 0x00000008 +#define AU_FCREATE 0x00000010 +#define AU_FDELETE 0x00000020 +#define AU_CLOSE 0x00000040 +#define AU_PROCESS 0x00000080 +#define AU_NET 0x00000100 +#define AU_IPC 0x00000200 +#define AU_NONAT 0x00000400 +#define AU_ADMIN 0x00000800 +#define AU_LOGIN 0x00001000 +#define AU_TFM 0x00002000 +#define AU_APPL 0x00004000 +#define AU_SETL 0x00008000 +#define AU_IFLOAT 0x00010000 +#define AU_PRIV 0x00020000 +#define AU_MAC_RW 0x00040000 +#define AU_XCONN 0x00080000 +#define AU_XCREATE 0x00100000 +#define AU_XDELETE 0x00200000 +#define AU_XIFLOAT 0x00400000 +#define AU_XPRIVS 0x00800000 +#define AU_XPRIVF 0x01000000 +#define AU_XMOVE 0x02000000 +#define AU_XDACF 0x04000000 +#define AU_XMACF 0x08000000 +#define AU_XSECATTR 0x10000000 +#define AU_IOCTL 0x20000000 +#define AU_EXEC 0x40000000 +#define AU_OTHER 0x80000000 +#define AU_ALL 0xffffffff + +/* + * IPC types + */ +#define AT_IPC_MSG ((u_char)1) /* message IPC id */ +#define AT_IPC_SEM ((u_char)2) /* semaphore IPC id */ +#define AT_IPC_SHM ((u_char)3) /* shared mem IPC id */ + +/* + * Audit conditions. + */ +#define AUC_UNSET 0 +#define AUC_AUDITING 1 +#define AUC_NOAUDIT 2 +#define AUC_DISABLED -1 + +/* + * auditon(2) commands. + */ +#define A_GETPOLICY 2 +#define A_SETPOLICY 3 +#define A_GETKMASK 4 +#define A_SETKMASK 5 +#define A_GETQCTRL 6 +#define A_SETQCTRL 7 +#define A_GETCWD 8 +#define A_GETCAR 9 +#define A_GETSTAT 12 +#define A_SETSTAT 13 +#define A_SETUMASK 14 +#define A_SETSMASK 15 +#define A_GETCOND 20 +#define A_SETCOND 21 +#define A_GETCLASS 22 +#define A_SETCLASS 23 +#define A_GETPINFO 24 +#define A_SETPMASK 25 +#define A_SETFSIZE 26 +#define A_GETFSIZE 27 +#define A_GETPINFO_ADDR 28 +#define A_GETKAUDIT 29 +#define A_SETKAUDIT 30 + +/* + * Audit policy controls. 
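+ * These bits form the policy vector read and written with the
+ * A_GETPOLICY and A_SETPOLICY commands above. A purely illustrative
+ * user-space sketch (hypothetical; it assumes only the auditon()
+ * prototype declared near the end of this header, and the conventional
+ * BSM reading of AUDIT_CNT as "continue rather than suspend when the
+ * trail is full" and AUDIT_ARGV as "record execve() arguments"):
+ *
+ *     int policy = AUDIT_CNT | AUDIT_ARGV;
+ *     if (auditon(A_SETPOLICY, &policy, sizeof(policy)) < 0)
+ *             ... handle the error ...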
+ */ +#define AUDIT_CNT 0x0001 +#define AUDIT_AHLT 0x0002 +#define AUDIT_ARGV 0x0004 +#define AUDIT_ARGE 0x0008 +#define AUDIT_PASSWD 0x0010 +#define AUDIT_SEQ 0x0020 +#define AUDIT_WINDATA 0x0040 +#define AUDIT_USER 0x0080 +#define AUDIT_GROUP 0x0100 +#define AUDIT_TRAIL 0x0200 +#define AUDIT_PATH 0x0400 + +typedef uid_t au_id_t; +typedef pid_t au_asid_t; +typedef u_int16_t au_event_t; +typedef u_int16_t au_emod_t; +typedef u_int32_t au_class_t; + +struct au_tid { + dev_t port; + u_int32_t machine; +}; +typedef struct au_tid au_tid_t; + +struct au_tid_addr { + dev_t at_port; + u_int32_t at_type; + u_int32_t at_addr[4]; +}; +typedef struct au_tid_addr au_tid_addr_t; + +struct au_mask { + unsigned int am_success; /* success bits */ + unsigned int am_failure; /* failure bits */ +}; +typedef struct au_mask au_mask_t; + +struct auditinfo { + au_id_t ai_auid; /* Audit user ID */ + au_mask_t ai_mask; /* Audit masks */ + au_tid_t ai_termid; /* Terminal ID */ + au_asid_t ai_asid; /* Audit session ID */ +}; +typedef struct auditinfo auditinfo_t; + +struct auditinfo_addr { + au_id_t ai_auid; /* Audit user ID */ + au_mask_t ai_mask; /* Audit masks */ + au_tid_addr_t ai_termid; /* Terminal ID */ + au_asid_t ai_asid; /* Audit session ID */ +}; +typedef struct auditinfo_addr auditinfo_addr_t; + +/* Token and record structures */ + +struct au_token { + u_char *t_data; + size_t len; + TAILQ_ENTRY(au_token) tokens; +}; +typedef struct au_token token_t; + +struct au_record { + char used; /* Is this record currently being used */ + int desc; /* The descriptor associated with this record */ + TAILQ_HEAD(, au_token) token_q; /* queue of BSM tokens */ + u_char *data; + size_t len; + LIST_ENTRY(au_record) au_rec_q; +}; +typedef struct au_record au_record_t; + +#ifndef KERNEL +#include + +__BEGIN_DECLS +int audit (const void *, int); +int auditon (int, void *, int); +int auditsvc (int, int); +int auditctl (const char *); +int getauid (au_id_t *); +int setauid (const au_id_t *); +int getaudit (struct auditinfo *); +int setaudit (const struct auditinfo *); +int getaudit_addr (struct auditinfo_addr *, int); +int setaudit_addr (const struct auditinfo_addr *, int); +__END_DECLS +#endif /* !KERNEL */ + +#endif /* !_SYS_AUDIT_H */ diff --git a/bsd/sys/bsm_kevents.h b/bsd/sys/bsm_kevents.h new file mode 100644 index 000000000..8c717ba75 --- /dev/null +++ b/bsd/sys/bsm_kevents.h @@ -0,0 +1,403 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _BSM_KEVENTS_H_
+#define _BSM_KEVENTS_H_
+
+/*
+ * Values marked as AUE_NULL are not required to be audited as per CAPP
+ *
+ * The second value within the comments is the syscall number in Darwin
+ *
+ * Values in the third column are the values assigned by BSM for obsolete
+ * or old system calls
+ *
+ * Values marked as XXX in the third column do not have an
+ * event number assigned as yet, and have (temporarily) been assigned
+ * the value AUE_NULL
+ */
+
+#define AUE_NULL 0
+#define AUE_EXIT 1 /*1*/
+#define AUE_FORK 2 /*2*/
+#define AUE_READ AUE_NULL /*3*/
+#define AUE_WRITE AUE_NULL /*4*/
+#define AUE_OPEN_R 72 /*5*/
+#define AUE_OPEN_RC 73 /*5*/
+#define AUE_OPEN_RTC 75 /*5*/
+#define AUE_OPEN_RT 74 /*5*/
+#define AUE_OPEN_RW 80 /*5*/
+#define AUE_OPEN_RWC 81 /*5*/
+#define AUE_OPEN_RWTC 83 /*5*/
+#define AUE_OPEN_RWT 82 /*5*/
+#define AUE_OPEN_W 76 /*5*/
+#define AUE_OPEN_WC 77 /*5*/
+#define AUE_OPEN_WTC 79 /*5*/
+#define AUE_OPEN_WT 78 /*5*/
+#define AUE_CLOSE 112 /*6*/
+#define AU_WAIT4 AUE_NULL /*7*/
+#define AUE_O_CREAT AUE_NULL /*8*/ /*4*/
+#define AUE_LINK 5 /*9*/
+#define AUE_UNLINK 6 /*10*/
+#define AUE_O_EXECV AUE_NULL /*11*/
+#define AUE_CHDIR 8 /*12*/
+#define AUE_FCHDIR 68 /*13*/
+#define AUE_MKNOD 9 /*14*/
+#define AUE_CHMOD 10 /*15*/
+#define AUE_CHOWN 11 /*16*/
+#define AUE_O_SBREAK AUE_NULL /*17*/
+#define AUE_GETFSSTAT 301 /*18*/
+#define AUE_O_LSEEK AUE_NULL /*19*/
+#define AUE_GETPID AUE_NULL /*20*/
+#define AUE_O_MOUNT AUE_NULL /*21*/
+#define AUE_O_UMOUNT AUE_NULL /*22*/
+#define AUE_SETUID 200 /*23*/
+#define AUE_GETUID AUE_NULL /*24*/
+#define AUE_GETEUID AUE_NULL /*25*/
+#define AUE_PTRACE 302 /*26*/
+#define AUE_RECVMSG 190 /*27*/
+#define AUE_SENDMSG 188 /*28*/
+#define AUE_RECVFROM 191 /*29*/
+#define AUE_ACCEPT 33 /*30*/
+#define AUE_GETPEERNAME AUE_NULL /*31*/
+#define AUE_GETSOCKNAME AUE_NULL /*32*/
+#define AUE_ACCESS 14 /*33*/
+#define AUE_CHFLAGS 303 /*34*/
+#define AUE_FCHFLAGS 304 /*35*/
+#define AUE_SYNC AUE_NULL /*36*/
+#define AUE_KILL 15 /*37*/
+#define AUE_O_STAT AUE_NULL /*38*/
+#define AUE_GETPPID AUE_NULL /*39*/
+#define AUE_O_LSTAT AUE_NULL /*40*/
+#define AUE_DUP AUE_NULL /*41*/
+#define AUE_PIPE 185 /*42*/
+#define AUE_GETEGID AUE_NULL /*43*/
+#define AUE_PROFILE 305 /*44*/
+#define AUE_KTRACE 306 /*45*/
+#define AUE_REBOOT 308
+#define AUE_SIGACTION AUE_NULL /*46*/ /*XXX*/
+#define AUE_GETGID AUE_NULL /*47*/
+#define AUE_SIGPROCMASK AUE_NULL /*48*/ /*XXX*/
+#define AUE_GETLOGIN AUE_NULL /*49*/
+#define AUE_SETLOGIN 307 /*50*/
+#define AUE_ACCT 18 /*51*/
+#define AUE_SIGPENDING AUE_NULL /*52*/ /*XXX*/
+#define AUE_SIGALTSTACK AUE_NULL /*53*/ /*XXX*/
+#define AUE_IOCTL 158 /*54*/
+#define AUE_SYSTEMBOOT 113 /*55*/
+#define AUE_REVOKE 309 /*56*/
+#define AUE_SYMLINK 21 /*57*/
+#define AUE_READLINK 22 /*58*/
+#define AUE_EXECVE 23 /*59*/
+#define AUE_UMASK 310 /*60*/
+#define AUE_CHROOT 24 /*61*/
+#define AUE_O_FSTAT AUE_NULL /*62*/
+
+#define AUE_O_GETPAGESIZE AUE_NULL /*64*/
+#define AUE_MSYNC AUE_NULL /*65*/
+#define AUE_VFORK 25 /*66*/
+#define AUE_O_VREAD AUE_NULL /*67*/
+#define AUE_O_VWRITE AUE_NULL /*68*/
+#define AUE_SBRK AUE_NULL /*69*/ /*EOPNOTSUP*/
+#define AUE_SSTK AUE_NULL /*70*/ /*EOPNOTSUP*/
+#define AUE_O_MMAN AUE_NULL /*71*/
+#define AUE_O_VADVISE AUE_NULL /*72*/
+#define AUE_MUNMAP 213 /*73*/
+#define AUE_MPROTECT 311 /*74*/
+#define AUE_MADVISE AUE_NULL /*75*/
+#define AUE_O_VHANGUP AUE_NULL /*76*/
+#define AUE_O_VLIMIT AUE_NULL /*77*/
+#define AUE_MINCORE AUE_NULL /*78*/
+#define AUE_GETGROUPS AUE_NULL /*79*/ +#define AUE_SETGROUPS 26 /*80*/ +#define AUE_GETPGRP AUE_NULL /*81*/ +#define AUE_SETPGRP 27 /*82*/ +#define AUE_SETITIMER AUE_NULL /*83*/ /*XXX*/ +#define AUE_O_WAIT AUE_NULL /*84*/ +#define AUE_SWAPON AUE_NULL /*85*/ /*EOPNOTSUP*/ +#define AUE_GETITIMER AUE_NULL /*86*/ +#define AUE_O_GETHOSTNAME AUE_NULL /*87*/ +#define AUE_O_SETHOSTNAME AUE_NULL /*88*/ +#define AUE_GETDTABLESIZE AUE_NULL /*89*/ +#define AUE_DUP2 AUE_NULL /*90*/ +#define AUE_O_GETDOPT AUE_NULL /*91*/ +#define AUE_FCNTL 30 /*92*/ +#define AUE_SELECT AUE_NULL /*93*/ +#define AUE_O_SETDOPT AUE_NULL /*94*/ +#define AUE_FSYNC AUE_NULL /*95*/ +#define AUE_SETPRIORITY 312 /*96*/ +#define AUE_SOCKET 183 /*97*/ +#define AUE_CONNECT 32 /*98*/ +#define AUE_O_ACCEPT AUE_NULL /*99*/ +#define AUE_GETPRIORITY AUE_NULL /*100*/ +#define AUE_O_SEND AUE_NULL /*101*/ +#define AUE_O_RECV AUE_NULL /*102*/ +#define AUE_SIGRETURN AUE_NULL /*103*/ /*XXX*/ +#define AUE_BIND 34 /*104*/ +#define AUE_SETSOCKOPT 35 /*105*/ +#define AUE_LISTEN AUE_NULL /*106*/ +#define AUE_O_VTIMES AUE_NULL /*107*/ +#define AUE_O_SIGVEC AUE_NULL /*108*/ +#define AUE_O_SIGBLOCK AUE_NULL /*109*/ +#define AUE_O_SIGSETMASK AUE_NULL /*110*/ +#define AUE_SIGSUSPEND AUE_NULL /*111*/ /*XXX*/ +#define AUE_O_SIGSTACK AUE_NULL /*112*/ +#define AUE_O_RECVMSG AUE_NULL /*113*/ +#define AUE_O_SENDMSG AUE_NULL /*114*/ +#define AUE_O_VTRACE AUE_NULL /*115*/ /*36*/ +#define AUE_GETTIMEOFDAY AUE_NULL /*116*/ +#define AUE_GETRUSAGE AUE_NULL /*117*/ +#define AUE_GTSOCKOPT AUE_NULL /*118*/ +#define AUE_O_RESUBA AUE_NULL /*119*/ +#define AUE_READV AUE_NULL /*120*/ +#define AUE_WRITEV AUE_NULL /*121*/ +#define AUE_SETTIMEOFDAY 313 /*122*/ +#define AUE_FCHOWN 38 /*123*/ +#define AUE_FCHMOD 39 /*124*/ +#define AUE_O_RECVFROM AUE_NULL /*125*/ +#define AUE_O_SETREUID AUE_NULL /*126*/ /*40*/ +#define AUE_O_SETREGID AUE_NULL /*127*/ /*41*/ +#define AUE_RENAME 42 /*128*/ +#define AUE_O_TRUNCATE AUE_NULL /*129*/ +#define AUE_O_FTRUNCATE AUE_NULL /*130*/ +#define AUE_FLOCK 314 /*131*/ +#define AUE_MKFIFO 315 /*132*/ +#define AUE_SENDTO 184 /*133*/ +#define AUE_SHUTDOWN 46 /*134*/ +#define AUE_SOCKETPAIR 317 /*135*/ +#define AUE_MKDIR 47 /*136*/ +#define AUE_RMDIR 48 /*137*/ +#define AUE_UTIMES 49 /*138*/ +#define AUE_FUTIMES 318 /*139*/ +#define AUE_ADJTIME 50 /*140*/ +#define AUE_O_GETPEERNAME AUE_NULL /*141*/ +#define AUE_O_GETHOSTID AUE_NULL /*142*/ +#define AUE_O_SETHOSTID AUE_NULL /*143*/ +#define AUE_O_GETRLIMIT AUE_NULL /*144*/ +#define AUE_O_SETRLIMIT AUE_NULL /*145*/ +#define AUE_O_KILLPG AUE_NULL /*146*/ +#define AUE_SETSID 319 /*147*/ +#define AUE_O_SETQUOTA AUE_NULL /*148*/ +#define AUE_O_QUOTA AUE_NULL /*149*/ +#define AUE_O_GETSOCKNAME AUE_NULL /*150*/ +#define AUE_GETPGID AUE_NULL /*151*/ +#define AUE_SETPRIVEXEC 320 /*152*/ +#define AUE_PREAD AUE_NULL /*153*/ +#define AUE_PWRITE AUE_NULL /*154*/ +#define AUE_NFSSVC 321 /*155*/ +#define AUE_O_GETDIRENTRIES AUE_NULL /*156*/ +#define AUE_STATFS 54 /*157*/ +#define AUE_FSTATFS 55 /*158*/ +#define AUE_UMOUNT 12 /*159*/ +#define AUE_O_ASYNCDAEMON AUE_NULL /*160*/ +#define AUE_GETFH 322 /*161*/ +#define AUE_O_GETDOMAINNAME AUE_NULL /*162*/ +#define AUE_O_SETDOMAINNAME AUE_NULL /*163*/ +#define AUE_O_PCFS_MOUNT AUE_NULL /*164*/ +#define AUE_QUOTACTL 323 /*165*/ +#define AUE_O_EXPORTFS AUE_NULL /*166*/ +#define AUE_MOUNT 62 /*167*/ +#define AUE_O_USTATE AUE_NULL /*168*/ +#define AUE_TABLE AUE_NULL /*170*/ /*ENOSYS*/ +#define AUE_O_WAIT3 AUE_NULL /*171*/ +#define AUE_O_RPAUSE AUE_NULL /*172*/ +#define 
AUE_O_GETDENTS AUE_NULL /*174*/ +#define AUE_GCCONTROL AUE_NULL /*175*/ /*ENOSYS*/ +#define AUE_ADDPROFILE 324 /*176*/ + +#define AUE_KDBUGTRACE 325 /*180*/ +#define AUE_SETGID 205 /*181*/ +#define AUE_SETEGID 214 /*182*/ +#define AUE_SETEUID 215 /*183*/ + +#define AUE_STAT 16 /*188*/ +#define AUE_FSTAT 326 /*189*/ +#define AUE_LSTAT 17 /*190*/ +#define AUE_PATHCONF 71 /*191*/ +#define AUE_FPATHCONF 327 /*192*/ +#define AUE_GETRLIMIT AUE_NULL /*194*/ +#define AUE_SETRLIMIT 51 /*195*/ +#define AUE_GETDIRENTRIES 328 /*196*/ +#define AUE_MMAP 210 /*197*/ +#define AUE_SYSCALL AUE_NULL /*198*/ /*ENOSYS*/ +#define AUE_LSEEK AUE_NULL /*199*/ +#define AUE_TRUNCATE 329 /*200*/ +#define AUE_FTRUNCATE 330 /*201*/ +#define AUE_SYSCTL 331 /*202*/ +#define AUE_MLOCK 332 /*203*/ +#define AUE_MUNLOCK 333 /*204*/ +#define AUE_UNDELETE 334 /*205*/ + +#define AUE_MKCOMPLEX AUE_NULL /*216*/ /*XXX*/ +#define AUE_STATV AUE_NULL /*217*/ /*EOPNOTSUPP*/ +#define AUE_LSTATV AUE_NULL /*218*/ /*EOPNOTSUPP*/ +#define AUE_FSTATV AUE_NULL /*219*/ /*EOPNOTSUPP*/ +#define AUE_GETATTRLIST 335 /*220*/ +#define AUE_SETATTRLIST 336 /*221*/ +#define AUE_GETDIRENTRIESATTR 337 /*222*/ +#define AUE_EXCHANGEDATA 338 /*223*/ +#define AUE_CHECKUSERACCESS AUE_NULL /*224*/ /* To Be Removed */ +#define AUE_SEARCHFS 339 /*225*/ + +#define AUE_DELETE AUE_NULL /*226*/ /* reserved */ +#define AUE_COPYFILE AUE_NULL /*227*/ /* reserved */ +#define AUE_WATCHEVENT AUE_NULL /*231*/ /* reserved */ +#define AUE_WAITEVENT AUE_NULL /*232*/ /* reserved */ +#define AUE_MODWATCH AUE_NULL /*233*/ /* reserved */ +#define AUE_FSCTL AUE_NULL /*242*/ /* reserved */ + +#define AUE_MINHERIT 340 /*250*/ +#define AUE_SEMSYS AUE_NULL /*251*/ /* To Be Removed */ +#define AUE_MSGSYS AUE_NULL /*252*/ /* To Be Removed */ +#define AUE_SHMSYS AUE_NULL /*253*/ +#define AUE_SEMCTL 98 /*254*/ +#define AUE_SEMCTL_GETALL 105 /*254*/ +#define AUE_SEMCTL_GETNCNT 102 /*254*/ +#define AUE_SEMCTL_GETPID 103 /*254*/ +#define AUE_SEMCTL_GETVAL 104 /*254*/ +#define AUE_SEMCTL_GETZCNT 106 /*254*/ +#define AUE_SEMCTL_RMID 99 /*254*/ +#define AUE_SEMCTL_SET 100 /*254*/ +#define AUE_SEMCTL_SETALL 108 /*254*/ +#define AUE_SEMCTL_SETVAL 107 /*254*/ +#define AUE_SEMCTL_STAT 101 /*254*/ +#define AUE_SEMGET 109 /*255*/ +#define AUE_SEMOP 110 /*256*/ +#define AUE_SEMCONFIG 341 /*257*/ +#define AUE_MSGCL AUE_NULL /*258*/ /*EOPNOTSUPP*/ +#define AUE_MSGGET 88 /*259*/ /*88-EOPNOTSUPP*/ +#define AUE_MSGRCV 89 /*261*/ /*89-EOPNOTSUPP*/ +#define AUE_MSGSND 90 /*260*/ /*90-EOPNOTSUPP*/ +#define AUE_SHMAT 96 /*262*/ +#define AUE_SHMCTL 91 /*263*/ +#define AUE_SHMCTL_RMID 92 /*263*/ +#define AUE_SHMCTL_SET 93 /*263*/ +#define AUE_SHMCTL_STAT 94 /*263*/ +#define AUE_SHMDT 97 /*264*/ +#define AUE_SHMGET 95 /*265*/ +#define AUE_SHMOPEN 345 /*266*/ +#define AUE_SHMUNLINK 346 /*267*/ +#define AUE_SEMOPEN 342 /*268*/ +#define AUE_SEMCLOSE 343 /*269*/ +#define AUE_SEMUNLINK 344 /*270*/ +#define AUE_SEMWAIT AUE_NULL /*271*/ +#define AUE_SEMTRYWAIT AUE_NULL /*272*/ +#define AUE_SEMPOST AUE_NULL /*273*/ +#define AUE_SEMGETVALUE AUE_NULL /*274*/ /*ENOSYS*/ +#define AUE_SEMINIT AUE_NULL /*275*/ /*ENOSYS*/ +#define AUE_SEMDESTROY AUE_NULL /*276*/ /*ENOSYS*/ + +#define AUE_LOADSHFILE 347 /*296*/ +#define AUE_RESETSHFILE 348 /*297*/ +#define AUE_NEWSYSTEMSHREG 349 /*298*/ + +#define AUE_GETSID AUE_NULL /*310*/ + +#define AUE_MLOCKALL AUE_NULL /*324*/ /*ENOSYS*/ +#define AUE_MUNLOCKALL AUE_NULL /*325*/ /*ENOSYS*/ + +#define AUE_ISSETUGID AUE_NULL /*327*/ +#define AUE_PTHREADKILL 350 /*328*/ +#define 
AUE_PTHREADSIGMASK 351 /*329*/ +#define AUE_SIGWAIT AUE_NULL /*330*/ /*XXX*/ + + + +// BSM events - Have to identify which ones are relevant to MacOSX +#define AUE_ACLSET 251 +#define AUE_AUDIT 211 +#define AUE_AUDITON_GETCAR 224 +#define AUE_AUDITON_GETCLASS 231 +#define AUE_AUDITON_GETCOND 229 +#define AUE_AUDITON_GETCWD 223 +#define AUE_AUDITON_GETKMASK 221 +#define AUE_AUDITON_GETSTAT 225 +#define AUE_AUDITON_GPOLICY 114 +#define AUE_AUDITON_GQCTRL 145 +#define AUE_AUDITON_SETCLASS 232 +#define AUE_AUDITON_SETCOND 230 +#define AUE_AUDITON_SETKMASK 222 +#define AUE_AUDITON_SESKMASK 228 +#define AUE_AUDITON_SETSTAT 226 +#define AUE_AUDITON_SETUMASK 227 +#define AUE_AUDITON_SPOLICY 147 +#define AUE_AUDITON_SQCTRL 146 +#define AUE_AUDITSVC 136 +#define AUE_DOORFS_DOOR_BIND 260 +#define AUE_DOORFS_DOOR_CALL 254 +#define AUE_DOORFS_DOOR_CREATE 256 +#define AUE_DOORFS_DOOR_CRED 259 +#define AUE_DOORFS_DOOR_INFO 258 +#define AUE_DOORFS_DOOR_RETURN 255 +#define AUE_DOORFS_DOOR_REVOKE 257 +#define AUE_DOORFS_DOOR_UNBIND 261 +#define AUE_ENTERPROM 153 +#define AUE_EXEC 7 +#define AUE_EXITPROM 154 +#define AUE_FACLSET 252 +#define AUE_FCHROOT 69 +#define AUE_FORK1 241 +#define AUE_GETAUDIT 132 +#define AUE_GETAUDIT_ADDR 267 +#define AUE_GETAUID 130 +#define AUE_GETMSG 217 +#define AUE_SOCKACCEPT 247 +#define AUE_SOCKRECEIVE 250 +#define AUE_GETPMSG 219 +#define AUE_GETPORTAUDIT 149 +#define AUE_INST_SYNC 264 +#define AUE_LCHOWN 237 +#define AUE_LXSTAT 236 +#define AUE_MEMCNTL 238 +#define AUE_MODADDMAJ 246 +#define AUE_MODCONFIG 245 +#define AUE_MODLOAD 243 +#define AUE_MODUNLOAD 244 +#define AUE_MSGCTL 84 +#define AUE_MSGCTL_RMID 85 +#define AUE_MSGCTL_SET 86 +#define AUE_MSGCTL_STAT 87 +#define AUE_NICE 203 +#define AUE_P_ONLINE 262 +#define AUE_PRIOCNTLSYS 212 +#define AUE_CORE 111 +#define AUE_PROCESSOR_BIND 263 +#define AUE_PUTMSG 216 +#define AUE_SOCKCONNECT 248 +#define AUE_SOCKSEND 249 +#define AUE_PUTPMSG 218 +#define AUE_SETAUDIT 133 +#define AUE_SETAUDIT_ADDR 266 +#define AUE_SETAUID 131 +#define AUE_SOCKCONFIG 183 +#define AUE_STATVFS 234 +#define AUE_STIME 201 +#define AUE_SYSINFO 39 +#define AUE_UTIME 202 +#define AUE_UTSYS 233 +#define AUE_XMKNOD 240 +#define AUE_XSTAT 235 + +#endif /* !_BSM_KEVENTS_H_ */ diff --git a/bsd/sys/bsm_klib.h b/bsd/sys/bsm_klib.h new file mode 100644 index 000000000..4df7668a0 --- /dev/null +++ b/bsd/sys/bsm_klib.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _BSM_KLIB_H_
+#define _BSM_KLIB_H_
+
+#define AU_PRS_SUCCESS 1
+#define AU_PRS_FAILURE 2
+#define AU_PRS_BOTH (AU_PRS_SUCCESS|AU_PRS_FAILURE)
+
+#ifdef KERNEL
+int au_preselect(au_event_t event, au_mask_t *mask_p, int sorf);
+au_event_t flags_to_openevent(int oflags);
+void fill_vattr(struct vattr *v, struct vnode_au_info *vn_info);
+void canon_path(struct proc *p, char *path, char *cpath);
+/*
+ * Define the system-call-to-audit-event mapping table.
+ */
+extern au_event_t sys_au_event[];
+extern int nsys_au_event; /* number of entries in this table */
+
+#endif /*KERNEL*/
+
+#endif /* ! _BSM_KLIB_H_ */
diff --git a/bsd/sys/bsm_token.h b/bsd/sys/bsm_token.h
new file mode 100644
index 000000000..6f555d50e
--- /dev/null
+++ b/bsd/sys/bsm_token.h
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _BSM_TOKEN_H_
+#define _BSM_TOKEN_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* We could determine the header and trailer sizes by
+ * defining appropriate structures. We hold off that approach
+ * until we have a consistent way of using structures for all tokens.
+ * This is not straightforward, since these token structures may
+ * contain pointers whose contents' size we do not know
+ * (e.g., text tokens).
+ */
+#define HEADER_SIZE 18
+#define TRAILER_SIZE 7
+
+#define ADD_U_CHAR(loc, val) \
+ do {\
+ *loc = val;\
+ loc += sizeof(u_char);\
+ }while(0)
+
+
+#define ADD_U_INT16(loc, val) \
+ do { \
+ memcpy(loc, (u_char *)&val, sizeof(u_int16_t));\
+ loc += sizeof(u_int16_t); \
+ }while(0)
+
+#define ADD_U_INT32(loc, val) \
+ do { \
+ memcpy(loc, (u_char *)&val, sizeof(u_int32_t));\
+ loc += sizeof(u_int32_t); \
+ }while(0)
+
+#define ADD_U_INT64(loc, val)\
+ do {\
+ memcpy(loc, (u_char *)&val, sizeof(u_int64_t));\
+ loc += sizeof(u_int64_t); \
+ }while(0)
+
+#define ADD_MEM(loc, data, size) \
+ do { \
+ memcpy(loc, data, size);\
+ loc += size;\
+ }while(0)
+
+#define ADD_STRING(loc, data, size) ADD_MEM(loc, data, size)
+
+
+/* Various token id types */
+
+/*
+ * Values inside the comments are not documented in the BSM pages and
+ * have been picked up from the header files
+ */
+
+/*
+ * Values marked as XXX do not have a value defined in the BSM header files
+ */
+
+/*
+ * Control token types
+
+#define AUT_OTHER_FILE ((char)0x11)
+#define AUT_OTHER_FILE32 AUT_OTHER_FILE
+#define AUT_OHEADER ((char)0x12)
+
+ */
+
+#define AUT_INVALID 0x00
+#define AU_FILE_TOKEN 0x11
+#define AU_TRAILER_TOKEN 0x13
+#define AU_HEADER_32_TOKEN 0x14
+#define AU_HEADER_EX_32_TOKEN 0x15
+
+
+/*
+ * Data token types
+#define AUT_SERVER ((char)0x25)
+#define AUT_SERVER32 AUT_SERVER
+ */
+
+#define AU_DATA_TOKEN 0x21
+#define AU_ARB_TOKEN AU_DATA_TOKEN
+#define AU_IPC_TOKEN 0x22
+#define AU_PATH_TOKEN 0x23
+#define AU_SUBJECT_32_TOKEN 0x24
+#define AU_PROCESS_32_TOKEN 0x26
+#define AU_RETURN_32_TOKEN 0x27
+#define AU_TEXT_TOKEN 0x28
+#define AU_OPAQUE_TOKEN 0x29
+#define AU_IN_ADDR_TOKEN 0x2A
+#define AU_IP_TOKEN 0x2B
+#define AU_IPORT_TOKEN 0x2C
+#define AU_ARG32_TOKEN 0x2D
+#define AU_SOCK_TOKEN 0x2E
+#define AU_SEQ_TOKEN 0x2F
+
+/*
+ * Modifier token types
+
+#define AUT_ACL ((char)0x30)
+#define AUT_LABEL ((char)0x33)
+#define AUT_GROUPS ((char)0x34)
+#define AUT_ILABEL ((char)0x35)
+#define AUT_SLABEL ((char)0x36)
+#define AUT_CLEAR ((char)0x37)
+#define AUT_PRIV ((char)0x38)
+#define AUT_UPRIV ((char)0x39)
+#define AUT_LIAISON ((char)0x3A)
+
+ */
+
+#define AU_ATTR_TOKEN 0x31
+#define AU_IPCPERM_TOKEN 0x32
+#define AU_NEWGROUPS_TOKEN 0x3B
+#define AU_EXEC_ARG_TOKEN 0x3C
+#define AU_EXEC_ENV_TOKEN 0x3D
+#define AU_ATTR32_TOKEN 0x3E
+
+
+/*
+ * Command token types
+ */
+
+#define AU_CMD_TOKEN 0x51
+#define AU_EXIT_TOKEN 0x52
+
+
+
+/*
+ * Miscellaneous token types
+
+#define AUT_HOST ((char)0x70)
+
+ */
+
+/*
+ * 64bit token types
+
+#define AUT_SERVER64 ((char)0x76)
+#define AUT_OTHER_FILE64 ((char)0x78)
+
+ */
+
+#define AU_ARG64_TOKEN 0x71
+#define AU_RETURN_64_TOKEN 0x72
+#define AU_ATTR64_TOKEN 0x73
+#define AU_HEADER_64_TOKEN 0x74
+#define AU_SUBJECT_64_TOKEN 0x75
+#define AU_PROCESS_64_TOKEN 0x77
+
+
+
+/*
+ * Extended network address token types
+ */
+
+#define AU_HEADER_EX_64_TOKEN 0x79
+#define AU_SUBJECT_32_EX_TOKEN 0x7a
+#define AU_PROCESS_32_EX_TOKEN 0x7b
+#define AU_SUBJECT_64_EX_TOKEN 0x7c
+#define AU_PROCESS_64_EX_TOKEN 0x7d
+#define AU_IN_ADDR_EX_TOKEN 0x7e
+#define AU_SOCK_EX32_TOKEN 0x7f
+#define AU_SOCK_EX128_TOKEN AUT_INVALID /*XXX*/
+#define AU_IP_EX_TOKEN AUT_INVALID /*XXX*/
+
+
+/*
+ * The values for the following token ids are not
+ * defined by BSM
+ */
+#define AU_SOCK_INET_32_TOKEN 0x80 /*XXX*/
+#define AU_SOCK_INET_128_TOKEN 0x81 /*XXX*/
+#define AU_SOCK_UNIX_TOKEN 0x82 /*XXX*/ + +/* print values for the arbitrary token */ +#define AUP_BINARY 0 +#define AUP_OCTAL 1 +#define AUP_DECIMAL 2 +#define AUP_HEX 3 +#define AUP_STRING 4 + + +/* data-types for the arbitrary token */ +#define AUR_BYTE 0 +#define AUR_SHORT 1 +#define AUR_LONG 2 + +/* ... and their sizes */ +#define AUR_BYTE_SIZE sizeof(u_char) +#define AUR_SHORT_SIZE sizeof(u_int16_t) +#define AUR_LONG_SIZE sizeof(u_int32_t) + +/* Modifiers for the header token */ +#define PAD_NOTATTR 0x4000 /* nonattributable event */ +#define PAD_FAILURE 0x8000 /* fail audit event */ + + +#define MAX_GROUPS 16 +#define HEADER_VERSION 1 +#define TRAILER_PAD_MAGIC 0xB105 + +/* BSM library calls */ + +int au_open(void); +int au_write(int d, token_t *m); +int au_close(int d, int keep, short event); +token_t *au_to_file(char *file); +token_t *au_to_header(int rec_size, au_event_t e_type, + au_emod_t e_mod); +token_t *au_to_header32(int rec_size, au_event_t e_type, + au_emod_t e_mod); +token_t *au_to_header64(int rec_size, au_event_t e_type, + au_emod_t e_mod); +token_t *au_to_me(void); + +token_t *au_to_arg(char n, char *text, u_int32_t v); +token_t *au_to_arg32(char n, char *text, u_int32_t v); +token_t *au_to_arg64(char n, char *text, u_int64_t v); +token_t *au_to_attr(struct vattr *attr); +token_t *au_to_attr32(struct vattr *attr); +token_t *au_to_attr64(struct vattr *attr); +token_t *au_to_data(char unit_print, char unit_type, + char unit_count, char *p); +token_t *au_to_exit(int retval, int err); +token_t *au_to_groups(int *groups); +token_t *au_to_newgroups(u_int16_t n, gid_t *groups); +token_t *au_to_in_addr(struct in_addr *internet_addr); +token_t *au_to_in_addr_ex(struct in6_addr *internet_addr); +token_t *au_to_ip(struct ip *ip); +token_t *au_to_ipc(char type, int id); +token_t *au_to_ipc_perm(struct ipc_perm *perm); +token_t *au_to_iport(u_int16_t iport); +token_t *au_to_opaque(char *data, u_int16_t bytes); +token_t *au_to_path(char *path); +token_t *au_to_process(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid); +token_t *au_to_process32(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid); +token_t *au_to_process64(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid); +token_t *au_to_process_ex(au_id_t auid, uid_t euid, + gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_process32_ex(au_id_t auid, uid_t euid, + gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_process64_ex(au_id_t auid, uid_t euid, + gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_return(char status, u_int32_t ret); +token_t *au_to_return32(char status, u_int32_t ret); +token_t *au_to_return64(char status, u_int64_t ret); +token_t *au_to_seq(long audit_count); +token_t *au_to_socket(struct socket *so); +token_t *au_to_socket_ex_32(struct socket *so); +token_t *au_to_socket_ex_128(struct socket *so); +token_t *au_to_sock_inet(struct sockaddr_in *so); +token_t *au_to_sock_inet32(struct sockaddr_in *so); +token_t *au_to_sock_inet128(struct sockaddr_in6 *so); +token_t *au_to_sock_unix(struct sockaddr_un *so); +token_t *au_to_subject(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid); +token_t *au_to_subject32(au_id_t auid, uid_t euid, gid_t 
egid,
+ uid_t ruid, gid_t rgid, pid_t pid,
+ au_asid_t sid, au_tid_t *tid);
+token_t *au_to_subject64(au_id_t auid, uid_t euid, gid_t egid,
+ uid_t ruid, gid_t rgid, pid_t pid,
+ au_asid_t sid, au_tid_t *tid);
+token_t *au_to_subject_ex(au_id_t auid, uid_t euid,
+ gid_t egid, uid_t ruid, gid_t rgid, pid_t pid,
+ au_asid_t sid, au_tid_addr_t *tid);
+token_t *au_to_subject32_ex(au_id_t auid, uid_t euid,
+ gid_t egid, uid_t ruid, gid_t rgid, pid_t pid,
+ au_asid_t sid, au_tid_addr_t *tid);
+token_t *au_to_subject64_ex(au_id_t auid, uid_t euid,
+ gid_t egid, uid_t ruid, gid_t rgid, pid_t pid,
+ au_asid_t sid, au_tid_addr_t *tid);
+token_t *au_to_exec_args(const char **);
+token_t *au_to_exec_env(const char **);
+token_t *au_to_text(char *text);
+token_t *au_to_trailer(int rec_size);
+
+#endif /* ! _BSM_TOKEN_H_ */
diff --git a/bsd/sys/bsm_token.save.h b/bsd/sys/bsm_token.save.h
new file mode 100644
index 000000000..c821ae25c
--- /dev/null
+++ b/bsd/sys/bsm_token.save.h
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _BSM_TOKEN_H_
+#define _BSM_TOKEN_H_
+
+#include
+#include
+#include
+#include
+#include
+
+/* We could determine the header and trailer sizes by
+ * defining appropriate structures. We hold off that approach
+ * until we have a consistent way of using structures for all tokens.
+ * This is not straightforward, since these token structures may
+ * contain pointers whose contents' size we do not know
+ * (e.g., text tokens).
+ */
+#define HEADER_SIZE 18
+#define TRAILER_SIZE 7
+
+#define ADD_U_CHAR(loc, val) \
+ do {\
+ *loc = val;\
+ loc += sizeof(u_char);\
+ }while(0)
+
+
+#define ADD_U_INT16(loc, val) \
+ do { \
+ memcpy(loc, (u_char *)&val, sizeof(u_int16_t));\
+ loc += sizeof(u_int16_t); \
+ }while(0)
+
+#define ADD_U_INT32(loc, val) \
+ do { \
+ memcpy(loc, (u_char *)&val, sizeof(u_int32_t));\
+ loc += sizeof(u_int32_t); \
+ }while(0)
+
+#define ADD_U_INT64(loc, val)\
+ do {\
+ memcpy(loc, (u_char *)&val, sizeof(u_int64_t));\
+ loc += sizeof(u_int64_t); \
+ }while(0)
+
+#define ADD_MEM(loc, data, size) \
+ do { \
+ memcpy(loc, data, size);\
+ loc += size;\
+ }while(0)
+
+#define ADD_STRING(loc, data, size) ADD_MEM(loc, data, size)
+
+
+/* Various token id types */
+
+/*
+ * Values inside the comments are not documented in the BSM pages and
+ * have been picked up from the header files
+ */
+
+/*
+ * Values marked as XXX do not have a value defined in the BSM header files
+ */
+
+/*
+ * Control token types
+
+#define AUT_OTHER_FILE ((char)0x11)
+#define AUT_OTHER_FILE32 AUT_OTHER_FILE
+#define AUT_OHEADER ((char)0x12)
+
+ */
+
+#define AUT_INVALID 0x00
+#define AU_FILE_TOKEN 0x11
+#define AU_TRAILER_TOKEN 0x13
+#define AU_HEADER_32_TOKEN 0x14
+#define AU_HEADER_EX_32_TOKEN 0x15
+
+
+/*
+ * Data token types
+#define AUT_SERVER ((char)0x25)
+#define AUT_SERVER32 AUT_SERVER
+ */
+
+#define AU_DATA_TOKEN 0x21
+#define AU_ARB_TOKEN AU_DATA_TOKEN
+#define AU_IPC_TOKEN 0x22
+#define AU_PATH_TOKEN 0x23
+#define AU_SUBJECT_32_TOKEN 0x24
+#define AU_PROCESS_32_TOKEN 0x26
+#define AU_RETURN_32_TOKEN 0x27
+#define AU_TEXT_TOKEN 0x28
+#define AU_OPAQUE_TOKEN 0x29
+#define AU_IN_ADDR_TOKEN 0x2A
+#define AU_IP_TOKEN 0x2B
+#define AU_IPORT_TOKEN 0x2C
+#define AU_ARG32_TOKEN 0x2D
+#define AU_SOCK_TOKEN 0x2E
+#define AU_SEQ_TOKEN 0x2F
+
+/*
+ * Modifier token types
+
+#define AUT_ACL ((char)0x30)
+#define AUT_LABEL ((char)0x33)
+#define AUT_GROUPS ((char)0x34)
+#define AUT_ILABEL ((char)0x35)
+#define AUT_SLABEL ((char)0x36)
+#define AUT_CLEAR ((char)0x37)
+#define AUT_PRIV ((char)0x38)
+#define AUT_UPRIV ((char)0x39)
+#define AUT_LIAISON ((char)0x3A)
+
+ */
+
+#define AU_ATTR_TOKEN 0x31
+#define AU_IPCPERM_TOKEN 0x32
+#define AU_NEWGROUPS_TOKEN 0x3B
+#define AU_EXEC_ARG_TOKEN 0x3C
+#define AU_EXEC_ENV_TOKEN 0x3D
+#define AU_ATTR32_TOKEN 0x3E
+
+
+/*
+ * Command token types
+ */
+
+#define AU_CMD_TOKEN 0x51
+#define AU_EXIT_TOKEN 0x52
+
+
+
+/*
+ * Miscellaneous token types
+
+#define AUT_HOST ((char)0x70)
+
+ */
+
+/*
+ * 64bit token types
+
+#define AUT_SERVER64 ((char)0x76)
+#define AUT_OTHER_FILE64 ((char)0x78)
+
+ */
+
+#define AU_ARG64_TOKEN 0x71
+#define AU_RETURN_64_TOKEN 0x72
+#define AU_ATTR64_TOKEN 0x73
+#define AU_HEADER_64_TOKEN 0x74
+#define AU_SUBJECT_64_TOKEN 0x75
+#define AU_PROCESS_64_TOKEN 0x77
+
+
+
+/*
+ * Extended network address token types
+ */
+
+#define AU_HEADER_EX_64_TOKEN 0x79
+#define AU_SUBJECT_32_EX_TOKEN 0x7a
+#define AU_PROCESS_32_EX_TOKEN 0x7b
+#define AU_SUBJECT_64_EX_TOKEN 0x7c
+#define AU_PROCESS_64_EX_TOKEN 0x7d
+#define AU_IN_ADDR_EX_TOKEN 0x7e
+#define AU_SOCK_EX32_TOKEN 0x7f
+#define AU_SOCK_EX128_TOKEN AUT_INVALID /*XXX*/
+#define AU_IP_EX_TOKEN AUT_INVALID /*XXX*/
+
+
+/*
+ * The values for the following token ids are not
+ * defined by BSM
+ */
+#define AU_SOCK_INET_32_TOKEN 0x80 /*XXX*/
+#define AU_SOCK_INET_128_TOKEN 0x81 /*XXX*/
+
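+/*
+ * Illustrative sketch (hypothetical, not part of this interface): the
+ * ADD_* macros above advance their destination cursor as they copy, so a
+ * token body can be serialized as straight-line code. Using TRAILER_SIZE
+ * above and TRAILER_PAD_MAGIC just below (with rec_size standing for the
+ * caller's total record length), a trailer token could be laid out as:
+ *
+ *     u_char buf[TRAILER_SIZE], *dptr = buf;
+ *     u_int16_t magic = TRAILER_PAD_MAGIC;
+ *     u_int32_t size = rec_size;
+ *     ADD_U_CHAR(dptr, AU_TRAILER_TOKEN);
+ *     ADD_U_INT16(dptr, magic);
+ *     ADD_U_INT32(dptr, size);
+ *
+ * which accounts for the 7 bytes of TRAILER_SIZE (1 + 2 + 4).
+ */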
+/* print values for the arbitrary token */ +#define AUP_BINARY 0 +#define AUP_OCTAL 1 +#define AUP_DECIMAL 2 +#define AUP_HEX 3 +#define AUP_STRING 4 + + +/* data-types for the arbitrary token */ +#define AUR_BYTE 0 +#define AUR_SHORT 1 +#define AUR_LONG 2 + +/* ... and their sizes */ +#define AUR_BYTE_SIZE sizeof(u_char) +#define AUR_SHORT_SIZE sizeof(u_int16_t) +#define AUR_LONG_SIZE sizeof(u_int32_t) + +/* Modifiers for the header token */ +#define PAD_NOTATTR 0x4000 /* nonattributable event */ +#define PAD_FAILURE 0x8000 /* fail audit event */ + + +#define MAX_GROUPS 16 +#define HEADER_VERSION 1 +#define TRAILER_PAD_MAGIC 0xB105 + +/* BSM system calls */ + +#ifdef KERNEL +#else +int au_open(void); +int au_write(int d, token_t *m); +int au_close(int d, int keep, short event); +token_t *au_to_file(char *file); +token_t *au_to_header(int rec_size, au_event_t e_type, + au_emod_t e_mod); +token_t *au_to_header32(int rec_size, au_event_t e_type, + au_emod_t e_mod); +token_t *au_to_header64(int rec_size, au_event_t e_type, + au_emod_t e_mod); +token_t *au_to_me(void); +#endif /* !KERNEL */ + +token_t *au_to_arg(char n, char *text, u_int32_t v); +token_t *au_to_arg32(char n, char *text, u_int32_t v); +token_t *au_to_arg64(char n, char *text, u_int64_t v); +token_t *au_to_attr(struct vattr *attr); +token_t *au_to_attr32(struct vattr *attr); +token_t *au_to_attr64(struct vattr *attr); +token_t *au_to_data(char unit_print, char unit_type, + char unit_count, char *p); +token_t *au_to_exit(int retval, int err); +token_t *au_to_groups(int *groups); +token_t *au_to_newgroups(u_int16_t n, gid_t *groups); +token_t *au_to_in_addr(struct in_addr *internet_addr); +token_t *au_to_in_addr_ex(struct in6_addr *internet_addr); +token_t *au_to_ip(struct ip *ip); +token_t *au_to_ipc(char type, int id); +token_t *au_to_ipc_perm(struct ipc_perm *perm); +token_t *au_to_iport(u_int16_t iport); +token_t *au_to_opaque(char *data, u_int16_t bytes); +token_t *au_to_path(char *path); +token_t *au_to_process(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid); +token_t *au_to_process32(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid); +token_t *au_to_process64(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid); +token_t *au_to_process_ex(au_id_t auid, uid_t euid, + gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_process32_ex(au_id_t auid, uid_t euid, + gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_process64_ex(au_id_t auid, uid_t euid, + gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_return(char status, u_int32_t ret); +token_t *au_to_return32(char status, u_int32_t ret); +token_t *au_to_return64(char status, u_int64_t ret); +token_t *au_to_seq(long audit_count); +token_t *au_to_socket(struct socket *so); +token_t *au_to_socket_ex_32(struct socket *so); +token_t *au_to_socket_ex_128(struct socket *so); +token_t *au_to_sock_inet(struct sockaddr_in *so); +token_t *au_to_sock_inet32(struct sockaddr_in *so); +token_t *au_to_sock_inet128(struct sockaddr_in6 *so); +token_t *au_to_subject(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid); +token_t *au_to_subject32(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + 
au_asid_t sid, au_tid_t *tid); +token_t *au_to_subject64(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_t *tid); +token_t *au_to_subject_ex(au_id_t auid, uid_t euid, + gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_subject32_ex(au_id_t auid, uid_t euid, + gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_subject64_ex(au_id_t auid, uid_t euid, + gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, + au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_exec_args(const char **); +token_t *au_to_exec_env(const char **); +token_t *au_to_text(char *text); +token_t *au_to_trailer(int rec_size); + +#endif /* ! _BSM_TOKEN_H_ */ diff --git a/bsd/sys/bsm_uevents.h b/bsd/sys/bsm_uevents.h new file mode 100644 index 000000000..fcf8df983 --- /dev/null +++ b/bsd/sys/bsm_uevents.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _BSM_UEVENTS_H_
+#define _BSM_UEVENTS_H_
+
+/*
+ * User-level audit event numbers
+ *
+ * Range of audit event numbers:
+ * 0 Reserved, invalid
+ * 1 - 2047 Reserved for kernel events
+ * 2048 - 32767 Defined by BSM for user events
+ * 32768 - 36864 Reserved for Mac OS X applications
+ * 36865 - 65535 Reserved for applications
+ *
+ */
+#define AUE_at_create 6144
+#define AUE_at_delete 6145
+#define AUE_at_perm 6146
+#define AUE_cron_invoke 6147
+#define AUE_crontab_create 6148
+#define AUE_crontab_delete 6149
+#define AUE_crontab_perm 6150
+#define AUE_inetd_connect 6151
+#define AUE_login 6152
+#define AUE_logout 6153
+#define AUE_telnet 6154
+#define AUE_rlogin 6155
+#define AUE_mountd_mount 6156
+#define AUE_mountd_umount 6157
+#define AUE_rshd 6158
+#define AUE_su 6159
+#define AUE_halt 6160
+#define AUE_reboot 6161
+#define AUE_rexecd 6162
+#define AUE_passwd 6163
+#define AUE_rexd 6164
+#define AUE_ftpd 6165
+#define AUE_init 6166
+#define AUE_uadmin 6167
+#define AUE_shutdown 6168
+#define AUE_poweroff 6169
+#define AUE_crontab_mod 6170
+#define AUE_allocate_succ 6200
+#define AUE_allocate_fail 6201
+#define AUE_deallocate_succ 6202
+#define AUE_deallocate_fail 6203
+#define AUE_listdevice_succ 6205
+#define AUE_listdevice_fail 6206
+#define AUE_create_user 6207
+#define AUE_modify_user 6208
+#define AUE_delete_user 6209
+#define AUE_disable_user 6210
+#define AUE_enable_user 6211
+
+#endif /* !_BSM_UEVENTS_H_ */
diff --git a/bsd/sys/buf.h b/bsd/sys/buf.h
index 9fb7de80a..54323d44f 100644
--- a/bsd/sys/buf.h
+++ b/bsd/sys/buf.h
@@ -251,6 +251,8 @@ int breada __P((struct vnode *, daddr_t, int, daddr_t, int,
 struct ucred *, struct buf **));
 int breadn __P((struct vnode *, daddr_t, int, daddr_t *, int *, int,
 struct ucred *, struct buf **));
+int meta_breadn __P((struct vnode *, daddr_t, int, daddr_t *, int *, int,
+ struct ucred *, struct buf **));
 void brelse __P((struct buf *));
 void bremfree __P((struct buf *));
 void bufinit __P((void));
diff --git a/bsd/sys/cdefs.h b/bsd/sys/cdefs.h
index af2221256..409275e3a 100644
--- a/bsd/sys/cdefs.h
+++ b/bsd/sys/cdefs.h
@@ -133,10 +133,18 @@
 */
 #if defined(__MWERKS__) && (__MWERKS__ > 0x2400)
 /* newer Metrowerks compilers support __attribute__() */
-#elif !defined(__GNUC__) || __GNUC__ < 2 || \
- (__GNUC__ == 2 && __GNUC_MINOR__ < 5)
+#elif __GNUC__ > 2 || __GNUC__ == 2 && __GNUC_MINOR__ >= 5
+#define __dead2 __attribute__((__noreturn__))
+#define __pure2 __attribute__((__const__))
+#if __GNUC__ == 2 && __GNUC_MINOR__ >= 5 && __GNUC_MINOR__ < 7
+#define __unused /* no attribute */
+#else
+#define __unused __attribute__((__unused__))
+#endif
+#else
 #define __attribute__(x) /* delete __attribute__ if non-gcc or gcc1 */
 #if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* __dead and __pure are deprecated. 
Use __dead2 and __pure2 instead */ #define __dead __volatile #define __pure __const #endif @@ -147,9 +155,13 @@ #define __dead #define __pure #endif +#ifndef __dead2 +#define __dead2 +#define __pure2 +#define __unused +#endif -#define __IDSTRING(name,string) \ - static const char name[] __attribute__((__unused__)) = string +#define __IDSTRING(name,string) static const char name[] __unused = string #ifndef __COPYRIGHT #define __COPYRIGHT(s) __IDSTRING(copyright,s) diff --git a/bsd/sys/conf.h b/bsd/sys/conf.h index 0f2305936..ef86f5fbb 100644 --- a/bsd/sys/conf.h +++ b/bsd/sys/conf.h @@ -67,6 +67,7 @@ #define _SYS_CONF_H_ 1 #include +#include /* * Definitions of device driver entry switches @@ -106,6 +107,13 @@ typedef int d_poll_t __P((dev_t dev, int events, struct proc *p)); #define d_read_t read_write_fcn_t #define d_write_t read_write_fcn_t #define d_ioctl_t ioctl_fcn_t +#define d_stop_t stop_fcn_t +#define d_reset_t reset_fcn_t +#define d_select_t select_fcn_t +#define d_mmap_t mmap_fcn_t +#define d_strategy_t strategy_fcn_t +#define d_getc_t getc_fcn_t +#define d_putc_t putc_fcn_t __BEGIN_DECLS int enodev (); /* avoid actual prototype for multiple use */ @@ -201,7 +209,7 @@ extern struct cdevsw cdevsw[]; { \ eno_opcl, eno_opcl, eno_rdwrt, eno_rdwrt, \ eno_ioctl, eno_stop, eno_reset, 0, \ - seltrue, eno_mmap, eno_strat, eno_getc, \ + (select_fcn_t *)seltrue, eno_mmap, eno_strat, eno_getc, \ eno_putc, 0 \ } #endif /* KERNEL */ diff --git a/bsd/sys/disk.h b/bsd/sys/disk.h index 8edb0f368..203811181 100644 --- a/bsd/sys/disk.h +++ b/bsd/sys/disk.h @@ -29,33 +29,82 @@ #include #include +/* + * Definitions + * + * ioctl description + * -------------------------------- -------------------------------------------- + * DKIOCEJECT eject media + * DKIOCSYNCHRONIZECACHE flush media + * + * DKIOCFORMAT format media + * DKIOCGETFORMATCAPACITIES get media's formattable capacities + * + * DKIOCGETBLOCKSIZE get media's block size + * DKIOCGETBLOCKCOUNT get media's block count + * DKIOCGETFIRMWAREPATH get media's firmware path + * + * DKIOCISFORMATTED is media formatted? + * DKIOCISWRITABLE is media writable? 
+ * + * DKIOCGETMAXBLOCKCOUNTREAD get maximum block count for reads + * DKIOCGETMAXBLOCKCOUNTWRITE get maximum block count for writes + * DKIOCGETMAXBYTECOUNTREAD get maximum byte count for reads + * DKIOCGETMAXBYTECOUNTWRITE get maximum byte count for writes + * DKIOCGETMAXSEGMENTCOUNTREAD get maximum segment count for reads + * DKIOCGETMAXSEGMENTCOUNTWRITE get maximum segment count for writes + * DKIOCGETMAXSEGMENTBYTECOUNTREAD get maximum segment byte count for reads + * DKIOCGETMAXSEGMENTBYTECOUNTWRITE get maximum segment byte count for writes + */ + typedef struct { char path[128]; } dk_firmware_path_t; -#define DKIOCEJECT _IO('d', 21) -#define DKIOCSYNCHRONIZECACHE _IO('d', 22) +typedef struct +{ + u_int64_t blockCount; + u_int32_t blockSize; + + u_int8_t reserved0096[4]; /* reserved, clear to zero */ +} dk_format_capacity_t; + +typedef struct +{ + dk_format_capacity_t * capacities; + u_int32_t capacitiesCount; /* use zero to probe count */ + + u_int8_t reserved0064[8]; /* reserved, clear to zero */ +} dk_format_capacities_t; + +#define DKIOCEJECT _IO('d', 21) +#define DKIOCSYNCHRONIZECACHE _IO('d', 22) + +#define DKIOCFORMAT _IOW('d', 26, dk_format_capacity_t) +#define DKIOCGETFORMATCAPACITIES _IOWR('d', 26, dk_format_capacities_t) -#define DKIOCGETBLOCKSIZE _IOR('d', 24, u_int32_t) -#define DKIOCGETBLOCKCOUNT _IOR('d', 25, u_int64_t) -#define DKIOCGETBLOCKCOUNT32 _IOR('d', 25, u_int32_t) -#define DKIOCGETFIRMWAREPATH _IOR('d', 28, dk_firmware_path_t) +#define DKIOCGETBLOCKSIZE _IOR('d', 24, u_int32_t) +#define DKIOCGETBLOCKCOUNT _IOR('d', 25, u_int64_t) +#define DKIOCGETFIRMWAREPATH _IOR('d', 28, dk_firmware_path_t) -#define DKIOCISFORMATTED _IOR('d', 23, u_int32_t) -#define DKIOCISWRITABLE _IOR('d', 29, u_int32_t) +#define DKIOCISFORMATTED _IOR('d', 23, u_int32_t) +#define DKIOCISWRITABLE _IOR('d', 29, u_int32_t) -#define DKIOCGETMAXBLOCKCOUNTREAD _IOR('d', 64, u_int64_t) -#define DKIOCGETMAXBLOCKCOUNTWRITE _IOR('d', 65, u_int64_t) +#define DKIOCGETMAXBLOCKCOUNTREAD _IOR('d', 64, u_int64_t) +#define DKIOCGETMAXBLOCKCOUNTWRITE _IOR('d', 65, u_int64_t) #define DKIOCGETMAXBYTECOUNTREAD _IOR('d', 70, u_int64_t) #define DKIOCGETMAXBYTECOUNTWRITE _IOR('d', 71, u_int64_t) -#define DKIOCGETMAXSEGMENTCOUNTREAD _IOR('d', 66, u_int64_t) -#define DKIOCGETMAXSEGMENTCOUNTWRITE _IOR('d', 67, u_int64_t) +#define DKIOCGETMAXSEGMENTCOUNTREAD _IOR('d', 66, u_int64_t) +#define DKIOCGETMAXSEGMENTCOUNTWRITE _IOR('d', 67, u_int64_t) #define DKIOCGETMAXSEGMENTBYTECOUNTREAD _IOR('d', 68, u_int64_t) #define DKIOCGETMAXSEGMENTBYTECOUNTWRITE _IOR('d', 69, u_int64_t) #ifdef KERNEL -#define DKIOCSETBLOCKSIZE _IOW('d', 24, u_int32_t) +#define DKIOCGETISVIRTUAL _IOR('d', 72, u_int32_t) +#define DKIOCGETBLOCKCOUNT32 _IOR('d', 25, u_int32_t) +#define DKIOCSETBLOCKSIZE _IOW('d', 24, u_int32_t) +#define DKIOCGETBSDUNIT _IOR('d', 27, u_int32_t) #endif /* KERNEL */ #endif /* _SYS_DISK_H_ */ diff --git a/bsd/sys/errno.h b/bsd/sys/errno.h index 4318fc90a..b07007d2d 100644 --- a/bsd/sys/errno.h +++ b/bsd/sys/errno.h @@ -208,7 +208,13 @@ __END_DECLS #define EBADMACHO 88 /* Malformed Macho file */ #define ECANCELED 89 /* Operation canceled */ -#define ELAST 89 /* Must be equal largest errno */ + +#define EIDRM 90 /* Identifier removed */ +#define ENOMSG 91 /* No message of desired type */ +#define EILSEQ 92 /* Illegal byte sequence */ +#define ENOATTR 93 /* Attribute not found */ + +#define ELAST 93 /* Must be equal largest errno */ #endif /* _POSIX_SOURCE */ #ifdef KERNEL diff --git a/bsd/sys/event.h b/bsd/sys/event.h new 
file mode 100644 index 000000000..c10243c4f --- /dev/null +++ b/bsd/sys/event.h @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1999,2000,2001 Jonathan Lemon + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD: src/sys/sys/event.h,v 1.5.2.5 2001/12/14 19:21:22 jlemon Exp $ + */ + +#ifndef _SYS_EVENT_H_ +#define _SYS_EVENT_H_ + +#define EVFILT_READ (-1) +#define EVFILT_WRITE (-2) +#define EVFILT_AIO (-3) /* attached to aio requests */ +#define EVFILT_VNODE (-4) /* attached to vnodes */ +#define EVFILT_PROC (-5) /* attached to struct proc */ +#define EVFILT_SIGNAL (-6) /* attached to struct proc */ +#define EVFILT_TIMER (-7) /* timers */ +#define EVFILT_MACHPORT (-8) /* Mach ports */ +#define EVFILT_FS (-9) /* Filesystem events */ + +#define EVFILT_SYSCOUNT 9 + +struct kevent { + uintptr_t ident; /* identifier for this event */ + short filter; /* filter for event */ + u_short flags; + u_int fflags; + intptr_t data; + void *udata; /* opaque user data identifier */ +}; + +#define EV_SET(kevp, a, b, c, d, e, f) do { \ + struct kevent *__kevp__ = (kevp); \ + __kevp__->ident = (a); \ + __kevp__->filter = (b); \ + __kevp__->flags = (c); \ + __kevp__->fflags = (d); \ + __kevp__->data = (e); \ + __kevp__->udata = (f); \ +} while(0) + +/* actions */ +#define EV_ADD 0x0001 /* add event to kq (implies enable) */ +#define EV_DELETE 0x0002 /* delete event from kq */ +#define EV_ENABLE 0x0004 /* enable event */ +#define EV_DISABLE 0x0008 /* disable event (not reported) */ + +/* flags */ +#define EV_ONESHOT 0x0010 /* only report one occurrence */ +#define EV_CLEAR 0x0020 /* clear event state after reporting */ + +#define EV_SYSFLAGS 0xF000 /* reserved by system */ +#define EV_FLAG1 0x2000 /* filter-specific flag */ + +/* returned values */ +#define EV_EOF 0x8000 /* EOF detected */ +#define EV_ERROR 0x4000 /* error, data contains errno */ + +/* + * data/hint flags for EVFILT_{READ|WRITE}, shared with userspace + */ +#define NOTE_LOWAT 0x0001 /* low water mark */ + +/* + * data/hint flags for EVFILT_VNODE, shared with userspace + */ +#define NOTE_DELETE 0x0001 /* vnode was removed */ +#define NOTE_WRITE 0x0002 /* data contents changed */ +#define NOTE_EXTEND 0x0004 /* size increased */ +#define NOTE_ATTRIB 0x0008 /* attributes changed */ +#define NOTE_LINK 0x0010 /* link count changed */ +#define NOTE_RENAME 0x0020 /* vnode was renamed */ +#define NOTE_REVOKE 0x0040 /* vnode access was revoked */ + +/* + * data/hint flags for EVFILT_PROC, shared with userspace + */ +#define NOTE_EXIT 0x80000000 /* process exited */ +#define NOTE_FORK 0x40000000 /* process forked */ +#define NOTE_EXEC 0x20000000 /* process exec'd */ +#define NOTE_PCTRLMASK 0xf0000000 /* mask for hint bits */ +#define NOTE_PDATAMASK 0x000fffff /* mask for pid */ + +/* additional flags for EVFILT_PROC */ +#define NOTE_TRACK 0x00000001 /* follow across forks */ +#define NOTE_TRACKERR 0x00000002 /* could not track child */ +#define NOTE_CHILD 0x00000004 /* am a child process */ + + +#ifdef KERNEL_PRIVATE + +#include + +#ifdef MALLOC_DECLARE +MALLOC_DECLARE(M_KQUEUE); +#endif + +/* + * Flag indicating hint is a signal. Used by EVFILT_SIGNAL, and also + * shared by EVFILT_PROC (all knotes attached to p->p_klist) + */ +#define NOTE_SIGNAL 0x08000000 + +struct knote { + /* JMM - line these up with wait_queue_link */ +#if 0 + struct wait_queue_link kn_wql; /* wait queue linkage */ +#else + SLIST_ENTRY(knote) kn_selnext; /* klist element chain */ + void *kn_type; /* knote vs. 
thread */ + struct klist *kn_list; /* pointer to list we are on */ + SLIST_ENTRY(knote) kn_link; /* members of kqueue */ + struct kqueue *kn_kq; /* which kqueue we are on */ +#endif + TAILQ_ENTRY(knote) kn_tqe; /* ...ready to process */ + union { + struct file *p_fp; /* file data pointer */ + struct proc *p_proc; /* proc pointer */ + } kn_ptr; + struct filterops *kn_fop; + int kn_status; + int kn_sfflags; /* saved filter flags */ + struct kevent kn_kevent; + intptr_t kn_sdata; /* saved data field */ + caddr_t kn_hook; +#define KN_ACTIVE 0x01 /* event has been triggered */ +#define KN_QUEUED 0x02 /* event is on queue */ +#define KN_DISABLED 0x04 /* event is disabled */ +#define KN_DETACHED 0x08 /* knote is detached */ + +#define kn_id kn_kevent.ident +#define kn_filter kn_kevent.filter +#define kn_flags kn_kevent.flags +#define kn_fflags kn_kevent.fflags +#define kn_data kn_kevent.data +#define kn_fp kn_ptr.p_fp +}; + +struct filterops { + int f_isfd; /* true if ident == filedescriptor */ + int (*f_attach) __P((struct knote *kn)); + void (*f_detach) __P((struct knote *kn)); + int (*f_event) __P((struct knote *kn, long hint)); +}; + +struct proc; + +SLIST_HEAD(klist, knote); +extern void klist_init(struct klist *list); + +#define KNOTE(list, hint) knote(list, hint) +#define KNOTE_ATTACH(list, kn) knote_attach(list, kn) +#define KNOTE_DETACH(list, kn) knote_detach(list, kn) + + +extern void knote(struct klist *list, long hint); +extern int knote_attach(struct klist *list, struct knote *kn); +extern int knote_detach(struct klist *list, struct knote *kn); +extern void knote_remove(struct proc *p, struct klist *list); +extern void knote_fdclose(struct proc *p, int fd); +extern int kqueue_register(struct kqueue *kq, + struct kevent *kev, struct proc *p); + +#else /* !KERNEL_PRIVATE */ + +/* + * This is currently visible to userland to work around broken + * programs which pull in or . + */ +#include +struct knote; +SLIST_HEAD(klist, knote); + +#include +struct timespec; + +__BEGIN_DECLS +int kqueue __P((void)); +int kevent __P((int kq, const struct kevent *changelist, int nchanges, + struct kevent *eventlist, int nevents, + const struct timespec *timeout)); +__END_DECLS + +#include + +#ifdef __APPLE_API_PRIVATE +#include + +__BEGIN_DECLS +mach_port_t kqueue_portset_np __P((int kq)); +int kqueue_from_portset_np __P((mach_port_t portset)); +__END_DECLS +#endif /* __APPLE_API_PRIVATE */ + +#endif /* !KERNEL_PRIVATE */ + +#endif /* !_SYS_EVENT_H_ */ diff --git a/bsd/ufs/mfs/mfsiom.h b/bsd/sys/eventvar.h similarity index 64% rename from bsd/ufs/mfs/mfsiom.h rename to bsd/sys/eventvar.h index 2f18cb18d..41f488951 100644 --- a/bsd/ufs/mfs/mfsiom.h +++ b/bsd/sys/eventvar.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -22,10 +22,9 @@ * * @APPLE_LICENSE_HEADER_END@ */ -/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ -/* - * Copyright (c) 1989, 1993 - * The Regents of the University of California. All rights reserved. +/*- + * Copyright (c) 1999,2000 Jonathan Lemon + * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -35,18 +34,11 @@ * 2. 
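[Illustrative sketch, not part of the patch: the userland surface of the new <sys/event.h> is the kqueue()/kevent() pair plus the EV_SET() initializer. A minimal example that watches a vnode for writes and deletion, assuming a hypothetical /tmp/watched path; O_EVTONLY is the open flag added in this patch's fcntl.h hunk further down.]

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    struct kevent change, event;
    int kq = kqueue();
    int fd = open("/tmp/watched", O_EVTONLY);   /* hypothetical path */

    if (kq < 0 || fd < 0)
        return 1;

    /* EV_CLEAR resets the reported state after each delivery, so the
     * watch keeps firing rather than reporting a single level. */
    EV_SET(&change, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
        NOTE_WRITE | NOTE_DELETE, 0, NULL);

    /* Block until one of the registered vnode events fires. */
    if (kevent(kq, &change, 1, &event, 1, NULL) > 0) {
        if (event.fflags & NOTE_WRITE)
            printf("file written\n");
        if (event.fflags & NOTE_DELETE)
            printf("file deleted\n");
    }
    close(fd);
    close(kq);
    return 0;
}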
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -55,16 +47,32 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * @(#)mfsiom.h 8.1 (Berkeley) 6/11/93 + * $FreeBSD: src/sys/sys/eventvar.h,v 1.1.2.2 2000/07/18 21:49:12 jlemon Exp $ */ -#ifndef __UFS_MFS_MFSIOM_H__ -#define __UFS_MFS_MFSIOM_H__ +#ifndef _SYS_EVENTVAR_H_ +#define _SYS_EVENTVAR_H_ + +#include +#include + +#define KQ_NEVENTS 16 /* minimize copy{in,out} calls */ +#define KQEXTENT 256 /* linear growth by this amount */ -#include +struct kqueue { +#if 0 + /* threads, member notes, and notes for us in parent sets */ + struct wait_queue_set kq_wqs; +#else + int kq_state; + int kq_lock; /* space for a lock */ + TAILQ_HEAD(kqlist, knote) kq_head; /* list of pending events */ + int kq_count; /* number of pending events */ +#endif + struct selinfo kq_sel; /* JMM - parent set at some point */ + struct filedesc *kq_fdp; +#define KQ_SEL 0x01 +#define KQ_SLEEP 0x02 +}; -#ifdef __APPLE_API_OBSOLETE -#define MFS_MAPREG (MAXPHYS/NBPG + 2) /* Kernel mapping pte's */ -#define MFS_MAPSIZE 10 /* Size of alloc map for pte's */ -#endif /* __APPLE_API_OBSOLETE */ -#endif /* __UFS_MFS_MFSIOM_H__ */ +#endif /* !_SYS_EVENTVAR_H_ */ diff --git a/bsd/sys/fcntl.h b/bsd/sys/fcntl.h index ba9033ac1..64d7fc292 100644 --- a/bsd/sys/fcntl.h +++ b/bsd/sys/fcntl.h @@ -119,6 +119,9 @@ #define FDEFER 0x2000 /* defer for next gc pass */ #define FHASLOCK 0x4000 /* descriptor holds advisory lock */ #endif +#ifndef _POSIX_SOURCE +#define O_EVTONLY 0x8000 /* descriptor requested for event notifications only */ +#endif /* defined by POSIX 1003.1; BSD default, so no bit required */ #define O_NOCTTY 0 /* don't assign controlling terminal */ @@ -175,6 +178,7 @@ #define F_GETLK 7 /* get record locking information */ #define F_SETLK 8 /* set record locking information */ #define F_SETLKW 9 /* F_SETLK; wait if blocked */ +#define F_CHKCLEAN 41 /* Used for regression test */ #define F_PREALLOCATE 42 /* Preallocate storage */ #define F_SETSIZE 43 /* Truncate a file without zeroing space */ #define F_RDADVISE 44 /* Issue an advisory read async with no copy to user */ @@ -183,6 +187,8 @@ #define F_WRITEBOOTSTRAP 47 /* Write bootstrap on disk */ #define F_NOCACHE 48 /* turning data caching 
off/on */ #define F_LOG2PHYS 49 /* file offset to device offset */ +#define F_GETPATH 50 /* return the full path of the fd */ +#define F_FULLFSYNC 51 /* fsync + ask the drive to flush to the media */ /* file descriptor flags (F_GETFD, F_SETFD) */ #define FD_CLOEXEC 1 /* close-on-exec flag */ diff --git a/bsd/sys/file.h b/bsd/sys/file.h index 8a29f42f6..883d5f23a 100644 --- a/bsd/sys/file.h +++ b/bsd/sys/file.h @@ -72,6 +72,7 @@ struct proc; struct uio; +struct knote; #ifdef __APPLE_API_UNSTABLE /* @@ -85,6 +86,7 @@ struct file { #define DTYPE_SOCKET 2 /* communications endpoint */ #define DTYPE_PSXSHM 3 /* POSIX Shared memory */ #define DTYPE_PSXSEM 4 /* POSIX Semaphores */ +#define DTYPE_KQUEUE 5 /* kqueue */ short f_type; /* descriptor type */ short f_count; /* reference count */ short f_msgcount; /* references from message queue */ @@ -102,6 +104,8 @@ struct file { int (*fo_select) __P((struct file *fp, int which, void *wql, struct proc *p)); int (*fo_close) __P((struct file *fp, struct proc *p)); + int (*fo_kqfilter) __P((struct file *fp, struct knote *kn, + struct proc *p)); } *f_ops; off_t f_offset; caddr_t f_data; /* vnode or socket or SHM or semaphore */ @@ -128,6 +132,8 @@ static __inline int fo_ioctl __P((struct file *fp, u_long com, caddr_t data, static __inline int fo_select __P((struct file *fp, int which, void *wql, struct proc *p)); static __inline int fo_close __P((struct file *fp, struct proc *p)); +static __inline int fo_kqfilter __P((struct file *fp, struct knote *kn, + struct proc *p)); static __inline int fo_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct proc *p) @@ -180,6 +186,13 @@ fo_close(struct file *fp, struct proc *p) return ((*fp->f_ops->fo_close)(fp, p)); } + +static __inline int +fo_kqfilter(struct file *fp, struct knote *kn, struct proc *p) +{ + return ((*fp->f_ops->fo_kqfilter)(fp, kn, p)); +} + __END_DECLS #endif /* __APPLE_API_UNSTABLE */ diff --git a/bsd/sys/filedesc.h b/bsd/sys/filedesc.h index 63a254366..8e9f04fab 100644 --- a/bsd/sys/filedesc.h +++ b/bsd/sys/filedesc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -81,6 +81,8 @@ #define NDFILE 25 /* 125 bytes */ #define NDEXTENT 50 /* 250 bytes in 256-byte alloc. */ +struct klist; + struct filedesc { struct file **fd_ofiles; /* file structures for open files */ char *fd_ofileflags; /* per-process open file flags */ @@ -91,6 +93,11 @@ struct filedesc { u_short fd_freefile; /* approx. 
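[Illustrative sketch, not part of the patch: exercising the two new fcntl commands from the hunk above, with a hypothetical /tmp/data path. F_FULLFSYNC performs an fsync and then asks the drive to flush its cache to the media; F_GETPATH fills in the full path backing the descriptor and expects a MAXPATHLEN-sized buffer.]

#include <sys/param.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char path[MAXPATHLEN];
    int fd = open("/tmp/data", O_WRONLY | O_CREAT, 0644);  /* hypothetical */

    if (fd < 0)
        return 1;
    write(fd, "x", 1);

    if (fcntl(fd, F_FULLFSYNC) < 0)
        perror("F_FULLFSYNC");          /* not every drive honors the flush */

    if (fcntl(fd, F_GETPATH, path) == 0)
        printf("fd resolves to %s\n", path);

    close(fd);
    return 0;
}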
next free file */ u_short fd_cmask; /* mask for file creation */ u_short fd_refcnt; /* reference count */ + + int fd_knlistsize; /* size of knlist */ + struct klist *fd_knlist; /* list of attached knotes */ + u_long fd_knhashmask; /* size of knhash */ + struct klist *fd_knhash; /* hash table for attached knotes */ }; /* diff --git a/bsd/sys/kdebug.h b/bsd/sys/kdebug.h index 646724b35..9de3967d6 100644 --- a/bsd/sys/kdebug.h +++ b/bsd/sys/kdebug.h @@ -39,6 +39,7 @@ __BEGIN_DECLS #ifdef __APPLE_API_UNSTABLE #include +#include #if defined(KERNEL_BUILD) #include #endif /* KERNEL_BUILD */ @@ -79,6 +80,7 @@ __BEGIN_DECLS #define DBG_DLIL 8 #define DBG_MISC 20 #define DBG_DYLD 31 +#define DBG_QT 32 #define DBG_MIG 255 /* **** The Kernel Debug Sub Classes for Mach (DBG_MACH) **** */ @@ -91,6 +93,7 @@ __BEGIN_DECLS #define DBG_MACH_EXCP_DECI 0x09 /* Decrementer Interrupt */ #define DBG_MACH_EXCP_SC 0x0C /* System Calls */ #define DBG_MACH_EXCP_TRACE 0x0D /* Trace exception */ +#define DBG_MACH_EXCP_EMUL 0x0E /* Instruction emulated */ #define DBG_MACH_IHDLR 0x10 /* Interrupt Handlers */ #define DBG_MACH_IPC 0x20 /* Inter Process Comm */ #define DBG_MACH_VM 0x30 /* Virtual Memory */ @@ -107,6 +110,7 @@ __BEGIN_DECLS #define MACH_MAKE_RUNNABLE 0x6 /* make thread runnable */ #define MACH_PROMOTE 0x7 /* promoted due to resource */ #define MACH_DEMOTE 0x8 /* promotion undone */ +#define MACH_PREBLOCK_MUTEX 0x9 /* preblocking on mutex */ /* **** The Kernel Debug Sub Classes for Network (DBG_NETWORK) **** */ #define DBG_NETIP 1 /* Internet Protocol */ @@ -132,6 +136,7 @@ __BEGIN_DECLS #define DBG_NETAFP 107 /* Apple Filing Protocol */ #define DBG_NETRTMP 108 /* Routing Table Maintenance Protocol */ #define DBG_NETAURP 109 /* Apple Update Routing Protocol */ +#define DBG_NETIPSEC 128 /* IPsec Protocol */ /* **** The Kernel Debug Sub Classes for IOKIT (DBG_IOKIT) **** */ #define DBG_IOSCSI 1 /* SCSI */ @@ -172,9 +177,13 @@ __BEGIN_DECLS /* The Kernel Debug Sub Classes for File System */ #define DBG_FSRW 1 /* reads and writes to the filesystem */ #define DBG_DKRW 2 /* reads and writes to the disk */ +#define DBG_FSVN 3 /* vnode operations (inc. locking/unlocking) */ +#define DBG_FSLOOOKUP 4 /* namei and other lookup-related operations */ /* The Kernel Debug Sub Classes for BSD */ #define DBG_BSD_EXCP_SC 0x0C /* System Calls */ +#define DBG_BSD_AIO 0x0D /* aio (POSIX async IO) */ +#define DBG_BSD_SC_EXTENDED_INFO 0x0E /* System Calls, extended info */ /* The Kernel Debug Sub Classes for DBG_TRACE */ #define DBG_TRACE_DATA 0 @@ -206,6 +215,7 @@ __BEGIN_DECLS #define MISCDBG_CODE(SubClass,code) KDBG_CODE(DBG_MISC, SubClass, code) #define DLILDBG_CODE(SubClass,code) KDBG_CODE(DBG_DLIL, SubClass, code) #define DYLDDBG_CODE(SubClass,code) KDBG_CODE(DBG_DYLD, SubClass, code) +#define QTDBG_CODE(SubClass,code) KDBG_CODE(DBG_QT, SubClass, code) /* Usage: * kernel_debug((KDBG_CODE(DBG_NETWORK, DNET_PROTOCOL, 51) | DBG_FUNC_START), @@ -287,7 +297,7 @@ __END_DECLS */ typedef struct { -mach_timespec_t timestamp; +uint64_t timestamp; unsigned int arg1; unsigned int arg2; unsigned int arg3; diff --git a/bsd/sys/kern_audit.h b/bsd/sys/kern_audit.h new file mode 100644 index 000000000..e2412806b --- /dev/null +++ b/bsd/sys/kern_audit.h @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. 
+ * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _SYS_KERN_AUDIT_H +#define _SYS_KERN_AUDIT_H + +#ifdef KERNEL + +/* + * Audit subsystem condition flags. The audit_enabled flag is set and + * removed automatically as a result of configuring log files, and + * can be observed but should not be directly manipulated. The audit + * suspension flag permits audit to be temporarily disabled without + * reconfiguring the audit target. + */ +extern int audit_enabled; +extern int audit_suspended; + +#define BSM_SUCCESS 0 +#define BSM_FAILURE 1 +#define BSM_NOAUDIT 2 + +/* + * Define the masks for the audited arguments. + */ +#define ARG_EUID 0x0000000000000001ULL +#define ARG_RUID 0x0000000000000002ULL +#define ARG_SUID 0x0000000000000004ULL +#define ARG_EGID 0x0000000000000008ULL +#define ARG_RGID 0x0000000000000010ULL +#define ARG_SGID 0x0000000000000020ULL +#define ARG_PID 0x0000000000000040ULL +#define ARG_UID 0x0000000000000080ULL +#define ARG_AUID 0x0000000000000100ULL +#define ARG_GID 0x0000000000000200ULL +#define ARG_FD 0x0000000000000400ULL +#define UNUSED 0x0000000000000800ULL +#define ARG_FFLAGS 0x0000000000001000ULL +#define ARG_MODE 0x0000000000002000ULL +#define ARG_DEV 0x0000000000004000ULL +#define ARG_ACCMODE 0x0000000000008000ULL +#define ARG_CMODE 0x0000000000010000ULL +#define ARG_MASK 0x0000000000020000ULL +#define ARG_SIGNUM 0x0000000000040000ULL +#define ARG_LOGIN 0x0000000000080000ULL +#define ARG_SADDRINET 0x0000000000100000ULL +#define ARG_SADDRINET6 0x0000000000200000ULL +#define ARG_SADDRUNIX 0x0000000000400000ULL +#define ARG_KPATH1 0x0000000000800000ULL +#define ARG_KPATH2 0x0000000001000000ULL +#define ARG_UPATH1 0x0000000002000000ULL +#define ARG_UPATH2 0x0000000004000000ULL +#define ARG_TEXT 0x0000000008000000ULL +#define ARG_VNODE1 0x0000000010000000ULL +#define ARG_VNODE2 0x0000000020000000ULL +#define ARG_SVIPC_CMD 0x0000000040000000ULL +#define ARG_SVIPC_PERM 0x0000000080000000ULL +#define ARG_SVIPC_ID 0x0000000100000000ULL +#define ARG_SVIPC_ADDR 0x0000000200000000ULL +#define ARG_GROUPSET 0x0000000400000000ULL +#define ARG_CMD 0x0000000800000000ULL +#define ARG_SOCKINFO 0x0000001000000000ULL +#define ARG_NONE 0x0000000000000000ULL +#define ARG_ALL 0xFFFFFFFFFFFFFFFFULL + +struct vnode_au_info { + mode_t vn_mode; + uid_t vn_uid; + gid_t vn_gid; + dev_t vn_dev; + long vn_fsid; + long vn_fileid; + long vn_gen; +}; + +struct groupset { + gid_t gidset[NGROUPS]; + u_int gidset_size; +}; + +struct socket_info { + int sodomain; + int sotype; + int soprotocol; +}; + +struct audit_record { + /* Audit record header. 
*/ + u_int32_t ar_magic; + int ar_event; + int ar_retval; /* value returned to the process */ + int ar_errno; /* return status of system call */ + struct timespec ar_starttime; + struct timespec ar_endtime; + u_int64_t ar_valid_arg; /* Bitmask of valid arguments */ + + /* Audit subject information. */ + struct xucred ar_subj_cred; + uid_t ar_subj_ruid; + gid_t ar_subj_rgid; + gid_t ar_subj_egid; + uid_t ar_subj_auid; /* Audit user ID */ + pid_t ar_subj_asid; /* Audit session ID */ + pid_t ar_subj_pid; + struct au_tid ar_subj_term; + char ar_subj_comm[MAXCOMLEN + 1]; + struct au_mask ar_subj_amask; + + /* Operation arguments. */ + uid_t ar_arg_euid; + uid_t ar_arg_ruid; + uid_t ar_arg_suid; + gid_t ar_arg_egid; + gid_t ar_arg_rgid; + gid_t ar_arg_sgid; + pid_t ar_arg_pid; + uid_t ar_arg_uid; + uid_t ar_arg_auid; + gid_t ar_arg_gid; + struct groupset ar_arg_groups; + int ar_arg_fd; + int ar_arg_fflags; + mode_t ar_arg_mode; + int ar_arg_dev; + int ar_arg_accmode; + int ar_arg_cmode; + int ar_arg_mask; + u_int ar_arg_signum; + char ar_arg_login[MAXLOGNAME]; + struct sockaddr ar_arg_sockaddr; + struct socket_info ar_arg_sockinfo; + char *ar_arg_upath1; + char *ar_arg_upath2; + char *ar_arg_kpath1; + char *ar_arg_kpath2; + char *ar_arg_text; + struct au_mask ar_arg_amask; + struct vnode_au_info ar_arg_vnode1; + struct vnode_au_info ar_arg_vnode2; + int ar_arg_cmd; + int ar_arg_svipc_cmd; + struct ipc_perm ar_arg_svipc_perm; + int ar_arg_svipc_id; + void * ar_arg_svipc_addr; +}; + +/* + * In-kernel version of audit record; the basic record plus queue meta-data. + * This record can also have a pointer set to some opaque data that will + * be passed through to the audit writing mechanism. + */ +struct kaudit_record { + struct audit_record k_ar; + caddr_t k_udata; /* user data */ + u_int k_ulen; /* user data length */ + struct uthread *k_uthread; /* thread we are auditing */ + TAILQ_ENTRY(kaudit_record) k_q; +}; + +struct proc; +struct vnode; +struct componentname; + +void audit_abort(struct kaudit_record *ar); +void audit_commit(struct kaudit_record *ar, int error, + int retval); +void audit_init(void); +void audit_shutdown(void); + +struct kaudit_record *audit_new(int event, struct proc *p, + struct uthread *uthread); + +void audit_syscall_enter(unsigned short code, struct proc *proc, struct uthread *uthread); +void audit_syscall_exit(int error, struct proc *proc, + struct uthread *uthread); + +int kaudit_to_bsm(struct kaudit_record *kar, + struct au_record **pau); + +int bsm_rec_verify(caddr_t rec); + +/* + * Kernel versions of the BSM audit record functions. + */ +struct au_record *kau_open(void); +int kau_write(struct au_record *rec, token_t *m); +int kau_close(struct au_record *rec, + struct timespec *endtime, short event); +void kau_free(struct au_record *rec); +void kau_init(void); +token_t *kau_to_file(char *file, struct timeval *tv); +token_t *kau_to_header(struct timespec *ctime, int rec_size, + au_event_t e_type, au_emod_t e_mod); +token_t *kau_to_header32(struct timespec *ctime, int rec_size, + au_event_t e_type, au_emod_t e_mod); +token_t *kau_to_header64(struct timespec *ctime, int rec_size, + au_event_t e_type, au_emod_t e_mod); +/* + * The remaining kernel functions are conditionally compiled in as they + * are wrapped by a macro, and the macro should be the only place in + * the source tree where these functions are referenced. 
+ */ +#ifdef AUDIT +void audit_arg_accmode(int mode); +void audit_arg_cmode(int cmode); +void audit_arg_fd(int fd); +void audit_arg_fflags(int fflags); +void audit_arg_gid(gid_t gid, gid_t egid, gid_t rgid, + gid_t sgid); +void audit_arg_uid(uid_t uid, uid_t euid, uid_t ruid, + uid_t suid); +void audit_arg_groupset(gid_t *gidset, u_int gidset_size); +void audit_arg_login(char[MAXLOGNAME]); +void audit_arg_mask(int mask); +void audit_arg_mode(mode_t mode); +void audit_arg_dev(int dev); +void audit_arg_owner(uid_t uid, gid_t gid); +void audit_arg_pid(pid_t pid); +void audit_arg_signum(u_int signum); +void audit_arg_socket(int sodomain, int sotype, + int soprotocol); +void audit_arg_sockaddr(struct proc *p, + struct sockaddr *so); +void audit_arg_auid(uid_t auid); +void audit_arg_upath(struct proc *p, char *upath, + u_int64_t flags); +void audit_arg_vnpath(struct vnode *vp, u_int64_t flags); +void audit_arg_text(char *text); +void audit_arg_cmd(int cmd); +void audit_arg_svipc_cmd(int cmd); +void audit_arg_svipc_perm(struct ipc_perm *perm); +void audit_arg_svipc_id(int id); +void audit_arg_svipc_addr(void *addr); + +void audit_proc_init(struct proc *p); +void audit_proc_fork(struct proc *parent, + struct proc *child); +void audit_proc_free(struct proc *p); + +/* + * Define a macro to wrap the audit_arg_* calls by checking the global + * audit_enabled flag before performing the actual call. + */ +#define AUDIT_ARG(op, args...) do { \ + if (audit_enabled) \ + audit_arg_ ## op (args); \ + } while (0) + +#define AUDIT_CMD(audit_cmd) do { \ + if (audit_enabled) { \ + audit_cmd; \ + } \ + } while (0) + +#else /* !AUDIT */ +#define AUDIT_ARG(op, args...) do { \ + } while (0) + +#define AUDIT_CMD(audit_cmd) do { \ + } while (0) + +#endif /* AUDIT */ + +#endif /* KERNEL */ + +#endif /* !_SYS_KERN_AUDIT_H */ diff --git a/bsd/sys/lock.h b/bsd/sys/lock.h index c08bdc006..a6d72b321 100644 --- a/bsd/sys/lock.h +++ b/bsd/sys/lock.h @@ -105,15 +105,9 @@ #include -#if defined(__ppc__) struct slock{ volatile unsigned int lock_data[10]; }; -#else -struct slock{ - volatile unsigned int lock_data[9]; -}; -#endif typedef struct slock simple_lock_data_t; typedef struct slock *simple_lock_t; #define decl_simple_lock_data(class,name) \ diff --git a/bsd/sys/lockf.h b/bsd/sys/lockf.h index 226b91aba..1864658e6 100644 --- a/bsd/sys/lockf.h +++ b/bsd/sys/lockf.h @@ -65,6 +65,7 @@ #define _SYS_LOCKF_H_ #include +#include #ifdef __APPLE_API_PRIVATE /* diff --git a/bsd/sys/malloc.h b/bsd/sys/malloc.h index 781ce11a0..763e84c9e 100644 --- a/bsd/sys/malloc.h +++ b/bsd/sys/malloc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
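[Illustrative kernel-side sketch, not part of the patch: how a syscall body might use the AUDIT_ARG wrapper defined in kern_audit.h above. The function name and arguments here are hypothetical; the wrapper compiles to nothing when the kernel is built without AUDIT, and is skipped at run time unless audit_enabled is set.]

#include <sys/param.h>
#include <sys/kern_audit.h>

static int
example_setmode(struct proc *p, char *upath, mode_t mode)  /* hypothetical */
{
    AUDIT_ARG(upath, p, upath, ARG_UPATH1);  /* expands to audit_arg_upath() */
    AUDIT_ARG(mode, mode);                   /* expands to audit_arg_mode() */
    /* ... the actual operation would follow ... */
    return (0);
}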
* * @APPLE_LICENSE_HEADER_START@ * @@ -71,6 +71,7 @@ */ #define M_WAITOK 0x0000 #define M_NOWAIT 0x0001 +#define M_ZERO 0x0004 /* bzero the allocation */ /* * Types of memory to be allocated (not all are used by us) @@ -169,8 +170,10 @@ #define M_IGMP 90 #define M_JNL_JNL 91 /* Journaling: "struct journal" */ #define M_JNL_TR 92 /* Journaling: "struct transaction" */ +#define M_SPECINFO 93 /* special file node */ +#define M_KQUEUE 94 /* kqueue */ -#define M_LAST 93 /* Must be last type + 1 */ +#define M_LAST 95 /* Must be last type + 1 */ /* Strings corresponding to types of memory */ /* Must be in synch with the #defines above */ @@ -267,7 +270,9 @@ "TCP Segment Q",/* 89 M_TSEGQ */\ "IGMP state", /* 90 M_IGMP */\ "Journal", /* 91 M_JNL_JNL */\ - "Transaction" /* 92 M_JNL_TR */\ + "Transaction", /* 92 M_JNL_TR */\ + "specinfo", /* 93 M_SPECINFO */\ + "kqueue" /* 94 M_KQUEUE */\ } struct kmemstats { diff --git a/bsd/sys/mbuf.h b/bsd/sys/mbuf.h index 8de818489..f55270fb2 100644 --- a/bsd/sys/mbuf.h +++ b/bsd/sys/mbuf.h @@ -139,7 +139,7 @@ struct pkthdr { /* description of external storage mapped into mbuf, valid if M_EXT set */ struct m_ext { caddr_t ext_buf; /* start of buffer */ - void (*ext_free)(); /* free routine if not the usual */ + void (*ext_free)(caddr_t , u_int, caddr_t); /* free routine if not the usual */ u_int ext_size; /* size of buffer, for ext_free */ caddr_t ext_arg; /* additional ext_free argument */ struct ext_refsq { /* references held */ diff --git a/bsd/sys/mman.h b/bsd/sys/mman.h index 255f6bab3..1e2c8c926 100644 --- a/bsd/sys/mman.h +++ b/bsd/sys/mman.h @@ -86,7 +86,7 @@ #define MAP_FIXED 0x0010 /* map addr must be exactly as requested */ #define MAP_RENAME 0x0020 /* Sun: rename private pages to file */ #define MAP_NORESERVE 0x0040 /* Sun: don't reserve needed swap area */ -#define MAP_INHERIT 0x0080 /* region is retained after exec */ +#define MAP_RESERVED0080 0x0080 /* previously unimplemented MAP_INHERIT */ #define MAP_NOEXTEND 0x0100 /* for MAP_FILE, don't change file size */ #define MAP_HASSEMAPHORE 0x0200 /* region may contain semaphores */ diff --git a/bsd/sys/mount.h b/bsd/sys/mount.h index dbb687214..3fdc23964 100644 --- a/bsd/sys/mount.h +++ b/bsd/sys/mount.h @@ -140,7 +140,12 @@ struct mount { struct statfs mnt_stat; /* cache of filesystem stats */ qaddr_t mnt_data; /* private data */ /* Cached values of the IO constraints for the device */ - u_int32_t mnt_maxreadcnt; /* Max. byte count for read */ + union { + u_int32_t mntu_maxreadcnt; /* Max. byte count for read */ + void *mntu_xinfo_ptr; /* points at extended IO constraints */ + } mnt_un; /* if MNTK_IO_XINFO is set */ +#define mnt_maxreadcnt mnt_un.mntu_maxreadcnt +#define mnt_xinfo_ptr mnt_un.mntu_xinfo_ptr u_int32_t mnt_maxwritecnt; /* Max. byte count for write */ u_int16_t mnt_segreadcnt; /* Max. segment count for read */ u_int16_t mnt_segwritecnt; /* Max. segment count for write */ @@ -212,12 +217,16 @@ struct mount { * past the mount point. This keeps the subtree stable during mounts * and unmounts. */ +#define MNTK_VIRTUALDEV 0x00200000 /* mounted on a virtual device i.e. 
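[Illustrative kernel-side sketch, not part of the patch: the new M_ZERO allocation flag from the malloc.h hunk above asks the allocator to return zero-filled memory, replacing a MALLOC()-plus-bzero() pair. The structure and function here are hypothetical.]

#include <sys/malloc.h>

struct example {        /* hypothetical structure */
    int  count;
    char name[16];
};

static struct example *
alloc_example(void)
{
    struct example *ep;

    MALLOC(ep, struct example *, sizeof (*ep), M_TEMP,
        M_WAITOK | M_ZERO);     /* returns zeroed memory */
    return (ep);
}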
a disk image */ +#define MNTK_ROOTDEV 0x00400000 /* this filesystem resides on the same device as the root */ +#define MNTK_IO_XINFO 0x00800000 /* mnt_un.mntu_ioptr has a malloc associated with it */ #define MNTK_UNMOUNT 0x01000000 /* unmount in progress */ #define MNTK_MWAIT 0x02000000 /* waiting for unmount to finish */ #define MNTK_WANTRDWR 0x04000000 /* upgrade to read/write requested */ #if REV_ENDIAN_FS #define MNT_REVEND 0x08000000 /* Reverse endian FS */ #endif /* REV_ENDIAN_FS */ +#define MNTK_FRCUNMOUNT 0x10000000 /* Forced unmount wanted. */ /* * Sysctl CTL_VFS definitions. * @@ -234,6 +243,9 @@ struct mount { #define VFS_MAXTYPENUM 1 /* int: highest defined filesystem type */ #define VFS_CONF 2 /* struct: vfsconf for filesystem given as next argument */ +#define VFS_FMOD_WATCH 3 /* block waiting for the next modified file */ +#define VFS_FMOD_WATCH_ENABLE 4 /* 1==enable, 0==disable */ + /* * Flags for various system call interfaces. * @@ -282,6 +294,61 @@ struct vfsconf { #endif /*__APPLE_API_UNSTABLE */ +struct vfsidctl { + int vc_vers; /* should be VFSIDCTL_VERS1 (below) */ + fsid_t vc_fsid; /* fsid to operate on. */ + void *vc_ptr; /* pointer to data structure. */ + size_t vc_len; /* sizeof said structure. */ + u_int32_t vc_spare[12]; /* spare (must be zero). */ +}; + +/* vfsidctl API version. */ +#define VFS_CTL_VERS1 0x01 + +/* + * New style VFS sysctls, do not reuse/conflict with the namespace for + * private sysctls. + */ +#define VFS_CTL_STATFS 0x00010001 /* statfs */ +#define VFS_CTL_UMOUNT 0x00010002 /* unmount */ +#define VFS_CTL_QUERY 0x00010003 /* anything wrong? (vfsquery) */ +#define VFS_CTL_NEWADDR 0x00010004 /* reconnect to new address */ +#define VFS_CTL_TIMEO 0x00010005 /* set timeout for vfs notification */ + +struct vfsquery { + u_int32_t vq_flags; + u_int32_t vq_spare[31]; +}; + +/* vfsquery flags */ +#define VQ_NOTRESP 0x0001 /* server down */ +#define VQ_NEEDAUTH 0x0002 /* server bad auth */ +#define VQ_LOWDISK 0x0004 /* we're low on space */ +#define VQ_MOUNT 0x0008 /* new filesystem arrived */ +#define VQ_UNMOUNT 0x0010 /* filesystem has left */ +#define VQ_DEAD 0x0020 /* filesystem is dead, needs force unmount */ +#define VQ_ASSIST 0x0040 /* filesystem needs assistance from external + program */ +#define VQ_FLAG0080 0x0080 /* placeholder */ +#define VQ_FLAG0100 0x0100 /* placeholder */ +#define VQ_FLAG0200 0x0200 /* placeholder */ +#define VQ_FLAG0400 0x0400 /* placeholder */ +#define VQ_FLAG0800 0x0800 /* placeholder */ +#define VQ_FLAG1000 0x1000 /* placeholder */ +#define VQ_FLAG2000 0x2000 /* placeholder */ +#define VQ_FLAG4000 0x4000 /* placeholder */ +#define VQ_FLAG8000 0x8000 /* placeholder */ + +#ifdef KERNEL +/* Point a sysctl request at a vfsidctl's data. 
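[Illustrative kernel-side sketch, not part of the patch: a filesystem posting a "server not responding" event against its own mount, using the vfsquery flags above together with vfs_event_signal(), which is declared a few lines below in this same mount.h hunk. The function name is hypothetical.]

#include <sys/param.h>
#include <sys/mount.h>

static void
example_server_down(struct mount *mp)   /* hypothetical */
{
    /* Posts VQ_NOTRESP against this mount's fsid; a watcher issuing
     * VFS_CTL_QUERY sees the flag in vfsquery.vq_flags. */
    vfs_event_signal(&mp->mnt_stat.f_fsid, VQ_NOTRESP, 0);
}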
*/ +#define VCTLTOREQ(vc, req) \ + do { \ + (req)->newptr = (vc)->vc_ptr; \ + (req)->newlen = (vc)->vc_len; \ + (req)->newidx = 0; \ + } while (0) +#endif + #ifdef KERNEL #ifdef __APPLE_API_UNSTABLE extern int maxvfsconf; /* highest defined filesystem type */ @@ -371,6 +438,10 @@ void vfs_unbusy __P((struct mount *, struct proc *)); int vfs_mountroot __P((void)); int vfs_rootmountalloc __P((char *, char *, struct mount **)); void vfs_unmountall __P((void)); +int safedounmount(struct mount *, int, struct proc *); +int dounmount(struct mount *, int, struct proc *); +void vfs_event_signal(fsid_t *, u_int32_t, intptr_t); +void vfs_event_init(void); #endif /* __APPLE_API_PRIVATE */ extern CIRCLEQ_HEAD(mntlist, mount) mountlist; extern struct slock mountlist_slock; @@ -381,6 +452,7 @@ extern struct slock mountlist_slock; #include __BEGIN_DECLS +int fhopen __P((const struct fhandle *, int)); int fstatfs __P((int, struct statfs *)); int getfh __P((const char *, fhandle_t *)); int getfsstat __P((struct statfs *, long, int)); diff --git a/bsd/sys/namei.h b/bsd/sys/namei.h index 40faa46e9..5bfd07044 100644 --- a/bsd/sys/namei.h +++ b/bsd/sys/namei.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -142,6 +142,7 @@ struct nameidata { #define NOCACHE 0x0020 /* name must not be left in cache */ #define FOLLOW 0x0040 /* follow symbolic links */ #define NOFOLLOW 0x0000 /* do not follow symbolic links (pseudo) */ +#define SHAREDLEAF 0x0080 /* OK to have shared leaf lock */ #define MODMASK 0x00fc /* mask of operational modifiers */ /* * Namei parameter descriptors. @@ -169,8 +170,11 @@ struct nameidata { #define ISWHITEOUT 0x020000 /* found whiteout */ #define DOWHITEOUT 0x040000 /* do whiteouts */ #define WILLBEDIR 0x080000 /* new files will be dirs; allow trailing / */ +#define AUDITVNPATH1 0x100000 /* audit the path/vnode info */ +#define AUDITVNPATH2 0x200000 /* audit the path/vnode info */ +#define USEDVP 0x400000 /* start the lookup at ndp.ni_dvp */ #define NODELETEBUSY 0x800000 /* donot delete busy files (Carbon semantic) */ -#define PARAMASK 0x0fff00 /* mask of parameter descriptors */ +#define PARAMASK 0x3fff00 /* mask of parameter descriptors */ /* * Initialization of an nameidata structure. */ @@ -199,8 +203,7 @@ struct namecache { u_long nc_dvpid; /* capability number of nc_dvp */ struct vnode *nc_vp; /* vnode the name refers to */ u_long nc_vpid; /* capability number of nc_vp */ - char nc_nlen; /* length of name */ - char nc_name[NCHNAMLEN]; /* segment name */ + char *nc_name; /* segment name */ }; #ifdef KERNEL @@ -218,6 +221,16 @@ void cache_enter __P((struct vnode *dvp, struct vnode *vpp, struct componentname *cnp)); void cache_purge __P((struct vnode *vp)); void cache_purgevfs __P((struct mount *mp)); + +// +// Global string-cache routines. You can pass zero for nc_hash +// if you don't know it (add_name() will then compute the hash). +// There are no flags for now but maybe someday. +// +char *add_name(const char *name, size_t len, u_int nc_hash, u_int flags); +int remove_name(const char *name); + + #endif /* KERNEL */ /* diff --git a/bsd/sys/param.h b/bsd/sys/param.h index af0d5d043..775912b4b 100644 --- a/bsd/sys/param.h +++ b/bsd/sys/param.h @@ -103,16 +103,18 @@ /* Machine type dependent parameters. */ #include -#include /* More types and definitions used throughout the kernel. 
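[Illustrative kernel-side sketch, not part of the patch: interning a component name with the global string-cache routines added to namei.h above. Passing zero for nc_hash asks add_name() to compute the hash itself; the flags word is unused for now. The caller shown is hypothetical.]

#include <sys/param.h>
#include <sys/namei.h>

static void
intern_example(void)    /* hypothetical */
{
    /* 7 == strlen("example"); 0 hash -> computed by add_name(). */
    char *name = add_name("example", 7, 0, 0);

    /* ... nc_name pointers can now share this storage ... */

    remove_name(name);  /* drop our reference when done */
}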
*/ #ifdef KERNEL +#include #include #include #include #include #include #include +#else +#include #endif /* Signals. */ diff --git a/bsd/sys/proc.h b/bsd/sys/proc.h index 28a52f1e8..4bac46299 100644 --- a/bsd/sys/proc.h +++ b/bsd/sys/proc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -72,6 +72,8 @@ #include #include #include +#include +#include #ifdef __APPLE_API_PRIVATE @@ -144,7 +146,8 @@ struct proc { fixpt_t p_pctcpu; /* %cpu for this process during p_swtime */ void *p_wchan; /* Sleep address. */ char *p_wmesg; /* Reason for sleep. */ - u_int p_swtime; /* Time swapped in or out. */ + u_int p_swtime; /* DEPRECATED (Time swapped in or out.) */ +#define p_argslen p_swtime /* Length of process arguments. */ u_int p_slptime; /* Time since last blocked. */ struct itimerval p_realtimer; /* Alarm timer. */ @@ -168,7 +171,7 @@ struct proc { * Belongs after p_pid, but here to avoid shifting proc elements. */ LIST_ENTRY(proc) p_hash; /* Hash chain. */ - TAILQ_HEAD( ,eventqelt) p_evlist; + TAILQ_HEAD( ,eventqelt) p_evlist; /* The following fields are all copied upon creation in fork. */ #define p_startcopy p_sigmask @@ -201,7 +204,7 @@ struct proc { caddr_t user_stack; /* where user stack was allocated */ void * exitarg; /* exit arg for proc terminate */ void * vm_shm; /* for sysV shared memory */ - sigset_t p_xxxsigpending; /* DEPRECATED . */ + int p_argc; /* saved argc for sysctl_procargs() */ int p_vforkcnt; /* number of outstanding vforks */ void * p_vforkact; /* activation running this vfork proc */ TAILQ_HEAD( , uthread) p_uthlist; /* List of uthreads */ @@ -210,6 +213,13 @@ struct proc { u_short si_status; u_short si_code; uid_t si_uid; + TAILQ_HEAD( , aio_workq_entry ) aio_activeq; /* active async IO requests */ + int aio_active_count; /* entries on aio_activeq */ + TAILQ_HEAD( , aio_workq_entry ) aio_doneq; /* completed async IO requests */ + int aio_done_count; /* entries on aio_doneq */ + + struct klist p_klist; /* knote list */ + struct auditinfo *p_au; /* User auditing data */ #if DIAGNOSTIC #if SIGNAL_DEBUG unsigned int lockpc[8]; @@ -218,11 +228,11 @@ struct proc { #endif /* DIAGNOSTIC */ }; -#else /* __APPLE_API_PRIVATE */ +#else /* !__APPLE_API_PRIVATE */ struct session; struct pgrp; struct proc; -#endif /* __APPLE_API_PRIVATE */ +#endif /* !__APPLE_API_PRIVATE */ #ifdef __APPLE_API_UNSTABLE /* Exported fields for kern sysctls */ @@ -311,9 +321,12 @@ struct extern_proc { /* Should be moved to machine-dependent areas. */ #define P_OWEUPC 0x08000 /* Owe process an addupc() call at next ast. */ -/* XXX Not sure what to do with these, yet. */ -#define P_FSTRACE 0x10000 /* tracing via file system (elsewhere?) */ -#define P_SSTEP 0x20000 /* process needs single-step fixup ??? */ +#define P_AFFINITY 0x0010000 /* xxx */ +#define P_CLASSIC 0x0020000 /* xxx */ +/* +#define P_FSTRACE 0x10000 / * tracing via file system (elsewhere?) * / +#define P_SSTEP 0x20000 / * process needs single-step fixup ??? 
* / +*/ #define P_WAITING 0x0040000 /* process has a wait() in progress */ #define P_KDEBUG 0x0080000 /* kdebug tracing is on for this process */ @@ -329,9 +342,12 @@ struct extern_proc { /* flag set on exec */ #define P_FORCEQUOTA 0x20000000 /* Force quota for root */ #define P_NOCLDWAIT 0x40000000 /* No zombies when chil procs exit */ +#define P_NOREMOTEHANG 0x80000000 /* Don't hang on remote FS ops */ #define P_NOSWAP 0 /* Obsolete: retained so that nothing breaks */ -#define P_PHYSIO 0 /* Obsolete: retained so that nothing breaks */ +#define P_PHYSIO 0 /* Obsolete: retained so that nothing breaks */ +#define P_FSTRACE 0 /* Obsolete: retained so that nothing breaks */ +#define P_SSTEP 0 /* Obsolete: retained so that nothing breaks */ /* * Shareable process credentials (always resident). This includes a reference @@ -365,6 +381,7 @@ __BEGIN_DECLS * as it is used to represent "no process group". */ extern int nprocs, maxproc; /* Current and max number of procs. */ +__private_extern__ int hard_maxproc; /* hard limit */ #define PID_MAX 30000 #define NO_PID 30001 @@ -393,6 +410,7 @@ extern void procinit __P((void)); #ifdef __APPLE_API_UNSTABLE extern struct proc *pfind __P((pid_t)); /* Find process by id. */ +__private_extern__ struct proc *pzfind(pid_t); /* Find zombie by id. */ extern struct pgrp *pgfind __P((pid_t)); /* Find process group by id. */ extern int chgproccnt __P((uid_t uid, int diff)); diff --git a/bsd/sys/select.h b/bsd/sys/select.h index 775c58d1c..8b2a73148 100644 --- a/bsd/sys/select.h +++ b/bsd/sys/select.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -61,6 +61,7 @@ #define _SYS_SELECT_H_ #include +#include #ifdef __APPLE_API_UNSTABLE @@ -70,17 +71,24 @@ __BEGIN_DECLS #include #endif +#include + /* * Used to maintain information about processes that wish to be * notified when I/O becomes possible. */ struct selinfo { #ifdef KERNEL - struct wait_queue wait_queue; /* wait_queue for wait/wakeup */ + union { + struct wait_queue wait_queue; /* wait_queue for wait/wakeup */ + struct klist note; /* JMM - temporary separation */ + } si_u; +#define si_wait_queue si_u.wait_queue +#define si_note si_u.note #else - char wait_queue[16]; + char si_wait_queue[16]; #endif - u_int si_flags; /* see below */ + u_int si_flags; /* see below */ }; #define SI_COLL 0x0001 /* collision occurred */ @@ -100,4 +108,20 @@ __END_DECLS #endif /* __APPLE_API_UNSTABLE */ +#ifndef KERNEL +#include +#ifndef __MWERKS__ +#include +#endif /* __MWERKS__ */ +#include + +__BEGIN_DECLS +#ifndef __MWERKS__ +int pselect(int, fd_set *, fd_set *, fd_set *, + const struct timespec *, const sigset_t *); +#endif /* __MWERKS__ */ +int select(int, fd_set *, fd_set *, fd_set *, struct timeval *); +__END_DECLS +#endif /* ! 
KERNEL */ + #endif /* !_SYS_SELECT_H_ */ diff --git a/bsd/sys/sem.h b/bsd/sys/sem.h index 9aa6bd8ef..7a20a99a7 100644 --- a/bsd/sys/sem.h +++ b/bsd/sys/sem.h @@ -242,7 +242,7 @@ typedef enum { __BEGIN_DECLS int semsys __P((int, ...)); -int semctl __P((int, int, int, union semun)); +int semctl __P((int, int, int, ...)); int semget __P((key_t, int, int)); int semop __P((int, struct sembuf *,unsigned)); __END_DECLS diff --git a/bsd/sys/semaphore.h b/bsd/sys/semaphore.h index 75ca1c01d..89983396b 100644 --- a/bsd/sys/semaphore.h +++ b/bsd/sys/semaphore.h @@ -43,6 +43,9 @@ typedef int sem_t; #define SEM_FAILED -1 #ifndef KERNEL +#include + +__BEGIN_DECLS int sem_close(sem_t *); int sem_destroy(sem_t *); int sem_getvalue(sem_t *, int *); @@ -52,6 +55,7 @@ int sem_post(sem_t *); int sem_trywait(sem_t *); int sem_unlink(const char *); int sem_wait(sem_t *); +__END_DECLS #endif /* KERNEL */ diff --git a/bsd/sys/shm.h b/bsd/sys/shm.h index f6f43f5f8..885807db1 100644 --- a/bsd/sys/shm.h +++ b/bsd/sys/shm.h @@ -108,6 +108,7 @@ struct proc; void shmexit __P((struct proc *)); void shmfork __P((struct proc *, struct proc *)); +__private_extern__ void shmexec __P((struct proc *)); #endif /* __APPLE_API_PRIVATE */ #else /* !KERNEL */ diff --git a/bsd/sys/signal.h b/bsd/sys/signal.h index d1704b27e..92e3f7ffe 100644 --- a/bsd/sys/signal.h +++ b/bsd/sys/signal.h @@ -134,6 +134,8 @@ #endif #ifndef _ANSI_SOURCE +#include + typedef unsigned int sigset_t; union sigval { @@ -142,12 +144,20 @@ union sigval { void *sigval_ptr; }; -#define SIGEV_NONE 0 /* No async notification */ +#define SIGEV_NONE 0 /* No async notification */ +#define SIGEV_SIGNAL 1 /* aio - completion notification */ #ifdef __APPLE_API_PRIVATE -#define SIGEV_SIGNAL 1 /* Generate a queued signal */ -#define SIGEV_THREAD 3 /* A notification function will be called to perfrom notification */ +#define SIGEV_THREAD 3 /* A notification function will be called to perform notification */ #endif /*__APPLE_API_PRIVATE */ +struct sigevent { + int sigev_notify; /* Notification type */ + int sigev_signo; /* Signal number */ + union sigval sigev_value; /* Signal value */ + void (*sigev_notify_function)(union sigval); /* Notification function */ + pthread_attr_t *sigev_notify_attributes; /* Notification attributes */ +}; + typedef struct __siginfo { int si_signo; /* signal number */ int si_errno; /* errno association */ @@ -158,7 +168,7 @@ typedef struct __siginfo { void *si_addr; /* faulting instruction */ union sigval si_value; /* signal value */ long si_band; /* band event for SIGPOLL */ - int pad[7]; /* RFU */ + unsigned int pad[7]; /* Reserved for Future Use */ } siginfo_t; /* @@ -244,6 +254,8 @@ struct sigaction { #define SA_NOCLDWAIT 0x0020 /* don't keep zombies around */ #define SA_SIGINFO 0x0040 /* signal handler with SA_SIGINFO args */ #define SA_USERTRAMP 0x0100 /* do not bounce off kernel's sigtramp */ +/* This will provide 64bit register set in a 32bit user address space */ +#define SA_64REGSET 0x0200 /* signal handler with SA_SIGINFO args with 64bit regs information */ #endif #define SA_NOCLDSTOP 0x0008 /* do not generate SIGCHLD on child stop */ @@ -278,8 +290,8 @@ typedef struct sigaltstack stack_t; #define SS_ONSTACK 0x0001 /* take signal on signal stack */ #define SS_DISABLE 0x0004 /* disable taking signals on alternate stack */ -#define MINSIGSTKSZ 8192 /* minimum allowable stack */ -#define SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended stack size */ +#define MINSIGSTKSZ 32768 /* (32K)minimum allowable stack */ +#define SIGSTKSZ 131072 /* 
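[Illustrative sketch, not part of the patch: the sem.h hunk above redeclares semctl() as variadic, so the union semun argument now passes through the ellipsis instead of a fixed fourth parameter. A minimal userspace example with a private semaphore set.]

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <stdio.h>

int main(void)
{
    union semun arg;
    int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

    if (id < 0)
        return 1;
    arg.val = 1;
    /* The semun value is consumed through the new "..." parameter. */
    semctl(id, 0, SETVAL, arg);
    printf("initial value: %d\n", semctl(id, 0, GETVAL));
    semctl(id, 0, IPC_RMID);    /* clean up the set */
    return 0;
}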
(128K)recommended stack size */ /* * 4.3 compatibility: diff --git a/bsd/sys/signalvar.h b/bsd/sys/signalvar.h index c4d1f7c12..beb83e255 100644 --- a/bsd/sys/signalvar.h +++ b/bsd/sys/signalvar.h @@ -89,6 +89,7 @@ struct sigacts { int ps_code; /* for core dump/debugger XXX */ int ps_addr; /* for core dump/debugger XXX */ sigset_t ps_usertramp; /* SunOS compat; libc sigtramp XXX */ + sigset_t ps_64regset; /* signals that want SA_EXSIGINFO args */ }; /* signal flags */ diff --git a/bsd/sys/socket.h b/bsd/sys/socket.h index 2a0db2834..837398c6e 100644 --- a/bsd/sys/socket.h +++ b/bsd/sys/socket.h @@ -131,6 +131,7 @@ typedef _BSD_SOCKLEN_T_ socklen_t; #define SO_NREAD 0x1020 /* APPLE: get 1st-packet byte count */ #define SO_NKE 0x1021 /* APPLE: Install socket-level NKE */ #define SO_NOSIGPIPE 0x1022 /* APPLE: No SIGPIPE on EPIPE */ +#define SO_NOADDRERR 0x1023 /* APPLE: Returns EADDRNOTAVAIL when src is not available anymore */ #endif /* * Structure used for manipulating linger option. @@ -372,13 +373,13 @@ struct sockaddr_storage { * Used value-result for recvmsg, value only for sendmsg. */ struct msghdr { - caddr_t msg_name; /* optional address */ - u_int msg_namelen; /* size of address */ - struct iovec *msg_iov; /* scatter/gather array */ - u_int msg_iovlen; /* # elements in msg_iov */ - caddr_t msg_control; /* ancillary data, see below */ - u_int msg_controllen; /* ancillary data buffer len */ - int msg_flags; /* flags on received message */ + caddr_t msg_name; /* optional address */ + socklen_t msg_namelen; /* size of address */ + struct iovec *msg_iov; /* scatter/gather array */ + u_int msg_iovlen; /* # elements in msg_iov */ + caddr_t msg_control; /* ancillary data, see below */ + socklen_t msg_controllen; /* ancillary data buffer len */ + int msg_flags; /* flags on received message */ }; #define MSG_OOB 0x1 /* process out-of-band data */ @@ -391,6 +392,7 @@ struct msghdr { #define MSG_DONTWAIT 0x80 /* this message should be nonblocking */ #define MSG_EOF 0x100 /* data completes connection */ #ifdef __APPLE__ +#define MSG_WAITSTREAM 0x200 /* wait up to full request.. may return partial */ #define MSG_FLUSH 0x400 /* Start of 'hold' seq; dump so_temp */ #define MSG_HOLD 0x800 /* Hold frag in so_temp */ #define MSG_SEND 0x1000 /* Send the packet in so_temp */ @@ -406,9 +408,9 @@ struct msghdr { * of message elements headed by cmsghdr structures. 
*/ struct cmsghdr { - u_int cmsg_len; /* data byte count, including hdr */ - int cmsg_level; /* originating protocol */ - int cmsg_type; /* protocol-specific type */ + socklen_t cmsg_len; /* data byte count, including hdr */ + int cmsg_level; /* originating protocol */ + int cmsg_type; /* protocol-specific type */ /* followed by u_char cmsg_data[]; */ }; @@ -509,24 +511,24 @@ struct sf_hdtr { #include __BEGIN_DECLS -int accept __P((int, struct sockaddr *, int *)); -int bind __P((int, const struct sockaddr *, int)); -int connect __P((int, const struct sockaddr *, int)); -int getpeername __P((int, struct sockaddr *, int *)); -int getsockname __P((int, struct sockaddr *, int *)); +int accept __P((int, struct sockaddr *, socklen_t *)); +int bind __P((int, const struct sockaddr *, socklen_t)); +int connect __P((int, const struct sockaddr *, socklen_t)); +int getpeername __P((int, struct sockaddr *, socklen_t *)); +int getsockname __P((int, struct sockaddr *, socklen_t *)); int getsockopt __P((int, int, int, void *, int *)); int listen __P((int, int)); ssize_t recv __P((int, void *, size_t, int)); -ssize_t recvfrom __P((int, void *, size_t, int, struct sockaddr *, int *)); +ssize_t recvfrom __P((int, void *, size_t, int, struct sockaddr *, socklen_t *)); ssize_t recvmsg __P((int, struct msghdr *, int)); ssize_t send __P((int, const void *, size_t, int)); ssize_t sendto __P((int, const void *, - size_t, int, const struct sockaddr *, int)); + size_t, int, const struct sockaddr *, socklen_t)); ssize_t sendmsg __P((int, const struct msghdr *, int)); #if SENDFILE int sendfile __P((int, int, off_t, size_t, struct sf_hdtr *, off_t *, int)); #endif -int setsockopt __P((int, int, int, const void *, int)); +int setsockopt __P((int, int, int, const void *, socklen_t)); int shutdown __P((int, int)); int socket __P((int, int, int)); int socketpair __P((int, int, int, int *)); diff --git a/bsd/sys/socketvar.h b/bsd/sys/socketvar.h index e60037bba..33bd91dd3 100644 --- a/bsd/sys/socketvar.h +++ b/bsd/sys/socketvar.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -162,9 +162,9 @@ struct socket { #define SB_ASYNC 0x10 /* ASYNC I/O, need signals */ #define SB_UPCALL 0x20 /* someone wants an upcall */ #define SB_NOINTR 0x40 /* operations not interruptible */ +#define SB_KNOTE 0x100 /* kernel note attached */ #ifndef __APPLE__ #define SB_AIO 0x80 /* AIO operations queued */ -#define SB_KNOTE 0x100 /* kernel note attached */ #else #define SB_NOTIFY (SB_WAIT|SB_SEL|SB_ASYNC) #define SB_RECV 0x8000 /* this is rcv sb */ @@ -197,6 +197,7 @@ struct socket { struct kextcb *so_ext; /* NKE hook */ u_long so_flags; /* Flags */ #define SOF_NOSIGPIPE 0x00000001 +#define SOF_NOADDRAVAIL 0x00000002 /* returns EADDRNOTAVAIL if src address is gone */ void *reserved2; void *reserved3; void *reserved4; @@ -340,9 +341,7 @@ struct sockaddr; struct stat; struct ucred; struct uio; -#ifndef __APPLE struct knote; -#endif /* * File operations on sockets. 
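[Illustrative sketch, not part of the patch: the socket prototypes in the hunk above now take socklen_t where they previously took plain int, matching the msghdr/cmsghdr field changes. A minimal example using the new length type with getsockname().]

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    struct sockaddr_in sin;
    socklen_t len = sizeof (sin);   /* socklen_t, no longer int */
    int s = socket(AF_INET, SOCK_STREAM, 0);

    if (s < 0)
        return 1;
    memset(&sin, 0, sizeof (sin));
    if (getsockname(s, (struct sockaddr *)&sin, &len) == 0)
        printf("local port: %d\n", ntohs(sin.sin_port));
    close(s);
    return 0;
}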
@@ -356,6 +355,8 @@ int soo_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct proc *p)); int soo_stat __P((struct socket *so, struct stat *ub)); int soo_select __P((struct file *fp, int which, void * wql, struct proc *p)); +int soo_kqfilter __P((struct file *fp, struct knote *kn, struct proc *p)); + /* * From uipc_socket and friends diff --git a/bsd/sys/stat.h b/bsd/sys/stat.h index b92ffb717..5f878f212 100644 --- a/bsd/sys/stat.h +++ b/bsd/sys/stat.h @@ -165,17 +165,15 @@ struct stat { #define S_ISVTX 0001000 /* save swapped text even after use */ #endif -#define S_ISDIR(m) ((m & 0170000) == 0040000) /* directory */ -#define S_ISCHR(m) ((m & 0170000) == 0020000) /* char special */ -#define S_ISBLK(m) ((m & 0170000) == 0060000) /* block special */ -#define S_ISREG(m) ((m & 0170000) == 0100000) /* regular file */ -#define S_ISFIFO(m) ((m & 0170000) == 0010000 || \ - (m & 0170000) == 0140000) /* fifo or socket */ +#define S_ISDIR(m) (((m) & 0170000) == 0040000) /* directory */ +#define S_ISCHR(m) (((m) & 0170000) == 0020000) /* char special */ +#define S_ISBLK(m) (((m) & 0170000) == 0060000) /* block special */ +#define S_ISREG(m) (((m) & 0170000) == 0100000) /* regular file */ +#define S_ISFIFO(m) (((m) & 0170000) == 0010000) /* fifo or socket */ #ifndef _POSIX_SOURCE -#define S_ISLNK(m) ((m & 0170000) == 0120000) /* symbolic link */ -#define S_ISSOCK(m) ((m & 0170000) == 0010000 || \ - (m & 0170000) == 0140000) /* fifo or socket */ -#define S_ISWHT(m) ((m & 0170000) == 0160000) /* whiteout */ +#define S_ISLNK(m) (((m) & 0170000) == 0120000) /* symbolic link */ +#define S_ISSOCK(m) (((m) & 0170000) == 0140000) /* socket */ +#define S_ISWHT(m) (((m) & 0170000) == 0160000) /* whiteout */ #endif #ifndef _POSIX_SOURCE diff --git a/bsd/sys/syscall.h b/bsd/sys/syscall.h index 174ab95ec..90158109a 100644 --- a/bsd/sys/syscall.h +++ b/bsd/sys/syscall.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
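[Illustrative sketch, not part of the patch: the rewritten S_IS* macros in the stat.h hunk above fully parenthesize their argument, so a compound expression such as (st.st_mode + 0) now expands correctly; under the old text, "m & 0170000" could bind to only part of the expression. FIFOs (0010000) and sockets (0140000) also now test as distinct types. The path used is hypothetical.]

#include <sys/types.h>
#include <sys/stat.h>
#include <stdio.h>

int main(void)
{
    struct stat st;

    if (stat("/tmp", &st) != 0)     /* hypothetical path */
        return 1;
    /* Safe with the parenthesized macro; misparsed by the old one. */
    if (S_ISDIR(st.st_mode + 0))
        printf("/tmp is a directory\n");
    printf("fifo? %d  socket? %d\n",
        S_ISFIFO(st.st_mode), S_ISSOCK(st.st_mode));
    return 0;
}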
* * @APPLE_LICENSE_HEADER_START@ * @@ -139,7 +139,9 @@ #define SYS_getpriority 100 /* 101 is old send */ /* 102 is old recv */ +#ifndef __ppc__ #define SYS_sigreturn 103 +#endif #define SYS_bind 104 #define SYS_setsockopt 105 #define SYS_listen 106 @@ -220,7 +222,9 @@ #define SYS_setgid 181 #define SYS_setegid 182 #define SYS_seteuid 183 - /* 184 is unused */ +#ifdef __ppc__ +#define SYS_sigreturn 184 +#endif /* 185 is unused */ /* 186 is unused */ /* 187 is unused */ @@ -251,7 +255,9 @@ #define SYS_ATPsndrsp 210 #define SYS_ATPgetreq 211 #define SYS_ATPgetrsp 212 - /* 213-215 are reserved for AppleTalk */ + /* 213 is reserved for AppleTalk */ +#define SYS_kqueue_from_portset_np 214 +#define SYS_kqueue_portset_np 215 #define SYS_mkcomplex 216 #define SYS_statv 217 #define SYS_lstatv 218 @@ -264,7 +270,12 @@ #define SYS_searchfs 225 /* 226 - 230 are reserved for HFS expansion */ - /* 231 - 249 are reserved */ + /* 231 - 241 are reserved */ +#define SYS_fsctl 242 + /* 243 - 246 are reserved */ +#define SYS_nfsclnt 247 /* from freebsd, for lockd */ +#define SYS_fhopen 248 /* from freebsd, for lockd */ + /* 249 is reserved */ #define SYS_minherit 250 #define SYS_semsys 251 #define SYS_msgsys 252 @@ -298,7 +309,16 @@ #define SYS_new_system_shared_regions 298 /* 299 - 309 are reserved */ #define SYS_getsid 310 - /* 311 - 323 are reserved */ + /* 311 - 312 are reserved */ +#define SYS_aio_fsync 313 +#define SYS_aio_return 314 +#define SYS_aio_suspend 315 +#define SYS_aio_cancel 316 +#define SYS_aio_error 317 +#define SYS_aio_read 318 +#define SYS_aio_write 319 +#define SYS_lio_listio 320 + /* 321 - 323 are reserved */ #define SYS_mlockall 324 #define SYS_munlockall 325 /* 326 is reserved */ @@ -306,5 +326,19 @@ #define SYS___pthread_kill 328 #define SYS_pthread_sigmask 329 #define SYS_sigwait 330 + +#define SYS_audit 350 /* submit user space audit records */ +#define SYS_auditon 351 /* audit subsystem control */ +#define SYS_auditsvc 352 /* audit file control */ +#define SYS_getauid 353 +#define SYS_setauid 354 +#define SYS_getaudit 355 +#define SYS_setaudit 356 +#define SYS_getaudit_addr 357 +#define SYS_setaudit_addr 358 +#define SYS_auditctl 359 /* audit control */ + +#define SYS_kqueue 362 +#define SYS_kevent 363 #endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/sys/sysctl.h b/bsd/sys/sysctl.h index 8af90d6ed..4f18cd64a 100644 --- a/bsd/sys/sysctl.h +++ b/bsd/sys/sysctl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -212,6 +212,11 @@ void sysctl_unregister_oid(struct sysctl_oid *oidp); SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \ ptr, val, sysctl_handle_int, "I", descr) +/* Oid for an unsigned int. If ptr is NULL, val is returned. */ +#define SYSCTL_UINT(parent, nbr, name, access, ptr, val, descr) \ + SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \ + ptr, val, sysctl_handle_int, "IU", descr) + /* Oid for a long. The pointer must be non NULL. 
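[Illustrative kernel-side sketch, not part of the patch: publishing an unsigned tunable through the new SYSCTL_UINT macro added in the sysctl.h hunk above. The oid name and variable are hypothetical, and the example assumes the FreeBSD-style OID_AUTO and _kern declarations this header carries; an oid that is not linked into the static sysctl set would still need to be registered with sysctl_register_oid().]

#include <sys/types.h>
#include <sys/sysctl.h>

static u_int example_limit = 16;    /* hypothetical tunable */
SYSCTL_UINT(_kern, OID_AUTO, example_limit, CTLFLAG_RW,
    &example_limit, 0, "hypothetical unsigned tunable");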
*/ #define SYSCTL_LONG(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \ @@ -298,7 +303,7 @@ void sysctl_unregister_oid(struct sysctl_oid *oidp); #define KERN_NISDOMAINNAME 22 /* string: YP domain name */ #define KERN_DOMAINNAME KERN_NISDOMAINNAME #define KERN_MAXPARTITIONS 23 /* int: number of partitions/disk */ -#define KERN_KDEBUG 24 /* int: kernel trace points */ +#define KERN_KDEBUG 24 /* int: kernel trace points */ #define KERN_UPDATEINTERVAL 25 /* int: update process sleep time */ #define KERN_OSRELDATE 26 /* int: OS release date */ #define KERN_NTP_PLL 27 /* node: NTP PLL control */ @@ -306,8 +311,8 @@ void sysctl_unregister_oid(struct sysctl_oid *oidp); #define KERN_MAXFILESPERPROC 29 /* int: max open files per proc */ #define KERN_MAXPROCPERUID 30 /* int: max processes per uid */ #define KERN_DUMPDEV 31 /* dev_t: device to dump on */ -#define KERN_IPC 32 /* node: anything related to IPC */ -#define KERN_DUMMY 33 /* unused */ +#define KERN_IPC 32 /* node: anything related to IPC */ +#define KERN_DUMMY 33 /* unused */ #define KERN_PS_STRINGS 34 /* int: address of PS_STRINGS */ #define KERN_USRSTACK 35 /* int: address of USRSTACK */ #define KERN_LOGSIGEXIT 36 /* int: do we log sigexit procs? */ @@ -316,8 +321,17 @@ void sysctl_unregister_oid(struct sysctl_oid *oidp); #define KERN_PCSAMPLES 39 /* node: pc sampling */ #define KERN_NETBOOT 40 /* int: are we netbooted? 1=yes,0=no */ #define KERN_PANICINFO 41 /* node: panic UI information */ -#define KERN_SYSV 42 /* node: panic UI information */ -#define KERN_MAXID 43 /* number of valid kern ids */ +#define KERN_SYSV 42 /* node: panic UI information */ +#define KERN_AFFINITY 43 /* xxx */ +#define KERN_CLASSIC 44 /* xxx */ +#define KERN_CLASSICHANDLER 45 /* xxx */ +#define KERN_AIOMAX 46 /* int: max aio requests */ +#define KERN_AIOPROCMAX 47 /* int: max aio requests per process */ +#define KERN_AIOTHREADS 48 /* int: max aio worker threads */ +#ifdef __APPLE_API_UNSTABLE +#define KERN_PROCARGS2 49 /* number of valid kern ids */ +#endif /* __APPLE_API_UNSTABLE */ +#define KERN_MAXID 50 /* number of valid kern ids */ /* KERN_KDEBUG types */ @@ -331,7 +345,7 @@ void sysctl_unregister_oid(struct sysctl_oid *oidp); #define KERN_KDSETREG 8 #define KERN_KDGETREG 9 #define KERN_KDREADTR 10 -#define KERN_KDPIDTR 11 +#define KERN_KDPIDTR 11 #define KERN_KDTHRMAP 12 /* Don't use 13 as it is overloaded with KERN_VNODE */ #define KERN_KDPIDEX 14 @@ -361,6 +375,11 @@ void sysctl_unregister_oid(struct sysctl_oid *oidp); #define KSYSV_SHMMNI 3 /* int: max number of shared memory identifiers */ #define KSYSV_SHMSEG 4 /* int: max shared memory segments per process */ #define KSYSV_SHMALL 5 /* int: max amount of shared memory (pages) */ +#define KSYSV_SEMMNI 6 /* int: max num of semaphore identifiers */ +#define KSYSV_SEMMNS 7 /* int: max num of semaphores in system */ +#define KSYSV_SEMMNU 8 /* int: max num of undo structures in system */ +#define KSYSV_SEMMSL 9 /* int: max num of semaphores per id */ +#define KSYSV_SEMUNE 10 /* int: max num of undo entries per process */ #define CTL_KERN_NAMES { \ @@ -406,7 +425,14 @@ void sysctl_unregister_oid(struct sysctl_oid *oidp); { "pcsamples",CTLTYPE_STRUCT },\ { "netboot", CTLTYPE_INT }, \ { "panicinfo", CTLTYPE_NODE }, \ - { "sysv", CTLTYPE_NODE } \ + { "sysv", CTLTYPE_NODE }, \ + { "dummy", CTLTYPE_INT }, \ + { "dummy", CTLTYPE_INT }, \ + { "dummy", CTLTYPE_INT }, \ + { "aiomax", CTLTYPE_INT }, \ + { "aioprocmax", CTLTYPE_INT }, \ + { "aiothreads", CTLTYPE_INT }, \ + { 
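/*
 * Sketch of reading one of the new kern ids from userspace with sysctl(3);
 * assumes the KERN_AIOMAX id defined above is wired to the aio limit.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void)
{
	int mib[2] = { CTL_KERN, KERN_AIOMAX };
	int aiomax;
	size_t len = sizeof(aiomax);

	if (sysctl(mib, 2, &aiomax, &len, NULL, 0) == -1) {
		perror("sysctl kern.aiomax");
		return 1;
	}
	printf("max outstanding aio requests: %d\n", aiomax);
	return 0;
}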
"procargs2",CTLTYPE_STRUCT } \ } /* @@ -749,7 +775,6 @@ int sysctl_rdquad __P((void *, size_t *, void *, quad_t)); int sysctl_string __P((void *, size_t *, void *, size_t, char *, int)); int sysctl_rdstring __P((void *, size_t *, void *, char *)); int sysctl_rdstruct __P((void *, size_t *, void *, void *, int)); -void fill_eproc __P((struct proc *, struct eproc *)); #endif /* __APPLE_API_UNSTABLE */ #else /* !KERNEL */ @@ -758,6 +783,7 @@ void fill_eproc __P((struct proc *, struct eproc *)); __BEGIN_DECLS int sysctl __P((int *, u_int, void *, size_t *, void *, size_t)); int sysctlbyname __P((const char *, void *, size_t *, void *, size_t)); +int sysctlnametomib __P((const char *, int *, size_t *)); __END_DECLS #endif /* KERNEL */ #endif /* !_SYS_SYSCTL_H_ */ diff --git a/bsd/sys/syslimits.h b/bsd/sys/syslimits.h index 54f538e54..607df163d 100644 --- a/bsd/sys/syslimits.h +++ b/bsd/sys/syslimits.h @@ -63,7 +63,11 @@ #define _SYS_SYSLIMITS_H_ #if !defined(_ANSI_SOURCE) -#define ARG_MAX (64 * 1024) /* max bytes for an exec function */ +/* + * Note: CHILD_MAX *must* be less than hard_maxproc, which is set at + * compile time; you *cannot* set it higher than the hard limit!! + */ +#define ARG_MAX (256 * 1024) /* max bytes for an exec function */ #define CHILD_MAX 100 /* max simultaneous processes */ #define GID_MAX 2147483647U /* max value for a gid_t (2^31-2) */ #define LINK_MAX 32767 /* max file link count */ diff --git a/bsd/sys/syslog.h b/bsd/sys/syslog.h index 1f8032e3a..0a1901d1d 100644 --- a/bsd/sys/syslog.h +++ b/bsd/sys/syslog.h @@ -129,6 +129,7 @@ CODE prioritynames[] = { #define LOG_FTP (11<<3) /* ftp daemon */ #define LOG_NETINFO (12<<3) /* NetInfo */ #define LOG_REMOTEAUTH (13<<3) /* remote authentication/authorization */ +#define LOG_INSTALL (14<<3) /* installer subsystem */ /* other codes through 15 reserved for system use */ #define LOG_LOCAL0 (16<<3) /* reserved for local use */ @@ -152,6 +153,7 @@ CODE facilitynames[] = { "cron", LOG_CRON, "daemon", LOG_DAEMON, "ftp", LOG_FTP, + "install", LOG_INSTALL, "kern", LOG_KERN, "lpr", LOG_LPR, "mail", LOG_MAIL, diff --git a/bsd/sys/time.h b/bsd/sys/time.h index cc790bf23..fcc2f07d8 100644 --- a/bsd/sys/time.h +++ b/bsd/sys/time.h @@ -133,6 +133,8 @@ struct timezone { } \ } while (0) +#define timevalcmp(l, r, cmp) timercmp(l, r, cmp) /* freebsd */ + /* * Names of the interval timers, and structure * defining a timer setting. diff --git a/bsd/sys/types.h b/bsd/sys/types.h index 44328bf2b..50c8ec276 100644 --- a/bsd/sys/types.h +++ b/bsd/sys/types.h @@ -107,6 +107,7 @@ typedef quad_t rlim_t; /* resource limit */ typedef int32_t segsz_t; /* segment size */ typedef int32_t swblk_t; /* swap offset */ typedef u_int32_t uid_t; /* user id */ +typedef u_int32_t useconds_t; /* microseconds (unsigned) */ #ifndef _POSIX_SOURCE /* Major, minor numbers, dev_t's. 
*/ diff --git a/bsd/sys/ubc.h b/bsd/sys/ubc.h index aafad6f45..eed260077 100644 --- a/bsd/sys/ubc.h +++ b/bsd/sys/ubc.h @@ -112,7 +112,7 @@ int ubc_release_named __P((struct vnode *)); int ubc_invalidate __P((struct vnode *, off_t, size_t)); int ubc_isinuse __P((struct vnode *, int)); -int ubc_page_op __P((struct vnode *, off_t, int, vm_offset_t *, int *)); +int ubc_page_op __P((struct vnode *, off_t, int, ppnum_t *, int *)); /* cluster IO routines */ int cluster_read __P((struct vnode *, struct uio *, off_t, int, int)); @@ -120,11 +120,14 @@ int advisory_read __P((struct vnode *, off_t, off_t, int, int)); int cluster_write __P((struct vnode *, struct uio*, off_t, off_t, off_t, off_t, int, int)); int cluster_push __P((struct vnode *)); +int cluster_release __P((struct vnode *)); int cluster_pageout __P((struct vnode *, upl_t, vm_offset_t, off_t, int, off_t, int, int)); int cluster_pagein __P((struct vnode *, upl_t, vm_offset_t, off_t, int, off_t, int, int)); int cluster_bp __P((struct buf *)); +int cluster_copy_upl_data __P((struct uio *, upl_t, int, int)); +int cluster_copy_ubc_data __P((struct vnode *, struct uio *, int *, int)); /* UPL routines */ int ubc_create_upl __P((struct vnode *, off_t, long, upl_t *, @@ -164,6 +167,7 @@ __END_DECLS /* Flags for ubc_getobject() */ #define UBC_FLAGS_NONE 0x0000 #define UBC_HOLDOBJECT 0x0001 +#define UBC_FOR_PAGEOUT 0x0002 #endif /* __APPLE_API_EVOLVING */ diff --git a/bsd/sys/ucontext.h b/bsd/sys/ucontext.h index 114c476e5..993d22487 100644 --- a/bsd/sys/ucontext.h +++ b/bsd/sys/ucontext.h @@ -37,6 +37,18 @@ struct ucontext { mcontext_t uc_mcontext; /* machine specific context */ }; + typedef struct ucontext ucontext_t; +struct ucontext64 { + int uc_onstack; + sigset_t uc_sigmask; /* signal mask used by this context */ + stack_t uc_stack; /* stack used by this context */ + struct ucontext *uc_link; /* pointer to resuming context */ + size_t uc_mcsize; /* size of the machine context passed in */ + mcontext64_t uc_mcontext64; /* machine specific context */ +}; + +typedef struct ucontext64 ucontext64_t; + #endif /* _SYS_UCONTEXT_H_ */ diff --git a/bsd/sys/ucred.h b/bsd/sys/ucred.h index 23e739a7f..9f044224e 100644 --- a/bsd/sys/ucred.h +++ b/bsd/sys/ucred.h @@ -74,6 +74,17 @@ struct ucred { short cr_ngroups; /* number of groups */ gid_t cr_groups[NGROUPS]; /* groups */ }; +/* + * This is the external representation of struct ucred. + */ +struct xucred { + u_int cr_version; /* structure layout version */ + uid_t cr_uid; /* effective user id */ + short cr_ngroups; /* number of groups */ + gid_t cr_groups[NGROUPS]; /* groups */ +}; +#define XUCRED_VERSION 0 + #define cr_gid cr_groups[0] #define NOCRED ((struct ucred *)0) /* no credential available */ #define FSCRED ((struct ucred *)-1) /* filesystem credential */ @@ -89,7 +100,9 @@ struct ucred *crcopy __P((struct ucred *cr)); struct ucred *crdup __P((struct ucred *cr)); void crfree __P((struct ucred *cr)); struct ucred *crget __P((void)); +int crcmp __P((struct ucred *cr1, struct ucred *cr2)); int suser __P((struct ucred *cred, u_short *acflag)); +void cru2x __P((struct ucred *cr, struct xucred *xcr)); #endif /* KERNEL */ #endif /* __APPLE_API_UNSTABLE */ diff --git a/bsd/sys/uio.h b/bsd/sys/uio.h index 11b8e75a6..a2bf7882c 100644 --- a/bsd/sys/uio.h +++ b/bsd/sys/uio.h @@ -74,10 +74,11 @@ enum uio_rw { UIO_READ, UIO_WRITE }; /* Segment flag values. 
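/*
 * cru2x() declared above externalizes a kernel ucred into the fixed-layout,
 * versioned struct xucred, dropping what callers must not see (the
 * reference count).  A standalone model of the conversion; the _m names
 * are illustrative stand-ins, not the kernel's:
 */
#include <stdio.h>
#include <string.h>

#define NGROUPS_M 16
#define XUCRED_VERSION_M 0

struct ucred_m {			/* internal, refcounted form */
	unsigned int cr_ref;
	unsigned int cr_uid;
	short        cr_ngroups;
	unsigned int cr_groups[NGROUPS_M];
};

struct xucred_m {			/* external, versioned form */
	unsigned int cr_version;
	unsigned int cr_uid;
	short        cr_ngroups;
	unsigned int cr_groups[NGROUPS_M];
};

static void cru2x_m(const struct ucred_m *cr, struct xucred_m *xcr)
{
	memset(xcr, 0, sizeof(*xcr));	/* no kernel state leaks out */
	xcr->cr_version = XUCRED_VERSION_M;
	xcr->cr_uid = cr->cr_uid;
	xcr->cr_ngroups = cr->cr_ngroups;
	memcpy(xcr->cr_groups, cr->cr_groups, sizeof(xcr->cr_groups));
}

int main(void)
{
	struct ucred_m cr = { 2, 501, 1, { 20 } };
	struct xucred_m x;

	cru2x_m(&cr, &x);
	printf("version %u, uid %u, gid %u\n",
	    x.cr_version, x.cr_uid, x.cr_groups[0]);
	return 0;
}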
*/ enum uio_seg { - UIO_USERSPACE, /* from user data space */ - UIO_USERISPACE, /* from user I space */ - UIO_SYSSPACE, /* from system space */ - UIO_PHYS_USERSPACE /* kernel address is physical, to/from user data space */ + UIO_USERSPACE, /* kernel address is virtual, to/from user virtual */ + UIO_USERISPACE, /* kernel address is virtual, to/from user virtual */ + UIO_SYSSPACE, /* kernel address is virtual, to/from system virtual */ + UIO_PHYS_USERSPACE, /* kernel address is physical, to/from user virtual */ + UIO_PHYS_SYSSPACE, /* kernel address is physical, to/from system virtual */ }; #ifdef KERNEL @@ -98,6 +99,7 @@ struct uio { #define UIO_SMALLIOV 8 /* 8 on stack, else malloc */ extern int uiomove __P((caddr_t cp, int n, struct uio *uio)); +extern int uiomove64 __P((unsigned long long cp, int n, struct uio *uio)); extern int ureadc __P((int c, struct uio *uio)); extern int uwritec __P((struct uio *uio)); diff --git a/bsd/sys/unistd.h b/bsd/sys/unistd.h index 00199aa57..ed404148d 100644 --- a/bsd/sys/unistd.h +++ b/bsd/sys/unistd.h @@ -131,36 +131,10 @@ #define _PC_CASE_PRESERVING 12 #endif -/* configurable system variables */ -#define _SC_ARG_MAX 1 -#define _SC_CHILD_MAX 2 -#define _SC_CLK_TCK 3 -#define _SC_NGROUPS_MAX 4 -#define _SC_OPEN_MAX 5 -#define _SC_JOB_CONTROL 6 -#define _SC_SAVED_IDS 7 -#define _SC_VERSION 8 -#define _SC_BC_BASE_MAX 9 -#define _SC_BC_DIM_MAX 10 -#define _SC_BC_SCALE_MAX 11 -#define _SC_BC_STRING_MAX 12 -#define _SC_COLL_WEIGHTS_MAX 13 -#define _SC_EXPR_NEST_MAX 14 -#define _SC_LINE_MAX 15 -#define _SC_RE_DUP_MAX 16 -#define _SC_2_VERSION 17 -#define _SC_2_C_BIND 18 -#define _SC_2_C_DEV 19 -#define _SC_2_CHAR_TERM 20 -#define _SC_2_FORT_DEV 21 -#define _SC_2_FORT_RUN 22 -#define _SC_2_LOCALEDEF 23 -#define _SC_2_SW_DEV 24 -#define _SC_2_UPE 25 -#define _SC_STREAM_MAX 26 -#define _SC_TZNAME_MAX 27 - /* configurable system strings */ #define _CS_PATH 1 +/* async IO support */ +#define _POSIX_ASYNCHRONOUS_IO + #endif /* !_SYS_UNISTD_H_ */ diff --git a/bsd/sys/user.h b/bsd/sys/user.h index 099080040..8ab23ffc6 100644 --- a/bsd/sys/user.h +++ b/bsd/sys/user.h @@ -78,6 +78,7 @@ #ifdef KERNEL #ifdef __APPLE_API_PRIVATE +struct nlminfo; /* * Per-thread U area. */ @@ -92,7 +93,7 @@ struct uthread { /* thread exception handling */ int uu_code; /* ``code'' to trap */ char uu_cursig; /* p_cursig for exc. */ - int XXX_dummy; /* NOT USED LEFT FOR COMPATIBILITY. 
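/*
 * With SYS_aio_* wired in and _POSIX_ASYNCHRONOUS_IO advertised in
 * unistd.h above, userspace can use the POSIX aio calls.  Minimal sketch
 * (assumes the <aio.h> shipped with this release):
 */
#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	struct aiocb cb;
	const struct aiocb *list[1];
	int fd = open("/etc/hosts", O_RDONLY);

	if (fd == -1) {
		perror("open");
		return 1;
	}
	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_offset = 0;

	if (aio_read(&cb) == -1) {		/* queue the request and return */
		perror("aio_read");
		return 1;
	}
	list[0] = &cb;
	while (aio_error(&cb) == EINPROGRESS)	/* wait until it completes */
		aio_suspend(list, 1, NULL);
	printf("read %ld bytes asynchronously\n", (long)aio_return(&cb));
	close(fd);
	return 0;
}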
*/ + struct nlminfo *uu_nlminfo; /* for rpc.lockd */ /* support for syscalls which use continuations */ union { struct _select { @@ -134,6 +135,9 @@ struct uthread { sigset_t uu_vforkmask; /* saved signal mask during vfork */ TAILQ_ENTRY(uthread) uu_list; /* List of uthreads in proc */ + + struct kaudit_record *uu_ar; /* audit record */ + struct task* uu_aio_task; /* target task for async io */ }; typedef struct uthread * uthread_t; diff --git a/bsd/sys/utfconv.h b/bsd/sys/utfconv.h index 9c0a4af89..57f7725c4 100644 --- a/bsd/sys/utfconv.h +++ b/bsd/sys/utfconv.h @@ -27,6 +27,7 @@ #define _SYS_UTFCONV_H_ #include +#include #ifdef KERNEL #ifdef __APPLE_API_UNSTABLE diff --git a/bsd/sys/vnioctl.h b/bsd/sys/vnioctl.h index 8c37314c7..e3a3729a6 100644 --- a/bsd/sys/vnioctl.h +++ b/bsd/sys/vnioctl.h @@ -57,7 +57,7 @@ #define _PATH_VNTAB "/etc/vntab" /* default config file */ typedef enum { - vncontrol_readwrite_io_e = 0, + vncontrol_readwrite_io_e = 0 } vncontrol_t; struct vn_ioctl { diff --git a/bsd/sys/vnode.h b/bsd/sys/vnode.h index 0ddf130b7..a5ad2a329 100644 --- a/bsd/sys/vnode.h +++ b/bsd/sys/vnode.h @@ -143,11 +143,12 @@ struct vnode { struct socket *vu_socket; /* unix ipc (VSOCK) */ struct specinfo *vu_specinfo; /* device (VCHR, VBLK) */ struct fifoinfo *vu_fifoinfo; /* fifo (VFIFO) */ + char *vu_name; /* name (only for VREG) */ } v_un; struct ubc_info *v_ubcinfo; /* valid for (VREG) */ struct nqlease *v_lease; /* Soft reference to lease */ - daddr_t v_lastw; /* last write (write cluster) */ - daddr_t v_cstart; /* start block of cluster */ + void *v_scmap; /* pointer to sparse cluster map */ + int v_scdirty; /* number of dirty pages in the sparse cluster map */ daddr_t v_ciosiz; /* real size of I/O for cluster */ int v_clen; /* length of current cluster */ int v_ralen; /* Read-ahead length */ @@ -169,6 +170,11 @@ struct vnode { #define v_specinfo v_un.vu_specinfo #define v_fifoinfo v_un.vu_fifoinfo +// NOTE: Do not use these macros. They are for vfs internal use only. +#define VNAME(vp) ((char *)((vp)->v_type == VREG ? (vp)->v_un.vu_name : (vp)->v_scmap)) +#define VPARENT(vp) ((struct vnode *)((vp)->v_type == VREG ? (vp)->v_un1.v_cl.v_pad : (vp)->v_scdirty)) + + /* * Vnode flags. */ @@ -198,7 +204,9 @@ struct vnode { #define VTHROTTLED 0x400000 /* writes or pageouts have been throttled */ /* wakeup tasks waiting when count falls below threshold */ #define VNOFLUSH 0x800000 /* don't vflush() if SKIPSYSTEM */ - +#define VDELETED 0x1000000 /* this vnode is being deleted */ +#define VFULLFSYNC 0x2000000 /* ask the drive to write the data to the media */ +#define VHASBEENPAGED 0x4000000 /* vnode has been recently paged to */ /* * Vnode attributes. 
A field value of VNOVAL represents a field whose value @@ -505,6 +513,10 @@ int vn_close __P((struct vnode *vp, int flags, struct ucred *cred, struct proc *p)); int vn_lock __P((struct vnode *vp, int flags, struct proc *p)); int vn_open __P((struct nameidata *ndp, int fmode, int cmode)); +#ifndef __APPLE_API_PRIVATE +__private_extern__ int + vn_open_modflags __P((struct nameidata *ndp, int *fmode, int cmode)); +#endif /* __APPLE_API_PRIVATE */ int vn_rdwr __P((enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset, enum uio_seg segflg, int ioflg, struct ucred *cred, int *aresid, struct proc *p)); @@ -520,7 +532,7 @@ void vrele __P((struct vnode *vp)); int vaccess __P((mode_t file_mode, uid_t uid, gid_t gid, mode_t acc_mode, struct ucred *cred)); int getvnode __P((struct proc *p, int fd, struct file **fpp)); -#endif __APPLE_API_EVOLVING +#endif /* __APPLE_API_EVOLVING */ #endif /* KERNEL */ diff --git a/bsd/sys/vnode_if.h b/bsd/sys/vnode_if.h index 4231e640a..fa07ece18 100644 --- a/bsd/sys/vnode_if.h +++ b/bsd/sys/vnode_if.h @@ -482,6 +482,42 @@ static __inline int _VOP_EXCHANGE(struct vnode *fvp, struct vnode *tvp, struct u return (VCALL(fvp, VOFFSET(vop_exchange), &a)); } +struct vop_kqfilt_add_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct knote *a_kn; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_kqfilt_add_desc; +#define VOP_KQFILT_ADD(vp, kn, p) _VOP_KQFILT_ADD(vp, kn, p) +static __inline int _VOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, struct proc *p) +{ + struct vop_kqfilt_add_args a; + a.a_desc = VDESC(vop_kqfilt_add); + a.a_vp = vp; + a.a_kn = kn; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_kqfilt_add), &a)); +} + +struct vop_kqfilt_remove_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + uintptr_t a_ident; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_kqfilt_remove_desc; +#define VOP_KQFILT_REMOVE(vp, ident, p) _VOP_KQFILT_REMOVE(vp, ident, p) +static __inline int _VOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, struct proc *p) +{ + struct vop_kqfilt_remove_args a; + a.a_desc = VDESC(vop_kqfilt_remove); + a.a_vp = vp; + a.a_ident = ident; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_kqfilt_remove), &a)); +} + struct vop_revoke_args { struct vnodeop_desc *a_desc; struct vnode *a_vp; diff --git a/bsd/ufs/ffs/ffs_alloc.c b/bsd/ufs/ffs/ffs_alloc.c index 7671afb01..6b4eb93c2 100644 --- a/bsd/ufs/ffs/ffs_alloc.c +++ b/bsd/ufs/ffs/ffs_alloc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
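/*
 * The VOP_KQFILT_* wrappers above follow the standard vnode_if pattern:
 * arguments are packed into a struct tagged with a descriptor, then the
 * call indirects through the vnode's per-filesystem operation vector
 * (VCALL).  A reduced standalone model of that dispatch; every name here
 * is illustrative:
 */
#include <stdio.h>

struct vnode_m;				/* toy vnode */

struct vop_frob_args {			/* the packed-argument struct */
	struct vnode_m *a_vp;
	int a_value;
};

typedef int (*vop_t)(void *);

struct vnode_m {
	vop_t *v_op;			/* per-filesystem operation vector */
};

enum { VOFF_FROB = 0, VOFF_MAX };	/* descriptor offsets into the vector */

static int ufs_frob(void *v)		/* one filesystem's implementation */
{
	struct vop_frob_args *ap = v;

	printf("frob(%d)\n", ap->a_value);
	return 0;
}

static vop_t ufs_vector[VOFF_MAX] = { ufs_frob };

static int VOP_FROB(struct vnode_m *vp, int value)
{
	struct vop_frob_args a;		/* pack, as _VOP_KQFILT_ADD() does */

	a.a_vp = vp;
	a.a_value = value;
	return vp->v_op[VOFF_FROB](&a);	/* the VCALL() step */
}

int main(void)
{
	struct vnode_m vn = { ufs_vector };

	return VOP_FROB(&vn, 42);
}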
* * @APPLE_LICENSE_HEADER_START@ * @@ -89,7 +89,7 @@ static ufs_daddr_t ffs_alloccg __P((struct inode *, int, ufs_daddr_t, int)); static ufs_daddr_t ffs_alloccgblk __P((struct fs *, struct cg *, ufs_daddr_t)); static ufs_daddr_t ffs_clusteralloc __P((struct inode *, int, ufs_daddr_t, int)); -static ino_t ffs_dirpref __P((struct fs *)); +static ino_t ffs_dirpref __P((struct inode *)); static ufs_daddr_t ffs_fragextend __P((struct inode *, int, long, int, int)); static void ffs_fserr __P((struct fs *, u_int, char *)); static u_long ffs_hashalloc @@ -243,7 +243,7 @@ ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp) ip->i_flag |= IN_CHANGE | IN_UPDATE; allocbuf(bp, nsize); bp->b_flags |= B_DONE; - bzero((char *)bp->b_data + osize, (u_int)nsize - osize); + bzero((char *)bp->b_data + osize, (u_int)bp->b_bufsize - osize); *bpp = bp; return (0); } @@ -307,7 +307,7 @@ ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp) ip->i_flag |= IN_CHANGE | IN_UPDATE; allocbuf(bp, nsize); bp->b_flags |= B_DONE; - bzero((char *)bp->b_data + osize, (u_int)nsize - osize); + bzero((char *)bp->b_data + osize, (u_int)bp->b_bufsize - osize); *bpp = bp; return (0); } @@ -392,16 +392,27 @@ ffs_valloc(ap) goto noinodes; if ((mode & IFMT) == IFDIR) - ipref = ffs_dirpref(fs); + ipref = ffs_dirpref(pip); else ipref = pip->i_number; if (ipref >= fs->fs_ncg * fs->fs_ipg) ipref = 0; cg = ino_to_cg(fs, ipref); + /* + * Track the number of dirs created one after another + * in a cg without intervening files. + */ + if ((mode & IFMT) == IFDIR) { + if (fs->fs_contigdirs[cg] < 255) + fs->fs_contigdirs[cg]++; + } else { + if (fs->fs_contigdirs[cg] > 0) + fs->fs_contigdirs[cg]--; + } ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_nodealloccg); if (ino == 0) goto noinodes; - error = VFS_VGET(pvp->v_mount, ino, ap->a_vpp); + error = VFS_VGET(pvp->v_mount, (void *)ino, ap->a_vpp); if (error) { VOP_VFREE(pvp, ino, mode); return (error); @@ -432,28 +443,112 @@ noinodes: } /* - * Find a cylinder to place a directory. + * Find a cylinder group to place a directory. * - * The policy implemented by this algorithm is to select from - * among those cylinder groups with above the average number of - * free inodes, the one with the smallest number of directories. + * The policy implemented by this algorithm is to allocate a + * directory inode in the same cylinder group as its parent + * directory, but also to reserve space for its files inodes + * and data. Restrict the number of directories which may be + * allocated one after another in the same cylinder group + * without intervening allocation of files. */ static ino_t -ffs_dirpref(fs) - register struct fs *fs; +ffs_dirpref(pip) + struct inode *pip; { - int cg, minndir, mincg, avgifree; + register struct fs *fs; + int cg, prefcg, dirsize, cgsize; + int avgifree, avgbfree, avgndir, curdirsize; + int minifree, minbfree, maxndir; + int mincg, minndir; + int maxcontigdirs; + fs = pip->i_fs; avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; - minndir = fs->fs_ipg; - mincg = 0; - for (cg = 0; cg < fs->fs_ncg; cg++) - if (fs->fs_cs(fs, cg).cs_ndir < minndir && - fs->fs_cs(fs, cg).cs_nifree >= avgifree) { - mincg = cg; - minndir = fs->fs_cs(fs, cg).cs_ndir; + avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; + avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg; + + /* + * Force allocation in another cg if creating a first level dir. 
+ */ + if (ITOV(pip)->v_flag & VROOT) { +#ifdef __APPLE__ + prefcg = random() % fs->fs_ncg; +#else + prefcg = arc4random() % fs->fs_ncg; +#endif + mincg = prefcg; + minndir = fs->fs_ipg; + for (cg = prefcg; cg < fs->fs_ncg; cg++) + if (fs->fs_cs(fs, cg).cs_ndir < minndir && + fs->fs_cs(fs, cg).cs_nifree >= avgifree && + fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { + mincg = cg; + minndir = fs->fs_cs(fs, cg).cs_ndir; + } + for (cg = 0; cg < prefcg; cg++) + if (fs->fs_cs(fs, cg).cs_ndir < minndir && + fs->fs_cs(fs, cg).cs_nifree >= avgifree && + fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { + mincg = cg; + minndir = fs->fs_cs(fs, cg).cs_ndir; + } + return ((ino_t)(fs->fs_ipg * mincg)); + } + + /* + * Count various limits which used for + * optimal allocation of a directory inode. + */ + maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg); + minifree = avgifree - fs->fs_ipg / 4; + if (minifree < 0) + minifree = 0; + minbfree = avgbfree - fs->fs_fpg / fs->fs_frag / 4; + if (minbfree < 0) + minbfree = 0; + cgsize = fs->fs_fsize * fs->fs_fpg; + dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir; + curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0; + if (dirsize < curdirsize) + dirsize = curdirsize; + maxcontigdirs = min(cgsize / dirsize, 255); + if (fs->fs_avgfpdir > 0) + maxcontigdirs = min(maxcontigdirs, + fs->fs_ipg / fs->fs_avgfpdir); + if (maxcontigdirs == 0) + maxcontigdirs = 1; + + /* + * Limit number of dirs in one cg and reserve space for + * regular files, but only if we have no deficit in + * inodes or space. + */ + prefcg = ino_to_cg(fs, pip->i_number); + for (cg = prefcg; cg < fs->fs_ncg; cg++) + if (fs->fs_cs(fs, cg).cs_ndir < maxndir && + fs->fs_cs(fs, cg).cs_nifree >= minifree && + fs->fs_cs(fs, cg).cs_nbfree >= minbfree) { + if (fs->fs_contigdirs[cg] < maxcontigdirs) + return ((ino_t)(fs->fs_ipg * cg)); + } + for (cg = 0; cg < prefcg; cg++) + if (fs->fs_cs(fs, cg).cs_ndir < maxndir && + fs->fs_cs(fs, cg).cs_nifree >= minifree && + fs->fs_cs(fs, cg).cs_nbfree >= minbfree) { + if (fs->fs_contigdirs[cg] < maxcontigdirs) + return ((ino_t)(fs->fs_ipg * cg)); } - return ((ino_t)(fs->fs_ipg * mincg)); + /* + * This is a backstop when we have deficit in space. + */ + for (cg = prefcg; cg < fs->fs_ncg; cg++) + if (fs->fs_cs(fs, cg).cs_nifree >= avgifree) + return ((ino_t)(fs->fs_ipg * cg)); + for (cg = 0; cg < prefcg; cg++) + if (fs->fs_cs(fs, cg).cs_nifree >= avgifree) + break; + return ((ino_t)(fs->fs_ipg * cg)); } /* diff --git a/bsd/ufs/ffs/ffs_balloc.c b/bsd/ufs/ffs/ffs_balloc.c index bfd05cd9e..73fcc38e8 100644 --- a/bsd/ufs/ffs/ffs_balloc.c +++ b/bsd/ufs/ffs/ffs_balloc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
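/*
 * The limits computed in ffs_dirpref() above reduce to plain arithmetic on
 * superblock averages.  A standalone model using the fs.h defaults this
 * change introduces (AVFILESIZ 16384, AFPDIR 64); the geometry values are
 * made up for illustration:
 */
#include <stdio.h>

static int min_i(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int fsize = 1024, fpg = 65536;		/* frag size, frags per cg */
	int bsize = 8192, ipg = 16384;		/* block size, inodes per cg */
	int avgfilesize = 16384, avgfpdir = 64;	/* AVFILESIZ, AFPDIR */
	int avgbfree = 2048, avgndir = 100;	/* per-cg averages */

	int cgsize = fsize * fpg;		/* bytes per cylinder group */
	int dirsize = avgfilesize * avgfpdir;	/* expected dir footprint */
	int curdirsize = avgndir ?
	    (cgsize - avgbfree * bsize) / avgndir : 0;
	if (dirsize < curdirsize)		/* trust observation over tuning */
		dirsize = curdirsize;
	int maxcontigdirs = min_i(cgsize / dirsize, 255);
	if (avgfpdir > 0)
		maxcontigdirs = min_i(maxcontigdirs, ipg / avgfpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	printf("dirsize %d, maxcontigdirs %d\n", dirsize, maxcontigdirs);
	return 0;
}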
* * @APPLE_LICENSE_HEADER_START@ * @@ -160,10 +160,10 @@ ffs_balloc(ip, lbn, size, cred, bpp, flags, blk_alloc) ip->i_flag |= IN_CHANGE | IN_UPDATE; if ((flags & B_SYNC) || (!alloc_buffer)) { if (!alloc_buffer) - SET(bp->b_flags, B_INVAL); + SET(bp->b_flags, B_NOCACHE); bwrite(bp); } else - bawrite(bp); + bdwrite(bp); /* note that bp is already released here */ } } @@ -212,9 +212,12 @@ ffs_balloc(ip, lbn, size, cred, bpp, flags, blk_alloc) return (error); ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno); ip->i_flag |= IN_CHANGE | IN_UPDATE; - if(!alloc_buffer) { - SET(bp->b_flags, B_INVAL); - bwrite(bp); + if(!alloc_buffer) { + SET(bp->b_flags, B_NOCACHE); + if (flags & B_SYNC) + bwrite(bp); + else + bdwrite(bp); } else *bpp = bp; return (0); @@ -254,7 +257,7 @@ ffs_balloc(ip, lbn, size, cred, bpp, flags, blk_alloc) return(error); #if DIAGNOSTIC if (num < 1) - panic ("ffs_balloc: ufs_bmaparray returned indirect block\n"); + panic ("ffs_balloc: ufs_bmaparray returned indirect block"); #endif /* * Fetch the first indirect block allocating if necessary. @@ -274,11 +277,14 @@ ffs_balloc(ip, lbn, size, cred, bpp, flags, blk_alloc) bp->b_blkno = fsbtodb(fs, nb); clrbuf(bp); /* - * Write synchronously so that indirect blocks - * never point at garbage. + * Write synchronously conditional on mount flags. */ - if (error = bwrite(bp)) + if ((vp)->v_mount->mnt_flag & MNT_ASYNC) { + error = 0; + bdwrite(bp); + } else if ((error = bwrite(bp)) != 0) { goto fail; + } allocib = &ip->i_ib[indirs[0].in_off]; *allocib = nb; ip->i_flag |= IN_CHANGE | IN_UPDATE; @@ -323,10 +329,12 @@ ffs_balloc(ip, lbn, size, cred, bpp, flags, blk_alloc) nbp->b_blkno = fsbtodb(fs, nb); clrbuf(nbp); /* - * Write synchronously so that indirect blocks - * never point at garbage. + * Write synchronously conditional on mount flags. */ - if (error = bwrite(nbp)) { + if ((vp)->v_mount->mnt_flag & MNT_ASYNC) { + error = 0; + bdwrite(nbp); + } else if (error = bwrite(nbp)) { brelse(bp); goto fail; } @@ -469,7 +477,7 @@ ffs_blkalloc(ip, lbn, size, cred, flags) fs = ip->i_fs; if(size > fs->fs_bsize) - panic("ffs_blkalloc: too large for allocation\n"); + panic("ffs_blkalloc: too large for allocation"); /* * If the next write will extend the file into a new block, @@ -478,7 +486,7 @@ ffs_blkalloc(ip, lbn, size, cred, flags) */ nb = lblkno(fs, ip->i_size); if (nb < NDADDR && nb < lbn) { - panic("ffs_blkalloc():cannot extend file: i_size %d, lbn %d\n", ip->i_size, lbn); + panic("ffs_blkalloc():cannot extend file: i_size %d, lbn %d", ip->i_size, lbn); } /* * The first NDADDR blocks are direct blocks @@ -496,8 +504,7 @@ ffs_blkalloc(ip, lbn, size, cred, flags) osize = fragroundup(fs, blkoff(fs, ip->i_size)); nsize = fragroundup(fs, size); if (nsize > osize) { - panic("ffs_allocblk: trying to extend - a fragment \n"); + panic("ffs_allocblk: trying to extend a fragment"); } return(0); } else { @@ -523,7 +530,7 @@ ffs_blkalloc(ip, lbn, size, cred, flags) return(error); if(num == 0) { - panic("ffs_blkalloc: file with direct blocks only\n"); + panic("ffs_blkalloc: file with direct blocks only"); } /* @@ -544,11 +551,14 @@ ffs_blkalloc(ip, lbn, size, cred, flags) bp->b_blkno = fsbtodb(fs, nb); clrbuf(bp); /* - * Write synchronously so that indirect blocks - * never point at garbage. + * Write synchronously conditional on mount flags. 
*/ - if (error = bwrite(bp)) + if ((vp)->v_mount->mnt_flag & MNT_ASYNC) { + error = 0; + bdwrite(bp); + } else if (error = bwrite(bp)) { goto fail; + } allocib = &ip->i_ib[indirs[0].in_off]; *allocib = nb; ip->i_flag |= IN_CHANGE | IN_UPDATE; @@ -593,10 +603,12 @@ ffs_blkalloc(ip, lbn, size, cred, flags) nbp->b_blkno = fsbtodb(fs, nb); clrbuf(nbp); /* - * Write synchronously so that indirect blocks - * never point at garbage. + * Write synchronously conditional on mount flags. */ - if (error = bwrite(nbp)) { + if ((vp)->v_mount->mnt_flag & MNT_ASYNC) { + error = 0; + bdwrite(nbp); + } else if (error = bwrite(nbp)) { brelse(bp); goto fail; } diff --git a/bsd/ufs/ffs/ffs_extern.h b/bsd/ufs/ffs/ffs_extern.h index 8ac5276f5..48d6be82e 100644 --- a/bsd/ufs/ffs/ffs_extern.h +++ b/bsd/ufs/ffs/ffs_extern.h @@ -133,7 +133,7 @@ int ffs_unmount __P((struct mount *, int, struct proc *)); int ffs_update __P((struct vop_update_args *)); int ffs_valloc __P((struct vop_valloc_args *)); int ffs_vfree __P((struct vop_vfree_args *)); -int ffs_vget __P((struct mount *, ino_t, struct vnode **)); +int ffs_vget __P((struct mount *, void *, struct vnode **)); int ffs_vptofh __P((struct vnode *, struct fid *)); int ffs_write __P((struct vop_write_args *)); int ffs_pagein __P((struct vop_pagein_args *)); diff --git a/bsd/ufs/ffs/ffs_inode.c b/bsd/ufs/ffs/ffs_inode.c index 0ffc112e2..88d47d5eb 100644 --- a/bsd/ufs/ffs/ffs_inode.c +++ b/bsd/ufs/ffs/ffs_inode.c @@ -537,9 +537,14 @@ ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp) (u_int)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t)); if (last == -1) bp->b_flags |= B_INVAL; - error = bwrite(bp); - if (error) - allerror = error; + if (last != -1 && (vp)->v_mount->mnt_flag & MNT_ASYNC) { + error = 0; + bdwrite(bp); + } else { + error = bwrite(bp); + if (error) + allerror = error; + } bap = copy; /* diff --git a/bsd/ufs/ffs/ffs_vfsops.c b/bsd/ufs/ffs/ffs_vfsops.c index 6e58add47..e16aae89a 100644 --- a/bsd/ufs/ffs/ffs_vfsops.c +++ b/bsd/ufs/ffs/ffs_vfsops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
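/*
 * The recurring hunk above replaces "always bwrite() new indirect blocks"
 * with a policy switch: on MNT_ASYNC mounts (made the default for the root
 * filesystem by the ffs_mountroot change below) the block is bdwrite()n
 * instead, trading the indirect-blocks-never-point-at-garbage guarantee
 * for throughput.  A standalone model of the decision; the flag value is
 * illustrative:
 */
#include <stdio.h>

#define MNT_ASYNC_M 0x40		/* stand-in for the mount flag */

static int bwrite_m(void) { printf("synchronous write\n"); return 0; }
static void bdwrite_m(void) { printf("delayed write\n"); }

static int write_indirect(int mnt_flag)
{
	int error;

	if (mnt_flag & MNT_ASYNC_M) {
		bdwrite_m();		/* delayed: no error to report yet */
		error = 0;
	} else if ((error = bwrite_m()) != 0) {
		return error;		/* synchronous path fails loudly */
	}
	return error;
}

int main(void)
{
	write_indirect(MNT_ASYNC_M);
	write_indirect(0);
	return 0;
}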
* * @APPLE_LICENSE_HEADER_START@ * @@ -70,7 +70,7 @@ #include #include #include -#include +#include #include #include #include @@ -138,11 +138,18 @@ ffs_mountroot() /* Must set the MNT_ROOTFS flag before doing the actual mount */ mp->mnt_flag |= MNT_ROOTFS; + /* Set asynchronous flag by default */ + mp->mnt_flag |= MNT_ASYNC; + if (error = ffs_mountfs(rootvp, mp, p)) { mp->mnt_vfc->vfc_refcount--; + + if (mp->mnt_kern_flag & MNTK_IO_XINFO) + FREE(mp->mnt_xinfo_ptr, M_TEMP); vfs_unbusy(mp, p); + vrele(rootvp); /* release the reference from bdevvp() */ - _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); return (error); } simple_lock(&mountlist_slock); @@ -301,12 +308,13 @@ ffs_mount(mp, path, data, ndp, p) } ump = VFSTOUFS(mp); fs = ump->um_fs; - (void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size); + (void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, + (size_t *)&size); bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size); bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname, MNAMELEN); (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, - &size); + (size_t *)&size); bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); (void)ffs_statfs(mp, &mp->mnt_stat, p); return (0); @@ -383,6 +391,7 @@ ffs_reload(mountp, cred, p) */ newfs->fs_csp = fs->fs_csp; newfs->fs_maxcluster = fs->fs_maxcluster; + newfs->fs_contigdirs = fs->fs_contigdirs; bcopy(newfs, fs, (u_int)fs->fs_sbsize); if (fs->fs_sbsize < SBSIZE) bp->b_flags |= B_INVAL; @@ -393,7 +402,7 @@ ffs_reload(mountp, cred, p) brelse(bp); mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; ffs_oldfscompat(fs); - maxfilesize = (u_int64_t)0x100000000; /* 4GB */ + maxfilesize = 0x100000000ULL; /* 4GB */ if (fs->fs_maxfilesize > maxfilesize) /* XXX */ fs->fs_maxfilesize = maxfilesize; /* XXX */ /* @@ -423,6 +432,7 @@ ffs_reload(mountp, cred, p) byte_swap_ints((int *)bp->b_data, size / sizeof(int)); } #endif /* REV_ENDIAN_FS */ + space = (char *) space + size; brelse(bp); } /* @@ -602,14 +612,14 @@ ffs_mountfs(devvp, mp, p) dbsize = fs->fs_fsize / NSPF(fs); if(dbsize <= 0 ) { kprintf("device blocksize computaion failed\n"); - } else { - if (VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, &dbsize, FWRITE, NOCRED, - p) != 0) { + } else { + if (VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&dbsize, + FWRITE, NOCRED, p) != 0) { kprintf("failed to set device blocksize\n"); - } + } /* force the specfs to reread blocksize from size() */ set_fsblocksize(devvp); - } + } /* cache the IO attributes */ error = vfs_init_io_attributes(devvp, mp); @@ -690,6 +700,7 @@ ffs_mountfs(devvp, mp, p) blks = howmany(size, fs->fs_fsize); if (fs->fs_contigsumsize > 0) size += fs->fs_ncg * sizeof(int32_t); + size += fs->fs_ncg * sizeof(u_int8_t); space = _MALLOC((u_long)size, M_UFSMNT, M_WAITOK); fs->fs_csp = space; for (i = 0; i < blks; i += fs->fs_frag) { @@ -714,11 +725,22 @@ ffs_mountfs(devvp, mp, p) fs->fs_maxcluster = lp = space; for (i = 0; i < fs->fs_ncg; i++) *lp++ = fs->fs_contigsumsize; + space = lp; } + size = fs->fs_ncg * sizeof(u_int8_t); + fs->fs_contigdirs = (u_int8_t *)space; + space = (u_int8_t *)space + size; + bzero(fs->fs_contigdirs, size); + /* XXX Compatibility for old filesystems */ + if (fs->fs_avgfilesize <= 0) + fs->fs_avgfilesize = AVFILESIZ; + if (fs->fs_avgfpdir <= 0) + fs->fs_avgfpdir = AFPDIR; + /* XXX End of compatibility */ mp->mnt_data = (qaddr_t)ump; mp->mnt_stat.f_fsid.val[0] = (long)dev; mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum; 
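/*
 * The mount path above sizes a single _MALLOC to hold three arrays (the
 * csum blocks, the per-cg fs_maxcluster values, and the new per-cg
 * fs_contigdirs counters) and carves it up by advancing a cursor.  A
 * malloc-based model of that carving; sizes are illustrative and csumsz
 * is kept a multiple of sizeof(int) so alignment holds:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	int ncg = 8;				/* cylinder groups */
	size_t csumsz = ncg * 32;		/* stand-in for csum blocks */
	size_t size = csumsz
	    + ncg * sizeof(int)			/* fs_maxcluster */
	    + ncg * sizeof(unsigned char);	/* fs_contigdirs */
	char *space = malloc(size);

	if (space == NULL)
		return 1;

	char *csp = space;			/* fs_csp */
	space += csumsz;
	int *maxcluster = (int *)space;		/* fs_maxcluster */
	space += ncg * sizeof(int);
	unsigned char *contigdirs = (unsigned char *)space; /* fs_contigdirs */
	memset(contigdirs, 0, ncg);		/* mount bzero()s it too */

	printf("one block, three arrays: %p %p %p\n",
	    (void *)csp, (void *)maxcluster, (void *)contigdirs);
	free(csp);				/* one free releases all three */
	return 0;
}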
-#warning hardcoded max symlen and not "mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;" + /* XXX warning hardcoded max symlen and not "mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;" */ mp->mnt_maxsymlinklen = 60; #if REV_ENDIAN_FS if (rev_endian) @@ -735,7 +757,7 @@ ffs_mountfs(devvp, mp, p) devvp->v_specflags |= SI_MOUNTEDON; ffs_oldfscompat(fs); ump->um_savedmaxfilesize = fs->fs_maxfilesize; /* XXX */ - maxfilesize = (u_int64_t)0x100000000; /* 4GB */ + maxfilesize = 0x100000000ULL; /* 4GB */ #if 0 maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1; /* XXX */ #endif /* 0 */ @@ -853,10 +875,46 @@ ffs_flushfiles(mp, flags, p) int i, error; ump = VFSTOUFS(mp); + #if QUOTA + /* + * NOTE: The open quota files have an indirect reference + * on the root directory vnode. We must account for this + * extra reference when doing the intial vflush. + */ if (mp->mnt_flag & MNT_QUOTA) { - if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) + struct vnode *rootvp = NULLVP; + int quotafilecnt = 0; + + /* Find out how many quota files we have open. */ + for (i = 0; i < MAXQUOTAS; i++) { + if (ump->um_qfiles[i].qf_vp != NULLVP) + ++quotafilecnt; + } + + /* + * Check if the root vnode is in our inode hash + * (so we can skip over it). + */ + rootvp = ufs_ihashget(ump->um_dev, ROOTINO); + + error = vflush(mp, rootvp, SKIPSYSTEM|flags); + + if (rootvp) { + /* + * See if there are additional references on the + * root vp besides the ones obtained from the open + * quota files and the hfs_chashget call above. + */ + if ((error == 0) && + (rootvp->v_usecount > (1 + quotafilecnt))) { + error = EBUSY; /* root dir is still open */ + } + vput(rootvp); + } + if (error && (flags & FORCECLOSE) == 0) return (error); + for (i = 0; i < MAXQUOTAS; i++) { if (ump->um_qfiles[i].qf_vp == NULLVP) continue; @@ -951,6 +1009,14 @@ loop: simple_lock(&vp->v_interlock); nvp = vp->v_mntvnodes.le_next; ip = VTOI(vp); + + // restart our whole search if this guy is locked + // or being reclaimed. + if (ip == NULL || vp->v_flag & (VXLOCK|VORECLAIM)) { + simple_unlock(&vp->v_interlock); + continue; + } + if ((vp->v_type == VNON) || ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && vp->v_dirtyblkhd.lh_first == NULL && !(vp->v_flag & VHASDIRTY))) { @@ -1002,9 +1068,9 @@ loop: * done by the calling routine. */ int -ffs_vget(mp, ino, vpp) +ffs_vget(mp, inop, vpp) struct mount *mp; - ino_t ino; + void *inop; struct vnode **vpp; { struct proc *p = current_proc(); /* XXX */ @@ -1013,9 +1079,11 @@ ffs_vget(mp, ino, vpp) struct ufsmount *ump; struct buf *bp; struct vnode *vp; + ino_t ino; dev_t dev; - int i, type, error; + int i, type, error = 0; + ino = (ino_t) inop; ump = VFSTOUFS(mp); dev = ump->um_dev; @@ -1025,12 +1093,17 @@ ffs_vget(mp, ino, vpp) return (EPERM); } + /* check in the inode hash */ if ((*vpp = ufs_ihashget(dev, ino)) != NULL) { vp = *vpp; UBCINFOCHECK("ffs_vget", vp); return (0); } - /* Allocate a new vnode/inode. */ + + /* + * Not in inode hash. + * Allocate a new vnode/inode. + */ type = ump->um_devvp->v_tag == VT_MFS ? M_MFSNODE : M_FFSNODE; /* XXX */ MALLOC_ZONE(ip, struct inode *, sizeof(struct inode), type, M_WAITOK); bzero((caddr_t)ip, sizeof(struct inode)); @@ -1041,17 +1114,17 @@ ffs_vget(mp, ino, vpp) ip->i_fs = fs = ump->um_fs; ip->i_dev = dev; ip->i_number = ino; - ip->i_flag |= IN_ALLOC; + SET(ip->i_flag, IN_ALLOC); #if QUOTA for (i = 0; i < MAXQUOTAS; i++) ip->i_dquot[i] = NODQUOT; #endif /* - * MALLOC_ZONE is blocking call. Check for race. + * We could have blocked in MALLOC_ZONE. 
Check for the race. */ if ((*vpp = ufs_ihashget(dev, ino)) != NULL) { - /* Clean up */ + /* lost the race, clean up */ FREE_ZONE(ip, sizeof(struct inode), type); vp = *vpp; UBCINFOCHECK("ffs_vget", vp); @@ -1066,50 +1139,29 @@ ffs_vget(mp, ino, vpp) */ ufs_ihashins(ip); - if (error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) { - ufs_ihashrem(ip); - if (ISSET(ip->i_flag, IN_WALLOC)) - wakeup(ip); - FREE_ZONE(ip, sizeof(struct inode), type); - *vpp = NULL; - return (error); - } - vp->v_data = ip; - ip->i_vnode = vp; - - /* - * A vnode is associated with the inode now, - * vget() can deal with the serialization. - */ - CLR(ip->i_flag, IN_ALLOC); - if (ISSET(ip->i_flag, IN_WALLOC)) - wakeup(ip); - /* Read in the disk contents for the inode, copy into the inode. */ if (error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)), (int)fs->fs_bsize, NOCRED, &bp)) { - /* - * The inode does not contain anything useful, so it would - * be misleading to leave it on its hash chain. With mode - * still zero, it will be unlinked and returned to the free - * list by vput(). - */ - vput(vp); brelse(bp); - *vpp = NULL; - return (error); + goto errout; } #if REV_ENDIAN_FS if (mp->mnt_flag & MNT_REVEND) { byte_swap_inode_in(((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino)),ip); } else { -#endif /* REV_ENDIAN_FS */ - ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino)); -#if REV_ENDIAN_FS + ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino)); } +#else + ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino)); #endif /* REV_ENDIAN_FS */ brelse(bp); + if (error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) + goto errout; + + vp->v_data = ip; + ip->i_vnode = vp; + /* * Initialize the vnode from the inode, check for aliases. * Note that the underlying vnode may have changed. @@ -1117,7 +1169,7 @@ ffs_vget(mp, ino, vpp) if (error = ufs_vinit(mp, ffs_specop_p, FFS_FIFOOPS, &vp)) { vput(vp); *vpp = NULL; - return (error); + goto out; } /* * Finish inode initialization now that aliasing has been resolved. @@ -1144,10 +1196,24 @@ ffs_vget(mp, ino, vpp) ip->i_gid = ip->i_din.di_ogid; /* XXX */ } /* XXX */ - *vpp = vp; if (UBCINFOMISSING(vp) || UBCINFORECLAIMED(vp)) ubc_info_init(vp); - return (0); + *vpp = vp; + +out: + CLR(ip->i_flag, IN_ALLOC); + if (ISSET(ip->i_flag, IN_WALLOC)) + wakeup(ip); + return (error); + +errout: + ufs_ihashrem(ip); + CLR(ip->i_flag, IN_ALLOC); + if (ISSET(ip->i_flag, IN_WALLOC)) + wakeup(ip); + FREE_ZONE(ip, sizeof(struct inode), type); + *vpp = NULL; + return (error); } /* diff --git a/bsd/ufs/ffs/ffs_vnops.c b/bsd/ufs/ffs/ffs_vnops.c index b095c6dd2..68361f0d8 100644 --- a/bsd/ufs/ffs/ffs_vnops.c +++ b/bsd/ufs/ffs/ffs_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
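/*
 * ffs_vget() above re-checks the inode hash after MALLOC_ZONE because the
 * allocation can block, letting another thread insert the same inode first
 * ("lost the race, clean up").  A reduced pthread model of that
 * lookup/allocate/re-check pattern:
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static void *hash_slot;			/* one-entry stand-in for the hash */

static void *lookup_or_insert(void)
{
	void *found, *fresh;

	pthread_mutex_lock(&hash_lock);
	found = hash_slot;
	pthread_mutex_unlock(&hash_lock);
	if (found)
		return found;		/* fast path: already in the hash */

	fresh = malloc(64);		/* may block, as MALLOC_ZONE can */

	pthread_mutex_lock(&hash_lock);
	found = hash_slot;
	if (found) {			/* lost the race: discard our copy */
		pthread_mutex_unlock(&hash_lock);
		free(fresh);
		return found;
	}
	hash_slot = fresh;		/* won the race: publish while locked */
	pthread_mutex_unlock(&hash_lock);
	return fresh;
}

int main(void)
{
	printf("first:  %p\n", lookup_or_insert());
	printf("second: %p (same entry)\n", lookup_or_insert());
	return 0;
}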
* * @APPLE_LICENSE_HEADER_START@ * @@ -126,7 +126,7 @@ struct vnodeopv_entry_desc ffs_vnodeop_entries[] = { { &vop_symlink_desc, (VOPFUNC)ufs_symlink }, /* symlink */ { &vop_readdir_desc, (VOPFUNC)ufs_readdir }, /* readdir */ { &vop_readlink_desc, (VOPFUNC)ufs_readlink }, /* readlink */ - { &vop_abortop_desc, (VOPFUNC)ufs_abortop }, /* abortop */ + { &vop_abortop_desc, (VOPFUNC)nop_abortop }, /* abortop */ { &vop_inactive_desc, (VOPFUNC)ufs_inactive }, /* inactive */ { &vop_reclaim_desc, (VOPFUNC)ffs_reclaim }, /* reclaim */ { &vop_lock_desc, (VOPFUNC)ufs_lock }, /* lock */ @@ -150,6 +150,9 @@ struct vnodeopv_entry_desc ffs_vnodeop_entries[] = { { &vop_blktooff_desc, (VOPFUNC)ffs_blktooff }, /* blktooff */ { &vop_offtoblk_desc, (VOPFUNC)ffs_offtoblk }, /* offtoblk */ { &vop_cmap_desc, (VOPFUNC)ufs_cmap }, /* cmap */ + { &vop_getattrlist_desc, (VOPFUNC)ufs_getattrlist }, /* getattrlist */ + { &vop_setattrlist_desc, (VOPFUNC)ufs_setattrlist }, /* setattrlist */ + { &vop_kqfilt_add_desc, (VOPFUNC)ufs_kqfilt_add }, /* kqfilt_add */ { (struct vnodeop_desc*)NULL, (int(*)())NULL } }; struct vnodeopv_desc ffs_vnodeop_opv_desc = @@ -266,6 +269,7 @@ struct vnodeopv_entry_desc ffs_fifoop_entries[] = { { &vop_blktooff_desc, (VOPFUNC)ffs_blktooff }, /* blktooff */ { &vop_offtoblk_desc, (VOPFUNC)ffs_offtoblk }, /* offtoblk */ { &vop_cmap_desc, (VOPFUNC)ufs_cmap }, /* cmap */ + { &vop_kqfilt_add_desc, (VOPFUNC)ufsfifo_kqfilt_add }, /* kqfilt_add */ { (struct vnodeop_desc*)NULL, (int(*)())NULL } }; struct vnodeopv_desc ffs_fifoop_opv_desc = diff --git a/bsd/ufs/ffs/fs.h b/bsd/ufs/ffs/fs.h index cffa4fad4..16871857e 100644 --- a/bsd/ufs/ffs/fs.h +++ b/bsd/ufs/ffs/fs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -140,16 +140,17 @@ * computed as cylinder groups are inspected. * There is a 128-byte region in the superblock reserved for in-core * pointers to summary information. Originally this included an array - * of pointers to blocks of struct csum; now there are just two + * of pointers to blocks of struct csum; now there are just three * pointers and the remaining space is padded with fs_ocsp[]. * * NOCSPTRS determines the size of this padding. One pointer (fs_csp) * is taken away to point to a contiguous array of struct csum for * all cylinder groups; a second (fs_maxcluster) points to an array - * of cluster sizes that is computed as cylinder groups are inspected. + * of cluster sizes that is computed as cylinder groups are inspected, + * and the third points to an array that tracks the creation of new + * directories. */ -#define NOCSPTRS ((128 / sizeof(void *)) - 2) - +#define NOCSPTRS ((128 / sizeof(void *)) - 3) /* * A summary of contiguous blocks of various sizes is maintained @@ -174,6 +175,17 @@ #define MINFREE 5 #define DEFAULTOPT FS_OPTTIME +/* Grigoriy Orlov has done some extensive work to fine + * tune the layout preferences for directories within a filesystem. + * His algorithm can be tuned by adjusting the following parameters + * which tell the system the average file size and the average number + * of files per directory. These defaults are well selected for typical + * filesystems, but may need to be tuned for odd cases like filesystems + * being used for squid caches or news spools. 
+ */ +#define AVFILESIZ 16384 +#define AFPDIR 64 + /* * Per cylinder group information; summarized in blocks allocated * from first cylinder group data blocks. These blocks have to be @@ -260,11 +272,14 @@ struct fs { /* these fields retain the current block allocation info */ int32_t fs_cgrotor; /* last cg searched */ void *fs_ocsp[NOCSPTRS]; /* list of fs_cs info buffers */ + u_int8_t *fs_contigdirs; /* # of contiguously allocated dirs */ struct csum *fs_csp; /* list of fs_cs info buffers */ int32_t *fs_maxcluster; /* max cluster in each cyl group */ int32_t fs_cpc; /* cyl per cycle in postbl */ int16_t fs_opostbl[16][8]; /* old rotation block list head */ - int32_t fs_sparecon[50]; /* reserved for future constants */ + int32_t fs_avgfilesize; /* expected average file size */ + int32_t fs_avgfpdir; /* expected # of files per directory */ + int32_t fs_sparecon[48]; /* reserved for future constants */ int32_t fs_contigsumsize; /* size of cluster summary array */ int32_t fs_maxsymlinklen; /* max length of an internal symlink */ int32_t fs_inodefmt; /* format of on-disk inodes */ diff --git a/bsd/ufs/mfs/mfs_vfsops.c b/bsd/ufs/mfs/mfs_vfsops.c deleted file mode 100644 index 71ae15bad..000000000 --- a/bsd/ufs/mfs/mfs_vfsops.c +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ -/* - * Copyright (c) 1989, 1990, 1993, 1994 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
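/*
 * The fs.h rearrangement above is layout-neutral: one pointer moves out of
 * the fs_ocsp[] pad (NOCSPTRS goes from "- 2" to "- 3") and the two new
 * int32 tuning fields consume two fs_sparecon[] slots (50 down to 48), so
 * the in-core superblock size is unchanged.  A toy compile-time check of
 * that style of edit (field types simplified):
 */
struct sb_old {
	void	*ocsp[(128 / sizeof(void *)) - 2];	/* pad */
	void	*csp;
	void	*maxcluster;
	int	sparecon[50];
};

struct sb_new {
	void	*ocsp[(128 / sizeof(void *)) - 3];	/* pad shrinks by one */
	void	*contigdirs;		/* new pointer taken from the pad */
	void	*csp;
	void	*maxcluster;
	int	avgfilesize;		/* takes one spare slot */
	int	avgfpdir;		/* takes a second */
	int	sparecon[48];
};

/* compilation fails if the rearrangement changed the overall size */
typedef char layout_unchanged[
    sizeof(struct sb_old) == sizeof(struct sb_new) ? 1 : -1];

int main(void) { return 0; }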
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)mfs_vfsops.c 8.4 (Berkeley) 4/16/94 - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include - -#include -#include - -caddr_t mfs_rootbase; /* address of mini-root in kernel virtual memory */ -u_long mfs_rootsize; /* size of mini-root in bytes */ - -static int mfs_minor; /* used for building internal dev_t */ - -extern int (**mfs_vnodeop_p)(void *); - -/* - * mfs vfs operations. - */ -struct vfsops mfs_vfsops = { - MOUNT_MFS, - mfs_mount, - mfs_start, - ffs_unmount, - ufs_root, - ufs_quotactl, - mfs_statfs, - ffs_sync, - ffs_vget, - ffs_fhtovp, - ffs_vptofh, - mfs_init, -}; - -/* - * Called by main() when mfs is going to be mounted as root. - * - * Name is updated by mount(8) after booting. - */ -#define ROOTNAME "mfs_root" - -mfs_mountroot() -{ - extern struct vnode *rootvp; - register struct fs *fs; - register struct mount *mp; - struct proc *p = kernel_proc; /* XXX - WMG*/ - struct ufsmount *ump; - struct mfsnode *mfsp; - size_t size; - int error; - - /* - * Get vnodes for swapdev and rootdev. 
- */ -#if 0 - if (bdevvp(swapdev, &swapdev_vp) || bdevvp(rootdev, &rootvp)) - panic("mfs_mountroot: can't setup bdevvp's"); -#else - if ( bdevvp(rootdev, &rootvp)) - panic("mfs_mountroot: can't setup bdevvp's"); - -#endif - MALLOC_ZONE(mp, struct mount *, - sizeof(struct mount), M_MOUNT, M_WAITOK); - bzero((char *)mp, (u_long)sizeof(struct mount)); - - /* Initialize the default IO constraints */ - mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS; - mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32; - - mp->mnt_op = &mfs_vfsops; - mp->mnt_flag = MNT_RDONLY; - MALLOC(mfsp, struct mfsnode *, sizeof(struct mfsnode), M_MFSNODE, M_WAITOK); - rootvp->v_data = mfsp; - rootvp->v_op = mfs_vnodeop_p; - rootvp->v_tag = VT_MFS; - mfsp->mfs_baseoff = mfs_rootbase; - mfsp->mfs_size = mfs_rootsize; - mfsp->mfs_vnode = rootvp; - mfsp->mfs_pid = p->p_pid; - mfsp->mfs_buflist = (struct buf *)0; - if (error = ffs_mountfs(rootvp, mp, p)) { - vrele(rootvp); /* release the reference from bdevvp() */ - _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); - _FREE(mfsp, M_MFSNODE); - return (error); - } - if (error = vfs_lock(mp)) { - (void)ffs_unmount(mp, 0, p); - _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); - _FREE(mfsp, M_MFSNODE); - return (error); - } - CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list); - mp->mnt_vnodecovered = NULLVP; - ump = VFSTOUFS(mp); - fs = ump->um_fs; - bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt)); - fs->fs_fsmnt[0] = '/'; - bcopy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MNAMELEN); - (void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, - &size); - bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); - (void)ffs_statfs(mp, &mp->mnt_stat, p); - vfs_unlock(mp); - inittodr((time_t)0); - return (0); -} - -/* - * This is called early in boot to set the base address and size - * of the mini-root. - */ -mfs_initminiroot(base) - caddr_t base; -{ - struct fs *fs = (struct fs *)(base + SBOFF); - extern int (*mountroot)(); - - /* check for valid super block */ - if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE || - fs->fs_bsize < sizeof(struct fs)) - return (0); - mountroot = mfs_mountroot; - mfs_rootbase = base; - mfs_rootsize = fs->fs_fsize * fs->fs_size; - rootdev = makedev(255, mfs_minor++); - return (mfs_rootsize); -} - -/* - * VFS Operations. - * - * mount system call - */ -/* ARGSUSED */ -int -mfs_mount(mp, path, data, ndp, p) - register struct mount *mp; - char *path; - caddr_t data; - struct nameidata *ndp; - struct proc *p; -{ - struct vnode *devvp; - struct mfs_args args; - struct ufsmount *ump; - register struct fs *fs; - register struct mfsnode *mfsp; - size_t size; - int flags, error; - - if (error = copyin(data, (caddr_t)&args, sizeof (struct mfs_args))) - return (error); - - /* - * If updating, check whether changing from read-only to - * read/write; if there is no device name, that's all we do. 
- */ - if (mp->mnt_flag & MNT_UPDATE) { - ump = VFSTOUFS(mp); - fs = ump->um_fs; - if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) { - flags = WRITECLOSE; - if (mp->mnt_flag & MNT_FORCE) - flags |= FORCECLOSE; - if (vfs_busy(mp)) - return (EBUSY); - error = ffs_flushfiles(mp, flags, p); - vfs_unbusy(mp); - if (error) - return (error); - } - if (fs->fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) - fs->fs_ronly = 0; -#ifdef EXPORTMFS - if (args.fspec == 0) - return (vfs_export(mp, &ump->um_export, &args.export)); -#endif - return (0); - } - MALLOC(mfsp, struct mfsnode *, sizeof(struct mfsnode), M_MFSNODE, M_WAITOK); - error = getnewvnode(VT_MFS, (struct mount *)0, mfs_vnodeop_p, &devvp); - if (error) { - FREE(mfsp, M_MFSNODE); - return (error); - } - devvp->v_type = VBLK; - if (checkalias(devvp, makedev(255, mfs_minor++), (struct mount *)0)) - panic("mfs_mount: dup dev"); - devvp->v_data = mfsp; - mfsp->mfs_baseoff = args.base; - mfsp->mfs_size = args.size; - mfsp->mfs_vnode = devvp; - mfsp->mfs_pid = p->p_pid; - mfsp->mfs_buflist = (struct buf *)0; - if (error = ffs_mountfs(devvp, mp, p)) { - mfsp->mfs_buflist = (struct buf *)-1; - vrele(devvp); - return (error); - } - ump = VFSTOUFS(mp); - fs = ump->um_fs; - (void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size); - bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size); - bcopy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MNAMELEN); - (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, - &size); - bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); - return (0); -} - -int mfs_pri = PWAIT | PCATCH; /* XXX prob. temp */ - -/* - * Used to grab the process and keep it in the kernel to service - * memory filesystem I/O requests. - * - * Loop servicing I/O requests. - * Copy the requested data into or out of the memory filesystem - * address space. - */ -/* ARGSUSED */ -int -mfs_start(mp, flags, p) - struct mount *mp; - int flags; - struct proc *p; -{ - register struct vnode *vp = VFSTOUFS(mp)->um_devvp; - register struct mfsnode *mfsp = VTOMFS(vp); - register struct buf *bp; - register caddr_t base; - int error = 0; - - base = mfsp->mfs_baseoff; - while (mfsp->mfs_buflist != (struct buf *)(-1)) { - while (bp = mfsp->mfs_buflist) { - mfsp->mfs_buflist = bp->b_actf; - mfs_doio(bp, base); - wakeup((caddr_t)bp); - } - /* - * If a non-ignored signal is received, try to unmount. - * If that fails, clear the signal (it has been "processed"), - * otherwise we will loop here, as tsleep will always return - * EINTR/ERESTART. - */ - if (error = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0)) - if (dounmount(mp, 0, p) != 0) - CLRSIG(p, CURSIG(p)); - } - return (error); -} - -/* - * Get file system statistics. - */ -mfs_statfs(mp, sbp, p) - struct mount *mp; - struct statfs *sbp; - struct proc *p; -{ - int error; - - error = ffs_statfs(mp, sbp, p); -#ifdef COMPAT_09 - sbp->f_type = 3; -#else - sbp->f_type = 0; -#endif - strncpy(&sbp->f_fstypename[0], mp->mnt_op->vfs_name, MFSNAMELEN); - sbp->f_fstypename[MFSNAMELEN] = '\0'; - return (error); -} diff --git a/bsd/ufs/mfs/mfs_vnops.c b/bsd/ufs/mfs/mfs_vnops.c deleted file mode 100644 index 81e4dad8a..000000000 --- a/bsd/ufs/mfs/mfs_vnops.c +++ /dev/null @@ -1,375 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. 
- * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* $NetBSD: mfs_vnops.c,v 1.5 1994/12/14 13:03:52 mycroft Exp $ */ - -/* - * Copyright (c) 1989, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)mfs_vnops.c 8.5 (Berkeley) 7/28/94 - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#include -#include -#include - -/* - * mfs vnode operations. 
- */ - -#define VOPFUNC int (*)(void *) - -int (**mfs_vnodeop_p)(void *); -struct vnodeopv_entry_desc mfs_vnodeop_entries[] = { - { &vop_default_desc, (VOPFUNC)vn_default_error }, - { &vop_lookup_desc, (VOPFUNC)mfs_lookup }, /* lookup */ - { &vop_create_desc, (VOPFUNC)mfs_create }, /* create */ - { &vop_mknod_desc, (VOPFUNC)mfs_mknod }, /* mknod */ - { &vop_open_desc, (VOPFUNC)mfs_open }, /* open */ - { &vop_close_desc, (VOPFUNC)mfs_close }, /* close */ - { &vop_access_desc, (VOPFUNC)mfs_access }, /* access */ - { &vop_getattr_desc, (VOPFUNC)mfs_getattr }, /* getattr */ - { &vop_setattr_desc, (VOPFUNC)mfs_setattr }, /* setattr */ - { &vop_read_desc, (VOPFUNC)mfs_read }, /* read */ - { &vop_write_desc, (VOPFUNC)mfs_write }, /* write */ - { &vop_ioctl_desc, (VOPFUNC)mfs_ioctl }, /* ioctl */ - { &vop_select_desc, (VOPFUNC)mfs_select }, /* select */ - { &vop_mmap_desc, (VOPFUNC)mfs_mmap }, /* mmap */ - { &vop_fsync_desc, (VOPFUNC)spec_fsync }, /* fsync */ - { &vop_seek_desc, (VOPFUNC)mfs_seek }, /* seek */ - { &vop_remove_desc, (VOPFUNC)mfs_remove }, /* remove */ - { &vop_link_desc, (VOPFUNC)mfs_link }, /* link */ - { &vop_rename_desc, (VOPFUNC)mfs_rename }, /* rename */ - { &vop_mkdir_desc, (VOPFUNC)mfs_mkdir }, /* mkdir */ - { &vop_rmdir_desc, (VOPFUNC)mfs_rmdir }, /* rmdir */ - { &vop_symlink_desc, (VOPFUNC)mfs_symlink }, /* symlink */ - { &vop_readdir_desc, (VOPFUNC)mfs_readdir }, /* readdir */ - { &vop_readlink_desc, (VOPFUNC)mfs_readlink }, /* readlink */ - { &vop_abortop_desc, (VOPFUNC)mfs_abortop }, /* abortop */ - { &vop_inactive_desc, (VOPFUNC)mfs_inactive }, /* inactive */ - { &vop_reclaim_desc, (VOPFUNC)mfs_reclaim }, /* reclaim */ - { &vop_lock_desc, (VOPFUNC)mfs_lock }, /* lock */ - { &vop_unlock_desc, (VOPFUNC)mfs_unlock }, /* unlock */ - { &vop_bmap_desc, (VOPFUNC)mfs_bmap }, /* bmap */ - { &vop_strategy_desc, (VOPFUNC)mfs_strategy }, /* strategy */ - { &vop_print_desc, (VOPFUNC)mfs_print }, /* print */ - { &vop_islocked_desc, (VOPFUNC)mfs_islocked }, /* islocked */ - { &vop_pathconf_desc, (VOPFUNC)mfs_pathconf }, /* pathconf */ - { &vop_advlock_desc, (VOPFUNC)mfs_advlock }, /* advlock */ - { &vop_blkatoff_desc, (VOPFUNC)mfs_blkatoff }, /* blkatoff */ - { &vop_valloc_desc, (VOPFUNC)mfs_valloc }, /* valloc */ - { &vop_vfree_desc, (VOPFUNC)mfs_vfree }, /* vfree */ - { &vop_truncate_desc, (VOPFUNC)mfs_truncate }, /* truncate */ - { &vop_update_desc, (VOPFUNC)mfs_update }, /* update */ - { &vop_bwrite_desc, (VOPFUNC)mfs_bwrite }, /* bwrite */ - { &vop_pgrd_desc, (VOPFUNC)mfs_pgrg }, /* pager read */ - { &vop_pgwr_desc, (VOPFUNC)mfs_pgwr }, /* pager write */ - { (struct vnodeop_desc*)NULL, (int(*)())NULL } -}; -struct vnodeopv_desc mfs_vnodeop_opv_desc = - { &mfs_vnodeop_p, mfs_vnodeop_entries }; - -/* - * Vnode Operations. - * - * Open called to allow memory filesystem to initialize and - * validate before actual IO. Record our process identifier - * so we can tell when we are doing I/O to ourself. - */ -/* ARGSUSED */ -int -mfs_open(ap) - struct vop_open_args /* { - struct vnode *a_vp; - int a_mode; - struct ucred *a_cred; - struct proc *a_p; - } */ *ap; -{ - - if (ap->a_vp->v_type != VBLK) { - panic("mfs_ioctl not VBLK"); - /* NOTREACHED */ - } - return (0); -} - -/* - * Ioctl operation. - */ -/* ARGSUSED */ -int -mfs_ioctl(ap) - struct vop_ioctl_args /* { - struct vnode *a_vp; - u_long a_command; - caddr_t a_data; - int a_fflag; - struct ucred *a_cred; - struct proc *a_p; - } */ *ap; -{ - - return (ENOTTY); -} - -/* - * Pass I/O requests to the memory filesystem process. 
- */
-int
-mfs_strategy(ap)
-	struct vop_strategy_args /* {
-		struct buf *a_bp;
-	} */ *ap;
-{
-	register struct buf *bp = ap->a_bp;
-	register struct mfsnode *mfsp;
-	struct vnode *vp;
-	struct proc *p = curproc;		/* XXX */
-
-	if (!vfinddev(bp->b_dev, VBLK, &vp) || vp->v_usecount == 0)
-		panic("mfs_strategy: bad dev");
-	mfsp = VTOMFS(vp);
-	/* check for mini-root access */
-	if (mfsp->mfs_pid == 0) {
-		caddr_t base;
-
-		base = mfsp->mfs_baseoff + (bp->b_blkno << DEV_BSHIFT);
-		if (bp->b_flags & B_READ)
-			bcopy(base, bp->b_data, bp->b_bcount);
-		else
-			bcopy(bp->b_data, base, bp->b_bcount);
-		biodone(bp);
-	} else if (mfsp->mfs_pid == p->p_pid) {
-		mfs_doio(bp, mfsp->mfs_baseoff);
-	} else {
-		bp->b_actf = mfsp->mfs_buflist;
-		mfsp->mfs_buflist = bp;
-		wakeup((caddr_t)vp);
-	}
-	return (0);
-}
-
-/*
- * Memory file system I/O.
- *
- * Trivial on the HP since buffer has already been mapped into KVA space.
- */
-void
-mfs_doio(bp, base)
-	register struct buf *bp;
-	caddr_t base;
-{
-
-	base += (bp->b_blkno << DEV_BSHIFT);
-	if (bp->b_flags & B_READ)
-		bp->b_error = copyin(base, bp->b_data, bp->b_bcount);
-	else
-		bp->b_error = copyout(bp->b_data, base, bp->b_bcount);
-	if (bp->b_error)
-		bp->b_flags |= B_ERROR;
-	biodone(bp);
-}
-
-/*
- * This is a noop, simply returning what one has been given.
- */
-int
-mfs_bmap(ap)
-	struct vop_bmap_args /* {
-		struct vnode *a_vp;
-		daddr_t a_bn;
-		struct vnode **a_vpp;
-		daddr_t *a_bnp;
-		int *a_runp;
-	} */ *ap;
-{
-
-	if (ap->a_vpp != NULL)
-		*ap->a_vpp = ap->a_vp;
-	if (ap->a_bnp != NULL)
-		*ap->a_bnp = ap->a_bn;
-	return (0);
-}
-
-/*
- * Memory filesystem close routine
- */
-/* ARGSUSED */
-int
-mfs_close(ap)
-	struct vop_close_args /* {
-		struct vnode *a_vp;
-		int a_fflag;
-		struct ucred *a_cred;
-		struct proc *a_p;
-	} */ *ap;
-{
-	register struct vnode *vp = ap->a_vp;
-	register struct mfsnode *mfsp = VTOMFS(vp);
-	register struct buf *bp;
-	int error;
-
-	/*
-	 * Finish any pending I/O requests.
-	 */
-	while (bp = mfsp->mfs_buflist) {
-		mfsp->mfs_buflist = bp->b_actf;
-		mfs_doio(bp, mfsp->mfs_baseoff);
-		wakeup((caddr_t)bp);
-	}
-	/*
-	 * On last close of a memory filesystem
-	 * we must invalidate any in core blocks, so that
-	 * we can free up its vnode.
-	 */
-	if (error = vinvalbuf(vp, 1, ap->a_cred, ap->a_p, 0, 0))
-		return (error);
-	/*
-	 * There should be no way to have any more uses of this
-	 * vnode, so if we find any other uses, it is a panic.
-	 */
-	if (vp->v_usecount > 1)
-		printf("mfs_close: ref count %d > 1\n", vp->v_usecount);
-	if (vp->v_usecount > 1 || mfsp->mfs_buflist)
-		panic("mfs_close");
-	/*
-	 * Send a request to the filesystem server to exit.
-	 */
-	mfsp->mfs_buflist = (struct buf *)(-1);
-	wakeup((caddr_t)vp);
-	return (0);
-}
-
-/*
- * Memory filesystem inactive routine
- */
-/* ARGSUSED */
-int
-mfs_inactive(ap)
-	struct vop_inactive_args /* {
-		struct vnode *a_vp;
-	} */ *ap;
-{
-	register struct mfsnode *mfsp = VTOMFS(ap->a_vp);
-
-	if (mfsp->mfs_buflist && mfsp->mfs_buflist != (struct buf *)(-1))
-		panic("mfs_inactive: not inactive (mfs_buflist %x)",
-			mfsp->mfs_buflist);
-	return (0);
-}
-
-/*
- * Reclaim a memory filesystem devvp so that it can be reused.
- */
-int
-mfs_reclaim(ap)
-	struct vop_reclaim_args /* {
-		struct vnode *a_vp;
-	} */ *ap;
-{
-	register struct vnode *vp = ap->a_vp;
-
-	FREE(vp->v_data, M_MFSNODE);
-	vp->v_data = NULL;
-	return (0);
-}
-
-/*
- * Print out the contents of an mfsnode.
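- *
- * For a 4 MB mini-root image served by pid 0, the output would look
- * something like (illustrative values, not from the original source):
- *
- *	tag VT_MFS, pid 0, base 25165824, size 4194304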
- */ -int -mfs_print(ap) - struct vop_print_args /* { - struct vnode *a_vp; - } */ *ap; -{ - register struct mfsnode *mfsp = VTOMFS(ap->a_vp); - - printf("tag VT_MFS, pid %d, base %d, size %d\n", mfsp->mfs_pid, - mfsp->mfs_baseoff, mfsp->mfs_size); - return (0); -} - -/* - * Block device bad operation - */ -int -mfs_badop() -{ - - panic("mfs_badop called\n"); - /* NOTREACHED */ -} - -/* - * Memory based filesystem initialization. - */ -mfs_init() -{ - -} diff --git a/bsd/ufs/mfs/mfsnode.h b/bsd/ufs/mfs/mfsnode.h deleted file mode 100644 index a95554a9f..000000000 --- a/bsd/ufs/mfs/mfsnode.h +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ -/* - * Copyright (c) 1989, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)mfsnode.h 8.2 (Berkeley) 8/11/93 - */ - -#ifndef __UFS_MFS_MFSNODE_H__ -#define __UFS_MFS_MFSNODE_H__ - -#include - -#ifdef __APPLE_API_OBSOLETE -/* - * This structure defines the control data for the memory based file system. - */ - -struct mfsnode { - struct vnode *mfs_vnode; /* vnode associated with this mfsnode */ - caddr_t mfs_baseoff; /* base of file system in memory */ - long mfs_size; /* size of memory file system */ - pid_t mfs_pid; /* supporting process pid */ - struct buf *mfs_buflist; /* list of I/O requests */ - long mfs_spare[4]; -}; - -/* - * Convert between mfsnode pointers and vnode pointers - */ -#define VTOMFS(vp) ((struct mfsnode *)(vp)->v_data) -#define MFSTOV(mfsp) ((mfsp)->mfs_vnode) - -/* Prototypes for MFS operations on vnodes. */ -#define mfs_lookup ((int (*) __P((struct vop_lookup_args *)))mfs_badop) -#define mfs_create ((int (*) __P((struct vop_create_args *)))mfs_badop) -#define mfs_mknod ((int (*) __P((struct vop_mknod_args *)))mfs_badop) -#define mfs_access ((int (*) __P((struct vop_access_args *)))mfs_badop) -#define mfs_getattr ((int (*) __P((struct vop_getattr_args *)))mfs_badop) -#define mfs_setattr ((int (*) __P((struct vop_setattr_args *)))mfs_badop) -#define mfs_read ((int (*) __P((struct vop_read_args *)))mfs_badop) -#define mfs_write ((int (*) __P((struct vop_write_args *)))mfs_badop) -#define mfs_select ((int (*) __P((struct vop_select_args *)))mfs_badop) -#define mfs_mmap ((int (*) __P((struct vop_mmap_args *)))mfs_badop) -#define mfs_seek ((int (*) __P((struct vop_seek_args *)))mfs_badop) -#define mfs_remove ((int (*) __P((struct vop_remove_args *)))mfs_badop) -#define mfs_link ((int (*) __P((struct vop_link_args *)))mfs_badop) -#define mfs_rename ((int (*) __P((struct vop_rename_args *)))mfs_badop) -#define mfs_mkdir ((int (*) __P((struct vop_mkdir_args *)))mfs_badop) -#define mfs_rmdir ((int (*) __P((struct vop_rmdir_args *)))mfs_badop) -#define mfs_symlink ((int (*) __P((struct vop_symlink_args *)))mfs_badop) -#define mfs_readdir ((int (*) __P((struct vop_readdir_args *)))mfs_badop) -#define mfs_readlink ((int (*) __P((struct vop_readlink_args *)))mfs_badop) -#define mfs_abortop ((int (*) __P((struct vop_abortop_args *)))mfs_badop) -#define mfs_lock ((int (*) __P((struct vop_lock_args *)))nullop) -#define mfs_unlock ((int (*) __P((struct vop_unlock_args *)))nullop) -#define mfs_islocked ((int (*) __P((struct vop_islocked_args *)))nullop) -#define mfs_pathconf ((int (*) __P((struct vop_pathconf_args *)))mfs_badop) -#define mfs_advlock ((int (*) __P((struct vop_advlock_args *)))mfs_badop) -#define mfs_blkatoff ((int (*) __P((struct vop_blkatoff_args *)))mfs_badop) -#define mfs_valloc ((int (*) __P((struct vop_valloc_args *)))mfs_badop) -#define mfs_vfree ((int (*) __P((struct vop_vfree_args *)))mfs_badop) -#define mfs_truncate ((int (*) __P((struct vop_truncate_args *)))mfs_badop) -#define mfs_update ((int (*) __P((struct vop_update_args *)))mfs_badop) -#define mfs_bwrite ((int (*) __P((struct vop_bwrite_args 
*)))vn_bwrite) -#endif /* __APPLE_API_OBSOLETE */ -#endif /* __UFS_MFS_MFSNODE_H__ */ diff --git a/bsd/ufs/ufs/inode.h b/bsd/ufs/ufs/inode.h index b35cf294f..3aa34ee0c 100644 --- a/bsd/ufs/ufs/inode.h +++ b/bsd/ufs/ufs/inode.h @@ -70,6 +70,7 @@ #ifdef __APPLE_API_PRIVATE #include #include +#include #include #include @@ -95,6 +96,7 @@ struct inode { } inode_u; #define i_fs inode_u.fs + struct klist i_knotes; /* knotes attached to this vnode */ struct dquot *i_dquot[MAXQUOTAS]; /* Dquot structures. */ u_quad_t i_modrev; /* Revision level for NFS lease. */ struct lockf *i_lockf;/* Head of byte-level lock list. */ @@ -179,6 +181,8 @@ struct indir { } \ } +#define VN_KNOTE(vp, hint) KNOTE(&VTOI(vp)->i_knotes, (hint)) + /* This overlays the fid structure (see mount.h). */ struct ufid { u_int16_t ufid_len; /* Length of structure. */ diff --git a/bsd/ufs/ufs/ufs_attrlist.c b/bsd/ufs/ufs/ufs_attrlist.c new file mode 100644 index 000000000..1485073c2 --- /dev/null +++ b/bsd/ufs/ufs/ufs_attrlist.c @@ -0,0 +1,811 @@ +/* + * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * ufs_attrlist.c - UFS attribute list processing + * + * Copyright (c) 2002, Apple Computer, Inc. All Rights Reserved. 
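+ *
+ * The getattrlist result buffer built below has the general layout
+ * (a summary of the packing code in this file, not a formal interface
+ * definition):
+ *
+ *	u_long	length;		total bytes returned, including this field
+ *	<fixed-size attributes, in attribute-bit order>
+ *	<variable-length data, located by attrreference entries>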
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include "ufsmount.h"
+
+/*
+12345678901234567890123456789012345678901234567890123456789012345678901234567890
+*/
+enum {
+	UFS_ATTR_CMN_NATIVE		= 0,
+	UFS_ATTR_CMN_SUPPORTED		= 0,
+	UFS_ATTR_VOL_NATIVE		= ATTR_VOL_NAME |
+					  ATTR_VOL_CAPABILITIES |
+					  ATTR_VOL_ATTRIBUTES,
+	UFS_ATTR_VOL_SUPPORTED		= UFS_ATTR_VOL_NATIVE,
+	UFS_ATTR_DIR_NATIVE		= 0,
+	UFS_ATTR_DIR_SUPPORTED		= 0,
+	UFS_ATTR_FILE_NATIVE		= 0,
+	UFS_ATTR_FILE_SUPPORTED		= 0,
+	UFS_ATTR_FORK_NATIVE		= 0,
+	UFS_ATTR_FORK_SUPPORTED		= 0,
+
+	UFS_ATTR_CMN_SETTABLE		= 0,
+	UFS_ATTR_VOL_SETTABLE		= ATTR_VOL_NAME,
+	UFS_ATTR_DIR_SETTABLE		= 0,
+	UFS_ATTR_FILE_SETTABLE		= 0,
+	UFS_ATTR_FORK_SETTABLE		= 0
+};
+
+static char ufs_label_magic[4] = UFS_LABEL_MAGIC;
+
+/* Copied from diskdev_cmds/disklib/ufslabel.c */
+typedef union {
+	char	c[2];
+	u_short	s;
+} short_union_t;
+
+/* Copied from diskdev_cmds/disklib/ufslabel.c */
+typedef union {
+	u_short	s[2];
+	long	l;
+} long_union_t;
+
+/* Copied from diskdev_cmds/disklib/ufslabel.c */
+static __inline__ void
+reduce(int *sum)
+{
+	long_union_t l_util;
+
+	l_util.l = *sum;
+	*sum = l_util.s[0] + l_util.s[1];
+	if (*sum > 65535)
+		*sum -= 65535;
+	return;
+}
+
+/* Copied from diskdev_cmds/disklib/ufslabel.c */
+static unsigned short
+in_cksum(void *data, int len)
+{
+	u_short	*w;
+	int	sum;
+
+	sum = 0;
+	w = (u_short *)data;
+	while ((len -= 32) >= 0) {
+		sum += w[0]; sum += w[1];
+		sum += w[2]; sum += w[3];
+		sum += w[4]; sum += w[5];
+		sum += w[6]; sum += w[7];
+		sum += w[8]; sum += w[9];
+		sum += w[10]; sum += w[11];
+		sum += w[12]; sum += w[13];
+		sum += w[14]; sum += w[15];
+		w += 16;
+	}
+	len += 32;
+	while ((len -= 8) >= 0) {
+		sum += w[0]; sum += w[1];
+		sum += w[2]; sum += w[3];
+		w += 4;
+	}
+	len += 8;
+	if (len) {
+		reduce(&sum);
+		while ((len -= 2) >= 0) {
+			sum += *w++;
+		}
+	}
+	if (len == -1) { /* odd-length data */
+		short_union_t s_util;
+
+		s_util.s = 0;
+		s_util.c[0] = *((char *)w);
+		s_util.c[1] = 0;
+		sum += s_util.s;
+	}
+	reduce(&sum);
+	return (~sum & 0xffff);
+}
+
+/* Adapted from diskdev_cmds/disklib/ufslabel.c */
+static boolean_t
+ufs_label_check(struct ufslabel *ul_p)
+{
+	u_int16_t	calc;
+	u_int16_t	checksum;
+
+	if (bcmp(&ul_p->ul_magic, ufs_label_magic,
+	    sizeof(ul_p->ul_magic))) {
+#ifdef DEBUG
+		printf("ufslabel_check: label has bad magic number\n");
+#endif
+		return (FALSE);
+	}
+	if (ntohl(ul_p->ul_version) != UFS_LABEL_VERSION) {
+#ifdef DEBUG
+		printf("ufslabel_check: label has incorrect version %d "
+		    "(should be %d)\n", ntohl(ul_p->ul_version),
+		    UFS_LABEL_VERSION);
+#endif
+		return (FALSE);
+	}
+	if (ntohs(ul_p->ul_namelen) > UFS_MAX_LABEL_NAME) {
+#ifdef DEBUG
+		printf("ufslabel_check: name length %d is too big (> %d)\n",
+		    ntohs(ul_p->ul_namelen), UFS_MAX_LABEL_NAME);
+#endif
+		return (FALSE);
+	}
+
+	checksum = ul_p->ul_checksum;	/* Remember previous checksum.
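+	 * The checksum is defined over the label with ul_checksum zeroed,
+	 * so clear the field, recompute, and compare with the value saved
+	 * above.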
*/ + ul_p->ul_checksum = 0; + calc = in_cksum(ul_p, sizeof(*ul_p)); + if (calc != checksum) { +#ifdef DEBUG + printf("ufslabel_check: label checksum %x (should be %x)\n", + checksum, calc); +#endif + return (FALSE); + } + return (TRUE); +} + +static void +ufs_label_init(struct ufslabel *ul_p) +{ + bzero(ul_p, sizeof(*ul_p)); + ul_p->ul_version = htonl(UFS_LABEL_VERSION); + bcopy(ufs_label_magic, &ul_p->ul_magic, sizeof(ul_p->ul_magic)); + ul_p->ul_time = htonl(time.tv_sec); +} + +static int +ufs_get_label(struct vnode *vp, struct ucred *cred, char *label, + int *name_length) +{ + int error; + int devBlockSize; + struct mount *mp; + struct vnode *devvp; + struct buf *bp; + struct ufslabel *ulp; + + mp = vp->v_mount; + devvp = VFSTOUFS(mp)->um_devvp; + VOP_DEVBLOCKSIZE(devvp, &devBlockSize); + + if (error = meta_bread(devvp, (ufs_daddr_t)(UFS_LABEL_OFFSET / devBlockSize), + UFS_LABEL_SIZE, cred, &bp)) + goto out; + + /* + * Since the disklabel is read directly by older user space code, + * make sure this buffer won't remain in the cache when we release it. + * + * It would be better if that user space code was modified to get + * at the fields of the disklabel via the filesystem (such as + * getattrlist). + */ + SET(bp->b_flags, B_NOCACHE); + + ulp = (struct ufslabel *) bp->b_data; + if (ufs_label_check(ulp)) { + int length; + /* Copy the name out */ + length = ulp->ul_namelen; +#if REV_ENDIAN_FS + if (mp->mnt_flag & MNT_REVEND) + length = NXSwapShort(length); +#endif + if (length > 0 && length <= UFS_MAX_LABEL_NAME) { + bcopy(ulp->ul_name, label, length); + *name_length = length; + } else { + /* Return an empty name */ + *label = '\0'; + *name_length = 0; + } + } + +out: + if (bp) + brelse(bp); + return error; +} + +static int ufs_set_label(struct vnode *vp, struct ucred *cred, + const char *label, int name_length) +{ + int error; + int devBlockSize; + struct mount *mp; + struct vnode *devvp; + struct buf *bp; + struct ufslabel *ulp; + + mp = vp->v_mount; + + /* Validate the new name's length */ + if (name_length < 0 || name_length > UFS_MAX_LABEL_NAME) + return EINVAL; + + /* Read UFS_LABEL_SIZE bytes at UFS_LABEL_OFFSET */ + devvp = VFSTOUFS(mp)->um_devvp; + VOP_DEVBLOCKSIZE(devvp, &devBlockSize); + if (error = meta_bread(devvp, (ufs_daddr_t)(UFS_LABEL_OFFSET / devBlockSize), + UFS_LABEL_SIZE, cred, &bp)) + goto out; + + /* + * Since the disklabel is read directly by older user space code, + * make sure this buffer won't remain in the cache when we release it. + * + * It would be better if that user space code was modified to get + * at the fields of the disklabel via the filesystem (such as + * getattrlist). + */ + SET(bp->b_flags, B_NOCACHE); + + /* Validate the label structure; init if not valid */ + ulp = (struct ufslabel *) bp->b_data; + if (!ufs_label_check(ulp)) + ufs_label_init(ulp); + + /* Copy new name over existing name */ + ulp->ul_namelen = name_length; +#if REV_ENDIAN_FS + if (mp->mnt_flag & MNT_REVEND) + ulp->ul_namelen = NXSwapShort(ulp->ul_namelen); +#endif + bcopy(label, ulp->ul_name, name_length); + + /* Update the checksum */ + ulp->ul_checksum = 0; + ulp->ul_checksum = in_cksum(ulp, sizeof(*ulp)); + + /* Write the label back to disk */ + bwrite(bp); + bp = NULL; + +out: + if (bp) + brelse(bp); + return error; +} + +/* + * Pack a C-style string into an attribute buffer. Returns the new varptr. 
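+ *
+ * Worked example (illustrative): packing the string "MyVol" stores
+ * attr_length = 6 (five characters plus the terminator) and advances
+ * varptr by (6 + 3) & ~3 == 8 bytes.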
+ */ +static void * +packstr(char *s, void *attrptr, void *varptr) +{ + struct attrreference *ref = attrptr; + u_long length; + + length = strlen(s) + 1; /* String, plus terminator */ + + /* + * In the fixed-length part of buffer, store the offset and length of + * the variable-length data. + */ + ref->attr_dataoffset = (u_int8_t *)varptr - (u_int8_t *)attrptr; + ref->attr_length = length; + + /* Copy the string to variable-length part of buffer */ + (void) strncpy((unsigned char *)varptr, s, length); + + /* Advance pointer past string, and round up to multiple of 4 bytes */ + return (char *)varptr + ((length + 3) & ~3); +} + +/* + * Pack an unterminated string into an attribute buffer as a C-style + * string. Copies the indicated number of characters followed by a + * terminating '\0'. Returns the new varptr. + */ +static void * +packtext(u_char *text, u_int text_len, void *attrptr, void *varptr) +{ + struct attrreference *ref = attrptr; + u_long length; /* of the attribute, including terminator */ + + length = text_len + 1; /* String, plus terminator */ + + /* + * In the fixed-length part of buffer, store the offset and length of + * the variable-length data. + */ + ref->attr_dataoffset = (u_int8_t *) varptr - (u_int8_t *) attrptr; + ref->attr_length = length; + + /* Copy the string to variable-length part of buffer */ + bcopy(text, varptr, text_len); + ((char *) varptr)[text_len] = '\0'; + + /* Advance pointer past string, and round up to multiple of 4 bytes */ + return (char *) varptr + ((length + 3) & ~3); +} + +/* + * ufs_packvolattr + * + * Pack the volume-related attributes from a getattrlist call into result + * buffers. Fields are packed in order based on the bitmap masks. + * Attributes with smaller masks are packed first. + * + * The buffer pointers are updated to point past the data that was returned. + */ +static int ufs_packvolattr( + struct vnode *vp, /* The volume's vnode */ + struct ucred *cred, + struct attrlist *alist, /* Desired attributes */ + void **attrptrptr, /* Fixed-size attributes buffer */ + void **varptrptr) /* Variable-size attributes buffer */ +{ + int error; + attrgroup_t a; + void *attrptr = *attrptrptr; + void *varptr = *varptrptr; + + a = alist->volattr; + if (a) { + if (a & ATTR_VOL_NAME) { + int length; + char name[UFS_MAX_LABEL_NAME]; + + error = ufs_get_label(vp, cred, name, &length); + if (error) + return error; + + varptr = packtext(name, length, attrptr, varptr); + ++((struct attrreference *)attrptr); + } + + if (a & ATTR_VOL_CAPABILITIES) { + vol_capabilities_attr_t *vcapattrptr; + + vcapattrptr = (vol_capabilities_attr_t *) attrptr; + + /* + * Capabilities this volume format has. Note that + * we do not set VOL_CAP_FMT_PERSISTENTOBJECTIDS. + * That's because we can't resolve an inode number + * into a directory entry (parent and name), which + * Carbon would need to support PBResolveFileIDRef. 
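+			 *
+			 * (The capabilities[] words report what UFS actually
+			 * supports; the valid[] words, filled in below, report
+			 * which capability bits we are able to answer for,
+			 * whether or not UFS has them.)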
+ */ + vcapattrptr->capabilities[VOL_CAPABILITIES_FORMAT] = + VOL_CAP_FMT_SYMBOLICLINKS | + VOL_CAP_FMT_HARDLINKS | + VOL_CAP_FMT_SPARSE_FILES | + VOL_CAP_FMT_CASE_SENSITIVE | + VOL_CAP_FMT_CASE_PRESERVING | + VOL_CAP_FMT_FAST_STATFS ; + vcapattrptr->capabilities[VOL_CAPABILITIES_INTERFACES] + = VOL_CAP_INT_NFSEXPORT | + VOL_CAP_INT_VOL_RENAME | + VOL_CAP_INT_ADVLOCK | + VOL_CAP_INT_FLOCK ; + vcapattrptr->capabilities[VOL_CAPABILITIES_RESERVED1] + = 0; + vcapattrptr->capabilities[VOL_CAPABILITIES_RESERVED2] + = 0; + + /* Capabilities we know about: */ + vcapattrptr->valid[VOL_CAPABILITIES_FORMAT] = + VOL_CAP_FMT_PERSISTENTOBJECTIDS | + VOL_CAP_FMT_SYMBOLICLINKS | + VOL_CAP_FMT_HARDLINKS | + VOL_CAP_FMT_JOURNAL | + VOL_CAP_FMT_JOURNAL_ACTIVE | + VOL_CAP_FMT_NO_ROOT_TIMES | + VOL_CAP_FMT_SPARSE_FILES | + VOL_CAP_FMT_ZERO_RUNS | + VOL_CAP_FMT_CASE_SENSITIVE | + VOL_CAP_FMT_CASE_PRESERVING | + VOL_CAP_FMT_FAST_STATFS ; + vcapattrptr->valid[VOL_CAPABILITIES_INTERFACES] = + VOL_CAP_INT_SEARCHFS | + VOL_CAP_INT_ATTRLIST | + VOL_CAP_INT_NFSEXPORT | + VOL_CAP_INT_READDIRATTR | + VOL_CAP_INT_EXCHANGEDATA | + VOL_CAP_INT_COPYFILE | + VOL_CAP_INT_ALLOCATE | + VOL_CAP_INT_VOL_RENAME | + VOL_CAP_INT_ADVLOCK | + VOL_CAP_INT_FLOCK ; + vcapattrptr->valid[VOL_CAPABILITIES_RESERVED1] = 0; + vcapattrptr->valid[VOL_CAPABILITIES_RESERVED2] = 0; + + ++((vol_capabilities_attr_t *)attrptr); + } + + if (a & ATTR_VOL_ATTRIBUTES) { + vol_attributes_attr_t *volattrptr; + + volattrptr = (vol_attributes_attr_t *)attrptr; + + volattrptr->validattr.commonattr = + UFS_ATTR_CMN_SUPPORTED; + volattrptr->validattr.volattr = + UFS_ATTR_VOL_SUPPORTED; + volattrptr->validattr.dirattr = + UFS_ATTR_DIR_SUPPORTED; + volattrptr->validattr.fileattr = + UFS_ATTR_FILE_SUPPORTED; + volattrptr->validattr.forkattr = + UFS_ATTR_FORK_SUPPORTED; + + volattrptr->nativeattr.commonattr = + UFS_ATTR_CMN_NATIVE; + volattrptr->nativeattr.volattr = + UFS_ATTR_VOL_NATIVE; + volattrptr->nativeattr.dirattr = + UFS_ATTR_DIR_NATIVE; + volattrptr->nativeattr.fileattr = + UFS_ATTR_FILE_NATIVE; + volattrptr->nativeattr.forkattr = + UFS_ATTR_FORK_NATIVE; + + ++((vol_attributes_attr_t *)attrptr); + } + } + + /* Update the buffer pointers to point past what we just returned */ + *attrptrptr = attrptr; + *varptrptr = varptr; + + return 0; +} + +/* + * Pack all attributes from a getattrlist or readdirattr call into + * the result buffer. For now, we only support volume attributes. + */ +static int +ufs_packattr(struct vnode *vp, struct ucred *cred, struct attrlist *alist, + void **attrptr, void **varptr) +{ + int error=0; + + if (alist->volattr != 0) + error = ufs_packvolattr(vp, cred, alist, attrptr, varptr); + + return error; +} + +/* + * Calculate the fixed-size space required to hold a set of attributes. + * For variable-length attributes, this will be the size of the + * attribute reference (an offset and length). + */ +static size_t +ufs_attrsize(struct attrlist *attrlist) +{ + size_t size; + attrgroup_t a = 0; + +#if ((ATTR_CMN_NAME | ATTR_CMN_DEVID | ATTR_CMN_FSID | ATTR_CMN_OBJTYPE | \ + ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | ATTR_CMN_OBJPERMANENTID | \ + ATTR_CMN_PAROBJID | ATTR_CMN_SCRIPT | ATTR_CMN_CRTIME | \ + ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME | \ + ATTR_CMN_BKUPTIME | ATTR_CMN_FNDRINFO | ATTR_CMN_OWNERID | \ + ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | ATTR_CMN_NAMEDATTRCOUNT | \ + ATTR_CMN_NAMEDATTRLIST | ATTR_CMN_FLAGS | ATTR_CMN_USERACCESS) \ + != ATTR_CMN_VALIDMASK) +#error ufs_attrsize: Missing bits in common mask computation! 
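+/*
+ * (Illustrative note: this and the similar checks below are compile-time
+ * guards. If new ATTR_* bits are added to a VALIDMASK definition without
+ * updating ufs_attrsize, the build fails here rather than silently
+ * miscomputing attribute buffer sizes.)
+ */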
+#endif
+
+#if ((ATTR_VOL_FSTYPE | ATTR_VOL_SIGNATURE | ATTR_VOL_SIZE | \
+	ATTR_VOL_SPACEFREE | ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | \
+	ATTR_VOL_ALLOCATIONCLUMP | ATTR_VOL_IOBLOCKSIZE | \
+	ATTR_VOL_OBJCOUNT | ATTR_VOL_FILECOUNT | ATTR_VOL_DIRCOUNT | \
+	ATTR_VOL_MAXOBJCOUNT | ATTR_VOL_MOUNTPOINT | ATTR_VOL_NAME | \
+	ATTR_VOL_MOUNTFLAGS | ATTR_VOL_INFO | ATTR_VOL_MOUNTEDDEVICE | \
+	ATTR_VOL_ENCODINGSUSED | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES) \
+	!= ATTR_VOL_VALIDMASK)
+#error ufs_attrsize: Missing bits in volume mask computation!
+#endif
+
+#if ((ATTR_DIR_LINKCOUNT | ATTR_DIR_ENTRYCOUNT | ATTR_DIR_MOUNTSTATUS) \
+	!= ATTR_DIR_VALIDMASK)
+#error ufs_attrsize: Missing bits in directory mask computation!
+#endif
+
+#if ((ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | ATTR_FILE_ALLOCSIZE | \
+	ATTR_FILE_IOBLOCKSIZE | ATTR_FILE_CLUMPSIZE | ATTR_FILE_DEVTYPE | \
+	ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST | \
+	ATTR_FILE_DATALENGTH | ATTR_FILE_DATAALLOCSIZE | \
+	ATTR_FILE_DATAEXTENTS | ATTR_FILE_RSRCLENGTH | \
+	ATTR_FILE_RSRCALLOCSIZE | ATTR_FILE_RSRCEXTENTS) \
+	!= ATTR_FILE_VALIDMASK)
+#error ufs_attrsize: Missing bits in file mask computation!
+#endif
+
+#if ((ATTR_FORK_TOTALSIZE | ATTR_FORK_ALLOCSIZE) != ATTR_FORK_VALIDMASK)
+#error ufs_attrsize: Missing bits in fork mask computation!
+#endif
+
+	size = 0;
+
+	if ((a = attrlist->volattr) != 0) {
+		if (a & ATTR_VOL_NAME)
+			size += sizeof(struct attrreference);
+		if (a & ATTR_VOL_CAPABILITIES)
+			size += sizeof(vol_capabilities_attr_t);
+		if (a & ATTR_VOL_ATTRIBUTES)
+			size += sizeof(vol_attributes_attr_t);
+	};
+
+	/*
+	 * Ignore common, dir, file, and fork attributes since we
+	 * don't support those yet.
+	 */
+
+	return size;
+}
+
+/*
+#
+#% getattrlist	vp	= = =
+#
+ vop_getattrlist {
+     IN struct vnode *vp;
+     IN struct attrlist *alist;
+     INOUT struct uio *uio;
+     IN struct ucred *cred;
+     IN struct proc *p;
+ };
+
+ */
+__private_extern__ int
+ufs_getattrlist(struct vop_getattrlist_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct attrlist *alist = ap->a_alist;
+	size_t fixedblocksize;
+	size_t attrblocksize;
+	size_t attrbufsize;
+	void *attrbufptr;
+	void *attrptr;
+	void *varptr;
+	int error;
+
+	/*
+	 * Check the attrlist for valid inputs (i.e. be sure we understand
+	 * what the caller is asking).
+	 */
+	if ((alist->bitmapcount != ATTR_BIT_MAP_COUNT) ||
+	    ((alist->commonattr & ~ATTR_CMN_VALIDMASK) != 0) ||
+	    ((alist->volattr & ~ATTR_VOL_VALIDMASK) != 0) ||
+	    ((alist->dirattr & ~ATTR_DIR_VALIDMASK) != 0) ||
+	    ((alist->fileattr & ~ATTR_FILE_VALIDMASK) != 0) ||
+	    ((alist->forkattr & ~ATTR_FORK_VALIDMASK) != 0))
+		return EINVAL;
+
+	/*
+	 * Requesting volume information requires setting the
+	 * ATTR_VOL_INFO bit. Also, volume info requests are
+	 * mutually exclusive with all other info requests.
+	 */
+	if ((alist->volattr != 0) &&
+	    (((alist->volattr & ATTR_VOL_INFO) == 0) ||
+	    (alist->dirattr != 0) || (alist->fileattr != 0) ||
+	    alist->forkattr != 0))
+		return EINVAL;
+
+	/*
+	 * Make sure caller isn't asking for an attribute we don't support.
+	 */
+	if ((alist->commonattr & ~UFS_ATTR_CMN_SUPPORTED) != 0 ||
+	    (alist->volattr & ~(UFS_ATTR_VOL_SUPPORTED | ATTR_VOL_INFO)) != 0 ||
+	    (alist->dirattr & ~UFS_ATTR_DIR_SUPPORTED) != 0 ||
+	    (alist->fileattr & ~UFS_ATTR_FILE_SUPPORTED) != 0 ||
+	    (alist->forkattr & ~UFS_ATTR_FORK_SUPPORTED) != 0)
+		return EOPNOTSUPP;
+
+	/*
+	 * Requesting volume information requires a vnode for the volume root.
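+	 *
+	 * A hedged userspace sketch of a matching call (the volume path is
+	 * made up; getattrlist(2) and the ATTR_* constants are real):
+	 *
+	 *	struct attrlist al = { 0 };
+	 *	char buf[sizeof(u_long) + sizeof(attrreference_t) + 516];
+	 *	al.bitmapcount = ATTR_BIT_MAP_COUNT;
+	 *	al.volattr = ATTR_VOL_INFO | ATTR_VOL_NAME;
+	 *	getattrlist("/Volumes/UFS", &al, buf, sizeof(buf), 0);
+	 *
+	 * which must name the volume root, per the check below.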
+	 */
+	if (alist->volattr && (vp->v_flag & VROOT) == 0)
+		return EINVAL;
+
+	fixedblocksize = ufs_attrsize(alist);
+	attrblocksize = fixedblocksize + (sizeof(u_long));
+	if (alist->volattr & ATTR_VOL_NAME)
+		attrblocksize += 516;	/* 512 + terminator + padding */
+	attrbufsize = MIN(ap->a_uio->uio_resid, attrblocksize);
+	MALLOC(attrbufptr, void *, attrblocksize, M_TEMP, M_WAITOK);
+	attrptr = attrbufptr;
+	*((u_long *)attrptr) = 0;	/* Set buffer length in case of errors */
+	++((u_long *)attrptr);		/* skip over length field */
+	varptr = ((char *)attrptr) + fixedblocksize;
+
+	error = ufs_packattr(vp, ap->a_cred, alist, &attrptr, &varptr);
+
+	if (error == 0) {
+		/* Don't return more data than was generated */
+		attrbufsize = MIN(attrbufsize, (size_t) varptr - (size_t) attrbufptr);
+
+		/* Return the actual buffer length */
+		*((u_long *) attrbufptr) = attrbufsize;
+
+		error = uiomove((caddr_t) attrbufptr, attrbufsize, ap->a_uio);
+	}
+
+	FREE(attrbufptr, M_TEMP);
+	return error;
+}
+
+
+/*
+ * Unpack the volume-related attributes from a setattrlist call into the
+ * appropriate in-memory and on-disk structures.
+ */
+static int
+ufs_unpackvolattr(
+	struct vnode *vp,
+	struct ucred *cred,
+	attrgroup_t attrs,
+	void *attrbufptr)
+{
+	int i;
+	int error;
+	attrreference_t *attrref;
+
+	error = 0;
+
+	if (attrs & ATTR_VOL_NAME) {
+		char *name;
+		int name_length;
+
+		attrref = attrbufptr;
+		name = ((char*)attrbufptr) + attrref->attr_dataoffset;
+		name_length = strlen(name);
+		ufs_set_label(vp, cred, name, name_length);
+
+		/* Advance buffer pointer past attribute reference */
+		attrbufptr = ++attrref;
+	}
+
+	return error;
+}
+
+
+
+/*
+ * Unpack the attributes from a setattrlist call into the
+ * appropriate in-memory and on-disk structures. Right now,
+ * we only support the volume name.
+ */
+static int
+ufs_unpackattr(
+	struct vnode *vp,
+	struct ucred *cred,
+	struct attrlist *alist,
+	void *attrbufptr)
+{
+	int error;
+
+	error = 0;
+
+	if (alist->volattr != 0) {
+		error = ufs_unpackvolattr(vp, cred, alist->volattr,
+			attrbufptr);
+	}
+
+	return error;
+}
+
+
+
+/*
+#
+#% setattrlist	vp	L L L
+#
+vop_setattrlist {
+	IN struct vnode *vp;
+	IN struct attrlist *alist;
+	INOUT struct uio *uio;
+	IN struct ucred *cred;
+	IN struct proc *p;
+};
+*/
+__private_extern__ int
+ufs_setattrlist(struct vop_setattrlist_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct attrlist *alist = ap->a_alist;
+	size_t attrblocksize;
+	void *attrbufptr;
+	int error;
+
+	if (vp->v_mount->mnt_flag & MNT_RDONLY)
+		return (EROFS);
+
+	/*
+	 * Check the attrlist for valid inputs (i.e. be sure we understand
+	 * what the caller is asking).
+	 */
+	if ((alist->bitmapcount != ATTR_BIT_MAP_COUNT) ||
+	    ((alist->commonattr & ~ATTR_CMN_SETMASK) != 0) ||
+	    ((alist->volattr & ~ATTR_VOL_SETMASK) != 0) ||
+	    ((alist->dirattr & ~ATTR_DIR_SETMASK) != 0) ||
+	    ((alist->fileattr & ~ATTR_FILE_SETMASK) != 0) ||
+	    ((alist->forkattr & ~ATTR_FORK_SETMASK) != 0))
+		return EINVAL;
+
+	/*
+	 * Setting volume information requires setting the
+	 * ATTR_VOL_INFO bit. Also, volume info requests are
+	 * mutually exclusive with all other info requests.
+	 */
+	if ((alist->volattr != 0) &&
+	    (((alist->volattr & ATTR_VOL_INFO) == 0) ||
+	    (alist->dirattr != 0) || (alist->fileattr != 0) ||
+	    alist->forkattr != 0))
+		return EINVAL;
+
+	/*
+	 * Make sure caller isn't asking for an attribute we don't support.
+	 * Right now, all we support is setting the volume name.
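+	 *
+	 * A hedged sketch of the corresponding userspace rename (path and
+	 * label are made up):
+	 *
+	 *	struct attrlist al = { 0 };
+	 *	struct { attrreference_t ref; char name[32]; } vol;
+	 *	al.bitmapcount = ATTR_BIT_MAP_COUNT;
+	 *	al.volattr = ATTR_VOL_INFO | ATTR_VOL_NAME;
+	 *	vol.ref.attr_dataoffset = sizeof(vol.ref);
+	 *	vol.ref.attr_length = strlen("NewLabel") + 1;
+	 *	strcpy(vol.name, "NewLabel");
+	 *	setattrlist("/Volumes/UFS", &al, &vol, sizeof(vol), 0);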
+ */ + if ((alist->commonattr & ~UFS_ATTR_CMN_SETTABLE) != 0 || + (alist->volattr & ~(UFS_ATTR_VOL_SETTABLE | ATTR_VOL_INFO)) != 0 || + (alist->dirattr & ~UFS_ATTR_DIR_SETTABLE) != 0 || + (alist->fileattr & ~UFS_ATTR_FILE_SETTABLE) != 0 || + (alist->forkattr & ~UFS_ATTR_FORK_SETTABLE) != 0) + return EOPNOTSUPP; + + /* + * Setting volume information requires a vnode for the volume root. + */ + if (alist->volattr && (vp->v_flag & VROOT) == 0) + return EINVAL; + + attrblocksize = ap->a_uio->uio_resid; + if (attrblocksize < ufs_attrsize(alist)) + return EINVAL; + + MALLOC(attrbufptr, void *, attrblocksize, M_TEMP, M_WAITOK); + + error = uiomove((caddr_t)attrbufptr, attrblocksize, ap->a_uio); + if (error) + goto ErrorExit; + + error = ufs_unpackattr(vp, ap->a_cred, alist, attrbufptr); + +ErrorExit: + FREE(attrbufptr, M_TEMP); + return error; +} diff --git a/bsd/ufs/ufs/ufs_byte_order.c b/bsd/ufs/ufs/ufs_byte_order.c index d39a6e610..ef7eecdcb 100644 --- a/bsd/ufs/ufs/ufs_byte_order.c +++ b/bsd/ufs/ufs/ufs_byte_order.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -79,8 +79,12 @@ byte_swap_sbin(struct fs *sb) byte_swap_ints(((int32_t *)&sb->fs_firstfield), 52); byte_swap_int(sb->fs_cgrotor); byte_swap_int(sb->fs_cpc); - byte_swap_shorts((int16_t *)sb->fs_opostbl, 16 * 8); - byte_swap_ints((int32_t *)sb->fs_sparecon, 50); + byte_swap_shorts((int16_t *)sb->fs_opostbl, + sizeof(sb->fs_opostbl) / sizeof(int16_t)); + byte_swap_int(sb->fs_avgfilesize); + byte_swap_int(sb->fs_avgfpdir); + byte_swap_ints((int32_t *)sb->fs_sparecon, + sizeof(sb->fs_sparecon) / sizeof(int32_t)); byte_swap_ints((int32_t *)&sb->fs_contigsumsize, 3); byte_swap_longlongs((u_int64_t *)&sb->fs_maxfilesize,3); byte_swap_ints((int32_t *)&sb->fs_state, 6); @@ -108,8 +112,12 @@ byte_swap_sbout(struct fs *sb) byte_swap_ints(((int32_t *)&sb->fs_firstfield), 52); byte_swap_int(sb->fs_cgrotor); byte_swap_int(sb->fs_cpc); - byte_swap_shorts((int16_t *)sb->fs_opostbl, 16 * 8); - byte_swap_ints((int32_t *)sb->fs_sparecon, 50); + byte_swap_shorts((int16_t *)sb->fs_opostbl, + sizeof(sb->fs_opostbl) / sizeof(int16_t)); + byte_swap_int(sb->fs_avgfilesize); + byte_swap_int(sb->fs_avgfpdir); + byte_swap_ints((int32_t *)sb->fs_sparecon, + sizeof(sb->fs_sparecon) / sizeof(int32_t)); byte_swap_ints((int32_t *)&sb->fs_contigsumsize, 3); byte_swap_longlongs((u_int64_t *)&sb->fs_maxfilesize,3); byte_swap_ints((int32_t *)&sb->fs_state, 6); @@ -146,7 +154,7 @@ byte_swap_cgin(struct cg *cg, struct fs * fs) byte_swap_int(cg->cg_nextfreeoff); byte_swap_int(cg->cg_clusteroff); byte_swap_int(cg->cg_nclusterblks); - byte_swap_ints(&cg->cg_sparecon, 13); + byte_swap_ints((int *)&cg->cg_sparecon, 13); byte_swap_int(cg->cg_btotoff); ulptr = ((int32_t *)((u_int8_t *)(cg) + (cg)->cg_btotoff)); @@ -192,7 +200,7 @@ byte_swap_cgout(struct cg *cg, struct fs * fs) byte_swap_int(cg->cg_freeoff); byte_swap_int(cg->cg_nextfreeoff); byte_swap_int(cg->cg_nclusterblks); - byte_swap_ints(&cg->cg_sparecon, 13); + byte_swap_ints((int *)&cg->cg_sparecon, 13); byte_swap_int(cg->cg_iusedoff); byte_swap_int(cg->cg_clusteroff); diff --git a/bsd/ufs/ufs/ufs_extern.h b/bsd/ufs/ufs/ufs_extern.h index 33b9c783f..a1a158731 100644 --- a/bsd/ufs/ufs/ufs_extern.h +++ b/bsd/ufs/ufs/ufs_extern.h @@ -89,7 +89,6 @@ char *readdisklabel __P((dev_t, int (*)(), struct disklabel *)); int setdisklabel __P((struct disklabel *, struct disklabel *, u_long)); 
int writedisklabel __P((dev_t, int (*)(), struct disklabel *)); -int ufs_abortop __P((struct vop_abortop_args *)); int ufs_access __P((struct vop_access_args *)); int ufs_advlock __P((struct vop_advlock_args *)); int ufs_bmap __P((struct vop_bmap_args *)); @@ -106,6 +105,7 @@ int ufs_dirremove __P((struct vnode *, struct componentname*)); int ufs_dirrewrite __P((struct inode *, struct inode *, struct componentname *)); int ufs_getattr __P((struct vop_getattr_args *)); +int ufs_getattrlist __P((struct vop_getattrlist_args *)); int ufs_getlbns __P((struct vnode *, ufs_daddr_t, struct indir *, int *)); struct vnode * ufs_ihashget __P((dev_t, ino_t)); @@ -144,7 +144,9 @@ int ufs_rmdir __P((struct vop_rmdir_args *)); int ufs_root __P((struct mount *, struct vnode **)); int ufs_seek __P((struct vop_seek_args *)); int ufs_select __P((struct vop_select_args *)); +int ufs_kqfilt_add __P((struct vop_kqfilt_add_args *)); int ufs_setattr __P((struct vop_setattr_args *)); +int ufs_setattrlist __P((struct vop_setattrlist_args *)); int ufs_start __P((struct mount *, int, struct proc *)); int ufs_strategy __P((struct vop_strategy_args *)); int ufs_symlink __P((struct vop_symlink_args *)); @@ -160,6 +162,7 @@ int ufsspec_write __P((struct vop_write_args *)); int ufsfifo_read __P((struct vop_read_args *)); int ufsfifo_write __P((struct vop_write_args *)); int ufsfifo_close __P((struct vop_close_args *)); +int ufsfifo_kqfilt_add __P((struct vop_kqfilt_add_args *)); #endif int ufs_blktooff __P((struct vop_blktooff_args *)); int ufs_cmap __P((struct vop_cmap_args *)); diff --git a/bsd/ufs/ufs/ufs_lookup.c b/bsd/ufs/ufs/ufs_lookup.c index 302cb32de..dcd7ee72e 100644 --- a/bsd/ufs/ufs/ufs_lookup.c +++ b/bsd/ufs/ufs/ufs_lookup.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -537,7 +537,7 @@ found: *vpp = vdp; return (0); } - if (error = VFS_VGET(vdp->v_mount, dp->i_ino, &tdp)) + if (error = VFS_VGET(vdp->v_mount, (void *)dp->i_ino, &tdp)) return (error); /* * If directory is "sticky", then user must own @@ -574,7 +574,7 @@ found: */ if (dp->i_number == dp->i_ino) return (EISDIR); - if (error = VFS_VGET(vdp->v_mount, dp->i_ino, &tdp)) + if (error = VFS_VGET(vdp->v_mount, (void *)dp->i_ino, &tdp)) return (error); *vpp = tdp; cnp->cn_flags |= SAVENAME; @@ -605,7 +605,7 @@ found: pdp = vdp; if (flags & ISDOTDOT) { VOP_UNLOCK(pdp, 0, p); /* race to get the inode */ - if (error = VFS_VGET(vdp->v_mount, dp->i_ino, &tdp)) { + if (error = VFS_VGET(vdp->v_mount, (void *)dp->i_ino, &tdp)) { vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY, p); return (error); } @@ -619,7 +619,7 @@ found: VREF(vdp); /* we want ourself, ie "." 
*/ *vpp = vdp; } else { - if (error = VFS_VGET(vdp->v_mount, dp->i_ino, &tdp)) + if (error = VFS_VGET(vdp->v_mount, (void *)dp->i_ino, &tdp)) return (error); if (!lockparent || !(flags & ISLASTCN)) VOP_UNLOCK(pdp, 0, p); @@ -714,7 +714,7 @@ ufs_direnter(ip, dvp, cnp) struct direct newdir; #if DIAGNOSTIC - if ((cnp->cn_flags & SAVENAME) == 0) + if ((cnp->cn_flags & HASBUF) == 0) panic("direnter: missing name"); #endif dp = VTOI(dvp); @@ -862,7 +862,12 @@ ufs_direnter2(dvp, dirp, cr, p) if (rev_endian) byte_swap_dir_block_out(bp); #endif /* REV_ENDIAN_FS */ - error = VOP_BWRITE(bp); + if (mp->mnt_flag & MNT_ASYNC) { + error = 0; + bdwrite(bp); + } else { + error = VOP_BWRITE(bp); + } dp->i_flag |= IN_CHANGE | IN_UPDATE; if (!error && dp->i_endoff && dp->i_endoff < dp->i_size) error = VOP_TRUNCATE(dvp, (off_t)dp->i_endoff, IO_SYNC, cr, p); @@ -910,7 +915,12 @@ ufs_dirremove(dvp, cnp) if (rev_endian) byte_swap_dir_block_out(bp); #endif /* REV_ENDIAN_FS */ - error = VOP_BWRITE(bp); + if (mp->mnt_flag & MNT_ASYNC) { + error = 0; + bdwrite(bp); + } else { + error = VOP_BWRITE(bp); + } dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); } @@ -927,7 +937,12 @@ ufs_dirremove(dvp, cnp) if (rev_endian) byte_swap_dir_block_out(bp); #endif /* REV_ENDIAN_FS */ - error = VOP_BWRITE(bp); + if (mp->mnt_flag & MNT_ASYNC) { + error = 0; + bdwrite(bp); + } else { + error = VOP_BWRITE(bp); + } dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); } @@ -942,7 +957,12 @@ ufs_dirremove(dvp, cnp) if (rev_endian) byte_swap_dir_block_out(bp); #endif /* REV_ENDIAN_FS */ - error = VOP_BWRITE(bp); + if (mp->mnt_flag & MNT_ASYNC) { + error = 0; + bdwrite(bp); + } else { + error = VOP_BWRITE(bp); + } dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); } @@ -971,7 +991,12 @@ ufs_dirrewrite(dp, ip, cnp) if (vdp->v_mount->mnt_flag & MNT_REVEND) byte_swap_dir_block_out(bp); #endif /* REV_ENDIAN_FS */ - error = VOP_BWRITE(bp); + if (vdp->v_mount->mnt_flag & MNT_ASYNC) { + error = 0; + bdwrite(bp); + } else { + error = VOP_BWRITE(bp); + } dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); } @@ -1104,7 +1129,7 @@ ufs_checkpath(source, target, cred) if (dirbuf.dotdot_ino == rootino) break; vput(vp); - if (error = VFS_VGET(vp->v_mount, dirbuf.dotdot_ino, &vp)) { + if (error = VFS_VGET(vp->v_mount, (void *)dirbuf.dotdot_ino, &vp)) { vp = NULL; break; } diff --git a/bsd/ufs/ufs/ufs_readwrite.c b/bsd/ufs/ufs/ufs_readwrite.c index 1fcbcfcd3..42cd13801 100644 --- a/bsd/ufs/ufs/ufs_readwrite.c +++ b/bsd/ufs/ufs/ufs_readwrite.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -219,6 +219,8 @@ ffs_write(ap) int save_error=0, save_size=0; int blkalloc = 0; int error = 0; + int file_extended = 0; + int doingdirectory = 0; #if REV_ENDIAN_FS int rev_endian=0; @@ -247,6 +249,7 @@ ffs_write(ap) case VLNK: break; case VDIR: + doingdirectory = 1; if ((ioflag & IO_SYNC) == 0) panic("ffs_write: nonsync dir write"); break; @@ -277,7 +280,9 @@ ffs_write(ap) resid = uio->uio_resid; osize = ip->i_size; - flags = ioflag & IO_SYNC ? 
B_SYNC : 0;
+	flags = 0;
+	if ((ioflag & IO_SYNC) && !((vp)->v_mount->mnt_flag & MNT_ASYNC))
+		flags = B_SYNC;
 
 	if (UBCISVALID(vp)) {
 		off_t filesize;
@@ -289,7 +294,6 @@
 		int fboff;
 		int fblk;
 		int loopcount;
-		int file_extended = 0;
 
 		endofwrite = uio->uio_offset + uio->uio_resid;
@@ -304,7 +308,9 @@
 		/* Go ahead and allocate the blocks that are going to be written */
 		rsd = uio->uio_resid;
 		local_offset = uio->uio_offset;
-		local_flags = ioflag & IO_SYNC ? B_SYNC : 0;
+		local_flags = 0;
+		if ((ioflag & IO_SYNC) && !((vp)->v_mount->mnt_flag & MNT_ASYNC))
+			local_flags = B_SYNC;
 		local_flags |= B_NOBUFF;
 		first_block = 1;
@@ -400,7 +406,9 @@
 		}
 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
 	} else {
-		flags = ioflag & IO_SYNC ? B_SYNC : 0;
+		flags = 0;
+		if ((ioflag & IO_SYNC) && !((vp)->v_mount->mnt_flag & MNT_ASYNC))
+			flags = B_SYNC;
 
 		for (error = 0; uio->uio_resid > 0;) {
 			lbn = lblkno(fs, uio->uio_offset);
@@ -436,11 +444,11 @@
 				byte_swap_dir_out((char *)bp->b_data + blkoffset, xfersize);
 			}
 #endif /* REV_ENDIAN_FS */
-			if (ioflag & IO_SYNC)
+			if (doingdirectory == 0 && (ioflag & IO_SYNC))
 				(void)bwrite(bp);
 			else if (xfersize + blkoffset == fs->fs_bsize) {
 				bp->b_flags |= B_AGE;
-				bawrite(bp);
+				bdwrite(bp);
 			} else
 				bdwrite(bp);
@@ -456,6 +464,8 @@
 	 */
 	if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
 		ip->i_mode &= ~(ISUID | ISGID);
+	if (resid > uio->uio_resid)
+		VN_KNOTE(vp, NOTE_WRITE | (file_extended ? NOTE_EXTEND : 0));
 	if (error) {
 		if (ioflag & IO_UNIT) {
 			(void)VOP_TRUNCATE(vp, osize,
@@ -464,46 +474,11 @@
 			uio->uio_resid = resid;
 		}
 	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
-		error = VOP_UPDATE(vp, &time, &time, 1);
+		error = VOP_UPDATE(vp, (struct timeval *)&time,
+				(struct timeval *)&time, 1);
 	return (error);
 }
 
-/*
- * Vnode op for page read.
- */
-/* ARGSUSED */
-PGRD(ap)
-	struct vop_pgrd_args /* {
-		struct vnode *a_vp;
-		struct uio *a_uio;
-		struct ucred *a_cred;
-	} */ *ap;
-{
-
-#warning ufs_readwrite PGRD need to implement
-return (EOPNOTSUPP);
-
-}
-
-/*
- * Vnode op for page write.
- */
-/* ARGSUSED */
-PGWR(ap)
-	struct vop_pgwr_args /* {
-		struct vnode *a_vp;
-		struct uio *a_uio;
-		struct ucred *a_cred;
-		memory_object_t a_pager;
-		vm_offset_t a_offset;
-	} */ *ap;
-{
-
-#warning ufs_readwrite PGWR need to implement
-return (EOPNOTSUPP);
-
-}
-
 /*
  * Vnode op for pagein.
  * Similar to ffs_read()
@@ -668,11 +643,11 @@ ffs_pageout(ap)
 	}
 
-	error = cluster_pageout(vp, pl, pl_offset, f_offset, round_page(xfer_size), ip->i_size, devBlockSize, flags);
+	error = cluster_pageout(vp, pl, pl_offset, f_offset, round_page_32(xfer_size), ip->i_size, devBlockSize, flags);
 
 	if(save_error) {
 		lupl_offset = size - save_size;
-		resid = round_page(save_size);
+		resid = round_page_32(save_size);
 		if (!nocommit)
 			ubc_upl_abort_range(pl, lupl_offset, resid, UPL_ABORT_FREE_ON_EMPTY);
diff --git a/bsd/ufs/ufs/ufs_vfsops.c b/bsd/ufs/ufs/ufs_vfsops.c
index b4b60ec2f..8829468c9 100644
--- a/bsd/ufs/ufs/ufs_vfsops.c
+++ b/bsd/ufs/ufs/ufs_vfsops.c
@@ -105,7 +105,7 @@ ufs_root(mp, vpp)
 	struct vnode *nvp;
 	int error;
 
-	if (error = VFS_VGET(mp, (ino_t)ROOTINO, &nvp))
+	if (error = VFS_VGET(mp, (void *)ROOTINO, &nvp))
 		return (error);
 	*vpp = nvp;
 	return (0);
@@ -234,10 +234,10 @@ ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp)
	 * Get the export permission structure for this tuple.
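	 * With the change below, a NULL 'nam' skips the address check:
	 * the file handle is still translated to a vnode, but no export
	 * entry (and hence no exflags or anonymous credential) is
	 * returned for it.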
*/ np = vfs_export_lookup(mp, &ump->um_export, nam); - if (np == NULL) + if (nam && (np == NULL)) return (EACCES); - if (error = VFS_VGET(mp, ufhp->ufid_ino, &nvp)) { + if (error = VFS_VGET(mp, (void *)ufhp->ufid_ino, &nvp)) { *vpp = NULLVP; return (error); } @@ -248,7 +248,9 @@ ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp) return (ESTALE); } *vpp = nvp; - *exflagsp = np->netc_exflags; - *credanonp = &np->netc_anon; + if (np) { + *exflagsp = np->netc_exflags; + *credanonp = &np->netc_anon; + } return (0); } diff --git a/bsd/ufs/ufs/ufs_vnops.c b/bsd/ufs/ufs/ufs_vnops.c index 903e31fe5..a12536a7d 100644 --- a/bsd/ufs/ufs/ufs_vnops.c +++ b/bsd/ufs/ufs/ufs_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -102,6 +102,11 @@ static int ufs_chmod __P((struct vnode *, int, struct ucred *, struct proc *)); static int ufs_chown __P((struct vnode *, uid_t, gid_t, struct ucred *, struct proc *)); +static int filt_ufsread __P((struct knote *kn, long hint)); +static int filt_ufswrite __P((struct knote *kn, long hint)); +static int filt_ufsvnode __P((struct knote *kn, long hint)); +static void filt_ufsdetach __P((struct knote *kn)); +static int ufs_kqfilter __P((struct vop_kqfilter_args *ap)); union _qcvt { int64_t qcvt; @@ -138,6 +143,7 @@ ufs_create(ap) ufs_makeinode(MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode), ap->a_dvp, ap->a_vpp, ap->a_cnp)) return (error); + VN_KNOTE(ap->a_dvp, NOTE_WRITE); return (0); } @@ -163,6 +169,7 @@ ufs_mknod(ap) ufs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode), ap->a_dvp, vpp, ap->a_cnp)) return (error); + VN_KNOTE(ap->a_dvp, NOTE_WRITE); ip = VTOI(*vpp); ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; if (vap->va_rdev != VNOVAL) { @@ -477,6 +484,7 @@ ufs_setattr(ap) return (EROFS); error = ufs_chmod(vp, (int)vap->va_mode, cred, p); } + VN_KNOTE(vp, NOTE_ATTRIB); return (error); } @@ -754,6 +762,8 @@ ufs_remove(ap) if ((error = ufs_dirremove(dvp, ap->a_cnp)) == 0) { ip->i_nlink--; ip->i_flag |= IN_CHANGE; + VN_KNOTE(vp, NOTE_DELETE); + VN_KNOTE(dvp, NOTE_WRITE); } if (dvp != vp) @@ -828,7 +838,14 @@ ufs_link(ap) ip->i_nlink--; ip->i_flag |= IN_CHANGE; } - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + { + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); + } + VN_KNOTE(vp, NOTE_LINK); + VN_KNOTE(tdvp, NOTE_WRITE); out1: if (tdvp != vp) VOP_UNLOCK(vp, 0, p); @@ -863,7 +880,7 @@ ufs_whiteout(ap) case CREATE: /* create a new directory whiteout */ #if DIAGNOSTIC - if ((cnp->cn_flags & SAVENAME) == 0) + if ((cnp->cn_flags & HASBUF) == 0) panic("ufs_whiteout: missing name"); if (dvp->v_mount->mnt_maxsymlinklen <= 0) panic("ufs_whiteout: old format filesystem"); @@ -888,8 +905,10 @@ ufs_whiteout(ap) break; } if (cnp->cn_flags & HASBUF) { - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); } return (error); } @@ -941,7 +960,7 @@ ufs_rename(ap) struct dirtemplate dirbuf; struct timeval tv; int doingdirectory = 0, oldparent = 0, newparent = 0; - int error = 0; + int error = 0, ioflag; u_char namlen; #if DIAGNOSTIC @@ -1023,6 +1042,7 @@ abortit: oldparent = dp->i_number; doingdirectory++; } + VN_KNOTE(fdvp, NOTE_WRITE); /* XXX right place? 
*/ vrele(fdvp); /* @@ -1111,6 +1131,7 @@ abortit: } goto bad; } + VN_KNOTE(tdvp, NOTE_WRITE); vput(tdvp); } else { if (xp->i_dev != dp->i_dev || xp->i_dev != ip->i_dev) @@ -1164,6 +1185,7 @@ abortit: dp->i_nlink--; dp->i_flag |= IN_CHANGE; } + VN_KNOTE(tdvp, NOTE_WRITE); vput(tdvp); /* * Adjust the link count of the target to @@ -1179,10 +1201,13 @@ abortit: if (doingdirectory) { if (--xp->i_nlink != 0) panic("rename: linked directory"); - error = VOP_TRUNCATE(tvp, (off_t)0, IO_SYNC, + ioflag = ((tvp)->v_mount->mnt_flag & MNT_ASYNC) ? + 0 : IO_SYNC; + error = VOP_TRUNCATE(tvp, (off_t)0, ioflag, tcnp->cn_cred, tcnp->cn_proc); } xp->i_flag |= IN_CHANGE; + VN_KNOTE(tvp, NOTE_DELETE); vput(tvp); xp = NULL; } @@ -1268,6 +1293,7 @@ abortit: } xp->i_flag &= ~IN_RENAME; } + VN_KNOTE(fvp, NOTE_RENAME); if (dp) vput(fdvp); if (xp) @@ -1348,7 +1374,10 @@ ufs_mkdir(ap) #if QUOTA if ((error = getinoquota(ip)) || (error = chkiq(ip, 1, cnp->cn_cred, 0))) { - _FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); VOP_VFREE(tvp, ip->i_number, dmode); vput(tvp); vput(dvp); @@ -1412,10 +1441,17 @@ bad: ip->i_nlink = 0; ip->i_flag |= IN_CHANGE; vput(tvp); - } else + } else { + VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); *ap->a_vpp = tvp; + }; out: - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + { + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); + } vput(dvp); return (error); } @@ -1435,7 +1471,7 @@ ufs_rmdir(ap) struct vnode *dvp = ap->a_dvp; struct componentname *cnp = ap->a_cnp; struct inode *ip, *dp; - int error; + int error, ioflag; ip = VTOI(vp); dp = VTOI(dvp); @@ -1471,6 +1507,7 @@ ufs_rmdir(ap) */ if (error = ufs_dirremove(dvp, cnp)) goto out; + VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); dp->i_nlink--; dp->i_flag |= IN_CHANGE; cache_purge(dvp); @@ -1488,12 +1525,14 @@ ufs_rmdir(ap) * worry about them later. */ ip->i_nlink -= 2; - error = VOP_TRUNCATE(vp, (off_t)0, IO_SYNC, cnp->cn_cred, + ioflag = ((vp)->v_mount->mnt_flag & MNT_ASYNC) ? 0 : IO_SYNC; + error = VOP_TRUNCATE(vp, (off_t)0, ioflag, cnp->cn_cred, cnp->cn_proc); cache_purge(ITOV(ip)); out: if (dvp) vput(dvp); + VN_KNOTE(vp, NOTE_DELETE); vput(vp); return (error); } @@ -1518,6 +1557,7 @@ ufs_symlink(ap) if (error = ufs_makeinode(IFLNK | ap->a_vap->va_mode, ap->a_dvp, vpp, ap->a_cnp)) return (error); + VN_KNOTE(ap->a_dvp, NOTE_WRITE); vp = *vpp; len = strlen(ap->a_target); if (len < vp->v_mount->mnt_maxsymlinklen) { @@ -1670,23 +1710,6 @@ ufs_readlink(ap) return (VOP_READ(vp, ap->a_uio, 0, ap->a_cred)); } -/* - * Ufs abort op, called after namei() when a CREATE/DELETE isn't actually - * done. If a buffer has been saved in anticipation of a CREATE, delete it. - */ -/* ARGSUSED */ -int -ufs_abortop(ap) - struct vop_abortop_args /* { - struct vnode *a_dvp; - struct componentname *a_cnp; - } */ *ap; -{ - if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) - FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); - return (0); -} - /* * Lock an inode. If its already locked, set the WANT bit and sleep. */ @@ -1959,8 +1982,165 @@ ufsfifo_close(ap) simple_unlock(&vp->v_interlock); return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap)); } + +/* + * kqfilt_add wrapper for fifos. 
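+ *
+ * A hedged userspace sketch of a registration that can end up here
+ * (monitoring a fifo that lives on a UFS volume; the path is made up):
+ *
+ *	struct kevent ev;
+ *	int kq = kqueue();
+ *	int fd = open("/ufs/afifo", O_RDONLY | O_NONBLOCK);
+ *	EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
+ *	    NOTE_DELETE | NOTE_RENAME, 0, NULL);
+ *	kevent(kq, &ev, 1, NULL, 0, NULL);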
+ * + * Fall through to ufs kqfilt_add routines if needed + */ +int +ufsfifo_kqfilt_add(ap) + struct vop_kqfilt_add_args *ap; +{ + extern int (**fifo_vnodeop_p)(void *); + int error; + + error = VOCALL(fifo_vnodeop_p, VOFFSET(vop_kqfilt_add), ap); + if (error) + error = ufs_kqfilt_add(ap); + return (error); +} + +#if 0 +/* + * kqfilt_remove wrapper for fifos. + * + * Fall through to ufs kqfilt_remove routines if needed + */ +int +ufsfifo_kqfilt_remove(ap) + struct vop_kqfilt_remove_args *ap; +{ + extern int (**fifo_vnodeop_p)(void *); + int error; + + error = VOCALL(fifo_vnodeop_p, VOFFSET(vop_kqfilt_remove), ap); + if (error) + error = ufs_kqfilt_remove(ap); + return (error); +} +#endif + #endif /* FIFO */ + +static struct filterops ufsread_filtops = + { 1, NULL, filt_ufsdetach, filt_ufsread }; +static struct filterops ufswrite_filtops = + { 1, NULL, filt_ufsdetach, filt_ufswrite }; +static struct filterops ufsvnode_filtops = + { 1, NULL, filt_ufsdetach, filt_ufsvnode }; + +/* + # + #% kqfilt_add vp L L L + # + vop_kqfilt_add + IN struct vnode *vp; + IN struct knote *kn; + IN struct proc *p; + */ +int +ufs_kqfilt_add(ap) + struct vop_kqfilt_add_args /* { + struct vnode *a_vp; + struct knote *a_kn; + struct proc *p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct knote *kn = ap->a_kn; + + switch (kn->kn_filter) { + case EVFILT_READ: + kn->kn_fop = &ufsread_filtops; + break; + case EVFILT_WRITE: + kn->kn_fop = &ufswrite_filtops; + break; + case EVFILT_VNODE: + kn->kn_fop = &ufsvnode_filtops; + break; + default: + return (1); + } + + kn->kn_hook = (caddr_t)vp; + + KNOTE_ATTACH(&VTOI(vp)->i_knotes, kn); + + return (0); +} + +static void +filt_ufsdetach(struct knote *kn) +{ + struct vnode *vp; + int result; + struct proc *p = current_proc(); + + vp = (struct vnode *)kn->kn_hook; + if (1) { /* ! KNDETACH_VNLOCKED */ + result = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + if (result) return; + }; + + result = KNOTE_DETACH(&VTOI(vp)->i_knotes, kn); + + if (1) { /* ! KNDETACH_VNLOCKED */ + VOP_UNLOCK(vp, 0, p); + }; +} + +/*ARGSUSED*/ +static int +filt_ufsread(struct knote *kn, long hint) +{ + struct vnode *vp = (struct vnode *)kn->kn_hook; + struct inode *ip = VTOI(vp); + + /* + * filesystem is gone, so set the EOF flag and schedule + * the knote for deletion. + */ + if (hint == NOTE_REVOKE) { + kn->kn_flags |= (EV_EOF | EV_ONESHOT); + return (1); + } + + kn->kn_data = ip->i_size - kn->kn_fp->f_offset; + return (kn->kn_data != 0); +} + +/*ARGSUSED*/ +static int +filt_ufswrite(struct knote *kn, long hint) +{ + + /* + * filesystem is gone, so set the EOF flag and schedule + * the knote for deletion. + */ + if (hint == NOTE_REVOKE) + kn->kn_flags |= (EV_EOF | EV_ONESHOT); + + kn->kn_data = 0; + return (1); +} + +static int +filt_ufsvnode(struct knote *kn, long hint) +{ + + if (kn->kn_sfflags & hint) + kn->kn_fflags |= hint; + if (hint == NOTE_REVOKE) { + kn->kn_flags |= EV_EOF; + return (1); + } + return (kn->kn_fflags != 0); +} + /* * Return POSIX pathconf information applicable to ufs filesystems. 
*/ @@ -2046,12 +2226,16 @@ ufs_advlock(ap) default: return (EINVAL); } - if (start < 0) - return (EINVAL); if (fl->l_len == 0) end = -1; - else + else if (fl->l_len > 0) end = start + fl->l_len - 1; + else { /* l_len is negative */ + end = start - 1; + start += fl->l_len; + } + if (start < 0) + return (EINVAL); /* * Create the lockf structure */ @@ -2178,7 +2362,10 @@ ufs_makeinode(mode, dvp, vpp, cnp) mode |= IFREG; if (error = VOP_VALLOC(dvp, mode, cnp->cn_cred, &tvp)) { - _FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); vput(dvp); return (error); } @@ -2191,7 +2378,10 @@ ufs_makeinode(mode, dvp, vpp, cnp) #if QUOTA if ((error = getinoquota(ip)) || (error = chkiq(ip, 1, cnp->cn_cred, 0))) { - _FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); VOP_VFREE(tvp, ip->i_number, mode); vput(tvp); vput(dvp); @@ -2227,8 +2417,12 @@ ufs_makeinode(mode, dvp, vpp, cnp) goto bad; if (error = ufs_direnter(ip, dvp, cnp)) goto bad; - if ((cnp->cn_flags & SAVESTART) == 0) - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + if ((cnp->cn_flags & SAVESTART) == 0) { + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); + } vput(dvp); *vpp = tvp; @@ -2239,7 +2433,12 @@ bad: * Write error occurred trying to update the inode * or the directory so must deallocate the inode. */ - _FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + { + char *tmp = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); + } vput(dvp); ip->i_nlink = 0; ip->i_flag |= IN_CHANGE; diff --git a/bsd/uxkern/ux_exception.c b/bsd/uxkern/ux_exception.c index 6dcad81dc..941d65c5d 100644 --- a/bsd/uxkern/ux_exception.c +++ b/bsd/uxkern/ux_exception.c @@ -157,15 +157,9 @@ ux_handler(void) void ux_handler_init(void) { - task_t handler_task; - simple_lock_init(&ux_handler_init_lock); ux_exception_port = MACH_PORT_NULL; - if (kernel_task_create(kernel_task, - 0, 0, &handler_task) != MACH_MSG_SUCCESS) { - panic("Failed to created ux handler task\n"); - } - (void) kernel_thread(handler_task, ux_handler); + (void) kernel_thread(kernel_task, ux_handler); simple_lock(&ux_handler_init_lock); if (ux_exception_port == MACH_PORT_NULL) { simple_unlock(&ux_handler_init_lock); diff --git a/bsd/vfs/vfs_bio.c b/bsd/vfs/vfs_bio.c index 4ca0b0e67..b5933b82f 100644 --- a/bsd/vfs/vfs_bio.c +++ b/bsd/vfs/vfs_bio.c @@ -98,8 +98,11 @@ static __inline__ void bufqinc(int q); static __inline__ void bufqdec(int q); +static int do_breadn_for_type(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks, + int *rasizes, int nrablks, struct ucred *cred, struct buf **bpp, int queuetype); static struct buf *getnewbuf(int slpflag, int slptimeo, int *queue); static int bcleanbuf(struct buf *bp); +static int brecover_data(struct buf *bp); extern void vwakeup(); extern int niobuf; /* The number of IO buffer headers for cluster IO */ @@ -523,7 +526,6 @@ meta_bread(vp, blkno, size, cred, bpp) /* * Read-ahead multiple disk blocks. The first is sync, the rest async. - * Trivial modification to the breada algorithm presented in Bach (p.55). 
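/*
 * [editor's note] The negative-l_len handling added to ufs_advlock()
 * above implements the POSIX rule that a negative length locks the
 * bytes [l_start + l_len, l_start - 1]. A hedged userspace sketch,
 * with arbitrary values:
 */
#include <fcntl.h>
#include <unistd.h>

int
lock_bytes_512_to_1023(int fd)
{
	struct flock fl;

	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 1024;	/* reference point */
	fl.l_len = -512;	/* kernel computes end = start - 1, then
				 * start += l_len => bytes 512..1023 */
	fl.l_pid = 0;

	return (fcntl(fd, F_SETLK, &fl));
}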
*/ int breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp) @@ -533,11 +535,38 @@ breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp) int nrablks; struct ucred *cred; struct buf **bpp; +{ + return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ)); +} + +/* + * Read-ahead multiple disk blocks. The first is sync, the rest async. + * [breadn() for meta-data] + */ +int +meta_breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp) + struct vnode *vp; + daddr_t blkno; int size; + daddr_t rablks[]; int rasizes[]; + int nrablks; + struct ucred *cred; + struct buf **bpp; +{ + return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META)); +} + +/* + * Perform the reads for breadn() and meta_breadn(). + * Trivial modification to the breada algorithm presented in Bach (p.55). + */ +static int +do_breadn_for_type(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks, int *rasizes, + int nrablks, struct ucred *cred, struct buf **bpp, int queuetype) { register struct buf *bp; int i; - bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ); + bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype); /* * For each of the read-ahead blocks, start a read, if necessary. @@ -548,7 +577,7 @@ breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp) continue; /* Get a buffer for the read-ahead block */ - (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, BLK_READ); + (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype); } /* Otherwise, we had to start a read for it; wait until it's valid. */ @@ -583,6 +612,10 @@ bwrite(bp) struct proc *p = current_proc(); struct vnode *vp = bp->b_vp; + if (bp->b_data == 0) { + if (brecover_data(bp) == 0) + return (0); + } /* Remember buffer type, to switch on it later. 
*/ sync = !ISSET(bp->b_flags, B_ASYNC); wasdelayed = ISSET(bp->b_flags, B_DELWRI); @@ -865,11 +898,14 @@ brelse(bp) upl = (upl_t) 0; } else { upl = bp->b_pagelist; - kret = ubc_upl_unmap(upl); - if (kret != KERN_SUCCESS) - panic("kernel_upl_unmap failed"); - bp->b_data = 0; + if (bp->b_data) { + kret = ubc_upl_unmap(upl); + + if (kret != KERN_SUCCESS) + panic("kernel_upl_unmap failed"); + bp->b_data = 0; + } } if (upl) { if (bp->b_flags & (B_ERROR | B_INVAL)) { @@ -883,7 +919,7 @@ brelse(bp) upl_flags = UPL_COMMIT_CLEAR_DIRTY ; else if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY)) upl_flags = UPL_COMMIT_SET_DIRTY ; - else + else upl_flags = UPL_COMMIT_CLEAR_DIRTY ; ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); @@ -1442,14 +1478,23 @@ allocbuf(bp, size) if (bp->b_bufsize <= MAXMETA) { if (bp->b_bufsize < nsize) { /* reallocate to a bigger size */ - desired_size = nsize; zprev = getbufzone(bp->b_bufsize); - z = getbufzone(nsize); - bp->b_data = (caddr_t)zalloc(z); - if(bp->b_data == 0) - panic("allocbuf: zalloc() returned NULL"); - bcopy(elem, bp->b_data, bp->b_bufsize); + if (nsize <= MAXMETA) { + desired_size = nsize; + z = getbufzone(nsize); + bp->b_data = (caddr_t)zalloc(z); + if(bp->b_data == 0) + panic("allocbuf: zalloc() returned NULL"); + } else { + kret = kmem_alloc(kernel_map, &bp->b_data, desired_size); + if (kret != KERN_SUCCESS) + panic("allocbuf: kmem_alloc() 0 returned %d", kret); + if(bp->b_data == 0) + panic("allocbuf: null b_data 0"); + CLR(bp->b_flags, B_ZALLOC); + } + bcopy((const void *)elem, bp->b_data, bp->b_bufsize); zfree(zprev, elem); } else { desired_size = bp->b_bufsize; @@ -1464,7 +1509,7 @@ allocbuf(bp, size) panic("allocbuf: kmem_alloc() returned %d", kret); if(bp->b_data == 0) panic("allocbuf: null b_data"); - bcopy(elem, bp->b_data, bp->b_bufsize); + bcopy((const void *)elem, bp->b_data, bp->b_bufsize); kmem_free(kernel_map, elem, bp->b_bufsize); } else { desired_size = bp->b_bufsize; @@ -1819,6 +1864,8 @@ biodone(bp) { boolean_t funnel_state; struct vnode *vp; + extern struct timeval priority_IO_timestamp_for_root; + extern int hard_throttle_on_root; funnel_state = thread_funnel_set(kernel_flock, TRUE); @@ -1851,7 +1898,8 @@ biodone(bp) code |= DKIO_PAGING; KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE, - bp, bp->b_vp, bp->b_resid, bp->b_error, 0); + (unsigned int)bp, (unsigned int)bp->b_vp, + bp->b_resid, bp->b_error, 0); } /* Wakeup the throttled write operations as needed */ @@ -1862,7 +1910,10 @@ biodone(bp) vp->v_flag &= ~VTHROTTLED; wakeup((caddr_t)&vp->v_numoutput); } - + if ((bp->b_flags & B_PGIN) && (vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV)) { + priority_IO_timestamp_for_root = time; + hard_throttle_on_root = 0; + } if (ISSET(bp->b_flags, B_CALL)) { /* if necessary, call out */ void (*iodone_func)(struct buf *) = bp->b_iodone; @@ -2033,7 +2084,7 @@ free_io_buf(bp) typedef long long blsize_t; -blsize_t MAXNBUF; /* initialize to (mem_size / PAGE_SIZE) */ +blsize_t MAXNBUF; /* initialize to (sane_size / PAGE_SIZE) */ /* Global tunable limits */ blsize_t nbufh; /* number of buffer headers */ blsize_t nbuflow; /* minimum number of buffer headers required */ @@ -2129,11 +2180,11 @@ bufq_balance_thread_init() if (bufqscanwait++ == 0) { /* Initalize globals */ - MAXNBUF = (mem_size / PAGE_SIZE); + MAXNBUF = (sane_size / PAGE_SIZE); nbufh = nbuf; nbuflow = min(nbufh, 100); nbufhigh = min(MAXNBUF, max(nbufh, 2048)); - nbuftarget = (mem_size >> 5) / PAGE_SIZE; + nbuftarget = 
(sane_size >> 5) / PAGE_SIZE; nbuftarget = max(nbuflow, nbuftarget); nbuftarget = min(nbufhigh, nbuftarget); @@ -2377,6 +2428,7 @@ doit: /* Remove from the queue */ bremfree(bp); blaundrycnt--; + /* do the IO */ error = bawrite_internal(bp, 0); if (error) { @@ -2397,6 +2449,62 @@ doit: } +static int +brecover_data(struct buf *bp) +{ + upl_t upl; + upl_page_info_t *pl; + int upl_offset; + kern_return_t kret; + struct vnode *vp = bp->b_vp; + + if (vp->v_tag == VT_NFS) + /* + * NFS currently deals with this case + * in a slightly different manner... + * continue to let it do so + */ + return(1); + + if (!UBCISVALID(vp) || bp->b_bufsize == 0) + goto dump_buffer; + + kret = ubc_create_upl(vp, + ubc_blktooff(vp, bp->b_lblkno), + bp->b_bufsize, + &upl, + &pl, + UPL_PRECIOUS); + if (kret != KERN_SUCCESS) + panic("Failed to get pagelists"); + + for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) { + + if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) { + ubc_upl_abort(upl, 0); + goto dump_buffer; + } + } + SET(bp->b_flags, B_PAGELIST); + bp->b_pagelist = upl; + + kret = ubc_upl_map(upl, (vm_address_t *)&(bp->b_data)); + if (kret != KERN_SUCCESS) + panic("getblk: ubc_upl_map() failed with (%d)", kret); + if (bp->b_data == 0) + panic("ubc_upl_map mapped 0"); + + return (1); + +dump_buffer: + bp->b_bufsize = 0; + SET(bp->b_flags, B_INVAL); + brelse(bp); + + return(0); +} + + static int bp_cmp(void *a, void *b) { diff --git a/bsd/vfs/vfs_cache.c b/bsd/vfs/vfs_cache.c index e4b72f554..cc59be0c6 100644 --- a/bsd/vfs/vfs_cache.c +++ b/bsd/vfs/vfs_cache.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -94,8 +94,8 @@ /* * Structures associated with name cacheing. */ -#define NCHHASH(dvp, cnp) \ - (&nchashtbl[((dvp)->v_id + (cnp)->cn_hash) & nchash]) +#define NCHHASH(dvp, hash_val) \ + (&nchashtbl[((u_long)(dvp) ^ ((dvp)->v_id ^ (hash_val))) & nchash]) LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */ u_long nchash; /* size of hash table - 1 */ long numcache; /* number of cache entries allocated */ @@ -107,6 +107,10 @@ int doingcache = 1; /* 1 => enable the cache */ /* * Delete an entry from its hash list and move it to the front * of the LRU list for immediate reuse. + * + * NOTE: THESE MACROS CAN BLOCK (in the call to remove_name()) + * SO BE CAREFUL IF YOU HOLD POINTERS TO nclruhead OR + * nchashtbl. */ #if DIAGNOSTIC #define PURGE(ncp) { \ @@ -118,6 +122,9 @@ int doingcache = 1; /* 1 => enable the cache */ ncp->nc_hash.le_prev = 0; \ TAILQ_REMOVE(&nclruhead, ncp, nc_lru); \ TAILQ_INSERT_HEAD(&nclruhead, ncp, nc_lru); \ + /* this has to come last because it could block */ \ + remove_name(ncp->nc_name); \ + ncp->nc_name = NULL; \ } #else #define PURGE(ncp) { \ @@ -125,6 +132,9 @@ int doingcache = 1; /* 1 => enable the cache */ ncp->nc_hash.le_prev = 0; \ TAILQ_REMOVE(&nclruhead, ncp, nc_lru); \ TAILQ_INSERT_HEAD(&nclruhead, ncp, nc_lru); \ + /* this has to come last because it could block */ \ + remove_name(ncp->nc_name); \ + ncp->nc_name = NULL; \ } #endif /* DIAGNOSTIC */ @@ -139,6 +149,32 @@ int doingcache = 1; /* 1 => enable the cache */ } \ } + +// +// Have to take a len argument because we may only need to +// hash part of a componentname. 
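/*
 * [editor's note] Worked example of the buffer-header sizing above,
 * which now derives from sane_size rather than mem_size. Numbers are
 * illustrative: with 4K pages and sane_size = 512MB,
 * MAXNBUF = 512MB/4K = 131072 headers and the initial target is
 * (512MB >> 5)/4K = 4096 headers, clamped to [nbuflow, nbufhigh].
 */
static long long
initial_buf_target(long long sane_size, long page_size,
    long long nbuflow, long long nbufhigh)
{
	long long target = (sane_size >> 5) / page_size;

	if (target < nbuflow)
		target = nbuflow;
	if (target > nbufhigh)
		target = nbufhigh;
	return (target);
}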
+// +static unsigned int +hash_string(const char *str, int len) +{ + unsigned int i, hashval = 0; + + if (len == 0) { + for(i=1; *str != 0; i++, str++) { + hashval += (unsigned char)*str * i; + } + } else { + for(i=len; i > 0; i--, str++) { + hashval += (unsigned char)*str * (len - i + 1); + } + } + + return hashval; +} + + + + /* * Lookup an entry in the cache * @@ -162,32 +198,30 @@ cache_lookup(dvp, vpp, cnp) { register struct namecache *ncp, *nnp; register struct nchashhead *ncpp; + register long namelen = cnp->cn_namelen; + char *nameptr = cnp->cn_nameptr; if (!doingcache) { cnp->cn_flags &= ~MAKEENTRY; return (0); } - if (cnp->cn_namelen > NCHNAMLEN) { - nchstats.ncs_long++; - cnp->cn_flags &= ~MAKEENTRY; - return (0); - } - ncpp = NCHHASH(dvp, cnp); + ncpp = NCHHASH(dvp, cnp->cn_hash); for (ncp = ncpp->lh_first; ncp != 0; ncp = nnp) { nnp = ncp->nc_hash.le_next; - /* If one of the vp's went stale, don't bother anymore. */ - if ((ncp->nc_dvpid != ncp->nc_dvp->v_id) || - (ncp->nc_vp && ncp->nc_vpid != ncp->nc_vp->v_id)) { - nchstats.ncs_falsehits++; - PURGE(ncp); - continue; - } - /* Now that we know the vp's to be valid, is it ours ? */ + if (ncp->nc_dvp == dvp && - ncp->nc_nlen == cnp->cn_namelen && - !bcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen)) + strncmp(ncp->nc_name, nameptr, namelen) == 0 && + ncp->nc_name[namelen] == 0) { + /* Make sure the vp isn't stale. */ + if ((ncp->nc_dvpid != dvp->v_id) || + (ncp->nc_vp && ncp->nc_vpid != ncp->nc_vp->v_id)) { + nchstats.ncs_falsehits++; + PURGE(ncp); + continue; + } break; + } } /* We failed to find an entry */ @@ -205,6 +239,11 @@ cache_lookup(dvp, vpp, cnp) /* We found a "positive" match, return the vnode */ if (ncp->nc_vp) { + if (ncp->nc_vp->v_flag & (VUINIT|VXLOCK|VTERMINATE|VORECLAIM)) { + PURGE(ncp); + return (0); + } + nchstats.ncs_goodhits++; TOUCH(ncp); *vpp = ncp->nc_vp; @@ -243,15 +282,6 @@ cache_enter(dvp, vp, cnp) if (!doingcache) return; - /* - * If an entry that is too long, is entered, bad things happen. - * cache_lookup acts as the sentinel to make sure longer names - * are not stored. This here will prevent outsiders from doing - * something that is unexpected. - */ - if (cnp->cn_namelen > NCHNAMLEN) - panic("cache_enter: name too long"); - /* * We allocate a new entry if we are less than the maximum * allowed and the one at the front of the LRU list is in use. 
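/*
 * [editor's note] Standalone illustration of hash_string() and the new
 * NCHHASH() bucket selection; the vnode address, v_id, and mask below
 * are made up. Note the two loops weight characters identically (the
 * first character is multiplied by 1 in both), so hashing with an
 * explicit len agrees with hashing the same NUL-terminated string.
 */
#include <stdio.h>

static unsigned int
hash_string(const char *str, int len)
{
	unsigned int i, hashval = 0;

	if (len == 0) {
		for (i = 1; *str != 0; i++, str++)
			hashval += (unsigned char)*str * i;
	} else {
		for (i = len; i > 0; i--, str++)
			hashval += (unsigned char)*str * (len - i + 1);
	}
	return hashval;
}

int
main(void)
{
	unsigned long dvp_addr = 0x02345600; /* pretend vnode address */
	unsigned long v_id = 42;             /* pretend vnode id */
	unsigned long nchash = 4096 - 1;     /* mask for a 4096-entry table */
	unsigned int h = hash_string("Makefile", 8);

	/* same indexing as NCHHASH(dvp, hash_val) above */
	printf("hash %u -> bucket %lu\n",
	    h, (dvp_addr ^ (v_id ^ h)) & nchash);
	return 0;
}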
@@ -273,6 +303,8 @@ cache_enter(dvp, vp, cnp) panic("cache_enter: le_next"); #endif LIST_REMOVE(ncp, nc_hash); + remove_name(ncp->nc_name); + ncp->nc_name = NULL; ncp->nc_hash.le_prev = 0; } } else { @@ -293,10 +325,9 @@ cache_enter(dvp, vp, cnp) ncp->nc_vpid = cnp->cn_flags & ISWHITEOUT; ncp->nc_dvp = dvp; ncp->nc_dvpid = dvp->v_id; - ncp->nc_nlen = cnp->cn_namelen; - bcopy(cnp->cn_nameptr, ncp->nc_name, (unsigned)ncp->nc_nlen); + ncp->nc_name = add_name(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0); TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru); - ncpp = NCHHASH(dvp, cnp); + ncpp = NCHHASH(dvp, cnp->cn_hash); #if DIAGNOSTIC { register struct namecache *p; @@ -315,11 +346,66 @@ cache_enter(dvp, vp, cnp) void nchinit() { + static void init_string_table(void); + + TAILQ_INIT(&nclruhead); + nchashtbl = hashinit(MAX(4096, desiredvnodes), M_CACHE, &nchash); - TAILQ_INIT(&nclruhead); - nchashtbl = hashinit(desiredvnodes, M_CACHE, &nchash); + init_string_table(); } + +int +resize_namecache(u_int newsize) +{ + struct nchashhead *new_table; + struct nchashhead *old_table; + struct nchashhead *old_head, *head; + struct namecache *entry, *next; + uint32_t i; + u_long new_mask, old_mask; + + // we don't support shrinking yet + if (newsize < nchash) { + return 0; + } + + new_table = hashinit(newsize, M_CACHE, &new_mask); + if (new_table == NULL) { + return ENOMEM; + } + + // do the switch! + old_table = nchashtbl; + nchashtbl = new_table; + old_mask = nchash; + nchash = new_mask; + + // walk the old table and insert all the entries into + // the new table + // + for(i=0; i <= old_mask; i++) { + old_head = &old_table[i]; + for (entry=old_head->lh_first; entry != NULL; entry=next) { + // + // XXXdbg - Beware: this assumes that hash_string() does + // the same thing as what happens in + // lookup() over in vfs_lookup.c + head = NCHHASH(entry->nc_dvp, hash_string(entry->nc_name, 0)); + + next = entry->nc_hash.le_next; + LIST_INSERT_HEAD(head, entry, nc_hash); + } + } + + FREE(old_table, M_CACHE); + + return 0; +} + + + + /* * Invalidate a all entries to particular vnode. * @@ -370,3 +456,187 @@ cache_purgevfs(mp) } } } + + + +// +// String ref routines +// +static LIST_HEAD(stringhead, string_t) *string_ref_table; +static u_long string_table_mask; +static uint32_t max_chain_len=0; +static struct stringhead *long_chain_head=NULL; +static uint32_t filled_buckets=0; +static uint32_t num_dups=0; +static uint32_t nstrings=0; + +typedef struct string_t { + LIST_ENTRY(string_t) hash_chain; + unsigned char *str; + uint32_t refcount; +} string_t; + + + +static int +resize_string_ref_table() +{ + struct stringhead *new_table; + struct stringhead *old_table; + struct stringhead *old_head, *head; + string_t *entry, *next; + uint32_t i, hashval; + u_long new_mask, old_mask; + + new_table = hashinit((string_table_mask + 1) * 2, M_CACHE, &new_mask); + if (new_table == NULL) { + return ENOMEM; + } + + // do the switch! 
+ old_table = string_ref_table; + string_ref_table = new_table; + old_mask = string_table_mask; + string_table_mask = new_mask; + + printf("resize: max chain len %d, new table size %d\n", + max_chain_len, new_mask + 1); + max_chain_len = 0; + long_chain_head = NULL; + filled_buckets = 0; + + // walk the old table and insert all the entries into + // the new table + // + for(i=0; i <= old_mask; i++) { + old_head = &old_table[i]; + for (entry=old_head->lh_first; entry != NULL; entry=next) { + hashval = hash_string(entry->str, 0); + head = &string_ref_table[hashval & string_table_mask]; + if (head->lh_first == NULL) { + filled_buckets++; + } + + next = entry->hash_chain.le_next; + LIST_INSERT_HEAD(head, entry, hash_chain); + } + } + + FREE(old_table, M_CACHE); + + return 0; +} + + +static void +init_string_table(void) +{ + string_ref_table = hashinit(4096, M_CACHE, &string_table_mask); +} + + +char * +add_name(const char *name, size_t len, u_int hashval, u_int flags) +{ + struct stringhead *head; + string_t *entry; + int chain_len = 0; + + // + // If the table gets more than 3/4 full, resize it + // + if (4*filled_buckets >= ((string_table_mask + 1) * 3)) { + if (resize_string_ref_table() != 0) { + printf("failed to resize the hash table.\n"); + } + } + + if (hashval == 0) { + hashval = hash_string(name, len); + } + + head = &string_ref_table[hashval & string_table_mask]; + for (entry=head->lh_first; entry != NULL; chain_len++, entry=entry->hash_chain.le_next) { + if (strncmp(entry->str, name, len) == 0 && entry->str[len] == '\0') { + entry->refcount++; + num_dups++; + break; + } + } + + if (entry == NULL) { + // it wasn't already there so add it. + MALLOC(entry, string_t *, sizeof(string_t) + len + 1, M_TEMP, M_WAITOK); + + // have to get "head" again because we could have blocked + // in malloc and thus head could have changed. 
+ // + head = &string_ref_table[hashval & string_table_mask]; + if (head->lh_first == NULL) { + filled_buckets++; + } + + LIST_INSERT_HEAD(head, entry, hash_chain); + entry->str = (char *)((char *)entry + sizeof(string_t)); + strncpy(entry->str, name, len); + entry->str[len] = '\0'; + entry->refcount = 1; + + if (chain_len > max_chain_len) { + max_chain_len = chain_len; + long_chain_head = head; + } + + nstrings++; + } + + return entry->str; +} + +int +remove_name(const char *nameref) +{ + struct stringhead *head; + string_t *entry; + uint32_t hashval; + + hashval = hash_string(nameref, 0); + head = &string_ref_table[hashval & string_table_mask]; + for (entry=head->lh_first; entry != NULL; entry=entry->hash_chain.le_next) { + if (entry->str == (unsigned char *)nameref) { + entry->refcount--; + if (entry->refcount == 0) { + LIST_REMOVE(entry, hash_chain); + if (head->lh_first == NULL) { + filled_buckets--; + } + entry->str = NULL; + nstrings--; + + FREE(entry, M_TEMP); + } else { + num_dups--; + } + + return 0; + } + } + + return ENOENT; +} + + +void +dump_string_table(void) +{ + struct stringhead *head; + string_t *entry; + int i; + + for(i=0; i <= string_table_mask; i++) { + head = &string_ref_table[i]; + for (entry=head->lh_first; entry != NULL; entry=entry->hash_chain.le_next) { + printf("%6d - %s\n", entry->refcount, entry->str); + } + } +} diff --git a/bsd/vfs/vfs_cluster.c b/bsd/vfs/vfs_cluster.c index 160acae0a..41de0c840 100644 --- a/bsd/vfs/vfs_cluster.c +++ b/bsd/vfs/vfs_cluster.c @@ -65,14 +65,21 @@ #include #include #include +#include +#include #include #include +#include #include #include +#include +#include + #include + #define CL_READ 0x01 #define CL_ASYNC 0x02 #define CL_COMMIT 0x04 @@ -83,6 +90,7 @@ #define CL_PAGEIN 0x100 #define CL_DEV_MEMORY 0x200 #define CL_PRESERVE 0x400 +#define CL_THROTTLE 0x800 struct clios { @@ -109,9 +117,20 @@ static int cluster_phys_read(struct vnode *vp, struct uio *uio, static int cluster_phys_write(struct vnode *vp, struct uio *uio, off_t newEOF, int devblocksize, int flags); static int cluster_align_phys_io(struct vnode *vp, struct uio *uio, - vm_offset_t usr_paddr, int xsize, int devblocksize, int flags); + addr64_t usr_paddr, int xsize, int devblocksize, int flags); static int cluster_push_x(struct vnode *vp, off_t EOF, daddr_t first, daddr_t last, int can_delay); -static int cluster_try_push(struct vnode *vp, off_t newEOF, int can_delay, int push_all); +static int cluster_try_push(struct vnode *vp, off_t EOF, int can_delay, int push_all); + +static int sparse_cluster_switch(struct vnode *vp, off_t EOF); +static int sparse_cluster_push(struct vnode *vp, off_t EOF, int push_all); +static int sparse_cluster_add(struct vnode *vp, off_t EOF, daddr_t first, daddr_t last); + +static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, int *setcountp); +static kern_return_t vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length); +static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp); +static kern_return_t vfs_drt_control(void **cmapp, int op_type); + +int ubc_page_op_with_control __P((memory_object_control_t, off_t, int, ppnum_t *, int *)); /* @@ -119,7 +138,35 @@ static int cluster_try_push(struct vnode *vp, off_t newEOF, int can_delay, int p * can be outstanding on a single vnode * before we issue a synchronous write */ -#define ASYNC_THROTTLE 9 +#define ASYNC_THROTTLE 18 +#define HARD_THROTTLE_MAXCNT 1 +#define HARD_THROTTLE_MAXSIZE (64 * 1024) + +int hard_throttle_on_root = 0; 
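/*
 * [editor's note] Sketch of the intended use of the refcounted string
 * table above: a namecache entry keeps the pointer handed back by
 * add_name() and releases it with remove_name() when the entry is
 * purged, so equal names share one allocation and compare by pointer.
 * Hypothetical caller; prototypes repeated here for clarity.
 */
#include <sys/types.h>

char *add_name(const char *name, size_t len, u_int hashval, u_int flags);
int remove_name(const char *nameref);

static void
name_ref_example(void)
{
	char *nm;

	nm = add_name("Makefile", 8, 0, 0);	/* refcount 1, or ++ on a dup */
	/* ... stash nm in a cache entry, compare it by pointer ... */
	(void) remove_name(nm);			/* freed when refcount hits 0 */
}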
+struct timeval priority_IO_timestamp_for_root; + + +static int +cluster_hard_throttle_on(vp) + struct vnode *vp; +{ + static struct timeval hard_throttle_maxelapsed = { 0, 300000 }; + + if (vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV) { + struct timeval elapsed; + + if (hard_throttle_on_root) + return(1); + + elapsed = time; + timevalsub(&elapsed, &priority_IO_timestamp_for_root); + + if (timevalcmp(&elapsed, &hard_throttle_maxelapsed, <)) + return(1); + } + return(0); +} + static int cluster_iodone(bp) @@ -174,9 +221,6 @@ cluster_iodone(bp) iostate = (struct clios *)cbp->b_iostate; while (cbp) { - if (cbp->b_vectorcount > 1) - _FREE(cbp->b_vectorlist, M_SEGMENT); - if ((cbp->b_flags & B_ERROR) && error == 0) error = cbp->b_error; @@ -229,14 +273,12 @@ cluster_iodone(bp) if (b_flags & B_COMMIT_UPL) { pg_offset = upl_offset & PAGE_MASK; - commit_size = (((pg_offset + total_size) + (PAGE_SIZE - 1)) / PAGE_SIZE) * PAGE_SIZE; + commit_size = (pg_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; - if (error || (b_flags & B_NOCACHE) || ((b_flags & B_PHYS) && !(b_flags & B_READ))) { + if (error || (b_flags & B_NOCACHE)) { int upl_abort_code; - if (b_flags & B_PHYS) - upl_abort_code = UPL_ABORT_FREE_ON_EMPTY; - else if ((b_flags & B_PAGEOUT) && (error != ENXIO)) /* transient error */ + if ((b_flags & B_PAGEOUT) && (error != ENXIO)) /* transient error */ upl_abort_code = UPL_ABORT_FREE_ON_EMPTY; else if (b_flags & B_PGIN) upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR; @@ -253,10 +295,12 @@ cluster_iodone(bp) } else { int upl_commit_flags = UPL_COMMIT_FREE_ON_EMPTY; - if (b_flags & B_PHYS) - upl_commit_flags |= UPL_COMMIT_SET_DIRTY; - else if ( !(b_flags & B_PAGEOUT)) + if (b_flags & B_PHYS) { + if (b_flags & B_READ) + upl_commit_flags |= UPL_COMMIT_SET_DIRTY; + } else if ( !(b_flags & B_PAGEOUT)) upl_commit_flags |= UPL_COMMIT_CLEAR_DIRTY; + if (b_flags & B_AGE) upl_commit_flags |= UPL_COMMIT_INACTIVATE; @@ -282,32 +326,37 @@ cluster_zero(upl, upl_offset, size, bp) int size; struct buf *bp; { - vm_offset_t io_addr = 0; - int must_unmap = 0; - kern_return_t kret; + upl_page_info_t *pl; - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_NONE, + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_START, upl_offset, size, (int)bp, 0, 0); if (bp == NULL || bp->b_data == NULL) { - kret = ubc_upl_map(upl, &io_addr); - - if (kret != KERN_SUCCESS) - panic("cluster_zero: ubc_upl_map() failed with (%d)", kret); - if (io_addr == 0) - panic("cluster_zero: ubc_upl_map() mapped 0"); - must_unmap = 1; + pl = ubc_upl_pageinfo(upl); + + while (size) { + int page_offset; + int page_index; + addr64_t zero_addr; + int zero_cnt; + + page_index = upl_offset / PAGE_SIZE; + page_offset = upl_offset & PAGE_MASK; + + zero_addr = ((addr64_t)upl_phys_page(pl, page_index) << 12) + page_offset; + zero_cnt = min(PAGE_SIZE - page_offset, size); + + bzero_phys(zero_addr, zero_cnt); + + size -= zero_cnt; + upl_offset += zero_cnt; + } } else - io_addr = (vm_offset_t)bp->b_data; - bzero((caddr_t)(io_addr + upl_offset), size); - - if (must_unmap) { - kret = ubc_upl_unmap(upl); + bzero((caddr_t)((vm_offset_t)bp->b_data + upl_offset), size); - if (kret != KERN_SUCCESS) - panic("cluster_zero: kernel_upl_unmap failed"); - } + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_END, + upl_offset, size, 0, 0, 0); } static int @@ -323,7 +372,6 @@ cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags, struct clios *iostate; { struct buf *cbp; - struct iovec *iovp; u_int size; u_int io_size; int 
io_flags; @@ -331,7 +379,6 @@ cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags, int retval = 0; struct buf *cbp_head = 0; struct buf *cbp_tail = 0; - upl_page_info_t *pl; int buf_count = 0; int pg_count; int pg_offset; @@ -339,7 +386,16 @@ cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags, u_int max_vectors; int priv; int zero_offset = 0; - u_int first_lblkno; + int async_throttle; + + if (devblocksize) + size = (non_rounded_size + (devblocksize - 1)) & ~(devblocksize - 1); + else + size = non_rounded_size; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, + (int)f_offset, size, upl_offset, flags, 0); + if (flags & CL_READ) { io_flags = (B_VECTORLIST | B_READ); @@ -350,8 +406,20 @@ cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags, vfs_io_attributes(vp, B_WRITE, &max_iosize, &max_vectors); } - pl = ubc_upl_pageinfo(upl); - + /* + * make sure the maximum iosize are at least the size of a page + * and that they are multiples of the page size + */ + max_iosize &= ~PAGE_MASK; + + if (flags & CL_THROTTLE) { + if ( !(flags & CL_PAGEOUT) && cluster_hard_throttle_on(vp)) { + if (max_iosize > HARD_THROTTLE_MAXSIZE) + max_iosize = HARD_THROTTLE_MAXSIZE; + async_throttle = HARD_THROTTLE_MAXCNT; + } else + async_throttle = ASYNC_THROTTLE; + } if (flags & CL_AGE) io_flags |= B_AGE; if (flags & CL_DUMP) @@ -365,15 +433,6 @@ cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags, if (flags & CL_PRESERVE) io_flags |= B_PHYS; - if (devblocksize) - size = (non_rounded_size + (devblocksize - 1)) & ~(devblocksize - 1); - else - size = non_rounded_size; - - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, - (int)f_offset, size, upl_offset, flags, 0); - if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) { /* * then we are going to end up @@ -387,7 +446,6 @@ cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags, while (size) { int vsize; int i; - int pl_index; int pg_resid; int num_contig; daddr_t lblkno; @@ -418,27 +476,24 @@ cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags, be mapped in a "hole" and require allocation before the I/O: */ - ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE_64, UPL_ABORT_FREE_ON_EMPTY); + ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); if (ubc_pushdirty_range(vp, f_offset, PAGE_SIZE_64) == 0) { error = EINVAL; break; }; - upl_offset += PAGE_SIZE_64; f_offset += PAGE_SIZE_64; - size -= PAGE_SIZE_64; + upl_offset += PAGE_SIZE; + size -= PAGE_SIZE; continue; } lblkno = (daddr_t)(f_offset / PAGE_SIZE_64); /* * we have now figured out how much I/O we can do - this is in 'io_size' - * pl_index represents the first page in the 'upl' that the I/O will occur for * pg_offset is the starting point in the first page for the I/O * pg_count is the number of full and partial pages that 'io_size' encompasses */ - pl_index = upl_offset / PAGE_SIZE; pg_offset = upl_offset & PAGE_MASK; - pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE; if (flags & CL_DEV_MEMORY) { /* @@ -452,7 +507,9 @@ cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags, * treat physical requests as one 'giant' page */ pg_count = 1; - } + } else + pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE; + if ((flags & CL_READ) && (long)blkno == -1) { int bytes_to_zero; @@ -530,88 +587,37 @@ cluster_io(vp, upl, 
upl_offset, f_offset, non_rounded_size, devblocksize, flags, real_bp->b_blkno = blkno; } - if (pg_count > 1) { - if (pg_count > max_vectors) { - io_size -= (pg_count - max_vectors) * PAGE_SIZE; + if (pg_count > max_vectors) { + io_size -= (pg_count - max_vectors) * PAGE_SIZE; - if (io_size < 0) { - io_size = PAGE_SIZE - pg_offset; - pg_count = 1; - } else - pg_count = max_vectors; - } - /* - * we need to allocate space for the vector list - */ - if (pg_count > 1) { - iovp = (struct iovec *)_MALLOC(sizeof(struct iovec) * pg_count, - M_SEGMENT, M_NOWAIT); - - if (iovp == (struct iovec *) 0) { - /* - * if the allocation fails, then throttle down to a single page - */ - io_size = PAGE_SIZE - pg_offset; - pg_count = 1; - } - } + if (io_size < 0) { + io_size = PAGE_SIZE - pg_offset; + pg_count = 1; + } else + pg_count = max_vectors; } - /* Throttle the speculative IO */ - if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT)) + if ( !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV)) + /* + * if we're not targeting a virtual device i.e. a disk image + * it's safe to dip into the reserve pool since real devices + * can complete this I/O request without requiring additional + * bufs from the alloc_io_buf pool + */ + priv = 1; + else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT)) + /* + * Throttle the speculative IO + */ priv = 0; else priv = 1; cbp = alloc_io_buf(vp, priv); - if (pg_count == 1) - /* - * we use the io vector that's reserved in the buffer header - * this insures we can always issue an I/O even in a low memory - * condition that prevents the _MALLOC from succeeding... this - * is necessary to prevent deadlocks with the pager - */ - iovp = (struct iovec *)(&cbp->b_vects[0]); - - cbp->b_vectorlist = (void *)iovp; - cbp->b_vectorcount = pg_count; - - if (flags & CL_DEV_MEMORY) { - - iovp->iov_len = io_size; - iovp->iov_base = (caddr_t)upl_phys_page(pl, 0); - if (iovp->iov_base == (caddr_t) 0) { - free_io_buf(cbp); - error = EINVAL; - } else - iovp->iov_base += upl_offset; - } else { - - for (i = 0, vsize = io_size; i < pg_count; i++, iovp++) { - int psize; - - psize = PAGE_SIZE - pg_offset; - - if (psize > vsize) - psize = vsize; - - iovp->iov_len = psize; - iovp->iov_base = (caddr_t)upl_phys_page(pl, pl_index + i); - - if (iovp->iov_base == (caddr_t) 0) { - if (pg_count > 1) - _FREE(cbp->b_vectorlist, M_SEGMENT); - free_io_buf(cbp); - - error = EINVAL; - break; - } - iovp->iov_base += pg_offset; - pg_offset = 0; - - if (flags & CL_PAGEOUT) { + if (flags & CL_PAGEOUT) { + for (i = 0; i < pg_count; i++) { int s; struct buf *bp; @@ -627,12 +633,7 @@ cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags, } splx(s); } - vsize -= psize; - } } - if (error) - break; - if (flags & CL_ASYNC) { cbp->b_flags |= (B_CALL | B_ASYNC); cbp->b_iodone = (void *)cluster_iodone; @@ -703,6 +704,12 @@ start_io: } else cbp_head->b_validend = 0; + if (flags & CL_THROTTLE) { + while (vp->v_numoutput >= async_throttle) { + vp->v_flag |= VTHROTTLED; + tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "cluster_io", 0); + } + } for (cbp = cbp_head; cbp;) { struct buf * cbp_next; @@ -740,8 +747,6 @@ start_io: for (cbp = cbp_head; cbp;) { struct buf * cbp_next; - if (cbp->b_vectorcount > 1) - _FREE(cbp->b_vectorlist, M_SEGMENT); upl_offset -= cbp->b_bcount; size += cbp->b_bcount; io_size += cbp->b_bcount; @@ -770,23 +775,25 @@ start_io: } } pg_offset = upl_offset & PAGE_MASK; - abort_size = ((size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE) * PAGE_SIZE; + abort_size = (size + pg_offset + (PAGE_SIZE - 
1)) & ~PAGE_MASK; if (flags & CL_COMMIT) { int upl_abort_code; - if (flags & CL_PRESERVE) - upl_abort_code = UPL_ABORT_FREE_ON_EMPTY; - else if ((flags & CL_PAGEOUT) && (error != ENXIO)) /* transient error */ - upl_abort_code = UPL_ABORT_FREE_ON_EMPTY; - else if (flags & CL_PAGEIN) - upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR; - else - upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES; + if (flags & CL_PRESERVE) { + ubc_upl_commit_range(upl, upl_offset - pg_offset, abort_size, + UPL_COMMIT_FREE_ON_EMPTY); + } else { + if ((flags & CL_PAGEOUT) && (error != ENXIO)) /* transient error */ + upl_abort_code = UPL_ABORT_FREE_ON_EMPTY; + else if (flags & CL_PAGEIN) + upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR; + else + upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES; - ubc_upl_abort_range(upl, upl_offset - pg_offset, abort_size, + ubc_upl_abort_range(upl, upl_offset - pg_offset, abort_size, upl_abort_code); - + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE, (int)upl, upl_offset - pg_offset, abort_size, error, 0); } @@ -814,8 +821,7 @@ cluster_rd_prefetch(vp, f_offset, size, filesize, devblocksize) off_t filesize; int devblocksize; { - int pages_to_fetch; - int skipped_pages; + int pages_in_prefetch; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START, (int)f_offset, size, (int)filesize, 0, 0); @@ -826,28 +832,20 @@ cluster_rd_prefetch(vp, f_offset, size, filesize, devblocksize) return(0); } if (size > (MAX_UPL_TRANSFER * PAGE_SIZE)) - size = MAX_UPL_TRANSFER * PAGE_SIZE; + size = (MAX_UPL_TRANSFER * PAGE_SIZE); else - size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); + size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK; if ((off_t)size > (filesize - f_offset)) size = filesize - f_offset; - - pages_to_fetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE; + pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE; - for (skipped_pages = 0; skipped_pages < pages_to_fetch; skipped_pages++) { - if (ubc_page_op(vp, f_offset, 0, 0, 0) != KERN_SUCCESS) - break; - f_offset += PAGE_SIZE; - size -= PAGE_SIZE; - } - if (skipped_pages < pages_to_fetch) - advisory_read(vp, filesize, f_offset, size, devblocksize); + advisory_read(vp, filesize, f_offset, size, devblocksize); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END, - (int)f_offset + (pages_to_fetch * PAGE_SIZE), skipped_pages, 0, 1, 0); + (int)f_offset + size, pages_in_prefetch, 0, 1, 0); - return (pages_to_fetch); + return (pages_in_prefetch); } @@ -863,7 +861,6 @@ cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize) daddr_t r_lblkno; off_t f_offset; int size_of_prefetch; - int max_pages; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START, b_lblkno, e_lblkno, vp->v_lastr, 0, 0); @@ -873,7 +870,6 @@ cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize) vp->v_ralen, vp->v_maxra, vp->v_lastr, 0, 0); return; } - if (vp->v_lastr == -1 || (b_lblkno != vp->v_lastr && b_lblkno != (vp->v_lastr + 1) && (b_lblkno != (vp->v_maxra + 1) || vp->v_ralen == 0))) { vp->v_ralen = 0; @@ -884,15 +880,8 @@ cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize) return; } - max_pages = MAX_UPL_TRANSFER; - - vp->v_ralen = vp->v_ralen ? 
min(max_pages, vp->v_ralen << 1) : 1; - - if (((e_lblkno + 1) - b_lblkno) > vp->v_ralen) - vp->v_ralen = min(max_pages, (e_lblkno + 1) - b_lblkno); - if (e_lblkno < vp->v_maxra) { - if ((vp->v_maxra - e_lblkno) > max(max_pages / 16, 4)) { + if ((vp->v_maxra - e_lblkno) > (MAX_UPL_TRANSFER / 4)) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, vp->v_ralen, vp->v_maxra, vp->v_lastr, 2, 0); @@ -902,14 +891,28 @@ cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize) r_lblkno = max(e_lblkno, vp->v_maxra) + 1; f_offset = (off_t)r_lblkno * PAGE_SIZE_64; + size_of_prefetch = 0; + + ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch); + + if (size_of_prefetch) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, + vp->v_ralen, vp->v_maxra, vp->v_lastr, 3, 0); + return; + } if (f_offset < filesize) { - size_of_prefetch = cluster_rd_prefetch(vp, f_offset, vp->v_ralen * PAGE_SIZE, filesize, devblocksize); + vp->v_ralen = vp->v_ralen ? min(MAX_UPL_TRANSFER, vp->v_ralen << 1) : 1; + + if (((e_lblkno + 1) - b_lblkno) > vp->v_ralen) + vp->v_ralen = min(MAX_UPL_TRANSFER, (e_lblkno + 1) - b_lblkno); + + size_of_prefetch = cluster_rd_prefetch(vp, f_offset, vp->v_ralen * PAGE_SIZE, filesize, devblocksize); if (size_of_prefetch) vp->v_maxra = (r_lblkno + size_of_prefetch) - 1; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, - vp->v_ralen, vp->v_maxra, vp->v_lastr, 3, 0); + vp->v_ralen, vp->v_maxra, vp->v_lastr, 4, 0); } int @@ -924,9 +927,22 @@ cluster_pageout(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, fla int flags; { int io_size; - int pg_size; + int rounded_size; off_t max_size; - int local_flags = CL_PAGEOUT; + int local_flags; + + if (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) + /* + * if we know we're issuing this I/O to a virtual device (i.e. disk image) + * then we don't want to enforce this throttle... if we do, we can + * potentially deadlock since we're stalling the pageout thread at a time + * when the disk image might need additional memory (which won't be available + * if the pageout thread can't run)... 
instead we'll just depend on the throttle + * that the pageout thread now has in place to deal with external files + */ + local_flags = CL_PAGEOUT; + else + local_flags = CL_PAGEOUT | CL_THROTTLE; if ((flags & UPL_IOSYNC) == 0) local_flags |= CL_ASYNC; @@ -969,17 +985,14 @@ cluster_pageout(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, fla else io_size = max_size; - pg_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; + rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; - if (size > pg_size) { + if (size > rounded_size) { if (local_flags & CL_COMMIT) - ubc_upl_abort_range(upl, upl_offset + pg_size, size - pg_size, + ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size, UPL_ABORT_FREE_ON_EMPTY); } - while (vp->v_numoutput >= ASYNC_THROTTLE) { - vp->v_flag |= VTHROTTLED; - tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "cluster_pageout", 0); - } + vp->v_flag |= VHASBEENPAGED; return (cluster_io(vp, upl, upl_offset, f_offset, io_size, devblocksize, local_flags, (struct buf *)0, (struct clios *)0)); @@ -1037,7 +1050,7 @@ cluster_pagein(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flag if (size > rounded_size && (local_flags & CL_COMMIT)) ubc_upl_abort_range(upl, upl_offset + rounded_size, - size - (upl_offset + rounded_size), UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); retval = cluster_io(vp, upl, upl_offset, f_offset, io_size, devblocksize, local_flags | CL_READ | CL_PAGEIN, (struct buf *)0, (struct clios *)0); @@ -1100,43 +1113,62 @@ cluster_write(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) int clip_size; off_t max_io_size; struct iovec *iov; - vm_offset_t upl_offset; int upl_size; - int pages_in_pl; - upl_page_info_t *pl; int upl_flags; upl_t upl; int retval = 0; + + if (vp->v_flag & VHASBEENPAGED) + { + /* + * this vnode had pages cleaned to it by + * the pager which indicates that either + * it's not very 'hot', or the system is + * being overwhelmed by a lot of dirty + * data being delayed in the VM cache... + * in either event, we'll push our remaining + * delayed data at this point... this will + * be more efficient than paging out 1 page at + * a time, and will also act as a throttle + * by delaying this client from writing any + * more data until all his delayed data has + * at least been queued to the uderlying driver. + */ + cluster_push(vp); + + vp->v_flag &= ~VHASBEENPAGED; + } if ( (!(vp->v_flag & VNOCACHE_DATA)) || (!uio) || (uio->uio_segflg != UIO_USERSPACE)) { - retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags); - return(retval); + /* + * go do a write through the cache if one of the following is true.... + * NOCACHE is not true + * there is no uio structure or it doesn't target USERSPACE + */ + return (cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)); } while (uio->uio_resid && uio->uio_offset < newEOF && retval == 0) { - /* we know we have a resid, so this is safe */ + /* + * we know we have a resid, so this is safe + * skip over any emtpy vectors + */ iov = uio->uio_iov; + while (iov->iov_len == 0) { uio->uio_iov++; uio->uio_iovcnt--; iov = uio->uio_iov; } - - /* - * We check every vector target and if it is physically - * contiguous space, we skip the sanity checks. 
- */ - - upl_offset = (vm_offset_t)iov->iov_base & ~PAGE_MASK; - upl_size = (upl_offset + PAGE_SIZE +(PAGE_SIZE -1)) & ~PAGE_MASK; - pages_in_pl = 0; + upl_size = PAGE_SIZE; upl_flags = UPL_QUERY_OBJECT_TYPE; + if ((vm_map_get_upl(current_map(), (vm_offset_t)iov->iov_base & ~PAGE_MASK, - &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, 0)) != KERN_SUCCESS) + &upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS) { /* * the user app must have passed in an invalid address @@ -1144,6 +1176,10 @@ cluster_write(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) return (EFAULT); } + /* + * We check every vector target but if it is physically + * contiguous space, we skip the sanity checks. + */ if (upl_flags & UPL_PHYS_CONTIG) { if (flags & IO_HEADZEROFILL) @@ -1158,51 +1194,61 @@ cluster_write(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) if (uio->uio_resid == 0 && (flags & IO_TAILZEROFILL)) { - retval = cluster_write_x(vp, (struct uio *)0, 0, tailOff, uio->uio_offset, 0, devblocksize, IO_HEADZEROFILL); - return(retval); + return (cluster_write_x(vp, (struct uio *)0, 0, tailOff, uio->uio_offset, 0, devblocksize, IO_HEADZEROFILL)); } } - else if ((uio->uio_resid < 4 * PAGE_SIZE) || (flags & (IO_TAILZEROFILL | IO_HEADZEROFILL))) + else if ((uio->uio_resid < PAGE_SIZE) || (flags & (IO_TAILZEROFILL | IO_HEADZEROFILL))) { /* - * We set a threshhold of 4 pages to decide if the nocopy - * write loop is worth the trouble... - * we also come here if we're trying to zero the head and/or tail - * of a partially written page, and the user source is not a physically contiguous region + * we're here because we don't have a physically contiguous target buffer + * go do a write through the cache if one of the following is true.... + * the total xfer size is less than a page... + * we're being asked to ZEROFILL either the head or the tail of the I/O... */ - retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags); - return(retval); + return (cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)); } - else if (uio->uio_offset & PAGE_MASK_64) + else if (((int)uio->uio_offset & PAGE_MASK) || ((int)iov->iov_base & PAGE_MASK)) { - /* Bring the file offset write up to a pagesize boundary */ - clip_size = (PAGE_SIZE - (uio->uio_offset & PAGE_MASK_64)); - if (uio->uio_resid < clip_size) - clip_size = uio->uio_resid; - /* - * Fake the resid going into the cluster_write_x call - * and restore it on the way out.
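/*
 * [editor's note] Worked example of the co-alignment clip used in the
 * nocopy-write setup above (assumes 4K pages; values are arbitrary):
 * with uio_offset = 0x1A00 and iov_base = 0x2A00, both sit 0xA00 bytes
 * into their pages, so pushing 0x1000 - 0xA00 = 0x600 bytes through
 * the cache leaves the file offset at 0x2000 and the buffer at 0x3000,
 * both page aligned. If the two in-page offsets differ, no clip can
 * align both, so the whole vector goes through the cache.
 */
#include <stdint.h>

#define PG_SIZE 4096			/* assumed page size */
#define PG_MASK (PG_SIZE - 1)

/*
 * bytes to write through the cache so both targets become page aligned;
 * -1 means they can never co-align (the real code only takes this path
 * when at least one of the two is currently unaligned)
 */
static int
coalign_clip(int64_t uio_offset, uintptr_t iov_base)
{
	if ((uio_offset & PG_MASK) != (iov_base & PG_MASK))
		return (-1);
	return ((int)(PG_SIZE - (uio_offset & PG_MASK)));
}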
- */ - prev_resid = uio->uio_resid; - uio->uio_resid = clip_size; - retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags); - uio->uio_resid = prev_resid - (clip_size - uio->uio_resid); - } - else if ((int)iov->iov_base & PAGE_MASK_64) - { - clip_size = iov->iov_len; - prev_resid = uio->uio_resid; - uio->uio_resid = clip_size; - retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags); - uio->uio_resid = prev_resid - (clip_size - uio->uio_resid); + if (((int)uio->uio_offset & PAGE_MASK) == ((int)iov->iov_base & PAGE_MASK)) + { + /* + * Bring the file offset write up to a pagesize boundary + * this will also bring the base address to a page boundary + * since they both are currently on the same offset within a page + * note: if we get here, uio->uio_resid is greater than PAGE_SIZE + * so the computed clip_size must always be less than the current uio_resid + */ + clip_size = (PAGE_SIZE - (uio->uio_offset & PAGE_MASK_64)); + + /* + * Fake the resid going into the cluster_write_x call + * and restore it on the way out. + */ + prev_resid = uio->uio_resid; + uio->uio_resid = clip_size; + retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags); + uio->uio_resid = prev_resid - (clip_size - uio->uio_resid); + } + else + { + /* + * can't get both the file offset and the buffer offset aligned to a page boundary + * so fire an I/O through the cache for this entire vector + */ + clip_size = iov->iov_len; + prev_resid = uio->uio_resid; + uio->uio_resid = clip_size; + retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags); + uio->uio_resid = prev_resid - (clip_size - uio->uio_resid); + } } else { /* * If we come in here, we know the offset into - * the file is on a pagesize boundary + * the file is on a pagesize boundary and the + * target buffer address is also on a page boundary */ - max_io_size = newEOF - uio->uio_offset; clip_size = uio->uio_resid; if (iov->iov_len < clip_size) @@ -1259,7 +1305,6 @@ cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags) kern_return_t kret; struct iovec *iov; int i; - int first = 1; int force_data_sync; int error = 0; struct clios iostate; @@ -1289,12 +1334,7 @@ cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags) if (io_size > (MAX_UPL_TRANSFER * PAGE_SIZE)) io_size = MAX_UPL_TRANSFER * PAGE_SIZE; - if (first) { - if (io_size > (MAX_UPL_TRANSFER * PAGE_SIZE) / 4) - io_size = (MAX_UPL_TRANSFER * PAGE_SIZE) / 8; - first = 0; - } - upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64; + upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK; upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START, @@ -1304,7 +1344,7 @@ cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags) pages_in_pl = 0; upl_size = upl_needed_size; upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC | - UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL; + UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; kret = vm_map_get_upl(current_map(), (vm_offset_t)iov->iov_base & ~PAGE_MASK, @@ -1318,7 +1358,6 @@ cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags) if (kret != KERN_SUCCESS) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END, 0, 0, 0, kret, 0); - /* * cluster_nocopy_write: failed to get pagelist * @@ -1348,7 +1387,6 @@ cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags) if (force_data_sync >= 3) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | 
DBG_FUNC_END, i, pages_in_pl, upl_size, kret, 0); - /* * for some reason, we couldn't acquire a hold on all * the pages needed in the user's address space @@ -1372,7 +1410,6 @@ cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags) if (io_size == 0) { ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, UPL_ABORT_FREE_ON_EMPTY); - /* * we may have already spun some portion of this request * off as async requests... we need to wait for the I/O @@ -1383,23 +1420,11 @@ cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags) /* * Now look for pages already in the cache * and throw them away. + * uio->uio_offset is page aligned within the file + * io_size is a multiple of PAGE_SIZE */ + ubc_range_op(vp, uio->uio_offset, uio->uio_offset + io_size, UPL_ROP_DUMP, NULL); - upl_f_offset = uio->uio_offset; /* this is page aligned in the file */ - max_io_size = io_size; - - while (max_io_size) { - /* - * Flag UPL_POP_DUMP says if the page is found - * in the page cache it must be thrown away. - */ - ubc_page_op(vp, - upl_f_offset, - UPL_POP_SET | UPL_POP_BUSY | UPL_POP_DUMP, - 0, 0); - max_io_size -= PAGE_SIZE_64; - upl_f_offset += PAGE_SIZE_64; - } /* * we want push out these writes asynchronously so that we can overlap * the preparation of the next I/O @@ -1423,7 +1448,7 @@ cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags) goto wait_for_writes; } - io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT; + io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START, (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0); @@ -1469,7 +1494,7 @@ cluster_phys_write(vp, uio, newEOF, devblocksize, flags) int flags; { upl_page_info_t *pl; - vm_offset_t src_paddr; + addr64_t src_paddr; upl_t upl; vm_offset_t upl_offset; int tail_size; @@ -1491,13 +1516,13 @@ cluster_phys_write(vp, uio, newEOF, devblocksize, flags) iov = uio->uio_iov; io_size = iov->iov_len; - upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64; + upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK; upl_needed_size = upl_offset + io_size; pages_in_pl = 0; upl_size = upl_needed_size; upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC | - UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL; + UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; kret = vm_map_get_upl(current_map(), (vm_offset_t)iov->iov_base & ~PAGE_MASK, @@ -1520,7 +1545,7 @@ cluster_phys_write(vp, uio, newEOF, devblocksize, flags) } pl = ubc_upl_pageinfo(upl); - src_paddr = (vm_offset_t)upl_phys_page(pl, 0) + ((vm_offset_t)iov->iov_base & PAGE_MASK); + src_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + ((addr64_t)((u_int)iov->iov_base & PAGE_MASK)); while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) { int head_size; @@ -1596,7 +1621,6 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) int xfer_resid; int io_size; int io_flags; - vm_offset_t io_address; int io_offset; int bytes_to_zero; int bytes_to_move; @@ -1610,6 +1634,8 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) off_t zero_off1; daddr_t start_blkno; daddr_t last_blkno; + int intersection; + if (uio) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START, @@ -1651,12 +1677,11 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) zero_cnt1 = tailOff - zero_off1; } } - if (zero_cnt == 0 && uio == (struct uio *) 0) - { + if (zero_cnt == 0 && uio == (struct uio *) 0) { 
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, retval, 0, 0, 0, 0); return (0); - } + } while ((total_size = (uio_resid + zero_cnt + zero_cnt1)) && retval == 0) { /* @@ -1678,6 +1703,45 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) if (total_size > (MAX_UPL_TRANSFER * PAGE_SIZE)) total_size = MAX_UPL_TRANSFER * PAGE_SIZE; + start_blkno = (daddr_t)(upl_f_offset / PAGE_SIZE_64); + + if (uio && !(vp->v_flag & VNOCACHE_DATA) && + (flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0) { + /* + * assumption... total_size <= uio_resid + * because IO_HEADZEROFILL and IO_TAILZEROFILL not set + */ + if ((start_offset + total_size) > (MAX_UPL_TRANSFER * PAGE_SIZE)) + total_size -= start_offset; + xfer_resid = total_size; + + retval = cluster_copy_ubc_data(vp, uio, &xfer_resid, 1); + + if (retval) + break; + + uio_resid -= (total_size - xfer_resid); + total_size = xfer_resid; + start_offset = (int)(uio->uio_offset & PAGE_MASK_64); + upl_f_offset = uio->uio_offset - start_offset; + + if (total_size == 0) { + if (start_offset) { + /* + * the write did not finish on a page boundary + * which will leave upl_f_offset pointing to the + * beginning of the last page written instead of + * the page beyond it... bump it in this case + * so that the cluster code records the last page + * written as dirty + */ + upl_f_offset += PAGE_SIZE_64; + } + upl_size = 0; + + goto check_cluster; + } + } /* * compute the size of the upl needed to encompass * the requested write... limit each call to cluster_io @@ -1697,20 +1761,20 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) if ((long long)io_size > total_size) io_size = total_size; - start_blkno = (daddr_t)(upl_f_offset / PAGE_SIZE_64); - last_blkno = start_blkno + pages_in_upl; + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0); + kret = ubc_create_upl(vp, upl_f_offset, upl_size, &upl, &pl, - UPL_FLAGS_NONE); + UPL_SET_LITE); if (kret != KERN_SUCCESS) panic("cluster_write: failed to get pagelist"); - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_NONE, - (int)upl, (int)upl_f_offset, upl_size, start_offset, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, + (int)upl, (int)upl_f_offset, start_offset, 0, 0); if (start_offset && !upl_valid_page(pl, 0)) { int read_size; @@ -1777,8 +1841,6 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) } } } - if ((kret = ubc_upl_map(upl, &io_address)) != KERN_SUCCESS) - panic("cluster_write: ubc_upl_map failed\n"); xfer_resid = io_size; io_offset = start_offset; @@ -1790,11 +1852,7 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) bytes_to_zero = xfer_resid; if ( !(flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) { - bzero((caddr_t)(io_address + io_offset), bytes_to_zero); - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE, - (int)upl_f_offset + io_offset, bytes_to_zero, - (int)io_offset, xfer_resid, 0); + cluster_zero(upl, io_offset, bytes_to_zero, NULL); } else { int zero_pg_index; @@ -1802,19 +1860,11 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64); if ( !upl_valid_page(pl, zero_pg_index)) { - bzero((caddr_t)(io_address + io_offset), bytes_to_zero); - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE, - (int)upl_f_offset + io_offset, bytes_to_zero, - (int)io_offset, xfer_resid, 0); + cluster_zero(upl, io_offset, 
bytes_to_zero, NULL); } else if ((flags & (IO_NOZERODIRTY | IO_NOZEROVALID)) == IO_NOZERODIRTY && !upl_dirty_page(pl, zero_pg_index)) { - bzero((caddr_t)(io_address + io_offset), bytes_to_zero); - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE, - (int)upl_f_offset + io_offset, bytes_to_zero, - (int)io_offset, xfer_resid, 0); + cluster_zero(upl, io_offset, bytes_to_zero, NULL); } } xfer_resid -= bytes_to_zero; @@ -1825,15 +1875,9 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) if (xfer_resid && uio_resid) { bytes_to_move = min(uio_resid, xfer_resid); - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 42)) | DBG_FUNC_NONE, - (int)uio->uio_offset, bytes_to_move, uio_resid, xfer_resid, 0); - - retval = uiomove((caddr_t)(io_address + io_offset), bytes_to_move, uio); - + retval = cluster_copy_upl_data(uio, upl, io_offset, bytes_to_move); if (retval) { - if ((kret = ubc_upl_unmap(upl)) != KERN_SUCCESS) - panic("cluster_write: kernel_upl_unmap failed\n"); ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); @@ -1853,11 +1897,7 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) bytes_to_zero = xfer_resid; if ( !(flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) { - bzero((caddr_t)(io_address + io_offset), bytes_to_zero); - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE, - (int)upl_f_offset + io_offset, - bytes_to_zero, (int)io_offset, xfer_resid, 0); + cluster_zero(upl, io_offset, bytes_to_zero, NULL); } else { int zero_pg_index; @@ -1865,19 +1905,10 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) zero_pg_index = (int)((zero_off1 - upl_f_offset) / PAGE_SIZE_64); if ( !upl_valid_page(pl, zero_pg_index)) { - bzero((caddr_t)(io_address + io_offset), bytes_to_zero); - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE, - (int)upl_f_offset + io_offset, - bytes_to_zero, (int)io_offset, xfer_resid, 0); - + cluster_zero(upl, io_offset, bytes_to_zero, NULL); } else if ((flags & (IO_NOZERODIRTY | IO_NOZEROVALID)) == IO_NOZERODIRTY && !upl_dirty_page(pl, zero_pg_index)) { - bzero((caddr_t)(io_address + io_offset), bytes_to_zero); - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE, - (int)upl_f_offset + io_offset, - bytes_to_zero, (int)io_offset, xfer_resid, 0); + cluster_zero(upl, io_offset, bytes_to_zero, NULL); } } xfer_resid -= bytes_to_zero; @@ -1899,15 +1930,8 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) * if the file gets extended again in such a way as to leave a * hole starting at this EOF, we'll have zero's in the correct spot */ - bzero((caddr_t)(io_address + io_size), upl_size - io_size); - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE, - (int)upl_f_offset + io_size, - upl_size - io_size, 0, 0, 0); + cluster_zero(upl, io_size, upl_size - io_size, NULL); } - if ((kret = ubc_upl_unmap(upl)) != KERN_SUCCESS) - panic("cluster_write: kernel_upl_unmap failed\n"); - if (flags & IO_SYNC) /* * if the IO_SYNC flag is set than we need to @@ -1915,6 +1939,63 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) * the I/O */ goto issue_io; +check_cluster: + /* + * calculate the last logical block number + * that this delayed I/O encompassed + */ + last_blkno = (upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64; + + if (vp->v_flag & VHASDIRTY) { + + if ( !(vp->v_flag & VNOCACHE_DATA)) { + /* + * we've fallen into the sparse + * cluster method of delaying dirty pages + * first, we need 
to release the upl if we hold one + * since pages in it may be present in the sparse cluster map + * and may span 2 separate buckets there... if they do and + * we happen to have to flush a bucket to make room and it intersects + * this upl, a deadlock may result on page BUSY + */ + if (upl_size) + ubc_upl_commit_range(upl, 0, upl_size, + UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); + + sparse_cluster_add(vp, newEOF, start_blkno, last_blkno); + + continue; + } + /* + * must have done cached writes that fell into + * the sparse cluster mechanism... we've switched + * to uncached writes on the file, so go ahead + * and push whatever's in the sparse map + * and switch back to normal clustering + * + * see the comment above concerning a possible deadlock... + */ + if (upl_size) { + ubc_upl_commit_range(upl, 0, upl_size, + UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); + /* + * setting upl_size to 0 keeps us from committing a + * second time in the start_new_cluster path + */ + upl_size = 0; + } + sparse_cluster_push(vp, ubc_getsize(vp), 1); + + /* + * no clusters of either type present at this point + * so just go directly to start_new_cluster since + * we know we need to delay this I/O since we've + * already released the pages back into the cache + * to avoid the deadlock with sparse_cluster_push + */ + goto start_new_cluster; + } + upl_offset = 0; if (vp->v_clen == 0) /* @@ -1922,18 +2003,13 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) */ goto start_new_cluster; - /* - * keep track of the overall dirty page - * range we've developed - * in case we have to fall back to the - * VHASDIRTY method of flushing - */ - if (vp->v_flag & VHASDIRTY) - goto delay_io; - for (cl_index = 0; cl_index < vp->v_clen; cl_index++) { /* - * we have an existing cluster... see if this write will extend it nicely + * check each cluster that we currently hold + * try to merge some or all of this write into + * one or more of the existing clusters... if + * any portion of the write remains, start a + * new cluster */ if (start_blkno >= vp->v_clusters[cl_index].start_pg) { /* @@ -1954,37 +2030,61 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) if (start_blkno < (vp->v_clusters[cl_index].start_pg + MAX_UPL_TRANSFER)) { /* * we have a write that starts in the middle of the current cluster - * but extends beyond the cluster's limit - * we'll clip the current cluster if we actually - * overlap with the new write - * and start a new cluster with the current write + * but extends beyond the cluster's limit... we know this because + * of the previous checks + * we'll extend the current cluster to the max + * and update the start_blkno for the current write to reflect that + * the head of it was absorbed into this cluster... 
+ * note that we'll always have a leftover tail in this case since + * full absorption would have occurred in the clause above */ - if (vp->v_clusters[cl_index].last_pg > start_blkno) - vp->v_clusters[cl_index].last_pg = start_blkno; + vp->v_clusters[cl_index].last_pg = vp->v_clusters[cl_index].start_pg + MAX_UPL_TRANSFER; + + if (upl_size) { + int start_pg_in_upl; + + start_pg_in_upl = upl_f_offset / PAGE_SIZE_64; + + if (start_pg_in_upl < vp->v_clusters[cl_index].last_pg) { + intersection = (vp->v_clusters[cl_index].last_pg - start_pg_in_upl) * PAGE_SIZE; + + ubc_upl_commit_range(upl, upl_offset, intersection, + UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); + upl_f_offset += intersection; + upl_offset += intersection; + upl_size -= intersection; + } + } + start_blkno = vp->v_clusters[cl_index].last_pg; } /* - * we also get here for the case where the current write starts - * beyond the limit of the existing cluster + * we come here for the case where the current write starts + * beyond the limit of the existing cluster or we have a leftover + * tail after a partial absorption * * in either case, we'll check the remaining clusters before * starting a new one */ } else { /* - * the current write starts in front of the current cluster + * the current write starts in front of the cluster we're currently considering */ - if ((vp->v_clusters[cl_index].last_pg - start_blkno) <= MAX_UPL_TRANSFER) { + if ((vp->v_clusters[cl_index].last_pg - start_blkno) <= MAX_UPL_TRANSFER) { /* - * we can just merge the old cluster - * with the new request and leave it - * in the cache + * we can just merge the new request into + * this cluster and leave it in the cache + * since the resulting cluster is still + * less than the maximum allowable size */ vp->v_clusters[cl_index].start_pg = start_blkno; if (last_blkno > vp->v_clusters[cl_index].last_pg) { /* * the current write completely - * envelops the existing cluster + * envelops the existing cluster and since + * each write is limited to at most MAX_UPL_TRANSFER pages + * we can just use the start and last blocknos of the write + * to generate the cluster limits */ vp->v_clusters[cl_index].last_pg = last_blkno; } @@ -1995,28 +2095,49 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) * if we were to combine this write with the current cluster * we would exceed the cluster size limit.... so, * let's see if there's any overlap of the new I/O with - * the existing cluster... + * the cluster we're currently considering...
in fact, we'll + * stretch the cluster out to its full limit and see if we + * get an intersection with the current write * */ - if (last_blkno > vp->v_clusters[cl_index].start_pg) + if (last_blkno > vp->v_clusters[cl_index].last_pg - MAX_UPL_TRANSFER) { /* - * the current write extends into the existing cluster - * clip the current cluster by moving the start position - * to where the current write ends + * the current write extends into the proposed cluster + * clip the length of the current write after first combining its + * tail with the newly shaped cluster */ - vp->v_clusters[cl_index].start_pg = last_blkno; + vp->v_clusters[cl_index].start_pg = vp->v_clusters[cl_index].last_pg - MAX_UPL_TRANSFER; + + if (upl_size) { + intersection = (last_blkno - vp->v_clusters[cl_index].start_pg) * PAGE_SIZE; + + if (intersection > upl_size) + /* + * because the current write may consist of a number of pages found in the cache + * which are not part of the UPL, we may have an intersection that exceeds + * the size of the UPL that is also part of this write + */ + intersection = upl_size; + + ubc_upl_commit_range(upl, upl_offset + (upl_size - intersection), intersection, + UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); + upl_size -= intersection; + } + last_blkno = vp->v_clusters[cl_index].start_pg; + } /* * if we get here, there was no way to merge - * the new I/O with this cluster and - * keep it under our maximum cluster length + * any portion of this write with this cluster + * or we could only merge part of it which + * will leave a tail... * we'll check the remaining clusters before starting a new one */ } } if (cl_index < vp->v_clen) /* - * we found an existing cluster that we - * could merger this I/O into + * we found an existing cluster(s) that we + * could entirely merge this I/O into */ goto delay_io; @@ -2031,43 +2152,62 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags) /* * no existing cluster to merge with and no * room to start a new one... we'll try - * pushing the existing ones... if none of - * them are able to be pushed, we'll have - * to fall back on the VHASDIRTY mechanism - * cluster_try_push will set v_clen to the - * number of remaining clusters if it is - * unable to push all of them + * pushing one of the existing ones... if none of + * them are able to be pushed, we'll switch + * to the sparse cluster mechanism + * cluster_try_push updates v_clen to the + * number of remaining clusters... and + * returns the number of currently unused clusters */ if (vp->v_flag & VNOCACHE_DATA) can_delay = 0; else can_delay = 1; - if (cluster_try_push(vp, newEOF, 0, 0) == 0) { - vp->v_flag |= VHASDIRTY; - goto delay_io; + if (cluster_try_push(vp, newEOF, can_delay, 0) == 0) { + /* + * no more room in the normal cluster mechanism + * so let's switch to the more expansive but expensive + * sparse mechanism.... + * first, we need to release the upl if we hold one + * since pages in it may be present in the sparse cluster map (after the cluster_switch) + * and may span 2 separate buckets there...
if they do and + * we happen to have to flush a bucket to make room and it intersects + * this upl, a deadlock may result on page BUSY + */ + if (upl_size) + ubc_upl_commit_range(upl, upl_offset, upl_size, + UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); + + sparse_cluster_switch(vp, newEOF); + sparse_cluster_add(vp, newEOF, start_blkno, last_blkno); + + continue; } + /* + * we pushed one cluster successfully, so we must be sequentially writing this file + * otherwise, we would have failed and fallen into the sparse cluster support + * so let's take the opportunity to push out additional clusters as long as we + * remain below the throttle... this will give us better I/O locality if we're + * in a copy loop (i.e. we won't jump back and forth between the read and write points + * however, we don't want to push so much out that the write throttle kicks in and + * hangs this thread up until some of the I/O completes... + */ + while (vp->v_clen && (vp->v_numoutput <= (ASYNC_THROTTLE / 2))) + cluster_try_push(vp, newEOF, 0, 0); + start_new_cluster: - if (vp->v_clen == 0) { + if (vp->v_clen == 0) vp->v_ciosiz = devblocksize; - vp->v_cstart = start_blkno; - vp->v_lastw = last_blkno; - } + vp->v_clusters[vp->v_clen].start_pg = start_blkno; vp->v_clusters[vp->v_clen].last_pg = last_blkno; vp->v_clen++; -delay_io: - /* - * make sure we keep v_cstart and v_lastw up to - * date in case we have to fall back on the - * V_HASDIRTY mechanism (or we've already entered it) - */ - if (start_blkno < vp->v_cstart) - vp->v_cstart = start_blkno; - if (last_blkno > vp->v_lastw) - vp->v_lastw = last_blkno; - ubc_upl_commit_range(upl, 0, upl_size, UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); +delay_io: + if (upl_size) + ubc_upl_commit_range(upl, upl_offset, upl_size, + UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); continue; issue_io: /* @@ -2084,23 +2224,19 @@ issue_io: } if (flags & IO_SYNC) - io_flags = CL_COMMIT | CL_AGE; + io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE; else - io_flags = CL_COMMIT | CL_AGE | CL_ASYNC; + io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE | CL_ASYNC; if (vp->v_flag & VNOCACHE_DATA) io_flags |= CL_DUMP; - while (vp->v_numoutput >= ASYNC_THROTTLE) { - vp->v_flag |= VTHROTTLED; - tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "cluster_write", 0); - } retval = cluster_io(vp, upl, 0, upl_f_offset, io_size, devblocksize, io_flags, (struct buf *)0, (struct clios *)0); } } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, - retval, 0, 0, 0, 0); + retval, 0, uio_resid, 0, 0); return (retval); } @@ -2117,52 +2253,41 @@ cluster_read(vp, uio, filesize, devblocksize, flags) int clip_size; off_t max_io_size; struct iovec *iov; - vm_offset_t upl_offset; int upl_size; - int pages_in_pl; - upl_page_info_t *pl; int upl_flags; upl_t upl; int retval = 0; - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START, - (int)uio->uio_offset, uio->uio_resid, (int)filesize, devblocksize, 0); - - /* - * We set a threshhold of 4 pages to decide if the nocopy - * read loop is worth the trouble... - */ if (!((vp->v_flag & VNOCACHE_DATA) && (uio->uio_segflg == UIO_USERSPACE))) { - retval = cluster_read_x(vp, uio, filesize, devblocksize, flags); - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END, - (int)uio->uio_offset, uio->uio_resid, vp->v_lastr, retval, 0); - return(retval); + /* + * go do a read through the cache if one of the following is true.... 
NOCACHE is not true + * the uio request doesn't target USERSPACE + */ + return (cluster_read_x(vp, uio, filesize, devblocksize, flags)); } while (uio->uio_resid && uio->uio_offset < filesize && retval == 0) { - /* we know we have a resid, so this is safe */ + /* + * we know we have a resid, so this is safe + * skip over any empty vectors + */ iov = uio->uio_iov; + while (iov->iov_len == 0) { uio->uio_iov++; uio->uio_iovcnt--; iov = uio->uio_iov; } - - /* - * We check every vector target and if it is physically - * contiguous space, we skip the sanity checks. - */ - - upl_offset = (vm_offset_t)iov->iov_base & ~PAGE_MASK; - upl_size = (upl_offset + PAGE_SIZE +(PAGE_SIZE -1)) & ~PAGE_MASK; - pages_in_pl = 0; + upl_size = PAGE_SIZE; upl_flags = UPL_QUERY_OBJECT_TYPE; - if((vm_map_get_upl(current_map(), + + if ((vm_map_get_upl(current_map(), (vm_offset_t)iov->iov_base & ~PAGE_MASK, - &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, 0)) != KERN_SUCCESS) + &upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS) { /* * the user app must have passed in an invalid address */ return (EFAULT); } + /* + * We check every vector target but if it is physically + * contiguous space, we skip the sanity checks. + */ if (upl_flags & UPL_PHYS_CONTIG) { retval = cluster_phys_read(vp, uio, filesize, devblocksize, flags); } - else if (uio->uio_resid < 4 * PAGE_SIZE) + else if (uio->uio_resid < PAGE_SIZE) { /* - * We set a threshhold of 4 pages to decide if the nocopy - * read loop is worth the trouble... - */ - retval = cluster_read_x(vp, uio, filesize, devblocksize, flags); - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END, - (int)uio->uio_offset, uio->uio_resid, vp->v_lastr, retval, 0); - return(retval); - } - else if (uio->uio_offset & PAGE_MASK_64) - { - /* Bring the file offset read up to a pagesize boundary */ - clip_size = (PAGE_SIZE - (int)(uio->uio_offset & PAGE_MASK_64)); - if (uio->uio_resid < clip_size) - clip_size = uio->uio_resid; - /* - * Fake the resid going into the cluster_read_x call - * and restore it on the way out. + * we're here because we don't have a physically contiguous target buffer + * go do a read through the cache if + * the total xfer size is less than a page... */ - prev_resid = uio->uio_resid; - uio->uio_resid = clip_size; - retval = cluster_read_x(vp, uio, filesize, devblocksize, flags); - uio->uio_resid = prev_resid - (clip_size - uio->uio_resid); + return (cluster_read_x(vp, uio, filesize, devblocksize, flags)); } - else if ((int)iov->iov_base & PAGE_MASK_64) { - clip_size = iov->iov_len; - prev_resid = uio->uio_resid; - uio->uio_resid = clip_size; - retval = cluster_read_x(vp, uio, filesize, devblocksize, flags); - uio->uio_resid = prev_resid - (clip_size - uio->uio_resid); + else if (((int)uio->uio_offset & PAGE_MASK) || ((int)iov->iov_base & PAGE_MASK)) { + if (((int)uio->uio_offset & PAGE_MASK) == ((int)iov->iov_base & PAGE_MASK)) + { + /* + * Bring the file offset read up to a pagesize boundary + * this will also bring the base address to a page boundary + * since they both are currently on the same offset within a page + * note: if we get here, uio->uio_resid is greater than PAGE_SIZE + * so the computed clip_size must always be less than the current uio_resid + */ + clip_size = (PAGE_SIZE - (int)(uio->uio_offset & PAGE_MASK_64)); + + /* + * Fake the resid going into the cluster_read_x call + * and restore it on the way out.
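+	     * (editorial aside, not in the original change: a worked example,
+	     *  assuming 4K pages, i.e. PAGE_SIZE == 0x1000... if uio->uio_offset
+	     *  is 0x12345234 and iov->iov_base is 0xbffff234, both share the
+	     *  page offset 0x234, so clip_size = 0x1000 - 0x234 = 0xdcc... once
+	     *  cluster_read_x consumes those 0xdcc bytes through the cache, the
+	     *  file offset and the user address land on page boundaries together
+	     *  and the nocopy path can take over)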
+ */ + prev_resid = uio->uio_resid; + uio->uio_resid = clip_size; + retval = cluster_read_x(vp, uio, filesize, devblocksize, flags); + uio->uio_resid = prev_resid - (clip_size - uio->uio_resid); + } + else + { + /* + * can't get both the file offset and the buffer offset aligned to a page boundary + * so fire an I/O through the cache for this entire vector + */ + clip_size = iov->iov_len; + prev_resid = uio->uio_resid; + uio->uio_resid = clip_size; + retval = cluster_read_x(vp, uio, filesize, devblocksize, flags); + uio->uio_resid = prev_resid - (clip_size - uio->uio_resid); + } } else { @@ -2246,13 +2385,9 @@ cluster_read(vp, uio, filesize, devblocksize, flags) } /* end else */ } /* end while */ - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END, - (int)uio->uio_offset, uio->uio_resid, vp->v_lastr, retval, 0); - return(retval); } - static int cluster_read_x(vp, uio, filesize, devblocksize, flags) struct vnode *vp; @@ -2272,16 +2407,56 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags) int uio_last; int pages_in_upl; off_t max_size; + off_t last_ioread_offset; + off_t last_request_offset; + u_int size_of_prefetch; int io_size; - vm_offset_t io_address; kern_return_t kret; - int segflg; int error = 0; int retval = 0; - int b_lblkno; - int e_lblkno; + u_int b_lblkno; + u_int e_lblkno; + struct clios iostate; + u_int max_rd_size = MAX_UPL_TRANSFER * PAGE_SIZE; + u_int rd_ahead_enabled = 1; + u_int prefetch_enabled = 1; + + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START, + (int)uio->uio_offset, uio->uio_resid, (int)filesize, devblocksize, 0); + + if (cluster_hard_throttle_on(vp)) { + rd_ahead_enabled = 0; + prefetch_enabled = 0; + + max_rd_size = HARD_THROTTLE_MAXSIZE; + } + if (vp->v_flag & (VRAOFF|VNOCACHE_DATA)) + rd_ahead_enabled = 0; + + last_request_offset = uio->uio_offset + uio->uio_resid; + + if (last_request_offset > filesize) + last_request_offset = filesize; + b_lblkno = (u_int)(uio->uio_offset / PAGE_SIZE_64); + e_lblkno = (u_int)((last_request_offset - 1) / PAGE_SIZE_64); + + if (vp->v_ralen && (vp->v_lastr == b_lblkno || (vp->v_lastr + 1) == b_lblkno)) { + /* + * determine if we already have a read-ahead in the pipe courtesy of the + * last read systemcall that was issued... + * if so, pick up it's extent to determine where we should start + * with respect to any read-ahead that might be necessary to + * garner all the data needed to complete this read systemcall + */ + last_ioread_offset = (vp->v_maxra * PAGE_SIZE_64) + PAGE_SIZE_64; - b_lblkno = (int)(uio->uio_offset / PAGE_SIZE_64); + if (last_ioread_offset < uio->uio_offset) + last_ioread_offset = (off_t)0; + else if (last_ioread_offset > last_request_offset) + last_ioread_offset = last_request_offset; + } else + last_ioread_offset = (off_t)0; while (uio->uio_resid && uio->uio_offset < filesize && retval == 0) { /* @@ -2301,70 +2476,91 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags) else io_size = max_size; - if (uio->uio_segflg == UIO_USERSPACE && !(vp->v_flag & VNOCACHE_DATA)) { - segflg = uio->uio_segflg; + if (!(vp->v_flag & VNOCACHE_DATA)) { - uio->uio_segflg = UIO_PHYS_USERSPACE; + while (io_size) { + u_int io_resid; + u_int io_requested; - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START, - (int)uio->uio_offset, io_size, uio->uio_resid, 0, 0); + /* + * if we keep finding the pages we need already in the cache, then + * don't bother to call cluster_rd_prefetch since it costs CPU cycles + * to determine that we have all the pages we need... 
once we miss in + the cache and have issued an I/O, then we'll assume that we're likely + to continue to miss in the cache and it's to our advantage to try and prefetch + */ + if (last_request_offset && last_ioread_offset && (size_of_prefetch = (last_request_offset - last_ioread_offset))) { + if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) { + /* + * we've already issued I/O for this request and + * there's still work to do and + * our prefetch stream is running dry, so issue a + * pre-fetch I/O... the I/O latency will overlap + * with the copying of the data + */ + if (size_of_prefetch > max_rd_size) + size_of_prefetch = max_rd_size; - while (io_size && retval == 0) { - int xsize; - vm_offset_t paddr; + size_of_prefetch = cluster_rd_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, devblocksize); - if (ubc_page_op(vp, - upl_f_offset, - UPL_POP_SET | UPL_POP_BUSY, - &paddr, 0) != KERN_SUCCESS) - break; + last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE); + + if (last_ioread_offset > last_request_offset) + last_ioread_offset = last_request_offset; + } + } + /* + * limit the size of the copy we're about to do so that + * we can notice that our I/O pipe is running dry and + * get the next I/O issued before it does go dry + */ + if (last_ioread_offset && io_size > ((MAX_UPL_TRANSFER * PAGE_SIZE) / 4)) + io_resid = ((MAX_UPL_TRANSFER * PAGE_SIZE) / 4); + else + io_resid = io_size; - xsize = PAGE_SIZE - start_offset; - - if (xsize > io_size) - xsize = io_size; + io_requested = io_resid; - retval = uiomove((caddr_t)(paddr + start_offset), xsize, uio); + retval = cluster_copy_ubc_data(vp, uio, &io_resid, 0); - ubc_page_op(vp, upl_f_offset, - UPL_POP_CLR | UPL_POP_BUSY, 0, 0); + io_size -= (io_requested - io_resid); - io_size -= xsize; - start_offset = (int) - (uio->uio_offset & PAGE_MASK_64); - upl_f_offset = uio->uio_offset - start_offset; + if (retval || io_resid) + /* + * if we run into a real error or + * a page that is not in the cache + * we need to leave streaming mode + */ + break; + + if ((io_size == 0 || last_ioread_offset == last_request_offset) && rd_ahead_enabled) { + /* + * we've already finished the I/O for this read request + * let's see if we should do a read-ahead + */ + cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize); + } } - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END, - (int)uio->uio_offset, io_size, uio->uio_resid, 0, 0); - - uio->uio_segflg = segflg; - if (retval) break; - if (io_size == 0) { - /* - * we're already finished with this read request - * let's see if we should do a read-ahead - */ - e_lblkno = (int) - ((uio->uio_offset - 1) / PAGE_SIZE_64); - - if (!(vp->v_flag & VRAOFF)) - /* - * let's try to read ahead if we're in - * a sequential access pattern - */ - cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize); - vp->v_lastr = e_lblkno; + if (e_lblkno < vp->v_lastr) + vp->v_maxra = 0; + vp->v_lastr = e_lblkno; break; } - max_size = filesize - uio->uio_offset; + start_offset = (int)(uio->uio_offset & PAGE_MASK_64); + upl_f_offset = uio->uio_offset - (off_t)start_offset; + max_size = filesize - uio->uio_offset; } + if (io_size > max_rd_size) + io_size = max_rd_size; + upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; - if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE)) - upl_size = MAX_UPL_TRANSFER * PAGE_SIZE; + + if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE) / 4) + upl_size = (MAX_UPL_TRANSFER * PAGE_SIZE) / 4; pages_in_upl = upl_size / PAGE_SIZE;
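+	/*
+	 * editorial aside, not part of the original change: a worked example of
+	 * the sizing above, assuming PAGE_SIZE is 4096 and MAX_UPL_TRANSFER is
+	 * 256 (the value used elsewhere in xnu of this vintage)... max_rd_size
+	 * defaults to 256 * 4096 = 1MB, while each upl built here is capped at
+	 * (256 * 4096) / 4 = 256KB... a large cache miss is therefore serviced
+	 * as a pipeline of 256KB upls, which gives the prefetch logic above a
+	 * chance to keep the next I/O in flight while this one is copied out
+	 */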
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START, @@ -2375,7 +2571,7 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags) upl_size, &upl, &pl, - UPL_FLAGS_NONE); + UPL_SET_LITE); if (kret != KERN_SUCCESS) panic("cluster_read: failed to get pagelist"); @@ -2403,6 +2599,10 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags) if (upl_valid_page(pl, last_pg)) break; } + iostate.io_completed = 0; + iostate.io_issued = 0; + iostate.io_error = 0; + iostate.io_wanted = 0; if (start_pg < last_pg) { /* @@ -2418,21 +2618,20 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags) io_size = filesize - (upl_f_offset + upl_offset); /* - * issue a synchronous read to cluster_io + * issue an asynchronous read to cluster_io */ error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, - io_size, devblocksize, CL_READ, (struct buf *)0, (struct clios *)0); + io_size, devblocksize, CL_READ | CL_ASYNC, (struct buf *)0, &iostate); } if (error == 0) { /* * if the read completed successfully, or there was no I/O request - * issued, than map the upl into kernel address space and - * move the data into user land.... we'll first add on any 'valid' + * issued, than copy the data into user land via 'cluster_upl_copy_data' + * we'll first add on any 'valid' * pages that were present in the upl when we acquired it. */ u_int val_size; - u_int size_of_prefetch; for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) { if (!upl_valid_page(pl, uio_last)) @@ -2440,79 +2639,56 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags) } /* * compute size to transfer this round, if uio->uio_resid is - * still non-zero after this uiomove, we'll loop around and + * still non-zero after this attempt, we'll loop around and * set up for another I/O. */ val_size = (uio_last * PAGE_SIZE) - start_offset; - if (max_size < val_size) + if (val_size > max_size) val_size = max_size; - if (uio->uio_resid < val_size) + if (val_size > uio->uio_resid) val_size = uio->uio_resid; - e_lblkno = (int)((uio->uio_offset + ((off_t)val_size - 1)) / PAGE_SIZE_64); + if (last_ioread_offset == 0) + last_ioread_offset = uio->uio_offset + val_size; - if (size_of_prefetch = (uio->uio_resid - val_size)) { + if ((size_of_prefetch = (last_request_offset - last_ioread_offset)) && prefetch_enabled) { /* - * if there's still I/O left to do for this request, then issue a - * pre-fetch I/O... the I/O wait time will overlap + * if there's still I/O left to do for this request, and... + * we're not in hard throttle mode, then issue a + * pre-fetch I/O... 
the I/O latency will overlap * with the copying of the data */ - cluster_rd_prefetch(vp, uio->uio_offset + val_size, size_of_prefetch, filesize, devblocksize); - } else { - if (!(vp->v_flag & VRAOFF) && !(vp->v_flag & VNOCACHE_DATA)) - /* - * let's try to read ahead if we're in - * a sequential access pattern - */ - cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize); - vp->v_lastr = e_lblkno; - } - if (uio->uio_segflg == UIO_USERSPACE) { - int offset; - - segflg = uio->uio_segflg; + size_of_prefetch = cluster_rd_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, devblocksize); - uio->uio_segflg = UIO_PHYS_USERSPACE; - - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START, - (int)uio->uio_offset, val_size, uio->uio_resid, 0, 0); - - offset = start_offset; - - while (val_size && retval == 0) { - int csize; - int i; - caddr_t paddr; - - i = offset / PAGE_SIZE; - csize = min(PAGE_SIZE - start_offset, val_size); - - paddr = (caddr_t)upl_phys_page(pl, i) + start_offset; - - retval = uiomove(paddr, csize, uio); + last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE); + + if (last_ioread_offset > last_request_offset) + last_ioread_offset = last_request_offset; - val_size -= csize; - offset += csize; - start_offset = offset & PAGE_MASK; - } - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END, - (int)uio->uio_offset, val_size, uio->uio_resid, 0, 0); + } else if ((uio->uio_offset + val_size) == last_request_offset) { + /* + * this transfer will finish this request, so... + * let's try to read ahead if we're in + * a sequential access pattern and we haven't + * explicitly disabled it + */ + if (rd_ahead_enabled) + cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize); - uio->uio_segflg = segflg; + if (e_lblkno < vp->v_lastr) + vp->v_maxra = 0; + vp->v_lastr = e_lblkno; } + while (iostate.io_issued != iostate.io_completed) { + iostate.io_wanted = 1; + tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_read_x", 0); + } + if (iostate.io_error) + error = iostate.io_error; else - { - if ((kret = ubc_upl_map(upl, &io_address)) != KERN_SUCCESS) - panic("cluster_read: ubc_upl_map() failed\n"); - - retval = uiomove((caddr_t)(io_address + start_offset), val_size, uio); - - if ((kret = ubc_upl_unmap(upl)) != KERN_SUCCESS) - panic("cluster_read: ubc_upl_unmap() failed\n"); - } + retval = cluster_copy_upl_data(uio, upl, start_offset, val_size); } if (start_pg < last_pg) { /* @@ -2530,9 +2706,9 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags) UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); else ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, - UPL_COMMIT_CLEAR_DIRTY - | UPL_COMMIT_FREE_ON_EMPTY - | UPL_COMMIT_INACTIVATE); + UPL_COMMIT_CLEAR_DIRTY | + UPL_COMMIT_FREE_ON_EMPTY | + UPL_COMMIT_INACTIVATE); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, (int)upl, start_pg * PAGE_SIZE, io_size, error, 0); @@ -2544,7 +2720,7 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags) /* * the set of pages that we issued an I/O for did not encompass * the entire upl... 
so just release these without modifying - * there state + * their state */ if (error) ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); @@ -2611,6 +2787,8 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags) if (retval == 0) retval = error; } + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END, + (int)uio->uio_offset, uio->uio_resid, vp->v_lastr, retval, 0); return (retval); } @@ -2626,24 +2804,22 @@ cluster_nocopy_read(vp, uio, filesize, devblocksize, flags) { upl_t upl; upl_page_info_t *pl; - off_t upl_f_offset; vm_offset_t upl_offset; - off_t start_upl_f_offset; off_t max_io_size; int io_size; int upl_size; int upl_needed_size; int pages_in_pl; - vm_offset_t paddr; int upl_flags; kern_return_t kret; - int segflg; struct iovec *iov; int i; int force_data_sync; int retval = 0; - int first = 1; struct clios iostate; + u_int max_rd_size = MAX_UPL_TRANSFER * PAGE_SIZE; + u_int max_rd_ahead = MAX_UPL_TRANSFER * PAGE_SIZE * 2; + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START, (int)uio->uio_offset, uio->uio_resid, (int)filesize, devblocksize, 0); @@ -2662,6 +2838,10 @@ cluster_nocopy_read(vp, uio, filesize, devblocksize, flags) iov = uio->uio_iov; + if (cluster_hard_throttle_on(vp)) { + max_rd_size = HARD_THROTTLE_MAXSIZE; + max_rd_ahead = HARD_THROTTLE_MAXSIZE - 1; + } while (uio->uio_resid && uio->uio_offset < filesize && retval == 0) { max_io_size = filesize - uio->uio_offset; @@ -2671,39 +2851,11 @@ cluster_nocopy_read(vp, uio, filesize, devblocksize, flags) else io_size = uio->uio_resid; - /* - * We don't come into this routine unless - * UIO_USERSPACE is set. - */ - segflg = uio->uio_segflg; - - uio->uio_segflg = UIO_PHYS_USERSPACE; - /* * First look for pages already in the cache * and move them to user space. */ - while (io_size && (retval == 0)) { - upl_f_offset = uio->uio_offset; - - /* - * If this call fails, it means the page is not - * in the page cache. - */ - if (ubc_page_op(vp, upl_f_offset, - UPL_POP_SET | UPL_POP_BUSY, &paddr, 0) != KERN_SUCCESS) - break; - - retval = uiomove((caddr_t)(paddr), PAGE_SIZE, uio); - - ubc_page_op(vp, upl_f_offset, - UPL_POP_CLR | UPL_POP_BUSY, 0, 0); - - io_size -= PAGE_SIZE; - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 71)) | DBG_FUNC_NONE, - (int)uio->uio_offset, io_size, uio->uio_resid, 0, 0); - } - uio->uio_segflg = segflg; + retval = cluster_copy_ubc_data(vp, uio, &io_size, 0); if (retval) { /* @@ -2726,30 +2878,13 @@ cluster_nocopy_read(vp, uio, filesize, devblocksize, flags) } max_io_size = io_size; - if (max_io_size > (MAX_UPL_TRANSFER * PAGE_SIZE)) - max_io_size = MAX_UPL_TRANSFER * PAGE_SIZE; - if (first) { - if (max_io_size > (MAX_UPL_TRANSFER * PAGE_SIZE) / 4) - max_io_size = (MAX_UPL_TRANSFER * PAGE_SIZE) / 8; - first = 0; - } - start_upl_f_offset = uio->uio_offset; /* this is page aligned in the file */ - upl_f_offset = start_upl_f_offset; + if (max_io_size > max_rd_size) + max_io_size = max_rd_size; + io_size = 0; - while (io_size < max_io_size) { - if (ubc_page_op(vp, upl_f_offset, - UPL_POP_SET | UPL_POP_BUSY, &paddr, 0) == KERN_SUCCESS) { - ubc_page_op(vp, upl_f_offset, - UPL_POP_CLR | UPL_POP_BUSY, 0, 0); - break; - } - /* - * Build up the io request parameters. 
- */ - io_size += PAGE_SIZE_64; - upl_f_offset += PAGE_SIZE_64; - } + ubc_range_op(vp, uio->uio_offset, uio->uio_offset + max_io_size, UPL_ROP_ABSENT, &io_size); + if (io_size == 0) /* * we may have already spun some portion of this request @@ -2758,7 +2893,7 @@ cluster_nocopy_read(vp, uio, filesize, devblocksize, flags) */ goto wait_for_reads; - upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64; + upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK; upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START, @@ -2767,7 +2902,7 @@ cluster_nocopy_read(vp, uio, filesize, devblocksize, flags) for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) { pages_in_pl = 0; upl_size = upl_needed_size; - upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL; + upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; kret = vm_map_get_upl(current_map(), (vm_offset_t)iov->iov_base & ~PAGE_MASK, @@ -2776,7 +2911,6 @@ cluster_nocopy_read(vp, uio, filesize, devblocksize, flags) if (kret != KERN_SUCCESS) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END, (int)upl_offset, upl_size, io_size, kret, 0); - /* * cluster_nocopy_read: failed to get pagelist * @@ -2825,7 +2959,7 @@ cluster_nocopy_read(vp, uio, filesize, devblocksize, flags) * if there are already too many outstanding reads * wait until some have completed before issuing the next read */ - while ((iostate.io_issued - iostate.io_completed) > (2 * MAX_UPL_TRANSFER * PAGE_SIZE)) { + while ((iostate.io_issued - iostate.io_completed) > max_rd_ahead) { iostate.io_wanted = 1; tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_nocopy_read", 0); } @@ -2843,9 +2977,9 @@ cluster_nocopy_read(vp, uio, filesize, devblocksize, flags) goto wait_for_reads; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START, - (int)upl, (int)upl_offset, (int)start_upl_f_offset, io_size, 0); + (int)upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0); - retval = cluster_io(vp, upl, upl_offset, start_upl_f_offset, + retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, devblocksize, CL_PRESERVE | CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO, (struct buf *)0, &iostate); @@ -2893,7 +3027,7 @@ cluster_phys_read(vp, uio, filesize, devblocksize, flags) upl_page_info_t *pl; upl_t upl; vm_offset_t upl_offset; - vm_offset_t dst_paddr; + addr64_t dst_paddr; off_t max_size; int io_size; int tail_size; @@ -2921,13 +3055,13 @@ cluster_phys_read(vp, uio, filesize, devblocksize, flags) else io_size = max_size; - upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64; + upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK; upl_needed_size = upl_offset + io_size; error = 0; pages_in_pl = 0; upl_size = upl_needed_size; - upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL; + upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; kret = vm_map_get_upl(current_map(), (vm_offset_t)iov->iov_base & ~PAGE_MASK, @@ -2949,7 +3083,7 @@ cluster_phys_read(vp, uio, filesize, devblocksize, flags) } pl = ubc_upl_pageinfo(upl); - dst_paddr = (vm_offset_t)upl_phys_page(pl, 0) + ((vm_offset_t)iov->iov_base & PAGE_MASK); + dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + ((addr64_t)((u_int)iov->iov_base & PAGE_MASK)); while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) { int head_size; @@ -3065,6 +3199,7 @@ advisory_read(vp, 
filesize, f_offset, resid, devblocksize) kern_return_t kret; int retval = 0; int issued_io; + int skip_range; if (!UBCINFOEXISTS(vp)) return(EINVAL); @@ -3093,14 +3228,45 @@ advisory_read(vp, filesize, f_offset, resid, devblocksize) upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE)) upl_size = MAX_UPL_TRANSFER * PAGE_SIZE; + + skip_range = 0; + /* + * return the number of contiguously present pages in the cache + * starting at upl_f_offset within the file + */ + ubc_range_op(vp, upl_f_offset, upl_f_offset + upl_size, UPL_ROP_PRESENT, &skip_range); + + if (skip_range) { + /* + * skip over pages already present in the cache + */ + io_size = skip_range - start_offset; + + f_offset += io_size; + resid -= io_size; + + if (skip_range == upl_size) + continue; + /* + * have to issue some real I/O + * at this point, we know it's starting on a page boundary + * because we've skipped over at least the first page in the request + */ + start_offset = 0; + upl_f_offset += skip_range; + upl_size -= skip_range; + } pages_in_upl = upl_size / PAGE_SIZE; + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_START, + (int)upl, (int)upl_f_offset, upl_size, start_offset, 0); + kret = ubc_create_upl(vp, upl_f_offset, upl_size, &upl, &pl, - UPL_RET_ONLY_ABSENT); + UPL_RET_ONLY_ABSENT | UPL_SET_LITE); if (kret != KERN_SUCCESS) return(retval); issued_io = 0; @@ -3117,7 +3283,7 @@ advisory_read(vp, filesize, f_offset, resid, devblocksize) pages_in_upl = last_pg + 1; - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_NONE, + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_END, (int)upl, (int)upl_f_offset, upl_size, start_offset, 0); @@ -3190,43 +3356,42 @@ cluster_push(vp) { int retval; - if (!UBCINFOEXISTS(vp) || vp->v_clen == 0) { - vp->v_flag &= ~VHASDIRTY; + if (!UBCINFOEXISTS(vp) || (vp->v_clen == 0 && !(vp->v_flag & VHASDIRTY))) return(0); - } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_START, vp->v_flag & VHASDIRTY, vp->v_clen, 0, 0, 0); if (vp->v_flag & VHASDIRTY) { - daddr_t start_pg; - daddr_t last_pg; - daddr_t end_pg; - - start_pg = vp->v_cstart; - end_pg = vp->v_lastw; + sparse_cluster_push(vp, ubc_getsize(vp), 1); - vp->v_flag &= ~VHASDIRTY; vp->v_clen = 0; + retval = 1; + } else + retval = cluster_try_push(vp, ubc_getsize(vp), 0, 1); - while (start_pg < end_pg) { - last_pg = start_pg + MAX_UPL_TRANSFER; + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END, + vp->v_flag & VHASDIRTY, vp->v_clen, retval, 0, 0); - if (last_pg > end_pg) - last_pg = end_pg; + return (retval); +} - cluster_push_x(vp, ubc_getsize(vp), start_pg, last_pg, 0); - start_pg = last_pg; - } - return (1); - } - retval = cluster_try_push(vp, ubc_getsize(vp), 0, 1); +int +cluster_release(vp) + struct vnode *vp; +{ + off_t offset; + u_int length; - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END, - vp->v_flag & VHASDIRTY, vp->v_clen, retval, 0, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, (int)vp, (int)vp->v_scmap, vp->v_scdirty, 0, 0); - return (retval); + if (vp->v_flag & VHASDIRTY) { + vfs_drt_control(&(vp->v_scmap), 0); + + vp->v_flag &= ~VHASDIRTY; + } + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, (int)vp, (int)vp->v_scmap, vp->v_scdirty, 0, 0); } @@ -3242,7 +3407,7 @@ cluster_try_push(vp, EOF, can_delay, push_all) int min_index; int cl_len; int cl_total; - int cl_pushed; + int cl_pushed = 0; struct v_cluster l_clusters[MAX_CLUSTERS]; /* @@ -3269,7 +3434,36 @@ cluster_try_push(vp, EOF, can_delay, push_all) cl_len = 
cl_index; vp->v_clen = 0; - for (cl_pushed = 0, cl_index = 0; cl_index < cl_len; cl_index++) { + if (can_delay && cl_len == MAX_CLUSTERS) { + int i; + + /* + * determine if we appear to be writing the file sequentially + * if not, by returning without having pushed any clusters + * we will cause this vnode to be pushed into the sparse cluster mechanism + * used for managing more random I/O patterns + * + * we know that we've got all clusters currently in use and the next write doesn't fit into one of them... + * that's why we're in try_push with can_delay true... + * + * check to make sure that all the clusters except the last one are 'full'... and that each cluster + * is adjacent to the next (i.e. we're looking for sequential writes) they were sorted above + * so we can just make a simple pass through up, to but not including the last one... + * note that last_pg is not inclusive, so it will be equal to the start_pg of the next cluster if they + * are sequential + * + * we let the last one be partial as long as it was adjacent to the previous one... + * we need to do this to deal with multi-threaded servers that might write an I/O or 2 out + * of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world... + */ + for (i = 0; i < MAX_CLUSTERS - 1; i++) { + if ((l_clusters[i].last_pg - l_clusters[i].start_pg) != MAX_UPL_TRANSFER) + goto dont_try; + if (l_clusters[i].last_pg != l_clusters[i+1].start_pg) + goto dont_try; + } + } + for (cl_index = 0; cl_index < cl_len; cl_index++) { /* * try to push each cluster in turn... cluster_push_x may not * push the cluster if can_delay is TRUE and the cluster doesn't @@ -3285,6 +3479,7 @@ cluster_try_push(vp, EOF, can_delay, push_all) break; } } +dont_try: if (cl_len > cl_pushed) { /* * we didn't push all of the clusters, so @@ -3296,18 +3491,33 @@ cluster_try_push(vp, EOF, can_delay, push_all) * push the old ones (I don't think this can happen because * I'm holding the lock, but just in case)... 
the sum of the * leftovers plus the new cluster count exceeds our ability - * to represent them, so fall back to the VHASDIRTY mechanism + * to represent them, so switch to the sparse cluster mechanism */ - for (cl_index = 0; cl_index < cl_len; cl_index++) { + + /* + * first collect the new clusters sitting in the vp + */ + sparse_cluster_switch(vp, EOF); + + for (cl_index = 0, cl_index1 = 0; cl_index < cl_len; cl_index++) { if (l_clusters[cl_index].start_pg == l_clusters[cl_index].last_pg) continue; + vp->v_clusters[cl_index1].start_pg = l_clusters[cl_index].start_pg; + vp->v_clusters[cl_index1].last_pg = l_clusters[cl_index].last_pg; - if (l_clusters[cl_index].start_pg < vp->v_cstart) - vp->v_cstart = l_clusters[cl_index].start_pg; - if (l_clusters[cl_index].last_pg > vp->v_lastw) - vp->v_lastw = l_clusters[cl_index].last_pg; + cl_index1++; } - vp->v_flag |= VHASDIRTY; + /* + * update the cluster count + */ + vp->v_clen = cl_index1; + + /* + * and collect the original clusters that were moved into the + * local storage for sorting purposes + */ + sparse_cluster_switch(vp, EOF); + } else { /* * we've got room to merge the leftovers back in @@ -3321,15 +3531,6 @@ cluster_try_push(vp, EOF, can_delay, push_all) vp->v_clusters[cl_index1].start_pg = l_clusters[cl_index].start_pg; vp->v_clusters[cl_index1].last_pg = l_clusters[cl_index].last_pg; - if (cl_index1 == 0) { - vp->v_cstart = l_clusters[cl_index].start_pg; - vp->v_lastw = l_clusters[cl_index].last_pg; - } else { - if (l_clusters[cl_index].start_pg < vp->v_cstart) - vp->v_cstart = l_clusters[cl_index].start_pg; - if (l_clusters[cl_index].last_pg > vp->v_lastw) - vp->v_lastw = l_clusters[cl_index].last_pg; - } cl_index1++; } /* @@ -3361,6 +3562,7 @@ cluster_push_x(vp, EOF, first, last, can_delay) int last_pg; int io_size; int io_flags; + int upl_flags; int size; kern_return_t kret; @@ -3390,58 +3592,83 @@ cluster_push_x(vp, EOF, first, last, can_delay) } size = EOF - upl_f_offset; - upl_size = (size + (PAGE_SIZE - 1) ) & ~(PAGE_SIZE - 1); + upl_size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK; pages_in_upl = upl_size / PAGE_SIZE; - } else { - if (can_delay && (pages_in_upl < (MAX_UPL_TRANSFER - (MAX_UPL_TRANSFER / 2)))) - return(0); + } else size = upl_size; - } + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, size, 0, 0, 0); + + if (vp->v_flag & VNOCACHE_DATA) + upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE | UPL_WILL_BE_DUMPED; + else + upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE; + kret = ubc_create_upl(vp, upl_f_offset, upl_size, &upl, &pl, - UPL_RET_ONLY_DIRTY); + upl_flags); if (kret != KERN_SUCCESS) panic("cluster_push: failed to get pagelist"); - if (can_delay) { - int num_of_dirty; - - for (num_of_dirty = 0, start_pg = 0; start_pg < pages_in_upl; start_pg++) { - if (upl_valid_page(pl, start_pg) && upl_dirty_page(pl, start_pg)) - num_of_dirty++; - } - if (num_of_dirty < pages_in_upl / 2) { - ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 0, 2, num_of_dirty, (pages_in_upl / 2), 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, (int)upl, upl_f_offset, 0, 0, 0); - return(0); - } + /* + * since we only asked for the dirty pages back + * it's possible that we may only get a few or even none, so... 
+ * before we start marching forward, we must make sure we know + * where the last present page is in the UPL, otherwise we could + * end up working with a freed upl due to the FREE_ON_EMPTY semantics + * employed by commit_range and abort_range. + */ + for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) { + if (upl_page_present(pl, last_pg)) + break; } - last_pg = 0; + pages_in_upl = last_pg + 1; - while (size) { + if (pages_in_upl == 0) { + ubc_upl_abort(upl, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 2, 0, 0, 0); + return(1); + } + + for (last_pg = 0; last_pg < pages_in_upl; ) { + /* + * find the next dirty page in the UPL + * this will become the first page in the + * next I/O to generate + */ for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) { - if (upl_valid_page(pl, start_pg) && upl_dirty_page(pl, start_pg)) + if (upl_dirty_page(pl, start_pg)) break; + if (upl_page_present(pl, start_pg)) + /* + * RET_ONLY_DIRTY will return non-dirty 'precious' pages + * just release these unchanged since we're not going + * to steal them or change their state + */ + ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); } - if (start_pg > last_pg) { - io_size = (start_pg - last_pg) * PAGE_SIZE; - - ubc_upl_abort_range(upl, last_pg * PAGE_SIZE, io_size, - UPL_ABORT_FREE_ON_EMPTY); + if (start_pg >= pages_in_upl) + /* + * done... no more dirty pages to push + */ + break; + if (start_pg > last_pg) + /* + * skipped over some non-dirty pages + */ + size -= ((start_pg - last_pg) * PAGE_SIZE); - if (io_size < size) - size -= io_size; - else - break; - } + /* + * find a range of dirty pages to write + */ for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) { - if (!upl_valid_page(pl, last_pg) || !upl_dirty_page(pl, last_pg)) + if (!upl_dirty_page(pl, last_pg)) break; } upl_offset = start_pg * PAGE_SIZE; @@ -3449,14 +3676,10 @@ cluster_push_x(vp, EOF, first, last, can_delay) io_size = min(size, (last_pg - start_pg) * PAGE_SIZE); if (vp->v_flag & VNOCACHE_DATA) - io_flags = CL_COMMIT | CL_AGE | CL_ASYNC | CL_DUMP; + io_flags = CL_THROTTLE | CL_COMMIT | CL_ASYNC | CL_DUMP; else - io_flags = CL_COMMIT | CL_AGE | CL_ASYNC; + io_flags = CL_THROTTLE | CL_COMMIT | CL_ASYNC; - while (vp->v_numoutput >= ASYNC_THROTTLE) { - vp->v_flag |= VTHROTTLED; - tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "cluster_push", 0); - } cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size, vp->v_ciosiz, io_flags, (struct buf *)0, (struct clios *)0); size -= io_size; @@ -3467,62 +3690,1032 @@ cluster_push_x(vp, EOF, first, last, can_delay) } - static int -cluster_align_phys_io(struct vnode *vp, struct uio *uio, vm_offset_t usr_paddr, int xsize, int devblocksize, int flags) +sparse_cluster_switch(struct vnode *vp, off_t EOF) { - struct iovec *iov; - upl_page_info_t *pl; - upl_t upl; - vm_offset_t ubc_paddr; - kern_return_t kret; - int error = 0; + int cl_index; - iov = uio->uio_iov; + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, (int)vp, (int)vp->v_scmap, vp->v_scdirty, 0, 0); - kret = ubc_create_upl(vp, - uio->uio_offset & ~PAGE_MASK_64, - PAGE_SIZE, - &upl, - &pl, - UPL_FLAGS_NONE); + if ( !(vp->v_flag & VHASDIRTY)) { + vp->v_flag |= VHASDIRTY; + vp->v_scdirty = 0; + vp->v_scmap = 0; + } + for (cl_index = 0; cl_index < vp->v_clen; cl_index++) { + int flags; + int start_pg; + int last_pg; - if (kret != KERN_SUCCESS) - return(EINVAL); + for (start_pg = vp->v_clusters[cl_index].start_pg; start_pg < vp->v_clusters[cl_index].last_pg; 
start_pg++) { - if (!upl_valid_page(pl, 0)) { - /* - * issue a synchronous read to cluster_io - */ - error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE, devblocksize, - CL_READ, (struct buf *)0, (struct clios *)0); - if (error) { + if (ubc_page_op(vp, (off_t)(((off_t)start_pg) * PAGE_SIZE_64), 0, 0, &flags) == KERN_SUCCESS) { + if (flags & UPL_POP_DIRTY) + sparse_cluster_add(vp, EOF, start_pg, start_pg + 1); + } + } + } + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, (int)vp, (int)vp->v_scmap, vp->v_scdirty, 0, 0); +} + + +static int +sparse_cluster_push(struct vnode *vp, off_t EOF, int push_all) +{ + daddr_t first; + daddr_t last; + off_t offset; + u_int length; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, (int)vp, (int)vp->v_scmap, vp->v_scdirty, push_all, 0); + + if (push_all) + vfs_drt_control(&(vp->v_scmap), 1); + + for (;;) { + if (vfs_drt_get_cluster(&(vp->v_scmap), &offset, &length) != KERN_SUCCESS) { + vp->v_flag &= ~VHASDIRTY; + vp->v_clen = 0; + break; + } + first = (daddr_t)(offset / PAGE_SIZE_64); + last = (daddr_t)((offset + length) / PAGE_SIZE_64); + + cluster_push_x(vp, EOF, first, last, 0); + + vp->v_scdirty -= (last - first); + + if (push_all == 0) + break; + } + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, (int)vp, (int)vp->v_scmap, vp->v_scdirty, 0, 0); +} + + +static int +sparse_cluster_add(struct vnode *vp, off_t EOF, daddr_t first, daddr_t last) +{ + u_int new_dirty; + u_int length; + off_t offset; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_START, (int)vp->v_scmap, vp->v_scdirty, first, last, 0); + + offset = (off_t)first * PAGE_SIZE_64; + length = (last - first) * PAGE_SIZE; + + while (vfs_drt_mark_pages(&(vp->v_scmap), offset, length, &new_dirty) != KERN_SUCCESS) { + /* + * no room left in the map + * only a partial update was done + * push out some pages and try again + */ + vp->v_scdirty += new_dirty; + + sparse_cluster_push(vp, EOF, 0); + + offset += (new_dirty * PAGE_SIZE_64); + length -= (new_dirty * PAGE_SIZE); + } + vp->v_scdirty += new_dirty; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, (int)vp, (int)vp->v_scmap, vp->v_scdirty, 0, 0); +} + + +static int +cluster_align_phys_io(struct vnode *vp, struct uio *uio, addr64_t usr_paddr, int xsize, int devblocksize, int flags) +{ + struct iovec *iov; + upl_page_info_t *pl; + upl_t upl; + addr64_t ubc_paddr; + kern_return_t kret; + int error = 0; + + iov = uio->uio_iov; + + kret = ubc_create_upl(vp, + uio->uio_offset & ~PAGE_MASK_64, + PAGE_SIZE, + &upl, + &pl, + UPL_SET_LITE); + + if (kret != KERN_SUCCESS) + return(EINVAL); + + if (!upl_valid_page(pl, 0)) { + /* + * issue a synchronous read to cluster_io + */ + error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE, devblocksize, + CL_READ, (struct buf *)0, (struct clios *)0); + if (error) { ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); return(error); } } - ubc_paddr = (vm_offset_t)upl_phys_page(pl, 0) + (int)(uio->uio_offset & PAGE_MASK_64); + ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)(uio->uio_offset & PAGE_MASK_64); +/* + * NOTE: There is no prototype for the following in BSD. It, and the definitions + * of the defines for cppvPsrc, cppvPsnk, cppvFsnk, and cppvFsrc will be found in + * osfmk/ppc/mappings.h. They are not included here because there appears to be no + * way to do so without exporting them to kexts as well. 
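+ * (editorial aside, not in the original change: judging from the literal
+ *  masks used below, the values appear to be cppvPsnk = 1, cppvPsrc = 2,
+ *  cppvFsnk = 4 and cppvFsrc = 8, so '2 | 1 | 4' reads as 'both source and
+ *  sink are physical addresses, flush the sink' and '2 | 1 | 8' as 'both
+ *  physical, flush the source')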
+ */ if (flags & CL_READ) - copyp2p(ubc_paddr, usr_paddr, xsize, 2); +// copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk); /* Copy physical to physical and flush the destination */ + copypv(ubc_paddr, usr_paddr, xsize, 2 | 1 | 4); /* Copy physical to physical and flush the destination */ else - copyp2p(usr_paddr, ubc_paddr, xsize, 1); - - if ( !(flags & CL_READ) || upl_dirty_page(pl, 0)) { - /* - * issue a synchronous write to cluster_io - */ - error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE, devblocksize, - 0, (struct buf *)0, (struct clios *)0); +// copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc); /* Copy physical to physical and flush the source */ + copypv(ubc_paddr, usr_paddr, xsize, 2 | 1 | 8); /* Copy physical to physical and flush the source */ + + if ( !(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) { + /* + * issue a synchronous write to cluster_io + */ + error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE, devblocksize, + 0, (struct buf *)0, (struct clios *)0); } if (error == 0) { - uio->uio_offset += xsize; + uio->uio_offset += xsize; iov->iov_base += xsize; iov->iov_len -= xsize; uio->uio_resid -= xsize; } ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); + + return (error); +} + + + +int +cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int xsize) +{ + int pg_offset; + int pg_index; + int csize; + int segflg; + int retval = 0; + upl_page_info_t *pl; + boolean_t funnel_state = FALSE; + + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START, + (int)uio->uio_offset, uio->uio_resid, upl_offset, xsize, 0); + + if (xsize >= (16 * 1024)) + funnel_state = thread_funnel_set(kernel_flock, FALSE); + + segflg = uio->uio_segflg; + + switch(segflg) { + + case UIO_USERSPACE: + case UIO_USERISPACE: + uio->uio_segflg = UIO_PHYS_USERSPACE; + break; + + case UIO_SYSSPACE: + uio->uio_segflg = UIO_PHYS_SYSSPACE; + break; + } + pl = ubc_upl_pageinfo(upl); + + pg_index = upl_offset / PAGE_SIZE; + pg_offset = upl_offset & PAGE_MASK; + csize = min(PAGE_SIZE - pg_offset, xsize); + + while (xsize && retval == 0) { + addr64_t paddr; + + paddr = ((addr64_t)upl_phys_page(pl, pg_index) << 12) + pg_offset; - return (error); + retval = uiomove64(paddr, csize, uio); + + pg_index += 1; + pg_offset = 0; + xsize -= csize; + csize = min(PAGE_SIZE, xsize); + } + uio->uio_segflg = segflg; + + if (funnel_state == TRUE) + thread_funnel_set(kernel_flock, TRUE); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END, + (int)uio->uio_offset, uio->uio_resid, retval, segflg, 0); + + return (retval); +} + + +int +cluster_copy_ubc_data(struct vnode *vp, struct uio *uio, int *io_resid, int mark_dirty) +{ + int segflg; + int io_size; + int xsize; + int start_offset; + off_t f_offset; + int retval = 0; + memory_object_control_t control; + int op_flags = UPL_POP_SET | UPL_POP_BUSY; + boolean_t funnel_state = FALSE; + + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START, + (int)uio->uio_offset, uio->uio_resid, 0, *io_resid, 0); + + control = ubc_getobject(vp, UBC_FLAGS_NONE); + if (control == MEMORY_OBJECT_CONTROL_NULL) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END, + (int)uio->uio_offset, uio->uio_resid, retval, 3, 0); + + return(0); + } + if (mark_dirty) + op_flags |= UPL_POP_DIRTY; + + segflg = uio->uio_segflg; + + switch(segflg) { + + case UIO_USERSPACE: + case UIO_USERISPACE: + uio->uio_segflg = UIO_PHYS_USERSPACE; + break; + + 
case UIO_SYSSPACE: + uio->uio_segflg = UIO_PHYS_SYSSPACE; + break; + } + io_size = *io_resid; + start_offset = (int)(uio->uio_offset & PAGE_MASK_64); + f_offset = uio->uio_offset - start_offset; + xsize = min(PAGE_SIZE - start_offset, io_size); + + while (io_size && retval == 0) { + ppnum_t pgframe; + + if (ubc_page_op_with_control(control, f_offset, op_flags, &pgframe, 0) != KERN_SUCCESS) + break; + + if (funnel_state == FALSE && io_size >= (16 * 1024)) + funnel_state = thread_funnel_set(kernel_flock, FALSE); + + retval = uiomove64((addr64_t)(((addr64_t)pgframe << 12) + start_offset), xsize, uio); + + ubc_page_op_with_control(control, f_offset, UPL_POP_CLR | UPL_POP_BUSY, 0, 0); + + io_size -= xsize; + start_offset = 0; + f_offset = uio->uio_offset; + xsize = min(PAGE_SIZE, io_size); + } + uio->uio_segflg = segflg; + *io_resid = io_size; + + if (funnel_state == TRUE) + thread_funnel_set(kernel_flock, TRUE); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END, + (int)uio->uio_offset, uio->uio_resid, retval, 0x80000000 | segflg, 0); + + return(retval); +} + + +int +is_file_clean(struct vnode *vp, off_t filesize) +{ + off_t f_offset; + int flags; + int total_dirty = 0; + + for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) { + if (ubc_page_op(vp, f_offset, 0, 0, &flags) == KERN_SUCCESS) { + if (flags & UPL_POP_DIRTY) { + total_dirty++; + } + } + } + if (total_dirty) + return(EINVAL); + + return (0); +} + + + +/* + * Dirty region tracking/clustering mechanism. + * + * This code (vfs_drt_*) provides a mechanism for tracking and clustering + * dirty regions within a larger space (file). It is primarily intended to + * support clustering in large files with many dirty areas. + * + * The implementation assumes that the dirty regions are pages. + * + * To represent dirty pages within the file, we store bit vectors in a + * variable-size circular hash. + */ + +/* + * Bitvector size. This determines the number of pages we group in a + * single hashtable entry. Each hashtable entry is aligned to this + * size within the file. + */ +#define DRT_BITVECTOR_PAGES 256 + +/* + * File offset handling. + * + * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES; + * the correct formula is (~(DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1) + */ +#define DRT_ADDRESS_MASK (~((1 << 20) - 1)) +#define DRT_ALIGN_ADDRESS(addr) ((addr) & DRT_ADDRESS_MASK) + +/* + * Hashtable address field handling. + * + * The low-order bits of the hashtable address are used to conserve + * space. + * + * DRT_HASH_COUNT_MASK must be large enough to store the range + * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value + * to indicate that the bucket is actually unoccupied. 
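+ * (editorial aside, spelling out the arithmetic: with 4K pages a bucket
+ *  spans DRT_BITVECTOR_PAGES * PAGE_SIZE = 256 * 4096 = 2^20 bytes, which
+ *  is why DRT_ADDRESS_MASK is written as ~((1 << 20) - 1)... the count
+ *  field must represent 0-256 plus the 'vacant' sentinel, so 9 bits are
+ *  reserved and the all-ones value 0x1ff is the sentinel that
+ *  DRT_HASH_VACATE stores and DRT_HASH_VACANT tests for)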
+ */ +#define DRT_HASH_GET_ADDRESS(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK) +#define DRT_HASH_SET_ADDRESS(scm, i, a) \ + do { \ + (scm)->scm_hashtable[(i)].dhe_control = \ + ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \ + } while (0) +#define DRT_HASH_COUNT_MASK 0x1ff +#define DRT_HASH_GET_COUNT(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK) +#define DRT_HASH_SET_COUNT(scm, i, c) \ + do { \ + (scm)->scm_hashtable[(i)].dhe_control = \ + ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \ + } while (0) +#define DRT_HASH_CLEAR(scm, i) \ + do { \ + (scm)->scm_hashtable[(i)].dhe_control = 0; \ + } while (0) +#define DRT_HASH_VACATE(scm, i) DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK) +#define DRT_HASH_VACANT(scm, i) (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK) +#define DRT_HASH_COPY(oscm, oi, scm, i) \ + do { \ + (scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control; \ + DRT_BITVECTOR_COPY(oscm, oi, scm, i); \ + } while(0); + + +/* + * Hash table moduli. + * + * Since the hashtable entry's size is dependent on the size of + * the bitvector, and since the hashtable size is constrained to + * both being prime and fitting within the desired allocation + * size, these values need to be manually determined. + * + * For DRT_BITVECTOR_SIZE = 256, the entry size is 40 bytes. + * + * The small hashtable allocation is 1024 bytes, so the modulus is 23. + * The large hashtable allocation is 16384 bytes, so the modulus is 401. + */ +#define DRT_HASH_SMALL_MODULUS 23 +#define DRT_HASH_LARGE_MODULUS 401 + +#define DRT_SMALL_ALLOCATION 1024 /* 104 bytes spare */ +#define DRT_LARGE_ALLOCATION 16384 /* 344 bytes spare */ + +/* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */ + +/* + * Hashtable bitvector handling. + * + * Bitvector fields are 32 bits long. + */ + +#define DRT_HASH_SET_BIT(scm, i, bit) \ + (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32)) + +#define DRT_HASH_CLEAR_BIT(scm, i, bit) \ + (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32)) + +#define DRT_HASH_TEST_BIT(scm, i, bit) \ + ((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32))) + +#define DRT_BITVECTOR_CLEAR(scm, i) \ + bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t)) + +#define DRT_BITVECTOR_COPY(oscm, oi, scm, i) \ + bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0], \ + &(scm)->scm_hashtable[(i)].dhe_bitvector[0], \ + (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t)) + + + +/* + * Hashtable entry. + */ +struct vfs_drt_hashentry { + u_int64_t dhe_control; + u_int32_t dhe_bitvector[DRT_BITVECTOR_PAGES / 32]; +}; + +/* + * Dirty Region Tracking structure. + * + * The hashtable is allocated entirely inside the DRT structure. + * + * The hash is a simple circular prime modulus arrangement, the structure + * is resized from small to large if it overflows. 
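+ *
+ * Worked sizing check (the numbers follow from the constants above):
+ * each vfs_drt_hashentry is an 8-byte dhe_control word plus a
+ * DRT_BITVECTOR_PAGES/32 = 8-word bitvector (32 bytes), i.e. 40 bytes
+ * per entry.  23 * 40 = 920 bytes, leaving 104 of the 1024-byte small
+ * allocation spare; 401 * 40 = 16040 bytes, leaving 344 of the
+ * 16384-byte large allocation spare -- matching the two "spare"
+ * comments beside the allocation sizes above.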
+ */ + +struct vfs_drt_clustermap { + u_int32_t scm_magic; /* sanity/detection */ +#define DRT_SCM_MAGIC 0x12020003 + u_int32_t scm_modulus; /* current ring size */ + u_int32_t scm_buckets; /* number of occupied buckets */ + u_int32_t scm_lastclean; /* last entry we cleaned */ + u_int32_t scm_iskips; /* number of slot skips */ + + struct vfs_drt_hashentry scm_hashtable[0]; +}; + + +#define DRT_HASH(scm, addr) ((addr) % (scm)->scm_modulus) +#define DRT_HASH_NEXT(scm, addr) (((addr) + 1) % (scm)->scm_modulus) + +/* + * Debugging codes and arguments. + */ +#define DRT_DEBUG_EMPTYFREE (FSDBG_CODE(DBG_FSRW, 82)) /* nil */ +#define DRT_DEBUG_RETCLUSTER (FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */ +#define DRT_DEBUG_ALLOC (FSDBG_CODE(DBG_FSRW, 84)) /* copycount */ +#define DRT_DEBUG_INSERT (FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */ +#define DRT_DEBUG_MARK (FSDBG_CODE(DBG_FSRW, 86)) /* offset, length, + * dirty */ + /* 0, setcount */ + /* 1 (clean, no map) */ + /* 2 (map alloc fail) */ + /* 3, resid (partial) */ +#define DRT_DEBUG_6 (FSDBG_CODE(DBG_FSRW, 87)) +#define DRT_DEBUG_SCMDATA (FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets, + * lastclean, iskips */ + + +static void vfs_drt_sanity(struct vfs_drt_clustermap *cmap); +static kern_return_t vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp); +static kern_return_t vfs_drt_free_map(struct vfs_drt_clustermap *cmap); +static kern_return_t vfs_drt_search_index(struct vfs_drt_clustermap *cmap, + u_int64_t offset, int *indexp); +static kern_return_t vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, + u_int64_t offset, + int *indexp, + int recursed); +static kern_return_t vfs_drt_do_mark_pages( + void **cmapp, + u_int64_t offset, + u_int length, + int *setcountp, + int dirty); +static void vfs_drt_trace( + struct vfs_drt_clustermap *cmap, + int code, + int arg1, + int arg2, + int arg3, + int arg4); + + +/* + * Allocate and initialise a sparse cluster map. + * + * Will allocate a new map, resize or compact an existing map. + * + * XXX we should probably have at least one intermediate map size, + * as the 1:16 ratio seems a bit drastic. + */ +static kern_return_t +vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp) +{ + struct vfs_drt_clustermap *cmap, *ocmap; + kern_return_t kret; + u_int64_t offset; + int nsize, i, active_buckets, index, copycount; + + ocmap = NULL; + if (cmapp != NULL) + ocmap = *cmapp; + + /* + * Decide on the size of the new map. + */ + if (ocmap == NULL) { + nsize = DRT_HASH_SMALL_MODULUS; + } else { + /* count the number of active buckets in the old map */ + active_buckets = 0; + for (i = 0; i < ocmap->scm_modulus; i++) { + if (!DRT_HASH_VACANT(ocmap, i) && + (DRT_HASH_GET_COUNT(ocmap, i) != 0)) + active_buckets++; + } + /* + * If we're currently using the small allocation, check to + * see whether we should grow to the large one. + */ + if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) { + /* if the ring is nearly full */ + if (active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) { + nsize = DRT_HASH_LARGE_MODULUS; + } else { + nsize = DRT_HASH_SMALL_MODULUS; + } + } else { + /* already using the large modulus */ + nsize = DRT_HASH_LARGE_MODULUS; + /* + * If the ring is completely full, there's + * nothing useful for us to do. Behave as + * though we had compacted into the new + * array and return. + */ + if (active_buckets >= DRT_HASH_LARGE_MODULUS) + return(KERN_SUCCESS); + } + } + + /* + * Allocate and initialise the new map. 
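+	 *
+	 * (Sizing recap: the jump from the small to the large modulus is
+	 * taken above once more than DRT_HASH_SMALL_MODULUS - 5 = 18 of
+	 * the 23 small buckets are active; any entries from the old map
+	 * are re-hashed into the new allocation below.)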
+ */ + + kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap, + (nsize == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION); + if (kret != KERN_SUCCESS) + return(kret); + cmap->scm_magic = DRT_SCM_MAGIC; + cmap->scm_modulus = nsize; + cmap->scm_buckets = 0; + cmap->scm_lastclean = 0; + cmap->scm_iskips = 0; + for (i = 0; i < cmap->scm_modulus; i++) { + DRT_HASH_CLEAR(cmap, i); + DRT_HASH_VACATE(cmap, i); + DRT_BITVECTOR_CLEAR(cmap, i); + } + + /* + * If there's an old map, re-hash entries from it into the new map. + */ + copycount = 0; + if (ocmap != NULL) { + for (i = 0; i < ocmap->scm_modulus; i++) { + /* skip empty buckets */ + if (DRT_HASH_VACANT(ocmap, i) || + (DRT_HASH_GET_COUNT(ocmap, i) == 0)) + continue; + /* get new index */ + offset = DRT_HASH_GET_ADDRESS(ocmap, i); + kret = vfs_drt_get_index(&cmap, offset, &index, 1); + if (kret != KERN_SUCCESS) { + /* XXX need to bail out gracefully here */ + panic("vfs_drt: new cluster map mysteriously too small"); + } + /* copy */ + DRT_HASH_COPY(ocmap, i, cmap, index); + copycount++; + } + } + + /* log what we've done */ + vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0); + + /* + * It's important to ensure that *cmapp always points to + * a valid map, so we must overwrite it before freeing + * the old map. + */ + *cmapp = cmap; + if (ocmap != NULL) { + /* emit stats into trace buffer */ + vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA, + ocmap->scm_modulus, + ocmap->scm_buckets, + ocmap->scm_lastclean, + ocmap->scm_iskips); + + vfs_drt_free_map(ocmap); + } + return(KERN_SUCCESS); +} + + +/* + * Free a sparse cluster map. + */ +static kern_return_t +vfs_drt_free_map(struct vfs_drt_clustermap *cmap) +{ + kern_return_t ret; + + kmem_free(kernel_map, (vm_offset_t)cmap, + (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION); + return(KERN_SUCCESS); +} + + +/* + * Find the hashtable slot currently occupied by an entry for the supplied offset. + */ +static kern_return_t +vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp) +{ + kern_return_t kret; + int index, i, tries; + + offset = DRT_ALIGN_ADDRESS(offset); + index = DRT_HASH(cmap, offset); + + /* traverse the hashtable */ + for (i = 0; i < cmap->scm_modulus; i++) { + + /* + * If the slot is vacant, we can stop. + */ + if (DRT_HASH_VACANT(cmap, index)) + break; + + /* + * If the address matches our offset, we have success. + */ + if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) { + *indexp = index; + return(KERN_SUCCESS); + } + + /* + * Move to the next slot, try again. + */ + index = DRT_HASH_NEXT(cmap, index); + } + /* + * It's not there. + */ + return(KERN_FAILURE); +} + +/* + * Find the hashtable slot for the supplied offset. If we haven't allocated + * one yet, allocate one and populate the address field. Note that it will + * not have a nonzero page count and thus will still technically be free, so + * in the case where we are called to clean pages, the slot will remain free. 
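+ *
+ * Probe arithmetic example (assuming 4K pages, so aligned addresses are
+ * multiples of 1MB, and the small modulus of 23): 2^20 mod 23 == 6, so
+ * offsets 0, 1MB and 2MB land in buckets 0, 6 and 12, while offsets 0
+ * and 23MB collide in bucket 0 and the later arrival is pushed by the
+ * linear probe below into the next vacant or zero-count slot.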
+ */ +static kern_return_t +vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed) +{ + struct vfs_drt_clustermap *cmap; + kern_return_t kret; + int index, i; + + cmap = *cmapp; + + /* look for an existing entry */ + kret = vfs_drt_search_index(cmap, offset, indexp); + if (kret == KERN_SUCCESS) + return(kret); + + /* need to allocate an entry */ + offset = DRT_ALIGN_ADDRESS(offset); + index = DRT_HASH(cmap, offset); + + /* scan from the index forwards looking for a vacant slot */ + for (i = 0; i < cmap->scm_modulus; i++) { + /* slot vacant? */ + if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap,index) == 0) { + cmap->scm_buckets++; + if (index < cmap->scm_lastclean) + cmap->scm_lastclean = index; + DRT_HASH_SET_ADDRESS(cmap, index, offset); + DRT_HASH_SET_COUNT(cmap, index, 0); + DRT_BITVECTOR_CLEAR(cmap, index); + *indexp = index; + vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0); + return(KERN_SUCCESS); + } + cmap->scm_iskips += i; + index = DRT_HASH_NEXT(cmap, index); + } + + /* + * We haven't found a vacant slot, so the map is full. If we're not + * already recursed, try reallocating/compacting it. + */ + if (recursed) + return(KERN_FAILURE); + kret = vfs_drt_alloc_map(cmapp); + if (kret == KERN_SUCCESS) { + /* now try to insert again */ + kret = vfs_drt_get_index(cmapp, offset, indexp, 1); + } + return(kret); +} + +/* + * Implementation of set dirty/clean. + * + * In the 'clean' case, not finding a map is OK. + */ +static kern_return_t +vfs_drt_do_mark_pages( + void **private, + u_int64_t offset, + u_int length, + int *setcountp, + int dirty) +{ + struct vfs_drt_clustermap *cmap, **cmapp; + kern_return_t kret; + int i, index, pgoff, pgcount, setcount, ecount; + + cmapp = (struct vfs_drt_clustermap **)private; + cmap = *cmapp; + + vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0); + + if (setcountp != NULL) + *setcountp = 0; + + /* allocate a cluster map if we don't already have one */ + if (cmap == NULL) { + /* no cluster map, nothing to clean */ + if (!dirty) { + vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0); + return(KERN_SUCCESS); + } + kret = vfs_drt_alloc_map(cmapp); + if (kret != KERN_SUCCESS) { + vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0); + return(kret); + } + } + setcount = 0; + + /* + * Iterate over the length of the region. + */ + while (length > 0) { + /* + * Get the hashtable index for this offset. + * + * XXX this will add blank entries if we are clearing a range + * that hasn't been dirtied. + */ + kret = vfs_drt_get_index(cmapp, offset, &index, 0); + cmap = *cmapp; /* may have changed! */ + /* this may be a partial-success return */ + if (kret != KERN_SUCCESS) { + if (setcountp != NULL) + *setcountp = setcount; + vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0); + + return(kret); + } + + /* + * Work out how many pages we're modifying in this + * hashtable entry. + */ + pgoff = (offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE; + pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff)); + + /* + * Iterate over pages, dirty/clearing as we go. 
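+		 *
+		 * Example with 4K pages and 256-page entries: marking
+		 * offset 0xFF000, length 0x2000 gives pgoff = 255 and
+		 * pgcount = min(2, 1) = 1 on the first pass; the loop
+		 * then advances to offset 0x100000 (the next entry),
+		 * where pgoff = 0 and pgcount = 1 covers the remaining
+		 * page.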
+		 */
+		ecount = DRT_HASH_GET_COUNT(cmap, index);
+		for (i = 0; i < pgcount; i++) {
+			if (dirty) {
+				if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
+					DRT_HASH_SET_BIT(cmap, index, pgoff + i);
+					ecount++;
+					setcount++;
+				}
+			} else {
+				if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
+					DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i);
+					ecount--;
+					setcount++;
+				}
+			}
+		}
+		DRT_HASH_SET_COUNT(cmap, index, ecount);
+next:
+		offset += pgcount * PAGE_SIZE;
+		length -= pgcount * PAGE_SIZE;
+	}
+	if (setcountp != NULL)
+		*setcountp = setcount;
+
+	vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 0, setcount, 0, 0);
+
+	return(KERN_SUCCESS);
+}
+
+/*
+ * Mark a set of pages as dirty/clean.
+ *
+ * This is a public interface.
+ *
+ * cmapp
+ *	Pointer to storage suitable for holding a pointer.  Note that
+ *	this must either be NULL or a value set by this function.
+ *
+ * size
+ *	Current file size in bytes.
+ *
+ * offset
+ *	Offset of the first page to be marked as dirty, in bytes.  Must be
+ *	page-aligned.
+ *
+ * length
+ *	Length of dirty region, in bytes.  Must be a multiple of PAGE_SIZE.
+ *
+ * setcountp
+ *	Number of pages newly marked dirty by this call (optional).
+ *
+ * Returns KERN_SUCCESS if all the pages were successfully marked.
+ */
+static kern_return_t
+vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, int *setcountp)
+{
+	/* XXX size unused, drop from interface */
+	return(vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1));
+}
+
+static kern_return_t
+vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length)
+{
+	return(vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0));
+}
+
+/*
+ * Get a cluster of dirty pages.
+ *
+ * This is a public interface.
+ *
+ * cmapp
+ *	Pointer to storage managed by drt_mark_pages.  Note that this must
+ *	be NULL or a value set by drt_mark_pages.
+ *
+ * offsetp
+ *	Returns the byte offset into the file of the first page in the cluster.
+ *
+ * lengthp
+ *	Returns the length in bytes of the cluster of dirty pages.
+ *
+ * Returns success if a cluster was found.  If KERN_FAILURE is returned, there
+ * are no dirty pages meeting the minimum size criteria.
Private storage will + * be released if there are no more dirty pages left in the map + * + */ +static kern_return_t +vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp) +{ + struct vfs_drt_clustermap *cmap; + u_int64_t offset; + u_int length; + int index, i, j, fs, ls; + + /* sanity */ + if ((cmapp == NULL) || (*cmapp == NULL)) + return(KERN_FAILURE); + cmap = *cmapp; + + /* walk the hashtable */ + for (offset = 0, j = 0; j < cmap->scm_modulus; offset += (DRT_BITVECTOR_PAGES * PAGE_SIZE), j++) { + index = DRT_HASH(cmap, offset); + + if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0)) + continue; + + /* scan the bitfield for a string of bits */ + fs = -1; + + for (i = 0; i < DRT_BITVECTOR_PAGES; i++) { + if (DRT_HASH_TEST_BIT(cmap, index, i)) { + fs = i; + break; + } + } + if (fs == -1) { + /* didn't find any bits set */ + panic("vfs_drt: entry summary count > 0 but no bits set in map"); + } + for (ls = 0; i < DRT_BITVECTOR_PAGES; i++, ls++) { + if (!DRT_HASH_TEST_BIT(cmap, index, i)) + break; + } + + /* compute offset and length, mark pages clean */ + offset = DRT_HASH_GET_ADDRESS(cmap, index) + (PAGE_SIZE * fs); + length = ls * PAGE_SIZE; + vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0); + cmap->scm_lastclean = index; + + /* return successful */ + *offsetp = (off_t)offset; + *lengthp = length; + + vfs_drt_trace(cmap, DRT_DEBUG_RETCLUSTER, (int)offset, (int)length, 0, 0); + return(KERN_SUCCESS); + } + /* + * We didn't find anything... hashtable is empty + * emit stats into trace buffer and + * then free it + */ + vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA, + cmap->scm_modulus, + cmap->scm_buckets, + cmap->scm_lastclean, + cmap->scm_iskips); + + vfs_drt_free_map(cmap); + *cmapp = NULL; + + return(KERN_FAILURE); +} + + +static kern_return_t +vfs_drt_control(void **cmapp, int op_type) +{ + struct vfs_drt_clustermap *cmap; + + /* sanity */ + if ((cmapp == NULL) || (*cmapp == NULL)) + return(KERN_FAILURE); + cmap = *cmapp; + + switch (op_type) { + case 0: + /* emit stats into trace buffer */ + vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA, + cmap->scm_modulus, + cmap->scm_buckets, + cmap->scm_lastclean, + cmap->scm_iskips); + + vfs_drt_free_map(cmap); + *cmapp = NULL; + break; + + case 1: + cmap->scm_lastclean = 0; + break; + } + return(KERN_SUCCESS); +} + + + +/* + * Emit a summary of the state of the clustermap into the trace buffer + * along with some caller-provided data. + */ +static void +vfs_drt_trace(struct vfs_drt_clustermap *cmap, int code, int arg1, int arg2, int arg3, int arg4) +{ + KERNEL_DEBUG(code, arg1, arg2, arg3, arg4, 0); +} + +/* + * Perform basic sanity check on the hash entry summary count + * vs. the actual bits set in the entry. + */ +static void +vfs_drt_sanity(struct vfs_drt_clustermap *cmap) +{ + int index, i; + int bits_on; + + for (index = 0; index < cmap->scm_modulus; index++) { + if (DRT_HASH_VACANT(cmap, index)) + continue; + + for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) { + if (DRT_HASH_TEST_BIT(cmap, index, i)) + bits_on++; + } + if (bits_on != DRT_HASH_GET_COUNT(cmap, index)) + panic("bits_on = %d, index = %d\n", bits_on, index); + } } diff --git a/bsd/vfs/vfs_conf.c b/bsd/vfs/vfs_conf.c index 3b83965b0..df0896ded 100644 --- a/bsd/vfs/vfs_conf.c +++ b/bsd/vfs/vfs_conf.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -106,7 +106,7 @@ static struct vfsconf vfsconflist[] = { /* ISO9660 (aka CDROM) Filesystem */ #if CD9660 - { &cd9660_vfsops, "cd9660", 14, 0, MNT_LOCAL | MNT_DOVOLFS, cd9660_mountroot, NULL }, + { &cd9660_vfsops, "cd9660", 14, 0, MNT_LOCAL, cd9660_mountroot, NULL }, #endif /* Memory-based Filesystem */ @@ -198,6 +198,7 @@ extern struct vnodeopv_desc hfs_specop_opv_desc; extern struct vnodeopv_desc hfs_fifoop_opv_desc; extern struct vnodeopv_desc volfs_vnodeop_opv_desc; extern struct vnodeopv_desc cd9660_vnodeop_opv_desc; +extern struct vnodeopv_desc cd9660_cdxaop_opv_desc; extern struct vnodeopv_desc cd9660_specop_opv_desc; extern struct vnodeopv_desc cd9660_fifoop_opv_desc; extern struct vnodeopv_desc union_vnodeop_opv_desc; @@ -240,6 +241,7 @@ struct vnodeopv_desc *vfs_opv_descs[] = { #endif #if CD9660 &cd9660_vnodeop_opv_desc, + &cd9660_cdxaop_opv_desc, &cd9660_specop_opv_desc, #if FIFO &cd9660_fifoop_opv_desc, diff --git a/bsd/vfs/vfs_init.c b/bsd/vfs/vfs_init.c index 6d84b356a..8f59fb010 100644 --- a/bsd/vfs/vfs_init.c +++ b/bsd/vfs/vfs_init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -146,9 +146,8 @@ vfs_opv_init() * Also handle backwards compatibility. */ if (*opv_desc_vector_p == NULL) { - /* XXX - shouldn't be M_VNODE */ MALLOC(*opv_desc_vector_p, PFI*, - vfs_opv_numops*sizeof(PFI), M_VNODE, M_WAITOK); + vfs_opv_numops*sizeof(PFI), M_TEMP, M_WAITOK); bzero (*opv_desc_vector_p, vfs_opv_numops*sizeof(PFI)); DODEBUG(printf("vector at %x allocated\n", opv_desc_vector_p)); @@ -258,6 +257,10 @@ vfsinit() * Initialize the vnode table */ vntblinit(); + /* + * Initialize the filesystem event mechanism. + */ + vfs_event_init(); /* * Initialize the vnode name cache */ @@ -268,7 +271,8 @@ vfsinit() vfs_op_init(); vfs_opv_init(); /* finish the job */ /* - * Initialize each file system type. + * Initialize each file system type in the static list, + * until the first NULL ->vfs_vfsops is encountered. */ vattr_null(&va_null); numused_vfsslots = maxtypenum = 0; @@ -285,57 +289,137 @@ vfsinit() maxvfsconf = maxtypenum; } +/* + * Name: vfsconf_add + * + * Description: Add a filesystem to the vfsconf list at the first + * unused slot. If no slots are available, return an + * error. + * + * Parameter: nvfsp vfsconf for VFS to add + * + * Returns: 0 Success + * -1 Failure + * + * Notes: The vfsconf should be treated as a linked list by + * all external references, as the implementation is + * expected to change in the future. The linkage is + * through ->vfc_next, and the list is NULL terminated. + * + * Warning: This code assumes that vfsconf[0] is non-empty. + */ int vfsconf_add(struct vfsconf *nvfsp) { - struct vfsconf *vfsp; + int slot; + struct vfsconf *slotp; - if ((numused_vfsslots >= maxvfsslots) || (nvfsp == (struct vfsconf *)0)) + if (nvfsp == NULL) /* overkill */ return (-1); - bcopy(nvfsp, &vfsconf[numused_vfsslots], sizeof(struct vfsconf)); - vfsconf[numused_vfsslots-1].vfc_next = &vfsconf[numused_vfsslots]; - if (nvfsp->vfc_typenum <= maxvfsconf ) - maxvfsconf = nvfsp->vfc_typenum + 1; + /* + * Find the next empty slot; we recognize an empty slot by a + * NULL-valued ->vfc_vfsops, so if we delete a VFS, we must + * ensure we set the entry back to NULL. 
+	 */
+	for (slot = 0; slot < maxvfsslots; slot++) {
+		if (vfsconf[slot].vfc_vfsops == NULL)
+			break;
+	}
+	if (slot == maxvfsslots) {
+		/* out of static slots; allocate one instead */
+		MALLOC(slotp, struct vfsconf *, sizeof(struct vfsconf),
+		       M_TEMP, M_WAITOK);
+	} else {
+		slotp = &vfsconf[slot];
+	}
+
+	/*
+	 * Replace the contents of the next empty slot with the contents
+	 * of the provided nvfsp.
+	 *
+	 * Note: Takes advantage of the fact that 'slot' was left
+	 * with the value of 'maxvfsslots' in the allocation case.
+	 */
+	bcopy(nvfsp, slotp, sizeof(struct vfsconf));
+	if (slot != 0) {
+		slotp->vfc_next = vfsconf[slot - 1].vfc_next;
+		vfsconf[slot - 1].vfc_next = slotp;
+	} else {
+		slotp->vfc_next = NULL;
+	}
 	numused_vfsslots++;
+
+	/*
+	 * Call through the ->vfs_init(); use slotp instead of nvfsp,
+	 * so that if the FS cares where its instance record is, it
+	 * can find it later.
+	 *
+	 * XXX	All code that calls ->vfs_init treats it as if it
+	 * XXX	returns a 'void', and can never fail.
+	 */
 	if (nvfsp->vfc_vfsops->vfs_init)
-		(*nvfsp->vfc_vfsops->vfs_init)(nvfsp);
+		(*nvfsp->vfc_vfsops->vfs_init)(slotp);
+
 	return(0);
 }
 
+/*
+ * Name:	vfsconf_del
+ *
+ * Description:	Remove a filesystem from the vfsconf list by name.
+ *		If no such filesystem exists, return an error.
+ *
+ * Parameter:	fs_name			name of VFS to remove
+ *
+ * Returns:	0			Success
+ *		-1			Failure
+ *
+ * Notes:	Hopefully all filesystems have unique names.
+ */
 int
 vfsconf_del(char * fs_name)
 {
-	int entriesRemaining;
-	struct vfsconf *vfsconflistentry;
-	struct vfsconf *prevconf = NULL;
-	struct vfsconf *targetconf = NULL;
+	struct vfsconf **vcpp;
+	struct vfsconf *vcdelp;
 
-	prevconf = vfsconflistentry = vfsconf;
-	for (entriesRemaining = maxvfsslots;
-	     (entriesRemaining > 0) && (vfsconflistentry != NULL);
-	     --entriesRemaining) {
-		if ((vfsconflistentry->vfc_vfsops != NULL) && (strcmp(vfsconflistentry->vfc_name, fs_name) == 0)) {
-			targetconf = vfsconflistentry;
+	/*
+	 * Traverse the list looking for fs_name; if found, *vcpp
+	 * will contain the address of the pointer to the entry to
+	 * be removed.
+	 */
+	for( vcpp = &vfsconf; *vcpp; vcpp = &(*vcpp)->vfc_next) {
+		if (strcmp( (*vcpp)->vfc_name, fs_name) == 0)
 			break;
-		};
-		prevconf = vfsconflistentry;
-		vfsconflistentry = vfsconflistentry->vfc_next;
-	};
-
-	if (targetconf != NULL) {
-		if (prevconf != NULL) {
-			/* Unlink the target entry from the list:
-			   and decrement our count */
-			prevconf->vfc_next = targetconf->vfc_next;
-			numused_vfsslots--;
-		} else {
-			/* XXX need real error code for no previous entry in list */
-			return(-1);
 		}
-	} else {
+
+	if (*vcpp == NULL) {
 		/* XXX need real error code for entry not found */
 		return(-1);
-	};
+	}
+
+	/* Unlink entry */
+	vcdelp = *vcpp;
+	*vcpp = (*vcpp)->vfc_next;
+
+	/*
+	 * Is this an entry from our static table?  We find out by
+	 * seeing if the pointer to the object to be deleted places
+	 * the object in the address space containing the table (or not).
+	 */
+	if (vcdelp >= vfsconf && vcdelp < (vfsconf + maxvfsslots)) {	/* Y */
+		/* Mark as empty for vfsconf_add() */
+		bzero(vcdelp, sizeof(struct vfsconf));
+		numused_vfsslots--;
+	} else {	/* N */
+		/*
+		 * This entry was dynamically allocated; we must free it;
+		 * we would prefer to have just linked the caller's
+		 * vfsconf onto our list, but it may not be persistent
+		 * because of the previous (copying) implementation.
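+		 *
+		 * The list walk above uses the pointer-to-pointer unlink
+		 * idiom, which needs no 'prev' bookkeeping; in sketch form
+		 * (placeholder names):
+		 *
+		 *	for (pp = &head; *pp; pp = &(*pp)->next)
+		 *		if (is_match(*pp)) {
+		 *			gone = *pp;
+		 *			*pp = (*pp)->next;
+		 *			break;
+		 *		}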
+ */ + FREE(vcdelp, M_TEMP); + } + return(0); } diff --git a/bsd/vfs/vfs_journal.c b/bsd/vfs/vfs_journal.c index 8e11a87b1..80ff96a5e 100644 --- a/bsd/vfs/vfs_journal.c +++ b/bsd/vfs/vfs_journal.c @@ -85,6 +85,24 @@ static void abort_transaction(journal *jnl, transaction *tr); static void dump_journal(journal *jnl); +// +// 3105942 - Coalesce writes to the same block on journal replay +// + +typedef struct bucket { + off_t block_num; + size_t jnl_offset; + size_t block_size; +} bucket; + +#define STARTING_BUCKETS 256 + +static int add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr); +static int grow_table(struct bucket **buf_ptr, int num_buckets, int new_size); +static int lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full); +static int do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr); +static int insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr, int overwriting); + #define CHECK_JOURNAL(jnl) \ do { \ if (jnl == NULL) {\ @@ -102,17 +120,17 @@ static void dump_journal(journal *jnl); }\ if ( jnl->jhdr->start <= 0 \ || jnl->jhdr->start > jnl->jhdr->size\ - || jnl->jhdr->start > 128*1024*1024) {\ + || jnl->jhdr->start > 1024*1024*1024) {\ panic("%s:%d: jhdr start looks bad (0x%llx max size 0x%llx)\n", \ __FILE__, __LINE__, jnl->jhdr->start, jnl->jhdr->size);\ }\ if ( jnl->jhdr->end <= 0 \ || jnl->jhdr->end > jnl->jhdr->size\ - || jnl->jhdr->end > 128*1024*1024) {\ + || jnl->jhdr->end > 1024*1024*1024) {\ panic("%s:%d: jhdr end looks bad (0x%llx max size 0x%llx)\n", \ __FILE__, __LINE__, jnl->jhdr->end, jnl->jhdr->size);\ }\ - if (jnl->jhdr->size > 128*1024*1024) {\ + if (jnl->jhdr->size > 1024*1024*1024) {\ panic("%s:%d: jhdr size looks bad (0x%llx)\n",\ __FILE__, __LINE__, jnl->jhdr->size);\ } \ @@ -132,13 +150,13 @@ static void dump_journal(journal *jnl); if (tr->total_bytes < 0) {\ panic("%s:%d: tr total_bytes looks bad: %d\n", __FILE__, __LINE__, tr->total_bytes);\ }\ - if (tr->journal_start < 0 || tr->journal_start > 128*1024*1024) {\ + if (tr->journal_start < 0 || tr->journal_start > 1024*1024*1024) {\ panic("%s:%d: tr journal start looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_start);\ }\ - if (tr->journal_end < 0 || tr->journal_end > 128*1024*1024) {\ + if (tr->journal_end < 0 || tr->journal_end > 1024*1024*1024) {\ panic("%s:%d: tr journal end looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_end);\ }\ - if (tr->blhdr && (tr->blhdr->max_blocks <= 0 || tr->blhdr->max_blocks > 2048)) {\ + if (tr->blhdr && (tr->blhdr->max_blocks <= 0 || tr->blhdr->max_blocks > (tr->jnl->jhdr->size/tr->jnl->jhdr->jhdr_size))) {\ panic("%s:%d: tr blhdr max_blocks looks bad: %d\n", __FILE__, __LINE__, tr->blhdr->max_blocks);\ }\ } while(0) @@ -164,8 +182,9 @@ calc_checksum(char *ptr, int len) } -#define JNL_WRITE 1 -#define JNL_READ 2 +#define JNL_WRITE 0x0001 +#define JNL_READ 0x0002 +#define JNL_HEADER 0x8000 // // This function sets up a fake buf and passes it directly to the @@ -190,11 +209,11 @@ do_journal_io(journal *jnl, off_t *offset, void *data, size_t len, int direction again: bp = alloc_io_buf(jnl->jdev, 1); - if (direction == JNL_WRITE) { + if (direction & JNL_WRITE) { bp->b_flags |= 0; // don't have to set any flags (was: B_WRITEINPROG) jnl->jdev->v_numoutput++; 
vfs_io_attributes(jnl->jdev, B_WRITE, &max_iosize, &max_vectors); - } else if (direction == JNL_READ) { + } else if (direction & JNL_READ) { bp->b_flags |= B_READ; vfs_io_attributes(jnl->jdev, B_READ, &max_iosize, &max_vectors); } @@ -219,6 +238,10 @@ do_journal_io(journal *jnl, off_t *offset, void *data, size_t len, int direction panic("jnl: do_jnl_io: curlen == %d, offset 0x%llx len %d\n", curlen, *offset, len); } + if (*offset == 0 && (direction & JNL_HEADER) == 0) { + panic("jnl: request for i/o to jnl-header without JNL_HEADER flag set! (len %d, data %p)\n", curlen, data); + } + bp->b_bufsize = curlen; bp->b_bcount = curlen; bp->b_data = data; @@ -269,9 +292,18 @@ write_journal_data(journal *jnl, off_t *offset, void *data, size_t len) } +static int +read_journal_header(journal *jnl, void *data, size_t len) +{ + off_t hdr_offset = 0; + + return do_journal_io(jnl, &hdr_offset, data, len, JNL_READ|JNL_HEADER); +} + static int write_journal_header(journal *jnl) { + static int num_err_prints = 0; int ret; off_t jhdr_offset = 0; @@ -280,18 +312,44 @@ write_journal_header(journal *jnl) // ret = VOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NOCRED, current_proc()); if (ret != 0) { - printf("jnl: flushing fs disk buffer returned 0x%x\n", ret); + // + // Only print this error if it's a different error than the + // previous one, or if it's the first time for this device + // or if the total number of printfs is less than 25. We + // allow for up to 25 printfs to insure that some make it + // into the on-disk syslog. Otherwise if we only printed + // one, it's possible it would never make it to the syslog + // for the root volume and that makes debugging hard. + // + if ( ret != jnl->last_flush_err + || (jnl->flags & JOURNAL_FLUSHCACHE_ERR) == 0 + || num_err_prints++ < 25) { + + printf("jnl: flushing fs disk buffer returned 0x%x\n", ret); + + jnl->flags |= JOURNAL_FLUSHCACHE_ERR; + jnl->last_flush_err = ret; + } } - + jnl->jhdr->checksum = 0; jnl->jhdr->checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header)); - if (write_journal_data(jnl, &jhdr_offset, jnl->header_buf, jnl->jhdr->jhdr_size) != jnl->jhdr->jhdr_size) { - printf("jnl: write_journal_header: error writing the journal header!\n"); - jnl->flags |= JOURNAL_INVALID; - return -1; + if (do_journal_io(jnl, &jhdr_offset, jnl->header_buf, jnl->jhdr->jhdr_size, JNL_WRITE|JNL_HEADER) != jnl->jhdr->jhdr_size) { + printf("jnl: write_journal_header: error writing the journal header!\n"); + jnl->flags |= JOURNAL_INVALID; + return -1; } + // Have to flush after writing the journal header so that + // a future transaction doesn't sneak out to disk before + // the header does and thus overwrite data that the old + // journal header refers to. Saw this exact case happen + // on an IDE bus analyzer with Larry Barras so while it + // may seem obscure, it's not. 
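+	// Concrete (hypothetical) failure order without the second flush:
+	// the header write lands in the drive's cache; a later transaction
+	// is then committed into journal space that the new header freed;
+	// power fails before the cached header reaches the platter.  On
+	// reboot the old on-disk header still points at journal bytes that
+	// the newer transaction has since overwritten.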
+ // + VOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NOCRED, current_proc()); + return 0; } @@ -475,6 +533,51 @@ buffer_flushed_callback(struct buf *bp) } } + +#include + +#define SWAP16(x) OSSwapInt16(x) +#define SWAP32(x) OSSwapInt32(x) +#define SWAP64(x) OSSwapInt64(x) + + +static void +swap_journal_header(journal *jnl) +{ + jnl->jhdr->magic = SWAP32(jnl->jhdr->magic); + jnl->jhdr->endian = SWAP32(jnl->jhdr->endian); + jnl->jhdr->start = SWAP64(jnl->jhdr->start); + jnl->jhdr->end = SWAP64(jnl->jhdr->end); + jnl->jhdr->size = SWAP64(jnl->jhdr->size); + jnl->jhdr->blhdr_size = SWAP32(jnl->jhdr->blhdr_size); + jnl->jhdr->checksum = SWAP32(jnl->jhdr->checksum); + jnl->jhdr->jhdr_size = SWAP32(jnl->jhdr->jhdr_size); +} + +static void +swap_block_list_header(journal *jnl, block_list_header *blhdr) +{ + int i; + + blhdr->max_blocks = SWAP16(blhdr->max_blocks); + blhdr->num_blocks = SWAP16(blhdr->num_blocks); + blhdr->bytes_used = SWAP32(blhdr->bytes_used); + blhdr->checksum = SWAP32(blhdr->checksum); + blhdr->pad = SWAP32(blhdr->pad); + + if (blhdr->num_blocks * sizeof(blhdr->binfo[0]) > jnl->jhdr->blhdr_size) { + printf("jnl: blhdr num blocks looks suspicious (%d). not swapping.\n", blhdr->num_blocks); + return; + } + + for(i=0; i < blhdr->num_blocks; i++) { + blhdr->binfo[i].bnum = SWAP64(blhdr->binfo[i].bnum); + blhdr->binfo[i].bsize = SWAP32(blhdr->binfo[i].bsize); + blhdr->binfo[i].bp = (void *)SWAP32((int)blhdr->binfo[i].bp); + } +} + + static int update_fs_block(journal *jnl, void *block_ptr, off_t fs_block, size_t bsize) { @@ -524,16 +627,291 @@ update_fs_block(journal *jnl, void *block_ptr, off_t fs_block, size_t bsize) return 0; } +static int +grow_table(struct bucket **buf_ptr, int num_buckets, int new_size) +{ + struct bucket *newBuf; + int current_size = num_buckets, i; + + // return if newsize is less than the current size + if (new_size < num_buckets) { + return current_size; + } + + if ((MALLOC(newBuf, struct bucket *, new_size*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) { + printf("jnl: grow_table: no memory to expand coalesce buffer!\n"); + return -1; + } + + // printf("jnl: lookup_bucket: expanded co_buf to %d elems\n", new_size); + + // copy existing elements + bcopy(*buf_ptr, newBuf, num_buckets*sizeof(struct bucket)); + + // initialize the new ones + for(i=num_buckets; i < new_size; i++) { + newBuf[i].block_num = (off_t)-1; + } + + // free the old container + FREE(*buf_ptr, M_TEMP); + + // reset the buf_ptr + *buf_ptr = newBuf; + + return new_size; +} + +static int +lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full) +{ + int lo, hi, index, matches, i; + + if (num_full == 0) { + return 0; // table is empty, so insert at index=0 + } + + lo = 0; + hi = num_full - 1; + index = -1; + + // perform binary search for block_num + do { + int mid = (hi - lo)/2 + lo; + off_t this_num = (*buf_ptr)[mid].block_num; + + if (block_num == this_num) { + index = mid; + break; + } + + if (block_num < this_num) { + hi = mid; + continue; + } + + if (block_num > this_num) { + lo = mid + 1; + continue; + } + } while(lo < hi); + + // check if lo and hi converged on the match + if (block_num == (*buf_ptr)[hi].block_num) { + index = hi; + } + + // if no existing entry found, find index for new one + if (index == -1) { + index = (block_num < (*buf_ptr)[hi].block_num) ? 
hi : hi + 1; + } else { + // make sure that we return the right-most index in the case of multiple matches + matches = 0; + i = index + 1; + while(i < num_full && block_num == (*buf_ptr)[i].block_num) { + matches++; + i++; + } + + index += matches; + } + + return index; +} + +static int +insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr, int overwriting) +{ + if (!overwriting) { + // grow the table if we're out of space + if (*num_full_ptr >= *num_buckets_ptr) { + int new_size = *num_buckets_ptr * 2; + int grow_size = grow_table(buf_ptr, *num_buckets_ptr, new_size); + + if (grow_size < new_size) { + printf("jnl: add_block: grow_table returned an error!\n"); + return -1; + } + + *num_buckets_ptr = grow_size; //update num_buckets to reflect the new size + } + + // if we're not inserting at the end, we need to bcopy + if (blk_index != *num_full_ptr) { + bcopy( (*buf_ptr)+(blk_index), (*buf_ptr)+(blk_index+1), (*num_full_ptr-blk_index)*sizeof(struct bucket) ); + } + + (*num_full_ptr)++; // increment only if we're not overwriting + } + + // sanity check the values we're about to add + if (offset >= jnl->jhdr->size) { + offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size); + } + if (size <= 0) { + panic("jnl: insert_block: bad size in insert_block (%d)\n", size); + } + + (*buf_ptr)[blk_index].block_num = num; + (*buf_ptr)[blk_index].block_size = size; + (*buf_ptr)[blk_index].jnl_offset = offset; + + return blk_index; +} + +static int +do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr) +{ + int num_to_remove, index, i, overwrite, err; + size_t jhdr_size = jnl->jhdr->jhdr_size, new_offset; + off_t overlap, block_start, block_end; + + block_start = block_num*jhdr_size; + block_end = block_start + size; + overwrite = (block_num == (*buf_ptr)[blk_index].block_num && size >= (*buf_ptr)[blk_index].block_size); + + // first, eliminate any overlap with the previous entry + if (blk_index != 0 && !overwrite) { + off_t prev_block_start = (*buf_ptr)[blk_index-1].block_num*jhdr_size; + off_t prev_block_end = prev_block_start + (*buf_ptr)[blk_index-1].block_size; + overlap = prev_block_end - block_start; + if (overlap > 0) { + if (overlap % jhdr_size != 0) { + panic("jnl: do_overlap: overlap with previous entry not a multiple of %d\n", jhdr_size); + } + + // if the previous entry completely overlaps this one, we need to break it into two pieces. 
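+			// Worked example (hypothetical numbers, jhdr_size = 512):
+			// previous entry block 100, size 4096 covers bytes [51200, 55296);
+			// the incoming block 102, size 512 covers [52224, 52736).  Since
+			// prev_block_end (55296) > block_end (52736), a tail entry is
+			// inserted below for block 103, size 2560, with jnl_offset shifted
+			// by 1536, and the previous entry is then truncated to 1024 bytes.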
+ if (prev_block_end > block_end) { + off_t new_num = block_end / jhdr_size; + size_t new_size = prev_block_end - block_end; + size_t new_offset = (*buf_ptr)[blk_index-1].jnl_offset + (block_end - prev_block_start); + + err = insert_block(jnl, buf_ptr, blk_index, new_num, new_size, new_offset, num_buckets_ptr, num_full_ptr, 0); + if (err < 0) { + panic("jnl: do_overlap: error inserting during pre-overlap\n"); + } + } + + // Regardless, we need to truncate the previous entry to the beginning of the overlap + (*buf_ptr)[blk_index-1].block_size = block_start - prev_block_start; + } + } + + // then, bail out fast if there's no overlap with the entries that follow + if (!overwrite && block_end <= (*buf_ptr)[blk_index].block_num*jhdr_size) { + return 0; // no overlap, no overwrite + } else if (overwrite && (blk_index + 1 >= *num_full_ptr || block_end <= (*buf_ptr)[blk_index+1].block_num*jhdr_size)) { + return 1; // simple overwrite + } + + // Otherwise, find all cases of total and partial overlap. We use the special + // block_num of -2 to designate entries that are completely overlapped and must + // be eliminated. The block_num, size, and jnl_offset of partially overlapped + // entries must be adjusted to keep the array consistent. + index = blk_index; + num_to_remove = 0; + while(index < *num_full_ptr && block_end > (*buf_ptr)[index].block_num*jhdr_size) { + if (block_end >= ((*buf_ptr)[index].block_num*jhdr_size + (*buf_ptr)[index].block_size)) { + (*buf_ptr)[index].block_num = -2; // mark this for deletion + num_to_remove++; + } else { + overlap = block_end - (*buf_ptr)[index].block_num*jhdr_size; + if (overlap > 0) { + if (overlap % jhdr_size != 0) { + panic("jnl: do_overlap: overlap of %d is not multiple of %d\n", overlap, jhdr_size); + } + + // if we partially overlap this entry, adjust its block number, jnl offset, and size + (*buf_ptr)[index].block_num += (overlap / jhdr_size); // make sure overlap is multiple of jhdr_size, or round up + + new_offset = (*buf_ptr)[index].jnl_offset + overlap; // check for wrap-around + if (new_offset >= jnl->jhdr->size) { + new_offset = jhdr_size + (new_offset - jnl->jhdr->size); + } + (*buf_ptr)[index].jnl_offset = new_offset; + + (*buf_ptr)[index].block_size -= overlap; // sanity check for negative value + if ((*buf_ptr)[index].block_size <= 0) { + panic("jnl: do_overlap: after overlap, new block size is invalid (%d)\n", (*buf_ptr)[index].block_size); + // return -1; // if above panic is removed, return -1 for error + } + } + + } + + index++; + } + + // bcopy over any completely overlapped entries, starting at the right (where the above loop broke out) + index--; // start with the last index used within the above loop + while(index >= blk_index) { + if ((*buf_ptr)[index].block_num == -2) { + if (index == *num_full_ptr-1) { + (*buf_ptr)[index].block_num = -1; // it's the last item in the table... 
just mark as free
+			} else {
+				bcopy( (*buf_ptr)+(index+1), (*buf_ptr)+(index), (*num_full_ptr - (index + 1)) * sizeof(struct bucket) );
+			}
+			(*num_full_ptr)--;
+		}
+		index--;
+	}
+
+	// eliminate any stale entries at the end of the table
+	for(i=*num_full_ptr; i < (*num_full_ptr + num_to_remove); i++) {
+		(*buf_ptr)[i].block_num = -1;
+	}
+
+	return 0;	// if we got this far, we need to insert the entry into the table (rather than overwrite)
+}
+
+// PR-3105942: Coalesce writes to the same block in journal replay
+// We coalesce writes by maintaining a dynamic sorted array of physical disk blocks
+// to be replayed and the corresponding location in the journal which contains
+// the most recent data for those blocks.  The array is "played" once all the
+// blocks in the journal have been coalesced.  The code for the case of conflicting/
+// overlapping writes to a single block is the most dense.  Because coalescing can
+// disrupt the existing time-ordering of blocks in the journal playback, care
+// is taken to catch any overlaps and keep the array consistent.
+static int
+add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr)
+{
+	int	blk_index, overwriting;
+	size_t	jhdr_size = jnl->jhdr->jhdr_size;
+
+	// on return from lookup_bucket(), blk_index is the index into the table where block_num should be
+	// inserted (or the index of the elem to overwrite).
+	blk_index = lookup_bucket( buf_ptr, block_num, *num_full_ptr);
+
+	// check if the index is within bounds (if we're adding this block to the end of
+	// the table, blk_index will be equal to num_full)
+	if (blk_index < 0 || blk_index > *num_full_ptr) {
+		//printf("jnl: add_block: trouble adding block to co_buf\n");
+		return -1;
+	} // else printf("jnl: add_block: adding block 0x%llx at i=%d\n", block_num, blk_index);
+
+	// Determine whether we're overwriting an existing entry by checking for overlap
+	overwriting = do_overlap(jnl, buf_ptr, blk_index, block_num, size, offset, num_buckets_ptr, num_full_ptr);
+	if (overwriting < 0) {
+		return -1;	// if we got an error, pass it along
+	}
+
+	// returns the index, or -1 on error
+	blk_index = insert_block(jnl, buf_ptr, blk_index, block_num, size, offset, num_buckets_ptr, num_full_ptr, overwriting);
+
+	return blk_index;
+}
 
 static int
 replay_journal(journal *jnl)
 {
-    int i, ret, checksum, max_bsize;
+    int i, ret, orig_checksum, checksum, max_bsize;
     struct buf *oblock_bp;
     block_list_header *blhdr;
     off_t offset;
     char *buf, *block_ptr=NULL;
-    
+    struct bucket *co_buf;
+    int num_buckets = STARTING_BUCKETS, num_full;
+    
     // wrap the start ptr if it points to the very end of the journal
     if (jnl->jhdr->start == jnl->jhdr->size) {
 	jnl->jhdr->start = jnl->jhdr->jhdr_size;
@@ -552,7 +930,19 @@ replay_journal(journal *jnl)
 		       jnl->jhdr->blhdr_size);
 		return -1;
     }
-    
+
+    // allocate memory for the coalesce buffer
+    if ((MALLOC(co_buf, struct bucket *, num_buckets*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
+        printf("jnl: replay_journal: no memory for coalesce buffer!\n");
+	return -1;
+    }
+
+    // initialize entries
+    for(i=0; i < num_buckets; i++) {
+        co_buf[i].block_num = -1;
+    }
+    num_full = 0; // empty at first
+
     printf("jnl: replay_journal: from: %lld to: %lld (joffset 0x%llx)\n",
 	   jnl->jhdr->start, jnl->jhdr->end, jnl->jdev_offset);
 
@@ -566,13 +956,23 @@ replay_journal(journal *jnl)
 	}
 
 	blhdr = (block_list_header *)buf;
-	checksum = blhdr->checksum;
+
+	orig_checksum = blhdr->checksum;
 	blhdr->checksum = 0;
-	if
(checksum != calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE)) { - printf("jnl: replay_journal: bad block list header @ 0x%llx (checksum 0x%x != 0x%x)\n", - offset, checksum, calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE)); - goto bad_replay; + if (jnl->flags & JOURNAL_NEED_SWAP) { + // calculate the checksum based on the unswapped data + // because it is done byte-at-a-time. + orig_checksum = SWAP32(orig_checksum); + checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE); + swap_block_list_header(jnl, blhdr); + } else { + checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE); } + if (checksum != orig_checksum) { + printf("jnl: replay_journal: bad block list header @ 0x%llx (checksum 0x%x != 0x%x)\n", + offset, orig_checksum, checksum); + goto bad_replay; + } if ( blhdr->max_blocks <= 0 || blhdr->max_blocks > 2048 || blhdr->num_blocks <= 0 || blhdr->num_blocks > blhdr->max_blocks) { printf("jnl: replay_journal: bad looking journal entry: max: %d num: %d\n", @@ -595,62 +995,98 @@ replay_journal(journal *jnl) max_bsize = (max_bsize + PAGE_SIZE) & ~(PAGE_SIZE - 1); } - if (kmem_alloc(kernel_map, (vm_offset_t *)&block_ptr, max_bsize)) { - goto bad_replay; - } - //printf("jnl: replay_journal: %d blocks in journal entry @ 0x%llx\n", blhdr->num_blocks-1, - // jnl->jhdr->start); + //printf("jnl: replay_journal: adding %d blocks in journal entry @ 0x%llx to co_buf\n", + // blhdr->num_blocks-1, jnl->jhdr->start); for(i=1; i < blhdr->num_blocks; i++) { - int size; + int size, ret_val; + off_t number; size = blhdr->binfo[i].bsize; - - ret = read_journal_data(jnl, &offset, block_ptr, size); - if (ret != size) { - printf("jnl: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", offset); - goto bad_replay; - } - - // don't replay "killed" blocks - if (blhdr->binfo[i].bnum == (off_t)-1) { - // printf("jnl: replay_journal: skipping killed fs block (slot %d)\n", i); + number = blhdr->binfo[i].bnum; + + // don't add "killed" blocks + if (number == (off_t)-1) { + //printf("jnl: replay_journal: skipping killed fs block (index %d)\n", i); } else { - //printf("jnl: replay_journal: fixing fs block # %lld (%d)\n", - // blhdr->binfo[i].bnum, blhdr->binfo[i].bsize); - - if (update_fs_block(jnl, block_ptr, blhdr->binfo[i].bnum, blhdr->binfo[i].bsize) != 0) { - goto bad_replay; - } + // add this bucket to co_buf, coalescing where possible + // printf("jnl: replay_journal: adding block 0x%llx\n", number); + ret_val = add_block(jnl, &co_buf, number, size, (size_t) offset, &num_buckets, &num_full); + + if (ret_val == -1) { + printf("jnl: replay_journal: trouble adding block to co_buf\n"); + goto bad_replay; + } // else printf("jnl: replay_journal: added block 0x%llx at i=%d\n", number); } - - // check if we need to wrap offset back to the beginning - // (which is just past the journal header) + + // increment offset + offset += size; + + // check if the last block added puts us off the end of the jnl. 
+ // if so, we need to wrap to the beginning and take any remainder + // into account // if (offset >= jnl->jhdr->size) { - offset = jnl->jhdr->jhdr_size; + offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size); } } - kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize); - block_ptr = NULL; - + jnl->jhdr->start += blhdr->bytes_used; if (jnl->jhdr->start >= jnl->jhdr->size) { // wrap around and skip the journal header block jnl->jhdr->start = (jnl->jhdr->start % jnl->jhdr->size) + jnl->jhdr->jhdr_size; } + } - // only update the on-disk journal header if we've reached the - // last chunk of updates from this transaction. if binfo[0].bnum - // is zero then we know we're at the end. - if (blhdr->binfo[0].bnum == 0) { - if (write_journal_header(jnl) != 0) { - goto bad_replay; - } - } + + //printf("jnl: replay_journal: replaying %d blocks\n", num_full); + + if (kmem_alloc(kernel_map, (vm_offset_t *)&block_ptr, max_bsize)) { + goto bad_replay; + } + + // Replay the coalesced entries in the co-buf + for(i=0; i < num_full; i++) { + size_t size = co_buf[i].block_size; + off_t jnl_offset = (off_t) co_buf[i].jnl_offset; + off_t number = co_buf[i].block_num; + + + // printf("replaying co_buf[%d]: block 0x%llx, size 0x%x, jnl_offset 0x%llx\n", i, co_buf[i].block_num, + // co_buf[i].block_size, co_buf[i].jnl_offset); + + if (number == (off_t)-1) { + // printf("jnl: replay_journal: skipping killed fs block\n"); + } else { + + // do journal read, and set the phys. block + ret = read_journal_data(jnl, &jnl_offset, block_ptr, size); + if (ret != size) { + printf("jnl: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", offset); + goto bad_replay; + } + + if (update_fs_block(jnl, block_ptr, number, size) != 0) { + goto bad_replay; + } + } } + + // done replaying; update jnl header + if (write_journal_header(jnl) != 0) { + goto bad_replay; + } + + // free block_ptr + kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize); + block_ptr = NULL; + + // free the coalesce buffer + FREE(co_buf, M_TEMP); + co_buf = NULL; + kmem_free(kernel_map, (vm_offset_t)buf, jnl->jhdr->blhdr_size); return 0; @@ -658,7 +1094,11 @@ replay_journal(journal *jnl) if (block_ptr) { kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize); } + if (co_buf) { + FREE(co_buf, M_TEMP); + } kmem_free(kernel_map, (vm_offset_t)buf, jnl->jhdr->blhdr_size); + return -1; } @@ -721,9 +1161,12 @@ size_up_tbuffer(journal *jnl, int tbuffer_size, int phys_blksz) } jnl->jhdr->blhdr_size = (jnl->tbuffer_size / jnl->jhdr->jhdr_size) * sizeof(block_info); - if (jnl->jhdr->blhdr_size < phys_blksz) { - jnl->jhdr->blhdr_size = phys_blksz; - } + if (jnl->jhdr->blhdr_size < phys_blksz) { + jnl->jhdr->blhdr_size = phys_blksz; + } else if ((jnl->jhdr->blhdr_size % phys_blksz) != 0) { + // have to round up so we're an even multiple of the physical block size + jnl->jhdr->blhdr_size = (jnl->jhdr->blhdr_size + (phys_blksz - 1)) & ~(phys_blksz - 1); + } } @@ -792,10 +1235,7 @@ journal_create(struct vnode *jvp, // jnl->jhdr->start = jnl->jhdr->size - (phys_blksz*3); // jnl->jhdr->end = jnl->jhdr->size - (phys_blksz*3); - if (semaphore_create(kernel_task, &jnl->jsem, SYNC_POLICY_FIFO, 1) != 0) { - printf("jnl: journal_create: failed to create journal semaphore..\n"); - goto bad_sem; - } + lockinit(&jnl->jlock, PINOD, "journal", 0, 0); if (write_journal_header(jnl) != 0) { printf("jnl: journal_create: failed to write journal header.\n"); @@ -806,8 +1246,6 @@ journal_create(struct vnode *jvp, bad_write: - semaphore_destroy(kernel_task, 
jnl->jsem); - bad_sem: kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz); bad_kmem_alloc: jnl->jhdr = NULL; @@ -829,7 +1267,7 @@ journal_open(struct vnode *jvp, { journal *jnl; int orig_blksz=0, phys_blksz, blhdr_size; - off_t hdr_offset=0; + int orig_checksum, checksum; /* Get the real physical block size. */ if (VOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, FSCRED, NULL)) { @@ -870,12 +1308,25 @@ journal_open(struct vnode *jvp, // we have to set this up here so that do_journal_io() will work jnl->jhdr->jhdr_size = phys_blksz; - if (read_journal_data(jnl, &hdr_offset, jnl->jhdr, phys_blksz) != phys_blksz) { + if (read_journal_header(jnl, jnl->jhdr, phys_blksz) != phys_blksz) { printf("jnl: open: could not read %d bytes for the journal header.\n", phys_blksz); goto bad_journal; } + orig_checksum = jnl->jhdr->checksum; + jnl->jhdr->checksum = 0; + + if (jnl->jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) { + // do this before the swap since it's done byte-at-a-time + orig_checksum = SWAP32(orig_checksum); + checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header)); + swap_journal_header(jnl); + jnl->flags |= JOURNAL_NEED_SWAP; + } else { + checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header)); + } + if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC && jnl->jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) { printf("jnl: open: journal magic is bad (0x%x != 0x%x)\n", jnl->jhdr->magic, JOURNAL_HEADER_MAGIC); @@ -884,12 +1335,11 @@ journal_open(struct vnode *jvp, // only check if we're the current journal header magic value if (jnl->jhdr->magic == JOURNAL_HEADER_MAGIC) { - int orig_checksum = jnl->jhdr->checksum; - jnl->jhdr->checksum = 0; - if (orig_checksum != calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header))) { - printf("jnl: open: journal checksum is bad (0x%x != 0x%x)\n", orig_checksum, - calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header))); + if (orig_checksum != checksum) { + printf("jnl: open: journal checksum is bad (0x%x != 0x%x)\n", + orig_checksum, checksum); + //goto bad_journal; } } @@ -914,7 +1364,7 @@ journal_open(struct vnode *jvp, if ( jnl->jhdr->start <= 0 || jnl->jhdr->start > jnl->jhdr->size - || jnl->jhdr->start > 128*1024*1024) { + || jnl->jhdr->start > 1024*1024*1024) { printf("jnl: open: jhdr start looks bad (0x%llx max size 0x%llx)\n", jnl->jhdr->start, jnl->jhdr->size); goto bad_journal; @@ -922,13 +1372,13 @@ journal_open(struct vnode *jvp, if ( jnl->jhdr->end <= 0 || jnl->jhdr->end > jnl->jhdr->size - || jnl->jhdr->end > 128*1024*1024) { + || jnl->jhdr->end > 1024*1024*1024) { printf("jnl: open: jhdr end looks bad (0x%llx max size 0x%llx)\n", jnl->jhdr->end, jnl->jhdr->size); goto bad_journal; } - if (jnl->jhdr->size > 128*1024*1024) { + if (jnl->jhdr->size > 1024*1024*1024) { printf("jnl: open: jhdr size looks bad (0x%llx)\n", jnl->jhdr->size); goto bad_journal; } @@ -965,6 +1415,12 @@ journal_open(struct vnode *jvp, if (orig_blksz != 0) { VOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, FSCRED, NULL); phys_blksz = orig_blksz; + if (orig_blksz < jnl->jhdr->jhdr_size) { + printf("jnl: open: jhdr_size is %d but orig phys blk size is %d. switching.\n", + jnl->jhdr->jhdr_size, orig_blksz); + + jnl->jhdr->jhdr_size = orig_blksz; + } } // make sure this is in sync! 
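The endian handling above is ordered carefully: the stored checksum field is
itself byte-swapped, but the checksum is recomputed over the raw header bytes
before swap_journal_header() runs, because calc_checksum() folds the buffer
byte-at-a-time and is therefore sensitive to the on-disk byte image rather
than to host integer values.  A minimal userspace sketch of that ordering
follows; toy_checksum, swap32, MAGIC and the header layout are illustrative
stand-ins, not the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

struct hdr { uint32_t magic, checksum, other; };

#define MAGIC 0x4a4e4c78u			/* placeholder magic value */

static uint32_t swap32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

/* stand-in for calc_checksum(): walks the bytes in address order */
static uint32_t toy_checksum(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;
	while (len--)
		sum = (sum << 3) ^ (sum + *p++);
	return sum;
}

static void swap_hdr(struct hdr *h)	/* analogue of swap_journal_header() */
{
	h->magic = swap32(h->magic);
	h->checksum = swap32(h->checksum);
	h->other = swap32(h->other);
}

int main(void)
{
	/*
	 * Build the byte image an opposite-endian writer would leave on
	 * disk: every field reads back byte-swapped on this host.  The
	 * writer computed its checksum with the checksum field zeroed.
	 */
	struct hdr h = { swap32(MAGIC), 0, swap32(0x1234) };
	h.checksum = swap32(toy_checksum(&h, sizeof h));

	/* reader side, mirroring the journal_open() sequence above */
	uint32_t orig = h.checksum;
	h.checksum = 0;
	if (h.magic == swap32(MAGIC)) {
		orig = swap32(orig);	/* swap the stored field value... */
		uint32_t got = toy_checksum(&h, sizeof h); /* ...but sum the raw bytes first */
		swap_hdr(&h);		/* only now swap the header fields */
		printf("foreign-endian header: %s (magic now 0x%x)\n",
		       got == orig ? "ok" : "mismatch", (unsigned)h.magic);
	}
	return 0;
}

Swapping the stored checksum value rather than the freshly computed one is an
arbitrary but necessary choice of side: both values must end up expressed in
the same byte order before the comparison.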
@@ -973,10 +1429,7 @@ journal_open(struct vnode *jvp, // set this now, after we've replayed the journal size_up_tbuffer(jnl, tbuffer_size, phys_blksz); - if (semaphore_create(kernel_task, &jnl->jsem, SYNC_POLICY_FIFO, 1) != 0) { - printf("jnl: journal_create: failed to create journal semaphore..\n"); - goto bad_journal; - } + lockinit(&jnl->jlock, PINOD, "journal", 0, 0); return jnl; @@ -1007,11 +1460,9 @@ journal_close(journal *jnl) if (jnl->owner != current_act()) { int ret; - while ((ret = semaphore_wait(jnl->jsem)) == KERN_ABORTED) { - // just keep trying if we've been ^C'ed - } + ret = lockmgr(&jnl->jlock, LK_EXCLUSIVE|LK_RETRY, NULL, current_proc()); if (ret != 0) { - printf("jnl: close: sem wait failed.\n"); + printf("jnl: close: locking the journal (0x%x) failed %d.\n", jnl, ret); return; } } @@ -1081,7 +1532,6 @@ journal_close(journal *jnl) kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, jnl->jhdr->jhdr_size); jnl->jhdr = (void *)0xbeefbabe; - semaphore_destroy(kernel_task, jnl->jsem); FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL); } @@ -1139,6 +1589,8 @@ check_free_space(journal *jnl, int desired_size) // desired_size, free_space(jnl)); while (1) { + int old_start_empty; + if (counter++ == 5000) { dump_journal(jnl); panic("jnl: check_free_space: buffer flushing isn't working " @@ -1159,6 +1611,7 @@ check_free_space(journal *jnl, int desired_size) // here's where we lazily bump up jnl->jhdr->start. we'll consume // entries until there is enough space for the next transaction. // + old_start_empty = 1; simple_lock(&jnl->old_start_lock); for(i=0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) { int counter; @@ -1182,6 +1635,7 @@ check_free_space(journal *jnl, int desired_size) continue; } + old_start_empty = 0; jnl->jhdr->start = jnl->old_start[i]; jnl->old_start[i] = 0; if (free_space(jnl) > desired_size) { @@ -1194,6 +1648,19 @@ check_free_space(journal *jnl, int desired_size) // if we bumped the start, loop and try again if (i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) { continue; + } else if (old_start_empty) { + // + // if there is nothing in old_start anymore then we can + // bump the jhdr->start to be the same as active_start + // since it is possible there was only one very large + // transaction in the old_start array. if we didn't do + // this then jhdr->start would never get updated and we + // would wind up looping until we hit the panic at the + // start of the loop. 
+ // + jnl->jhdr->start = jnl->active_start; + write_journal_header(jnl); + continue; } @@ -1217,6 +1684,7 @@ journal_start_transaction(journal *jnl) { int ret; transaction *tr; + int prev_priv; CHECK_JOURNAL(jnl); @@ -1233,11 +1701,9 @@ journal_start_transaction(journal *jnl) return 0; } - while ((ret = semaphore_wait(jnl->jsem)) == KERN_ABORTED) { - // just keep looping if we've been ^C'ed - } + ret = lockmgr(&jnl->jlock, LK_EXCLUSIVE|LK_RETRY, NULL, current_proc()); if (ret != 0) { - printf("jnl: start_tr: sem wait failed.\n"); + printf("jnl: start_tr: locking the journal (0x%x) failed %d.\n", jnl, ret); return EINVAL; } @@ -1270,12 +1736,15 @@ journal_start_transaction(journal *jnl) memset(tr, 0, sizeof(transaction)); tr->tbuffer_size = jnl->tbuffer_size; + thread_wire_internal(host_priv_self(), current_act(), TRUE, &prev_priv); if (kmem_alloc(kernel_map, (vm_offset_t *)&tr->tbuffer, tr->tbuffer_size)) { FREE_ZONE(tr, sizeof(transaction), M_JNL_TR); printf("jnl: start transaction failed: no tbuffer mem\n"); ret = ENOMEM; + thread_wire_internal(host_priv_self(), current_act(), prev_priv, NULL); goto bad_start; } + thread_wire_internal(host_priv_self(), current_act(), prev_priv, NULL); // journal replay code checksum check depends on this. memset(tr->tbuffer, 0, BLHDR_CHECKSUM_SIZE); @@ -1298,7 +1767,7 @@ journal_start_transaction(journal *jnl) bad_start: jnl->owner = NULL; jnl->nested_count = 0; - semaphore_signal(jnl->jsem); + lockmgr(&jnl->jlock, LK_RELEASE, NULL, current_proc()); return ret; } @@ -1492,6 +1961,7 @@ journal_modify_block_end(journal *jnl, struct buf *bp) blhdr = prev; } else if (blhdr == NULL) { block_list_header *nblhdr; + int prev_priv; if (prev == NULL) { panic("jnl: modify block end: no way man, prev == NULL?!?, jnl 0x%x, bp 0x%x\n", jnl, bp); @@ -1504,10 +1974,12 @@ journal_modify_block_end(journal *jnl, struct buf *bp) // through prev->binfo[0].bnum. that's a skanky way to do things but // avoids having yet another linked list of small data structures to manage. + thread_wire_internal(host_priv_self(), current_act(), TRUE, &prev_priv); if (kmem_alloc(kernel_map, (vm_offset_t *)&nblhdr, tr->tbuffer_size)) { panic("jnl: end_tr: no space for new block tr @ 0x%x (total bytes: %d)!\n", tr, tr->total_bytes); } + thread_wire_internal(host_priv_self(), current_act(), prev_priv, NULL); // journal replay code checksum check depends on this. memset(nblhdr, 0, BLHDR_CHECKSUM_SIZE); @@ -1542,7 +2014,7 @@ journal_modify_block_end(journal *jnl, struct buf *bp) if (i >= blhdr->num_blocks) { vget(bp->b_vp, 0, current_proc()); - blhdr->binfo[i].bnum = bp->b_blkno; + blhdr->binfo[i].bnum = (off_t)((unsigned)bp->b_blkno); blhdr->binfo[i].bsize = bp->b_bufsize; blhdr->binfo[i].bp = bp; @@ -1764,7 +2236,7 @@ end_transaction(transaction *tr, int force_it) } // update this so we write out the correct physical block number! - blhdr->binfo[i].bnum = bp->b_blkno; + blhdr->binfo[i].bnum = (off_t)((unsigned)bp->b_blkno); } next = (block_list_header *)((long)blhdr->binfo[0].bnum); @@ -1897,6 +2369,7 @@ abort_transaction(journal *jnl, transaction *tr) int i, ret; block_list_header *blhdr, *next; struct buf *bp; + struct vnode *save_vp; // for each block list header, iterate over the blocks then // free up the memory associated with the block list. @@ -1925,9 +2398,12 @@ abort_transaction(journal *jnl, transaction *tr) // don't want these blocks going to disk. 
bp->b_flags &= ~(B_LOCKED|B_DELWRI); bp->b_flags |= B_INVAL; + save_vp = bp->b_vp; brelse(bp); + vrele(save_vp); + } else { printf("jnl: abort_tr: could not find block %Ld vp 0x%x!\n", blhdr->binfo[i].bnum, blhdr->binfo[i].bp); @@ -1992,7 +2468,7 @@ journal_end_transaction(journal *jnl) } jnl->owner = NULL; - semaphore_signal(jnl->jsem); + lockmgr(&jnl->jlock, LK_RELEASE, NULL, current_proc()); return EINVAL; } @@ -2009,7 +2485,7 @@ journal_end_transaction(journal *jnl) ret = end_transaction(tr, 0); jnl->owner = NULL; - semaphore_signal(jnl->jsem); + lockmgr(&jnl->jlock, LK_RELEASE, NULL, current_proc()); return ret; } @@ -2029,11 +2505,9 @@ journal_flush(journal *jnl) if (jnl->owner != current_act()) { int ret; - while ((ret = semaphore_wait(jnl->jsem)) == KERN_ABORTED) { - // just keep looping if we've ben ^C'ed - } + ret = lockmgr(&jnl->jlock, LK_EXCLUSIVE|LK_RETRY, NULL, current_proc()); if (ret != 0) { - printf("jnl: flush: sem wait failed.\n"); + printf("jnl: flush: locking the journal (0x%x) failed %d.\n", jnl, ret); return -1; } need_signal = 1; @@ -2050,7 +2524,7 @@ journal_flush(journal *jnl) } if (need_signal) { - semaphore_signal(jnl->jsem); + lockmgr(&jnl->jlock, LK_RELEASE, NULL, current_proc()); } return 0; diff --git a/bsd/vfs/vfs_journal.h b/bsd/vfs/vfs_journal.h index 9551218a0..fcc8a1975 100644 --- a/bsd/vfs/vfs_journal.h +++ b/bsd/vfs/vfs_journal.h @@ -37,6 +37,7 @@ #ifdef __APPLE_API_UNSTABLE #include +#include typedef struct block_info { off_t bnum; // block # on the file system device @@ -96,6 +97,8 @@ typedef struct journal_header { * In memory structure about the journal. */ typedef struct journal { + struct lock__bsd__ jlock; + struct vnode *jdev; // vnode of the device where the journal lives off_t jdev_offset; // byte offset to the start of the journal @@ -122,12 +125,14 @@ typedef struct journal { simple_lock_data_t old_start_lock; // guard access volatile off_t old_start[16]; // this is how we do lazy start update - semaphore_t jsem; + int last_flush_err; // last error from flushing the cache } journal; /* internal-only journal flags (top 16 bits) */ #define JOURNAL_CLOSE_PENDING 0x00010000 #define JOURNAL_INVALID 0x00020000 +#define JOURNAL_FLUSHCACHE_ERR 0x00040000 // means we already printed this err +#define JOURNAL_NEED_SWAP 0x00080000 // swap any data read from disk /* journal_open/create options are always in the low-16 bits */ #define JOURNAL_OPTION_FLAGS_MASK 0x0000ffff diff --git a/bsd/vfs/vfs_lookup.c b/bsd/vfs/vfs_lookup.c index 189de5062..4069d33a6 100644 --- a/bsd/vfs/vfs_lookup.c +++ b/bsd/vfs/vfs_lookup.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -64,6 +64,7 @@ */ #include +#include #include #include #include @@ -76,11 +77,14 @@ #include #include #include /* For _PC_NAME_MAX */ +#include #if KTRACE #include #endif +static void kdebug_lookup(struct vnode *dp, struct componentname *cnp); + /* * Convert a pathname into a pointer to a locked inode. 
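The journal_flush() hunk above shows the ownership-aware locking pattern used throughout this conversion from semaphores to lockmgr locks: the journal lock is taken only when the caller does not already own the journal, and a need_signal flag remembers whether this call is responsible for releasing it. A sketch with a toy try-lock standing in for lockmgr(); all names are illustrative:

    #include <stdio.h>

    /* toy lock */
    static int jlock_held = 0;
    static int lock_journal(void)    { if (jlock_held) return -1; jlock_held = 1; return 0; }
    static void unlock_journal(void) { jlock_held = 0; }

    static int journal_flush_model(int caller_is_owner)
    {
        int need_signal = 0;

        if (!caller_is_owner) {             /* jnl->owner != current_act() */
            if (lock_journal() != 0)
                return -1;                  /* lockmgr() failed */
            need_signal = 1;                /* we took it, so we release it */
        }

        /* ... end the active transaction and flush it here ... */

        if (need_signal)
            unlock_journal();
        return 0;
    }

    int main(void)
    {
        printf("%d\n", journal_flush_model(0));   /* takes and drops the lock */
        printf("%d\n", journal_flush_model(1));   /* already owner: no locking */
        return 0;
    }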
* @@ -113,6 +117,7 @@ namei(ndp) int error, linklen; struct componentname *cnp = &ndp->ni_cnd; struct proc *p = cnp->cn_proc; + char *tmppn; ndp->ni_cnd.cn_cred = ndp->ni_cnd.cn_proc->p_ucred; #if DIAGNOSTIC @@ -123,7 +128,7 @@ namei(ndp) if (cnp->cn_flags & OPMASK) panic ("namei: flags contaminated with nameiops"); #endif - fdp = cnp->cn_proc->p_fd; + fdp = p->p_fd; /* * Get a buffer for the name to be translated, and copy the @@ -133,28 +138,42 @@ namei(ndp) MALLOC_ZONE(cnp->cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); cnp->cn_pnlen = MAXPATHLEN; + cnp->cn_flags |= HASBUF; } if (ndp->ni_segflg == UIO_SYSSPACE) error = copystr(ndp->ni_dirp, cnp->cn_pnbuf, - MAXPATHLEN, &ndp->ni_pathlen); + MAXPATHLEN, (size_t *)&ndp->ni_pathlen); else error = copyinstr(ndp->ni_dirp, cnp->cn_pnbuf, - MAXPATHLEN, &ndp->ni_pathlen); + MAXPATHLEN, (size_t *)&ndp->ni_pathlen); + + /* If we are auditing the kernel pathname, save the user pathname */ + if (cnp->cn_flags & AUDITVNPATH1) + AUDIT_ARG(upath, p, cnp->cn_pnbuf, ARG_UPATH1); + if (cnp->cn_flags & AUDITVNPATH2) + AUDIT_ARG(upath, p, cnp->cn_pnbuf, ARG_UPATH2); + /* * Do not allow empty pathnames */ if (!error && *cnp->cn_pnbuf == '\0') error = ENOENT; + if (!error && ((dp = fdp->fd_cdir) == NULL)) + error = EPERM; /* 3382843 */ + if (error) { - _FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + tmppn = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmppn, cnp->cn_pnlen, M_NAMEI); ndp->ni_vp = NULL; return (error); } ndp->ni_loopcnt = 0; #if KTRACE - if (KTRPOINT(cnp->cn_proc, KTR_NAMEI)) - ktrnamei(cnp->cn_proc->p_tracep, cnp->cn_pnbuf); + if (KTRPOINT(p, KTR_NAMEI)) + ktrnamei(p->p_tracep, cnp->cn_pnbuf); #endif /* @@ -162,7 +181,13 @@ namei(ndp) */ if ((ndp->ni_rootdir = fdp->fd_rdir) == NULL) ndp->ni_rootdir = rootvnode; - dp = fdp->fd_cdir; + if (ndp->ni_cnd.cn_flags & USEDVP) { + dp = ndp->ni_dvp; + ndp->ni_dvp = NULL; + } else { + dp = fdp->fd_cdir; + } + VREF(dp); for (;;) { /* @@ -181,7 +206,11 @@ namei(ndp) } ndp->ni_startdir = dp; if (error = lookup(ndp)) { - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + long len = cnp->cn_pnlen; + tmppn = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmppn, len, M_NAMEI); return (error); } /* @@ -189,8 +218,10 @@ namei(ndp) */ if ((cnp->cn_flags & ISSYMLINK) == 0) { if ((cnp->cn_flags & (SAVENAME | SAVESTART)) == 0) { - FREE_ZONE(cnp->cn_pnbuf, - cnp->cn_pnlen, M_NAMEI); + tmppn = cnp->cn_pnbuf; + cnp->cn_pnbuf = NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmppn, cnp->cn_pnlen, M_NAMEI); } else { cnp->cn_flags |= HASBUF; } @@ -218,28 +249,35 @@ namei(ndp) auio.uio_resid = MAXPATHLEN; if (error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred)) { if (ndp->ni_pathlen > 1) - _FREE_ZONE(cp, MAXPATHLEN, M_NAMEI); + FREE_ZONE(cp, MAXPATHLEN, M_NAMEI); break; } linklen = MAXPATHLEN - auio.uio_resid; if (linklen + ndp->ni_pathlen >= MAXPATHLEN) { if (ndp->ni_pathlen > 1) - _FREE_ZONE(cp, MAXPATHLEN, M_NAMEI); + FREE_ZONE(cp, MAXPATHLEN, M_NAMEI); error = ENAMETOOLONG; break; } if (ndp->ni_pathlen > 1) { + long len = cnp->cn_pnlen; + tmppn = cnp->cn_pnbuf; bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen); - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); cnp->cn_pnbuf = cp; cnp->cn_pnlen = MAXPATHLEN; + FREE_ZONE(tmppn, len, M_NAMEI); } else cnp->cn_pnbuf[linklen] = '\0'; ndp->ni_pathlen += linklen; vput(ndp->ni_vp); dp = ndp->ni_dvp; } - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + + tmppn = cnp->cn_pnbuf; + cnp->cn_pnbuf = 
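The tmppn idiom introduced throughout namei() above always detaches the pathname buffer from the componentname before freeing it: cn_pnbuf is nulled and HASBUF cleared first, so no later code path (such as VOP_ABORTOP keying off HASBUF) can see a dangling pointer that looks live. A minimal sketch of the ordering, with toy_cname and free_pathbuf as illustrative names:

    #include <stdlib.h>
    #include <stdio.h>

    #define HASBUF 0x1

    struct toy_cname {
        char *cn_pnbuf;   /* pathname buffer */
        int   cn_flags;
    };

    static void free_pathbuf(struct toy_cname *cnp)
    {
        char *tmppn = cnp->cn_pnbuf;

        cnp->cn_pnbuf = NULL;        /* detach first ... */
        cnp->cn_flags &= ~HASBUF;
        free(tmppn);                 /* ... free last */
    }

    int main(void)
    {
        struct toy_cname cn = { malloc(16), HASBUF };

        free_pathbuf(&cn);
        printf("pnbuf=%p flags=%d\n", (void *)cn.cn_pnbuf, cn.cn_flags);
        return 0;
    }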
NULL; + cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmppn, cnp->cn_pnlen, M_NAMEI); + vrele(ndp->ni_dvp); vput(ndp->ni_vp); ndp->ni_vp = NULL; @@ -297,7 +335,7 @@ lookup(ndp) int wantparent; /* 1 => wantparent or lockparent flag */ int dp_unlocked = 0; /* 1 => dp already VOP_UNLOCK()-ed */ int rdonly; /* lookup read-only flag bit */ - int trailing_slash; + int trailing_slash = 0; int error = 0; struct componentname *cnp = &ndp->ni_cnd; struct proc *p = cnp->cn_proc; @@ -318,28 +356,45 @@ lookup(ndp) dp = ndp->ni_startdir; ndp->ni_startdir = NULLVP; vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p); + cnp->cn_consume = 0; dirloop: /* * Search a new directory. * * The cn_hash value is for use by vfs_cache. - * Check pathconf for maximun length of name * The last component of the filename is left accessible via * cnp->cn_nameptr for callers that need the name. Callers needing * the name set the SAVENAME flag. When done, they assume * responsibility for freeing the pathname buffer. */ - cnp->cn_consume = 0; - cnp->cn_hash = 0; - for (cp = cnp->cn_nameptr, i=1; *cp != 0 && *cp != '/'; i++, cp++) - cnp->cn_hash += (unsigned char)*cp * i; + { + register unsigned int hash; + register unsigned int ch; + register int i; + + hash = 0; + cp = cnp->cn_nameptr; + ch = *cp; + if (ch == '\0') { + cnp->cn_namelen = 0; + goto emptyname; + } + + for (i = 1; (ch != '/') && (ch != '\0'); i++) { + hash += ch * i; + ch = *(++cp); + } + cnp->cn_hash = hash; + } cnp->cn_namelen = cp - cnp->cn_nameptr; - if (VOP_PATHCONF(dp, _PC_NAME_MAX, &namemax)) - namemax = NAME_MAX; - if (cnp->cn_namelen > namemax) { - error = ENAMETOOLONG; - goto bad; + if (cnp->cn_namelen > NCHNAMLEN) { + if (VOP_PATHCONF(dp, _PC_NAME_MAX, &namemax)) + namemax = NAME_MAX; + if (cnp->cn_namelen > namemax) { + error = ENAMETOOLONG; + goto bad; + } } #ifdef NAMEI_DIAGNOSTIC { char c = *cp; @@ -371,43 +426,12 @@ dirloop: cnp->cn_flags |= MAKEENTRY; if (*cp == '\0' && docache == 0) cnp->cn_flags &= ~MAKEENTRY; - if (cnp->cn_namelen == 2 && - cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.') - cnp->cn_flags |= ISDOTDOT; - else - cnp->cn_flags &= ~ISDOTDOT; + if (*ndp->ni_next == 0) cnp->cn_flags |= ISLASTCN; else cnp->cn_flags &= ~ISLASTCN; - - /* - * Check for degenerate name (e.g. / or "") - * which is a way of talking about a directory, - * e.g. like "/." or ".". - */ - if (cnp->cn_nameptr[0] == '\0') { - if (dp->v_type != VDIR) { - error = ENOTDIR; - goto bad; - } - if (cnp->cn_nameiop != LOOKUP) { - error = EISDIR; - goto bad; - } - if (wantparent) { - ndp->ni_dvp = dp; - VREF(dp); - } - ndp->ni_vp = dp; - if (!(cnp->cn_flags & (LOCKPARENT | LOCKLEAF))) - VOP_UNLOCK(dp, 0, p); - if (cnp->cn_flags & SAVESTART) - panic("lookup: SAVESTART"); - return (0); - } - /* * Handle "..": two special cases. * 1. If at root directory (e.g. after chroot) @@ -418,7 +442,10 @@ dirloop: * vnode which was mounted on so we take the * .. in the other file system. */ - if (cnp->cn_flags & ISDOTDOT) { + if (cnp->cn_namelen == 2 && + cnp->cn_nameptr[1] == '.' 
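The inlined hash loop above computes cn_hash as a position-weighted sum: each byte is multiplied by its 1-based position, so permutations such as "ab" and "ba" hash differently, and the loop stops at '/' or NUL like the component scan itself. The same computation extracted into a standalone function:

    #include <stdio.h>

    static unsigned int name_hash(const char *cp)
    {
        unsigned int hash = 0;
        unsigned int ch = (unsigned char)*cp;
        int i;

        for (i = 1; ch != '/' && ch != '\0'; i++) {
            hash += ch * i;
            ch = (unsigned char)*(++cp);
        }
        return hash;
    }

    int main(void)
    {
        /* position weighting distinguishes permutations */
        printf("%u %u\n", name_hash("ab"), name_hash("ba"));   /* 293 292 */
        return 0;
    }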
&& cnp->cn_nameptr[0] == '.') { + cnp->cn_flags |= ISDOTDOT; + for (;;) { if (dp == ndp->ni_rootdir || dp == rootvnode) { ndp->ni_dvp = dp; @@ -440,6 +467,8 @@ dirloop: VREF(dp); vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p); } + } else { + cnp->cn_flags &= ~ISDOTDOT; } /* @@ -508,6 +537,19 @@ unionlookup: ndp->ni_next += cnp->cn_consume; ndp->ni_pathlen -= cnp->cn_consume; cnp->cn_consume = 0; + } else { + int isdot_or_dotdot; + + isdot_or_dotdot = (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') || (cnp->cn_flags & ISDOTDOT); + + if (VNAME(ndp->ni_vp) == NULL && isdot_or_dotdot == 0) { + VNAME(ndp->ni_vp) = add_name(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0); + } + if (VPARENT(ndp->ni_vp) == NULL && isdot_or_dotdot == 0) { + if (vget(ndp->ni_dvp, 0, p) == 0) { + VPARENT(ndp->ni_vp) = ndp->ni_dvp; + } + } } dp = ndp->ni_vp; @@ -517,8 +559,10 @@ unionlookup: */ while (dp->v_type == VDIR && (mp = dp->v_mountedhere) && (cnp->cn_flags & NOCROSSMOUNT) == 0) { - if (vfs_busy(mp, 0, 0, p)) - continue; + if (vfs_busy(mp, LK_NOWAIT, 0, p)) { + error = ENOENT; + goto bad2; + } VOP_UNLOCK(dp, 0, p); error = VFS_ROOT(mp, &tdp); vfs_unbusy(mp, p); @@ -543,9 +587,12 @@ unionlookup: /* * Check for bogus trailing slashes. */ - if (trailing_slash && dp->v_type != VDIR) { - error = ENOTDIR; - goto bad2; + if (trailing_slash) { + if (dp->v_type != VDIR) { + error = ENOTDIR; + goto bad2; + } + trailing_slash = 0; } nextname: @@ -554,7 +601,8 @@ nextname: * continue at next component, else return. */ if (*ndp->ni_next == '/') { - cnp->cn_nameptr = ndp->ni_next; + cnp->cn_nameptr = ndp->ni_next + 1; + ndp->ni_pathlen--; while (*cnp->cn_nameptr == '/') { cnp->cn_nameptr++; ndp->ni_pathlen--; @@ -577,12 +625,47 @@ nextname: } if (!wantparent) vrele(ndp->ni_dvp); + if (cnp->cn_flags & AUDITVNPATH1) + AUDIT_ARG(vnpath, dp, ARG_VNODE1); + else if (cnp->cn_flags & AUDITVNPATH2) + AUDIT_ARG(vnpath, dp, ARG_VNODE2); if ((cnp->cn_flags & LOCKLEAF) == 0) VOP_UNLOCK(dp, 0, p); if (kdebug_enable) kdebug_lookup(dp, cnp); return (0); +emptyname: + /* + * A degenerate name (e.g. / or "") which is a way of + * talking about a directory, e.g. like "/." or ".". + */ + if (dp->v_type != VDIR) { + error = ENOTDIR; + goto bad; + } + if (cnp->cn_nameiop != LOOKUP) { + error = EISDIR; + goto bad; + } + if (wantparent) { + ndp->ni_dvp = dp; + VREF(dp); + } + cnp->cn_flags &= ~ISDOTDOT; + cnp->cn_flags |= ISLASTCN; + ndp->ni_next = cp; + ndp->ni_vp = dp; + if (cnp->cn_flags & AUDITVNPATH1) + AUDIT_ARG(vnpath, dp, ARG_VNODE1); + else if (cnp->cn_flags & AUDITVNPATH2) + AUDIT_ARG(vnpath, dp, ARG_VNODE2); + if (!(cnp->cn_flags & (LOCKPARENT | LOCKLEAF))) + VOP_UNLOCK(dp, 0, p); + if (cnp->cn_flags & SAVESTART) + panic("lookup: SAVESTART"); + return (0); + bad2: if ((cnp->cn_flags & LOCKPARENT) && *ndp->ni_next == '\0') VOP_UNLOCK(ndp->ni_dvp, 0, p); @@ -615,7 +698,7 @@ relookup(dvp, vpp, cnp) int rdonly; /* lookup read-only flag bit */ int error = 0; #ifdef NAMEI_DIAGNOSTIC - int newhash; /* DEBUG: check name hash */ + int i, newhash; /* DEBUG: check name hash */ char *cp; /* DEBUG: check name ptr/len */ #endif @@ -643,8 +726,8 @@ relookup(dvp, vpp, cnp) * responsibility for freeing the pathname buffer. 
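The isdot_or_dotdot test in the unionlookup hunk above keeps "." and ".." out of the VNAME/VPARENT cache, since neither names the vnode relative to its parent directory. A sketch of the predicate; the kernel version reuses the already-computed ISDOTDOT flag for "..", while this standalone rendition compares the bytes directly:

    #include <stdio.h>

    static int isdot_or_dotdot(const char *name, int namelen)
    {
        return (namelen == 1 && name[0] == '.') ||
               (namelen == 2 && name[0] == '.' && name[1] == '.');
    }

    int main(void)
    {
        printf("%d %d %d\n",
               isdot_or_dotdot(".", 1),
               isdot_or_dotdot("..", 2),
               isdot_or_dotdot("a.", 2));   /* 1 1 0 */
        return 0;
    }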
*/ #ifdef NAMEI_DIAGNOSTIC - for (newhash = 0, cp = cnp->cn_nameptr; *cp != 0 && *cp != '/'; cp++) - newhash += (unsigned char)*cp; + for (i=1, newhash = 0, cp = cnp->cn_nameptr; *cp != 0 && *cp != '/'; cp++) + newhash += (unsigned char)*cp * i; if (newhash != cnp->cn_hash) panic("relookup: bad hash"); if (cnp->cn_namelen != cp - cnp->cn_nameptr) @@ -748,11 +831,12 @@ bad: #define NUMPARMS 23 +static void kdebug_lookup(dp, cnp) - struct vnode *dp; + struct vnode *dp; struct componentname *cnp; { - register int i, n; + register int i, n; register int dbg_namelen; register int save_dbg_namelen; register char *dbg_nameptr; @@ -802,7 +886,7 @@ kdebug_lookup(dp, cnp) entries, we must mark the start of the path's string. */ KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW,36)) | DBG_FUNC_START, - dp, dbg_parms[0], dbg_parms[1], dbg_parms[2], 0); + (unsigned int)dp, dbg_parms[0], dbg_parms[1], dbg_parms[2], 0); for (dbg_namelen = save_dbg_namelen-12, i=3; dbg_namelen > 0; diff --git a/bsd/vfs/vfs_subr.c b/bsd/vfs/vfs_subr.c index 742af69fd..8207964e4 100644 --- a/bsd/vfs/vfs_subr.c +++ b/bsd/vfs/vfs_subr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -88,6 +88,12 @@ #include #include #include +#include +#include + +#include +#include + #include @@ -166,7 +172,7 @@ struct mntlist mountlist; /* mounted filesystem list */ #define VORECLAIM_ENABLE(vp) \ do { \ if (ISSET((vp)->v_flag, VORECLAIM)) \ - panic("vm object raclaim already"); \ + panic("vm_object_reclaim already"); \ SET((vp)->v_flag, VORECLAIM); \ } while(0) @@ -399,8 +405,7 @@ vfs_getvfs(fsid) register struct mount *mp; simple_lock(&mountlist_slock); - for (mp = mountlist.cqh_first; mp != (void *)&mountlist; - mp = mp->mnt_list.cqe_next) { + CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) { if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { simple_unlock(&mountlist_slock); @@ -431,7 +436,7 @@ static u_short xxxfs_mntid; ++xxxfs_mntid; tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid); tfsid.val[1] = mtype; - if (mountlist.cqh_first != (void *)&mountlist) { + if (!CIRCLEQ_EMPTY(&mountlist)) { while (vfs_getvfs(&tfsid)) { tfsid.val[0]++; xxxfs_mntid++; @@ -541,8 +546,8 @@ retry: simple_unlock(&vp->v_interlock); reclaimhits++; } else - break; - } + break; + } } /* @@ -591,15 +596,37 @@ retry: else vp->v_ubcinfo = 0; + if (vp->v_flag & VHASDIRTY) + cluster_release(vp); + + // make sure all these fields are cleared out as the + // name/parent stuff uses them and assumes they're + // cleared to null/0. 
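Note that the NAMEI_DIAGNOSTIC recomputation above initializes i to 1 but never advances it inside the loop, so it matches the position-weighted cn_hash computed in lookup() only for one-character names; a consistent check has to step i per character. A corrected user-space sketch of the recomputation:

    #include <assert.h>

    /* position-weighted recomputation that actually matches lookup():
     * the multiplier i advances with every character */
    static unsigned int check_hash(const char *cp)
    {
        unsigned int newhash = 0;
        int i;

        for (i = 1; *cp != '\0' && *cp != '/'; i++, cp++)
            newhash += (unsigned char)*cp * i;
        return newhash;
    }

    int main(void)
    {
        assert(check_hash("ab") == (unsigned int)('a' * 1 + 'b' * 2));
        assert(check_hash("a")  == (unsigned int)'a');   /* 1-char names agree either way */
        return 0;
    }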
+ if (vp->v_scmap != NULL) { + panic("getnewvnode: vp @ 0x%x has non-null scmap.\n", vp); + } + vp->v_un.vu_name = NULL; + vp->v_scdirty = 0; + vp->v_un1.v_cl.v_pad = 0; + + vp->v_lastr = -1; vp->v_ralen = 0; vp->v_maxra = 0; - vp->v_lastw = 0; vp->v_ciosiz = 0; - vp->v_cstart = 0; vp->v_clen = 0; vp->v_socket = 0; + /* we may have blocked, re-evaluate state */ + simple_lock(&vnode_free_list_slock); + if (VONLIST(vp)) { + if (vp->v_usecount == 0) + VREMFREE("getnewvnode", vp); + else if (ISSET((vp)->v_flag, VUINACTIVE)) + VREMINACTIVE("getnewvnode", vp); + } + simple_unlock(&vnode_free_list_slock); + done: vp->v_flag = VSTANDARD; vp->v_type = VNON; @@ -680,18 +707,8 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo) if (error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) { return (error); } - - // XXXdbg - if there are dirty bufs, wait for 'em if they're busy - for (bp=vp->v_dirtyblkhd.lh_first; bp; bp=nbp) { - nbp = bp->b_vnbufs.le_next; - if (ISSET(bp->b_flags, B_BUSY)) { - SET(bp->b_flags, B_WANTED); - tsleep((caddr_t)bp, slpflag | (PRIBIO + 1), "vinvalbuf", 0); - nbp = vp->v_dirtyblkhd.lh_first; - } else { - panic("vinvalbuf: dirty buf (vp 0x%x, bp 0x%x)", vp, bp); - } - } + if (vp->v_dirtyblkhd.lh_first) + panic("vinvalbuf: dirty bufs (vp 0x%x, bp 0x%x)", vp, vp->v_dirtyblkhd.lh_first); } for (;;) { @@ -735,7 +752,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo) } if (bp->b_flags & B_LOCKED) { - panic("vinvalbuf: bp @ 0x%x is locked!\n", bp); + panic("vinvalbuf: bp @ 0x%x is locked!", bp); break; } else { SET(bp->b_flags, B_INVAL); @@ -799,14 +816,13 @@ checkalias(nvp, nvp_rdev, mp) struct proc *p = current_proc(); /* XXX */ struct vnode *vp; struct vnode **vpp; - struct specinfo * bufhold; - int buffree = 1; + struct specinfo *specinfop; if (nvp->v_type != VBLK && nvp->v_type != VCHR) return (NULLVP); - bufhold = (struct specinfo *)_MALLOC_ZONE(sizeof(struct specinfo), - M_VNODE, M_WAITOK); + MALLOC_ZONE(specinfop, struct specinfo *, sizeof(struct specinfo), + M_SPECINFO, M_WAITOK); vpp = &speclisth[SPECHASH(nvp_rdev)]; loop: simple_lock(&spechash_slock); @@ -829,8 +845,8 @@ loop: break; } if (vp == NULL || vp->v_tag != VT_NON) { - nvp->v_specinfo = bufhold; - buffree = 0; /* buffer used */ + nvp->v_specinfo = specinfop; + specinfop = 0; /* buffer used */ bzero(nvp->v_specinfo, sizeof(struct specinfo)); nvp->v_rdev = nvp_rdev; nvp->v_hashchain = vpp; @@ -854,8 +870,8 @@ loop: vp->v_tag = nvp->v_tag; nvp->v_type = VNON; insmntque(vp, mp); - if (buffree) - _FREE_ZONE((void *)bufhold, sizeof (struct specinfo), M_VNODE); + if (specinfop) + FREE_ZONE((void *)specinfop, sizeof(struct specinfo), M_SPECINFO); return (vp); } @@ -876,6 +892,9 @@ vget(vp, flags, p) struct proc *p; { int error = 0; + u_long vpid; + + vpid = vp->v_id; // save off the original v_id retry: @@ -901,7 +920,7 @@ retry: if (ISSET(vp->v_flag, VTERMINATE)) { SET(vp->v_flag, VTERMWANT); simple_unlock(&vp->v_interlock); - (void)tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "vclean", 0); + (void)tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "vget1", 0); return (ENOENT); } @@ -910,30 +929,19 @@ retry: * wait for it to finish initialization */ if (ISSET(vp->v_flag, VUINIT)) { - if (ISSET(vp->v_flag, VUINIT)) { - SET(vp->v_flag, VUWANT); - simple_unlock(&vp->v_interlock); - (void) tsleep((caddr_t)vp, PINOD, "vget2", 0); - goto retry; - } + SET(vp->v_flag, VUWANT); + simple_unlock(&vp->v_interlock); + (void) tsleep((caddr_t)vp, PINOD, "vget2", 0); + goto retry; } simple_lock(&vnode_free_list_slock); - if (vp->v_usecount == 0) { - /* If on the 
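The "we may have blocked, re-evaluate state" hunk above is one instance of a pattern this patch adds in several places (getnewvnode, vget, vclean): any call that can block may let the vnode migrate onto the free or inactive list behind the caller's back, so list membership is re-checked under the list lock afterwards instead of being assumed. A toy single-threaded rendition; the enum, fields, and helpers are illustrative:

    #include <stdio.h>

    enum list_state { ON_NO_LIST, ON_FREE_LIST, ON_INACTIVE_LIST };

    struct toy_vnode { enum list_state state; int v_usecount; };

    static void maybe_block(struct toy_vnode *vp)
    {
        vp->state = ON_INACTIVE_LIST;   /* someone moved us while we slept */
    }

    static void remove_from_lists(struct toy_vnode *vp)
    {
        /* in the kernel: simple_lock(&vnode_free_list_slock) around this */
        if (vp->state == ON_FREE_LIST && vp->v_usecount == 0)
            vp->state = ON_NO_LIST;     /* VREMFREE */
        else if (vp->state == ON_INACTIVE_LIST)
            vp->state = ON_NO_LIST;     /* VREMINACTIVE */
    }

    int main(void)
    {
        struct toy_vnode vn = { ON_NO_LIST, 1 };

        maybe_block(&vn);        /* stands in for kmem_alloc/VOP_LOCK/etc. */
        remove_from_lists(&vn);  /* re-evaluate, don't assume */
        printf("state=%d\n", (int)vn.state);
        return 0;
    }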
free list, remove it from there */ - if (VONLIST(vp)) + if (VONLIST(vp)) { + if (vp->v_usecount == 0) VREMFREE("vget", vp); - } else { - /* If on the inactive list, remove it from there */ - if ((vp->v_usecount == 1) && UBCINFOEXISTS(vp)) { - if (VONLIST(vp)) - VREMINACTIVE("vget", vp); - } + else if (ISSET((vp)->v_flag, VUINACTIVE)) + VREMINACTIVE("vget", vp); } - - /* The vnode should not be on the inactive list here */ - VINACTIVECHECK("vget", vp, 0); - simple_unlock(&vnode_free_list_slock); if (++vp->v_usecount <= 0) @@ -944,7 +952,7 @@ retry: */ if (UBCISVALID(vp) && !ubc_issetflags(vp, UI_HASOBJREF)) { simple_unlock(&vp->v_interlock); - if (ubc_getobject(vp, UBC_HOLDOBJECT)) { + if (ubc_getobject(vp, UBC_HOLDOBJECT) == MEMORY_OBJECT_CONTROL_NULL) { error = ENOENT; goto errout; } @@ -954,21 +962,44 @@ retry: if (flags & LK_TYPE_MASK) { if (error = vn_lock(vp, flags | LK_INTERLOCK, p)) goto errout; + if (vpid != vp->v_id) { // make sure it's still the same vnode + vput(vp); + return ENOENT; + } return (0); } if ((flags & LK_INTERLOCK) == 0) simple_unlock(&vp->v_interlock); + + if (vpid != vp->v_id) { // make sure it's still the same vnode + vrele(vp); + return ENOENT; + } + return (0); errout: + simple_lock(&vp->v_interlock); + + /* + * we may have blocked. Re-evaluate the state + */ + simple_lock(&vnode_free_list_slock); + if (VONLIST(vp)) { + if (vp->v_usecount == 0) + VREMFREE("vget", vp); + else if (ISSET((vp)->v_flag, VUINACTIVE)) + VREMINACTIVE("vget", vp); + } + simple_unlock(&vnode_free_list_slock); + /* * If the vnode was not active in the first place * must not call vrele() as VOP_INACTIVE() is not * required. * So inlined part of vrele() here. */ - simple_lock(&vp->v_interlock); if (--vp->v_usecount == 1) { if (UBCINFOEXISTS(vp)) { vinactive(vp); @@ -991,7 +1022,7 @@ errout: * Get a pager reference on the particular vnode. * * This is called from ubc_info_init() and it is asumed that - * the vnode is neither on the free list on on the inactive list. + * the vnode is not on the free list. * It is also assumed that the vnode is neither being recycled * by vgonel nor being terminated by vnode_pager_vrele(). 
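The vpid checks added to vget() above use v_id as a generation count: it is bumped when a vnode is recycled, so saving it before a blocking lock and comparing afterwards detects that the vnode now names a different file, in which case the reference is dropped and ENOENT returned. An illustrative user-space rendition:

    #include <stdio.h>

    struct toy_vnode { unsigned long v_id; };

    static void recycle(struct toy_vnode *vp) { vp->v_id++; }

    static int lock_vnode_checked(struct toy_vnode *vp, int simulate_recycle)
    {
        unsigned long vpid = vp->v_id;   /* save off the original v_id */

        if (simulate_recycle)            /* stands in for blocking in vn_lock() */
            recycle(vp);

        if (vpid != vp->v_id)            /* make sure it's still the same vnode */
            return -1;                   /* ENOENT in the kernel */
        return 0;
    }

    int main(void)
    {
        struct toy_vnode vn = { 7 };

        printf("%d %d\n", lock_vnode_checked(&vn, 0),
                          lock_vnode_checked(&vn, 1));   /* 0 -1 */
        return 0;
    }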
* @@ -1002,25 +1033,22 @@ vnode_pager_vget(vp) struct vnode *vp; { simple_lock(&vp->v_interlock); - if (UBCINFOMISSING(vp)) - panic("vnode_pager_vget: stolen ubc_info"); - - if (!UBCINFOEXISTS(vp)) - panic("vnode_pager_vget: lost ubc_info"); - if ((vp->v_flag & VXLOCK) || (vp->v_flag & VORECLAIM)) - panic("vnode_pager_vget: already being reclaimd"); + UBCINFOCHECK("vnode_pager_vget", vp); - if (ISSET(vp->v_flag, VTERMINATE)) - panic("vnode_pager_vget: already being terminated"); + if (ISSET(vp->v_flag, (VXLOCK|VORECLAIM|VTERMINATE))) + panic("%s: dying vnode", "vnode_pager_vget"); simple_lock(&vnode_free_list_slock); - /* The vnode should not be on ANY list */ - if (VONLIST(vp)) - panic("vnode_pager_vget: still on the list"); + /* The vnode should not be on free list */ + if (VONLIST(vp)) { + if (vp->v_usecount == 0) + panic("%s: still on list", "vnode_pager_vget"); + else if (ISSET((vp)->v_flag, VUINACTIVE)) + VREMINACTIVE("vnode_pager_vget", vp); + } /* The vnode should not be on the inactive list here */ - VINACTIVECHECK("vnode_pager_vget", vp, 0); simple_unlock(&vnode_free_list_slock); /* After all those checks, now do the real work :-) */ @@ -1066,8 +1094,8 @@ vop_nolock(ap) if (vp->v_vnlock == NULL) { if ((flags & LK_TYPE_MASK) == LK_DRAIN) return (0); - MALLOC_ZONE(vp->v_vnlock, struct lock__bsd__ *, - sizeof(struct lock__bsd__), M_VNODE, M_WAITOK); + MALLOC(vp->v_vnlock, struct lock__bsd__ *, + sizeof(struct lock__bsd__), M_TEMP, M_WAITOK); lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0); } switch (flags & LK_TYPE_MASK) { @@ -1147,21 +1175,41 @@ vref(vp) panic("vref used where vget required"); /* If on the inactive list, remove it from there */ - if ((vp->v_usecount == 1) && UBCINFOEXISTS(vp)) { - if (VONLIST(vp)) { - simple_lock(&vnode_free_list_slock); - VREMINACTIVE("vref", vp); - simple_unlock(&vnode_free_list_slock); - } - } - /* The vnode should not be on the inactive list here */ - VINACTIVECHECK("vref", vp, 0); + simple_lock(&vnode_free_list_slock); + if (ISSET((vp)->v_flag, VUINACTIVE)) + VREMINACTIVE("vref", vp); + simple_unlock(&vnode_free_list_slock); if (++vp->v_usecount <= 0) panic("vref v_usecount"); simple_unlock(&vp->v_interlock); } +static void +clean_up_name_parent_ptrs(struct vnode *vp) +{ + if (VNAME(vp) || VPARENT(vp)) { + char *tmp1; + struct vnode *tmp2; + + // do it this way so we don't block before clearing + // these fields. + tmp1 = VNAME(vp); + tmp2 = VPARENT(vp); + VNAME(vp) = NULL; + VPARENT(vp) = NULL; + + if (tmp1) { + remove_name(tmp1); + } + + if (tmp2) { + vrele(tmp2); + } + } +} + + /* * put the vnode on appropriate free list. * called with v_interlock held. @@ -1170,6 +1218,13 @@ static void vfree(vp) struct vnode *vp; { + funnel_t *curflock; + extern int disable_funnel; + + if ((curflock = thread_funnel_get()) != kernel_flock && + !(disable_funnel && curflock != THR_FUNNEL_NULL)) + panic("Entering vfree() without kernel funnel"); + /* * if the vnode is not obtained by calling getnewvnode() we * are not responsible for the cleanup. Just return. 
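clean_up_name_parent_ptrs() above extends the detach-before-release discipline to two fields at once: both VNAME and VPARENT are snapshotted into temporaries and cleared before either remove_name() or vrele() runs, because those release calls can block and another thread must never observe half-cleared state. A sketch with illustrative types and release stubs:

    #include <stdlib.h>
    #include <stdio.h>

    struct toy_vnode {
        char *v_name;
        struct toy_vnode *v_parent;
    };

    static void release_name(char *name)            { free(name); }   /* may block */
    static void release_parent(struct toy_vnode *vp) { (void)vp; }    /* may block */

    static void clear_name_parent(struct toy_vnode *vp)
    {
        char *tmp_name = vp->v_name;
        struct toy_vnode *tmp_parent = vp->v_parent;

        vp->v_name = NULL;       /* clear both fields first ... */
        vp->v_parent = NULL;

        if (tmp_name)            /* ... then do the possibly-blocking drops */
            release_name(tmp_name);
        if (tmp_parent)
            release_parent(tmp_parent);
    }

    int main(void)
    {
        struct toy_vnode vn = { malloc(8), NULL };

        clear_name_parent(&vn);
        printf("name=%p parent=%p\n", (void *)vn.v_name, (void *)vn.v_parent);
        return 0;
    }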
@@ -1184,8 +1239,11 @@ vfree(vp) /* insert at tail of LRU list or at head if VAGE is set */ simple_lock(&vnode_free_list_slock); + // make sure the name & parent pointers get cleared out +// clean_up_name_parent_ptrs(vp); + if (VONLIST(vp)) - panic("vfree: vnode still on list"); + panic("%s: vnode still on list", "vfree"); if (vp->v_flag & VAGE) { TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); @@ -1205,6 +1263,13 @@ static void vinactive(vp) struct vnode *vp; { + funnel_t *curflock; + extern int disable_funnel; + + if ((curflock = thread_funnel_get()) != kernel_flock && + !(disable_funnel && curflock != THR_FUNNEL_NULL)) + panic("Entering vinactive() without kernel funnel"); + if (!UBCINFOEXISTS(vp)) panic("vinactive: not a UBC vnode"); @@ -1214,7 +1279,7 @@ vinactive(vp) simple_lock(&vnode_free_list_slock); if (VONLIST(vp)) - panic("vinactive: vnode still on list"); + panic("%s: vnode still on list", "vinactive"); VINACTIVECHECK("vinactive", vp, 0); TAILQ_INSERT_TAIL(&vnode_inactive_list, vp, v_freelist); @@ -1257,8 +1322,10 @@ vput(vp) vp->v_usecount, vp->v_writecount); } #endif - if (ISSET((vp)->v_flag, VUINACTIVE) && VONLIST(vp)) - VREMINACTIVE("vrele", vp); + simple_lock(&vnode_free_list_slock); + if (ISSET((vp)->v_flag, VUINACTIVE)) + VREMINACTIVE("vref", vp); + simple_unlock(&vnode_free_list_slock); simple_unlock(&vp->v_interlock); VOP_INACTIVE(vp, p); @@ -1287,11 +1354,18 @@ vrele(vp) struct vnode *vp; { struct proc *p = current_proc(); /* XXX */ + funnel_t *curflock; + extern int disable_funnel; + + if ((curflock = thread_funnel_get()) != kernel_flock && + !(disable_funnel && curflock != THR_FUNNEL_NULL)) + panic("Entering vrele() without kernel funnel"); simple_lock(&vp->v_interlock); if (--vp->v_usecount == 1) { if (UBCINFOEXISTS(vp)) { - vinactive(vp); + if ((vp->v_flag & VXLOCK) == 0) + vinactive(vp); simple_unlock(&vp->v_interlock); return; } @@ -1306,9 +1380,6 @@ vrele(vp) panic("vrele: ref cnt"); } #endif - if (ISSET((vp)->v_flag, VUINACTIVE) && VONLIST(vp)) - VREMINACTIVE("vrele", vp); - if ((vp->v_flag & VXLOCK) || (vp->v_flag & VORECLAIM)) { /* vnode is being cleaned, just return */ @@ -1493,7 +1564,6 @@ vclean(vp, flags, p) struct proc *p; { int active; - int removed = 0; int didhold; /* @@ -1511,9 +1581,23 @@ vclean(vp, flags, p) * so that its count cannot fall to zero and generate a * race against ourselves to recycle it. */ - if (active = vp->v_usecount) + if (active = vp->v_usecount) { + /* + * active vnode can not be on the free list. + * we are about to take an extra reference on this vnode + * do the queue management as needed + * Not doing so can cause "still on list" or + * "vnreclaim: v_usecount" panic if VOP_LOCK() blocks. + */ + simple_lock(&vnode_free_list_slock); + if (ISSET((vp)->v_flag, VUINACTIVE)) + VREMINACTIVE("vclean", vp); + simple_unlock(&vnode_free_list_slock); + if (++vp->v_usecount <= 0) panic("vclean: v_usecount"); + } + /* * Prevent the vnode from being recycled or * brought into use while we clean it out. @@ -1532,16 +1616,15 @@ vclean(vp, flags, p) VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p); /* + * While blocked in VOP_LOCK() someone could have dropped + * reference[s] and we could land on the inactive list. * if this vnode is on the inactive list * take it off the list. 
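The funnel checks added to vfree() and vinactive() above are entry-precondition asserts: these functions must run under the kernel funnel, so they panic when the calling thread does not hold it (unless funneling is disabled). A much-simplified pthread rendition of the idea; the kernel uses thread_funnel_get(), and all names here are illustrative:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    /* toy funnel: one global owner thread */
    static pthread_t funnel_owner;
    static int funnel_held = 0;

    static void funnel_enter(void) { funnel_owner = pthread_self(); funnel_held = 1; }

    /* the precondition vfree()/vinactive() now check on entry */
    static void assert_funnel(const char *who)
    {
        if (!funnel_held || !pthread_equal(funnel_owner, pthread_self())) {
            fprintf(stderr, "Entering %s() without kernel funnel\n", who);
            assert(0);   /* panic() in the kernel */
        }
    }

    static void vfree_model(void)
    {
        assert_funnel("vfree");
        /* ... put the vnode on the free list ... */
    }

    int main(void)
    {
        funnel_enter();
        vfree_model();
        puts("ok");
        return 0;
    }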
*/ - if ((active == 1) && - (ISSET((vp)->v_flag, VUINACTIVE) && VONLIST(vp))) { - simple_lock(&vnode_free_list_slock); + simple_lock(&vnode_free_list_slock); + if (ISSET((vp)->v_flag, VUINACTIVE)) VREMINACTIVE("vclean", vp); - simple_unlock(&vnode_free_list_slock); - removed++; - } + simple_unlock(&vnode_free_list_slock); /* Clean the pages in VM. */ if (active && (flags & DOCLOSE)) @@ -1557,10 +1640,10 @@ vclean(vp, flags, p) */ if (flags & DOCLOSE) { if (vp->v_tag == VT_NFS) - nfs_vinvalbuf(vp, V_SAVE, NOCRED, p, 0); - else - vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0); - } + nfs_vinvalbuf(vp, V_SAVE, NOCRED, p, 0); + else + vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0); + } if (active) VOP_INACTIVE(vp, p); @@ -1568,28 +1651,43 @@ vclean(vp, flags, p) VOP_UNLOCK(vp, 0, p); /* Destroy ubc named reference */ - if (didhold) { - ubc_rele(vp); + if (didhold) { + ubc_rele(vp); ubc_destroy_named(vp); } + /* + * Make sure vp isn't on the inactive list. + */ + simple_lock(&vnode_free_list_slock); + if (ISSET((vp)->v_flag, VUINACTIVE)) { + VREMINACTIVE("vclean", vp); + } + simple_unlock(&vnode_free_list_slock); /* * Reclaim the vnode. */ if (VOP_RECLAIM(vp, p)) panic("vclean: cannot reclaim"); + + // make sure the name & parent ptrs get cleaned out! + clean_up_name_parent_ptrs(vp); + cache_purge(vp); if (vp->v_vnlock) { - if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0) + struct lock__bsd__ *tmp = vp->v_vnlock; + if ((tmp->lk_flags & LK_DRAINED) == 0) vprint("vclean: lock not drained", vp); - FREE_ZONE(vp->v_vnlock, sizeof (struct lock__bsd__), M_VNODE); vp->v_vnlock = NULL; + FREE(tmp, M_TEMP); } /* It's dead, Jim! */ vp->v_op = dead_vnodeop_p; vp->v_tag = VT_NON; + insmntque(vp, (struct mount *)0); + /* * Done with purge, notify sleepers of the grim news. */ @@ -1781,8 +1879,11 @@ vgonel(vp, p) vp->v_flag &= ~VALIASED; } simple_unlock(&spechash_slock); - FREE_ZONE(vp->v_specinfo, sizeof (struct specinfo), M_VNODE); + { + struct specinfo *tmp = vp->v_specinfo; vp->v_specinfo = NULL; + FREE_ZONE((void *)tmp, sizeof(struct specinfo), M_SPECINFO); + } } /* * If it is on the freelist and not already at the head, @@ -1797,7 +1898,7 @@ vgonel(vp, p) * getnewvnode after removing it from the freelist to ensure * that we do not try to move it here. */ - if (vp->v_usecount == 0) { + if (vp->v_usecount == 0 && (vp->v_flag & VUINACTIVE) == 0) { simple_lock(&vnode_free_list_slock); if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) && vnode_free_list.tqh_first != vp) { @@ -1945,6 +2046,74 @@ printlockedvnodes() } #endif +static int +build_path(struct vnode *vp, char *buff, int buflen, int *outlen) +{ + char *end, *str; + int i, len, ret=0, counter=0; + + end = &buff[buflen-1]; + *--end = '\0'; + + while(vp && VPARENT(vp) != vp) { + // the maximum depth of a file system hierarchy is MAXPATHLEN/2 + // (with single-char names separated by slashes). we panic if + // we've ever looped more than that. + if (counter++ > MAXPATHLEN/2) { + panic("build_path: vnode parent chain is too long! vp 0x%x\n", vp); + } + str = VNAME(vp); + if (VNAME(vp) == NULL) { + if (VPARENT(vp) != NULL) { + ret = EINVAL; + } + break; + } + + // count how long the string is + for(len=0; *str; str++, len++) + /* nothing */; + + // check that there's enough space + if ((end - buff) < len) { + ret = ENOSPC; + break; + } + + // copy it backwards + for(; len > 0; len--) { + *--end = *--str; + } + + // put in the path separator + *--end = '/'; + + // walk up the chain. 
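The extra v_usecount bump vclean() now takes on an active vnode keeps the count from reaching zero, and the vnode from being recycled, while VOP_LOCK() blocks. A toy model of taking a private reference across a blocking region; the counter handling is illustrative, not the kernel's actual reference lifecycle:

    #include <assert.h>
    #include <stdio.h>

    struct toy_vnode { int v_usecount; };

    static void clean_vnode(struct toy_vnode *vp)
    {
        int active = vp->v_usecount;

        if (active)
            vp->v_usecount++;   /* our own reference, taken before blocking */

        /* ... drain the lock, flush buffers, reclaim: while we hold a
         * reference the count cannot fall to zero under us ... */
        assert(vp->v_usecount > 0);

        if (active)
            vp->v_usecount--;   /* drop it once cleaning is done */
    }

    int main(void)
    {
        struct toy_vnode vn = { 1 };

        clean_vnode(&vn);
        printf("usecount=%d\n", vn.v_usecount);
        return 0;
    }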
+ vp = VPARENT(vp); + + // check if we're crossing a mount point and + // switch the vp if we are. + if (vp && (vp->v_flag & VROOT)) { + vp = vp->v_mount->mnt_vnodecovered; + } + } + + // slide it down to the beginning of the buffer + memmove(buff, end, &buff[buflen] - end); + + *outlen = &buff[buflen] - end; + + return ret; +} + +__private_extern__ int +vn_getpath(struct vnode *vp, char *pathbuf, int *len) +{ + return build_path(vp, pathbuf, *len, len); +} + + + /* * Top level filesystem related information gathering. */ @@ -1959,6 +2128,9 @@ vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) struct proc *p; { struct vfsconf *vfsp; + int *username; + u_int usernamelen; + int error; /* * The VFS_NUMMNTOPS shouldn't be at name[0] since @@ -1977,7 +2149,7 @@ vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) /* all sysctl names at this level are at least name and field */ if (namelen < 2) - return (ENOTDIR); /* overloaded */ + return (EISDIR); /* overloaded */ if (name[0] != VFS_GENERIC) { for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) if (vfsp->vfc_typenum == name[0]) @@ -2001,7 +2173,19 @@ vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) return (sysctl_rdstruct(oldp, oldlenp, newp, vfsp, sizeof(struct vfsconf))); } - return (EOPNOTSUPP); + /* + * We need to get back into the general MIB, so we need to re-prepend + * CTL_VFS to our name and try userland_sysctl(). + */ + usernamelen = namelen + 1; + MALLOC(username, int *, usernamelen * sizeof(*username), + M_TEMP, M_WAITOK); + bcopy(name, username + 1, namelen * sizeof(*name)); + username[0] = CTL_VFS; + error = userland_sysctl(p, username, usernamelen, oldp, oldlenp, 1, + newp, newlen, oldlenp); + FREE(username, M_TEMP); + return (error); } int kinfo_vdebug = 1; @@ -2058,13 +2242,16 @@ again: nvp = vp->v_mntvnodes.le_next; if (bp + VPTRSZ + VNODESZ > ewhere) { simple_unlock(&mntvnode_slock); + vfs_unbusy(mp, p); *sizep = bp - where; return (ENOMEM); } simple_unlock(&mntvnode_slock); if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) || - (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ))) + (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ))) { + vfs_unbusy(mp, p); return (error); + } bp += VPTRSZ + VNODESZ; simple_lock(&mntvnode_slock); } @@ -2377,6 +2564,16 @@ restart: goto restart; } + /* + * if the vnode is being initialized, + * skip over it + */ + if (ISSET(vp->v_flag, VUINIT)) { + SET(vp->v_flag, VUWANT); + simple_unlock(&vp->v_interlock); + continue; + } + VREMINACTIVE("vnreclaim", vp); simple_unlock(&vnode_free_list_slock); @@ -2494,9 +2691,6 @@ vnode_pager_vrele(struct vnode *vp) boolean_t funnel_state; int isvnreclaim = 1; - if (vp == (struct vnode *) NULL) - panic("vnode_pager_vrele: null vp"); - funnel_state = thread_funnel_set(kernel_flock, TRUE); /* Mark the vnode to be recycled */ @@ -2533,6 +2727,9 @@ vnode_pager_vrele(struct vnode *vp) } if (!ISSET(vp->v_flag, VTERMINATE)) SET(vp->v_flag, VTERMINATE); + + cache_purge(vp); + if (UBCINFOEXISTS(vp)) { struct ubc_info *uip = vp->v_ubcinfo; @@ -2618,6 +2815,14 @@ walk_allvnodes() } #endif /* DIAGNOSTIC */ + +struct x_constraints { + u_int32_t x_maxreadcnt; + u_int32_t x_maxsegreadsize; + u_int32_t x_maxsegwritesize; +}; + + void vfs_io_attributes(vp, flags, iosize, vectors) struct vnode *vp; @@ -2635,7 +2840,10 @@ vfs_io_attributes(vp, flags, iosize, vectors) if (mp != NULL) { switch (flags) { case B_READ: - *iosize = mp->mnt_maxreadcnt; + if (mp->mnt_kern_flag & MNTK_IO_XINFO) + *iosize = ((struct x_constraints *)(mp->mnt_xinfo_ptr))->x_maxreadcnt; + 
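build_path() above assembles the pathname right-to-left: it walks leaf to root through VPARENT, copies each component backwards from the end of the buffer, then slides the finished string down to the front with one memmove. A user-space rendition of the same technique where the parent chain is just an array of names, leaf first; names and error codes are illustrative:

    #include <stdio.h>
    #include <string.h>

    static int build_path_model(const char **names, int depth,
                                char *buff, int buflen, int *outlen)
    {
        char *end = &buff[buflen - 1];
        int i;

        *end = '\0';
        for (i = 0; i < depth; i++) {            /* leaf first, like the VPARENT walk */
            int len = (int)strlen(names[i]);

            if ((end - buff) < len + 1)
                return -1;                       /* ENOSPC in the kernel */
            end -= len;
            memcpy(end, names[i], len);          /* copy the component backwards */
            *--end = '/';                        /* then the separator */
        }
        memmove(buff, end, &buff[buflen] - end); /* slide down to the front */
        *outlen = (int)(&buff[buflen] - end);
        return 0;
    }

    int main(void)
    {
        const char *chain[] = { "passwd", "etc" };   /* /etc/passwd, leaf first */
        char buf[32];
        int len;

        if (build_path_model(chain, 2, buf, sizeof(buf), &len) == 0)
            printf("%s (%d)\n", buf, len);           /* "/etc/passwd (12)" */
        return 0;
    }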
else + *iosize = mp->mnt_maxreadcnt; *vectors = mp->mnt_segreadcnt; break; case B_WRITE: @@ -2645,12 +2853,62 @@ vfs_io_attributes(vp, flags, iosize, vectors) default: break; } + if (*iosize == 0) + *iosize = MAXPHYS; + if (*vectors == 0) + *vectors = 32; } - return; } -#include +__private_extern__ +void +vfs_io_maxsegsize(vp, flags, maxsegsize) + struct vnode *vp; + int flags; /* B_READ or B_WRITE */ + int *maxsegsize; +{ + struct mount *mp; + + /* start with "reasonable" default */ + *maxsegsize = MAXPHYS; + + mp = vp->v_mount; + if (mp != NULL) { + switch (flags) { + case B_READ: + if (mp->mnt_kern_flag & MNTK_IO_XINFO) + *maxsegsize = ((struct x_constraints *)(mp->mnt_xinfo_ptr))->x_maxsegreadsize; + else + /* + * if the extended info doesn't exist + * then use the maxread I/O size as the + * max segment size... this is the previous behavior + */ + *maxsegsize = mp->mnt_maxreadcnt; + break; + case B_WRITE: + if (mp->mnt_kern_flag & MNTK_IO_XINFO) + *maxsegsize = ((struct x_constraints *)(mp->mnt_xinfo_ptr))->x_maxsegwritesize; + else + /* + * if the extended info doesn't exist + * then use the maxwrite I/O size as the + * max segment size... this is the previous behavior + */ + *maxsegsize = mp->mnt_maxwritecnt; + break; + default: + break; + } + if (*maxsegsize == 0) + *maxsegsize = MAXPHYS; + } +} + + +#include + int vfs_init_io_attributes(devvp, mp) @@ -2660,8 +2918,12 @@ vfs_init_io_attributes(devvp, mp) int error; off_t readblockcnt; off_t writeblockcnt; + off_t readmaxcnt; + off_t writemaxcnt; off_t readsegcnt; off_t writesegcnt; + off_t readsegsize; + off_t writesegsize; u_long blksize; u_int64_t temp; @@ -2669,6 +2931,32 @@ vfs_init_io_attributes(devvp, mp) struct proc *p = current_proc(); struct ucred *cred = p->p_ucred; + int isvirtual = 0; + /* + * determine if this mount point exists on the same device as the root + * partition... 
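vfs_io_maxsegsize() above resolves the limit through a fallback chain: prefer the extended per-mount constraint when MNTK_IO_XINFO is set, fall back to the legacy max-I/O count (the previous behavior), and finally to a fixed default (MAXPHYS in the kernel) when everything else is zero. A sketch of that resolution order with illustrative field names:

    #include <stdio.h>

    #define TOY_MAXPHYS (128 * 1024)

    struct toy_mount {
        int has_xinfo;                  /* MNTK_IO_XINFO analogue */
        unsigned int x_maxsegreadsize;  /* extended constraint */
        unsigned int maxreadcnt;        /* legacy constraint */
    };

    static unsigned int read_maxsegsize(const struct toy_mount *mp)
    {
        unsigned int maxsegsize = TOY_MAXPHYS;   /* "reasonable" default */

        if (mp) {
            if (mp->has_xinfo)
                maxsegsize = mp->x_maxsegreadsize;
            else
                maxsegsize = mp->maxreadcnt;     /* previous behavior */
            if (maxsegsize == 0)
                maxsegsize = TOY_MAXPHYS;
        }
        return maxsegsize;
    }

    int main(void)
    {
        struct toy_mount legacy = { 0, 0, 64 * 1024 };
        struct toy_mount ext    = { 1, 256 * 1024, 64 * 1024 };

        printf("%u %u %u\n", read_maxsegsize(&legacy),
               read_maxsegsize(&ext), read_maxsegsize(NULL));
        return 0;
    }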
if so, then it comes under the hard throttle control + */ + int thisunit = -1; + static int rootunit = -1; + extern struct vnode *rootvp; + + if (rootunit == -1) { + if (VOP_IOCTL(rootvp, DKIOCGETBSDUNIT, (caddr_t)&rootunit, 0, cred, p)) + rootunit = -1; + else if (rootvp == devvp) + mp->mnt_kern_flag |= MNTK_ROOTDEV; + } + if (devvp != rootvp && rootunit != -1) { + if (VOP_IOCTL(devvp, DKIOCGETBSDUNIT, (caddr_t)&thisunit, 0, cred, p) == 0) { + if (thisunit == rootunit) + mp->mnt_kern_flag |= MNTK_ROOTDEV; + } + } + if (VOP_IOCTL(devvp, DKIOCGETISVIRTUAL, (caddr_t)&isvirtual, 0, cred, p) == 0) { + if (isvirtual) + mp->mnt_kern_flag |= MNTK_VIRTUALDEV; + } + if ((error = VOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t)&readblockcnt, 0, cred, p))) return (error); @@ -2677,6 +2965,14 @@ vfs_init_io_attributes(devvp, mp) (caddr_t)&writeblockcnt, 0, cred, p))) return (error); + if ((error = VOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD, + (caddr_t)&readmaxcnt, 0, cred, p))) + return (error); + + if ((error = VOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE, + (caddr_t)&writemaxcnt, 0, cred, p))) + return (error); + if ((error = VOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD, (caddr_t)&readsegcnt, 0, cred, p))) return (error); @@ -2685,32 +2981,315 @@ vfs_init_io_attributes(devvp, mp) (caddr_t)&writesegcnt, 0, cred, p))) return (error); + if ((error = VOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD, + (caddr_t)&readsegsize, 0, cred, p))) + return (error); + + if ((error = VOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE, + (caddr_t)&writesegsize, 0, cred, p))) + return (error); + if ((error = VOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, cred, p))) return (error); - temp = readblockcnt * blksize; - temp = (temp > UINT32_MAX) ? (UINT32_MAX / blksize) * blksize : temp; - mp->mnt_maxreadcnt = (u_int32_t)temp; - temp = writeblockcnt * blksize; - temp = (temp > UINT32_MAX) ? (UINT32_MAX / blksize) * blksize : temp; + if ( !(mp->mnt_kern_flag & MNTK_IO_XINFO)) { + MALLOC(mp->mnt_xinfo_ptr, void *, sizeof(struct x_constraints), M_TEMP, M_WAITOK); + mp->mnt_kern_flag |= MNTK_IO_XINFO; + } + + if (readmaxcnt) + temp = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt; + else { + if (readblockcnt) { + temp = readblockcnt * blksize; + temp = (temp > UINT32_MAX) ? UINT32_MAX : temp; + } else + temp = MAXPHYS; + } + ((struct x_constraints *)(mp->mnt_xinfo_ptr))->x_maxreadcnt = (u_int32_t)temp; + + if (writemaxcnt) + temp = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt; + else { + if (writeblockcnt) { + temp = writeblockcnt * blksize; + temp = (temp > UINT32_MAX) ? UINT32_MAX : temp; + } else + temp = MAXPHYS; + } mp->mnt_maxwritecnt = (u_int32_t)temp; - temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt; - mp->mnt_segreadcnt = (u_int16_t)temp; + if (readsegcnt) { + temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt; + mp->mnt_segreadcnt = (u_int16_t)temp; + } + if (writesegcnt) { + temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt; + mp->mnt_segwritecnt = (u_int16_t)temp; + } + if (readsegsize) + temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize; + else + temp = mp->mnt_maxreadcnt; + ((struct x_constraints *)(mp->mnt_xinfo_ptr))->x_maxsegreadsize = (u_int32_t)temp; - temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt; - mp->mnt_segwritecnt = (u_int16_t)temp; + if (writesegsize) + temp = (writesegsize > UINT32_MAX) ? 
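The `(readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt` expressions above are saturating narrowing: the driver reports 64-bit byte counts, the mount fields are 32-bit, and a plain assignment would silently truncate (a multiple of 2^32 would become 0). Clamping keeps an oversized limit at the largest representable value instead. A minimal demonstration:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t clamp_u64_to_u32(uint64_t v)
    {
        return (v > UINT32_MAX) ? UINT32_MAX : (uint32_t)v;
    }

    int main(void)
    {
        uint64_t readmaxcnt = (uint64_t)8 << 32;   /* > UINT32_MAX */

        printf("truncated: %u\n", (uint32_t)readmaxcnt);         /* 0: wrong */
        printf("clamped:   %u\n", clamp_u64_to_u32(readmaxcnt)); /* UINT32_MAX */
        return 0;
    }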
UINT32_MAX : writesegsize; + else + temp = mp->mnt_maxwritecnt; + ((struct x_constraints *)(mp->mnt_xinfo_ptr))->x_maxsegwritesize = (u_int32_t)temp; -#if 0 - printf("--- IO attributes for mount point 0x%08x ---\n", mp); - printf("\tmnt_maxreadcnt = 0x%x", mp->mnt_maxreadcnt); - printf("\tmnt_maxwritecnt = 0x%x\n", mp->mnt_maxwritecnt); - printf("\tmnt_segreadcnt = 0x%x", mp->mnt_segreadcnt); - printf("\tmnt_segwritecnt = 0x%x\n", mp->mnt_segwritecnt); -#endif /* 0 */ + return (error); +} + +static struct klist fs_klist; + +void +vfs_event_init(void) +{ + + klist_init(&fs_klist); +} + +void +vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data) +{ + + KNOTE(&fs_klist, event); +} + +/* + * return the number of mounted filesystems. + */ +static int +sysctl_vfs_getvfscnt(void) +{ + struct mount *mp; + int ret = 0; + simple_lock(&mountlist_slock); + CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) + ret++; + simple_unlock(&mountlist_slock); + return (ret); +} + +/* + * fill in the array of fsid_t's up to a max of 'count', the actual + * number filled in will be set in '*actual'. If there are more fsid_t's + * than room in fsidlst then ENOMEM will be returned and '*actual' will + * have the actual count. + * having *actual filled out even in the error case is depended upon. + */ +static int +sysctl_vfs_getvfslist(fsid_t *fsidlst, int count, int *actual) +{ + struct mount *mp; + + *actual = 0; + simple_lock(&mountlist_slock); + CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) { + (*actual)++; + if (*actual <= count) + fsidlst[(*actual) - 1] = mp->mnt_stat.f_fsid; + } + simple_unlock(&mountlist_slock); + return (*actual <= count ? 0 : ENOMEM); +} + +static int +sysctl_vfs_vfslist SYSCTL_HANDLER_ARGS +{ + int actual, error; + size_t space; + fsid_t *fsidlst; + + /* This is a readonly node. */ + if (req->newptr != NULL) + return (EPERM); + + /* they are querying us so just return the space required. */ + if (req->oldptr == NULL) { + req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t); + return 0; + } +again: + /* + * Retrieve an accurate count of the amount of space required to copy + * out all the fsids in the system. + */ + space = req->oldlen; + req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t); + + /* they didn't give us enough space. */ + if (space < req->oldlen) + return (ENOMEM); + + MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK); + error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t), + &actual); + /* + * If we get back ENOMEM, then another mount has been added while we + * slept in malloc above. If this is the case then try again. + */ + if (error == ENOMEM) { + FREE(fsidlst, M_TEMP); + req->oldlen = space; + goto again; + } + if (error == 0) { + error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t)); + } + FREE(fsidlst, M_TEMP); + return (error); +} + +/* + * Do a sysctl by fsid. + */ +static int +sysctl_vfs_ctlbyfsid SYSCTL_HANDLER_ARGS +{ + struct vfsidctl vc; + struct mount *mp; + struct statfs *sp; + struct proc *p; + int *name; + int error, flags, namelen; + + name = arg1; + namelen = arg2; + p = req->p; + + error = SYSCTL_IN(req, &vc, sizeof(vc)); + if (error) + return (error); + if (vc.vc_vers != VFS_CTL_VERS1) + return (EINVAL); + mp = vfs_getvfs(&vc.vc_fsid); + if (mp == NULL) + return (ENOENT); + /* reset so that the fs specific code can fetch it. */ + req->newidx = 0; + /* + * Note if this is a VFS_CTL then we pass the actual sysctl req + * in for "oldp" so that the lower layer can DTRT and use the + * SYSCTL_IN/OUT routines. 
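sysctl_vfs_getvfslist() above implements the count-then-copy contract its caller depends on: fill at most 'count' entries, always report the true total in *actual (even on error), and return ENOMEM when the buffer was too small so the caller can resize and retry, which is exactly what the `goto again` loop in sysctl_vfs_vfslist does. A user-space model with an illustrative mount table:

    #include <stdio.h>

    #define TOY_ENOMEM 12

    static int mounted_ids[] = { 3, 7, 9, 11 };
    #define NMOUNTS (int)(sizeof(mounted_ids) / sizeof(mounted_ids[0]))

    static int get_vfs_list(int *out, int count, int *actual)
    {
        int i;

        *actual = 0;
        for (i = 0; i < NMOUNTS; i++) {
            (*actual)++;
            if (*actual <= count)
                out[*actual - 1] = mounted_ids[i];
        }
        return (*actual <= count) ? 0 : TOY_ENOMEM;
    }

    int main(void)
    {
        int buf[2], actual;
        int err = get_vfs_list(buf, 2, &actual);

        printf("err=%d actual=%d\n", err, actual);   /* err=12 actual=4: retry bigger */
        return 0;
    }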
+ */ + if (mp->mnt_op->vfs_sysctl != NULL) { + error = mp->mnt_op->vfs_sysctl(name, namelen, + req, NULL, NULL, 0, req->p); + if (error != EOPNOTSUPP) + return (error); + } + switch (name[0]) { + case VFS_CTL_UMOUNT: + VCTLTOREQ(&vc, req); + error = SYSCTL_IN(req, &flags, sizeof(flags)); + if (error) + break; + error = safedounmount(mp, flags, p); + break; + case VFS_CTL_STATFS: + VCTLTOREQ(&vc, req); + error = SYSCTL_IN(req, &flags, sizeof(flags)); + if (error) + break; + sp = &mp->mnt_stat; + if (((flags & MNT_NOWAIT) == 0 || (flags & MNT_WAIT)) && + (error = VFS_STATFS(mp, sp, p))) + return (error); + sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; + error = SYSCTL_OUT(req, sp, sizeof(*sp)); + break; + default: + return (EOPNOTSUPP); + } return (error); } +static int filt_fsattach(struct knote *kn); +static void filt_fsdetach(struct knote *kn); +static int filt_fsevent(struct knote *kn, long hint); + +struct filterops fs_filtops = + { 0, filt_fsattach, filt_fsdetach, filt_fsevent }; + +static int +filt_fsattach(struct knote *kn) +{ + + kn->kn_flags |= EV_CLEAR; + KNOTE_ATTACH(&fs_klist, kn); + return (0); +} + +static void +filt_fsdetach(struct knote *kn) +{ + + KNOTE_DETACH(&fs_klist, kn); +} + +static int +filt_fsevent(struct knote *kn, long hint) +{ + + kn->kn_fflags |= hint; + return (kn->kn_fflags != 0); +} + +static int +sysctl_vfs_noremotehang SYSCTL_HANDLER_ARGS +{ + int out, error; + pid_t pid; + size_t space; + struct proc *p; + + /* We need a pid. */ + if (req->newptr == NULL) + return (EINVAL); + + error = SYSCTL_IN(req, &pid, sizeof(pid)); + if (error) + return (error); + + p = pfind(pid < 0 ? -pid : pid); + if (p == NULL) + return (ESRCH); + + /* + * Fetching the value is ok, but we only fetch if the old + * pointer is given. + */ + if (req->oldptr != NULL) { + out = !((p->p_flag & P_NOREMOTEHANG) == 0); + error = SYSCTL_OUT(req, &out, sizeof(out)); + return (error); + } + + /* cansignal offers us enough security. */ + if (p != req->p && suser(req->p->p_ucred, &req->p->p_acflag) != 0) + return (EPERM); + + if (pid < 0) + p->p_flag &= ~P_NOREMOTEHANG; + else + p->p_flag |= P_NOREMOTEHANG; + + return (0); +} +/* the vfs.generic. branch. */ +SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RW, 0, "vfs generic hinge"); +/* retreive a list of mounted filesystem fsid_t */ +SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist, CTLFLAG_RD, + 0, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids"); +/* perform operations on filesystem via fsid_t */ +SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW, + sysctl_vfs_ctlbyfsid, "ctlbyfsid"); +SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW, + 0, 0, sysctl_vfs_noremotehang, "I", "noremotehang"); + diff --git a/bsd/vfs/vfs_support.c b/bsd/vfs/vfs_support.c index c74bde6fa..bf68731d6 100644 --- a/bsd/vfs/vfs_support.c +++ b/bsd/vfs/vfs_support.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
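sysctl_vfs_noremotehang above encodes the operation in the sign of the pid: pfind() runs on the absolute value, a negative pid clears P_NOREMOTEHANG, and a positive pid sets it. A sketch of just that flag logic, with toy process and flag definitions standing in for the kernel's:

    #include <stdio.h>

    #define P_NOREMOTEHANG 0x1

    struct toy_proc { int pid; int p_flag; };

    static void apply_noremotehang(struct toy_proc *p, int pid)
    {
        if (pid < 0)
            p->p_flag &= ~P_NOREMOTEHANG;   /* negative pid clears */
        else
            p->p_flag |= P_NOREMOTEHANG;    /* positive pid sets */
    }

    int main(void)
    {
        struct toy_proc p = { 42, 0 };

        apply_noremotehang(&p, 42);
        printf("flag=%d\n", p.p_flag & P_NOREMOTEHANG);   /* 1 */
        apply_noremotehang(&p, -42);
        printf("flag=%d\n", p.p_flag & P_NOREMOTEHANG);   /* 0 */
        return 0;
    }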
* * @APPLE_LICENSE_HEADER_START@ * @@ -738,8 +738,13 @@ struct vop_abortop_args /* { int nop_abortop(struct vop_abortop_args *ap) { - if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) - FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); + if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) { + char *tmp = ap->a_cnp->cn_pnbuf; + ap->a_cnp->cn_pnbuf = NULL; + ap->a_cnp->cn_flags &= ~HASBUF; + FREE_ZONE(tmp, ap->a_cnp->cn_pnlen, M_NAMEI); + } + return (0); } diff --git a/bsd/vfs/vfs_syscalls.c b/bsd/vfs/vfs_syscalls.c index 567965c3b..d8dbc492e 100644 --- a/bsd/vfs/vfs_syscalls.c +++ b/bsd/vfs/vfs_syscalls.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1995-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -79,9 +79,13 @@ #include #include #include +#include +#include #include #include +#include + struct lock__bsd__ exchangelock; /* @@ -127,12 +131,15 @@ mount(p, uap, retval) char fstypename[MFSNAMELEN]; size_t dummy=0; + AUDIT_ARG(fflags, uap->flags); + /* * Get vnode to be covered */ - NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + error = namei(&nd); + if (error) return (error); vp = nd.ni_vp; @@ -249,6 +256,8 @@ mount(p, uap, retval) vput(vp); return (error); } + /* XXXAUDIT: Should we capture the type on the error path as well? */ + AUDIT_ARG(text, fstypename); for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) if (!strcmp(vfsp->vfc_name, fstypename)) break; @@ -268,7 +277,7 @@ mount(p, uap, retval) /* * Allocate and initialize the filesystem. */ - mp = (struct mount *)_MALLOC_ZONE((u_long)sizeof(struct mount), + MALLOC_ZONE(mp, struct mount *, (u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); bzero((char *)mp, (u_long)sizeof(struct mount)); @@ -335,9 +344,9 @@ update: vp->v_mountedhere =mp; simple_unlock(&vp->v_interlock); simple_lock(&mountlist_slock); - CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list); simple_unlock(&mountlist_slock); + vfs_event_signal(NULL, VQ_MOUNT, NULL); checkdirs(vp); VOP_UNLOCK(vp, 0, p); vfs_unbusy(mp, p); @@ -354,8 +363,11 @@ update: CLR(vp->v_flag, VMOUNT); simple_unlock(&vp->v_interlock); mp->mnt_vfc->vfc_refcount--; + + if (mp->mnt_kern_flag & MNTK_IO_XINFO) + FREE(mp->mnt_xinfo_ptr, M_TEMP); vfs_unbusy(mp, p); - _FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT); + FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT); if (err2) vrele(vp); else @@ -470,40 +482,51 @@ unmount(p, uap, retval) int error; struct nameidata nd; - NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + error = namei(&nd); + if (error) return (error); vp = nd.ni_vp; mp = vp->v_mount; + /* + * Must be the root of the filesystem + */ + if ((vp->v_flag & VROOT) == 0) { + vput(vp); + return (EINVAL); + } + vput(vp); + return (safedounmount(mp, uap->flags, p)); +} + +/* + * Do the actual file system unmount, prevent some common foot shooting. + */ +int +safedounmount(mp, flags, p) + struct mount *mp; + int flags; + struct proc *p; +{ + int error; + /* * Only root, or the user that did the original mount is * permitted to unmount this filesystem. 
*/ if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) && - (error = suser(p->p_ucred, &p->p_acflag))) { - vput(vp); + (error = suser(p->p_ucred, &p->p_acflag))) return (error); - } /* * Don't allow unmounting the root file system. */ - if (mp->mnt_flag & MNT_ROOTFS) { - vput(vp); + if (mp->mnt_flag & MNT_ROOTFS) return (EBUSY); /* the root is always busy */ - } - /* - * Must be the root of the filesystem - */ - if ((vp->v_flag & VROOT) == 0) { - vput(vp); - return (EINVAL); - } - vput(vp); - return (dounmount(mp, uap->flags, p)); + return (dounmount(mp, flags, p)); } /* @@ -520,6 +543,8 @@ dounmount(mp, flags, p) simple_lock(&mountlist_slock); /* XXX post jaguar fix LK_DRAIN - then clean this up */ + if ((flags & MNT_FORCE)) + mp->mnt_kern_flag |= MNTK_FRCUNMOUNT; if (mp->mnt_kern_flag & MNTK_UNMOUNT) { simple_unlock(&mountlist_slock); mp->mnt_kern_flag |= MNTK_MWAIT; @@ -556,7 +581,6 @@ dounmount(mp, flags, p) /* increment the operations count */ if (!error) vfs_nummntops++; - CIRCLEQ_REMOVE(&mountlist, mp, mnt_list); if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) { coveredvp->v_mountedhere = (struct mount *)0; @@ -569,11 +593,15 @@ dounmount(mp, flags, p) panic("unmount: dangling vnode"); } lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_slock, p); + vfs_event_signal(NULL, VQ_UNMOUNT, NULL); out: if (mp->mnt_kern_flag & MNTK_MWAIT) wakeup((caddr_t)mp); - if (!error) - _FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT); + if (!error) { + if (mp->mnt_kern_flag & MNTK_IO_XINFO) + FREE(mp->mnt_xinfo_ptr, M_TEMP); + FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT); + } return (error); } @@ -656,8 +684,11 @@ quotactl(p, uap, retval) int error; struct nameidata nd; - NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + AUDIT_ARG(uid, uap->uid, 0, 0, 0); + AUDIT_ARG(cmd, uap->cmd); + NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); + error = namei(&nd); + if (error) return (error); mp = nd.ni_vp->v_mount; vrele(nd.ni_vp); @@ -684,8 +715,9 @@ statfs(p, uap, retval) int error; struct nameidata nd; - NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); + error = namei(&nd); + if (error) return (error); mp = nd.ni_vp->v_mount; sp = &mp->mnt_stat; @@ -716,8 +748,13 @@ fstatfs(p, uap, retval) register struct statfs *sp; int error; + AUDIT_ARG(fd, uap->fd); + if (error = getvnode(p, uap->fd, &fp)) return (error); + + AUDIT_ARG(vnpath, (struct vnode *)fp->f_data, ARG_VNODE1); + mp = ((struct vnode *)fp->f_data)->v_mount; if (!mp) return (EBADF); @@ -772,8 +809,10 @@ getfsstat(p, uap, retval) continue; } sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; - if (error = copyout((caddr_t)sp, sfsp, sizeof(*sp))) + if (error = copyout((caddr_t)sp, sfsp, sizeof(*sp))) { + vfs_unbusy(mp, p); return (error); + } sfsp += sizeof(*sp); } count++; @@ -825,9 +864,12 @@ ogetfsstat(p, uap, retval) } sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; error = copyout((caddr_t)sp, sfsp, - sizeof(*sp) - sizeof(sp->f_reserved3) - sizeof(sp->f_reserved4)); - if (error) + sizeof(*sp) - sizeof(sp->f_reserved3) + - sizeof(sp->f_reserved4)); + if (error) { + vfs_unbusy(mp, p); return (error); + } sfsp += sizeof(*sp) - sizeof(sp->f_reserved4); } count++; @@ -868,13 +910,18 @@ fchdir(p, uap, retval) vp = (struct vnode *)fp->f_data; VREF(vp); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + + AUDIT_ARG(vnpath, vp, ARG_VNODE1); + if (vp->v_type != VDIR) error = 
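safedounmount() above concentrates the "foot shooting" checks in one place so both the unmount() syscall and the new VFS_CTL_UMOUNT sysctl path get them: only the mounting user or the superuser may unmount, and the root filesystem is never unmountable. A sketch of the check order; error constants and fields are illustrative stand-ins:

    #include <stdio.h>

    #define TOY_EPERM  1
    #define TOY_EBUSY  16
    #define MNT_ROOTFS 0x4000

    struct toy_mount { unsigned owner_uid; int mnt_flag; };

    static int safe_unmount_check(const struct toy_mount *mp,
                                  unsigned cred_uid, int is_superuser)
    {
        if (mp->owner_uid != cred_uid && !is_superuser)
            return TOY_EPERM;    /* only owner or root */
        if (mp->mnt_flag & MNT_ROOTFS)
            return TOY_EBUSY;    /* the root is always busy */
        return 0;
    }

    int main(void)
    {
        struct toy_mount m = { 501, 0 };

        printf("%d %d\n", safe_unmount_check(&m, 501, 0),
                          safe_unmount_check(&m, 502, 0));   /* 0 1 */
        return 0;
    }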
ENOTDIR; else error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p); while (!error && (mp = vp->v_mountedhere) != NULL) { - if (vfs_busy(mp, 0, 0, p)) - continue; + if (vfs_busy(mp, LK_NOWAIT, 0, p)) { + vput(vp); + return (EACCES); + } error = VFS_ROOT(mp, &tdp); vfs_unbusy(mp, p); if (error) @@ -911,9 +958,10 @@ chdir(p, uap, retval) struct nameidata nd; struct vnode *tvp; - NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); - if (error = change_dir(&nd, p)) + error = change_dir(&nd, p); + if (error) return (error); tvp = fdp->fd_cdir; fdp->fd_cdir = nd.ni_vp; @@ -943,9 +991,10 @@ chroot(p, uap, retval) if (error = suser(p->p_ucred, &p->p_acflag)) return (error); - NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); - if (error = change_dir(&nd, p)) + error = change_dir(&nd, p); + if (error) return (error); if(p->p_flag & P_NOSHLIB) { @@ -954,7 +1003,7 @@ chroot(p, uap, retval) shared_regions_active = TRUE; } - if(error = clone_system_shared_regions(shared_regions_active)) { + if(error = clone_system_shared_regions(shared_regions_active, nd.ni_vp)) { vrele(nd.ni_vp); return (error); } @@ -1020,13 +1069,14 @@ open(p, uap, retval) if ((oflags & O_ACCMODE) == O_ACCMODE) return(EINVAL); flags = FFLAGS(uap->flags); + AUDIT_ARG(fflags, oflags); + cmode = ((uap->mode &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT; if (error = falloc(p, &nfp, &indx)) return (error); fp = nfp; - cmode = ((uap->mode &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT; NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p); p->p_dupfd = -indx - 1; /* XXX check for fdopen */ - if (error = vn_open(&nd, flags, cmode)) { + if (error = vn_open_modflags(&nd, &flags, cmode)) { ffree(fp); if ((error == ENODEV || error == ENXIO) && p->p_dupfd >= 0 && /* XXX from fdopen */ @@ -1046,6 +1096,8 @@ open(p, uap, retval) fp->f_type = DTYPE_VNODE; fp->f_ops = &vnops; fp->f_data = (caddr_t)vp; + + VOP_UNLOCK(vp, 0, p); if (flags & (O_EXLOCK | O_SHLOCK)) { lf.l_whence = SEEK_SET; lf.l_start = 0; @@ -1057,20 +1109,34 @@ open(p, uap, retval) type = F_FLOCK; if ((flags & FNONBLOCK) == 0) type |= F_WAIT; - VOP_UNLOCK(vp, 0, p); - if (error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) { - (void) vn_close(vp, fp->f_flag, fp->f_cred, p); - ffree(fp); - fdrelse(p, indx); - return (error); - } - vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + if (error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) + goto bad; fp->f_flag |= FHASLOCK; } - VOP_UNLOCK(vp, 0, p); + + if (flags & O_TRUNC) { + struct vattr vat; + struct vattr *vap = &vat; + + VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE); + (void)vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */ + VATTR_NULL(vap); + vap->va_size = 0; + /* try to truncate by setting the size attribute */ + error = VOP_SETATTR(vp, vap, p->p_ucred, p); + VOP_UNLOCK(vp, 0, p); /* XXX */ + if (error) + goto bad; + } + *fdflags(p, indx) &= ~UF_RESERVED; *retval = indx; return (0); +bad: + vn_close(vp, fp->f_flag, fp->f_cred, p); + ffree(fp); + fdrelse(p, indx); + return (error); } #if COMPAT_43 @@ -1113,22 +1179,26 @@ mknod(p, uap, retval) { register struct vnode *vp; struct vattr vattr; - int error; + int cmode, error; int whiteout; struct nameidata nd; + AUDIT_ARG(mode, uap->mode); + AUDIT_ARG(dev, uap->dev); + cmode = (uap->mode & ALLPERMS) &~ p->p_fd->fd_cmask; if (error = suser(p->p_ucred, &p->p_acflag)) return (error); bwillwrite(); - NDINIT(&nd, CREATE, 
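The fchdir() hunk above (and the matching one in lookup()) changes vfs_busy() from a retry loop into a single non-blocking attempt: the old `continue` could spin indefinitely against a busy mount, while LK_NOWAIT fails the call with an error instead (EACCES here, ENOENT in lookup()). A toy try-lock rendition of the change:

    #include <stdio.h>

    #define TOY_EACCES 13

    struct toy_mount { int busy; };

    static int vfs_busy_trylock(struct toy_mount *mp)
    {
        if (mp->busy)
            return -1;     /* LK_NOWAIT: don't sleep, don't spin */
        mp->busy = 1;
        return 0;
    }

    static int cross_mount(struct toy_mount *mp)
    {
        if (vfs_busy_trylock(mp) != 0)
            return TOY_EACCES;   /* fail the operation instead of looping */
        /* ... fetch the root vnode of the mounted filesystem ... */
        mp->busy = 0;            /* vfs_unbusy */
        return 0;
    }

    int main(void)
    {
        struct toy_mount m = { 1 };

        printf("%d\n", cross_mount(&m));   /* 13: mount was busy */
        return 0;
    }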
LOCKPARENT, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + NDINIT(&nd, CREATE, LOCKPARENT | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); + error = namei(&nd); + if (error) return (error); vp = nd.ni_vp; if (vp != NULL) error = EEXIST; else { VATTR_NULL(&vattr); - vattr.va_mode = (uap->mode & ALLPERMS) &~ p->p_fd->fd_cmask; + vattr.va_mode = cmode; vattr.va_rdev = uap->dev; whiteout = 0; @@ -1151,6 +1221,8 @@ mknod(p, uap, retval) } } if (!error) { + char *nameptr; + nameptr = add_name(nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen, nd.ni_cnd.cn_hash, 0); VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE); if (whiteout) { error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE); @@ -1161,6 +1233,22 @@ mknod(p, uap, retval) error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr); } + + if (error == 0 && nd.ni_vp) { + if (VNAME(nd.ni_vp) == NULL) { + VNAME(nd.ni_vp) = nameptr; + nameptr = NULL; + } + if (VPARENT(nd.ni_vp) == NULL) { + if (vget(nd.ni_dvp, 0, p) == 0) { + VPARENT(nd.ni_vp) = nd.ni_dvp; + } + } + } + if (nameptr) { + remove_name(nameptr); + nameptr = NULL; + } } else { VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); if (nd.ni_dvp == vp) @@ -1190,13 +1278,16 @@ mkfifo(p, uap, retval) struct vattr vattr; int error; struct nameidata nd; + char *nameptr=NULL; + #if !FIFO return (EOPNOTSUPP); #else bwillwrite(); - NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + NDINIT(&nd, CREATE, LOCKPARENT | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); + error = namei(&nd); + if (error) return (error); if (nd.ni_vp != NULL) { VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); @@ -1207,11 +1298,35 @@ mkfifo(p, uap, retval) vrele(nd.ni_vp); return (EEXIST); } + + nameptr = add_name(nd.ni_cnd.cn_nameptr, + nd.ni_cnd.cn_namelen, + nd.ni_cnd.cn_hash, 0); VATTR_NULL(&vattr); vattr.va_type = VFIFO; vattr.va_mode = (uap->mode & ALLPERMS) &~ p->p_fd->fd_cmask; VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE); - return (VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr)); + error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr); + + if (error == 0 && nd.ni_vp && nd.ni_vp->v_type == VFIFO) { + int vpid = nd.ni_vp->v_id; + if (vget(nd.ni_vp, 0, p) == 0) { + if (vpid == nd.ni_vp->v_id && nd.ni_vp->v_type == VFIFO) { + VNAME(nd.ni_vp) = nameptr; + nameptr = NULL; + + if (VPARENT(nd.ni_vp) == NULL) { + if (vget(nd.ni_dvp, 0, p) == 0) { + VPARENT(nd.ni_vp) = nd.ni_dvp; + } + } + } + } + } + if (nameptr) { + remove_name(nameptr); + } + return error; #endif /* FIFO */ } @@ -1234,17 +1349,19 @@ link(p, uap, retval) int error; bwillwrite(); - NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); + error = namei(&nd); + if (error) return (error); vp = nd.ni_vp; if (vp->v_type == VDIR) error = EPERM; /* POSIX */ else { nd.ni_cnd.cn_nameiop = CREATE; - nd.ni_cnd.cn_flags = LOCKPARENT; + nd.ni_cnd.cn_flags = LOCKPARENT | AUDITVNPATH2; nd.ni_dirp = uap->link; - if ((error = namei(&nd)) == 0) { + error = namei(&nd); + if (error == 0) { if (nd.ni_vp != NULL) error = EEXIST; if (!error) { @@ -1282,16 +1399,20 @@ symlink(p, uap, retval) register_t *retval; { struct vattr vattr; - char *path; + char *path, *nameptr; int error; struct nameidata nd; size_t dummy=0; + u_long vpid; + MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); if (error = copyinstr(uap->path, path, MAXPATHLEN, &dummy)) goto out; + AUDIT_ARG(text, path); /* This is the link string */ bwillwrite(); - NDINIT(&nd, CREATE, 
LOCKPARENT, UIO_USERSPACE, uap->link, p); - if (error = namei(&nd)) + NDINIT(&nd, CREATE, LOCKPARENT | AUDITVNPATH1, UIO_USERSPACE, uap->link, p); + error = namei(&nd); + if (error) goto out; if (nd.ni_vp) { VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); @@ -1306,7 +1427,31 @@ symlink(p, uap, retval) VATTR_NULL(&vattr); vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask; VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE); + + nameptr = add_name(nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen, nd.ni_cnd.cn_hash, 0); + error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path); + + // have to do this little dance because nd.ni_vp is not locked + // on return from the VOP_SYMLINK() call. + // + if (error == 0 && nd.ni_vp && nd.ni_vp->v_type == VLNK) { + vpid = nd.ni_vp->v_id; + if (vget(nd.ni_vp, 0, p) == 0) { + if (vpid == nd.ni_vp->v_id && nd.ni_vp->v_type == VLNK) { + VNAME(nd.ni_vp) = nameptr; + nameptr = NULL; + + if (VPARENT(nd.ni_vp) == NULL && vget(nd.ni_dvp, 0, p) == 0) { + VPARENT(nd.ni_vp) = nd.ni_dvp; + } + } + vrele(nd.ni_vp); + } + } + if (nameptr) { // only true if we didn't add it to the vnode + remove_name(nameptr); + } out: FREE_ZONE(path, MAXPATHLEN, M_NAMEI); return (error); @@ -1329,7 +1474,7 @@ undelete(p, uap, retval) struct nameidata nd; bwillwrite(); - NDINIT(&nd, DELETE, LOCKPARENT|DOWHITEOUT, UIO_USERSPACE, + NDINIT(&nd, DELETE, LOCKPARENT|DOWHITEOUT|AUDITVNPATH1, UIO_USERSPACE, uap->path, p); error = namei(&nd); if (error) @@ -1372,12 +1517,14 @@ _unlink(p, uap, retval, nodelbusy) struct nameidata nd; bwillwrite(); - NDINIT(&nd, DELETE, LOCKPARENT, UIO_USERSPACE, uap->path, p); + NDINIT(&nd, DELETE, LOCKPARENT | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); /* with Carbon semantics, busy files cannot be deleted */ if (nodelbusy) nd.ni_cnd.cn_flags |= NODELETEBUSY; - if (error = namei(&nd)) + error = namei(&nd); + if (error) return (error); + vp = nd.ni_vp; VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); @@ -1452,31 +1599,56 @@ lseek(p, uap, retval) { struct ucred *cred = p->p_ucred; struct file *fp; + struct vnode *vp; struct vattr vattr; + off_t offset = uap->offset; int error; if (error = fdgetf(p, uap->fd, &fp)) return (error); - if (fp->f_type != DTYPE_VNODE) + if (fref(fp) == -1) + return (EBADF); + if (fp->f_type != DTYPE_VNODE) { + frele(fp); return (ESPIPE); + } + vp = (struct vnode *)fp->f_data; switch (uap->whence) { case L_INCR: - fp->f_offset += uap->offset; + offset += fp->f_offset; break; case L_XTND: - if (error = - VOP_GETATTR((struct vnode *)fp->f_data, &vattr, cred, p)) - return (error); - fp->f_offset = uap->offset + vattr.va_size; + if (error = VOP_GETATTR(vp, &vattr, cred, p)) + break; + offset += vattr.va_size; break; case L_SET: - fp->f_offset = uap->offset; break; default: - return (EINVAL); + error = EINVAL; } - *(off_t *)retval = fp->f_offset; - return (0); + if (error == 0) { + if (uap->offset > 0 && offset < 0) { + /* Incremented/relative move past max size */ + error = EOVERFLOW; + } else { + /* + * Allow negative offsets on character devices, per + * POSIX 1003.1-2001. Most likely for writing disk + * labels. 
+ */ + if (offset < 0 && vp->v_type != VCHR) { + /* Decremented/relative move before start */ + error = EINVAL; + } else { + /* Success */ + fp->f_offset = offset; + *(off_t *)retval = fp->f_offset; + } + } + } + frele(fp); + return (error); } #if COMPAT_43 @@ -1536,9 +1708,10 @@ access(p, uap, retval) t_gid = cred->cr_groups[0]; cred->cr_uid = p->p_cred->p_ruid; cred->cr_groups[0] = p->p_cred->p_rgid; - NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + error = namei(&nd); + if (error) goto out1; vp = nd.ni_vp; @@ -1685,6 +1858,12 @@ cvtstat(st, ost) } #endif /* COMPAT_43 */ +/* + * The stat buffer spare fields are uninitialized + * so don't include them in the copyout. + */ +#define STATBUFSIZE \ + (sizeof(struct stat) - sizeof(int32_t) - 2 * sizeof(int64_t)) /* * Get file status; this version follows links. */ @@ -1703,15 +1882,16 @@ stat(p, uap, retval) int error; struct nameidata nd; - NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, - uap->path, p); - if (error = namei(&nd)) + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | SHAREDLEAF | AUDITVNPATH1, + UIO_USERSPACE, uap->path, p); + error = namei(&nd); + if (error) return (error); error = vn_stat(nd.ni_vp, &sb, p); vput(nd.ni_vp); if (error) return (error); - error = copyout((caddr_t)&sb, (caddr_t)uap->ub, sizeof (sb)); + error = copyout((caddr_t)&sb, (caddr_t)uap->ub, STATBUFSIZE); return (error); } @@ -1730,50 +1910,21 @@ lstat(p, uap, retval) register_t *retval; { int error; - struct vnode *vp, *dvp; - struct stat sb, sb1; + struct vnode *vp; + struct stat sb; struct nameidata nd; - NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKPARENT, UIO_USERSPACE, - uap->path, p); - if (error = namei(&nd)) + NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE, + uap->path, p); + error = namei(&nd); + if (error) return (error); - /* - * For symbolic links, always return the attributes of its containing - * directory, except for mode, size, inode number, and links. 
- */ vp = nd.ni_vp; - dvp = nd.ni_dvp; - if ((vp->v_type != VLNK) || ((vp->v_type == VLNK) && (vp->v_tag == VT_NFS))) { - if (dvp == vp) - vrele(dvp); - else - vput(dvp); - error = vn_stat(vp, &sb, p); - vput(vp); - if (error) - return (error); - if (vp->v_type == VLNK) - sb.st_mode |= S_IFLNK; - } else { - error = vn_stat(dvp, &sb, p); - vput(dvp); - if (error) { - vput(vp); - return (error); - } - error = vn_stat(vp, &sb1, p); - vput(vp); - if (error) - return (error); - sb.st_mode &= ~S_IFDIR; - sb.st_mode |= S_IFLNK; - sb.st_nlink = sb1.st_nlink; - sb.st_size = sb1.st_size; - sb.st_blocks = sb1.st_blocks; - sb.st_ino = sb1.st_ino; - } - error = copyout((caddr_t)&sb, (caddr_t)uap->ub, sizeof (sb)); + error = vn_stat(vp, &sb, p); + vput(vp); + if (error) + return (error); + error = copyout((caddr_t)&sb, (caddr_t)uap->ub, STATBUFSIZE); return (error); } @@ -1794,9 +1945,10 @@ pathconf(p, uap, retval) int error; struct nameidata nd; - NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + error = namei(&nd); + if (error) return (error); error = VOP_PATHCONF(nd.ni_vp, uap->name, retval); vput(nd.ni_vp); @@ -1824,9 +1976,10 @@ readlink(p, uap, retval) int error; struct nameidata nd; - NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF, UIO_USERSPACE, + NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + error = namei(&nd); + if (error) return (error); vp = nd.ni_vp; if (vp->v_type != VLNK) @@ -1867,8 +2020,10 @@ chflags(p, uap, retval) int error; struct nameidata nd; - NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + AUDIT_ARG(fflags, uap->flags); + NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); + error = namei(&nd); + if (error) return (error); vp = nd.ni_vp; VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE); @@ -1899,9 +2054,15 @@ fchflags(p, uap, retval) struct file *fp; int error; + AUDIT_ARG(fd, uap->fd); + AUDIT_ARG(fflags, uap->flags); if (error = getvnode(p, uap->fd, &fp)) return (error); + vp = (struct vnode *)fp->f_data; + + AUDIT_ARG(vnpath, vp, ARG_VNODE1); + VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); VATTR_NULL(&vattr); @@ -1930,8 +2091,11 @@ chmod(p, uap, retval) int error; struct nameidata nd; - NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + AUDIT_ARG(mode, (mode_t)uap->mode); + + NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); + error = namei(&nd); + if (error) return (error); vp = nd.ni_vp; VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE); @@ -1962,15 +2126,23 @@ fchmod(p, uap, retval) struct file *fp; int error; + AUDIT_ARG(fd, uap->fd); + AUDIT_ARG(mode, (mode_t)uap->mode); if (error = getvnode(p, uap->fd, &fp)) return (error); + vp = (struct vnode *)fp->f_data; VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + + AUDIT_ARG(vnpath, vp, ARG_VNODE1); + VATTR_NULL(&vattr); vattr.va_mode = uap->mode & ALLPERMS; + AUDIT_ARG(mode, (mode_t)vattr.va_mode); error = VOP_SETATTR(vp, &vattr, p->p_ucred, p); VOP_UNLOCK(vp, 0, p); + return (error); } @@ -1994,8 +2166,11 @@ chown(p, uap, retval) int error; struct nameidata nd; - NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + AUDIT_ARG(owner, uap->uid, uap->gid); + + NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); + error = 
namei(&nd); + if (error) return (error); vp = nd.ni_vp; @@ -2003,7 +2178,7 @@ chown(p, uap, retval) * XXX A TEMPORARY HACK FOR NOW: Try to track console_user * by looking for chown() calls on /dev/console from a console process. */ - if ((vp) && (vp->v_specinfo) && + if ((vp) && (vp->v_type == VBLK || vp->v_type == VCHR) && (vp->v_specinfo) && (major(vp->v_specinfo->si_rdev) == CONSMAJOR) && (minor(vp->v_specinfo->si_rdev) == 0)) { console_user = uap->uid; @@ -2039,11 +2214,18 @@ fchown(p, uap, retval) struct file *fp; int error; + AUDIT_ARG(owner, uap->uid, uap->gid); + AUDIT_ARG(fd, uap->fd); + if (error = getvnode(p, uap->fd, &fp)) return (error); + vp = (struct vnode *)fp->f_data; VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + + AUDIT_ARG(vnpath, vp, ARG_VNODE1); + VATTR_NULL(&vattr); vattr.va_uid = uap->uid; vattr.va_gid = uap->gid; @@ -2065,7 +2247,7 @@ getutimes(usrtvp, tsp) TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]); tsp[1] = tsp[0]; } else { - if ((error = copyin(usrtvp, tv, sizeof (tv))) != 0) + if ((error = copyin((void *)usrtvp, (void *)tv, sizeof (tv))) != 0) return (error); TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]); TIMEVAL_TO_TIMESPEC(&tv[1], &tsp[1]); @@ -2117,12 +2299,19 @@ utimes(p, uap, retval) int error; struct nameidata nd; - usrtvp = uap->tptr; - if ((error = getutimes(usrtvp, ts)) != 0) + /* AUDIT: Needed to change the order of operations to do the + * name lookup first because auditing wants the path. + */ + NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); + error = namei(&nd); + if (error) return (error); - NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p); - if ((error = namei(&nd)) != 0) + + usrtvp = uap->tptr; + if ((error = getutimes(usrtvp, ts)) != 0) { + vrele(nd.ni_vp); return (error); + } error = setutimes(p, nd.ni_vp, ts, usrtvp == NULL); vrele(nd.ni_vp); return (error); @@ -2147,11 +2336,15 @@ futimes(p, uap, retval) struct timeval *usrtvp; int error; + AUDIT_ARG(fd, uap->fd); usrtvp = uap->tptr; if ((error = getutimes(usrtvp, ts)) != 0) return (error); if ((error = getvnode(p, uap->fd, &fp)) != 0) return (error); + + AUDIT_ARG(vnpath, (struct vnode *)fp->f_data, ARG_VNODE1); + return setutimes(p, (struct vnode *)fp->f_data, ts, usrtvp == NULL); } @@ -2179,7 +2372,7 @@ truncate(p, uap, retval) if (uap->length < 0) return(EINVAL); - NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p); + NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); if (error = namei(&nd)) return (error); vp = nd.ni_vp; @@ -2219,12 +2412,15 @@ ftruncate(p, uap, retval) struct file *fp; int error; + AUDIT_ARG(fd, uap->fd); if (uap->length < 0) return(EINVAL); if (error = fdgetf(p, uap->fd, &fp)) return (error); + AUDIT_ARG(vnpath, (struct vnode *)fp->f_data, ARG_VNODE1); + if (fp->f_type == DTYPE_PSXSHM) { return(pshm_truncate(p, fp, uap->fd, uap->length, retval)); } @@ -2322,10 +2518,13 @@ fsync(p, uap, retval) if (error = getvnode(p, uap->fd, &fp)) return (error); + if (fref(fp) == -1) + return (EBADF); vp = (struct vnode *)fp->f_data; vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p); VOP_UNLOCK(vp, 0, p); + frele(fp); return (error); } @@ -2348,16 +2547,15 @@ copyfile(p, uap, retval) register_t *retval; { register struct vnode *tvp, *fvp, *tdvp; - register struct ucred *cred = p->p_ucred; + register struct ucred *cred = p->p_ucred; struct nameidata fromnd, tond; int error; - - /* Check that the flags are valid. - */ + + /* Check that the flags are valid. 
*/ if (uap->flags & ~CPF_MASK) { - return(EINVAL); - } + return(EINVAL); + } NDINIT(&fromnd, LOOKUP, SAVESTART, UIO_USERSPACE, uap->from, p); @@ -2385,7 +2583,7 @@ copyfile(p, uap, retval) goto out; } - if (error = VOP_ACCESS(tdvp, VWRITE, cred, p)) + if (error = VOP_ACCESS(tdvp, VWRITE, cred, p)) goto out; if (fvp == tdvp) @@ -2441,17 +2639,21 @@ rename(p, uap, retval) int error; int mntrename; int casesense,casepres; - + char *nameptr=NULL, *oname; + struct vnode *oparent; + mntrename = FALSE; bwillwrite(); - NDINIT(&fromnd, DELETE, WANTPARENT | SAVESTART, UIO_USERSPACE, - uap->from, p); - if (error = namei(&fromnd)) + NDINIT(&fromnd, DELETE, WANTPARENT | SAVESTART | AUDITVNPATH1, + UIO_USERSPACE, uap->from, p); + error = namei(&fromnd); + if (error) return (error); fvp = fromnd.ni_vp; - NDINIT(&tond, RENAME, LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART, + NDINIT(&tond, RENAME, + LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | AUDITVNPATH2, UIO_USERSPACE, uap->to, p); if (fromnd.ni_vp->v_type == VDIR) tond.ni_cnd.cn_flags |= WILLBEDIR; @@ -2538,10 +2740,33 @@ out: VOP_LEASE(fromnd.ni_dvp, p, p->p_ucred, LEASE_WRITE); if (tvp) VOP_LEASE(tvp, p, p->p_ucred, LEASE_WRITE); + + // XXXdbg - so that the fs won't block when it vrele()'s + // these nodes before returning + if (fromnd.ni_dvp != tdvp) { + vget(tdvp, 0, p); + } + + // save these off so we can later verify that fvp is the same + oname = VNAME(fvp); + oparent = VPARENT(fvp); + + nameptr = add_name(tond.ni_cnd.cn_nameptr, + tond.ni_cnd.cn_namelen, + tond.ni_cnd.cn_hash, 0); + + error = VOP_RENAME(fromnd.ni_dvp, fvp, &fromnd.ni_cnd, tond.ni_dvp, tvp, &tond.ni_cnd); - if (error) - goto out1; + if (error) { + remove_name(nameptr); + nameptr = NULL; + if (fromnd.ni_dvp != tdvp) { + vrele(tdvp); + } + + goto out1; + } /* * update filesystem's mount point data @@ -2588,6 +2813,49 @@ out: vrele(fvp); vfs_unbusy(mp, p); } + + + // fix up name & parent pointers. note that we first + // check that fvp has the same name/parent pointers it + // had before the rename call and then we lock fvp so + // that it won't go away on us when we hit blocking + // points like remove_name() or vrele() where fvp could + // get recycled. 
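/*
 * Illustrative sketch, not part of the patch: mknod(), mkfifo(), symlink()
 * and rename() above all use the same sample/reference/re-check dance
 * before publishing a name-cache entry, because the target vnode is
 * unlocked and can be recycled across any blocking point.  Condensed here
 * (the helper name is invented for illustration), assuming this tree's
 * vget()/vrele(), VNAME()/VPARENT() and add_name()/remove_name():
 */
static void
attach_name_once(struct vnode *vp, struct vnode *dvp, char *nameptr,
    u_long vpid, struct proc *p)
{
	if (vget(vp, 0, p) == 0) {
		/* vp is pinned; verify it was not recycled meanwhile */
		if (vpid == vp->v_id && VNAME(vp) == NULL) {
			VNAME(vp) = nameptr;		/* vp now owns the name */
			nameptr = NULL;
			if (VPARENT(vp) == NULL && vget(dvp, 0, p) == 0)
				VPARENT(vp) = dvp;	/* ref kept until vp recycles */
		}
		vrele(vp);
	}
	if (nameptr != NULL)
		remove_name(nameptr);			/* lost the race: drop it */
}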
+ if (oname == VNAME(fvp) && oparent == VPARENT(fvp) && vget(fvp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) { + if (VNAME(fvp)) { + char *tmp = VNAME(fvp); + VNAME(fvp) = NULL; + remove_name(tmp); + } + + VNAME(fvp) = nameptr; + nameptr = NULL; + + if (fromnd.ni_dvp != tdvp) { + struct vnode *tmpvp; + + tmpvp = VPARENT(fvp); + VPARENT(fvp) = NULL; + vrele(tmpvp); + + VPARENT(fvp) = tdvp; + + // note: we don't vrele() tdvp because we want to keep + // the reference until fvp gets recycled + } + + vput(fvp); + + } else { + // if fvp isn't kosher anymore and we locked tdvp, + // release tdvp + if (fromnd.ni_dvp != tdvp) { + vrele(tdvp); + } + remove_name(nameptr); + nameptr = NULL; + } + } else { VOP_ABORTOP(tond.ni_dvp, &tond.ni_cnd); if (tdvp == tvp) @@ -2630,11 +2898,14 @@ mkdir(p, uap, retval) struct vattr vattr; int error; struct nameidata nd; + char *nameptr; + AUDIT_ARG(mode, (mode_t)uap->mode); bwillwrite(); - NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, uap->path, p); + NDINIT(&nd, CREATE, LOCKPARENT | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); nd.ni_cnd.cn_flags |= WILLBEDIR; - if (error = namei(&nd)) + error = namei(&nd); + if (error) return (error); vp = nd.ni_vp; if (vp != NULL) { @@ -2650,9 +2921,18 @@ mkdir(p, uap, retval) vattr.va_type = VDIR; vattr.va_mode = (uap->mode & ACCESSPERMS) &~ p->p_fd->fd_cmask; VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE); + + nameptr = add_name(nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen, nd.ni_cnd.cn_hash, 0); + error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr); - if (!error) - vput(nd.ni_vp); + if (!error) { + VNAME(nd.ni_vp) = nameptr; + if (VPARENT(nd.ni_vp) == NULL && vget(nd.ni_dvp, 0, p) == 0) { + VPARENT(nd.ni_vp) = nd.ni_dvp; + } + + vput(nd.ni_vp); + } return (error); } @@ -2674,9 +2954,10 @@ rmdir(p, uap, retval) struct nameidata nd; bwillwrite(); - NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF, UIO_USERSPACE, + NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + error = namei(&nd); + if (error) return (error); vp = nd.ni_vp; if (vp->v_type != VDIR) { @@ -2757,7 +3038,7 @@ unionread: # if (BYTE_ORDER != LITTLE_ENDIAN) if (vp->v_mount->mnt_maxsymlinklen <= 0) { error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, - (int *)0, (u_long *)0); + (int *)0, (u_long **)0); fp->f_offset = auio.uio_offset; } else # endif @@ -2769,7 +3050,7 @@ unionread: MALLOC(dirbuf, caddr_t, uap->count, M_TEMP, M_WAITOK); kiov.iov_base = dirbuf; error = VOP_READDIR(vp, &kuio, fp->f_cred, &eofflag, - (int *)0, (u_long *)0); + (int *)0, (u_long **)0); fp->f_offset = kuio.uio_offset; if (error == 0) { readcnt = uap->count - kuio.uio_resid; @@ -2893,8 +3174,13 @@ getdirentries(p, uap, retval) long loff; int error, eofflag; - if (error = getvnode(p, uap->fd, &fp)) + AUDIT_ARG(fd, uap->fd); + error = getvnode(p, uap->fd, &fp); + if (error) return (error); + + AUDIT_ARG(vnpath, (struct vnode *)fp->f_data, ARG_VNODE1); + if ((fp->f_flag & FREAD) == 0) return (EBADF); vp = (struct vnode *)fp->f_data; @@ -2912,7 +3198,7 @@ unionread: vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); loff = auio.uio_offset = fp->f_offset; error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, - (int *)0, (u_long *)0); + (int *)0, (u_long **)0); fp->f_offset = auio.uio_offset; VOP_UNLOCK(vp, 0, p); if (error) @@ -2993,6 +3279,7 @@ umask(p, uap, retval) { register struct filedesc *fdp; + AUDIT_ARG(mask, uap->newmask); fdp = p->p_fd; *retval = fdp->fd_cmask; fdp->fd_cmask = uap->newmask & ALLPERMS; @@ -3018,8 +3305,9 @@ revoke(p, 
uap, retval) int error; struct nameidata nd; - NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p); - if (error = namei(&nd)) + NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, uap->path, p); + error = namei(&nd); + if (error) return (error); vp = nd.ni_vp; if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) @@ -3226,15 +3514,16 @@ getattrlist (p,uap,retval) } /* Get the vnode for the file we are getting info on. */ - nameiflags = LOCKLEAF; + nameiflags = LOCKLEAF | SHAREDLEAF; if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW; - NDINIT(&nd, LOOKUP, nameiflags, UIO_USERSPACE, (char *)uap->path, p); + NDINIT(&nd, LOOKUP, nameiflags | AUDITVNPATH1, UIO_USERSPACE, + (char *)uap->path, p); - if (error = namei(&nd)) + error = namei(&nd); + if (error) return (error); /* Set up the UIO structure for use by the vfs routine */ - aiov.iov_base = uap->attributeBuffer; aiov.iov_len = uap->bufferSize; @@ -3305,9 +3594,11 @@ setattrlist (p,uap,retval) /* Get the vnode for the file whose attributes are being set. */ nameiflags = LOCKLEAF; if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW; - NDINIT(&nd, LOOKUP, nameiflags, UIO_USERSPACE, (char *)uap->path, p); - if (error = namei(&nd)) - return (error); + NDINIT(&nd, LOOKUP, nameiflags | AUDITVNPATH1, UIO_USERSPACE, + (char *)uap->path, p); + error = namei(&nd); + if (error) + return (error); /* Set up the UIO structure for use by the vfs routine */ aiov.iov_base = uap->attributeBuffer; @@ -3364,6 +3655,8 @@ getdirentriesattr (p,uap,retval) long loff; struct attrlist attributelist; + AUDIT_ARG(fd, uap->fd); + /* Get the attributes into kernel space */ if (error = copyin((caddr_t)uap->alist, (caddr_t) &attributelist, sizeof (attributelist))) return(error); @@ -3372,6 +3665,9 @@ getdirentriesattr (p,uap,retval) if (error = getvnode(p, uap->fd, &fp)) return (error); + + AUDIT_ARG(vnpath, (struct vnode *)fp->f_data, ARG_VNODE1); + if ((fp->f_flag & FREAD) == 0) return(EBADF); vp = (struct vnode *)fp->f_data; @@ -3393,7 +3689,7 @@ getdirentriesattr (p,uap,retval) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); error = VOP_READDIRATTR (vp, &attributelist, &auio, actualcount, uap->options, &newstate, &eofflag, - &actualcount, ((u_long **)0), p->p_cred); + &actualcount, ((u_long **)0), p->p_ucred); VOP_UNLOCK(vp, 0, p); if (error) return (error); @@ -3440,19 +3736,23 @@ exchangedata (p,uap,retval) /* Global lock, to prevent race condition, only one exchange at a time */ lockmgr(&exchangelock, LK_EXCLUSIVE , (struct slock *)0, p); - NDINIT(&fnd, LOOKUP, nameiflags, UIO_USERSPACE, (char *) uap->path1, p); + NDINIT(&fnd, LOOKUP, nameiflags | AUDITVNPATH1, UIO_USERSPACE, + (char *) uap->path1, p); - if (error = namei(&fnd)) + error = namei(&fnd); + if (error) goto out2; fvp = fnd.ni_vp; - NDINIT(&snd, LOOKUP, nameiflags, UIO_USERSPACE, (char *)uap->path2, p); + NDINIT(&snd, LOOKUP, nameiflags | AUDITVNPATH2, UIO_USERSPACE, + (char *)uap->path2, p); - if (error = namei(&snd)) { - vrele(fvp); - goto out2; - } + error = namei(&snd); + if (error) { + vrele(fvp); + goto out2; + } svp = snd.ni_vp; @@ -3476,6 +3776,14 @@ exchangedata (p,uap,retval) /* Ok, make the call */ error = VOP_EXCHANGE (fvp, svp, p->p_ucred, p); + if (error == 0 && VPARENT(fvp) != VPARENT(svp)) { + struct vnode *tmp; + + tmp = VPARENT(fvp); + VPARENT(fvp) = VPARENT(svp); + VPARENT(svp) = tmp; + } + out: vput (svp); vput (fvp); @@ -3664,9 +3972,11 @@ searchfs (p,uap,retval) nameiflags = LOCKLEAF; if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW; - 
NDINIT(&nd, LOOKUP, nameiflags, UIO_USERSPACE, (char *)uap->path, p); + NDINIT(&nd, LOOKUP, nameiflags | AUDITVNPATH1, UIO_USERSPACE, + (char *)uap->path, p); - if (error = namei(&nd)) + error = namei(&nd); + if (error) goto freeandexit; vp = nd.ni_vp; @@ -3826,3 +4136,297 @@ sync_internal(void) return (error); } /* end of sync_internal call */ + + +// XXXdbg fmod watching calls +#define NUM_CHANGE_NODES 256 +static int changed_init=0; +static volatile int fmod_watch_enabled = 0; +static simple_lock_data_t changed_nodes_lock; // guard access +static volatile struct vnode *changed_nodes[NUM_CHANGE_NODES]; +static volatile pid_t changed_nodes_pid[NUM_CHANGE_NODES]; +static volatile int changed_rd_index=0, changed_wr_index=0; +static volatile int notifier_sleeping=0; + + +void +notify_filemod_watchers(struct vnode *vp, struct proc *p) +{ + int ret; + + // only want notification on regular files. + if (vp->v_type != VREG || fmod_watch_enabled == 0) { + return; + } + + // grab a reference so it doesn't go away + if (vget(vp, 0, p) != 0) { + return; + } + + retry: + simple_lock(&changed_nodes_lock); + + // If the table is full, block until it clears up + if (((changed_wr_index+1) % NUM_CHANGE_NODES) == changed_rd_index) { + simple_unlock(&changed_nodes_lock); + + notifier_sleeping++; + // wait up to 10 seconds for the queue to drain + ret = tsleep((caddr_t)&changed_wr_index, PINOD, "changed_nodes_full", 10*hz); + if (ret != 0 || fmod_watch_enabled == 0) { + notifier_sleeping--; + printf("notify_filemod: err %d from tsleep/enabled %d. bailing out (vp 0x%x).\n", + ret, fmod_watch_enabled, vp); + vrele(vp); + return; + } + + notifier_sleeping--; + goto retry; + } + + // insert our new guy + if (changed_nodes[changed_wr_index] != NULL) { + panic("notify_fmod_watchers: index %d is 0x%x, not null!\n", + changed_wr_index, changed_nodes[changed_wr_index]); + } + changed_nodes[changed_wr_index] = vp; + changed_nodes_pid[changed_wr_index] = current_proc()->p_pid; + changed_wr_index = (changed_wr_index + 1) % NUM_CHANGE_NODES; + + simple_unlock(&changed_nodes_lock); + + wakeup((caddr_t)&changed_rd_index); +} + + +struct fmod_watch_args { + int *new_fd; + char *pathbuf; + int len; + pid_t pid; +}; + +int +fmod_watch(struct proc *p, struct fmod_watch_args *uap, register_t *retval) +{ + int fd, didhold = 0; + struct filedesc *fdp; + struct file *fp; + struct vnode *vp; + int flags; + int type, indx, error, need_wakeup=0; + struct flock lf; + struct nameidata nd; + extern struct fileops vnops; + pid_t pid; + + if (fmod_watch_enabled == 0) { + *retval = -1; + return EINVAL; + } + + p = current_proc(); + + if (changed_init == 0) { + changed_init = 1; + simple_lock_init(&changed_nodes_lock); + } + + if (changed_rd_index == changed_wr_index) { + // there's nothing to do, go to sleep + error = tsleep((caddr_t)&changed_rd_index, PUSER|PCATCH, "changed_nodes_empty", 0); + if (error != 0) { + // XXXdbg - what if after we unblock the changed_nodes + // table is full? We should wakeup() the writer. + *retval = -1; + return error; + } + } + + simple_lock(&changed_nodes_lock); + + vp = (struct vnode *)changed_nodes[changed_rd_index]; + pid = changed_nodes_pid[changed_rd_index]; + + changed_nodes[changed_rd_index] = NULL; + changed_rd_index = (changed_rd_index + 1) % NUM_CHANGE_NODES; + + if (vp == NULL) { + panic("watch_file_changes: Someone put a null vnode in my table! 
(%d %d)\n", + changed_rd_index, changed_wr_index); + } + + simple_unlock(&changed_nodes_lock); + + // if the writers are blocked, wake them up as we just freed up + // some space for them. + if (notifier_sleeping > 0) { + wakeup((caddr_t)&changed_wr_index); + } + + if (vp->v_type != VREG) { + error = EBADF; + goto err1; + } + + if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p)) != 0) { + printf("fmod_watch: vn_lock returned %d\n", error); + goto err1; + } + + // first copy out the name + if (uap->pathbuf) { + char *buff; + int len=MAXPATHLEN; + + MALLOC(buff, char *, len, M_TEMP, M_WAITOK); + error = vn_getpath(vp, buff, &len); + if (error == 0) { + if (len < uap->len) + error = copyout(buff, (void *)uap->pathbuf, len); + else + error = ENOSPC; + } + FREE(buff, M_TEMP); + if (error) { + goto err1; + } + } + + // now copy out the pid of the person that changed the file + if (uap->pid) { + if ((error = copyout((caddr_t)&pid, (void *)uap->pid, sizeof(pid_t))) != 0) { + printf("fmod_watch: failed to copy out the pid (%d)\n", pid); + goto err1; + } + } + + // now create a file descriptor for this vnode + fdp = p->p_fd; + flags = FREAD; + if (error = falloc(p, &fp, &indx)) { + printf("fmod_watch: failed to allocate an fd...\n"); + goto err2; + } + + if ((error = copyout((caddr_t)&indx, (void *)uap->new_fd, sizeof(int))) != 0) { + printf("fmod_watch: failed to copy out the new fd (%d)\n", indx); + goto err3; + } + + fp->f_flag = flags & FMASK; + fp->f_type = DTYPE_VNODE; + fp->f_ops = &vnops; + fp->f_data = (caddr_t)vp; + + if (UBCINFOEXISTS(vp) && ((didhold = ubc_hold(vp)) == 0)) { + goto err3; + } + + error = VOP_OPEN(vp, flags, p->p_ucred, p); + if (error) { + goto err4; + } + + VOP_UNLOCK(vp, 0, p); + + *fdflags(p, indx) &= ~UF_RESERVED; + + // note: we explicitly don't vrele() here because it + // happens when the fd is closed. + + return error; + + err4: + if (didhold) { + ubc_rele(vp); + } + err3: + ffree(fp); + fdrelse(p, indx); + err2: + VOP_UNLOCK(vp, 0, p); + err1: + vrele(vp); // undoes the vref() in notify_filemod_watchers() + + *retval = -1; + return error; +} + +static int +enable_fmod_watching(register_t *retval) +{ + *retval = -1; + + if (!is_suser()) { + return EPERM; + } + + // XXXdbg for now we only allow one watcher at a time. + if (fmod_watch_enabled) { + return EBUSY; + } + + fmod_watch_enabled++; + *retval = 0; + return 0; +} + +static int +disable_fmod_watching(register_t *retval) +{ + fmod_watch_enabled--; + if (fmod_watch_enabled < 0) { + panic("fmod_watching: too many disables! (%d)\n", fmod_watch_enabled); + } + + // if we're the last guy, clear out any remaining vnodes + // in the table so they don't remain referenced. + // + if (fmod_watch_enabled == 0) { + int i; + for(i=changed_rd_index; i != changed_wr_index; ) { + if (changed_nodes[i] == NULL) { + panic("disable_fmod_watch: index %d is NULL!\n", i); + } + vrele((struct vnode *)changed_nodes[i]); + changed_nodes[i] = NULL; + i = (i + 1) % NUM_CHANGE_NODES; + } + changed_wr_index = changed_rd_index = 0; + } + + // wake up anyone that may be waiting for the + // queue to clear out. 
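/*
 * Illustrative sketch, not part of the patch: changed_nodes[] is a classic
 * one-lock bounded ring buffer.  Producers and the consumer sleep on
 * opposite index addresses and wake the other side, which is why this
 * drain path keeps hitting &changed_wr_index.  Reduced to its core, with
 * the same tsleep()/wakeup() primitives and the simple_lock elided:
 *
 *	produce(vp):
 *		while (((wr + 1) % N) == rd)		-- full; one slot kept spare
 *			tsleep(&wr, PINOD, "full", 10*hz);
 *		ring[wr] = vp;
 *		wr = (wr + 1) % N;
 *		wakeup(&rd);
 *
 *	consume():
 *		while (rd == wr)			-- empty
 *			tsleep(&rd, PUSER|PCATCH, "empty", 0);
 *		vp = ring[rd];
 *		rd = (rd + 1) % N;
 *		wakeup(&wr);
 *
 * The producer above does re-test via its retry: loop; the consumer in
 * fmod_watch() tests the empty condition with a single if, and looping on
 * the condition after each tsleep() is the more robust form.
 */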
+ // + while(notifier_sleeping) { + wakeup((caddr_t)&changed_wr_index); + + // yield the cpu so the notifiers can run + tsleep((caddr_t)&fmod_watch_enabled, PINOD, "disable_fmod_watch", 1); + } + + *retval = 0; + return 0; +} + + +struct fmod_watch_enable_args { + int on_or_off; +}; + +int +fmod_watch_enable(struct proc *p, struct fmod_watch_enable_args *uap, register_t *retval) +{ + int ret; + + if (uap->on_or_off != 0) { + ret = enable_fmod_watching(retval); + } else { + ret = disable_fmod_watching(retval); + } + + return ret; +} diff --git a/bsd/vfs/vfs_utfconv.c b/bsd/vfs/vfs_utfconv.c index 7d2e88396..45e2b0c7e 100644 --- a/bsd/vfs/vfs_utfconv.c +++ b/bsd/vfs/vfs_utfconv.c @@ -317,8 +317,8 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, { u_int16_t* bufstart; u_int16_t* bufend; - u_int16_t ucs_ch; - u_int8_t byte; + unsigned int ucs_ch; + unsigned int byte; int result = 0; int decompose, precompose, swapbytes; @@ -335,7 +335,7 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, /* check for ascii */ if (byte < 0x80) { - ucs_ch = byte; /* 1st byte */ + ucs_ch = byte; /* 1st byte */ } else { u_int32_t ch; int extrabytes = utf_extrabytes[byte >> 3]; @@ -345,44 +345,66 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, utf8len -= extrabytes; switch (extrabytes) { - case 1: ch = byte; /* 1st byte */ - ch <<= 6; - ch += *utf8p++; /* 2nd byte */ - ch -= 0x00003080UL; - if (ch < 0x0080) - goto invalid; - ucs_ch = ch; + case 1: + ch = byte; ch <<= 6; /* 1st byte */ + byte = *utf8p++; /* 2nd byte */ + if ((byte >> 6) != 2) + goto invalid; + ch += byte; + ch -= 0x00003080UL; + if (ch < 0x0080) + goto invalid; + ucs_ch = ch; break; - - case 2: ch = byte; /* 1st byte */ - ch <<= 6; - ch += *utf8p++; /* 2nd byte */ - ch <<= 6; - ch += *utf8p++; /* 3rd byte */ - ch -= 0x000E2080UL; - if (ch < 0x0800) + case 2: + ch = byte; ch <<= 6; /* 1st byte */ + byte = *utf8p++; /* 2nd byte */ + if ((byte >> 6) != 2) + goto invalid; + ch += byte; ch <<= 6; + byte = *utf8p++; /* 3rd byte */ + if ((byte >> 6) != 2) + goto invalid; + ch += byte; + ch -= 0x000E2080UL; + if (ch < 0x0800) + goto invalid; + if (ch >= 0xD800) { + if (ch <= 0xDFFF) goto invalid; - ucs_ch = ch; - break; - - case 3: ch = byte; /* 1st byte */ - ch <<= 6; - ch += *utf8p++; /* 2nd byte */ - ch <<= 6; - ch += *utf8p++; /* 3rd byte */ - ch <<= 6; - ch += *utf8p++; /* 4th byte */ - ch -= 0x03C82080UL + SP_HALF_BASE; - ucs_ch = (ch >> SP_HALF_SHIFT) + SP_HIGH_FIRST; - *ucsp++ = swapbytes ? NXSwapShort(ucs_ch) : ucs_ch; - if (ucsp >= bufend) - goto toolong; - ucs_ch = (ch & SP_HALF_MASK) + SP_LOW_FIRST; - *ucsp++ = swapbytes ? NXSwapShort(ucs_ch) : ucs_ch; + if (ch == 0xFFFE || ch == 0xFFFF) + goto invalid; + } + ucs_ch = ch; + break; + case 3: + ch = byte; ch <<= 6; /* 1st byte */ + byte = *utf8p++; /* 2nd byte */ + if ((byte >> 6) != 2) + goto invalid; + ch += byte; ch <<= 6; + byte = *utf8p++; /* 3rd byte */ + if ((byte >> 6) != 2) + goto invalid; + ch += byte; ch <<= 6; + byte = *utf8p++; /* 4th byte */ + if ((byte >> 6) != 2) + goto invalid; + ch += byte; + ch -= 0x03C82080UL + SP_HALF_BASE; + ucs_ch = (ch >> SP_HALF_SHIFT) + SP_HIGH_FIRST; + if (ucs_ch < SP_HIGH_FIRST || ucs_ch > SP_HIGH_LAST) + goto invalid; + *ucsp++ = swapbytes ? NXSwapShort(ucs_ch) : ucs_ch; + if (ucsp >= bufend) + goto toolong; + ucs_ch = (ch & SP_HALF_MASK) + SP_LOW_FIRST; + if (ucs_ch < SP_LOW_FIRST || ucs_ch > SP_LOW_LAST) + goto invalid; + *ucsp++ = swapbytes ? 
NXSwapShort(ucs_ch) : ucs_ch; continue; - default: - goto invalid; + goto invalid; } if (decompose) { if (unicode_decomposeable(ucs_ch)) { diff --git a/bsd/vfs/vfs_vnops.c b/bsd/vfs/vfs_vnops.c index 40177b401..349f32c6d 100644 --- a/bsd/vfs/vfs_vnops.c +++ b/bsd/vfs/vfs_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -93,9 +93,11 @@ static int vn_write __P((struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct proc *p)); static int vn_select __P(( struct file *fp, int which, void * wql, struct proc *p)); +static int vn_kqfilt_add __P((struct file *fp, struct knote *kn, struct proc *p)); +static int vn_kqfilt_remove __P((struct vnode *vp, uintptr_t ident, struct proc *p)); struct fileops vnops = - { vn_read, vn_write, vn_ioctl, vn_select, vn_closefile }; + { vn_read, vn_write, vn_ioctl, vn_select, vn_closefile, vn_kqfilt_add }; /* * Common code for vnode open operations. @@ -105,6 +107,15 @@ int vn_open(ndp, fmode, cmode) register struct nameidata *ndp; int fmode, cmode; +{ + return vn_open_modflags(ndp,&fmode,cmode); +} + +__private_extern__ int +vn_open_modflags(ndp, fmodep, cmode) + register struct nameidata *ndp; + int *fmodep; + int cmode; { register struct vnode *vp; register struct proc *p = ndp->ni_cnd.cn_proc; @@ -113,16 +124,22 @@ vn_open(ndp, fmode, cmode) struct vattr *vap = &vat; int error; int didhold = 0; + char *nameptr; + int fmode = *fmodep; if (fmode & O_CREAT) { ndp->ni_cnd.cn_nameiop = CREATE; - ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF; + ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF | AUDITVNPATH1; if ((fmode & O_EXCL) == 0) ndp->ni_cnd.cn_flags |= FOLLOW; bwillwrite(); if (error = namei(ndp)) return (error); if (ndp->ni_vp == NULL) { + nameptr = add_name(ndp->ni_cnd.cn_nameptr, + ndp->ni_cnd.cn_namelen, + ndp->ni_cnd.cn_hash, 0); + VATTR_NULL(vap); vap->va_type = VREG; vap->va_mode = cmode; @@ -130,10 +147,17 @@ vn_open(ndp, fmode, cmode) vap->va_vaflags |= VA_EXCLUSIVE; VOP_LEASE(ndp->ni_dvp, p, cred, LEASE_WRITE); if (error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp, - &ndp->ni_cnd, vap)) + &ndp->ni_cnd, vap)) { + remove_name(nameptr); return (error); + } fmode &= ~O_TRUNC; vp = ndp->ni_vp; + + VNAME(vp) = nameptr; + if (vget(ndp->ni_dvp, 0, p) == 0) { + VPARENT(vp) = ndp->ni_dvp; + } } else { VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd); if (ndp->ni_dvp == ndp->ni_vp) @@ -150,7 +174,7 @@ vn_open(ndp, fmode, cmode) } } else { ndp->ni_cnd.cn_nameiop = LOOKUP; - ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF; + ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | AUDITVNPATH1; if (error = namei(ndp)) return (error); vp = ndp->ni_vp; @@ -195,15 +219,6 @@ vn_open(ndp, fmode, cmode) goto bad; } } - if (fmode & O_TRUNC) { - VOP_UNLOCK(vp, 0, p); /* XXX */ - VOP_LEASE(vp, p, cred, LEASE_WRITE); - (void)vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */ - VATTR_NULL(vap); - vap->va_size = 0; - if (error = VOP_SETATTR(vp, vap, cred, p)) - goto bad; - } if (error = VOP_OPEN(vp, fmode, cred, p)) { goto bad; @@ -212,12 +227,14 @@ vn_open(ndp, fmode, cmode) if (fmode & FWRITE) if (++vp->v_writecount <= 0) panic("vn_open: v_writecount"); + *fmodep = fmode; return (0); bad: VOP_UNLOCK(vp, 0, p); if (didhold) ubc_rele(vp); vrele(vp); + ndp->ni_vp = NULL; return (error); } @@ -255,8 +272,17 @@ vn_close(vp, flags, cred, p) { int error; - if (flags & FWRITE) + if (flags & FWRITE) { + vp->v_writecount--; + + { + extern void 
notify_filemod_watchers(struct vnode *vp, struct proc *p); + + notify_filemod_watchers(vp, p); + } + } + error = VOP_CLOSE(vp, flags, cred, p); ubc_rele(vp); vrele(vp); @@ -558,7 +584,7 @@ vn_stat(vp, sb, p) sb->st_blksize = vap->va_blocksize; sb->st_flags = vap->va_flags; /* Do not give the generation number out to unpriviledged users */ - if (suser(p->p_ucred, &p->p_acflag)) + if (vap->va_gen && suser(p->p_ucred, &p->p_acflag)) sb->st_gen = 0; else sb->st_gen = vap->va_gen; @@ -691,3 +717,34 @@ vn_closefile(fp, p) return (vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp->f_cred, p)); } + +static int +vn_kqfilt_add(fp, kn, p) + struct file *fp; + struct knote *kn; + struct proc *p; +{ + struct vnode *vp = (struct vnode *)fp->f_data; + int error; + + error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + if (error) return (error); + error = VOP_KQFILT_ADD(vp, kn, p); + (void)VOP_UNLOCK(vp, 0, p); + return (error); +} + +static int +vn_kqfilt_remove(vp, ident, p) + struct vnode *vp; + uintptr_t ident; + struct proc *p; +{ + int error; + + error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + if (error) return (error); + error = VOP_KQFILT_REMOVE(vp, ident, p); + (void)VOP_UNLOCK(vp, 0, p); + return (error); +} diff --git a/bsd/vfs/vnode_if.c b/bsd/vfs/vnode_if.c index 3a5438e57..2c2f9cc04 100644 --- a/bsd/vfs/vnode_if.c +++ b/bsd/vfs/vnode_if.c @@ -390,6 +390,38 @@ struct vnodeop_desc vop_exchange_desc = { NULL, }; +int vop_kqfilt_add_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_kqfilt_add_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_kqfilt_add_desc = { + 0, + "vop_kqfilt_add", + 0, + vop_kqfilt_add_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_kqfilt_add_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_kqfilt_remove_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_kqfilt_remove_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_kqfilt_remove_desc = { + 0, + "vop_kqfilt_remove", + 0, + vop_kqfilt_remove_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_kqfilt_remove_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + int vop_revoke_vp_offsets[] = { VOPARG_OFFSETOF(struct vop_revoke_args,a_vp), VDESC_NO_OFFSET @@ -479,7 +511,7 @@ int vop_link_vp_offsets[] = { struct vnodeop_desc vop_link_desc = { 0, "vop_link", - 0 | VDESC_VP0_WILLRELE, + 0 | VDESC_VP1_WILLRELE, vop_link_vp_offsets, VDESC_NO_OFFSET, VDESC_NO_OFFSET, @@ -1096,6 +1128,8 @@ struct vnodeop_desc *vfs_op_descs[] = { &vop_ioctl_desc, &vop_select_desc, &vop_exchange_desc, + &vop_kqfilt_add_desc, + &vop_kqfilt_remove_desc, &vop_revoke_desc, &vop_mmap_desc, &vop_fsync_desc, diff --git a/bsd/vfs/vnode_if.sh b/bsd/vfs/vnode_if.sh index 574dd9770..84b383645 100644 --- a/bsd/vfs/vnode_if.sh +++ b/bsd/vfs/vnode_if.sh @@ -315,13 +315,15 @@ function doit() { printf("\t0"); vpnum = 0; for (i=0; i +/* + * Routine: macx_backing_store_recovery + * Function: + * Syscall interface to set a tasks privilege + * level so that it is not subject to + * macx_backing_store_suspend + */ +int +macx_backing_store_recovery( + int pid) +{ + int error; + struct proc *p = current_proc(); + boolean_t funnel_state; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + if ((error = suser(p->p_ucred, &p->p_acflag))) + goto backing_store_recovery_return; + + /* for now restrict backing_store_recovery */ + /* usage to only present task */ + if(pid != p->p_pid) { + error = EINVAL; + goto backing_store_recovery_return; + } + + task_backing_store_privileged(p->task); + 
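+	/* task is now exempt from macx_backing_store_suspend() throttling */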
+backing_store_recovery_return: + (void) thread_funnel_set(kernel_flock, FALSE); + return(error); +} + +/* + * Routine: macx_backing_store_suspend + * Function: + * Syscall interface to stop new demand for + * backing store when backing store is low + */ + +int +macx_backing_store_suspend( + boolean_t suspend) +{ + int error; + struct proc *p = current_proc(); + boolean_t funnel_state; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + if ((error = suser(p->p_ucred, &p->p_acflag))) + goto backing_store_suspend_return; + + vm_backing_store_disable(suspend); + +backing_store_suspend_return: + (void) thread_funnel_set(kernel_flock, FALSE); + return(error); +} + /* * Routine: macx_swapon * Function: @@ -205,6 +264,8 @@ macx_swapon( /* Mark this vnode as being used for swapfile */ SET(vp->v_flag, VSWAP); + ubc_setcred(vp, p); + /* * take an extra reference on the vnode to keep * vnreclaim() away from this vnode. diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c index 7922e830f..f84e8b4c5 100644 --- a/bsd/vm/vm_unix.c +++ b/bsd/vm/vm_unix.c @@ -69,7 +69,6 @@ #include -extern shared_region_mapping_t system_shared_region; extern zone_t lsf_zone; useracc(addr, len, prot) @@ -79,7 +78,7 @@ useracc(addr, len, prot) { return (vm_map_check_protection( current_map(), - trunc_page(addr), round_page(addr+len), + trunc_page_32((unsigned int)addr), round_page_32((unsigned int)(addr+len)), prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE)); } @@ -88,8 +87,8 @@ vslock(addr, len) int len; { kern_return_t kret; - kret = vm_map_wire(current_map(), trunc_page(addr), - round_page(addr+len), + kret = vm_map_wire(current_map(), trunc_page_32((unsigned int)addr), + round_page_32((unsigned int)(addr+len)), VM_PROT_READ | VM_PROT_WRITE ,FALSE); switch (kret) { @@ -120,7 +119,7 @@ vsunlock(addr, len, dirtied) #if FIXME /* [ */ if (dirtied) { pmap = get_task_pmap(current_task()); - for (vaddr = trunc_page(addr); vaddr < round_page(addr+len); + for (vaddr = trunc_page((unsigned int)(addr)); vaddr < round_page((unsigned int)(addr+len)); vaddr += PAGE_SIZE) { paddr = pmap_extract(pmap, vaddr); pg = PHYS_TO_VM_PAGE(paddr); @@ -131,8 +130,8 @@ vsunlock(addr, len, dirtied) #ifdef lint dirtied++; #endif /* lint */ - kret = vm_map_unwire(current_map(), trunc_page(addr), - round_page(addr+len), FALSE); + kret = vm_map_unwire(current_map(), trunc_page_32((unsigned int)(addr)), + round_page_32((unsigned int)(addr+len)), FALSE); switch (kret) { case KERN_SUCCESS: return (0); @@ -392,12 +391,33 @@ load_shared_file( } if(local_flags & QUERY_IS_SYSTEM_REGION) { + shared_region_mapping_t default_shared_region; vm_get_shared_region(current_task(), &shared_region); - if (shared_region == system_shared_region) { + task_mapping_info.self = (vm_offset_t)shared_region; + + shared_region_mapping_info(shared_region, + &(task_mapping_info.text_region), + &(task_mapping_info.text_size), + &(task_mapping_info.data_region), + &(task_mapping_info.data_size), + &(task_mapping_info.region_mappings), + &(task_mapping_info.client_base), + &(task_mapping_info.alternate_base), + &(task_mapping_info.alternate_next), + &(task_mapping_info.fs_base), + &(task_mapping_info.system), + &(task_mapping_info.flags), &next); + + default_shared_region = + lookup_default_shared_region( + ENV_DEFAULT_ROOT, + task_mapping_info.system); + if (shared_region == default_shared_region) { local_flags = SYSTEM_REGION_BACKED; } else { local_flags = 0; } + shared_region_mapping_dealloc(default_shared_region); error = 0; error = copyout(&local_flags, flags, sizeof (int)); 
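+		/* QUERY_IS_SYSTEM_REGION only reports; skip the mapping work */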
goto lsf_bailout; @@ -458,28 +478,6 @@ load_shared_file( goto lsf_bailout_free_vput; } - vm_get_shared_region(current_task(), &shared_region); - if(shared_region == system_shared_region) { - default_regions = 1; - } - if(((vp->v_mount != rootvnode->v_mount) - && (shared_region == system_shared_region)) - && (lsf_mapping_pool_gauge() < 75)) { - /* We don't want to run out of shared memory */ - /* map entries by starting too many private versions */ - /* of the shared library structures */ - int error; - if(p->p_flag & P_NOSHLIB) { - error = clone_system_shared_regions(FALSE); - } else { - error = clone_system_shared_regions(TRUE); - } - if (error) { - goto lsf_bailout_free_vput; - } - local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS; - vm_get_shared_region(current_task(), &shared_region); - } #ifdef notdef if(vattr.va_size != mapped_file_size) { error = EINVAL; @@ -493,13 +491,13 @@ load_shared_file( /* load alternate regions if the caller has requested. */ /* Note: the new regions are "clean slates" */ if (local_flags & NEW_LOCAL_SHARED_REGIONS) { - error = clone_system_shared_regions(FALSE); + error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT); if (error) { goto lsf_bailout_free_vput; } - vm_get_shared_region(current_task(), &shared_region); } + vm_get_shared_region(current_task(), &shared_region); task_mapping_info.self = (vm_offset_t)shared_region; shared_region_mapping_info(shared_region, @@ -511,7 +509,53 @@ load_shared_file( &(task_mapping_info.client_base), &(task_mapping_info.alternate_base), &(task_mapping_info.alternate_next), + &(task_mapping_info.fs_base), + &(task_mapping_info.system), + &(task_mapping_info.flags), &next); + + { + shared_region_mapping_t default_shared_region; + default_shared_region = + lookup_default_shared_region( + ENV_DEFAULT_ROOT, + task_mapping_info.system); + if(shared_region == default_shared_region) { + default_regions = 1; + } + shared_region_mapping_dealloc(default_shared_region); + } + /* If we are running on a removable file system we must not */ + /* be in a set of shared regions or the file system will not */ + /* be removable. 
*/ + if(((vp->v_mount != rootvnode->v_mount) && (default_regions)) + && (lsf_mapping_pool_gauge() < 75)) { + /* We don't want to run out of shared memory */ + /* map entries by starting too many private versions */ + /* of the shared library structures */ + int error; + if(p->p_flag & P_NOSHLIB) { + error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT); + } else { + error = clone_system_shared_regions(TRUE, ENV_DEFAULT_ROOT); + } + if (error) { + goto lsf_bailout_free_vput; + } + local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS; + vm_get_shared_region(current_task(), &shared_region); + shared_region_mapping_info(shared_region, + &(task_mapping_info.text_region), + &(task_mapping_info.text_size), + &(task_mapping_info.data_region), + &(task_mapping_info.data_size), + &(task_mapping_info.region_mappings), + &(task_mapping_info.client_base), + &(task_mapping_info.alternate_base), + &(task_mapping_info.alternate_next), + &(task_mapping_info.fs_base), + &(task_mapping_info.system), &(task_mapping_info.flags), &next); + } /* This is a work-around to allow executables which have been */ /* built without knowledge of the proper shared segment to */ @@ -692,24 +736,8 @@ new_system_shared_regions( return EINVAL; } - /* get current shared region info for */ - /* restoration after new system shared */ - /* regions are in place */ - vm_get_shared_region(current_task(), ®ions); - - /* usually only called at boot time */ - /* shared_file_boot_time_init creates */ - /* a new set of system shared regions */ - /* and places them as the system */ - /* shared regions. */ - shared_file_boot_time_init(); - - /* set current task back to its */ - /* original regions. */ - vm_get_shared_region(current_task(), &new_regions); - shared_region_mapping_dealloc(new_regions); - - vm_set_shared_region(current_task(), regions); + /* clear all of our existing defaults */ + remove_all_shared_regions(); *retval = 0; return 0; @@ -718,7 +746,7 @@ new_system_shared_regions( int -clone_system_shared_regions(shared_regions_active) +clone_system_shared_regions(shared_regions_active, base_vnode) { shared_region_mapping_t new_shared_region; shared_region_mapping_t next; @@ -728,8 +756,6 @@ clone_system_shared_regions(shared_regions_active) struct proc *p; - if (shared_file_create_system_region(&new_shared_region)) - return (ENOMEM); vm_get_shared_region(current_task(), &old_shared_region); old_info.self = (vm_offset_t)old_shared_region; shared_region_mapping_info(old_shared_region, @@ -741,7 +767,27 @@ clone_system_shared_regions(shared_regions_active) &(old_info.client_base), &(old_info.alternate_base), &(old_info.alternate_next), + &(old_info.fs_base), + &(old_info.system), &(old_info.flags), &next); + if ((shared_regions_active) || + (base_vnode == ENV_DEFAULT_ROOT)) { + if (shared_file_create_system_region(&new_shared_region)) + return (ENOMEM); + } else { + new_shared_region = + lookup_default_shared_region( + base_vnode, old_info.system); + if(new_shared_region == NULL) { + shared_file_boot_time_init( + base_vnode, old_info.system); + vm_get_shared_region(current_task(), &new_shared_region); + } else { + vm_set_shared_region(current_task(), new_shared_region); + } + if(old_shared_region) + shared_region_mapping_dealloc(old_shared_region); + } new_info.self = (vm_offset_t)new_shared_region; shared_region_mapping_info(new_shared_region, &(new_info.text_region), @@ -752,6 +798,8 @@ clone_system_shared_regions(shared_regions_active) &(new_info.client_base), &(new_info.alternate_base), &(new_info.alternate_next), + 
&(new_info.fs_base), + &(new_info.system), &(new_info.flags), &next); if(shared_regions_active) { if(vm_region_clone(old_info.text_region, new_info.text_region)) { @@ -907,48 +955,92 @@ restart: } lru = global_user_profile_cache.age; + *profile = NULL; for(i = 0; iage = global_user_profile_cache.age; - global_user_profile_cache.age+=1; break; } + /* Otherwise grab the oldest entry */ if(global_user_profile_cache.profiles[i].age < lru) { lru = global_user_profile_cache.profiles[i].age; *profile = &global_user_profile_cache.profiles[i]; } } + /* Did we set it? */ + if (*profile == NULL) { + /* + * No entries are available; this can only happen if all + * of them are currently in the process of being reused; + * if this happens, we sleep on the address of the first + * element, and restart. This is less than ideal, but we + * know it will work because we know that there will be a + * wakeup on any entry currently in the process of being + * reused. + * + * XXX Reccomend a two handed clock and more than 3 total + * XXX cache entries at some point in the future. + */ + /* + * drop funnel and wait + */ + (void)tsleep((void *) + &global_user_profile_cache.profiles[0], + PRIBIO, "app_profile", 0); + goto restart; + } + + /* + * If it's currently busy, we've picked the one at the end of the + * LRU list, but it's currently being actively used. We sleep on + * its address and restart. + */ if ((*profile)->busy) { /* * drop funnel and wait */ (void)tsleep((void *) - &(global_user_profile_cache), + *profile, PRIBIO, "app_profile", 0); goto restart; } (*profile)->busy = 1; (*profile)->user = user; - if((*profile)->data_vp != NULL) { + /* + * put dummy value in for now to get competing request to wait + * above until we are finished + * + * Save the data_vp before setting it, so we can set it before + * we kmem_free() or vrele(). If we don't do this, then we + * have a potential funnel race condition we have to deal with. + */ + data_vp = (*profile)->data_vp; + (*profile)->data_vp = (struct vnode *)0xFFFFFFFF; + + /* + * Age the cache here in all cases; this guarantees that we won't + * be reusing only one entry over and over, once the system reaches + * steady-state. + */ + global_user_profile_cache.age+=1; + + if(data_vp != NULL) { kmem_free(kernel_map, (*profile)->buf_ptr, 4 * PAGE_SIZE); if ((*profile)->names_vp) { vrele((*profile)->names_vp); (*profile)->names_vp = NULL; } - if ((*profile)->data_vp) { - vrele((*profile)->data_vp); - (*profile)->data_vp = NULL; - } + vrele(data_vp); } - - /* put dummy value in for now to get */ - /* competing request to wait above */ - /* until we are finished */ - (*profile)->data_vp = (struct vnode *)0xFFFFFFFF; /* Try to open the appropriate users profile files */ /* If neither file is present, try to create them */ @@ -956,7 +1048,6 @@ restart: /* If the files do exist, check them for the app_file */ /* requested and read it in if present */ - ret = kmem_alloc(kernel_map, (vm_offset_t *)&profile_data_string, PATH_MAX); @@ -1337,7 +1428,7 @@ bsd_search_page_cache_data_base( resid_off = 0; while(size) { error = vn_rdwr(UIO_READ, vp, - (caddr_t)(local_buf + resid_off), + CAST_DOWN(caddr_t, (local_buf + resid_off)), size, file_off + resid_off, UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p); if((error) || (size == resid)) { diff --git a/bsd/vm/vnode_pager.c b/bsd/vm/vnode_pager.c index 123ad45cc..4328ecb6d 100644 --- a/bsd/vm/vnode_pager.c +++ b/bsd/vm/vnode_pager.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -137,7 +137,7 @@ vnode_pageout(struct vnode *vp, goto out; } - ubc_create_upl(vp, f_offset, isize, &vpupl, &pl, UPL_COPYOUT_FROM); + ubc_create_upl(vp, f_offset, isize, &vpupl, &pl, UPL_FOR_PAGEOUT | UPL_COPYOUT_FROM | UPL_SET_LITE); if (vpupl == (upl_t) 0) { result = error = PAGER_ABSENT; @@ -201,7 +201,20 @@ vnode_pageout(struct vnode *vp, blkno = ubc_offtoblk(vp, (off_t)(f_offset + offset)); s = splbio(); vp_pgoclean++; - if ((bp = incore(vp, blkno)) && + if (vp->v_tag == VT_NFS) { + /* check with nfs if page is OK to drop */ + error = nfs_buf_page_inval(vp, (off_t)(f_offset + offset)); + splx(s); + if (error) { + ubc_upl_abort_range(vpupl, offset, PAGE_SIZE, + UPL_ABORT_FREE_ON_EMPTY); + result = error = PAGER_ERROR; + offset += PAGE_SIZE; + isize -= PAGE_SIZE; + pg_index++; + continue; + } + } else if ((bp = incore(vp, blkno)) && ISSET(bp->b_flags, B_BUSY | B_NEEDCOMMIT)) { splx(s); ubc_upl_abort_range(vpupl, offset, PAGE_SIZE, @@ -309,7 +322,7 @@ vnode_pagein( error = PAGER_ERROR; goto out; } - ubc_create_upl(vp, f_offset, size, &upl, &pl, UPL_RET_ONLY_ABSENT); + ubc_create_upl(vp, f_offset, size, &upl, &pl, UPL_RET_ONLY_ABSENT | UPL_SET_LITE); if (upl == (upl_t)NULL) { result = PAGER_ABSENT; diff --git a/config/BSDKernel.exports b/config/BSDKernel.exports new file mode 100644 index 000000000..3faa37e59 --- /dev/null +++ b/config/BSDKernel.exports @@ -0,0 +1,3790 @@ +_BF_encrypt +_BF_set_key +_BestBlockSizeFit +_CURSIG +_ConvertUnicodeToUTF8Mangled +_DebugStr +_DisposePtr +_FastRelString +_FastUnicodeCompare +_GetEmbeddedFileID +_GetLogicalBlockSize +_GetTimeUTC +_LocalToUTC +_MAXNBUF +_MCFail +_MD5Final +_MD5Init +_MD5Pad +_MD5Transform +_MD5Update +_MDFail +_MPFail +_MacToVFSError +_NewPtr +_NewPtrSysClear +_PreliminarySetup +_RandomULong +_SHA1Final +_SHA1Init +_SHA1Transform +_SHA1Update +_SHA256_Data +_SHA256_End +_SHA256_Final +_SHA256_Init +_SHA256_Transform +_SHA256_Update +_SHA384_Data +_SHA384_End +_SHA384_Final +_SHA384_Init +_SHA384_Update +_SHA512_Data +_SHA512_End +_SHA512_Final +_SHA512_Init +_SHA512_Last +_SHA512_Transform +_SHA512_Update +_UTCToLocal +__FREE +__FREE_ZONE +__MALLOC +__MALLOC_ZONE +___sysctl +__dist_code +__length_code +__printf +__tr_align +__tr_flush_block +__tr_init +__tr_stored_block +__tr_tally +_accept +_access +_acct +_acct_process +_acctchkfreq +_acctp +_acctresume +_acctsuspend +_acctwatch +_acctwatch_funnel +_add_name +_add_pcbuffer +_add_profil +_add_to_time_wait +_addlog +_addupc_task +_adjtime +_adler32 +_advisory_read +_age_is_stale +_ah4_calccksum +_ah4_input +_ah4_output +_ah6_calccksum +_ah6_ctlinput +_ah6_input +_ah6_output +_ah_algorithm_lookup +_ah_hdrlen +_ah_hdrsiz +_alert +_alert_done +_allocbuf +_allproc +_app_profile +_apple_hwcksum_rx +_apple_hwcksum_tx +_around +_arp_ifinit +_arp_rtrequest +_arpintr +_arpintrq +_arpresolve +_arpwhohas +_at_ether_input +_attrcalcsize +_averunnable +_b_to_q +_badport_bandlim +_bawrite +_bcd2bin_data +_bdevsw +_bdevsw_add +_bdevsw_isfree +_bdevsw_remove +_bdevvp +_bdwrite +_bflushq +_bin2bcd_data +_bind +_biodone +_biowait +_blaundrycnt +_block_procsigmask +_boot +_boothowto +_bootp +_boottime +_both +_bpf_filter +_bpf_init +_bpf_mtap +_bpf_tap +_bpf_tap_callback +_bpf_validate +_bpfattach +_bpfclose +_bpfdetach +_bpfioctl +_bpfopen +_bpfpoll +_bpfread +_bpfwrite +_branch_tracing_enabled +_bread +_breada +_breadn +_brelse +_bremfree +_bs_port_table +_bsd_ast +_bsd_autoconf +_bsd_bufferinit 
+_bsd_close_page_cache_files +_bsd_hardclock +_bsd_hardclockinit +_bsd_init +_bsd_open_page_cache_files +_bsd_osrelease +_bsd_ostype +_bsd_pageable_map +_bsd_read_page_cache_file +_bsd_search_page_cache_data_base +_bsd_startupearly +_bsd_uprofil +_bsd_version +_bsd_version_major +_bsd_version_minor +_bsd_version_variant +_bsd_write_page_cache_file +_bsdinit_task +_buf +_bufferhdr_map +_bufhash +_bufhashlist_slock +_bufhashtbl +_bufqlim +_bufqscanwait +_bufqueues +_bufstats +_busyprt +_bwillwrite +_bwrite +_byte_swap_cgin +_byte_swap_cgout +_byte_swap_csum +_byte_swap_dir_block_in +_byte_swap_dir_block_out +_byte_swap_dir_out +_byte_swap_direct +_byte_swap_dirtemplate_in +_byte_swap_inode_in +_byte_swap_inode_out +_byte_swap_ints +_byte_swap_longlongs +_byte_swap_minidir_in +_byte_swap_sbin +_byte_swap_sbout +_byte_swap_shorts +_cache_enter +_cache_lookup +_cache_purge +_cache_purgevfs +_cached_sock_alloc +_cached_sock_count +_cached_sock_free +_calcru +_callout +_cansignal +_cast128_decrypt_round12 +_cast128_decrypt_round16 +_cast128_encrypt_round12 +_cast128_encrypt_round16 +_catq +_cd9660_access +_cd9660_blkatoff +_cd9660_blktooff +_cd9660_bmap +_cd9660_cdxaop_entries +_cd9660_cdxaop_opv_desc +_cd9660_cdxaop_p +_cd9660_close +_cd9660_cmap +_cd9660_defattr +_cd9660_deftstamp +_cd9660_enotsupp +_cd9660_fhtovp +_cd9660_fifoop_entries +_cd9660_fifoop_opv_desc +_cd9660_fifoop_p +_cd9660_getattr +_cd9660_getattrlist +_cd9660_ihashget +_cd9660_ihashins +_cd9660_ihashrem +_cd9660_inactive +_cd9660_init +_cd9660_ioctl +_cd9660_islocked +_cd9660_lock +_cd9660_lookup +_cd9660_mmap +_cd9660_mount +_cd9660_mountroot +_cd9660_offtoblk +_cd9660_open +_cd9660_pagein +_cd9660_pathconf +_cd9660_print +_cd9660_quotactl +_cd9660_read +_cd9660_readdir +_cd9660_readlink +_cd9660_reclaim +_cd9660_remove +_cd9660_rmdir +_cd9660_root +_cd9660_rrip_analyze +_cd9660_rrip_getname +_cd9660_rrip_getsymname +_cd9660_rrip_offset +_cd9660_seek +_cd9660_select +_cd9660_specop_entries +_cd9660_specop_opv_desc +_cd9660_specop_p +_cd9660_start +_cd9660_statfs +_cd9660_strategy +_cd9660_sync +_cd9660_sysctl +_cd9660_tstamp_conv17 +_cd9660_tstamp_conv7 +_cd9660_unlock +_cd9660_unmount +_cd9660_vfsops +_cd9660_vget +_cd9660_vget_internal +_cd9660_vnodeop_entries +_cd9660_vnodeop_opv_desc +_cd9660_vnodeop_p +_cd9660_vptofh +_cd9660_xa_read +_cdevsw +_cdevsw_add +_cdevsw_add_with_bdev +_cdevsw_isfree +_cdevsw_remove +_cfree +_cfreecount +_cfreelist +_chdir +_check_cpu_subtype +_check_exec_access +_check_routeselfref +_checkalias +_checkuseraccess +_chflags +_chgproccnt +_chkdq +_chkdqchg +_chkiq +_chkiqchg +_chkvnlock +_chmod +_chown +_chroot +_chrtoblk +_chrtoblk_set +_cinit +_cjk_encoding +_cjk_lastunique +_clalloc +_clear_procsiglist +_clfree +_clone_system_shared_regions +_close +_closef +_clrbits +_cluster_bp +_cluster_copy_ubc_data +_cluster_copy_upl_data +_cluster_pagein +_cluster_pageout +_cluster_push +_cluster_read +_cluster_release +_cluster_write +_cmask +_cnodehash +_cnodehashtbl +_collectth_state +_comp_add_data +_comp_end +_comp_get_ratio +_comp_init +_compute_averunnable +_concat_domain +_connect +_cons +_cons_cinput +_console_user +_constty +_copyfile +_copyright +_copystr +_copywithin +_coredump +_count_busy_buffers +_count_lock_queue +_crcmp +_crcopy +_crdup +_create_unix_stack +_cred0 +_crfree +_crget +_ctl_attach +_ctl_connect +_ctl_ctloutput +_ctl_deregister +_ctl_disconnect +_ctl_enqueuedata +_ctl_enqueuembuf +_ctl_find +_ctl_head +_ctl_ioctl +_ctl_post_msg +_ctl_register +_ctl_send +_ctl_usrreqs +_ctlsw 
+_cttyioctl +_cttyopen +_cttyread +_cttyselect +_cttywrite +_cur_tw_slot +_current_proc +_current_proc_EXTERNAL +_cvtstat +_dead_badop +_dead_blktooff +_dead_bmap +_dead_cmap +_dead_ebadf +_dead_ioctl +_dead_lock +_dead_lookup +_dead_nullop +_dead_offtoblk +_dead_open +_dead_print +_dead_read +_dead_select +_dead_strategy +_dead_vnodeop_entries +_dead_vnodeop_opv_desc +_dead_vnodeop_p +_dead_write +_def_tbuffer_size +_default_pager_init_flag +_deflate +_deflateCopy +_deflateEnd +_deflateInit2_ +_deflateInit_ +_deflateParams +_deflateReset +_deflateSetDictionary +_deflate_copyright +_defrouter_addreq +_defrouter_delreq +_defrouter_lookup +_defrouter_select +_defrtrlist_del +_delack_bitmask +_delete +_delete_each_prefix +_des_SPtrans +_des_check_key +_des_check_key_parity +_des_decrypt3 +_des_ecb3_encrypt +_des_ecb_encrypt +_des_encrypt1 +_des_encrypt2 +_des_encrypt3 +_des_fixup_key_parity +_des_is_weak_key +_des_key_sched +_des_options +_des_set_key +_des_set_key_checked +_des_set_key_unchecked +_des_set_odd_parity +_desireddquot +_desiredvnodes +_dest6_input +_dev_add_entry +_dev_add_name +_dev_add_node +_dev_dup_entry +_dev_dup_plane +_dev_finddir +_dev_findname +_dev_free_hier +_dev_free_name +_dev_root +_devcls +_devfs_checkpath +_devfs_dn_free +_devfs_dntovn +_devfs_free_plane +_devfs_kernel_mount +_devfs_lock +_devfs_make_link +_devfs_make_node +_devfs_mknod +_devfs_mount +_devfs_propogate +_devfs_remove +_devfs_sinit +_devfs_spec_vnodeop_opv_desc +_devfs_spec_vnodeop_p +_devfs_stats +_devfs_update +_devfs_vfsops +_devfs_vnodeop_opv_desc +_devfs_vnodeop_p +_devin +_devio +_devioc +_devnode_free +_devopn +_devout +_devwait +_dhcpol_add +_dhcpol_concat +_dhcpol_count +_dhcpol_element +_dhcpol_find +_dhcpol_free +_dhcpol_get +_dhcpol_init +_dhcpol_parse_buffer +_dhcpol_parse_packet +_dhcpol_parse_vendor +_dirchk +_disableConsoleOutput +_disable_branch_tracing +_disable_funnel +_div_init +_div_input +_div_usrreqs +_divert_packet +_dlil_attach_interface_filter +_dlil_attach_protocol +_dlil_attach_protocol_filter +_dlil_dereg_if_modules +_dlil_dereg_proto_module +_dlil_detach_filter +_dlil_detach_protocol +_dlil_event +_dlil_expand_mcl +_dlil_find_dltag +_dlil_if_acquire +_dlil_if_attach +_dlil_if_detach +_dlil_if_release +_dlil_init +_dlil_initialized +_dlil_inject_if_input +_dlil_inject_if_output +_dlil_inject_pr_input +_dlil_inject_pr_output +_dlil_input +_dlil_input_lock +_dlil_input_packet +_dlil_input_thread_continue +_dlil_input_thread_wakeup +_dlil_ioctl +_dlil_output +_dlil_plumb_protocol +_dlil_post_msg +_dlil_reg_if_modules +_dlil_reg_proto_module +_dlil_stats +_dlil_unplumb_protocol +_dlttoproto +_dmmax +_dmmin +_dmtext +_doasyncfree +_doclusterread +_doclusterwrite +_doingcache +_domaininit +_domainname +_domainnamelen +_domains +_donice +_doreallocblks +_dosetrlimit +_dounmount +_dp_pgins +_dp_pgouts +_dqdirtylist +_dqfileclose +_dqfileopen +_dqflush +_dqfreelist +_dqget +_dqhash +_dqhashtbl +_dqinit +_dqreclaim +_dqref +_dqrele +_dqsync +_dqsync_orphans +_dump_string_table +_dumpdev +_dumplo +_dup +_dup2 +_dup_sockaddr +_dupfdopen +_dylink_test +_embutl +_enable_branch_tracing +_enable_funnel +_encap4_input +_encap6_input +_encap_attach +_encap_attach_func +_encap_detach +_encap_getarg +_encap_init +_encaptab +_encode_comp_t +_enodev +_enodev_strat +_enoioctl +_enosys +_enterpgrp +_enxio +_eopnotsupp +_err_abortop +_err_access +_err_advlock +_err_allocate +_err_blkatoff +_err_blktooff +_err_bmap +_err_bwrite +_err_close +_err_cmap +_err_copyfile +_err_create 
+_err_devblocksize +_err_exchange +_err_fsync +_err_getattr +_err_getattrlist +_err_inactive +_err_ioctl +_err_islocked +_err_lease +_err_link +_err_lock +_err_mkcomplex +_err_mkdir +_err_mknod +_err_mmap +_err_offtoblk +_err_open +_err_pagein +_err_pageout +_err_pathconf +_err_pgrd +_err_pgwr +_err_print +_err_read +_err_readdir +_err_readdirattr +_err_readlink +_err_reallocblks +_err_reclaim +_err_remove +_err_rename +_err_revoke +_err_rmdir +_err_searchfs +_err_seek +_err_select +_err_setattr +_err_setattrlist +_err_strategy +_err_symlink +_err_truncate +_err_unlock +_err_update +_err_valloc +_err_vfree +_err_whiteout +_err_write +_errsys +_esp4_input +_esp4_output +_esp6_ctlinput +_esp6_input +_esp6_output +_esp_algorithm_lookup +_esp_auth +_esp_hdrsiz +_esp_max_ivlen +_esp_rijndael_blockdecrypt +_esp_rijndael_blockencrypt +_esp_rijndael_schedlen +_esp_rijndael_schedule +_esp_schedule +_esp_udp_encap_port +_ether_addmulti +_ether_attach_at +_ether_attach_inet +_ether_attach_inet6 +_ether_delmulti +_ether_demux +_ether_detach_at +_ether_detach_inet +_ether_detach_inet6 +_ether_family_init +_ether_frameout +_ether_ifattach +_ether_ifmod_ioctl +_ether_inet6_prmod_ioctl +_ether_inet_prmod_ioctl +_ether_input +_ether_ipmulticast_max +_ether_ipmulticast_min +_ether_pre_output +_ether_prmod_ioctl +_ether_resolvemulti +_ether_sprintf +_event_usrreqs +_eventsw +_evprocdeque +_evprocenque +_evsofree +_exchangedata +_exchangelock +_execsigs +_execv +_execve +_execve_semaphore +_exit +_exit1 +_falloc +_fatfile_getarch +_fatfile_getarch_affinity +_fchdir +_fchflags +_fchmod +_fchown +_fcntl +_fcount +_fdalloc +_fdavail +_fdesc_allocvp +_fdesc_badop +_fdesc_getattr +_fdesc_inactive +_fdesc_init +_fdesc_ioctl +_fdesc_lookup +_fdesc_mount +_fdesc_open +_fdesc_pathconf +_fdesc_print +_fdesc_read +_fdesc_readdir +_fdesc_readlink +_fdesc_reclaim +_fdesc_root +_fdesc_select +_fdesc_setattr +_fdesc_start +_fdesc_statfs +_fdesc_sync +_fdesc_unmount +_fdesc_vfree +_fdesc_vfsops +_fdesc_vnodeop_entries +_fdesc_vnodeop_opv_desc +_fdesc_vnodeop_p +_fdesc_write +_fdexpand +_fdgetf +_fdhash +_fdhashtbl +_fdopen +_fdrelse +_ffree +_ffs +_ffs_alloc +_ffs_balloc +_ffs_blkalloc +_ffs_blkatoff +_ffs_blkfree +_ffs_blkpref +_ffs_blktooff +_ffs_clrblock +_ffs_clusteracct +_ffs_fhtovp +_ffs_fifoop_entries +_ffs_fifoop_opv_desc +_ffs_fifoop_p +_ffs_flushfiles +_ffs_fragacct +_ffs_fsync +_ffs_init +_ffs_isblock +_ffs_mount +_ffs_mountfs +_ffs_mountroot +_ffs_offtoblk +_ffs_oldfscompat +_ffs_pagein +_ffs_pageout +_ffs_read +_ffs_reallocblks +_ffs_realloccg +_ffs_reclaim +_ffs_reload +_ffs_sbupdate +_ffs_setblock +_ffs_specop_entries +_ffs_specop_opv_desc +_ffs_specop_p +_ffs_statfs +_ffs_sync +_ffs_sysctl +_ffs_truncate +_ffs_unmount +_ffs_update +_ffs_valloc +_ffs_vfree +_ffs_vget +_ffs_vnodeop_entries +_ffs_vnodeop_opv_desc +_ffs_vnodeop_p +_ffs_vptofh +_ffs_write +_fhopen +_fifo_advlock +_fifo_bmap +_fifo_close +_fifo_ebadf +_fifo_inactive +_fifo_ioctl +_fifo_lookup +_fifo_nfsv2nodeop_opv_desc +_fifo_nfsv2nodeop_p +_fifo_open +_fifo_pathconf +_fifo_print +_fifo_printinfo +_fifo_read +_fifo_select +_fifo_vnodeop_entries +_fifo_vnodeop_opv_desc +_fifo_vnodeop_p +_fifo_write +_filedesc0 +_filehead +_find_nke +_finishdup +_firstc +_firstsect +_firstseg +_firstsegfromheader +_fixjobc +_flock +_fmod_watch +_fmod_watch_enable +_fork +_fpathconf +_fr_checkp +_frag6_doing_reass +_frag6_drain +_frag6_init +_frag6_input +_frag6_nfragpackets +_frag6_slowtimo +_fragtbl +_fragtbl124 +_fragtbl8 +_freevnodes +_fref +_frele +_fs_filtops 
+_fsctl +_fstat +_fstatfs +_fstatv +_fsync +_ftruncate +_fubyte +_fuibyte +_fuiword +_futimes +_fuword +_fw_enable +_gCompareTable +_gLatinCaseFold +_gLowerCaseTable +_gTimeZone +_getProcName +_get_aiotask +_get_bsduthreadarg +_get_bsduthreadrval +_get_inpcb_str_size +_get_kernel_symfile +_get_new_filter_id +_get_procrustime +_get_signalthread +_get_tcp_str_size +_getattrlist +_getblk +_getc +_getdirentries +_getdirentriesattr +_getdtablesize +_geteblk +_getegid +_geteuid +_getfakefvmseg +_getfh +_getfsstat +_getgid +_getgroups +_getinoquota +_getitimer +_getlastaddr +_getlogin +_getnewvnode +_getpeername +_getpgid +_getpgrp +_getpid +_getppid +_getpriority +_getquota +_getrlimit +_getrusage +_getsectbyname +_getsectbynamefromheader +_getsectdatafromheader +_getsegbyname +_getsegbynamefromheader +_getsegdatafromheader +_getsid +_getsock +_getsockaddr +_getsockname +_getsockopt +_gettimeofday +_getuid +_getvnode +_gif_attach_inet +_gif_attach_inet6 +_gif_attach_proto_family +_gif_delete_tunnel +_gif_demux +_gif_detach_inet +_gif_detach_inet6 +_gif_detach_proto_family +_gif_encapcheck4 +_gif_encapcheck6 +_gif_input +_gif_ioctl +_gif_pre_output +_gif_reg_if_mods +_gif_shutdown +_gifattach +_gifs +_global_state_pid +_global_user_profile_cache +_grade_cpu_subtype +_groupmember +_gsignal +_hard_throttle_on_root +_hashinit +_hex2ascii_data +_hfc_tag +_hfs_addconverter +_hfs_allocate +_hfs_blktooff +_hfs_bmap +_hfs_bwrite +_hfs_catname +_hfs_chash_slock +_hfs_chkdq +_hfs_chkdqchg +_hfs_chkiq +_hfs_chkiqchg +_hfs_clearlock +_hfs_cmap +_hfs_converterinit +_hfs_encoding_list +_hfs_encoding_list_slock +_hfs_encodingbias +_hfs_extname +_hfs_fifoop_entries +_hfs_fifoop_opv_desc +_hfs_fifoop_p +_hfs_findoverlap +_hfs_generate_volume_notifications +_hfs_getblock +_hfs_getconverter +_hfs_getinoquota +_hfs_getlock +_hfs_getquota +_hfs_ioctl +_hfs_islatinbias +_hfs_offtoblk +_hfs_owner_rights +_hfs_pagein +_hfs_pageout +_hfs_pickencoding +_hfs_privdirname +_hfs_qsync +_hfs_quotactl +_hfs_quotaoff +_hfs_quotaon +_hfs_quotastat +_hfs_read +_hfs_relconverter +_hfs_remconverter +_hfs_select +_hfs_setlock +_hfs_setquota +_hfs_setuse +_hfs_specop_entries +_hfs_specop_opv_desc +_hfs_specop_p +_hfs_split +_hfs_strategy +_hfs_swap_BTNode +_hfs_swap_HFSBTInternalNode +_hfs_swap_HFSPlusBTInternalNode +_hfs_swap_HFSPlusForkData +_hfs_to_utf8 +_hfs_truncate +_hfs_vbmname +_hfs_vfsops +_hfs_vnodeop_entries +_hfs_vnodeop_opv_desc +_hfs_vnodeop_p +_hfs_wakelock +_hfs_write +_hfsfifo_kqfilt_add +_hfsfifo_kqfilt_remove +_hfsmaxlockdepth +_holdrele +_hostid +_hostname +_hostnamelen +_hz +_hzto +_icmp6_ctloutput +_icmp6_error +_icmp6_fasttimo +_icmp6_ifstat +_icmp6_ifstatmax +_icmp6_init +_icmp6_input +_icmp6_mtudisc_update +_icmp6_nodeinfo +_icmp6_rediraccept +_icmp6_redirect_input +_icmp6_redirect_output +_icmp6_redirtimeout +_icmp6_reflect +_icmp6errppslim +_icmp6stat +_icmp_error +_icmp_input +_if_addmulti +_if_allmulti +_if_attach +_if_delmulti +_if_delmultiaddr +_if_down +_if_down_all +_if_index +_if_name +_if_route +_if_rtproto_del +_if_unroute +_if_up +_if_withname +_ifa_ifwithaddr +_ifa_ifwithdstaddr +_ifa_ifwithnet +_ifa_ifwithroute +_ifafree +_ifaof_ifpforaddr +_ifaref +_ifbyfamily +_ifindex2ifnet +_ifioctl +_ifma_lostlist +_ifmaof_ifpforaddr +_ifmedia_add +_ifmedia_init +_ifmedia_ioctl +_ifmedia_list_add +_ifmedia_removeall +_ifmedia_set +_ifnet +_ifnet_addrs +_ifpromisc +_ifptodlt +_ifqmaxlen +_iftovt_tab +_ifunit +_igmp_fasttimo +_igmp_init +_igmp_input +_igmp_joingroup +_igmp_leavegroup +_igmp_slowtimo +_ihash 
+_ihashtbl +_in6_addmulti +_in6_addr2scopeid +_in6_addrscope +_in6_are_prefix_equal +_in6_cksum +_in6_clearscope +_in6_control +_in6_delmulti +_in6_dinit +_in6_embedscope +_in6_get_tmpifid +_in6_gif_input +_in6_gif_output +_in6_gif_protosw +_in6_if_up +_in6_ifaddr +_in6_ifattach +_in6_ifawithifp +_in6_ifawithscope +_in6_ifdetach +_in6_ifindex2scopeid +_in6_ifstat +_in6_ifstatmax +_in6_init2done +_in6_init_prefix_ltimes +_in6_inithead +_in6_is_addr_deprecated +_in6_len2mask +_in6_localaddr +_in6_losing +_in6_mapped_peeraddr +_in6_mapped_sockaddr +_in6_mask2len +_in6_matchlen +_in6_maxmtu +_in6_multihead +_in6_nigroup +_in6_nigroup_attach +_in6_nigroup_detach +_in6_pcbbind +_in6_pcbconnect +_in6_pcbdetach +_in6_pcbdisconnect +_in6_pcbladdr +_in6_pcblookup_hash +_in6_pcblookup_local +_in6_pcbnotify +_in6_pcbpurgeif0 +_in6_pcbsetport +_in6_post_msg +_in6_prefix_add_ifid +_in6_prefix_ioctl +_in6_prefix_remove_ifid +_in6_prefixlen2mask +_in6_proto_count +_in6_purgeaddr +_in6_purgeif +_in6_purgeprefix +_in6_recoverscope +_in6_rr_timer +_in6_rr_timer_funneled +_in6_rtchange +_in6_selecthlim +_in6_selectsrc +_in6_setmaxmtu +_in6_setpeeraddr +_in6_setsockaddr +_in6_sin6_2_sin +_in6_sin6_2_sin_in_sock +_in6_sin_2_v4mapsin6 +_in6_sin_2_v4mapsin6_in_sock +_in6_sockaddr +_in6_tmpaddrtimer +_in6_tmpaddrtimer_funneled +_in6_tmpifadd +_in6_update_ifa +_in6_v4mapsin6_sockaddr +_in6addr_any +_in6addr_linklocal_allnodes +_in6addr_linklocal_allrouters +_in6addr_loopback +_in6addr_nodelocal_allnodes +_in6if_do_dad +_in6ifa_ifpforlinklocal +_in6ifa_ifpwithaddr +_in6mask0 +_in6mask128 +_in6mask32 +_in6mask64 +_in6mask96 +_in_addmulti +_in_addword +_in_broadcast +_in_canforward +_in_cksum +_in_cksum_skip +_in_control +_in_delayed_cksum +_in_delmulti +_in_dinit +_in_gif_input +_in_gif_output +_in_gif_protosw +_in_ifaddrhead +_in_ifadown +_in_ifscrub +_in_inithead +_in_localaddr +_in_losing +_in_multihead +_in_pcb_get_owner +_in_pcb_grab_port +_in_pcb_letgo_port +_in_pcb_nat_init +_in_pcb_new_share_client +_in_pcb_rem_share_client +_in_pcballoc +_in_pcbbind +_in_pcbconnect +_in_pcbdetach +_in_pcbdisconnect +_in_pcbinshash +_in_pcbladdr +_in_pcblookup_hash +_in_pcblookup_local +_in_pcbnotifyall +_in_pcbpurgeif0 +_in_pcbrehash +_in_pcbremlists +_in_proto_count +_in_pseudo +_in_rtchange +_in_rtqdrain +_in_setpeeraddr +_in_setsockaddr +_in_stf_input +_in_stf_protosw +_inactivevnodes +_incore +_inet6_ether_input +_inet6_ether_pre_output +_inet6ctlerrmap +_inet6domain +_inet6sw +_inet_aton +_inet_ether_input +_inet_ether_pre_output +_inet_ntoa +_inetctlerrmap +_inetdomain +_inetsw +_inferior +_inflate +_inflateEnd +_inflateInit2_ +_inflateInit_ +_inflateReset +_inflateSetDictionary +_inflateSync +_inflateSyncPoint +_inflate_blocks +_inflate_blocks_free +_inflate_blocks_new +_inflate_blocks_reset +_inflate_blocks_sync_point +_inflate_codes +_inflate_codes_free +_inflate_codes_new +_inflate_copyright +_inflate_fast +_inflate_flush +_inflate_mask +_inflate_set_dictionary +_inflate_trees_bits +_inflate_trees_dynamic +_inflate_trees_fixed +_init_args +_init_attempts +_init_domain +_init_ip6pktopts +_init_sin6 +_initialized +_initproc +_inittodr +_inside +_insmntque +_int6intrq_present +_invalhash +_iobufqueue +_ioctl +_ip4_ah_cleartos +_ip4_ah_net_deflev +_ip4_ah_offsetmask +_ip4_ah_trans_deflev +_ip4_def_policy +_ip4_esp_net_deflev +_ip4_esp_randpad +_ip4_esp_trans_deflev +_ip4_ipsec_dfbit +_ip4_ipsec_ecn +_ip6_accept_rtadv +_ip6_addaux +_ip6_ah_net_deflev +_ip6_ah_trans_deflev +_ip6_auto_flowlabel +_ip6_auto_linklocal 
+_ip6_clearpktopts +_ip6_copypktopts +_ip6_ctloutput +_ip6_dad_count +_ip6_def_policy +_ip6_defhlim +_ip6_defmcasthlim +_ip6_delaux +_ip6_desync_factor +_ip6_ecn_egress +_ip6_ecn_ingress +_ip6_esp_net_deflev +_ip6_esp_randpad +_ip6_esp_trans_deflev +_ip6_findaux +_ip6_flow_seq +_ip6_forward +_ip6_forward_rt +_ip6_forward_srcrt +_ip6_forwarding +_ip6_freemoptions +_ip6_freepcbopts +_ip6_fw_chk_ptr +_ip6_fw_ctl_ptr +_ip6_fw_enable +_ip6_get_prevhdr +_ip6_getdstifaddr +_ip6_gif_hlim +_ip6_hdrnestlimit +_ip6_id +_ip6_init +_ip6_input +_ip6_ipsec_ecn +_ip6_keepfaith +_ip6_lasthdr +_ip6_log_interval +_ip6_log_time +_ip6_maxfragpackets +_ip6_mforward +_ip6_mloopback +_ip6_mrouter +_ip6_mrouter_done +_ip6_mrouter_get +_ip6_mrouter_set +_ip6_mrouter_ver +_ip6_mrtproto +_ip6_nexthdr +_ip6_optlen +_ip6_ours_check_algorithm +_ip6_output +_ip6_process_hopopts +_ip6_protox +_ip6_rr_prune +_ip6_savecontrol +_ip6_sendredirects +_ip6_setpktoptions +_ip6_sourcecheck +_ip6_sourcecheck_interval +_ip6_sprintf +_ip6_temp_preferred_lifetime +_ip6_temp_regen_advance +_ip6_temp_valid_lifetime +_ip6_unknown_opt +_ip6_use_deprecated +_ip6_use_tempaddr +_ip6_v6only +_ip6intr +_ip6intrq +_ip6q +_ip6stat +_ip_ctloutput +_ip_defttl +_ip_divert_cookie +_ip_drain +_ip_ecn_egress +_ip_ecn_ingress +_ip_freemoptions +_ip_fw_chk_ptr +_ip_fw_ctl_ptr +_ip_fw_fwd_addr +_ip_gif_ttl +_ip_id +_ip_init +_ip_input +_ip_linklocal_in_allowbadttl +_ip_linklocal_stat +_ip_mcast_src +_ip_mforward +_ip_mrouter +_ip_mrouter_done +_ip_mrouter_get +_ip_mrouter_set +_ip_optcopy +_ip_output +_ip_pkt_to_mbuf +_ip_protox +_ip_rsvp_done +_ip_rsvp_force_done +_ip_rsvp_init +_ip_rsvp_vif_done +_ip_rsvp_vif_init +_ip_rsvpd +_ip_savecontrol +_ip_slowtimo +_ip_srcroute +_ip_stripoptions +_ipcomp4_input +_ipcomp4_output +_ipcomp6_input +_ipcomp6_output +_ipcomp_algorithm_lookup +_ipcperm +_ipflow_create +_ipflow_fastforward +_ipflow_slowtimo +_ipforwarding +_ipintr +_ipintrq +_ipintrq_present +_ipip_input +_ipport_firstauto +_ipport_hifirstauto +_ipport_hilastauto +_ipport_lastauto +_ipport_lowfirstauto +_ipport_lowlastauto +_ipsec4_delete_pcbpolicy +_ipsec4_get_policy +_ipsec4_getpolicybyaddr +_ipsec4_getpolicybysock +_ipsec4_hdrsiz +_ipsec4_in_reject +_ipsec4_in_reject_so +_ipsec4_logpacketstr +_ipsec4_output +_ipsec4_set_policy +_ipsec4_tunnel_validate +_ipsec6_delete_pcbpolicy +_ipsec6_get_policy +_ipsec6_getpolicybyaddr +_ipsec6_getpolicybysock +_ipsec6_hdrsiz +_ipsec6_in_reject +_ipsec6_in_reject_so +_ipsec6_logpacketstr +_ipsec6_output_trans +_ipsec6_output_tunnel +_ipsec6_set_policy +_ipsec6_tunnel_validate +_ipsec6stat +_ipsec_addhist +_ipsec_bypass +_ipsec_chkreplay +_ipsec_clearhist +_ipsec_copy_policy +_ipsec_copypkt +_ipsec_debug +_ipsec_delaux +_ipsec_dumpmbuf +_ipsec_get_reqlevel +_ipsec_gethist +_ipsec_getsocket +_ipsec_hdrsiz_tcp +_ipsec_init_policy +_ipsec_logsastr +_ipsec_setsocket +_ipsec_updatereplay +_ipsecstat +_ipstat +_iptime +_is_file_clean +_is_suser +_is_suser1 +_isdisk +_isinferior +_iskmemdev +_isn_ctx +_isn_last_reseed +_isn_secret +_iso_nchstats +_isodirino +_isofncmp +_isofntrans +_isohash +_isohashtbl +_isonullname +_issetugid +_issignal +_issingleuser +_itimerdecr +_itimerfix +_journal_active +_journal_close +_journal_create +_journal_end_transaction +_journal_flush +_journal_kill_block +_journal_modify_block_abort +_journal_modify_block_end +_journal_modify_block_start +_journal_open +_journal_start_transaction +_kd_buffer +_kd_buflast +_kd_bufptr +_kd_bufsize +_kd_buftomem +_kd_entropy_buffer +_kd_entropy_bufsize 
+_kd_entropy_buftomem +_kd_entropy_count +_kd_entropy_indx +_kd_mapcount +_kd_mapptr +_kd_mapsize +_kd_maptomem +_kd_prev_timebase +_kd_readlast +_kd_trace_lock +_kdbg_bootstrap +_kdbg_clear +_kdbg_control +_kdbg_control_chud +_kdbg_getentropy +_kdbg_getreg +_kdbg_mapinit +_kdbg_read +_kdbg_readmap +_kdbg_reinit +_kdbg_resolve_map +_kdbg_setpid +_kdbg_setpidex +_kdbg_setreg +_kdbg_setrtcdec +_kdbg_trace_data +_kdbg_trace_string +_kdebug_chudhook +_kdebug_enable +_kdebug_flags +_kdebug_nolog +_kdebug_ops +_kdebug_trace +_kdlog_beg +_kdlog_end +_kdlog_value1 +_kdlog_value2 +_kdlog_value3 +_kdlog_value4 +_kern_control_init +_kern_event_init +_kern_sysctl +_kernacc +_kernel_debug +_kernel_debug1 +_kernel_flock +_kernel_sysctl +_kernproc +_kev_attach +_kev_control +_kev_detach +_kev_post_msg +_kevent +_key_allocsa +_key_allocsp +_key_cb +_key_checkrequest +_key_checktunnelsanity +_key_debug_level +_key_dst +_key_freereg +_key_freesav +_key_freeso +_key_freesp +_key_gettunnel +_key_init +_key_ismyaddr +_key_msg2sp +_key_newsp +_key_output +_key_parse +_key_random +_key_randomfill +_key_sa_recordxfer +_key_sa_routechange +_key_sa_stir_iv +_key_sendup +_key_sendup_mbuf +_key_sp2msg +_key_spdacquire +_key_src +_key_timehandler +_key_timehandler_funnel +_key_usrreqs +_keydb_delsecashead +_keydb_delsecpolicy +_keydb_delsecreg +_keydb_delsecreplay +_keydb_freesecasvar +_keydb_newsecashead +_keydb_newsecasvar +_keydb_newsecpolicy +_keydb_newsecreg +_keydb_newsecreplay +_keydb_refsecasvar +_keydomain +_keystat +_keysw +_kill +_killpg1 +_kinfo_vdebug +_klist_init +_klogwakeup +_km_tty +_kmclose +_kmem_mb_alloc +_kmeminit +_kmemstats +_kmgetc +_kmgetc_silent +_kminit +_kmioctl +_kmopen +_kmputc +_kmread +_kmwrite +_kmzones +_knote +_knote_attach +_knote_detach +_krpc_call +_krpc_portmap +_ktrace +_ktrcsw +_ktrgenio +_ktrnamei +_ktrpsig +_ktrsyscall +_ktrsysret +_kvprintf +_lbolt +_ldisc_deregister +_ldisc_register +_lease_check +_lease_updatetime +_leavepgrp +_legal_vif_num +_lf_clearlock +_lf_findoverlap +_lf_getblock +_lf_getlock +_lf_setlock +_lf_split +_lf_wakelock +_lightning_bolt +_limcopy +_limit0 +_linesw +_link +_listen +_llinfo_nd6 +_lo_attach_inet +_lo_attach_inet6 +_lo_demux +_lo_framer +_lo_input +_lo_reg_if_mods +_lo_set_bpf_tap +_lo_shutdown +_load_ipfw +_load_machfile +_local_proto_count +_localdomain +_lockinit +_lockmgr +_lockmgr_printinfo +_lockstatus +_log_in_vain +_log_init +_log_lock +_log_open +_log_putc +_logclose +_logioctl +_logopen +_logpri +_logread +_logselect +_logsoftc +_logwakeup +_loif +_lookup +_loopattach +_lru_is_stale +_lseek +_lstat +_lstatv +_m_adj +_m_aux_add +_m_aux_delete +_m_aux_find +_m_cat +_m_clalloc +_m_cltom +_m_copy_pkthdr +_m_copyback +_m_copydata +_m_copym +_m_copym_with_hdrs +_m_devget +_m_dtom +_m_dup +_m_expand +_m_free +_m_freem +_m_freem_list +_m_get +_m_getclr +_m_gethdr +_m_getpacket +_m_getpackethdrs +_m_getpackets +_m_leadingspace +_m_mcheck +_m_mchtype +_m_mclalloc +_m_mclfree +_m_mclget +_m_mclhasreference +_m_mclref +_m_mclunref +_m_mtocl +_m_mtod +_m_prepend +_m_prepend_2 +_m_pulldown +_m_pullup +_m_reclaim +_m_retry +_m_retryhdr +_m_split +_m_trailingspace +_m_want +_mac_roman_to_unicode +_mac_roman_to_utf8 +_machdep_sysctl_list +_machine_exception +_macx_backing_store_recovery +_macx_backing_store_suspend +_macx_swapoff +_macx_swapon +_madvise +_map_fd +_map_fd_funneled +_max_datalen +_max_hdr +_max_linkhdr +_max_protohdr +_maxdmap +_maxfiles +_maxfilesperproc +_maxlockdepth +_maxproc +_maxprocperuid +_maxsmap +_maxsockets +_maxvfsconf 
+_maxvfsslots +_mb_map +_mbinit +_mbstat +_mbuf_slock +_mbutl +_mcl_paddr +_mcl_to_paddr +_mclfree +_mclrefcnt +_mdev +_mdevBMajor +_mdevCMajor +_mdevadd +_mdevinit +_mdevlookup +_memmove +_memname +_meta_bread +_meta_breadn +_meta_is_stale +_meta_zones +_mf6ctable +_mfree +_mfreelater +_microtime +_microuptime +_mincore +_minherit +_minphys +_mkcomplex +_mkdir +_mkfifo +_mknod +_mld6_fasttimeo +_mld6_init +_mld6_input +_mld6_start_listening +_mld6_stop_listening +_mlock +_mlockall +_mmFree +_mmGetPtr +_mmInit +_mmMalloc +_mmReturnPtr +_mmap +_mmread +_mmrw +_mmwrite +_mntid_slock +_mntvnode_slock +_modetodirtype +_modwatch +_mount +_mountlist +_mountlist_slock +_mountroot +_mountroot_post_hook +_mprotect +_mremap +_mrt6_ioctl +_mrt6stat +_mrt_ioctl +_msgbufp +_msgctl +_msgget +_msgrcv +_msgsnd +_msgsys +_msync +_multicast_register_if +_munlock +_munlockall +_munmap +_munmapfd +_mynum_flavors +_n6expire +_name_cmp +_namei +_nanotime +_nanouptime +_nbdwrite +_nblkdev +_nbuf +_nbufh +_nbufhigh +_nbuflow +_nbuftarget +_ncallout +_nchash +_nchashtbl +_nchinit +_nchrdev +_nchstats +_ncl +_nclruhead +_nd6_cache_lladdr +_nd6_dad_duplicated +_nd6_dad_start +_nd6_dad_stop +_nd6_dad_stoptimer +_nd6_debug +_nd6_defifindex +_nd6_delay +_nd6_free +_nd6_gctimer +_nd6_ifattach +_nd6_ifptomac +_nd6_init +_nd6_ioctl +_nd6_is_addr_neighbor +_nd6_lookup +_nd6_maxndopt +_nd6_maxnudhint +_nd6_mmaxtries +_nd6_na_input +_nd6_na_output +_nd6_need_cache +_nd6_ns_input +_nd6_ns_output +_nd6_nud_hint +_nd6_option +_nd6_option_init +_nd6_options +_nd6_output +_nd6_prefix_lookup +_nd6_prefix_offlink +_nd6_prefix_onlink +_nd6_prelist_add +_nd6_prune +_nd6_purge +_nd6_ra_input +_nd6_recalc_reachtm_interval +_nd6_rs_input +_nd6_rtrequest +_nd6_setdefaultiface +_nd6_setmtu +_nd6_storelladdr +_nd6_timer +_nd6_timer_funneled +_nd6_umaxtries +_nd6_useloopback +_nd_defrouter +_nd_ifinfo +_nd_prefix +_ndflush +_ndqb +_ndrv_abort +_ndrv_attach +_ndrv_bind +_ndrv_connect +_ndrv_control +_ndrv_ctlinput +_ndrv_ctloutput +_ndrv_delspec +_ndrv_detach +_ndrv_disconnect +_ndrv_do_detach +_ndrv_do_disconnect +_ndrv_dominit +_ndrv_drain +_ndrv_find_tag +_ndrv_flushq +_ndrv_get_ifp +_ndrv_handle_ifp_detach +_ndrv_init +_ndrv_input +_ndrv_output +_ndrv_peeraddr +_ndrv_read_event +_ndrv_recvspace +_ndrv_send +_ndrv_sendspace +_ndrv_sense +_ndrv_setspec +_ndrv_shutdown +_ndrv_sockaddr +_ndrv_sysctl +_ndrv_to_dlil_demux +_ndrv_usrreqs +_ndrvdomain +_ndrvl +_ndrvsw +_net_add_domain +_net_add_proto +_net_del_domain +_net_del_proto +_net_sysctl +_netaddr_match +_netboot_iaddr +_netboot_mountroot +_netboot_root +_netboot_rootpath +_netboot_setup +_netisr +_network_flock +_new_sysctl +_new_system_shared_regions +_newsysctl_list +_nextc +_nextgennumber +_nextsect +_nextseg +_nextsegfromheader +_nextvnodeid +_nf_list +_nfiles +_nfs_adv +_nfs_async +_nfs_asyncio +_nfs_bioread +_nfs_boot_getfh +_nfs_boot_init +_nfs_buf_get +_nfs_buf_incore +_nfs_buf_iodone +_nfs_buf_iowait +_nfs_buf_page_inval +_nfs_buf_release +_nfs_buf_remfree +_nfs_buf_upl_check +_nfs_buf_upl_setup +_nfs_buf_write +_nfs_buf_write_delayed +_nfs_bufq +_nfs_clearcommit +_nfs_cltpsock +_nfs_commit +_nfs_connect +_nfs_defect +_nfs_disconnect +_nfs_doio +_nfs_dolock +_nfs_false +_nfs_flushcommits +_nfs_fsinfo +_nfs_getattrcache +_nfs_getauth +_nfs_getcookie +_nfs_getnickauth +_nfs_getreq +_nfs_hash +_nfs_inactive +_nfs_init +_nfs_invaldir +_nfs_ioddelwri +_nfs_iodmount +_nfs_iodwant +_nfs_islocked +_nfs_ispublicfh +_nfs_loadattrcache +_nfs_lock +_nfs_mount_type +_nfs_mountroot 
+_nfs_namei +_nfs_nbdwrite +_nfs_nbinit +_nfs_nget +_nfs_nhinit +_nfs_node_hash_lock +_nfs_numasync +_nfs_prog +_nfs_readdirplusrpc +_nfs_readdirrpc +_nfs_readlinkrpc +_nfs_readrpc +_nfs_reclaim +_nfs_removeit +_nfs_rephead +_nfs_reply +_nfs_reqq +_nfs_request +_nfs_savenickauth +_nfs_send +_nfs_sigintr +_nfs_slplock +_nfs_slpunlock +_nfs_sndlock +_nfs_sndunlock +_nfs_ticks +_nfs_timer +_nfs_timer_funnel +_nfs_true +_nfs_udpsock +_nfs_unlock +_nfs_vfsops +_nfs_vinvalbuf +_nfs_write +_nfs_writerpc +_nfs_xdrneg1 +_nfs_xidwrap +_nfsadvlock_longest +_nfsadvlocks +_nfsadvlocks_time +_nfsbufcnt +_nfsbufdelwri +_nfsbufdelwricnt +_nfsbuffree +_nfsbuffreecnt +_nfsbufhash +_nfsbufhashlock +_nfsbufhashtbl +_nfsbufmax +_nfsbufmin +_nfsclnt +_nfsd_head +_nfsd_head_flag +_nfsd_waiting +_nfslockdans +_nfslockdfd +_nfslockdfp +_nfslockdwait +_nfslockdwaiting +_nfsm_adj +_nfsm_disct +_nfsm_mbuftouio +_nfsm_reqh +_nfsm_rpchead +_nfsm_srvfattr +_nfsm_srvpostopattr +_nfsm_srvwcc +_nfsm_strtmbuf +_nfsm_uiotombuf +_nfsneedbuffer +_nfsnodehash +_nfsnodehashtbl +_nfsrtt +_nfsrtton +_nfsrv3_access +_nfsrv3_procs +_nfsrv_cleancache +_nfsrv_commit +_nfsrv_create +_nfsrv_dorec +_nfsrv_errmap +_nfsrv_fhtovp +_nfsrv_fsinfo +_nfsrv_getattr +_nfsrv_getcache +_nfsrv_init +_nfsrv_initcache +_nfsrv_link +_nfsrv_lookup +_nfsrv_mkdir +_nfsrv_mknod +_nfsrv_noop +_nfsrv_null +_nfsrv_object_create +_nfsrv_pathconf +_nfsrv_rcv +_nfsrv_read +_nfsrv_readdir +_nfsrv_readdirplus +_nfsrv_readlink +_nfsrv_remove +_nfsrv_rename +_nfsrv_rmdir +_nfsrv_setattr +_nfsrv_setcred +_nfsrv_slpderef +_nfsrv_statfs +_nfsrv_symlink +_nfsrv_updatecache +_nfsrv_wakenfsd +_nfsrv_write +_nfsrv_writegather +_nfsrvhash +_nfsrvhashtbl +_nfsrvlruhead +_nfsrvw_procrastinate +_nfsrvw_procrastinate_v3 +_nfsrvw_sort +_nfsstats +_nfssvc +_nfssvc_sockhead +_nfssvc_sockhead_flag +_nfsv2_procid +_nfsv2_type +_nfsv2_vnodeop_opv_desc +_nfsv2_vnodeop_p +_nfsv3_procid +_nfsv3_type +_ngif +_niobuf +_nkdbufs +_nke_insert +_nlinesw +_nmbclusters +_nobdev +_nocdev +_nop_abortop +_nop_access +_nop_advlock +_nop_allocate +_nop_blkatoff +_nop_blktooff +_nop_bmap +_nop_bwrite +_nop_close +_nop_cmap +_nop_copyfile +_nop_create +_nop_devblocksize +_nop_exchange +_nop_fsync +_nop_getattr +_nop_getattrlist +_nop_inactive +_nop_ioctl +_nop_islocked +_nop_lease +_nop_link +_nop_lock +_nop_mkcomplex +_nop_mkdir +_nop_mknod +_nop_mmap +_nop_offtoblk +_nop_open +_nop_pagein +_nop_pageout +_nop_pathconf +_nop_pgrd +_nop_pgwr +_nop_print +_nop_read +_nop_readdir +_nop_readdirattr +_nop_readlink +_nop_reallocblks +_nop_reclaim +_nop_remove +_nop_rename +_nop_revoke +_nop_rmdir +_nop_searchfs +_nop_seek +_nop_select +_nop_setattr +_nop_setattrlist +_nop_strategy +_nop_symlink +_nop_truncate +_nop_unlock +_nop_update +_nop_valloc +_nop_vfree +_nop_whiteout +_nop_write +_nosys +_notify_filemod_watchers +_npcbufs +_nport +_nprocs +_nqfhhash +_nqfhhashtbl +_nqnfs_callback +_nqnfs_clientd +_nqnfs_clientlease +_nqnfs_getlease +_nqnfs_lease_check +_nqnfs_piggy +_nqnfs_prog +_nqnfs_serverd +_nqnfsrv_getlease +_nqnfsrv_vacated +_nqnfsstarttime +_nqsrv_clockskew +_nqsrv_getlease +_nqsrv_maxlease +_nqsrv_writeslack +_nqtimerhead +_nselcoll +_nswap +_nswapmap +_nswdev +_nsysent +_nulldev +_nullop +_nullsys +_numcache +_numdquot +_numnfsrvcache +_numused_vfsslots +_numvnodes +_nv3tov_type +_oaccept +_obreak +_ocreat +_ofstat +_oftruncate +_ogetdirentries +_ogetdomainname +_ogetdtablesize +_ogethostid +_ogethostname +_ogetpagesize +_ogetpeername +_ogetrlimit +_ogetsockname +_okillpg +_old_if_attach 
+_olseek +_olstat +_open +_orecv +_orecvfrom +_orecvmsg +_osend +_osendmsg +_osetdomainname +_osethostid +_osethostname +_osetregid +_osetreuid +_osetrlimit +_osigblock +_osigsetmask +_osigstack +_osigvec +_osmmap +_ostat +_otruncate +_ovadvise +_ovbcopy +_owait +_owait3 +_packattrblk +_packcommonattr +_packdirattr +_packfileattr +_packvolattr +_parse_bsd_args +_pathconf +_pc_buffer +_pc_buflast +_pc_bufptr +_pc_bufsize +_pc_buftomem +_pc_sample_pid +_pc_trace_frameworks +_pcb_synch +_pcsample_beg +_pcsample_comm +_pcsample_enable +_pcsample_end +_pcsample_flags +_pcsamples_bootstrap +_pcsamples_clear +_pcsamples_control +_pcsamples_ops +_pcsamples_read +_pcsamples_reinit +_pfctlinput +_pfctlinput2 +_pffasttimo +_pffinddomain +_pffindproto +_pffindtype +_pfind +_pfkeystat +_pfslowtimo +_pfxlist_onlink_check +_pgdelete +_pgfind +_pgrp0 +_pgrphash +_pgrphashtbl +_pgsignal +_physio +_pid_for_task +_pidhash +_pidhashtbl +_pim6_input +_pipe +_pmtu_expire +_pmtu_probe +_postevent +_postsig +_pread +_prelist_remove +_prelist_update +_prepare_profile_database +_prf +_print_vmpage_stat +_priority_IO_timestamp_for_root +_prngAllowReseed +_prngDestroy +_prngForceReseed +_prngInitialize +_prngInput +_prngOutput +_prngProcessSeedBuffer +_prngStretch +_proc0 +_proc_exit +_proc_is_classic +_proc_name +_proc_prepareexit +_proc_reparent +_procdup +_process_terminate_self +_procinit +_profil +_prtactive +_pru_abort_notsupp +_pru_accept_notsupp +_pru_attach_notsupp +_pru_bind_notsupp +_pru_connect2_notsupp +_pru_connect_notsupp +_pru_control_notsupp +_pru_detach_notsupp +_pru_disconnect_notsupp +_pru_listen_notsupp +_pru_peeraddr_notsupp +_pru_rcvd_notsupp +_pru_rcvoob_notsupp +_pru_send_notsupp +_pru_sense_null +_pru_shutdown_notsupp +_pru_sockaddr_notsupp +_pru_sopoll_notsupp +_pru_soreceive +_pru_soreceive_notsupp +_pru_sosend +_pru_sosend_notsupp +_pseudo_inits +_psignal +_psignal_lock +_psignal_sigprof +_psignal_uthread +_psignal_vfork +_psignal_vtalarm +_psignal_xcpu +_pstats0 +_pt_setrunnable +_pthread_sigmask +_ptrace +_pty_init +_putc +_pwrite +_q_to_b +_qsync +_quotactl +_quotaoff +_quotaon +_quotastat +_random +_random_close +_random_init +_random_ioctl +_random_open +_random_read +_random_write +_raw_attach +_raw_ctlinput +_raw_detach +_raw_disconnect +_raw_init +_raw_input +_raw_usrreqs +_rawcb_list +_rawread +_rawwrite +_rc4_crypt +_rc4_init +_read +_read_random +_readlink +_readv +_realitexpire +_reassignbuf +_reboot +_receive_packet +_recvfrom +_recvmsg +_register_sockfilter +_relookup +_remove_name +_rename +_resetpriority +_resize_namecache +_revoke +_rijndaelDecrypt +_rijndaelEncrypt +_rijndaelKeyEncToDec +_rijndaelKeySched +_rijndael_blockDecrypt +_rijndael_blockEncrypt +_rijndael_cipherInit +_rijndael_makeKey +_rijndael_padDecrypt +_rijndael_padEncrypt +_rip6_ctlinput +_rip6_ctloutput +_rip6_input +_rip6_output +_rip6_recvspace +_rip6_sendspace +_rip6_usrreqs +_rip6stat +_rip_ctlinput +_rip_ctloutput +_rip_init +_rip_input +_rip_output +_rip_recvspace +_rip_sendspace +_rip_usrreqs +_ripcb +_ripcbinfo +_rl_add +_rl_init +_rl_remove +_rl_scan +_rmdir +_rn_addmask +_rn_addroute +_rn_delete +_rn_init +_rn_inithead +_rn_lookup +_rn_match +_rn_refines +_rootdev +_rootdevice +_rootfs +_rootvnode +_rootvp +_route6_input +_route_cb +_route_init +_routedomain +_rpc_auth_kerb +_rpc_auth_unix +_rpc_autherr +_rpc_call +_rpc_mismatch +_rpc_msgaccepted +_rpc_msgdenied +_rpc_reply +_rpc_vers +_rr_prefix +_rsvp_input +_rsvp_on +_rt6_flush +_rt_ifmsg +_rt_missmsg +_rt_newaddrmsg +_rt_newmaddrmsg 
+_rt_setgate +_rt_tables +_rtalloc +_rtalloc1 +_rtalloc_ign +_rtfree +_rtinit +_rtioctl +_rtredirect +_rtref +_rtrequest +_rtsetifa +_rtunref +_ruadd +_run_netisr +_rwuio +_sa6_any +_safedounmount +_savacctp +_sb_lock +_sb_max +_sb_notify +_sballoc +_sbappend +_sbappendaddr +_sbappendcontrol +_sbappendrecord +_sbcompress +_sbcreatecontrol +_sbdrop +_sbdroprecord +_sbflush +_sbfree +_sbinsertoob +_sblock +_sbrelease +_sbreserve +_sbrk +_sbspace +_sbtoxsockbuf +_sbunlock +_sbwait +_scanc +_scope6_addr2default +_scope6_get +_scope6_get_default +_scope6_ids +_scope6_ifattach +_scope6_set +_scope6_setdefault +_searchfs +_securelevel +_selcontinue +_select +_selprocess +_selrecord +_selthreadclear +_seltrue +_selwait +_selwakeup +_sem +_sem_close +_sem_destroy +_sem_getvalue +_sem_init +_sem_open +_sem_post +_sem_trywait +_sem_unlink +_sem_wait +_sema +_semconfig +_semctl +_semexit +_semget +_seminfo +_seminit +_semop +_semsys +_semu +_sendmsg +_sendsig +_sendto +_session0 +_sessrele +_set_blocksize +_set_bsduthreadargs +_set_cast128_subkey +_set_fsblocksize +_set_procsigmask +_set_security_token +_setattrlist +_setconf +_setegid +_seteuid +_setgid +_setgroups +_setitimer +_setlogin +_setpgid +_setpriority +_setprivexec +_setquota +_setrlimit +_setsid +_setsigvec +_setsockopt +_setthetime +_settimeofday +_setuid +_setuse +_sfilter_init +_sfilter_term +_sha1_init +_sha1_loop +_sha1_pad +_sha1_result +_shadow_map_create +_shadow_map_free +_shadow_map_read +_shadow_map_shadow_size +_shadow_map_write +_shm_open +_shm_unlink +_shmat +_shmctl +_shmdt +_shmexit +_shmfork +_shmget +_shminfo +_shminit +_shmsegs +_shmsys +_shutdown +_sig_filtops +_sig_lock_to_exit +_sig_try_locked +_sigaction +_sigacts0 +_sigaltstack +_sigcontinue +_sigexit_locked +_siginit +_signal_lock +_signal_setast +_signal_unlock +_sigpending +_sigprocmask +_sigprop +_sigreturn +_sigsuspend +_sigwait +_skpc +_sleep +_snprintf +_so_cache_hw +_so_cache_init_done +_so_cache_max_freed +_so_cache_time +_so_cache_timeouts +_so_cache_timer +_so_cache_zone +_so_gencnt +_soabort +_soaccept +_soalloc +_sobind +_socantrcvmore +_socantsendmore +_sockargs +_socket +_socket_cache_head +_socket_cache_tail +_socket_debug +_socket_zone +_socketinit +_socketops +_socketpair +_soclose +_soconnect +_soconnect2 +_socreate +_sodealloc +_sodelayed_copy +_sodisconnect +_sodropablereq +_sofree +_sogetopt +_sohasoutofband +_soisconnected +_soisconnecting +_soisdisconnected +_soisdisconnecting +_solisten +_sonewconn +_soo_close +_soo_ioctl +_soo_kqfilter +_soo_read +_soo_select +_soo_stat +_soo_write +_soopt_getm +_soopt_mcopyin +_soopt_mcopyout +_sooptcopyin +_sooptcopyout +_sopoll +_soreadable +_soreceive +_soreserve +_sorflush +_sorwakeup +_sosend +_sosendallatonce +_sosetopt +_soshutdown +_sotoxsocket +_sowakeup +_sowriteable +_sowwakeup +_spec_badop +_spec_blktooff +_spec_bmap +_spec_close +_spec_cmap +_spec_devblocksize +_spec_ebadf +_spec_fsync +_spec_ioctl +_spec_lookup +_spec_nfsv2nodeop_opv_desc +_spec_nfsv2nodeop_p +_spec_offtoblk +_spec_open +_spec_pathconf +_spec_print +_spec_read +_spec_select +_spec_strategy +_spec_vnodeop_entries +_spec_vnodeop_opv_desc +_spec_vnodeop_p +_spec_write +_spechash_slock +_speclisth +_spl0 +_splbio +_splclock +_splhigh +_splimp +_spllo +_spln +_splnet +_sploff +_splon +_splpower +_splsched +_splsoftclock +_spltty +_splvm +_splx +_srv +_ss_fltsz +_ss_fltsz_local +_sstk +_startprofclock +_stat +_statfs +_statv +_stf_attach_inet6 +_stf_detach_inet6 +_stf_ioctl +_stf_pre_output +_stf_reg_if_mods +_stf_shutdown 
+_stfattach +_stop +_stopprofclock +_subyte +_suibyte +_suiword +_suser +_suword +_swapmap +_swapon +_swdevt +_symlink +_sync +_synthfs_access +_synthfs_adddirentry +_synthfs_cached_lookup +_synthfs_chflags +_synthfs_chmod +_synthfs_chown +_synthfs_create +_synthfs_fhtovp +_synthfs_getattr +_synthfs_inactive +_synthfs_init +_synthfs_islocked +_synthfs_lock +_synthfs_lookup +_synthfs_mkdir +_synthfs_mmap +_synthfs_mount +_synthfs_mount_fs +_synthfs_move_rename_entry +_synthfs_new_directory +_synthfs_new_symlink +_synthfs_open +_synthfs_pathconf +_synthfs_quotactl +_synthfs_readdir +_synthfs_readlink +_synthfs_reclaim +_synthfs_remove +_synthfs_remove_directory +_synthfs_remove_entry +_synthfs_remove_symlink +_synthfs_rename +_synthfs_rmdir +_synthfs_root +_synthfs_select +_synthfs_setattr +_synthfs_setupuio +_synthfs_start +_synthfs_statfs +_synthfs_symlink +_synthfs_sync +_synthfs_sysctl +_synthfs_unlock +_synthfs_unmount +_synthfs_update +_synthfs_vfsops +_synthfs_vget +_synthfs_vnodeop_entries +_synthfs_vnodeop_opv_desc +_synthfs_vnodeop_p +_synthfs_vptofh +_syscallnames +_sysctl__children +_sysctl__debug +_sysctl__debug_bpf_bufsize +_sysctl__debug_bpf_maxbufsize +_sysctl__debug_children +_sysctl__hw +_sysctl__hw_activecpu +_sysctl__hw_busfrequency +_sysctl__hw_busfrequency_compat +_sysctl__hw_busfrequency_max +_sysctl__hw_busfrequency_min +_sysctl__hw_byteorder +_sysctl__hw_cachelinesize +_sysctl__hw_cachelinesize_compat +_sysctl__hw_children +_sysctl__hw_cpufrequency +_sysctl__hw_cpufrequency_compat +_sysctl__hw_cpufrequency_max +_sysctl__hw_cpufrequency_min +_sysctl__hw_cpusubtype +_sysctl__hw_cputype +_sysctl__hw_epoch +_sysctl__hw_l1dcachesize +_sysctl__hw_l1dcachesize_compat +_sysctl__hw_l1icachesize +_sysctl__hw_l1icachesize_compat +_sysctl__hw_l2cachesize +_sysctl__hw_l2cachesize_compat +_sysctl__hw_l2settings +_sysctl__hw_l3cachesize +_sysctl__hw_l3cachesize_compat +_sysctl__hw_l3settings +_sysctl__hw_machine +_sysctl__hw_memsize +_sysctl__hw_model +_sysctl__hw_ncpu +_sysctl__hw_optional +_sysctl__hw_optional_children +_sysctl__hw_optional_floatingpoint +_sysctl__hw_pagesize +_sysctl__hw_pagesize_compat +_sysctl__hw_physmem +_sysctl__hw_tbfrequency +_sysctl__hw_tbfrequency_compat +_sysctl__hw_usermem +_sysctl__hw_vectorunit +_sysctl__kern +_sysctl__kern_children +_sysctl__kern_dummy +_sysctl__kern_ipc +_sysctl__kern_ipc_children +_sysctl__kern_ipc_maxsockbuf +_sysctl__kern_ipc_maxsockets +_sysctl__kern_ipc_nmbclusters +_sysctl__kern_ipc_sockbuf_waste_factor +_sysctl__kern_ipc_somaxconn +_sysctl__kern_ipc_sorecvmincopy +_sysctl__kern_ipc_sosendminchain +_sysctl__kern_maxfilesperproc +_sysctl__kern_maxprocperuid +_sysctl__kern_sysv +_sysctl__kern_sysv_children +_sysctl__kern_sysv_shmall +_sysctl__kern_sysv_shmmax +_sysctl__kern_sysv_shmmin +_sysctl__kern_sysv_shmmni +_sysctl__kern_sysv_shmseg +_sysctl__machdep +_sysctl__machdep_children +_sysctl__net +_sysctl__net_children +_sysctl__net_inet +_sysctl__net_inet6 +_sysctl__net_inet6_children +_sysctl__net_inet6_icmp6 +_sysctl__net_inet6_icmp6_children +_sysctl__net_inet6_icmp6_errppslimit +_sysctl__net_inet6_icmp6_nd6_debug +_sysctl__net_inet6_icmp6_nd6_delay +_sysctl__net_inet6_icmp6_nd6_maxnudhint +_sysctl__net_inet6_icmp6_nd6_mmaxtries +_sysctl__net_inet6_icmp6_nd6_prune +_sysctl__net_inet6_icmp6_nd6_umaxtries +_sysctl__net_inet6_icmp6_nd6_useloopback +_sysctl__net_inet6_icmp6_nodeinfo +_sysctl__net_inet6_icmp6_rediraccept +_sysctl__net_inet6_icmp6_redirtimeout +_sysctl__net_inet6_icmp6_stats +_sysctl__net_inet6_ip6 
+_sysctl__net_inet6_ip6_accept_rtadv +_sysctl__net_inet6_ip6_auto_flowlabel +_sysctl__net_inet6_ip6_auto_linklocal +_sysctl__net_inet6_ip6_children +_sysctl__net_inet6_ip6_dad_count +_sysctl__net_inet6_ip6_defmcasthlim +_sysctl__net_inet6_ip6_forwarding +_sysctl__net_inet6_ip6_gifhlim +_sysctl__net_inet6_ip6_hdrnestlimit +_sysctl__net_inet6_ip6_hlim +_sysctl__net_inet6_ip6_kame_version +_sysctl__net_inet6_ip6_keepfaith +_sysctl__net_inet6_ip6_log_interval +_sysctl__net_inet6_ip6_maxfragpackets +_sysctl__net_inet6_ip6_redirect +_sysctl__net_inet6_ip6_rip6stats +_sysctl__net_inet6_ip6_rr_prune +_sysctl__net_inet6_ip6_rtexpire +_sysctl__net_inet6_ip6_rtmaxcache +_sysctl__net_inet6_ip6_rtminexpire +_sysctl__net_inet6_ip6_stats +_sysctl__net_inet6_ip6_temppltime +_sysctl__net_inet6_ip6_tempvltime +_sysctl__net_inet6_ip6_use_deprecated +_sysctl__net_inet6_ip6_use_tempaddr +_sysctl__net_inet6_ip6_v6only +_sysctl__net_inet6_ipsec6 +_sysctl__net_inet6_ipsec6_ah_net_deflev +_sysctl__net_inet6_ipsec6_ah_trans_deflev +_sysctl__net_inet6_ipsec6_children +_sysctl__net_inet6_ipsec6_debug +_sysctl__net_inet6_ipsec6_def_policy +_sysctl__net_inet6_ipsec6_ecn +_sysctl__net_inet6_ipsec6_esp_net_deflev +_sysctl__net_inet6_ipsec6_esp_randpad +_sysctl__net_inet6_ipsec6_esp_trans_deflev +_sysctl__net_inet6_ipsec6_stats +_sysctl__net_inet6_tcp6 +_sysctl__net_inet6_tcp6_children +_sysctl__net_inet6_udp6 +_sysctl__net_inet6_udp6_children +_sysctl__net_inet_children +_sysctl__net_inet_div +_sysctl__net_inet_div_children +_sysctl__net_inet_icmp +_sysctl__net_inet_icmp_bmcastecho +_sysctl__net_inet_icmp_children +_sysctl__net_inet_icmp_drop_redirect +_sysctl__net_inet_icmp_icmplim +_sysctl__net_inet_icmp_log_redirect +_sysctl__net_inet_icmp_maskrepl +_sysctl__net_inet_icmp_stats +_sysctl__net_inet_igmp +_sysctl__net_inet_igmp_children +_sysctl__net_inet_igmp_stats +_sysctl__net_inet_ip +_sysctl__net_inet_ip_accept_sourceroute +_sysctl__net_inet_ip_check_interface +_sysctl__net_inet_ip_check_route_selfref +_sysctl__net_inet_ip_children +_sysctl__net_inet_ip_fastforwarding +_sysctl__net_inet_ip_forwarding +_sysctl__net_inet_ip_gifttl +_sysctl__net_inet_ip_intr_queue_drops +_sysctl__net_inet_ip_intr_queue_maxlen +_sysctl__net_inet_ip_keepfaith +_sysctl__net_inet_ip_linklocal +_sysctl__net_inet_ip_linklocal_children +_sysctl__net_inet_ip_linklocal_in +_sysctl__net_inet_ip_linklocal_in_allowbadttl +_sysctl__net_inet_ip_linklocal_in_children +_sysctl__net_inet_ip_linklocal_stat +_sysctl__net_inet_ip_maxfragpackets +_sysctl__net_inet_ip_portrange +_sysctl__net_inet_ip_portrange_children +_sysctl__net_inet_ip_portrange_first +_sysctl__net_inet_ip_portrange_hifirst +_sysctl__net_inet_ip_portrange_hilast +_sysctl__net_inet_ip_portrange_last +_sysctl__net_inet_ip_portrange_lowfirst +_sysctl__net_inet_ip_portrange_lowlast +_sysctl__net_inet_ip_redirect +_sysctl__net_inet_ip_rtexpire +_sysctl__net_inet_ip_rtmaxcache +_sysctl__net_inet_ip_rtminexpire +_sysctl__net_inet_ip_sourceroute +_sysctl__net_inet_ip_stats +_sysctl__net_inet_ip_subnets_are_local +_sysctl__net_inet_ip_ttl +_sysctl__net_inet_ip_use_route_genid +_sysctl__net_inet_ipsec +_sysctl__net_inet_ipsec_ah_cleartos +_sysctl__net_inet_ipsec_ah_net_deflev +_sysctl__net_inet_ipsec_ah_offsetmask +_sysctl__net_inet_ipsec_ah_trans_deflev +_sysctl__net_inet_ipsec_bypass +_sysctl__net_inet_ipsec_children +_sysctl__net_inet_ipsec_debug +_sysctl__net_inet_ipsec_def_policy +_sysctl__net_inet_ipsec_dfbit +_sysctl__net_inet_ipsec_ecn +_sysctl__net_inet_ipsec_esp_net_deflev 
+_sysctl__net_inet_ipsec_esp_port +_sysctl__net_inet_ipsec_esp_randpad +_sysctl__net_inet_ipsec_esp_trans_deflev +_sysctl__net_inet_ipsec_stats +_sysctl__net_inet_raw +_sysctl__net_inet_raw_children +_sysctl__net_inet_raw_maxdgram +_sysctl__net_inet_raw_pcblist +_sysctl__net_inet_raw_recvspace +_sysctl__net_inet_tcp +_sysctl__net_inet_tcp_always_keepalive +_sysctl__net_inet_tcp_blackhole +_sysctl__net_inet_tcp_children +_sysctl__net_inet_tcp_delacktime +_sysctl__net_inet_tcp_delayed_ack +_sysctl__net_inet_tcp_do_tcpdrain +_sysctl__net_inet_tcp_drop_synfin +_sysctl__net_inet_tcp_icmp_may_rst +_sysctl__net_inet_tcp_isn_reseed_interval +_sysctl__net_inet_tcp_keepidle +_sysctl__net_inet_tcp_keepinit +_sysctl__net_inet_tcp_keepintvl +_sysctl__net_inet_tcp_local_slowstart_flightsize +_sysctl__net_inet_tcp_log_in_vain +_sysctl__net_inet_tcp_msl +_sysctl__net_inet_tcp_mssdflt +_sysctl__net_inet_tcp_newreno +_sysctl__net_inet_tcp_path_mtu_discovery +_sysctl__net_inet_tcp_pcbcount +_sysctl__net_inet_tcp_pcblist +_sysctl__net_inet_tcp_recvspace +_sysctl__net_inet_tcp_rfc1323 +_sysctl__net_inet_tcp_rfc1644 +_sysctl__net_inet_tcp_sendspace +_sysctl__net_inet_tcp_slowlink_wsize +_sysctl__net_inet_tcp_slowstart_flightsize +_sysctl__net_inet_tcp_sockthreshold +_sysctl__net_inet_tcp_stats +_sysctl__net_inet_tcp_strict_rfc1948 +_sysctl__net_inet_tcp_tcbhashsize +_sysctl__net_inet_tcp_tcp_lq_overflow +_sysctl__net_inet_tcp_v6mssdflt +_sysctl__net_inet_udp +_sysctl__net_inet_udp_blackhole +_sysctl__net_inet_udp_checksum +_sysctl__net_inet_udp_children +_sysctl__net_inet_udp_log_in_vain +_sysctl__net_inet_udp_maxdgram +_sysctl__net_inet_udp_pcblist +_sysctl__net_inet_udp_recvspace +_sysctl__net_inet_udp_stats +_sysctl__net_key +_sysctl__net_key_ah_keymin +_sysctl__net_key_blockacq_count +_sysctl__net_key_blockacq_lifetime +_sysctl__net_key_children +_sysctl__net_key_debug +_sysctl__net_key_esp_auth +_sysctl__net_key_esp_keymin +_sysctl__net_key_int_random +_sysctl__net_key_larval_lifetime +_sysctl__net_key_natt_keepalive_interval +_sysctl__net_key_prefered_oldsa +_sysctl__net_key_spi_maxval +_sysctl__net_key_spi_minval +_sysctl__net_key_spi_trycnt +_sysctl__net_link +_sysctl__net_link_children +_sysctl__net_link_ether +_sysctl__net_link_ether_children +_sysctl__net_link_ether_inet +_sysctl__net_link_ether_inet_apple_hwcksum_rx +_sysctl__net_link_ether_inet_apple_hwcksum_tx +_sysctl__net_link_ether_inet_children +_sysctl__net_link_ether_inet_host_down_time +_sysctl__net_link_ether_inet_log_arp_wrong_iface +_sysctl__net_link_ether_inet_max_age +_sysctl__net_link_ether_inet_maxtries +_sysctl__net_link_ether_inet_proxyall +_sysctl__net_link_ether_inet_prune_intvl +_sysctl__net_link_ether_inet_useloopback +_sysctl__net_link_generic +_sysctl__net_link_generic_children +_sysctl__net_local +_sysctl__net_local_children +_sysctl__net_local_dgram +_sysctl__net_local_dgram_children +_sysctl__net_local_dgram_maxdgram +_sysctl__net_local_dgram_pcblist +_sysctl__net_local_dgram_recvspace +_sysctl__net_local_inflight +_sysctl__net_local_stream +_sysctl__net_local_stream_children +_sysctl__net_local_stream_pcblist +_sysctl__net_local_stream_recvspace +_sysctl__net_local_stream_sendspace +_sysctl__net_routetable +_sysctl__net_routetable_children +_sysctl__sysctl +_sysctl__sysctl_children +_sysctl__sysctl_debug +_sysctl__sysctl_name +_sysctl__sysctl_name2oid +_sysctl__sysctl_name_children +_sysctl__sysctl_next +_sysctl__sysctl_next_children +_sysctl__sysctl_oidfmt +_sysctl__sysctl_oidfmt_children +_sysctl__user 
+_sysctl__user_children +_sysctl__vfs +_sysctl__vfs_children +_sysctl__vfs_generic +_sysctl__vfs_generic_children +_sysctl__vfs_generic_ctlbyfsid +_sysctl__vfs_generic_ctlbyfsid_children +_sysctl__vfs_generic_nfs +_sysctl__vfs_generic_nfs_children +_sysctl__vfs_generic_nfs_client +_sysctl__vfs_generic_nfs_client_children +_sysctl__vfs_generic_nfs_client_initialdowndelay +_sysctl__vfs_generic_nfs_client_nextdowndelay +_sysctl__vfs_generic_vfsidlist +_sysctl__vm +_sysctl__vm_children +_sysctl_clockrate +_sysctl_doproc +_sysctl_file +_sysctl_handle_int +_sysctl_handle_int2quad +_sysctl_handle_long +_sysctl_handle_opaque +_sysctl_handle_quad +_sysctl_handle_string +_sysctl_int +_sysctl_mib_init +_sysctl_procargs +_sysctl_quad +_sysctl_rdint +_sysctl_rdquad +_sysctl_rdstring +_sysctl_rdstruct +_sysctl_register_all +_sysctl_register_fixed +_sysctl_register_oid +_sysctl_register_set +_sysctl_set +_sysctl_string +_sysctl_struct +_sysctl_unregister_oid +_sysctl_unregister_set +_sysctl_vnode +_sysctlbyname +_sysent +_systemdomain +_systemdomain_init +_tablefull +_task_for_pid +_tbeproc +_tcb +_tcbinfo +_tcp6_ctlinput +_tcp6_input +_tcp6_usrreqs +_tcp_backoff +_tcp_canceltimers +_tcp_ccgen +_tcp_close +_tcp_ctlinput +_tcp_ctloutput +_tcp_delack_enabled +_tcp_delacktime +_tcp_do_newreno +_tcp_drain +_tcp_drop +_tcp_drop_syn_sent +_tcp_fasttimo +_tcp_fillheaders +_tcp_freeq +_tcp_gettaocache +_tcp_init +_tcp_input +_tcp_keepidle +_tcp_keepinit +_tcp_keepintvl +_tcp_lq_overflow +_tcp_maketemplate +_tcp_maxidle +_tcp_maxpersistidle +_tcp_msl +_tcp_mss +_tcp_mssdflt +_tcp_mssopt +_tcp_mtudisc +_tcp_new_isn +_tcp_newtcpcb +_tcp_now +_tcp_output +_tcp_quench +_tcp_recvspace +_tcp_respond +_tcp_rtlookup +_tcp_rtlookup6 +_tcp_sendspace +_tcp_setpersist +_tcp_slowtimo +_tcp_syn_backoff +_tcp_timers +_tcp_usrreqs +_tcp_v6mssdflt +_tcpstat +_temp_msgbuf +_termioschars +_thread_flavor_array +_thread_funnel_get +_thread_funnel_merge +_thread_funnel_set +_thread_funnel_switch +_threadsignal +_tick +_time +_time_wait_slots +_time_zone_slock_init +_timeout +_timevaladd +_timevalfix +_timevalsub +_tk_cancc +_tk_nin +_tk_nout +_tk_rawcc +_to_bsd_time +_to_hfs_time +_tprintf +_tprintf_close +_tprintf_open +_tputchar +_trashMemory +_truncate +_tsleep +_tsleep0 +_tsleep1 +_ttioctl +_ttread +_ttrstrt +_ttselect +_ttsetwater +_ttspeedtab +_ttstart +_ttwakeup +_ttwrite +_ttwwakeup +_tty_pgsignal +_ttyblock +_ttychars +_ttycheckoutq +_ttyclose +_ttyflush +_ttyfree +_ttyinfo +_ttyinput +_ttylclose +_ttymalloc +_ttymodem +_ttyopen +_ttyprintf +_ttyselect +_ttysleep +_ttywait +_tvtoabstime +_tvtohz +_tz +_tz_slock +_uap +_ubc_blktooff +_ubc_clean +_ubc_clearflags +_ubc_create_upl +_ubc_getcred +_ubc_getobject +_ubc_getsize +_ubc_hold +_ubc_info_deallocate +_ubc_info_init +_ubc_info_zone +_ubc_invalidate +_ubc_isinuse +_ubc_issetflags +_ubc_offtoblk +_ubc_page_op +_ubc_pushdirty +_ubc_pushdirty_range +_ubc_range_op +_ubc_rele +_ubc_release +_ubc_release_named +_ubc_setcred +_ubc_setflags +_ubc_setpager +_ubc_setsize +_ubc_uncache +_ubc_upl_abort +_ubc_upl_abort_range +_ubc_upl_commit +_ubc_upl_commit_range +_ubc_upl_map +_ubc_upl_pageinfo +_ubc_upl_unmap +_ucsfncmp +_ucsfntrans +_udb +_udbinfo +_udp6_ctlinput +_udp6_input +_udp6_output +_udp6_recvspace +_udp6_sendspace +_udp6_usrreqs +_udp_ctlinput +_udp_in6 +_udp_init +_udp_input +_udp_ip6 +_udp_notify +_udp_recvspace +_udp_sendspace +_udp_shutdown +_udp_usrreqs +_udpstat +_ufs_access +_ufs_advlock +_ufs_bmap +_ufs_bmaparray +_ufs_check_export +_ufs_checkpath +_ufs_close 
+_ufs_cmap +_ufs_create +_ufs_dirbad +_ufs_dirbadentry +_ufs_dirempty +_ufs_direnter +_ufs_direnter2 +_ufs_dirremove +_ufs_dirrewrite +_ufs_getattr +_ufs_getlbns +_ufs_ihash_slock +_ufs_ihashget +_ufs_ihashinit +_ufs_ihashins +_ufs_ihashlookup +_ufs_ihashrem +_ufs_inactive +_ufs_init +_ufs_ioctl +_ufs_islocked +_ufs_kqfilt_add +_ufs_link +_ufs_lock +_ufs_lookup +_ufs_makeinode +_ufs_mkdir +_ufs_mknod +_ufs_mmap +_ufs_open +_ufs_pathconf +_ufs_print +_ufs_quotactl +_ufs_readdir +_ufs_readlink +_ufs_reclaim +_ufs_remove +_ufs_rename +_ufs_rmdir +_ufs_root +_ufs_seek +_ufs_select +_ufs_setattr +_ufs_start +_ufs_strategy +_ufs_symlink +_ufs_unlock +_ufs_vfsops +_ufs_vinit +_ufs_whiteout +_ufsfifo_close +_ufsfifo_kqfilt_add +_ufsfifo_read +_ufsfifo_write +_ufsspec_close +_ufsspec_read +_ufsspec_write +_uihash +_uihashtbl +_uiomove +_uiomove64 +_uipc_usrreqs +_umask +_unblock_procsigmask +_undelete +_unicode_to_hfs +_union_abortop +_union_access +_union_advlock +_union_allocvp +_union_blktooff +_union_bmap +_union_close +_union_cmap +_union_copyfile +_union_copyup +_union_create +_union_dircache +_union_dowhiteout +_union_freevp +_union_fsync +_union_getattr +_union_inactive +_union_init +_union_ioctl +_union_islocked +_union_lease +_union_link +_union_lock +_union_lookup +_union_mkdir +_union_mknod +_union_mkshadow +_union_mkwhiteout +_union_mmap +_union_mount +_union_newlower +_union_newsize +_union_newupper +_union_offtoblk +_union_open +_union_pagein +_union_pageout +_union_pathconf +_union_print +_union_read +_union_readdir +_union_readlink +_union_reclaim +_union_remove +_union_removed_upper +_union_rename +_union_revoke +_union_rmdir +_union_root +_union_seek +_union_select +_union_setattr +_union_start +_union_statfs +_union_strategy +_union_symlink +_union_unlock +_union_unmount +_union_updatevp +_union_vfsops +_union_vn_close +_union_vn_create +_union_vnodeop_entries +_union_vnodeop_opv_desc +_union_vnodeop_p +_union_whiteout +_union_write +_unix_syscall +_unix_syscall_return +_unlink +_unmount +_unp_connect2 +_unp_dispose +_unp_externalize +_unp_init +_unp_zone +_unputc +_unregister_sockfilter +_untimeout +_upl_get_internal_page_list +_uprintf +_ureadc +_useracc +_userland_sysctl +_utf8_decodestr +_utf8_encodelen +_utf8_encodestr +_utf8_to_hfs +_utf8_to_mac_roman +_utf_extrabytes +_utimes +_utrace +_v_putc +_va_null +_vagevp +_vattr_null +_vcount +_vfinddev +_vflush +_vfork +_vfork_exit +_vfork_return +_vfs_busy +_vfs_event_init +_vfs_event_signal +_vfs_export +_vfs_export_lookup +_vfs_getnewfsid +_vfs_getvfs +_vfs_init_io_attributes +_vfs_io_attributes +_vfs_mountedon +_vfs_mountroot +_vfs_nummntops +_vfs_op_descs +_vfs_op_init +_vfs_opv_descs +_vfs_opv_init +_vfs_opv_numops +_vfs_rootmountalloc +_vfs_sysctl +_vfs_unbusy +_vfsconf +_vfsconf_add +_vfsconf_del +_vfsinit +_vget +_vgone +_vgonel +_vhold +_vinvalbuf +_vm_initial_limit_core +_vm_initial_limit_data +_vm_initial_limit_stack +_vm_sysctl +_vn_bwrite +_vn_close +_vn_default_error +_vn_lock +_vn_mkdir +_vn_open +_vn_rdwr +_vn_stat +_vn_symlink +_vn_table +_vn_writechk +_vndevice_init +_vndevice_root_image +_vnode_free_list +_vnode_free_list_slock +_vnode_inactive_list +_vnode_objects_reclaimed +_vnode_pagein +_vnode_pageout +_vnode_pager_get_filesize +_vnode_reclaim_tried +_vnodetarget +_vnops +_volfs_access +_volfs_fhtovp +_volfs_getattr +_volfs_init +_volfs_islocked +_volfs_load +_volfs_lock +_volfs_lookup +_volfs_mount +_volfs_pathconf +_volfs_quotactl +_volfs_readdir +_volfs_reclaim +_volfs_rmdir +_volfs_root +_volfs_select 
+_volfs_start +_volfs_statfs +_volfs_sync +_volfs_sysctl +_volfs_unlock +_volfs_unmount +_volfs_vfsops +_volfs_vget +_volfs_vnodeop_entries +_volfs_vnodeop_opv_desc +_volfs_vnodeop_p +_volfs_vptofh +_vop_abortop_desc +_vop_abortop_vp_offsets +_vop_access_desc +_vop_access_vp_offsets +_vop_advlock_desc +_vop_advlock_vp_offsets +_vop_allocate_desc +_vop_allocate_vp_offsets +_vop_blkatoff_desc +_vop_blkatoff_vp_offsets +_vop_blktooff_desc +_vop_blktooff_vp_offsets +_vop_bmap_desc +_vop_bmap_vp_offsets +_vop_bwrite_desc +_vop_bwrite_vp_offsets +_vop_cachedlookup_desc +_vop_cachedlookup_vp_offsets +_vop_close_desc +_vop_close_vp_offsets +_vop_cmap_desc +_vop_cmap_vp_offsets +_vop_copyfile_desc +_vop_copyfile_vp_offsets +_vop_create_desc +_vop_create_vp_offsets +_vop_default_desc +_vop_devblocksize_desc +_vop_devblocksize_vp_offsets +_vop_exchange_desc +_vop_exchange_vp_offsets +_vop_fsync_desc +_vop_fsync_vp_offsets +_vop_getattr_desc +_vop_getattr_vp_offsets +_vop_getattrlist_desc +_vop_getattrlist_vp_offsets +_vop_inactive_desc +_vop_inactive_vp_offsets +_vop_ioctl_desc +_vop_ioctl_vp_offsets +_vop_islocked_desc +_vop_islocked_vp_offsets +_vop_kqfilt_add_desc +_vop_kqfilt_add_vp_offsets +_vop_kqfilt_remove_desc +_vop_kqfilt_remove_vp_offsets +_vop_lease_desc +_vop_lease_vp_offsets +_vop_link_desc +_vop_link_vp_offsets +_vop_lock_desc +_vop_lock_vp_offsets +_vop_lookup_desc +_vop_lookup_vp_offsets +_vop_mkcomplex_desc +_vop_mkcomplex_vp_offsets +_vop_mkdir_desc +_vop_mkdir_vp_offsets +_vop_mknod_desc +_vop_mknod_vp_offsets +_vop_mmap_desc +_vop_mmap_vp_offsets +_vop_noislocked +_vop_nolock +_vop_nounlock +_vop_offtoblk_desc +_vop_offtoblk_vp_offsets +_vop_open_desc +_vop_open_vp_offsets +_vop_pagein_desc +_vop_pagein_vp_offsets +_vop_pageout_desc +_vop_pageout_vp_offsets +_vop_pathconf_desc +_vop_pathconf_vp_offsets +_vop_pgrd_desc +_vop_pgrd_vp_offsets +_vop_pgwr_desc +_vop_pgwr_vp_offsets +_vop_print_desc +_vop_print_vp_offsets +_vop_read_desc +_vop_read_vp_offsets +_vop_readdir_desc +_vop_readdir_vp_offsets +_vop_readdirattr_desc +_vop_readdirattr_vp_offsets +_vop_readlink_desc +_vop_readlink_vp_offsets +_vop_reallocblks_desc +_vop_reallocblks_vp_offsets +_vop_reclaim_desc +_vop_reclaim_vp_offsets +_vop_remove_desc +_vop_remove_vp_offsets +_vop_rename_desc +_vop_rename_vp_offsets +_vop_revoke +_vop_revoke_desc +_vop_revoke_vp_offsets +_vop_rmdir_desc +_vop_rmdir_vp_offsets +_vop_searchfs_desc +_vop_searchfs_vp_offsets +_vop_seek_desc +_vop_seek_vp_offsets +_vop_select_desc +_vop_select_vp_offsets +_vop_setattr_desc +_vop_setattr_vp_offsets +_vop_setattrlist_desc +_vop_setattrlist_vp_offsets +_vop_strategy_desc +_vop_strategy_vp_offsets +_vop_symlink_desc +_vop_symlink_vp_offsets +_vop_truncate_desc +_vop_truncate_vp_offsets +_vop_unlock_desc +_vop_unlock_vp_offsets +_vop_update_desc +_vop_update_vp_offsets +_vop_valloc_desc +_vop_valloc_vp_offsets +_vop_vfree_desc +_vop_vfree_vp_offsets +_vop_whiteout_desc +_vop_whiteout_vp_offsets +_vop_write_desc +_vop_write_vp_offsets +_vp_pagein +_vp_pgoclean +_vp_pgodirty +_vprint +_vput +_vpwakeup +_vrecycle +_vref +_vrele +_vslock +_vsnprintf +_vsprintf +_vsunlock +_vttoif_tab +_vwakeup +_wait1 +_wait1continue +_wait4 +_waitevent +_waittime +_wakeup +_wakeup_one +_walk_allvnodes +_walk_vnodes_debug +_watchevent +_write +_writev +_ws_disabled +_zError +_z_errmsg +_zeroin6_addr +_zeroin_addr +_zlibVersion +_zombproc + diff --git a/config/BSDKernel.i386.exports b/config/BSDKernel.i386.exports new file mode 100644 index 000000000..e69de29bb diff --git 
a/config/BSDKernel.ppc.exports b/config/BSDKernel.ppc.exports new file mode 100644 index 000000000..195e5f977 --- /dev/null +++ b/config/BSDKernel.ppc.exports @@ -0,0 +1,489 @@ +_AARPwakeup +_ASPgetmsg +_ASPputmsg +_ATPgetreq +_ATPgetrsp +_ATPsndreq +_ATPsndrsp +_ATgetmsg +_ATputmsg +_ATsocket +_AURPaccess +_AURPcleanup +_AURPcmdx +_AURPfreemsg +_AURPgetmsg +_AURPgetri +_AURPinit +_AURPiocack +_AURPiocnak +_AURPpurgeri +_AURPrcvOpenReq +_AURPrcvOpenRsp +_AURPrcvRDReq +_AURPrcvRIAck +_AURPrcvRIReq +_AURPrcvRIRsp +_AURPrcvRIUpd +_AURPrcvTickle +_AURPrcvTickleAck +_AURPrcvZReq +_AURPrcvZRsp +_AURPrtupdate +_AURPsend +_AURPsetri +_AURPshutdown +_AURPsndGDZL +_AURPsndGZN +_AURPsndOpenReq +_AURPsndOpenReq_funnel +_AURPsndRDReq +_AURPsndRIAck +_AURPsndRIReq +_AURPsndRIReq_funnel +_AURPsndRIRsp_funnel +_AURPsndRIUpd +_AURPsndRIUpd_funnel +_AURPsndTickle +_AURPsndZReq +_AURPsndZRsp +_AURPupdate +_AURPupdateri +_AbortIO +_AdspBad +_CalcRecvWdw +_CalcSendQFree +_CheckAttn +_CheckOkToClose +_CheckReadQueue +_CheckRecvSeq +_CheckSend +_CleanupGlobals +_CompleteQueue +_DDP_chksum_on +_DDP_slfsnd_on +_DoClose +_DoTimerElem +_ErrorRTMPoverflow +_ErrorZIPoverflow +_FillSendQueue +_FindSender +_InitGlobals +_InsertTimerElem +_NextCID +_NotifyUser +_RT_maxentry +_RT_table +_RT_table_freelist +_RT_table_start +_RXAttention +_RXData +_RXFReset +_RXFResetAck +_RemoveCCB +_RemoveTimerElem +_RouterError +_RouterMix +_RxClose +_SndMsgUp +_TimerQueueTick +_TimerStop +_TimerTick +_TimerTick_funnel +_TrashSession +_UrgentUser +_ZIPwakeup +_ZT_maxentry +_ZT_table +__ATPgetreq +__ATPgetrsp +__ATPsndreq +__ATPsndrsp +__ATclose +__ATgetmsg +__ATioctl +__ATkqfilter +__ATputmsg +__ATread +__ATrw +__ATselect +__ATsocket +__ATwrite +_aarp_chk_addr +_aarp_init1 +_aarp_init2 +_aarp_rcv_pkt +_aarp_sched_probe +_aarp_send_data +_aarp_table +_abs +_add_ddp_handler +_adspAllocateCCB +_adspAssignSocket +_adspAttention +_adspCLDeny +_adspCLListen +_adspClose +_adspDeassignSocket +_adspGlobal +_adspInit +_adspInited +_adspMode +_adspNewCID +_adspOpen +_adspOptions +_adspPacket +_adspRead +_adspReadAttention +_adspReadHandler +_adspRelease +_adspReset +_adspStatus +_adspWrite +_adspWriteHandler +_adsp_close +_adsp_dequeue_ccb +_adsp_input +_adsp_inputC +_adsp_inputQ +_adsp_open +_adsp_pidM +_adsp_readable +_adsp_rput +_adsp_sendddp +_adsp_window +_adsp_wput +_adsp_writeable +_adspall_lock +_adspgen_lock +_adspioc_ack +_adsptmr_lock +_append_copy +_appletalk_hack_start +_appletalk_inited +_arpinp_lock +_asp_ack_reply +_asp_clock +_asp_clock_funnel +_asp_close +_asp_init +_asp_inpC +_asp_nak_reply +_asp_open +_asp_pack_bdsp +_asp_readable +_asp_scbQ +_asp_wput +_aspall_lock +_asptmo_lock +_at_control +_at_ddp_brt +_at_ddp_stats +_at_ifQueueHd +_at_insert +_at_interfaces +_at_ioctl +_at_memzone_init +_at_pcballoc +_at_pcbbind +_at_pcbdetach +_at_reg_mcast +_at_state +_at_unreg_mcast +_atalk_closeref +_atalk_enablew +_atalk_flush +_atalk_getref +_atalk_gettrace +_atalk_load +_atalk_notify +_atalk_notify_sel +_atalk_openref +_atalk_peek +_atalk_post_msg +_atalk_putnext +_atalk_settrace +_atalk_to_ip +_atalk_unload +_atalkdomain +_atalkintr +_atalkintrq +_atalksw +_atp_bind +_atp_build_release +_atp_cancel_req +_atp_close +_atp_delete_free_clusters +_atp_dequeue_atp +_atp_drop_req +_atp_free +_atp_free_cluster_list +_atp_free_cluster_timeout_set +_atp_free_list +_atp_init +_atp_inited +_atp_input +_atp_inputQ +_atp_iocack +_atp_iocnak +_atp_link +_atp_lomask +_atp_mask +_atp_need_rel +_atp_open +_atp_pidM +_atp_rcb_alloc +_atp_rcb_data 
+_atp_rcb_free +_atp_rcb_free_list +_atp_rcb_timer +_atp_reply +_atp_req_ind +_atp_req_timeout +_atp_resource_m +_atp_retry_req +_atp_rput +_atp_rsp_ind +_atp_send +_atp_send_replies +_atp_send_req +_atp_send_rsp +_atp_state_data +_atp_tid +_atp_timout +_atp_trans_abort +_atp_trans_alloc +_atp_trans_free +_atp_trans_free_list +_atp_treq_event +_atp_trp_clock +_atp_trp_clock_funnel +_atp_unlink +_atp_untimout +_atp_used_list +_atp_wput +_atp_x_done +_atp_x_done_funnel +_atpall_lock +_atpcb_zone +_atpgen_lock +_atptmo_lock +_attachData +_aurp_close +_aurp_global +_aurp_gref +_aurp_ifID +_aurp_open +_aurp_state +_aurp_wakeup +_aurp_wput +_aurpd_start +_aurpgen_lock +_calcRecvQ +_calcSendQ +_ccb_used_list +_completepb +_cons_getc +_cons_putc +_consclose +_consioctl +_consopen +_consread +_consselect +_conswrite +_copy_pkt +_dbgBits +_ddp_AURPfuncx +_ddp_AURPsendx +_ddp_add_if +_ddp_adjmsg +_ddp_age_router +_ddp_bit_reverse +_ddp_brt_init +_ddp_brt_shutdown +_ddp_brt_sweep +_ddp_brt_sweep_funnel +_ddp_brt_sweep_timer +_ddp_checksum +_ddp_compress_msg +_ddp_ctloutput +_ddp_glean +_ddp_growmsg +_ddp_handler +_ddp_head +_ddp_init +_ddp_input +_ddp_notify_nbp +_ddp_output +_ddp_pru_abort +_ddp_pru_attach +_ddp_pru_bind +_ddp_pru_connect +_ddp_pru_control +_ddp_pru_detach +_ddp_pru_disconnect +_ddp_pru_peeraddr +_ddp_pru_send +_ddp_pru_shutdown +_ddp_pru_sockaddr +_ddp_putmsg +_ddp_recvspace +_ddp_rem_if +_ddp_router_output +_ddp_sendspace +_ddp_shutdown +_ddp_slowtimo +_ddp_socket_inuse +_ddp_start +_ddp_usrreqs +_ddpall_lock +_ddpinp_lock +_dst_addr_cnt +_elap_dataput +_elap_offline +_elap_online3 +_elap_wput +_ep_input +_errstr +_et_zeroaddr +_etalk_multicast_addr +_find_ifID +_forUs +_gbuf_freel +_gbuf_linkpkt +_gbuf_strip +_getIfUsage +_getLocalZone +_getNbpTable +_getNbpTableSize +_getPhysAddrSize +_getRTRLocalZone +_getRtmpTable +_getRtmpTableSize +_getSPLocalZone +_getZipTable +_getZipTableSize +_getchar +_gets +_gref_alloc +_gref_close +_gref_wput +_ifID_home +_ifID_table +_init_ddp_handler +_ioc_ack +_lap_online +_m_clattach +_m_lgbuf_alloc +_m_lgbuf_free +_name_registry +_nbp_add_multicast +_nbp_delete_entry +_nbp_fillin_nve +_nbp_find_nve +_nbp_input +_nbp_mh_reg +_nbp_new_nve_entry +_nbp_shutdown +_nbp_strhash +_net_access +_net_access_cnt +_net_export +_net_port +_no_of_nets_tried +_no_of_nodes_tried +_nve_lock +_ot_ddp_check_socket +_pat_output +_pktsDropped +_pktsHome +_pktsIn +_pktsOut +_ppc_gettimeofday +_prep_ZIP_reply_packet +_probe_cb +_qAddToEnd +_qfind_m +_rcv_connection_id +_reboot_how +_refall_lock +_regDefaultZone +_releaseData +_routerStart +_router_added +_router_killed +_routershutdown +_routing_needed +_rt_bdelete +_rt_binsert +_rt_blookup +_rt_delete +_rt_getNextRoute +_rt_insert +_rt_show +_rt_sortedshow +_rt_table_init +_rtmp_dropper +_rtmp_init +_rtmp_input +_rtmp_prep_new_packet +_rtmp_purge +_rtmp_r_find_bridge +_rtmp_router_input +_rtmp_router_start +_rtmp_send_port +_rtmp_send_port_funnel +_rtmp_shutdown +_rtmp_timeout +_scb_free_list +_scb_resource_m +_scb_used_list +_setLocalZones +_sethzonehash +_sip_input +_snmpFlags +_snmpStats +_sys_ATPgetreq +_sys_ATPgetrsp +_sys_ATPsndreq +_sys_ATPsndrsp +_sys_ATgetmsg +_sys_ATputmsg +_sys_ATsocket +_sysctl__net_appletalk +_sysctl__net_appletalk_children +_sysctl__net_appletalk_ddpstats +_sysctl__net_appletalk_debug +_sysctl__net_appletalk_routermix +_trackrouter +_trackrouter_rem_if +_trp_tmo_rcb +_ttalk_multicast_addr +_update_tmo +_upshift8 +_uwritec +_xpatcnt +_xsum_assym +_zip_control +_zip_handle_getmyzone 
+_zip_prep_query_packet +_zip_reply_received +_zip_reply_to_getlocalzones +_zip_reply_to_getzonelist +_zip_router_input +_zip_sched_getnetinfo +_zip_send_queries +_zip_type_packet +_zonename_equal +_zt_add_zone +_zt_add_zonename +_zt_clr_zmap +_zt_compute_hash +_zt_ent_zcount +_zt_ent_zindex +_zt_find_zname +_zt_getNextZone +_zt_get_zmcast +_zt_remove_zones +_zt_set_zmap +_zt_upper_zname diff --git a/config/IOKit.exports b/config/IOKit.exports new file mode 100644 index 000000000..0c77e7040 --- /dev/null +++ b/config/IOKit.exports @@ -0,0 +1,2388 @@ +_IOAlignmentToSize +_IOBSDNameMatching +_IOBSDRegistryEntryForDeviceTree +_IOBSDRegistryEntryGetData +_IOBSDRegistryEntryRelease +_IOCDMatching +_IOCreateThread +_IODTFreeLoaderInfo +_IODTGetLoaderInfo +_IODelay +_IODiskMatching +_IOExitThread +_IOFindBSDRoot +_IOFindMatchingChild +_IOFindNameForValue +_IOFindValueForName +_IOFlushProcessorCache +_IOFree +_IOFreeAligned +_IOFreeContiguous +_IOFreePageable +_IOGetTime +_IOIteratePageableMaps +_IOKitBSDInit +_IOKitResetTime +_IOLibInit +_IOLockAlloc +_IOLockFree +_IOLockInitWithState +_IOLog +_IOMalloc +_IOMallocAligned +_IOMallocContiguous +_IOMallocPageable +_IOMappedRead16 +_IOMappedRead32 +_IOMappedRead64 +_IOMappedRead8 +_IOMappedWrite16 +_IOMappedWrite32 +_IOMappedWrite64 +_IOMappedWrite8 +_IOMapperIOVMAlloc +_IOMapperIOVMFree +_IOMapperInsertPPNPages +_IOMapperInsertPage +_IOMapperInsertUPLPages +_IONDRVLibrariesInitialize +_IONetworkMatching +_IONetworkNamePrefixMatching +_IOOFPathMatching +_IOPageableMapForAddress +_IOPanic +_IOPrintPlane +_IORWLockAlloc +_IORWLockFree +_IORecursiveLockAlloc +_IORecursiveLockFree +_IORecursiveLockHaveLock +_IORecursiveLockLock +_IORecursiveLockSleep +_IORecursiveLockTryLock +_IORecursiveLockUnlock +_IORecursiveLockWakeup +_IOSetProcessorCacheMode +_IOSimpleLockAlloc +_IOSimpleLockFree +_IOSimpleLockInit +_IOSizeToAlignment +_IOSleep +_IOSpinUnlock +_IOSystemShutdownNotification +_IOTrySpinLock +_IOZeroTvalspec +_OSKernelStackRemaining +_OSPrintMemory +_PEGetGMTTimeOfDay +_PEGetMachineName +_PEGetModelName +_PEGetPlatformEpoch +_PEHaltRestart +_PESavePanicInfo +_PESetGMTTimeOfDay +_PE_cpu_halt +_PE_cpu_machine_init +_PE_cpu_machine_quiesce +_PE_cpu_signal +_PE_cpu_start +_PE_call_timebase_callback +_PE_enter_debugger +_PE_halt_restart +_PE_parse_boot_arg +_PE_poll_input +_StartIOKit +__Z10tellClientP8OSObjectPv +__Z16IOCPUSleepKernelv +__Z16IODTFindSlotNameP15IORegistryEntrym +__Z16IODTSetResolvingP15IORegistryEntryPFlmPmS1_EPFvS0_PhS4_S4_E +__Z17IODTGetCellCountsP15IORegistryEntryPmS1_ +__Z17IODTMapInterruptsP15IORegistryEntry +__Z17IODeviceTreeAllocPv +__Z17IOServiceOrderingPK15OSMetaClassBaseS1_Pv +__Z18IODTCompareNubNamePK15IORegistryEntryP8OSStringPS3_ +__Z19IODTMapOneInterruptP15IORegistryEntryPmPP6OSDataPPK8OSSymbol +__Z19printDictionaryKeysP12OSDictionaryPc +__Z19tellAppWithResponseP8OSObjectPv +__Z20IODTMakeNVDescriptorP15IORegistryEntryP17IONVRAMDescriptor +__Z20IODTMatchNubWithKeysP15IORegistryEntryPKc +__Z21IODTResolveAddressingP15IORegistryEntryPKcP14IODeviceMemory +__Z22IODTResolveAddressCellP15IORegistryEntryPmS1_S1_ +__Z22tellClientWithResponseP8OSObjectPv +__Z23IODTFindInterruptParentP15IORegistryEntry +__Z23IODTFindMatchingEntriesP15IORegistryEntrymPKc +__Z24broadcast_aggressivenessP8OSObjectPvS1_S1_S1_ +__Z26serializedAllowPowerChangeP8OSObjectPvS1_S1_S1_ +__Z27IODTInterruptControllerNameP15IORegistryEntry +__Z27serializedCancelPowerChangeP8OSObjectPvS1_S1_S1_ +__ZN10IOMachPort10gMetaClassE +__ZN10IOMachPort10superClassE 
+__ZN10IOMachPort11dictForTypeEj +__ZN10IOMachPort13portForObjectEP8OSObjectj +__ZN10IOMachPort14setHoldDestroyEP8OSObjectj +__ZN10IOMachPort20makeSendRightForTaskEP4taskP8OSObjectj +__ZN10IOMachPort20releasePortForObjectEP8OSObjectj +__ZN10IOMachPort22noMoreSendersForObjectEP8OSObjectjPj +__ZN10IOMachPort4freeEv +__ZN10IOMachPort9MetaClassC1Ev +__ZN10IOMachPort9MetaClassC2Ev +__ZN10IOMachPort9metaClassE +__ZN10IOMachPortC1EPK11OSMetaClass +__ZN10IOMachPortC1Ev +__ZN10IOMachPortC2EPK11OSMetaClass +__ZN10IOMachPortC2Ev +__ZN10IOMachPortD0Ev +__ZN10IOMachPortD2Ev +__ZN10IONotifier10gMetaClassE +__ZN10IONotifier10superClassE +__ZN10IONotifier9MetaClassC1Ev +__ZN10IONotifier9MetaClassC2Ev +__ZN10IONotifier9metaClassE +__ZN10IONotifierC1EPK11OSMetaClass +__ZN10IONotifierC2EPK11OSMetaClass +__ZN10IONotifierD0Ev +__ZN10IONotifierD2Ev +__ZN10IOWorkLoop10gMetaClassE +__ZN10IOWorkLoop10superClassE +__ZN10IOWorkLoop10threadMainEv +__ZN10IOWorkLoop10wakeupGateEPvb +__ZN10IOWorkLoop12tryCloseGateEv +__ZN10IOWorkLoop13_maintRequestEPvS0_S0_S0_ +__ZN10IOWorkLoop14addEventSourceEP13IOEventSource +__ZN10IOWorkLoop16launchThreadMainEPv +__ZN10IOWorkLoop17removeEventSourceEP13IOEventSource +__ZN10IOWorkLoop19signalWorkAvailableEv +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop1Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop2Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop3Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop4Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop5Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop6Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop7Ev +__ZN10IOWorkLoop22threadMainContinuationEv +__ZN10IOWorkLoop4freeEv +__ZN10IOWorkLoop4initEv +__ZN10IOWorkLoop8openGateEv +__ZN10IOWorkLoop8workLoopEv +__ZN10IOWorkLoop9MetaClassC1Ev +__ZN10IOWorkLoop9MetaClassC2Ev +__ZN10IOWorkLoop9closeGateEv +__ZN10IOWorkLoop9metaClassE +__ZN10IOWorkLoop9runActionEPFiP8OSObjectPvS2_S2_S2_ES1_S2_S2_S2_S2_ +__ZN10IOWorkLoop9sleepGateEPvm +__ZN10IOWorkLoopC1EPK11OSMetaClass +__ZN10IOWorkLoopC1Ev +__ZN10IOWorkLoopC2EPK11OSMetaClass +__ZN10IOWorkLoopC2Ev +__ZN10IOWorkLoopD0Ev +__ZN10IOWorkLoopD2Ev +__ZN11IOCatalogue10addDriversEP7OSArrayb +__ZN11IOCatalogue10gMetaClassE +__ZN11IOCatalogue10initializeEv +__ZN11IOCatalogue10superClassE +__ZN11IOCatalogue11findDriversEP12OSDictionaryPl +__ZN11IOCatalogue11findDriversEP9IOServicePl +__ZN11IOCatalogue13removeDriversEP12OSDictionaryb +__ZN11IOCatalogue13startMatchingEP12OSDictionary +__ZN11IOCatalogue15moduleHasLoadedEP8OSString +__ZN11IOCatalogue15moduleHasLoadedEPKc +__ZN11IOCatalogue16terminateDriversEP12OSDictionary +__ZN11IOCatalogue18removeKernelLinkerEv +__ZN11IOCatalogue23recordStartupExtensionsEv +__ZN11IOCatalogue24addExtensionsFromArchiveEP6OSData +__ZN11IOCatalogue25terminateDriversForModuleEP8OSStringb +__ZN11IOCatalogue25terminateDriversForModuleEPKcb +__ZN11IOCatalogue4freeEv +__ZN11IOCatalogue4initEP7OSArray +__ZN11IOCatalogue5resetEv +__ZN11IOCatalogue9MetaClassC1Ev +__ZN11IOCatalogue9MetaClassC2Ev +__ZN11IOCatalogue9metaClassE +__ZN11IOCatalogueC1EPK11OSMetaClass +__ZN11IOCatalogueC1Ev +__ZN11IOCatalogueC2EPK11OSMetaClass +__ZN11IOCatalogueC2Ev +__ZN11IOCatalogueD0Ev +__ZN11IOCatalogueD2Ev +__ZN11IODataQueue10gMetaClassE +__ZN11IODataQueue10superClassE +__ZN11IODataQueue11withEntriesEmm +__ZN11IODataQueue12withCapacityEm +__ZN11IODataQueue15initWithEntriesEmm +__ZN11IODataQueue16initWithCapacityEm +__ZN11IODataQueue19getMemoryDescriptorEv +__ZN11IODataQueue19setNotificationPortEP8ipc_port +__ZN11IODataQueue29sendDataAvailableNotificationEv +__ZN11IODataQueue4freeEv +__ZN11IODataQueue7enqueueEPvm 
+__ZN11IODataQueue9MetaClassC1Ev +__ZN11IODataQueue9MetaClassC2Ev +__ZN11IODataQueue9metaClassE +__ZN11IODataQueueC1EPK11OSMetaClass +__ZN11IODataQueueC1Ev +__ZN11IODataQueueC2EPK11OSMetaClass +__ZN11IODataQueueC2Ev +__ZN11IODataQueueD0Ev +__ZN11IODataQueueD2Ev +__ZN11IOMemoryMap10gMetaClassE +__ZN11IOMemoryMap10superClassE +__ZN11IOMemoryMap18getPhysicalAddressEv +__ZN11IOMemoryMap9MetaClassC1Ev +__ZN11IOMemoryMap9MetaClassC2Ev +__ZN11IOMemoryMap9metaClassE +__ZN11IOMemoryMapC1EPK11OSMetaClass +__ZN11IOMemoryMapC2EPK11OSMetaClass +__ZN11IOMemoryMapD0Ev +__ZN11IOMemoryMapD2Ev +__ZN11IOResources10gMetaClassE +__ZN11IOResources10superClassE +__ZN11IOResources13setPropertiesEP8OSObject +__ZN11IOResources18matchPropertyTableEP12OSDictionary +__ZN11IOResources9MetaClassC1Ev +__ZN11IOResources9MetaClassC2Ev +__ZN11IOResources9metaClassE +__ZN11IOResources9resourcesEv +__ZN11IOResourcesC1EPK11OSMetaClass +__ZN11IOResourcesC1Ev +__ZN11IOResourcesC2EPK11OSMetaClass +__ZN11IOResourcesC2Ev +__ZN11IOResourcesD0Ev +__ZN11IOResourcesD2Ev +__ZN12IOPMinformee10gMetaClassE +__ZN12IOPMinformee10initializeEP9IOService +__ZN12IOPMinformee10superClassE +__ZN12IOPMinformee4freeEv +__ZN12IOPMinformee9MetaClassC1Ev +__ZN12IOPMinformee9MetaClassC2Ev +__ZN12IOPMinformee9metaClassE +__ZN12IOPMinformeeC1EPK11OSMetaClass +__ZN12IOPMinformeeC1Ev +__ZN12IOPMinformeeC2EPK11OSMetaClass +__ZN12IOPMinformeeC2Ev +__ZN12IOPMinformeeD0Ev +__ZN12IOPMinformeeD2Ev +__ZN12IORootParent10dozeSystemEv +__ZN12IORootParent10gMetaClassE +__ZN12IORootParent10superClassE +__ZN12IORootParent10wakeSystemEv +__ZN12IORootParent11sleepSystemEv +__ZN12IORootParent11sleepToDozeEv +__ZN12IORootParent13restartSystemEv +__ZN12IORootParent14shutDownSystemEv +__ZN12IORootParent5startEP9IOService +__ZN12IORootParent9MetaClassC1Ev +__ZN12IORootParent9MetaClassC2Ev +__ZN12IORootParent9metaClassE +__ZN12IORootParentC1EPK11OSMetaClass +__ZN12IORootParentC1Ev +__ZN12IORootParentC2EPK11OSMetaClass +__ZN12IORootParentC2Ev +__ZN12IORootParentD0Ev +__ZN12IORootParentD2Ev +__ZN12IOUserClient10clientDiedEv +__ZN12IOUserClient10gMetaClassE +__ZN12IOUserClient10getServiceEv +__ZN12IOUserClient10initializeEv +__ZN12IOUserClient10superClassE +__ZN12IOUserClient11clientCloseEv +__ZN12IOUserClient12initWithTaskEP4taskPvm +__ZN12IOUserClient12initWithTaskEP4taskPvmP12OSDictionary +__ZN12IOUserClient13connectClientEPS_ +__ZN12IOUserClient15mapClientMemoryEmP4taskmj +__ZN12IOUserClient15sendAsyncResultEPjiPPvm +__ZN12IOUserClient17setAsyncReferenceEPjP8ipc_portPvS3_ +__ZN12IOUserClient18clientHasPrivilegeEPvPKc +__ZN12IOUserClient19clientMemoryForTypeEmPmPP18IOMemoryDescriptor +__ZN12IOUserClient20exportObjectToClientEP4taskP8OSObjectPS3_ +__ZN12IOUserClient21destroyUserReferencesEP8OSObject +__ZN12IOUserClient22_RESERVEDIOUserClient0Ev +__ZN12IOUserClient22_RESERVEDIOUserClient1Ev +__ZN12IOUserClient22_RESERVEDIOUserClient2Ev +__ZN12IOUserClient22_RESERVEDIOUserClient3Ev +__ZN12IOUserClient22_RESERVEDIOUserClient4Ev +__ZN12IOUserClient22_RESERVEDIOUserClient5Ev +__ZN12IOUserClient22_RESERVEDIOUserClient6Ev +__ZN12IOUserClient22_RESERVEDIOUserClient7Ev +__ZN12IOUserClient22_RESERVEDIOUserClient8Ev +__ZN12IOUserClient22_RESERVEDIOUserClient9Ev +__ZN12IOUserClient23_RESERVEDIOUserClient10Ev +__ZN12IOUserClient23_RESERVEDIOUserClient11Ev +__ZN12IOUserClient23_RESERVEDIOUserClient12Ev +__ZN12IOUserClient23_RESERVEDIOUserClient13Ev +__ZN12IOUserClient23_RESERVEDIOUserClient14Ev +__ZN12IOUserClient23_RESERVEDIOUserClient15Ev +__ZN12IOUserClient23getExternalTrapForIndexEm 
+__ZN12IOUserClient24getNotificationSemaphoreEmPP9semaphore +__ZN12IOUserClient24getTargetAndTrapForIndexEPP9IOServicem +__ZN12IOUserClient24registerNotificationPortEP8ipc_portmm +__ZN12IOUserClient25getExternalMethodForIndexEm +__ZN12IOUserClient26getTargetAndMethodForIndexEPP9IOServicem +__ZN12IOUserClient30getExternalAsyncMethodForIndexEm +__ZN12IOUserClient31getAsyncTargetAndMethodForIndexEPP9IOServicem +__ZN12IOUserClient4freeEv +__ZN12IOUserClient9MetaClassC1Ev +__ZN12IOUserClient9MetaClassC2Ev +__ZN12IOUserClient9metaClassE +__ZN12IOUserClientC1EPK11OSMetaClass +__ZN12IOUserClientC2EPK11OSMetaClass +__ZN12IOUserClientD0Ev +__ZN12IOUserClientD2Ev +__ZN12_IOMemoryMap10gMetaClassE +__ZN12_IOMemoryMap10superClassE +__ZN12_IOMemoryMap13getMapOptionsEv +__ZN12_IOMemoryMap14copyCompatibleEP18IOMemoryDescriptorP4taskjmmm +__ZN12_IOMemoryMap14getAddressTaskEv +__ZN12_IOMemoryMap14initCompatibleEP18IOMemoryDescriptorP11IOMemoryMapmm +__ZN12_IOMemoryMap17getVirtualAddressEv +__ZN12_IOMemoryMap18getPhysicalSegmentEmPm +__ZN12_IOMemoryMap18initWithDescriptorEP18IOMemoryDescriptorP4taskjmmm +__ZN12_IOMemoryMap19getMemoryDescriptorEv +__ZN12_IOMemoryMap4freeEv +__ZN12_IOMemoryMap5unmapEv +__ZN12_IOMemoryMap8redirectEP4taskb +__ZN12_IOMemoryMap8taskDiedEv +__ZN12_IOMemoryMap9MetaClassC1Ev +__ZN12_IOMemoryMap9MetaClassC2Ev +__ZN12_IOMemoryMap9getLengthEv +__ZN12_IOMemoryMap9metaClassE +__ZN12_IOMemoryMapC1EPK11OSMetaClass +__ZN12_IOMemoryMapC1Ev +__ZN12_IOMemoryMapC2EPK11OSMetaClass +__ZN12_IOMemoryMapC2Ev +__ZN12_IOMemoryMapD0Ev +__ZN12_IOMemoryMapD2Ev +__ZN13IOCommandGate10gMetaClassE +__ZN13IOCommandGate10runCommandEPvS0_S0_S0_ +__ZN13IOCommandGate10superClassE +__ZN13IOCommandGate11commandGateEP8OSObjectPFiS1_PvS2_S2_S2_E +__ZN13IOCommandGate12checkForWorkEv +__ZN13IOCommandGate12commandSleepEPvm +__ZN13IOCommandGate13attemptActionEPFiP8OSObjectPvS2_S2_S2_ES2_S2_S2_S2_ +__ZN13IOCommandGate13commandWakeupEPvb +__ZN13IOCommandGate14attemptCommandEPvS0_S0_S0_ +__ZN13IOCommandGate23_RESERVEDIOCommandGate0Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate1Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate2Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate3Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate4Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate5Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate6Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate7Ev +__ZN13IOCommandGate4initEP8OSObjectPFiS1_PvS2_S2_S2_E +__ZN13IOCommandGate9MetaClassC1Ev +__ZN13IOCommandGate9MetaClassC2Ev +__ZN13IOCommandGate9metaClassE +__ZN13IOCommandGate9runActionEPFiP8OSObjectPvS2_S2_S2_ES2_S2_S2_S2_ +__ZN13IOCommandGateC1EPK11OSMetaClass +__ZN13IOCommandGateC1Ev +__ZN13IOCommandGateC2EPK11OSMetaClass +__ZN13IOCommandGateC2Ev +__ZN13IOCommandGateD0Ev +__ZN13IOCommandGateD2Ev +__ZN13IOCommandPool10gMetaClassE +__ZN13IOCommandPool10getCommandEb +__ZN13IOCommandPool10superClassE +__ZN13IOCommandPool11commandPoolEP9IOServiceP10IOWorkLoopm +__ZN13IOCommandPool12withWorkLoopEP10IOWorkLoop +__ZN13IOCommandPool13returnCommandEP9IOCommand +__ZN13IOCommandPool15gatedGetCommandEPP9IOCommandb +__ZN13IOCommandPool16initWithWorkLoopEP10IOWorkLoop +__ZN13IOCommandPool18gatedReturnCommandEP9IOCommand +__ZN13IOCommandPool23_RESERVEDIOCommandPool0Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool1Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool2Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool3Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool4Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool5Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool6Ev 
+__ZN13IOCommandPool23_RESERVEDIOCommandPool7Ev +__ZN13IOCommandPool4freeEv +__ZN13IOCommandPool4initEP9IOServiceP10IOWorkLoopm +__ZN13IOCommandPool9MetaClassC1Ev +__ZN13IOCommandPool9MetaClassC2Ev +__ZN13IOCommandPool9metaClassE +__ZN13IOCommandPoolC1EPK11OSMetaClass +__ZN13IOCommandPoolC1Ev +__ZN13IOCommandPoolC2EPK11OSMetaClass +__ZN13IOCommandPoolC2Ev +__ZN13IOCommandPoolD0Ev +__ZN13IOCommandPoolD2Ev +__ZN13IOEventSource10gMetaClassE +__ZN13IOEventSource10superClassE +__ZN13IOEventSource10wakeupGateEPvb +__ZN13IOEventSource11setWorkLoopEP10IOWorkLoop +__ZN13IOEventSource12tryCloseGateEv +__ZN13IOEventSource19signalWorkAvailableEv +__ZN13IOEventSource23_RESERVEDIOEventSource0Ev +__ZN13IOEventSource23_RESERVEDIOEventSource1Ev +__ZN13IOEventSource23_RESERVEDIOEventSource2Ev +__ZN13IOEventSource23_RESERVEDIOEventSource3Ev +__ZN13IOEventSource23_RESERVEDIOEventSource4Ev +__ZN13IOEventSource23_RESERVEDIOEventSource5Ev +__ZN13IOEventSource23_RESERVEDIOEventSource6Ev +__ZN13IOEventSource23_RESERVEDIOEventSource7Ev +__ZN13IOEventSource4initEP8OSObjectPFvS1_zE +__ZN13IOEventSource6enableEv +__ZN13IOEventSource7disableEv +__ZN13IOEventSource7setNextEPS_ +__ZN13IOEventSource8openGateEv +__ZN13IOEventSource9MetaClassC1Ev +__ZN13IOEventSource9MetaClassC2Ev +__ZN13IOEventSource9closeGateEv +__ZN13IOEventSource9metaClassE +__ZN13IOEventSource9setActionEPFvP8OSObjectzE +__ZN13IOEventSource9sleepGateEPvm +__ZN13IOEventSourceC1EPK11OSMetaClass +__ZN13IOEventSourceC2EPK11OSMetaClass +__ZN13IOEventSourceD0Ev +__ZN13IOEventSourceD2Ev +__ZN13_IOServiceJob10gMetaClassE +__ZN13_IOServiceJob10pingConfigEPS_ +__ZN13_IOServiceJob10superClassE +__ZN13_IOServiceJob8startJobEP9IOServiceim +__ZN13_IOServiceJob9MetaClassC1Ev +__ZN13_IOServiceJob9MetaClassC2Ev +__ZN13_IOServiceJob9metaClassE +__ZN13_IOServiceJobC1EPK11OSMetaClass +__ZN13_IOServiceJobC1Ev +__ZN13_IOServiceJobC2EPK11OSMetaClass +__ZN13_IOServiceJobC2Ev +__ZN13_IOServiceJobD0Ev +__ZN13_IOServiceJobD2Ev +__ZN14IOCommandQueue10gMetaClassE +__ZN14IOCommandQueue10superClassE +__ZN14IOCommandQueue12checkForWorkEv +__ZN14IOCommandQueue12commandQueueEP8OSObjectPFvS1_PvS2_S2_S2_Ei +__ZN14IOCommandQueue14enqueueCommandEbPvS0_S0_S0_ +__ZN14IOCommandQueue15performAndFlushEP8OSObjectPFvS1_PvS2_S2_S2_E +__ZN14IOCommandQueue4freeEv +__ZN14IOCommandQueue4initEP8OSObjectPFvS1_PvS2_S2_S2_Ei +__ZN14IOCommandQueue9MetaClassC1Ev +__ZN14IOCommandQueue9MetaClassC2Ev +__ZN14IOCommandQueue9metaClassE +__ZN14IOCommandQueueC1EPK11OSMetaClass +__ZN14IOCommandQueueC1Ev +__ZN14IOCommandQueueC2EPK11OSMetaClass +__ZN14IOCommandQueueC2Ev +__ZN14IOCommandQueueD0Ev +__ZN14IOCommandQueueD2Ev +__ZN14IODeviceMemory12withSubRangeEPS_mm +__ZN14IODeviceMemory13arrayFromListEPNS_11InitElementEm +__ZN14IODeviceMemory9withRangeEmm +__ZN14IOMemoryCursor10gMetaClassE +__ZN14IOMemoryCursor10superClassE +__ZN14IOMemoryCursor17withSpecificationEPFvNS_15PhysicalSegmentEPvmEmmm +__ZN14IOMemoryCursor19genPhysicalSegmentsEP18IOMemoryDescriptormPvmmPm +__ZN14IOMemoryCursor21initWithSpecificationEPFvNS_15PhysicalSegmentEPvmEmmm +__ZN14IOMemoryCursor9MetaClassC1Ev +__ZN14IOMemoryCursor9MetaClassC2Ev +__ZN14IOMemoryCursor9metaClassE +__ZN14IOMemoryCursorC1EPK11OSMetaClass +__ZN14IOMemoryCursorC1Ev +__ZN14IOMemoryCursorC2EPK11OSMetaClass +__ZN14IOMemoryCursorC2Ev +__ZN14IOMemoryCursorD0Ev +__ZN14IOMemoryCursorD2Ev +__ZN14IOPMrootDomain10gMetaClassE +__ZN14IOPMrootDomain10superClassE +__ZN14IOPMrootDomain10youAreRootEv +__ZN14IOPMrootDomain11sleepSystemEv +__ZN14IOPMrootDomain12broadcast_itEmm 
+__ZN14IOPMrootDomain12tellChangeUpEm +__ZN14IOPMrootDomain12unIdleDeviceEP9IOServicem +__ZN14IOPMrootDomain12wakeFromDozeEv +__ZN14IOPMrootDomain13askChangeDownEm +__ZN14IOPMrootDomain13restartSystemEv +__ZN14IOPMrootDomain13setPropertiesEP8OSObject +__ZN14IOPMrootDomain14publishFeatureEPKc +__ZN14IOPMrootDomain14shutdownSystemEv +__ZN14IOPMrootDomain14tellChangeDownEm +__ZN14IOPMrootDomain15powerChangeDoneEm +__ZN14IOPMrootDomain15reportUserInputEv +__ZN14IOPMrootDomain16adjustPowerStateEv +__ZN14IOPMrootDomain16command_receivedEPvS0_S0_S0_ +__ZN14IOPMrootDomain16tellNoChangeDownEm +__ZN14IOPMrootDomain17getSleepSupportedEv +__ZN14IOPMrootDomain17setAggressivenessEmm +__ZN14IOPMrootDomain17setSleepSupportedEm +__ZN14IOPMrootDomain18changePowerStateToEm +__ZN14IOPMrootDomain19sysPowerDownHandlerEPvS0_mP9IOServiceS0_j +__ZN14IOPMrootDomain22changePowerStateToPrivEm +__ZN14IOPMrootDomain23requestPowerDomainStateEmP17IOPowerConnectionm +__ZN14IOPMrootDomain23setQuickSpinDownTimeoutEv +__ZN14IOPMrootDomain24displayWranglerPublishedEPvS0_P9IOService +__ZN14IOPMrootDomain24receivePowerNotificationEm +__ZN14IOPMrootDomain25announcePowerSourceChangeEv +__ZN14IOPMrootDomain26handleSleepTimerExpirationEv +__ZN14IOPMrootDomain26restoreUserSpinDownTimeoutEv +__ZN14IOPMrootDomain27displayWranglerNotificationEPvS0_mP9IOServiceS0_j +__ZN14IOPMrootDomain39stopIgnoringClamshellEventsDuringWakeupEv +__ZN14IOPMrootDomain5startEP9IOService +__ZN14IOPMrootDomain9MetaClassC1Ev +__ZN14IOPMrootDomain9MetaClassC2Ev +__ZN14IOPMrootDomain9constructEv +__ZN14IOPMrootDomain9metaClassE +__ZN14IOPMrootDomainC1EPK11OSMetaClass +__ZN14IOPMrootDomainC1Ev +__ZN14IOPMrootDomainC2EPK11OSMetaClass +__ZN14IOPMrootDomainC2Ev +__ZN14IOPMrootDomainD0Ev +__ZN14IOPMrootDomainD2Ev +__ZN15IOConditionLock10gMetaClassE +__ZN15IOConditionLock10superClassE +__ZN15IOConditionLock10unlockWithEi +__ZN15IOConditionLock12setConditionEi +__ZN15IOConditionLock13withConditionEib +__ZN15IOConditionLock17initWithConditionEib +__ZN15IOConditionLock4freeEv +__ZN15IOConditionLock4lockEv +__ZN15IOConditionLock6unlockEv +__ZN15IOConditionLock7tryLockEv +__ZN15IOConditionLock8lockWhenEi +__ZN15IOConditionLock9MetaClassC1Ev +__ZN15IOConditionLock9MetaClassC2Ev +__ZN15IOConditionLock9metaClassE +__ZN15IOConditionLockC1EPK11OSMetaClass +__ZN15IOConditionLockC1Ev +__ZN15IOConditionLockC2EPK11OSMetaClass +__ZN15IOConditionLockC2Ev +__ZN15IOConditionLockD0Ev +__ZN15IOConditionLockD2Ev +__ZN15IOPMPowerSource10gMetaClassE +__ZN15IOPMPowerSource10isChargingEv +__ZN15IOPMPowerSource10superClassE +__ZN15IOPMPowerSource11acConnectedEv +__ZN15IOPMPowerSource11atWarnLevelEv +__ZN15IOPMPowerSource11curCapacityEv +__ZN15IOPMPowerSource11isInstalledEv +__ZN15IOPMPowerSource11maxCapacityEv +__ZN15IOPMPowerSource12currentDrawnEv +__ZN15IOPMPowerSource12updateStatusEv +__ZN15IOPMPowerSource13timeRemainingEv +__ZN15IOPMPowerSource24capacityPercentRemainingEv +__ZN15IOPMPowerSource4initEt +__ZN15IOPMPowerSource7voltageEv +__ZN15IOPMPowerSource8depletedEv +__ZN15IOPMPowerSource9MetaClassC1Ev +__ZN15IOPMPowerSource9MetaClassC2Ev +__ZN15IOPMPowerSource9metaClassE +__ZN15IOPMPowerSourceC1EPK11OSMetaClass +__ZN15IOPMPowerSourceC1Ev +__ZN15IOPMPowerSourceC2EPK11OSMetaClass +__ZN15IOPMPowerSourceC2Ev +__ZN15IOPMPowerSourceD0Ev +__ZN15IOPMPowerSourceD2Ev +__ZN15IOPanicPlatform10gMetaClassE +__ZN15IOPanicPlatform10superClassE +__ZN15IOPanicPlatform5startEP9IOService +__ZN15IOPanicPlatform9MetaClassC1Ev +__ZN15IOPanicPlatform9MetaClassC2Ev +__ZN15IOPanicPlatform9metaClassE 
+__ZN15IOPanicPlatformC1EPK11OSMetaClass +__ZN15IOPanicPlatformC1Ev +__ZN15IOPanicPlatformC2EPK11OSMetaClass +__ZN15IOPanicPlatformC2Ev +__ZN15IOPanicPlatformD0Ev +__ZN15IOPanicPlatformD2Ev +__ZN15IORegistryEntry10gMetaClassE +__ZN15IORegistryEntry10initializeEv +__ZN15IORegistryEntry10superClassE +__ZN15IORegistryEntry11dealiasPathEPPKcPK15IORegistryPlane +__ZN15IORegistryEntry11detachAboveEPK15IORegistryPlane +__ZN15IORegistryEntry11setLocationEPK8OSSymbolPK15IORegistryPlane +__ZN15IORegistryEntry11setLocationEPKcPK15IORegistryPlane +__ZN15IORegistryEntry11setPropertyEPK8OSStringP8OSObject +__ZN15IORegistryEntry11setPropertyEPK8OSSymbolP8OSObject +__ZN15IORegistryEntry11setPropertyEPKcP8OSObject +__ZN15IORegistryEntry11setPropertyEPKcPvj +__ZN15IORegistryEntry11setPropertyEPKcS1_ +__ZN15IORegistryEntry11setPropertyEPKcb +__ZN15IORegistryEntry11setPropertyEPKcyj +__ZN15IORegistryEntry13attachToChildEPS_PK15IORegistryPlane +__ZN15IORegistryEntry13childFromPathEPKcPK15IORegistryPlanePcPi +__ZN15IORegistryEntry13setPropertiesEP8OSObject +__ZN15IORegistryEntry14attachToParentEPS_PK15IORegistryPlane +__ZN15IORegistryEntry14removePropertyEPK8OSString +__ZN15IORegistryEntry14removePropertyEPK8OSSymbol +__ZN15IORegistryEntry14removePropertyEPKc +__ZN15IORegistryEntry15detachFromChildEPS_PK15IORegistryPlane +__ZN15IORegistryEntry15getRegistryRootEv +__ZN15IORegistryEntry16detachFromParentEPS_PK15IORegistryPlane +__ZN15IORegistryEntry16setPropertyTableEP12OSDictionary +__ZN15IORegistryEntry17matchPathLocationEPKcPK15IORegistryPlane +__ZN15IORegistryEntry18getGenerationCountEv +__ZN15IORegistryEntry21getChildFromComponentEPPKcPK15IORegistryPlane +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry5Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry6Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry7Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry8Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry9Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry10Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry11Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry12Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry13Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry14Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry15Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry16Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry17Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry18Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry19Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry20Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry21Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry22Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry23Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry24Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry25Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry26Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry27Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry28Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry29Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry30Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry31Ev +__ZN15IORegistryEntry4freeEv +__ZN15IORegistryEntry4initEP12OSDictionary +__ZN15IORegistryEntry4initEPS_PK15IORegistryPlane +__ZN15IORegistryEntry7setNameEPK8OSSymbolPK15IORegistryPlane +__ZN15IORegistryEntry7setNameEPKcPK15IORegistryPlane +__ZN15IORegistryEntry8fromPathEPKcPK15IORegistryPlanePcPiPS_ +__ZN15IORegistryEntry8getPlaneEPKc +__ZN15IORegistryEntry9MetaClassC1Ev +__ZN15IORegistryEntry9MetaClassC2Ev 
+__ZN15IORegistryEntry9detachAllEPK15IORegistryPlane +__ZN15IORegistryEntry9makePlaneEPKc +__ZN15IORegistryEntry9metaClassE +__ZN15IORegistryEntryC1EPK11OSMetaClass +__ZN15IORegistryEntryC1Ev +__ZN15IORegistryEntryC2EPK11OSMetaClass +__ZN15IORegistryEntryC2Ev +__ZN15IORegistryEntryD0Ev +__ZN15IORegistryEntryD2Ev +__ZN15IORegistryPlane10gMetaClassE +__ZN15IORegistryPlane10superClassE +__ZN15IORegistryPlane9MetaClassC1Ev +__ZN15IORegistryPlane9MetaClassC2Ev +__ZN15IORegistryPlane9metaClassE +__ZN15IORegistryPlaneC1EPK11OSMetaClass +__ZN15IORegistryPlaneC1Ev +__ZN15IORegistryPlaneC2EPK11OSMetaClass +__ZN15IORegistryPlaneC2Ev +__ZN15IORegistryPlaneD0Ev +__ZN15IORegistryPlaneD2Ev +__ZN15IOWatchDogTimer10gMetaClassE +__ZN15IOWatchDogTimer10superClassE +__ZN15IOWatchDogTimer13setPropertiesEP8OSObject +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer0Ev +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer1Ev +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer2Ev +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer3Ev +__ZN15IOWatchDogTimer4stopEP9IOService +__ZN15IOWatchDogTimer5startEP9IOService +__ZN15IOWatchDogTimer9MetaClassC1Ev +__ZN15IOWatchDogTimer9MetaClassC2Ev +__ZN15IOWatchDogTimer9metaClassE +__ZN15IOWatchDogTimerC1EPK11OSMetaClass +__ZN15IOWatchDogTimerC2EPK11OSMetaClass +__ZN15IOWatchDogTimerD0Ev +__ZN15IOWatchDogTimerD2Ev +__ZN15_IOConfigThread10gMetaClassE +__ZN15_IOConfigThread10superClassE +__ZN15_IOConfigThread12configThreadEv +__ZN15_IOConfigThread4freeEv +__ZN15_IOConfigThread4mainEPS_ +__ZN15_IOConfigThread9MetaClassC1Ev +__ZN15_IOConfigThread9MetaClassC2Ev +__ZN15_IOConfigThread9metaClassE +__ZN15_IOConfigThreadC1EPK11OSMetaClass +__ZN15_IOConfigThreadC1Ev +__ZN15_IOConfigThreadC2EPK11OSMetaClass +__ZN15_IOConfigThreadC2Ev +__ZN15_IOConfigThreadD0Ev +__ZN15_IOConfigThreadD2Ev +__ZN16IOKitDiagnostics10gMetaClassE +__ZN16IOKitDiagnostics10superClassE +__ZN16IOKitDiagnostics11diagnosticsEv +__ZN16IOKitDiagnostics12updateOffsetEP12OSDictionarymPKc +__ZN16IOKitDiagnostics9MetaClassC1Ev +__ZN16IOKitDiagnostics9MetaClassC2Ev +__ZN16IOKitDiagnostics9metaClassE +__ZN16IOKitDiagnosticsC1EPK11OSMetaClass +__ZN16IOKitDiagnosticsC1Ev +__ZN16IOKitDiagnosticsC2EPK11OSMetaClass +__ZN16IOKitDiagnosticsC2Ev +__ZN16IOKitDiagnosticsD0Ev +__ZN16IOKitDiagnosticsD2Ev +__ZN16IOPMPagingPlexus10gMetaClassE +__ZN16IOPMPagingPlexus10superClassE +__ZN16IOPMPagingPlexus12findProviderEP9IOService +__ZN16IOPMPagingPlexus15processChildrenEv +__ZN16IOPMPagingPlexus15processSiblingsEP9IOService +__ZN16IOPMPagingPlexus17setAggressivenessEmm +__ZN16IOPMPagingPlexus5startEP9IOService +__ZN16IOPMPagingPlexus9MetaClassC1Ev +__ZN16IOPMPagingPlexus9MetaClassC2Ev +__ZN16IOPMPagingPlexus9metaClassE +__ZN16IOPMPagingPlexusC1EPK11OSMetaClass +__ZN16IOPMPagingPlexusC1Ev +__ZN16IOPMPagingPlexusC2EPK11OSMetaClass +__ZN16IOPMPagingPlexusC2Ev +__ZN16IOPMPagingPlexusD0Ev +__ZN16IOPMPagingPlexusD2Ev +__ZN16IOPMinformeeList10gMetaClassE +__ZN16IOPMinformeeList10initializeEv +__ZN16IOPMinformeeList10nextInListEP12IOPMinformee +__ZN16IOPMinformeeList10superClassE +__ZN16IOPMinformeeList11firstInListEv +__ZN16IOPMinformeeList13numberOfItemsEv +__ZN16IOPMinformeeList14removeFromListEP9IOService +__ZN16IOPMinformeeList4freeEv +__ZN16IOPMinformeeList8findItemEP9IOService +__ZN16IOPMinformeeList9MetaClassC1Ev +__ZN16IOPMinformeeList9MetaClassC2Ev +__ZN16IOPMinformeeList9addToListEP12IOPMinformee +__ZN16IOPMinformeeList9metaClassE +__ZN16IOPMinformeeListC1EPK11OSMetaClass +__ZN16IOPMinformeeListC1Ev +__ZN16IOPMinformeeListC2EPK11OSMetaClass 
+__ZN16IOPMinformeeListC2Ev +__ZN16IOPMinformeeListD0Ev +__ZN16IOPMinformeeListD2Ev +__ZN16IOPlatformDevice10gMetaClassE +__ZN16IOPlatformDevice10superClassE +__ZN16IOPlatformDevice12getResourcesEv +__ZN16IOPlatformDevice13matchLocationEP9IOService +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice0Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice1Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice2Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice3Ev +__ZN16IOPlatformDevice9MetaClassC1Ev +__ZN16IOPlatformDevice9MetaClassC2Ev +__ZN16IOPlatformDevice9metaClassE +__ZN16IOPlatformDeviceC1EPK11OSMetaClass +__ZN16IOPlatformDeviceC1Ev +__ZN16IOPlatformDeviceC2EPK11OSMetaClass +__ZN16IOPlatformDeviceC2Ev +__ZN16IOPlatformDeviceD0Ev +__ZN16IOPlatformDeviceD2Ev +__ZN16IOPlatformExpert10gMetaClassE +__ZN16IOPlatformExpert10superClassE +__ZN16IOPlatformExpert11haltRestartEj +__ZN16IOPlatformExpert11sleepKernelEv +__ZN16IOPlatformExpert12CheckSubTreeEP7OSArrayP9IOServiceS3_P12OSDictionary +__ZN16IOPlatformExpert12getModelNameEPci +__ZN16IOPlatformExpert12hasPMFeatureEm +__ZN16IOPlatformExpert13savePanicInfoEPhm +__ZN16IOPlatformExpert14getBootROMTypeEv +__ZN16IOPlatformExpert14getChipSetTypeEv +__ZN16IOPlatformExpert14getConsoleInfoEP8PE_Video +__ZN16IOPlatformExpert14getMachineNameEPci +__ZN16IOPlatformExpert14getMachineTypeEv +__ZN16IOPlatformExpert14setBootROMTypeEl +__ZN16IOPlatformExpert14setChipSetTypeEl +__ZN16IOPlatformExpert14setConsoleInfoEP8PE_Videoj +__ZN16IOPlatformExpert14setMachineTypeEl +__ZN16IOPlatformExpert15getGMTTimeOfDayEv +__ZN16IOPlatformExpert15getNubResourcesEP9IOService +__ZN16IOPlatformExpert15setGMTTimeOfDayEl +__ZN16IOPlatformExpert16PMRegisterDeviceEP9IOServiceS1_ +__ZN16IOPlatformExpert16atInterruptLevelEv +__ZN16IOPlatformExpert16hasPrivPMFeatureEm +__ZN16IOPlatformExpert20callPlatformFunctionEPK8OSSymbolbPvS3_S3_S3_ +__ZN16IOPlatformExpert21RegisterServiceInTreeEP9IOServiceP12OSDictionaryS3_S1_ +__ZN16IOPlatformExpert21numBatteriesSupportedEv +__ZN16IOPlatformExpert21platformAdjustServiceEP9IOService +__ZN16IOPlatformExpert23registerNVRAMControllerEP17IONVRAMController +__ZN16IOPlatformExpert25PMInstantiatePowerDomainsEv +__ZN16IOPlatformExpert25getPhysicalRangeAllocatorEv +__ZN16IOPlatformExpert25lookUpInterruptControllerEP8OSSymbol +__ZN16IOPlatformExpert25setCPUInterruptPropertiesEP9IOService +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert2Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert3Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert4Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert5Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert6Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert7Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert8Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert9Ev +__ZN16IOPlatformExpert27_RESERVEDIOPlatformExpert10Ev +__ZN16IOPlatformExpert27_RESERVEDIOPlatformExpert11Ev +__ZN16IOPlatformExpert27registerInterruptControllerEP8OSSymbolP21IOInterruptController +__ZN16IOPlatformExpert30createSystemSerialNumberStringEP6OSData +__ZN16IOPlatformExpert5PMLogEPKcmmm +__ZN16IOPlatformExpert5startEP9IOService +__ZN16IOPlatformExpert6attachEP9IOService +__ZN16IOPlatformExpert9MetaClassC1Ev +__ZN16IOPlatformExpert9MetaClassC2Ev +__ZN16IOPlatformExpert9configureEP9IOService +__ZN16IOPlatformExpert9createNubEP12OSDictionary +__ZN16IOPlatformExpert9metaClassE +__ZN16IOPlatformExpertC1EPK11OSMetaClass +__ZN16IOPlatformExpertC1Ev +__ZN16IOPlatformExpertC2EPK11OSMetaClass +__ZN16IOPlatformExpertC2Ev 
+__ZN16IOPlatformExpertD0Ev +__ZN16IOPlatformExpertD2Ev +__ZN16IORangeAllocator10deallocateEmm +__ZN16IORangeAllocator10gMetaClassE +__ZN16IORangeAllocator10superClassE +__ZN16IORangeAllocator12allocElementEm +__ZN16IORangeAllocator12getFreeCountEv +__ZN16IORangeAllocator13allocateRangeEmm +__ZN16IORangeAllocator14deallocElementEm +__ZN16IORangeAllocator16getFragmentCountEv +__ZN16IORangeAllocator19getFragmentCapacityEv +__ZN16IORangeAllocator28setFragmentCapacityIncrementEm +__ZN16IORangeAllocator4freeEv +__ZN16IORangeAllocator4initEmmmm +__ZN16IORangeAllocator8allocateEmPmm +__ZN16IORangeAllocator9MetaClassC1Ev +__ZN16IORangeAllocator9MetaClassC2Ev +__ZN16IORangeAllocator9metaClassE +__ZN16IORangeAllocator9withRangeEmmmm +__ZN16IORangeAllocatorC1EPK11OSMetaClass +__ZN16IORangeAllocatorC1Ev +__ZN16IORangeAllocatorC2EPK11OSMetaClass +__ZN16IORangeAllocatorC2Ev +__ZN16IORangeAllocatorD0Ev +__ZN16IORangeAllocatorD2Ev +__ZN17IOBigMemoryCursor10gMetaClassE +__ZN17IOBigMemoryCursor10superClassE +__ZN17IOBigMemoryCursor13outputSegmentEN14IOMemoryCursor15PhysicalSegmentEPvm +__ZN17IOBigMemoryCursor17withSpecificationEmmm +__ZN17IOBigMemoryCursor21initWithSpecificationEmmm +__ZN17IOBigMemoryCursor9MetaClassC1Ev +__ZN17IOBigMemoryCursor9MetaClassC2Ev +__ZN17IOBigMemoryCursor9metaClassE +__ZN17IOBigMemoryCursorC1EPK11OSMetaClass +__ZN17IOBigMemoryCursorC1Ev +__ZN17IOBigMemoryCursorC2EPK11OSMetaClass +__ZN17IOBigMemoryCursorC2Ev +__ZN17IOBigMemoryCursorD0Ev +__ZN17IOBigMemoryCursorD2Ev +__ZN17IOPowerConnection10gMetaClassE +__ZN17IOPowerConnection10superClassE +__ZN17IOPowerConnection14getAwaitingAckEv +__ZN17IOPowerConnection14setAwaitingAckEb +__ZN17IOPowerConnection16parentKnowsStateEv +__ZN17IOPowerConnection19setParentKnowsStateEb +__ZN17IOPowerConnection21getDesiredDomainStateEv +__ZN17IOPowerConnection21setDesiredDomainStateEm +__ZN17IOPowerConnection22childHasRequestedPowerEv +__ZN17IOPowerConnection23getPreventIdleSleepFlagEv +__ZN17IOPowerConnection23parentCurrentPowerFlagsEv +__ZN17IOPowerConnection23setPreventIdleSleepFlagEm +__ZN17IOPowerConnection25getPreventSystemSleepFlagEv +__ZN17IOPowerConnection25setChildHasRequestedPowerEv +__ZN17IOPowerConnection25setPreventSystemSleepFlagEm +__ZN17IOPowerConnection26setParentCurrentPowerFlagsEm +__ZN17IOPowerConnection9MetaClassC1Ev +__ZN17IOPowerConnection9MetaClassC2Ev +__ZN17IOPowerConnection9metaClassE +__ZN17IOPowerConnectionC1EPK11OSMetaClass +__ZN17IOPowerConnectionC1Ev +__ZN17IOPowerConnectionC2EPK11OSMetaClass +__ZN17IOPowerConnectionC2Ev +__ZN17IOPowerConnectionD0Ev +__ZN17IOPowerConnectionD2Ev +__ZN18IODTPlatformExpert10createNubsEP9IOServiceP10OSIterator +__ZN18IODTPlatformExpert10gMetaClassE +__ZN18IODTPlatformExpert10superClassE +__ZN18IODTPlatformExpert10writeXPRAMEmPhm +__ZN18IODTPlatformExpert11haltRestartEj +__ZN18IODTPlatformExpert12getModelNameEPci +__ZN18IODTPlatformExpert13savePanicInfoEPhm +__ZN18IODTPlatformExpert14getMachineNameEPci +__ZN18IODTPlatformExpert15getNubResourcesEP9IOService +__ZN18IODTPlatformExpert15processTopLevelEP15IORegistryEntry +__ZN18IODTPlatformExpert17readNVRAMPropertyEP15IORegistryEntryPPK8OSSymbolPP6OSData +__ZN18IODTPlatformExpert18getNVRAMPartitionsEv +__ZN18IODTPlatformExpert18readNVRAMPartitionEPK8OSSymbolmPhm +__ZN18IODTPlatformExpert18writeNVRAMPropertyEP15IORegistryEntryPK8OSSymbolP6OSData +__ZN18IODTPlatformExpert19writeNVRAMPartitionEPK8OSSymbolmPhm +__ZN18IODTPlatformExpert23registerNVRAMControllerEP17IONVRAMController +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert0Ev 
+__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert1Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert2Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert3Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert4Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert5Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert6Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert7Ev +__ZN18IODTPlatformExpert30createSystemSerialNumberStringEP6OSData +__ZN18IODTPlatformExpert5probeEP9IOServicePl +__ZN18IODTPlatformExpert9MetaClassC1Ev +__ZN18IODTPlatformExpert9MetaClassC2Ev +__ZN18IODTPlatformExpert9configureEP9IOService +__ZN18IODTPlatformExpert9createNubEP15IORegistryEntry +__ZN18IODTPlatformExpert9metaClassE +__ZN18IODTPlatformExpert9readXPRAMEmPhm +__ZN18IODTPlatformExpertC1EPK11OSMetaClass +__ZN18IODTPlatformExpertC2EPK11OSMetaClass +__ZN18IODTPlatformExpertD0Ev +__ZN18IODTPlatformExpertD2Ev +__ZN18IOMemoryDescriptor10addMappingEP11IOMemoryMap +__ZN18IOMemoryDescriptor10gMetaClassE +__ZN18IOMemoryDescriptor10initializeEv +__ZN18IOMemoryDescriptor10setMappingEP4taskjm +__ZN18IOMemoryDescriptor10superClassE +__ZN18IOMemoryDescriptor10withRangesEP14IOVirtualRangem11IODirectionP4taskb +__ZN18IOMemoryDescriptor10writeBytesEmPKvm +__ZN18IOMemoryDescriptor11handleFaultEPvP6vm_mapjmmm +__ZN18IOMemoryDescriptor11makeMappingEPS_P4taskjmmm +__ZN18IOMemoryDescriptor11withAddressEPvm11IODirection +__ZN18IOMemoryDescriptor11withAddressEjm11IODirectionP4task +__ZN18IOMemoryDescriptor11withOptionsEPvmmP4taskmP8IOMapper +__ZN18IOMemoryDescriptor12withSubRangeEPS_mm11IODirection +__ZN18IOMemoryDescriptor13removeMappingEP11IOMemoryMap +__ZN18IOMemoryDescriptor15initWithOptionsEPvmmP4taskmP8IOMapper +__ZN18IOMemoryDescriptor16getSourceSegmentEmPm +__ZN18IOMemoryDescriptor18getPhysicalAddressEv +__ZN18IOMemoryDescriptor18withPhysicalRangesEP15IOPhysicalRangem11IODirectionb +__ZN18IOMemoryDescriptor19withPhysicalAddressEmm11IODirection +__ZN18IOMemoryDescriptor20getPhysicalSegment64EmPm +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor3Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor4Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor5Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor6Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor7Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor8Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor9Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor10Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor11Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor12Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor13Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor14Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor15Ev +__ZN18IOMemoryDescriptor3mapEP4taskjmmm +__ZN18IOMemoryDescriptor3mapEm +__ZN18IOMemoryDescriptor4freeEv +__ZN18IOMemoryDescriptor5doMapEP6vm_mapPjmmm +__ZN18IOMemoryDescriptor6getTagEv +__ZN18IOMemoryDescriptor6setTagEm +__ZN18IOMemoryDescriptor7doUnmapEP6vm_mapjm +__ZN18IOMemoryDescriptor8redirectEP4taskb +__ZN18IOMemoryDescriptor9MetaClassC1Ev +__ZN18IOMemoryDescriptor9MetaClassC2Ev +__ZN18IOMemoryDescriptor9metaClassE +__ZN18IOMemoryDescriptor9readBytesEmPvm +__ZN18IOMemoryDescriptorC1EPK11OSMetaClass +__ZN18IOMemoryDescriptorC2EPK11OSMetaClass +__ZN18IOMemoryDescriptorD0Ev +__ZN18IOMemoryDescriptorD2Ev +__ZN18IOPMchangeNoteList10gMetaClassE +__ZN18IOPMchangeNoteList10initializeEv +__ZN18IOPMchangeNoteList10superClassE 
+__ZN18IOPMchangeNoteList12latestChangeEv +__ZN18IOPMchangeNoteList13currentChangeEv +__ZN18IOPMchangeNoteList14nextChangeNoteEm +__ZN18IOPMchangeNoteList15changeNoteInUseEm +__ZN18IOPMchangeNoteList16createChangeNoteEv +__ZN18IOPMchangeNoteList18previousChangeNoteEm +__ZN18IOPMchangeNoteList21releaseHeadChangeNoteEv +__ZN18IOPMchangeNoteList21releaseTailChangeNoteEv +__ZN18IOPMchangeNoteList9MetaClassC1Ev +__ZN18IOPMchangeNoteList9MetaClassC2Ev +__ZN18IOPMchangeNoteList9decrementEm +__ZN18IOPMchangeNoteList9incrementEm +__ZN18IOPMchangeNoteList9listEmptyEv +__ZN18IOPMchangeNoteList9metaClassE +__ZN18IOPMchangeNoteListC1EPK11OSMetaClass +__ZN18IOPMchangeNoteListC1Ev +__ZN18IOPMchangeNoteListC2EPK11OSMetaClass +__ZN18IOPMchangeNoteListC2Ev +__ZN18IOPMchangeNoteListD0Ev +__ZN18IOPMchangeNoteListD2Ev +__ZN18IORegistryIterator10enterEntryEPK15IORegistryPlane +__ZN18IORegistryIterator10enterEntryEv +__ZN18IORegistryIterator10gMetaClassE +__ZN18IORegistryIterator10iterateAllEv +__ZN18IORegistryIterator10superClassE +__ZN18IORegistryIterator11iterateOverEP15IORegistryEntryPK15IORegistryPlanem +__ZN18IORegistryIterator11iterateOverEPK15IORegistryPlanem +__ZN18IORegistryIterator13getNextObjectEv +__ZN18IORegistryIterator15getCurrentEntryEv +__ZN18IORegistryIterator17getNextObjectFlatEv +__ZN18IORegistryIterator22getNextObjectRecursiveEv +__ZN18IORegistryIterator4freeEv +__ZN18IORegistryIterator5resetEv +__ZN18IORegistryIterator7isValidEv +__ZN18IORegistryIterator9MetaClassC1Ev +__ZN18IORegistryIterator9MetaClassC2Ev +__ZN18IORegistryIterator9exitEntryEv +__ZN18IORegistryIterator9metaClassE +__ZN18IORegistryIteratorC1EPK11OSMetaClass +__ZN18IORegistryIteratorC1Ev +__ZN18IORegistryIteratorC2EPK11OSMetaClass +__ZN18IORegistryIteratorC2Ev +__ZN18IORegistryIteratorD0Ev +__ZN18IORegistryIteratorD2Ev +__ZN18IOTimerEventSource10gMetaClassE +__ZN18IOTimerEventSource10setTimeoutE12UnsignedWide +__ZN18IOTimerEventSource10setTimeoutE13mach_timespec +__ZN18IOTimerEventSource10setTimeoutEmm +__ZN18IOTimerEventSource10superClassE +__ZN18IOTimerEventSource10wakeAtTimeE12UnsignedWide +__ZN18IOTimerEventSource10wakeAtTimeE13mach_timespec +__ZN18IOTimerEventSource10wakeAtTimeEmm +__ZN18IOTimerEventSource12checkForWorkEv +__ZN18IOTimerEventSource12setTimeoutMSEm +__ZN18IOTimerEventSource12setTimeoutUSEm +__ZN18IOTimerEventSource12wakeAtTimeMSEm +__ZN18IOTimerEventSource12wakeAtTimeUSEm +__ZN18IOTimerEventSource13cancelTimeoutEv +__ZN18IOTimerEventSource14setTimeoutFuncEv +__ZN18IOTimerEventSource15setTimeoutTicksEm +__ZN18IOTimerEventSource15wakeAtTimeTicksEm +__ZN18IOTimerEventSource16timerEventSourceEP8OSObjectPFvS1_PS_E +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource0Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource1Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource2Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource3Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource4Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource5Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource6Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource7Ev +__ZN18IOTimerEventSource4freeEv +__ZN18IOTimerEventSource4initEP8OSObjectPFvS1_PS_E +__ZN18IOTimerEventSource6enableEv +__ZN18IOTimerEventSource7disableEv +__ZN18IOTimerEventSource7timeoutEPv +__ZN18IOTimerEventSource9MetaClassC1Ev +__ZN18IOTimerEventSource9MetaClassC2Ev +__ZN18IOTimerEventSource9metaClassE +__ZN18IOTimerEventSourceC1EPK11OSMetaClass +__ZN18IOTimerEventSourceC1Ev +__ZN18IOTimerEventSourceC2EPK11OSMetaClass 
+__ZN18IOTimerEventSourceC2Ev +__ZN18IOTimerEventSourceD0Ev +__ZN18IOTimerEventSourceD2Ev +__ZN18IOUserNotification10gMetaClassE +__ZN18IOUserNotification10superClassE +__ZN18IOUserNotification15setNotificationEP10IONotifier +__ZN18IOUserNotification4freeEv +__ZN18IOUserNotification4initEv +__ZN18IOUserNotification5resetEv +__ZN18IOUserNotification7isValidEv +__ZN18IOUserNotification9MetaClassC1Ev +__ZN18IOUserNotification9MetaClassC2Ev +__ZN18IOUserNotification9metaClassE +__ZN18IOUserNotificationC1EPK11OSMetaClass +__ZN18IOUserNotificationC2EPK11OSMetaClass +__ZN18IOUserNotificationD0Ev +__ZN18IOUserNotificationD2Ev +__ZN18_IOServiceNotifier10gMetaClassE +__ZN18_IOServiceNotifier10superClassE +__ZN18_IOServiceNotifier4freeEv +__ZN18_IOServiceNotifier4waitEv +__ZN18_IOServiceNotifier6enableEb +__ZN18_IOServiceNotifier6removeEv +__ZN18_IOServiceNotifier7disableEv +__ZN18_IOServiceNotifier9MetaClassC1Ev +__ZN18_IOServiceNotifier9MetaClassC2Ev +__ZN18_IOServiceNotifier9metaClassE +__ZN18_IOServiceNotifierC1EPK11OSMetaClass +__ZN18_IOServiceNotifierC1Ev +__ZN18_IOServiceNotifierC2EPK11OSMetaClass +__ZN18_IOServiceNotifierC2Ev +__ZN18_IOServiceNotifierD0Ev +__ZN18_IOServiceNotifierD2Ev +__ZN19IOPMPowerSourceList10gMetaClassE +__ZN19IOPMPowerSourceList10initializeEv +__ZN19IOPMPowerSourceList10nextInListEP15IOPMPowerSource +__ZN19IOPMPowerSourceList10superClassE +__ZN19IOPMPowerSourceList11firstInListEv +__ZN19IOPMPowerSourceList13numberOfItemsEv +__ZN19IOPMPowerSourceList14removeFromListEP15IOPMPowerSource +__ZN19IOPMPowerSourceList4freeEv +__ZN19IOPMPowerSourceList9MetaClassC1Ev +__ZN19IOPMPowerSourceList9MetaClassC2Ev +__ZN19IOPMPowerSourceList9addToListEP15IOPMPowerSource +__ZN19IOPMPowerSourceList9metaClassE +__ZN19IOPMPowerSourceListC1EPK11OSMetaClass +__ZN19IOPMPowerSourceListC1Ev +__ZN19IOPMPowerSourceListC2EPK11OSMetaClass +__ZN19IOPMPowerSourceListC2Ev +__ZN19IOPMPowerSourceListD0Ev +__ZN19IOPMPowerSourceListD2Ev +__ZN19IOPMPowerStateQueue10gMetaClassE +__ZN19IOPMPowerStateQueue10superClassE +__ZN19IOPMPowerStateQueue12checkForWorkEv +__ZN19IOPMPowerStateQueue14unIdleOccurredEP9IOServicem +__ZN19IOPMPowerStateQueue17PMPowerStateQueueEP8OSObject +__ZN19IOPMPowerStateQueue4initEP8OSObjectPFvS1_zE +__ZN19IOPMPowerStateQueue9MetaClassC1Ev +__ZN19IOPMPowerStateQueue9MetaClassC2Ev +__ZN19IOPMPowerStateQueue9metaClassE +__ZN19IOPMPowerStateQueueC1EPK11OSMetaClass +__ZN19IOPMPowerStateQueueC1Ev +__ZN19IOPMPowerStateQueueC2EPK11OSMetaClass +__ZN19IOPMPowerStateQueueC2Ev +__ZN19IOPMPowerStateQueueD0Ev +__ZN19IOPMPowerStateQueueD2Ev +__ZN20IOLittleMemoryCursor10gMetaClassE +__ZN20IOLittleMemoryCursor10superClassE +__ZN20IOLittleMemoryCursor13outputSegmentEN14IOMemoryCursor15PhysicalSegmentEPvm +__ZN20IOLittleMemoryCursor17withSpecificationEmmm +__ZN20IOLittleMemoryCursor21initWithSpecificationEmmm +__ZN20IOLittleMemoryCursor9MetaClassC1Ev +__ZN20IOLittleMemoryCursor9MetaClassC2Ev +__ZN20IOLittleMemoryCursor9metaClassE +__ZN20IOLittleMemoryCursorC1EPK11OSMetaClass +__ZN20IOLittleMemoryCursorC1Ev +__ZN20IOLittleMemoryCursorC2EPK11OSMetaClass +__ZN20IOLittleMemoryCursorC2Ev +__ZN20IOLittleMemoryCursorD0Ev +__ZN20IOLittleMemoryCursorD2Ev +__ZN20RootDomainUserClient10gMetaClassE +__ZN20RootDomainUserClient10superClassE +__ZN20RootDomainUserClient11clientCloseEv +__ZN20RootDomainUserClient15setPreventativeEmm +__ZN20RootDomainUserClient26getTargetAndMethodForIndexEPP9IOServicem +__ZN20RootDomainUserClient5startEP9IOService +__ZN20RootDomainUserClient9MetaClassC1Ev 
+__ZN20RootDomainUserClient9MetaClassC2Ev +__ZN20RootDomainUserClient9metaClassE +__ZN20RootDomainUserClientC1EPK11OSMetaClass +__ZN20RootDomainUserClientC1Ev +__ZN20RootDomainUserClientC2EPK11OSMetaClass +__ZN20RootDomainUserClientC2Ev +__ZN20RootDomainUserClientD0Ev +__ZN20RootDomainUserClientD2Ev +__ZN21IOInterruptController10gMetaClassE +__ZN21IOInterruptController10initVectorElP17IOInterruptVector +__ZN21IOInterruptController10superClassE +__ZN21IOInterruptController11causeVectorElP17IOInterruptVector +__ZN21IOInterruptController12enableVectorElP17IOInterruptVector +__ZN21IOInterruptController13getVectorTypeElP17IOInterruptVector +__ZN21IOInterruptController14causeInterruptEP9IOServicei +__ZN21IOInterruptController15enableInterruptEP9IOServicei +__ZN21IOInterruptController15handleInterruptEPvP9IOServicei +__ZN21IOInterruptController16disableInterruptEP9IOServicei +__ZN21IOInterruptController16getInterruptTypeEP9IOServiceiPi +__ZN21IOInterruptController17disableVectorHardElP17IOInterruptVector +__ZN21IOInterruptController17registerInterruptEP9IOServiceiPvPFvS2_S2_S2_iES2_ +__ZN21IOInterruptController17vectorCanBeSharedElP17IOInterruptVector +__ZN21IOInterruptController19unregisterInterruptEP9IOServicei +__ZN21IOInterruptController26getInterruptHandlerAddressEv +__ZN21IOInterruptController31_RESERVEDIOInterruptController0Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController1Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController2Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController3Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController4Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController5Ev +__ZN21IOInterruptController9MetaClassC1Ev +__ZN21IOInterruptController9MetaClassC2Ev +__ZN21IOInterruptController9metaClassE +__ZN21IOInterruptControllerC1EPK11OSMetaClass +__ZN21IOInterruptControllerC2EPK11OSMetaClass +__ZN21IOInterruptControllerD0Ev +__ZN21IOInterruptControllerD2Ev +__ZN21IONaturalMemoryCursor10gMetaClassE +__ZN21IONaturalMemoryCursor10superClassE +__ZN21IONaturalMemoryCursor13outputSegmentEN14IOMemoryCursor15PhysicalSegmentEPvm +__ZN21IONaturalMemoryCursor17withSpecificationEmmm +__ZN21IONaturalMemoryCursor21initWithSpecificationEmmm +__ZN21IONaturalMemoryCursor9MetaClassC1Ev +__ZN21IONaturalMemoryCursor9MetaClassC2Ev +__ZN21IONaturalMemoryCursor9metaClassE +__ZN21IONaturalMemoryCursorC1EPK11OSMetaClass +__ZN21IONaturalMemoryCursorC1Ev +__ZN21IONaturalMemoryCursorC2EPK11OSMetaClass +__ZN21IONaturalMemoryCursorC2Ev +__ZN21IONaturalMemoryCursorD0Ev +__ZN21IONaturalMemoryCursorD2Ev +__ZN21IOSubMemoryDescriptor10gMetaClassE +__ZN21IOSubMemoryDescriptor10superClassE +__ZN21IOSubMemoryDescriptor10writeBytesEmPKvm +__ZN21IOSubMemoryDescriptor11makeMappingEP18IOMemoryDescriptorP4taskjmmm +__ZN21IOSubMemoryDescriptor12initSubRangeEP18IOMemoryDescriptormm11IODirection +__ZN21IOSubMemoryDescriptor14initWithRangesEP14IOVirtualRangem11IODirectionP4taskb +__ZN21IOSubMemoryDescriptor15initWithAddressEPvm11IODirection +__ZN21IOSubMemoryDescriptor15initWithAddressEjm11IODirectionP4task +__ZN21IOSubMemoryDescriptor16getSourceSegmentEmPm +__ZN21IOSubMemoryDescriptor17getVirtualSegmentEmPm +__ZN21IOSubMemoryDescriptor18getPhysicalSegmentEmPm +__ZN21IOSubMemoryDescriptor22initWithPhysicalRangesEP15IOPhysicalRangem11IODirectionb +__ZN21IOSubMemoryDescriptor23initWithPhysicalAddressEmm11IODirection +__ZN21IOSubMemoryDescriptor4freeEv +__ZN21IOSubMemoryDescriptor7prepareE11IODirection +__ZN21IOSubMemoryDescriptor8completeE11IODirection 
+__ZN21IOSubMemoryDescriptor8redirectEP4taskb +__ZN21IOSubMemoryDescriptor9MetaClassC1Ev +__ZN21IOSubMemoryDescriptor9MetaClassC2Ev +__ZN21IOSubMemoryDescriptor9metaClassE +__ZN21IOSubMemoryDescriptor9readBytesEmPvm +__ZN21IOSubMemoryDescriptorC1EPK11OSMetaClass +__ZN21IOSubMemoryDescriptorC1Ev +__ZN21IOSubMemoryDescriptorC2EPK11OSMetaClass +__ZN21IOSubMemoryDescriptorC2Ev +__ZN21IOSubMemoryDescriptorD0Ev +__ZN21IOSubMemoryDescriptorD2Ev +__ZN22IOInterruptEventSource10gMetaClassE +__ZN22IOInterruptEventSource10superClassE +__ZN22IOInterruptEventSource12checkForWorkEv +__ZN22IOInterruptEventSource17interruptOccurredEPvP9IOServicei +__ZN22IOInterruptEventSource20interruptEventSourceEP8OSObjectPFvS1_PS_iEP9IOServicei +__ZN22IOInterruptEventSource23normalInterruptOccurredEPvP9IOServicei +__ZN22IOInterruptEventSource24disableInterruptOccurredEPvP9IOServicei +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource0Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource1Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource2Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource3Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource4Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource5Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource6Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource7Ev +__ZN22IOInterruptEventSource4freeEv +__ZN22IOInterruptEventSource4initEP8OSObjectPFvS1_PS_iEP9IOServicei +__ZN22IOInterruptEventSource6enableEv +__ZN22IOInterruptEventSource7disableEv +__ZN22IOInterruptEventSource9MetaClassC1Ev +__ZN22IOInterruptEventSource9MetaClassC2Ev +__ZN22IOInterruptEventSource9metaClassE +__ZN22IOInterruptEventSourceC1EPK11OSMetaClass +__ZN22IOInterruptEventSourceC1Ev +__ZN22IOInterruptEventSourceC2EPK11OSMetaClass +__ZN22IOInterruptEventSourceC2Ev +__ZN22IOInterruptEventSourceD0Ev +__ZN22IOInterruptEventSourceD2Ev +__ZN22IOPlatformExpertDevice10gMetaClassE +__ZN22IOPlatformExpertDevice10superClassE +__ZN22IOPlatformExpertDevice12initWithArgsEPvS0_S0_S0_ +__ZN22IOPlatformExpertDevice32_RESERVEDIOPlatformExpertDevice0Ev +__ZN22IOPlatformExpertDevice32_RESERVEDIOPlatformExpertDevice1Ev +__ZN22IOPlatformExpertDevice32_RESERVEDIOPlatformExpertDevice2Ev +__ZN22IOPlatformExpertDevice32_RESERVEDIOPlatformExpertDevice3Ev +__ZN22IOPlatformExpertDevice4freeEv +__ZN22IOPlatformExpertDevice9MetaClassC1Ev +__ZN22IOPlatformExpertDevice9MetaClassC2Ev +__ZN22IOPlatformExpertDevice9metaClassE +__ZN22IOPlatformExpertDeviceC1EPK11OSMetaClass +__ZN22IOPlatformExpertDeviceC1Ev +__ZN22IOPlatformExpertDeviceC2EPK11OSMetaClass +__ZN22IOPlatformExpertDeviceC2Ev +__ZN22IOPlatformExpertDeviceD0Ev +__ZN22IOPlatformExpertDeviceD2Ev +__ZN22_IOOpenServiceIterator10gMetaClassE +__ZN22_IOOpenServiceIterator10superClassE +__ZN22_IOOpenServiceIterator13getNextObjectEv +__ZN22_IOOpenServiceIterator4freeEv +__ZN22_IOOpenServiceIterator5resetEv +__ZN22_IOOpenServiceIterator7isValidEv +__ZN22_IOOpenServiceIterator8iteratorEP10OSIteratorPK9IOServiceS4_ +__ZN22_IOOpenServiceIterator9MetaClassC1Ev +__ZN22_IOOpenServiceIterator9MetaClassC2Ev +__ZN22_IOOpenServiceIterator9metaClassE +__ZN22_IOOpenServiceIteratorC1EPK11OSMetaClass +__ZN22_IOOpenServiceIteratorC1Ev +__ZN22_IOOpenServiceIteratorC2EPK11OSMetaClass +__ZN22_IOOpenServiceIteratorC2Ev +__ZN22_IOOpenServiceIteratorD0Ev +__ZN22_IOOpenServiceIteratorD2Ev +__ZN23IOMultiMemoryDescriptor10gMetaClassE +__ZN23IOMultiMemoryDescriptor10superClassE 
+__ZN23IOMultiMemoryDescriptor10writeBytesEmPKvm +__ZN23IOMultiMemoryDescriptor14initWithRangesEP14IOVirtualRangem11IODirectionP4taskb +__ZN23IOMultiMemoryDescriptor15initWithAddressEPvm11IODirection +__ZN23IOMultiMemoryDescriptor15initWithAddressEjm11IODirectionP4task +__ZN23IOMultiMemoryDescriptor15withDescriptorsEPP18IOMemoryDescriptorm11IODirectionb +__ZN23IOMultiMemoryDescriptor16getSourceSegmentEmPm +__ZN23IOMultiMemoryDescriptor17getVirtualSegmentEmPm +__ZN23IOMultiMemoryDescriptor18getPhysicalSegmentEmPm +__ZN23IOMultiMemoryDescriptor19initWithDescriptorsEPP18IOMemoryDescriptorm11IODirectionb +__ZN23IOMultiMemoryDescriptor22initWithPhysicalRangesEP15IOPhysicalRangem11IODirectionb +__ZN23IOMultiMemoryDescriptor23initWithPhysicalAddressEmm11IODirection +__ZN23IOMultiMemoryDescriptor4freeEv +__ZN23IOMultiMemoryDescriptor7prepareE11IODirection +__ZN23IOMultiMemoryDescriptor8completeE11IODirection +__ZN23IOMultiMemoryDescriptor9MetaClassC1Ev +__ZN23IOMultiMemoryDescriptor9MetaClassC2Ev +__ZN23IOMultiMemoryDescriptor9metaClassE +__ZN23IOMultiMemoryDescriptor9readBytesEmPvm +__ZN23IOMultiMemoryDescriptorC1EPK11OSMetaClass +__ZN23IOMultiMemoryDescriptorC1Ev +__ZN23IOMultiMemoryDescriptorC2EPK11OSMetaClass +__ZN23IOMultiMemoryDescriptorC2Ev +__ZN23IOMultiMemoryDescriptorD0Ev +__ZN23IOMultiMemoryDescriptorD2Ev +__ZN24IOBufferMemoryDescriptor10gMetaClassE +__ZN24IOBufferMemoryDescriptor10superClassE +__ZN24IOBufferMemoryDescriptor11appendBytesEPKvj +__ZN24IOBufferMemoryDescriptor11withOptionsEmjj +__ZN24IOBufferMemoryDescriptor12setDirectionE11IODirection +__ZN24IOBufferMemoryDescriptor12withCapacityEj11IODirectionb +__ZN24IOBufferMemoryDescriptor13initWithBytesEPKvj11IODirectionb +__ZN24IOBufferMemoryDescriptor14getBytesNoCopyEjj +__ZN24IOBufferMemoryDescriptor14getBytesNoCopyEv +__ZN24IOBufferMemoryDescriptor14initWithRangesEP14IOVirtualRangem11IODirectionP4taskb +__ZN24IOBufferMemoryDescriptor15initWithAddressEPvm11IODirection +__ZN24IOBufferMemoryDescriptor15initWithAddressEjm11IODirectionP4task +__ZN24IOBufferMemoryDescriptor15initWithOptionsEmjj +__ZN24IOBufferMemoryDescriptor15initWithOptionsEmjjP4task +__ZN24IOBufferMemoryDescriptor17inTaskWithOptionsEP4taskmjj +__ZN24IOBufferMemoryDescriptor22initWithPhysicalRangesEP15IOPhysicalRangem11IODirectionb +__ZN24IOBufferMemoryDescriptor23initWithPhysicalAddressEmm11IODirection +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor1Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor2Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor3Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor4Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor5Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor6Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor7Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor8Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor9Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor10Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor11Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor12Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor13Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor14Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor15Ev +__ZN24IOBufferMemoryDescriptor4freeEv +__ZN24IOBufferMemoryDescriptor9MetaClassC1Ev +__ZN24IOBufferMemoryDescriptor9MetaClassC2Ev 
+__ZN24IOBufferMemoryDescriptor9metaClassE +__ZN24IOBufferMemoryDescriptor9setLengthEj +__ZN24IOBufferMemoryDescriptor9withBytesEPKvj11IODirectionb +__ZN24IOBufferMemoryDescriptorC1EPK11OSMetaClass +__ZN24IOBufferMemoryDescriptorC1Ev +__ZN24IOBufferMemoryDescriptorC2EPK11OSMetaClass +__ZN24IOBufferMemoryDescriptorC2Ev +__ZN24IOBufferMemoryDescriptorD0Ev +__ZN24IOBufferMemoryDescriptorD2Ev +__ZN24IOCPUInterruptController10gMetaClassE +__ZN24IOCPUInterruptController10superClassE +__ZN24IOCPUInterruptController14causeInterruptEP9IOServicei +__ZN24IOCPUInterruptController15enableInterruptEP9IOServicei +__ZN24IOCPUInterruptController15handleInterruptEPvP9IOServicei +__ZN24IOCPUInterruptController16disableInterruptEP9IOServicei +__ZN24IOCPUInterruptController16getInterruptTypeEP9IOServiceiPi +__ZN24IOCPUInterruptController17registerInterruptEP9IOServiceiPvPFvS2_S2_S2_iES2_ +__ZN24IOCPUInterruptController18enableCPUInterruptEP5IOCPU +__ZN24IOCPUInterruptController25setCPUInterruptPropertiesEP9IOService +__ZN24IOCPUInterruptController26initCPUInterruptControllerEi +__ZN24IOCPUInterruptController30registerCPUInterruptControllerEv +__ZN24IOCPUInterruptController34_RESERVEDIOCPUInterruptController0Ev +__ZN24IOCPUInterruptController34_RESERVEDIOCPUInterruptController1Ev +__ZN24IOCPUInterruptController34_RESERVEDIOCPUInterruptController2Ev +__ZN24IOCPUInterruptController34_RESERVEDIOCPUInterruptController3Ev +__ZN24IOCPUInterruptController34_RESERVEDIOCPUInterruptController4Ev +__ZN24IOCPUInterruptController34_RESERVEDIOCPUInterruptController5Ev +__ZN24IOCPUInterruptController9MetaClassC1Ev +__ZN24IOCPUInterruptController9MetaClassC2Ev +__ZN24IOCPUInterruptController9metaClassE +__ZN24IOCPUInterruptControllerC1EPK11OSMetaClass +__ZN24IOCPUInterruptControllerC1Ev +__ZN24IOCPUInterruptControllerC2EPK11OSMetaClass +__ZN24IOCPUInterruptControllerC2Ev +__ZN24IOCPUInterruptControllerD0Ev +__ZN24IOCPUInterruptControllerD2Ev +__ZN25IOGeneralMemoryDescriptor10gMetaClassE +__ZN25IOGeneralMemoryDescriptor10superClassE +__ZN25IOGeneralMemoryDescriptor11setPositionEm +__ZN25IOGeneralMemoryDescriptor11wireVirtualE11IODirection +__ZN25IOGeneralMemoryDescriptor13mapIntoKernelEj +__ZN25IOGeneralMemoryDescriptor14initWithRangesEP14IOVirtualRangem11IODirectionP4taskb +__ZN25IOGeneralMemoryDescriptor15initWithAddressEPvm11IODirection +__ZN25IOGeneralMemoryDescriptor15initWithAddressEjm11IODirectionP4task +__ZN25IOGeneralMemoryDescriptor15initWithOptionsEPvmmP4taskmP8IOMapper +__ZN25IOGeneralMemoryDescriptor15unmapFromKernelEv +__ZN25IOGeneralMemoryDescriptor16getSourceSegmentEmPm +__ZN25IOGeneralMemoryDescriptor17getVirtualSegmentEmPm +__ZN25IOGeneralMemoryDescriptor18getPhysicalSegmentEmPm +__ZN25IOGeneralMemoryDescriptor22initWithPhysicalRangesEP15IOPhysicalRangem11IODirectionb +__ZN25IOGeneralMemoryDescriptor23initWithPhysicalAddressEmm11IODirection +__ZN25IOGeneralMemoryDescriptor4freeEv +__ZN25IOGeneralMemoryDescriptor5doMapEP6vm_mapPjmmm +__ZN25IOGeneralMemoryDescriptor7doUnmapEP6vm_mapjm +__ZN25IOGeneralMemoryDescriptor7prepareE11IODirection +__ZN25IOGeneralMemoryDescriptor8completeE11IODirection +__ZN25IOGeneralMemoryDescriptor9MetaClassC1Ev +__ZN25IOGeneralMemoryDescriptor9MetaClassC2Ev +__ZN25IOGeneralMemoryDescriptor9metaClassE +__ZN25IOGeneralMemoryDescriptorC1EPK11OSMetaClass +__ZN25IOGeneralMemoryDescriptorC1Ev +__ZN25IOGeneralMemoryDescriptorC2EPK11OSMetaClass +__ZN25IOGeneralMemoryDescriptorC2Ev +__ZN25IOGeneralMemoryDescriptorD0Ev +__ZN25IOGeneralMemoryDescriptorD2Ev 
+__ZN25IOServiceUserNotification10gMetaClassE +__ZN25IOServiceUserNotification10superClassE +__ZN25IOServiceUserNotification13getNextObjectEv +__ZN25IOServiceUserNotification4freeEv +__ZN25IOServiceUserNotification4initEP8ipc_portjPj +__ZN25IOServiceUserNotification7handlerEPvP9IOService +__ZN25IOServiceUserNotification8_handlerEPvS0_P9IOService +__ZN25IOServiceUserNotification9MetaClassC1Ev +__ZN25IOServiceUserNotification9MetaClassC2Ev +__ZN25IOServiceUserNotification9metaClassE +__ZN25IOServiceUserNotificationC1EPK11OSMetaClass +__ZN25IOServiceUserNotificationC1Ev +__ZN25IOServiceUserNotificationC2EPK11OSMetaClass +__ZN25IOServiceUserNotificationC2Ev +__ZN25IOServiceUserNotificationD0Ev +__ZN25IOServiceUserNotificationD2Ev +__ZN26_IOServiceInterestNotifier10gMetaClassE +__ZN26_IOServiceInterestNotifier10superClassE +__ZN26_IOServiceInterestNotifier4freeEv +__ZN26_IOServiceInterestNotifier4waitEv +__ZN26_IOServiceInterestNotifier6enableEb +__ZN26_IOServiceInterestNotifier6removeEv +__ZN26_IOServiceInterestNotifier7disableEv +__ZN26_IOServiceInterestNotifier9MetaClassC1Ev +__ZN26_IOServiceInterestNotifier9MetaClassC2Ev +__ZN26_IOServiceInterestNotifier9metaClassE +__ZN26_IOServiceInterestNotifierC1EPK11OSMetaClass +__ZN26_IOServiceInterestNotifierC1Ev +__ZN26_IOServiceInterestNotifierC2EPK11OSMetaClass +__ZN26_IOServiceInterestNotifierC2Ev +__ZN26_IOServiceInterestNotifierD0Ev +__ZN26_IOServiceInterestNotifierD2Ev +__ZN27IOSharedInterruptController10gMetaClassE +__ZN27IOSharedInterruptController10superClassE +__ZN27IOSharedInterruptController15enableInterruptEP9IOServicei +__ZN27IOSharedInterruptController15handleInterruptEPvP9IOServicei +__ZN27IOSharedInterruptController16disableInterruptEP9IOServicei +__ZN27IOSharedInterruptController16getInterruptTypeEP9IOServiceiPi +__ZN27IOSharedInterruptController17registerInterruptEP9IOServiceiPvPFvS2_S2_S2_iES2_ +__ZN27IOSharedInterruptController19unregisterInterruptEP9IOServicei +__ZN27IOSharedInterruptController23initInterruptControllerEP21IOInterruptControllerP6OSData +__ZN27IOSharedInterruptController26getInterruptHandlerAddressEv +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController0Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController1Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController2Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController3Ev +__ZN27IOSharedInterruptController9MetaClassC1Ev +__ZN27IOSharedInterruptController9MetaClassC2Ev +__ZN27IOSharedInterruptController9metaClassE +__ZN27IOSharedInterruptControllerC1EPK11OSMetaClass +__ZN27IOSharedInterruptControllerC1Ev +__ZN27IOSharedInterruptControllerC2EPK11OSMetaClass +__ZN27IOSharedInterruptControllerC2Ev +__ZN27IOSharedInterruptControllerD0Ev +__ZN27IOSharedInterruptControllerD2Ev +__ZN28IOFilterInterruptEventSource10gMetaClassE +__ZN28IOFilterInterruptEventSource10superClassE +__ZN28IOFilterInterruptEventSource15signalInterruptEv +__ZN28IOFilterInterruptEventSource20interruptEventSourceEP8OSObjectPFvS1_P22IOInterruptEventSourceiEP9IOServicei +__ZN28IOFilterInterruptEventSource23normalInterruptOccurredEPvP9IOServicei +__ZN28IOFilterInterruptEventSource24disableInterruptOccurredEPvP9IOServicei +__ZN28IOFilterInterruptEventSource26filterInterruptEventSourceEP8OSObjectPFvS1_P22IOInterruptEventSourceiEPFbS1_PS_EP9IOServicei +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource0Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource1Ev 
+__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource2Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource3Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource4Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource5Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource6Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource7Ev +__ZN28IOFilterInterruptEventSource4initEP8OSObjectPFvS1_P22IOInterruptEventSourceiEP9IOServicei +__ZN28IOFilterInterruptEventSource4initEP8OSObjectPFvS1_P22IOInterruptEventSourceiEPFbS1_PS_EP9IOServicei +__ZN28IOFilterInterruptEventSource9MetaClassC1Ev +__ZN28IOFilterInterruptEventSource9MetaClassC2Ev +__ZN28IOFilterInterruptEventSource9metaClassE +__ZN28IOFilterInterruptEventSourceC1EPK11OSMetaClass +__ZN28IOFilterInterruptEventSourceC1Ev +__ZN28IOFilterInterruptEventSourceC2EPK11OSMetaClass +__ZN28IOFilterInterruptEventSourceC2Ev +__ZN28IOFilterInterruptEventSourceD0Ev +__ZN28IOFilterInterruptEventSourceD2Ev +__ZN32IOServiceMessageUserNotification10gMetaClassE +__ZN32IOServiceMessageUserNotification10superClassE +__ZN32IOServiceMessageUserNotification13getNextObjectEv +__ZN32IOServiceMessageUserNotification4freeEv +__ZN32IOServiceMessageUserNotification4initEP8ipc_portjPjj +__ZN32IOServiceMessageUserNotification7handlerEPvmP9IOServiceS0_j +__ZN32IOServiceMessageUserNotification8_handlerEPvS0_mP9IOServiceS0_j +__ZN32IOServiceMessageUserNotification9MetaClassC1Ev +__ZN32IOServiceMessageUserNotification9MetaClassC2Ev +__ZN32IOServiceMessageUserNotification9metaClassE +__ZN32IOServiceMessageUserNotificationC1EPK11OSMetaClass +__ZN32IOServiceMessageUserNotificationC1Ev +__ZN32IOServiceMessageUserNotificationC2EPK11OSMetaClass +__ZN32IOServiceMessageUserNotificationC2Ev +__ZN32IOServiceMessageUserNotificationD0Ev +__ZN32IOServiceMessageUserNotificationD2Ev +__ZN5IOCPU10gMetaClassE +__ZN5IOCPU10superClassE +__ZN5IOCPU11getCPUGroupEv +__ZN5IOCPU11getCPUStateEv +__ZN5IOCPU11setCPUStateEm +__ZN5IOCPU12getCPUNumberEv +__ZN5IOCPU12setCPUNumberEm +__ZN5IOCPU13setPropertiesEP8OSObject +__ZN5IOCPU15_RESERVEDIOCPU0Ev +__ZN5IOCPU15_RESERVEDIOCPU1Ev +__ZN5IOCPU15_RESERVEDIOCPU2Ev +__ZN5IOCPU15_RESERVEDIOCPU3Ev +__ZN5IOCPU15_RESERVEDIOCPU4Ev +__ZN5IOCPU15_RESERVEDIOCPU5Ev +__ZN5IOCPU15_RESERVEDIOCPU6Ev +__ZN5IOCPU15_RESERVEDIOCPU7Ev +__ZN5IOCPU15getCPUGroupSizeEv +__ZN5IOCPU16getMachProcessorEv +__ZN5IOCPU17enableCPUTimeBaseEb +__ZN5IOCPU5startEP9IOService +__ZN5IOCPU8initCPUsEv +__ZN5IOCPU9MetaClassC1Ev +__ZN5IOCPU9MetaClassC2Ev +__ZN5IOCPU9metaClassE +__ZN5IOCPU9signalCPUEPS_ +__ZN5IOCPUC1EPK11OSMetaClass +__ZN5IOCPUC2EPK11OSMetaClass +__ZN5IOCPUD0Ev +__ZN5IOCPUD2Ev +__ZN8IOMapper10allocTableEm +__ZN8IOMapper10gMetaClassE +__ZN8IOMapper10iovmInsertEjmP13upl_page_infom +__ZN8IOMapper10iovmInsertEjmPjm +__ZN8IOMapper10superClassE +__ZN8IOMapper11NewARTTableEmPPvPj +__ZN8IOMapper12FreeARTTableEP6OSDatam +__ZN8IOMapper17setMapperRequiredEb +__ZN8IOMapper18_RESERVEDIOMapper0Ev +__ZN8IOMapper18_RESERVEDIOMapper1Ev +__ZN8IOMapper18_RESERVEDIOMapper2Ev +__ZN8IOMapper18_RESERVEDIOMapper3Ev +__ZN8IOMapper18_RESERVEDIOMapper4Ev +__ZN8IOMapper18_RESERVEDIOMapper5Ev +__ZN8IOMapper18_RESERVEDIOMapper6Ev +__ZN8IOMapper18_RESERVEDIOMapper7Ev +__ZN8IOMapper18_RESERVEDIOMapper8Ev +__ZN8IOMapper18_RESERVEDIOMapper9Ev +__ZN8IOMapper19_RESERVEDIOMapper10Ev +__ZN8IOMapper19_RESERVEDIOMapper11Ev +__ZN8IOMapper19_RESERVEDIOMapper12Ev 
+__ZN8IOMapper19_RESERVEDIOMapper13Ev +__ZN8IOMapper19_RESERVEDIOMapper14Ev +__ZN8IOMapper19_RESERVEDIOMapper15Ev +__ZN8IOMapper19waitForSystemMapperEv +__ZN8IOMapper4freeEv +__ZN8IOMapper5startEP9IOService +__ZN8IOMapper7gSystemE +__ZN8IOMapper9MetaClassC1Ev +__ZN8IOMapper9MetaClassC2Ev +__ZN8IOMapper9metaClassE +__ZN8IOMapperC1EPK11OSMetaClass +__ZN8IOMapperC2EPK11OSMetaClass +__ZN8IOMapperD0Ev +__ZN8IOMapperD2Ev +__ZN8IOPMpriv10gMetaClassE +__ZN8IOPMpriv10superClassE +__ZN8IOPMpriv9MetaClassC1Ev +__ZN8IOPMpriv9MetaClassC2Ev +__ZN8IOPMpriv9metaClassE +__ZN8IOPMprivC1EPK11OSMetaClass +__ZN8IOPMprivC1Ev +__ZN8IOPMprivC2EPK11OSMetaClass +__ZN8IOPMprivC2Ev +__ZN8IOPMprivD0Ev +__ZN8IOPMprivD2Ev +__ZN8IOPMprot10gMetaClassE +__ZN8IOPMprot10superClassE +__ZN8IOPMprot9MetaClassC1Ev +__ZN8IOPMprot9MetaClassC2Ev +__ZN8IOPMprot9metaClassE +__ZN8IOPMprotC1EPK11OSMetaClass +__ZN8IOPMprotC1Ev +__ZN8IOPMprotC2EPK11OSMetaClass +__ZN8IOPMprotC2Ev +__ZN8IOPMprotD0Ev +__ZN8IOPMprotD2Ev +__ZN8IOSyncer10gMetaClassE +__ZN8IOSyncer10superClassE +__ZN8IOSyncer13privateSignalEv +__ZN8IOSyncer4freeEv +__ZN8IOSyncer4initEb +__ZN8IOSyncer4waitEb +__ZN8IOSyncer6createEb +__ZN8IOSyncer6reinitEv +__ZN8IOSyncer6signalEib +__ZN8IOSyncer9MetaClassC1Ev +__ZN8IOSyncer9MetaClassC2Ev +__ZN8IOSyncer9metaClassE +__ZN8IOSyncerC1EPK11OSMetaClass +__ZN8IOSyncerC1Ev +__ZN8IOSyncerC2EPK11OSMetaClass +__ZN8IOSyncerC2Ev +__ZN8IOSyncerD0Ev +__ZN8IOSyncerD2Ev +__ZN9IOCommand10gMetaClassE +__ZN9IOCommand10superClassE +__ZN9IOCommand4initEv +__ZN9IOCommand9MetaClassC1Ev +__ZN9IOCommand9MetaClassC2Ev +__ZN9IOCommand9metaClassE +__ZN9IOCommandC1EPK11OSMetaClass +__ZN9IOCommandC2EPK11OSMetaClass +__ZN9IOCommandD0Ev +__ZN9IOCommandD2Ev +__ZN9IODTNVRAM10gMetaClassE +__ZN9IODTNVRAM10superClassE +__ZN9IODTNVRAM10writeXPRAMEmPhm +__ZN9IODTNVRAM11setPropertyEPK8OSSymbolP8OSObject +__ZN9IODTNVRAM13savePanicInfoEPhm +__ZN9IODTNVRAM13setPropertiesEP8OSObject +__ZN9IODTNVRAM15initOFVariablesEv +__ZN9IODTNVRAM15syncOFVariablesEv +__ZN9IODTNVRAM16escapeDataToDataEP6OSData +__ZN9IODTNVRAM16updateOWBootArgsEPK8OSSymbolP8OSObject +__ZN9IODTNVRAM17getOWVariableInfoEmPPK8OSSymbolPmS4_ +__ZN9IODTNVRAM17readNVRAMPropertyEP15IORegistryEntryPPK8OSSymbolPP6OSData +__ZN9IODTNVRAM18generateOWChecksumEPh +__ZN9IODTNVRAM18getNVRAMPartitionsEv +__ZN9IODTNVRAM18readNVRAMPartitionEPK8OSSymbolmPhm +__ZN9IODTNVRAM18validateOWChecksumEPh +__ZN9IODTNVRAM18writeNVRAMPropertyEP15IORegistryEntryPK8OSSymbolP6OSData +__ZN9IODTNVRAM19convertObjectToPropEPhPmPK8OSSymbolP8OSObject +__ZN9IODTNVRAM19convertPropToObjectEPhmS0_mPPK8OSSymbolPP8OSObject +__ZN9IODTNVRAM19searchNVRAMPropertyEP17IONVRAMDescriptorPm +__ZN9IODTNVRAM19unescapeBytesToDataEPhm +__ZN9IODTNVRAM19writeNVRAMPartitionEPK8OSSymbolmPhm +__ZN9IODTNVRAM22readNVRAMPropertyType0EP15IORegistryEntryPPK8OSSymbolPP6OSData +__ZN9IODTNVRAM22readNVRAMPropertyType1EP15IORegistryEntryPPK8OSSymbolPP6OSData +__ZN9IODTNVRAM23registerNVRAMControllerEP17IONVRAMController +__ZN9IODTNVRAM23writeNVRAMPropertyType0EP15IORegistryEntryPK8OSSymbolP6OSData +__ZN9IODTNVRAM23writeNVRAMPropertyType1EP15IORegistryEntryPK8OSSymbolP6OSData +__ZN9IODTNVRAM26calculatePartitionChecksumEPh +__ZN9IODTNVRAM4initEP15IORegistryEntryPK15IORegistryPlane +__ZN9IODTNVRAM4syncEv +__ZN9IODTNVRAM9MetaClassC1Ev +__ZN9IODTNVRAM9MetaClassC2Ev +__ZN9IODTNVRAM9metaClassE +__ZN9IODTNVRAM9readXPRAMEmPhm +__ZN9IODTNVRAMC1EPK11OSMetaClass +__ZN9IODTNVRAMC1Ev +__ZN9IODTNVRAMC2EPK11OSMetaClass +__ZN9IODTNVRAMC2Ev +__ZN9IODTNVRAMD0Ev +__ZN9IODTNVRAMD2Ev 
+__ZN9IOService10actionStopEPS_S0_ +__ZN9IOService10adjustBusyEl +__ZN9IOService10ask_parentEm +__ZN9IOService10gMetaClassE +__ZN9IOService10handleOpenEPS_mPv +__ZN9IOService10initializeEv +__ZN9IOService10joinPMtreeEPS_ +__ZN9IOService10makeUsableEv +__ZN9IOService10superClassE +__ZN9IOService10systemWakeEv +__ZN9IOService10youAreRootEv +__ZN9IOService11_adjustBusyEl +__ZN9IOService11addLocationEP12OSDictionary +__ZN9IOService11changeStateEv +__ZN9IOService11getPlatformEv +__ZN9IOService11handleCloseEPS_m +__ZN9IOService11notifyChildEP17IOPowerConnectionb +__ZN9IOService11setPlatformEP16IOPlatformExpert +__ZN9IOService11tellClientsEi +__ZN9IOService12acquire_lockEv +__ZN9IOService12checkForDoneEv +__ZN9IOService12clampPowerOnEm +__ZN9IOService12didTerminateEPS_mPb +__ZN9IOService12driver_ackedEv +__ZN9IOService12getBusyStateEv +__ZN9IOService12getResourcesEv +__ZN9IOService12nameMatchingEPK8OSStringP12OSDictionary +__ZN9IOService12nameMatchingEPKcP12OSDictionary +__ZN9IOService12passiveMatchEP12OSDictionaryb +__ZN9IOService12requestProbeEm +__ZN9IOService12scheduleStopEPS_ +__ZN9IOService12tellChangeUpEm +__ZN9IOService12waitForStateEmmP13mach_timespec +__ZN9IOService13addPowerChildEPS_ +__ZN9IOService13askChangeDownEm +__ZN9IOService13checkResourceEP8OSObject +__ZN9IOService13getPMworkloopEv +__ZN9IOService13invokeNotiferEP18_IOServiceNotifier +__ZN9IOService13matchLocationEPS_ +__ZN9IOService13messageClientEmP8OSObjectPvj +__ZN9IOService13newUserClientEP4taskPvmP12OSDictionaryPP12IOUserClient +__ZN9IOService13newUserClientEP4taskPvmPP12IOUserClient +__ZN9IOService13responseValidEm +__ZN9IOService13setParentInfoEmP17IOPowerConnection +__ZN9IOService13setPowerStateEmPS_ +__ZN9IOService13startMatchingEm +__ZN9IOService13waitMatchIdleEm +__ZN9IOService13willTerminateEPS_m +__ZN9IOService14actionFinalizeEPS_m +__ZN9IOService14activityTickleEmm +__ZN9IOService14applyToClientsEPFvPS_PvES1_ +__ZN9IOService14causeInterruptEi +__ZN9IOService14checkResourcesEv +__ZN9IOService14doServiceMatchEm +__ZN9IOService14getServiceRootEv +__ZN9IOService14messageClientsEmPvj +__ZN9IOService14newTemperatureElPS_ +__ZN9IOService14setPowerParentEP17IOPowerConnectionbm +__ZN9IOService14startCandidateEPS_ +__ZN9IOService14stop_ack_timerEv +__ZN9IOService14tellChangeDownEm +__ZN9IOService14waitForServiceEP12OSDictionaryP13mach_timespec +__ZN9IOService15OurChangeFinishEv +__ZN9IOService15addNotificationEPK8OSSymbolP12OSDictionaryPFbPvS5_PS_ES5_S5_l +__ZN9IOService15comparePropertyEP12OSDictionaryPK8OSString +__ZN9IOService15comparePropertyEP12OSDictionaryPKc +__ZN9IOService15enableInterruptEi +__ZN9IOService15errnoFromReturnEi +__ZN9IOService15getDeviceMemoryEv +__ZN9IOService15getPMRootDomainEv +__ZN9IOService15instruct_driverEm +__ZN9IOService15lookupInterruptEibPP21IOInterruptController +__ZN9IOService15powerChangeDoneEm +__ZN9IOService15probeCandidatesEP12OSOrderedSet +__ZN9IOService15publishResourceEPK8OSSymbolP8OSObject +__ZN9IOService15publishResourceEPKcP8OSObject +__ZN9IOService15registerServiceEm +__ZN9IOService15serviceMatchingEPK8OSStringP12OSDictionary +__ZN9IOService15serviceMatchingEPKcP12OSDictionary +__ZN9IOService15setDeviceMemoryEP7OSArray +__ZN9IOService15setNotificationEPK8OSSymbolP12OSDictionaryPFbPvS5_PS_ES5_S5_l +__ZN9IOService15setPMRootDomainEP14IOPMrootDomain +__ZN9IOService15start_ack_timerEv +__ZN9IOService15tellChangeDown1Em +__ZN9IOService15tellChangeDown2Em +__ZN9IOService15terminateClientEPS_m +__ZN9IOService15terminatePhase1Em +__ZN9IOService15terminateThreadEPv 
+__ZN9IOService15terminateWorkerEm +__ZN9IOService16ack_timer_tickedEv +__ZN9IOService16allowPowerChangeEm +__ZN9IOService16applyToProvidersEPFvPS_PvES1_ +__ZN9IOService16command_receivedEPvS0_S0_S0_ +__ZN9IOService16didYouWakeSystemEv +__ZN9IOService16disableInterruptEi +__ZN9IOService16getInterruptTypeEiPi +__ZN9IOService16registerInterestEPK8OSSymbolPFiPvS3_mPS_S3_jES3_S3_ +__ZN9IOService16removePowerChildEP17IOPowerConnection +__ZN9IOService16requestTerminateEPS_m +__ZN9IOService16resolveInterruptEPS_i +__ZN9IOService16resourceMatchingEPK8OSStringP12OSDictionary +__ZN9IOService16resourceMatchingEPKcP12OSDictionary +__ZN9IOService16scheduleFinalizeEv +__ZN9IOService16startSettleTimerEm +__ZN9IOService16start_our_changeEm +__ZN9IOService16stringFromReturnEi +__ZN9IOService16tellNoChangeDownEm +__ZN9IOService17addNeededResourceEPKc +__ZN9IOService17allowCancelCommonEv +__ZN9IOService17applyToInterestedEPK8OSSymbolPFvP8OSObjectPvES5_ +__ZN9IOService17cancelPowerChangeEm +__ZN9IOService17catalogNewDriversEP12OSOrderedSet +__ZN9IOService17comparePropertiesEP12OSDictionaryP12OSCollection +__ZN9IOService17currentCapabilityEv +__ZN9IOService17getAggressivenessEmPm +__ZN9IOService17registerInterruptEiP8OSObjectPFvS1_PvPS_iES2_ +__ZN9IOService17setAggressivenessEmm +__ZN9IOService18actionDidTerminateEPS_m +__ZN9IOService18changePowerStateToEm +__ZN9IOService18doServiceTerminateEm +__ZN9IOService18enqueuePowerChangeEmmmP17IOPowerConnectionm +__ZN9IOService18getResourceServiceEv +__ZN9IOService18lockForArbitrationEb +__ZN9IOService18matchPropertyTableEP12OSDictionary +__ZN9IOService18matchPropertyTableEP12OSDictionaryPl +__ZN9IOService18setIdleTimerPeriodEm +__ZN9IOService18settleTimerExpiredEv +__ZN9IOService19_RESERVEDIOService3Ev +__ZN9IOService19_RESERVEDIOService4Ev +__ZN9IOService19_RESERVEDIOService5Ev +__ZN9IOService19_RESERVEDIOService6Ev +__ZN9IOService19_RESERVEDIOService7Ev +__ZN9IOService19_RESERVEDIOService8Ev +__ZN9IOService19_RESERVEDIOService9Ev +__ZN9IOService19actionWillTerminateEPS_mP7OSArray +__ZN9IOService19computeDesiredStateEv +__ZN9IOService19compute_settle_timeEv +__ZN9IOService19deliverNotificationEPK8OSSymbolmm +__ZN9IOService19getExistingServicesEP12OSDictionarymm +__ZN9IOService19getMatchingServicesEP12OSDictionary +__ZN9IOService19installNotificationEPK8OSSymbolP12OSDictionaryPFbPvS5_PS_ES5_S5_lPP10OSIterator +__ZN9IOService19powerOverrideOnPrivEv +__ZN9IOService19registerPowerDriverEPS_P14IOPMPowerStatem +__ZN9IOService19start_PM_idle_timerEv +__ZN9IOService19start_parent_changeEm +__ZN9IOService19unregisterInterruptEi +__ZN9IOService20_RESERVEDIOService10Ev +__ZN9IOService20_RESERVEDIOService11Ev +__ZN9IOService20_RESERVEDIOService12Ev +__ZN9IOService20_RESERVEDIOService13Ev +__ZN9IOService20_RESERVEDIOService14Ev +__ZN9IOService20_RESERVEDIOService15Ev +__ZN9IOService20_RESERVEDIOService16Ev +__ZN9IOService20_RESERVEDIOService17Ev +__ZN9IOService20_RESERVEDIOService18Ev +__ZN9IOService20_RESERVEDIOService19Ev +__ZN9IOService20_RESERVEDIOService20Ev +__ZN9IOService20_RESERVEDIOService21Ev +__ZN9IOService20_RESERVEDIOService22Ev +__ZN9IOService20_RESERVEDIOService23Ev +__ZN9IOService20_RESERVEDIOService24Ev +__ZN9IOService20_RESERVEDIOService25Ev +__ZN9IOService20_RESERVEDIOService26Ev +__ZN9IOService20_RESERVEDIOService27Ev +__ZN9IOService20_RESERVEDIOService28Ev +__ZN9IOService20_RESERVEDIOService29Ev +__ZN9IOService20_RESERVEDIOService30Ev +__ZN9IOService20_RESERVEDIOService31Ev +__ZN9IOService20_RESERVEDIOService32Ev +__ZN9IOService20_RESERVEDIOService33Ev 
+__ZN9IOService20_RESERVEDIOService34Ev +__ZN9IOService20_RESERVEDIOService35Ev +__ZN9IOService20_RESERVEDIOService36Ev +__ZN9IOService20_RESERVEDIOService37Ev +__ZN9IOService20_RESERVEDIOService38Ev +__ZN9IOService20_RESERVEDIOService39Ev +__ZN9IOService20_RESERVEDIOService40Ev +__ZN9IOService20_RESERVEDIOService41Ev +__ZN9IOService20_RESERVEDIOService42Ev +__ZN9IOService20_RESERVEDIOService43Ev +__ZN9IOService20_RESERVEDIOService44Ev +__ZN9IOService20_RESERVEDIOService45Ev +__ZN9IOService20_RESERVEDIOService46Ev +__ZN9IOService20_RESERVEDIOService47Ev +__ZN9IOService20_RESERVEDIOService48Ev +__ZN9IOService20_RESERVEDIOService49Ev +__ZN9IOService20_RESERVEDIOService50Ev +__ZN9IOService20_RESERVEDIOService51Ev +__ZN9IOService20_RESERVEDIOService52Ev +__ZN9IOService20_RESERVEDIOService53Ev +__ZN9IOService20_RESERVEDIOService54Ev +__ZN9IOService20_RESERVEDIOService55Ev +__ZN9IOService20_RESERVEDIOService56Ev +__ZN9IOService20_RESERVEDIOService57Ev +__ZN9IOService20_RESERVEDIOService58Ev +__ZN9IOService20_RESERVEDIOService59Ev +__ZN9IOService20_RESERVEDIOService60Ev +__ZN9IOService20_RESERVEDIOService61Ev +__ZN9IOService20_RESERVEDIOService62Ev +__ZN9IOService20_RESERVEDIOService63Ev +__ZN9IOService20callPlatformFunctionEPK8OSSymbolbPvS3_S3_S3_ +__ZN9IOService20callPlatformFunctionEPKcbPvS2_S2_S2_ +__ZN9IOService20getDeviceMemoryCountEv +__ZN9IOService20powerOverrideOffPrivEv +__ZN9IOService20unlockForArbitrationEv +__ZN9IOService21doInstallNotificationEPK8OSSymbolP12OSDictionaryPFbPvS5_PS_ES5_S5_lPP10OSIterator +__ZN9IOService21getClientWithCategoryEPK8OSSymbol +__ZN9IOService21powerStateDidChangeToEmmPS_ +__ZN9IOService21rebuildChildClampBitsEv +__ZN9IOService21temporaryPowerClampOnEv +__ZN9IOService21unregisterAllInterestEv +__ZN9IOService22OurChangeSetPowerStateEv +__ZN9IOService22PM_Clamp_Timer_ExpiredEv +__ZN9IOService22acknowledgePowerChangeEPS_ +__ZN9IOService22changePowerStateToPrivEm +__ZN9IOService22powerDomainDidChangeToEmP17IOPowerConnection +__ZN9IOService22powerStateWillChangeToEmmPS_ +__ZN9IOService23acknowledgeNotificationEPvm +__ZN9IOService23currentPowerConsumptionEv +__ZN9IOService23powerDomainWillChangeToEmP17IOPowerConnection +__ZN9IOService23requestPowerDomainStateEmP17IOPowerConnectionm +__ZN9IOService23scheduleTerminatePhase2Em +__ZN9IOService23syncNotificationHandlerEPvS0_PS_ +__ZN9IOService23tellClientsWithResponseEi +__ZN9IOService24PM_idle_timer_expirationEv +__ZN9IOService24acknowledgeSetPowerStateEv +__ZN9IOService24getDeviceMemoryWithIndexEj +__ZN9IOService24mapDeviceMemoryWithIndexEjm +__ZN9IOService24powerStateForDomainStateEm +__ZN9IOService24registerInterestedDriverEPS_ +__ZN9IOService26add_child_to_active_changeEP17IOPowerConnection +__ZN9IOService26deRegisterInterestedDriverEPS_ +__ZN9IOService26temperatureCriticalForZoneEPS_ +__ZN9IOService27OurChangeWaitForPowerSettleEv +__ZN9IOService27add_driver_to_active_changeEP12IOPMinformee +__ZN9IOService27maxCapabilityForDomainStateEm +__ZN9IOService27serializedAllowPowerChange2Em +__ZN9IOService28serializedCancelPowerChange2Em +__ZN9IOService29OurChangeTellClientsPowerDownEv +__ZN9IOService29ParentUpSetPowerState_DelayedEv +__ZN9IOService31ParentDownSetPowerState_DelayedEv +__ZN9IOService31ParentUpSetPowerState_ImmediateEv +__ZN9IOService31initialPowerStateForDomainStateEm +__ZN9IOService33ParentDownSetPowerState_ImmediateEv +__ZN9IOService33ParentUpWaitForSettleTime_DelayedEv +__ZN9IOService35ParentDownAcknowledgeChange_DelayedEv +__ZN9IOService35ParentUpWaitForSettleTime_ImmediateEv 
+__ZN9IOService36ParentDownWaitForPowerSettle_DelayedEv +__ZN9IOService37OurChangeTellPriorityClientsPowerDownEv +__ZN9IOService38ParentUpAcknowledgePowerChange_DelayedEv +__ZN9IOService41OurChangeNotifyInterestedDriversDidChangeEv +__ZN9IOService42OurChangeNotifyInterestedDriversWillChangeEv +__ZN9IOService46ParentDownTellPriorityClientsPowerDown_DelayedEv +__ZN9IOService48ParentDownTellPriorityClientsPowerDown_ImmediateEv +__ZN9IOService48ParentUpNotifyInterestedDriversDidChange_DelayedEv +__ZN9IOService4freeEv +__ZN9IOService4openEPS_mPv +__ZN9IOService4stopEPS_ +__ZN9IOService50ParentUpNotifyInterestedDriversDidChange_ImmediateEv +__ZN9IOService51ParentDownNotifyInterestedDriversWillChange_DelayedEv +__ZN9IOService53ParentDownNotifyDidChangeAndAcknowledgeChange_DelayedEv +__ZN9IOService53ParentDownNotifyInterestedDriversWillChange_ImmediateEv +__ZN9IOService56ParentDownWaitForPowerSettleAndNotifyDidChange_ImmediateEv +__ZN9IOService5closeEPS_m +__ZN9IOService5probeEPS_Pl +__ZN9IOService5startEPS_ +__ZN9IOService6PMfreeEv +__ZN9IOService6PMinitEv +__ZN9IOService6PMstopEv +__ZN9IOService6attachEPS_ +__ZN9IOService6detachEPS_ +__ZN9IOService6informEP12IOPMinformeeb +__ZN9IOService7messageEmPS_Pv +__ZN9IOService8all_doneEv +__ZN9IOService8finalizeEm +__ZN9IOService9MetaClassC1Ev +__ZN9IOService9MetaClassC2Ev +__ZN9IOService9all_ackedEv +__ZN9IOService9metaClassE +__ZN9IOService9notifyAllEb +__ZN9IOService9resourcesEv +__ZN9IOService9terminateEm +__ZN9IOService9waitQuietEP13mach_timespec +__ZN9IOServiceC1EPK11OSMetaClass +__ZN9IOServiceC1Ev +__ZN9IOServiceC2EPK11OSMetaClass +__ZN9IOServiceC2Ev +__ZN9IOServiceD0Ev +__ZN9IOServiceD2Ev +__ZNK10IOMachPort12getMetaClassEv +__ZNK10IOMachPort9MetaClass5allocEv +__ZNK10IONotifier12getMetaClassEv +__ZNK10IONotifier9MetaClass5allocEv +__ZNK10IOWorkLoop12getMetaClassEv +__ZNK10IOWorkLoop19enableAllInterruptsEv +__ZNK10IOWorkLoop20disableAllInterruptsEv +__ZNK10IOWorkLoop21enableAllEventSourcesEv +__ZNK10IOWorkLoop22disableAllEventSourcesEv +__ZNK10IOWorkLoop6inGateEv +__ZNK10IOWorkLoop8onThreadEv +__ZNK10IOWorkLoop9MetaClass5allocEv +__ZNK10IOWorkLoop9getThreadEv +__ZNK11IOCatalogue12getMetaClassEv +__ZNK11IOCatalogue12unloadModuleEP8OSString +__ZNK11IOCatalogue13serializeDataEmP11OSSerialize +__ZNK11IOCatalogue14isModuleLoadedEP12OSDictionary +__ZNK11IOCatalogue14isModuleLoadedEP8OSString +__ZNK11IOCatalogue14isModuleLoadedEPKc +__ZNK11IOCatalogue18getGenerationCountEv +__ZNK11IOCatalogue9MetaClass5allocEv +__ZNK11IOCatalogue9serializeEP11OSSerialize +__ZNK11IODataQueue12getMetaClassEv +__ZNK11IODataQueue9MetaClass5allocEv +__ZNK11IOMemoryMap12getMetaClassEv +__ZNK11IOMemoryMap9MetaClass5allocEv +__ZNK11IOResources11getWorkLoopEv +__ZNK11IOResources12getMetaClassEv +__ZNK11IOResources9MetaClass5allocEv +__ZNK12IOPMinformee12getMetaClassEv +__ZNK12IOPMinformee9MetaClass5allocEv +__ZNK12IORootParent12getMetaClassEv +__ZNK12IORootParent9MetaClass5allocEv +__ZNK12IOUserClient12getMetaClassEv +__ZNK12IOUserClient9MetaClass5allocEv +__ZNK12_IOMemoryMap12getMetaClassEv +__ZNK12_IOMemoryMap13taggedReleaseEPKv +__ZNK12_IOMemoryMap9MetaClass5allocEv +__ZNK13IOCommandGate12getMetaClassEv +__ZNK13IOCommandGate9MetaClass5allocEv +__ZNK13IOCommandPool12getMetaClassEv +__ZNK13IOCommandPool9MetaClass5allocEv +__ZNK13IOEventSource11getWorkLoopEv +__ZNK13IOEventSource12getMetaClassEv +__ZNK13IOEventSource7getNextEv +__ZNK13IOEventSource8onThreadEv +__ZNK13IOEventSource9MetaClass5allocEv +__ZNK13IOEventSource9getActionEv +__ZNK13IOEventSource9isEnabledEv 
+__ZNK13_IOServiceJob12getMetaClassEv +__ZNK13_IOServiceJob9MetaClass5allocEv +__ZNK14IOCommandQueue12getMetaClassEv +__ZNK14IOCommandQueue9MetaClass5allocEv +__ZNK14IOMemoryCursor12getMetaClassEv +__ZNK14IOMemoryCursor9MetaClass5allocEv +__ZNK14IOPMrootDomain12getMetaClassEv +__ZNK14IOPMrootDomain9MetaClass5allocEv +__ZNK15IOConditionLock12getConditionEv +__ZNK15IOConditionLock12getMetaClassEv +__ZNK15IOConditionLock16getInterruptibleEv +__ZNK15IOConditionLock9MetaClass5allocEv +__ZNK15IOPMPowerSource12getMetaClassEv +__ZNK15IOPMPowerSource9MetaClass5allocEv +__ZNK15IOPanicPlatform12getMetaClassEv +__ZNK15IOPanicPlatform9MetaClass5allocEv +__ZNK15IORegistryEntry11compareNameEP8OSStringPS1_ +__ZNK15IORegistryEntry11getLocationEPK15IORegistryPlane +__ZNK15IORegistryEntry11getPropertyEPK8OSString +__ZNK15IORegistryEntry11getPropertyEPK8OSStringPK15IORegistryPlanem +__ZNK15IORegistryEntry11getPropertyEPK8OSSymbol +__ZNK15IORegistryEntry11getPropertyEPK8OSSymbolPK15IORegistryPlanem +__ZNK15IORegistryEntry11getPropertyEPKc +__ZNK15IORegistryEntry11getPropertyEPKcPK15IORegistryPlanem +__ZNK15IORegistryEntry12compareNamesEP8OSObjectPP8OSString +__ZNK15IORegistryEntry12copyLocationEPK15IORegistryPlane +__ZNK15IORegistryEntry12copyPropertyEPK8OSString +__ZNK15IORegistryEntry12copyPropertyEPK8OSStringPK15IORegistryPlanem +__ZNK15IORegistryEntry12copyPropertyEPK8OSSymbol +__ZNK15IORegistryEntry12copyPropertyEPK8OSSymbolPK15IORegistryPlanem +__ZNK15IORegistryEntry12copyPropertyEPKc +__ZNK15IORegistryEntry12copyPropertyEPKcPK15IORegistryPlanem +__ZNK15IORegistryEntry12getMetaClassEv +__ZNK15IORegistryEntry13getChildEntryEPK15IORegistryPlane +__ZNK15IORegistryEntry14applyToParentsEPFvPS_PvES1_PK15IORegistryPlane +__ZNK15IORegistryEntry14copyChildEntryEPK15IORegistryPlane +__ZNK15IORegistryEntry14getParentEntryEPK15IORegistryPlane +__ZNK15IORegistryEntry15applyToChildrenEPFvPS_PvES1_PK15IORegistryPlane +__ZNK15IORegistryEntry15copyParentEntryEPK15IORegistryPlane +__ZNK15IORegistryEntry16getChildIteratorEPK15IORegistryPlane +__ZNK15IORegistryEntry16getPathComponentEPcPiPK15IORegistryPlane +__ZNK15IORegistryEntry16getPropertyTableEv +__ZNK15IORegistryEntry17getParentIteratorEPK15IORegistryPlane +__ZNK15IORegistryEntry19serializePropertiesEP11OSSerialize +__ZNK15IORegistryEntry20getChildSetReferenceEPK15IORegistryPlane +__ZNK15IORegistryEntry21getParentSetReferenceEPK15IORegistryPlane +__ZNK15IORegistryEntry24dictionaryWithPropertiesEv +__ZNK15IORegistryEntry7getNameEPK15IORegistryPlane +__ZNK15IORegistryEntry7getPathEPcPiPK15IORegistryPlane +__ZNK15IORegistryEntry7inPlaneEPK15IORegistryPlane +__ZNK15IORegistryEntry7isChildEPS_PK15IORegistryPlaneb +__ZNK15IORegistryEntry8copyNameEPK15IORegistryPlane +__ZNK15IORegistryEntry8getDepthEPK15IORegistryPlane +__ZNK15IORegistryEntry8hasAliasEPK15IORegistryPlanePcPi +__ZNK15IORegistryEntry8isParentEPS_PK15IORegistryPlaneb +__ZNK15IORegistryEntry8makeLinkEPS_jPK15IORegistryPlane +__ZNK15IORegistryEntry9MetaClass5allocEv +__ZNK15IORegistryEntry9breakLinkEPS_jPK15IORegistryPlane +__ZNK15IORegistryPlane12getMetaClassEv +__ZNK15IORegistryPlane9MetaClass5allocEv +__ZNK15IORegistryPlane9serializeEP11OSSerialize +__ZNK15IOWatchDogTimer12getMetaClassEv +__ZNK15IOWatchDogTimer9MetaClass5allocEv +__ZNK15_IOConfigThread12getMetaClassEv +__ZNK15_IOConfigThread9MetaClass5allocEv +__ZNK16IOKitDiagnostics12getMetaClassEv +__ZNK16IOKitDiagnostics9MetaClass5allocEv +__ZNK16IOKitDiagnostics9serializeEP11OSSerialize +__ZNK16IOPMPagingPlexus12getMetaClassEv 
+__ZNK16IOPMPagingPlexus9MetaClass5allocEv +__ZNK16IOPMinformeeList12getMetaClassEv +__ZNK16IOPMinformeeList9MetaClass5allocEv +__ZNK16IOPlatformDevice11compareNameEP8OSStringPS1_ +__ZNK16IOPlatformDevice12getMetaClassEv +__ZNK16IOPlatformDevice9MetaClass5allocEv +__ZNK16IOPlatformExpert12getMetaClassEv +__ZNK16IOPlatformExpert14compareNubNameEPK9IOServiceP8OSStringPS4_ +__ZNK16IOPlatformExpert9MetaClass5allocEv +__ZNK16IORangeAllocator12getMetaClassEv +__ZNK16IORangeAllocator9MetaClass5allocEv +__ZNK16IORangeAllocator9serializeEP11OSSerialize +__ZNK17IOBigMemoryCursor12getMetaClassEv +__ZNK17IOBigMemoryCursor9MetaClass5allocEv +__ZNK17IOPowerConnection12getMetaClassEv +__ZNK17IOPowerConnection9MetaClass5allocEv +__ZNK18IODTPlatformExpert12getMetaClassEv +__ZNK18IODTPlatformExpert14compareNubNameEPK9IOServiceP8OSStringPS4_ +__ZNK18IODTPlatformExpert9MetaClass5allocEv +__ZNK18IOMemoryDescriptor12getDirectionEv +__ZNK18IOMemoryDescriptor12getMetaClassEv +__ZNK18IOMemoryDescriptor9MetaClass5allocEv +__ZNK18IOMemoryDescriptor9getLengthEv +__ZNK18IOPMchangeNoteList12getMetaClassEv +__ZNK18IOPMchangeNoteList9MetaClass5allocEv +__ZNK18IORegistryIterator12getMetaClassEv +__ZNK18IORegistryIterator9MetaClass5allocEv +__ZNK18IOTimerEventSource12getMetaClassEv +__ZNK18IOTimerEventSource9MetaClass5allocEv +__ZNK18IOUserNotification12getMetaClassEv +__ZNK18IOUserNotification9MetaClass5allocEv +__ZNK18_IOServiceNotifier12getMetaClassEv +__ZNK18_IOServiceNotifier9MetaClass5allocEv +__ZNK19IOPMPowerSourceList12getMetaClassEv +__ZNK19IOPMPowerSourceList9MetaClass5allocEv +__ZNK19IOPMPowerStateQueue12getMetaClassEv +__ZNK19IOPMPowerStateQueue9MetaClass5allocEv +__ZNK20IOLittleMemoryCursor12getMetaClassEv +__ZNK20IOLittleMemoryCursor9MetaClass5allocEv +__ZNK20RootDomainUserClient12getMetaClassEv +__ZNK20RootDomainUserClient9MetaClass5allocEv +__ZNK21IOInterruptController12getMetaClassEv +__ZNK21IOInterruptController9MetaClass5allocEv +__ZNK21IONaturalMemoryCursor12getMetaClassEv +__ZNK21IONaturalMemoryCursor9MetaClass5allocEv +__ZNK21IOSubMemoryDescriptor12getMetaClassEv +__ZNK21IOSubMemoryDescriptor9MetaClass5allocEv +__ZNK21IOSubMemoryDescriptor9serializeEP11OSSerialize +__ZNK22IOInterruptEventSource11getIntIndexEv +__ZNK22IOInterruptEventSource11getProviderEv +__ZNK22IOInterruptEventSource12getMetaClassEv +__ZNK22IOInterruptEventSource14getAutoDisableEv +__ZNK22IOInterruptEventSource9MetaClass5allocEv +__ZNK22IOPlatformExpertDevice11compareNameEP8OSStringPS1_ +__ZNK22IOPlatformExpertDevice11getWorkLoopEv +__ZNK22IOPlatformExpertDevice12getMetaClassEv +__ZNK22IOPlatformExpertDevice9MetaClass5allocEv +__ZNK22_IOOpenServiceIterator12getMetaClassEv +__ZNK22_IOOpenServiceIterator9MetaClass5allocEv +__ZNK23IOMultiMemoryDescriptor12getMetaClassEv +__ZNK23IOMultiMemoryDescriptor9MetaClass5allocEv +__ZNK24IOBufferMemoryDescriptor11getCapacityEv +__ZNK24IOBufferMemoryDescriptor12getMetaClassEv +__ZNK24IOBufferMemoryDescriptor9MetaClass5allocEv +__ZNK24IOCPUInterruptController12getMetaClassEv +__ZNK24IOCPUInterruptController9MetaClass5allocEv +__ZNK25IOGeneralMemoryDescriptor12getMetaClassEv +__ZNK25IOGeneralMemoryDescriptor9MetaClass5allocEv +__ZNK25IOGeneralMemoryDescriptor9serializeEP11OSSerialize +__ZNK25IOServiceUserNotification12getMetaClassEv +__ZNK25IOServiceUserNotification9MetaClass5allocEv +__ZNK26_IOServiceInterestNotifier12getMetaClassEv +__ZNK26_IOServiceInterestNotifier9MetaClass5allocEv +__ZNK27IOSharedInterruptController12getMetaClassEv +__ZNK27IOSharedInterruptController9MetaClass5allocEv 
+__ZNK28IOFilterInterruptEventSource12getMetaClassEv +__ZNK28IOFilterInterruptEventSource15getFilterActionEv +__ZNK28IOFilterInterruptEventSource9MetaClass5allocEv +__ZNK32IOServiceMessageUserNotification12getMetaClassEv +__ZNK32IOServiceMessageUserNotification9MetaClass5allocEv +__ZNK5IOCPU12getMetaClassEv +__ZNK5IOCPU9MetaClass5allocEv +__ZNK8IOMapper12getMetaClassEv +__ZNK8IOMapper9MetaClass5allocEv +__ZNK8IOPMpriv12getMetaClassEv +__ZNK8IOPMpriv9MetaClass5allocEv +__ZNK8IOPMpriv9serializeEP11OSSerialize +__ZNK8IOPMprot12getMetaClassEv +__ZNK8IOPMprot9MetaClass5allocEv +__ZNK8IOPMprot9serializeEP11OSSerialize +__ZNK8IOSyncer12getMetaClassEv +__ZNK8IOSyncer9MetaClass5allocEv +__ZNK9IOCommand12getMetaClassEv +__ZNK9IOCommand9MetaClass5allocEv +__ZNK9IODTNVRAM11getPropertyEPK8OSSymbol +__ZNK9IODTNVRAM11getPropertyEPKc +__ZNK9IODTNVRAM12getMetaClassEv +__ZNK9IODTNVRAM17getOFVariablePermEPK8OSSymbol +__ZNK9IODTNVRAM17getOFVariableTypeEPK8OSSymbol +__ZNK9IODTNVRAM19serializePropertiesEP11OSSerialize +__ZNK9IODTNVRAM9MetaClass5allocEv +__ZNK9IOService10isInactiveEv +__ZNK9IOService11getProviderEv +__ZNK9IOService11getWorkLoopEv +__ZNK9IOService12getMetaClassEv +__ZNK9IOService12handleIsOpenEPKS_ +__ZNK9IOService17getClientIteratorEv +__ZNK9IOService19getProviderIteratorEv +__ZNK9IOService19serializePropertiesEP11OSSerialize +__ZNK9IOService21getOpenClientIteratorEv +__ZNK9IOService23getOpenProviderIteratorEv +__ZNK9IOService6isOpenEPKS_ +__ZNK9IOService8getStateEv +__ZNK9IOService9MetaClass5allocEv +__ZNK9IOService9getClientEv +__ZTV10IOMachPort +__ZTV10IONotifier +__ZTV10IOWorkLoop +__ZTV11IOCatalogue +__ZTV11IODataQueue +__ZTV11IOMemoryMap +__ZTV11IOResources +__ZTV12IOPMinformee +__ZTV12IORootParent +__ZTV12IOUserClient +__ZTV12_IOMemoryMap +__ZTV13IOCommandGate +__ZTV13IOCommandPool +__ZTV13IOEventSource +__ZTV13_IOServiceJob +__ZTV14IOCommandQueue +__ZTV14IOMemoryCursor +__ZTV14IOPMrootDomain +__ZTV15IOConditionLock +__ZTV15IOPMPowerSource +__ZTV15IOPanicPlatform +__ZTV15IORegistryEntry +__ZTV15IORegistryPlane +__ZTV15IOWatchDogTimer +__ZTV15_IOConfigThread +__ZTV16IOKitDiagnostics +__ZTV16IOPMPagingPlexus +__ZTV16IOPMinformeeList +__ZTV16IOPlatformDevice +__ZTV16IOPlatformExpert +__ZTV16IORangeAllocator +__ZTV17IOBigMemoryCursor +__ZTV17IOPowerConnection +__ZTV18IODTPlatformExpert +__ZTV18IOMemoryDescriptor +__ZTV18IOPMchangeNoteList +__ZTV18IORegistryIterator +__ZTV18IOTimerEventSource +__ZTV18IOUserNotification +__ZTV18_IOServiceNotifier +__ZTV19IOPMPowerSourceList +__ZTV19IOPMPowerStateQueue +__ZTV20IOLittleMemoryCursor +__ZTV20RootDomainUserClient +__ZTV21IOInterruptController +__ZTV21IONaturalMemoryCursor +__ZTV21IOSubMemoryDescriptor +__ZTV22IOInterruptEventSource +__ZTV22IOPlatformExpertDevice +__ZTV22_IOOpenServiceIterator +__ZTV23IOMultiMemoryDescriptor +__ZTV24IOBufferMemoryDescriptor +__ZTV24IOCPUInterruptController +__ZTV25IOGeneralMemoryDescriptor +__ZTV25IOServiceUserNotification +__ZTV26_IOServiceInterestNotifier +__ZTV27IOSharedInterruptController +__ZTV28IOFilterInterruptEventSource +__ZTV32IOServiceMessageUserNotification +__ZTV5IOCPU +__ZTV8IOMapper +__ZTV8IOPMpriv +__ZTV8IOPMprot +__ZTV8IOSyncer +__ZTV9IOCommand +__ZTV9IODTNVRAM +__ZTV9IOService +__ZTVN10IOMachPort9MetaClassE +__ZTVN10IONotifier9MetaClassE +__ZTVN10IOWorkLoop9MetaClassE +__ZTVN11IOCatalogue9MetaClassE +__ZTVN11IODataQueue9MetaClassE +__ZTVN11IOMemoryMap9MetaClassE +__ZTVN11IOResources9MetaClassE +__ZTVN12IOPMinformee9MetaClassE +__ZTVN12IORootParent9MetaClassE +__ZTVN12IOUserClient9MetaClassE 
+__ZTVN12_IOMemoryMap9MetaClassE +__ZTVN13IOCommandGate9MetaClassE +__ZTVN13IOCommandPool9MetaClassE +__ZTVN13IOEventSource9MetaClassE +__ZTVN13_IOServiceJob9MetaClassE +__ZTVN14IOCommandQueue9MetaClassE +__ZTVN14IOMemoryCursor9MetaClassE +__ZTVN14IOPMrootDomain9MetaClassE +__ZTVN15IOConditionLock9MetaClassE +__ZTVN15IOPMPowerSource9MetaClassE +__ZTVN15IOPanicPlatform9MetaClassE +__ZTVN15IORegistryEntry9MetaClassE +__ZTVN15IORegistryPlane9MetaClassE +__ZTVN15IOWatchDogTimer9MetaClassE +__ZTVN15_IOConfigThread9MetaClassE +__ZTVN16IOKitDiagnostics9MetaClassE +__ZTVN16IOPMPagingPlexus9MetaClassE +__ZTVN16IOPMinformeeList9MetaClassE +__ZTVN16IOPlatformDevice9MetaClassE +__ZTVN16IOPlatformExpert9MetaClassE +__ZTVN16IORangeAllocator9MetaClassE +__ZTVN17IOBigMemoryCursor9MetaClassE +__ZTVN17IOPowerConnection9MetaClassE +__ZTVN18IODTPlatformExpert9MetaClassE +__ZTVN18IOMemoryDescriptor9MetaClassE +__ZTVN18IOPMchangeNoteList9MetaClassE +__ZTVN18IORegistryIterator9MetaClassE +__ZTVN18IOTimerEventSource9MetaClassE +__ZTVN18IOUserNotification9MetaClassE +__ZTVN18_IOServiceNotifier9MetaClassE +__ZTVN19IOPMPowerSourceList9MetaClassE +__ZTVN19IOPMPowerStateQueue9MetaClassE +__ZTVN20IOLittleMemoryCursor9MetaClassE +__ZTVN20RootDomainUserClient9MetaClassE +__ZTVN21IOInterruptController9MetaClassE +__ZTVN21IONaturalMemoryCursor9MetaClassE +__ZTVN21IOSubMemoryDescriptor9MetaClassE +__ZTVN22IOInterruptEventSource9MetaClassE +__ZTVN22IOPlatformExpertDevice9MetaClassE +__ZTVN22_IOOpenServiceIterator9MetaClassE +__ZTVN23IOMultiMemoryDescriptor9MetaClassE +__ZTVN24IOBufferMemoryDescriptor9MetaClassE +__ZTVN24IOCPUInterruptController9MetaClassE +__ZTVN25IOGeneralMemoryDescriptor9MetaClassE +__ZTVN25IOServiceUserNotification9MetaClassE +__ZTVN26_IOServiceInterestNotifier9MetaClassE +__ZTVN27IOSharedInterruptController9MetaClassE +__ZTVN28IOFilterInterruptEventSource9MetaClassE +__ZTVN32IOServiceMessageUserNotification9MetaClassE +__ZTVN5IOCPU9MetaClassE +__ZTVN8IOMapper9MetaClassE +__ZTVN8IOPMpriv9MetaClassE +__ZTVN8IOPMprot9MetaClassE +__ZTVN8IOSyncer9MetaClassE +__ZTVN9IOCommand9MetaClassE +__ZTVN9IODTNVRAM9MetaClassE +__ZTVN9IOService9MetaClassE +__giDebugLogDataInternal +__giDebugLogInternal +__giDebugReserved1 +__giDebugReserved2 +_acknowledgeSleepWakeNotification +_add_from_mkext_function +_db_dumpiojunk +_db_piokjunk +_debug_container_malloc_size +_debug_iomalloc_size +_debug_malloc_size +_device_close +_device_data_action +_di_root_image +_ev_try_lock +_ev_unlock +_gIOAppPowerStateInterest +_gIOBusyInterest +_gIOClassKey +_gIOCommandPoolSizeKey +_gIODTAAPLInterruptsKey +_gIODTAddressCellKey +_gIODTCompatibleKey +_gIODTDefaultInterruptController +_gIODTInterruptCellKey +_gIODTInterruptParentKey +_gIODTModelKey +_gIODTNWInterruptMappingKey +_gIODTNameKey +_gIODTPHandleKey +_gIODTPersistKey +_gIODTPlane +_gIODTRangeKey +_gIODTSizeCellKey +_gIODTTypeKey +_gIODTUnitKey +_gIODefaultMatchCategoryKey +_gIODeviceMemoryKey +_gIOFirstMatchNotification +_gIOFirstPublishNotification +_gIOGeneralInterest +_gIOInterruptControllersKey +_gIOInterruptSpecifiersKey +_gIOKLDLock +_gIOKernelConfigTables +_gIOKernelKmods +_gIOKitDebug +_gIOKitDebugKey +_gIOLocationKey +_gIOLocationMatchKey +_gIOMatchCategoryKey +_gIOMatchedNotification +_gIOMatchedServiceCountKey +_gIOModuleIdentifierKey +_gIONameKey +_gIONameMatchKey +_gIONameMatchedKey +_gIOParentMatchKey +_gIOPathMatchKey +_gIOPowerPlane +_gIOPrelinkedModules +_gIOPriorityPowerStateInterest +_gIOProbeScoreKey +_gIOPropertyMatchKey +_gIOProviderClassKey 
+_gIOPublishNotification +_gIORangeAllocatorLock +_gIOResourceMatchKey +_gIOResourcesKey +_gIOServiceKey +_gIOServicePlane +_gIOTerminatedNotification +_gIOUserClientClassKey +_gOFVariables +_gPlatformInterruptControllerName
diff --git a/config/IOKit.i386.exports b/config/IOKit.i386.exports
new file mode 100644
index 000000000..e69de29bb
diff --git a/config/IOKit.ppc.exports b/config/IOKit.ppc.exports
new file mode 100644
index 000000000..fcfff58c6
--- /dev/null
+++ b/config/IOKit.ppc.exports
@@ -0,0 +1,184 @@
+_CallTVector +__Z11IODBDMAStopPV23IODBDMAChannelRegisters +__Z12IODBDMAFlushPV23IODBDMAChannelRegisters +__Z12IODBDMAPausePV23IODBDMAChannelRegisters +__Z12IODBDMAResetPV23IODBDMAChannelRegisters +__Z12IODBDMAStartPV23IODBDMAChannelRegistersPV17IODBDMADescriptor +__Z14RootRegisteredP8OSObjectPvP9IOService +__Z15IODBDMAContinuePV23IODBDMAChannelRegisters +__Z32IOFreePhysicallyContiguousMemoryPjj +__Z36IOAllocatePhysicallyContiguousMemoryjjPjPm +__ZN10AppleMacIO10deleteListEv +__ZN10AppleMacIO10gMetaClassE +__ZN10AppleMacIO10processNubEP9IOService +__ZN10AppleMacIO10superClassE +__ZN10AppleMacIO11excludeListEv +__ZN10AppleMacIO12publishBelowEP15IORegistryEntry +__ZN10AppleMacIO15getNubResourcesEP9IOService +__ZN10AppleMacIO20_RESERVEDAppleMacIO0Ev +__ZN10AppleMacIO20_RESERVEDAppleMacIO1Ev +__ZN10AppleMacIO20_RESERVEDAppleMacIO2Ev +__ZN10AppleMacIO20_RESERVEDAppleMacIO3Ev +__ZN10AppleMacIO5startEP9IOService +__ZN10AppleMacIO8selfTestEv +__ZN10AppleMacIO9MetaClassC1Ev +__ZN10AppleMacIO9MetaClassC2Ev +__ZN10AppleMacIO9createNubEP15IORegistryEntry +__ZN10AppleMacIO9metaClassE +__ZN10AppleMacIOC1EPK11OSMetaClass +__ZN10AppleMacIOC2EPK11OSMetaClass +__ZN10AppleMacIOD0Ev +__ZN10AppleMacIOD2Ev +__ZN10AppleNVRAM10gMetaClassE +__ZN10AppleNVRAM10superClassE +__ZN10AppleNVRAM4readEmPhm +__ZN10AppleNVRAM5startEP9IOService +__ZN10AppleNVRAM5writeEmPhm +__ZN10AppleNVRAM9MetaClassC1Ev +__ZN10AppleNVRAM9MetaClassC2Ev +__ZN10AppleNVRAM9metaClassE +__ZN10AppleNVRAMC1EPK11OSMetaClass +__ZN10AppleNVRAMC1Ev +__ZN10AppleNVRAMC2EPK11OSMetaClass +__ZN10AppleNVRAMC2Ev +__ZN10AppleNVRAMD0Ev +__ZN10AppleNVRAMD2Ev +__ZN16AppleMacIODevice10gMetaClassE +__ZN16AppleMacIODevice10superClassE +__ZN16AppleMacIODevice12getResourcesEv +__ZN16AppleMacIODevice13matchLocationEP9IOService +__ZN16AppleMacIODevice26_RESERVEDAppleMacIODevice0Ev +__ZN16AppleMacIODevice26_RESERVEDAppleMacIODevice1Ev +__ZN16AppleMacIODevice26_RESERVEDAppleMacIODevice2Ev +__ZN16AppleMacIODevice26_RESERVEDAppleMacIODevice3Ev +__ZN16AppleMacIODevice9MetaClassC1Ev +__ZN16AppleMacIODevice9MetaClassC2Ev +__ZN16AppleMacIODevice9metaClassE +__ZN16AppleMacIODeviceC1EPK11OSMetaClass +__ZN16AppleMacIODeviceC1Ev +__ZN16AppleMacIODeviceC2EPK11OSMetaClass +__ZN16AppleMacIODeviceC2Ev +__ZN16AppleMacIODeviceD0Ev +__ZN16AppleMacIODeviceD2Ev +__ZN17IONVRAMController10gMetaClassE +__ZN17IONVRAMController10superClassE +__ZN17IONVRAMController4syncEv +__ZN17IONVRAMController5startEP9IOService +__ZN17IONVRAMController9MetaClassC1Ev +__ZN17IONVRAMController9MetaClassC2Ev +__ZN17IONVRAMController9metaClassE +__ZN17IONVRAMControllerC1EPK11OSMetaClass +__ZN17IONVRAMControllerC2EPK11OSMetaClass +__ZN17IONVRAMControllerD0Ev +__ZN17IONVRAMControllerD2Ev +__ZN19ApplePlatformExpert10deleteListEv +__ZN19ApplePlatformExpert10gMetaClassE +__ZN19ApplePlatformExpert10superClassE +__ZN19ApplePlatformExpert11excludeListEv +__ZN19ApplePlatformExpert14getMachineNameEPci +__ZN19ApplePlatformExpert15getGMTTimeOfDayEv +__ZN19ApplePlatformExpert15setGMTTimeOfDayEl
+__ZN19ApplePlatformExpert23registerNVRAMControllerEP17IONVRAMController +__ZN19ApplePlatformExpert29_RESERVEDApplePlatformExpert0Ev +__ZN19ApplePlatformExpert29_RESERVEDApplePlatformExpert1Ev +__ZN19ApplePlatformExpert29_RESERVEDApplePlatformExpert2Ev +__ZN19ApplePlatformExpert29_RESERVEDApplePlatformExpert3Ev +__ZN19ApplePlatformExpert5startEP9IOService +__ZN19ApplePlatformExpert9MetaClassC1Ev +__ZN19ApplePlatformExpert9MetaClassC2Ev +__ZN19ApplePlatformExpert9configureEP9IOService +__ZN19ApplePlatformExpert9metaClassE +__ZN19ApplePlatformExpertC1EPK11OSMetaClass +__ZN19ApplePlatformExpertC2EPK11OSMetaClass +__ZN19ApplePlatformExpertD0Ev +__ZN19ApplePlatformExpertD2Ev +__ZN19IODBDMAMemoryCursor10gMetaClassE +__ZN19IODBDMAMemoryCursor10superClassE +__ZN19IODBDMAMemoryCursor13outputSegmentEN14IOMemoryCursor15PhysicalSegmentEPvm +__ZN19IODBDMAMemoryCursor17withSpecificationEmmm +__ZN19IODBDMAMemoryCursor21initWithSpecificationEmmm +__ZN19IODBDMAMemoryCursor9MetaClassC1Ev +__ZN19IODBDMAMemoryCursor9MetaClassC2Ev +__ZN19IODBDMAMemoryCursor9metaClassE +__ZN19IODBDMAMemoryCursorC1EPK11OSMetaClass +__ZN19IODBDMAMemoryCursorC1Ev +__ZN19IODBDMAMemoryCursorC2EPK11OSMetaClass +__ZN19IODBDMAMemoryCursorC2Ev +__ZN19IODBDMAMemoryCursorD0Ev +__ZN19IODBDMAMemoryCursorD2Ev +__ZN8AppleCPU10gMetaClassE +__ZN8AppleCPU10getCPUNameEv +__ZN8AppleCPU10quiesceCPUEv +__ZN8AppleCPU10superClassE +__ZN8AppleCPU5startEP9IOService +__ZN8AppleCPU7haltCPUEv +__ZN8AppleCPU7initCPUEb +__ZN8AppleCPU8startCPUEjj +__ZN8AppleCPU9MetaClassC1Ev +__ZN8AppleCPU9MetaClassC2Ev +__ZN8AppleCPU9metaClassE +__ZN8AppleCPUC1EPK11OSMetaClass +__ZN8AppleCPUC1Ev +__ZN8AppleCPUC2EPK11OSMetaClass +__ZN8AppleCPUC2Ev +__ZN8AppleCPUD0Ev +__ZN8AppleCPUD2Ev +__ZN8AppleNMI10gMetaClassE +__ZN8AppleNMI10superClassE +__ZN8AppleNMI15handleInterruptEPvP9IOServicei +__ZN8AppleNMI18_RESERVEDAppleNMI0Ev +__ZN8AppleNMI18_RESERVEDAppleNMI1Ev +__ZN8AppleNMI18_RESERVEDAppleNMI2Ev +__ZN8AppleNMI18_RESERVEDAppleNMI3Ev +__ZN8AppleNMI22powerStateWillChangeToEmmP9IOService +__ZN8AppleNMI5startEP9IOService +__ZN8AppleNMI7initNMIEP21IOInterruptControllerP6OSData +__ZN8AppleNMI9MetaClassC1Ev +__ZN8AppleNMI9MetaClassC2Ev +__ZN8AppleNMI9metaClassE +__ZN8AppleNMIC1EPK11OSMetaClass +__ZN8AppleNMIC1Ev +__ZN8AppleNMIC2EPK11OSMetaClass +__ZN8AppleNMIC2Ev +__ZN8AppleNMID0Ev +__ZN8AppleNMID2Ev +__ZNK10AppleMacIO12getMetaClassEv +__ZNK10AppleMacIO14compareNubNameEPK9IOServiceP8OSStringPS4_ +__ZNK10AppleMacIO9MetaClass5allocEv +__ZNK10AppleNVRAM12getMetaClassEv +__ZNK10AppleNVRAM9MetaClass5allocEv +__ZNK16AppleMacIODevice11compareNameEP8OSStringPS1_ +__ZNK16AppleMacIODevice12getMetaClassEv +__ZNK16AppleMacIODevice9MetaClass5allocEv +__ZNK17IONVRAMController12getMetaClassEv +__ZNK17IONVRAMController9MetaClass5allocEv +__ZNK19ApplePlatformExpert12getMetaClassEv +__ZNK19ApplePlatformExpert9MetaClass5allocEv +__ZNK19IODBDMAMemoryCursor12getMetaClassEv +__ZNK19IODBDMAMemoryCursor9MetaClass5allocEv +__ZNK8AppleCPU12getMetaClassEv +__ZNK8AppleCPU9MetaClass5allocEv +__ZNK8AppleNMI12getMetaClassEv +__ZNK8AppleNMI9MetaClass5allocEv +__ZTV10AppleMacIO +__ZTV10AppleNVRAM +__ZTV16AppleMacIODevice +__ZTV17IONVRAMController +__ZTV19ApplePlatformExpert +__ZTV19IODBDMAMemoryCursor +__ZTV8AppleCPU +__ZTV8AppleNMI +__ZTVN10AppleMacIO9MetaClassE +__ZTVN10AppleNVRAM9MetaClassE +__ZTVN16AppleMacIODevice9MetaClassE +__ZTVN17IONVRAMController9MetaClassE +__ZTVN19ApplePlatformExpert9MetaClassE +__ZTVN19IODBDMAMemoryCursor9MetaClassE +__ZTVN8AppleCPU9MetaClassE +__ZTVN8AppleNMI9MetaClassE 
+__eSynchronizeIO +_gGetDefaultBusSpeedsKey +_PE_Determine_Clock_Speeds +_PE_read_write_time_of_day +_PE_write_IIC
+
diff --git a/config/Libkern.exports b/config/Libkern.exports
new file mode 100644
index 000000000..263d70407
--- /dev/null
+++ b/config/Libkern.exports
@@ -0,0 +1,746 @@
+_OSAddAtomic +_OSAddAtomic16 +_OSAddAtomic8 +_OSBitAndAtomic +_OSBitAndAtomic16 +_OSBitAndAtomic8 +_OSBitOrAtomic +_OSBitOrAtomic16 +_OSBitOrAtomic8 +_OSBitXorAtomic +_OSBitXorAtomic16 +_OSBitXorAtomic8 +_OSCompareAndSwap +_OSDecrementAtomic +_OSDecrementAtomic16 +_OSDecrementAtomic8 +_OSDequeueAtomic +_OSEnqueueAtomic +_OSIncrementAtomic +_OSIncrementAtomic16 +_OSIncrementAtomic8 +_OSRuntimeFinalizeCPP +_OSRuntimeInitializeCPP +_OSRuntimeUnloadCPP +_OSRuntimeUnloadCPPForSegment +_OSTestAndClear +_OSTestAndSet +_OSUnserializechar +_OSUnserializelval +_OSUnserializenerrs +_OSlibkernInit +__Z13OSUnserializePKcPP8OSString +__Z16OSUnserializeXMLPKcPP8OSString +__ZN10OSIterator10gMetaClassE +__ZN10OSIterator10superClassE +__ZN10OSIterator20_RESERVEDOSIterator0Ev +__ZN10OSIterator20_RESERVEDOSIterator1Ev +__ZN10OSIterator20_RESERVEDOSIterator2Ev +__ZN10OSIterator20_RESERVEDOSIterator3Ev +__ZN10OSIterator9MetaClassC1Ev +__ZN10OSIterator9MetaClassC2Ev +__ZN10OSIterator9metaClassE +__ZN10OSIteratorC1EPK11OSMetaClass +__ZN10OSIteratorC2EPK11OSMetaClass +__ZN10OSIteratorD0Ev +__ZN10OSIteratorD2Ev +__ZN11OSMetaClass10preModLoadEPKc +__ZN11OSMetaClass11postModLoadEPv +__ZN11OSMetaClass12checkModLoadEPv +__ZN11OSMetaClass14modHasInstanceEPKc +__ZN11OSMetaClass15considerUnloadsEv +__ZN11OSMetaClass18allocClassWithNameEPK8OSString +__ZN11OSMetaClass18allocClassWithNameEPK8OSSymbol +__ZN11OSMetaClass18allocClassWithNameEPKc +__ZN11OSMetaClass18getClassDictionaryEv +__ZN11OSMetaClass18reportModInstancesEPKc +__ZN11OSMetaClass19printInstanceCountsEv +__ZN11OSMetaClass20getMetaClassWithNameEPK8OSSymbol +__ZN11OSMetaClass21_RESERVEDOSMetaClass0Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass1Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass2Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass3Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass4Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass5Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass6Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass7Ev +__ZN11OSMetaClass21checkMetaCastWithNameEPK8OSStringPK15OSMetaClassBase +__ZN11OSMetaClass21checkMetaCastWithNameEPK8OSSymbolPK15OSMetaClassBase +__ZN11OSMetaClass21checkMetaCastWithNameEPKcPK15OSMetaClassBase +__ZN11OSMetaClass24serializeClassDictionaryEP12OSDictionary +__ZN11OSMetaClass8logErrorEi +__ZN11OSMetaClass9metaClassE +__ZN11OSMetaClassC1EPKcPKS_j +__ZN11OSMetaClassC2EPKcPKS_j +__ZN11OSMetaClassD0Ev +__ZN11OSMetaClassD2Ev +__ZN11OSMetaClassdlEPvm +__ZN11OSMetaClassnwEm +__ZN11OSSerialize10gMetaClassE +__ZN11OSSerialize10superClassE +__ZN11OSSerialize12addXMLEndTagEPKc +__ZN11OSSerialize12withCapacityEj +__ZN11OSSerialize14addXMLStartTagEPK15OSMetaClassBasePKc +__ZN11OSSerialize14ensureCapacityEj +__ZN11OSSerialize16initWithCapacityEj +__ZN11OSSerialize20previouslySerializedEPK15OSMetaClassBase +__ZN11OSSerialize20setCapacityIncrementEj +__ZN11OSSerialize21_RESERVEDOSSerialize0Ev +__ZN11OSSerialize21_RESERVEDOSSerialize1Ev +__ZN11OSSerialize21_RESERVEDOSSerialize2Ev +__ZN11OSSerialize21_RESERVEDOSSerialize3Ev +__ZN11OSSerialize21_RESERVEDOSSerialize4Ev +__ZN11OSSerialize21_RESERVEDOSSerialize5Ev +__ZN11OSSerialize21_RESERVEDOSSerialize6Ev +__ZN11OSSerialize21_RESERVEDOSSerialize7Ev +__ZN11OSSerialize4freeEv +__ZN11OSSerialize7addCharEc +__ZN11OSSerialize9MetaClassC1Ev
+__ZN11OSSerialize9MetaClassC2Ev +__ZN11OSSerialize9addStringEPKc +__ZN11OSSerialize9clearTextEv +__ZN11OSSerialize9metaClassE +__ZN11OSSerializeC1EPK11OSMetaClass +__ZN11OSSerializeC1Ev +__ZN11OSSerializeC2EPK11OSMetaClass +__ZN11OSSerializeC2Ev +__ZN11OSSerializeD0Ev +__ZN11OSSerializeD2Ev +__ZN12OSCollection10gMetaClassE +__ZN12OSCollection10superClassE +__ZN12OSCollection22_RESERVEDOSCollection0Ev +__ZN12OSCollection22_RESERVEDOSCollection1Ev +__ZN12OSCollection22_RESERVEDOSCollection2Ev +__ZN12OSCollection22_RESERVEDOSCollection3Ev +__ZN12OSCollection22_RESERVEDOSCollection4Ev +__ZN12OSCollection22_RESERVEDOSCollection5Ev +__ZN12OSCollection22_RESERVEDOSCollection6Ev +__ZN12OSCollection22_RESERVEDOSCollection7Ev +__ZN12OSCollection4initEv +__ZN12OSCollection9MetaClassC1Ev +__ZN12OSCollection9MetaClassC2Ev +__ZN12OSCollection9metaClassE +__ZN12OSCollectionC1EPK11OSMetaClass +__ZN12OSCollectionC2EPK11OSMetaClass +__ZN12OSCollectionD0Ev +__ZN12OSCollectionD2Ev +__ZN12OSDictionary10gMetaClassE +__ZN12OSDictionary10superClassE +__ZN12OSDictionary11withObjectsEPPK8OSObjectPPK8OSStringjj +__ZN12OSDictionary11withObjectsEPPK8OSObjectPPK8OSSymboljj +__ZN12OSDictionary12removeObjectEPK8OSString +__ZN12OSDictionary12removeObjectEPK8OSSymbol +__ZN12OSDictionary12removeObjectEPKc +__ZN12OSDictionary12withCapacityEj +__ZN12OSDictionary14ensureCapacityEj +__ZN12OSDictionary14withDictionaryEPKS_j +__ZN12OSDictionary15flushCollectionEv +__ZN12OSDictionary15initWithObjectsEPPK8OSObjectPPK8OSStringjj +__ZN12OSDictionary15initWithObjectsEPPK8OSObjectPPK8OSSymboljj +__ZN12OSDictionary16initWithCapacityEj +__ZN12OSDictionary18initWithDictionaryEPKS_j +__ZN12OSDictionary20setCapacityIncrementEj +__ZN12OSDictionary22_RESERVEDOSDictionary0Ev +__ZN12OSDictionary22_RESERVEDOSDictionary1Ev +__ZN12OSDictionary22_RESERVEDOSDictionary2Ev +__ZN12OSDictionary22_RESERVEDOSDictionary3Ev +__ZN12OSDictionary22_RESERVEDOSDictionary4Ev +__ZN12OSDictionary22_RESERVEDOSDictionary5Ev +__ZN12OSDictionary22_RESERVEDOSDictionary6Ev +__ZN12OSDictionary22_RESERVEDOSDictionary7Ev +__ZN12OSDictionary4freeEv +__ZN12OSDictionary5mergeEPKS_ +__ZN12OSDictionary9MetaClassC1Ev +__ZN12OSDictionary9MetaClassC2Ev +__ZN12OSDictionary9metaClassE +__ZN12OSDictionary9setObjectEPK8OSStringPK15OSMetaClassBase +__ZN12OSDictionary9setObjectEPK8OSSymbolPK15OSMetaClassBase +__ZN12OSDictionary9setObjectEPKcPK15OSMetaClassBase +__ZN12OSDictionaryC1EPK11OSMetaClass +__ZN12OSDictionaryC1Ev +__ZN12OSDictionaryC2EPK11OSMetaClass +__ZN12OSDictionaryC2Ev +__ZN12OSDictionaryD0Ev +__ZN12OSDictionaryD2Ev +__ZN12OSOrderedSet10gMetaClassE +__ZN12OSOrderedSet10superClassE +__ZN12OSOrderedSet11orderObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet12removeObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet12withCapacityEjPFlPK15OSMetaClassBaseS2_PvES3_ +__ZN12OSOrderedSet13setLastObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet14ensureCapacityEj +__ZN12OSOrderedSet14getOrderingRefEv +__ZN12OSOrderedSet14setFirstObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet15flushCollectionEv +__ZN12OSOrderedSet16initWithCapacityEjPFlPK15OSMetaClassBaseS2_PvES3_ +__ZN12OSOrderedSet20setCapacityIncrementEj +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet0Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet1Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet2Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet3Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet4Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet5Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet6Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet7Ev +__ZN12OSOrderedSet4freeEv 
+__ZN12OSOrderedSet9MetaClassC1Ev +__ZN12OSOrderedSet9MetaClassC2Ev +__ZN12OSOrderedSet9metaClassE +__ZN12OSOrderedSet9setObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet9setObjectEjPK15OSMetaClassBase +__ZN12OSOrderedSetC1EPK11OSMetaClass +__ZN12OSOrderedSetC1Ev +__ZN12OSOrderedSetC2EPK11OSMetaClass +__ZN12OSOrderedSetC2Ev +__ZN12OSOrderedSetD0Ev +__ZN12OSOrderedSetD2Ev +__ZN12OSSerializer10gMetaClassE +__ZN12OSSerializer10superClassE +__ZN12OSSerializer9MetaClassC1Ev +__ZN12OSSerializer9MetaClassC2Ev +__ZN12OSSerializer9forTargetEPvPFbS0_S0_P11OSSerializeES0_ +__ZN12OSSerializer9metaClassE +__ZN12OSSerializerC1EPK11OSMetaClass +__ZN12OSSerializerC1Ev +__ZN12OSSerializerC2EPK11OSMetaClass +__ZN12OSSerializerC2Ev +__ZN12OSSerializerD0Ev +__ZN12OSSerializerD2Ev +__ZN12OSSymbolPool12insertSymbolEP8OSSymbol +__ZN12OSSymbolPool12removeSymbolEP8OSSymbol +__ZN12OSSymbolPool13initHashStateEv +__ZN12OSSymbolPool13nextHashStateEP17OSSymbolPoolState +__ZN12OSSymbolPool18reconstructSymbolsEv +__ZN12OSSymbolPool4initEv +__ZN12OSSymbolPool4log2Ej +__ZN12OSSymbolPool6exp2mlEj +__ZN12OSSymbolPoolC1EPKS_ +__ZN12OSSymbolPoolC2EPKS_ +__ZN12OSSymbolPoolD0Ev +__ZN12OSSymbolPoolD1Ev +__ZN12OSSymbolPoolD2Ev +__ZN12OSSymbolPooldlEPvm +__ZN12OSSymbolPoolnwEm +__ZN15OSMetaClassBase12safeMetaCastEPKS_PK11OSMetaClass +__ZN15OSMetaClassBase13checkTypeInstEPKS_S1_ +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase3Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase4Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase5Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase6Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase7Ev +__ZN15OSMetaClassBaseC1Ev +__ZN15OSMetaClassBaseC2Ev +__ZN15OSMetaClassBaseD0Ev +__ZN15OSMetaClassBaseD2Ev +__ZN15OSMetaClassMetaC1Ev +__ZN15OSMetaClassMetaC2Ev +__ZN20OSCollectionIterator10gMetaClassE +__ZN20OSCollectionIterator10superClassE +__ZN20OSCollectionIterator13getNextObjectEv +__ZN20OSCollectionIterator14withCollectionEPK12OSCollection +__ZN20OSCollectionIterator18initWithCollectionEPK12OSCollection +__ZN20OSCollectionIterator4freeEv +__ZN20OSCollectionIterator5resetEv +__ZN20OSCollectionIterator7isValidEv +__ZN20OSCollectionIterator9MetaClassC1Ev +__ZN20OSCollectionIterator9MetaClassC2Ev +__ZN20OSCollectionIterator9metaClassE +__ZN20OSCollectionIteratorC1EPK11OSMetaClass +__ZN20OSCollectionIteratorC1Ev +__ZN20OSCollectionIteratorC2EPK11OSMetaClass +__ZN20OSCollectionIteratorC2Ev +__ZN20OSCollectionIteratorD0Ev +__ZN20OSCollectionIteratorD2Ev +__ZN5OSSet10gMetaClassE +__ZN5OSSet10superClassE +__ZN5OSSet11initWithSetEPKS_j +__ZN5OSSet11withObjectsEPPK8OSObjectjj +__ZN5OSSet12removeObjectEPK15OSMetaClassBase +__ZN5OSSet12withCapacityEj +__ZN5OSSet13initWithArrayEPK7OSArrayj +__ZN5OSSet14ensureCapacityEj +__ZN5OSSet15_RESERVEDOSSet0Ev +__ZN5OSSet15_RESERVEDOSSet1Ev +__ZN5OSSet15_RESERVEDOSSet2Ev +__ZN5OSSet15_RESERVEDOSSet3Ev +__ZN5OSSet15_RESERVEDOSSet4Ev +__ZN5OSSet15_RESERVEDOSSet5Ev +__ZN5OSSet15_RESERVEDOSSet6Ev +__ZN5OSSet15_RESERVEDOSSet7Ev +__ZN5OSSet15flushCollectionEv +__ZN5OSSet15initWithObjectsEPPK8OSObjectjj +__ZN5OSSet16initWithCapacityEj +__ZN5OSSet20setCapacityIncrementEj +__ZN5OSSet4freeEv +__ZN5OSSet5mergeEPK7OSArray +__ZN5OSSet5mergeEPKS_ +__ZN5OSSet7withSetEPKS_j +__ZN5OSSet9MetaClassC1Ev +__ZN5OSSet9MetaClassC2Ev +__ZN5OSSet9metaClassE +__ZN5OSSet9setObjectEPK15OSMetaClassBase +__ZN5OSSet9withArrayEPK7OSArrayj +__ZN5OSSetC1EPK11OSMetaClass +__ZN5OSSetC1Ev +__ZN5OSSetC2EPK11OSMetaClass +__ZN5OSSetC2Ev +__ZN5OSSetD0Ev +__ZN5OSSetD2Ev +__ZN6OSData10appendByteEhj 
+__ZN6OSData10gMetaClassE +__ZN6OSData10superClassE +__ZN6OSData11appendBytesEPKS_ +__ZN6OSData11appendBytesEPKvj +__ZN6OSData12initWithDataEPKS_ +__ZN6OSData12initWithDataEPKS_jj +__ZN6OSData12withCapacityEj +__ZN6OSData13initWithBytesEPKvj +__ZN6OSData14ensureCapacityEj +__ZN6OSData15withBytesNoCopyEPvj +__ZN6OSData16_RESERVEDOSData0Ev +__ZN6OSData16_RESERVEDOSData1Ev +__ZN6OSData16_RESERVEDOSData2Ev +__ZN6OSData16_RESERVEDOSData3Ev +__ZN6OSData16_RESERVEDOSData4Ev +__ZN6OSData16_RESERVEDOSData5Ev +__ZN6OSData16_RESERVEDOSData6Ev +__ZN6OSData16_RESERVEDOSData7Ev +__ZN6OSData16initWithCapacityEj +__ZN6OSData19initWithBytesNoCopyEPvj +__ZN6OSData20setCapacityIncrementEj +__ZN6OSData4freeEv +__ZN6OSData8withDataEPKS_ +__ZN6OSData8withDataEPKS_jj +__ZN6OSData9MetaClassC1Ev +__ZN6OSData9MetaClassC2Ev +__ZN6OSData9metaClassE +__ZN6OSData9withBytesEPKvj +__ZN6OSDataC1EPK11OSMetaClass +__ZN6OSDataC1Ev +__ZN6OSDataC2EPK11OSMetaClass +__ZN6OSDataC2Ev +__ZN6OSDataD0Ev +__ZN6OSDataD2Ev +__ZN7OSArray10gMetaClassE +__ZN7OSArray10superClassE +__ZN7OSArray11withObjectsEPPK8OSObjectjj +__ZN7OSArray12removeObjectEj +__ZN7OSArray12withCapacityEj +__ZN7OSArray13initWithArrayEPKS_j +__ZN7OSArray13replaceObjectEjPK15OSMetaClassBase +__ZN7OSArray14ensureCapacityEj +__ZN7OSArray15flushCollectionEv +__ZN7OSArray15initWithObjectsEPPK8OSObjectjj +__ZN7OSArray16initWithCapacityEj +__ZN7OSArray17_RESERVEDOSArray0Ev +__ZN7OSArray17_RESERVEDOSArray1Ev +__ZN7OSArray17_RESERVEDOSArray2Ev +__ZN7OSArray17_RESERVEDOSArray3Ev +__ZN7OSArray17_RESERVEDOSArray4Ev +__ZN7OSArray17_RESERVEDOSArray5Ev +__ZN7OSArray17_RESERVEDOSArray6Ev +__ZN7OSArray17_RESERVEDOSArray7Ev +__ZN7OSArray20setCapacityIncrementEj +__ZN7OSArray4freeEv +__ZN7OSArray5mergeEPKS_ +__ZN7OSArray9MetaClassC1Ev +__ZN7OSArray9MetaClassC2Ev +__ZN7OSArray9metaClassE +__ZN7OSArray9setObjectEPK15OSMetaClassBase +__ZN7OSArray9setObjectEjPK15OSMetaClassBase +__ZN7OSArray9withArrayEPKS_j +__ZN7OSArrayC1EPK11OSMetaClass +__ZN7OSArrayC1Ev +__ZN7OSArrayC2EPK11OSMetaClass +__ZN7OSArrayC2Ev +__ZN7OSArrayD0Ev +__ZN7OSArrayD2Ev +__ZN8OSNumber10gMetaClassE +__ZN8OSNumber10superClassE +__ZN8OSNumber10withNumberEPKcj +__ZN8OSNumber10withNumberEyj +__ZN8OSNumber18_RESERVEDOSNumber0Ev +__ZN8OSNumber18_RESERVEDOSNumber1Ev +__ZN8OSNumber18_RESERVEDOSNumber2Ev +__ZN8OSNumber18_RESERVEDOSNumber3Ev +__ZN8OSNumber18_RESERVEDOSNumber4Ev +__ZN8OSNumber18_RESERVEDOSNumber5Ev +__ZN8OSNumber18_RESERVEDOSNumber6Ev +__ZN8OSNumber18_RESERVEDOSNumber7Ev +__ZN8OSNumber4freeEv +__ZN8OSNumber4initEPKcj +__ZN8OSNumber4initEyj +__ZN8OSNumber8addValueEx +__ZN8OSNumber8setValueEy +__ZN8OSNumber9MetaClassC1Ev +__ZN8OSNumber9MetaClassC2Ev +__ZN8OSNumber9metaClassE +__ZN8OSNumberC1EPK11OSMetaClass +__ZN8OSNumberC1Ev +__ZN8OSNumberC2EPK11OSMetaClass +__ZN8OSNumberC2Ev +__ZN8OSNumberD0Ev +__ZN8OSNumberD2Ev +__ZN8OSObject10gMetaClassE +__ZN8OSObject10superClassE +__ZN8OSObject18_RESERVEDOSObject0Ev +__ZN8OSObject18_RESERVEDOSObject1Ev +__ZN8OSObject18_RESERVEDOSObject2Ev +__ZN8OSObject18_RESERVEDOSObject3Ev +__ZN8OSObject18_RESERVEDOSObject4Ev +__ZN8OSObject18_RESERVEDOSObject5Ev +__ZN8OSObject18_RESERVEDOSObject6Ev +__ZN8OSObject18_RESERVEDOSObject7Ev +__ZN8OSObject18_RESERVEDOSObject8Ev +__ZN8OSObject18_RESERVEDOSObject9Ev +__ZN8OSObject19_RESERVEDOSObject10Ev +__ZN8OSObject19_RESERVEDOSObject11Ev +__ZN8OSObject19_RESERVEDOSObject12Ev +__ZN8OSObject19_RESERVEDOSObject13Ev +__ZN8OSObject19_RESERVEDOSObject14Ev +__ZN8OSObject19_RESERVEDOSObject15Ev +__ZN8OSObject19_RESERVEDOSObject16Ev 
+__ZN8OSObject19_RESERVEDOSObject17Ev +__ZN8OSObject19_RESERVEDOSObject18Ev +__ZN8OSObject19_RESERVEDOSObject19Ev +__ZN8OSObject19_RESERVEDOSObject20Ev +__ZN8OSObject19_RESERVEDOSObject21Ev +__ZN8OSObject19_RESERVEDOSObject22Ev +__ZN8OSObject19_RESERVEDOSObject23Ev +__ZN8OSObject19_RESERVEDOSObject24Ev +__ZN8OSObject19_RESERVEDOSObject25Ev +__ZN8OSObject19_RESERVEDOSObject26Ev +__ZN8OSObject19_RESERVEDOSObject27Ev +__ZN8OSObject19_RESERVEDOSObject28Ev +__ZN8OSObject19_RESERVEDOSObject29Ev +__ZN8OSObject19_RESERVEDOSObject30Ev +__ZN8OSObject19_RESERVEDOSObject31Ev +__ZN8OSObject4freeEv +__ZN8OSObject4initEv +__ZN8OSObject9MetaClassC1Ev +__ZN8OSObject9MetaClassC2Ev +__ZN8OSObject9metaClassE +__ZN8OSObjectC1EPK11OSMetaClass +__ZN8OSObjectC1Ev +__ZN8OSObjectC2EPK11OSMetaClass +__ZN8OSObjectC2Ev +__ZN8OSObjectD0Ev +__ZN8OSObjectD2Ev +__ZN8OSObjectdlEPvm +__ZN8OSObjectnwEm +__ZN8OSString10gMetaClassE +__ZN8OSString10superClassE +__ZN8OSString10withStringEPKS_ +__ZN8OSString11withCStringEPKc +__ZN8OSString14initWithStringEPKS_ +__ZN8OSString15initWithCStringEPKc +__ZN8OSString17withCStringNoCopyEPKc +__ZN8OSString18_RESERVEDOSString0Ev +__ZN8OSString18_RESERVEDOSString1Ev +__ZN8OSString18_RESERVEDOSString2Ev +__ZN8OSString18_RESERVEDOSString3Ev +__ZN8OSString18_RESERVEDOSString4Ev +__ZN8OSString18_RESERVEDOSString5Ev +__ZN8OSString18_RESERVEDOSString6Ev +__ZN8OSString18_RESERVEDOSString7Ev +__ZN8OSString18_RESERVEDOSString8Ev +__ZN8OSString18_RESERVEDOSString9Ev +__ZN8OSString19_RESERVEDOSString10Ev +__ZN8OSString19_RESERVEDOSString11Ev +__ZN8OSString19_RESERVEDOSString12Ev +__ZN8OSString19_RESERVEDOSString13Ev +__ZN8OSString19_RESERVEDOSString14Ev +__ZN8OSString19_RESERVEDOSString15Ev +__ZN8OSString21initWithCStringNoCopyEPKc +__ZN8OSString4freeEv +__ZN8OSString7setCharEcj +__ZN8OSString9MetaClassC1Ev +__ZN8OSString9MetaClassC2Ev +__ZN8OSString9metaClassE +__ZN8OSStringC1EPK11OSMetaClass +__ZN8OSStringC1Ev +__ZN8OSStringC2EPK11OSMetaClass +__ZN8OSStringC2Ev +__ZN8OSStringD0Ev +__ZN8OSStringD2Ev +__ZN8OSSymbol10gMetaClassE +__ZN8OSSymbol10initializeEv +__ZN8OSSymbol10superClassE +__ZN8OSSymbol10withStringEPK8OSString +__ZN8OSSymbol11withCStringEPKc +__ZN8OSSymbol14initWithStringEPK8OSString +__ZN8OSSymbol15initWithCStringEPKc +__ZN8OSSymbol17withCStringNoCopyEPKc +__ZN8OSSymbol18_RESERVEDOSSymbol0Ev +__ZN8OSSymbol18_RESERVEDOSSymbol1Ev +__ZN8OSSymbol18_RESERVEDOSSymbol2Ev +__ZN8OSSymbol18_RESERVEDOSSymbol3Ev +__ZN8OSSymbol18_RESERVEDOSSymbol4Ev +__ZN8OSSymbol18_RESERVEDOSSymbol5Ev +__ZN8OSSymbol18_RESERVEDOSSymbol6Ev +__ZN8OSSymbol18_RESERVEDOSSymbol7Ev +__ZN8OSSymbol18checkForPageUnloadEPvS0_ +__ZN8OSSymbol21initWithCStringNoCopyEPKc +__ZN8OSSymbol4freeEv +__ZN8OSSymbol9MetaClassC1Ev +__ZN8OSSymbol9MetaClassC2Ev +__ZN8OSSymbol9metaClassE +__ZN8OSSymbolC1EPK11OSMetaClass +__ZN8OSSymbolC1Ev +__ZN8OSSymbolC2EPK11OSMetaClass +__ZN8OSSymbolC2Ev +__ZN8OSSymbolD0Ev +__ZN8OSSymbolD2Ev +__ZN9OSBoolean10gMetaClassE +__ZN9OSBoolean10initializeEv +__ZN9OSBoolean10superClassE +__ZN9OSBoolean11withBooleanEb +__ZN9OSBoolean19_RESERVEDOSBoolean0Ev +__ZN9OSBoolean19_RESERVEDOSBoolean1Ev +__ZN9OSBoolean19_RESERVEDOSBoolean2Ev +__ZN9OSBoolean19_RESERVEDOSBoolean3Ev +__ZN9OSBoolean19_RESERVEDOSBoolean4Ev +__ZN9OSBoolean19_RESERVEDOSBoolean5Ev +__ZN9OSBoolean19_RESERVEDOSBoolean6Ev +__ZN9OSBoolean19_RESERVEDOSBoolean7Ev +__ZN9OSBoolean4freeEv +__ZN9OSBoolean9MetaClassC1Ev +__ZN9OSBoolean9MetaClassC2Ev +__ZN9OSBoolean9metaClassE +__ZN9OSBooleanC1EPK11OSMetaClass +__ZN9OSBooleanC1Ev +__ZN9OSBooleanC2EPK11OSMetaClass 
+__ZN9OSBooleanC2Ev +__ZN9OSBooleanD0Ev +__ZN9OSBooleanD2Ev +__ZNK10OSIterator12getMetaClassEv +__ZNK10OSIterator9MetaClass5allocEv +__ZNK11OSMetaClass12getClassNameEv +__ZNK11OSMetaClass12getClassSizeEv +__ZNK11OSMetaClass12getMetaClassEv +__ZNK11OSMetaClass12taggedRetainEPKv +__ZNK11OSMetaClass13checkMetaCastEPK15OSMetaClassBase +__ZNK11OSMetaClass13getSuperClassEv +__ZNK11OSMetaClass13taggedReleaseEPKv +__ZNK11OSMetaClass13taggedReleaseEPKvi +__ZNK11OSMetaClass14getRetainCountEv +__ZNK11OSMetaClass14reservedCalledEi +__ZNK11OSMetaClass16getInstanceCountEv +__ZNK11OSMetaClass18instanceDestructedEv +__ZNK11OSMetaClass19instanceConstructedEv +__ZNK11OSMetaClass6retainEv +__ZNK11OSMetaClass7releaseEi +__ZNK11OSMetaClass7releaseEv +__ZNK11OSMetaClass9serializeEP11OSSerialize +__ZNK11OSSerialize11getCapacityEv +__ZNK11OSSerialize12getMetaClassEv +__ZNK11OSSerialize20getCapacityIncrementEv +__ZNK11OSSerialize4textEv +__ZNK11OSSerialize9MetaClass5allocEv +__ZNK11OSSerialize9getLengthEv +__ZNK12OSCollection12getMetaClassEv +__ZNK12OSCollection9MetaClass5allocEv +__ZNK12OSDictionary11getCapacityEv +__ZNK12OSDictionary12getMetaClassEv +__ZNK12OSDictionary12initIteratorEPv +__ZNK12OSDictionary12iteratorSizeEv +__ZNK12OSDictionary20getCapacityIncrementEv +__ZNK12OSDictionary24getNextObjectForIteratorEPvPP8OSObject +__ZNK12OSDictionary8getCountEv +__ZNK12OSDictionary9MetaClass5allocEv +__ZNK12OSDictionary9getObjectEPK8OSString +__ZNK12OSDictionary9getObjectEPK8OSSymbol +__ZNK12OSDictionary9getObjectEPKc +__ZNK12OSDictionary9isEqualToEPK15OSMetaClassBase +__ZNK12OSDictionary9isEqualToEPKS_ +__ZNK12OSDictionary9isEqualToEPKS_PK12OSCollection +__ZNK12OSDictionary9serializeEP11OSSerialize +__ZNK12OSOrderedSet11getCapacityEv +__ZNK12OSOrderedSet12getMetaClassEv +__ZNK12OSOrderedSet12initIteratorEPv +__ZNK12OSOrderedSet12iteratorSizeEv +__ZNK12OSOrderedSet13getLastObjectEv +__ZNK12OSOrderedSet14containsObjectEPK15OSMetaClassBase +__ZNK12OSOrderedSet14getFirstObjectEv +__ZNK12OSOrderedSet20getCapacityIncrementEv +__ZNK12OSOrderedSet24getNextObjectForIteratorEPvPP8OSObject +__ZNK12OSOrderedSet6memberEPK15OSMetaClassBase +__ZNK12OSOrderedSet8getCountEv +__ZNK12OSOrderedSet9MetaClass5allocEv +__ZNK12OSOrderedSet9getObjectEj +__ZNK12OSOrderedSet9isEqualToEPK15OSMetaClassBase +__ZNK12OSOrderedSet9isEqualToEPKS_ +__ZNK12OSSerializer12getMetaClassEv +__ZNK12OSSerializer9MetaClass5allocEv +__ZNK12OSSerializer9serializeEP11OSSerialize +__ZNK12OSSymbolPool10findSymbolEPKc +__ZNK15OSMetaClassBase8metaCastEPK11OSMetaClass +__ZNK15OSMetaClassBase8metaCastEPK8OSString +__ZNK15OSMetaClassBase8metaCastEPK8OSSymbol +__ZNK15OSMetaClassBase8metaCastEPKc +__ZNK15OSMetaClassBase9isEqualToEPKS_ +__ZNK15OSMetaClassMeta5allocEv +__ZNK20OSCollectionIterator12getMetaClassEv +__ZNK20OSCollectionIterator9MetaClass5allocEv +__ZNK5OSSet11getCapacityEv +__ZNK5OSSet12getAnyObjectEv +__ZNK5OSSet12getMetaClassEv +__ZNK5OSSet12initIteratorEPv +__ZNK5OSSet12iteratorSizeEv +__ZNK5OSSet14containsObjectEPK15OSMetaClassBase +__ZNK5OSSet20getCapacityIncrementEv +__ZNK5OSSet24getNextObjectForIteratorEPvPP8OSObject +__ZNK5OSSet6memberEPK15OSMetaClassBase +__ZNK5OSSet8getCountEv +__ZNK5OSSet9MetaClass5allocEv +__ZNK5OSSet9isEqualToEPK15OSMetaClassBase +__ZNK5OSSet9isEqualToEPKS_ +__ZNK5OSSet9serializeEP11OSSerialize +__ZNK6OSData11getCapacityEv +__ZNK6OSData12getMetaClassEv +__ZNK6OSData14getBytesNoCopyEjj +__ZNK6OSData14getBytesNoCopyEv +__ZNK6OSData20getCapacityIncrementEv +__ZNK6OSData9MetaClass5allocEv +__ZNK6OSData9getLengthEv 
+__ZNK6OSData9isEqualToEPK15OSMetaClassBase +__ZNK6OSData9isEqualToEPK8OSString +__ZNK6OSData9isEqualToEPKS_ +__ZNK6OSData9isEqualToEPKvj +__ZNK6OSData9serializeEP11OSSerialize +__ZNK7OSArray11getCapacityEv +__ZNK7OSArray12getMetaClassEv +__ZNK7OSArray12initIteratorEPv +__ZNK7OSArray12iteratorSizeEv +__ZNK7OSArray13getLastObjectEv +__ZNK7OSArray20getCapacityIncrementEv +__ZNK7OSArray20getNextIndexOfObjectEPK15OSMetaClassBasej +__ZNK7OSArray24getNextObjectForIteratorEPvPP8OSObject +__ZNK7OSArray8getCountEv +__ZNK7OSArray9MetaClass5allocEv +__ZNK7OSArray9getObjectEj +__ZNK7OSArray9isEqualToEPK15OSMetaClassBase +__ZNK7OSArray9isEqualToEPKS_ +__ZNK7OSArray9serializeEP11OSSerialize +__ZNK8OSNumber12getMetaClassEv +__ZNK8OSNumber12numberOfBitsEv +__ZNK8OSNumber13numberOfBytesEv +__ZNK8OSNumber17unsigned8BitValueEv +__ZNK8OSNumber18unsigned16BitValueEv +__ZNK8OSNumber18unsigned32BitValueEv +__ZNK8OSNumber18unsigned64BitValueEv +__ZNK8OSNumber9MetaClass5allocEv +__ZNK8OSNumber9isEqualToEPK15OSMetaClassBase +__ZNK8OSNumber9isEqualToEPKS_ +__ZNK8OSNumber9serializeEP11OSSerialize +__ZNK8OSObject12getMetaClassEv +__ZNK8OSObject12taggedRetainEPKv +__ZNK8OSObject13taggedReleaseEPKv +__ZNK8OSObject13taggedReleaseEPKvi +__ZNK8OSObject14getRetainCountEv +__ZNK8OSObject6retainEv +__ZNK8OSObject7releaseEi +__ZNK8OSObject7releaseEv +__ZNK8OSObject9MetaClass5allocEv +__ZNK8OSObject9serializeEP11OSSerialize +__ZNK8OSString12getMetaClassEv +__ZNK8OSString16getCStringNoCopyEv +__ZNK8OSString7getCharEj +__ZNK8OSString9MetaClass5allocEv +__ZNK8OSString9getLengthEv +__ZNK8OSString9isEqualToEPK15OSMetaClassBase +__ZNK8OSString9isEqualToEPK6OSData +__ZNK8OSString9isEqualToEPKS_ +__ZNK8OSString9isEqualToEPKc +__ZNK8OSString9serializeEP11OSSerialize +__ZNK8OSSymbol12getMetaClassEv +__ZNK8OSSymbol13taggedReleaseEPKv +__ZNK8OSSymbol13taggedReleaseEPKvi +__ZNK8OSSymbol9MetaClass5allocEv +__ZNK8OSSymbol9isEqualToEPK15OSMetaClassBase +__ZNK8OSSymbol9isEqualToEPKS_ +__ZNK8OSSymbol9isEqualToEPKc +__ZNK9OSBoolean12getMetaClassEv +__ZNK9OSBoolean12taggedRetainEPKv +__ZNK9OSBoolean13taggedReleaseEPKvi +__ZNK9OSBoolean6isTrueEv +__ZNK9OSBoolean7isFalseEv +__ZNK9OSBoolean8getValueEv +__ZNK9OSBoolean9MetaClass5allocEv +__ZNK9OSBoolean9isEqualToEPK15OSMetaClassBase +__ZNK9OSBoolean9isEqualToEPKS_ +__ZNK9OSBoolean9serializeEP11OSSerialize +__ZTV10OSIterator +__ZTV11OSMetaClass +__ZTV11OSSerialize +__ZTV12OSCollection +__ZTV12OSDictionary +__ZTV12OSOrderedSet +__ZTV12OSSerializer +__ZTV12OSSymbolPool +__ZTV15OSMetaClassBase +__ZTV15OSMetaClassMeta +__ZTV20OSCollectionIterator +__ZTV5OSSet +__ZTV6OSData +__ZTV7OSArray +__ZTV8OSNumber +__ZTV8OSObject +__ZTV8OSString +__ZTV8OSSymbol +__ZTV9OSBoolean +__ZTVN10OSIterator9MetaClassE +__ZTVN11OSSerialize9MetaClassE +__ZTVN12OSCollection9MetaClassE +__ZTVN12OSDictionary9MetaClassE +__ZTVN12OSOrderedSet9MetaClassE +__ZTVN12OSSerializer9MetaClassE +__ZTVN20OSCollectionIterator9MetaClassE +__ZTVN5OSSet9MetaClassE +__ZTVN6OSData9MetaClassE +__ZTVN7OSArray9MetaClassE +__ZTVN8OSNumber9MetaClassE +__ZTVN8OSObject9MetaClassE +__ZTVN8OSString9MetaClassE +__ZTVN8OSSymbol9MetaClassE +__ZTVN9OSBoolean9MetaClassE +__ZdlPv +__Znwm +___cxa_pure_virtual +_atoi +_bcmp +_bcopy +_bcopy_phys +_bzero +_bzero_phys +_copyin +_copyout +_debug_ivars_size +_itoa +_kOSBooleanFalse +_kOSBooleanTrue +_kern_os_free +_kern_os_malloc +_kern_os_malloc_size +_kern_os_realloc +_kprintf +_memcmp +_memcpy +_memset +_panic +_printf +_sprintf +_strcat +_strchr +_strcmp +_strcpy +_strlen +_strncat +_strncmp +_strncpy +_strprefix 
+_strtol +_strtoq +_strtoul +_strtouq
+
diff --git a/config/Libkern.i386.exports b/config/Libkern.i386.exports
new file mode 100644
index 000000000..e69de29bb
diff --git a/config/Libkern.ppc.exports b/config/Libkern.ppc.exports
new file mode 100644
index 000000000..e69de29bb
diff --git a/config/Mach.exports b/config/Mach.exports
new file mode 100644
index 000000000..668072d1e
--- /dev/null
+++ b/config/Mach.exports
@@ -0,0 +1,2070 @@
+_Assert +_Debugger +_IODefaultCacheBits +_IOGetTime +_IOMapPages +_IOUnmapPages +_KERNEL_AUDIT_TOKEN +_KERNEL_SECURITY_TOKEN +_KUNCExecute +_KUNCGetNotificationID +_KUNCUserNotificationCancel +_KUNCUserNotificationDisplayAlert +_KUNCUserNotificationDisplayFromBundle +_KUNCUserNotificationDisplayNotice +_NDR_record +_Switch_context +_TRAP_TYPES +_UNDAlertCompletedWithResult_rpc +_UNDCancelNotification_rpc +_UNDDisplayAlertFromBundle_rpc +_UNDDisplayAlertSimple_rpc +_UNDDisplayCustomFromBundle_rpc +_UNDDisplayCustomFromDictionary_rpc +_UNDDisplayNoticeFromBundle_rpc +_UNDDisplayNoticeSimple_rpc +_UNDExecute_rpc +_UNDNotificationCreated_rpc +_UNDReply_deallocate +_UNDReply_server +_UNDReply_server_routine +_UNDReply_subsystem +___doprnt +__cpu_capabilities +__disable_preemption +__doprnt +__doprnt_truncates +__enable_preemption +__enable_preemption_no_check +__longjmp +__mk_sp_thread_begin +__mk_sp_thread_depress_abort +__mk_sp_thread_depress_abstime +__mk_sp_thread_depress_ms +__mk_sp_thread_dispatch +__mk_sp_thread_done +__mk_sp_thread_perhaps_yield +__mk_sp_thread_switch +__mk_sp_thread_switch_continue +__mk_sp_thread_unblock +__mutex_lock +__mutex_try +__setjmp +__start +__vm_external_state_get +__vm_map_clip_end +__vm_map_clip_start +__vm_map_entry_create +__vm_map_entry_dispose +_absolutetime_to_nanoseconds +_act_abort +_act_attach +_act_deallocate +_act_detach +_act_execute_returnhandlers +_act_free_swapin +_act_get_state +_act_get_state_locked +_act_lock_thread +_act_machine_sv_free +_act_reference +_act_set_apc +_act_set_astbsd +_act_set_state +_act_set_state_locked +_act_thread_catt +_act_thread_cfree +_act_thread_csave +_act_ulock_release_all +_act_unlock_thread +_active_debugger +_adjust_vm_object_cache +_adr +_all_zones_lock +_allow_clustered_pageouts +_assert_wait +_assert_wait_possible +_assert_wait_prim +_assert_wait_timeout +_assert_wait_timeout_event +_ast_check +_ast_init +_ast_taken +_astbsd_on +_atoi +_atoi_term +_avail_remaining +_avenrun +_backing_store_add +_backing_store_alloc +_backing_store_list +_backing_store_lookup +_backing_store_release_trigger_disable +_bcopy +_bcopy_phys +_be_tracing +_bs_commit +_bs_get_global_clsize +_bs_global_info +_bs_initialize +_bs_low +_bs_more_space +_bs_no_paging_space +_bs_set_default_clsize +_bsd_exception +_bsd_init_task +_bzero +_bzero_phys +_c_incoming_interrupts +_c_mach_msg_trap_switch_fast +_c_mmot_combined_S_R +_c_mmot_kernel_send +_c_swapin_thread_block +_c_syscalls_mach +_c_syscalls_unix +_c_thr_exc_raise +_c_thr_exc_raise_state +_c_thr_exc_raise_state_id +_c_thread_invoke_csw +_c_thread_invoke_hits +_c_thread_invoke_misses +_c_thread_invoke_same +_c_thread_invoke_same_cont +_c_tsk_exc_raise +_c_tsk_exc_raise_state +_c_tsk_exc_raise_state_id +_c_vm_page_grab_fictitious +_c_vm_page_more_fictitious +_c_vm_page_release_fictitious +_c_weird_pset_ref_exit +_calend_config +_calend_getattr +_calend_gettime +_calend_init +_calend_ops +_call_continuation +_call_thread_block +_call_thread_unblock +_catch_exc_subsystem +_cause_ast_check +_check_actforsig +_clear_wait
+_clock_absolutetime_interval_to_deadline +_clock_adjtime +_clock_adjust_calendar +_clock_alarm +_clock_alarm_intr +_clock_alarm_reply +_clock_config +_clock_count +_clock_deadline_for_periodic_event +_clock_get_attributes +_clock_get_calendar_microtime +_clock_get_calendar_nanotime +_clock_get_calendar_value +_clock_get_system_microtime +_clock_get_system_nanotime +_clock_get_system_value +_clock_get_time +_clock_get_uptime +_clock_init +_clock_initialize_calendar +_clock_interval_to_absolutetime_interval +_clock_interval_to_deadline +_clock_list +_clock_priv_server +_clock_priv_server_routine +_clock_priv_subsystem +_clock_server +_clock_server_routine +_clock_service_create +_clock_set_attributes +_clock_set_calendar_adjtime +_clock_set_calendar_microtime +_clock_set_time +_clock_set_timer_deadline +_clock_set_timer_func +_clock_sleep_internal +_clock_sleep_trap +_clock_subsystem +_clock_timebase_info +_clock_timebase_init +_clock_wakeup_calendar +_clr_be_bit +_clrbit +_cluster_transfer_minimum +_clustered_reads +_clustered_writes +_clusters_available +_clusters_committed +_clusters_committed_peak +_cngetc +_cnmaygetc +_cnputc +_com_mapping_resource +_com_region_handle +_com_region_map +_com_region_size +_commpage_populate +_compute_mach_factor +_compute_my_priority +_compute_priority +_consdebug_putc +_consider_machine_adjust +_consider_machine_collect +_consider_task_collect +_consider_zone_gc +_conslog_putc +_convert_act_to_port +_convert_clock_ctrl_to_port +_convert_clock_to_port +_convert_host_to_port +_convert_ledger_to_port +_convert_lock_set_to_port +_convert_memory_object_to_port +_convert_mig_object_to_port +_convert_mo_control_to_port +_convert_port_entry_to_map +_convert_port_entry_to_object +_convert_port_to_UNDReply +_convert_port_to_act +_convert_port_to_clock +_convert_port_to_clock_ctrl +_convert_port_to_host +_convert_port_to_host_priv +_convert_port_to_host_security +_convert_port_to_ledger +_convert_port_to_lock_set +_convert_port_to_locked_task +_convert_port_to_map +_convert_port_to_memory_object +_convert_port_to_mig_object +_convert_port_to_mo_control +_convert_port_to_processor +_convert_port_to_pset +_convert_port_to_pset_name +_convert_port_to_semaphore +_convert_port_to_space +_convert_port_to_task +_convert_port_to_upl +_convert_processor_to_port +_convert_pset_name_to_port +_convert_pset_to_port +_convert_semaphore_to_port +_convert_task_to_port +_convert_upl_to_port +_copyin +_copyin_shared_file +_copyinmap +_copyinmsg +_copyinstr +_copyout +_copyoutmap +_copyoutmsg +_copyoutstr +_copypv +_coredumpok +_cpm_allocate +_cpu_control +_cpu_down +_cpu_info +_cpu_info_count +_cpu_init +_cpu_launch_first_thread +_cpu_machine_init +_cpu_number +_cpu_register +_cpu_signal_handler +_cpu_sleep +_cpu_start +_cpu_up +_csw_check +_cthread_stack_size +_current_act +_current_debugger +_current_map +_current_task +_current_thread +_current_thread_aborted +_current_timer +_d_to_i +_db_thread_read_times +_db_timer_grab +_dbugprintf +_ddb_regs +_debug_buf +_debug_buf_ptr +_debug_buf_size +_debug_log_init +_debug_mode +_debug_putc +_default_environment_shared_regions +_default_pager +_default_pager_add_file +_default_pager_async_lock +_default_pager_backing_store_create +_default_pager_backing_store_delete +_default_pager_backing_store_info +_default_pager_backing_store_monitor +_default_pager_clsize +_default_pager_default_set +_default_pager_external_count +_default_pager_external_set +_default_pager_info +_default_pager_info_verbose +_default_pager_initialize 
+_default_pager_internal_count +_default_pager_internal_set +_default_pager_memory_object_create +_default_pager_memory_object_default_subsystem +_default_pager_object +_default_pager_object_create +_default_pager_object_pages +_default_pager_object_server +_default_pager_object_server_routine +_default_pager_object_subsystem +_default_pager_objects +_default_pager_space_alert +_default_pager_triggers +_default_preemption_rate +_default_pset +_delay +_device_object_create +_device_pager_bootstrap +_device_pager_data_initialize +_device_pager_data_request +_device_pager_data_return +_device_pager_data_unlock +_device_pager_deallocate +_device_pager_init +_device_pager_lookup +_device_pager_populate_object +_device_pager_reference +_device_pager_setup +_device_pager_synchronize +_device_pager_terminate +_device_pager_unmap +_device_pager_workaround +_device_pager_zone +_device_service_create +_disableDebugOuput +_disable_bluebox +_dispatch_counts +_dp_memory_object_data_initialize +_dp_memory_object_data_request +_dp_memory_object_data_return +_dp_memory_object_data_unlock +_dp_memory_object_deallocate +_dp_memory_object_init +_dp_memory_object_reference +_dp_memory_object_subsystem +_dp_memory_object_synchronize +_dp_memory_object_terminate +_dp_memory_object_unmap +_dp_pages_free +_dp_parse_argument +_dpt_array +_dpt_lock +_draw_panic_dialog +_dynamic_pager_control_port +_edata +_eml_init +_eml_task_deallocate +_eml_task_reference +_enable_bluebox +_enable_hotpath +_end +_etap_get_info +_etap_interrupt_probe +_etap_machcall_probe1 +_etap_machcall_probe2 +_etap_mon_reconfig +_etap_new_probe +_etap_probe +_etap_trace_event +_etap_trace_thread +_etext +_exc_server +_exc_server_routine +_exception +_exception_deliver +_exception_raise +_exception_raise_state +_exception_raise_state_identity +_ffsbit +_fillPage +_first_avail +_first_free_check +_first_free_is_valid +_first_k_zone +_first_zone +_flush_dcache +_flush_dcache64 +_funnel_alloc +_funnel_free +_funnel_lock +_funnel_unlock +_gIOKitPortCount +_gc_buffer_lock +_gc_vt100state +_get_bsdtask_info +_get_bsdthread_info +_get_dp_control_port +_get_firstthread +_get_map_end +_get_map_max +_get_map_min +_get_map_nentries +_get_map_pmap +_get_map_start +_get_read_buffer +_get_set_state +_get_signalact +_get_state_handler +_get_task_ipcspace +_get_task_map +_get_task_numacts +_get_task_pmap +_get_task_userstop +_get_thread_userstop +_get_thread_waitresult +_get_threadtask +_get_user_regs +_get_useraddr +_get_vmmap_entries +_get_vmmap_size +_get_vmsubmap_entries +_getact_thread +_getmachheaders +_getsectcmdsymtabfromheader +_getshuttle_thread +_getsymtab +_global_stats +_halt_all_cpus +_halt_cpu +_halt_in_debugger +_hertz_tick +_host_default_memory_manager +_host_get_UNDServer +_host_get_boot_info +_host_get_clock_control +_host_get_clock_service +_host_get_exception_ports +_host_get_io_master +_host_get_special_port +_host_info +_host_ipc_hash_info +_host_kernel_version +_host_load_symbol_table +_host_notify_calendar_change +_host_notify_init +_host_notify_port_destroy +_host_page_size +_host_priv_self +_host_priv_server +_host_priv_server_routine +_host_priv_statistics +_host_priv_subsystem +_host_processor_info +_host_processor_set_priv +_host_processor_sets +_host_processors +_host_reboot +_host_request_notification +_host_security_create_task_token +_host_security_self +_host_security_server +_host_security_server_routine +_host_security_set_task_token +_host_security_subsystem +_host_self +_host_self_trap +_host_set_UNDServer 
+_host_set_exception_ports +_host_set_special_port +_host_stack_usage +_host_statistics +_host_swap_exception_ports +_host_virtual_physical_table_info +_host_zone_info +_hw_atomic_add +_hw_atomic_and +_hw_atomic_or +_hw_atomic_sub +_hw_compare_and_store +_hw_lock_held +_hw_lock_init +_hw_lock_lock +_hw_lock_to +_hw_lock_try +_hw_lock_unlock +_idle_thread +_idle_thread_continue +_init_ast_check +_init_task_failure_data +_init_timers +_initialize_screen +_install_special_handler +_install_special_handler_locked +_interlock_unlock +_intstack +_invalidate_icache +_invalidate_icache64 +_io_map +_io_map_spec +_io_throttle_zero_fill +_iokit_alloc_object_port +_iokit_destroy_object_port +_iokit_lookup_connect_port +_iokit_lookup_connect_ref +_iokit_lookup_connect_ref_current_task +_iokit_lookup_object_port +_iokit_make_connect_port +_iokit_make_object_port +_iokit_make_send_right +_iokit_notify +_iokit_release_port +_iokit_retain_port +_iokit_server +_iokit_server_routine +_iokit_switch_object_port +_ipc_bootstrap +_ipc_clock_enable +_ipc_clock_init +_ipc_entry_alloc +_ipc_entry_alloc_name +_ipc_entry_dealloc +_ipc_entry_get +_ipc_entry_grow_table +_ipc_entry_lookup +_ipc_entry_tree_collision +_ipc_hash_delete +_ipc_hash_global_delete +_ipc_hash_global_insert +_ipc_hash_global_lookup +_ipc_hash_global_mask +_ipc_hash_global_size +_ipc_hash_global_table +_ipc_hash_init +_ipc_hash_insert +_ipc_hash_local_delete +_ipc_hash_local_insert +_ipc_hash_local_lookup +_ipc_hash_lookup +_ipc_host_init +_ipc_init +_ipc_kernel_copy_map +_ipc_kernel_copy_map_size +_ipc_kernel_map +_ipc_kernel_map_size +_ipc_kmsg_alloc +_ipc_kmsg_cache +_ipc_kmsg_cache_avail +_ipc_kmsg_clean +_ipc_kmsg_clean_body +_ipc_kmsg_clean_partial +_ipc_kmsg_clear_prealloc +_ipc_kmsg_copyin +_ipc_kmsg_copyin_body +_ipc_kmsg_copyin_from_kernel +_ipc_kmsg_copyin_header +_ipc_kmsg_copyin_scatter +_ipc_kmsg_copyout +_ipc_kmsg_copyout_body +_ipc_kmsg_copyout_dest +_ipc_kmsg_copyout_header +_ipc_kmsg_copyout_object +_ipc_kmsg_copyout_pseudo +_ipc_kmsg_copyout_to_kernel +_ipc_kmsg_dequeue +_ipc_kmsg_destroy +_ipc_kmsg_destroy_dest +_ipc_kmsg_enqueue +_ipc_kmsg_free +_ipc_kmsg_free_scatter +_ipc_kmsg_get +_ipc_kmsg_get_from_kernel +_ipc_kmsg_init +_ipc_kmsg_max_vm_space +_ipc_kmsg_put +_ipc_kmsg_put_to_kernel +_ipc_kmsg_queue_next +_ipc_kmsg_rmqueue +_ipc_kmsg_send +_ipc_kmsg_set_prealloc +_ipc_kobject_destroy +_ipc_kobject_notify +_ipc_kobject_server +_ipc_kobject_set +_ipc_kobject_set_atomically +_ipc_mqueue_add +_ipc_mqueue_changed +_ipc_mqueue_copyin +_ipc_mqueue_destroy +_ipc_mqueue_full +_ipc_mqueue_init +_ipc_mqueue_member +_ipc_mqueue_post +_ipc_mqueue_rcv +_ipc_mqueue_receive +_ipc_mqueue_receive_continue +_ipc_mqueue_receive_results +_ipc_mqueue_release_msgcount +_ipc_mqueue_remove +_ipc_mqueue_remove_all +_ipc_mqueue_remove_from_all +_ipc_mqueue_select +_ipc_mqueue_send +_ipc_mqueue_set_qlimit +_ipc_mqueue_set_seqno +_ipc_notify_dead_name +_ipc_notify_no_senders +_ipc_notify_port_deleted +_ipc_notify_port_destroyed +_ipc_notify_send_once +_ipc_object_alloc +_ipc_object_alloc_dead +_ipc_object_alloc_dead_name +_ipc_object_alloc_name +_ipc_object_copyin +_ipc_object_copyin_from_kernel +_ipc_object_copyin_type +_ipc_object_copyout +_ipc_object_copyout_dest +_ipc_object_copyout_name +_ipc_object_destroy +_ipc_object_reference +_ipc_object_release +_ipc_object_rename +_ipc_object_translate +_ipc_object_translate_two +_ipc_object_zones +_ipc_port_alloc +_ipc_port_alloc_name +_ipc_port_alloc_special +_ipc_port_check_circularity 
+_ipc_port_clear_receiver +_ipc_port_copy_send +_ipc_port_copyout_send +_ipc_port_dealloc_special +_ipc_port_destroy +_ipc_port_dncancel +_ipc_port_dngrow +_ipc_port_dnnotify +_ipc_port_dnrequest +_ipc_port_init +_ipc_port_lookup_notify +_ipc_port_make_send +_ipc_port_make_send_locked +_ipc_port_make_sonce +_ipc_port_max +_ipc_port_multiple_lock_data +_ipc_port_nsrequest +_ipc_port_pdrequest +_ipc_port_release +_ipc_port_release_receive +_ipc_port_release_send +_ipc_port_release_sonce +_ipc_port_timestamp +_ipc_port_timestamp_data +_ipc_port_timestamp_lock_data +_ipc_processor_disable +_ipc_processor_enable +_ipc_processor_init +_ipc_processor_terminate +_ipc_pset_add +_ipc_pset_alloc +_ipc_pset_alloc_name +_ipc_pset_destroy +_ipc_pset_disable +_ipc_pset_enable +_ipc_pset_init +_ipc_pset_max +_ipc_pset_member +_ipc_pset_remove +_ipc_pset_remove_from_all +_ipc_pset_terminate +_ipc_right_check +_ipc_right_clean +_ipc_right_copyin +_ipc_right_copyin_check +_ipc_right_copyin_two +_ipc_right_copyin_undo +_ipc_right_copyout +_ipc_right_dealloc +_ipc_right_delta +_ipc_right_destroy +_ipc_right_dncancel +_ipc_right_dnrequest +_ipc_right_info +_ipc_right_inuse +_ipc_right_lookup_two_write +_ipc_right_lookup_write +_ipc_right_rename +_ipc_right_reverse +_ipc_space_clean +_ipc_space_create +_ipc_space_create_special +_ipc_space_destroy +_ipc_space_kernel +_ipc_space_max +_ipc_space_reference +_ipc_space_release +_ipc_space_reply +_ipc_space_zone +_ipc_splay_traverse_finish +_ipc_splay_traverse_next +_ipc_splay_traverse_start +_ipc_splay_tree_bounds +_ipc_splay_tree_delete +_ipc_splay_tree_init +_ipc_splay_tree_insert +_ipc_splay_tree_join +_ipc_splay_tree_lookup +_ipc_splay_tree_pick +_ipc_splay_tree_split +_ipc_table_alloc +_ipc_table_dnrequests +_ipc_table_dnrequests_size +_ipc_table_entries +_ipc_table_entries_size +_ipc_table_fill +_ipc_table_free +_ipc_table_init +_ipc_table_realloc +_ipc_task_disable +_ipc_task_enable +_ipc_task_init +_ipc_task_terminate +_ipc_thr_act_disable +_ipc_thr_act_init +_ipc_thr_act_terminate +_ipc_thread_init +_ipc_thread_terminate +_ipc_tree_entry_max +_ipc_tree_entry_zone +_is_64signalregset +_is_iokit_subsystem +_is_kerneltask +_is_thread_active +_is_thread_idle +_is_thread_running +_iso_font +_itoa +_k_zone +_k_zone_max +_kalloc +_kalloc_canblock +_kalloc_fake_zone_info +_kalloc_init +_kalloc_large_inuse +_kalloc_large_max +_kalloc_large_total +_kalloc_map +_kalloc_map_size +_kalloc_max +_kalloc_max_prerounded +_kalloc_noblock +_kalloc_zone +_kdb_printf +_kdp +_kdp_call +_kdp_call_kdb +_kdp_exception +_kdp_exception_ack +_kdp_flag +_kdp_get_ip_address +_kdp_get_mac_addr +_kdp_getc +_kdp_intr_disbl +_kdp_intr_enbl +_kdp_machine_hostinfo +_kdp_machine_read_regs +_kdp_machine_write_regs +_kdp_ml_get_breakinsn +_kdp_packet +_kdp_panic +_kdp_raise_exception +_kdp_reboot +_kdp_register_send_receive +_kdp_remove_all_breakpoints +_kdp_reset +_kdp_set_ip_and_mac_addresses +_kdp_sync_cache +_kdp_unregister_send_receive +_kdp_us_spin +_kdp_vm_read +_kdp_vm_write +_kentry_count +_kentry_data +_kentry_data_size +_kern_invalid +_kern_invalid_debug +_kernel_map +_kernel_memory_allocate +_kernel_object_iopl_request +_kernel_pageable_map +_kernel_pmap +_kernel_pmap_store +_kernel_set_special_port +_kernel_task +_kernel_task_create +_kernel_thread +_kernel_thread_create +_kernel_thread_with_priority +_kernel_timer +_kernel_upl_abort +_kernel_upl_abort_range +_kernel_upl_commit +_kernel_upl_commit_range +_kernel_upl_map +_kernel_upl_unmap +_kernel_vm_map_reference +_kfree +_kget 
+_kmem_alloc +_kmem_alloc_aligned +_kmem_alloc_contig +_kmem_alloc_pageable +_kmem_alloc_pages +_kmem_alloc_wired +_kmem_free +_kmem_init +_kmem_io_object_deallocate +_kmem_io_object_trunc +_kmem_realloc +_kmem_remap_pages +_kmem_suballoc +_kmod +_kmod_cmd_queue +_kmod_control +_kmod_create +_kmod_create_fake +_kmod_create_internal +_kmod_default_start +_kmod_default_stop +_kmod_destroy +_kmod_destroy_internal +_kmod_dump +_kmod_finalize_cpp +_kmod_get_info +_kmod_init +_kmod_initialize_cpp +_kmod_load_extension +_kmod_load_extension_with_dependencies +_kmod_lock +_kmod_lookupbyid +_kmod_lookupbyid_locked +_kmod_lookupbyname +_kmod_lookupbyname_locked +_kmod_queue_cmd +_kmod_queue_lock +_kmod_release +_kmod_retain +_kmod_send_generic +_kmod_start_or_stop +_krealloc +_kvtophys +_last_page_zf +_last_zone +_ledger_copy +_ledger_create +_ledger_enter +_ledger_init +_ledger_read +_ledger_server +_ledger_server_routine +_ledger_subsystem +_ledger_terminate +_ledger_transfer +_local_log2 +_lock_acquire +_lock_alloc +_lock_done +_lock_free +_lock_handoff +_lock_handoff_accept +_lock_init +_lock_make_stable +_lock_make_unstable +_lock_read +_lock_read_to_write +_lock_release +_lock_release_internal +_lock_set_create +_lock_set_dereference +_lock_set_destroy +_lock_set_event +_lock_set_handoff +_lock_set_init +_lock_set_reference +_lock_set_server +_lock_set_server_routine +_lock_set_subsystem +_lock_try +_lock_wait_time +_lock_write +_lock_write_to_read +_log +_logPanicDataToScreen +_lookup_default_shared_region +_lsf_mapping_pool_gauge +_lsf_remove_regions_mappings +_lsf_zone +_mach_absolute_time +_mach_assert +_mach_destroy_memory_entry +_mach_factor +_mach_host_server +_mach_host_server_routine +_mach_host_subsystem +_mach_make_memory_entry +_mach_make_memory_entry_64 +_mach_memory_object_memory_entry +_mach_memory_object_memory_entry_64 +_mach_msg_overwrite +_mach_msg_overwrite_trap +_mach_msg_receive +_mach_msg_receive_continue +_mach_msg_receive_results +_mach_msg_rpc_from_kernel +_mach_msg_send +_mach_msg_send_from_kernel +_mach_msg_trap +_mach_notify_dead_name +_mach_notify_no_senders +_mach_notify_port_deleted +_mach_notify_port_destroyed +_mach_notify_send_once +_mach_port_allocate +_mach_port_allocate_full +_mach_port_allocate_name +_mach_port_allocate_qos +_mach_port_deallocate +_mach_port_destroy +_mach_port_dnrequest_info +_mach_port_extract_member +_mach_port_extract_right +_mach_port_get_attributes +_mach_port_get_refs +_mach_port_get_set_status +_mach_port_get_srights +_mach_port_gst_helper +_mach_port_insert_member +_mach_port_insert_right +_mach_port_kernel_object +_mach_port_mod_refs +_mach_port_move_member +_mach_port_names +_mach_port_names_helper +_mach_port_rename +_mach_port_request_notification +_mach_port_server +_mach_port_server_routine +_mach_port_set_attributes +_mach_port_set_mscount +_mach_port_set_seqno +_mach_port_space_info +_mach_port_subsystem +_mach_port_type +_mach_ports_lookup +_mach_ports_register +_mach_reply_port +_mach_thread_self +_mach_timebase_info +_mach_trap_count +_mach_trap_table +_mach_vm_region_info +_mach_vm_region_info_64 +_mach_wait_until +_machine_boot_info +_machine_idle +_machine_info +_machine_init +_machine_load_context +_machine_signal_idle +_machine_slot +_machine_stack_attach +_machine_stack_detach +_machine_stack_handoff +_machine_startup +_machine_switch_act +_machine_switch_context +_machine_thread_create +_machine_thread_destroy +_machine_thread_dup +_machine_thread_get_state +_machine_thread_init +_machine_thread_set_current 
+_machine_thread_set_state +_machine_thread_terminate_self +_machine_wake_thread +_macx_triggers +_map_data +_map_data_size +_mapping_set_mod +_master_cpu +_master_device_port +_master_processor +_max_doubled_size +_max_mem +_max_pages_trigger_port +_max_poll_computation +_max_poll_quanta +_max_rt_quantum +_max_unsafe_computation +_max_unsafe_quanta +_maximum_pages_free +_mem_size +_memcpy +_memory_manager_default +_memory_manager_default_cluster +_memory_manager_default_lock +_memory_object_change_attributes +_memory_object_control_deallocate +_memory_object_control_disable +_memory_object_control_reference +_memory_object_control_server +_memory_object_control_server_routine +_memory_object_control_subsystem +_memory_object_create +_memory_object_create_named +_memory_object_data_initialize +_memory_object_data_request +_memory_object_data_return +_memory_object_data_unlock +_memory_object_deactivate_pages +_memory_object_deallocate +_memory_object_default_deallocate +_memory_object_default_reference +_memory_object_default_server +_memory_object_default_server_routine +_memory_object_destroy +_memory_object_get_attributes +_memory_object_init +_memory_object_iopl_request +_memory_object_lock_page +_memory_object_lock_request +_memory_object_name_server +_memory_object_name_server_routine +_memory_object_name_subsystem +_memory_object_page_op +_memory_object_range_op +_memory_object_recover_named +_memory_object_reference +_memory_object_release_name +_memory_object_server +_memory_object_server_routine +_memory_object_super_upl_request +_memory_object_synchronize +_memory_object_synchronize_completed +_memory_object_terminate +_memory_object_unmap +_memory_object_upl_request +_memset +_mig_buckets +_mig_dealloc_reply_port +_mig_e +_mig_get_reply_port +_mig_init +_mig_object_deallocate +_mig_object_destroy +_mig_object_init +_mig_object_no_senders +_mig_object_reference +_mig_put_reply_port +_mig_reply_size +_mig_strncpy +_mig_table_max_displ +_mig_user_allocate +_mig_user_deallocate +_min_pages_trigger_port +_min_rt_quantum +_min_std_quantum +_minimum_pages_remaining +_mk_timebase_info +_mk_timer_arm +_mk_timer_cancel +_mk_timer_create +_mk_timer_destroy +_mk_timer_init +_mk_timer_port_destroy +_ml_at_interrupt_context +_ml_cause_interrupt +_ml_cpu_get_info +_ml_get_interrupts_enabled +_ml_get_max_cpus +_ml_get_timebase +_ml_init_interrupt +_ml_init_max_cpus +_ml_install_interrupt_handler +_ml_io_map +_ml_phys_read +_ml_phys_read_64 +_ml_phys_read_byte +_ml_phys_read_byte_64 +_ml_phys_read_double +_ml_phys_read_double_64 +_ml_phys_read_half +_ml_phys_read_half_64 +_ml_phys_read_word +_ml_phys_read_word_64 +_ml_phys_write +_ml_phys_write_64 +_ml_phys_write_byte +_ml_phys_write_byte_64 +_ml_phys_write_double +_ml_phys_write_double_64 +_ml_phys_write_half +_ml_phys_write_half_64 +_ml_phys_write_word +_ml_phys_write_word_64 +_ml_probe_read +_ml_probe_read_64 +_ml_processor_register +_ml_set_interrupts_enabled +_ml_static_malloc +_ml_static_mfree +_ml_static_ptovirt +_ml_thread_policy +_ml_vtophys +_msg_ool_size_small +_msg_receive_error +_mutex_alloc +_mutex_free +_mutex_init +_mutex_lock +_mutex_lock_acquire +_mutex_lock_wait +_mutex_pause +_mutex_preblock +_mutex_preblock_wait +_mutex_try +_mutex_unlock +_mutex_unlock_wakeup +_my_name +_nanoseconds_to_absolutetime +_need_ast +_nestedpanic +_new_addr_hash +_new_obj_hash +_newtest +_no_dispatch_count +_noresume_on_disconnect +_norma_mk +_not_implemented +_null_port +_num_zones +_osfmk_osrelease +_osfmk_ostype +_osfmk_version 
+_osfmk_version_major +_osfmk_version_minor +_osfmk_version_variant +_page_mask +_page_shift +_page_size +_paging_segment_count +_paging_segment_max +_paging_segments +_paging_segments_lock +_panic +_panicDebugging +_panicDialogDesired +_panic_init +_panic_is_inited +_panic_lock +_panic_ui_initialize +_paniccpu +_panicstr +_panicwait +_pc_trace_buf +_pc_trace_cnt +_physical_transfer_cluster_count +_pmap_bootstrap +_pmap_change_wiring +_pmap_clear_modify +_pmap_clear_reference +_pmap_collect +_pmap_copy_page +_pmap_copy_part_page +_pmap_create +_pmap_destroy +_pmap_enter +_pmap_extract +_pmap_find_phys +_pmap_free_pages +_pmap_init +_pmap_initialized +_pmap_is_modified +_pmap_is_referenced +_pmap_map +_pmap_modify_pages +_pmap_next_page +_pmap_page_protect +_pmap_pageable +_pmap_protect +_pmap_reference +_pmap_remove +_pmap_remove_some_phys +_pmap_startup +_pmap_steal_memory +_pmap_sync_caches_phys +_pmap_verify_free +_pmap_virtual_space +_pmap_zero_page +_pmap_zero_part_page +_pmap_zone +_port_name_to_act +_port_name_to_clock +_port_name_to_semaphore +_port_name_to_task +_print_saved_state +_printf_init +_printf_lock +_processor_array +_processor_assign +_processor_control +_processor_doshutdown +_processor_exit +_processor_get_assignment +_processor_info +_processor_info_count +_processor_init +_processor_offline +_processor_ptr +_processor_server +_processor_server_routine +_processor_set_base +_processor_set_create +_processor_set_default +_processor_set_destroy +_processor_set_info +_processor_set_limit +_processor_set_max_priority +_processor_set_policy_control +_processor_set_policy_disable +_processor_set_policy_enable +_processor_set_server +_processor_set_server_routine +_processor_set_stack_usage +_processor_set_statistics +_processor_set_subsystem +_processor_set_tasks +_processor_set_things +_processor_set_threads +_processor_shutdown +_processor_start +_processor_subsystem +_prof_queue +_profile_kernel_services +_ps_allocate_cluster +_ps_clmap +_ps_clunmap +_ps_dealloc_vsmap +_ps_deallocate_cluster +_ps_delete +_ps_enter +_ps_map_extend +_ps_read_device +_ps_read_file +_ps_select_array +_ps_select_segment +_ps_vs_write_complete +_ps_vstruct_allocated_pages +_ps_vstruct_allocated_size +_ps_vstruct_create +_ps_vstruct_dealloc +_ps_vstruct_transfer_from_segment +_ps_write_device +_ps_write_file +_pset_add_processor +_pset_add_task +_pset_add_thread +_pset_deallocate +_pset_init +_pset_quanta_setup +_pset_reference +_pset_remove_processor +_pset_remove_task +_pset_remove_thread +_pset_sys_bootstrap +_pvs_cluster_read +_pvs_object_data_provided +_real_ncpus +_realhost +_reattach_wait +_ref_act_port_locked +_ref_pset_port_locked +_refresh_screen +_refunnel_hint +_refunnel_hint_enabled +_remove_all_shared_regions +_remove_default_shared_region +_retrieve_act_self_fast +_retrieve_task_self_fast +_return_on_panic +_root_paged_ledger +_root_wired_ledger +_rtclock_intr +_rtclock_reset +_run_queue_remove +_safe_gets +_sane_size +_save_waits +_sched_init +_sched_poll_yield_shift +_sched_safe_duration +_sched_tick +_sched_tick_init +_sched_tick_thread +_sched_tick_thread_continue +_sched_timebase_init +_sectDATAB +_sectLINKB +_sectPRELINKB +_sectSizeDATA +_sectSizeLINK +_sectSizePRELINK +_sectSizeTEXT +_sectTEXTB +_semaphore_convert_wait_result +_semaphore_create +_semaphore_dereference +_semaphore_destroy +_semaphore_init +_semaphore_max +_semaphore_reference +_semaphore_server +_semaphore_server_routine +_semaphore_signal +_semaphore_signal_all +_semaphore_signal_all_trap 
+_semaphore_signal_internal +_semaphore_signal_thread +_semaphore_signal_thread_trap +_semaphore_signal_trap +_semaphore_subsystem +_semaphore_timedwait +_semaphore_timedwait_continue +_semaphore_timedwait_signal +_semaphore_timedwait_signal_trap +_semaphore_timedwait_trap +_semaphore_wait +_semaphore_wait_continue +_semaphore_wait_internal +_semaphore_wait_signal +_semaphore_wait_signal_trap +_semaphore_wait_trap +_semaphore_zone +_set_be_bit +_set_bsdtask_info +_set_dp_control_port +_set_priority +_set_sched_pri +_set_state_handler +_setbit +_setup_main +_sfma_handle +_shared_com_boot_time_init +_shared_data_region_handle +_shared_file_available_hash_ele +_shared_file_boot_time_init +_shared_file_create_system_region +_shared_file_data_region +_shared_file_mapping_array +_shared_file_text_region +_shared_region_mapping_create +_shared_region_mapping_dealloc +_shared_region_mapping_info +_shared_region_mapping_ref +_shared_region_mapping_set_alt_next +_shared_region_object_chain_attach +_shared_text_region_handle +_slave_machine_init +_slave_main +_space_deallocate +_special_handler +_special_handler_continue +_split_funnel_off +_sprintf +_sprintf_lock +_sscanf +_stack_alloc +_stack_alloc_bndry +_stack_alloc_hits +_stack_alloc_hiwater +_stack_alloc_misses +_stack_alloc_total +_stack_alloc_try +_stack_cache_hits +_stack_collect +_stack_fake_zone_info +_stack_free +_stack_free_count +_stack_free_limit +_stack_free_max +_stack_free_stack +_stack_privilege +_stack_statistics +_start_cpu_thread +_start_def_pager +_start_kernel_threads +_startup_miss +_state_count +_std_quantum +_std_quantum_us +_strcat +_strcmp +_strcpy +_strncmp +_strncpy +_strprefix +_swap_act_map +_swap_task_map +_swapin_init +_swapin_lock +_swapin_queue +_swapin_thread +_swapin_thread_continue +_switch_act +_switch_act_swapins +_switch_debugger +_switch_to_serial_console +_switch_to_shutdown_context +_swtch +_swtch_continue +_swtch_pri +_swtch_pri_continue +_sysclk_config +_sysclk_getattr +_sysclk_gettime +_sysclk_init +_sysclk_ops +_sysclk_setalarm +_systemLogDiags +_task_act_iterate_wth_args +_task_assign +_task_assign_default +_task_backing_store_privileged +_task_collect_allowed +_task_collect_last_tick +_task_collect_max_rate +_task_collect_scan +_task_create +_task_create_internal +_task_deallocate +_task_get_assignment +_task_get_emulation_vector +_task_get_exception_ports +_task_get_special_port +_task_halt +_task_hold +_task_hold_locked +_task_importance +_task_info +_task_init +_task_is_classic +_task_policy +_task_policy_get +_task_policy_set +_task_reference +_task_reference_try +_task_release +_task_release_locked +_task_resume +_task_sample +_task_self_trap +_task_server +_task_server_routine +_task_set_emulation +_task_set_emulation_vector +_task_set_emulation_vector_internal +_task_set_exception_ports +_task_set_info +_task_set_ledger +_task_set_policy +_task_set_port_space +_task_set_ras_pc +_task_set_special_port +_task_subsystem +_task_suspend +_task_swap_exception_ports +_task_swappable +_task_synchronizer_destroy_all +_task_terminate +_task_terminate_internal +_task_threads +_task_wait_locked +_task_wire +_task_working_set_create +_task_zone +_test_tws +_testbit +_thread_abort +_thread_abort_safely +_thread_act_server +_thread_act_server_routine +_thread_act_subsystem +_thread_apc_clear +_thread_apc_set +_thread_assign +_thread_assign_default +_thread_bind +_thread_block +_thread_block_reason +_thread_bootstrap +_thread_bootstrap_return +_thread_call_allocate +_thread_call_cancel +_thread_call_enter 
+_thread_call_enter1 +_thread_call_enter1_delayed +_thread_call_enter_delayed +_thread_call_free +_thread_call_func +_thread_call_func_cancel +_thread_call_func_delayed +_thread_call_initialize +_thread_call_is_delayed +_thread_call_setup +_thread_cancel_timer +_thread_change_psets +_thread_continue +_thread_create +_thread_create_running +_thread_deallocate +_thread_depress_abort +_thread_depress_expire +_thread_dispatch +_thread_doreap +_thread_doswapin +_thread_dup +_thread_entrypoint +_thread_exception_return +_thread_get_assignment +_thread_get_cont_arg +_thread_get_exception_ports +_thread_get_special_port +_thread_get_state +_thread_getstatus +_thread_go_locked +_thread_hold +_thread_info +_thread_info_shuttle +_thread_init +_thread_invoke +_thread_lock_act +_thread_policy +_thread_policy_get +_thread_policy_set +_thread_quantum_expire +_thread_read_times +_thread_reaper_enqueue +_thread_reaper_init +_thread_reference +_thread_release +_thread_resume +_thread_run +_thread_sample +_thread_scan_enabled +_thread_select +_thread_self +_thread_self_trap +_thread_set_child +_thread_set_cont_arg +_thread_set_exception_ports +_thread_set_parent +_thread_set_policy +_thread_set_special_port +_thread_set_state +_thread_set_timer +_thread_set_timer_deadline +_thread_setrun +_thread_setstatus +_thread_should_abort +_thread_should_halt +_thread_sleep_funnel +_thread_sleep_lock_write +_thread_sleep_mutex +_thread_sleep_mutex_deadline +_thread_sleep_usimple_lock +_thread_stop +_thread_suspend +_thread_swap_exception_ports +_thread_swapin +_thread_switch +_thread_syscall_return +_thread_task_priority +_thread_terminate +_thread_terminate_internal +_thread_terminate_self +_thread_termination_continue +_thread_timer_expire +_thread_timer_setup +_thread_timer_terminate +_thread_unlock_act +_thread_unstop +_thread_userstack +_thread_wait +_thread_wakeup +_thread_wakeup_prim +_thread_wire +_timer_call_cancel +_timer_call_enter +_timer_call_enter1 +_timer_call_initialize +_timer_call_is_delayed +_timer_call_setup +_timer_call_shutdown +_timer_delta +_timer_grab +_timer_init +_timer_normalize +_timer_read +_trailer_template +_trap_type +_trigger_name_to_port +_tws_build_cluster +_tws_create_startup_list +_tws_expand_working_set +_tws_handle_startup_file +_tws_hash_clear +_tws_hash_create +_tws_hash_destroy +_tws_hash_line_clear +_tws_hash_ws_flush +_tws_insert +_tws_internal_lookup +_tws_internal_startup_send +_tws_line_signal +_tws_lookup +_tws_read_startup_file +_tws_send_startup_info +_tws_startup_list_lookup +_tws_test_for_community +_tws_traverse_address_hash_list +_tws_traverse_object_hash_list +_tws_write_startup_file +_udp_ttl +_update_default_shared_region +_update_priority +_upl_abort +_upl_abort_range +_upl_clear_dirty +_upl_commit +_upl_commit_range +_upl_deallocate +_upl_dirty_page +_upl_get_internal_pagelist_offset +_upl_offset_to_pagelist +_upl_page_present +_upl_phys_page +_upl_server +_upl_server_routine +_upl_set_dirty +_upl_subsystem +_upl_valid_page +_user_warned +_usimple_lock +_usimple_lock_init +_usimple_lock_try +_usimple_unlock +_vc_display_icon +_vc_progress_initialize +_vc_progress_lock +_vcattach +_vcputc +_verbose +_video_scroll_down +_video_scroll_up +_vinfo +_virtual_space_end +_virtual_space_start +_vm_accellerate_zf_pageout_trigger +_vm_allocate +_vm_allocate_cpm +_vm_allow_clustered_pagein +_vm_backing_store_disable +_vm_backing_store_low +_vm_behavior_set +_vm_conflict_check +_vm_copy +_vm_countdirtypages +_vm_deallocate +_vm_default_ahead +_vm_default_behind 
+_vm_external_copy +_vm_external_create +_vm_external_destroy +_vm_external_map_size +_vm_external_module_initialize +_vm_external_state_clr +_vm_external_state_set +_vm_external_within +_vm_fault +_vm_fault_cleanup +_vm_fault_copy +_vm_fault_copy_cleanup +_vm_fault_copy_dst_cleanup +_vm_fault_debug +_vm_fault_init +_vm_fault_list_request +_vm_fault_page +_vm_fault_unwire +_vm_fault_wire +_vm_fault_wire_fast +_vm_free_page_pause +_vm_get_shared_region +_vm_inherit +_vm_last_addr +_vm_machine_attribute +_vm_map +_vm_map_64 +_vm_map_aggressive_enter +_vm_map_aggressive_enter_max +_vm_map_behavior_set +_vm_map_check_protection +_vm_map_copy_copy +_vm_map_copy_discard +_vm_map_copy_overwrite +_vm_map_copy_overwrite_aligned +_vm_map_copy_overwrite_nested +_vm_map_copy_overwrite_unaligned +_vm_map_copy_zone +_vm_map_copyin_common +_vm_map_copyin_kernel_buffer +_vm_map_copyin_object +_vm_map_copyout +_vm_map_copyout_kernel_buffer +_vm_map_create +_vm_map_deallocate +_vm_map_delete +_vm_map_destroy +_vm_map_enter +_vm_map_entry_delete +_vm_map_entry_insert +_vm_map_entry_zone +_vm_map_find_space +_vm_map_fork +_vm_map_fork_copy +_vm_map_fork_share +_vm_map_get_phys_page +_vm_map_get_upl +_vm_map_inherit +_vm_map_init +_vm_map_kentry_zone +_vm_map_lookup_entry +_vm_map_lookup_locked +_vm_map_machine_attribute +_vm_map_overwrite_submap_recurse +_vm_map_page_query +_vm_map_pmap_enter +_vm_map_pmap_enter_enable +_vm_map_pmap_enter_print +_vm_map_protect +_vm_map_range_check +_vm_map_read_user +_vm_map_reference +_vm_map_region_replace +_vm_map_remove +_vm_map_server +_vm_map_server_routine +_vm_map_simplify +_vm_map_steal_memory +_vm_map_submap +_vm_map_submap_pmap_clean +_vm_map_subsystem +_vm_map_switch +_vm_map_unwire +_vm_map_unwire_nested +_vm_map_verify +_vm_map_wire +_vm_map_wire_nested +_vm_map_write_user +_vm_map_zone +_vm_mapped_pages_info +_vm_mem_bootstrap +_vm_mem_init +_vm_msync +_vm_object_absent_max +_vm_object_destroy +_vm_object_enter +_vm_object_hash_entry_free +_vm_object_iopl_request +_vm_object_page_map +_vm_object_page_remove_iterate +_vm_object_page_remove_lookup +_vm_object_pager_create +_vm_object_populate_with_private +_vm_object_shadow_check +_vm_object_sync +_vm_object_terminate_remove_all +_vm_object_update +_vm_page_activate +_vm_page_active_count +_vm_page_alloc +_vm_page_alloc_lock +_vm_page_bootstrap +_vm_page_bucket_count +_vm_page_bucket_hash +_vm_page_bucket_lock +_vm_page_buckets +_vm_page_convert +_vm_page_copy +_vm_page_create +_vm_page_deactivate +_vm_page_deactivate_behind +_vm_page_deactivate_hint +_vm_page_fictitious_addr +_vm_page_fictitious_count +_vm_page_free +_vm_page_free_count +_vm_page_free_count_init +_vm_page_free_count_minimum +_vm_page_free_list +_vm_page_free_min +_vm_page_free_reserve +_vm_page_free_reserved +_vm_page_free_target +_vm_page_free_verify +_vm_page_free_wanted +_vm_page_gobble +_vm_page_gobble_count +_vm_page_gobble_count_warning +_vm_page_grab +_vm_page_grab_count +_vm_page_grab_fictitious +_vm_page_hash_mask +_vm_page_hash_shift +_vm_page_inactive_count +_vm_page_inactive_target +_vm_page_init +_vm_page_insert +_vm_page_laundry_count +_vm_page_laundry_max +_vm_page_laundry_min +_vm_page_limbo_count +_vm_page_limbo_real_count +_vm_page_lookup +_vm_page_mask +_vm_page_module_init +_vm_page_more_fictitious +_vm_page_pages +_vm_page_part_copy +_vm_page_part_zero_fill +_vm_page_pin_count +_vm_page_preppin_lock +_vm_page_queue_active +_vm_page_queue_fictitious +_vm_page_queue_free +_vm_page_queue_free_lock +_vm_page_queue_inactive 
+_vm_page_queue_limbo +_vm_page_queue_lock +_vm_page_queue_zf +_vm_page_release +_vm_page_release_fictitious +_vm_page_remove +_vm_page_rename +_vm_page_replace +_vm_page_shift +_vm_page_template +_vm_page_ticket +_vm_page_ticket_roll +_vm_page_unwire +_vm_page_wait +_vm_page_wire +_vm_page_wire_count +_vm_page_wire_count_warning +_vm_page_zero_fill +_vm_page_zero_fill_lock +_vm_page_zone +_vm_pageclean_copy +_vm_pageclean_setup +_vm_pagein_cluster_unused +_vm_pagein_cluster_used +_vm_pageout +_vm_pageout_active +_vm_pageout_burst_max +_vm_pageout_burst_min +_vm_pageout_burst_wait +_vm_pageout_clean_active_pages +_vm_pageout_cluster +_vm_pageout_cluster_page +_vm_pageout_continue +_vm_pageout_dirty_no_pager +_vm_pageout_emergency_availability_request +_vm_pageout_empty_wait +_vm_pageout_in_place +_vm_pageout_inactive +_vm_pageout_inactive_absent +_vm_pageout_inactive_avoid +_vm_pageout_inactive_busy +_vm_pageout_inactive_clean +_vm_pageout_inactive_dirty +_vm_pageout_inactive_forced +_vm_pageout_inactive_nolock +_vm_pageout_inactive_throttled +_vm_pageout_inactive_used +_vm_pageout_initialize_page +_vm_pageout_object_allocate +_vm_pageout_object_terminate +_vm_pageout_out_of_line +_vm_pageout_pause_count +_vm_pageout_pause_max +_vm_pageout_reserved_internal +_vm_pageout_reserved_really +_vm_pageout_scan +_vm_pageout_scan_active_emm_throttle +_vm_pageout_scan_active_emm_throttle_failure +_vm_pageout_scan_active_emm_throttle_success +_vm_pageout_scan_continue +_vm_pageout_scan_event_counter +_vm_pageout_scan_inactive_emm_throttle +_vm_pageout_scan_inactive_emm_throttle_failure +_vm_pageout_scan_inactive_emm_throttle_success +_vm_pageout_setup +_vm_pageout_throttle +_vm_pool_low +_vm_protect +_vm_read +_vm_read_list +_vm_read_overwrite +_vm_region +_vm_region_64 +_vm_region_clone +_vm_region_count_obj_refs +_vm_region_look_for_page +_vm_region_object_create +_vm_region_recurse +_vm_region_recurse_64 +_vm_region_top_walk +_vm_region_walk +_vm_remap +_vm_remap_extract +_vm_remap_range_allocate +_vm_set_page_size +_vm_set_shared_region +_vm_stat +_vm_stat_discard +_vm_stat_discard_cleared_reply +_vm_stat_discard_cleared_too_late +_vm_stat_discard_cleared_unset +_vm_stat_discard_failure +_vm_stat_discard_sent +_vm_stat_discard_throttle +_vm_submap_object +_vm_upl_map +_vm_upl_unmap +_vm_wire +_vm_write +_vm_zf_count +_vm_zf_iterator +_vm_zf_iterator_count +_vnode_object_create +_vnode_pager_bootstrap +_vnode_pager_cluster_read +_vnode_pager_cluster_write +_vnode_pager_data_initialize +_vnode_pager_data_request +_vnode_pager_data_return +_vnode_pager_data_unlock +_vnode_pager_deallocate +_vnode_pager_get_object_size +_vnode_pager_init +_vnode_pager_lookup +_vnode_pager_reference +_vnode_pager_release_from_cache +_vnode_pager_setup +_vnode_pager_synchronize +_vnode_pager_terminate +_vnode_pager_unmap +_vnode_pager_workaround +_vnode_pager_zone +_vs_alloc_async +_vs_alloc_async_count +_vs_alloc_async_failed +_vs_async_free_list +_vs_cl_write_complete +_vs_cluster_transfer +_vs_cluster_write +_vs_do_async_write +_vs_free_async +_vs_get_map_entry +_vs_object_create +_vstruct_def_clshift +_vstruct_list +_vstruct_zone +_wait_queue_alloc +_wait_queue_assert_wait +_wait_queue_assert_wait64 +_wait_queue_free +_wait_queue_init +_wait_queue_link +_wait_queue_link_noalloc +_wait_queue_link_size +_wait_queue_member +_wait_queue_pull_thread_locked +_wait_queue_set_alloc +_wait_queue_set_free +_wait_queue_set_init +_wait_queue_set_size +_wait_queue_set_unlink_all +_wait_queue_set_unlink_all_nofree 
+_wait_queue_sub_clearrefs +_wait_queue_sub_init +_wait_queue_unlink +_wait_queue_unlink_all +_wait_queue_unlink_one +_wait_queue_unlinkall_nofree +_wait_queue_wakeup64_all +_wait_queue_wakeup64_one +_wait_queue_wakeup64_thread +_wait_queue_wakeup_all +_wait_queue_wakeup_one +_wait_queue_wakeup_thread +_wait_queues +_wait_queues_init +_wait_shift +_wait_subqueue_unlink_all +_wncpu +_zalloc +_zalloc_async +_zalloc_canblock +_zalloc_end_of_space +_zalloc_next_space +_zalloc_noblock +_zalloc_wasted_space +_zcram +_zdata +_zdata_size +_zfill +_zfree +_zget +_zget_space +_zget_space_lock +_zinit +_zone_bootstrap +_zone_change +_zone_check +_zone_free_count +_zone_gc +_zone_gc_allowed +_zone_gc_forced +_zone_gc_last_tick +_zone_gc_lock +_zone_gc_max_rate +_zone_init +_zone_map +_zone_map_max_address +_zone_map_min_address +_zone_page_alloc +_zone_page_collectable +_zone_page_init +_zone_page_keep +_zone_page_table +_zone_pages +_zone_steal_memory +_zone_zone +_zprealloc diff --git a/config/Mach.i386.exports b/config/Mach.i386.exports new file mode 100644 index 000000000..e69de29bb diff --git a/config/Mach.ppc.exports b/config/Mach.ppc.exports new file mode 100644 index 000000000..d1daff044 --- /dev/null +++ b/config/Mach.ppc.exports @@ -0,0 +1,581 @@ +Choke +ClearRealCall +CreateFakeDECCall +CreateFakeIOCall +CreateShutdownCTXCall +CutTrace +DoPreemptCall +LoadDBATsCall +LoadIBATsCall +NullCall +StoreRealCall +SwitchContextCall +_AlignAssist +_AlignAssist64 +_AltivecAssist +_Call_Debugger +_Call_DebuggerC +_Call_continuation +_ChokeSys +_ClearReal +_ClearRealLL +_CreateFakeDEC +_CreateFakeDECLL +_CreateFakeIO +_CreateFakeIOLL +_CreateShutdownCTX +_CreateShutdownCTXLL +_DebugWork +_DoChokeLL +_DoPreemptLL +_EmulExit +_Emulate +_Emulate64 +_ExceptionVectorsEnd +_ExceptionVectorsStart +_FCReturn +_FWtable +_FirmwareCall +_FixedStackEnd +_FixedStackStart +_FloatInit +_GratefulDebInit +_GratefulDebWork +_LLTraceSet +_LoadDBATs +_LoadIBATs +_MapUserAddressSpace +_MapUserAddressSpaceInit +_NMIss +_NullLL +_PFSExit +_PPCcalls +_QNaNbarbarian +_ReadReal +_ReleaseUserAddressSpace +_ResetHandler +_RuptCtrs +_StoreReal +_StoreRealLL +_SwitchContextLL +_SysChoked +__start_cpu +_aaFPopTable +_atomic_switch_syscall +_atomic_switch_trap +_backchain +_backpocket +_bbSetRupt +_bb_disable_bluebox +_bb_enable_bluebox +_bb_settaskenv +_bcopy_64 +_bcopy_970 +_bcopy_g3 +_bcopy_g4 +_bcopy_nc +_bcopy_physvir +_bigcopy_970 +_boot_args_buf +_bzero_128 +_bzero_32 +_bzero_nc +_cacheDisable +_cacheInit +_cbfpend +_cbfr +_chandler +_checkBogus +_checkNMI +_clock_delay_until +_clock_gettimeofday +_cnputcusr +_cntlzw +_commPagePtr +_commpage_flush_dcache +_commpage_flush_icache +_commpage_set_timestamp +_commpage_stuff +_commpage_time_dcba +_condStop +_cons_ops +_cons_ops_index +_consider_mapping_adjust +_console_chan_default +_console_is_serial +_console_unit +_copyin_multiple +_copyout_multiple +_cpu_doshutdown +_cpu_signal +_cpu_sync_timebase +_cpus_holding_bkpts +_current_free_region +_cursor_pmap +_db_breakpoints_inserted +_db_im_stepping +_db_recover +_db_run_mode +_dbfloats +_dbgCkpt +_dbgCkptLL +_dbgDisp +_dbgDispLL +_dbgRegsLL +_dbgTrace +_dbspecrs +_dbvecs +_debcnputc +_debsave0 +_debstack +_debstack_top_ss +_debstackptr +_debugNoop +_debugbackpocket +_debugger_active +_debugger_cpu +_debugger_debug +_debugger_holdoff +_debugger_is_slave +_debugger_lock +_debugger_pending +_debugger_sync +_delay_for_interval +_dgVideo +_dgWork +_diagCall +_diagTrap +_disable_bluebox_internal +_doexception +_dump_backtrace 
+_dump_savearea +_enter_funnel_section +_env_buf +_exception_end +_exception_entry +_exception_exit +_exit_funnel_section +_extPatch32 +_extPatchMCK +_failNames +_fastexit +_fctx_test +_find_user_fpu +_find_user_regs +_find_user_vec +_first_free_virt +_forcenap +_fpu_save +_fpu_switch +_free_mappings +_free_pmap_count +_free_pmap_list +_free_pmap_lock +_free_pmap_max +_fwEmMck +_fwSCCinit +_fwSCOM +_get_got +_get_msr_exportmask +_get_msr_nbits +_get_msr_rbits +_get_preemption_level +_get_simple_lock_count +_getrpc +_gettimeofday_32 +_gettimeofday_64 +_handleDSeg +_handleISeg +_handlePF +_hash_table_base +_hash_table_size +_hid0get64 +_hw_add_map +_hw_blow_seg +_hw_cpu_sync +_hw_cpu_wcng +_hw_dequeue_atomic +_hw_find_map +_hw_find_space +_hw_hash_init +_hw_lock_bit +_hw_lock_mbits +_hw_map_seg +_hw_perfmon_lock +_hw_protect +_hw_purge_map +_hw_purge_phys +_hw_purge_space +_hw_queue_atomic +_hw_queue_atomic_list +_hw_rem_map +_hw_set_user_space +_hw_set_user_space_dis +_hw_setup_trans +_hw_start_trans +_hw_test_rc +_hw_unlock_bit +_hw_walk_phys +_hwulckPatch_eieio +_hwulckPatch_isync +_hwulckbPatch_eieio +_hwulckbPatch_isync +_iNullLL +_ignore_zero_fault +_ihandler +_ihandler_ret +_incrVSID +_initialize_serial +_interrupt +_interrupt_disable +_interrupt_enable +_intstack_top_ss +_invalidateSegs +_invalidate_dcache +_invalidate_dcache64 +_invxcption +_isync_mfdec +_kdb_trap +_kdp_backtrace +_kdp_copy_phys +_kdp_dabr +_kdp_noisy +_kdp_pmap +_kdp_print_backtrace +_kdp_print_registers +_kdp_sr_dump +_kdp_trans_off +_kdp_trap +_kdp_trap_codes +_kdp_vtophys +_kernel_args_buf +_kernel_pmap_phys +_killprint +_killresv +_lastTrace +_lock_debugger +_lowGlo +_mach_absolute_time_32 +_mach_absolute_time_64 +_machine_act_terminate +_machine_clock_assist +_machine_conf +_machine_idle_ppc +_machine_idle_ret +_mapCtl +_mapInsert +_mapLog +_mapRemove +_mapSearch +_mapSearchFull +_mapSetLists +_mapSetUp +_mapSkipListVerify +_mapSkipListVerifyC +_mapalc1 +_mapalc2 +_mapdebug +_mapping_adjust +_mapping_adjust_call +_mapping_alloc +_mapping_clr_mod +_mapping_clr_ref +_mapping_drop_busy +_mapping_fake_zone_info +_mapping_find +_mapping_free +_mapping_free_init +_mapping_free_prime +_mapping_init +_mapping_make +_mapping_map +_mapping_p2v +_mapping_phys_lookup +_mapping_phys_unused +_mapping_prealloc +_mapping_protect +_mapping_protect_phys +_mapping_relpre +_mapping_remove +_mapping_set_ref +_mapping_tst_mod +_mapping_tst_ref +_mapping_verify +_mappingdeb0 +_mappingdeb1 +_max_cpus_initialized +_mem_actual +_mfdar +_mflr +_mfmmcr0 +_mfmmcr1 +_mfmmcr2 +_mfmsr +_mfpmc1 +_mfpmc2 +_mfpmc3 +_mfpmc4 +_mfpvr +_mfrtcl +_mfrtcu +_mfsda +_mfsia +_mfsrin +_mftb +_mftbu +_ml_enable_cache_level +_ml_enable_nap +_ml_ppc_sleep +_ml_probe_read_mck +_ml_probe_read_mck_64 +_ml_read_temp +_ml_restore +_ml_sense_nmi +_ml_set_physical +_ml_set_physical_disabled +_ml_set_physical_get_ffs +_ml_set_processor_speed +_ml_set_processor_voltage +_ml_set_translation_off +_ml_thrm_init +_ml_thrm_set +_ml_throttle +_mtdar +_mtdec +_mtmmcr0 +_mtmmcr1 +_mtmmcr2 +_mtmsr +_mtpmc1 +_mtpmc2 +_mtpmc3 +_mtpmc4 +_mtsdr1 +_mtsrin +_mulckPatch_eieio +_mulckPatch_isync +_mutex_unlock_rwcmb +_packAsc +_patch_table +_pbtcnt +_pbtcpu +_pbtlock +_per_proc_info +_perfIntHook +_perfTrapHook +_perfmon_acquire_facility +_perfmon_clear_counters +_perfmon_control +_perfmon_disable +_perfmon_enable +_perfmon_handle_pmi +_perfmon_init +_perfmon_read_counters +_perfmon_release_facility +_perfmon_set_event +_perfmon_set_event_func +_perfmon_set_tbsel 
+_perfmon_set_threshold +_perfmon_start_counters +_perfmon_stop_counters +_perfmon_write_counters +_phys_copy +_phys_table +_phystokv +_pmapTrans +_pmap_activate +_pmap_add_physical_memory +_pmap_attribute +_pmap_attribute_cache_sync +_pmap_boot_map +_pmap_canExecute +_pmap_deactivate +_pmap_find_physentry +_pmap_map_block +_pmap_map_block_rc +_pmap_mem_regions +_pmap_mem_regions_count +_pmap_nest +_pmap_switch +_pmap_unnest +_powermac_scc_get_datum +_powermac_scc_set_datum +_ppcNull +_ppcNullinst +_ppc_checkthreadstate +_ppc_init +_ppc_init_cpu +_ppc_max_adrsp +_ppc_max_pmaps +_ppc_usimple_lock +_ppc_usimple_lock_init +_ppc_usimple_lock_try +_ppc_usimple_unlock_rwcmb +_ppc_usimple_unlock_rwmb +_ppc_vm_cpu_init +_ppc_vm_init +_ppcscret +_pper_proc_info +_print_backtrace +_pthread_getspecific_sprg3 +_pthread_getspecific_uftrap +_pthread_self_sprg3 +_pthread_self_uftrap +_resetPOR +_resethandler_target +_retFromVM +_rtclock_decrementer_min +_save_adjust +_save_alloc +_save_cpv +_save_fake_zone_info +_save_get +_save_get_init +_save_get_phys_32 +_save_get_phys_64 +_save_queue +_save_recover +_save_release +_save_ret +_save_ret_phys +_save_ret_wMSR +_save_trim_free +_saveanchor +_savearea_init +_scc_funnel_initted +_scc_getc +_scc_param +_scc_parm_done +_scc_probe +_scc_putc +_scc_softc +_scc_std +_scc_stomp +_scc_tty +_scc_uses_modem_control +_sconowner +_sectKLDB +_sectSizeKLD +_serial_initted +_serial_keyboard_init +_serial_keyboard_poll +_serial_keyboard_start +_serialmode +_setPmon +_set_machine_current_act +_shadow_BAT +_shandler +_sharedPage +_sharedPmap +_spinlock_32_lock_mp +_spinlock_32_lock_up +_spinlock_32_try_mp +_spinlock_32_try_up +_spinlock_32_unlock_mp +_spinlock_32_unlock_up +_spinlock_64_lock_mp +_spinlock_64_lock_up +_spinlock_64_try_mp +_spinlock_64_try_up +_spinlock_64_unlock_mp +_spinlock_64_unlock_up +_spinlock_relinquish +_stFloat +_stSpecrs +_stVectors +_static_memory_end +_sulckPatch_eieio +_sulckPatch_isync +_switchIntoVM +_switchSegs +_switch_in +_switch_to_old_console +_switch_to_video_console +_syncClkSpot +_sync_cache +_sync_cache64 +_sync_cache_virtual +_sync_ppage +_syscall_error +_syscall_notify_interrupt +_syscall_trace +_syscall_trace_end +_taproot_addr +_taproot_size +_testPerfTrap +_thandler +_thread_adjuserstack +_thread_enable_fpe +_thread_setentrypoint +_thread_setuserstack +_tlbie +_toss_live_fpu +_toss_live_vec +_trap +_trcWork +_tstbit +_unlock_debugger +_vcgetc +_vec_save +_vec_switch +_vm_max_address +_vm_max_physical +_vmm_dispatch +_vmm_dispatch_table +_vmm_execute_vm +_vmm_exit +_vmm_fam_exc +_vmm_fam_pf +_vmm_fam_reserved +_vmm_force_exit +_vmm_get_XA +_vmm_get_adsp +_vmm_get_entry +_vmm_get_features +_vmm_get_features_sel +_vmm_get_float_state +_vmm_get_page_dirty_flag +_vmm_get_page_dirty_flag32 +_vmm_get_page_mapping +_vmm_get_page_mapping32 +_vmm_get_timer +_vmm_get_vector_state +_vmm_get_version +_vmm_get_version_sel +_vmm_init_context +_vmm_init_context_sel +_vmm_interrupt +_vmm_map_execute +_vmm_map_execute32 +_vmm_map_list +_vmm_map_list32 +_vmm_map_list64 +_vmm_map_page +_vmm_map_page32 +_vmm_max_addr +_vmm_protect_execute +_vmm_protect_execute32 +_vmm_protect_page +_vmm_protect_page32 +_vmm_set_XA +_vmm_set_timer +_vmm_stop_vm +_vmm_tear_down_all +_vmm_tear_down_context +_vmm_timer_pop +_vmm_ufp +_vmm_unmap_all_pages +_vmm_unmap_list +_vmm_unmap_page +_vmm_unmap_page32 +_xLoadDBATsLL +_xLoadIBATsLL +dbgCkptCall +dbgDispCall +dbgRegsCall +debstash +fwdisplock +hexTab +hexfont +iNullCall + diff --git a/config/Makefile b/config/Makefile 
index 5e4c73ade..36d0759dd 100644
--- a/config/Makefile
+++ b/config/Makefile
@@ -25,22 +25,73 @@
 COMP_SUBDIRS =
 INST_SUBDIRS =
+
 INSTALL_DATA_LIST= \
-	System.kext/Contents/Info.plist \
-	System.kext/Contents/PlugIns/AppleNMI.kext/Contents/Info.plist \
-	System.kext/Contents/PlugIns/ApplePlatformFamily.kext/Contents/Info.plist \
-	System.kext/Contents/PlugIns/BSDKernel.kext/Contents/Info.plist \
-	System.kext/Contents/PlugIns/IOADBFamily.kext/Contents/Info.plist \
-	System.kext/Contents/PlugIns/IOKit.kext/Contents/Info.plist \
-	System.kext/Contents/PlugIns/IONVRAMFamily.kext/Contents/Info.plist \
-	System.kext/Contents/PlugIns/IOSystemManagement.kext/Contents/Info.plist \
-	System.kext/Contents/PlugIns/Libkern.kext/Contents/Info.plist \
-	System.kext/Contents/PlugIns/Mach.kext/Contents/Info.plist
+	System.kext/Info.plist \
+	System.kext/PlugIns/Libkern.kext/Info.plist \
+	System.kext/PlugIns/Mach.kext/Info.plist \
+	System.kext/PlugIns/BSDKernel.kext/Info.plist \
+	System.kext/PlugIns/IOKit.kext/Info.plist \
+	System.kext/PlugIns/AppleNMI.kext/Info.plist \
+	System.kext/PlugIns/ApplePlatformFamily.kext/Info.plist \
+	System.kext/PlugIns/IONVRAMFamily.kext/Info.plist \
+	System.kext/PlugIns/IOSystemManagement.kext/Info.plist \
+	\
+	System.kext/PlugIns/System6.0.kext/Info.plist \
+	System.kext/PlugIns/Libkern6.0.kext/Info.plist \
+	System.kext/PlugIns/Mach6.0.kext/Info.plist \
+	System.kext/PlugIns/BSDKernel6.0.kext/Info.plist \
+	System.kext/PlugIns/IOKit6.0.kext/Info.plist \
+
 INSTALL_DATA_DIR= \
 	/System/Library/Extensions/
 INSTMAN_SUBDIRS =
+#
+
+KEXT_CREATE_SYMBOL_SET = /usr/local/bin/kextsymboltool
+
+SYMBOL_COMPONENT_LIST = \
+	System6.0 \
+	BSDKernel \
+	IOKit \
+	Libkern \
+	Mach
+
+SYMBOL_SET_BUILD = $(foreach set, $(SYMBOL_COMPONENT_LIST), $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(set).symbolset)
+SYMBOL_SET_FAT = $(foreach set, $(SYMBOL_COMPONENT_LIST), $(OBJROOT)/$(set).symbolset)
+
+## .SUFFIXES: .symbolset .symbollist
+
+$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/allsymbols: $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/mach_kernel
+	nm -gj $< > $@
+
+$(SYMBOL_SET_BUILD): $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/%.symbolset : %.exports %.$(ARCH_CONFIG_LC).exports $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/allsymbols
+	$(KEXT_CREATE_SYMBOL_SET) \
+		-arch $(ARCH_CONFIG_LC) \
+		-import $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/allsymbols \
+		-export $*.exports \
+		-export $*.$(ARCH_CONFIG_LC).exports \
+		-output $@;
+
+$(SYMBOL_SET_FAT): $(OBJROOT)/%.symbolset : $(foreach arch, $(INSTALL_ARCHS), $(OBJROOT)/$(KERNEL_CONFIG)_$(arch)/%.symbolset)
+	$(LIPO) $(foreach arch, $(INSTALL_ARCHS), $(OBJROOT)/$(KERNEL_CONFIG)_$(arch)/$*.symbolset) -create -output $@;
+
+
+build_symbol_sets: $(SYMBOL_SET_BUILD)
+
+install_symbol_sets: $(SYMBOL_SET_FAT)
+	install $(INSTALL_FLAGS) $(OBJROOT)/System6.0.symbolset $(DSTROOT)/$(INSTALL_DATA_DIR)/System.kext/PlugIns/System6.0.kext/kernel.6.0;
+	install $(INSTALL_FLAGS) $(OBJROOT)/BSDKernel.symbolset $(DSTROOT)/$(INSTALL_DATA_DIR)/System.kext/PlugIns/BSDKernel.kext/BSDKernel;
+	install $(INSTALL_FLAGS) $(OBJROOT)/IOKit.symbolset $(DSTROOT)/$(INSTALL_DATA_DIR)/System.kext/PlugIns/IOKit.kext/IOKit;
+	install $(INSTALL_FLAGS) $(OBJROOT)/Libkern.symbolset $(DSTROOT)/$(INSTALL_DATA_DIR)/System.kext/PlugIns/Libkern.kext/Libkern;
+	install $(INSTALL_FLAGS) $(OBJROOT)/Mach.symbolset $(DSTROOT)/$(INSTALL_DATA_DIR)/System.kext/PlugIns/Mach.kext/Mach;
+
+do_build_all: build_symbol_sets
+
+do_build_install: install_symbol_sets
+
 include $(MakeInc_rule)
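The new rules above wire the kext symbol sets into the kernel build: nm dumps every global symbol the kernel actually defines, kextsymboltool intersects that dump with the per-component .exports lists (the long symbol listings in this patch) to produce one .symbolset binary per architecture, and lipo merges the per-architecture results into the fat files installed under System.kext. As a rough sketch of the manual equivalent for a single component on ppc (the object paths are illustrative placeholders, not the literal $(OBJROOT) layout):

	# Hedged sketch of what the make rules do for the Mach component on ppc.
	# Object paths are placeholders; tool flags are taken from the rules above.
	nm -gj obj/RELEASE_PPC/mach_kernel > obj/RELEASE_PPC/allsymbols
	/usr/local/bin/kextsymboltool \
		-arch ppc \
		-import obj/RELEASE_PPC/allsymbols \
		-export Mach.exports \
		-export Mach.ppc.exports \
		-output obj/RELEASE_PPC/Mach.symbolset
	lipo obj/RELEASE_PPC/Mach.symbolset -create -output obj/Mach.symbolset

Each .exports file, such as the Mach.ppc.exports listing above, holds one linker symbol per line: C symbols keep their leading underscore (e.g. _vm_map_enter), while the __ZN... entries are mangled C++ names.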
include $(MakeInc_dir) diff --git a/config/System.kext/Contents/Info.plist b/config/System.kext/Info.plist similarity index 85% rename from config/System.kext/Contents/Info.plist rename to config/System.kext/Info.plist index 934093d68..e259100dd 100644 --- a/config/System.kext/Contents/Info.plist +++ b/config/System.kext/Info.plist @@ -5,7 +5,7 @@ CFBundleDevelopmentRegion English CFBundleGetInfoString - System Resource Pseudoextension, Apple Computer Inc, 6.8 + System Resource Pseudoextension, Apple Computer Inc, 7.0 CFBundleIdentifier com.apple.kernel CFBundleInfoDictionaryVersion @@ -15,13 +15,13 @@ CFBundlePackageType KEXT CFBundleShortVersionString - 6.8 + 7.0 CFBundleSignature ???? CFBundleVersion - 6.8 + 7.0 OSBundleCompatibleVersion - 6.8 + 7.0 OSBundleRequired Root OSKernelResource diff --git a/config/System.kext/Contents/PlugIns/AppleNMI.kext/Contents/Info.plist b/config/System.kext/PlugIns/AppleNMI.kext/Info.plist similarity index 87% rename from config/System.kext/Contents/PlugIns/AppleNMI.kext/Contents/Info.plist rename to config/System.kext/PlugIns/AppleNMI.kext/Info.plist index ceed00437..8bb6e3818 100644 --- a/config/System.kext/Contents/PlugIns/AppleNMI.kext/Contents/Info.plist +++ b/config/System.kext/PlugIns/AppleNMI.kext/Info.plist @@ -5,7 +5,7 @@ CFBundleDevelopmentRegion English CFBundleGetInfoString - AppleNMI Pseudoextension, Apple Computer Inc, 6.8 + AppleNMI Pseudoextension, Apple Computer Inc, 7.0 CFBundleIdentifier com.apple.driver.AppleNMI CFBundleInfoDictionaryVersion @@ -15,11 +15,11 @@ CFBundlePackageType KEXT CFBundleShortVersionString - 6.8 + 7.0 CFBundleSignature ???? CFBundleVersion - 6.8 + 7.0 OSBundleRequired Root OSKernelResource diff --git a/config/System.kext/Contents/PlugIns/ApplePlatformFamily.kext/Contents/Info.plist b/config/System.kext/PlugIns/ApplePlatformFamily.kext/Info.plist similarity index 93% rename from config/System.kext/Contents/PlugIns/ApplePlatformFamily.kext/Contents/Info.plist rename to config/System.kext/PlugIns/ApplePlatformFamily.kext/Info.plist index d669dfde8..553282439 100644 --- a/config/System.kext/Contents/PlugIns/ApplePlatformFamily.kext/Contents/Info.plist +++ b/config/System.kext/PlugIns/ApplePlatformFamily.kext/Info.plist @@ -5,7 +5,7 @@ CFBundleDevelopmentRegion English CFBundleGetInfoString - Apple Platform Family Pseudoextension, Apple Computer Inc, 6.8 + Apple Platform Family Pseudoextension, Apple Computer Inc, 7.0 CFBundleIdentifier com.apple.iokit.ApplePlatformFamily CFBundleInfoDictionaryVersion @@ -15,11 +15,11 @@ CFBundlePackageType KEXT CFBundleShortVersionString - 6.8 + 7.0 CFBundleSignature ???? 
CFBundleVersion - 6.8 + 7.0 OSBundleCompatibleVersion 1.0 OSBundleRequired diff --git a/config/System.kext/Contents/PlugIns/IOADBFamily.kext/Contents/Info.plist b/config/System.kext/PlugIns/BSDKernel.kext/Info.plist similarity index 72% rename from config/System.kext/Contents/PlugIns/IOADBFamily.kext/Contents/Info.plist rename to config/System.kext/PlugIns/BSDKernel.kext/Info.plist index eb31436b2..a77384459 100644 --- a/config/System.kext/Contents/PlugIns/IOADBFamily.kext/Contents/Info.plist +++ b/config/System.kext/PlugIns/BSDKernel.kext/Info.plist @@ -4,24 +4,26 @@ CFBundleDevelopmentRegion English + CFBundleExecutable + BSDKernel CFBundleGetInfoString - ADB Family Pseudoextension, Apple Computer Inc, 6.8 + BSD Kernel Pseudoextension, Apple Computer Inc, 7.0 CFBundleIdentifier - com.apple.iokit.IOADBFamily + com.apple.kpi.bsd CFBundleInfoDictionaryVersion 6.0 CFBundleName - ADB Family Pseudoextension + BSD Kernel Pseudoextension CFBundlePackageType KEXT CFBundleShortVersionString - 6.8 + 7.0 CFBundleSignature ???? CFBundleVersion - 6.8 + 7.0 OSBundleCompatibleVersion - 1.0.0b1 + 7.0 OSBundleRequired Root OSKernelResource diff --git a/config/System.kext/Contents/PlugIns/BSDKernel.kext/Contents/Info.plist b/config/System.kext/PlugIns/BSDKernel6.0.kext/Info.plist similarity index 80% rename from config/System.kext/Contents/PlugIns/BSDKernel.kext/Contents/Info.plist rename to config/System.kext/PlugIns/BSDKernel6.0.kext/Info.plist index 56773f0dc..dd1a2c3a0 100644 --- a/config/System.kext/Contents/PlugIns/BSDKernel.kext/Contents/Info.plist +++ b/config/System.kext/PlugIns/BSDKernel6.0.kext/Info.plist @@ -5,7 +5,7 @@ CFBundleDevelopmentRegion English CFBundleGetInfoString - BSD Kernel Pseudoextension, Apple Computer Inc, 6.8 + BSD Kernel Pseudoextension, Apple Computer Inc, 6.9.9 CFBundleIdentifier com.apple.kernel.bsd CFBundleInfoDictionaryVersion @@ -15,15 +15,17 @@ CFBundlePackageType KEXT CFBundleShortVersionString - 6.8 + 6.9.9 CFBundleSignature ???? CFBundleVersion - 6.8 + 6.9.9 OSBundleCompatibleVersion 1.1 OSBundleRequired Root + OSBundleSharedExecutableIdentifier + com.apple.kernel.6.0 OSKernelResource diff --git a/config/System.kext/PlugIns/IOKit.kext/Info.plist b/config/System.kext/PlugIns/IOKit.kext/Info.plist new file mode 100644 index 000000000..236a023ea --- /dev/null +++ b/config/System.kext/PlugIns/IOKit.kext/Info.plist @@ -0,0 +1,32 @@ + + + + + CFBundleDevelopmentRegion + English + CFBundleExecutable + IOKit + CFBundleGetInfoString + I/O Kit Pseudoextension, Apple Computer Inc, 7.0 + CFBundleIdentifier + com.apple.kpi.iokit + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + I/O Kit Pseudoextension + CFBundlePackageType + KEXT + CFBundleShortVersionString + 7.0 + CFBundleSignature + ???? 
+ CFBundleVersion + 7.0 + OSBundleCompatibleVersion + 7.0 + OSBundleRequired + Root + OSKernelResource + + + diff --git a/config/System.kext/Contents/PlugIns/IOKit.kext/Contents/Info.plist b/config/System.kext/PlugIns/IOKit6.0.kext/Info.plist similarity index 80% rename from config/System.kext/Contents/PlugIns/IOKit.kext/Contents/Info.plist rename to config/System.kext/PlugIns/IOKit6.0.kext/Info.plist index 4f3235a17..8c1f742b4 100644 --- a/config/System.kext/Contents/PlugIns/IOKit.kext/Contents/Info.plist +++ b/config/System.kext/PlugIns/IOKit6.0.kext/Info.plist @@ -5,7 +5,7 @@ CFBundleDevelopmentRegion English CFBundleGetInfoString - I/O Kit Pseudoextension, Apple Computer Inc, 6.8 + I/O Kit Pseudoextension, Apple Computer Inc, 6.9.9 CFBundleIdentifier com.apple.kernel.iokit CFBundleInfoDictionaryVersion @@ -15,15 +15,17 @@ CFBundlePackageType KEXT CFBundleShortVersionString - 6.8 + 6.9.9 CFBundleSignature ???? CFBundleVersion - 6.8 + 6.9.9 OSBundleCompatibleVersion 1.0.0b1 OSBundleRequired Root + OSBundleSharedExecutableIdentifier + com.apple.kernel.6.0 OSKernelResource diff --git a/config/System.kext/Contents/PlugIns/IONVRAMFamily.kext/Contents/Info.plist b/config/System.kext/PlugIns/IONVRAMFamily.kext/Info.plist similarity index 88% rename from config/System.kext/Contents/PlugIns/IONVRAMFamily.kext/Contents/Info.plist rename to config/System.kext/PlugIns/IONVRAMFamily.kext/Info.plist index 43ef3258d..d29160140 100644 --- a/config/System.kext/Contents/PlugIns/IONVRAMFamily.kext/Contents/Info.plist +++ b/config/System.kext/PlugIns/IONVRAMFamily.kext/Info.plist @@ -5,7 +5,7 @@ CFBundleDevelopmentRegion English CFBundleGetInfoString - AppleNMI Pseudoextension, Apple Computer Inc, 6.8 + AppleNMI Pseudoextension, Apple Computer Inc, 7.0 CFBundleIdentifier com.apple.iokit.IONVRAMFamily CFBundleInfoDictionaryVersion @@ -15,11 +15,11 @@ CFBundlePackageType KEXT CFBundleShortVersionString - 6.8 + 7.0 CFBundleSignature ???? CFBundleVersion - 6.8 + 7.0 OSBundleCompatibleVersion 1.1 OSBundleRequired diff --git a/config/System.kext/Contents/PlugIns/IOSystemManagement.kext/Contents/Info.plist b/config/System.kext/PlugIns/IOSystemManagement.kext/Info.plist similarity index 94% rename from config/System.kext/Contents/PlugIns/IOSystemManagement.kext/Contents/Info.plist rename to config/System.kext/PlugIns/IOSystemManagement.kext/Info.plist index 885815478..9be693d71 100644 --- a/config/System.kext/Contents/PlugIns/IOSystemManagement.kext/Contents/Info.plist +++ b/config/System.kext/PlugIns/IOSystemManagement.kext/Info.plist @@ -5,7 +5,7 @@ CFBundleDevelopmentRegion English CFBundleGetInfoString - System Management Pseudoextension, Apple Computer Inc, 6.8 + System Management Pseudoextension, Apple Computer Inc, 7.0 CFBundleIdentifier com.apple.iokit.IOSystemManagementFamily CFBundleInfoDictionaryVersion @@ -15,11 +15,11 @@ CFBundlePackageType KEXT CFBundleShortVersionString - 6.8 + 7.0 CFBundleSignature ???? 
CFBundleVersion - 6.8 + 7.0 OSBundleCompatibleVersion 1.0.0b1 OSBundleRequired diff --git a/config/System.kext/PlugIns/Libkern.kext/Info.plist b/config/System.kext/PlugIns/Libkern.kext/Info.plist new file mode 100644 index 000000000..c88f26282 --- /dev/null +++ b/config/System.kext/PlugIns/Libkern.kext/Info.plist @@ -0,0 +1,32 @@ + + + + + CFBundleDevelopmentRegion + English + CFBundleExecutable + Libkern + CFBundleGetInfoString + Libkern Pseudoextension, Apple Computer Inc, 7.0 + CFBundleIdentifier + com.apple.kpi.libkern + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + Libkern Pseudoextension + CFBundlePackageType + KEXT + CFBundleShortVersionString + 7.0 + CFBundleSignature + ???? + CFBundleVersion + 7.0 + OSBundleCompatibleVersion + 7.0 + OSBundleRequired + Root + OSKernelResource + + + diff --git a/config/System.kext/Contents/PlugIns/Libkern.kext/Contents/Info.plist b/config/System.kext/PlugIns/Libkern6.0.kext/Info.plist similarity index 80% rename from config/System.kext/Contents/PlugIns/Libkern.kext/Contents/Info.plist rename to config/System.kext/PlugIns/Libkern6.0.kext/Info.plist index dd3f72585..03a831442 100644 --- a/config/System.kext/Contents/PlugIns/Libkern.kext/Contents/Info.plist +++ b/config/System.kext/PlugIns/Libkern6.0.kext/Info.plist @@ -5,7 +5,7 @@ CFBundleDevelopmentRegion English CFBundleGetInfoString - Libkern Pseudoextension, Apple Computer Inc, 6.8 + Libkern Pseudoextension, Apple Computer Inc, 6.9.9 CFBundleIdentifier com.apple.kernel.libkern CFBundleInfoDictionaryVersion @@ -15,15 +15,17 @@ CFBundlePackageType KEXT CFBundleShortVersionString - 6.8 + 6.9.9 CFBundleSignature ???? CFBundleVersion - 6.8 + 6.9.9 OSBundleCompatibleVersion 1.0.0b1 OSBundleRequired Root + OSBundleSharedExecutableIdentifier + com.apple.kernel.6.0 OSKernelResource diff --git a/config/System.kext/PlugIns/Mach.kext/Info.plist b/config/System.kext/PlugIns/Mach.kext/Info.plist new file mode 100644 index 000000000..ee1c118ba --- /dev/null +++ b/config/System.kext/PlugIns/Mach.kext/Info.plist @@ -0,0 +1,32 @@ + + + + + CFBundleDevelopmentRegion + English + CFBundleExecutable + Mach + CFBundleGetInfoString + Mach Kernel Pseudoextension, Apple Computer Inc, 7.0 + CFBundleIdentifier + com.apple.kpi.mach + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + Mach Kernel Pseudoextension + CFBundlePackageType + KEXT + CFBundleShortVersionString + 7.0 + CFBundleSignature + ???? + CFBundleVersion + 7.0 + OSBundleCompatibleVersion + 7.0 + OSBundleRequired + Root + OSKernelResource + + + diff --git a/config/System.kext/Contents/PlugIns/Mach.kext/Contents/Info.plist b/config/System.kext/PlugIns/Mach6.0.kext/Info.plist similarity index 80% rename from config/System.kext/Contents/PlugIns/Mach.kext/Contents/Info.plist rename to config/System.kext/PlugIns/Mach6.0.kext/Info.plist index d25c6b996..0e607e92c 100644 --- a/config/System.kext/Contents/PlugIns/Mach.kext/Contents/Info.plist +++ b/config/System.kext/PlugIns/Mach6.0.kext/Info.plist @@ -5,7 +5,7 @@ CFBundleDevelopmentRegion English CFBundleGetInfoString - Mach Kernel Pseudoextension, Apple Computer Inc, 6.8 + Mach Kernel Pseudoextension, Apple Computer Inc, 6.9.9 CFBundleIdentifier com.apple.kernel.mach CFBundleInfoDictionaryVersion @@ -15,15 +15,17 @@ CFBundlePackageType KEXT CFBundleShortVersionString - 6.8 + 6.9.9 CFBundleSignature ???? 
CFBundleVersion - 6.8 + 6.9.9 OSBundleCompatibleVersion 1.0.0b1 OSBundleRequired Root + OSBundleSharedExecutableIdentifier + com.apple.kernel.6.0 OSKernelResource diff --git a/config/System.kext/PlugIns/System6.0.kext/Info.plist b/config/System.kext/PlugIns/System6.0.kext/Info.plist new file mode 100644 index 000000000..fd7e2f91e --- /dev/null +++ b/config/System.kext/PlugIns/System6.0.kext/Info.plist @@ -0,0 +1,32 @@ + + + + + CFBundleDevelopmentRegion + English + CFBundleExecutable + kernel.6.0 + CFBundleGetInfoString + System Resource Pseudoextension, Apple Computer Inc, 6.9.9 + CFBundleIdentifier + com.apple.kernel.6.0 + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + System Resource Pseudoextension + CFBundlePackageType + KEXT + CFBundleShortVersionString + 6.9.9 + CFBundleSignature + ???? + CFBundleVersion + 6.9.9 + OSBundleCompatibleVersion + 6.9.9 + OSBundleRequired + Root + OSKernelResource + + + diff --git a/config/System6.0.exports b/config/System6.0.exports new file mode 100644 index 000000000..ddc9ebda8 --- /dev/null +++ b/config/System6.0.exports @@ -0,0 +1,9236 @@ +_AgeCatalogIterator +_AllocateNode +_Assert +_BF_decrypt +_BF_encrypt +_BF_set_key +_BTClosePath +_BTDeleteRecord +_BTFlushPath +_BTGetInformation +_BTGetLastSync +_BTInsertRecord +_BTInvalidateHint +_BTIterateRecord +_BTIterateRecords +_BTOpenPath +_BTReloadData +_BTReplaceRecord +_BTScanInitialize +_BTScanNextRecord +_BTScanTerminate +_BTSearchRecord +_BTSetLastSync +_BTUpdateRecord +_BestBlockSizeFit +_BuildCatalogKey +_BuildCatalogKeyUTF8 +_CURSIG +_CalcKeyRecordSize +_CalcMapBits +_CheckExtents +_CheckInsertParams +_CheckNode +_ClearNode +_CompareCatalogKeys +_CompareExtendedCatalogKeys +_ConvertUnicodeToUTF8Mangled +_CopyBigCatalogNodeInfo +_CopyCatalogName +_CopyCatalogNodeInfo +_CopyExtentInfo +_DTCreateEntryIterator +_DTCreatePropertyIterator +_DTDisposeEntryIterator +_DTDisposePropertyIterator +_DTEnterEntry +_DTEntryIsEqual +_DTExitEntry +_DTFindEntry +_DTGetProperty +_DTInit +_DTIterateEntries +_DTIterateProperties +_DTLookupEntry +_DTRestartEntryIteration +_DTRestartPropertyIteration +_DebugStr +_Debugger +_DeleteExtents +_DeleteOffset +_DeleteRecord +_DeleteTree +_DisposePtr +_ExchangeFileIDs +_ExtendBTree +_FastRelString +_FastUnicodeCompare +_FindIteratorPosition +_FlushCatalog +_FreeNode +_GetCatalogIterator +_GetChildNodeNum +_GetDirEntrySize +_GetEmbeddedFileID +_GetLogicalBlockSize +_GetMapNode +_GetNewNode +_GetNode +_GetNodeDataSize +_GetNodeFreeSize +_GetOffsetAddress +_GetRecordByIndex +_GetRecordSize +_GetTimeUTC +_IOAlignmentToSize +_IOBSDNameMatching +_IOBSDRegistryEntryForDeviceTree +_IOBSDRegistryEntryGetData +_IOBSDRegistryEntryRelease +_IOCDMatching +_IOCreateThread +_IODTFreeLoaderInfo +_IODTGetLoaderInfo +_IODefaultCacheBits +_IODelay +_IODiskMatching +_IOExitThread +_IOFindBSDRoot +_IOFindMatchingChild +_IOFindNameForValue +_IOFindValueForName +_IOFlushProcessorCache +_IOFree +_IOFreeAligned +_IOFreeContiguous +_IOFreePageable +_IOGetTime +_IOIteratePageableMaps +_IOKitBSDInit +_IOKitResetTime +_IOLibInit +_IOLockAlloc +_IOLockFree +_IOLockInitWithState +_IOLog +_IOMalloc +_IOMallocAligned +_IOMallocContiguous +_IOMallocPageable +_IOMapPages +_IOMappedRead16 +_IOMappedRead32 +_IOMappedRead64 +_IOMappedRead8 +_IOMappedWrite16 +_IOMappedWrite32 +_IOMappedWrite64 +_IOMappedWrite8 +_IOMapperIOVMAlloc +_IOMapperIOVMFree +_IOMapperInsertPPNPages +_IOMapperInsertPage +_IOMapperInsertUPLPages +_IONDRVLibrariesInitialize +_IONetworkMatching +_IONetworkNamePrefixMatching 
+_IOOFPathMatching +_IOPageableMapForAddress +_IOPanic +_IOPrintPlane +_IORWLockAlloc +_IORWLockFree +_IORecursiveLockAlloc +_IORecursiveLockFree +_IORecursiveLockHaveLock +_IORecursiveLockLock +_IORecursiveLockSleep +_IORecursiveLockTryLock +_IORecursiveLockUnlock +_IORecursiveLockWakeup +_IOSetProcessorCacheMode +_IOSimpleLockAlloc +_IOSimpleLockFree +_IOSimpleLockInit +_IOSizeToAlignment +_IOSleep +_IOSpinUnlock +_IOSystemShutdownNotification +_IOTrySpinLock +_IOUnmapPages +_IOZeroTvalspec +_InitCatalogCache +_InsertKeyRecord +_InsertOffset +_InsertRecord +_InsertTree +_InvalidateCatalogCache +_IsItAHint +_KERNEL_SECURITY_TOKEN +_KUNCExecute +_KUNCGetNotificationID +_KUNCUserNotificationCancel +_KUNCUserNotificationDisplayAlert +_KUNCUserNotificationDisplayFromBundle +_KUNCUserNotificationDisplayNotice +_LocalToUTC +_LocateCatalogNode +_LocateCatalogNodeByKey +_LocateCatalogRecord +_LockTimeOut +_MAXNBUF +_MCFail +_MD5Final +_MD5Init +_MD5Pad +_MD5Transform +_MD5Update +_MDFail +_MPFail +_MacToVFSError +_MoveExtents +_NDR_record +_NewPtr +_NewPtrSysClear +_OSAddAtomic +_OSAddAtomic16 +_OSAddAtomic8 +_OSBitAndAtomic +_OSBitAndAtomic16 +_OSBitAndAtomic8 +_OSBitOrAtomic +_OSBitOrAtomic16 +_OSBitOrAtomic8 +_OSBitXorAtomic +_OSBitXorAtomic16 +_OSBitXorAtomic8 +_OSCompareAndSwap +_OSDecrementAtomic +_OSDecrementAtomic16 +_OSDecrementAtomic8 +_OSDequeueAtomic +_OSEnqueueAtomic +_OSIncrementAtomic +_OSIncrementAtomic16 +_OSIncrementAtomic8 +_OSKernelStackRemaining +_OSPrintMemory +_OSRuntimeFinalizeCPP +_OSRuntimeInitializeCPP +_OSRuntimeUnloadCPP +_OSRuntimeUnloadCPPForSegment +_OSTestAndClear +_OSTestAndSet +_OSUnserializechar +_OSUnserializelval +_OSUnserializenerrs +_OSlibkernInit +_PEGetGMTTimeOfDay +_PEGetMachineName +_PEGetModelName +_PEGetPlatformEpoch +_PEHaltRestart +_PESavePanicInfo +_PESetGMTTimeOfDay +_PE_boot_args +_PE_call_timebase_callback +_PE_cpu_halt +_PE_cpu_machine_init +_PE_cpu_machine_quiesce +_PE_cpu_signal +_PE_cpu_start +_PE_create_console +_PE_current_console +_PE_display_icon +_PE_enter_debugger +_PE_get_hotkey +_PE_halt_restart +_PE_init_iokit +_PE_init_kprintf +_PE_init_platform +_PE_init_printf +_PE_initialize_console +_PE_kputc +_PE_parse_boot_arg +_PE_poll_input +_PE_putc +_PE_register_timebase_callback +_PE_state +_PositionIterator +_PreliminarySetup +_RandomULong +_ReleaseCatalogIterator +_ReleaseNode +_ReplaceBTreeRecord +_S +_SHA1Final +_SHA1Init +_SHA1Transform +_SHA1Update +_SHA256_Data +_SHA256_End +_SHA256_Final +_SHA256_Init +_SHA256_Transform +_SHA256_Update +_SHA384_Data +_SHA384_End +_SHA384_Final +_SHA384_Init +_SHA384_Update +_SHA512_Data +_SHA512_End +_SHA512_Final +_SHA512_Init +_SHA512_Last +_SHA512_Transform +_SHA512_Update +_SearchBTreeRecord +_SearchNode +_SearchTree +_StartIOKit +_Switch_context +_TRAP_TYPES +_TrashCatalogIterator +_TrashNode +_TrySimpleReplace +_UNDAlertCompletedWithResult_rpc +_UNDCancelNotification_rpc +_UNDDisplayAlertFromBundle_rpc +_UNDDisplayAlertSimple_rpc +_UNDDisplayCustomFromBundle_rpc +_UNDDisplayCustomFromDictionary_rpc +_UNDDisplayNoticeFromBundle_rpc +_UNDDisplayNoticeSimple_rpc +_UNDExecute_rpc +_UNDNotificationCreated_rpc +_UNDReply_deallocate +_UNDReply_server +_UNDReply_server_routine +_UNDReply_subsystem +_UTCToLocal +_UpdateBtreeIterator +_UpdateCatalogIterator +_UpdateCatalogName +_UpdateHeader +_UpdateNode +_VerifyHeader +__FREE +__FREE_ZONE +__MALLOC +__MALLOC_ZONE +__Z10tellClientP8OSObjectPv +__Z13OSUnserializePKcPP8OSString +__Z13readExtensionP12OSDictionaryPKc 
+__Z14readExtensionsP12OSDictionaryPKcS0_ +__Z16IOCPUSleepKernelv +__Z16IODTFindSlotNameP15IORegistryEntrym +__Z16IODTSetResolvingP15IORegistryEntryPFlmPmS1_EPFvS0_PhS4_S4_E +__Z16OSUnserializeXMLPKcPP8OSString +__Z16addPersonalitiesP12OSDictionary +__Z16uncompressModuleP6OSDataPS0_ +__Z17IODTGetCellCountsP15IORegistryEntryPmS1_ +__Z17IODTMapInterruptsP15IORegistryEntry +__Z17IODeviceTreeAllocPv +__Z17IOServiceOrderingPK15OSMetaClassBaseS1_Pv +__Z18IODTCompareNubNamePK15IORegistryEntryP8OSStringPS3_ +__Z19IODTMapOneInterruptP15IORegistryEntryPmPP6OSDataPPK8OSSymbol +__Z19printDictionaryKeysP12OSDictionaryPc +__Z19tellAppWithResponseP8OSObjectPv +__Z20IODTMakeNVDescriptorP15IORegistryEntryP17IONVRAMDescriptor +__Z20IODTMatchNubWithKeysP15IORegistryEntryPKc +__Z20getBootLoaderObjectsv +__Z20getStartupExtensionsv +__Z21IODTResolveAddressingP15IORegistryEntryPKcP14IODeviceMemory +__Z21validateExtensionDictP12OSDictionaryi +__Z22IODTResolveAddressCellP15IORegistryEntryPmS1_S1_ +__Z22removeStartupExtensionPKc +__Z22tellClientWithResponseP8OSObjectPv +__Z23IODTFindInterruptParentP15IORegistryEntry +__Z23IODTFindMatchingEntriesP15IORegistryEntrymPKc +__Z23recordStartupExtensionsv +__Z24addExtensionsFromArchiveP6OSData +__Z24broadcast_aggressivenessP8OSObjectPvS1_S1_S1_ +__Z24compareExtensionVersionsP12OSDictionaryS0_ +__Z26mergeExtensionDictionariesP12OSDictionaryS0_ +__Z26serializedAllowPowerChangeP8OSObjectPvS1_S1_S1_ +__Z27IODTInterruptControllerNameP15IORegistryEntry +__Z27serializedCancelPowerChangeP8OSObjectPvS1_S1_S1_ +__Z28extractExtensionsFromArchiveP17MemoryMapFileInfoP12OSDictionary +__Z35clearStartupExtensionsAndLoaderInfov +__ZN10IOMachPort10gMetaClassE +__ZN10IOMachPort10superClassE +__ZN10IOMachPort11dictForTypeEj +__ZN10IOMachPort13portForObjectEP8OSObjectj +__ZN10IOMachPort14setHoldDestroyEP8OSObjectj +__ZN10IOMachPort20makeSendRightForTaskEP4taskP8OSObjectj +__ZN10IOMachPort20releasePortForObjectEP8OSObjectj +__ZN10IOMachPort22noMoreSendersForObjectEP8OSObjectjPj +__ZN10IOMachPort4freeEv +__ZN10IOMachPort9MetaClassC1Ev +__ZN10IOMachPort9MetaClassC2Ev +__ZN10IOMachPort9metaClassE +__ZN10IOMachPortC1EPK11OSMetaClass +__ZN10IOMachPortC1Ev +__ZN10IOMachPortC2EPK11OSMetaClass +__ZN10IOMachPortC2Ev +__ZN10IOMachPortD0Ev +__ZN10IOMachPortD2Ev +__ZN10IONotifier10gMetaClassE +__ZN10IONotifier10superClassE +__ZN10IONotifier9MetaClassC1Ev +__ZN10IONotifier9MetaClassC2Ev +__ZN10IONotifier9metaClassE +__ZN10IONotifierC1EPK11OSMetaClass +__ZN10IONotifierC2EPK11OSMetaClass +__ZN10IONotifierD0Ev +__ZN10IONotifierD2Ev +__ZN10IOWorkLoop10gMetaClassE +__ZN10IOWorkLoop10superClassE +__ZN10IOWorkLoop10threadMainEv +__ZN10IOWorkLoop10wakeupGateEPvb +__ZN10IOWorkLoop12tryCloseGateEv +__ZN10IOWorkLoop13_maintRequestEPvS0_S0_S0_ +__ZN10IOWorkLoop14addEventSourceEP13IOEventSource +__ZN10IOWorkLoop16launchThreadMainEPv +__ZN10IOWorkLoop17removeEventSourceEP13IOEventSource +__ZN10IOWorkLoop19signalWorkAvailableEv +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop1Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop2Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop3Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop4Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop5Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop6Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop7Ev +__ZN10IOWorkLoop22threadMainContinuationEv +__ZN10IOWorkLoop4freeEv +__ZN10IOWorkLoop4initEv +__ZN10IOWorkLoop8openGateEv +__ZN10IOWorkLoop8workLoopEv +__ZN10IOWorkLoop9MetaClassC1Ev +__ZN10IOWorkLoop9MetaClassC2Ev +__ZN10IOWorkLoop9closeGateEv +__ZN10IOWorkLoop9metaClassE 
+__ZN10IOWorkLoop9runActionEPFiP8OSObjectPvS2_S2_S2_ES1_S2_S2_S2_S2_ +__ZN10IOWorkLoop9sleepGateEPvm +__ZN10IOWorkLoopC1EPK11OSMetaClass +__ZN10IOWorkLoopC1Ev +__ZN10IOWorkLoopC2EPK11OSMetaClass +__ZN10IOWorkLoopC2Ev +__ZN10IOWorkLoopD0Ev +__ZN10IOWorkLoopD2Ev +__ZN10OSIterator10gMetaClassE +__ZN10OSIterator10superClassE +__ZN10OSIterator20_RESERVEDOSIterator0Ev +__ZN10OSIterator20_RESERVEDOSIterator1Ev +__ZN10OSIterator20_RESERVEDOSIterator2Ev +__ZN10OSIterator20_RESERVEDOSIterator3Ev +__ZN10OSIterator9MetaClassC1Ev +__ZN10OSIterator9MetaClassC2Ev +__ZN10OSIterator9metaClassE +__ZN10OSIteratorC1EPK11OSMetaClass +__ZN10OSIteratorC2EPK11OSMetaClass +__ZN10OSIteratorD0Ev +__ZN10OSIteratorD2Ev +__ZN11IOCatalogue10addDriversEP7OSArrayb +__ZN11IOCatalogue10gMetaClassE +__ZN11IOCatalogue10initializeEv +__ZN11IOCatalogue10superClassE +__ZN11IOCatalogue11findDriversEP12OSDictionaryPl +__ZN11IOCatalogue11findDriversEP9IOServicePl +__ZN11IOCatalogue13removeDriversEP12OSDictionaryb +__ZN11IOCatalogue13startMatchingEP12OSDictionary +__ZN11IOCatalogue15moduleHasLoadedEP8OSString +__ZN11IOCatalogue15moduleHasLoadedEPKc +__ZN11IOCatalogue16terminateDriversEP12OSDictionary +__ZN11IOCatalogue18removeKernelLinkerEv +__ZN11IOCatalogue23recordStartupExtensionsEv +__ZN11IOCatalogue24addExtensionsFromArchiveEP6OSData +__ZN11IOCatalogue25terminateDriversForModuleEP8OSStringb +__ZN11IOCatalogue25terminateDriversForModuleEPKcb +__ZN11IOCatalogue4freeEv +__ZN11IOCatalogue4initEP7OSArray +__ZN11IOCatalogue5resetEv +__ZN11IOCatalogue9MetaClassC1Ev +__ZN11IOCatalogue9MetaClassC2Ev +__ZN11IOCatalogue9metaClassE +__ZN11IOCatalogueC1EPK11OSMetaClass +__ZN11IOCatalogueC1Ev +__ZN11IOCatalogueC2EPK11OSMetaClass +__ZN11IOCatalogueC2Ev +__ZN11IOCatalogueD0Ev +__ZN11IOCatalogueD2Ev +__ZN11IODataQueue10gMetaClassE +__ZN11IODataQueue10superClassE +__ZN11IODataQueue11withEntriesEmm +__ZN11IODataQueue12withCapacityEm +__ZN11IODataQueue15initWithEntriesEmm +__ZN11IODataQueue16initWithCapacityEm +__ZN11IODataQueue19getMemoryDescriptorEv +__ZN11IODataQueue19setNotificationPortEP8ipc_port +__ZN11IODataQueue29sendDataAvailableNotificationEv +__ZN11IODataQueue4freeEv +__ZN11IODataQueue7enqueueEPvm +__ZN11IODataQueue9MetaClassC1Ev +__ZN11IODataQueue9MetaClassC2Ev +__ZN11IODataQueue9metaClassE +__ZN11IODataQueueC1EPK11OSMetaClass +__ZN11IODataQueueC1Ev +__ZN11IODataQueueC2EPK11OSMetaClass +__ZN11IODataQueueC2Ev +__ZN11IODataQueueD0Ev +__ZN11IODataQueueD2Ev +__ZN11IOMemoryMap10gMetaClassE +__ZN11IOMemoryMap10superClassE +__ZN11IOMemoryMap18getPhysicalAddressEv +__ZN11IOMemoryMap9MetaClassC1Ev +__ZN11IOMemoryMap9MetaClassC2Ev +__ZN11IOMemoryMap9metaClassE +__ZN11IOMemoryMapC1EPK11OSMetaClass +__ZN11IOMemoryMapC2EPK11OSMetaClass +__ZN11IOMemoryMapD0Ev +__ZN11IOMemoryMapD2Ev +__ZN11IOResources10gMetaClassE +__ZN11IOResources10superClassE +__ZN11IOResources13setPropertiesEP8OSObject +__ZN11IOResources18matchPropertyTableEP12OSDictionary +__ZN11IOResources9MetaClassC1Ev +__ZN11IOResources9MetaClassC2Ev +__ZN11IOResources9metaClassE +__ZN11IOResources9resourcesEv +__ZN11IOResourcesC1EPK11OSMetaClass +__ZN11IOResourcesC1Ev +__ZN11IOResourcesC2EPK11OSMetaClass +__ZN11IOResourcesC2Ev +__ZN11IOResourcesD0Ev +__ZN11IOResourcesD2Ev +__ZN11OSMetaClass10preModLoadEPKc +__ZN11OSMetaClass11postModLoadEPv +__ZN11OSMetaClass12checkModLoadEPv +__ZN11OSMetaClass14modHasInstanceEPKc +__ZN11OSMetaClass15considerUnloadsEv +__ZN11OSMetaClass18allocClassWithNameEPK8OSString +__ZN11OSMetaClass18allocClassWithNameEPK8OSSymbol 
+__ZN11OSMetaClass18allocClassWithNameEPKc +__ZN11OSMetaClass18getClassDictionaryEv +__ZN11OSMetaClass18reportModInstancesEPKc +__ZN11OSMetaClass19printInstanceCountsEv +__ZN11OSMetaClass20getMetaClassWithNameEPK8OSSymbol +__ZN11OSMetaClass21_RESERVEDOSMetaClass0Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass1Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass2Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass3Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass4Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass5Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass6Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass7Ev +__ZN11OSMetaClass21checkMetaCastWithNameEPK8OSStringPK15OSMetaClassBase +__ZN11OSMetaClass21checkMetaCastWithNameEPK8OSSymbolPK15OSMetaClassBase +__ZN11OSMetaClass21checkMetaCastWithNameEPKcPK15OSMetaClassBase +__ZN11OSMetaClass24serializeClassDictionaryEP12OSDictionary +__ZN11OSMetaClass8logErrorEi +__ZN11OSMetaClass9metaClassE +__ZN11OSMetaClassC1EPKcPKS_j +__ZN11OSMetaClassC2EPKcPKS_j +__ZN11OSMetaClassD0Ev +__ZN11OSMetaClassD2Ev +__ZN11OSMetaClassdlEPvm +__ZN11OSMetaClassnwEm +__ZN11OSSerialize10gMetaClassE +__ZN11OSSerialize10superClassE +__ZN11OSSerialize12addXMLEndTagEPKc +__ZN11OSSerialize12withCapacityEj +__ZN11OSSerialize14addXMLStartTagEPK15OSMetaClassBasePKc +__ZN11OSSerialize14ensureCapacityEj +__ZN11OSSerialize16initWithCapacityEj +__ZN11OSSerialize20previouslySerializedEPK15OSMetaClassBase +__ZN11OSSerialize20setCapacityIncrementEj +__ZN11OSSerialize21_RESERVEDOSSerialize0Ev +__ZN11OSSerialize21_RESERVEDOSSerialize1Ev +__ZN11OSSerialize21_RESERVEDOSSerialize2Ev +__ZN11OSSerialize21_RESERVEDOSSerialize3Ev +__ZN11OSSerialize21_RESERVEDOSSerialize4Ev +__ZN11OSSerialize21_RESERVEDOSSerialize5Ev +__ZN11OSSerialize21_RESERVEDOSSerialize6Ev +__ZN11OSSerialize21_RESERVEDOSSerialize7Ev +__ZN11OSSerialize4freeEv +__ZN11OSSerialize7addCharEc +__ZN11OSSerialize9MetaClassC1Ev +__ZN11OSSerialize9MetaClassC2Ev +__ZN11OSSerialize9addStringEPKc +__ZN11OSSerialize9clearTextEv +__ZN11OSSerialize9metaClassE +__ZN11OSSerializeC1EPK11OSMetaClass +__ZN11OSSerializeC1Ev +__ZN11OSSerializeC2EPK11OSMetaClass +__ZN11OSSerializeC2Ev +__ZN11OSSerializeD0Ev +__ZN11OSSerializeD2Ev +__ZN12IOPMinformee10gMetaClassE +__ZN12IOPMinformee10initializeEP9IOService +__ZN12IOPMinformee10superClassE +__ZN12IOPMinformee4freeEv +__ZN12IOPMinformee9MetaClassC1Ev +__ZN12IOPMinformee9MetaClassC2Ev +__ZN12IOPMinformee9metaClassE +__ZN12IOPMinformeeC1EPK11OSMetaClass +__ZN12IOPMinformeeC1Ev +__ZN12IOPMinformeeC2EPK11OSMetaClass +__ZN12IOPMinformeeC2Ev +__ZN12IOPMinformeeD0Ev +__ZN12IOPMinformeeD2Ev +__ZN12IORootParent10dozeSystemEv +__ZN12IORootParent10gMetaClassE +__ZN12IORootParent10superClassE +__ZN12IORootParent10wakeSystemEv +__ZN12IORootParent11sleepSystemEv +__ZN12IORootParent11sleepToDozeEv +__ZN12IORootParent13restartSystemEv +__ZN12IORootParent14shutDownSystemEv +__ZN12IORootParent5startEP9IOService +__ZN12IORootParent9MetaClassC1Ev +__ZN12IORootParent9MetaClassC2Ev +__ZN12IORootParent9metaClassE +__ZN12IORootParentC1EPK11OSMetaClass +__ZN12IORootParentC1Ev +__ZN12IORootParentC2EPK11OSMetaClass +__ZN12IORootParentC2Ev +__ZN12IORootParentD0Ev +__ZN12IORootParentD2Ev +__ZN12IOUserClient10clientDiedEv +__ZN12IOUserClient10gMetaClassE +__ZN12IOUserClient10getServiceEv +__ZN12IOUserClient10initializeEv +__ZN12IOUserClient10superClassE +__ZN12IOUserClient11clientCloseEv +__ZN12IOUserClient12initWithTaskEP4taskPvm +__ZN12IOUserClient12initWithTaskEP4taskPvmP12OSDictionary +__ZN12IOUserClient13connectClientEPS_ +__ZN12IOUserClient15mapClientMemoryEmP4taskmj 
+__ZN12IOUserClient15sendAsyncResultEPjiPPvm +__ZN12IOUserClient17setAsyncReferenceEPjP8ipc_portPvS3_ +__ZN12IOUserClient18clientHasPrivilegeEPvPKc +__ZN12IOUserClient19clientMemoryForTypeEmPmPP18IOMemoryDescriptor +__ZN12IOUserClient20exportObjectToClientEP4taskP8OSObjectPS3_ +__ZN12IOUserClient21destroyUserReferencesEP8OSObject +__ZN12IOUserClient22_RESERVEDIOUserClient0Ev +__ZN12IOUserClient22_RESERVEDIOUserClient1Ev +__ZN12IOUserClient22_RESERVEDIOUserClient2Ev +__ZN12IOUserClient22_RESERVEDIOUserClient3Ev +__ZN12IOUserClient22_RESERVEDIOUserClient4Ev +__ZN12IOUserClient22_RESERVEDIOUserClient5Ev +__ZN12IOUserClient22_RESERVEDIOUserClient6Ev +__ZN12IOUserClient22_RESERVEDIOUserClient7Ev +__ZN12IOUserClient22_RESERVEDIOUserClient8Ev +__ZN12IOUserClient22_RESERVEDIOUserClient9Ev +__ZN12IOUserClient23_RESERVEDIOUserClient10Ev +__ZN12IOUserClient23_RESERVEDIOUserClient11Ev +__ZN12IOUserClient23_RESERVEDIOUserClient12Ev +__ZN12IOUserClient23_RESERVEDIOUserClient13Ev +__ZN12IOUserClient23_RESERVEDIOUserClient14Ev +__ZN12IOUserClient23_RESERVEDIOUserClient15Ev +__ZN12IOUserClient23getExternalTrapForIndexEm +__ZN12IOUserClient24getNotificationSemaphoreEmPP9semaphore +__ZN12IOUserClient24getTargetAndTrapForIndexEPP9IOServicem +__ZN12IOUserClient24registerNotificationPortEP8ipc_portmm +__ZN12IOUserClient25getExternalMethodForIndexEm +__ZN12IOUserClient26getTargetAndMethodForIndexEPP9IOServicem +__ZN12IOUserClient30getExternalAsyncMethodForIndexEm +__ZN12IOUserClient31getAsyncTargetAndMethodForIndexEPP9IOServicem +__ZN12IOUserClient4freeEv +__ZN12IOUserClient9MetaClassC1Ev +__ZN12IOUserClient9MetaClassC2Ev +__ZN12IOUserClient9metaClassE +__ZN12IOUserClientC1EPK11OSMetaClass +__ZN12IOUserClientC2EPK11OSMetaClass +__ZN12IOUserClientD0Ev +__ZN12IOUserClientD2Ev +__ZN12KLDBootstrapC1Ev +__ZN12KLDBootstrapC2Ev +__ZN12KLDBootstrapD1Ev +__ZN12KLDBootstrapD2Ev +__ZN12OSCollection10gMetaClassE +__ZN12OSCollection10superClassE +__ZN12OSCollection22_RESERVEDOSCollection0Ev +__ZN12OSCollection22_RESERVEDOSCollection1Ev +__ZN12OSCollection22_RESERVEDOSCollection2Ev +__ZN12OSCollection22_RESERVEDOSCollection3Ev +__ZN12OSCollection22_RESERVEDOSCollection4Ev +__ZN12OSCollection22_RESERVEDOSCollection5Ev +__ZN12OSCollection22_RESERVEDOSCollection6Ev +__ZN12OSCollection22_RESERVEDOSCollection7Ev +__ZN12OSCollection4initEv +__ZN12OSCollection9MetaClassC1Ev +__ZN12OSCollection9MetaClassC2Ev +__ZN12OSCollection9metaClassE +__ZN12OSCollectionC1EPK11OSMetaClass +__ZN12OSCollectionC2EPK11OSMetaClass +__ZN12OSCollectionD0Ev +__ZN12OSCollectionD2Ev +__ZN12OSDictionary10gMetaClassE +__ZN12OSDictionary10superClassE +__ZN12OSDictionary11withObjectsEPPK8OSObjectPPK8OSStringjj +__ZN12OSDictionary11withObjectsEPPK8OSObjectPPK8OSSymboljj +__ZN12OSDictionary12removeObjectEPK8OSString +__ZN12OSDictionary12removeObjectEPK8OSSymbol +__ZN12OSDictionary12removeObjectEPKc +__ZN12OSDictionary12withCapacityEj +__ZN12OSDictionary14ensureCapacityEj +__ZN12OSDictionary14withDictionaryEPKS_j +__ZN12OSDictionary15flushCollectionEv +__ZN12OSDictionary15initWithObjectsEPPK8OSObjectPPK8OSStringjj +__ZN12OSDictionary15initWithObjectsEPPK8OSObjectPPK8OSSymboljj +__ZN12OSDictionary16initWithCapacityEj +__ZN12OSDictionary18initWithDictionaryEPKS_j +__ZN12OSDictionary20setCapacityIncrementEj +__ZN12OSDictionary22_RESERVEDOSDictionary0Ev +__ZN12OSDictionary22_RESERVEDOSDictionary1Ev +__ZN12OSDictionary22_RESERVEDOSDictionary2Ev +__ZN12OSDictionary22_RESERVEDOSDictionary3Ev +__ZN12OSDictionary22_RESERVEDOSDictionary4Ev 
+__ZN12OSDictionary22_RESERVEDOSDictionary5Ev +__ZN12OSDictionary22_RESERVEDOSDictionary6Ev +__ZN12OSDictionary22_RESERVEDOSDictionary7Ev +__ZN12OSDictionary4freeEv +__ZN12OSDictionary5mergeEPKS_ +__ZN12OSDictionary9MetaClassC1Ev +__ZN12OSDictionary9MetaClassC2Ev +__ZN12OSDictionary9metaClassE +__ZN12OSDictionary9setObjectEPK8OSStringPK15OSMetaClassBase +__ZN12OSDictionary9setObjectEPK8OSSymbolPK15OSMetaClassBase +__ZN12OSDictionary9setObjectEPKcPK15OSMetaClassBase +__ZN12OSDictionaryC1EPK11OSMetaClass +__ZN12OSDictionaryC1Ev +__ZN12OSDictionaryC2EPK11OSMetaClass +__ZN12OSDictionaryC2Ev +__ZN12OSDictionaryD0Ev +__ZN12OSDictionaryD2Ev +__ZN12OSOrderedSet10gMetaClassE +__ZN12OSOrderedSet10superClassE +__ZN12OSOrderedSet11orderObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet12removeObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet12withCapacityEjPFlPK15OSMetaClassBaseS2_PvES3_ +__ZN12OSOrderedSet13setLastObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet14ensureCapacityEj +__ZN12OSOrderedSet14getOrderingRefEv +__ZN12OSOrderedSet14setFirstObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet15flushCollectionEv +__ZN12OSOrderedSet16initWithCapacityEjPFlPK15OSMetaClassBaseS2_PvES3_ +__ZN12OSOrderedSet20setCapacityIncrementEj +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet0Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet1Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet2Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet3Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet4Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet5Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet6Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet7Ev +__ZN12OSOrderedSet4freeEv +__ZN12OSOrderedSet9MetaClassC1Ev +__ZN12OSOrderedSet9MetaClassC2Ev +__ZN12OSOrderedSet9metaClassE +__ZN12OSOrderedSet9setObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet9setObjectEjPK15OSMetaClassBase +__ZN12OSOrderedSetC1EPK11OSMetaClass +__ZN12OSOrderedSetC1Ev +__ZN12OSOrderedSetC2EPK11OSMetaClass +__ZN12OSOrderedSetC2Ev +__ZN12OSOrderedSetD0Ev +__ZN12OSOrderedSetD2Ev +__ZN12OSSerializer10gMetaClassE +__ZN12OSSerializer10superClassE +__ZN12OSSerializer9MetaClassC1Ev +__ZN12OSSerializer9MetaClassC2Ev +__ZN12OSSerializer9forTargetEPvPFbS0_S0_P11OSSerializeES0_ +__ZN12OSSerializer9metaClassE +__ZN12OSSerializerC1EPK11OSMetaClass +__ZN12OSSerializerC1Ev +__ZN12OSSerializerC2EPK11OSMetaClass +__ZN12OSSerializerC2Ev +__ZN12OSSerializerD0Ev +__ZN12OSSerializerD2Ev +__ZN12OSSymbolPool12insertSymbolEP8OSSymbol +__ZN12OSSymbolPool12removeSymbolEP8OSSymbol +__ZN12OSSymbolPool13initHashStateEv +__ZN12OSSymbolPool13nextHashStateEP17OSSymbolPoolState +__ZN12OSSymbolPool18reconstructSymbolsEv +__ZN12OSSymbolPool4initEv +__ZN12OSSymbolPool4log2Ej +__ZN12OSSymbolPool6exp2mlEj +__ZN12OSSymbolPoolC1EPKS_ +__ZN12OSSymbolPoolC2EPKS_ +__ZN12OSSymbolPoolD0Ev +__ZN12OSSymbolPoolD1Ev +__ZN12OSSymbolPoolD2Ev +__ZN12OSSymbolPooldlEPvm +__ZN12OSSymbolPoolnwEm +__ZN12_IOMemoryMap10gMetaClassE +__ZN12_IOMemoryMap10superClassE +__ZN12_IOMemoryMap13getMapOptionsEv +__ZN12_IOMemoryMap14copyCompatibleEP18IOMemoryDescriptorP4taskjmmm +__ZN12_IOMemoryMap14getAddressTaskEv +__ZN12_IOMemoryMap14initCompatibleEP18IOMemoryDescriptorP11IOMemoryMapmm +__ZN12_IOMemoryMap17getVirtualAddressEv +__ZN12_IOMemoryMap18getPhysicalSegmentEmPm +__ZN12_IOMemoryMap18initWithDescriptorEP18IOMemoryDescriptorP4taskjmmm +__ZN12_IOMemoryMap19getMemoryDescriptorEv +__ZN12_IOMemoryMap4freeEv +__ZN12_IOMemoryMap5unmapEv +__ZN12_IOMemoryMap8redirectEP4taskb +__ZN12_IOMemoryMap8taskDiedEv +__ZN12_IOMemoryMap9MetaClassC1Ev +__ZN12_IOMemoryMap9MetaClassC2Ev 
+__ZN12_IOMemoryMap9getLengthEv +__ZN12_IOMemoryMap9metaClassE +__ZN12_IOMemoryMapC1EPK11OSMetaClass +__ZN12_IOMemoryMapC1Ev +__ZN12_IOMemoryMapC2EPK11OSMetaClass +__ZN12_IOMemoryMapC2Ev +__ZN12_IOMemoryMapD0Ev +__ZN12_IOMemoryMapD2Ev +__ZN13IOCommandGate10gMetaClassE +__ZN13IOCommandGate10runCommandEPvS0_S0_S0_ +__ZN13IOCommandGate10superClassE +__ZN13IOCommandGate11commandGateEP8OSObjectPFiS1_PvS2_S2_S2_E +__ZN13IOCommandGate12checkForWorkEv +__ZN13IOCommandGate12commandSleepEPvm +__ZN13IOCommandGate13attemptActionEPFiP8OSObjectPvS2_S2_S2_ES2_S2_S2_S2_ +__ZN13IOCommandGate13commandWakeupEPvb +__ZN13IOCommandGate14attemptCommandEPvS0_S0_S0_ +__ZN13IOCommandGate23_RESERVEDIOCommandGate0Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate1Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate2Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate3Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate4Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate5Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate6Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate7Ev +__ZN13IOCommandGate4initEP8OSObjectPFiS1_PvS2_S2_S2_E +__ZN13IOCommandGate9MetaClassC1Ev +__ZN13IOCommandGate9MetaClassC2Ev +__ZN13IOCommandGate9metaClassE +__ZN13IOCommandGate9runActionEPFiP8OSObjectPvS2_S2_S2_ES2_S2_S2_S2_ +__ZN13IOCommandGateC1EPK11OSMetaClass +__ZN13IOCommandGateC1Ev +__ZN13IOCommandGateC2EPK11OSMetaClass +__ZN13IOCommandGateC2Ev +__ZN13IOCommandGateD0Ev +__ZN13IOCommandGateD2Ev +__ZN13IOCommandPool10gMetaClassE +__ZN13IOCommandPool10getCommandEb +__ZN13IOCommandPool10superClassE +__ZN13IOCommandPool11commandPoolEP9IOServiceP10IOWorkLoopm +__ZN13IOCommandPool12withWorkLoopEP10IOWorkLoop +__ZN13IOCommandPool13returnCommandEP9IOCommand +__ZN13IOCommandPool15gatedGetCommandEPP9IOCommandb +__ZN13IOCommandPool16initWithWorkLoopEP10IOWorkLoop +__ZN13IOCommandPool18gatedReturnCommandEP9IOCommand +__ZN13IOCommandPool23_RESERVEDIOCommandPool0Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool1Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool2Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool3Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool4Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool5Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool6Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool7Ev +__ZN13IOCommandPool4freeEv +__ZN13IOCommandPool4initEP9IOServiceP10IOWorkLoopm +__ZN13IOCommandPool9MetaClassC1Ev +__ZN13IOCommandPool9MetaClassC2Ev +__ZN13IOCommandPool9metaClassE +__ZN13IOCommandPoolC1EPK11OSMetaClass +__ZN13IOCommandPoolC1Ev +__ZN13IOCommandPoolC2EPK11OSMetaClass +__ZN13IOCommandPoolC2Ev +__ZN13IOCommandPoolD0Ev +__ZN13IOCommandPoolD2Ev +__ZN13IOEventSource10gMetaClassE +__ZN13IOEventSource10superClassE +__ZN13IOEventSource10wakeupGateEPvb +__ZN13IOEventSource11setWorkLoopEP10IOWorkLoop +__ZN13IOEventSource12tryCloseGateEv +__ZN13IOEventSource19signalWorkAvailableEv +__ZN13IOEventSource23_RESERVEDIOEventSource0Ev +__ZN13IOEventSource23_RESERVEDIOEventSource1Ev +__ZN13IOEventSource23_RESERVEDIOEventSource2Ev +__ZN13IOEventSource23_RESERVEDIOEventSource3Ev +__ZN13IOEventSource23_RESERVEDIOEventSource4Ev +__ZN13IOEventSource23_RESERVEDIOEventSource5Ev +__ZN13IOEventSource23_RESERVEDIOEventSource6Ev +__ZN13IOEventSource23_RESERVEDIOEventSource7Ev +__ZN13IOEventSource4initEP8OSObjectPFvS1_zE +__ZN13IOEventSource6enableEv +__ZN13IOEventSource7disableEv +__ZN13IOEventSource7setNextEPS_ +__ZN13IOEventSource8openGateEv +__ZN13IOEventSource9MetaClassC1Ev +__ZN13IOEventSource9MetaClassC2Ev +__ZN13IOEventSource9closeGateEv +__ZN13IOEventSource9metaClassE 
+__ZN13IOEventSource9setActionEPFvP8OSObjectzE +__ZN13IOEventSource9sleepGateEPvm +__ZN13IOEventSourceC1EPK11OSMetaClass +__ZN13IOEventSourceC2EPK11OSMetaClass +__ZN13IOEventSourceD0Ev +__ZN13IOEventSourceD2Ev +__ZN13_IOServiceJob10gMetaClassE +__ZN13_IOServiceJob10pingConfigEPS_ +__ZN13_IOServiceJob10superClassE +__ZN13_IOServiceJob8startJobEP9IOServiceim +__ZN13_IOServiceJob9MetaClassC1Ev +__ZN13_IOServiceJob9MetaClassC2Ev +__ZN13_IOServiceJob9metaClassE +__ZN13_IOServiceJobC1EPK11OSMetaClass +__ZN13_IOServiceJobC1Ev +__ZN13_IOServiceJobC2EPK11OSMetaClass +__ZN13_IOServiceJobC2Ev +__ZN13_IOServiceJobD0Ev +__ZN13_IOServiceJobD2Ev +__ZN14IOCommandQueue10gMetaClassE +__ZN14IOCommandQueue10superClassE +__ZN14IOCommandQueue12checkForWorkEv +__ZN14IOCommandQueue12commandQueueEP8OSObjectPFvS1_PvS2_S2_S2_Ei +__ZN14IOCommandQueue14enqueueCommandEbPvS0_S0_S0_ +__ZN14IOCommandQueue15performAndFlushEP8OSObjectPFvS1_PvS2_S2_S2_E +__ZN14IOCommandQueue4freeEv +__ZN14IOCommandQueue4initEP8OSObjectPFvS1_PvS2_S2_S2_Ei +__ZN14IOCommandQueue9MetaClassC1Ev +__ZN14IOCommandQueue9MetaClassC2Ev +__ZN14IOCommandQueue9metaClassE +__ZN14IOCommandQueueC1EPK11OSMetaClass +__ZN14IOCommandQueueC1Ev +__ZN14IOCommandQueueC2EPK11OSMetaClass +__ZN14IOCommandQueueC2Ev +__ZN14IOCommandQueueD0Ev +__ZN14IOCommandQueueD2Ev +__ZN14IODeviceMemory12withSubRangeEPS_mm +__ZN14IODeviceMemory13arrayFromListEPNS_11InitElementEm +__ZN14IODeviceMemory9withRangeEmm +__ZN14IOMemoryCursor10gMetaClassE +__ZN14IOMemoryCursor10superClassE +__ZN14IOMemoryCursor17withSpecificationEPFvNS_15PhysicalSegmentEPvmEmmm +__ZN14IOMemoryCursor19genPhysicalSegmentsEP18IOMemoryDescriptormPvmmPm +__ZN14IOMemoryCursor21initWithSpecificationEPFvNS_15PhysicalSegmentEPvmEmmm +__ZN14IOMemoryCursor9MetaClassC1Ev +__ZN14IOMemoryCursor9MetaClassC2Ev +__ZN14IOMemoryCursor9metaClassE +__ZN14IOMemoryCursorC1EPK11OSMetaClass +__ZN14IOMemoryCursorC1Ev +__ZN14IOMemoryCursorC2EPK11OSMetaClass +__ZN14IOMemoryCursorC2Ev +__ZN14IOMemoryCursorD0Ev +__ZN14IOMemoryCursorD2Ev +__ZN14IOPMrootDomain10gMetaClassE +__ZN14IOPMrootDomain10superClassE +__ZN14IOPMrootDomain10youAreRootEv +__ZN14IOPMrootDomain11sleepSystemEv +__ZN14IOPMrootDomain12broadcast_itEmm +__ZN14IOPMrootDomain12tellChangeUpEm +__ZN14IOPMrootDomain12unIdleDeviceEP9IOServicem +__ZN14IOPMrootDomain12wakeFromDozeEv +__ZN14IOPMrootDomain13askChangeDownEm +__ZN14IOPMrootDomain13restartSystemEv +__ZN14IOPMrootDomain13setPropertiesEP8OSObject +__ZN14IOPMrootDomain14publishFeatureEPKc +__ZN14IOPMrootDomain14shutdownSystemEv +__ZN14IOPMrootDomain14tellChangeDownEm +__ZN14IOPMrootDomain15powerChangeDoneEm +__ZN14IOPMrootDomain15reportUserInputEv +__ZN14IOPMrootDomain16adjustPowerStateEv +__ZN14IOPMrootDomain16command_receivedEPvS0_S0_S0_ +__ZN14IOPMrootDomain16tellNoChangeDownEm +__ZN14IOPMrootDomain17getSleepSupportedEv +__ZN14IOPMrootDomain17setAggressivenessEmm +__ZN14IOPMrootDomain17setSleepSupportedEm +__ZN14IOPMrootDomain18changePowerStateToEm +__ZN14IOPMrootDomain19sysPowerDownHandlerEPvS0_mP9IOServiceS0_j +__ZN14IOPMrootDomain22changePowerStateToPrivEm +__ZN14IOPMrootDomain23requestPowerDomainStateEmP17IOPowerConnectionm +__ZN14IOPMrootDomain23setQuickSpinDownTimeoutEv +__ZN14IOPMrootDomain24displayWranglerPublishedEPvS0_P9IOService +__ZN14IOPMrootDomain24receivePowerNotificationEm +__ZN14IOPMrootDomain25announcePowerSourceChangeEv +__ZN14IOPMrootDomain26handleSleepTimerExpirationEv +__ZN14IOPMrootDomain26restoreUserSpinDownTimeoutEv +__ZN14IOPMrootDomain27displayWranglerNotificationEPvS0_mP9IOServiceS0_j 
+__ZN14IOPMrootDomain39stopIgnoringClamshellEventsDuringWakeupEv +__ZN14IOPMrootDomain5startEP9IOService +__ZN14IOPMrootDomain9MetaClassC1Ev +__ZN14IOPMrootDomain9MetaClassC2Ev +__ZN14IOPMrootDomain9constructEv +__ZN14IOPMrootDomain9metaClassE +__ZN14IOPMrootDomainC1EPK11OSMetaClass +__ZN14IOPMrootDomainC1Ev +__ZN14IOPMrootDomainC2EPK11OSMetaClass +__ZN14IOPMrootDomainC2Ev +__ZN14IOPMrootDomainD0Ev +__ZN14IOPMrootDomainD2Ev +__ZN15IOConditionLock10gMetaClassE +__ZN15IOConditionLock10superClassE +__ZN15IOConditionLock10unlockWithEi +__ZN15IOConditionLock12setConditionEi +__ZN15IOConditionLock13withConditionEib +__ZN15IOConditionLock17initWithConditionEib +__ZN15IOConditionLock4freeEv +__ZN15IOConditionLock4lockEv +__ZN15IOConditionLock6unlockEv +__ZN15IOConditionLock7tryLockEv +__ZN15IOConditionLock8lockWhenEi +__ZN15IOConditionLock9MetaClassC1Ev +__ZN15IOConditionLock9MetaClassC2Ev +__ZN15IOConditionLock9metaClassE +__ZN15IOConditionLockC1EPK11OSMetaClass +__ZN15IOConditionLockC1Ev +__ZN15IOConditionLockC2EPK11OSMetaClass +__ZN15IOConditionLockC2Ev +__ZN15IOConditionLockD0Ev +__ZN15IOConditionLockD2Ev +__ZN15IOPMPowerSource10gMetaClassE +__ZN15IOPMPowerSource10isChargingEv +__ZN15IOPMPowerSource10superClassE +__ZN15IOPMPowerSource11acConnectedEv +__ZN15IOPMPowerSource11atWarnLevelEv +__ZN15IOPMPowerSource11curCapacityEv +__ZN15IOPMPowerSource11isInstalledEv +__ZN15IOPMPowerSource11maxCapacityEv +__ZN15IOPMPowerSource12currentDrawnEv +__ZN15IOPMPowerSource12updateStatusEv +__ZN15IOPMPowerSource13timeRemainingEv +__ZN15IOPMPowerSource24capacityPercentRemainingEv +__ZN15IOPMPowerSource4initEt +__ZN15IOPMPowerSource7voltageEv +__ZN15IOPMPowerSource8depletedEv +__ZN15IOPMPowerSource9MetaClassC1Ev +__ZN15IOPMPowerSource9MetaClassC2Ev +__ZN15IOPMPowerSource9metaClassE +__ZN15IOPMPowerSourceC1EPK11OSMetaClass +__ZN15IOPMPowerSourceC1Ev +__ZN15IOPMPowerSourceC2EPK11OSMetaClass +__ZN15IOPMPowerSourceC2Ev +__ZN15IOPMPowerSourceD0Ev +__ZN15IOPMPowerSourceD2Ev +__ZN15IOPanicPlatform10gMetaClassE +__ZN15IOPanicPlatform10superClassE +__ZN15IOPanicPlatform5startEP9IOService +__ZN15IOPanicPlatform9MetaClassC1Ev +__ZN15IOPanicPlatform9MetaClassC2Ev +__ZN15IOPanicPlatform9metaClassE +__ZN15IOPanicPlatformC1EPK11OSMetaClass +__ZN15IOPanicPlatformC1Ev +__ZN15IOPanicPlatformC2EPK11OSMetaClass +__ZN15IOPanicPlatformC2Ev +__ZN15IOPanicPlatformD0Ev +__ZN15IOPanicPlatformD2Ev +__ZN15IORegistryEntry10gMetaClassE +__ZN15IORegistryEntry10initializeEv +__ZN15IORegistryEntry10superClassE +__ZN15IORegistryEntry11dealiasPathEPPKcPK15IORegistryPlane +__ZN15IORegistryEntry11detachAboveEPK15IORegistryPlane +__ZN15IORegistryEntry11setLocationEPK8OSSymbolPK15IORegistryPlane +__ZN15IORegistryEntry11setLocationEPKcPK15IORegistryPlane +__ZN15IORegistryEntry11setPropertyEPK8OSStringP8OSObject +__ZN15IORegistryEntry11setPropertyEPK8OSSymbolP8OSObject +__ZN15IORegistryEntry11setPropertyEPKcP8OSObject +__ZN15IORegistryEntry11setPropertyEPKcPvj +__ZN15IORegistryEntry11setPropertyEPKcS1_ +__ZN15IORegistryEntry11setPropertyEPKcb +__ZN15IORegistryEntry11setPropertyEPKcyj +__ZN15IORegistryEntry13attachToChildEPS_PK15IORegistryPlane +__ZN15IORegistryEntry13childFromPathEPKcPK15IORegistryPlanePcPi +__ZN15IORegistryEntry13setPropertiesEP8OSObject +__ZN15IORegistryEntry14attachToParentEPS_PK15IORegistryPlane +__ZN15IORegistryEntry14removePropertyEPK8OSString +__ZN15IORegistryEntry14removePropertyEPK8OSSymbol +__ZN15IORegistryEntry14removePropertyEPKc +__ZN15IORegistryEntry15detachFromChildEPS_PK15IORegistryPlane 
+__ZN15IORegistryEntry15getRegistryRootEv +__ZN15IORegistryEntry16detachFromParentEPS_PK15IORegistryPlane +__ZN15IORegistryEntry16setPropertyTableEP12OSDictionary +__ZN15IORegistryEntry17matchPathLocationEPKcPK15IORegistryPlane +__ZN15IORegistryEntry18getGenerationCountEv +__ZN15IORegistryEntry21getChildFromComponentEPPKcPK15IORegistryPlane +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry5Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry6Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry7Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry8Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry9Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry10Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry11Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry12Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry13Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry14Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry15Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry16Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry17Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry18Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry19Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry20Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry21Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry22Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry23Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry24Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry25Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry26Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry27Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry28Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry29Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry30Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry31Ev +__ZN15IORegistryEntry4freeEv +__ZN15IORegistryEntry4initEP12OSDictionary +__ZN15IORegistryEntry4initEPS_PK15IORegistryPlane +__ZN15IORegistryEntry7setNameEPK8OSSymbolPK15IORegistryPlane +__ZN15IORegistryEntry7setNameEPKcPK15IORegistryPlane +__ZN15IORegistryEntry8fromPathEPKcPK15IORegistryPlanePcPiPS_ +__ZN15IORegistryEntry8getPlaneEPKc +__ZN15IORegistryEntry9MetaClassC1Ev +__ZN15IORegistryEntry9MetaClassC2Ev +__ZN15IORegistryEntry9detachAllEPK15IORegistryPlane +__ZN15IORegistryEntry9makePlaneEPKc +__ZN15IORegistryEntry9metaClassE +__ZN15IORegistryEntryC1EPK11OSMetaClass +__ZN15IORegistryEntryC1Ev +__ZN15IORegistryEntryC2EPK11OSMetaClass +__ZN15IORegistryEntryC2Ev +__ZN15IORegistryEntryD0Ev +__ZN15IORegistryEntryD2Ev +__ZN15IORegistryPlane10gMetaClassE +__ZN15IORegistryPlane10superClassE +__ZN15IORegistryPlane9MetaClassC1Ev +__ZN15IORegistryPlane9MetaClassC2Ev +__ZN15IORegistryPlane9metaClassE +__ZN15IORegistryPlaneC1EPK11OSMetaClass +__ZN15IORegistryPlaneC1Ev +__ZN15IORegistryPlaneC2EPK11OSMetaClass +__ZN15IORegistryPlaneC2Ev +__ZN15IORegistryPlaneD0Ev +__ZN15IORegistryPlaneD2Ev +__ZN15IOWatchDogTimer10gMetaClassE +__ZN15IOWatchDogTimer10superClassE +__ZN15IOWatchDogTimer13setPropertiesEP8OSObject +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer0Ev +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer1Ev +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer2Ev +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer3Ev +__ZN15IOWatchDogTimer4stopEP9IOService +__ZN15IOWatchDogTimer5startEP9IOService +__ZN15IOWatchDogTimer9MetaClassC1Ev +__ZN15IOWatchDogTimer9MetaClassC2Ev +__ZN15IOWatchDogTimer9metaClassE +__ZN15IOWatchDogTimerC1EPK11OSMetaClass +__ZN15IOWatchDogTimerC2EPK11OSMetaClass +__ZN15IOWatchDogTimerD0Ev +__ZN15IOWatchDogTimerD2Ev 
+__ZN15OSMetaClassBase12safeMetaCastEPKS_PK11OSMetaClass +__ZN15OSMetaClassBase13checkTypeInstEPKS_S1_ +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase3Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase4Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase5Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase6Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase7Ev +__ZN15OSMetaClassBaseC1Ev +__ZN15OSMetaClassBaseC2Ev +__ZN15OSMetaClassBaseD0Ev +__ZN15OSMetaClassBaseD2Ev +__ZN15OSMetaClassMetaC1Ev +__ZN15OSMetaClassMetaC2Ev +__ZN15_IOConfigThread10gMetaClassE +__ZN15_IOConfigThread10superClassE +__ZN15_IOConfigThread12configThreadEv +__ZN15_IOConfigThread4freeEv +__ZN15_IOConfigThread4mainEPS_ +__ZN15_IOConfigThread9MetaClassC1Ev +__ZN15_IOConfigThread9MetaClassC2Ev +__ZN15_IOConfigThread9metaClassE +__ZN15_IOConfigThreadC1EPK11OSMetaClass +__ZN15_IOConfigThreadC1Ev +__ZN15_IOConfigThreadC2EPK11OSMetaClass +__ZN15_IOConfigThreadC2Ev +__ZN15_IOConfigThreadD0Ev +__ZN15_IOConfigThreadD2Ev +__ZN16IOKitDiagnostics10gMetaClassE +__ZN16IOKitDiagnostics10superClassE +__ZN16IOKitDiagnostics11diagnosticsEv +__ZN16IOKitDiagnostics12updateOffsetEP12OSDictionarymPKc +__ZN16IOKitDiagnostics9MetaClassC1Ev +__ZN16IOKitDiagnostics9MetaClassC2Ev +__ZN16IOKitDiagnostics9metaClassE +__ZN16IOKitDiagnosticsC1EPK11OSMetaClass +__ZN16IOKitDiagnosticsC1Ev +__ZN16IOKitDiagnosticsC2EPK11OSMetaClass +__ZN16IOKitDiagnosticsC2Ev +__ZN16IOKitDiagnosticsD0Ev +__ZN16IOKitDiagnosticsD2Ev +__ZN16IOPMPagingPlexus10gMetaClassE +__ZN16IOPMPagingPlexus10superClassE +__ZN16IOPMPagingPlexus12findProviderEP9IOService +__ZN16IOPMPagingPlexus15processChildrenEv +__ZN16IOPMPagingPlexus15processSiblingsEP9IOService +__ZN16IOPMPagingPlexus17setAggressivenessEmm +__ZN16IOPMPagingPlexus5startEP9IOService +__ZN16IOPMPagingPlexus9MetaClassC1Ev +__ZN16IOPMPagingPlexus9MetaClassC2Ev +__ZN16IOPMPagingPlexus9metaClassE +__ZN16IOPMPagingPlexusC1EPK11OSMetaClass +__ZN16IOPMPagingPlexusC1Ev +__ZN16IOPMPagingPlexusC2EPK11OSMetaClass +__ZN16IOPMPagingPlexusC2Ev +__ZN16IOPMPagingPlexusD0Ev +__ZN16IOPMPagingPlexusD2Ev +__ZN16IOPMinformeeList10gMetaClassE +__ZN16IOPMinformeeList10initializeEv +__ZN16IOPMinformeeList10nextInListEP12IOPMinformee +__ZN16IOPMinformeeList10superClassE +__ZN16IOPMinformeeList11firstInListEv +__ZN16IOPMinformeeList13numberOfItemsEv +__ZN16IOPMinformeeList14removeFromListEP9IOService +__ZN16IOPMinformeeList4freeEv +__ZN16IOPMinformeeList8findItemEP9IOService +__ZN16IOPMinformeeList9MetaClassC1Ev +__ZN16IOPMinformeeList9MetaClassC2Ev +__ZN16IOPMinformeeList9addToListEP12IOPMinformee +__ZN16IOPMinformeeList9metaClassE +__ZN16IOPMinformeeListC1EPK11OSMetaClass +__ZN16IOPMinformeeListC1Ev +__ZN16IOPMinformeeListC2EPK11OSMetaClass +__ZN16IOPMinformeeListC2Ev +__ZN16IOPMinformeeListD0Ev +__ZN16IOPMinformeeListD2Ev +__ZN16IOPlatformDevice10gMetaClassE +__ZN16IOPlatformDevice10superClassE +__ZN16IOPlatformDevice12getResourcesEv +__ZN16IOPlatformDevice13matchLocationEP9IOService +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice0Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice1Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice2Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice3Ev +__ZN16IOPlatformDevice9MetaClassC1Ev +__ZN16IOPlatformDevice9MetaClassC2Ev +__ZN16IOPlatformDevice9metaClassE +__ZN16IOPlatformDeviceC1EPK11OSMetaClass +__ZN16IOPlatformDeviceC1Ev +__ZN16IOPlatformDeviceC2EPK11OSMetaClass +__ZN16IOPlatformDeviceC2Ev +__ZN16IOPlatformDeviceD0Ev +__ZN16IOPlatformDeviceD2Ev +__ZN16IOPlatformExpert10gMetaClassE 
+__ZN16IOPlatformExpert10superClassE +__ZN16IOPlatformExpert11haltRestartEj +__ZN16IOPlatformExpert11sleepKernelEv +__ZN16IOPlatformExpert12CheckSubTreeEP7OSArrayP9IOServiceS3_P12OSDictionary +__ZN16IOPlatformExpert12getModelNameEPci +__ZN16IOPlatformExpert12hasPMFeatureEm +__ZN16IOPlatformExpert13savePanicInfoEPhm +__ZN16IOPlatformExpert14getBootROMTypeEv +__ZN16IOPlatformExpert14getChipSetTypeEv +__ZN16IOPlatformExpert14getConsoleInfoEP8PE_Video +__ZN16IOPlatformExpert14getMachineNameEPci +__ZN16IOPlatformExpert14getMachineTypeEv +__ZN16IOPlatformExpert14setBootROMTypeEl +__ZN16IOPlatformExpert14setChipSetTypeEl +__ZN16IOPlatformExpert14setConsoleInfoEP8PE_Videoj +__ZN16IOPlatformExpert14setMachineTypeEl +__ZN16IOPlatformExpert15getGMTTimeOfDayEv +__ZN16IOPlatformExpert15getNubResourcesEP9IOService +__ZN16IOPlatformExpert15setGMTTimeOfDayEl +__ZN16IOPlatformExpert16PMRegisterDeviceEP9IOServiceS1_ +__ZN16IOPlatformExpert16atInterruptLevelEv +__ZN16IOPlatformExpert16hasPrivPMFeatureEm +__ZN16IOPlatformExpert20callPlatformFunctionEPK8OSSymbolbPvS3_S3_S3_ +__ZN16IOPlatformExpert21RegisterServiceInTreeEP9IOServiceP12OSDictionaryS3_S1_ +__ZN16IOPlatformExpert21numBatteriesSupportedEv +__ZN16IOPlatformExpert21platformAdjustServiceEP9IOService +__ZN16IOPlatformExpert23registerNVRAMControllerEP17IONVRAMController +__ZN16IOPlatformExpert25PMInstantiatePowerDomainsEv +__ZN16IOPlatformExpert25getPhysicalRangeAllocatorEv +__ZN16IOPlatformExpert25lookUpInterruptControllerEP8OSSymbol +__ZN16IOPlatformExpert25setCPUInterruptPropertiesEP9IOService +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert2Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert3Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert4Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert5Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert6Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert7Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert8Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert9Ev +__ZN16IOPlatformExpert27_RESERVEDIOPlatformExpert10Ev +__ZN16IOPlatformExpert27_RESERVEDIOPlatformExpert11Ev +__ZN16IOPlatformExpert27registerInterruptControllerEP8OSSymbolP21IOInterruptController +__ZN16IOPlatformExpert30createSystemSerialNumberStringEP6OSData +__ZN16IOPlatformExpert5PMLogEPKcmmm +__ZN16IOPlatformExpert5startEP9IOService +__ZN16IOPlatformExpert6attachEP9IOService +__ZN16IOPlatformExpert9MetaClassC1Ev +__ZN16IOPlatformExpert9MetaClassC2Ev +__ZN16IOPlatformExpert9configureEP9IOService +__ZN16IOPlatformExpert9createNubEP12OSDictionary +__ZN16IOPlatformExpert9metaClassE +__ZN16IOPlatformExpertC1EPK11OSMetaClass +__ZN16IOPlatformExpertC1Ev +__ZN16IOPlatformExpertC2EPK11OSMetaClass +__ZN16IOPlatformExpertC2Ev +__ZN16IOPlatformExpertD0Ev +__ZN16IOPlatformExpertD2Ev +__ZN16IORangeAllocator10deallocateEmm +__ZN16IORangeAllocator10gMetaClassE +__ZN16IORangeAllocator10superClassE +__ZN16IORangeAllocator12allocElementEm +__ZN16IORangeAllocator12getFreeCountEv +__ZN16IORangeAllocator13allocateRangeEmm +__ZN16IORangeAllocator14deallocElementEm +__ZN16IORangeAllocator16getFragmentCountEv +__ZN16IORangeAllocator19getFragmentCapacityEv +__ZN16IORangeAllocator28setFragmentCapacityIncrementEm +__ZN16IORangeAllocator4freeEv +__ZN16IORangeAllocator4initEmmmm +__ZN16IORangeAllocator8allocateEmPmm +__ZN16IORangeAllocator9MetaClassC1Ev +__ZN16IORangeAllocator9MetaClassC2Ev +__ZN16IORangeAllocator9metaClassE +__ZN16IORangeAllocator9withRangeEmmmm +__ZN16IORangeAllocatorC1EPK11OSMetaClass +__ZN16IORangeAllocatorC1Ev 
+__ZN16IORangeAllocatorC2EPK11OSMetaClass +__ZN16IORangeAllocatorC2Ev +__ZN16IORangeAllocatorD0Ev +__ZN16IORangeAllocatorD2Ev +__ZN17IOBigMemoryCursor10gMetaClassE +__ZN17IOBigMemoryCursor10superClassE +__ZN17IOBigMemoryCursor13outputSegmentEN14IOMemoryCursor15PhysicalSegmentEPvm +__ZN17IOBigMemoryCursor17withSpecificationEmmm +__ZN17IOBigMemoryCursor21initWithSpecificationEmmm +__ZN17IOBigMemoryCursor9MetaClassC1Ev +__ZN17IOBigMemoryCursor9MetaClassC2Ev +__ZN17IOBigMemoryCursor9metaClassE +__ZN17IOBigMemoryCursorC1EPK11OSMetaClass +__ZN17IOBigMemoryCursorC1Ev +__ZN17IOBigMemoryCursorC2EPK11OSMetaClass +__ZN17IOBigMemoryCursorC2Ev +__ZN17IOBigMemoryCursorD0Ev +__ZN17IOBigMemoryCursorD2Ev +__ZN17IOPowerConnection10gMetaClassE +__ZN17IOPowerConnection10superClassE +__ZN17IOPowerConnection14getAwaitingAckEv +__ZN17IOPowerConnection14setAwaitingAckEb +__ZN17IOPowerConnection16parentKnowsStateEv +__ZN17IOPowerConnection19setParentKnowsStateEb +__ZN17IOPowerConnection21getDesiredDomainStateEv +__ZN17IOPowerConnection21setDesiredDomainStateEm +__ZN17IOPowerConnection22childHasRequestedPowerEv +__ZN17IOPowerConnection23getPreventIdleSleepFlagEv +__ZN17IOPowerConnection23parentCurrentPowerFlagsEv +__ZN17IOPowerConnection23setPreventIdleSleepFlagEm +__ZN17IOPowerConnection25getPreventSystemSleepFlagEv +__ZN17IOPowerConnection25setChildHasRequestedPowerEv +__ZN17IOPowerConnection25setPreventSystemSleepFlagEm +__ZN17IOPowerConnection26setParentCurrentPowerFlagsEm +__ZN17IOPowerConnection9MetaClassC1Ev +__ZN17IOPowerConnection9MetaClassC2Ev +__ZN17IOPowerConnection9metaClassE +__ZN17IOPowerConnectionC1EPK11OSMetaClass +__ZN17IOPowerConnectionC1Ev +__ZN17IOPowerConnectionC2EPK11OSMetaClass +__ZN17IOPowerConnectionC2Ev +__ZN17IOPowerConnectionD0Ev +__ZN17IOPowerConnectionD2Ev +__ZN18IODTPlatformExpert10createNubsEP9IOServiceP10OSIterator +__ZN18IODTPlatformExpert10gMetaClassE +__ZN18IODTPlatformExpert10superClassE +__ZN18IODTPlatformExpert10writeXPRAMEmPhm +__ZN18IODTPlatformExpert11haltRestartEj +__ZN18IODTPlatformExpert12getModelNameEPci +__ZN18IODTPlatformExpert13savePanicInfoEPhm +__ZN18IODTPlatformExpert14getMachineNameEPci +__ZN18IODTPlatformExpert15getNubResourcesEP9IOService +__ZN18IODTPlatformExpert15processTopLevelEP15IORegistryEntry +__ZN18IODTPlatformExpert17readNVRAMPropertyEP15IORegistryEntryPPK8OSSymbolPP6OSData +__ZN18IODTPlatformExpert18getNVRAMPartitionsEv +__ZN18IODTPlatformExpert18readNVRAMPartitionEPK8OSSymbolmPhm +__ZN18IODTPlatformExpert18writeNVRAMPropertyEP15IORegistryEntryPK8OSSymbolP6OSData +__ZN18IODTPlatformExpert19writeNVRAMPartitionEPK8OSSymbolmPhm +__ZN18IODTPlatformExpert23registerNVRAMControllerEP17IONVRAMController +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert0Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert1Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert2Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert3Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert4Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert5Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert6Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert7Ev +__ZN18IODTPlatformExpert30createSystemSerialNumberStringEP6OSData +__ZN18IODTPlatformExpert5probeEP9IOServicePl +__ZN18IODTPlatformExpert9MetaClassC1Ev +__ZN18IODTPlatformExpert9MetaClassC2Ev +__ZN18IODTPlatformExpert9configureEP9IOService +__ZN18IODTPlatformExpert9createNubEP15IORegistryEntry +__ZN18IODTPlatformExpert9metaClassE +__ZN18IODTPlatformExpert9readXPRAMEmPhm 
+__ZN18IODTPlatformExpertC1EPK11OSMetaClass +__ZN18IODTPlatformExpertC2EPK11OSMetaClass +__ZN18IODTPlatformExpertD0Ev +__ZN18IODTPlatformExpertD2Ev +__ZN18IOMemoryDescriptor10addMappingEP11IOMemoryMap +__ZN18IOMemoryDescriptor10gMetaClassE +__ZN18IOMemoryDescriptor10initializeEv +__ZN18IOMemoryDescriptor10setMappingEP4taskjm +__ZN18IOMemoryDescriptor10superClassE +__ZN18IOMemoryDescriptor10withRangesEP14IOVirtualRangem11IODirectionP4taskb +__ZN18IOMemoryDescriptor10writeBytesEmPKvm +__ZN18IOMemoryDescriptor11handleFaultEPvP6vm_mapjmmm +__ZN18IOMemoryDescriptor11makeMappingEPS_P4taskjmmm +__ZN18IOMemoryDescriptor11withAddressEPvm11IODirection +__ZN18IOMemoryDescriptor11withAddressEjm11IODirectionP4task +__ZN18IOMemoryDescriptor11withOptionsEPvmmP4taskmP8IOMapper +__ZN18IOMemoryDescriptor12withSubRangeEPS_mm11IODirection +__ZN18IOMemoryDescriptor13removeMappingEP11IOMemoryMap +__ZN18IOMemoryDescriptor15initWithOptionsEPvmmP4taskmP8IOMapper +__ZN18IOMemoryDescriptor16getSourceSegmentEmPm +__ZN18IOMemoryDescriptor18getPhysicalAddressEv +__ZN18IOMemoryDescriptor18withPhysicalRangesEP15IOPhysicalRangem11IODirectionb +__ZN18IOMemoryDescriptor19withPhysicalAddressEmm11IODirection +__ZN18IOMemoryDescriptor20getPhysicalSegment64EmPm +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor3Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor4Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor5Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor6Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor7Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor8Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor9Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor10Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor11Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor12Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor13Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor14Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor15Ev +__ZN18IOMemoryDescriptor3mapEP4taskjmmm +__ZN18IOMemoryDescriptor3mapEm +__ZN18IOMemoryDescriptor4freeEv +__ZN18IOMemoryDescriptor5doMapEP6vm_mapPjmmm +__ZN18IOMemoryDescriptor6getTagEv +__ZN18IOMemoryDescriptor6setTagEm +__ZN18IOMemoryDescriptor7doUnmapEP6vm_mapjm +__ZN18IOMemoryDescriptor8redirectEP4taskb +__ZN18IOMemoryDescriptor9MetaClassC1Ev +__ZN18IOMemoryDescriptor9MetaClassC2Ev +__ZN18IOMemoryDescriptor9metaClassE +__ZN18IOMemoryDescriptor9readBytesEmPvm +__ZN18IOMemoryDescriptorC1EPK11OSMetaClass +__ZN18IOMemoryDescriptorC2EPK11OSMetaClass +__ZN18IOMemoryDescriptorD0Ev +__ZN18IOMemoryDescriptorD2Ev +__ZN18IOPMchangeNoteList10gMetaClassE +__ZN18IOPMchangeNoteList10initializeEv +__ZN18IOPMchangeNoteList10superClassE +__ZN18IOPMchangeNoteList12latestChangeEv +__ZN18IOPMchangeNoteList13currentChangeEv +__ZN18IOPMchangeNoteList14nextChangeNoteEm +__ZN18IOPMchangeNoteList15changeNoteInUseEm +__ZN18IOPMchangeNoteList16createChangeNoteEv +__ZN18IOPMchangeNoteList18previousChangeNoteEm +__ZN18IOPMchangeNoteList21releaseHeadChangeNoteEv +__ZN18IOPMchangeNoteList21releaseTailChangeNoteEv +__ZN18IOPMchangeNoteList9MetaClassC1Ev +__ZN18IOPMchangeNoteList9MetaClassC2Ev +__ZN18IOPMchangeNoteList9decrementEm +__ZN18IOPMchangeNoteList9incrementEm +__ZN18IOPMchangeNoteList9listEmptyEv +__ZN18IOPMchangeNoteList9metaClassE +__ZN18IOPMchangeNoteListC1EPK11OSMetaClass +__ZN18IOPMchangeNoteListC1Ev +__ZN18IOPMchangeNoteListC2EPK11OSMetaClass +__ZN18IOPMchangeNoteListC2Ev +__ZN18IOPMchangeNoteListD0Ev +__ZN18IOPMchangeNoteListD2Ev 
+__ZN18IORegistryIterator10enterEntryEPK15IORegistryPlane +__ZN18IORegistryIterator10enterEntryEv +__ZN18IORegistryIterator10gMetaClassE +__ZN18IORegistryIterator10iterateAllEv +__ZN18IORegistryIterator10superClassE +__ZN18IORegistryIterator11iterateOverEP15IORegistryEntryPK15IORegistryPlanem +__ZN18IORegistryIterator11iterateOverEPK15IORegistryPlanem +__ZN18IORegistryIterator13getNextObjectEv +__ZN18IORegistryIterator15getCurrentEntryEv +__ZN18IORegistryIterator17getNextObjectFlatEv +__ZN18IORegistryIterator22getNextObjectRecursiveEv +__ZN18IORegistryIterator4freeEv +__ZN18IORegistryIterator5resetEv +__ZN18IORegistryIterator7isValidEv +__ZN18IORegistryIterator9MetaClassC1Ev +__ZN18IORegistryIterator9MetaClassC2Ev +__ZN18IORegistryIterator9exitEntryEv +__ZN18IORegistryIterator9metaClassE +__ZN18IORegistryIteratorC1EPK11OSMetaClass +__ZN18IORegistryIteratorC1Ev +__ZN18IORegistryIteratorC2EPK11OSMetaClass +__ZN18IORegistryIteratorC2Ev +__ZN18IORegistryIteratorD0Ev +__ZN18IORegistryIteratorD2Ev +__ZN18IOTimerEventSource10gMetaClassE +__ZN18IOTimerEventSource10setTimeoutE12UnsignedWide +__ZN18IOTimerEventSource10setTimeoutE13mach_timespec +__ZN18IOTimerEventSource10setTimeoutEmm +__ZN18IOTimerEventSource10superClassE +__ZN18IOTimerEventSource10wakeAtTimeE12UnsignedWide +__ZN18IOTimerEventSource10wakeAtTimeE13mach_timespec +__ZN18IOTimerEventSource10wakeAtTimeEmm +__ZN18IOTimerEventSource12checkForWorkEv +__ZN18IOTimerEventSource12setTimeoutMSEm +__ZN18IOTimerEventSource12setTimeoutUSEm +__ZN18IOTimerEventSource12wakeAtTimeMSEm +__ZN18IOTimerEventSource12wakeAtTimeUSEm +__ZN18IOTimerEventSource13cancelTimeoutEv +__ZN18IOTimerEventSource14setTimeoutFuncEv +__ZN18IOTimerEventSource15setTimeoutTicksEm +__ZN18IOTimerEventSource15wakeAtTimeTicksEm +__ZN18IOTimerEventSource16timerEventSourceEP8OSObjectPFvS1_PS_E +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource0Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource1Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource2Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource3Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource4Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource5Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource6Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource7Ev +__ZN18IOTimerEventSource4freeEv +__ZN18IOTimerEventSource4initEP8OSObjectPFvS1_PS_E +__ZN18IOTimerEventSource6enableEv +__ZN18IOTimerEventSource7disableEv +__ZN18IOTimerEventSource7timeoutEPv +__ZN18IOTimerEventSource9MetaClassC1Ev +__ZN18IOTimerEventSource9MetaClassC2Ev +__ZN18IOTimerEventSource9metaClassE +__ZN18IOTimerEventSourceC1EPK11OSMetaClass +__ZN18IOTimerEventSourceC1Ev +__ZN18IOTimerEventSourceC2EPK11OSMetaClass +__ZN18IOTimerEventSourceC2Ev +__ZN18IOTimerEventSourceD0Ev +__ZN18IOTimerEventSourceD2Ev +__ZN18IOUserNotification10gMetaClassE +__ZN18IOUserNotification10superClassE +__ZN18IOUserNotification15setNotificationEP10IONotifier +__ZN18IOUserNotification4freeEv +__ZN18IOUserNotification4initEv +__ZN18IOUserNotification5resetEv +__ZN18IOUserNotification7isValidEv +__ZN18IOUserNotification9MetaClassC1Ev +__ZN18IOUserNotification9MetaClassC2Ev +__ZN18IOUserNotification9metaClassE +__ZN18IOUserNotificationC1EPK11OSMetaClass +__ZN18IOUserNotificationC2EPK11OSMetaClass +__ZN18IOUserNotificationD0Ev +__ZN18IOUserNotificationD2Ev +__ZN18_IOServiceNotifier10gMetaClassE +__ZN18_IOServiceNotifier10superClassE +__ZN18_IOServiceNotifier4freeEv +__ZN18_IOServiceNotifier4waitEv +__ZN18_IOServiceNotifier6enableEb 
+__ZN18_IOServiceNotifier6removeEv +__ZN18_IOServiceNotifier7disableEv +__ZN18_IOServiceNotifier9MetaClassC1Ev +__ZN18_IOServiceNotifier9MetaClassC2Ev +__ZN18_IOServiceNotifier9metaClassE +__ZN18_IOServiceNotifierC1EPK11OSMetaClass +__ZN18_IOServiceNotifierC1Ev +__ZN18_IOServiceNotifierC2EPK11OSMetaClass +__ZN18_IOServiceNotifierC2Ev +__ZN18_IOServiceNotifierD0Ev +__ZN18_IOServiceNotifierD2Ev +__ZN19IOPMPowerSourceList10gMetaClassE +__ZN19IOPMPowerSourceList10initializeEv +__ZN19IOPMPowerSourceList10nextInListEP15IOPMPowerSource +__ZN19IOPMPowerSourceList10superClassE +__ZN19IOPMPowerSourceList11firstInListEv +__ZN19IOPMPowerSourceList13numberOfItemsEv +__ZN19IOPMPowerSourceList14removeFromListEP15IOPMPowerSource +__ZN19IOPMPowerSourceList4freeEv +__ZN19IOPMPowerSourceList9MetaClassC1Ev +__ZN19IOPMPowerSourceList9MetaClassC2Ev +__ZN19IOPMPowerSourceList9addToListEP15IOPMPowerSource +__ZN19IOPMPowerSourceList9metaClassE +__ZN19IOPMPowerSourceListC1EPK11OSMetaClass +__ZN19IOPMPowerSourceListC1Ev +__ZN19IOPMPowerSourceListC2EPK11OSMetaClass +__ZN19IOPMPowerSourceListC2Ev +__ZN19IOPMPowerSourceListD0Ev +__ZN19IOPMPowerSourceListD2Ev +__ZN19IOPMPowerStateQueue10gMetaClassE +__ZN19IOPMPowerStateQueue10superClassE +__ZN19IOPMPowerStateQueue12checkForWorkEv +__ZN19IOPMPowerStateQueue14unIdleOccurredEP9IOServicem +__ZN19IOPMPowerStateQueue17PMPowerStateQueueEP8OSObject +__ZN19IOPMPowerStateQueue4initEP8OSObjectPFvS1_zE +__ZN19IOPMPowerStateQueue9MetaClassC1Ev +__ZN19IOPMPowerStateQueue9MetaClassC2Ev +__ZN19IOPMPowerStateQueue9metaClassE +__ZN19IOPMPowerStateQueueC1EPK11OSMetaClass +__ZN19IOPMPowerStateQueueC1Ev +__ZN19IOPMPowerStateQueueC2EPK11OSMetaClass +__ZN19IOPMPowerStateQueueC2Ev +__ZN19IOPMPowerStateQueueD0Ev +__ZN19IOPMPowerStateQueueD2Ev +__ZN20IOLittleMemoryCursor10gMetaClassE +__ZN20IOLittleMemoryCursor10superClassE +__ZN20IOLittleMemoryCursor13outputSegmentEN14IOMemoryCursor15PhysicalSegmentEPvm +__ZN20IOLittleMemoryCursor17withSpecificationEmmm +__ZN20IOLittleMemoryCursor21initWithSpecificationEmmm +__ZN20IOLittleMemoryCursor9MetaClassC1Ev +__ZN20IOLittleMemoryCursor9MetaClassC2Ev +__ZN20IOLittleMemoryCursor9metaClassE +__ZN20IOLittleMemoryCursorC1EPK11OSMetaClass +__ZN20IOLittleMemoryCursorC1Ev +__ZN20IOLittleMemoryCursorC2EPK11OSMetaClass +__ZN20IOLittleMemoryCursorC2Ev +__ZN20IOLittleMemoryCursorD0Ev +__ZN20IOLittleMemoryCursorD2Ev +__ZN20OSCollectionIterator10gMetaClassE +__ZN20OSCollectionIterator10superClassE +__ZN20OSCollectionIterator13getNextObjectEv +__ZN20OSCollectionIterator14withCollectionEPK12OSCollection +__ZN20OSCollectionIterator18initWithCollectionEPK12OSCollection +__ZN20OSCollectionIterator4freeEv +__ZN20OSCollectionIterator5resetEv +__ZN20OSCollectionIterator7isValidEv +__ZN20OSCollectionIterator9MetaClassC1Ev +__ZN20OSCollectionIterator9MetaClassC2Ev +__ZN20OSCollectionIterator9metaClassE +__ZN20OSCollectionIteratorC1EPK11OSMetaClass +__ZN20OSCollectionIteratorC1Ev +__ZN20OSCollectionIteratorC2EPK11OSMetaClass +__ZN20OSCollectionIteratorC2Ev +__ZN20OSCollectionIteratorD0Ev +__ZN20OSCollectionIteratorD2Ev +__ZN20RootDomainUserClient10gMetaClassE +__ZN20RootDomainUserClient10superClassE +__ZN20RootDomainUserClient11clientCloseEv +__ZN20RootDomainUserClient15setPreventativeEmm +__ZN20RootDomainUserClient26getTargetAndMethodForIndexEPP9IOServicem +__ZN20RootDomainUserClient5startEP9IOService +__ZN20RootDomainUserClient9MetaClassC1Ev +__ZN20RootDomainUserClient9MetaClassC2Ev +__ZN20RootDomainUserClient9metaClassE +__ZN20RootDomainUserClientC1EPK11OSMetaClass 
+__ZN20RootDomainUserClientC1Ev +__ZN20RootDomainUserClientC2EPK11OSMetaClass +__ZN20RootDomainUserClientC2Ev +__ZN20RootDomainUserClientD0Ev +__ZN20RootDomainUserClientD2Ev +__ZN21IOInterruptController10gMetaClassE +__ZN21IOInterruptController10initVectorElP17IOInterruptVector +__ZN21IOInterruptController10superClassE +__ZN21IOInterruptController11causeVectorElP17IOInterruptVector +__ZN21IOInterruptController12enableVectorElP17IOInterruptVector +__ZN21IOInterruptController13getVectorTypeElP17IOInterruptVector +__ZN21IOInterruptController14causeInterruptEP9IOServicei +__ZN21IOInterruptController15enableInterruptEP9IOServicei +__ZN21IOInterruptController15handleInterruptEPvP9IOServicei +__ZN21IOInterruptController16disableInterruptEP9IOServicei +__ZN21IOInterruptController16getInterruptTypeEP9IOServiceiPi +__ZN21IOInterruptController17disableVectorHardElP17IOInterruptVector +__ZN21IOInterruptController17registerInterruptEP9IOServiceiPvPFvS2_S2_S2_iES2_ +__ZN21IOInterruptController17vectorCanBeSharedElP17IOInterruptVector +__ZN21IOInterruptController19unregisterInterruptEP9IOServicei +__ZN21IOInterruptController26getInterruptHandlerAddressEv +__ZN21IOInterruptController31_RESERVEDIOInterruptController0Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController1Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController2Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController3Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController4Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController5Ev +__ZN21IOInterruptController9MetaClassC1Ev +__ZN21IOInterruptController9MetaClassC2Ev +__ZN21IOInterruptController9metaClassE +__ZN21IOInterruptControllerC1EPK11OSMetaClass +__ZN21IOInterruptControllerC2EPK11OSMetaClass +__ZN21IOInterruptControllerD0Ev +__ZN21IOInterruptControllerD2Ev +__ZN21IONaturalMemoryCursor10gMetaClassE +__ZN21IONaturalMemoryCursor10superClassE +__ZN21IONaturalMemoryCursor13outputSegmentEN14IOMemoryCursor15PhysicalSegmentEPvm +__ZN21IONaturalMemoryCursor17withSpecificationEmmm +__ZN21IONaturalMemoryCursor21initWithSpecificationEmmm +__ZN21IONaturalMemoryCursor9MetaClassC1Ev +__ZN21IONaturalMemoryCursor9MetaClassC2Ev +__ZN21IONaturalMemoryCursor9metaClassE +__ZN21IONaturalMemoryCursorC1EPK11OSMetaClass +__ZN21IONaturalMemoryCursorC1Ev +__ZN21IONaturalMemoryCursorC2EPK11OSMetaClass +__ZN21IONaturalMemoryCursorC2Ev +__ZN21IONaturalMemoryCursorD0Ev +__ZN21IONaturalMemoryCursorD2Ev +__ZN21IOSubMemoryDescriptor10gMetaClassE +__ZN21IOSubMemoryDescriptor10superClassE +__ZN21IOSubMemoryDescriptor10writeBytesEmPKvm +__ZN21IOSubMemoryDescriptor11makeMappingEP18IOMemoryDescriptorP4taskjmmm +__ZN21IOSubMemoryDescriptor12initSubRangeEP18IOMemoryDescriptormm11IODirection +__ZN21IOSubMemoryDescriptor14initWithRangesEP14IOVirtualRangem11IODirectionP4taskb +__ZN21IOSubMemoryDescriptor15initWithAddressEPvm11IODirection +__ZN21IOSubMemoryDescriptor15initWithAddressEjm11IODirectionP4task +__ZN21IOSubMemoryDescriptor16getSourceSegmentEmPm +__ZN21IOSubMemoryDescriptor17getVirtualSegmentEmPm +__ZN21IOSubMemoryDescriptor18getPhysicalSegmentEmPm +__ZN21IOSubMemoryDescriptor22initWithPhysicalRangesEP15IOPhysicalRangem11IODirectionb +__ZN21IOSubMemoryDescriptor23initWithPhysicalAddressEmm11IODirection +__ZN21IOSubMemoryDescriptor4freeEv +__ZN21IOSubMemoryDescriptor7prepareE11IODirection +__ZN21IOSubMemoryDescriptor8completeE11IODirection +__ZN21IOSubMemoryDescriptor8redirectEP4taskb +__ZN21IOSubMemoryDescriptor9MetaClassC1Ev +__ZN21IOSubMemoryDescriptor9MetaClassC2Ev 
+__ZN21IOSubMemoryDescriptor9metaClassE +__ZN21IOSubMemoryDescriptor9readBytesEmPvm +__ZN21IOSubMemoryDescriptorC1EPK11OSMetaClass +__ZN21IOSubMemoryDescriptorC1Ev +__ZN21IOSubMemoryDescriptorC2EPK11OSMetaClass +__ZN21IOSubMemoryDescriptorC2Ev +__ZN21IOSubMemoryDescriptorD0Ev +__ZN21IOSubMemoryDescriptorD2Ev +__ZN22IOInterruptEventSource10gMetaClassE +__ZN22IOInterruptEventSource10superClassE +__ZN22IOInterruptEventSource12checkForWorkEv +__ZN22IOInterruptEventSource17interruptOccurredEPvP9IOServicei +__ZN22IOInterruptEventSource20interruptEventSourceEP8OSObjectPFvS1_PS_iEP9IOServicei +__ZN22IOInterruptEventSource23normalInterruptOccurredEPvP9IOServicei +__ZN22IOInterruptEventSource24disableInterruptOccurredEPvP9IOServicei +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource0Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource1Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource2Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource3Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource4Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource5Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource6Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource7Ev +__ZN22IOInterruptEventSource4freeEv +__ZN22IOInterruptEventSource4initEP8OSObjectPFvS1_PS_iEP9IOServicei +__ZN22IOInterruptEventSource6enableEv +__ZN22IOInterruptEventSource7disableEv +__ZN22IOInterruptEventSource9MetaClassC1Ev +__ZN22IOInterruptEventSource9MetaClassC2Ev +__ZN22IOInterruptEventSource9metaClassE +__ZN22IOInterruptEventSourceC1EPK11OSMetaClass +__ZN22IOInterruptEventSourceC1Ev +__ZN22IOInterruptEventSourceC2EPK11OSMetaClass +__ZN22IOInterruptEventSourceC2Ev +__ZN22IOInterruptEventSourceD0Ev +__ZN22IOInterruptEventSourceD2Ev +__ZN22IOPlatformExpertDevice10gMetaClassE +__ZN22IOPlatformExpertDevice10superClassE +__ZN22IOPlatformExpertDevice12initWithArgsEPvS0_S0_S0_ +__ZN22IOPlatformExpertDevice32_RESERVEDIOPlatformExpertDevice0Ev +__ZN22IOPlatformExpertDevice32_RESERVEDIOPlatformExpertDevice1Ev +__ZN22IOPlatformExpertDevice32_RESERVEDIOPlatformExpertDevice2Ev +__ZN22IOPlatformExpertDevice32_RESERVEDIOPlatformExpertDevice3Ev +__ZN22IOPlatformExpertDevice4freeEv +__ZN22IOPlatformExpertDevice9MetaClassC1Ev +__ZN22IOPlatformExpertDevice9MetaClassC2Ev +__ZN22IOPlatformExpertDevice9metaClassE +__ZN22IOPlatformExpertDeviceC1EPK11OSMetaClass +__ZN22IOPlatformExpertDeviceC1Ev +__ZN22IOPlatformExpertDeviceC2EPK11OSMetaClass +__ZN22IOPlatformExpertDeviceC2Ev +__ZN22IOPlatformExpertDeviceD0Ev +__ZN22IOPlatformExpertDeviceD2Ev +__ZN22_IOOpenServiceIterator10gMetaClassE +__ZN22_IOOpenServiceIterator10superClassE +__ZN22_IOOpenServiceIterator13getNextObjectEv +__ZN22_IOOpenServiceIterator4freeEv +__ZN22_IOOpenServiceIterator5resetEv +__ZN22_IOOpenServiceIterator7isValidEv +__ZN22_IOOpenServiceIterator8iteratorEP10OSIteratorPK9IOServiceS4_ +__ZN22_IOOpenServiceIterator9MetaClassC1Ev +__ZN22_IOOpenServiceIterator9MetaClassC2Ev +__ZN22_IOOpenServiceIterator9metaClassE +__ZN22_IOOpenServiceIteratorC1EPK11OSMetaClass +__ZN22_IOOpenServiceIteratorC1Ev +__ZN22_IOOpenServiceIteratorC2EPK11OSMetaClass +__ZN22_IOOpenServiceIteratorC2Ev +__ZN22_IOOpenServiceIteratorD0Ev +__ZN22_IOOpenServiceIteratorD2Ev +__ZN23IOMultiMemoryDescriptor10gMetaClassE +__ZN23IOMultiMemoryDescriptor10superClassE +__ZN23IOMultiMemoryDescriptor10writeBytesEmPKvm +__ZN23IOMultiMemoryDescriptor14initWithRangesEP14IOVirtualRangem11IODirectionP4taskb 
+__ZN23IOMultiMemoryDescriptor15initWithAddressEPvm11IODirection +__ZN23IOMultiMemoryDescriptor15initWithAddressEjm11IODirectionP4task +__ZN23IOMultiMemoryDescriptor15withDescriptorsEPP18IOMemoryDescriptorm11IODirectionb +__ZN23IOMultiMemoryDescriptor16getSourceSegmentEmPm +__ZN23IOMultiMemoryDescriptor17getVirtualSegmentEmPm +__ZN23IOMultiMemoryDescriptor18getPhysicalSegmentEmPm +__ZN23IOMultiMemoryDescriptor19initWithDescriptorsEPP18IOMemoryDescriptorm11IODirectionb +__ZN23IOMultiMemoryDescriptor22initWithPhysicalRangesEP15IOPhysicalRangem11IODirectionb +__ZN23IOMultiMemoryDescriptor23initWithPhysicalAddressEmm11IODirection +__ZN23IOMultiMemoryDescriptor4freeEv +__ZN23IOMultiMemoryDescriptor7prepareE11IODirection +__ZN23IOMultiMemoryDescriptor8completeE11IODirection +__ZN23IOMultiMemoryDescriptor9MetaClassC1Ev +__ZN23IOMultiMemoryDescriptor9MetaClassC2Ev +__ZN23IOMultiMemoryDescriptor9metaClassE +__ZN23IOMultiMemoryDescriptor9readBytesEmPvm +__ZN23IOMultiMemoryDescriptorC1EPK11OSMetaClass +__ZN23IOMultiMemoryDescriptorC1Ev +__ZN23IOMultiMemoryDescriptorC2EPK11OSMetaClass +__ZN23IOMultiMemoryDescriptorC2Ev +__ZN23IOMultiMemoryDescriptorD0Ev +__ZN23IOMultiMemoryDescriptorD2Ev +__ZN24IOBufferMemoryDescriptor10gMetaClassE +__ZN24IOBufferMemoryDescriptor10superClassE +__ZN24IOBufferMemoryDescriptor11appendBytesEPKvj +__ZN24IOBufferMemoryDescriptor11withOptionsEmjj +__ZN24IOBufferMemoryDescriptor12setDirectionE11IODirection +__ZN24IOBufferMemoryDescriptor12withCapacityEj11IODirectionb +__ZN24IOBufferMemoryDescriptor13initWithBytesEPKvj11IODirectionb +__ZN24IOBufferMemoryDescriptor14getBytesNoCopyEjj +__ZN24IOBufferMemoryDescriptor14getBytesNoCopyEv +__ZN24IOBufferMemoryDescriptor14initWithRangesEP14IOVirtualRangem11IODirectionP4taskb +__ZN24IOBufferMemoryDescriptor15initWithAddressEPvm11IODirection +__ZN24IOBufferMemoryDescriptor15initWithAddressEjm11IODirectionP4task +__ZN24IOBufferMemoryDescriptor15initWithOptionsEmjj +__ZN24IOBufferMemoryDescriptor15initWithOptionsEmjjP4task +__ZN24IOBufferMemoryDescriptor17inTaskWithOptionsEP4taskmjj +__ZN24IOBufferMemoryDescriptor22initWithPhysicalRangesEP15IOPhysicalRangem11IODirectionb +__ZN24IOBufferMemoryDescriptor23initWithPhysicalAddressEmm11IODirection +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor1Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor2Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor3Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor4Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor5Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor6Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor7Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor8Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor9Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor10Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor11Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor12Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor13Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor14Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor15Ev +__ZN24IOBufferMemoryDescriptor4freeEv +__ZN24IOBufferMemoryDescriptor9MetaClassC1Ev +__ZN24IOBufferMemoryDescriptor9MetaClassC2Ev +__ZN24IOBufferMemoryDescriptor9metaClassE +__ZN24IOBufferMemoryDescriptor9setLengthEj 
+__ZN24IOBufferMemoryDescriptor9withBytesEPKvj11IODirectionb +__ZN24IOBufferMemoryDescriptorC1EPK11OSMetaClass +__ZN24IOBufferMemoryDescriptorC1Ev +__ZN24IOBufferMemoryDescriptorC2EPK11OSMetaClass +__ZN24IOBufferMemoryDescriptorC2Ev +__ZN24IOBufferMemoryDescriptorD0Ev +__ZN24IOBufferMemoryDescriptorD2Ev +__ZN24IOCPUInterruptController10gMetaClassE +__ZN24IOCPUInterruptController10superClassE +__ZN24IOCPUInterruptController14causeInterruptEP9IOServicei +__ZN24IOCPUInterruptController15enableInterruptEP9IOServicei +__ZN24IOCPUInterruptController15handleInterruptEPvP9IOServicei +__ZN24IOCPUInterruptController16disableInterruptEP9IOServicei +__ZN24IOCPUInterruptController16getInterruptTypeEP9IOServiceiPi +__ZN24IOCPUInterruptController17registerInterruptEP9IOServiceiPvPFvS2_S2_S2_iES2_ +__ZN24IOCPUInterruptController18enableCPUInterruptEP5IOCPU +__ZN24IOCPUInterruptController25setCPUInterruptPropertiesEP9IOService +__ZN24IOCPUInterruptController26initCPUInterruptControllerEi +__ZN24IOCPUInterruptController30registerCPUInterruptControllerEv +__ZN24IOCPUInterruptController34_RESERVEDIOCPUInterruptController0Ev +__ZN24IOCPUInterruptController34_RESERVEDIOCPUInterruptController1Ev +__ZN24IOCPUInterruptController34_RESERVEDIOCPUInterruptController2Ev +__ZN24IOCPUInterruptController34_RESERVEDIOCPUInterruptController3Ev +__ZN24IOCPUInterruptController34_RESERVEDIOCPUInterruptController4Ev +__ZN24IOCPUInterruptController34_RESERVEDIOCPUInterruptController5Ev +__ZN24IOCPUInterruptController9MetaClassC1Ev +__ZN24IOCPUInterruptController9MetaClassC2Ev +__ZN24IOCPUInterruptController9metaClassE +__ZN24IOCPUInterruptControllerC1EPK11OSMetaClass +__ZN24IOCPUInterruptControllerC1Ev +__ZN24IOCPUInterruptControllerC2EPK11OSMetaClass +__ZN24IOCPUInterruptControllerC2Ev +__ZN24IOCPUInterruptControllerD0Ev +__ZN24IOCPUInterruptControllerD2Ev +__ZN25IOGeneralMemoryDescriptor10gMetaClassE +__ZN25IOGeneralMemoryDescriptor10superClassE +__ZN25IOGeneralMemoryDescriptor11setPositionEm +__ZN25IOGeneralMemoryDescriptor11wireVirtualE11IODirection +__ZN25IOGeneralMemoryDescriptor13mapIntoKernelEj +__ZN25IOGeneralMemoryDescriptor14initWithRangesEP14IOVirtualRangem11IODirectionP4taskb +__ZN25IOGeneralMemoryDescriptor15initWithAddressEPvm11IODirection +__ZN25IOGeneralMemoryDescriptor15initWithAddressEjm11IODirectionP4task +__ZN25IOGeneralMemoryDescriptor15initWithOptionsEPvmmP4taskmP8IOMapper +__ZN25IOGeneralMemoryDescriptor15unmapFromKernelEv +__ZN25IOGeneralMemoryDescriptor16getSourceSegmentEmPm +__ZN25IOGeneralMemoryDescriptor17getVirtualSegmentEmPm +__ZN25IOGeneralMemoryDescriptor18getPhysicalSegmentEmPm +__ZN25IOGeneralMemoryDescriptor22initWithPhysicalRangesEP15IOPhysicalRangem11IODirectionb +__ZN25IOGeneralMemoryDescriptor23initWithPhysicalAddressEmm11IODirection +__ZN25IOGeneralMemoryDescriptor4freeEv +__ZN25IOGeneralMemoryDescriptor5doMapEP6vm_mapPjmmm +__ZN25IOGeneralMemoryDescriptor7doUnmapEP6vm_mapjm +__ZN25IOGeneralMemoryDescriptor7prepareE11IODirection +__ZN25IOGeneralMemoryDescriptor8completeE11IODirection +__ZN25IOGeneralMemoryDescriptor9MetaClassC1Ev +__ZN25IOGeneralMemoryDescriptor9MetaClassC2Ev +__ZN25IOGeneralMemoryDescriptor9metaClassE +__ZN25IOGeneralMemoryDescriptorC1EPK11OSMetaClass +__ZN25IOGeneralMemoryDescriptorC1Ev +__ZN25IOGeneralMemoryDescriptorC2EPK11OSMetaClass +__ZN25IOGeneralMemoryDescriptorC2Ev +__ZN25IOGeneralMemoryDescriptorD0Ev +__ZN25IOGeneralMemoryDescriptorD2Ev +__ZN25IOServiceUserNotification10gMetaClassE +__ZN25IOServiceUserNotification10superClassE 
+__ZN25IOServiceUserNotification13getNextObjectEv +__ZN25IOServiceUserNotification4freeEv +__ZN25IOServiceUserNotification4initEP8ipc_portjPj +__ZN25IOServiceUserNotification7handlerEPvP9IOService +__ZN25IOServiceUserNotification8_handlerEPvS0_P9IOService +__ZN25IOServiceUserNotification9MetaClassC1Ev +__ZN25IOServiceUserNotification9MetaClassC2Ev +__ZN25IOServiceUserNotification9metaClassE +__ZN25IOServiceUserNotificationC1EPK11OSMetaClass +__ZN25IOServiceUserNotificationC1Ev +__ZN25IOServiceUserNotificationC2EPK11OSMetaClass +__ZN25IOServiceUserNotificationC2Ev +__ZN25IOServiceUserNotificationD0Ev +__ZN25IOServiceUserNotificationD2Ev +__ZN26_IOServiceInterestNotifier10gMetaClassE +__ZN26_IOServiceInterestNotifier10superClassE +__ZN26_IOServiceInterestNotifier4freeEv +__ZN26_IOServiceInterestNotifier4waitEv +__ZN26_IOServiceInterestNotifier6enableEb +__ZN26_IOServiceInterestNotifier6removeEv +__ZN26_IOServiceInterestNotifier7disableEv +__ZN26_IOServiceInterestNotifier9MetaClassC1Ev +__ZN26_IOServiceInterestNotifier9MetaClassC2Ev +__ZN26_IOServiceInterestNotifier9metaClassE +__ZN26_IOServiceInterestNotifierC1EPK11OSMetaClass +__ZN26_IOServiceInterestNotifierC1Ev +__ZN26_IOServiceInterestNotifierC2EPK11OSMetaClass +__ZN26_IOServiceInterestNotifierC2Ev +__ZN26_IOServiceInterestNotifierD0Ev +__ZN26_IOServiceInterestNotifierD2Ev +__ZN27IOSharedInterruptController10gMetaClassE +__ZN27IOSharedInterruptController10superClassE +__ZN27IOSharedInterruptController15enableInterruptEP9IOServicei +__ZN27IOSharedInterruptController15handleInterruptEPvP9IOServicei +__ZN27IOSharedInterruptController16disableInterruptEP9IOServicei +__ZN27IOSharedInterruptController16getInterruptTypeEP9IOServiceiPi +__ZN27IOSharedInterruptController17registerInterruptEP9IOServiceiPvPFvS2_S2_S2_iES2_ +__ZN27IOSharedInterruptController19unregisterInterruptEP9IOServicei +__ZN27IOSharedInterruptController23initInterruptControllerEP21IOInterruptControllerP6OSData +__ZN27IOSharedInterruptController26getInterruptHandlerAddressEv +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController0Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController1Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController2Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController3Ev +__ZN27IOSharedInterruptController9MetaClassC1Ev +__ZN27IOSharedInterruptController9MetaClassC2Ev +__ZN27IOSharedInterruptController9metaClassE +__ZN27IOSharedInterruptControllerC1EPK11OSMetaClass +__ZN27IOSharedInterruptControllerC1Ev +__ZN27IOSharedInterruptControllerC2EPK11OSMetaClass +__ZN27IOSharedInterruptControllerC2Ev +__ZN27IOSharedInterruptControllerD0Ev +__ZN27IOSharedInterruptControllerD2Ev +__ZN28IOFilterInterruptEventSource10gMetaClassE +__ZN28IOFilterInterruptEventSource10superClassE +__ZN28IOFilterInterruptEventSource15signalInterruptEv +__ZN28IOFilterInterruptEventSource20interruptEventSourceEP8OSObjectPFvS1_P22IOInterruptEventSourceiEP9IOServicei +__ZN28IOFilterInterruptEventSource23normalInterruptOccurredEPvP9IOServicei +__ZN28IOFilterInterruptEventSource24disableInterruptOccurredEPvP9IOServicei +__ZN28IOFilterInterruptEventSource26filterInterruptEventSourceEP8OSObjectPFvS1_P22IOInterruptEventSourceiEPFbS1_PS_EP9IOServicei +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource0Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource1Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource2Ev 
+__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource3Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource4Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource5Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource6Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource7Ev +__ZN28IOFilterInterruptEventSource4initEP8OSObjectPFvS1_P22IOInterruptEventSourceiEP9IOServicei +__ZN28IOFilterInterruptEventSource4initEP8OSObjectPFvS1_P22IOInterruptEventSourceiEPFbS1_PS_EP9IOServicei +__ZN28IOFilterInterruptEventSource9MetaClassC1Ev +__ZN28IOFilterInterruptEventSource9MetaClassC2Ev +__ZN28IOFilterInterruptEventSource9metaClassE +__ZN28IOFilterInterruptEventSourceC1EPK11OSMetaClass +__ZN28IOFilterInterruptEventSourceC1Ev +__ZN28IOFilterInterruptEventSourceC2EPK11OSMetaClass +__ZN28IOFilterInterruptEventSourceC2Ev +__ZN28IOFilterInterruptEventSourceD0Ev +__ZN28IOFilterInterruptEventSourceD2Ev +__ZN32IOServiceMessageUserNotification10gMetaClassE +__ZN32IOServiceMessageUserNotification10superClassE +__ZN32IOServiceMessageUserNotification13getNextObjectEv +__ZN32IOServiceMessageUserNotification4freeEv +__ZN32IOServiceMessageUserNotification4initEP8ipc_portjPjj +__ZN32IOServiceMessageUserNotification7handlerEPvmP9IOServiceS0_j +__ZN32IOServiceMessageUserNotification8_handlerEPvS0_mP9IOServiceS0_j +__ZN32IOServiceMessageUserNotification9MetaClassC1Ev +__ZN32IOServiceMessageUserNotification9MetaClassC2Ev +__ZN32IOServiceMessageUserNotification9metaClassE +__ZN32IOServiceMessageUserNotificationC1EPK11OSMetaClass +__ZN32IOServiceMessageUserNotificationC1Ev +__ZN32IOServiceMessageUserNotificationC2EPK11OSMetaClass +__ZN32IOServiceMessageUserNotificationC2Ev +__ZN32IOServiceMessageUserNotificationD0Ev +__ZN32IOServiceMessageUserNotificationD2Ev +__ZN5IOCPU10gMetaClassE +__ZN5IOCPU10superClassE +__ZN5IOCPU11getCPUGroupEv +__ZN5IOCPU11getCPUStateEv +__ZN5IOCPU11setCPUStateEm +__ZN5IOCPU12getCPUNumberEv +__ZN5IOCPU12setCPUNumberEm +__ZN5IOCPU13setPropertiesEP8OSObject +__ZN5IOCPU15_RESERVEDIOCPU0Ev +__ZN5IOCPU15_RESERVEDIOCPU1Ev +__ZN5IOCPU15_RESERVEDIOCPU2Ev +__ZN5IOCPU15_RESERVEDIOCPU3Ev +__ZN5IOCPU15_RESERVEDIOCPU4Ev +__ZN5IOCPU15_RESERVEDIOCPU5Ev +__ZN5IOCPU15_RESERVEDIOCPU6Ev +__ZN5IOCPU15_RESERVEDIOCPU7Ev +__ZN5IOCPU15getCPUGroupSizeEv +__ZN5IOCPU16getMachProcessorEv +__ZN5IOCPU17enableCPUTimeBaseEb +__ZN5IOCPU5startEP9IOService +__ZN5IOCPU8initCPUsEv +__ZN5IOCPU9MetaClassC1Ev +__ZN5IOCPU9MetaClassC2Ev +__ZN5IOCPU9metaClassE +__ZN5IOCPU9signalCPUEPS_ +__ZN5IOCPUC1EPK11OSMetaClass +__ZN5IOCPUC2EPK11OSMetaClass +__ZN5IOCPUD0Ev +__ZN5IOCPUD2Ev +__ZN5OSSet10gMetaClassE +__ZN5OSSet10superClassE +__ZN5OSSet11initWithSetEPKS_j +__ZN5OSSet11withObjectsEPPK8OSObjectjj +__ZN5OSSet12removeObjectEPK15OSMetaClassBase +__ZN5OSSet12withCapacityEj +__ZN5OSSet13initWithArrayEPK7OSArrayj +__ZN5OSSet14ensureCapacityEj +__ZN5OSSet15_RESERVEDOSSet0Ev +__ZN5OSSet15_RESERVEDOSSet1Ev +__ZN5OSSet15_RESERVEDOSSet2Ev +__ZN5OSSet15_RESERVEDOSSet3Ev +__ZN5OSSet15_RESERVEDOSSet4Ev +__ZN5OSSet15_RESERVEDOSSet5Ev +__ZN5OSSet15_RESERVEDOSSet6Ev +__ZN5OSSet15_RESERVEDOSSet7Ev +__ZN5OSSet15flushCollectionEv +__ZN5OSSet15initWithObjectsEPPK8OSObjectjj +__ZN5OSSet16initWithCapacityEj +__ZN5OSSet20setCapacityIncrementEj +__ZN5OSSet4freeEv +__ZN5OSSet5mergeEPK7OSArray +__ZN5OSSet5mergeEPKS_ +__ZN5OSSet7withSetEPKS_j +__ZN5OSSet9MetaClassC1Ev +__ZN5OSSet9MetaClassC2Ev +__ZN5OSSet9metaClassE +__ZN5OSSet9setObjectEPK15OSMetaClassBase 
+__ZN5OSSet9withArrayEPK7OSArrayj +__ZN5OSSetC1EPK11OSMetaClass +__ZN5OSSetC1Ev +__ZN5OSSetC2EPK11OSMetaClass +__ZN5OSSetC2Ev +__ZN5OSSetD0Ev +__ZN5OSSetD2Ev +__ZN6OSData10appendByteEhj +__ZN6OSData10gMetaClassE +__ZN6OSData10superClassE +__ZN6OSData11appendBytesEPKS_ +__ZN6OSData11appendBytesEPKvj +__ZN6OSData12initWithDataEPKS_ +__ZN6OSData12initWithDataEPKS_jj +__ZN6OSData12withCapacityEj +__ZN6OSData13initWithBytesEPKvj +__ZN6OSData14ensureCapacityEj +__ZN6OSData15withBytesNoCopyEPvj +__ZN6OSData16_RESERVEDOSData0Ev +__ZN6OSData16_RESERVEDOSData1Ev +__ZN6OSData16_RESERVEDOSData2Ev +__ZN6OSData16_RESERVEDOSData3Ev +__ZN6OSData16_RESERVEDOSData4Ev +__ZN6OSData16_RESERVEDOSData5Ev +__ZN6OSData16_RESERVEDOSData6Ev +__ZN6OSData16_RESERVEDOSData7Ev +__ZN6OSData16initWithCapacityEj +__ZN6OSData19initWithBytesNoCopyEPvj +__ZN6OSData20setCapacityIncrementEj +__ZN6OSData4freeEv +__ZN6OSData8withDataEPKS_ +__ZN6OSData8withDataEPKS_jj +__ZN6OSData9MetaClassC1Ev +__ZN6OSData9MetaClassC2Ev +__ZN6OSData9metaClassE +__ZN6OSData9withBytesEPKvj +__ZN6OSDataC1EPK11OSMetaClass +__ZN6OSDataC1Ev +__ZN6OSDataC2EPK11OSMetaClass +__ZN6OSDataC2Ev +__ZN6OSDataD0Ev +__ZN6OSDataD2Ev +__ZN7OSArray10gMetaClassE +__ZN7OSArray10superClassE +__ZN7OSArray11withObjectsEPPK8OSObjectjj +__ZN7OSArray12removeObjectEj +__ZN7OSArray12withCapacityEj +__ZN7OSArray13initWithArrayEPKS_j +__ZN7OSArray13replaceObjectEjPK15OSMetaClassBase +__ZN7OSArray14ensureCapacityEj +__ZN7OSArray15flushCollectionEv +__ZN7OSArray15initWithObjectsEPPK8OSObjectjj +__ZN7OSArray16initWithCapacityEj +__ZN7OSArray17_RESERVEDOSArray0Ev +__ZN7OSArray17_RESERVEDOSArray1Ev +__ZN7OSArray17_RESERVEDOSArray2Ev +__ZN7OSArray17_RESERVEDOSArray3Ev +__ZN7OSArray17_RESERVEDOSArray4Ev +__ZN7OSArray17_RESERVEDOSArray5Ev +__ZN7OSArray17_RESERVEDOSArray6Ev +__ZN7OSArray17_RESERVEDOSArray7Ev +__ZN7OSArray20setCapacityIncrementEj +__ZN7OSArray4freeEv +__ZN7OSArray5mergeEPKS_ +__ZN7OSArray9MetaClassC1Ev +__ZN7OSArray9MetaClassC2Ev +__ZN7OSArray9metaClassE +__ZN7OSArray9setObjectEPK15OSMetaClassBase +__ZN7OSArray9setObjectEjPK15OSMetaClassBase +__ZN7OSArray9withArrayEPKS_j +__ZN7OSArrayC1EPK11OSMetaClass +__ZN7OSArrayC1Ev +__ZN7OSArrayC2EPK11OSMetaClass +__ZN7OSArrayC2Ev +__ZN7OSArrayD0Ev +__ZN7OSArrayD2Ev +__ZN8IOMapper10allocTableEm +__ZN8IOMapper10gMetaClassE +__ZN8IOMapper10iovmInsertEjmP13upl_page_infom +__ZN8IOMapper10iovmInsertEjmPjm +__ZN8IOMapper10superClassE +__ZN8IOMapper11NewARTTableEmPPvPj +__ZN8IOMapper12FreeARTTableEP6OSDatam +__ZN8IOMapper17setMapperRequiredEb +__ZN8IOMapper18_RESERVEDIOMapper0Ev +__ZN8IOMapper18_RESERVEDIOMapper1Ev +__ZN8IOMapper18_RESERVEDIOMapper2Ev +__ZN8IOMapper18_RESERVEDIOMapper3Ev +__ZN8IOMapper18_RESERVEDIOMapper4Ev +__ZN8IOMapper18_RESERVEDIOMapper5Ev +__ZN8IOMapper18_RESERVEDIOMapper6Ev +__ZN8IOMapper18_RESERVEDIOMapper7Ev +__ZN8IOMapper18_RESERVEDIOMapper8Ev +__ZN8IOMapper18_RESERVEDIOMapper9Ev +__ZN8IOMapper19_RESERVEDIOMapper10Ev +__ZN8IOMapper19_RESERVEDIOMapper11Ev +__ZN8IOMapper19_RESERVEDIOMapper12Ev +__ZN8IOMapper19_RESERVEDIOMapper13Ev +__ZN8IOMapper19_RESERVEDIOMapper14Ev +__ZN8IOMapper19_RESERVEDIOMapper15Ev +__ZN8IOMapper19waitForSystemMapperEv +__ZN8IOMapper4freeEv +__ZN8IOMapper5startEP9IOService +__ZN8IOMapper7gSystemE +__ZN8IOMapper9MetaClassC1Ev +__ZN8IOMapper9MetaClassC2Ev +__ZN8IOMapper9metaClassE +__ZN8IOMapperC1EPK11OSMetaClass +__ZN8IOMapperC2EPK11OSMetaClass +__ZN8IOMapperD0Ev +__ZN8IOMapperD2Ev +__ZN8IOPMpriv10gMetaClassE +__ZN8IOPMpriv10superClassE +__ZN8IOPMpriv9MetaClassC1Ev +__ZN8IOPMpriv9MetaClassC2Ev 
+__ZN8IOPMpriv9metaClassE +__ZN8IOPMprivC1EPK11OSMetaClass +__ZN8IOPMprivC1Ev +__ZN8IOPMprivC2EPK11OSMetaClass +__ZN8IOPMprivC2Ev +__ZN8IOPMprivD0Ev +__ZN8IOPMprivD2Ev +__ZN8IOPMprot10gMetaClassE +__ZN8IOPMprot10superClassE +__ZN8IOPMprot9MetaClassC1Ev +__ZN8IOPMprot9MetaClassC2Ev +__ZN8IOPMprot9metaClassE +__ZN8IOPMprotC1EPK11OSMetaClass +__ZN8IOPMprotC1Ev +__ZN8IOPMprotC2EPK11OSMetaClass +__ZN8IOPMprotC2Ev +__ZN8IOPMprotD0Ev +__ZN8IOPMprotD2Ev +__ZN8IOSyncer10gMetaClassE +__ZN8IOSyncer10superClassE +__ZN8IOSyncer13privateSignalEv +__ZN8IOSyncer4freeEv +__ZN8IOSyncer4initEb +__ZN8IOSyncer4waitEb +__ZN8IOSyncer6createEb +__ZN8IOSyncer6reinitEv +__ZN8IOSyncer6signalEib +__ZN8IOSyncer9MetaClassC1Ev +__ZN8IOSyncer9MetaClassC2Ev +__ZN8IOSyncer9metaClassE +__ZN8IOSyncerC1EPK11OSMetaClass +__ZN8IOSyncerC1Ev +__ZN8IOSyncerC2EPK11OSMetaClass +__ZN8IOSyncerC2Ev +__ZN8IOSyncerD0Ev +__ZN8IOSyncerD2Ev +__ZN8OSNumber10gMetaClassE +__ZN8OSNumber10superClassE +__ZN8OSNumber10withNumberEPKcj +__ZN8OSNumber10withNumberEyj +__ZN8OSNumber18_RESERVEDOSNumber0Ev +__ZN8OSNumber18_RESERVEDOSNumber1Ev +__ZN8OSNumber18_RESERVEDOSNumber2Ev +__ZN8OSNumber18_RESERVEDOSNumber3Ev +__ZN8OSNumber18_RESERVEDOSNumber4Ev +__ZN8OSNumber18_RESERVEDOSNumber5Ev +__ZN8OSNumber18_RESERVEDOSNumber6Ev +__ZN8OSNumber18_RESERVEDOSNumber7Ev +__ZN8OSNumber4freeEv +__ZN8OSNumber4initEPKcj +__ZN8OSNumber4initEyj +__ZN8OSNumber8addValueEx +__ZN8OSNumber8setValueEy +__ZN8OSNumber9MetaClassC1Ev +__ZN8OSNumber9MetaClassC2Ev +__ZN8OSNumber9metaClassE +__ZN8OSNumberC1EPK11OSMetaClass +__ZN8OSNumberC1Ev +__ZN8OSNumberC2EPK11OSMetaClass +__ZN8OSNumberC2Ev +__ZN8OSNumberD0Ev +__ZN8OSNumberD2Ev +__ZN8OSObject10gMetaClassE +__ZN8OSObject10superClassE +__ZN8OSObject18_RESERVEDOSObject0Ev +__ZN8OSObject18_RESERVEDOSObject1Ev +__ZN8OSObject18_RESERVEDOSObject2Ev +__ZN8OSObject18_RESERVEDOSObject3Ev +__ZN8OSObject18_RESERVEDOSObject4Ev +__ZN8OSObject18_RESERVEDOSObject5Ev +__ZN8OSObject18_RESERVEDOSObject6Ev +__ZN8OSObject18_RESERVEDOSObject7Ev +__ZN8OSObject18_RESERVEDOSObject8Ev +__ZN8OSObject18_RESERVEDOSObject9Ev +__ZN8OSObject19_RESERVEDOSObject10Ev +__ZN8OSObject19_RESERVEDOSObject11Ev +__ZN8OSObject19_RESERVEDOSObject12Ev +__ZN8OSObject19_RESERVEDOSObject13Ev +__ZN8OSObject19_RESERVEDOSObject14Ev +__ZN8OSObject19_RESERVEDOSObject15Ev +__ZN8OSObject19_RESERVEDOSObject16Ev +__ZN8OSObject19_RESERVEDOSObject17Ev +__ZN8OSObject19_RESERVEDOSObject18Ev +__ZN8OSObject19_RESERVEDOSObject19Ev +__ZN8OSObject19_RESERVEDOSObject20Ev +__ZN8OSObject19_RESERVEDOSObject21Ev +__ZN8OSObject19_RESERVEDOSObject22Ev +__ZN8OSObject19_RESERVEDOSObject23Ev +__ZN8OSObject19_RESERVEDOSObject24Ev +__ZN8OSObject19_RESERVEDOSObject25Ev +__ZN8OSObject19_RESERVEDOSObject26Ev +__ZN8OSObject19_RESERVEDOSObject27Ev +__ZN8OSObject19_RESERVEDOSObject28Ev +__ZN8OSObject19_RESERVEDOSObject29Ev +__ZN8OSObject19_RESERVEDOSObject30Ev +__ZN8OSObject19_RESERVEDOSObject31Ev +__ZN8OSObject4freeEv +__ZN8OSObject4initEv +__ZN8OSObject9MetaClassC1Ev +__ZN8OSObject9MetaClassC2Ev +__ZN8OSObject9metaClassE +__ZN8OSObjectC1EPK11OSMetaClass +__ZN8OSObjectC1Ev +__ZN8OSObjectC2EPK11OSMetaClass +__ZN8OSObjectC2Ev +__ZN8OSObjectD0Ev +__ZN8OSObjectD2Ev +__ZN8OSObjectdlEPvm +__ZN8OSObjectnwEm +__ZN8OSString10gMetaClassE +__ZN8OSString10superClassE +__ZN8OSString10withStringEPKS_ +__ZN8OSString11withCStringEPKc +__ZN8OSString14initWithStringEPKS_ +__ZN8OSString15initWithCStringEPKc +__ZN8OSString17withCStringNoCopyEPKc +__ZN8OSString18_RESERVEDOSString0Ev +__ZN8OSString18_RESERVEDOSString1Ev 
+__ZN8OSString18_RESERVEDOSString2Ev +__ZN8OSString18_RESERVEDOSString3Ev +__ZN8OSString18_RESERVEDOSString4Ev +__ZN8OSString18_RESERVEDOSString5Ev +__ZN8OSString18_RESERVEDOSString6Ev +__ZN8OSString18_RESERVEDOSString7Ev +__ZN8OSString18_RESERVEDOSString8Ev +__ZN8OSString18_RESERVEDOSString9Ev +__ZN8OSString19_RESERVEDOSString10Ev +__ZN8OSString19_RESERVEDOSString11Ev +__ZN8OSString19_RESERVEDOSString12Ev +__ZN8OSString19_RESERVEDOSString13Ev +__ZN8OSString19_RESERVEDOSString14Ev +__ZN8OSString19_RESERVEDOSString15Ev +__ZN8OSString21initWithCStringNoCopyEPKc +__ZN8OSString4freeEv +__ZN8OSString7setCharEcj +__ZN8OSString9MetaClassC1Ev +__ZN8OSString9MetaClassC2Ev +__ZN8OSString9metaClassE +__ZN8OSStringC1EPK11OSMetaClass +__ZN8OSStringC1Ev +__ZN8OSStringC2EPK11OSMetaClass +__ZN8OSStringC2Ev +__ZN8OSStringD0Ev +__ZN8OSStringD2Ev +__ZN8OSSymbol10gMetaClassE +__ZN8OSSymbol10initializeEv +__ZN8OSSymbol10superClassE +__ZN8OSSymbol10withStringEPK8OSString +__ZN8OSSymbol11withCStringEPKc +__ZN8OSSymbol14initWithStringEPK8OSString +__ZN8OSSymbol15initWithCStringEPKc +__ZN8OSSymbol17withCStringNoCopyEPKc +__ZN8OSSymbol18_RESERVEDOSSymbol0Ev +__ZN8OSSymbol18_RESERVEDOSSymbol1Ev +__ZN8OSSymbol18_RESERVEDOSSymbol2Ev +__ZN8OSSymbol18_RESERVEDOSSymbol3Ev +__ZN8OSSymbol18_RESERVEDOSSymbol4Ev +__ZN8OSSymbol18_RESERVEDOSSymbol5Ev +__ZN8OSSymbol18_RESERVEDOSSymbol6Ev +__ZN8OSSymbol18_RESERVEDOSSymbol7Ev +__ZN8OSSymbol18checkForPageUnloadEPvS0_ +__ZN8OSSymbol21initWithCStringNoCopyEPKc +__ZN8OSSymbol4freeEv +__ZN8OSSymbol9MetaClassC1Ev +__ZN8OSSymbol9MetaClassC2Ev +__ZN8OSSymbol9metaClassE +__ZN8OSSymbolC1EPK11OSMetaClass +__ZN8OSSymbolC1Ev +__ZN8OSSymbolC2EPK11OSMetaClass +__ZN8OSSymbolC2Ev +__ZN8OSSymbolD0Ev +__ZN8OSSymbolD2Ev +__ZN9IOCommand10gMetaClassE +__ZN9IOCommand10superClassE +__ZN9IOCommand4initEv +__ZN9IOCommand9MetaClassC1Ev +__ZN9IOCommand9MetaClassC2Ev +__ZN9IOCommand9metaClassE +__ZN9IOCommandC1EPK11OSMetaClass +__ZN9IOCommandC2EPK11OSMetaClass +__ZN9IOCommandD0Ev +__ZN9IOCommandD2Ev +__ZN9IODTNVRAM10gMetaClassE +__ZN9IODTNVRAM10superClassE +__ZN9IODTNVRAM10writeXPRAMEmPhm +__ZN9IODTNVRAM11setPropertyEPK8OSSymbolP8OSObject +__ZN9IODTNVRAM13savePanicInfoEPhm +__ZN9IODTNVRAM13setPropertiesEP8OSObject +__ZN9IODTNVRAM15initOFVariablesEv +__ZN9IODTNVRAM15syncOFVariablesEv +__ZN9IODTNVRAM16escapeDataToDataEP6OSData +__ZN9IODTNVRAM16updateOWBootArgsEPK8OSSymbolP8OSObject +__ZN9IODTNVRAM17getOWVariableInfoEmPPK8OSSymbolPmS4_ +__ZN9IODTNVRAM17readNVRAMPropertyEP15IORegistryEntryPPK8OSSymbolPP6OSData +__ZN9IODTNVRAM18generateOWChecksumEPh +__ZN9IODTNVRAM18getNVRAMPartitionsEv +__ZN9IODTNVRAM18readNVRAMPartitionEPK8OSSymbolmPhm +__ZN9IODTNVRAM18validateOWChecksumEPh +__ZN9IODTNVRAM18writeNVRAMPropertyEP15IORegistryEntryPK8OSSymbolP6OSData +__ZN9IODTNVRAM19convertObjectToPropEPhPmPK8OSSymbolP8OSObject +__ZN9IODTNVRAM19convertPropToObjectEPhmS0_mPPK8OSSymbolPP8OSObject +__ZN9IODTNVRAM19searchNVRAMPropertyEP17IONVRAMDescriptorPm +__ZN9IODTNVRAM19unescapeBytesToDataEPhm +__ZN9IODTNVRAM19writeNVRAMPartitionEPK8OSSymbolmPhm +__ZN9IODTNVRAM22readNVRAMPropertyType0EP15IORegistryEntryPPK8OSSymbolPP6OSData +__ZN9IODTNVRAM22readNVRAMPropertyType1EP15IORegistryEntryPPK8OSSymbolPP6OSData +__ZN9IODTNVRAM23registerNVRAMControllerEP17IONVRAMController +__ZN9IODTNVRAM23writeNVRAMPropertyType0EP15IORegistryEntryPK8OSSymbolP6OSData +__ZN9IODTNVRAM23writeNVRAMPropertyType1EP15IORegistryEntryPK8OSSymbolP6OSData +__ZN9IODTNVRAM26calculatePartitionChecksumEPh +__ZN9IODTNVRAM4initEP15IORegistryEntryPK15IORegistryPlane 
+__ZN9IODTNVRAM4syncEv +__ZN9IODTNVRAM9MetaClassC1Ev +__ZN9IODTNVRAM9MetaClassC2Ev +__ZN9IODTNVRAM9metaClassE +__ZN9IODTNVRAM9readXPRAMEmPhm +__ZN9IODTNVRAMC1EPK11OSMetaClass +__ZN9IODTNVRAMC1Ev +__ZN9IODTNVRAMC2EPK11OSMetaClass +__ZN9IODTNVRAMC2Ev +__ZN9IODTNVRAMD0Ev +__ZN9IODTNVRAMD2Ev +__ZN9IOService10actionStopEPS_S0_ +__ZN9IOService10adjustBusyEl +__ZN9IOService10ask_parentEm +__ZN9IOService10gMetaClassE +__ZN9IOService10handleOpenEPS_mPv +__ZN9IOService10initializeEv +__ZN9IOService10joinPMtreeEPS_ +__ZN9IOService10makeUsableEv +__ZN9IOService10superClassE +__ZN9IOService10systemWakeEv +__ZN9IOService10youAreRootEv +__ZN9IOService11_adjustBusyEl +__ZN9IOService11addLocationEP12OSDictionary +__ZN9IOService11changeStateEv +__ZN9IOService11getPlatformEv +__ZN9IOService11handleCloseEPS_m +__ZN9IOService11notifyChildEP17IOPowerConnectionb +__ZN9IOService11setPlatformEP16IOPlatformExpert +__ZN9IOService11tellClientsEi +__ZN9IOService12acquire_lockEv +__ZN9IOService12checkForDoneEv +__ZN9IOService12clampPowerOnEm +__ZN9IOService12didTerminateEPS_mPb +__ZN9IOService12driver_ackedEv +__ZN9IOService12getBusyStateEv +__ZN9IOService12getResourcesEv +__ZN9IOService12nameMatchingEPK8OSStringP12OSDictionary +__ZN9IOService12nameMatchingEPKcP12OSDictionary +__ZN9IOService12passiveMatchEP12OSDictionaryb +__ZN9IOService12requestProbeEm +__ZN9IOService12scheduleStopEPS_ +__ZN9IOService12tellChangeUpEm +__ZN9IOService12waitForStateEmmP13mach_timespec +__ZN9IOService13addPowerChildEPS_ +__ZN9IOService13askChangeDownEm +__ZN9IOService13checkResourceEP8OSObject +__ZN9IOService13getPMworkloopEv +__ZN9IOService13invokeNotiferEP18_IOServiceNotifier +__ZN9IOService13matchLocationEPS_ +__ZN9IOService13messageClientEmP8OSObjectPvj +__ZN9IOService13newUserClientEP4taskPvmP12OSDictionaryPP12IOUserClient +__ZN9IOService13newUserClientEP4taskPvmPP12IOUserClient +__ZN9IOService13responseValidEm +__ZN9IOService13setParentInfoEmP17IOPowerConnection +__ZN9IOService13setPowerStateEmPS_ +__ZN9IOService13startMatchingEm +__ZN9IOService13waitMatchIdleEm +__ZN9IOService13willTerminateEPS_m +__ZN9IOService14actionFinalizeEPS_m +__ZN9IOService14activityTickleEmm +__ZN9IOService14applyToClientsEPFvPS_PvES1_ +__ZN9IOService14causeInterruptEi +__ZN9IOService14checkResourcesEv +__ZN9IOService14doServiceMatchEm +__ZN9IOService14getServiceRootEv +__ZN9IOService14messageClientsEmPvj +__ZN9IOService14newTemperatureElPS_ +__ZN9IOService14setPowerParentEP17IOPowerConnectionbm +__ZN9IOService14startCandidateEPS_ +__ZN9IOService14stop_ack_timerEv +__ZN9IOService14tellChangeDownEm +__ZN9IOService14waitForServiceEP12OSDictionaryP13mach_timespec +__ZN9IOService15OurChangeFinishEv +__ZN9IOService15addNotificationEPK8OSSymbolP12OSDictionaryPFbPvS5_PS_ES5_S5_l +__ZN9IOService15comparePropertyEP12OSDictionaryPK8OSString +__ZN9IOService15comparePropertyEP12OSDictionaryPKc +__ZN9IOService15enableInterruptEi +__ZN9IOService15errnoFromReturnEi +__ZN9IOService15getDeviceMemoryEv +__ZN9IOService15getPMRootDomainEv +__ZN9IOService15instruct_driverEm +__ZN9IOService15lookupInterruptEibPP21IOInterruptController +__ZN9IOService15powerChangeDoneEm +__ZN9IOService15probeCandidatesEP12OSOrderedSet +__ZN9IOService15publishResourceEPK8OSSymbolP8OSObject +__ZN9IOService15publishResourceEPKcP8OSObject +__ZN9IOService15registerServiceEm +__ZN9IOService15serviceMatchingEPK8OSStringP12OSDictionary +__ZN9IOService15serviceMatchingEPKcP12OSDictionary +__ZN9IOService15setDeviceMemoryEP7OSArray 
+__ZN9IOService15setNotificationEPK8OSSymbolP12OSDictionaryPFbPvS5_PS_ES5_S5_l +__ZN9IOService15setPMRootDomainEP14IOPMrootDomain +__ZN9IOService15start_ack_timerEv +__ZN9IOService15tellChangeDown1Em +__ZN9IOService15tellChangeDown2Em +__ZN9IOService15terminateClientEPS_m +__ZN9IOService15terminatePhase1Em +__ZN9IOService15terminateThreadEPv +__ZN9IOService15terminateWorkerEm +__ZN9IOService16ack_timer_tickedEv +__ZN9IOService16allowPowerChangeEm +__ZN9IOService16applyToProvidersEPFvPS_PvES1_ +__ZN9IOService16command_receivedEPvS0_S0_S0_ +__ZN9IOService16didYouWakeSystemEv +__ZN9IOService16disableInterruptEi +__ZN9IOService16getInterruptTypeEiPi +__ZN9IOService16registerInterestEPK8OSSymbolPFiPvS3_mPS_S3_jES3_S3_ +__ZN9IOService16removePowerChildEP17IOPowerConnection +__ZN9IOService16requestTerminateEPS_m +__ZN9IOService16resolveInterruptEPS_i +__ZN9IOService16resourceMatchingEPK8OSStringP12OSDictionary +__ZN9IOService16resourceMatchingEPKcP12OSDictionary +__ZN9IOService16scheduleFinalizeEv +__ZN9IOService16startSettleTimerEm +__ZN9IOService16start_our_changeEm +__ZN9IOService16stringFromReturnEi +__ZN9IOService16tellNoChangeDownEm +__ZN9IOService17addNeededResourceEPKc +__ZN9IOService17allowCancelCommonEv +__ZN9IOService17applyToInterestedEPK8OSSymbolPFvP8OSObjectPvES5_ +__ZN9IOService17cancelPowerChangeEm +__ZN9IOService17catalogNewDriversEP12OSOrderedSet +__ZN9IOService17comparePropertiesEP12OSDictionaryP12OSCollection +__ZN9IOService17currentCapabilityEv +__ZN9IOService17getAggressivenessEmPm +__ZN9IOService17registerInterruptEiP8OSObjectPFvS1_PvPS_iES2_ +__ZN9IOService17setAggressivenessEmm +__ZN9IOService18actionDidTerminateEPS_m +__ZN9IOService18changePowerStateToEm +__ZN9IOService18doServiceTerminateEm +__ZN9IOService18enqueuePowerChangeEmmmP17IOPowerConnectionm +__ZN9IOService18getResourceServiceEv +__ZN9IOService18lockForArbitrationEb +__ZN9IOService18matchPropertyTableEP12OSDictionary +__ZN9IOService18matchPropertyTableEP12OSDictionaryPl +__ZN9IOService18setIdleTimerPeriodEm +__ZN9IOService18settleTimerExpiredEv +__ZN9IOService19_RESERVEDIOService3Ev +__ZN9IOService19_RESERVEDIOService4Ev +__ZN9IOService19_RESERVEDIOService5Ev +__ZN9IOService19_RESERVEDIOService6Ev +__ZN9IOService19_RESERVEDIOService7Ev +__ZN9IOService19_RESERVEDIOService8Ev +__ZN9IOService19_RESERVEDIOService9Ev +__ZN9IOService19actionWillTerminateEPS_mP7OSArray +__ZN9IOService19computeDesiredStateEv +__ZN9IOService19compute_settle_timeEv +__ZN9IOService19deliverNotificationEPK8OSSymbolmm +__ZN9IOService19getExistingServicesEP12OSDictionarymm +__ZN9IOService19getMatchingServicesEP12OSDictionary +__ZN9IOService19installNotificationEPK8OSSymbolP12OSDictionaryPFbPvS5_PS_ES5_S5_lPP10OSIterator +__ZN9IOService19powerOverrideOnPrivEv +__ZN9IOService19registerPowerDriverEPS_P14IOPMPowerStatem +__ZN9IOService19start_PM_idle_timerEv +__ZN9IOService19start_parent_changeEm +__ZN9IOService19unregisterInterruptEi +__ZN9IOService20_RESERVEDIOService10Ev +__ZN9IOService20_RESERVEDIOService11Ev +__ZN9IOService20_RESERVEDIOService12Ev +__ZN9IOService20_RESERVEDIOService13Ev +__ZN9IOService20_RESERVEDIOService14Ev +__ZN9IOService20_RESERVEDIOService15Ev +__ZN9IOService20_RESERVEDIOService16Ev +__ZN9IOService20_RESERVEDIOService17Ev +__ZN9IOService20_RESERVEDIOService18Ev +__ZN9IOService20_RESERVEDIOService19Ev +__ZN9IOService20_RESERVEDIOService20Ev +__ZN9IOService20_RESERVEDIOService21Ev +__ZN9IOService20_RESERVEDIOService22Ev +__ZN9IOService20_RESERVEDIOService23Ev +__ZN9IOService20_RESERVEDIOService24Ev 
+__ZN9IOService20_RESERVEDIOService25Ev +__ZN9IOService20_RESERVEDIOService26Ev +__ZN9IOService20_RESERVEDIOService27Ev +__ZN9IOService20_RESERVEDIOService28Ev +__ZN9IOService20_RESERVEDIOService29Ev +__ZN9IOService20_RESERVEDIOService30Ev +__ZN9IOService20_RESERVEDIOService31Ev +__ZN9IOService20_RESERVEDIOService32Ev +__ZN9IOService20_RESERVEDIOService33Ev +__ZN9IOService20_RESERVEDIOService34Ev +__ZN9IOService20_RESERVEDIOService35Ev +__ZN9IOService20_RESERVEDIOService36Ev +__ZN9IOService20_RESERVEDIOService37Ev +__ZN9IOService20_RESERVEDIOService38Ev +__ZN9IOService20_RESERVEDIOService39Ev +__ZN9IOService20_RESERVEDIOService40Ev +__ZN9IOService20_RESERVEDIOService41Ev +__ZN9IOService20_RESERVEDIOService42Ev +__ZN9IOService20_RESERVEDIOService43Ev +__ZN9IOService20_RESERVEDIOService44Ev +__ZN9IOService20_RESERVEDIOService45Ev +__ZN9IOService20_RESERVEDIOService46Ev +__ZN9IOService20_RESERVEDIOService47Ev +__ZN9IOService20_RESERVEDIOService48Ev +__ZN9IOService20_RESERVEDIOService49Ev +__ZN9IOService20_RESERVEDIOService50Ev +__ZN9IOService20_RESERVEDIOService51Ev +__ZN9IOService20_RESERVEDIOService52Ev +__ZN9IOService20_RESERVEDIOService53Ev +__ZN9IOService20_RESERVEDIOService54Ev +__ZN9IOService20_RESERVEDIOService55Ev +__ZN9IOService20_RESERVEDIOService56Ev +__ZN9IOService20_RESERVEDIOService57Ev +__ZN9IOService20_RESERVEDIOService58Ev +__ZN9IOService20_RESERVEDIOService59Ev +__ZN9IOService20_RESERVEDIOService60Ev +__ZN9IOService20_RESERVEDIOService61Ev +__ZN9IOService20_RESERVEDIOService62Ev +__ZN9IOService20_RESERVEDIOService63Ev +__ZN9IOService20callPlatformFunctionEPK8OSSymbolbPvS3_S3_S3_ +__ZN9IOService20callPlatformFunctionEPKcbPvS2_S2_S2_ +__ZN9IOService20getDeviceMemoryCountEv +__ZN9IOService20powerOverrideOffPrivEv +__ZN9IOService20unlockForArbitrationEv +__ZN9IOService21doInstallNotificationEPK8OSSymbolP12OSDictionaryPFbPvS5_PS_ES5_S5_lPP10OSIterator +__ZN9IOService21getClientWithCategoryEPK8OSSymbol +__ZN9IOService21powerStateDidChangeToEmmPS_ +__ZN9IOService21rebuildChildClampBitsEv +__ZN9IOService21temporaryPowerClampOnEv +__ZN9IOService21unregisterAllInterestEv +__ZN9IOService22OurChangeSetPowerStateEv +__ZN9IOService22PM_Clamp_Timer_ExpiredEv +__ZN9IOService22acknowledgePowerChangeEPS_ +__ZN9IOService22changePowerStateToPrivEm +__ZN9IOService22powerDomainDidChangeToEmP17IOPowerConnection +__ZN9IOService22powerStateWillChangeToEmmPS_ +__ZN9IOService23acknowledgeNotificationEPvm +__ZN9IOService23currentPowerConsumptionEv +__ZN9IOService23powerDomainWillChangeToEmP17IOPowerConnection +__ZN9IOService23requestPowerDomainStateEmP17IOPowerConnectionm +__ZN9IOService23scheduleTerminatePhase2Em +__ZN9IOService23syncNotificationHandlerEPvS0_PS_ +__ZN9IOService23tellClientsWithResponseEi +__ZN9IOService24PM_idle_timer_expirationEv +__ZN9IOService24acknowledgeSetPowerStateEv +__ZN9IOService24getDeviceMemoryWithIndexEj +__ZN9IOService24mapDeviceMemoryWithIndexEjm +__ZN9IOService24powerStateForDomainStateEm +__ZN9IOService24registerInterestedDriverEPS_ +__ZN9IOService26add_child_to_active_changeEP17IOPowerConnection +__ZN9IOService26deRegisterInterestedDriverEPS_ +__ZN9IOService26temperatureCriticalForZoneEPS_ +__ZN9IOService27OurChangeWaitForPowerSettleEv +__ZN9IOService27add_driver_to_active_changeEP12IOPMinformee +__ZN9IOService27maxCapabilityForDomainStateEm +__ZN9IOService27serializedAllowPowerChange2Em +__ZN9IOService28serializedCancelPowerChange2Em +__ZN9IOService29OurChangeTellClientsPowerDownEv +__ZN9IOService29ParentUpSetPowerState_DelayedEv 
+__ZN9IOService31ParentDownSetPowerState_DelayedEv +__ZN9IOService31ParentUpSetPowerState_ImmediateEv +__ZN9IOService31initialPowerStateForDomainStateEm +__ZN9IOService33ParentDownSetPowerState_ImmediateEv +__ZN9IOService33ParentUpWaitForSettleTime_DelayedEv +__ZN9IOService35ParentDownAcknowledgeChange_DelayedEv +__ZN9IOService35ParentUpWaitForSettleTime_ImmediateEv +__ZN9IOService36ParentDownWaitForPowerSettle_DelayedEv +__ZN9IOService37OurChangeTellPriorityClientsPowerDownEv +__ZN9IOService38ParentUpAcknowledgePowerChange_DelayedEv +__ZN9IOService41OurChangeNotifyInterestedDriversDidChangeEv +__ZN9IOService42OurChangeNotifyInterestedDriversWillChangeEv +__ZN9IOService46ParentDownTellPriorityClientsPowerDown_DelayedEv +__ZN9IOService48ParentDownTellPriorityClientsPowerDown_ImmediateEv +__ZN9IOService48ParentUpNotifyInterestedDriversDidChange_DelayedEv +__ZN9IOService4freeEv +__ZN9IOService4openEPS_mPv +__ZN9IOService4stopEPS_ +__ZN9IOService50ParentUpNotifyInterestedDriversDidChange_ImmediateEv +__ZN9IOService51ParentDownNotifyInterestedDriversWillChange_DelayedEv +__ZN9IOService53ParentDownNotifyDidChangeAndAcknowledgeChange_DelayedEv +__ZN9IOService53ParentDownNotifyInterestedDriversWillChange_ImmediateEv +__ZN9IOService56ParentDownWaitForPowerSettleAndNotifyDidChange_ImmediateEv +__ZN9IOService5closeEPS_m +__ZN9IOService5probeEPS_Pl +__ZN9IOService5startEPS_ +__ZN9IOService6PMfreeEv +__ZN9IOService6PMinitEv +__ZN9IOService6PMstopEv +__ZN9IOService6attachEPS_ +__ZN9IOService6detachEPS_ +__ZN9IOService6informEP12IOPMinformeeb +__ZN9IOService7messageEmPS_Pv +__ZN9IOService8all_doneEv +__ZN9IOService8finalizeEm +__ZN9IOService9MetaClassC1Ev +__ZN9IOService9MetaClassC2Ev +__ZN9IOService9all_ackedEv +__ZN9IOService9metaClassE +__ZN9IOService9notifyAllEb +__ZN9IOService9resourcesEv +__ZN9IOService9terminateEm +__ZN9IOService9waitQuietEP13mach_timespec +__ZN9IOServiceC1EPK11OSMetaClass +__ZN9IOServiceC1Ev +__ZN9IOServiceC2EPK11OSMetaClass +__ZN9IOServiceC2Ev +__ZN9IOServiceD0Ev +__ZN9IOServiceD2Ev +__ZN9OSBoolean10gMetaClassE +__ZN9OSBoolean10initializeEv +__ZN9OSBoolean10superClassE +__ZN9OSBoolean11withBooleanEb +__ZN9OSBoolean19_RESERVEDOSBoolean0Ev +__ZN9OSBoolean19_RESERVEDOSBoolean1Ev +__ZN9OSBoolean19_RESERVEDOSBoolean2Ev +__ZN9OSBoolean19_RESERVEDOSBoolean3Ev +__ZN9OSBoolean19_RESERVEDOSBoolean4Ev +__ZN9OSBoolean19_RESERVEDOSBoolean5Ev +__ZN9OSBoolean19_RESERVEDOSBoolean6Ev +__ZN9OSBoolean19_RESERVEDOSBoolean7Ev +__ZN9OSBoolean4freeEv +__ZN9OSBoolean9MetaClassC1Ev +__ZN9OSBoolean9MetaClassC2Ev +__ZN9OSBoolean9metaClassE +__ZN9OSBooleanC1EPK11OSMetaClass +__ZN9OSBooleanC1Ev +__ZN9OSBooleanC2EPK11OSMetaClass +__ZN9OSBooleanC2Ev +__ZN9OSBooleanD0Ev +__ZN9OSBooleanD2Ev +__ZNK10IOMachPort12getMetaClassEv +__ZNK10IOMachPort9MetaClass5allocEv +__ZNK10IONotifier12getMetaClassEv +__ZNK10IONotifier9MetaClass5allocEv +__ZNK10IOWorkLoop12getMetaClassEv +__ZNK10IOWorkLoop19enableAllInterruptsEv +__ZNK10IOWorkLoop20disableAllInterruptsEv +__ZNK10IOWorkLoop21enableAllEventSourcesEv +__ZNK10IOWorkLoop22disableAllEventSourcesEv +__ZNK10IOWorkLoop6inGateEv +__ZNK10IOWorkLoop8onThreadEv +__ZNK10IOWorkLoop9MetaClass5allocEv +__ZNK10IOWorkLoop9getThreadEv +__ZNK10OSIterator12getMetaClassEv +__ZNK10OSIterator9MetaClass5allocEv +__ZNK11IOCatalogue12getMetaClassEv +__ZNK11IOCatalogue12unloadModuleEP8OSString +__ZNK11IOCatalogue13serializeDataEmP11OSSerialize +__ZNK11IOCatalogue14isModuleLoadedEP12OSDictionary +__ZNK11IOCatalogue14isModuleLoadedEP8OSString +__ZNK11IOCatalogue14isModuleLoadedEPKc 
+__ZNK11IOCatalogue18getGenerationCountEv +__ZNK11IOCatalogue9MetaClass5allocEv +__ZNK11IOCatalogue9serializeEP11OSSerialize +__ZNK11IODataQueue12getMetaClassEv +__ZNK11IODataQueue9MetaClass5allocEv +__ZNK11IOMemoryMap12getMetaClassEv +__ZNK11IOMemoryMap9MetaClass5allocEv +__ZNK11IOResources11getWorkLoopEv +__ZNK11IOResources12getMetaClassEv +__ZNK11IOResources9MetaClass5allocEv +__ZNK11OSMetaClass12getClassNameEv +__ZNK11OSMetaClass12getClassSizeEv +__ZNK11OSMetaClass12getMetaClassEv +__ZNK11OSMetaClass12taggedRetainEPKv +__ZNK11OSMetaClass13checkMetaCastEPK15OSMetaClassBase +__ZNK11OSMetaClass13getSuperClassEv +__ZNK11OSMetaClass13taggedReleaseEPKv +__ZNK11OSMetaClass13taggedReleaseEPKvi +__ZNK11OSMetaClass14getRetainCountEv +__ZNK11OSMetaClass14reservedCalledEi +__ZNK11OSMetaClass16getInstanceCountEv +__ZNK11OSMetaClass18instanceDestructedEv +__ZNK11OSMetaClass19instanceConstructedEv +__ZNK11OSMetaClass6retainEv +__ZNK11OSMetaClass7releaseEi +__ZNK11OSMetaClass7releaseEv +__ZNK11OSMetaClass9serializeEP11OSSerialize +__ZNK11OSSerialize11getCapacityEv +__ZNK11OSSerialize12getMetaClassEv +__ZNK11OSSerialize20getCapacityIncrementEv +__ZNK11OSSerialize4textEv +__ZNK11OSSerialize9MetaClass5allocEv +__ZNK11OSSerialize9getLengthEv +__ZNK12IOPMinformee12getMetaClassEv +__ZNK12IOPMinformee9MetaClass5allocEv +__ZNK12IORootParent12getMetaClassEv +__ZNK12IORootParent9MetaClass5allocEv +__ZNK12IOUserClient12getMetaClassEv +__ZNK12IOUserClient9MetaClass5allocEv +__ZNK12OSCollection12getMetaClassEv +__ZNK12OSCollection9MetaClass5allocEv +__ZNK12OSDictionary11getCapacityEv +__ZNK12OSDictionary12getMetaClassEv +__ZNK12OSDictionary12initIteratorEPv +__ZNK12OSDictionary12iteratorSizeEv +__ZNK12OSDictionary20getCapacityIncrementEv +__ZNK12OSDictionary24getNextObjectForIteratorEPvPP8OSObject +__ZNK12OSDictionary8getCountEv +__ZNK12OSDictionary9MetaClass5allocEv +__ZNK12OSDictionary9getObjectEPK8OSString +__ZNK12OSDictionary9getObjectEPK8OSSymbol +__ZNK12OSDictionary9getObjectEPKc +__ZNK12OSDictionary9isEqualToEPK15OSMetaClassBase +__ZNK12OSDictionary9isEqualToEPKS_ +__ZNK12OSDictionary9isEqualToEPKS_PK12OSCollection +__ZNK12OSDictionary9serializeEP11OSSerialize +__ZNK12OSOrderedSet11getCapacityEv +__ZNK12OSOrderedSet12getMetaClassEv +__ZNK12OSOrderedSet12initIteratorEPv +__ZNK12OSOrderedSet12iteratorSizeEv +__ZNK12OSOrderedSet13getLastObjectEv +__ZNK12OSOrderedSet14containsObjectEPK15OSMetaClassBase +__ZNK12OSOrderedSet14getFirstObjectEv +__ZNK12OSOrderedSet20getCapacityIncrementEv +__ZNK12OSOrderedSet24getNextObjectForIteratorEPvPP8OSObject +__ZNK12OSOrderedSet6memberEPK15OSMetaClassBase +__ZNK12OSOrderedSet8getCountEv +__ZNK12OSOrderedSet9MetaClass5allocEv +__ZNK12OSOrderedSet9getObjectEj +__ZNK12OSOrderedSet9isEqualToEPK15OSMetaClassBase +__ZNK12OSOrderedSet9isEqualToEPKS_ +__ZNK12OSSerializer12getMetaClassEv +__ZNK12OSSerializer9MetaClass5allocEv +__ZNK12OSSerializer9serializeEP11OSSerialize +__ZNK12OSSymbolPool10findSymbolEPKc +__ZNK12_IOMemoryMap12getMetaClassEv +__ZNK12_IOMemoryMap13taggedReleaseEPKv +__ZNK12_IOMemoryMap9MetaClass5allocEv +__ZNK13IOCommandGate12getMetaClassEv +__ZNK13IOCommandGate9MetaClass5allocEv +__ZNK13IOCommandPool12getMetaClassEv +__ZNK13IOCommandPool9MetaClass5allocEv +__ZNK13IOEventSource11getWorkLoopEv +__ZNK13IOEventSource12getMetaClassEv +__ZNK13IOEventSource7getNextEv +__ZNK13IOEventSource8onThreadEv +__ZNK13IOEventSource9MetaClass5allocEv +__ZNK13IOEventSource9getActionEv +__ZNK13IOEventSource9isEnabledEv +__ZNK13_IOServiceJob12getMetaClassEv 
+__ZNK13_IOServiceJob9MetaClass5allocEv +__ZNK14IOCommandQueue12getMetaClassEv +__ZNK14IOCommandQueue9MetaClass5allocEv +__ZNK14IOMemoryCursor12getMetaClassEv +__ZNK14IOMemoryCursor9MetaClass5allocEv +__ZNK14IOPMrootDomain12getMetaClassEv +__ZNK14IOPMrootDomain9MetaClass5allocEv +__ZNK15IOConditionLock12getConditionEv +__ZNK15IOConditionLock12getMetaClassEv +__ZNK15IOConditionLock16getInterruptibleEv +__ZNK15IOConditionLock9MetaClass5allocEv +__ZNK15IOPMPowerSource12getMetaClassEv +__ZNK15IOPMPowerSource9MetaClass5allocEv +__ZNK15IOPanicPlatform12getMetaClassEv +__ZNK15IOPanicPlatform9MetaClass5allocEv +__ZNK15IORegistryEntry11compareNameEP8OSStringPS1_ +__ZNK15IORegistryEntry11getLocationEPK15IORegistryPlane +__ZNK15IORegistryEntry11getPropertyEPK8OSString +__ZNK15IORegistryEntry11getPropertyEPK8OSStringPK15IORegistryPlanem +__ZNK15IORegistryEntry11getPropertyEPK8OSSymbol +__ZNK15IORegistryEntry11getPropertyEPK8OSSymbolPK15IORegistryPlanem +__ZNK15IORegistryEntry11getPropertyEPKc +__ZNK15IORegistryEntry11getPropertyEPKcPK15IORegistryPlanem +__ZNK15IORegistryEntry12compareNamesEP8OSObjectPP8OSString +__ZNK15IORegistryEntry12copyLocationEPK15IORegistryPlane +__ZNK15IORegistryEntry12copyPropertyEPK8OSString +__ZNK15IORegistryEntry12copyPropertyEPK8OSStringPK15IORegistryPlanem +__ZNK15IORegistryEntry12copyPropertyEPK8OSSymbol +__ZNK15IORegistryEntry12copyPropertyEPK8OSSymbolPK15IORegistryPlanem +__ZNK15IORegistryEntry12copyPropertyEPKc +__ZNK15IORegistryEntry12copyPropertyEPKcPK15IORegistryPlanem +__ZNK15IORegistryEntry12getMetaClassEv +__ZNK15IORegistryEntry13getChildEntryEPK15IORegistryPlane +__ZNK15IORegistryEntry14applyToParentsEPFvPS_PvES1_PK15IORegistryPlane +__ZNK15IORegistryEntry14copyChildEntryEPK15IORegistryPlane +__ZNK15IORegistryEntry14getParentEntryEPK15IORegistryPlane +__ZNK15IORegistryEntry15applyToChildrenEPFvPS_PvES1_PK15IORegistryPlane +__ZNK15IORegistryEntry15copyParentEntryEPK15IORegistryPlane +__ZNK15IORegistryEntry16getChildIteratorEPK15IORegistryPlane +__ZNK15IORegistryEntry16getPathComponentEPcPiPK15IORegistryPlane +__ZNK15IORegistryEntry16getPropertyTableEv +__ZNK15IORegistryEntry17getParentIteratorEPK15IORegistryPlane +__ZNK15IORegistryEntry19serializePropertiesEP11OSSerialize +__ZNK15IORegistryEntry20getChildSetReferenceEPK15IORegistryPlane +__ZNK15IORegistryEntry21getParentSetReferenceEPK15IORegistryPlane +__ZNK15IORegistryEntry24dictionaryWithPropertiesEv +__ZNK15IORegistryEntry7getNameEPK15IORegistryPlane +__ZNK15IORegistryEntry7getPathEPcPiPK15IORegistryPlane +__ZNK15IORegistryEntry7inPlaneEPK15IORegistryPlane +__ZNK15IORegistryEntry7isChildEPS_PK15IORegistryPlaneb +__ZNK15IORegistryEntry8copyNameEPK15IORegistryPlane +__ZNK15IORegistryEntry8getDepthEPK15IORegistryPlane +__ZNK15IORegistryEntry8hasAliasEPK15IORegistryPlanePcPi +__ZNK15IORegistryEntry8isParentEPS_PK15IORegistryPlaneb +__ZNK15IORegistryEntry8makeLinkEPS_jPK15IORegistryPlane +__ZNK15IORegistryEntry9MetaClass5allocEv +__ZNK15IORegistryEntry9breakLinkEPS_jPK15IORegistryPlane +__ZNK15IORegistryPlane12getMetaClassEv +__ZNK15IORegistryPlane9MetaClass5allocEv +__ZNK15IORegistryPlane9serializeEP11OSSerialize +__ZNK15IOWatchDogTimer12getMetaClassEv +__ZNK15IOWatchDogTimer9MetaClass5allocEv +__ZNK15OSMetaClassBase8metaCastEPK11OSMetaClass +__ZNK15OSMetaClassBase8metaCastEPK8OSString +__ZNK15OSMetaClassBase8metaCastEPK8OSSymbol +__ZNK15OSMetaClassBase8metaCastEPKc +__ZNK15OSMetaClassBase9isEqualToEPKS_ +__ZNK15OSMetaClassMeta5allocEv +__ZNK15_IOConfigThread12getMetaClassEv 
+__ZNK15_IOConfigThread9MetaClass5allocEv +__ZNK16IOKitDiagnostics12getMetaClassEv +__ZNK16IOKitDiagnostics9MetaClass5allocEv +__ZNK16IOKitDiagnostics9serializeEP11OSSerialize +__ZNK16IOPMPagingPlexus12getMetaClassEv +__ZNK16IOPMPagingPlexus9MetaClass5allocEv +__ZNK16IOPMinformeeList12getMetaClassEv +__ZNK16IOPMinformeeList9MetaClass5allocEv +__ZNK16IOPlatformDevice11compareNameEP8OSStringPS1_ +__ZNK16IOPlatformDevice12getMetaClassEv +__ZNK16IOPlatformDevice9MetaClass5allocEv +__ZNK16IOPlatformExpert12getMetaClassEv +__ZNK16IOPlatformExpert14compareNubNameEPK9IOServiceP8OSStringPS4_ +__ZNK16IOPlatformExpert9MetaClass5allocEv +__ZNK16IORangeAllocator12getMetaClassEv +__ZNK16IORangeAllocator9MetaClass5allocEv +__ZNK16IORangeAllocator9serializeEP11OSSerialize +__ZNK17IOBigMemoryCursor12getMetaClassEv +__ZNK17IOBigMemoryCursor9MetaClass5allocEv +__ZNK17IOPowerConnection12getMetaClassEv +__ZNK17IOPowerConnection9MetaClass5allocEv +__ZNK18IODTPlatformExpert12getMetaClassEv +__ZNK18IODTPlatformExpert14compareNubNameEPK9IOServiceP8OSStringPS4_ +__ZNK18IODTPlatformExpert9MetaClass5allocEv +__ZNK18IOMemoryDescriptor12getDirectionEv +__ZNK18IOMemoryDescriptor12getMetaClassEv +__ZNK18IOMemoryDescriptor9MetaClass5allocEv +__ZNK18IOMemoryDescriptor9getLengthEv +__ZNK18IOPMchangeNoteList12getMetaClassEv +__ZNK18IOPMchangeNoteList9MetaClass5allocEv +__ZNK18IORegistryIterator12getMetaClassEv +__ZNK18IORegistryIterator9MetaClass5allocEv +__ZNK18IOTimerEventSource12getMetaClassEv +__ZNK18IOTimerEventSource9MetaClass5allocEv +__ZNK18IOUserNotification12getMetaClassEv +__ZNK18IOUserNotification9MetaClass5allocEv +__ZNK18_IOServiceNotifier12getMetaClassEv +__ZNK18_IOServiceNotifier9MetaClass5allocEv +__ZNK19IOPMPowerSourceList12getMetaClassEv +__ZNK19IOPMPowerSourceList9MetaClass5allocEv +__ZNK19IOPMPowerStateQueue12getMetaClassEv +__ZNK19IOPMPowerStateQueue9MetaClass5allocEv +__ZNK20IOLittleMemoryCursor12getMetaClassEv +__ZNK20IOLittleMemoryCursor9MetaClass5allocEv +__ZNK20OSCollectionIterator12getMetaClassEv +__ZNK20OSCollectionIterator9MetaClass5allocEv +__ZNK20RootDomainUserClient12getMetaClassEv +__ZNK20RootDomainUserClient9MetaClass5allocEv +__ZNK21IOInterruptController12getMetaClassEv +__ZNK21IOInterruptController9MetaClass5allocEv +__ZNK21IONaturalMemoryCursor12getMetaClassEv +__ZNK21IONaturalMemoryCursor9MetaClass5allocEv +__ZNK21IOSubMemoryDescriptor12getMetaClassEv +__ZNK21IOSubMemoryDescriptor9MetaClass5allocEv +__ZNK21IOSubMemoryDescriptor9serializeEP11OSSerialize +__ZNK22IOInterruptEventSource11getIntIndexEv +__ZNK22IOInterruptEventSource11getProviderEv +__ZNK22IOInterruptEventSource12getMetaClassEv +__ZNK22IOInterruptEventSource14getAutoDisableEv +__ZNK22IOInterruptEventSource9MetaClass5allocEv +__ZNK22IOPlatformExpertDevice11compareNameEP8OSStringPS1_ +__ZNK22IOPlatformExpertDevice11getWorkLoopEv +__ZNK22IOPlatformExpertDevice12getMetaClassEv +__ZNK22IOPlatformExpertDevice9MetaClass5allocEv +__ZNK22_IOOpenServiceIterator12getMetaClassEv +__ZNK22_IOOpenServiceIterator9MetaClass5allocEv +__ZNK23IOMultiMemoryDescriptor12getMetaClassEv +__ZNK23IOMultiMemoryDescriptor9MetaClass5allocEv +__ZNK24IOBufferMemoryDescriptor11getCapacityEv +__ZNK24IOBufferMemoryDescriptor12getMetaClassEv +__ZNK24IOBufferMemoryDescriptor9MetaClass5allocEv +__ZNK24IOCPUInterruptController12getMetaClassEv +__ZNK24IOCPUInterruptController9MetaClass5allocEv +__ZNK25IOGeneralMemoryDescriptor12getMetaClassEv +__ZNK25IOGeneralMemoryDescriptor9MetaClass5allocEv +__ZNK25IOGeneralMemoryDescriptor9serializeEP11OSSerialize 
+__ZNK25IOServiceUserNotification12getMetaClassEv +__ZNK25IOServiceUserNotification9MetaClass5allocEv +__ZNK26_IOServiceInterestNotifier12getMetaClassEv +__ZNK26_IOServiceInterestNotifier9MetaClass5allocEv +__ZNK27IOSharedInterruptController12getMetaClassEv +__ZNK27IOSharedInterruptController9MetaClass5allocEv +__ZNK28IOFilterInterruptEventSource12getMetaClassEv +__ZNK28IOFilterInterruptEventSource15getFilterActionEv +__ZNK28IOFilterInterruptEventSource9MetaClass5allocEv +__ZNK32IOServiceMessageUserNotification12getMetaClassEv +__ZNK32IOServiceMessageUserNotification9MetaClass5allocEv +__ZNK5IOCPU12getMetaClassEv +__ZNK5IOCPU9MetaClass5allocEv +__ZNK5OSSet11getCapacityEv +__ZNK5OSSet12getAnyObjectEv +__ZNK5OSSet12getMetaClassEv +__ZNK5OSSet12initIteratorEPv +__ZNK5OSSet12iteratorSizeEv +__ZNK5OSSet14containsObjectEPK15OSMetaClassBase +__ZNK5OSSet20getCapacityIncrementEv +__ZNK5OSSet24getNextObjectForIteratorEPvPP8OSObject +__ZNK5OSSet6memberEPK15OSMetaClassBase +__ZNK5OSSet8getCountEv +__ZNK5OSSet9MetaClass5allocEv +__ZNK5OSSet9isEqualToEPK15OSMetaClassBase +__ZNK5OSSet9isEqualToEPKS_ +__ZNK5OSSet9serializeEP11OSSerialize +__ZNK6OSData11getCapacityEv +__ZNK6OSData12getMetaClassEv +__ZNK6OSData14getBytesNoCopyEjj +__ZNK6OSData14getBytesNoCopyEv +__ZNK6OSData20getCapacityIncrementEv +__ZNK6OSData9MetaClass5allocEv +__ZNK6OSData9getLengthEv +__ZNK6OSData9isEqualToEPK15OSMetaClassBase +__ZNK6OSData9isEqualToEPK8OSString +__ZNK6OSData9isEqualToEPKS_ +__ZNK6OSData9isEqualToEPKvj +__ZNK6OSData9serializeEP11OSSerialize +__ZNK7OSArray11getCapacityEv +__ZNK7OSArray12getMetaClassEv +__ZNK7OSArray12initIteratorEPv +__ZNK7OSArray12iteratorSizeEv +__ZNK7OSArray13getLastObjectEv +__ZNK7OSArray20getCapacityIncrementEv +__ZNK7OSArray20getNextIndexOfObjectEPK15OSMetaClassBasej +__ZNK7OSArray24getNextObjectForIteratorEPvPP8OSObject +__ZNK7OSArray8getCountEv +__ZNK7OSArray9MetaClass5allocEv +__ZNK7OSArray9getObjectEj +__ZNK7OSArray9isEqualToEPK15OSMetaClassBase +__ZNK7OSArray9isEqualToEPKS_ +__ZNK7OSArray9serializeEP11OSSerialize +__ZNK8IOMapper12getMetaClassEv +__ZNK8IOMapper9MetaClass5allocEv +__ZNK8IOPMpriv12getMetaClassEv +__ZNK8IOPMpriv9MetaClass5allocEv +__ZNK8IOPMpriv9serializeEP11OSSerialize +__ZNK8IOPMprot12getMetaClassEv +__ZNK8IOPMprot9MetaClass5allocEv +__ZNK8IOPMprot9serializeEP11OSSerialize +__ZNK8IOSyncer12getMetaClassEv +__ZNK8IOSyncer9MetaClass5allocEv +__ZNK8OSNumber12getMetaClassEv +__ZNK8OSNumber12numberOfBitsEv +__ZNK8OSNumber13numberOfBytesEv +__ZNK8OSNumber17unsigned8BitValueEv +__ZNK8OSNumber18unsigned16BitValueEv +__ZNK8OSNumber18unsigned32BitValueEv +__ZNK8OSNumber18unsigned64BitValueEv +__ZNK8OSNumber9MetaClass5allocEv +__ZNK8OSNumber9isEqualToEPK15OSMetaClassBase +__ZNK8OSNumber9isEqualToEPKS_ +__ZNK8OSNumber9serializeEP11OSSerialize +__ZNK8OSObject12getMetaClassEv +__ZNK8OSObject12taggedRetainEPKv +__ZNK8OSObject13taggedReleaseEPKv +__ZNK8OSObject13taggedReleaseEPKvi +__ZNK8OSObject14getRetainCountEv +__ZNK8OSObject6retainEv +__ZNK8OSObject7releaseEi +__ZNK8OSObject7releaseEv +__ZNK8OSObject9MetaClass5allocEv +__ZNK8OSObject9serializeEP11OSSerialize +__ZNK8OSString12getMetaClassEv +__ZNK8OSString16getCStringNoCopyEv +__ZNK8OSString7getCharEj +__ZNK8OSString9MetaClass5allocEv +__ZNK8OSString9getLengthEv +__ZNK8OSString9isEqualToEPK15OSMetaClassBase +__ZNK8OSString9isEqualToEPK6OSData +__ZNK8OSString9isEqualToEPKS_ +__ZNK8OSString9isEqualToEPKc +__ZNK8OSString9serializeEP11OSSerialize +__ZNK8OSSymbol12getMetaClassEv +__ZNK8OSSymbol13taggedReleaseEPKv 
+__ZNK8OSSymbol13taggedReleaseEPKvi +__ZNK8OSSymbol9MetaClass5allocEv +__ZNK8OSSymbol9isEqualToEPK15OSMetaClassBase +__ZNK8OSSymbol9isEqualToEPKS_ +__ZNK8OSSymbol9isEqualToEPKc +__ZNK9IOCommand12getMetaClassEv +__ZNK9IOCommand9MetaClass5allocEv +__ZNK9IODTNVRAM11getPropertyEPK8OSSymbol +__ZNK9IODTNVRAM11getPropertyEPKc +__ZNK9IODTNVRAM12getMetaClassEv +__ZNK9IODTNVRAM17getOFVariablePermEPK8OSSymbol +__ZNK9IODTNVRAM17getOFVariableTypeEPK8OSSymbol +__ZNK9IODTNVRAM19serializePropertiesEP11OSSerialize +__ZNK9IODTNVRAM9MetaClass5allocEv +__ZNK9IOService10isInactiveEv +__ZNK9IOService11getProviderEv +__ZNK9IOService11getWorkLoopEv +__ZNK9IOService12getMetaClassEv +__ZNK9IOService12handleIsOpenEPKS_ +__ZNK9IOService17getClientIteratorEv +__ZNK9IOService19getProviderIteratorEv +__ZNK9IOService19serializePropertiesEP11OSSerialize +__ZNK9IOService21getOpenClientIteratorEv +__ZNK9IOService23getOpenProviderIteratorEv +__ZNK9IOService6isOpenEPKS_ +__ZNK9IOService8getStateEv +__ZNK9IOService9MetaClass5allocEv +__ZNK9IOService9getClientEv +__ZNK9OSBoolean12getMetaClassEv +__ZNK9OSBoolean12taggedRetainEPKv +__ZNK9OSBoolean13taggedReleaseEPKvi +__ZNK9OSBoolean6isTrueEv +__ZNK9OSBoolean7isFalseEv +__ZNK9OSBoolean8getValueEv +__ZNK9OSBoolean9MetaClass5allocEv +__ZNK9OSBoolean9isEqualToEPK15OSMetaClassBase +__ZNK9OSBoolean9isEqualToEPKS_ +__ZNK9OSBoolean9serializeEP11OSSerialize +__ZTV10IOMachPort +__ZTV10IONotifier +__ZTV10IOWorkLoop +__ZTV10OSIterator +__ZTV11IOCatalogue +__ZTV11IODataQueue +__ZTV11IOMemoryMap +__ZTV11IOResources +__ZTV11OSMetaClass +__ZTV11OSSerialize +__ZTV12IOPMinformee +__ZTV12IORootParent +__ZTV12IOUserClient +__ZTV12OSCollection +__ZTV12OSDictionary +__ZTV12OSOrderedSet +__ZTV12OSSerializer +__ZTV12OSSymbolPool +__ZTV12_IOMemoryMap +__ZTV13IOCommandGate +__ZTV13IOCommandPool +__ZTV13IOEventSource +__ZTV13_IOServiceJob +__ZTV14IOCommandQueue +__ZTV14IOMemoryCursor +__ZTV14IOPMrootDomain +__ZTV15IOConditionLock +__ZTV15IOPMPowerSource +__ZTV15IOPanicPlatform +__ZTV15IORegistryEntry +__ZTV15IORegistryPlane +__ZTV15IOWatchDogTimer +__ZTV15OSMetaClassBase +__ZTV15OSMetaClassMeta +__ZTV15_IOConfigThread +__ZTV16IOKitDiagnostics +__ZTV16IOPMPagingPlexus +__ZTV16IOPMinformeeList +__ZTV16IOPlatformDevice +__ZTV16IOPlatformExpert +__ZTV16IORangeAllocator +__ZTV17IOBigMemoryCursor +__ZTV17IOPowerConnection +__ZTV18IODTPlatformExpert +__ZTV18IOMemoryDescriptor +__ZTV18IOPMchangeNoteList +__ZTV18IORegistryIterator +__ZTV18IOTimerEventSource +__ZTV18IOUserNotification +__ZTV18_IOServiceNotifier +__ZTV19IOPMPowerSourceList +__ZTV19IOPMPowerStateQueue +__ZTV20IOLittleMemoryCursor +__ZTV20OSCollectionIterator +__ZTV20RootDomainUserClient +__ZTV21IOInterruptController +__ZTV21IONaturalMemoryCursor +__ZTV21IOSubMemoryDescriptor +__ZTV22IOInterruptEventSource +__ZTV22IOPlatformExpertDevice +__ZTV22_IOOpenServiceIterator +__ZTV23IOMultiMemoryDescriptor +__ZTV24IOBufferMemoryDescriptor +__ZTV24IOCPUInterruptController +__ZTV25IOGeneralMemoryDescriptor +__ZTV25IOServiceUserNotification +__ZTV26_IOServiceInterestNotifier +__ZTV27IOSharedInterruptController +__ZTV28IOFilterInterruptEventSource +__ZTV32IOServiceMessageUserNotification +__ZTV5IOCPU +__ZTV5OSSet +__ZTV6OSData +__ZTV7OSArray +__ZTV8IOMapper +__ZTV8IOPMpriv +__ZTV8IOPMprot +__ZTV8IOSyncer +__ZTV8OSNumber +__ZTV8OSObject +__ZTV8OSString +__ZTV8OSSymbol +__ZTV9IOCommand +__ZTV9IODTNVRAM +__ZTV9IOService +__ZTV9OSBoolean +__ZTVN10IOMachPort9MetaClassE +__ZTVN10IONotifier9MetaClassE +__ZTVN10IOWorkLoop9MetaClassE +__ZTVN10OSIterator9MetaClassE 
+__ZTVN11IOCatalogue9MetaClassE +__ZTVN11IODataQueue9MetaClassE +__ZTVN11IOMemoryMap9MetaClassE +__ZTVN11IOResources9MetaClassE +__ZTVN11OSSerialize9MetaClassE +__ZTVN12IOPMinformee9MetaClassE +__ZTVN12IORootParent9MetaClassE +__ZTVN12IOUserClient9MetaClassE +__ZTVN12OSCollection9MetaClassE +__ZTVN12OSDictionary9MetaClassE +__ZTVN12OSOrderedSet9MetaClassE +__ZTVN12OSSerializer9MetaClassE +__ZTVN12_IOMemoryMap9MetaClassE +__ZTVN13IOCommandGate9MetaClassE +__ZTVN13IOCommandPool9MetaClassE +__ZTVN13IOEventSource9MetaClassE +__ZTVN13_IOServiceJob9MetaClassE +__ZTVN14IOCommandQueue9MetaClassE +__ZTVN14IOMemoryCursor9MetaClassE +__ZTVN14IOPMrootDomain9MetaClassE +__ZTVN15IOConditionLock9MetaClassE +__ZTVN15IOPMPowerSource9MetaClassE +__ZTVN15IOPanicPlatform9MetaClassE +__ZTVN15IORegistryEntry9MetaClassE +__ZTVN15IORegistryPlane9MetaClassE +__ZTVN15IOWatchDogTimer9MetaClassE +__ZTVN15_IOConfigThread9MetaClassE +__ZTVN16IOKitDiagnostics9MetaClassE +__ZTVN16IOPMPagingPlexus9MetaClassE +__ZTVN16IOPMinformeeList9MetaClassE +__ZTVN16IOPlatformDevice9MetaClassE +__ZTVN16IOPlatformExpert9MetaClassE +__ZTVN16IORangeAllocator9MetaClassE +__ZTVN17IOBigMemoryCursor9MetaClassE +__ZTVN17IOPowerConnection9MetaClassE +__ZTVN18IODTPlatformExpert9MetaClassE +__ZTVN18IOMemoryDescriptor9MetaClassE +__ZTVN18IOPMchangeNoteList9MetaClassE +__ZTVN18IORegistryIterator9MetaClassE +__ZTVN18IOTimerEventSource9MetaClassE +__ZTVN18IOUserNotification9MetaClassE +__ZTVN18_IOServiceNotifier9MetaClassE +__ZTVN19IOPMPowerSourceList9MetaClassE +__ZTVN19IOPMPowerStateQueue9MetaClassE +__ZTVN20IOLittleMemoryCursor9MetaClassE +__ZTVN20OSCollectionIterator9MetaClassE +__ZTVN20RootDomainUserClient9MetaClassE +__ZTVN21IOInterruptController9MetaClassE +__ZTVN21IONaturalMemoryCursor9MetaClassE +__ZTVN21IOSubMemoryDescriptor9MetaClassE +__ZTVN22IOInterruptEventSource9MetaClassE +__ZTVN22IOPlatformExpertDevice9MetaClassE +__ZTVN22_IOOpenServiceIterator9MetaClassE +__ZTVN23IOMultiMemoryDescriptor9MetaClassE +__ZTVN24IOBufferMemoryDescriptor9MetaClassE +__ZTVN24IOCPUInterruptController9MetaClassE +__ZTVN25IOGeneralMemoryDescriptor9MetaClassE +__ZTVN25IOServiceUserNotification9MetaClassE +__ZTVN26_IOServiceInterestNotifier9MetaClassE +__ZTVN27IOSharedInterruptController9MetaClassE +__ZTVN28IOFilterInterruptEventSource9MetaClassE +__ZTVN32IOServiceMessageUserNotification9MetaClassE +__ZTVN5IOCPU9MetaClassE +__ZTVN5OSSet9MetaClassE +__ZTVN6OSData9MetaClassE +__ZTVN7OSArray9MetaClassE +__ZTVN8IOMapper9MetaClassE +__ZTVN8IOPMpriv9MetaClassE +__ZTVN8IOPMprot9MetaClassE +__ZTVN8IOSyncer9MetaClassE +__ZTVN8OSNumber9MetaClassE +__ZTVN8OSObject9MetaClassE +__ZTVN8OSString9MetaClassE +__ZTVN8OSSymbol9MetaClassE +__ZTVN9IOCommand9MetaClassE +__ZTVN9IODTNVRAM9MetaClassE +__ZTVN9IOService9MetaClassE +__ZTVN9OSBoolean9MetaClassE +__ZdlPv +__Znwm +___cxa_pure_virtual +___disable_threadsignal +___doprnt +___pthread_kill +___sysctl +__cpu_capabilities +__disable_preemption +__dist_code +__doprnt +__doprnt_truncates +__enable_preemption +__enable_preemption_no_check +__giDebugLogDataInternal +__giDebugLogInternal +__giDebugReserved1 +__giDebugReserved2 +__length_code +__longjmp +__mh_execute_header +__mk_sp_thread_begin +__mk_sp_thread_depress_abort +__mk_sp_thread_depress_abstime +__mk_sp_thread_depress_ms +__mk_sp_thread_dispatch +__mk_sp_thread_done +__mk_sp_thread_perhaps_yield +__mk_sp_thread_switch +__mk_sp_thread_switch_continue +__mk_sp_thread_unblock +__mutex_lock +__mutex_try +__printf +__setjmp +__start +__tr_align +__tr_flush_block +__tr_init 
+__tr_stored_block +__tr_tally +__vm_external_state_get +__vm_map_clip_end +__vm_map_clip_start +__vm_map_entry_create +__vm_map_entry_dispose +_absolutetime_to_nanoseconds +_accept +_access +_acct +_acct_process +_acctchkfreq +_acctp +_acctresume +_acctsuspend +_acctwatch +_acctwatch_funnel +_acknowledgeSleepWakeNotification +_act_abort +_act_attach +_act_deallocate +_act_detach +_act_execute_returnhandlers +_act_free_swapin +_act_get_state +_act_get_state_locked +_act_lock_thread +_act_machine_sv_free +_act_reference +_act_set_apc +_act_set_astbsd +_act_set_state +_act_set_state_locked +_act_thread_catt +_act_thread_cfree +_act_thread_csave +_act_ulock_release_all +_act_unlock_thread +_active_debugger +_add_from_mkext_function +_add_name +_add_pcbuffer +_add_profil +_add_to_time_wait +_addlog +_addupc_task +_adjtime +_adjust_vm_object_cache +_adler32 +_adr +_advisory_read +_age_is_stale +_ah4_calccksum +_ah4_input +_ah4_output +_ah6_calccksum +_ah6_ctlinput +_ah6_input +_ah6_output +_ah_algorithm_lookup +_ah_hdrlen +_ah_hdrsiz +_aio_cancel +_aio_error +_aio_fsync +_aio_max_requests +_aio_max_requests_per_process +_aio_read +_aio_return +_aio_suspend +_aio_worker_threads +_aio_write +_alert +_alert_done +_all_zones_lock +_allocbuf +_allow_clustered_pageouts +_allproc +_app_profile +_appleClut8 +_apple_hwcksum_rx +_apple_hwcksum_tx +_argstrcpy +_around +_arp_ifinit +_arp_rtrequest +_arpintr +_arpintrq +_arpresolve +_arpwhohas +_assert_wait +_assert_wait_possible +_assert_wait_prim +_assert_wait_timeout +_assert_wait_timeout_event +_ast_check +_ast_init +_ast_taken +_astbsd_on +_at_ether_input +_atoi +_atoi_term +_attrcalcsize +_avail_remaining +_avenrun +_averunnable +_b_to_q +_backing_store_add +_backing_store_alloc +_backing_store_list +_backing_store_lookup +_backing_store_release_trigger_disable +_badport_bandlim +_bawrite +_bcd2bin_data +_bcmp +_bcopy +_bcopy_phys +_bdevsw +_bdevsw_add +_bdevsw_isfree +_bdevsw_remove +_bdevvp +_bdwrite +_be_tracing +_bflushq +_bin2bcd_data +_bind +_biodone +_biowait +_blaundrycnt +_block_procsigmask +_boot +_boothowto +_bootp +_boottime +_both +_bpf_filter +_bpf_init +_bpf_mtap +_bpf_tap +_bpf_tap_callback +_bpf_validate +_bpfattach +_bpfclose +_bpfdetach +_bpfioctl +_bpfopen +_bpfpoll +_bpfread +_bpfwrite +_branch_tracing_enabled +_bread +_breada +_breadn +_brelse +_bremfree +_bs_commit +_bs_get_global_clsize +_bs_global_info +_bs_initialize +_bs_low +_bs_more_space +_bs_no_paging_space +_bs_port_table +_bs_set_default_clsize +_bsd_ast +_bsd_autoconf +_bsd_bufferinit +_bsd_close_page_cache_files +_bsd_exception +_bsd_hardclock +_bsd_hardclockinit +_bsd_init +_bsd_init_task +_bsd_open_page_cache_files +_bsd_osrelease +_bsd_ostype +_bsd_pageable_map +_bsd_read_page_cache_file +_bsd_search_page_cache_data_base +_bsd_startupearly +_bsd_uprofil +_bsd_utaskbootstrap +_bsd_version +_bsd_version_major +_bsd_version_minor +_bsd_version_variant +_bsd_write_page_cache_file +_bsdinit_task +_buf +_bufferhdr_map +_bufhash +_bufhashlist_slock +_bufhashtbl +_bufqlim +_bufqscanwait +_bufqueues +_bufstats +_busyprt +_bwillwrite +_bwrite +_byte_swap_cgin +_byte_swap_cgout +_byte_swap_csum +_byte_swap_dir_block_in +_byte_swap_dir_block_out +_byte_swap_dir_out +_byte_swap_direct +_byte_swap_dirtemplate_in +_byte_swap_inode_in +_byte_swap_inode_out +_byte_swap_ints +_byte_swap_longlongs +_byte_swap_minidir_in +_byte_swap_sbin +_byte_swap_sbout +_byte_swap_shorts +_bzero +_bzero_phys +_c_incoming_interrupts +_c_mach_msg_trap_switch_fast +_c_mmot_combined_S_R 
+_c_mmot_kernel_send +_c_swapin_thread_block +_c_syscalls_mach +_c_syscalls_unix +_c_thr_exc_raise +_c_thr_exc_raise_state +_c_thr_exc_raise_state_id +_c_thread_invoke_csw +_c_thread_invoke_hits +_c_thread_invoke_misses +_c_thread_invoke_same +_c_thread_invoke_same_cont +_c_tsk_exc_raise +_c_tsk_exc_raise_state +_c_tsk_exc_raise_state_id +_c_vm_page_grab_fictitious +_c_vm_page_more_fictitious +_c_vm_page_release_fictitious +_c_weird_pset_ref_exit +_cache_enter +_cache_lookup +_cache_purge +_cache_purgevfs +_cached_sock_alloc +_cached_sock_count +_cached_sock_free +_calcru +_calend_config +_calend_getattr +_calend_gettime +_calend_init +_calend_ops +_call_continuation +_call_thread_block +_call_thread_unblock +_callout +_cansignal +_cast128_decrypt_round12 +_cast128_decrypt_round16 +_cast128_encrypt_round12 +_cast128_encrypt_round16 +_catch_exc_subsystem +_catch_exception_raise +_catch_exception_raise_state +_catch_exception_raise_state_identity +_catq +_cause_ast_check +_cd9660_access +_cd9660_blkatoff +_cd9660_blktooff +_cd9660_bmap +_cd9660_cdxaop_entries +_cd9660_cdxaop_opv_desc +_cd9660_cdxaop_p +_cd9660_close +_cd9660_cmap +_cd9660_defattr +_cd9660_deftstamp +_cd9660_enotsupp +_cd9660_fhtovp +_cd9660_fifoop_entries +_cd9660_fifoop_opv_desc +_cd9660_fifoop_p +_cd9660_getattr +_cd9660_getattrlist +_cd9660_ihashget +_cd9660_ihashins +_cd9660_ihashrem +_cd9660_inactive +_cd9660_init +_cd9660_ioctl +_cd9660_islocked +_cd9660_lock +_cd9660_lookup +_cd9660_mmap +_cd9660_mount +_cd9660_mountroot +_cd9660_offtoblk +_cd9660_open +_cd9660_pagein +_cd9660_pathconf +_cd9660_print +_cd9660_quotactl +_cd9660_read +_cd9660_readdir +_cd9660_readlink +_cd9660_reclaim +_cd9660_remove +_cd9660_rmdir +_cd9660_root +_cd9660_rrip_analyze +_cd9660_rrip_getname +_cd9660_rrip_getsymname +_cd9660_rrip_offset +_cd9660_seek +_cd9660_select +_cd9660_specop_entries +_cd9660_specop_opv_desc +_cd9660_specop_p +_cd9660_start +_cd9660_statfs +_cd9660_strategy +_cd9660_sync +_cd9660_sysctl +_cd9660_tstamp_conv17 +_cd9660_tstamp_conv7 +_cd9660_unlock +_cd9660_unmount +_cd9660_vfsops +_cd9660_vget +_cd9660_vget_internal +_cd9660_vnodeop_entries +_cd9660_vnodeop_opv_desc +_cd9660_vnodeop_p +_cd9660_vptofh +_cd9660_xa_read +_cdevsw +_cdevsw_add +_cdevsw_add_with_bdev +_cdevsw_isfree +_cdevsw_remove +_cfree +_cfreecount +_cfreelist +_chdir +_check_actforsig +_check_cpu_subtype +_check_exec_access +_check_routeselfref +_checkalias +_checkuseraccess +_chflags +_chgproccnt +_chkdq +_chkdqchg +_chkiq +_chkiqchg +_chkvnlock +_chmod +_chown +_chroot +_chrtoblk +_chrtoblk_set +_cinit +_cjk_encoding +_cjk_lastunique +_clalloc +_classichandler +_classichandler_fileid +_classichandler_fsid +_clear_procsiglist +_clear_wait +_clfree +_clock_absolutetime_interval_to_deadline +_clock_adjtime +_clock_adjust_calendar +_clock_alarm +_clock_alarm_intr +_clock_alarm_reply +_clock_config +_clock_count +_clock_deadline_for_periodic_event +_clock_get_attributes +_clock_get_calendar_microtime +_clock_get_calendar_nanotime +_clock_get_calendar_value +_clock_get_system_microtime +_clock_get_system_nanotime +_clock_get_system_value +_clock_get_time +_clock_get_uptime +_clock_init +_clock_initialize_calendar +_clock_interval_to_absolutetime_interval +_clock_interval_to_deadline +_clock_list +_clock_priv_server +_clock_priv_server_routine +_clock_priv_subsystem +_clock_server +_clock_server_routine +_clock_service_create +_clock_set_attributes +_clock_set_calendar_adjtime +_clock_set_calendar_microtime +_clock_set_time +_clock_set_timer_deadline 
+_clock_set_timer_func +_clock_sleep_internal +_clock_sleep_trap +_clock_subsystem +_clock_timebase_info +_clock_wakeup_calendar +_clone_system_shared_regions +_cloneproc +_close +_closef +_clr_be_bit +_clrbit +_clrbits +_cluster_bp +_cluster_copy_ubc_data +_cluster_copy_upl_data +_cluster_pagein +_cluster_pageout +_cluster_push +_cluster_read +_cluster_release +_cluster_transfer_minimum +_cluster_write +_clustered_reads +_clustered_writes +_clusters_available +_clusters_committed +_clusters_committed_peak +_cmask +_cngetc +_cnmaygetc +_cnodehash +_cnodehashtbl +_cnputc +_collectth_state +_com_mapping_resource +_com_region_handle +_com_region_map +_com_region_size +_commpage_populate +_comp_add_data +_comp_end +_comp_get_ratio +_comp_init +_compute_averunnable +_compute_mach_factor +_compute_my_priority +_compute_priority +_concat_domain +_connect +_cons +_cons_cinput +_consdebug_putc +_consider_machine_adjust +_consider_machine_collect +_consider_task_collect +_consider_zone_gc +_conslog_putc +_console_user +_constty +_convert_act_to_port +_convert_clock_ctrl_to_port +_convert_clock_to_port +_convert_host_to_port +_convert_ledger_to_port +_convert_lock_set_to_port +_convert_memory_object_to_port +_convert_mig_object_to_port +_convert_mo_control_to_port +_convert_port_entry_to_map +_convert_port_entry_to_object +_convert_port_to_UNDReply +_convert_port_to_act +_convert_port_to_clock +_convert_port_to_clock_ctrl +_convert_port_to_host +_convert_port_to_host_priv +_convert_port_to_host_security +_convert_port_to_ledger +_convert_port_to_lock_set +_convert_port_to_locked_task +_convert_port_to_map +_convert_port_to_memory_object +_convert_port_to_mig_object +_convert_port_to_mo_control +_convert_port_to_processor +_convert_port_to_pset +_convert_port_to_pset_name +_convert_port_to_semaphore +_convert_port_to_space +_convert_port_to_task +_convert_port_to_upl +_convert_processor_to_port +_convert_pset_name_to_port +_convert_pset_to_port +_convert_semaphore_to_port +_convert_task_to_port +_convert_upl_to_port +_copyfile +_copyin +_copyin_shared_file +_copyinmap +_copyinmsg +_copyinstr +_copyout +_copyoutmap +_copyoutmsg +_copyoutstr +_copypv +_copyright +_copystr +_copywithin +_coredump +_coredumpok +_count_busy_buffers +_count_lock_queue +_cpm_allocate +_cpu_control +_cpu_down +_cpu_info +_cpu_info_count +_cpu_init +_cpu_launch_first_thread +_cpu_machine_init +_cpu_number +_cpu_register +_cpu_signal_handler +_cpu_sleep +_cpu_start +_cpu_up +_crcmp +_crcopy +_crdup +_create_unix_stack +_cred0 +_crfree +_crget +_csw_check +_cthread_stack_size +_ctl_attach +_ctl_connect +_ctl_ctloutput +_ctl_deregister +_ctl_disconnect +_ctl_enqueuedata +_ctl_enqueuembuf +_ctl_find +_ctl_head +_ctl_ioctl +_ctl_post_msg +_ctl_register +_ctl_send +_ctl_usrreqs +_ctlsw +_cttyioctl +_cttyopen +_cttyread +_cttyselect +_cttywrite +_cur_tw_slot +_current_act +_current_debugger +_current_map +_current_proc +_current_proc_EXTERNAL +_current_task +_current_thread +_current_thread_aborted +_current_timer +_cvtstat +_d_to_i +_db_dumpiojunk +_db_piokjunk +_db_thread_read_times +_db_timer_grab +_dbugprintf +_ddb_regs +_dead_badop +_dead_blktooff +_dead_bmap +_dead_cmap +_dead_ebadf +_dead_ioctl +_dead_lock +_dead_lookup +_dead_nullop +_dead_offtoblk +_dead_open +_dead_print +_dead_read +_dead_select +_dead_strategy +_dead_vnodeop_entries +_dead_vnodeop_opv_desc +_dead_vnodeop_p +_dead_write +_debug_buf +_debug_buf_ptr +_debug_buf_size +_debug_container_malloc_size +_debug_iomalloc_size +_debug_ivars_size +_debug_log_init 
+_debug_malloc_size +_debug_mode +_debug_putc +_def_tbuffer_size +_default_environment_shared_regions +_default_pager +_default_pager_add_file +_default_pager_async_lock +_default_pager_backing_store_create +_default_pager_backing_store_delete +_default_pager_backing_store_info +_default_pager_clsize +_default_pager_default_set +_default_pager_external_count +_default_pager_external_set +_default_pager_info +_default_pager_info_verbose +_default_pager_init_flag +_default_pager_initialize +_default_pager_internal_count +_default_pager_internal_set +_default_pager_memory_object_create +_default_pager_memory_object_default_subsystem +_default_pager_object +_default_pager_object_create +_default_pager_object_pages +_default_pager_object_server +_default_pager_object_server_routine +_default_pager_object_subsystem +_default_pager_objects +_default_pager_space_alert +_default_pager_triggers +_default_preemption_rate +_default_pset +_deflate +_deflateCopy +_deflateEnd +_deflateInit2_ +_deflateInit_ +_deflateParams +_deflateReset +_deflateSetDictionary +_deflate_copyright +_defrouter_addreq +_defrouter_delreq +_defrouter_lookup +_defrouter_select +_defrtrlist_del +_delack_bitmask +_delay +_delete +_delete_each_prefix +_des_SPtrans +_des_check_key +_des_check_key_parity +_des_decrypt3 +_des_ecb3_encrypt +_des_ecb_encrypt +_des_encrypt1 +_des_encrypt2 +_des_encrypt3 +_des_fixup_key_parity +_des_is_weak_key +_des_key_sched +_des_options +_des_set_key +_des_set_key_checked +_des_set_key_unchecked +_des_set_odd_parity +_desireddquot +_desiredvnodes +_dest6_input +_dev_add_entry +_dev_add_name +_dev_add_node +_dev_dup_entry +_dev_dup_plane +_dev_finddir +_dev_findname +_dev_free_hier +_dev_free_name +_dev_root +_devcls +_devfs_checkpath +_devfs_dn_free +_devfs_dntovn +_devfs_free_plane +_devfs_kernel_mount +_devfs_lock +_devfs_make_link +_devfs_make_node +_devfs_mknod +_devfs_mount +_devfs_propogate +_devfs_remove +_devfs_sinit +_devfs_spec_vnodeop_opv_desc +_devfs_spec_vnodeop_p +_devfs_stats +_devfs_update +_devfs_vfsops +_devfs_vnodeop_opv_desc +_devfs_vnodeop_p +_device_close +_device_data_action +_device_object_create +_device_pager_bootstrap +_device_pager_data_initialize +_device_pager_data_request +_device_pager_data_return +_device_pager_data_unlock +_device_pager_deallocate +_device_pager_init +_device_pager_lookup +_device_pager_populate_object +_device_pager_reference +_device_pager_setup +_device_pager_synchronize +_device_pager_terminate +_device_pager_unmap +_device_pager_workaround +_device_pager_zone +_device_service_create +_devin +_devio +_devioc +_devnode_free +_devopn +_devout +_devwait +_dgraph_add_dependency +_dgraph_add_dependent +_dgraph_establish_load_order +_dgraph_find_dependent +_dgraph_find_root +_dgraph_free +_dgraph_init +_dgraph_log +_dhcpol_add +_dhcpol_concat +_dhcpol_count +_dhcpol_element +_dhcpol_find +_dhcpol_free +_dhcpol_get +_dhcpol_init +_dhcpol_parse_buffer +_dhcpol_parse_packet +_dhcpol_parse_vendor +_di_root_image +_dirchk +_disableConsoleOutput +_disableDebugOuput +_disableSerialOuput +_disable_bluebox +_disable_branch_tracing +_disable_funnel +_dispatch_counts +_div_init +_div_input +_div_usrreqs +_divert_packet +_dlil_attach_interface_filter +_dlil_attach_protocol +_dlil_attach_protocol_filter +_dlil_dereg_if_modules +_dlil_dereg_proto_module +_dlil_detach_filter +_dlil_detach_protocol +_dlil_event +_dlil_expand_mcl +_dlil_find_dltag +_dlil_if_acquire +_dlil_if_attach +_dlil_if_detach +_dlil_if_release +_dlil_init +_dlil_initialized +_dlil_inject_if_input 
+_dlil_inject_if_output +_dlil_inject_pr_input +_dlil_inject_pr_output +_dlil_input +_dlil_input_lock +_dlil_input_packet +_dlil_input_thread_continue +_dlil_input_thread_wakeup +_dlil_ioctl +_dlil_output +_dlil_plumb_protocol +_dlil_post_msg +_dlil_reg_if_modules +_dlil_reg_proto_module +_dlil_stats +_dlil_unplumb_protocol +_dlttoproto +_dmmax +_dmmin +_dmtext +_do_bsdexception +_doasyncfree +_doclusterread +_doclusterwrite +_doingcache +_domaininit +_domainname +_domainnamelen +_domains +_donice +_doreallocblks +_dosetrlimit +_dounmount +_dp_memory_object_data_initialize +_dp_memory_object_data_request +_dp_memory_object_data_return +_dp_memory_object_data_unlock +_dp_memory_object_deallocate +_dp_memory_object_init +_dp_memory_object_reference +_dp_memory_object_subsystem +_dp_memory_object_synchronize +_dp_memory_object_terminate +_dp_memory_object_unmap +_dp_pages_free +_dp_parse_argument +_dp_pgins +_dp_pgouts +_dpt_array +_dpt_lock +_dqdirtylist +_dqfileclose +_dqfileopen +_dqflush +_dqfreelist +_dqget +_dqhash +_dqhashtbl +_dqinit +_dqreclaim +_dqref +_dqrele +_dqsync +_dqsync_orphans +_draw_panic_dialog +_dump_string_table +_dumpdev +_dumplo +_dup +_dup2 +_dup_sockaddr +_dupfdopen +_dylink_test +_dynamic_pager_control_port +_edata +_embutl +_eml_init +_eml_task_deallocate +_eml_task_reference +_enable_bluebox +_enable_branch_tracing +_enable_funnel +_enable_hotpath +_encap4_input +_encap6_input +_encap_attach +_encap_attach_func +_encap_detach +_encap_getarg +_encap_init +_encaptab +_encode_comp_t +_end +_enodev +_enodev_strat +_enoioctl +_enosys +_enterpgrp +_enxio +_eopnotsupp +_err_abortop +_err_access +_err_advlock +_err_allocate +_err_blkatoff +_err_blktooff +_err_bmap +_err_bwrite +_err_close +_err_cmap +_err_copyfile +_err_create +_err_devblocksize +_err_exchange +_err_fsync +_err_getattr +_err_getattrlist +_err_inactive +_err_ioctl +_err_islocked +_err_lease +_err_link +_err_lock +_err_mkcomplex +_err_mkdir +_err_mknod +_err_mmap +_err_offtoblk +_err_open +_err_pagein +_err_pageout +_err_pathconf +_err_pgrd +_err_pgwr +_err_print +_err_read +_err_readdir +_err_readdirattr +_err_readlink +_err_reallocblks +_err_reclaim +_err_remove +_err_rename +_err_revoke +_err_rmdir +_err_searchfs +_err_seek +_err_select +_err_setattr +_err_setattrlist +_err_strategy +_err_symlink +_err_truncate +_err_unlock +_err_update +_err_valloc +_err_vfree +_err_whiteout +_err_write +_errsys +_esp4_input +_esp4_output +_esp6_ctlinput +_esp6_input +_esp6_output +_esp_algorithm_lookup +_esp_auth +_esp_hdrsiz +_esp_max_ivlen +_esp_rijndael_blockdecrypt +_esp_rijndael_blockencrypt +_esp_rijndael_schedlen +_esp_rijndael_schedule +_esp_schedule +_esp_udp_encap_port +_etap_get_info +_etap_interrupt_probe +_etap_machcall_probe1 +_etap_machcall_probe2 +_etap_mon_reconfig +_etap_new_probe +_etap_probe +_etap_trace_event +_etap_trace_thread +_etext +_ether_addmulti +_ether_attach_at +_ether_attach_inet +_ether_attach_inet6 +_ether_delmulti +_ether_demux +_ether_detach_at +_ether_detach_inet +_ether_detach_inet6 +_ether_family_init +_ether_frameout +_ether_ifattach +_ether_ifmod_ioctl +_ether_inet6_prmod_ioctl +_ether_inet_prmod_ioctl +_ether_input +_ether_ipmulticast_max +_ether_ipmulticast_min +_ether_pre_output +_ether_prmod_ioctl +_ether_resolvemulti +_ether_sprintf +_ev_try_lock +_ev_unlock +_event_usrreqs +_eventsw +_evprocdeque +_evprocenque +_evsofree +_exc_server +_exc_server_routine +_exception +_exception_deliver +_exception_raise +_exception_raise_state +_exception_raise_state_identity 
+_exchangedata +_exchangelock +_execsigs +_execv +_execve +_execve_semaphore +_exit +_exit1 +_falloc +_fatfile_getarch +_fatfile_getarch_affinity +_fchdir +_fchflags +_fchmod +_fchown +_fcntl +_fcount +_fdalloc +_fdavail +_fdcopy +_fdesc_allocvp +_fdesc_badop +_fdesc_getattr +_fdesc_inactive +_fdesc_init +_fdesc_ioctl +_fdesc_lookup +_fdesc_mount +_fdesc_open +_fdesc_pathconf +_fdesc_print +_fdesc_read +_fdesc_readdir +_fdesc_readlink +_fdesc_reclaim +_fdesc_root +_fdesc_select +_fdesc_setattr +_fdesc_start +_fdesc_statfs +_fdesc_sync +_fdesc_unmount +_fdesc_vfree +_fdesc_vfsops +_fdesc_vnodeop_entries +_fdesc_vnodeop_opv_desc +_fdesc_vnodeop_p +_fdesc_write +_fdexec +_fdexpand +_fdfree +_fdgetf +_fdhash +_fdhashtbl +_fdopen +_fdrelse +_ffree +_ffs +_ffs_alloc +_ffs_balloc +_ffs_blkalloc +_ffs_blkatoff +_ffs_blkfree +_ffs_blkpref +_ffs_blktooff +_ffs_clrblock +_ffs_clusteracct +_ffs_fhtovp +_ffs_fifoop_entries +_ffs_fifoop_opv_desc +_ffs_fifoop_p +_ffs_flushfiles +_ffs_fragacct +_ffs_fsync +_ffs_init +_ffs_isblock +_ffs_mount +_ffs_mountfs +_ffs_mountroot +_ffs_offtoblk +_ffs_oldfscompat +_ffs_pagein +_ffs_pageout +_ffs_read +_ffs_reallocblks +_ffs_realloccg +_ffs_reclaim +_ffs_reload +_ffs_sbupdate +_ffs_setblock +_ffs_specop_entries +_ffs_specop_opv_desc +_ffs_specop_p +_ffs_statfs +_ffs_sync +_ffs_sysctl +_ffs_truncate +_ffs_unmount +_ffs_update +_ffs_valloc +_ffs_vfree +_ffs_vget +_ffs_vnodeop_entries +_ffs_vnodeop_opv_desc +_ffs_vnodeop_p +_ffs_vptofh +_ffs_write +_ffsbit +_fhopen +_fifo_advlock +_fifo_bmap +_fifo_close +_fifo_ebadf +_fifo_inactive +_fifo_ioctl +_fifo_lookup +_fifo_nfsv2nodeop_opv_desc +_fifo_nfsv2nodeop_p +_fifo_open +_fifo_pathconf +_fifo_print +_fifo_printinfo +_fifo_read +_fifo_select +_fifo_vnodeop_entries +_fifo_vnodeop_opv_desc +_fifo_vnodeop_p +_fifo_write +_filedesc0 +_filehead +_fillPage +_fill_backward_load_order +_find_entry +_find_nke +_finishdup +_first_avail +_first_free_check +_first_free_is_valid +_first_k_zone +_first_zone +_firstc +_firstsect +_firstseg +_firstsegfromheader +_fixjobc +_flock +_flush_dcache +_flush_dcache64 +_fmod_watch +_fork +_forkproc +_fpathconf +_fr_checkp +_frag6_doing_reass +_frag6_drain +_frag6_init +_frag6_input +_frag6_nfragpackets +_frag6_slowtimo +_fragtbl +_fragtbl124 +_fragtbl8 +_freebitcount +_freevnodes +_fref +_frele +_fsctl +_fstat +_fstatfs +_fstatv +_fsync +_ftruncate +_fubyte +_fuibyte +_fuiword +_funnel_alloc +_funnel_free +_funnel_lock +_funnel_unlock +_futimes +_fuword +_fvm_seg +_fw_enable +_gCatalogCacheGlobals +_gCompareTable +_gGearPict +_gIOAppPowerStateInterest +_gIOBusyInterest +_gIOCatalogCacheMisses +_gIOCatalogLock +_gIOCatalogModuleRequests +_gIOCatalogue +_gIOClassKey +_gIOCommandPoolSizeKey +_gIODTAAPLInterruptsKey +_gIODTAddressCellKey +_gIODTCompatibleKey +_gIODTDefaultInterruptController +_gIODTInterruptCellKey +_gIODTInterruptParentKey +_gIODTModelKey +_gIODTNWInterruptMappingKey +_gIODTNameKey +_gIODTPHandleKey +_gIODTPersistKey +_gIODTPlane +_gIODTRangeKey +_gIODTSizeCellKey +_gIODTTypeKey +_gIODTUnitKey +_gIODefaultMatchCategoryKey +_gIODeviceMemoryKey +_gIOFirstMatchNotification +_gIOFirstPublishNotification +_gIOGeneralInterest +_gIOInterruptControllersKey +_gIOInterruptSpecifiersKey +_gIOKLDLock +_gIOKernelConfigTables +_gIOKernelKmods +_gIOKitDebug +_gIOKitDebugKey +_gIOKitPortCount +_gIOLocationKey +_gIOLocationMatchKey +_gIOMatchCategoryKey +_gIOMatchedNotification +_gIOMatchedServiceCountKey +_gIOModuleIdentifierKey +_gIONameKey +_gIONameMatchKey +_gIONameMatchedKey 
+_gIOParentMatchKey +_gIOPathMatchKey +_gIOPowerPlane +_gIOPrelinkedModules +_gIOPriorityPowerStateInterest +_gIOProbeScoreKey +_gIOPropertyMatchKey +_gIOProviderClassKey +_gIOPublishNotification +_gIORangeAllocatorLock +_gIOResourceMatchKey +_gIOResourcesKey +_gIOServiceKey +_gIOServicePlane +_gIOTerminatedNotification +_gIOUserClientClassKey +_gLatinCaseFold +_gLowerCaseTable +_gOFVariables +_gPEClockFrequencyInfo +_gPlatformInterruptControllerName +_gTimeZone +_gatherstats +_gc_buffer_lock +_gc_vt100state +_getProcName +_get_aiotask +_get_bsdtask_info +_get_bsdthread_info +_get_bsduthreadarg +_get_bsduthreadrval +_get_dp_control_port +_get_firstthread +_get_inpcb_str_size +_get_kernel_symfile +_get_map_end +_get_map_max +_get_map_min +_get_map_nentries +_get_map_pmap +_get_map_start +_get_new_filter_id +_get_procrustime +_get_read_buffer +_get_set_state +_get_signalact +_get_signalthread +_get_state_handler +_get_task_ipcspace +_get_task_map +_get_task_numacts +_get_task_pmap +_get_task_userstop +_get_tcp_str_size +_get_thread_userstop +_get_thread_waitresult +_get_threadtask +_get_user_regs +_get_useraddr +_get_vmmap_entries +_get_vmmap_size +_get_vmsubmap_entries +_getact_thread +_getattrlist +_getblk +_getc +_getdirentries +_getdirentriesattr +_getdtablesize +_geteblk +_getegid +_geteuid +_getfakefvmseg +_getfh +_getfsstat +_getgid +_getgroups +_getinoquota +_getitimer +_getlastaddr +_getlogin +_getmachheaders +_getnewvnode +_getpeername +_getpgid +_getpgrp +_getpid +_getppid +_getpriority +_getquota +_getrlimit +_getrusage +_getsectbyname +_getsectbynamefromheader +_getsectcmdsymtabfromheader +_getsectdatafromheader +_getsegbyname +_getsegbynamefromheader +_getsegdatafromheader +_getshuttle_thread +_getsid +_getsock +_getsockaddr +_getsockname +_getsockopt +_getsymtab +_gettimeofday +_getuid +_getval +_getvnode +_gif_attach_inet +_gif_attach_inet6 +_gif_attach_proto_family +_gif_delete_tunnel +_gif_demux +_gif_detach_inet +_gif_detach_inet6 +_gif_detach_proto_family +_gif_encapcheck4 +_gif_encapcheck6 +_gif_input +_gif_ioctl +_gif_pre_output +_gif_reg_if_mods +_gif_shutdown +_gifattach +_gifs +_global_state_pid +_global_stats +_global_user_profile_cache +_grade_cpu_subtype +_groupmember +_gsignal +_halt_all_cpus +_halt_cpu +_halt_in_debugger +_hard_throttle_on_root +_hashinit +_hertz_tick +_hex2ascii_data +_hfc_tag +_hfs_addconverter +_hfs_allocate +_hfs_blktooff +_hfs_bmap +_hfs_bwrite +_hfs_catname +_hfs_chash_slock +_hfs_chkdq +_hfs_chkdqchg +_hfs_chkiq +_hfs_chkiqchg +_hfs_clearlock +_hfs_cmap +_hfs_converterinit +_hfs_encoding_list +_hfs_encoding_list_slock +_hfs_encodingbias +_hfs_extname +_hfs_fifoop_entries +_hfs_fifoop_opv_desc +_hfs_fifoop_p +_hfs_findoverlap +_hfs_getblock +_hfs_getconverter +_hfs_getinoquota +_hfs_getlock +_hfs_getquota +_hfs_ioctl +_hfs_offtoblk +_hfs_owner_rights +_hfs_pagein +_hfs_pageout +_hfs_pickencoding +_hfs_privdirname +_hfs_qsync +_hfs_quotactl +_hfs_quotaoff +_hfs_quotaon +_hfs_quotastat +_hfs_read +_hfs_relconverter +_hfs_remconverter +_hfs_select +_hfs_setlock +_hfs_setquota +_hfs_setuse +_hfs_specop_entries +_hfs_specop_opv_desc +_hfs_specop_p +_hfs_split +_hfs_strategy +_hfs_swap_BTNode +_hfs_swap_HFSBTInternalNode +_hfs_swap_HFSPlusBTInternalNode +_hfs_swap_HFSPlusForkData +_hfs_to_utf8 +_hfs_truncate +_hfs_vbmname +_hfs_vfsops +_hfs_vnodeop_entries +_hfs_vnodeop_opv_desc +_hfs_vnodeop_p +_hfs_wakelock +_hfs_write +_hfsmaxlockdepth +_holdrele +_host_default_memory_manager +_host_get_UNDServer +_host_get_boot_info +_host_get_clock_control 
+_host_get_clock_service +_host_get_exception_ports +_host_get_io_master +_host_get_special_port +_host_info +_host_ipc_hash_info +_host_kernel_version +_host_load_symbol_table +_host_notify_calendar_change +_host_notify_init +_host_notify_port_destroy +_host_page_size +_host_priv_self +_host_priv_server +_host_priv_server_routine +_host_priv_statistics +_host_priv_subsystem +_host_processor_info +_host_processor_set_priv +_host_processor_sets +_host_processors +_host_reboot +_host_request_notification +_host_security_create_task_token +_host_security_self +_host_security_server +_host_security_server_routine +_host_security_set_task_token +_host_security_subsystem +_host_self +_host_self_trap +_host_set_UNDServer +_host_set_exception_ports +_host_set_special_port +_host_stack_usage +_host_statistics +_host_swap_exception_ports +_host_virtual_physical_table_info +_host_zone_info +_hostid +_hostname +_hostnamelen +_hw_atomic_add +_hw_atomic_and +_hw_atomic_or +_hw_atomic_sub +_hw_compare_and_store +_hw_lock_held +_hw_lock_init +_hw_lock_lock +_hw_lock_to +_hw_lock_try +_hw_lock_unlock +_hz +_hzto +_icmp6_ctloutput +_icmp6_error +_icmp6_fasttimo +_icmp6_ifstat +_icmp6_ifstatmax +_icmp6_init +_icmp6_input +_icmp6_mtudisc_update +_icmp6_nodeinfo +_icmp6_rediraccept +_icmp6_redirect_input +_icmp6_redirect_output +_icmp6_redirtimeout +_icmp6_reflect +_icmp6errppslim +_icmp6stat +_icmp_error +_icmp_input +_idle_thread +_idle_thread_continue +_if_addmulti +_if_allmulti +_if_attach +_if_delmulti +_if_delmultiaddr +_if_down +_if_down_all +_if_index +_if_name +_if_route +_if_rtproto_del +_if_unroute +_if_up +_if_withname +_ifa_ifwithaddr +_ifa_ifwithdstaddr +_ifa_ifwithnet +_ifa_ifwithroute +_ifafree +_ifaof_ifpforaddr +_ifaref +_ifbyfamily +_ifindex2ifnet +_ifioctl +_ifma_lostlist +_ifmaof_ifpforaddr +_ifmedia_add +_ifmedia_init +_ifmedia_ioctl +_ifmedia_list_add +_ifmedia_removeall +_ifmedia_set +_ifnet +_ifnet_addrs +_ifpromisc +_ifptodlt +_ifqmaxlen +_iftovt_tab +_ifunit +_igmp_fasttimo +_igmp_init +_igmp_input +_igmp_joingroup +_igmp_leavegroup +_igmp_slowtimo +_ihash +_ihashtbl +_in6_addmulti +_in6_addr2scopeid +_in6_addrscope +_in6_are_prefix_equal +_in6_cksum +_in6_clearscope +_in6_control +_in6_delmulti +_in6_dinit +_in6_embedscope +_in6_get_tmpifid +_in6_gif_input +_in6_gif_output +_in6_gif_protosw +_in6_if_up +_in6_ifaddr +_in6_ifattach +_in6_ifawithifp +_in6_ifawithscope +_in6_ifdetach +_in6_ifindex2scopeid +_in6_ifstat +_in6_ifstatmax +_in6_init2done +_in6_init_prefix_ltimes +_in6_inithead +_in6_is_addr_deprecated +_in6_len2mask +_in6_localaddr +_in6_losing +_in6_mapped_peeraddr +_in6_mapped_sockaddr +_in6_mask2len +_in6_matchlen +_in6_maxmtu +_in6_multihead +_in6_nigroup +_in6_nigroup_attach +_in6_nigroup_detach +_in6_pcbbind +_in6_pcbconnect +_in6_pcbdetach +_in6_pcbdisconnect +_in6_pcbladdr +_in6_pcblookup_hash +_in6_pcblookup_local +_in6_pcbnotify +_in6_pcbpurgeif0 +_in6_pcbsetport +_in6_post_msg +_in6_prefix_add_ifid +_in6_prefix_ioctl +_in6_prefix_remove_ifid +_in6_prefixlen2mask +_in6_proto_count +_in6_purgeaddr +_in6_purgeif +_in6_purgeprefix +_in6_recoverscope +_in6_rr_timer +_in6_rr_timer_funneled +_in6_rtchange +_in6_selecthlim +_in6_selectsrc +_in6_setmaxmtu +_in6_setpeeraddr +_in6_setsockaddr +_in6_sin6_2_sin +_in6_sin6_2_sin_in_sock +_in6_sin_2_v4mapsin6 +_in6_sin_2_v4mapsin6_in_sock +_in6_sockaddr +_in6_tmpaddrtimer +_in6_tmpaddrtimer_funneled +_in6_tmpifadd +_in6_update_ifa +_in6_v4mapsin6_sockaddr +_in6addr_any +_in6addr_linklocal_allnodes +_in6addr_linklocal_allrouters 
+_in6addr_loopback +_in6addr_nodelocal_allnodes +_in6if_do_dad +_in6ifa_ifpforlinklocal +_in6ifa_ifpwithaddr +_in6mask0 +_in6mask128 +_in6mask32 +_in6mask64 +_in6mask96 +_in_addmulti +_in_addword +_in_broadcast +_in_canforward +_in_cksum +_in_cksum_skip +_in_control +_in_delayed_cksum +_in_delmulti +_in_dinit +_in_gif_input +_in_gif_output +_in_gif_protosw +_in_ifaddrhead +_in_ifadown +_in_ifscrub +_in_inithead +_in_localaddr +_in_losing +_in_multihead +_in_pcb_get_owner +_in_pcb_grab_port +_in_pcb_letgo_port +_in_pcb_nat_init +_in_pcb_new_share_client +_in_pcb_rem_share_client +_in_pcballoc +_in_pcbbind +_in_pcbconnect +_in_pcbdetach +_in_pcbdisconnect +_in_pcbinshash +_in_pcbladdr +_in_pcblookup_hash +_in_pcblookup_local +_in_pcbnotifyall +_in_pcbpurgeif0 +_in_pcbrehash +_in_pcbremlists +_in_proto_count +_in_pseudo +_in_rtchange +_in_rtqdrain +_in_setpeeraddr +_in_setsockaddr +_in_stf_input +_in_stf_protosw +_inactivevnodes +_incore +_inet6_ether_input +_inet6_ether_pre_output +_inet6ctlerrmap +_inet6domain +_inet6sw +_inet_aton +_inet_ether_input +_inet_ether_pre_output +_inet_ntoa +_inetctlerrmap +_inetdomain +_inetsw +_inferior +_inflate +_inflateEnd +_inflateInit2_ +_inflateInit_ +_inflateReset +_inflateSetDictionary +_inflateSync +_inflateSyncPoint +_inflate_blocks +_inflate_blocks_free +_inflate_blocks_new +_inflate_blocks_reset +_inflate_blocks_sync_point +_inflate_codes +_inflate_codes_free +_inflate_codes_new +_inflate_copyright +_inflate_fast +_inflate_flush +_inflate_mask +_inflate_set_dictionary +_inflate_trees_bits +_inflate_trees_dynamic +_inflate_trees_fixed +_init_args +_init_ast_check +_init_attempts +_init_domain +_init_exec_args +_init_ip6pktopts +_init_process +_init_program_name +_init_sin6 +_init_task_failure_data +_init_timers +_initialize_screen +_initialized +_initproc +_inittodr +_inside +_insmntque +_install_special_handler +_install_special_handler_locked +_int6intrq_present +_interlock_unlock +_intstack +_invalhash +_invalidate_icache +_invalidate_icache64 +_io_map +_io_map_spec +_io_throttle_zero_fill +_iobufqueue +_ioctl +_iokit_add_reference +_iokit_alloc_object_port +_iokit_builder +_iokit_client_died +_iokit_destroy_object_port +_iokit_lookup_connect_port +_iokit_lookup_connect_ref +_iokit_lookup_connect_ref_current_task +_iokit_lookup_object_port +_iokit_make_connect_port +_iokit_make_object_port +_iokit_make_send_right +_iokit_notify +_iokit_osrelease +_iokit_ostype +_iokit_port_for_object +_iokit_release_port +_iokit_remove_reference +_iokit_retain_port +_iokit_server +_iokit_server_routine +_iokit_user_client_trap +_iokit_version +_iokit_version_major +_iokit_version_minor +_iokit_version_variant +_ip4_ah_cleartos +_ip4_ah_net_deflev +_ip4_ah_offsetmask +_ip4_ah_trans_deflev +_ip4_def_policy +_ip4_esp_net_deflev +_ip4_esp_randpad +_ip4_esp_trans_deflev +_ip4_ipsec_dfbit +_ip4_ipsec_ecn +_ip6_accept_rtadv +_ip6_addaux +_ip6_ah_net_deflev +_ip6_ah_trans_deflev +_ip6_auto_flowlabel +_ip6_auto_linklocal +_ip6_clearpktopts +_ip6_copypktopts +_ip6_ctloutput +_ip6_dad_count +_ip6_def_policy +_ip6_defhlim +_ip6_defmcasthlim +_ip6_delaux +_ip6_desync_factor +_ip6_ecn_egress +_ip6_ecn_ingress +_ip6_esp_net_deflev +_ip6_esp_randpad +_ip6_esp_trans_deflev +_ip6_findaux +_ip6_flow_seq +_ip6_forward +_ip6_forward_rt +_ip6_forward_srcrt +_ip6_forwarding +_ip6_freemoptions +_ip6_freepcbopts +_ip6_fw_chk_ptr +_ip6_fw_ctl_ptr +_ip6_fw_enable +_ip6_get_prevhdr +_ip6_getdstifaddr +_ip6_gif_hlim +_ip6_hdrnestlimit +_ip6_id +_ip6_init +_ip6_input +_ip6_ipsec_ecn 
+_ip6_keepfaith +_ip6_lasthdr +_ip6_log_interval +_ip6_log_time +_ip6_maxfragpackets +_ip6_mforward +_ip6_mloopback +_ip6_mrouter +_ip6_mrouter_done +_ip6_mrouter_get +_ip6_mrouter_set +_ip6_mrouter_ver +_ip6_mrtproto +_ip6_nexthdr +_ip6_optlen +_ip6_ours_check_algorithm +_ip6_output +_ip6_process_hopopts +_ip6_protox +_ip6_rr_prune +_ip6_savecontrol +_ip6_sendredirects +_ip6_setpktoptions +_ip6_sourcecheck +_ip6_sourcecheck_interval +_ip6_sprintf +_ip6_temp_preferred_lifetime +_ip6_temp_regen_advance +_ip6_temp_valid_lifetime +_ip6_unknown_opt +_ip6_use_deprecated +_ip6_use_tempaddr +_ip6_v6only +_ip6intr +_ip6intrq +_ip6q +_ip6stat +_ip_ctloutput +_ip_defttl +_ip_divert_cookie +_ip_drain +_ip_ecn_egress +_ip_ecn_ingress +_ip_freemoptions +_ip_fw_chk_ptr +_ip_fw_ctl_ptr +_ip_fw_fwd_addr +_ip_gif_ttl +_ip_id +_ip_init +_ip_input +_ip_linklocal_in_allowbadttl +_ip_linklocal_stat +_ip_mcast_src +_ip_mforward +_ip_mrouter +_ip_mrouter_done +_ip_mrouter_get +_ip_mrouter_set +_ip_optcopy +_ip_output +_ip_pkt_to_mbuf +_ip_protox +_ip_rsvp_done +_ip_rsvp_force_done +_ip_rsvp_init +_ip_rsvp_vif_done +_ip_rsvp_vif_init +_ip_rsvpd +_ip_savecontrol +_ip_slowtimo +_ip_srcroute +_ip_stripoptions +_ipc_bootstrap +_ipc_clock_enable +_ipc_clock_init +_ipc_entry_alloc +_ipc_entry_alloc_name +_ipc_entry_dealloc +_ipc_entry_get +_ipc_entry_grow_table +_ipc_entry_lookup +_ipc_entry_tree_collision +_ipc_hash_delete +_ipc_hash_global_delete +_ipc_hash_global_insert +_ipc_hash_global_lookup +_ipc_hash_global_mask +_ipc_hash_global_size +_ipc_hash_global_table +_ipc_hash_init +_ipc_hash_insert +_ipc_hash_local_delete +_ipc_hash_local_insert +_ipc_hash_local_lookup +_ipc_hash_lookup +_ipc_host_init +_ipc_init +_ipc_kernel_copy_map +_ipc_kernel_copy_map_size +_ipc_kernel_map +_ipc_kernel_map_size +_ipc_kmsg_alloc +_ipc_kmsg_cache +_ipc_kmsg_cache_avail +_ipc_kmsg_clean +_ipc_kmsg_clean_body +_ipc_kmsg_clean_partial +_ipc_kmsg_clear_prealloc +_ipc_kmsg_copyin +_ipc_kmsg_copyin_body +_ipc_kmsg_copyin_from_kernel +_ipc_kmsg_copyin_header +_ipc_kmsg_copyin_scatter +_ipc_kmsg_copyout +_ipc_kmsg_copyout_body +_ipc_kmsg_copyout_dest +_ipc_kmsg_copyout_header +_ipc_kmsg_copyout_object +_ipc_kmsg_copyout_pseudo +_ipc_kmsg_copyout_to_kernel +_ipc_kmsg_dequeue +_ipc_kmsg_destroy +_ipc_kmsg_destroy_dest +_ipc_kmsg_enqueue +_ipc_kmsg_free +_ipc_kmsg_free_scatter +_ipc_kmsg_get +_ipc_kmsg_get_from_kernel +_ipc_kmsg_init +_ipc_kmsg_max_vm_space +_ipc_kmsg_put +_ipc_kmsg_put_to_kernel +_ipc_kmsg_queue_next +_ipc_kmsg_rmqueue +_ipc_kmsg_send +_ipc_kmsg_set_prealloc +_ipc_kobject_destroy +_ipc_kobject_notify +_ipc_kobject_server +_ipc_kobject_set +_ipc_kobject_set_atomically +_ipc_mqueue_add +_ipc_mqueue_changed +_ipc_mqueue_copyin +_ipc_mqueue_destroy +_ipc_mqueue_full +_ipc_mqueue_init +_ipc_mqueue_member +_ipc_mqueue_post +_ipc_mqueue_rcv +_ipc_mqueue_receive +_ipc_mqueue_receive_continue +_ipc_mqueue_receive_results +_ipc_mqueue_release_msgcount +_ipc_mqueue_remove +_ipc_mqueue_remove_all +_ipc_mqueue_remove_from_all +_ipc_mqueue_select +_ipc_mqueue_send +_ipc_mqueue_set_qlimit +_ipc_mqueue_set_seqno +_ipc_notify_dead_name +_ipc_notify_no_senders +_ipc_notify_port_deleted +_ipc_notify_port_destroyed +_ipc_notify_send_once +_ipc_object_alloc +_ipc_object_alloc_dead +_ipc_object_alloc_dead_name +_ipc_object_alloc_name +_ipc_object_copyin +_ipc_object_copyin_from_kernel +_ipc_object_copyin_type +_ipc_object_copyout +_ipc_object_copyout_dest +_ipc_object_copyout_name +_ipc_object_destroy +_ipc_object_reference +_ipc_object_release 
+_ipc_object_rename +_ipc_object_translate +_ipc_object_translate_two +_ipc_object_zones +_ipc_port_alloc +_ipc_port_alloc_name +_ipc_port_alloc_special +_ipc_port_check_circularity +_ipc_port_clear_receiver +_ipc_port_copy_send +_ipc_port_copyout_send +_ipc_port_dealloc_special +_ipc_port_destroy +_ipc_port_dncancel +_ipc_port_dngrow +_ipc_port_dnnotify +_ipc_port_dnrequest +_ipc_port_init +_ipc_port_lookup_notify +_ipc_port_make_send +_ipc_port_make_send_locked +_ipc_port_make_sonce +_ipc_port_max +_ipc_port_multiple_lock_data +_ipc_port_nsrequest +_ipc_port_pdrequest +_ipc_port_release +_ipc_port_release_receive +_ipc_port_release_send +_ipc_port_release_sonce +_ipc_port_timestamp +_ipc_port_timestamp_data +_ipc_port_timestamp_lock_data +_ipc_processor_disable +_ipc_processor_enable +_ipc_processor_init +_ipc_processor_terminate +_ipc_pset_add +_ipc_pset_alloc +_ipc_pset_alloc_name +_ipc_pset_destroy +_ipc_pset_disable +_ipc_pset_enable +_ipc_pset_init +_ipc_pset_max +_ipc_pset_member +_ipc_pset_remove +_ipc_pset_remove_from_all +_ipc_pset_terminate +_ipc_right_check +_ipc_right_clean +_ipc_right_copyin +_ipc_right_copyin_check +_ipc_right_copyin_two +_ipc_right_copyin_undo +_ipc_right_copyout +_ipc_right_dealloc +_ipc_right_delta +_ipc_right_destroy +_ipc_right_dncancel +_ipc_right_dnrequest +_ipc_right_info +_ipc_right_inuse +_ipc_right_lookup_two_write +_ipc_right_lookup_write +_ipc_right_rename +_ipc_right_reverse +_ipc_space_clean +_ipc_space_create +_ipc_space_create_special +_ipc_space_destroy +_ipc_space_kernel +_ipc_space_max +_ipc_space_reference +_ipc_space_release +_ipc_space_reply +_ipc_space_zone +_ipc_splay_traverse_finish +_ipc_splay_traverse_next +_ipc_splay_traverse_start +_ipc_splay_tree_bounds +_ipc_splay_tree_delete +_ipc_splay_tree_init +_ipc_splay_tree_insert +_ipc_splay_tree_join +_ipc_splay_tree_lookup +_ipc_splay_tree_pick +_ipc_splay_tree_split +_ipc_table_alloc +_ipc_table_dnrequests +_ipc_table_dnrequests_size +_ipc_table_entries +_ipc_table_entries_size +_ipc_table_fill +_ipc_table_free +_ipc_table_init +_ipc_table_realloc +_ipc_task_disable +_ipc_task_enable +_ipc_task_init +_ipc_task_terminate +_ipc_thr_act_disable +_ipc_thr_act_init +_ipc_thr_act_terminate +_ipc_thread_init +_ipc_thread_terminate +_ipc_tree_entry_max +_ipc_tree_entry_zone +_ipcomp4_input +_ipcomp4_output +_ipcomp6_input +_ipcomp6_output +_ipcomp_algorithm_lookup +_ipcperm +_ipflow_create +_ipflow_fastforward +_ipflow_slowtimo +_ipforwarding +_ipintr +_ipintrq +_ipintrq_present +_ipip_input +_ipport_firstauto +_ipport_hifirstauto +_ipport_hilastauto +_ipport_lastauto +_ipport_lowfirstauto +_ipport_lowlastauto +_ipsec4_delete_pcbpolicy +_ipsec4_get_policy +_ipsec4_getpolicybyaddr +_ipsec4_getpolicybysock +_ipsec4_hdrsiz +_ipsec4_in_reject +_ipsec4_in_reject_so +_ipsec4_logpacketstr +_ipsec4_output +_ipsec4_set_policy +_ipsec4_tunnel_validate +_ipsec6_delete_pcbpolicy +_ipsec6_get_policy +_ipsec6_getpolicybyaddr +_ipsec6_getpolicybysock +_ipsec6_hdrsiz +_ipsec6_in_reject +_ipsec6_in_reject_so +_ipsec6_logpacketstr +_ipsec6_output_trans +_ipsec6_output_tunnel +_ipsec6_set_policy +_ipsec6_tunnel_validate +_ipsec6stat +_ipsec_addhist +_ipsec_bypass +_ipsec_chkreplay +_ipsec_clearhist +_ipsec_copy_policy +_ipsec_copypkt +_ipsec_debug +_ipsec_delaux +_ipsec_dumpmbuf +_ipsec_get_reqlevel +_ipsec_gethist +_ipsec_getsocket +_ipsec_hdrsiz_tcp +_ipsec_init_policy +_ipsec_logsastr +_ipsec_setsocket +_ipsec_updatereplay +_ipsecstat +_ipstat +_iptime +_is_file_clean +_is_io_async_method_scalarI_scalarO 
+_is_io_async_method_scalarI_structureI +_is_io_async_method_scalarI_structureO +_is_io_async_method_structureI_structureO +_is_io_catalog_get_data +_is_io_catalog_get_gen_count +_is_io_catalog_module_loaded +_is_io_catalog_reset +_is_io_catalog_send_data +_is_io_catalog_terminate +_is_io_connect_add_client +_is_io_connect_get_notification_semaphore +_is_io_connect_get_service +_is_io_connect_map_memory +_is_io_connect_method_scalarI_scalarO +_is_io_connect_method_scalarI_structureI +_is_io_connect_method_scalarI_structureO +_is_io_connect_method_structureI_structureO +_is_io_connect_set_notification_port +_is_io_connect_set_properties +_is_io_connect_unmap_memory +_is_io_iterator_is_valid +_is_io_iterator_next +_is_io_iterator_reset +_is_io_make_matching +_is_io_object_conforms_to +_is_io_object_get_class +_is_io_object_get_retain_count +_is_io_registry_create_iterator +_is_io_registry_entry_create_iterator +_is_io_registry_entry_from_path +_is_io_registry_entry_get_child_iterator +_is_io_registry_entry_get_location_in_plane +_is_io_registry_entry_get_name +_is_io_registry_entry_get_name_in_plane +_is_io_registry_entry_get_parent_iterator +_is_io_registry_entry_get_path +_is_io_registry_entry_get_properties +_is_io_registry_entry_get_property +_is_io_registry_entry_get_property_bytes +_is_io_registry_entry_get_property_recursively +_is_io_registry_entry_in_plane +_is_io_registry_entry_set_properties +_is_io_registry_get_root_entry +_is_io_registry_iterator_enter_entry +_is_io_registry_iterator_exit_entry +_is_io_service_acknowledge_notification +_is_io_service_add_interest_notification +_is_io_service_add_notification +_is_io_service_add_notification_old +_is_io_service_add_notification_ool +_is_io_service_close +_is_io_service_get_busy_state +_is_io_service_get_matching_services +_is_io_service_get_matching_services_ool +_is_io_service_get_state +_is_io_service_match_property_table +_is_io_service_match_property_table_ool +_is_io_service_open +_is_io_service_request_probe +_is_io_service_wait_quiet +_is_iokit_subsystem +_is_kerneltask +_is_suser +_is_suser1 +_is_thread_active +_is_thread_idle +_is_thread_running +_isargsep +_isdisk +_isinferior +_iskmemdev +_isn_ctx +_isn_last_reseed +_isn_secret +_iso_font +_iso_nchstats +_isodirino +_isofncmp +_isofntrans +_isohash +_isohashtbl +_isonullname +_issetugid +_issignal +_issingleuser +_itimerdecr +_itimerfix +_itoa +_journal_active +_journal_close +_journal_create +_journal_end_transaction +_journal_flush +_journal_kill_block +_journal_modify_block_abort +_journal_modify_block_end +_journal_modify_block_start +_journal_open +_journal_start_transaction +_kOSBooleanFalse +_kOSBooleanTrue +_k_zone +_k_zone_max +_kalloc +_kalloc_canblock +_kalloc_fake_zone_info +_kalloc_init +_kalloc_large_inuse +_kalloc_large_max +_kalloc_large_total +_kalloc_map +_kalloc_map_size +_kalloc_max +_kalloc_max_prerounded +_kalloc_noblock +_kalloc_zone +_kd_buffer +_kd_buflast +_kd_bufptr +_kd_bufsize +_kd_buftomem +_kd_entropy_buffer +_kd_entropy_bufsize +_kd_entropy_buftomem +_kd_entropy_count +_kd_entropy_indx +_kd_mapcount +_kd_mapptr +_kd_mapsize +_kd_maptomem +_kd_prev_timebase +_kd_readlast +_kd_trace_lock +_kdb_printf +_kdbg_bootstrap +_kdbg_clear +_kdbg_control +_kdbg_control_chud +_kdbg_getentropy +_kdbg_getreg +_kdbg_mapinit +_kdbg_read +_kdbg_readmap +_kdbg_reinit +_kdbg_resolve_map +_kdbg_setpid +_kdbg_setpidex +_kdbg_setreg +_kdbg_setrtcdec +_kdbg_trace_data +_kdbg_trace_string +_kdebug_chudhook +_kdebug_enable +_kdebug_flags +_kdebug_nolog 
+_kdebug_ops +_kdebug_trace +_kdlog_beg +_kdlog_end +_kdlog_value1 +_kdlog_value2 +_kdlog_value3 +_kdlog_value4 +_kdp +_kdp_call +_kdp_call_kdb +_kdp_exception +_kdp_exception_ack +_kdp_flag +_kdp_get_ip_address +_kdp_get_mac_addr +_kdp_getc +_kdp_intr_disbl +_kdp_intr_enbl +_kdp_machine_hostinfo +_kdp_machine_read_regs +_kdp_machine_write_regs +_kdp_ml_get_breakinsn +_kdp_packet +_kdp_panic +_kdp_raise_exception +_kdp_reboot +_kdp_register_send_receive +_kdp_remove_all_breakpoints +_kdp_reset +_kdp_set_ip_and_mac_addresses +_kdp_sync_cache +_kdp_unregister_send_receive +_kdp_us_spin +_kdp_vm_read +_kdp_vm_write +_kentry_count +_kentry_data +_kentry_data_size +_kern_control_init +_kern_event_init +_kern_invalid +_kern_invalid_debug +_kern_os_free +_kern_os_malloc +_kern_os_malloc_size +_kern_os_realloc +_kern_sysctl +_kernacc +_kernelLinkerPresent +_kernel_debug +_kernel_debug1 +_kernel_flock +_kernel_map +_kernel_memory_allocate +_kernel_object_iopl_request +_kernel_pageable_map +_kernel_pmap +_kernel_pmap_store +_kernel_sysctl +_kernel_task +_kernel_task_create +_kernel_thread +_kernel_thread_create +_kernel_thread_with_priority +_kernel_timer +_kernel_upl_abort +_kernel_upl_abort_range +_kernel_upl_commit +_kernel_upl_commit_range +_kernel_upl_map +_kernel_upl_unmap +_kernel_vm_map_reference +_kernproc +_kev_attach +_kev_control +_kev_detach +_kev_post_msg +_kevent +_key_allocsa +_key_allocsp +_key_cb +_key_checkrequest +_key_checktunnelsanity +_key_debug_level +_key_dst +_key_freereg +_key_freesav +_key_freeso +_key_freesp +_key_gettunnel +_key_init +_key_ismyaddr +_key_msg2sp +_key_newsp +_key_output +_key_parse +_key_random +_key_randomfill +_key_sa_recordxfer +_key_sa_routechange +_key_sa_stir_iv +_key_sendup +_key_sendup_mbuf +_key_sp2msg +_key_spdacquire +_key_src +_key_timehandler +_key_timehandler_funnel +_key_usrreqs +_keydb_delsecashead +_keydb_delsecpolicy +_keydb_delsecreg +_keydb_delsecreplay +_keydb_freesecasvar +_keydb_newsecashead +_keydb_newsecasvar +_keydb_newsecpolicy +_keydb_newsecreg +_keydb_newsecreplay +_keydb_refsecasvar +_keydomain +_keystat +_keysw +_kfree +_kget +_kill +_killpg1 +_kinfo_vdebug +_kld_file_cleanup_all_resources +_kld_file_getaddr +_kld_file_lookupsymbol +_kld_file_map +_kld_file_merge_OSObjects +_kld_file_patch_OSObjects +_kld_file_prepare_for_link +_klist_init +_klogwakeup +_km_tty +_kmclose +_kmem_alloc +_kmem_alloc_aligned +_kmem_alloc_contig +_kmem_alloc_pageable +_kmem_alloc_pages +_kmem_alloc_wired +_kmem_free +_kmem_init +_kmem_io_object_deallocate +_kmem_io_object_trunc +_kmem_mb_alloc +_kmem_realloc +_kmem_remap_pages +_kmem_suballoc +_kmeminit +_kmemstats +_kmgetc +_kmgetc_silent +_kminit +_kmioctl +_kmod +_kmod_cmd_queue +_kmod_control +_kmod_create +_kmod_create_fake +_kmod_create_internal +_kmod_default_start +_kmod_default_stop +_kmod_destroy +_kmod_destroy_internal +_kmod_dump +_kmod_finalize_cpp +_kmod_get_info +_kmod_init +_kmod_initialize_cpp +_kmod_load_extension +_kmod_load_extension_with_dependencies +_kmod_load_from_cache +_kmod_load_function +_kmod_load_request +_kmod_lock +_kmod_lookupbyid +_kmod_lookupbyid_locked +_kmod_lookupbyname +_kmod_lookupbyname_locked +_kmod_queue_cmd +_kmod_queue_lock +_kmod_release +_kmod_retain +_kmod_send_generic +_kmod_start_or_stop +_kmod_unload_cache +_kmopen +_kmputc +_kmread +_kmwrite +_kmzones +_knote +_knote_attach +_knote_detach +_knote_fdclose +_knote_init +_knote_remove +_kprintf +_kqueue +_kqueue_from_portset_np +_kqueue_portset_np +_kqueue_register +_kqueue_stat +_krealloc 
+_krpc_call +_krpc_portmap +_ktrace +_ktrcsw +_ktrgenio +_ktrnamei +_ktrpsig +_ktrsyscall +_ktrsysret +_kvprintf +_kvtophys +_last_page_zf +_last_zone +_lbolt +_ldisc_deregister +_ldisc_register +_lease_check +_lease_updatetime +_leavepgrp +_ledger_copy +_ledger_create +_ledger_enter +_ledger_init +_ledger_read +_ledger_server +_ledger_server_routine +_ledger_subsystem +_ledger_terminate +_ledger_transfer +_legal_vif_num +_lf_clearlock +_lf_findoverlap +_lf_getblock +_lf_getlock +_lf_setlock +_lf_split +_lf_wakelock +_libkern_builder +_libkern_osrelease +_libkern_ostype +_libkern_version +_libkern_version_major +_libkern_version_minor +_libkern_version_variant +_libsa_builder +_libsa_osrelease +_libsa_ostype +_libsa_version +_libsa_version_major +_libsa_version_minor +_libsa_version_variant +_lightning_bolt +_limcopy +_limit0 +_linesw +_link +_lio_listio +_listen +_llinfo_nd6 +_lo_attach_inet +_lo_attach_inet6 +_lo_demux +_lo_framer +_lo_input +_lo_reg_if_mods +_lo_set_bpf_tap +_lo_shutdown +_load_init_program +_load_ipfw +_load_kernel_extension +_load_machfile +_load_shared_file +_local_log2 +_local_proto_count +_localdomain +_lock_acquire +_lock_alloc +_lock_done +_lock_free +_lock_handoff +_lock_handoff_accept +_lock_init +_lock_make_stable +_lock_make_unstable +_lock_read +_lock_read_to_write +_lock_release +_lock_release_internal +_lock_set_create +_lock_set_dereference +_lock_set_destroy +_lock_set_event +_lock_set_handoff +_lock_set_init +_lock_set_reference +_lock_set_server +_lock_set_server_routine +_lock_set_subsystem +_lock_try +_lock_wait_time +_lock_write +_lock_write_to_read +_lockinit +_lockmgr +_lockmgr_printinfo +_lockstatus +_log +_logPanicDataToScreen +_log_in_vain +_log_init +_log_level +_log_lock +_log_open +_log_putc +_logclose +_logioctl +_logopen +_logpri +_logread +_logselect +_logsoftc +_logwakeup +_loif +_lookup +_lookup_default_shared_region +_loopattach +_lru_is_stale +_lseek +_lsf_mapping_pool_gauge +_lsf_remove_regions_mappings +_lsf_zone +_lstat +_lstatv +_m_adj +_m_aux_add +_m_aux_delete +_m_aux_find +_m_cat +_m_clalloc +_m_cltom +_m_copy_pkthdr +_m_copyback +_m_copydata +_m_copym +_m_copym_with_hdrs +_m_devget +_m_dtom +_m_dup +_m_expand +_m_free +_m_freem +_m_freem_list +_m_get +_m_getclr +_m_gethdr +_m_getpacket +_m_getpackethdrs +_m_getpackets +_m_leadingspace +_m_mcheck +_m_mchtype +_m_mclalloc +_m_mclfree +_m_mclget +_m_mclhasreference +_m_mclref +_m_mclunref +_m_mtocl +_m_mtod +_m_prepend +_m_prepend_2 +_m_pulldown +_m_pullup +_m_reclaim +_m_retry +_m_retryhdr +_m_split +_m_trailingspace +_m_want +_mac_roman_to_unicode +_mac_roman_to_utf8 +_mach_absolute_time +_mach_assert +_mach_destroy_memory_entry +_mach_factor +_mach_host_server +_mach_host_server_routine +_mach_host_subsystem +_mach_make_memory_entry +_mach_make_memory_entry_64 +_mach_memory_object_memory_entry +_mach_memory_object_memory_entry_64 +_mach_msg_overwrite +_mach_msg_overwrite_trap +_mach_msg_receive +_mach_msg_receive_continue +_mach_msg_receive_results +_mach_msg_rpc_from_kernel +_mach_msg_send +_mach_msg_send_from_kernel +_mach_msg_trap +_mach_port_allocate +_mach_port_allocate_full +_mach_port_allocate_name +_mach_port_allocate_qos +_mach_port_deallocate +_mach_port_destroy +_mach_port_dnrequest_info +_mach_port_extract_member +_mach_port_extract_right +_mach_port_get_attributes +_mach_port_get_refs +_mach_port_get_set_status +_mach_port_get_srights +_mach_port_gst_helper +_mach_port_insert_member +_mach_port_insert_right +_mach_port_kernel_object +_mach_port_mod_refs 
+_mach_port_move_member +_mach_port_names +_mach_port_names_helper +_mach_port_rename +_mach_port_request_notification +_mach_port_server +_mach_port_server_routine +_mach_port_set_attributes +_mach_port_set_mscount +_mach_port_set_seqno +_mach_port_space_info +_mach_port_subsystem +_mach_port_type +_mach_ports_lookup +_mach_ports_register +_mach_reply_port +_mach_thread_self +_mach_timebase_info +_mach_trap_count +_mach_trap_table +_mach_vm_region_info +_mach_vm_region_info_64 +_mach_wait_until +_machdep_sysctl_list +_machine_boot_info +_machine_exception +_machine_idle +_machine_info +_machine_init +_machine_load_context +_machine_signal_idle +_machine_slot +_machine_stack_attach +_machine_stack_detach +_machine_stack_handoff +_machine_startup +_machine_switch_act +_machine_switch_context +_machine_thread_create +_machine_thread_destroy +_machine_thread_dup +_machine_thread_get_state +_machine_thread_init +_machine_thread_set_current +_machine_thread_set_state +_machine_thread_terminate_self +_machine_wake_thread +_macx_backing_store_recovery +_macx_backing_store_suspend +_macx_swapoff +_macx_swapon +_macx_triggers +_madvise +_map_data +_map_data_size +_map_fd +_map_fd_funneled +_mapping_set_mod +_master_cpu +_master_device_port +_master_processor +_max_datalen +_max_doubled_size +_max_hdr +_max_linkhdr +_max_mem +_max_pages_trigger_port +_max_poll_computation +_max_poll_quanta +_max_protohdr +_max_rt_quantum +_max_unsafe_computation +_max_unsafe_quanta +_maxdmap +_maxfiles +_maxfilesperproc +_maximum_pages_free +_maxlockdepth +_maxproc +_maxprocperuid +_maxsmap +_maxsockets +_maxvfsconf +_maxvfsslots +_mb_map +_mbinit +_mbstat +_mbuf_slock +_mbutl +_mcl_paddr +_mcl_to_paddr +_mclfree +_mclrefcnt +_md_prepare_for_shutdown +_mdev +_mdevBMajor +_mdevCMajor +_mdevadd +_mdevinit +_mdevlookup +_mem_size +_memcmp +_memcpy +_memmove +_memname +_memory_manager_default +_memory_manager_default_cluster +_memory_manager_default_lock +_memory_object_change_attributes +_memory_object_control_deallocate +_memory_object_control_disable +_memory_object_control_reference +_memory_object_control_server +_memory_object_control_server_routine +_memory_object_control_subsystem +_memory_object_create +_memory_object_create_named +_memory_object_data_initialize +_memory_object_data_request +_memory_object_data_return +_memory_object_data_unlock +_memory_object_deactivate_pages +_memory_object_deallocate +_memory_object_default_deallocate +_memory_object_default_reference +_memory_object_default_server +_memory_object_default_server_routine +_memory_object_destroy +_memory_object_get_attributes +_memory_object_init +_memory_object_iopl_request +_memory_object_lock_page +_memory_object_lock_request +_memory_object_name_server +_memory_object_name_server_routine +_memory_object_name_subsystem +_memory_object_page_op +_memory_object_range_op +_memory_object_recover_named +_memory_object_reference +_memory_object_release_name +_memory_object_server +_memory_object_server_routine +_memory_object_super_upl_request +_memory_object_synchronize +_memory_object_synchronize_completed +_memory_object_terminate +_memory_object_unmap +_memory_object_upl_request +_memset +_meta_bread +_meta_breadn +_meta_is_stale +_meta_zones +_mf6ctable +_mfree +_mfreelater +_microtime +_microuptime +_mig_buckets +_mig_dealloc_reply_port +_mig_e +_mig_get_reply_port +_mig_init +_mig_object_deallocate +_mig_object_destroy +_mig_object_init +_mig_object_no_senders +_mig_object_reference +_mig_put_reply_port +_mig_reply_size +_mig_strncpy 
+_mig_table_max_displ +_mig_user_allocate +_mig_user_deallocate +_min_pages_trigger_port +_min_rt_quantum +_min_std_quantum +_mincore +_minherit +_minimum_pages_remaining +_minphys +_mk_timebase_info +_mk_timer_arm +_mk_timer_cancel +_mk_timer_create +_mk_timer_destroy +_mk_timer_init +_mk_timer_port_destroy +_mkcomplex +_mkdir +_mkfifo +_mknod +_ml_at_interrupt_context +_ml_cause_interrupt +_ml_cpu_get_info +_ml_get_interrupts_enabled +_ml_get_max_cpus +_ml_get_timebase +_ml_init_interrupt +_ml_init_max_cpus +_ml_install_interrupt_handler +_ml_io_map +_ml_phys_read +_ml_phys_read_64 +_ml_phys_read_byte +_ml_phys_read_byte_64 +_ml_phys_read_double +_ml_phys_read_double_64 +_ml_phys_read_half +_ml_phys_read_half_64 +_ml_phys_read_word +_ml_phys_read_word_64 +_ml_phys_write +_ml_phys_write_64 +_ml_phys_write_byte +_ml_phys_write_byte_64 +_ml_phys_write_double +_ml_phys_write_double_64 +_ml_phys_write_half +_ml_phys_write_half_64 +_ml_phys_write_word +_ml_phys_write_word_64 +_ml_probe_read +_ml_probe_read_64 +_ml_processor_register +_ml_set_interrupts_enabled +_ml_static_malloc +_ml_static_mfree +_ml_static_ptovirt +_ml_thread_policy +_ml_vtophys +_mld6_fasttimeo +_mld6_init +_mld6_input +_mld6_start_listening +_mld6_stop_listening +_mlock +_mlockall +_mmFree +_mmGetPtr +_mmInit +_mmMalloc +_mmReturnPtr +_mmap +_mmread +_mmrw +_mmwrite +_mntid_slock +_mntvnode_slock +_modetodirtype +_modwatch +_mount +_mountlist +_mountlist_slock +_mountroot +_mountroot_post_hook +_mprotect +_mremap +_mrt6_ioctl +_mrt6stat +_mrt_ioctl +_msg_ool_size_small +_msg_receive_error +_msgbufp +_msgctl +_msgget +_msgrcv +_msgsnd +_msgsys +_msync +_multicast_register_if +_munlock +_munlockall +_munmap +_munmapfd +_mutex_alloc +_mutex_free +_mutex_init +_mutex_lock +_mutex_lock_acquire +_mutex_lock_wait +_mutex_pause +_mutex_preblock +_mutex_preblock_wait +_mutex_try +_mutex_unlock +_mutex_unlock_wakeup +_my_name +_mynum_flavors +_n6expire +_name_cmp +_namei +_nanoseconds_to_absolutetime +_nanotime +_nanouptime +_nbdwrite +_nblkdev +_nbuf +_nbufh +_nbufhigh +_nbuflow +_nbuftarget +_ncallout +_nchash +_nchashtbl +_nchinit +_nchrdev +_nchstats +_ncl +_nclruhead +_nd6_cache_lladdr +_nd6_dad_duplicated +_nd6_dad_start +_nd6_dad_stop +_nd6_dad_stoptimer +_nd6_debug +_nd6_defifindex +_nd6_delay +_nd6_free +_nd6_gctimer +_nd6_ifattach +_nd6_ifptomac +_nd6_init +_nd6_ioctl +_nd6_is_addr_neighbor +_nd6_lookup +_nd6_maxndopt +_nd6_maxnudhint +_nd6_mmaxtries +_nd6_na_input +_nd6_na_output +_nd6_need_cache +_nd6_ns_input +_nd6_ns_output +_nd6_nud_hint +_nd6_option +_nd6_option_init +_nd6_options +_nd6_output +_nd6_prefix_lookup +_nd6_prefix_offlink +_nd6_prefix_onlink +_nd6_prelist_add +_nd6_prune +_nd6_purge +_nd6_ra_input +_nd6_recalc_reachtm_interval +_nd6_rs_input +_nd6_rtrequest +_nd6_setdefaultiface +_nd6_setmtu +_nd6_storelladdr +_nd6_timer +_nd6_timer_funneled +_nd6_umaxtries +_nd6_useloopback +_nd_defrouter +_nd_ifinfo +_nd_prefix +_ndflush +_ndqb +_ndrv_abort +_ndrv_attach +_ndrv_bind +_ndrv_connect +_ndrv_control +_ndrv_ctlinput +_ndrv_ctloutput +_ndrv_delspec +_ndrv_detach +_ndrv_disconnect +_ndrv_do_detach +_ndrv_do_disconnect +_ndrv_dominit +_ndrv_drain +_ndrv_find_tag +_ndrv_flushq +_ndrv_get_ifp +_ndrv_handle_ifp_detach +_ndrv_init +_ndrv_input +_ndrv_output +_ndrv_peeraddr +_ndrv_read_event +_ndrv_recvspace +_ndrv_send +_ndrv_sendspace +_ndrv_sense +_ndrv_setspec +_ndrv_shutdown +_ndrv_sockaddr +_ndrv_sysctl +_ndrv_to_dlil_demux +_ndrv_usrreqs +_ndrvdomain +_ndrvl +_ndrvsw +_need_ast +_nestedpanic +_net_add_domain 
+_net_add_proto +_net_del_domain +_net_del_proto +_net_sysctl +_netaddr_match +_netboot_iaddr +_netboot_mountroot +_netboot_root +_netboot_rootpath +_netboot_setup +_netisr +_network_flock +_new_addr_hash +_new_obj_hash +_new_sysctl +_new_system_shared_regions +_newsysctl_list +_newtest +_nextc +_nextgennumber +_nextsect +_nextseg +_nextsegfromheader +_nextvnodeid +_nf_list +_nfiles +_nfs_adv +_nfs_async +_nfs_asyncio +_nfs_bioread +_nfs_boot_getfh +_nfs_boot_init +_nfs_bufq +_nfs_clearcommit +_nfs_cltpsock +_nfs_connect +_nfs_defect +_nfs_disconnect +_nfs_doio +_nfs_dolock +_nfs_false +_nfs_flushcommits +_nfs_fsinfo +_nfs_getattrcache +_nfs_getauth +_nfs_getcookie +_nfs_getnickauth +_nfs_getreq +_nfs_hash +_nfs_inactive +_nfs_init +_nfs_invaldir +_nfs_iodmount +_nfs_iodwant +_nfs_islocked +_nfs_ispublicfh +_nfs_loadattrcache +_nfs_lock +_nfs_mount_type +_nfs_mountroot +_nfs_namei +_nfs_nget +_nfs_nhinit +_nfs_node_hash_lock +_nfs_numasync +_nfs_prog +_nfs_readdirplusrpc +_nfs_readdirrpc +_nfs_readlinkrpc +_nfs_readrpc +_nfs_reclaim +_nfs_removeit +_nfs_rephead +_nfs_reply +_nfs_reqq +_nfs_request +_nfs_savenickauth +_nfs_send +_nfs_sigintr +_nfs_slplock +_nfs_slpunlock +_nfs_sndlock +_nfs_sndunlock +_nfs_ticks +_nfs_timer +_nfs_timer_funnel +_nfs_true +_nfs_udpsock +_nfs_unlock +_nfs_vfsops +_nfs_vinvalbuf +_nfs_write +_nfs_writerpc +_nfs_xdrneg1 +_nfs_xidwrap +_nfsadvlock_longest +_nfsadvlocks +_nfsadvlocks_time +_nfsclnt +_nfsd_head +_nfsd_head_flag +_nfsd_waiting +_nfslockdans +_nfslockdfd +_nfslockdfp +_nfslockdwait +_nfslockdwaiting +_nfsm_adj +_nfsm_disct +_nfsm_mbuftouio +_nfsm_reqh +_nfsm_rpchead +_nfsm_srvfattr +_nfsm_srvpostopattr +_nfsm_srvwcc +_nfsm_strtmbuf +_nfsm_uiotombuf +_nfsnodehash +_nfsnodehashtbl +_nfsrtt +_nfsrtton +_nfsrv3_access +_nfsrv3_procs +_nfsrv_cleancache +_nfsrv_commit +_nfsrv_create +_nfsrv_dorec +_nfsrv_errmap +_nfsrv_fhtovp +_nfsrv_fsinfo +_nfsrv_getattr +_nfsrv_getcache +_nfsrv_init +_nfsrv_initcache +_nfsrv_link +_nfsrv_lookup +_nfsrv_mkdir +_nfsrv_mknod +_nfsrv_noop +_nfsrv_null +_nfsrv_object_create +_nfsrv_pathconf +_nfsrv_rcv +_nfsrv_read +_nfsrv_readdir +_nfsrv_readdirplus +_nfsrv_readlink +_nfsrv_remove +_nfsrv_rename +_nfsrv_rmdir +_nfsrv_setattr +_nfsrv_setcred +_nfsrv_slpderef +_nfsrv_statfs +_nfsrv_symlink +_nfsrv_updatecache +_nfsrv_wakenfsd +_nfsrv_write +_nfsrv_writegather +_nfsrvhash +_nfsrvhashtbl +_nfsrvlruhead +_nfsrvw_procrastinate +_nfsrvw_procrastinate_v3 +_nfsrvw_sort +_nfsstats +_nfssvc +_nfssvc_sockhead +_nfssvc_sockhead_flag +_nfsv2_procid +_nfsv2_type +_nfsv2_vnodeop_opv_desc +_nfsv2_vnodeop_p +_nfsv3_procid +_nfsv3_type +_ngif +_niobuf +_nkdbufs +_nke_insert +_nlinesw +_nmbclusters +_no_dispatch_count +_nobdev +_nocdev +_nop_abortop +_nop_access +_nop_advlock +_nop_allocate +_nop_blkatoff +_nop_blktooff +_nop_bmap +_nop_bwrite +_nop_close +_nop_cmap +_nop_copyfile +_nop_create +_nop_devblocksize +_nop_exchange +_nop_fsync +_nop_getattr +_nop_getattrlist +_nop_inactive +_nop_ioctl +_nop_islocked +_nop_lease +_nop_link +_nop_lock +_nop_mkcomplex +_nop_mkdir +_nop_mknod +_nop_mmap +_nop_offtoblk +_nop_open +_nop_pagein +_nop_pageout +_nop_pathconf +_nop_pgrd +_nop_pgwr +_nop_print +_nop_read +_nop_readdir +_nop_readdirattr +_nop_readlink +_nop_reallocblks +_nop_reclaim +_nop_remove +_nop_rename +_nop_revoke +_nop_rmdir +_nop_searchfs +_nop_seek +_nop_select +_nop_setattr +_nop_setattrlist +_nop_strategy +_nop_symlink +_nop_truncate +_nop_unlock +_nop_update +_nop_valloc +_nop_vfree +_nop_whiteout +_nop_write 
+_noresume_on_disconnect +_norma_mk +_nosys +_not_implemented +_notify_filemod_watchers +_npcbufs +_nport +_nprocs +_nqfhhash +_nqfhhashtbl +_nqnfs_callback +_nqnfs_clientd +_nqnfs_clientlease +_nqnfs_getlease +_nqnfs_lease_check +_nqnfs_piggy +_nqnfs_prog +_nqnfs_serverd +_nqnfsrv_getlease +_nqnfsrv_vacated +_nqnfsstarttime +_nqsrv_clockskew +_nqsrv_getlease +_nqsrv_maxlease +_nqsrv_writeslack +_nqtimerhead +_nr_hashmask +_nr_hashtbl +_nrdeletes +_nrinserts +_nselcoll +_nswap +_nswapmap +_nswdev +_nsysent +_null_port +_nulldev +_nullop +_nullsys +_num_zones +_numcache +_numdquot +_numnfsrvcache +_numused_vfsslots +_numvnodes +_nv3tov_type +_oaccept +_obreak +_ocreat +_ofstat +_oftruncate +_ogetdirentries +_ogetdomainname +_ogetdtablesize +_ogethostid +_ogethostname +_ogetpagesize +_ogetpeername +_ogetrlimit +_ogetsockname +_okillpg +_old_if_attach +_olseek +_olstat +_open +_orecv +_orecvfrom +_orecvmsg +_osend +_osendmsg +_osetdomainname +_osethostid +_osethostname +_osetregid +_osetreuid +_osetrlimit +_osfmk_osrelease +_osfmk_ostype +_osfmk_version +_osfmk_version_major +_osfmk_version_minor +_osfmk_version_variant +_osigblock +_osigsetmask +_osigstack +_osigvec +_osmmap +_osrelease +_ostat +_ostype +_otruncate +_ovadvise +_ovbcopy +_owait +_owait3 +_packattrblk +_packcommonattr +_packdirattr +_packfileattr +_packvolattr +_page_mask +_page_shift +_page_size +_paging_segment_count +_paging_segment_max +_paging_segments +_paging_segments_lock +_panic +_panicDebugging +_panicDialogDesired +_panic_init +_panic_is_inited +_panic_lock +_panic_ui_initialize +_paniccpu +_panicstr +_panicwait +_parse_bsd_args +_pathconf +_pc_buffer +_pc_buflast +_pc_bufptr +_pc_bufsize +_pc_buftomem +_pc_sample_pid +_pc_trace_buf +_pc_trace_cnt +_pc_trace_frameworks +_pcb_synch +_pcsample_beg +_pcsample_comm +_pcsample_enable +_pcsample_end +_pcsample_flags +_pcsamples_bootstrap +_pcsamples_clear +_pcsamples_control +_pcsamples_ops +_pcsamples_read +_pcsamples_reinit +_pe_identify_machine +_pe_init_debug +_pexpert_osrelease +_pexpert_ostype +_pexpert_version +_pexpert_version_major +_pexpert_version_minor +_pexpert_version_variant +_pfctlinput +_pfctlinput2 +_pffasttimo +_pffinddomain +_pffindproto +_pffindtype +_pfind +_pfkeystat +_pfslowtimo +_pfxlist_onlink_check +_pgdelete +_pgfind +_pgrp0 +_pgrphash +_pgrphashtbl +_pgsignal +_physical_transfer_cluster_count +_physio +_pid_for_task +_pidhash +_pidhashtbl +_pim6_input +_pipe +_pmap_bootstrap +_pmap_change_wiring +_pmap_clear_modify +_pmap_clear_reference +_pmap_collect +_pmap_copy_page +_pmap_copy_part_page +_pmap_create +_pmap_destroy +_pmap_enter +_pmap_extract +_pmap_find_phys +_pmap_free_pages +_pmap_init +_pmap_initialized +_pmap_is_modified +_pmap_is_referenced +_pmap_map +_pmap_modify_pages +_pmap_next_page +_pmap_page_protect +_pmap_pageable +_pmap_protect +_pmap_reference +_pmap_remove +_pmap_remove_some_phys +_pmap_startup +_pmap_steal_memory +_pmap_sync_caches_phys +_pmap_verify_free +_pmap_virtual_space +_pmap_zero_page +_pmap_zero_part_page +_pmap_zone +_pmtu_expire +_pmtu_probe +_port_name_to_act +_port_name_to_clock +_port_name_to_semaphore +_port_name_to_task +_postevent +_postsig +_pread +_prelist_remove +_prelist_update +_prepare_profile_database +_prf +_print_saved_state +_print_vmpage_stat +_printf +_printf_init +_printf_lock +_priority_IO_timestamp_for_root +_prngAllowReseed +_prngDestroy +_prngForceReseed +_prngInitialize +_prngInput +_prngOutput +_prngProcessSeedBuffer +_prngStretch +_proc0 +_proc_exit +_proc_is_classic +_proc_name 
+_proc_prepareexit +_proc_reparent +_procdup +_process_terminate_self +_processor_array +_processor_assign +_processor_control +_processor_doshutdown +_processor_exit +_processor_get_assignment +_processor_info +_processor_info_count +_processor_init +_processor_offline +_processor_ptr +_processor_server +_processor_server_routine +_processor_set_base +_processor_set_create +_processor_set_default +_processor_set_destroy +_processor_set_info +_processor_set_limit +_processor_set_max_priority +_processor_set_policy_control +_processor_set_policy_disable +_processor_set_policy_enable +_processor_set_server +_processor_set_server_routine +_processor_set_stack_usage +_processor_set_statistics +_processor_set_subsystem +_processor_set_tasks +_processor_set_things +_processor_set_threads +_processor_shutdown +_processor_start +_processor_subsystem +_procinit +_prof_queue +_profil +_profile_kernel_services +_prtactive +_pru_abort_notsupp +_pru_accept_notsupp +_pru_attach_notsupp +_pru_bind_notsupp +_pru_connect2_notsupp +_pru_connect_notsupp +_pru_control_notsupp +_pru_detach_notsupp +_pru_disconnect_notsupp +_pru_listen_notsupp +_pru_peeraddr_notsupp +_pru_rcvd_notsupp +_pru_rcvoob_notsupp +_pru_send_notsupp +_pru_sense_null +_pru_shutdown_notsupp +_pru_sockaddr_notsupp +_pru_sopoll_notsupp +_pru_soreceive +_pru_soreceive_notsupp +_pru_sosend +_pru_sosend_notsupp +_ps_allocate_cluster +_ps_clmap +_ps_clunmap +_ps_dealloc_vsmap +_ps_deallocate_cluster +_ps_delete +_ps_enter +_ps_map_extend +_ps_read_device +_ps_read_file +_ps_select_array +_ps_select_segment +_ps_vs_write_complete +_ps_vstruct_allocated_pages +_ps_vstruct_allocated_size +_ps_vstruct_create +_ps_vstruct_dealloc +_ps_vstruct_transfer_from_segment +_ps_write_device +_ps_write_file +_psem_access +_psem_cache_init +_psem_cache_purge +_psem_delete +_psemhash +_psemhashtbl +_psemnument +_psemops +_psemstats +_pset_add_processor +_pset_add_task +_pset_add_thread +_pset_deallocate +_pset_init +_pset_quanta_setup +_pset_reference +_pset_remove_processor +_pset_remove_task +_pset_remove_thread +_pset_sys_bootstrap +_pseudo_inits +_pshm_access +_pshm_cache_add +_pshm_cache_delete +_pshm_cache_init +_pshm_cache_purge +_pshm_cache_search +_pshm_close +_pshm_mmap +_pshm_stat +_pshm_truncate +_pshmhash +_pshmhashtbl +_pshmnument +_pshmops +_pshmstats +_psignal +_psignal_lock +_psignal_sigprof +_psignal_uthread +_psignal_vfork +_psignal_vtalarm +_psignal_xcpu +_pstats0 +_pt_setrunnable +_pthread_sigmask +_ptrace +_pty_init +_putc +_pvs_cluster_read +_pvs_object_data_provided +_pwrite +_q_to_b +_qsync +_quotactl +_quotaoff +_quotaon +_quotastat +_random +_random_close +_random_init +_random_ioctl +_random_open +_random_read +_random_write +_raw_attach +_raw_ctlinput +_raw_detach +_raw_disconnect +_raw_init +_raw_input +_raw_usrreqs +_rawcb_list +_rawread +_rawwrite +_rc4_crypt +_rc4_init +_read +_read_random +_readlink +_readv +_real_ncpus +_realhost +_realitexpire +_reassignbuf +_reattach_wait +_reboot +_receive_packet +_record_startup_extensions_function +_recvfrom +_recvmsg +_ref_act_port_locked +_ref_pset_port_locked +_refresh_screen +_refunnel_hint +_refunnel_hint_enabled +_registerPrioritySleepWakeInterest +_registerSleepWakeInterest +_register_sockfilter +_relookup +_rem3_remangle_name +_remove_all_shared_regions +_remove_default_shared_region +_remove_name +_remove_startup_extension_function +_rename +_reset_shared_file +_resetpriority +_resize_namecache +_retrieve_act_self_fast +_retrieve_task_self_fast +_return_on_panic +_revoke 
+_rijndaelDecrypt +_rijndaelEncrypt +_rijndaelKeyEncToDec +_rijndaelKeySched +_rijndael_blockDecrypt +_rijndael_blockEncrypt +_rijndael_cipherInit +_rijndael_makeKey +_rijndael_padDecrypt +_rijndael_padEncrypt +_rip6_ctlinput +_rip6_ctloutput +_rip6_input +_rip6_output +_rip6_recvspace +_rip6_sendspace +_rip6_usrreqs +_rip6stat +_rip_ctlinput +_rip_ctloutput +_rip_init +_rip_input +_rip_output +_rip_recvspace +_rip_sendspace +_rip_usrreqs +_ripcb +_ripcbinfo +_rl_add +_rl_init +_rl_remove +_rl_scan +_rmdir +_rn_addmask +_rn_addroute +_rn_delete +_rn_init +_rn_inithead +_rn_lookup +_rn_match +_rn_refines +_rootDomainRestart +_rootDomainShutdown +_root_paged_ledger +_root_wired_ledger +_rootdev +_rootdevice +_rootfs +_rootvnode +_rootvp +_route6_input +_route_cb +_route_init +_routedomain +_rpc_auth_kerb +_rpc_auth_unix +_rpc_autherr +_rpc_call +_rpc_mismatch +_rpc_msgaccepted +_rpc_msgdenied +_rpc_reply +_rpc_vers +_rr_prefix +_rsvp_input +_rsvp_on +_rt6_flush +_rt_ifmsg +_rt_missmsg +_rt_newaddrmsg +_rt_newmaddrmsg +_rt_setgate +_rt_tables +_rtalloc +_rtalloc1 +_rtalloc_ign +_rtclock_intr +_rtclock_reset +_rtfree +_rtinit +_rtioctl +_rtredirect +_rtref +_rtrequest +_rtsetifa +_rtunref +_ruadd +_run_netisr +_run_queue_remove +_rwuio +_sa6_any +_safe_gets +_safedounmount +_sane_size +_savacctp +_save_waits +_sb_lock +_sb_max +_sb_notify +_sballoc +_sbappend +_sbappendaddr +_sbappendcontrol +_sbappendrecord +_sbcompress +_sbcreatecontrol +_sbdrop +_sbdroprecord +_sbflush +_sbfree +_sbinsertoob +_sblock +_sbrelease +_sbreserve +_sbrk +_sbspace +_sbtoxsockbuf +_sbunlock +_sbwait +_scanc +_sched_init +_sched_poll_yield_shift +_sched_safe_duration +_sched_tick +_sched_tick_init +_sched_tick_thread +_sched_tick_thread_continue +_scope6_addr2default +_scope6_get +_scope6_get_default +_scope6_ids +_scope6_ifattach +_scope6_set +_scope6_setdefault +_searchfs +_sectDATAB +_sectLINKB +_sectPRELINKB +_sectSizeDATA +_sectSizeLINK +_sectSizePRELINK +_sectSizeTEXT +_sectTEXTB +_securelevel +_selcontinue +_select +_selprocess +_selrecord +_selthreadclear +_seltrue +_selwait +_selwakeup +_sem +_sem_close +_sem_destroy +_sem_getvalue +_sem_init +_sem_open +_sem_post +_sem_trywait +_sem_unlink +_sem_wait +_sema +_semaphore_convert_wait_result +_semaphore_create +_semaphore_dereference +_semaphore_destroy +_semaphore_init +_semaphore_max +_semaphore_reference +_semaphore_server +_semaphore_server_routine +_semaphore_signal +_semaphore_signal_all +_semaphore_signal_all_trap +_semaphore_signal_internal +_semaphore_signal_thread +_semaphore_signal_thread_trap +_semaphore_signal_trap +_semaphore_subsystem +_semaphore_timedwait +_semaphore_timedwait_continue +_semaphore_timedwait_signal +_semaphore_timedwait_signal_trap +_semaphore_timedwait_trap +_semaphore_wait +_semaphore_wait_continue +_semaphore_wait_internal +_semaphore_wait_signal +_semaphore_wait_signal_trap +_semaphore_wait_trap +_semaphore_zone +_semconfig +_semctl +_semexit +_semget +_seminfo +_seminit +_semop +_semsys +_semu +_sendmsg +_sendsig +_sendto +_serial_putc +_session0 +_sessrele +_set_be_bit +_set_blocksize +_set_bsdtask_info +_set_bsduthreadargs +_set_cast128_subkey +_set_dp_control_port +_set_fsblocksize +_set_priority +_set_procsigmask +_set_sched_pri +_set_security_token +_set_state_handler +_setattrlist +_setbit +_setconf +_setegid +_seteuid +_setgid +_setgroups +_setitimer +_setlogin +_setpgid +_setpriority +_setprivexec +_setquota +_setrlimit +_setsid +_setsigvec +_setsockopt +_setthetime +_settimeofday +_setuid +_setup_main +_setuse 
+_sfilter_init +_sfilter_term +_sfma_handle +_sha1_init +_sha1_loop +_sha1_pad +_sha1_result +_shadow_map_create +_shadow_map_free +_shadow_map_read +_shadow_map_shadow_size +_shadow_map_write +_shared_com_boot_time_init +_shared_data_region_handle +_shared_file_available_hash_ele +_shared_file_boot_time_init +_shared_file_create_system_region +_shared_file_data_region +_shared_file_mapping_array +_shared_file_text_region +_shared_region_mapping_create +_shared_region_mapping_dealloc +_shared_region_mapping_info +_shared_region_mapping_ref +_shared_region_mapping_set_alt_next +_shared_region_object_chain_attach +_shared_text_region_handle +_shm_open +_shm_unlink +_shmat +_shmctl +_shmdt +_shmexit +_shmfork +_shmget +_shminfo +_shminit +_shmsegs +_shmsys +_shutdown +_sig_filtops +_sig_lock_to_exit +_sig_try_locked +_sigaction +_sigacts0 +_sigaltstack +_sigcontinue +_sigexit_locked +_siginit +_signal_lock +_signal_setast +_signal_unlock +_sigpending +_sigprocmask +_sigprop +_sigreturn +_sigsuspend +_sigwait +_skpc +_slave_machine_init +_slave_main +_sleep +_snprintf +_so_cache_hw +_so_cache_init_done +_so_cache_max_freed +_so_cache_time +_so_cache_timeouts +_so_cache_timer +_so_cache_zone +_so_gencnt +_soabort +_soaccept +_soalloc +_sobind +_socantrcvmore +_socantsendmore +_sockargs +_socket +_socket_cache_head +_socket_cache_tail +_socket_debug +_socket_zone +_socketinit +_socketops +_socketpair +_soclose +_soconnect +_soconnect2 +_socreate +_sodealloc +_sodelayed_copy +_sodisconnect +_sodropablereq +_sofree +_sogetopt +_sohasoutofband +_soisconnected +_soisconnecting +_soisdisconnected +_soisdisconnecting +_solisten +_sonewconn +_soo_close +_soo_ioctl +_soo_kqfilter +_soo_read +_soo_select +_soo_stat +_soo_write +_soopt_getm +_soopt_mcopyin +_soopt_mcopyout +_sooptcopyin +_sooptcopyout +_sopoll +_soreadable +_soreceive +_soreserve +_sorflush +_sorwakeup +_sosend +_sosendallatonce +_sosetopt +_soshutdown +_sotoxsocket +_sowakeup +_sowriteable +_sowwakeup +_space_deallocate +_spec_badop +_spec_blktooff +_spec_bmap +_spec_close +_spec_cmap +_spec_devblocksize +_spec_ebadf +_spec_fsync +_spec_ioctl +_spec_lookup +_spec_nfsv2nodeop_opv_desc +_spec_nfsv2nodeop_p +_spec_offtoblk +_spec_open +_spec_pathconf +_spec_print +_spec_read +_spec_select +_spec_strategy +_spec_vnodeop_entries +_spec_vnodeop_opv_desc +_spec_vnodeop_p +_spec_write +_spechash_slock +_special_handler +_special_handler_continue +_speclisth +_spl0 +_splbio +_splclock +_splhigh +_splimp +_split_funnel_off +_spllo +_spln +_splnet +_sploff +_splon +_splpower +_splsched +_splsoftclock +_spltty +_splvm +_splx +_sprintf +_sprintf_lock +_srv +_ss_fltsz +_ss_fltsz_local +_sscanf +_sstk +_stack_alloc +_stack_alloc_bndry +_stack_alloc_hits +_stack_alloc_hiwater +_stack_alloc_misses +_stack_alloc_total +_stack_alloc_try +_stack_cache_hits +_stack_collect +_stack_fake_zone_info +_stack_free +_stack_free_count +_stack_free_limit +_stack_free_max +_stack_free_stack +_stack_privilege +_stack_statistics +_start_cpu_thread +_start_def_pager +_start_kernel_threads +_startprofclock +_startup_miss +_stat +_state_count +_statfs +_statv +_std_quantum +_std_quantum_us +_stf_attach_inet6 +_stf_detach_inet6 +_stf_ioctl +_stf_pre_output +_stf_reg_if_mods +_stf_shutdown +_stfattach +_stop +_stopprofclock +_strcat +_strchr +_strcmp +_strcpy +_strdup +_strlen +_strncat +_strncmp +_strncpy +_strprefix +_strtol +_strtoq +_strtoul +_strtouq +_subyte +_suibyte +_suiword +_suser +_suword +_swap_act_map +_swap_task_map +_swapin_init +_swapin_lock +_swapin_queue 
+_swapin_thread +_swapin_thread_continue +_swapmap +_swapon +_swdevt +_switch_act +_switch_act_swapins +_switch_debugger +_switch_to_serial_console +_switch_to_shutdown_context +_swtch +_swtch_continue +_swtch_pri +_swtch_pri_continue +_symlink +_sync +_synthfs_access +_synthfs_adddirentry +_synthfs_cached_lookup +_synthfs_chflags +_synthfs_chmod +_synthfs_chown +_synthfs_create +_synthfs_fhtovp +_synthfs_getattr +_synthfs_inactive +_synthfs_init +_synthfs_islocked +_synthfs_lock +_synthfs_lookup +_synthfs_mkdir +_synthfs_mmap +_synthfs_mount +_synthfs_mount_fs +_synthfs_move_rename_entry +_synthfs_new_directory +_synthfs_new_symlink +_synthfs_open +_synthfs_pathconf +_synthfs_quotactl +_synthfs_readdir +_synthfs_readlink +_synthfs_reclaim +_synthfs_remove +_synthfs_remove_directory +_synthfs_remove_entry +_synthfs_remove_symlink +_synthfs_rename +_synthfs_rmdir +_synthfs_root +_synthfs_select +_synthfs_setattr +_synthfs_setupuio +_synthfs_start +_synthfs_statfs +_synthfs_symlink +_synthfs_sync +_synthfs_sysctl +_synthfs_unlock +_synthfs_unmount +_synthfs_update +_synthfs_vfsops +_synthfs_vget +_synthfs_vnodeop_entries +_synthfs_vnodeop_opv_desc +_synthfs_vnodeop_p +_synthfs_vptofh +_syscallnames +_sysclk_config +_sysclk_getattr +_sysclk_gettime +_sysclk_init +_sysclk_ops +_sysclk_setalarm +_sysctl__children +_sysctl__debug +_sysctl__debug_bpf_bufsize +_sysctl__debug_bpf_maxbufsize +_sysctl__debug_children +_sysctl__hw +_sysctl__hw_children +_sysctl__kern +_sysctl__kern_children +_sysctl__kern_dummy +_sysctl__kern_ipc +_sysctl__kern_ipc_children +_sysctl__kern_ipc_maxsockbuf +_sysctl__kern_ipc_maxsockets +_sysctl__kern_ipc_nmbclusters +_sysctl__kern_ipc_sockbuf_waste_factor +_sysctl__kern_ipc_somaxconn +_sysctl__kern_ipc_sorecvmincopy +_sysctl__kern_ipc_sosendminchain +_sysctl__kern_maxfilesperproc +_sysctl__kern_maxprocperuid +_sysctl__kern_sysv +_sysctl__kern_sysv_children +_sysctl__kern_sysv_shmall +_sysctl__kern_sysv_shmmax +_sysctl__kern_sysv_shmmin +_sysctl__kern_sysv_shmmni +_sysctl__kern_sysv_shmseg +_sysctl__machdep +_sysctl__machdep_children +_sysctl__net +_sysctl__net_children +_sysctl__net_inet +_sysctl__net_inet6 +_sysctl__net_inet6_children +_sysctl__net_inet6_icmp6 +_sysctl__net_inet6_icmp6_children +_sysctl__net_inet6_icmp6_errppslimit +_sysctl__net_inet6_icmp6_nd6_debug +_sysctl__net_inet6_icmp6_nd6_delay +_sysctl__net_inet6_icmp6_nd6_maxnudhint +_sysctl__net_inet6_icmp6_nd6_mmaxtries +_sysctl__net_inet6_icmp6_nd6_prune +_sysctl__net_inet6_icmp6_nd6_umaxtries +_sysctl__net_inet6_icmp6_nd6_useloopback +_sysctl__net_inet6_icmp6_nodeinfo +_sysctl__net_inet6_icmp6_rediraccept +_sysctl__net_inet6_icmp6_redirtimeout +_sysctl__net_inet6_icmp6_stats +_sysctl__net_inet6_ip6 +_sysctl__net_inet6_ip6_accept_rtadv +_sysctl__net_inet6_ip6_auto_flowlabel +_sysctl__net_inet6_ip6_auto_linklocal +_sysctl__net_inet6_ip6_children +_sysctl__net_inet6_ip6_dad_count +_sysctl__net_inet6_ip6_defmcasthlim +_sysctl__net_inet6_ip6_forwarding +_sysctl__net_inet6_ip6_gifhlim +_sysctl__net_inet6_ip6_hdrnestlimit +_sysctl__net_inet6_ip6_hlim +_sysctl__net_inet6_ip6_kame_version +_sysctl__net_inet6_ip6_keepfaith +_sysctl__net_inet6_ip6_log_interval +_sysctl__net_inet6_ip6_maxfragpackets +_sysctl__net_inet6_ip6_redirect +_sysctl__net_inet6_ip6_rip6stats +_sysctl__net_inet6_ip6_rr_prune +_sysctl__net_inet6_ip6_rtexpire +_sysctl__net_inet6_ip6_rtmaxcache +_sysctl__net_inet6_ip6_rtminexpire +_sysctl__net_inet6_ip6_stats +_sysctl__net_inet6_ip6_temppltime +_sysctl__net_inet6_ip6_tempvltime 
+_sysctl__net_inet6_ip6_use_deprecated +_sysctl__net_inet6_ip6_use_tempaddr +_sysctl__net_inet6_ip6_v6only +_sysctl__net_inet6_ipsec6 +_sysctl__net_inet6_ipsec6_ah_net_deflev +_sysctl__net_inet6_ipsec6_ah_trans_deflev +_sysctl__net_inet6_ipsec6_children +_sysctl__net_inet6_ipsec6_debug +_sysctl__net_inet6_ipsec6_def_policy +_sysctl__net_inet6_ipsec6_ecn +_sysctl__net_inet6_ipsec6_esp_net_deflev +_sysctl__net_inet6_ipsec6_esp_randpad +_sysctl__net_inet6_ipsec6_esp_trans_deflev +_sysctl__net_inet6_ipsec6_stats +_sysctl__net_inet6_tcp6 +_sysctl__net_inet6_tcp6_children +_sysctl__net_inet6_udp6 +_sysctl__net_inet6_udp6_children +_sysctl__net_inet_children +_sysctl__net_inet_div +_sysctl__net_inet_div_children +_sysctl__net_inet_icmp +_sysctl__net_inet_icmp_bmcastecho +_sysctl__net_inet_icmp_children +_sysctl__net_inet_icmp_drop_redirect +_sysctl__net_inet_icmp_icmplim +_sysctl__net_inet_icmp_log_redirect +_sysctl__net_inet_icmp_maskrepl +_sysctl__net_inet_icmp_stats +_sysctl__net_inet_igmp +_sysctl__net_inet_igmp_children +_sysctl__net_inet_igmp_stats +_sysctl__net_inet_ip +_sysctl__net_inet_ip_accept_sourceroute +_sysctl__net_inet_ip_check_interface +_sysctl__net_inet_ip_check_route_selfref +_sysctl__net_inet_ip_children +_sysctl__net_inet_ip_fastforwarding +_sysctl__net_inet_ip_forwarding +_sysctl__net_inet_ip_gifttl +_sysctl__net_inet_ip_intr_queue_drops +_sysctl__net_inet_ip_intr_queue_maxlen +_sysctl__net_inet_ip_keepfaith +_sysctl__net_inet_ip_linklocal +_sysctl__net_inet_ip_linklocal_children +_sysctl__net_inet_ip_linklocal_in +_sysctl__net_inet_ip_linklocal_in_allowbadttl +_sysctl__net_inet_ip_linklocal_in_children +_sysctl__net_inet_ip_linklocal_stat +_sysctl__net_inet_ip_maxfragpackets +_sysctl__net_inet_ip_portrange +_sysctl__net_inet_ip_portrange_children +_sysctl__net_inet_ip_portrange_first +_sysctl__net_inet_ip_portrange_hifirst +_sysctl__net_inet_ip_portrange_hilast +_sysctl__net_inet_ip_portrange_last +_sysctl__net_inet_ip_portrange_lowfirst +_sysctl__net_inet_ip_portrange_lowlast +_sysctl__net_inet_ip_redirect +_sysctl__net_inet_ip_rtexpire +_sysctl__net_inet_ip_rtmaxcache +_sysctl__net_inet_ip_rtminexpire +_sysctl__net_inet_ip_sourceroute +_sysctl__net_inet_ip_stats +_sysctl__net_inet_ip_subnets_are_local +_sysctl__net_inet_ip_ttl +_sysctl__net_inet_ipsec +_sysctl__net_inet_ipsec_ah_cleartos +_sysctl__net_inet_ipsec_ah_net_deflev +_sysctl__net_inet_ipsec_ah_offsetmask +_sysctl__net_inet_ipsec_ah_trans_deflev +_sysctl__net_inet_ipsec_bypass +_sysctl__net_inet_ipsec_children +_sysctl__net_inet_ipsec_debug +_sysctl__net_inet_ipsec_def_policy +_sysctl__net_inet_ipsec_dfbit +_sysctl__net_inet_ipsec_ecn +_sysctl__net_inet_ipsec_esp_net_deflev +_sysctl__net_inet_ipsec_esp_port +_sysctl__net_inet_ipsec_esp_randpad +_sysctl__net_inet_ipsec_esp_trans_deflev +_sysctl__net_inet_ipsec_stats +_sysctl__net_inet_raw +_sysctl__net_inet_raw_children +_sysctl__net_inet_raw_maxdgram +_sysctl__net_inet_raw_pcblist +_sysctl__net_inet_raw_recvspace +_sysctl__net_inet_tcp +_sysctl__net_inet_tcp_always_keepalive +_sysctl__net_inet_tcp_blackhole +_sysctl__net_inet_tcp_children +_sysctl__net_inet_tcp_delacktime +_sysctl__net_inet_tcp_delayed_ack +_sysctl__net_inet_tcp_do_tcpdrain +_sysctl__net_inet_tcp_drop_synfin +_sysctl__net_inet_tcp_icmp_may_rst +_sysctl__net_inet_tcp_isn_reseed_interval +_sysctl__net_inet_tcp_keepidle +_sysctl__net_inet_tcp_keepinit +_sysctl__net_inet_tcp_keepintvl +_sysctl__net_inet_tcp_local_slowstart_flightsize +_sysctl__net_inet_tcp_log_in_vain +_sysctl__net_inet_tcp_msl 
+_sysctl__net_inet_tcp_mssdflt +_sysctl__net_inet_tcp_newreno +_sysctl__net_inet_tcp_path_mtu_discovery +_sysctl__net_inet_tcp_pcbcount +_sysctl__net_inet_tcp_pcblist +_sysctl__net_inet_tcp_recvspace +_sysctl__net_inet_tcp_rfc1323 +_sysctl__net_inet_tcp_rfc1644 +_sysctl__net_inet_tcp_sendspace +_sysctl__net_inet_tcp_slowlink_wsize +_sysctl__net_inet_tcp_slowstart_flightsize +_sysctl__net_inet_tcp_stats +_sysctl__net_inet_tcp_strict_rfc1948 +_sysctl__net_inet_tcp_tcbhashsize +_sysctl__net_inet_tcp_tcp_lq_overflow +_sysctl__net_inet_tcp_v6mssdflt +_sysctl__net_inet_udp +_sysctl__net_inet_udp_blackhole +_sysctl__net_inet_udp_checksum +_sysctl__net_inet_udp_children +_sysctl__net_inet_udp_log_in_vain +_sysctl__net_inet_udp_maxdgram +_sysctl__net_inet_udp_pcblist +_sysctl__net_inet_udp_recvspace +_sysctl__net_inet_udp_stats +_sysctl__net_key +_sysctl__net_key_ah_keymin +_sysctl__net_key_blockacq_count +_sysctl__net_key_blockacq_lifetime +_sysctl__net_key_children +_sysctl__net_key_debug +_sysctl__net_key_esp_auth +_sysctl__net_key_esp_keymin +_sysctl__net_key_int_random +_sysctl__net_key_larval_lifetime +_sysctl__net_key_prefered_oldsa +_sysctl__net_key_spi_maxval +_sysctl__net_key_spi_minval +_sysctl__net_key_spi_trycnt +_sysctl__net_link +_sysctl__net_link_children +_sysctl__net_link_ether +_sysctl__net_link_ether_children +_sysctl__net_link_ether_inet +_sysctl__net_link_ether_inet_apple_hwcksum_rx +_sysctl__net_link_ether_inet_apple_hwcksum_tx +_sysctl__net_link_ether_inet_children +_sysctl__net_link_ether_inet_host_down_time +_sysctl__net_link_ether_inet_log_arp_wrong_iface +_sysctl__net_link_ether_inet_max_age +_sysctl__net_link_ether_inet_maxtries +_sysctl__net_link_ether_inet_proxyall +_sysctl__net_link_ether_inet_prune_intvl +_sysctl__net_link_ether_inet_useloopback +_sysctl__net_link_generic +_sysctl__net_link_generic_children +_sysctl__net_local +_sysctl__net_local_children +_sysctl__net_local_dgram +_sysctl__net_local_dgram_children +_sysctl__net_local_dgram_maxdgram +_sysctl__net_local_dgram_pcblist +_sysctl__net_local_dgram_recvspace +_sysctl__net_local_inflight +_sysctl__net_local_stream +_sysctl__net_local_stream_children +_sysctl__net_local_stream_pcblist +_sysctl__net_local_stream_recvspace +_sysctl__net_local_stream_sendspace +_sysctl__net_routetable +_sysctl__net_routetable_children +_sysctl__sysctl +_sysctl__sysctl_children +_sysctl__sysctl_debug +_sysctl__sysctl_name +_sysctl__sysctl_name2oid +_sysctl__sysctl_name_children +_sysctl__sysctl_next +_sysctl__sysctl_next_children +_sysctl__sysctl_oidfmt +_sysctl__sysctl_oidfmt_children +_sysctl__user +_sysctl__user_children +_sysctl__vfs +_sysctl__vfs_children +_sysctl__vfs_generic +_sysctl__vfs_generic_children +_sysctl__vfs_generic_ctlbyfsid +_sysctl__vfs_generic_ctlbyfsid_children +_sysctl__vfs_generic_vfsidlist +_sysctl__vm +_sysctl__vm_children +_sysctl_clockrate +_sysctl_doproc +_sysctl_file +_sysctl_handle_int +_sysctl_handle_long +_sysctl_handle_opaque +_sysctl_handle_string +_sysctl_int +_sysctl_procargs +_sysctl_quad +_sysctl_rdint +_sysctl_rdquad +_sysctl_rdstring +_sysctl_rdstruct +_sysctl_register_all +_sysctl_register_fixed +_sysctl_register_oid +_sysctl_register_set +_sysctl_set +_sysctl_string +_sysctl_struct +_sysctl_unregister_oid +_sysctl_unregister_set +_sysctl_vnode +_sysctlbyname +_sysent +_systemLogDiags +_systemdomain +_systemdomain_init +_tablefull +_task_act_iterate_wth_args +_task_assign +_task_assign_default +_task_backing_store_privileged +_task_collect_allowed +_task_collect_last_tick 
+_task_collect_max_rate +_task_collect_scan +_task_create +_task_create_internal +_task_deallocate +_task_for_pid +_task_get_assignment +_task_get_emulation_vector +_task_get_exception_ports +_task_get_special_port +_task_halt +_task_hold +_task_hold_locked +_task_importance +_task_info +_task_init +_task_is_classic +_task_policy +_task_policy_get +_task_policy_set +_task_reference +_task_reference_try +_task_release +_task_release_locked +_task_resume +_task_sample +_task_self_trap +_task_server +_task_server_routine +_task_set_emulation +_task_set_emulation_vector +_task_set_emulation_vector_internal +_task_set_exception_ports +_task_set_info +_task_set_ledger +_task_set_policy +_task_set_port_space +_task_set_ras_pc +_task_set_special_port +_task_subsystem +_task_suspend +_task_swap_exception_ports +_task_swappable +_task_synchronizer_destroy_all +_task_terminate +_task_terminate_internal +_task_threads +_task_wait_locked +_task_wire +_task_working_set_create +_task_zone +_tbeproc +_tcb +_tcbinfo +_tcp6_ctlinput +_tcp6_input +_tcp6_usrreqs +_tcp_backoff +_tcp_canceltimers +_tcp_ccgen +_tcp_close +_tcp_ctlinput +_tcp_ctloutput +_tcp_delack_enabled +_tcp_delacktime +_tcp_do_newreno +_tcp_drain +_tcp_drop +_tcp_drop_syn_sent +_tcp_fasttimo +_tcp_fillheaders +_tcp_freeq +_tcp_gettaocache +_tcp_init +_tcp_input +_tcp_keepidle +_tcp_keepinit +_tcp_keepintvl +_tcp_lq_overflow +_tcp_maketemplate +_tcp_maxidle +_tcp_maxpersistidle +_tcp_msl +_tcp_mss +_tcp_mssdflt +_tcp_mssopt +_tcp_mtudisc +_tcp_new_isn +_tcp_newtcpcb +_tcp_now +_tcp_output +_tcp_quench +_tcp_recvspace +_tcp_respond +_tcp_rtlookup +_tcp_rtlookup6 +_tcp_sendspace +_tcp_setpersist +_tcp_slowtimo +_tcp_syn_backoff +_tcp_timers +_tcp_usrreqs +_tcp_v6mssdflt +_tcpstat +_temp_msgbuf +_termioschars +_test_tws +_testbit +_thread_abort +_thread_abort_safely +_thread_act_server +_thread_act_server_routine +_thread_act_subsystem +_thread_apc_clear +_thread_apc_set +_thread_assign +_thread_assign_default +_thread_bind +_thread_block +_thread_block_reason +_thread_bootstrap +_thread_bootstrap_return +_thread_call_allocate +_thread_call_cancel +_thread_call_enter +_thread_call_enter1 +_thread_call_enter1_delayed +_thread_call_enter_delayed +_thread_call_free +_thread_call_func +_thread_call_func_cancel +_thread_call_func_delayed +_thread_call_initialize +_thread_call_is_delayed +_thread_call_setup +_thread_cancel_timer +_thread_change_psets +_thread_continue +_thread_create +_thread_create_running +_thread_deallocate +_thread_depress_abort +_thread_depress_expire +_thread_dispatch +_thread_doreap +_thread_doswapin +_thread_dup +_thread_entrypoint +_thread_exception_return +_thread_flavor_array +_thread_funnel_get +_thread_funnel_merge +_thread_funnel_set +_thread_funnel_switch +_thread_get_assignment +_thread_get_cont_arg +_thread_get_exception_ports +_thread_get_special_port +_thread_get_state +_thread_getstatus +_thread_go_locked +_thread_hold +_thread_info +_thread_info_shuttle +_thread_init +_thread_invoke +_thread_lock_act +_thread_policy +_thread_policy_get +_thread_policy_set +_thread_quantum_expire +_thread_read_times +_thread_reaper_enqueue +_thread_reaper_init +_thread_reference +_thread_release +_thread_resume +_thread_run +_thread_sample +_thread_scan_enabled +_thread_select +_thread_self +_thread_self_trap +_thread_set_child +_thread_set_cont_arg +_thread_set_exception_ports +_thread_set_parent +_thread_set_policy +_thread_set_special_port +_thread_set_state +_thread_set_timer +_thread_set_timer_deadline +_thread_setrun 
+_thread_setstatus +_thread_should_abort +_thread_should_halt +_thread_sleep_funnel +_thread_sleep_lock_write +_thread_sleep_mutex +_thread_sleep_mutex_deadline +_thread_sleep_usimple_lock +_thread_stop +_thread_suspend +_thread_swap_exception_ports +_thread_swapin +_thread_switch +_thread_syscall_return +_thread_task_priority +_thread_terminate +_thread_terminate_internal +_thread_terminate_self +_thread_termination_continue +_thread_timer_expire +_thread_timer_setup +_thread_timer_terminate +_thread_unlock_act +_thread_unstop +_thread_userstack +_thread_wait +_thread_wakeup +_thread_wakeup_prim +_thread_wire +_threadsignal +_tick +_time +_time_wait_slots +_time_zone_slock_init +_timeout +_timer_call_cancel +_timer_call_enter +_timer_call_enter1 +_timer_call_initialize +_timer_call_is_delayed +_timer_call_setup +_timer_call_shutdown +_timer_delta +_timer_grab +_timer_init +_timer_normalize +_timer_read +_timevaladd +_timevalfix +_timevalsub +_tk_cancc +_tk_nin +_tk_nout +_tk_rawcc +_to_bsd_time +_to_hfs_time +_tprintf +_tprintf_close +_tprintf_open +_tputchar +_trailer_template +_trap_type +_trashMemory +_trigger_name_to_port +_truncate +_tsleep +_tsleep0 +_tsleep1 +_ttioctl +_ttread +_ttrstrt +_ttselect +_ttsetwater +_ttspeedtab +_ttstart +_ttwakeup +_ttwrite +_ttwwakeup +_tty_pgsignal +_ttyblock +_ttychars +_ttycheckoutq +_ttyclose +_ttyflush +_ttyfree +_ttyinfo +_ttyinput +_ttylclose +_ttymalloc +_ttymodem +_ttyopen +_ttyprintf +_ttyselect +_ttysleep +_ttywait +_tvtoabstime +_tvtohz +_tws_build_cluster +_tws_create_startup_list +_tws_expand_working_set +_tws_handle_startup_file +_tws_hash_clear +_tws_hash_create +_tws_hash_destroy +_tws_hash_line_clear +_tws_hash_ws_flush +_tws_insert +_tws_internal_lookup +_tws_internal_startup_send +_tws_line_signal +_tws_lookup +_tws_read_startup_file +_tws_send_startup_info +_tws_startup_list_lookup +_tws_test_for_community +_tws_traverse_address_hash_list +_tws_traverse_object_hash_list +_tws_write_startup_file +_tz +_tz_slock +_uap +_ubc_blktooff +_ubc_clean +_ubc_clearflags +_ubc_create_upl +_ubc_getcred +_ubc_getobject +_ubc_getsize +_ubc_hold +_ubc_info_deallocate +_ubc_info_init +_ubc_info_zone +_ubc_invalidate +_ubc_isinuse +_ubc_issetflags +_ubc_offtoblk +_ubc_page_op +_ubc_pushdirty +_ubc_pushdirty_range +_ubc_range_op +_ubc_rele +_ubc_release +_ubc_release_named +_ubc_setcred +_ubc_setflags +_ubc_setpager +_ubc_setsize +_ubc_uncache +_ubc_upl_abort +_ubc_upl_abort_range +_ubc_upl_commit +_ubc_upl_commit_range +_ubc_upl_map +_ubc_upl_pageinfo +_ubc_upl_unmap +_ucsfncmp +_ucsfntrans +_udb +_udbinfo +_udp6_ctlinput +_udp6_input +_udp6_output +_udp6_recvspace +_udp6_sendspace +_udp6_usrreqs +_udp_ctlinput +_udp_in6 +_udp_init +_udp_input +_udp_ip6 +_udp_notify +_udp_recvspace +_udp_sendspace +_udp_shutdown +_udp_ttl +_udp_usrreqs +_udpstat +_ufs_access +_ufs_advlock +_ufs_bmap +_ufs_bmaparray +_ufs_check_export +_ufs_checkpath +_ufs_close +_ufs_cmap +_ufs_create +_ufs_dirbad +_ufs_dirbadentry +_ufs_dirempty +_ufs_direnter +_ufs_direnter2 +_ufs_dirremove +_ufs_dirrewrite +_ufs_getattr +_ufs_getlbns +_ufs_ihash_slock +_ufs_ihashget +_ufs_ihashinit +_ufs_ihashins +_ufs_ihashlookup +_ufs_ihashrem +_ufs_inactive +_ufs_init +_ufs_ioctl +_ufs_islocked +_ufs_kqfilt_add +_ufs_link +_ufs_lock +_ufs_lookup +_ufs_makeinode +_ufs_mkdir +_ufs_mknod +_ufs_mmap +_ufs_open +_ufs_pathconf +_ufs_print +_ufs_quotactl +_ufs_readdir +_ufs_readlink +_ufs_reclaim +_ufs_remove +_ufs_rename +_ufs_rmdir +_ufs_root +_ufs_seek +_ufs_select +_ufs_setattr +_ufs_start 
+_ufs_strategy +_ufs_symlink +_ufs_unlock +_ufs_vfsops +_ufs_vinit +_ufs_whiteout +_ufsfifo_close +_ufsfifo_kqfilt_add +_ufsfifo_read +_ufsfifo_write +_ufsspec_close +_ufsspec_read +_ufsspec_write +_uihash +_uihashtbl +_uiomove +_uiomove64 +_uipc_usrreqs +_umask +_unblock_procsigmask +_undelete +_unicode_to_hfs +_union_abortop +_union_access +_union_advlock +_union_allocvp +_union_blktooff +_union_bmap +_union_close +_union_cmap +_union_copyfile +_union_copyup +_union_create +_union_dircache +_union_dowhiteout +_union_freevp +_union_fsync +_union_getattr +_union_inactive +_union_init +_union_ioctl +_union_islocked +_union_lease +_union_link +_union_lock +_union_lookup +_union_mkdir +_union_mknod +_union_mkshadow +_union_mkwhiteout +_union_mmap +_union_mount +_union_newlower +_union_newsize +_union_newupper +_union_offtoblk +_union_open +_union_pagein +_union_pageout +_union_pathconf +_union_print +_union_read +_union_readdir +_union_readlink +_union_reclaim +_union_remove +_union_removed_upper +_union_rename +_union_revoke +_union_rmdir +_union_root +_union_seek +_union_select +_union_setattr +_union_start +_union_statfs +_union_strategy +_union_symlink +_union_unlock +_union_unmount +_union_updatevp +_union_vfsops +_union_vn_close +_union_vn_create +_union_vnodeop_entries +_union_vnodeop_opv_desc +_union_vnodeop_p +_union_whiteout +_union_write +_unix_syscall +_unix_syscall_return +_unlink +_unmount +_unp_connect2 +_unp_dispose +_unp_externalize +_unp_init +_unp_zone +_unputc +_unregister_sockfilter +_untimeout +_update_default_shared_region +_update_priority +_upl_abort +_upl_abort_range +_upl_clear_dirty +_upl_commit +_upl_commit_range +_upl_deallocate +_upl_dirty_page +_upl_get_internal_page_list +_upl_get_internal_pagelist_offset +_upl_offset_to_pagelist +_upl_page_present +_upl_phys_page +_upl_server +_upl_server_routine +_upl_set_dirty +_upl_subsystem +_upl_valid_page +_uprintf +_ureadc +_user_warned +_useracc +_userland_sysctl +_usimple_lock +_usimple_lock_init +_usimple_lock_try +_usimple_unlock +_utf8_decodestr +_utf8_encodelen +_utf8_encodestr +_utf8_to_hfs +_utf8_to_mac_roman +_utf_extrabytes +_uthread_alloc +_uthread_free +_uthread_zone +_uthread_zone_init +_uthread_zone_inited +_utimes +_utrace +_ux_exception_port +_ux_handler_init +_v_putc +_va_null +_vagevp +_vattr_null +_vc_display_icon +_vc_progress_initialize +_vc_progress_lock +_vcattach +_vcount +_vcputc +_verbose +_version +_version_major +_version_minor +_version_variant +_vetoSleepWakeNotification +_vfinddev +_vflush +_vfork +_vfork_exit +_vfork_return +_vfs_busy +_vfs_event_signal +_vfs_export +_vfs_export_lookup +_vfs_getnewfsid +_vfs_getvfs +_vfs_init_io_attributes +_vfs_io_attributes +_vfs_mountedon +_vfs_mountroot +_vfs_nummntops +_vfs_op_descs +_vfs_op_init +_vfs_opv_descs +_vfs_opv_init +_vfs_opv_numops +_vfs_rootmountalloc +_vfs_sysctl +_vfs_unbusy +_vfsconf +_vfsconf_add +_vfsconf_del +_vfsinit +_vget +_vgone +_vgonel +_vhold +_video_scroll_down +_video_scroll_up +_vinfo +_vinvalbuf +_virtual_space_end +_virtual_space_start +_vm_accellerate_zf_pageout_trigger +_vm_allocate +_vm_allocate_cpm +_vm_allow_clustered_pagein +_vm_backing_store_disable +_vm_backing_store_low +_vm_behavior_set +_vm_conflict_check +_vm_copy +_vm_countdirtypages +_vm_deallocate +_vm_default_ahead +_vm_default_behind +_vm_external_copy +_vm_external_create +_vm_external_destroy +_vm_external_map_size +_vm_external_module_initialize +_vm_external_state_clr +_vm_external_state_set +_vm_external_within +_vm_fault +_vm_fault_cleanup 
+_vm_fault_copy +_vm_fault_copy_cleanup +_vm_fault_copy_dst_cleanup +_vm_fault_debug +_vm_fault_init +_vm_fault_list_request +_vm_fault_page +_vm_fault_unwire +_vm_fault_wire +_vm_fault_wire_fast +_vm_free_page_pause +_vm_get_shared_region +_vm_inherit +_vm_initial_limit_core +_vm_initial_limit_data +_vm_initial_limit_stack +_vm_last_addr +_vm_machine_attribute +_vm_map +_vm_map_64 +_vm_map_aggressive_enter +_vm_map_aggressive_enter_max +_vm_map_behavior_set +_vm_map_check_protection +_vm_map_copy_copy +_vm_map_copy_discard +_vm_map_copy_overwrite +_vm_map_copy_overwrite_aligned +_vm_map_copy_overwrite_nested +_vm_map_copy_overwrite_unaligned +_vm_map_copy_zone +_vm_map_copyin_common +_vm_map_copyin_kernel_buffer +_vm_map_copyin_object +_vm_map_copyout +_vm_map_copyout_kernel_buffer +_vm_map_create +_vm_map_deallocate +_vm_map_delete +_vm_map_destroy +_vm_map_enter +_vm_map_entry_delete +_vm_map_entry_insert +_vm_map_entry_zone +_vm_map_find_space +_vm_map_fork +_vm_map_fork_copy +_vm_map_fork_share +_vm_map_get_phys_page +_vm_map_get_upl +_vm_map_inherit +_vm_map_init +_vm_map_kentry_zone +_vm_map_lookup_entry +_vm_map_lookup_locked +_vm_map_machine_attribute +_vm_map_overwrite_submap_recurse +_vm_map_page_query +_vm_map_pmap_enter +_vm_map_pmap_enter_enable +_vm_map_pmap_enter_print +_vm_map_protect +_vm_map_range_check +_vm_map_read_user +_vm_map_reference +_vm_map_region_replace +_vm_map_remove +_vm_map_server +_vm_map_server_routine +_vm_map_simplify +_vm_map_steal_memory +_vm_map_submap +_vm_map_submap_pmap_clean +_vm_map_subsystem +_vm_map_switch +_vm_map_unwire +_vm_map_unwire_nested +_vm_map_verify +_vm_map_wire +_vm_map_wire_nested +_vm_map_write_user +_vm_map_zone +_vm_mapped_pages_info +_vm_mem_bootstrap +_vm_mem_init +_vm_msync +_vm_object_absent_max +_vm_object_destroy +_vm_object_enter +_vm_object_hash_entry_free +_vm_object_iopl_request +_vm_object_page_map +_vm_object_page_remove_iterate +_vm_object_page_remove_lookup +_vm_object_pager_create +_vm_object_populate_with_private +_vm_object_shadow_check +_vm_object_sync +_vm_object_terminate_remove_all +_vm_object_update +_vm_page_activate +_vm_page_active_count +_vm_page_alloc +_vm_page_alloc_lock +_vm_page_bootstrap +_vm_page_bucket_count +_vm_page_bucket_lock +_vm_page_buckets +_vm_page_convert +_vm_page_copy +_vm_page_create +_vm_page_deactivate +_vm_page_deactivate_behind +_vm_page_deactivate_hint +_vm_page_fictitious_addr +_vm_page_fictitious_count +_vm_page_free +_vm_page_free_count +_vm_page_free_count_init +_vm_page_free_count_minimum +_vm_page_free_list +_vm_page_free_min +_vm_page_free_reserve +_vm_page_free_reserved +_vm_page_free_target +_vm_page_free_verify +_vm_page_free_wanted +_vm_page_gobble +_vm_page_gobble_count +_vm_page_gobble_count_warning +_vm_page_grab +_vm_page_grab_count +_vm_page_grab_fictitious +_vm_page_hash_mask +_vm_page_hash_shift +_vm_page_inactive_count +_vm_page_inactive_target +_vm_page_init +_vm_page_insert +_vm_page_laundry_count +_vm_page_laundry_max +_vm_page_laundry_min +_vm_page_limbo_count +_vm_page_limbo_real_count +_vm_page_lookup +_vm_page_mask +_vm_page_module_init +_vm_page_more_fictitious +_vm_page_pages +_vm_page_part_copy +_vm_page_part_zero_fill +_vm_page_pin_count +_vm_page_preppin_lock +_vm_page_queue_active +_vm_page_queue_fictitious +_vm_page_queue_free +_vm_page_queue_free_lock +_vm_page_queue_inactive +_vm_page_queue_limbo +_vm_page_queue_lock +_vm_page_queue_zf +_vm_page_release +_vm_page_release_fictitious +_vm_page_remove +_vm_page_rename +_vm_page_replace 
+_vm_page_shift +_vm_page_template +_vm_page_ticket +_vm_page_ticket_roll +_vm_page_unwire +_vm_page_wait +_vm_page_wire +_vm_page_wire_count +_vm_page_wire_count_warning +_vm_page_zero_fill +_vm_page_zero_fill_lock +_vm_page_zone +_vm_pageclean_copy +_vm_pageclean_setup +_vm_pagein_cluster_unused +_vm_pagein_cluster_used +_vm_pageout +_vm_pageout_active +_vm_pageout_burst_max +_vm_pageout_burst_min +_vm_pageout_burst_wait +_vm_pageout_clean_active_pages +_vm_pageout_cluster +_vm_pageout_cluster_page +_vm_pageout_continue +_vm_pageout_dirty_no_pager +_vm_pageout_emergency_availability_request +_vm_pageout_empty_wait +_vm_pageout_in_place +_vm_pageout_inactive +_vm_pageout_inactive_absent +_vm_pageout_inactive_avoid +_vm_pageout_inactive_busy +_vm_pageout_inactive_clean +_vm_pageout_inactive_dirty +_vm_pageout_inactive_forced +_vm_pageout_inactive_nolock +_vm_pageout_inactive_throttled +_vm_pageout_inactive_used +_vm_pageout_initialize_page +_vm_pageout_object_allocate +_vm_pageout_object_terminate +_vm_pageout_out_of_line +_vm_pageout_pause_count +_vm_pageout_pause_max +_vm_pageout_reserved_internal +_vm_pageout_reserved_really +_vm_pageout_scan +_vm_pageout_scan_active_emm_throttle +_vm_pageout_scan_active_emm_throttle_failure +_vm_pageout_scan_active_emm_throttle_success +_vm_pageout_scan_continue +_vm_pageout_scan_event_counter +_vm_pageout_scan_inactive_emm_throttle +_vm_pageout_scan_inactive_emm_throttle_failure +_vm_pageout_scan_inactive_emm_throttle_success +_vm_pageout_setup +_vm_pageout_throttle +_vm_pool_low +_vm_protect +_vm_read +_vm_read_list +_vm_read_overwrite +_vm_region +_vm_region_64 +_vm_region_clone +_vm_region_count_obj_refs +_vm_region_look_for_page +_vm_region_object_create +_vm_region_recurse +_vm_region_recurse_64 +_vm_region_top_walk +_vm_region_walk +_vm_remap +_vm_remap_extract +_vm_remap_range_allocate +_vm_set_page_size +_vm_set_shared_region +_vm_stat +_vm_stat_discard +_vm_stat_discard_cleared_reply +_vm_stat_discard_cleared_too_late +_vm_stat_discard_cleared_unset +_vm_stat_discard_failure +_vm_stat_discard_sent +_vm_stat_discard_throttle +_vm_submap_object +_vm_sysctl +_vm_upl_map +_vm_upl_unmap +_vm_wire +_vm_write +_vm_zf_count +_vm_zf_iterator +_vm_zf_iterator_count +_vn_bwrite +_vn_close +_vn_default_error +_vn_lock +_vn_mkdir +_vn_open +_vn_rdwr +_vn_stat +_vn_symlink +_vn_table +_vn_writechk +_vndevice_init +_vndevice_root_image +_vnode_free_list +_vnode_free_list_slock +_vnode_inactive_list +_vnode_object_create +_vnode_objects_reclaimed +_vnode_pagein +_vnode_pageout +_vnode_pager_bootstrap +_vnode_pager_cluster_read +_vnode_pager_cluster_write +_vnode_pager_data_initialize +_vnode_pager_data_request +_vnode_pager_data_return +_vnode_pager_data_unlock +_vnode_pager_deallocate +_vnode_pager_get_filesize +_vnode_pager_get_object_size +_vnode_pager_init +_vnode_pager_lookup +_vnode_pager_reference +_vnode_pager_release_from_cache +_vnode_pager_setup +_vnode_pager_shutdown +_vnode_pager_synchronize +_vnode_pager_terminate +_vnode_pager_unmap +_vnode_pager_workaround +_vnode_pager_zone +_vnode_reclaim_tried +_vnodetarget +_vnops +_volfs_access +_volfs_fhtovp +_volfs_getattr +_volfs_init +_volfs_islocked +_volfs_load +_volfs_lock +_volfs_lookup +_volfs_mount +_volfs_pathconf +_volfs_quotactl +_volfs_readdir +_volfs_reclaim +_volfs_rmdir +_volfs_root +_volfs_select +_volfs_start +_volfs_statfs +_volfs_sync +_volfs_sysctl +_volfs_unlock +_volfs_unmount +_volfs_vfsops +_volfs_vget +_volfs_vnodeop_entries +_volfs_vnodeop_opv_desc +_volfs_vnodeop_p 
+_volfs_vptofh +_vop_abortop_desc +_vop_abortop_vp_offsets +_vop_access_desc +_vop_access_vp_offsets +_vop_advlock_desc +_vop_advlock_vp_offsets +_vop_allocate_desc +_vop_allocate_vp_offsets +_vop_blkatoff_desc +_vop_blkatoff_vp_offsets +_vop_blktooff_desc +_vop_blktooff_vp_offsets +_vop_bmap_desc +_vop_bmap_vp_offsets +_vop_bwrite_desc +_vop_bwrite_vp_offsets +_vop_cachedlookup_desc +_vop_cachedlookup_vp_offsets +_vop_close_desc +_vop_close_vp_offsets +_vop_cmap_desc +_vop_cmap_vp_offsets +_vop_copyfile_desc +_vop_copyfile_vp_offsets +_vop_create_desc +_vop_create_vp_offsets +_vop_default_desc +_vop_devblocksize_desc +_vop_devblocksize_vp_offsets +_vop_exchange_desc +_vop_exchange_vp_offsets +_vop_fsync_desc +_vop_fsync_vp_offsets +_vop_getattr_desc +_vop_getattr_vp_offsets +_vop_getattrlist_desc +_vop_getattrlist_vp_offsets +_vop_inactive_desc +_vop_inactive_vp_offsets +_vop_ioctl_desc +_vop_ioctl_vp_offsets +_vop_islocked_desc +_vop_islocked_vp_offsets +_vop_kqfilt_add_desc +_vop_kqfilt_add_vp_offsets +_vop_kqfilt_remove_desc +_vop_kqfilt_remove_vp_offsets +_vop_lease_desc +_vop_lease_vp_offsets +_vop_link_desc +_vop_link_vp_offsets +_vop_lock_desc +_vop_lock_vp_offsets +_vop_lookup_desc +_vop_lookup_vp_offsets +_vop_mkcomplex_desc +_vop_mkcomplex_vp_offsets +_vop_mkdir_desc +_vop_mkdir_vp_offsets +_vop_mknod_desc +_vop_mknod_vp_offsets +_vop_mmap_desc +_vop_mmap_vp_offsets +_vop_noislocked +_vop_nolock +_vop_nounlock +_vop_offtoblk_desc +_vop_offtoblk_vp_offsets +_vop_open_desc +_vop_open_vp_offsets +_vop_pagein_desc +_vop_pagein_vp_offsets +_vop_pageout_desc +_vop_pageout_vp_offsets +_vop_pathconf_desc +_vop_pathconf_vp_offsets +_vop_pgrd_desc +_vop_pgrd_vp_offsets +_vop_pgwr_desc +_vop_pgwr_vp_offsets +_vop_print_desc +_vop_print_vp_offsets +_vop_read_desc +_vop_read_vp_offsets +_vop_readdir_desc +_vop_readdir_vp_offsets +_vop_readdirattr_desc +_vop_readdirattr_vp_offsets +_vop_readlink_desc +_vop_readlink_vp_offsets +_vop_reallocblks_desc +_vop_reallocblks_vp_offsets +_vop_reclaim_desc +_vop_reclaim_vp_offsets +_vop_remove_desc +_vop_remove_vp_offsets +_vop_rename_desc +_vop_rename_vp_offsets +_vop_revoke +_vop_revoke_desc +_vop_revoke_vp_offsets +_vop_rmdir_desc +_vop_rmdir_vp_offsets +_vop_searchfs_desc +_vop_searchfs_vp_offsets +_vop_seek_desc +_vop_seek_vp_offsets +_vop_select_desc +_vop_select_vp_offsets +_vop_setattr_desc +_vop_setattr_vp_offsets +_vop_setattrlist_desc +_vop_setattrlist_vp_offsets +_vop_strategy_desc +_vop_strategy_vp_offsets +_vop_symlink_desc +_vop_symlink_vp_offsets +_vop_truncate_desc +_vop_truncate_vp_offsets +_vop_unlock_desc +_vop_unlock_vp_offsets +_vop_update_desc +_vop_update_vp_offsets +_vop_valloc_desc +_vop_valloc_vp_offsets +_vop_vfree_desc +_vop_vfree_vp_offsets +_vop_whiteout_desc +_vop_whiteout_vp_offsets +_vop_write_desc +_vop_write_vp_offsets +_vp_pagein +_vp_pgoclean +_vp_pgodirty +_vprint +_vproc_exit +_vput +_vpwakeup +_vrecycle +_vref +_vrele +_vs_alloc_async +_vs_alloc_async_count +_vs_alloc_async_failed +_vs_async_free_list +_vs_cl_write_complete +_vs_cluster_transfer +_vs_cluster_write +_vs_do_async_write +_vs_free_async +_vs_get_map_entry +_vs_object_create +_vslock +_vsnprintf +_vsprintf +_vstruct_def_clshift +_vstruct_list +_vstruct_zone +_vsunlock +_vttoif_tab +_vwakeup +_wait1 +_wait1continue +_wait4 +_wait_queue_alloc +_wait_queue_assert_wait +_wait_queue_assert_wait64 +_wait_queue_free +_wait_queue_init +_wait_queue_link +_wait_queue_link_noalloc +_wait_queue_link_size +_wait_queue_member +_wait_queue_pull_thread_locked 
+_wait_queue_set_alloc +_wait_queue_set_free +_wait_queue_set_init +_wait_queue_set_size +_wait_queue_set_unlink_all +_wait_queue_set_unlink_all_nofree +_wait_queue_sub_clearrefs +_wait_queue_sub_init +_wait_queue_unlink +_wait_queue_unlink_all +_wait_queue_unlink_one +_wait_queue_unlinkall_nofree +_wait_queue_wakeup64_all +_wait_queue_wakeup64_one +_wait_queue_wakeup64_thread +_wait_queue_wakeup_all +_wait_queue_wakeup_one +_wait_queue_wakeup_thread +_wait_queues +_wait_queues_init +_wait_shift +_wait_subqueue_unlink_all +_waitevent +_waittime +_wakeup +_wakeup_one +_walk_allvnodes +_walk_vnodes_debug +_watchevent +_wncpu +_write +_writev +_ws_disabled +_zError +_z_errmsg +_zalloc +_zalloc_async +_zalloc_canblock +_zalloc_end_of_space +_zalloc_next_space +_zalloc_noblock +_zalloc_wasted_space +_zcram +_zdata +_zdata_size +_zeroin6_addr +_zeroin_addr +_zfill +_zfree +_zget +_zget_space +_zget_space_lock +_zinit +_zlibVersion +_zombproc +_zone_bootstrap +_zone_change +_zone_check +_zone_free_count +_zone_gc +_zone_gc_allowed +_zone_gc_forced +_zone_gc_last_tick +_zone_gc_lock +_zone_gc_max_rate +_zone_init +_zone_map +_zone_map_max_address +_zone_map_min_address +_zone_page_alloc +_zone_page_collectable +_zone_page_init +_zone_page_keep +_zone_page_table +_zone_pages +_zone_steal_memory +_zone_zone +_zprealloc diff --git a/config/System6.0.i386.exports b/config/System6.0.i386.exports new file mode 100644 index 000000000..154f6ec5a --- /dev/null +++ b/config/System6.0.i386.exports @@ -0,0 +1,412 @@ +_Gdt +_Load_context +_PE_incoming_interrupt +_PE_install_interrupt_handler +_PE_interrupt_handler +_PE_platform_interrupt_initialize +_RtcAlrm +_RtcDelt +_RtcTime +_Thread_continue +__ZN15AppleIntelClock10gMetaClassE +__ZN15AppleIntelClock10superClassE +__ZN15AppleIntelClock5startEP9IOService +__ZN15AppleIntelClock9MetaClassC1Ev +__ZN15AppleIntelClock9MetaClassC2Ev +__ZN15AppleIntelClock9metaClassE +__ZN15AppleIntelClockC1EPK11OSMetaClass +__ZN15AppleIntelClockC1Ev +__ZN15AppleIntelClockC2EPK11OSMetaClass +__ZN15AppleIntelClockC2Ev +__ZN15AppleIntelClockD0Ev +__ZN15AppleIntelClockD2Ev +__ZNK15AppleIntelClock12getMetaClassEv +__ZNK15AppleIntelClock9MetaClass5allocEv +__ZTV15AppleIntelClock +__ZTVN15AppleIntelClock9MetaClassE +___divsi3 +___udivsi3 +__clts +__fldcw +__fnclex +__fninit +__fnstsw +__fprestore +__fpsave +__fstcw +__kick_buffer_ +__mp_disable_preemption +__mp_enable_preemption +__mp_enable_preemption_no_check +__setts +_a_dbl_fault +_a_fpu_over +_a_inv_tss +_acc_type +_act_machine_return +_act_machine_switch_pcb +_active_kloaded +_active_stacks +_all_intrs +_alltraps +_avail_end +_avail_next +_avail_start +_bbc_config +_bbc_getattr +_bbc_gettime +_bbc_settime +_bcopy16 +_bcopy_no_overwrite +_bit_lock +_bit_lock_try +_bit_unlock +_blkclr +_bmapmap +_bmapmapr +_bmapvideo +_boot_args_start +_buffer_map +_check_io_fault +_clear_kdb_intr +_cli_count +_clknum +_clks_per_int +_clks_per_int_99 +_cnclose +_cnioctl +_cnopen +_cnread +_cnselect +_cnvmem +_cnwrite +_collect_ref +_collect_unref +_copyp2p +_cpu_data +_cpu_interrupt +_cpu_shutdown +_cpu_to_lapic +_cpu_update_list +_cpu_update_needed +_cpu_vendors +_cpudata_desc_pattern +_cpuid_cpu_display +_cpuid_family +_cpuid_feature +_cpuid_feature_display +_cpuid_features +_cpuid_get_feature_names +_cpuid_get_info +_cpuid_info +_cpuid_intel_get_model_name +_cpus_active +_cpus_idle +_createdt +_dectohexdec +_dev_indirect_count +_dev_indirect_list +_display_syscall +_div_scale +_dr0 +_dr1 +_dr2 +_dr3 +_dr6 +_dr_addr +_dump_act +_dump_regs 
+_eintstack +_emulate_io +_extmem +_fakePPCBootArgs +_fakePPCDeviceTree +_fc_get +_first_addr +_fix_desc +_flush_tlb +_fp_free +_fp_kind +_fp_load +_fp_save +_fp_state_alloc +_fpexterrflt +_fpextovrflt +_fpflush +_fpinit +_fpintr +_fpnoextflt +_fpu_get_fxstate +_fpu_get_state +_fpu_module_init +_fpu_set_fxstate +_fpu_set_state +_gDriversProp +_gMemoryMapNode +_gdt +_gdtptr +_gdtr +_get_cr0 +_get_cr2 +_get_cr3 +_get_cr4 +_get_ldt +_get_pc +_get_tr +_hardclock +_hexdectodec +_hole_end +_hole_start +_htonl +_htons +_i386_astintr +_i386_exception +_i386_init +_i386_preinit +_i386_signal_cpu +_i386_signal_cpus +_i386_vm_init +_i_bit_clear +_i_bit_set +_idt +_idtptr +_ifps_zone +_indent +_init_fpu +_insb +_insl +_inst_fetch +_insw +_int_stack_high +_int_stack_top +_intel_read_fault +_intel_startCPU +_interrupt_stack +_interrupt_stack_alloc +_inuse_ptepages_count +_iopb_destroy +_iopb_init +_jail +_kd_slmscd +_kd_slmscu +_kd_slmwd +_kdp_copy_kmem +_kdp_getstate +_kdp_i386_backtrace +_kdp_i386_trap +_kdp_setstate +_kdreboot +_kernel_preempt_check +_kernel_stack +_kernel_trap +_kgdb_stack_store +_kpde +_ktss +_lapic_cpu_map +_lapic_dump +_lapic_end_of_interrupt +_lapic_esr_clear +_lapic_esr_read +_lapic_id +_lapic_id_initdata +_lapic_init +_lapic_interrupt +_lapic_start +_lapic_test +_lapic_to_cpu +_last_addr +_ldt +_ldt_desc_pattern +_linb +_linl +_linw +_locore_end +_loutb +_loutl +_loutw +_mach25_syscall +_mach_rpc +_machdep_call_count +_machdep_call_table +_machdep_syscall +_machine_kernel_stack_init +_master_is_up +_master_up +_minsecurity +_mp_boot_pde +_mp_desc_init +_mp_desc_table +_mp_gdt +_mp_idt +_mp_kdp_enter +_mp_kdp_exit +_mp_kdp_lock +_mp_kdp_ncpus +_mp_kdp_trap +_mp_ktss +_mp_ldt +_mul_scale +_new_clknum +_nptr +_ntohl +_ntohs +_outsb +_outsl +_outsw +_pagemove +_panic_trap +_phys_attribute_clear +_phys_attribute_set +_phys_attribute_test +_pmap_alloc_chunk +_pmap_cache_count +_pmap_cache_list +_pmap_cache_lock +_pmap_cache_max +_pmap_copy_part_lpage +_pmap_copy_part_rpage +_pmap_debug +_pmap_expand +_pmap_map_bd +_pmap_movepage +_pmap_object +_pmap_phys_attributes +_pmap_pte +_pmap_remove_range +_pmap_set_modify +_pmap_system_lock +_pmap_update_interrupt +_pmap_valid_page +_preemptable +_printdt +_process_pmap_updates +_pstart +_ptes_per_vm_page +_pv_free_list +_pv_free_list_lock +_pv_head_table +_pv_list_zone +_pv_lock_table +_real_pmap +_real_to_prot +_recover_table +_recover_table_end +_remote_kdb +_reset_mem_on_reboot +_retry_table +_retry_table_end +_return_to_iret +_rtc_cyc_per_sec +_rtc_intr_count +_rtc_intr_freq +_rtc_intr_hertz +_rtc_print_lost_tick +_rtc_quant_scale +_rtc_setvals +_rtcget +_rtclock +_rtcput +_sectOBJCB +_sectSizeOBJC +_serial_getc +_serial_init +_set_cpu_model +_set_cr0 +_set_cr3 +_set_cr4 +_set_kbd_leds +_set_ldt +_set_tr +_signal_cpus +_slave_boot_base +_slave_boot_end +_slave_boot_init +_slave_clock +_slave_pstart +_slave_start +_smp_init +_smp_initialized +_start_lock +_startprog +_sti_count +_syscall +_syscall_failed +_syscall_int80 +_sysclk_gettime_internal +_sysclk_gettime_interrupts_disabled +_sysclk_setattr +_sysctl__machdep_cpu +_sysctl__machdep_cpu_brand +_sysctl__machdep_cpu_brand_string +_sysctl__machdep_cpu_children +_sysctl__machdep_cpu_extfamily +_sysctl__machdep_cpu_extmodel +_sysctl__machdep_cpu_family +_sysctl__machdep_cpu_feature_bits +_sysctl__machdep_cpu_features +_sysctl__machdep_cpu_model +_sysctl__machdep_cpu_signature +_sysctl__machdep_cpu_stepping +_sysctl__machdep_cpu_value +_sysctl__machdep_cpu_vendor +_t_bounds +_t_debug 
+_t_fpu_err +_t_gen_prot +_t_int3 +_t_into +_t_invop +_t_nofpu +_t_page_fault +_t_preempt +_t_segnp +_t_stack_fault +_t_trap_0f +_t_trap_11 +_t_trap_12 +_t_trap_13 +_t_trap_14 +_t_trap_15 +_t_trap_16 +_t_trap_17 +_t_trap_18 +_t_trap_19 +_t_trap_1a +_t_trap_1b +_t_trap_1c +_t_trap_1d +_t_trap_1e +_t_trap_1f +_t_zero_div +_tc_clear_screen +_tc_enable +_tc_hide_cursor +_tc_initialize +_tc_paint_char +_tc_scroll_down +_tc_scroll_up +_tc_show_cursor +_tc_update_color +_thread_compose_cthread_desc +_thread_fast_set_cthread_self +_thread_get_cthread_self +_thread_set_cthread_self +_thread_swapin_mach_alloc +_time_per_clk +_trap_mach25_syscall +_trap_machdep_syscall +_trap_unix_syscall +_tss_desc_pattern +_user_ldt_free +_user_page_fault_continue +_user_trap +_v86_assist +_v86_assist_on +_v86_do_sti_cli +_v86_do_sti_immediate +_v86_unsafe_ok +_virtual_avail +_virtual_end +_vm_first_phys +_vm_last_phys +_yeartoday diff --git a/config/System6.0.ppc.exports b/config/System6.0.ppc.exports new file mode 100644 index 000000000..4b1734189 --- /dev/null +++ b/config/System6.0.ppc.exports @@ -0,0 +1,1256 @@ +Choke +ClearRealCall +CreateFakeDECCall +CreateFakeIOCall +CreateShutdownCTXCall +CutTrace +DoPreemptCall +LoadDBATsCall +LoadIBATsCall +NullCall +StoreRealCall +SwitchContextCall +_AARPwakeup +_ASPgetmsg +_ASPputmsg +_ATPgetreq +_ATPgetrsp +_ATPsndreq +_ATPsndrsp +_ATgetmsg +_ATputmsg +_ATsocket +_AURPaccess +_AURPcleanup +_AURPcmdx +_AURPfreemsg +_AURPgetmsg +_AURPgetri +_AURPinit +_AURPiocack +_AURPiocnak +_AURPpurgeri +_AURPrcvOpenReq +_AURPrcvOpenRsp +_AURPrcvRDReq +_AURPrcvRIAck +_AURPrcvRIReq +_AURPrcvRIRsp +_AURPrcvRIUpd +_AURPrcvTickle +_AURPrcvTickleAck +_AURPrcvZReq +_AURPrcvZRsp +_AURPrtupdate +_AURPsend +_AURPsetri +_AURPshutdown +_AURPsndGDZL +_AURPsndGZN +_AURPsndOpenReq +_AURPsndOpenReq_funnel +_AURPsndRDReq +_AURPsndRIAck +_AURPsndRIReq +_AURPsndRIReq_funnel +_AURPsndRIRsp_funnel +_AURPsndRIUpd +_AURPsndRIUpd_funnel +_AURPsndTickle +_AURPsndZReq +_AURPsndZRsp +_AURPupdate +_AURPupdateri +_AbortIO +_AdspBad +_AlignAssist +_AlignAssist64 +_AltivecAssist +_CalcRecvWdw +_CalcSendQFree +_CallTVector +_Call_Debugger +_Call_DebuggerC +_Call_continuation +_CheckAttn +_CheckOkToClose +_CheckReadQueue +_CheckRecvSeq +_CheckSend +_ChokeSys +_CleanupGlobals +_ClearReal +_ClearRealLL +_CompleteQueue +_CreateFakeDEC +_CreateFakeDECLL +_CreateFakeIO +_CreateFakeIOLL +_CreateShutdownCTX +_CreateShutdownCTXLL +_DDP_chksum_on +_DDP_slfsnd_on +_DebugWork +_DoChokeLL +_DoClose +_DoPreemptLL +_DoTimerElem +_EmulExit +_Emulate +_Emulate64 +_ErrorRTMPoverflow +_ErrorZIPoverflow +_ExceptionVectorsEnd +_ExceptionVectorsStart +_FCReturn +_FWtable +_FillSendQueue +_FindSender +_FirmwareCall +_FixedStackEnd +_FixedStackStart +_FloatInit +_GratefulDebInit +_GratefulDebWork +_InitGlobals +_InsertTimerElem +_LLTraceSet +_LoadDBATs +_LoadIBATs +_MapUserAddressSpace +_MapUserAddressSpaceInit +_NMIss +_NextCID +_NotifyUser +_NullLL +_PE_Determine_Clock_Speeds +_PE_find_scc +_PE_init_taproot +_PE_read_write_time_of_day +_PE_write_IIC +_PFSExit +_PPCcalls +_QNaNbarbarian +_RT_maxentry +_RT_table +_RT_table_freelist +_RT_table_start +_RXAttention +_RXData +_RXFReset +_RXFResetAck +_ReadReal +_ReleaseUserAddressSpace +_RemoveCCB +_RemoveTimerElem +_ResetHandler +_RouterError +_RouterMix +_RuptCtrs +_RxClose +_SndMsgUp +_StoreReal +_StoreRealLL +_SwitchContextLL +_SysChoked +_TimerQueueTick +_TimerStop +_TimerTick +_TimerTick_funnel +_TrashSession +_UrgentUser +_ZIPwakeup +_ZT_maxentry +_ZT_table +__ATPgetreq +__ATPgetrsp 
+__ATPsndreq +__ATPsndrsp +__ATclose +__ATgetmsg +__ATioctl +__ATkqfilter +__ATputmsg +__ATread +__ATrw +__ATselect +__ATsocket +__ATwrite +__Z11IODBDMAStopPV23IODBDMAChannelRegisters +__Z12IODBDMAFlushPV23IODBDMAChannelRegisters +__Z12IODBDMAPausePV23IODBDMAChannelRegisters +__Z12IODBDMAResetPV23IODBDMAChannelRegisters +__Z12IODBDMAStartPV23IODBDMAChannelRegistersPV17IODBDMADescriptor +__Z14RootRegisteredP8OSObjectPvP9IOService +__Z15IODBDMAContinuePV23IODBDMAChannelRegisters +__Z32IOFreePhysicallyContiguousMemoryPjj +__Z36IOAllocatePhysicallyContiguousMemoryjjPjPm +__ZN10AppleMacIO10deleteListEv +__ZN10AppleMacIO10gMetaClassE +__ZN10AppleMacIO10processNubEP9IOService +__ZN10AppleMacIO10superClassE +__ZN10AppleMacIO11excludeListEv +__ZN10AppleMacIO12publishBelowEP15IORegistryEntry +__ZN10AppleMacIO15getNubResourcesEP9IOService +__ZN10AppleMacIO20_RESERVEDAppleMacIO0Ev +__ZN10AppleMacIO20_RESERVEDAppleMacIO1Ev +__ZN10AppleMacIO20_RESERVEDAppleMacIO2Ev +__ZN10AppleMacIO20_RESERVEDAppleMacIO3Ev +__ZN10AppleMacIO5startEP9IOService +__ZN10AppleMacIO8selfTestEv +__ZN10AppleMacIO9MetaClassC1Ev +__ZN10AppleMacIO9MetaClassC2Ev +__ZN10AppleMacIO9createNubEP15IORegistryEntry +__ZN10AppleMacIO9metaClassE +__ZN10AppleMacIOC1EPK11OSMetaClass +__ZN10AppleMacIOC2EPK11OSMetaClass +__ZN10AppleMacIOD0Ev +__ZN10AppleMacIOD2Ev +__ZN10AppleNVRAM10gMetaClassE +__ZN10AppleNVRAM10superClassE +__ZN10AppleNVRAM4readEmPhm +__ZN10AppleNVRAM5startEP9IOService +__ZN10AppleNVRAM5writeEmPhm +__ZN10AppleNVRAM9MetaClassC1Ev +__ZN10AppleNVRAM9MetaClassC2Ev +__ZN10AppleNVRAM9metaClassE +__ZN10AppleNVRAMC1EPK11OSMetaClass +__ZN10AppleNVRAMC1Ev +__ZN10AppleNVRAMC2EPK11OSMetaClass +__ZN10AppleNVRAMC2Ev +__ZN10AppleNVRAMD0Ev +__ZN10AppleNVRAMD2Ev +__ZN16AppleMacIODevice10gMetaClassE +__ZN16AppleMacIODevice10superClassE +__ZN16AppleMacIODevice12getResourcesEv +__ZN16AppleMacIODevice13matchLocationEP9IOService +__ZN16AppleMacIODevice26_RESERVEDAppleMacIODevice0Ev +__ZN16AppleMacIODevice26_RESERVEDAppleMacIODevice1Ev +__ZN16AppleMacIODevice26_RESERVEDAppleMacIODevice2Ev +__ZN16AppleMacIODevice26_RESERVEDAppleMacIODevice3Ev +__ZN16AppleMacIODevice9MetaClassC1Ev +__ZN16AppleMacIODevice9MetaClassC2Ev +__ZN16AppleMacIODevice9metaClassE +__ZN16AppleMacIODeviceC1EPK11OSMetaClass +__ZN16AppleMacIODeviceC1Ev +__ZN16AppleMacIODeviceC2EPK11OSMetaClass +__ZN16AppleMacIODeviceC2Ev +__ZN16AppleMacIODeviceD0Ev +__ZN16AppleMacIODeviceD2Ev +__ZN17IONVRAMController10gMetaClassE +__ZN17IONVRAMController10superClassE +__ZN17IONVRAMController4syncEv +__ZN17IONVRAMController5startEP9IOService +__ZN17IONVRAMController9MetaClassC1Ev +__ZN17IONVRAMController9MetaClassC2Ev +__ZN17IONVRAMController9metaClassE +__ZN17IONVRAMControllerC1EPK11OSMetaClass +__ZN17IONVRAMControllerC2EPK11OSMetaClass +__ZN17IONVRAMControllerD0Ev +__ZN17IONVRAMControllerD2Ev +__ZN19ApplePlatformExpert10deleteListEv +__ZN19ApplePlatformExpert10gMetaClassE +__ZN19ApplePlatformExpert10superClassE +__ZN19ApplePlatformExpert11excludeListEv +__ZN19ApplePlatformExpert14getMachineNameEPci +__ZN19ApplePlatformExpert15getGMTTimeOfDayEv +__ZN19ApplePlatformExpert15setGMTTimeOfDayEl +__ZN19ApplePlatformExpert23registerNVRAMControllerEP17IONVRAMController +__ZN19ApplePlatformExpert29_RESERVEDApplePlatformExpert0Ev +__ZN19ApplePlatformExpert29_RESERVEDApplePlatformExpert1Ev +__ZN19ApplePlatformExpert29_RESERVEDApplePlatformExpert2Ev +__ZN19ApplePlatformExpert29_RESERVEDApplePlatformExpert3Ev +__ZN19ApplePlatformExpert5startEP9IOService +__ZN19ApplePlatformExpert9MetaClassC1Ev 
+__ZN19ApplePlatformExpert9MetaClassC2Ev +__ZN19ApplePlatformExpert9configureEP9IOService +__ZN19ApplePlatformExpert9metaClassE +__ZN19ApplePlatformExpertC1EPK11OSMetaClass +__ZN19ApplePlatformExpertC2EPK11OSMetaClass +__ZN19ApplePlatformExpertD0Ev +__ZN19ApplePlatformExpertD2Ev +__ZN19IODBDMAMemoryCursor10gMetaClassE +__ZN19IODBDMAMemoryCursor10superClassE +__ZN19IODBDMAMemoryCursor13outputSegmentEN14IOMemoryCursor15PhysicalSegmentEPvm +__ZN19IODBDMAMemoryCursor17withSpecificationEmmm +__ZN19IODBDMAMemoryCursor21initWithSpecificationEmmm +__ZN19IODBDMAMemoryCursor9MetaClassC1Ev +__ZN19IODBDMAMemoryCursor9MetaClassC2Ev +__ZN19IODBDMAMemoryCursor9metaClassE +__ZN19IODBDMAMemoryCursorC1EPK11OSMetaClass +__ZN19IODBDMAMemoryCursorC1Ev +__ZN19IODBDMAMemoryCursorC2EPK11OSMetaClass +__ZN19IODBDMAMemoryCursorC2Ev +__ZN19IODBDMAMemoryCursorD0Ev +__ZN19IODBDMAMemoryCursorD2Ev +__ZN8AppleCPU10gMetaClassE +__ZN8AppleCPU10getCPUNameEv +__ZN8AppleCPU10quiesceCPUEv +__ZN8AppleCPU10superClassE +__ZN8AppleCPU5startEP9IOService +__ZN8AppleCPU7haltCPUEv +__ZN8AppleCPU7initCPUEb +__ZN8AppleCPU8startCPUEjj +__ZN8AppleCPU9MetaClassC1Ev +__ZN8AppleCPU9MetaClassC2Ev +__ZN8AppleCPU9metaClassE +__ZN8AppleCPUC1EPK11OSMetaClass +__ZN8AppleCPUC1Ev +__ZN8AppleCPUC2EPK11OSMetaClass +__ZN8AppleCPUC2Ev +__ZN8AppleCPUD0Ev +__ZN8AppleCPUD2Ev +__ZN8AppleNMI10gMetaClassE +__ZN8AppleNMI10superClassE +__ZN8AppleNMI15handleInterruptEPvP9IOServicei +__ZN8AppleNMI18_RESERVEDAppleNMI0Ev +__ZN8AppleNMI18_RESERVEDAppleNMI1Ev +__ZN8AppleNMI18_RESERVEDAppleNMI2Ev +__ZN8AppleNMI18_RESERVEDAppleNMI3Ev +__ZN8AppleNMI22powerStateWillChangeToEmmP9IOService +__ZN8AppleNMI5startEP9IOService +__ZN8AppleNMI7initNMIEP21IOInterruptControllerP6OSData +__ZN8AppleNMI9MetaClassC1Ev +__ZN8AppleNMI9MetaClassC2Ev +__ZN8AppleNMI9metaClassE +__ZN8AppleNMIC1EPK11OSMetaClass +__ZN8AppleNMIC1Ev +__ZN8AppleNMIC2EPK11OSMetaClass +__ZN8AppleNMIC2Ev +__ZN8AppleNMID0Ev +__ZN8AppleNMID2Ev +__ZNK10AppleMacIO12getMetaClassEv +__ZNK10AppleMacIO14compareNubNameEPK9IOServiceP8OSStringPS4_ +__ZNK10AppleMacIO9MetaClass5allocEv +__ZNK10AppleNVRAM12getMetaClassEv +__ZNK10AppleNVRAM9MetaClass5allocEv +__ZNK16AppleMacIODevice11compareNameEP8OSStringPS1_ +__ZNK16AppleMacIODevice12getMetaClassEv +__ZNK16AppleMacIODevice9MetaClass5allocEv +__ZNK17IONVRAMController12getMetaClassEv +__ZNK17IONVRAMController9MetaClass5allocEv +__ZNK19ApplePlatformExpert12getMetaClassEv +__ZNK19ApplePlatformExpert9MetaClass5allocEv +__ZNK19IODBDMAMemoryCursor12getMetaClassEv +__ZNK19IODBDMAMemoryCursor9MetaClass5allocEv +__ZNK8AppleCPU12getMetaClassEv +__ZNK8AppleCPU9MetaClass5allocEv +__ZNK8AppleNMI12getMetaClassEv +__ZNK8AppleNMI9MetaClass5allocEv +__ZTV10AppleMacIO +__ZTV10AppleNVRAM +__ZTV16AppleMacIODevice +__ZTV17IONVRAMController +__ZTV19ApplePlatformExpert +__ZTV19IODBDMAMemoryCursor +__ZTV8AppleCPU +__ZTV8AppleNMI +__ZTVN10AppleMacIO9MetaClassE +__ZTVN10AppleNVRAM9MetaClassE +__ZTVN16AppleMacIODevice9MetaClassE +__ZTVN17IONVRAMController9MetaClassE +__ZTVN19ApplePlatformExpert9MetaClassE +__ZTVN19IODBDMAMemoryCursor9MetaClassE +__ZTVN8AppleCPU9MetaClassE +__ZTVN8AppleNMI9MetaClassE +__eSynchronizeIO +__start_cpu +_aaFPopTable +_aarp_chk_addr +_aarp_init1 +_aarp_init2 +_aarp_rcv_pkt +_aarp_sched_probe +_aarp_send_data +_aarp_table +_abs +_add_ddp_handler +_adspAllocateCCB +_adspAssignSocket +_adspAttention +_adspCLDeny +_adspCLListen +_adspClose +_adspDeassignSocket +_adspGlobal +_adspInit +_adspInited +_adspMode +_adspNewCID +_adspOpen +_adspOptions +_adspPacket +_adspRead 
+_adspReadAttention +_adspReadHandler +_adspRelease +_adspReset +_adspStatus +_adspWrite +_adspWriteHandler +_adsp_close +_adsp_dequeue_ccb +_adsp_input +_adsp_inputC +_adsp_inputQ +_adsp_open +_adsp_pidM +_adsp_readable +_adsp_rput +_adsp_sendddp +_adsp_window +_adsp_wput +_adsp_writeable +_adspall_lock +_adspgen_lock +_adspioc_ack +_adsptmr_lock +_append_copy +_appletalk_hack_start +_appletalk_inited +_arpinp_lock +_asp_ack_reply +_asp_clock +_asp_clock_funnel +_asp_close +_asp_init +_asp_inpC +_asp_nak_reply +_asp_open +_asp_pack_bdsp +_asp_readable +_asp_scbQ +_asp_wput +_aspall_lock +_asptmo_lock +_at_control +_at_ddp_brt +_at_ddp_stats +_at_ifQueueHd +_at_insert +_at_interfaces +_at_ioctl +_at_memzone_init +_at_pcballoc +_at_pcbbind +_at_pcbdetach +_at_reg_mcast +_at_state +_at_unreg_mcast +_atalk_closeref +_atalk_enablew +_atalk_flush +_atalk_getref +_atalk_gettrace +_atalk_load +_atalk_notify +_atalk_notify_sel +_atalk_openref +_atalk_peek +_atalk_post_msg +_atalk_putnext +_atalk_settrace +_atalk_to_ip +_atalk_unload +_atalkdomain +_atalkintr +_atalkintrq +_atalksw +_atomic_switch_syscall +_atomic_switch_trap +_atp_bind +_atp_build_release +_atp_cancel_req +_atp_close +_atp_delete_free_clusters +_atp_dequeue_atp +_atp_drop_req +_atp_free +_atp_free_cluster_list +_atp_free_cluster_timeout_set +_atp_free_list +_atp_init +_atp_inited +_atp_input +_atp_inputQ +_atp_iocack +_atp_iocnak +_atp_link +_atp_lomask +_atp_mask +_atp_need_rel +_atp_open +_atp_pidM +_atp_rcb_alloc +_atp_rcb_data +_atp_rcb_free +_atp_rcb_free_list +_atp_rcb_timer +_atp_reply +_atp_req_ind +_atp_req_timeout +_atp_resource_m +_atp_retry_req +_atp_rput +_atp_rsp_ind +_atp_send +_atp_send_replies +_atp_send_req +_atp_send_rsp +_atp_state_data +_atp_tid +_atp_timout +_atp_trans_abort +_atp_trans_alloc +_atp_trans_free +_atp_trans_free_list +_atp_treq_event +_atp_trp_clock +_atp_trp_clock_funnel +_atp_unlink +_atp_untimout +_atp_used_list +_atp_wput +_atp_x_done +_atp_x_done_funnel +_atpall_lock +_atpcb_zone +_atpgen_lock +_atptmo_lock +_attachData +_aurp_close +_aurp_global +_aurp_gref +_aurp_ifID +_aurp_open +_aurp_state +_aurp_wakeup +_aurp_wput +_aurpd_start +_aurpgen_lock +_backchain +_backpocket +_bbSetRupt +_bb_disable_bluebox +_bb_enable_bluebox +_bb_settaskenv +_bcopy_64 +_bcopy_970 +_bcopy_g3 +_bcopy_g4 +_bcopy_nc +_bcopy_physvir +_boot_args_buf +_bzero_128 +_bzero_32 +_bzero_nc +_cacheDisable +_cacheInit +_calcRecvQ +_calcSendQ +_cbfpend +_cbfr +_ccb_used_list +_chandler +_checkBogus +_checkNMI +_clock_delay_until +_clock_gettimeofday +_cnputcusr +_cntlzw +_commPagePtr +_commpage_flush_dcache +_commpage_flush_icache +_commpage_set_timestamp +_commpage_time_dcba +_completepb +_condStop +_cons_getc +_cons_ops +_cons_ops_index +_cons_putc +_consclose +_consider_mapping_adjust +_consioctl +_console_chan_default +_console_is_serial +_console_unit +_consopen +_consread +_consselect +_conswrite +_copy_pkt +_copyin_multiple +_copyout_multiple +_cpu_doshutdown +_cpu_signal +_cpu_sync_timebase +_cpus_holding_bkpts +_current_free_region +_cursor_pmap +_db_breakpoints_inserted +_db_im_stepping +_db_recover +_db_run_mode +_dbfloats +_dbgBits +_dbgCkpt +_dbgCkptLL +_dbgDisp +_dbgDispLL +_dbgRegsLL +_dbgTrace +_dbspecrs +_dbvecs +_ddp_AURPfuncx +_ddp_AURPsendx +_ddp_add_if +_ddp_adjmsg +_ddp_age_router +_ddp_bit_reverse +_ddp_brt_init +_ddp_brt_shutdown +_ddp_brt_sweep +_ddp_brt_sweep_funnel +_ddp_brt_sweep_timer +_ddp_checksum +_ddp_compress_msg +_ddp_ctloutput +_ddp_glean +_ddp_growmsg +_ddp_handler +_ddp_head +_ddp_init 
+_ddp_input +_ddp_notify_nbp +_ddp_output +_ddp_pru_abort +_ddp_pru_attach +_ddp_pru_bind +_ddp_pru_connect +_ddp_pru_control +_ddp_pru_detach +_ddp_pru_disconnect +_ddp_pru_peeraddr +_ddp_pru_send +_ddp_pru_shutdown +_ddp_pru_sockaddr +_ddp_putmsg +_ddp_recvspace +_ddp_rem_if +_ddp_router_output +_ddp_sendspace +_ddp_shutdown +_ddp_slowtimo +_ddp_socket_inuse +_ddp_start +_ddp_usrreqs +_ddpall_lock +_ddpinp_lock +_debcnputc +_debsave0 +_debstack +_debstack_top_ss +_debstackptr +_debugNoop +_debugbackpocket +_debugger_active +_debugger_cpu +_debugger_debug +_debugger_holdoff +_debugger_is_slave +_debugger_lock +_debugger_pending +_debugger_sync +_delay_for_interval +_dgVideo +_dgWork +_diagCall +_diagTrap +_disable_bluebox_internal +_doexception +_dst_addr_cnt +_dump_backtrace +_dump_savearea +_elap_dataput +_elap_offline +_elap_online3 +_elap_wput +_enter_funnel_section +_env_buf +_ep_input +_errstr +_et_zeroaddr +_etalk_multicast_addr +_exception_end +_exception_entry +_exception_exit +_exit_funnel_section +_extPatch32 +_extPatchMCK +_failNames +_fastexit +_fctx_test +_find_ifID +_find_user_fpu +_find_user_regs +_find_user_vec +_first_free_virt +_forUs +_fpu_save +_fpu_switch +_free_mappings +_free_pmap_count +_free_pmap_list +_free_pmap_lock +_free_pmap_max +_fwSCCinit +_gGetDefaultBusSpeedsKey +_gbuf_alloc_wait +_gbuf_freel +_gbuf_linkb +_gbuf_linkpkt +_gbuf_msgsize +_gbuf_strip +_getAarp +_getAarpTableSize +_getIfUsage +_getLocalZone +_getNbpTable +_getNbpTableSize +_getPhysAddrSize +_getRTRLocalZone +_getRtmpTable +_getRtmpTableSize +_getSPLocalZone +_getZipTable +_getZipTableSize +_get_got +_get_io_base_addr +_get_msr_exportmask +_get_msr_nbits +_get_msr_rbits +_get_preemption_level +_get_simple_lock_count +_getchar +_getrpc +_gets +_gettimeofday_32 +_gettimeofday_64 +_gref_alloc +_gref_close +_gref_wput +_handleDSeg +_handleISeg +_handlePF +_hash_table_base +_hash_table_size +_hid0get64 +_hw_add_map +_hw_blow_seg +_hw_cpu_sync +_hw_cpu_wcng +_hw_dequeue_atomic +_hw_find_map +_hw_find_space +_hw_hash_init +_hw_lock_bit +_hw_lock_mbits +_hw_map_seg +_hw_perfmon_lock +_hw_protect +_hw_purge_map +_hw_purge_phys +_hw_purge_space +_hw_queue_atomic +_hw_queue_atomic_list +_hw_rem_map +_hw_set_user_space +_hw_set_user_space_dis +_hw_setup_trans +_hw_start_trans +_hw_test_rc +_hw_unlock_bit +_hw_walk_phys +_hwulckPatch_eieio +_hwulckPatch_isync +_hwulckbPatch_eieio +_hwulckbPatch_isync +_iNullLL +_ifID_home +_ifID_table +_ignore_zero_fault +_ihandler +_ihandler_ret +_incrVSID +_init_ddp_handler +_initialize_serial +_interrupt +_interrupt_disable +_interrupt_enable +_intstack_top_ss +_invalidateSegs +_invalidate_dcache +_invalidate_dcache64 +_invxcption +_ioc_ack +_isync_mfdec +_kdb_trap +_kdp_backtrace +_kdp_copy_phys +_kdp_dabr +_kdp_noisy +_kdp_pmap +_kdp_print_backtrace +_kdp_print_registers +_kdp_sr_dump +_kdp_trans_off +_kdp_trap +_kdp_trap_codes +_kdp_vtophys +_kernel_args_buf +_kernel_pmap_phys +_killprint +_kprintf_lock +_lap_online +_lastTrace +_lock_debugger +_lowGlo +_m_clattach +_m_lgbuf_alloc +_m_lgbuf_free +_mach_absolute_time_32 +_mach_absolute_time_64 +_machine_act_terminate +_machine_clock_assist +_machine_conf +_machine_idle_ppc +_machine_idle_ret +_mapCtl +_mapInsert +_mapLog +_mapRemove +_mapSearch +_mapSearchFull +_mapSetLists +_mapSetUp +_mapSkipListVerify +_mapSkipListVerifyC +_mapalc1 +_mapalc2 +_mapdebug +_mapping_adjust +_mapping_adjust_call +_mapping_alloc +_mapping_clr_mod +_mapping_clr_ref +_mapping_drop_busy +_mapping_fake_zone_info +_mapping_find 
+_mapping_free +_mapping_free_init +_mapping_free_prime +_mapping_init +_mapping_make +_mapping_map +_mapping_p2v +_mapping_phys_lookup +_mapping_phys_unused +_mapping_prealloc +_mapping_protect +_mapping_protect_phys +_mapping_relpre +_mapping_remove +_mapping_set_ref +_mapping_tst_mod +_mapping_tst_ref +_mapping_verify +_mappingdeb0 +_mappingdeb1 +_max_cpus_initialized +_mem_actual +_mfdar +_mflr +_mfmmcr0 +_mfmmcr1 +_mfmmcr2 +_mfmsr +_mfpmc1 +_mfpmc2 +_mfpmc3 +_mfpmc4 +_mfpvr +_mfrtcl +_mfrtcu +_mfsda +_mfsia +_mfsrin +_mftb +_mftbu +_ml_enable_cache_level +_ml_enable_nap +_ml_ppc_sleep +_ml_probe_read_mck +_ml_probe_read_mck_64 +_ml_read_temp +_ml_restore +_ml_sense_nmi +_ml_set_physical +_ml_set_physical_disabled +_ml_set_physical_get_ffs +_ml_set_processor_speed +_ml_set_processor_voltage +_ml_set_translation_off +_ml_thrm_init +_ml_thrm_set +_ml_throttle +_mtdar +_mtdec +_mtmmcr0 +_mtmmcr1 +_mtmmcr2 +_mtmsr +_mtpmc1 +_mtpmc2 +_mtpmc3 +_mtpmc4 +_mtsdr1 +_mtsrin +_mulckPatch_eieio +_mulckPatch_isync +_mutex_unlock_rwcmb +_name_registry +_nbp_add_multicast +_nbp_delete_entry +_nbp_fillin_nve +_nbp_find_nve +_nbp_input +_nbp_mh_reg +_nbp_new_nve_entry +_nbp_shutdown +_nbp_strhash +_net_access +_net_access_cnt +_net_export +_net_port +_no_of_nets_tried +_no_of_nodes_tried +_nve_lock +_ot_ddp_check_socket +_pat_output +_patch_table +_pbtcnt +_pbtcpu +_pbtlock +_pe_do_clock_test +_pe_run_clock_test +_per_proc_info +_perfIntHook +_perfTrapHook +_perfmon_acquire_facility +_perfmon_clear_counters +_perfmon_control +_perfmon_disable +_perfmon_enable +_perfmon_handle_pmi +_perfmon_init +_perfmon_read_counters +_perfmon_release_facility +_perfmon_set_event +_perfmon_set_event_func +_perfmon_set_tbsel +_perfmon_set_threshold +_perfmon_start_counters +_perfmon_stop_counters +_perfmon_write_counters +_phys_copy +_phys_table +_phystokv +_pktsDropped +_pktsHome +_pktsIn +_pktsOut +_pmapTrans +_pmap_activate +_pmap_add_physical_memory +_pmap_attribute +_pmap_attribute_cache_sync +_pmap_boot_map +_pmap_canExecute +_pmap_deactivate +_pmap_find_physentry +_pmap_map_block +_pmap_map_block_rc +_pmap_mem_regions +_pmap_mem_regions_count +_pmap_nest +_pmap_switch +_pmap_unnest +_powermac_scc_get_datum +_powermac_scc_set_datum +_ppcNull +_ppcNullinst +_ppc_checkthreadstate +_ppc_gettimeofday +_ppc_init +_ppc_init_cpu +_ppc_max_adrsp +_ppc_max_pmaps +_ppc_usimple_lock +_ppc_usimple_lock_init +_ppc_usimple_lock_try +_ppc_usimple_unlock_rwcmb +_ppc_usimple_unlock_rwmb +_ppc_vm_cpu_init +_ppc_vm_init +_ppcscret +_pper_proc_info +_prep_ZIP_reply_packet +_print_backtrace +_probe_cb +_pthread_getspecific_sprg3 +_pthread_getspecific_uftrap +_pthread_self_sprg3 +_pthread_self_uftrap +_qAddToEnd +_qfind_m +_rcv_connection_id +_reboot_how +_refall_lock +_regDefaultZone +_releaseData +_resetPOR +_resethandler_target +_retFromVM +_routerStart +_router_added +_router_killed +_routershutdown +_routing_needed +_rt_bdelete +_rt_binsert +_rt_blookup +_rt_delete +_rt_getNextRoute +_rt_insert +_rt_show +_rt_sortedshow +_rt_table_init +_rtclock_decrementer_min +_rtmp_dropper +_rtmp_init +_rtmp_input +_rtmp_prep_new_packet +_rtmp_purge +_rtmp_r_find_bridge +_rtmp_router_input +_rtmp_router_start +_rtmp_send_port +_rtmp_send_port_funnel +_rtmp_shutdown +_rtmp_timeout +_save_adjust +_save_alloc +_save_cpv +_save_fake_zone_info +_save_get +_save_get_init +_save_get_phys_32 +_save_get_phys_64 +_save_queue +_save_recover +_save_release +_save_ret +_save_ret_phys +_save_ret_wMSR +_save_trim_free +_saveanchor +_savearea_init 
+_scb_free_list +_scb_resource_m +_scb_used_list +_scc +_scc_funnel_initted +_scc_getc +_scc_param +_scc_parm_done +_scc_probe +_scc_putc +_scc_softc +_scc_std +_scc_stomp +_scc_tty +_scc_uses_modem_control +_sconowner +_sectKLDB +_sectSizeKLD +_serial_initted +_serial_keyboard_init +_serial_keyboard_poll +_serial_keyboard_start +_serialmode +_setLocalZones +_setPmon +_set_machine_current_act +_sethzonehash +_shadow_BAT +_shandler +_sharedPage +_sharedPmap +_sip_input +_snmpFlags +_snmpStats +_spinlock_32_lock_mp +_spinlock_32_lock_up +_spinlock_32_try_mp +_spinlock_32_try_up +_spinlock_32_unlock_mp +_spinlock_32_unlock_up +_spinlock_64_lock_mp +_spinlock_64_lock_up +_spinlock_64_try_mp +_spinlock_64_try_up +_spinlock_64_unlock_mp +_spinlock_64_unlock_up +_stFloat +_stSpecrs +_stVectors +_static_memory_end +_sulckPatch_eieio +_sulckPatch_isync +_switchIntoVM +_switchSegs +_switch_in +_switch_to_old_console +_switch_to_video_console +_syncClkSpot +_sync_cache +_sync_cache64 +_sync_cache_virtual +_sync_ppage +_sys_ATPgetreq +_sys_ATPgetrsp +_sys_ATPsndreq +_sys_ATPsndrsp +_sys_ATgetmsg +_sys_ATputmsg +_sys_ATsocket +_syscall_error +_syscall_notify_interrupt +_syscall_trace +_syscall_trace_end +_sysctl__net_appletalk +_sysctl__net_appletalk_children +_sysctl__net_appletalk_ddpstats +_sysctl__net_appletalk_debug +_sysctl__net_appletalk_routermix +_taproot_addr +_taproot_size +_testPerfTrap +_thandler +_thread_adjuserstack +_thread_enable_fpe +_thread_setentrypoint +_thread_setuserstack +_tlbie +_toss_live_fpu +_toss_live_vec +_trackrouter +_trackrouter_rem_if +_trap +_trcWork +_trp_tmo_rcb +_tstbit +_ttalk_multicast_addr +_unlock_debugger +_update_tmo +_upshift8 +_uwritec +_vcgetc +_vec_save +_vec_switch +_vm_max_address +_vm_max_physical +_vmm_dispatch +_vmm_dispatch_table +_vmm_execute_vm +_vmm_exit +_vmm_fam_exc +_vmm_fam_pf +_vmm_fam_reserved +_vmm_force_exit +_vmm_get_XA +_vmm_get_adsp +_vmm_get_entry +_vmm_get_features +_vmm_get_features_sel +_vmm_get_float_state +_vmm_get_page_dirty_flag +_vmm_get_page_dirty_flag32 +_vmm_get_page_mapping +_vmm_get_page_mapping32 +_vmm_get_timer +_vmm_get_vector_state +_vmm_get_version +_vmm_get_version_sel +_vmm_init_context +_vmm_init_context_sel +_vmm_interrupt +_vmm_map_execute +_vmm_map_execute32 +_vmm_map_list +_vmm_map_list32 +_vmm_map_list64 +_vmm_map_page +_vmm_map_page32 +_vmm_max_addr +_vmm_protect_execute +_vmm_protect_execute32 +_vmm_protect_page +_vmm_protect_page32 +_vmm_set_XA +_vmm_set_timer +_vmm_stop_vm +_vmm_tear_down_all +_vmm_tear_down_context +_vmm_timer_pop +_vmm_ufp +_vmm_unmap_all_pages +_vmm_unmap_list +_vmm_unmap_page +_vmm_unmap_page32 +_xLoadDBATsLL +_xLoadIBATsLL +_xpatcnt +_xsum_assym +_zip_control +_zip_handle_getmyzone +_zip_prep_query_packet +_zip_reply_received +_zip_reply_to_getlocalzones +_zip_reply_to_getzonelist +_zip_router_input +_zip_sched_getnetinfo +_zip_send_queries +_zip_type_packet +_zonename_equal +_zt_add_zone +_zt_add_zonename +_zt_clr_zmap +_zt_compute_hash +_zt_ent_zcount +_zt_ent_zindex +_zt_find_zname +_zt_getNextZone +_zt_get_zmcast +_zt_remove_zones +_zt_set_zmap +_zt_upper_zname +dbgCkptCall +dbgDispCall +dbgRegsCall +debstash +fwdisplock +hexTab +hexfont +iNullCall diff --git a/iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.cpp b/iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.cpp deleted file mode 100644 index 72ada3cc2..000000000 --- a/iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.cpp +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
- *
- * AppleI386CPU.cpp
- *
- * March 6, 2000 jliu
- * Created based on AppleCPU.
- */
-
-#include "AppleI386CPU.h"
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-#undef super
-#define super IOCPU
-
-OSDefineMetaClassAndStructors(AppleI386CPU, IOCPU);
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-bool AppleI386CPU::start(IOService * provider)
-{
-//  kern_return_t result;
-
-    if (!super::start(provider)) return false;
-
-    cpuIC = new AppleI386CPUInterruptController;
-    if (cpuIC == 0) return false;
-
-    if (cpuIC->initCPUInterruptController(1) != kIOReturnSuccess)
-        return false;
-
-    cpuIC->attach(this);
-
-    cpuIC->registerCPUInterruptController();
-
-#ifdef NOTYET
-    // Register this CPU with mach.
-    result = ml_processor_register((cpu_id_t)this, 0,
-                                   &machProcessor, &ipi_handler, true);
-    if (result == KERN_FAILURE) return false;
-#endif
-
-    setCPUState(kIOCPUStateUninitalized);
-
-#ifdef NOTYET
-    processor_start(machProcessor);
-#endif
-
-    // Hack. Call initCPU() ourself since no one else will.
-    initCPU(true);
-
-    registerService();
-
-    return true;
-}
-
-void AppleI386CPU::initCPU(bool /*boot*/)
-{
-    cpuIC->enableCPUInterrupt(this);
-
-    setCPUState(kIOCPUStateRunning);
-}
-
-void AppleI386CPU::quiesceCPU(void)
-{
-}
-
-kern_return_t AppleI386CPU::startCPU(vm_offset_t /*start_paddr*/,
-                                     vm_offset_t /*arg_paddr*/)
-{
-    return KERN_FAILURE;
-}
-
-void AppleI386CPU::haltCPU(void)
-{
-}
-
-const OSSymbol * AppleI386CPU::getCPUName(void)
-{
-    return OSSymbol::withCStringNoCopy("Primary0");
-}
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-#undef super
-#define super IOCPUInterruptController
-
-OSDefineMetaClassAndStructors(AppleI386CPUInterruptController,
-                              IOCPUInterruptController);
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-IOReturn AppleI386CPUInterruptController::handleInterrupt(void * /*refCon*/,
-                                                          IOService * /*nub*/,
-                                                          int source)
-{
-    IOInterruptVector * vector;
-
-    // Override the implementation in IOCPUInterruptController to
-    // dispatch interrupts the old way.
-    //
-    // source argument is ignored, use the first IOCPUInterruptController
-    // in the vector array.
-    //
-    vector = &vectors[0];
-
-    if (!vector->interruptRegistered)
-        return kIOReturnInvalid;
-
-    vector->handler(vector->target,
-                    vector->refCon,
-                    vector->nub,
-                    source);
-
-    return kIOReturnSuccess;
-}
diff --git a/iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.h b/iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.h
deleted file mode 100644
index 2fbf2413d..000000000
--- a/iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
- *
- * AppleI386CPU.h
- *
- * March 6, 2000 jliu
- * Created based on AppleCPU.
- */
-
-#ifndef _IOKIT_APPLEI386CPU_H
-#define _IOKIT_APPLEI386CPU_H
-
-#include <IOKit/IOCPU.h>
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-class AppleI386CPU : public IOCPU
-{
-    OSDeclareDefaultStructors(AppleI386CPU);
-
-private:
-    IOCPUInterruptController * cpuIC;
-
-public:
-    virtual bool start(IOService * provider);
-    virtual void initCPU(bool boot);
-    virtual void quiesceCPU(void);
-    virtual kern_return_t startCPU(vm_offset_t start_paddr,
-                                   vm_offset_t arg_paddr);
-    virtual void haltCPU(void);
-    virtual const OSSymbol * getCPUName(void);
-};
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-class AppleI386CPUInterruptController : public IOCPUInterruptController
-{
-    OSDeclareDefaultStructors(AppleI386CPUInterruptController);
-
-public:
-    virtual IOReturn handleInterrupt(void * refCon,
-                                     IOService * nub,
-                                     int source);
-};
-
-#endif /* ! _IOKIT_APPLEI386CPU_H */
diff --git a/iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.cpp b/iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.cpp
deleted file mode 100644
index b0f943b7c..000000000
--- a/iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.cpp
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. - * - * HISTORY - */ - -#include - -#include - -#include -#include -#include -#include - -#include -#include "AppleI386PlatformExpert.h" - -#include - -__BEGIN_DECLS -extern void kdreboot(void); -__END_DECLS -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -#define super IOPlatformExpert - -OSSymbol * gIntelPICName; - -OSDefineMetaClassAndStructors(AppleI386PlatformExpert, IOPlatformExpert) - -IOService * AppleI386PlatformExpert::probe(IOService * /* provider */, - SInt32 * score ) -{ - *score = 2000; - - return (this); -} - -bool AppleI386PlatformExpert::start(IOService * provider) -{ - gIntelPICName = (OSSymbol *) OSSymbol::withCStringNoCopy("intel-pic"); - - setBootROMType(kBootROMTypeNewWorld); /* hammer to new world for i386 */ - -// setupPIC(provider); - - if (!super::start(provider)) - return false; - - // Install halt/restart handler. - - PE_halt_restart = handlePEHaltRestart; - - return true; -} - -IOService * AppleI386PlatformExpert::createNub(OSDictionary * from) -{ - IOService * nub; - OSData * prop; - KERNBOOTSTRUCT * bootStruct; - - nub = super::createNub(from); - - if (nub) - { - if (0 == strcmp( "pci", nub->getName())) - { - bootStruct = (KERNBOOTSTRUCT *) PE_state.bootArgs; - prop = OSData::withBytesNoCopy(&bootStruct->pciInfo, - sizeof(bootStruct->pciInfo)); - assert(prop); - if (prop) - from->setObject( "pci-bus-info", prop); - } - else if (0 != strcmp("intel-pic", nub->getName())) - { - setupPIC(nub); - } - } - - return (nub); -} - -#define kNumVectors 16 - -void -AppleI386PlatformExpert::setupPIC(IOService *nub) -{ - int i; - OSDictionary * propTable; - OSArray * controller; - OSArray * specifier; - OSData * tmpData; - long tmpLong; - - propTable = nub->getPropertyTable(); - - // - // For the moment.. assume a classic 8259 interrupt controller - // with 16 interrupts. - // - // Later, this will be changed to detect an APIC and/or MP-Table - // and then will set the nubs appropriately. - - // Create the interrupt specifier array. - specifier = OSArray::withCapacity(kNumVectors); - assert(specifier); - for (i = 0; i < kNumVectors; i++) { - tmpLong = i; - tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong)); - specifier->setObject(tmpData); - } - - // Create the interrupt controller array. - controller = OSArray::withCapacity(kNumVectors); - assert(controller); - for (i = 0; i < kNumVectors; i++) - controller->setObject(gIntelPICName); - - // Put the two arrays into the property table. - propTable->setObject(gIOInterruptControllersKey, controller); - propTable->setObject(gIOInterruptSpecifiersKey, specifier); - - // Release the arrays after being added to the property table.
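[Illustrative aside before the release calls that follow: the two parallel arrays give IOKit one controller name and one vector specifier per interrupt index. A minimal sketch, not in this patch, of how a consumer would read a specifier back; vectorForIndexZero is a hypothetical helper.]

    // Hedged sketch: recover the vector number setupPIC() stored for a
    // nub's interrupt index 0. Each specifier was built above with
    // OSData::withBytes(&tmpLong, sizeof(tmpLong)).
    static long vectorForIndexZero(IOService *nub)
    {
        OSArray *specs = OSDynamicCast(OSArray,
                             nub->getProperty(gIOInterruptSpecifiersKey));
        OSData  *d = specs ? OSDynamicCast(OSData, specs->getObject(0)) : 0;

        return d ? *(const long *) d->getBytesNoCopy() : -1;
    }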
- specifier->release(); - controller->release(); -} - -bool -AppleI386PlatformExpert::matchNubWithPropertyTable(IOService * nub, - OSDictionary * propTable ) -{ - OSString * nameProp; - OSString * match; - - if (0 == (nameProp = (OSString *) nub->getProperty(gIONameKey))) - return (false); - - if ( 0 == (match = (OSString *) propTable->getObject(gIONameMatchKey))) - return (false); - - return (match->isEqualTo( nameProp )); -} - -bool AppleI386PlatformExpert::getMachineName( char * name, int maxLength ) -{ - strncpy( name, "x86", maxLength ); - - return (true); -} - -bool AppleI386PlatformExpert::getModelName( char * name, int maxLength ) -{ - strncpy( name, "x86", maxLength ); - - return (true); -} - -int AppleI386PlatformExpert::handlePEHaltRestart( unsigned int type ) -{ - int ret = 1; - - switch ( type ) - { - case kPERestartCPU: - // Use the pexpert service to reset the system through - // the keyboard controller. - kdreboot(); - break; - - case kPEHaltCPU: - default: - ret = -1; - break; - } - - return ret; -} diff --git a/iokit/Drivers/platform/drvAppleIntelClassicPIC/AppleIntelClassicPIC.h b/iokit/Drivers/platform/drvAppleIntelClassicPIC/AppleIntelClassicPIC.h deleted file mode 100644 index 3eacd086d..000000000 --- a/iokit/Drivers/platform/drvAppleIntelClassicPIC/AppleIntelClassicPIC.h +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. - * - * DRI: Josh de Cesare - * - */ - -#ifndef _IOKIT_APPLEINTELCLASSICPIC_H -#define _IOKIT_APPLEINTELCLASSICPIC_H - -#include -#include - -#define kClockIRQ 0 // FIXME for SMP systems. - -#define kPIC1BasePort 0x20 -#define kPIC2BasePort 0xa0 - -#define kPIC1TriggerTypePort 0x4d0 -#define kPIC2TriggerTypePort 0x4d1 - -#define kPICCmdPortOffset 0 -#define kPICDataPortOffset 1 - -#define kEOICommand 0x20 - -#define kPICSlaveID 2 // Slave ID for second PIC - -#define kNumVectors 16 - -#define IS_SLAVE_VECTOR(x) ((x) & 8) - -// ICW1 -// -#define kPIC_ICW1(x) ((x) + kPICCmdPortOffset) -#define kPIC_ICW1_MBO 0x10 // must be one -#define kPIC_ICW1_LTIM 0x08 // level/edge triggered mode -#define kPIC_ICW1_ADI 0x04 // 4/8 byte call address interval -#define kPIC_ICW1_SNGL 0x02 // single/cascade mode -#define kPIC_ICW1_IC4 0x01 // ICW4 needed/not needed - -// ICW2 - Interrupt vector address (bits 7 - 3). -// -#define kPIC_ICW2(x) ((x) + kPICDataPortOffset) - -// ICW3 - Slave device. 
-// -#define kPIC_ICW3(x) ((x) + kPICDataPortOffset) - -// ICW4 -// -#define kPIC_ICW4(x) ((x) + kPICDataPortOffset) -#define kPIC_ICW4_SFNM 0x10 // special fully nested mode -#define kPIC_ICW4_BUF 0x08 // buffered mode -#define kPIC_ICW4_MS 0x04 // master/slave -#define kPIC_ICW4_AEOI 0x02 // automatic end of interrupt mode -#define kPIC_ICW4_uPM 0x01 // 8088 (vs. 8085) operation - -// OCW1 - Interrupt mask. -// -#define kPIC_OCW1(x) ((x) + kPICDataPortOffset) - -// OCW2 - Bit 4 must be zero. -// -#define kPIC_OCW2(x) ((x) + kPICCmdPortOffset) -#define kPIC_OCW2_R 0x80 // rotation -#define kPIC_OCW2_SL 0x40 // specific -#define kPIC_OCW2_EOI 0x20 -#define kPIC_OCW2_LEVEL(x) ((x) & 0x07) - -// OCW3 - Bit 4 must be zero. -// -#define kPIC_OCW3(x) ((x) + kPICCmdPortOffset) -#define kPIC_OCW3_ESMM 0x40 // special mask mode -#define kPIC_OCW3_SMM 0x20 -#define kPIC_OCW3_MBO 0x08 // must be one -#define kPIC_OCW3_P 0x04 // poll -#define kPIC_OCW3_RR 0x02 -#define kPIC_OCW3_RIS 0x01 - - -class AppleIntelClassicPIC : public IOInterruptController -{ - OSDeclareDefaultStructors( AppleIntelClassicPIC ); - -protected: - volatile UInt16 maskInterrupts; /* Which interrupts are masked out */ - UInt16 triggerTypes; /* Interrupt trigger type mask */ - - inline int getTriggerType(long irq) - { - return ( triggerTypes & (1 << irq) ) ? - kIOInterruptTypeLevel : kIOInterruptTypeEdge; - } - - inline void updateMask(long irq) - { - if ( IS_SLAVE_VECTOR(irq) ) - outb( kPIC_OCW1(kPIC2BasePort), maskInterrupts >> 8 ); - else - outb( kPIC_OCW1(kPIC1BasePort), maskInterrupts & 0xff ); - } - - inline void disableInterrupt(long irq) - { - maskInterrupts |= (1 << irq); - updateMask(irq); - } - - inline void enableInterrupt(long irq) - { - maskInterrupts &= ~(1 << irq); - updateMask(irq); - } - - inline void ackInterrupt(long irq) - { - if ( IS_SLAVE_VECTOR(irq) ) - outb( kPIC_OCW2(kPIC2BasePort), kEOICommand ); - outb( kPIC_OCW2(kPIC1BasePort), kEOICommand ); - } - - virtual void initializePIC(UInt16 port, - UInt8 icw1, UInt8 icw2, - UInt8 icw3, UInt8 icw4); - -public: - virtual bool start(IOService * provider); - virtual void free(void); - - // Methods that must be implemented by simplified interrupt controllers. - - virtual int getVectorType(long vectorNumber, IOInterruptVector * vector); - virtual IOInterruptAction getInterruptHandlerAddress(void); - virtual IOReturn handleInterrupt(void * refCon, IOService * nub, int source); - virtual bool vectorCanBeShared(long vectorNumber, IOInterruptVector * vector); - virtual void initVector(long vectorNumber, IOInterruptVector * vector); - virtual void disableVectorHard(long vectorNumber, IOInterruptVector * vector); - virtual void enableVector(long vectorNumber, IOInterruptVector * vector); -}; - -#endif /* ! _IOKIT_APPLEINTELCLASSICPIC_H */ diff --git a/iokit/Drivers/platform/drvAppleIntelClassicPIC/PIC8259.cpp b/iokit/Drivers/platform/drvAppleIntelClassicPIC/PIC8259.cpp deleted file mode 100644 index bc080d0e9..000000000 --- a/iokit/Drivers/platform/drvAppleIntelClassicPIC/PIC8259.cpp +++ /dev/null @@ -1,322 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License.
Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. - * - * DRI: Michael Burg - */ - -#include -#include -#include -#include "AppleIntelClassicPIC.h" - -// This must agree with the trap number reported by the low-level -// interrupt handler (osfmk/i386/locore.s). - -#define kIntelReservedIntVectors 0x40 - -extern OSSymbol * gIntelPICName; - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -#undef super -#define super IOInterruptController - -OSDefineMetaClassAndStructors(AppleIntelClassicPIC, IOInterruptController); - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -bool AppleIntelClassicPIC::start(IOService * provider) -{ - IOInterruptAction handler; - - if ( super::start(provider) == false ) return false; - - // Allocate the memory for the vectors. - - vectors = (IOInterruptVector *) IOMalloc( kNumVectors * - sizeof(IOInterruptVector) ); - if ( vectors == NULL ) return false; - - bzero(vectors, kNumVectors * sizeof(IOInterruptVector)); - - // Allocate locks for the vectors. - - for ( int cnt = 0; cnt < kNumVectors; cnt++ ) - { - vectors[cnt].interruptLock = IOLockAlloc(); - - if ( vectors[cnt].interruptLock == NULL ) - { - return false; - } - } - - // Mask out the interrupts except for the cascade line. - - maskInterrupts = 0xffff & ~(1 << kPICSlaveID); - - // Initialize master PIC. - - initializePIC( kPIC1BasePort, - /* ICW1 */ kPIC_ICW1_IC4, - /* ICW2 */ kIntelReservedIntVectors, - /* ICW3 */ (1 << kPICSlaveID), - /* ICW4 */ kPIC_ICW4_uPM ); - - // Write to OCW1, OCW3, OCW2. - // The priority order is changed to (highest to lowest) - // 3 4 5 6 7 0 1 2 - // The default priority after initialization is (highest to lowest) - // 0 1 2 3 4 5 6 7 - - outb( kPIC_OCW1(kPIC1BasePort), maskInterrupts & 0xff ); - outb( kPIC_OCW3(kPIC1BasePort), kPIC_OCW3_MBO | kPIC_OCW3_RR ); - outb( kPIC_OCW2(kPIC1BasePort), kPIC_OCW2_R | - kPIC_OCW2_SL | - kPIC_OCW2_LEVEL(2) ); - - // Initialize slave PIC. - - initializePIC( kPIC2BasePort, - /* ICW1 */ kPIC_ICW1_IC4, - /* ICW2 */ kIntelReservedIntVectors + 8, - /* ICW3 */ kPICSlaveID, - /* ICW4 */ kPIC_ICW4_uPM ); - - // Write to OCW1, and OCW3. - - outb( kPIC_OCW1(kPIC2BasePort), maskInterrupts >> 8 ); - outb( kPIC_OCW3(kPIC2BasePort), kPIC_OCW3_MBO | kPIC_OCW3_RR ); - - // Record trigger type. - - triggerTypes = inb( kPIC1TriggerTypePort ) | - ( inb( kPIC2TriggerTypePort ) << 8 ); - - // Primary interrupt controller - - getPlatform()->setCPUInterruptProperties(provider); - - // Register the interrupt handler function so it can service interrupts. - - handler = getInterruptHandlerAddress(); - if ( provider->registerInterrupt(0, this, handler, 0) != kIOReturnSuccess ) - panic("AppleIntelClassicPIC: Failed to install platform interrupt handler"); - - provider->enableInterrupt(0); - - // Register this interrupt controller so clients can find it.
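[Illustrative aside before the registration call that follows: the OCW3 writes above arm the 8259's register read-back mode. A small hedged sketch, using only the kPIC_* macros from the deleted header, of how the In-Service Register could be polled; readMasterISR is a hypothetical helper, not code from this patch.]

    // Hedged sketch: OCW3 with RR selects register read-back on the next
    // read of the command port; adding RIS selects the In-Service Register
    // (ISR) rather than the Interrupt Request Register (IRR).
    static inline UInt8 readMasterISR(void)
    {
        outb(kPIC_OCW3(kPIC1BasePort),
             kPIC_OCW3_MBO | kPIC_OCW3_RR | kPIC_OCW3_RIS);
        return inb(kPIC_OCW3(kPIC1BasePort));
    }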
- - getPlatform()->registerInterruptController(gIntelPICName, this); - - return true; -} - -//--------------------------------------------------------------------------- -// Free the interrupt controller object. Deallocate all resources. - -void AppleIntelClassicPIC::free(void) -{ - if ( vectors ) - { - for ( int cnt = 0; cnt < kNumVectors; cnt++ ) - { - if (vectors[cnt].interruptLock) - IOLockFree(vectors[cnt].interruptLock); - } - - IOFree( vectors, kNumVectors * sizeof(IOInterruptVector) ); - vectors = 0; - } - - super::free(); -} - -//--------------------------------------------------------------------------- -// Initialize the PIC by sending the Initialization Command Words (ICW). - -void AppleIntelClassicPIC::initializePIC( UInt16 port, - UInt8 icw1, UInt8 icw2, - UInt8 icw3, UInt8 icw4 ) -{ - // Initialize 8259's. Start the initialization sequence by - // issuing ICW1 (Initialization Command Word 1). - // Bit 4 must be set. - - outb( kPIC_ICW1(port), kPIC_ICW1_MBO | icw1 ); - - // ICW2 - // Upper 5 bits of the interrupt vector address. The lower three - // bits are set according to the interrupt level serviced. - - outb( kPIC_ICW2(port), icw2 ); - - // ICW3 (Master Device) - // Set a 1 bit for each IR line that has a slave. - - outb( kPIC_ICW3(port), icw3 ); - - // ICW4 - - outb( kPIC_ICW4(port), icw4 ); -} - -//--------------------------------------------------------------------------- -// Report whether the interrupt line is edge or level triggered. - -int AppleIntelClassicPIC::getVectorType(long vectorNumber, - IOInterruptVector * vector) -{ - return getTriggerType(vectorNumber); -} - -//--------------------------------------------------------------------------- -// - -IOInterruptAction AppleIntelClassicPIC::getInterruptHandlerAddress(void) -{ - return (IOInterruptAction) &AppleIntelClassicPIC::handleInterrupt; -} - -//--------------------------------------------------------------------------- -// Handle an interrupt by servicing the 8259, and dispatch the -// handler associated with the interrupt vector. - -IOReturn AppleIntelClassicPIC::handleInterrupt(void * savedState, - IOService * nub, - int source) -{ - IOInterruptVector * vector; - long vectorNumber; - - typedef void (*IntelClockFuncType)(void *); - IntelClockFuncType clockFunc; - - vectorNumber = source - kIntelReservedIntVectors; - - if (vectorNumber >= kNumVectors) - return kIOReturnSuccess; - - // Disable and ack interrupt. - - disableInterrupt(vectorNumber); - ackInterrupt( vectorNumber); - - // Process the interrupt. - - vector = &vectors[vectorNumber]; - - vector->interruptActive = 1; - - if ( !vector->interruptDisabledSoft ) - { - if ( vector->interruptRegistered ) - { - // Call registered interrupt handler. - - if (vectorNumber == kClockIRQ) // FIXME - { - clockFunc = (IntelClockFuncType) vector->handler; - clockFunc(savedState); - } - else - { - vector->handler(vector->target, vector->refCon, - vector->nub, vector->source); - } - - // interruptDisabledSoft flag may be set by the - // handler to indicate that the interrupt should - // be disabled. - - if ( vector->interruptDisabledSoft ) - { - // Already "hard" disabled, set interruptDisabledHard - // to indicate this. - - vector->interruptDisabledHard = 1; - } - else - { - // Re-enable the interrupt line. 
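[Illustrative aside on the interruptDisabledSoft / interruptDisabledHard pair used above: it lets a handler leave its line masked after returning. A hedged sketch of the client side, assuming the standard IOService::disableInterrupt() path that sets the soft flag; deferringHandler is a hypothetical name.]

    // Hedged sketch: a handler that defers its work and keeps the line
    // masked. The dispatch loop above then skips the re-enable and records
    // interruptDisabledHard = 1 until enableInterrupt() is called later.
    static void deferringHandler(OSObject *target, void *refCon,
                                 IOService *nub, int source)
    {
        nub->disableInterrupt(source);    // stays masked on return
    }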
- - enableInterrupt(vectorNumber); - } - } - } - else - { - vector->interruptDisabledHard = 1; - } - - vector->interruptActive = 0; - - return kIOReturnSuccess; -} - -//--------------------------------------------------------------------------- -// - -bool AppleIntelClassicPIC::vectorCanBeShared(long vectorNumber, - IOInterruptVector * vector) -{ - if ( getVectorType(vectorNumber, vector) == kIOInterruptTypeLevel ) - return true; - else - return false; -} - -//--------------------------------------------------------------------------- -// - -void AppleIntelClassicPIC::initVector(long vectorNumber, - IOInterruptVector * vector) -{ - super::initVector(vectorNumber, vector); -} - -//--------------------------------------------------------------------------- -// - -void AppleIntelClassicPIC::disableVectorHard(long vectorNumber, - IOInterruptVector * vector) -{ - // Sorry, the cascade/slave interrupt line cannot be disabled. - - if (vectorNumber == kPICSlaveID) return; - - disableInterrupt(vectorNumber); -} - -//--------------------------------------------------------------------------- -// - -void AppleIntelClassicPIC::enableVector(long vectorNumber, - IOInterruptVector * vector) -{ - enableInterrupt(vectorNumber); -} diff --git a/iokit/Drivers/platform/drvAppleMacIO/AppleMacIO.cpp b/iokit/Drivers/platform/drvAppleMacIO/AppleMacIO.cpp index 5242a6fd6..407b02a57 100644 --- a/iokit/Drivers/platform/drvAppleMacIO/AppleMacIO.cpp +++ b/iokit/Drivers/platform/drvAppleMacIO/AppleMacIO.cpp @@ -267,19 +267,30 @@ OSMetaClassDefineReservedUnused(AppleMacIODevice, 3); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ bool AppleMacIODevice::compareName( OSString * name, - OSString ** matched = 0 ) const + OSString ** matched ) const { - return( ((AppleMacIO *)getProvider())-> - compareNubName( this, name, matched )); + return (IODTCompareNubName(this, name, matched) || + IORegistryEntry::compareName(name, matched)); } IOService * AppleMacIODevice::matchLocation( IOService * /* client */ ) { - return( this ); + return this; } IOReturn AppleMacIODevice::getResources( void ) { - return( ((AppleMacIO *)getProvider())->getNubResources( this )); + IOService *macIO = this; + + if (getDeviceMemory() != 0) return kIOReturnSuccess; + + while (macIO && ((macIO = macIO->getProvider()) != 0)) + if (strcmp("mac-io", macIO->getName()) == 0) break; + + if (macIO == 0) return kIOReturnError; + + IODTResolveAddressing(this, "reg", macIO->getDeviceMemoryWithIndex(0)); + + return kIOReturnSuccess; } diff --git a/iokit/Drivers/platform/drvAppleNMI/AppleNMI.cpp b/iokit/Drivers/platform/drvAppleNMI/AppleNMI.cpp index bf9965c97..74ceaaa95 100644 --- a/iokit/Drivers/platform/drvAppleNMI/AppleNMI.cpp +++ b/iokit/Drivers/platform/drvAppleNMI/AppleNMI.cpp @@ -136,12 +136,12 @@ IOReturn AppleNMI::powerStateWillChangeTo ( IOPMPowerFlags theFlags, unsigned lo // Mask NMI and change from edge to level whilst sleeping (copied directly from OS9 code) nmiIntSourceAddr = (volatile unsigned long *)kExtInt9_NMIIntSource; - nmiIntSource = *nmiIntSourceAddr; + nmiIntSource = ml_phys_read(nmiIntSourceAddr); nmiIntSource |= kNMIIntLevelMask; - *nmiIntSourceAddr = nmiIntSource; + ml_phys_write(nmiIntSourceAddr, nmiIntSource); eieio(); nmiIntSource |= kNMIIntMask; - *nmiIntSourceAddr = nmiIntSource; + ml_phys_write(nmiIntSourceAddr, nmiIntSource); eieio(); } else @@ -150,12 +150,12 @@ IOReturn AppleNMI::powerStateWillChangeTo ( IOPMPowerFlags theFlags, unsigned lo // Unmask NMI and change back to edge (copied directly from OS9
code) nmiIntSourceAddr = (volatile unsigned long *)kExtInt9_NMIIntSource; - nmiIntSource = *nmiIntSourceAddr; + nmiIntSource = ml_phys_read(nmiIntSourceAddr); nmiIntSource &= ~kNMIIntLevelMask; - *nmiIntSourceAddr = nmiIntSource; + ml_phys_write(nmiIntSourceAddr, nmiIntSource); eieio(); nmiIntSource &= ~kNMIIntMask; - *nmiIntSourceAddr = nmiIntSource; + ml_phys_write(nmiIntSourceAddr, nmiIntSource); eieio(); } } diff --git a/iokit/Drivers/platform/drvApplePMU/IOPMUADBController.cpp b/iokit/Drivers/platform/drvApplePMU/IOPMUADBController.cpp deleted file mode 100644 index f71f313da..000000000 --- a/iokit/Drivers/platform/drvApplePMU/IOPMUADBController.cpp +++ /dev/null @@ -1,452 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * 12 Nov 1998 suurballe Created. - */ - -#include -#include -#include "IOPMUADBController.h" - -#define super IOADBController -OSDefineMetaClassAndStructors(IOPMUADBController, IOADBController) - -// ********************************************************************************** -// start -// -// ********************************************************************************** -IOService * IOPMUADBController::probe( IOService * provider, SInt32 * score ) -{ - if (super::probe(provider, score) == NULL) - return NULL; - - // this adb controller must interface with the pmu, so let's check if it is of the right type: - // so in any case if this is a powerbook G3 1998 or 1999 it has a pmu so: - if (IODTMatchNubWithKeys(getPlatform()->getProvider(), "'AAPL,PowerBook1998'") || - IODTMatchNubWithKeys(getPlatform()->getProvider(), "'PowerBook1,1'")) - return this; - - // If it is a different machine the compatible property will tell us if it is a pmu-driven - // adb device: - OSData *kl = OSDynamicCast(OSData, provider->getProperty("compatible")); - if ((kl != NULL) && kl->isEqualTo("pmu", 3)) - return this; - - // In all the other cases we do not handle it: - return NULL; -} - -// ********************************************************************************** -// start -// -// ********************************************************************************** -bool IOPMUADBController::start ( IOService * nub ) -{ - // Wait for the PMU to show up: - PMUdriver = waitForService(serviceMatching("ApplePMU")); - - // All the commands in this file will generate an interrupt. 
- // since the interrupt is the logical conclusion of those commands - // we need a syncer to synchronize the begin/end of these functions: - waitingForData = NULL; - - // Registers for the two interrupts that we need to handle: - if (PMUdriver->callPlatformFunction("registerForPMUInterrupts", true, (void*) (kPMUADBint | kPMUenvironmentInt), (void*)handleADBInterrupt, (void*)this, NULL) != kIOReturnSuccess) { -#ifdef VERBOSE_LOGS_ON - IOLog("IOPMUADBController::start registerForPMUInterrupts kPMUADBint fails\n"); -#endif // VERBOSE_LOGS_ON - - return false; - } - - // Creates the mutex lock to protect the clients list: - requestMutexLock = NULL; - requestMutexLock = IOLockAlloc(); - if (!requestMutexLock) - return false; - - clamshellOpen = true; - - // This happens last (while the most common place is the beginning) because - // the superclass may need the services of the functions above. - if( !super::start(nub)) - return false; - - return true; -} - -// ********************************************************************************** -// free -// -// ********************************************************************************** -void IOPMUADBController::free ( ) -{ - // Releases the mutex lock used to protect the clients lists: - if (requestMutexLock != NULL) { - IOLockFree (requestMutexLock); - requestMutexLock = NULL; - } - - // And removes the interrupt handler: - if (PMUdriver != NULL) - PMUdriver->callPlatformFunction("deRegisterClient", true, (void*)this, (void*)(kPMUADBint | kPMUenvironmentInt), NULL, NULL); -} - -// ********************************************************************************** -// localSendMiscCommand -// -// ********************************************************************************** -IOReturn IOPMUADBController::localSendMiscCommand(int command, IOByteCount sLength, UInt8 *sBuffer) -{ - IOReturn returnValue = kIOReturnError; - IOByteCount rLength = 1; - UInt8 rBuffer; - - // The purpose of this method is to free us from the pain of creating a parameter block each time - // we wish to talk to the pmu: - SendMiscCommandParameterBlock prmBlock = {command, sLength, sBuffer, &rLength, &rBuffer}; - -#ifdef VERBOSE_LOGS_ON - IOLog("ApplePMUInterface::localSendMiscCommand 0x%02x %d 0x%08lx 0x%08lx 0x%08lx\n", - command, sLength, sBuffer, rLength, rBuffer); -#endif - - if (PMUdriver != NULL) { -#ifdef VERBOSE_LOGS_ON - IOLog("IOPMUADBController::localSendMiscCommand calling PMUdriver->callPlatformFunction\n"); -#endif - returnValue = PMUdriver->callPlatformFunction("sendMiscCommand", true, (void*)&prmBlock, NULL, NULL, NULL); - } - - // If we are here we do not have a driver to talk to: -#ifdef VERBOSE_LOGS_ON - IOLog("IOPMUADBController::localSendMiscCommand end 0x%08lx\n", returnValue); -#endif - - return returnValue; -} - -// ********************************************************************************** -// this is the interrupt handler for all ADB interrupts: -// A.W. Added code to check for clamshell status, and block all ADB traffic except -// for POWER key scan code from default ADB keyboard or devices that connect -// to that keyboard power button.
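[Condensed into a predicate, the clamshell filter described above reads as follows. This is an illustrative restatement of the checks in the handler below, where 0x2c is the autopoll header for the default keyboard's register 0 and 0x7f/0xff are the POWER key down/up codes; passesClamshellFilter is a hypothetical name.]

    // Hedged sketch only: mirrors the buffer[0..2] tests below.
    static bool passesClamshellFilter(const UInt8 *buffer)
    {
        if (buffer[0] != 0x2c)                           // default keyboard, reg 0
            return false;
        return (buffer[1] == 0x7f && buffer[2] == 0x7f)  // POWER key down
            || (buffer[1] == 0xff && buffer[2] == 0xff); // POWER key up
    }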
-// ********************************************************************************** - -/* static */ void -IOPMUADBController::handleADBInterrupt(IOService *client, UInt8 interruptMask, UInt32 length, UInt8 *buffer) -{ - IOPMUADBController *myThis = OSDynamicCast(IOPMUADBController, client); - - // Check if we are the right client for this interrupt: - if (myThis == NULL) - return; - - if (interruptMask & kPMUenvironmentInt) - { - if (buffer) - { - if (*buffer & kClamshellClosedEventMask) - myThis->clamshellOpen = false; - else - myThis->clamshellOpen = true; - } - if ( !(interruptMask & kPMUautopoll)) - { - return; //Nothing left to do - } - } - if ((interruptMask & kPMUautopoll) && (myThis->autopollOn)) - { - if (myThis->clamshellOpen) - { - autopollHandler(client, buffer[0], length - 1, buffer + 1); // yes, call adb input handler - } - else if ( (buffer[0] == 0x2c) && (buffer[1] == 0x7f) && (buffer[2] == 0x7f)) - { - autopollHandler(client, buffer[0], length - 1, buffer + 1); // POWER down - } - else if ( (buffer[0] == 0x2c) && (buffer[1] == 0xff) && (buffer[2] == 0xff)) - { - autopollHandler(client, buffer[0], length - 1, buffer + 1); // POWER up - } - - } - else { - if (myThis->waitingForData != NULL) { - // Completes the adb transaction - myThis->dataLen = length - 1; - bcopy(buffer + 1, myThis->dataBuffer, myThis->dataLen); - myThis->waitingForData->signal(); - } - } -} - - -// ********************************************************************************** -// cancelAllIO -// -// ********************************************************************************** -IOReturn IOPMUADBController::cancelAllIO ( void ) -{ - if (waitingForData != NULL) { - dataLen = 0; // read fails with error, write fails quietly - waitingForData->signal(); - } - return kPMUNoError; -} - - -// ********************************************************************************** -// setAutoPollPeriod -// -// ********************************************************************************** -IOReturn IOPMUADBController::setAutoPollPeriod ( int ) -{ - return kPMUNotSupported; -} - - -// ********************************************************************************** -// getAutoPollPeriod -// -// ********************************************************************************** -IOReturn IOPMUADBController::getAutoPollPeriod ( int * ) -{ - return kPMUNotSupported; -} - - -// ********************************************************************************** -// setAutoPollList -// -// ********************************************************************************** -IOReturn IOPMUADBController::setAutoPollList ( UInt16 PollBitField ) -{ - pollList = PollBitField; // remember the new poll list - - if ( autopollOn ) { - UInt8 oBuffer[4]; - - oBuffer[0] = 0; // Byte count in the rest of the command - oBuffer[1] = 0x86; // adb Command op. - oBuffer[2] = (UInt8)(PollBitField >> 8); // ?? - oBuffer[3] = (UInt8)(PollBitField & 0xff); // ??
- - localSendMiscCommand (kPMUpMgrADB, 4, oBuffer); - } - return kPMUNoError; -} - - -// ********************************************************************************** -// getAutoPollList -// -// ********************************************************************************** -IOReturn IOPMUADBController::getAutoPollList ( UInt16 * activeAddressMask ) -{ - *activeAddressMask = pollList; - return kPMUNoError; -} - - -// ********************************************************************************** -// setAutoPollEnable -// -// ********************************************************************************** -IOReturn IOPMUADBController::setAutoPollEnable ( bool enable ) -{ - UInt8 oBuffer[4]; - - autopollOn = enable; - - if ( enable ) { // enabling autopoll - oBuffer[0] = 0; - oBuffer[1] = 0x86; - oBuffer[2] = (UInt8)(pollList >> 8); - oBuffer[3] = (UInt8)(pollList & 0xff); - - localSendMiscCommand (kPMUpMgrADB, 4, oBuffer); - } - else { // disabling autopoll; - /* Waits one second for the trackpads to be up (this is needed only in old machines) - This is placed here because this is the first call at wake. */ - if (IODTMatchNubWithKeys(getPlatform()->getProvider(), "'PowerBook1,1'") || - IODTMatchNubWithKeys(getPlatform()->getProvider(), "'AAPL,PowerBook1998'")) - IOSleep(1500); - - localSendMiscCommand (kPMUpMgrADBoff, 0, NULL); - } - - return kPMUNoError; -} - - -// ********************************************************************************** -// resetBus -// -// ********************************************************************************** -IOReturn IOPMUADBController::resetBus ( void ) -{ - if (requestMutexLock != NULL) - IOLockLock(requestMutexLock); - - UInt8 oBuffer[4]; - - oBuffer[0] = kPMUResetADBBus; - oBuffer[1] = 0; - oBuffer[2] = 0; - - // Reset bus needs to wait for the interrupt to terminate the transaction: - waitingForData = IOSyncer::create(); - localSendMiscCommand (kPMUpMgrADB, 3, oBuffer); - waitingForData->wait(); // wait till done - waitingForData = 0; - - if (requestMutexLock != NULL) - IOLockUnlock(requestMutexLock); - - /* Waits one second for the trackpads to be up (this is needed only in old machines) */ - if (IODTMatchNubWithKeys(getPlatform()->getProvider(), "'PowerBook1,1'") || - IODTMatchNubWithKeys(getPlatform()->getProvider(), "'AAPL,PowerBook1998'")) - IOSleep(1500); - - return kPMUNoError; -} - - -// ********************************************************************************** -// flushDevice -// -// ********************************************************************************** -IOReturn IOPMUADBController::flushDevice ( IOADBAddress address ) -{ - if (requestMutexLock != NULL) - IOLockLock(requestMutexLock); - - UInt8 oBuffer[4]; - - oBuffer[0] = kPMUFlushADB | (address << kPMUADBAddressField); - oBuffer[1] = ( autopollOn ? 2 : 0 ); - oBuffer[2] = 0; - - // flush device needs to wait for the interrupt to terminate the transaction - waitingForData = IOSyncer::create(); - localSendMiscCommand (kPMUpMgrADB, 3, oBuffer); - waitingForData->wait(); // wait till done - waitingForData = 0; - - if (requestMutexLock != NULL) - IOLockUnlock(requestMutexLock); - - return kPMUNoError; -} - - -// ********************************************************************************** -// readFromDevice -// -// The length parameter is ignored on entry. It is set on exit to reflect -// the number of bytes read from the device.
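[A hypothetical caller of the routine that follows, to make the in/out length contract concrete. The address/register values are arbitrary, and the buffer is sized for the 8-byte worst case of an ADB reply; this fragment is a hedged sketch, not code from this patch.]

    // Hedged sketch: Talk Register 1 of the device at ADB address 3.
    UInt8       buf[8];                 // worst-case ADB reply
    IOByteCount len = sizeof(buf);
    IOReturn    ret = readFromDevice(3, 1, buf, &len);

    if (ret == ADB_RET_OK) {
        // len now holds the byte count the device actually returned;
        // ADB_RET_NOTPRESENT means nothing answered at that address.
    }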
-// ********************************************************************************** -IOReturn IOPMUADBController::readFromDevice ( IOADBAddress address, IOADBRegister adbRegister, - UInt8 * data, IOByteCount * length ) -{ - if ( (length == NULL) || (data == NULL) ) { - return kPMUParameterError; - } - - if (requestMutexLock != NULL) - IOLockLock(requestMutexLock); - - UInt8 oBuffer[4]; - - oBuffer[0] = kPMUReadADB | (address << kPMUADBAddressField) | (adbRegister); - oBuffer[1] = ( autopollOn ? 2 : 0 ); - oBuffer[2] = 0; - - // read from device needs to wait for the interrupt to terminate the transaction - // and to obtain the data from the device. - waitingForData = IOSyncer::create(); - localSendMiscCommand (kPMUpMgrADB, 3, oBuffer); - waitingForData->wait(); // wait till done - waitingForData = 0; - - // set caller's length - *length = (dataLen < *length ? dataLen : *length); - bcopy(dataBuffer, data, *length); - - if (requestMutexLock != NULL) - IOLockUnlock(requestMutexLock); - - if (dataLen == 0 ) { // nothing read; device isn't there - return ADB_RET_NOTPRESENT; - } - - return ADB_RET_OK; -} - - -// ********************************************************************************** -// writeToDevice -// -// ********************************************************************************** -IOReturn IOPMUADBController::writeToDevice ( IOADBAddress address, IOADBRegister adbRegister, - UInt8 * data, IOByteCount * length ) -{ - // Last check on * length > (252): since the pmu registers are 8 bit - // and the buffer has the first 3 bytes used for the standard parameters - // the max length cannot be more than 252 bytes. - if ( (* length == 0) || (data == NULL) || (* length > 252) ) - { - return kPMUParameterError; - } - - if (address == 0) - return kPMUNoError; // for now let's ignore these ... - - if (requestMutexLock != NULL) - IOLockLock(requestMutexLock); - - UInt8 oBuffer[256]; - - oBuffer[0] = kPMUWriteADB | (address << kPMUADBAddressField) | (adbRegister); - oBuffer[1] = ( autopollOn ? 2 : 0 ); - oBuffer[2] = *length; - bcopy(data, &oBuffer[3], *length); - - // write to the device needs to wait for the interrupt to terminate the transaction - waitingForData = IOSyncer::create(); - localSendMiscCommand (kPMUpMgrADB, 3 + *length, oBuffer); - waitingForData->wait(); - waitingForData = 0; - - if (requestMutexLock != NULL) - IOLockUnlock(requestMutexLock); - - return kPMUNoError; -} - - diff --git a/iokit/Drivers/platform/drvApplePMU/IOPMUADBController.h b/iokit/Drivers/platform/drvApplePMU/IOPMUADBController.h deleted file mode 100644 index 90fc0b6d9..000000000 --- a/iokit/Drivers/platform/drvApplePMU/IOPMUADBController.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file.
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * 12 Nov 1998 suurballe Created. - */ - -#include -#include -#include -#include - -class IOPMUADBController : public IOADBController -{ - OSDeclareDefaultStructors(IOPMUADBController) - -private: - enum { - kPMUNoError = 0, - kPMUInitError = 1, // PMU failed to initialize - kPMUParameterError = 2, // Bad parameters - kPMUNotSupported = 3, // PMU don't do that (Cuda does, though) - kPMUIOError = 4 // Nonspecific I/O failure - }; - - enum { - kPMUpMgrADB = 0x20, // send ADB command - kPMUpMgrADBoff = 0x21, // turn ADB auto-poll off - kPMUreadADB = 0x28, // Apple Desktop Bus - kPMUpMgrADBInt = 0x2F, // get ADB interrupt data (Portable only) - }; - - enum { - kPMUADBAddressField = 4 - }; - - enum { - kPMUResetADBBus = 0x00, - kPMUFlushADB = 0x01, - kPMUWriteADB = 0x08, - kPMUReadADB = 0x0C, - kPMURWMaskADB = 0x0C - }; - - enum { // when kPMUADBint is set - kPMUADBint = 0x10, - kPMUenvironmentInt = 0x40, // Environment changed (clamshell) - kPMUwaitinglsc = 0x01, // waiting to listen to charger - kPMUautoSRQpolling = 0x02, // auto/SRQ polling is enabled - kPMUautopoll = 0x04 // input is autopoll data - }; - - // We need this to callPlatformFunction when sending to sendMiscCommand - typedef struct SendMiscCommandParameterBlock { - int command; - IOByteCount sLength; - UInt8 *sBuffer; - IOByteCount *rLength; - UInt8 *rBuffer; - } SendMiscCommandParameterBlock; - typedef SendMiscCommandParameterBlock *SendMiscCommandParameterBlockPtr; - - // Local data: - IOService *PMUdriver; - UInt32 pollList; // ADB autopoll device bitmap - bool autopollOn; // TRUE: PMU is autopolling - bool clamshellOpen; // Normally TRUE - - UInt32 dataLen; // data len as result of an interrupt - UInt8 dataBuffer[256]; // data as result of an interrupt - IOSyncer *waitingForData; // synchronizer for reads and writes.
- - // Local interrupt handlers: - static void handleADBInterrupt(IOService *client, UInt8 matchingMask, UInt32 length, UInt8 *buffer); - - // This lock protects the access to the common variables of this object: - IOLock *requestMutexLock; - - // A simpler way to interface with the pmu SendMiscCommand - IOReturn localSendMiscCommand(int command, IOByteCount sLength, UInt8 *sBuffer); - -public: - IOService *probe( IOService * nub, SInt32 * score ); - bool start ( IOService * ); - void free (); - IOReturn setAutoPollPeriod ( int microseconds ); - IOReturn getAutoPollPeriod ( int * microseconds ); - IOReturn setAutoPollList ( UInt16 activeAddressMask ); - IOReturn getAutoPollList ( UInt16 * activeAddressMask ); - IOReturn setAutoPollEnable ( bool enable ); - IOReturn resetBus ( void ); - IOReturn cancelAllIO ( void ); - IOReturn flushDevice ( IOADBAddress address ); - IOReturn readFromDevice ( IOADBAddress address, IOADBRegister adbRegister, UInt8 * data, IOByteCount * length ); - IOReturn writeToDevice ( IOADBAddress address, IOADBRegister adbRegister, UInt8 * data, IOByteCount * length ); -}; diff --git a/iokit/Drivers/platform/drvApplePlatformExpert/ApplePlatformExpert.cpp b/iokit/Drivers/platform/drvApplePlatformExpert/ApplePlatformExpert.cpp index 7afdbfcbc..8df491dc5 100644 --- a/iokit/Drivers/platform/drvApplePlatformExpert/ApplePlatformExpert.cpp +++ b/iokit/Drivers/platform/drvApplePlatformExpert/ApplePlatformExpert.cpp @@ -62,7 +62,7 @@ bool ApplePlatformExpert::start( IOService * provider ) setBootROMType(kBootROMTypeOldWorld); // Get the Rom Minor Version from the 68k ROM. - romVersion = ml_phys_read(0xffc00010) & 0x0000ffff; + romVersion = ml_phys_read_64(0xffc00010ULL) & 0x0000ffff; provider->setProperty("rom-version", &romVersion, sizeof(romVersion)); } diff --git a/iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.cpp b/iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.cpp deleted file mode 100644 index e49d3b293..000000000 --- a/iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.cpp +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
- * - */ - -#include -#include -#include -#include "RootDomainUserClient.h" -#include - -#define super IOUserClient - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -OSDefineMetaClassAndStructors(RootDomainUserClient, IOUserClient) - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -RootDomainUserClient *RootDomainUserClient::withTask(task_t owningTask) -{ - RootDomainUserClient *me; - - me = new RootDomainUserClient; - if(me) { - if(!me->init()) { - me->release(); - return NULL; - } - me->fTask = owningTask; - } - return me; -} - -bool RootDomainUserClient::start( IOService * provider ) -{ - assert(OSDynamicCast(IOPMrootDomain, provider)); - if(!super::start(provider)) - return false; - fOwner = (IOPMrootDomain *)provider; - - // Got the owner, so initialize the call structures - fMethods[kPMSetAggressiveness].object = provider; // 0 - fMethods[kPMSetAggressiveness].func = (IOMethod)&IOPMrootDomain::setAggressiveness; - fMethods[kPMSetAggressiveness].count0 = 2; - fMethods[kPMSetAggressiveness].count1 = 0; - fMethods[kPMSetAggressiveness].flags = kIOUCScalarIScalarO; - - fMethods[kPMGetAggressiveness].object = provider; // 1 - fMethods[kPMGetAggressiveness].func = (IOMethod)&IOPMrootDomain::getAggressiveness; - fMethods[kPMGetAggressiveness].count0 = 1; - fMethods[kPMGetAggressiveness].count1 = 1; - fMethods[kPMGetAggressiveness].flags = kIOUCScalarIScalarO; - - fMethods[kPMSleepSystem].object = provider; // 2 - fMethods[kPMSleepSystem].func = (IOMethod)&IOPMrootDomain::sleepSystem; - fMethods[kPMSleepSystem].count0 = 0; - fMethods[kPMSleepSystem].count1 = 0; - fMethods[kPMSleepSystem].flags = kIOUCScalarIScalarO; - - fMethods[kPMAllowPowerChange].object = provider; // 3 - fMethods[kPMAllowPowerChange].func = (IOMethod)&IOPMrootDomain::allowPowerChange; - fMethods[kPMAllowPowerChange].count0 = 1; - fMethods[kPMAllowPowerChange].count1 = 0; - fMethods[kPMAllowPowerChange].flags = kIOUCScalarIScalarO; - - fMethods[kPMCancelPowerChange].object = provider; // 4 - fMethods[kPMCancelPowerChange].func = (IOMethod)&IOPMrootDomain::cancelPowerChange; - fMethods[kPMCancelPowerChange].count0 = 1; - fMethods[kPMCancelPowerChange].count1 = 0; - fMethods[kPMCancelPowerChange].flags = kIOUCScalarIScalarO; - - fMethods[kPMShutdownSystem].object = provider; // 5 - fMethods[kPMShutdownSystem].func = (IOMethod)&IOPMrootDomain::shutdownSystem; - fMethods[kPMShutdownSystem].count0 = 0; - fMethods[kPMShutdownSystem].count1 = 0; - fMethods[kPMShutdownSystem].flags = kIOUCScalarIScalarO; - - fMethods[kPMRestartSystem].object = provider; // 6 - fMethods[kPMRestartSystem].func = (IOMethod)&IOPMrootDomain::restartSystem; - fMethods[kPMRestartSystem].count0 = 0; - fMethods[kPMRestartSystem].count1 = 0; - fMethods[kPMRestartSystem].flags = kIOUCScalarIScalarO; - - return true; -} - - -IOReturn RootDomainUserClient::clientClose( void ) -{ - detach( fOwner); - - return kIOReturnSuccess; -} - -IOReturn RootDomainUserClient::clientDied( void ) -{ - return( clientClose()); -} - -IOExternalMethod * -RootDomainUserClient::getExternalMethodForIndex( UInt32 index ) -{ - if(index >= kNumPMMethods) - return NULL; - else - return &fMethods[index]; -} - -IOReturn -RootDomainUserClient::registerNotificationPort( - mach_port_t port, UInt32 type ) -{ - return kIOReturnUnsupported; -} - diff --git a/iokit/Families/IOADBBus/IOADBBusPriv.h b/iokit/Families/IOADBBus/IOADBBusPriv.h deleted file mode 100644 index 50efd8276..000000000 --- a/iokit/Families/IOADBBus/IOADBBusPriv.h 
+++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 - * All Rights Reserved - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose and without fee is hereby granted, - * provided that the above copyright notice appears in all copies and - * that both the copyright notice and this permission notice appear in - * supporting documentation. - * - * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, - * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION - * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - * - */ -/* - * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 - * All Rights Reserved - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose and without fee is hereby granted, - * provided that the above copyright notice appears in all copies and - * that both the copyright notice and this permission notice appear in - * supporting documentation. - * - * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, - * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION - * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -/* - * MKLINUX-1.0DR2 - */ -/* - * 18 June 1998 sdouglas - * Start IOKit version. 
- */ - -#define ADB_DEVICE_COUNT 16 - -#define ADB_FLAGS_PRESENT 0x00000001 /* Device is present */ -#define ADB_FLAGS_REGISTERED 0x00000002 /* Device has a handler */ -#define ADB_FLAGS_UNRESOLVED 0x00000004 /* Device has not been fully probed */ - -/* - * ADB Commands - */ - -#define ADB_DEVCMD_SELF_TEST 0xff -#define ADB_DEVCMD_CHANGE_ID 0xfe -#define ADB_DEVCMD_CHANGE_ID_AND_ACT 0xfd -#define ADB_DEVCMD_CHANGE_ID_AND_ENABLE 0x00 - -#ifndef __cplusplus - -struct ADBDeviceControl { - IOADBAddress address; - IOADBAddress defaultAddress; - UInt8 handlerID; - UInt8 defaultHandlerID; - UInt32 flags; - id owner; // here for speed -}; - -typedef struct ADBDeviceControl ADBDeviceControl; - - -@class IOADBDevice; - -@interface IOADBBus : IODevice -{ - IODevice * controller; -@public - ADBDeviceControl * adbDevices[ ADB_DEVICE_COUNT ]; -} - -- (IOReturn) probeBus; -- setUpName:(IOADBDevice *)device; - -/////// nub -> bus - -- (IOReturn) setOwner:owner forDevice:(void *)busRef; - -- (IOReturn) flush:(void *)busRef; - -- (IOReturn) readRegister:(void *)busRef - adbRegister:(IOADBRegister)adbRegister - contents:(UInt8 *)data - length:(IOByteCount *)length; - -- (IOReturn) writeRegister:(void *)busRef - adbRegister:(IOADBRegister)adbRegister - contents:(UInt8 *)data - length:(IOByteCount *)length; - -- (IOADBAddress) address:(void *)busRef; - -- (IOADBAddress) defaultAddress:(void *)busRef; - -- (UInt8) handlerID:(void *)busRef; - -- (UInt8) defaultHandlerID:(void *)busRef; - -- (IOReturn) setHandlerID:(void *)busRef - handlerID:(UInt8)handlerID; - -@end - -@interface IOADBDevice : IODevice -{ - IOADBBus * bus; - void * busRef; -} - -- initForBus:(IOADBBus *)bus andBusRef:(void *)busRef; - -- (void *) busRef; - -@end - -#endif diff --git a/iokit/Families/IOADBBus/IOADBController.cpp b/iokit/Families/IOADBBus/IOADBController.cpp deleted file mode 100644 index 306b18db3..000000000 --- a/iokit/Families/IOADBBus/IOADBController.cpp +++ /dev/null @@ -1,806 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 - * All Rights Reserved - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose and without fee is hereby granted, - * provided that the above copyright notice appears in all copies and - * that both the copyright notice and this permission notice appear in - * supporting documentation. 
- * - * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, - * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION - * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - * - */ -/* - * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 - * All Rights Reserved - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose and without fee is hereby granted, - * provided that the above copyright notice appears in all copies and - * that both the copyright notice and this permission notice appear in - * supporting documentation. - * - * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, - * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION - * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -/* - * MKLINUX-1.0DR2 - */ -/* - * 18 June 1998 sdouglas Start IOKit version. - * 16 Nov 1998 suurballe Port to c++ - */ - - -#include - -#include "IOADBControllerUserClient.h" -#include -#include -#include -#include -#include -#include -#include "IOADBBusPriv.h" - -bool ADBhasRoot( OSObject *, void *, IOService * ); -void doProbe ( thread_call_param_t, thread_call_param_t); - -#define kTenSeconds 10000000 - -#define super IOADBBus - -OSDefineMetaClass(IOADBController,IOADBBus) -OSDefineAbstractStructors(IOADBController,IOADBBus) - - -// ********************************************************************************** -// start -// -// ********************************************************************************** -bool IOADBController::start ( IOService * nub ) -{ - if( !super::start(nub)) { - return false; - } - probeBus(); - - rootDomain = NULL; - busProbed = true; - - // creates the probe thread for when we wake up: - probeThread = thread_call_allocate((thread_call_func_t)doProbe, (thread_call_param_t)this); - if (probeThread == NULL) { - IOLog("IOADBController::start fails to call thread_call_allocate \n"); - return false; - } - - addNotification( gIOPublishNotification,serviceMatching("IOPMrootDomain"), // look for the Root Domain - (IOServiceNotificationHandler)ADBhasRoot, this, 0 ); - - return true; -} - - - - - -// ********************************************************************************** -// ADBhasRoot -// -// The Root Power Domain has registered. -// Register as an interested driver so we find out when the system is -// going to sleep and waking up. 
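[Illustrative aside: the wake-time re-probe below leans on IOKit's power-change acknowledgement protocol, in which an interested driver may return a timeout in microseconds instead of IOPMAckImplied, then call acknowledgePowerChange() once its deferred work finishes. A hedged sketch of that pattern; MyDriver and fProbeThread are hypothetical stand-ins for the fields used below.]

    // Hedged sketch of the ack protocol used by powerStateDidChangeTo()
    // below (kTenSeconds == 10000000 microseconds).
    IOReturn MyDriver::powerStateDidChangeTo(IOPMPowerFlags flags,
                                             unsigned long, IOService *)
    {
        if (flags & kIOPMPowerOn) {
            thread_call_enter(fProbeThread);  // kick the deferred re-probe
            return 10 * 1000 * 1000;          // promise an ack within 10 s
        }
        return IOPMAckImplied;                // nothing pending
    }
    // ...the deferred work then finishes with
    // rootDomain->acknowledgePowerChange(this).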
-// ********************************************************************************** -bool ADBhasRoot( OSObject * us, void *, IOService * yourDevice ) -{ - if ( yourDevice != NULL ) { - ((IOADBController *)us)->rootDomain = (IOPMrootDomain *)yourDevice; - ((IOADBController *)us)->rootDomain->registerInterestedDriver((IOService *) us); - } - return true; -} - - -//********************************************************************************* -// powerStateWillChangeTo -// -// We are notified here of power changes in the root domain. -// -// If power is going down in the root domain, then the system is going to -// sleep, and we tear down the ADB stack. -//********************************************************************************* - -IOReturn IOADBController::powerStateWillChangeTo ( IOPMPowerFlags theFlags, unsigned long, IOService*) -{ - int i; - if ( ! (theFlags & kIOPMPowerOn) && ! (theFlags & kIOPMDoze) ) { - busProbed = false; - for ( i = 1; i < ADB_DEVICE_COUNT; i++ ) { - if( adbDevices[ i ] != NULL ) { - if ( adbDevices[ i ]->nub ) { - adbDevices[ i ]->nub->terminate(kIOServiceRequired | kIOServiceSynchronous); - adbDevices[ i ]->nub->release(); - } - IOFree( adbDevices[ i ], sizeof (ADBDeviceControl)); - adbDevices[ i ] = NULL; - } - } - } - return IOPMAckImplied; -} - -//********************************************************************************* -// powerStateDidChangeTo -// -// We are notified here of power changes in the root domain -// -// If power has been brought up, then the system is waking from sleep. -// We re-probe the bus -//********************************************************************************* -IOReturn IOADBController::powerStateDidChangeTo ( IOPMPowerFlags theFlags, unsigned long, IOService*) -{ - if ( (theFlags & kIOPMPowerOn) || (theFlags & kIOPMDoze) ) { - if ( !
busProbed ) { - thread_call_enter(probeThread); - busProbed = true; - return kTenSeconds; - } - } - return IOPMAckImplied; -} - - -void doProbe ( thread_call_param_t arg, thread_call_param_t) -{ - ((IOADBController *)arg)->probeBus(); - ((IOADBController *)arg)->rootDomain->acknowledgePowerChange((IOService *)arg); -} - - -// ********************************************************************************** -// probeAddress -// -// ********************************************************************************** -bool IOADBController::probeAddress ( IOADBAddress addr ) -{ - IOReturn err; - ADBDeviceControl * deviceInfo; - UInt16 value; - IOByteCount length; - - length = 2; - err = readFromDevice(addr,3,(UInt8 *)&value,&length); - - if (err == ADB_RET_OK) { - if( NULL == (deviceInfo = adbDevices[ addr ])) { - - deviceInfo = (ADBDeviceControl *)IOMalloc(sizeof(ADBDeviceControl)); - bzero(deviceInfo, sizeof(ADBDeviceControl)); - - adbDevices[ addr ] = deviceInfo; - deviceInfo->defaultAddress = addr; - deviceInfo->handlerID = deviceInfo->defaultHandlerID = (value & 0xff); - } - deviceInfo->address = addr; - } - return( (err == ADB_RET_OK)); -} - - -// ********************************************************************************** -// firstBit -// -// ********************************************************************************** -unsigned int IOADBController::firstBit ( unsigned int mask ) -{ - int bit = 15; - - while( 0 == (mask & (1 << bit))) { - bit--; - } - return(bit); -} - - -// ********************************************************************************** -// moveDeviceFrom -// -// ********************************************************************************** -bool IOADBController::moveDeviceFrom ( IOADBAddress from, IOADBAddress to, bool check ) -{ - IOReturn err; - UInt16 value; - IOByteCount length; - bool moved; - - length = 2; - value = ((to << 8) | ADB_DEVCMD_CHANGE_ID); - - err = writeToDevice(from,3,(UInt8 *)&value,&length); - - adbDevices[ to ] = adbDevices[ from ]; - - moved = probeAddress(to); - - if( moved || (!check)) { - adbDevices[ from ] = NULL; - } - else { - adbDevices[ to ] = NULL; - } - - return moved; -} - - -// ********************************************************************************** -// probeBus -// -// ********************************************************************************** -IOReturn IOADBController::probeBus ( void ) -{ - int i; - UInt32 unresolvedAddrs; - UInt32 freeAddrs; - IOADBAddress freeNum, devNum; - IOADBDevice * newDev; - OSDictionary * newProps; - char nameStr[ 10 ]; - const OSNumber * object; - const OSSymbol * key; - - /* Kill the auto poll until the new dev IDs have been set up */ - setAutoPollEnable(false); - - /* - * Send an ADB bus reset - reply is sent after bus has reset, - * so there is no need to wait for the reset to complete. - */ - - resetBus(); - - /* - * Okay, now attempt to reassign the - * bus - */ - - unresolvedAddrs = 0; - freeAddrs = 0xfffe; - - /* Skip 0 -- it's special! */ - for (i = 1; i < ADB_DEVICE_COUNT; i++) { - if( probeAddress(i) ) { - unresolvedAddrs |= ( 1 << i ); - freeAddrs &= ~( 1 << i ); - } - } - -/* Now attempt to reassign the addresses */ - while( unresolvedAddrs) { - if( !freeAddrs) { - panic("ADB: Cannot find a free ADB slot for reassignment!"); - } - - freeNum = firstBit(freeAddrs); - devNum = firstBit(unresolvedAddrs); - - if( !moveDeviceFrom(devNum, freeNum, true) ) { - - /* It didn't move.. bad! 
*/ - IOLog("WARNING : ADB DEVICE %d having problems " - "probing!\n", devNum); - } - else { - if( probeAddress(devNum) ) { - /* Found another device at the address, leave - * the first device moved to one side and set up - * newly found device for probing - */ - freeAddrs &= ~( 1 << freeNum ); - - devNum = 0; - - } - else { - /* no more at this address, good !*/ - /* Move it back.. */ - moveDeviceFrom(freeNum,devNum,false); - } - } - if(devNum) { - unresolvedAddrs &= ~( 1 << devNum ); - } - } - - IOLog("ADB present:%lx\n", (freeAddrs ^ 0xfffe)); - - setAutoPollList(freeAddrs ^ 0xfffe); - - setAutoPollPeriod(11111); - - setAutoPollEnable(true); - -// publish the nubs - for ( i = 1; i < ADB_DEVICE_COUNT; i++ ) { - if( 0 == adbDevices[ i ] ) { - continue; - } - newDev = new IOADBDevice; // make a nub - if ( newDev == NULL ) { - continue; - } - adbDevices[ i ]->nub = newDev; // keep a pointer to it - - newProps = OSDictionary::withCapacity( 10 ); // create a property table for it - if ( newProps == NULL ) { - newDev->free(); - continue; - } - - key = OSSymbol::withCString(ADBaddressProperty); // make key/object for address - if ( key == NULL ) { - newDev->free(); - newProps->free(); - continue; - } - - object = OSNumber::withNumber((unsigned long long)adbDevices[i]->address,8); - if ( object == NULL ) { - key->release(); - newDev->free(); - newProps->free(); - continue; - } - newProps->setObject(key, (OSObject *)object); // put it in newProps - key->release(); - object->release(); - - key = OSSymbol::withCString(ADBhandlerIDProperty); // make key/object for handlerID - if ( key == NULL ) { - newDev->free(); - newProps->free(); - continue; - } - object = OSNumber::withNumber((unsigned long long)adbDevices[i]->handlerID,8); - if ( object == NULL ) { - key->release(); - newDev->free(); - newProps->free(); - continue; - } - newProps->setObject(key, (OSObject *)object); // put it in newProps - key->release(); - object->release(); - - key = OSSymbol::withCString(ADBdefAddressProperty); // make key/object for default addr - if ( key == NULL ) { - newDev->free(); - newProps->free(); - continue; - } - object = OSNumber::withNumber((unsigned long long)adbDevices[i]->defaultAddress,8); - if ( object == NULL ) { - key->release(); - newDev->free(); - newProps->free(); - continue; - } - newProps->setObject(key, (OSObject *)object); // put it in newProps - key->release(); - object->release(); - - key = OSSymbol::withCString(ADBdefHandlerProperty); // make key/object for default h id - if ( key == NULL ) { - newDev->free(); - newProps->free(); - continue; - } - object = OSNumber::withNumber((unsigned long long)adbDevices[i]->defaultHandlerID,8); - if ( object == NULL ) { - key->release(); - newDev->free(); - newProps->free(); - continue; - } - newProps->setObject(key, (OSObject *)object); // put it in newProps - key->release(); - object->release(); - - if ( ! 
newDev->init(newProps,adbDevices[i]) ) { // give it to our new nub - kprintf("adb nub init failed\n"); - newDev->release(); - continue; - } - - sprintf(nameStr,"%x-%02x",adbDevices[i]->defaultAddress,adbDevices[i]->handlerID); - newDev->setName(nameStr); - sprintf(nameStr, "%x", adbDevices[i]->defaultAddress); - newDev->setLocation(nameStr); - - newProps->release(); // we're done with it - if ( !newDev->attach(this) ) { - kprintf("adb nub attach failed\n"); - newDev->release(); - continue; - } - newDev->start(this); - newDev->registerService(); - newDev->waitQuiet(); - } // repeat loop - return kIOReturnSuccess; -} - - -// ********************************************************************************** -// autopollHandler -// -// ********************************************************************************** -void autopollHandler ( IOService * us, UInt8 adbCommand, IOByteCount length, UInt8 * data ) -{ - ((IOADBController *)us)->packet(data,length,adbCommand); -} - - -// ********************************************************************************** -// packet -// -// ********************************************************************************** -void IOADBController::packet ( UInt8 * data, IOByteCount length, UInt8 adbCommand ) -{ - ADBDeviceControl * deviceInfo; - - deviceInfo = adbDevices[ adbCommand >> 4 ]; - if( deviceInfo != NULL ) { - if( deviceInfo->owner != NULL ) { - deviceInfo->handler(deviceInfo->owner, adbCommand, length, data); - } - } - else { - // new device arrival? - // IOLog("IOADBBus: new device @%x\n", address); - } -} - - -// ********************************************************************************** -// matchDevice -// -// ********************************************************************************** -bool IOADBController::matchNubWithPropertyTable( IOService * device, OSDictionary * propTable ) -{ - bool matched = false; - const char * keys; - ADBDeviceControl * deviceInfo = (ADBDeviceControl *)(((IOADBDevice *)device)->busRef()); - OSObject * X; - - do { - X = propTable->getObject("ADB Match"); - if( !X ) { - break; - } - keys = ((OSString *)X)->getCStringNoCopy(); - if( *keys == '*' ) { - keys++; - } - else { - if( deviceInfo->defaultAddress != strtol(keys, (char **) &keys, 16)) { - break; - } - } - if( *keys++ == '-' ) { - if( deviceInfo->defaultHandlerID != strtol(keys, (char **) &keys, 16)) { - break; - } - } - matched = true; - - } while ( false ); - return matched; -} - - -/////// nub -> bus - -// ********************************************************************************** -// setOwner -// -// ********************************************************************************** -IOReturn IOADBController::setOwner ( void * device, IOService * client, ADB_callback_func handler ) -{ - ADBDeviceControl * deviceInfo = (ADBDeviceControl *)device; - - deviceInfo->handler = handler; - deviceInfo->owner = client; - return kIOReturnSuccess; -} - - -// ********************************************************************************** -// clearOwner -// -// ********************************************************************************** -IOReturn IOADBController::clearOwner ( void * device ) -{ - ADBDeviceControl * deviceInfo = (ADBDeviceControl *)device; - kprintf("IOADBController::clearOwner\n"); - - deviceInfo->owner = NULL; - deviceInfo->handler = NULL; - return kIOReturnSuccess; -} - - -// ********************************************************************************** -// claimDevice -// -// Called by the user client -// 
********************************************************************************** -IOReturn IOADBController::claimDevice (unsigned long ADBaddress, IOService * client, ADB_callback_func handler ) -{ - if ( claimed_devices[ADBaddress] == true ) { // is this address already claimed by the user? - return kIOReturnExclusiveAccess; // yes - } - if ( adbDevices[ADBaddress] == NULL ) { // no, is there a device at that address? - return kIOReturnNoDevice; // no - } - if (adbDevices[ADBaddress]->handler != NULL ) { // yes, is it already owned by the kernel? - return kIOReturnExclusiveAccess; // yes - } - claimed_devices[ADBaddress] = true; // no, user can have it - return kIOReturnSuccess; -} - - -// ********************************************************************************** -// releaseDevice -// -// Called by the user client -// ********************************************************************************** -IOReturn IOADBController::releaseDevice (unsigned long ADBaddress ) -{ - if ( claimed_devices[ADBaddress] == false ) { - return kIOReturnBadArgument; - } - - claimed_devices[ADBaddress] = false; - - return kIOReturnSuccess; -} - - -// ********************************************************************************** -// readDeviceForUser -// -// Called by the user client -// ********************************************************************************** -IOReturn IOADBController::readDeviceForUser (unsigned long address, unsigned long adbRegister, - UInt8 * data, IOByteCount * length) -{ - if ( claimed_devices[address] == false ) { - return kIOReturnBadArgument; - } - - return (readFromDevice((IOADBAddress)address,(IOADBRegister)adbRegister,data,length)); -} - - -// ********************************************************************************** -// writeDeviceForUser -// -// Called by the user client -// ********************************************************************************** -IOReturn IOADBController::writeDeviceForUser (unsigned long address, unsigned long adbRegister, - UInt8 * data, IOByteCount * length) -{ - if ( claimed_devices[address] == false ) { - return kIOReturnBadArgument; - } - - return (writeToDevice((IOADBAddress)address,(IOADBRegister)adbRegister,data,length)); -} - - -// ********************************************************************************** -// address -// -// ********************************************************************************** -IOADBAddress IOADBController::address ( ADBDeviceControl * busRef ) -{ - return busRef->address; -} - - -// ********************************************************************************** -// defaultAddress -// -// ********************************************************************************** -IOADBAddress IOADBController::defaultAddress ( ADBDeviceControl * busRef ) -{ - return busRef->defaultAddress; -} - - -// ********************************************************************************** -// handlerID -// -// ********************************************************************************** -UInt8 IOADBController::handlerID ( ADBDeviceControl * busRef ) -{ - return busRef->handlerID; -} - - -// ********************************************************************************** -// defaultHandlerID -// -// ********************************************************************************** -UInt8 IOADBController::defaultHandlerID ( ADBDeviceControl * busRef ) -{ - return busRef->defaultHandlerID; -} - - -// ********************************************************************************** 
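claimDevice() above arbitrates each address three ways: the slot must not already be claimed from user space, a device must actually be present, and no kernel driver may own it. The same gate restated as a self-contained sketch; the arrays here are illustrative stand-ins for the controller's claimed_devices[] and adbDevices[]:

    #include <IOKit/IOReturn.h>

    enum { kADBAddressCount = 16 };                   // ADB addresses 0..15

    struct DeviceSlot { bool present; bool kernelOwned; };
    static bool       gUserClaimed[kADBAddressCount]; // stand-in for claimed_devices[]
    static DeviceSlot gSlot[kADBAddressCount];        // stand-in for adbDevices[]

    IOReturn claim( unsigned addr )
    {
        if ( addr >= kADBAddressCount )
            return kIOReturnBadArgument;
        if ( gUserClaimed[addr] )           // already claimed by a user client?
            return kIOReturnExclusiveAccess;
        if ( !gSlot[addr].present )         // nothing at this address?
            return kIOReturnNoDevice;
        if ( gSlot[addr].kernelOwned )      // a kernel driver owns it?
            return kIOReturnExclusiveAccess;
        gUserClaimed[addr] = true;          // grant it to the user client
        return kIOReturnSuccess;
    }

Note that the original ignores its client and handler arguments: a user-space claim is tracked purely by address.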
-// cancelAllIO -// -// ********************************************************************************** -IOReturn IOADBController::cancelAllIO ( void ) -{ - return kIOReturnSuccess; -} - - -// ********************************************************************************** -// flush -// -// ********************************************************************************** -IOReturn IOADBController::flush ( ADBDeviceControl * busRef ) -{ - return(flushDevice(busRef->address)); -} - - -// ********************************************************************************** -// readRegister -// -// ********************************************************************************** -IOReturn IOADBController::readRegister ( ADBDeviceControl * busRef, IOADBRegister adbRegister, - UInt8 * data, IOByteCount * length ) -{ - return readFromDevice(busRef->address,adbRegister,data,length); -} - - -// ********************************************************************************** -// writeRegister -// -// ********************************************************************************** -IOReturn IOADBController::writeRegister ( ADBDeviceControl * busRef, IOADBRegister adbRegister, - UInt8 * data, IOByteCount * length ) -{ - return writeToDevice(busRef->address,adbRegister,data,length); -} - - -// ********************************************************************************** -// setHandlerID -// -// ********************************************************************************** -IOReturn IOADBController::setHandlerID ( ADBDeviceControl * deviceInfo, UInt8 handlerID ) -{ - IOReturn err; - UInt16 value; - IOByteCount length; - IOADBAddress addr = deviceInfo->address; - - length = 2; - err = readFromDevice(addr,3,(UInt8 *)&value,&length); - - if ( err ) { - return err; - } - - value = (value & 0xf000) | handlerID | (addr << 8); - length = sizeof(value); - err = writeToDevice(addr,3,(UInt8 *)&value,&length); - - length = sizeof(value); - err = readFromDevice(addr,3,(UInt8 *)&value,&length); - - if ( err == kIOReturnSuccess ) { - deviceInfo->handlerID = value & 0xff; - } - - if ( deviceInfo->handlerID == handlerID ) { - err = kIOReturnSuccess; - } - else { - err = kIOReturnNoResources; - } - - return err; -} - - -// ********************************************************************************** -// getURLComponentUnit -// -// ********************************************************************************** -int IOADBController::getURLComponentUnit ( IOService * device, char * path, int maxLen ) -{ - ADBDeviceControl * deviceInfo = (ADBDeviceControl *)((IOADBDevice *)device)->busRef(); - - if( maxLen > 1 ) { - sprintf( path, "%x", deviceInfo->address ); - return(1); - } - else { - return(0); - } -} - - -// ********************************************************************************** -// newUserClient -// -// ********************************************************************************** -IOReturn IOADBController::newUserClient( task_t owningTask, void * /* security_id */, UInt32 type, IOUserClient ** handler ) -{ - IOReturn err = kIOReturnSuccess; - IOADBControllerUserClient * client; - - client = IOADBControllerUserClient::withTask(owningTask); - - if( !client || (false == client->attach( this )) || - (false == client->start( this )) ) { - if(client) { - client->detach( this ); - client->release(); - client = NULL; - } - err = kIOReturnNoMemory; - } - *handler = client; - return err; -} diff --git a/iokit/Families/IOADBBus/IOADBControllerUserClient.cpp 
b/iokit/Families/IOADBBus/IOADBControllerUserClient.cpp deleted file mode 100644 index 2fbaac9dd..000000000 --- a/iokit/Families/IOADBBus/IOADBControllerUserClient.cpp +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. - * - */ - -#include -#include -#include -#include "IOADBControllerUserClient.h" - -#define super IOUserClient - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -OSDefineMetaClassAndStructors(IOADBControllerUserClient, IOUserClient) - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -IOADBControllerUserClient *IOADBControllerUserClient::withTask(task_t owningTask) -{ - IOADBControllerUserClient * me; - - me = new IOADBControllerUserClient; - if ( me ) { - if (! 
me->init() ) { - me->release(); - return NULL; - } - me->fTask = owningTask; - } - return me; -} - -bool IOADBControllerUserClient::start( IOService * provider ) -{ - assert(OSDynamicCast(IOADBController, provider)); - if(!super::start(provider)) - return false; - fOwner = (IOADBController *)provider; - - // Got the owner, so initialize the call structures - fMethods[kADBReadDevice].object = provider; - fMethods[kADBReadDevice].func = (IOMethod)&IOADBController::readDeviceForUser; - fMethods[kADBReadDevice].count0 = 2; - fMethods[kADBReadDevice].count1 = 8; - fMethods[kADBReadDevice].flags = kIOUCScalarIStructO; - - fMethods[kADBWriteDevice].object = provider; - fMethods[kADBWriteDevice].func = (IOMethod)&IOADBController::writeDeviceForUser; - fMethods[kADBWriteDevice].count0 = 4; - fMethods[kADBWriteDevice].count1 = 0; - fMethods[kADBWriteDevice].flags = kIOUCScalarIScalarO; - - fMethods[kADBClaimDevice].object = provider; - fMethods[kADBClaimDevice].func = (IOMethod)&IOADBController::claimDevice; - fMethods[kADBClaimDevice].count0 = 1; - fMethods[kADBClaimDevice].count1 = 0; - fMethods[kADBClaimDevice].flags = kIOUCScalarIScalarO; - - fMethods[kADBReleaseDevice].object = provider; - fMethods[kADBReleaseDevice].func = (IOMethod)&IOADBController::releaseDevice; - fMethods[kADBReleaseDevice].count0 = 1; - fMethods[kADBReleaseDevice].count1 = 0; - fMethods[kADBReleaseDevice].flags = kIOUCScalarIScalarO; - - return true; -} - -IOReturn IOADBControllerUserClient::clientMemoryForType( UInt32 type, - UInt32 * flags, IOLogicalAddress * address, IOByteCount * size ) -{ - return kIOReturnUnsupported; -} - -IOReturn IOADBControllerUserClient::clientClose( void ) -{ - detach( fOwner); - - return kIOReturnSuccess; -} - -IOReturn IOADBControllerUserClient::clientDied( void ) -{ - return( clientClose()); -} - -IOReturn IOADBControllerUserClient::connectClient( IOUserClient * client ) -{ - return kIOReturnSuccess; -} - -IOExternalMethod * IOADBControllerUserClient::getExternalMethodForIndex( UInt32 index ) -{ - if(index >= kNumADBMethods) - return NULL; - else - return &fMethods[index]; -} - -IOReturn IOADBControllerUserClient::registerNotificationPort ( mach_port_t port, UInt32 type ) -{ - return kIOReturnUnsupported; -} - diff --git a/iokit/Families/IOADBBus/IOADBControllerUserClient.h b/iokit/Families/IOADBBus/IOADBControllerUserClient.h deleted file mode 100644 index 7ed5863a7..000000000 --- a/iokit/Families/IOADBBus/IOADBControllerUserClient.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
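The fMethods[] table built in start() above is the classic pre-Tiger IOUserClient dispatch scheme: getExternalMethodForIndex() returns an IOExternalMethod entry, and the kernel marshals arguments according to its flags and counts (kIOUCScalarIScalarO with count0 = 1 for the claim call, for instance). A user-space call might then look like the sketch below; it assumes the kADB* indices run Read, Write, Claim, Release in the order the table is initialised, which this patch does not show:

    #include <IOKit/IOKitLib.h>

    kern_return_t claimADBAddress( io_service_t controller, int addr )
    {
        io_connect_t  conn;
        kern_return_t kr = IOServiceOpen( controller, mach_task_self(), 0, &conn );
        if ( kr != KERN_SUCCESS )
            return kr;
        // Index 2 assumed to be kADBClaimDevice: one scalar in, none out.
        kr = IOConnectMethodScalarIScalarO( conn, 2, 1, 0, addr );
        IOServiceClose( conn );
        return kr;
    }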
- * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. - * - * HISTORY - * - */ - - -#ifndef _IOKIT_ADBCONTROLLERUSERCLIENT_H -#define _IOKIT_ADBCONTROLLERUSERCLIENT_H - -#include -#include -#include - - -class IOADBControllerUserClient : public IOUserClient -{ - OSDeclareDefaultStructors(IOADBControllerUserClient) - -private: - IOADBController * fOwner; - task_t fTask; - IOExternalMethod fMethods[ kNumADBMethods ]; - -public: - - static IOADBControllerUserClient *withTask(task_t owningTask); - - virtual IOReturn clientClose( void ); - - virtual IOReturn clientDied( void ); - - virtual IOReturn registerNotificationPort ( mach_port_t port, UInt32 type ); - - virtual IOReturn connectClient( IOUserClient * client ); - - virtual IOReturn clientMemoryForType( UInt32, UInt32 *, IOLogicalAddress *, IOByteCount * ); - - virtual IOExternalMethod * getExternalMethodForIndex( UInt32 index ); - - virtual bool start( IOService * provider ); - -}; - -#endif /* ! _IOKIT_ADBCONTROLLERUSERCLIENT_H */ - diff --git a/iokit/Families/IOADBBus/IOADBDevice.cpp b/iokit/Families/IOADBBus/IOADBDevice.cpp deleted file mode 100644 index 6a5855e15..000000000 --- a/iokit/Families/IOADBBus/IOADBDevice.cpp +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * 18 June 1998 sdouglas Start IOKit version. 
- * 17 Nov 1998 suurballe Port objc to c++ - */ - -#include - -#define super IOService -OSDefineMetaClassAndStructors(IOADBDevice,IOService) - -// ********************************************************************************** -// init -// -// ********************************************************************************** -bool IOADBDevice::init ( OSDictionary * regEntry, ADBDeviceControl * us ) -{ -if( !super::init(regEntry)) - return false; - -fBusRef = us; -return true; -} - - -// ********************************************************************************** -// attach -// -// ********************************************************************************** -bool IOADBDevice::attach ( IOADBBus * controller ) -{ -if( !super::attach(controller)) - return false; - -bus = controller; -return true; -} - -// ********************************************************************************** -// matchPropertyTable -// -// ********************************************************************************** - -bool IOADBDevice::matchPropertyTable( OSDictionary * table ) -{ - return( bus->matchNubWithPropertyTable( this, table )); -} - -// ********************************************************************************** -// seizeForClient -// -// ********************************************************************************** -bool IOADBDevice::seizeForClient ( IOService * client, ADB_callback_func handler ) -{ -bus->setOwner(fBusRef,client,handler); - -return true; -} - - -// ********************************************************************************** -// releaseFromClient -// -// ********************************************************************************** -void IOADBDevice::releaseFromClient ( IORegistryEntry * ) -{ - kprintf("IOADBDevice::releaseFromClient\n"); - bus->clearOwner(fBusRef); -} - - -// ********************************************************************************** -// flush -// -// ********************************************************************************** -IOReturn IOADBDevice::flush ( void ) -{ -if ( isInactive() ) { - return kIOReturnNotOpen; -} -return( bus->flush(fBusRef) ); -} - - -// ********************************************************************************** -// readRegister -// -// ********************************************************************************** -IOReturn IOADBDevice::readRegister ( IOADBRegister adbRegister, UInt8 * data, - IOByteCount * length ) -{ -if ( isInactive() ) { - return kIOReturnNotOpen; -} -return( bus->readRegister(fBusRef,adbRegister,data,length) ); -} - - -// ********************************************************************************** -// writeRegister -// -// ********************************************************************************** -IOReturn IOADBDevice::writeRegister ( IOADBRegister adbRegister, UInt8 * data, - IOByteCount * length ) -{ -if ( isInactive() ) { - return kIOReturnNotOpen; -} -return( bus->writeRegister(fBusRef,adbRegister,data,length) ); -} - - -// ********************************************************************************** -// address -// -// ********************************************************************************** -IOADBAddress IOADBDevice::address ( void ) -{ -return( bus->address(fBusRef) ); -} - - -// ********************************************************************************** -// defaultAddress -// -// ********************************************************************************** -IOADBAddress IOADBDevice::defaultAddress ( void ) -{ 
-return( bus->defaultAddress(fBusRef) ); -} - - -// ********************************************************************************** -// handlerID -// -// ********************************************************************************** -UInt8 IOADBDevice::handlerID ( void ) -{ -return( bus->handlerID(fBusRef) ); -} - - -// ********************************************************************************** -// defaultHandlerID -// -// ********************************************************************************** -UInt8 IOADBDevice::defaultHandlerID ( void ) -{ -return( bus->defaultHandlerID(fBusRef) ); -} - - -// ********************************************************************************** -// setHandlerID -// -// ********************************************************************************** -IOReturn IOADBDevice::setHandlerID ( UInt8 handlerID ) -{ -return( bus->setHandlerID(fBusRef,handlerID) ); -} - - -// ********************************************************************************** -// busRef -// -// ********************************************************************************** -void * IOADBDevice::busRef ( void ) -{ -return fBusRef; -} diff --git a/iokit/IOKit/IOBSD.h b/iokit/IOKit/IOBSD.h index d84f51108..9c385bcfb 100644 --- a/iokit/IOKit/IOBSD.h +++ b/iokit/IOKit/IOBSD.h @@ -35,8 +35,4 @@ #define kIOBSDMinorKey "BSD Minor" // (an OSNumber) #define kIOBSDUnitKey "BSD Unit" // (an OSNumber) -#define kIOBSDName "BSD Name" ///d:deprecated -#define kIOBSDMajor "BSD Major" ///d:deprecated -#define kIOBSDMinor "BSD Minor" ///d:deprecated - #endif /* !_IOBSD_H */ diff --git a/iokit/IOKit/IOBufferMemoryDescriptor.h b/iokit/IOKit/IOBufferMemoryDescriptor.h index af934cc2c..804658694 100644 --- a/iokit/IOKit/IOBufferMemoryDescriptor.h +++ b/iokit/IOKit/IOBufferMemoryDescriptor.h @@ -28,12 +28,11 @@ #include enum { - kIOMemoryDirectionMask = 0x0000000f, kIOMemoryPhysicallyContiguous = 0x00000010, kIOMemoryPageable = 0x00000020, kIOMemorySharingTypeMask = 0x000f0000, kIOMemoryUnshared = 0x00000000, - kIOMemoryKernelUserShared = 0x00010000, + kIOMemoryKernelUserShared = 0x00010000 }; #define _IOBUFFERMEMORYDESCRIPTOR_INTASKWITHOPTIONS_ 1 @@ -240,15 +239,6 @@ public: * will not copy past the end of the memory descriptor's current capacity. */ virtual bool appendBytes(const void *bytes, vm_size_t withLength); - - /* - * getPhysicalSegment: - * - * Get the physical address of the buffer, relative to the current position. - * If the current position is at the end of the buffer, a zero is returned. - */ - virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, - IOByteCount * length); }; #endif /* !_IOBUFFERMEMORYDESCRIPTOR_H */ diff --git a/iokit/IOKit/IOCatalogue.h b/iokit/IOKit/IOCatalogue.h index 7905d86db..2475a0337 100644 --- a/iokit/IOKit/IOCatalogue.h +++ b/iokit/IOKit/IOCatalogue.h @@ -205,6 +205,7 @@ public: */ virtual bool serialize(OSSerialize * s) const; + bool serializeData(IOOptionBits kind, OSSerialize * s) const; /*! @function recordStartupExtensions @@ -249,8 +250,6 @@ private: @param moduleName An OSString containing the name of the module to unload. 
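The _IOBUFFERMEMORYDESCRIPTOR_INTASKWITHOPTIONS_ guard in the IOBufferMemoryDescriptor.h hunk above lets clients detect the inTaskWithOptions() factory at compile time. A typical kernel-side allocation through it might look like this sketch; the option mix shown is one common choice, not something the header mandates:

    #include <IOKit/IOBufferMemoryDescriptor.h>

    // Allocate one page, visible to both the kernel and a user mapping.
    IOBufferMemoryDescriptor * buf =
        IOBufferMemoryDescriptor::inTaskWithOptions(
            kernel_task,
            kIODirectionOutIn | kIOMemoryKernelUserShared,
            page_size,      // capacity
            page_size );    // alignment

    if ( buf ) {
        void * p = buf->getBytesNoCopy();   // kernel virtual address
        // ... fill or drain the buffer, then drop the reference ...
        buf->release();
    }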
*/ IOReturn unloadModule( OSString * moduleName ) const; - - }; __BEGIN_DECLS diff --git a/iokit/IOKit/IODeviceTreeSupport.h b/iokit/IOKit/IODeviceTreeSupport.h index fb89c5133..2cc5e7cc1 100644 --- a/iokit/IOKit/IODeviceTreeSupport.h +++ b/iokit/IOKit/IODeviceTreeSupport.h @@ -59,7 +59,7 @@ bool IODTCompareNubName( const IORegistryEntry * regEntry, enum { kIODTRecursive = 0x00000001, - kIODTExclusive = 0x00000002, + kIODTExclusive = 0x00000002 }; OSCollectionIterator * IODTFindMatchingEntries( IORegistryEntry * from, diff --git a/iokit/IOKit/IOKitDebug.h b/iokit/IOKit/IOKitDebug.h index b50378211..082bd8ba6 100644 --- a/iokit/IOKit/IOKitDebug.h +++ b/iokit/IOKit/IOKitDebug.h @@ -54,11 +54,7 @@ private: UInt32 value, const char * name ); }; -#endif - -#ifdef __cplusplus -extern "C" { -#endif +#endif /* __cplusplus */ enum { // loggage @@ -79,12 +75,17 @@ enum { kIOLogMemory = 0x00004000ULL, // debug aids - change behaviour - kIONoFreeObjects = 0x00100000ULL + kIONoFreeObjects = 0x00100000ULL, + kIOLogSynchronous = 0x00200000ULL, // IOLog completes synchronously }; extern SInt64 gIOKitDebug; extern char iokit_version[]; +#ifdef __cplusplus +extern "C" { +#endif + struct IORegistryPlane; extern void IOPrintPlane( const struct IORegistryPlane * plane ); extern void OSPrintMemory( void ); diff --git a/iokit/IOKit/IOKitKeys.h b/iokit/IOKit/IOKitKeys.h index 2825c1b97..5a4830506 100644 --- a/iokit/IOKit/IOKitKeys.h +++ b/iokit/IOKit/IOKitKeys.h @@ -124,4 +124,7 @@ #define kIOBusBadgeKey "IOBusBadge" // (OSDictionary) #define kIODeviceIconKey "IODeviceIcon" // (OSDictionary) +// property of root that describes the machine's serial number as a string +#define kIOPlatformSerialNumberKey "IOPlatformSerialNumber" // (OSString) + #endif /* ! _IOKIT_IOKITKEYS_H */ diff --git a/iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.h b/iokit/IOKit/IOKitKeysPrivate.h similarity index 53% rename from iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.h rename to iokit/IOKit/IOKitKeysPrivate.h index 874921600..fef465508 100644 --- a/iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.h +++ b/iokit/IOKit/IOKitKeysPrivate.h @@ -22,41 +22,27 @@ * * @APPLE_LICENSE_HEADER_END@ */ -/* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
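Because kIOPlatformSerialNumberKey (added to IOKitKeys.h above) is published as an OSString on the registry root, kernel code can read the serial number without knowing which platform expert set it. A minimal sketch, assuming the property has already been published:

    #include <IOKit/IOKitKeys.h>
    #include <IOKit/IORegistryEntry.h>
    #include <libkern/c++/OSString.h>

    const char * platformSerialNumber( void )
    {
        OSString * sn = OSDynamicCast( OSString,
            IORegistryEntry::getRegistryRoot()->
                getProperty(kIOPlatformSerialNumberKey) );
        return sn ? sn->getCStringNoCopy() : 0;
    }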
- * - * HISTORY - * - */ - -#ifndef _IOKIT_APPLEI386PLATFORM_H -#define _IOKIT_APPLEI386PLATFORM_H - -#include - -class AppleI386PlatformExpert : public IOPlatformExpert -{ - OSDeclareDefaultStructors(AppleI386PlatformExpert) -private: - void setupPIC(IOService * nub); +#ifndef _IOKIT_IOKITKEYSPRIVATE_H +#define _IOKIT_IOKITKEYSPRIVATE_H - static int handlePEHaltRestart(unsigned int type); +#include -public: - virtual IOService * probe(IOService * provider, - SInt32 * score); +// properties found in the registry root +#define kIOConsoleUsersKey "IOConsoleUsers" /* value is OSArray */ +#define kIOMaximumMappedIOByteCountKey "IOMaximumMappedIOByteCount" /* value is OSNumber */ - virtual bool start(IOService * provider); +// properties found in the console user dict - virtual bool matchNubWithPropertyTable(IOService * nub, - OSDictionary * table); +#define kIOConsoleSessionIDKey "kCGSSessionIDKey" /* value is OSNumber */ - virtual IOService * createNub(OSDictionary * from); +#define kIOConsoleSessionUserNameKey "kCGSSessionUserNameKey" /* value is OSString */ +#define kIOConsoleSessionUIDKey "kCGSSessionUserIDKey" /* value is OSNumber */ +#define kIOConsoleSessionConsoleSetKey "kCGSSessionConsoleSetKey" /* value is OSNumber */ +#define kIOConsoleSessionOnConsoleKey "kCGSSessionOnConsoleKey" /* value is OSBoolean */ - virtual bool getModelName(char * name, int maxLength); - virtual bool getMachineName(char * name, int maxLength); -}; +// IOResources property +#define kIOConsoleUsersSeedKey "IOConsoleUsersSeed" /* value is OSNumber */ -#endif /* ! _IOKIT_APPLEI386PLATFORM_H */ +#endif /* ! _IOKIT_IOKITKEYSPRIVATE_H */ diff --git a/iokit/IOKit/IOKitServer.h b/iokit/IOKit/IOKitServer.h index 9a2e9c2fe..910e9c7b6 100644 --- a/iokit/IOKit/IOKitServer.h +++ b/iokit/IOKit/IOKitServer.h @@ -59,7 +59,7 @@ extern "C" { enum { kIOServiceMatching = 100, kIOBSDNameMatching = 101, - kIOOFPathMatching = 102, + kIOOFPathMatching = 102 }; // IOCatalogueSendData @@ -77,7 +77,7 @@ enum { kIOCatalogRemoveDrivers, kIOCatalogRemoveDriversNoMatch, kIOCatalogStartMatching, - kIOCatalogRemoveKernelLinker, + kIOCatalogRemoveKernelLinker }; // IOCatalogueGetData @@ -86,7 +86,10 @@ enum { @constant kIOCatalogGetContents Returns a snapshot of the database to the caller. */ enum { - kIOCatalogGetContents = 1, + kIOCatalogGetContents = 1, + kIOCatalogGetModuleDemandList = 2, + kIOCatalogGetCacheMissList = 3, + kIOCatalogGetROMMkextList = 4 }; // IOCatalogueReset @@ -95,7 +98,7 @@ enum { @constant kIOCatalogResetDefault Removes all entries from IOCatalogue except those used for booting the system. 
*/ enum { - kIOCatalogResetDefault = 1, + kIOCatalogResetDefault = 1 }; // IOCatalogueTerminate @@ -108,7 +111,7 @@ enum { enum { kIOCatalogModuleUnload = 1, kIOCatalogModuleTerminate, - kIOCatalogServiceTerminate, + kIOCatalogServiceTerminate }; enum { diff --git a/iokit/IOKit/IOLib.h b/iokit/IOKit/IOLib.h index 74757b7d1..3c42748a7 100644 --- a/iokit/IOKit/IOLib.h +++ b/iokit/IOKit/IOLib.h @@ -36,6 +36,8 @@ #error IOLib.h is for kernel use only #endif +#include + #include #include @@ -46,9 +48,7 @@ #include -#ifdef __cplusplus -extern "C" { -#endif +__BEGIN_DECLS #include #include @@ -146,6 +146,78 @@ void IOFreePageable(void * address, vm_size_t size); #define IONew(type,number) (type*)IOMalloc(sizeof(type) * (number) ) #define IODelete(ptr,type,number) IOFree( (ptr) , sizeof(type) * (number) ) +///////////////////////////////////////////////////////////////////////////// +// +// +// These functions are now implemented in IOMapper.cpp +// +// +///////////////////////////////////////////////////////////////////////////// + +/*! @function IOMappedRead8 + @abstract Read one byte from the desired "Physical" IOSpace address. + @discussion Read one byte from the desired "Physical" IOSpace address. This function allows the developer to read an address returned from any memory descriptor's getPhysicalSegment routine. It can then be used by segmenting a physical page slightly to tag the physical page with its kernel space virtual address. + @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + @result Data contained at that location */ + +UInt8 IOMappedRead8(IOPhysicalAddress address); + +/*! @function IOMappedRead16 + @abstract Read two bytes from the desired "Physical" IOSpace address. + @discussion Read two bytes from the desired "Physical" IOSpace address. This function allows the developer to read an address returned from any memory descriptor's getPhysicalSegment routine. It can then be used by segmenting a physical page slightly to tag the physical page with its kernel space virtual address. + @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + @result Data contained at that location */ + +UInt16 IOMappedRead16(IOPhysicalAddress address); + +/*! @function IOMappedRead32 + @abstract Read four bytes from the desired "Physical" IOSpace address. + @discussion Read four bytes from the desired "Physical" IOSpace address. This function allows the developer to read an address returned from any memory descriptor's getPhysicalSegment routine. It can then be used by segmenting a physical page slightly to tag the physical page with its kernel space virtual address. + @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + @result Data contained at that location */ + +UInt32 IOMappedRead32(IOPhysicalAddress address); + +/*! @function IOMappedRead64 + @abstract Read eight bytes from the desired "Physical" IOSpace address. + @discussion Read eight bytes from the desired "Physical" IOSpace address. This function allows the developer to read an address returned from any memory descriptor's getPhysicalSegment routine. It can then be used by segmenting a physical page slightly to tag the physical page with its kernel space virtual address. + @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + @result Data contained at that location */ + +UInt64 IOMappedRead64(IOPhysicalAddress address); + +/*! 
@function IOMappedWrite8 + @abstract Write one byte to the desired "Physical" IOSpace address. + @discussion Write one byte to the desired "Physical" IOSpace address. This function allows the developer to write to an address returned from any memory descriptor's getPhysicalSegment routine. + @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + @param value Data to be written to the desired location */ + +void IOMappedWrite8(IOPhysicalAddress address, UInt8 value); + +/*! @function IOMappedWrite16 + @abstract Write two bytes to the desired "Physical" IOSpace address. + @discussion Write two bytes to the desired "Physical" IOSpace address. This function allows the developer to write to an address returned from any memory descriptor's getPhysicalSegment routine. + @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + @param value Data to be written to the desired location */ + +void IOMappedWrite16(IOPhysicalAddress address, UInt16 value); + +/*! @function IOMappedWrite32 + @abstract Write four bytes to the desired "Physical" IOSpace address. + @discussion Write four bytes to the desired "Physical" IOSpace address. This function allows the developer to write to an address returned from any memory descriptor's getPhysicalSegment routine. + @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + @param value Data to be written to the desired location */ + +void IOMappedWrite32(IOPhysicalAddress address, UInt32 value); + +/*! @function IOMappedWrite64 + @abstract Write eight bytes to the desired "Physical" IOSpace address. + @discussion Write eight bytes to the desired "Physical" IOSpace address. This function allows the developer to write to an address returned from any memory descriptor's getPhysicalSegment routine. + @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + @param value Data to be written to the desired location */ + +void IOMappedWrite64(IOPhysicalAddress address, UInt64 value); + /*! @function IOSetProcessorCacheMode @abstract Sets the processor cache mode for mapped memory. @discussion This function sets the cache mode of an already mapped & wired memory range. Note this may not be supported on I/O mappings or shared memory - it is far preferable to set the cache mode as mappings are created with the IOMemoryDescriptor::map method. @@ -292,8 +364,6 @@ extern mach_timespec_t IOZeroTvalspec; #endif /* __APPLE_API_OBSOLETE */ -#ifdef __cplusplus -} /* extern "C" */ -#endif +__END_DECLS #endif /* !__IOKIT_IOLIB_H */ diff --git a/iokit/IOKit/IOLocks.h b/iokit/IOKit/IOLocks.h index cc8bde3ae..69bd12794 100644 --- a/iokit/IOKit/IOLocks.h +++ b/iokit/IOKit/IOLocks.h @@ -136,7 +136,7 @@ void IOLockWakeup(IOLock * lock, void *event, bool oneThread) typedef enum { kIOLockStateUnlocked = 0, - kIOLockStateLocked = 1, + kIOLockStateLocked = 1 } IOLockState; void IOLockInitWithState( IOLock * lock, IOLockState state); diff --git a/iokit/IOKit/IOMapper.h b/iokit/IOKit/IOMapper.h new file mode 100644 index 000000000..14b9d3dd8 --- /dev/null +++ b/iokit/IOKit/IOMapper.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 1998-2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef __IOKIT_IOMAPPER_H +#define __IOKIT_IOMAPPER_H + +#include + +__BEGIN_DECLS +#include +#include + +// These are C accessors to the system mapper for non-IOKit clients +ppnum_t IOMapperIOVMAlloc(unsigned pages); +void IOMapperIOVMFree(ppnum_t addr, unsigned pages); + +ppnum_t IOMapperInsertPage(ppnum_t addr, unsigned offset, ppnum_t page); +void IOMapperInsertPPNPages(ppnum_t addr, unsigned offset, + ppnum_t *pageList, unsigned pageCount); +void IOMapperInsertUPLPages(ppnum_t addr, unsigned offset, + upl_page_info_t *pageList, unsigned pageCount); +__END_DECLS + +#if __cplusplus + +#include +#include + +class OSData; + +class IOMapper : public IOService +{ + OSDeclareAbstractStructors(IOMapper); + + // Give the platform expert access to setMapperRequired(); + friend class IOPlatformExpert; + +private: + enum SystemMapperState { + kNoMapper = 0, + kUnknown = 1, + kHasMapper = 2, // Any other value is pointer to a live mapper + kWaitMask = 3, + }; +protected: + void *fTable; + ppnum_t fTablePhys; + IOItemCount fTableSize; + OSData *fTableHandle; + bool fIsSystem; + + virtual bool start(IOService *provider); + virtual void free(); + + static void setMapperRequired(bool hasMapper); + static void waitForSystemMapper(); + + virtual bool initHardware(IOService *provider) = 0; + + virtual bool allocTable(IOByteCount size); + +public: + // Static routines capable of allocating tables that are physically + // contiguous in real memory space. + static OSData * NewARTTable(IOByteCount size, + void ** virtAddrP, ppnum_t *physAddrP); + static void FreeARTTable(OSData *handle, IOByteCount size); + + + // To get access to the system mapper IOMapper::gSystem + static IOMapper *gSystem; + + virtual ppnum_t iovmAlloc(IOItemCount pages) = 0; + virtual void iovmFree(ppnum_t addr, IOItemCount pages) = 0; + + virtual void iovmInsert(ppnum_t addr, IOItemCount offset, ppnum_t page) = 0; + virtual void iovmInsert(ppnum_t addr, IOItemCount offset, + ppnum_t *pageList, IOItemCount pageCount); + virtual void iovmInsert(ppnum_t addr, IOItemCount offset, + upl_page_info_t *pageList, IOItemCount pageCount); + static void checkForSystemMapper() + { if ((vm_address_t) gSystem & kWaitMask) waitForSystemMapper(); }; + + // Function will panic if the given address is not found in a valid + // iovm mapping. 
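The C accessors at the top of IOMapper.h wrap IOMapper::gSystem for non-C++ clients: reserve a run of I/O-virtual pages, back them with real pages, and release them when the transfer is done. A hedged sketch of the intended sequence for a single page; behaviour when no system mapper exists is platform-dependent:

    #include <IOKit/IOMapper.h>

    void dmaOnePage( ppnum_t physPage )
    {
        ppnum_t ioPage = IOMapperIOVMAlloc( 1 );     // reserve 1 I/O-virtual page
        if ( !ioPage )
            return;                                  // no mapper, or no space
        IOMapperInsertPage( ioPage, 0, physPage );   // back slot 0 with the real page
        // ... program the DMA engine with the I/O address of ioPage ...
        IOMapperIOVMFree( ioPage, 1 );               // tear the mapping down
    }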
+ virtual addr64_t mapAddr(IOPhysicalAddress addr) = 0; + +private: + OSMetaClassDeclareReservedUnused(IOMapper, 0); + OSMetaClassDeclareReservedUnused(IOMapper, 1); + OSMetaClassDeclareReservedUnused(IOMapper, 2); + OSMetaClassDeclareReservedUnused(IOMapper, 3); + OSMetaClassDeclareReservedUnused(IOMapper, 4); + OSMetaClassDeclareReservedUnused(IOMapper, 5); + OSMetaClassDeclareReservedUnused(IOMapper, 6); + OSMetaClassDeclareReservedUnused(IOMapper, 7); + OSMetaClassDeclareReservedUnused(IOMapper, 8); + OSMetaClassDeclareReservedUnused(IOMapper, 9); + OSMetaClassDeclareReservedUnused(IOMapper, 10); + OSMetaClassDeclareReservedUnused(IOMapper, 11); + OSMetaClassDeclareReservedUnused(IOMapper, 12); + OSMetaClassDeclareReservedUnused(IOMapper, 13); + OSMetaClassDeclareReservedUnused(IOMapper, 14); + OSMetaClassDeclareReservedUnused(IOMapper, 15); +}; + +#endif /* __cplusplus */ + +#endif /* !__IOKIT_IOMAPPER_H */ diff --git a/iokit/IOKit/IOMemoryDescriptor.h b/iokit/IOKit/IOMemoryDescriptor.h index bbcab9680..76f469a5e 100644 --- a/iokit/IOKit/IOMemoryDescriptor.h +++ b/iokit/IOKit/IOMemoryDescriptor.h @@ -25,9 +25,15 @@ #ifndef _IOMEMORYDESCRIPTOR_H #define _IOMEMORYDESCRIPTOR_H +#include + #include #include +__BEGIN_DECLS +#include +__END_DECLS + struct IOPhysicalRange { IOPhysicalAddress address; @@ -35,6 +41,7 @@ struct IOPhysicalRange }; class IOMemoryMap; +class IOMapper; /* * Direction of transfer, with respect to the described memory. @@ -44,9 +51,31 @@ enum IODirection kIODirectionNone = 0x0, // same as VM_PROT_NONE kIODirectionIn = 0x1, // User land 'read', same as VM_PROT_READ kIODirectionOut = 0x2, // User land 'write', same as VM_PROT_WRITE - kIODirectionOutIn = kIODirectionIn | kIODirectionOut, + kIODirectionOutIn = kIODirectionOut | kIODirectionIn, + kIODirectionInOut = kIODirectionIn | kIODirectionOut +}; + +/* + * IOOptionBits used in the second withRanges variant + */ +enum { + kIOMemoryDirectionMask = 0x00000007, + kIOMemoryAutoPrepare = 0x00000008, // Shared with Buffer MD + + kIOMemoryTypeVirtual = 0x00000010, + kIOMemoryTypePhysical = 0x00000020, + kIOMemoryTypeUPL = 0x00000030, + kIOMemoryTypeMask = 0x000000f0, + + kIOMemoryAsReference = 0x00000100, + kIOMemoryBufferPageable = 0x00000400, + kIOMemoryDontMap = 0x00000800, + kIOMemoryPersistent = 0x00010000 }; +#define kIOMapperNone ((IOMapper *) -1) +#define kIOMapperSystem ((IOMapper *) 0) + /*! @class IOMemoryDescriptor : public OSObject @abstract An abstract base class defining common methods for describing physical or virtual memory. @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */ @@ -78,7 +107,7 @@ protected: IOOptionBits _flags; void * _memEntry; - IODirection _direction; /* direction of transfer */ + IODirection _direction; /* DEPRECATED: use _flags instead. direction of transfer */ IOByteCount _length; /* length of all ranges */ IOOptionBits _tag; @@ -86,11 +115,26 @@ public: virtual IOPhysicalAddress getSourceSegment( IOByteCount offset, IOByteCount * length ); + OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 0); + +/*! @function initWithOptions + @abstract Master initialiser for all variants of memory descriptors. For a more complete description see IOMemoryDescriptor::withOptions. 
+ @discussion Note this function can be used to re-init a previously created memory descriptor. + @result true on success, false on failure. */ + virtual bool initWithOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper = 0); + OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 1); + + virtual addr64_t getPhysicalSegment64( IOByteCount offset, + IOByteCount * length ); + OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 2); private: - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 0); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3); OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4); OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5); @@ -160,11 +204,42 @@ public: @param asReference If false, the IOMemoryDescriptor object will make a copy of the ranges array, otherwise, the array will be used in situ, avoiding an extra allocation. @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - static IOMemoryDescriptor * withRanges(IOVirtualRange * ranges, - UInt32 withCount, - IODirection withDirection, - task_t withTask, - bool asReference = false); + static IOMemoryDescriptor * withRanges(IOVirtualRange * ranges, + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false); + +/*! @function withOptions + @abstract Master initialiser for all variants of memory descriptors. + @discussion This method creates and initializes an IOMemoryDescriptor for memory; it has three main variants: Virtual, Physical & mach UPL. These variants are selected with the options parameter, see below. This memory descriptor needs to be prepared before it can be used to extract data from the memory described. However, we have temporarily set up a mechanism that automatically prepares kernel_task memory descriptors at creation time. + + + @param buffers A pointer to an array of IOVirtualRanges or IOPhysicalRanges if the options:type is Virtual or Physical. For type UPL it is a upl_t returned by the mach/memory_object_types.h apis, primarily used internally by the UBC. + + @param count options:type = Virtual or Physical count contains a count of the number of entries in the buffers array. For options:type = UPL this field contains a total length. + + @param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl. + + @param task Only used when options:type = Virtual. The task each of the virtual ranges is mapped into. + + @param options + kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + kIOMemoryTypeMask (options:type) kIOMemoryTypeVirtual, kIOMemoryTypePhysical, kIOMemoryTypeUPL Indicates which type of basic memory descriptor to use. This sub-field also controls the interpretation of the buffers, count, offset & task parameters. + kIOMemoryAsReference For options:type = Virtual or Physical this indicates that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
+ kIOMemoryBufferPageable Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map. + kIOMemoryNoAutoPrepare Indicates that the temporary AutoPrepare of kernel_task memory should not be performed. + + @param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses. Defaults to 0 which indicates that the system mapper is to be used, if present. + + @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + + static IOMemoryDescriptor *withOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper = 0); /*! @function withPhysicalRanges @abstract Create an IOMemoryDescriptor to describe one or more physical ranges. @@ -178,7 +253,7 @@ public: static IOMemoryDescriptor * withPhysicalRanges( IOPhysicalRange * ranges, UInt32 withCount, - IODirection withDirection, + IODirection withDirection, bool asReference = false); /*! @function withSubRange @@ -190,10 +265,10 @@ public: @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. This is used over the direction of the parent descriptor. @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - static IOMemoryDescriptor * withSubRange(IOMemoryDescriptor * of, - IOByteCount offset, - IOByteCount length, - IODirection withDirection); + static IOMemoryDescriptor * withSubRange(IOMemoryDescriptor *of, + IOByteCount offset, + IOByteCount length, + IODirection withDirection); /*! @function initWithAddress @abstract Initialize or reinitialize an IOMemoryDescriptor to describe one virtual range of the kernel task. @@ -244,11 +319,11 @@ public: @param asReference If false, the IOMemoryDescriptor object will make a copy of the ranges array, otherwise, the array will be used in situ, avoiding an extra allocation. @result true on success, false on failure. */ - virtual bool initWithRanges( IOVirtualRange * ranges, - UInt32 withCount, - IODirection withDirection, - task_t withTask, - bool asReference = false) = 0; + virtual bool initWithRanges(IOVirtualRange * ranges, + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false) = 0; /*! @function initWithPhysicalRanges @abstract Initialize or reinitialize an IOMemoryDescriptor to describe one or more physical ranges. @@ -339,7 +414,7 @@ public: /*! @function prepare @abstract Prepare the memory for an I/O transfer. - @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. This method needn't called for non-pageable memory. + @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. Note that the prepare call is not thread safe and it is expected that the client will more easily be able to guarantee single threading a particular memory descriptor. @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. @result An IOReturn code. */ @@ -347,8 +422,8 @@ public: /*! 
@@ -347,8 +422,8 @@ public: /*! @function complete @abstract Complete processing of the memory after an I/O transfer finishes. - @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. - @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. + @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. On 10.3 or later systems the direction argument to complete is no longer respected; the direction is determined entirely at prepare() time. + @param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. @result An IOReturn code. */ virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0;
@@ -513,7 +588,7 @@ public: virtual IOReturn unmap() = 0; - virtual void taskDied() = 0; + virtual void taskDied() = 0; }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -524,8 +599,11 @@ public: // might be created by IOMemoryDescriptor::withAddress(), but there should be // no need to reference as anything but a generic IOMemoryDescriptor *. +// Also these flags should not overlap with the options to +// IOMemoryDescriptor::initWithOptions(... IOOptionBits options); + enum { - kIOMemoryRequiresWire = 0x00000001 + kIOMemoryPreparedReadOnly = 0x00008000, };
class IOGeneralMemoryDescriptor : public IOMemoryDescriptor
@@ -534,8 +612,8 @@ class IOGeneralMemoryDescriptor : public IOMemoryDescriptor protected: union { - IOVirtualRange * v; - IOPhysicalRange * p; + IOVirtualRange * v; + IOPhysicalRange * p; } _ranges; /* list of address ranges */ unsigned _rangesCount; /* number of address ranges in list */ bool _rangesIsAllocated; /* is list allocated by us? */
@@ -543,48 +621,70 @@ protected: task_t _task; /* task where all ranges are mapped to */ union { - IOVirtualRange v; - IOPhysicalRange p; + IOVirtualRange v; + IOPhysicalRange p; } _singleRange; /* storage space for a single range */ unsigned _wireCount; /* number of outstanding wires */ - vm_address_t _cachedVirtualAddress; /* a cached virtual-to-physical */ - IOPhysicalAddress _cachedPhysicalAddress; /* mapping, for optimization */ + /* DEPRECATED */ vm_address_t _cachedVirtualAddress; /* a cached virtual-to-physical */ + + /* DEPRECATED */ IOPhysicalAddress _cachedPhysicalAddress; bool _initialized; /* has superclass been initialized? */ virtual void free(); -protected: + +private: + // Internal API may be made virtual at some time in the future. + IOReturn wireVirtual(IODirection forDirection); + /* DEPRECATED */ IOByteCount _position; /* absolute position over all ranges */ /* DEPRECATED */ virtual void setPosition(IOByteCount position); -private: - /* DEPRECATED */ unsigned _positionAtIndex; /* range #n in which position is now */ - /* DEPRECATED */ IOByteCount _positionAtOffset; /* relative position within range #n */ +/* + * DEPRECATED unsigned _positionAtIndex; // range #n in which position is now + * + * Re-use the _positionAtIndex as a count of the number of pages in + * this memory descriptor. Conveniently vm_address_t is an unsigned integer + * type so I can get away without having to change the type.
+ */ + unsigned int _pages; + +/* DEPRECATED */ unsigned _positionAtOffset; // relative position within range #n + OSData *_memoryEntries; /* DEPRECATED */ vm_offset_t _kernPtrAligned; /* DEPRECATED */ unsigned _kernPtrAtIndex; /* DEPRECATED */ IOByteCount _kernSize; + /* DEPRECATED */ virtual void mapIntoKernel(unsigned rangeIndex); /* DEPRECATED */ virtual void unmapFromKernel(); - inline vm_map_t getMapForTask( task_t task, vm_address_t address ); public: /* * IOMemoryDescriptor required methods */ - virtual bool initWithAddress(void * address, - IOByteCount withLength, - IODirection withDirection); + // Master initialiser + virtual bool initWithOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper = 0); - virtual bool initWithAddress(vm_address_t address, + // Secondary initialisers + virtual bool initWithAddress(void * address, + IOByteCount withLength, + IODirection withDirection); + + virtual bool initWithAddress(vm_address_t address, IOByteCount withLength, - IODirection withDirection, - task_t withTask); + IODirection withDirection, + task_t withTask); virtual bool initWithPhysicalAddress( IOPhysicalAddress address,
@@ -643,10 +743,6 @@ protected: virtual void free(); - virtual bool initSubRange( IOMemoryDescriptor * parent, - IOByteCount offset, IOByteCount length, - IODirection withDirection ); - virtual bool initWithAddress(void * address, IOByteCount withLength, IODirection withDirection);
@@ -679,6 +775,18 @@ protected: IOMemoryDescriptor::withSubRange; public: + /* + * Initialize or reinitialize an IOSubMemoryDescriptor to describe + * a subrange of an existing descriptor. + * + * An IOSubMemoryDescriptor can be re-used by calling initSubRange + * again on an existing instance -- note that this behavior is not + * commonly supported in other IOKit classes, although it is here. + */ + virtual bool initSubRange( IOMemoryDescriptor * parent, + IOByteCount offset, IOByteCount length, + IODirection withDirection ); + /* * IOMemoryDescriptor required methods */
diff --git a/iokit/IOKit/IOMessage.h b/iokit/IOKit/IOMessage.h index 5df7bffe2..5e31f78da 100644 --- a/iokit/IOKit/IOMessage.h +++ b/iokit/IOKit/IOMessage.h
@@ -44,6 +44,8 @@ typedef UInt32 IOMessage; #define kIOMessageServiceBusyStateChange iokit_common_msg(0x120) +#define kIOMessageServicePropertyChange iokit_common_msg(0x130) + #define kIOMessageCanDevicePowerOff iokit_common_msg(0x200) #define kIOMessageDeviceWillPowerOff iokit_common_msg(0x210) #define kIOMessageDeviceWillNotPowerOff iokit_common_msg(0x220)
@@ -56,5 +58,6 @@ typedef UInt32 IOMessage; #define kIOMessageSystemWillNotSleep iokit_common_msg(0x290) #define kIOMessageSystemHasPoweredOn iokit_common_msg(0x300) #define kIOMessageSystemWillRestart iokit_common_msg(0x310) +#define kIOMessageSystemWillPowerOn iokit_common_msg(0x320) #endif /* ! __IOKIT_IOMESSAGE_H */
diff --git a/iokit/IOKit/IOPMEventSource.h b/iokit/IOKit/IOPMEventSource.h new file mode 100644 index 000000000..5d4dddfcb --- /dev/null +++ b/iokit/IOKit/IOPMEventSource.h
@@ -0,0 +1,59 @@ +/* + * Copyright (c) 2001-2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License.
Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + #ifndef _IOPMEVENTSOURCE_H_ + #define _IOPMEVENTSOURCE_H_ + + #include <IOKit/IOWorkLoop.h> + #include <IOKit/IOEventSource.h> + + // Queue of requested states + typedef struct { + unsigned long state; + void *next; + } ActivityTickleStateList; + + class IOPMEventSource : public IOEventSource + { + OSDeclareDefaultStructors(IOPMEventSource); + +protected: + virtual bool checkForWork(void); + + ActivityTickleStateList *states; + +public: + typedef void (*Action)(OSObject *owner, unsigned long state); + + // static initialiser + static IOPMEventSource *PMEventSource(OSObject *owner, Action action); + + virtual bool init(OSObject *owner, Action action); + + // Enqueues an activityTickle request to be executed on the workloop + virtual IOReturn activityTickleOccurred(unsigned long); + }; + + #endif /* _IOPMEVENTSOURCE_H_ */
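[Illustrative sketch, not part of the patch: one plausible use of the new IOPMEventSource by a driver that owns a work loop and wants activityTickle requests from primary interrupt paths delivered on that work loop. MyDriver, fWorkLoop, fPMES and kMyOnState are assumed names; the Action signature is the typedef declared above, and forwarding to IOService::activityTickle() is only one possible action.]

    static void myTickleAction(OSObject * owner, unsigned long state)
    {
        // Runs on the driver's work loop thread.
        ((IOService *) owner)->activityTickle(kIOPMSuperclassPolicy1, state);
    }

    bool MyDriver::installPMEventSource(void)
    {
        fPMES = IOPMEventSource::PMEventSource(this, &myTickleAction);
        if (!fPMES || (kIOReturnSuccess != fWorkLoop->addEventSource(fPMES)))
            return false;
        return true;
    }

    // From an interrupt or timer path, queue the request for the work loop:
    //     fPMES->activityTickleOccurred(kMyOnState);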
diff --git a/iokit/IOKit/IOPlatformExpert.h b/iokit/IOKit/IOPlatformExpert.h index 079c1019a..2ea98c9a8 100644 --- a/iokit/IOKit/IOPlatformExpert.h +++ b/iokit/IOKit/IOPlatformExpert.h
@@ -63,6 +63,8 @@ extern void PESetGMTTimeOfDay( long secs ); #ifdef __cplusplus } /* extern "C" */ +#define kIOPlatformMapperPresentKey "IOPlatformMapperPresent" + extern OSSymbol * gPlatformInterruptControllerName; class IORangeAllocator;
@@ -152,9 +154,10 @@ public: virtual IOByteCount savePanicInfo(UInt8 *buffer, IOByteCount length); + virtual OSString* createSystemSerialNumberString(OSData* myProperty); + OSMetaClassDeclareReservedUsed(IOPlatformExpert, 0); - - OSMetaClassDeclareReservedUnused(IOPlatformExpert, 1); + OSMetaClassDeclareReservedUsed(IOPlatformExpert, 1); OSMetaClassDeclareReservedUnused(IOPlatformExpert, 2); OSMetaClassDeclareReservedUnused(IOPlatformExpert, 3); OSMetaClassDeclareReservedUnused(IOPlatformExpert, 4);
@@ -232,6 +235,7 @@ public: IOByteCount length); virtual IOByteCount savePanicInfo(UInt8 *buffer, IOByteCount length); + virtual OSString* createSystemSerialNumberString(OSData* myProperty); OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 0); OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 1);
diff --git a/iokit/IOKit/IOReturn.h b/iokit/IOKit/IOReturn.h index ae96524e4..3495ed1c1 100644 --- a/iokit/IOKit/IOReturn.h +++ b/iokit/IOKit/IOReturn.h
@@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ *
@@ -23,10 +23,7 @@ * @APPLE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. - * * HISTORY - * */ /*
@@ -51,6 +48,9 @@ typedef kern_return_t IOReturn; #define sub_iokit_usb err_sub(1) #define sub_iokit_firewire err_sub(2) #define sub_iokit_block_storage err_sub(4) +#define sub_iokit_graphics err_sub(5) +#define sub_iokit_bluetooth err_sub(8) +#define sub_iokit_pmu err_sub(9) #define sub_iokit_reserved err_sub(-1) #define iokit_common_err(return) (sys_iokit|sub_iokit_common|return) #define iokit_family_err(sub,return) (sys_iokit|sub|return)
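[Illustrative sketch, not part of the patch: with the subsystem codes above, a family header can mint its own IOReturn values through iokit_family_err(); the constant below is hypothetical.]

    // Hypothetical error code for a driver in the new graphics subsystem:
    #define kMyGfxBadSurfaceErr  iokit_family_err(sub_iokit_graphics, 0x01)

    // Callers test it like any other IOReturn value:
    //     if (ret == kMyGfxBadSurfaceErr) { ... }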
diff --git a/iokit/IOKit/IOService.h b/iokit/IOKit/IOService.h index e9f15a5c5..53b4f8599 100644 --- a/iokit/IOKit/IOService.h +++ b/iokit/IOKit/IOService.h
@@ -1795,30 +1795,38 @@ private: IOReturn notifyAll ( bool is_prechange ); bool notifyChild ( IOPowerConnection * nextObject, bool is_prechange ); bool inform ( IOPMinformee * nextObject, bool is_prechange ); - void our_prechange_03 ( void ); - void our_prechange_04 ( void ); - void our_prechange_05 ( void ); - void our_prechange_1 ( void ); - void our_prechange_2 ( void ); - void our_prechange_3 ( void ); - void our_prechange_4 ( void ); - IOReturn parent_down_0 ( void ); - IOReturn parent_down_02 ( void ); - void parent_down_04 ( void ); - void parent_down_05 ( void ); - IOReturn parent_down_1 ( void ); - IOReturn parent_down_2 ( void ); - void parent_down_3 ( void ); - void parent_down_4 ( void ); - void parent_down_5 ( void ); - void parent_down_6 ( void ); - void parent_up_0 ( void ); - IOReturn parent_up_1 ( void ); - IOReturn parent_up_2 ( void ); - IOReturn parent_up_3 ( void ); - void parent_up_4 ( void ); - void parent_up_5 ( void ); - void parent_up_6 ( void ); + + // Power Management state machine + // power change initiated by driver + void OurChangeTellClientsPowerDown ( void ); + void OurChangeTellPriorityClientsPowerDown ( void ); + void OurChangeNotifyInterestedDriversWillChange ( void ); + void OurChangeSetPowerState ( void ); + void OurChangeWaitForPowerSettle ( void ); + void OurChangeNotifyInterestedDriversDidChange ( void ); + void OurChangeFinish ( void ); + + // downward power change initiated by a power parent + IOReturn ParentDownTellPriorityClientsPowerDown_Immediate ( void ); + IOReturn ParentDownNotifyInterestedDriversWillChange_Immediate ( void ); + void ParentDownTellPriorityClientsPowerDown_Delayed ( void ); + void ParentDownNotifyInterestedDriversWillChange_Delayed ( void ); + IOReturn ParentDownSetPowerState_Immediate ( void ); + IOReturn ParentDownWaitForPowerSettleAndNotifyDidChange_Immediate ( void ); + void ParentDownNotifyDidChangeAndAcknowledgeChange_Delayed ( void ); + void ParentDownSetPowerState_Delayed ( void ); + void ParentDownWaitForPowerSettle_Delayed ( void ); + void ParentDownAcknowledgeChange_Delayed ( void ); + + // upward power change initiated by a power parent + void ParentUpSetPowerState_Delayed ( void ); + IOReturn ParentUpSetPowerState_Immediate ( void ); + IOReturn ParentUpWaitForSettleTime_Immediate ( void ); + IOReturn ParentUpNotifyInterestedDriversDidChange_Immediate ( void ); + void ParentUpWaitForSettleTime_Delayed ( void ); + void ParentUpNotifyInterestedDriversDidChange_Delayed ( void ); + void ParentUpAcknowledgePowerChange_Delayed ( void ); + void all_done ( void ); void all_acked ( void ); void driver_acked ( void );
diff --git a/iokit/IOKit/IOSharedLock.h b/iokit/IOKit/IOSharedLock.h index 803ec162c..0899e775f 100644 --- a/iokit/IOKit/IOSharedLock.h +++ b/iokit/IOKit/IOSharedLock.h
@@ -58,9 +58,12 @@ extern "C" { typedef volatile int IOSharedLockData; typedef IOSharedLockData * IOSharedLock; -#define IOSpinLockInit(l) (*(l) = (IOSpinLockData)0) +#define IOSpinLockInit(l) (*(l) = (IOSharedLockData)0) +#ifndef KERNEL extern void IOSpinLock(IOSharedLock l); +#endif + extern void IOSpinUnlock(IOSharedLock l); extern boolean_t IOTrySpinLock(IOSharedLock l);
@@ -73,7 +76,10 @@ typedef ev_lock_data_t * ev_lock_t; // needs isync? //#define ev_is_locked(l) (*(l) != (ev_lock_data_t)0) +#ifndef KERNEL extern void ev_lock(ev_lock_t l); // Spin lock! +#endif + extern void ev_unlock(ev_lock_t l); extern boolean_t ev_try_lock(ev_lock_t l);
diff --git a/iokit/IOKit/IOTypes.h b/iokit/IOKit/IOTypes.h index 4feb8f5be..c7f5e05e8 100644 --- a/iokit/IOKit/IOTypes.h +++ b/iokit/IOKit/IOTypes.h
@@ -171,19 +171,21 @@ enum { kIODefaultCache = 0, kIOInhibitCache = 1, kIOWriteThruCache = 2, - kIOCopybackCache = 3 + kIOCopybackCache = 3, + kIOWriteCombineCache = 4 }; // IOMemory mapping options enum { kIOMapAnywhere = 0x00000001, - kIOMapCacheMask = 0x00000300, + kIOMapCacheMask = 0x00000700, kIOMapCacheShift = 8, - kIOMapDefaultCache = kIODefaultCache << kIOMapCacheShift, - kIOMapInhibitCache = kIOInhibitCache << kIOMapCacheShift, - kIOMapWriteThruCache = kIOWriteThruCache << kIOMapCacheShift, - kIOMapCopybackCache = kIOCopybackCache << kIOMapCacheShift, + kIOMapDefaultCache = kIODefaultCache << kIOMapCacheShift, + kIOMapInhibitCache = kIOInhibitCache << kIOMapCacheShift, + kIOMapWriteThruCache = kIOWriteThruCache << kIOMapCacheShift, + kIOMapCopybackCache = kIOCopybackCache << kIOMapCacheShift, + kIOMapWriteCombineCache = kIOWriteCombineCache << kIOMapCacheShift, kIOMapUserOptionsMask = 0x00000fff,
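[Illustrative sketch, not part of the patch: requesting the new write-combined cache mode when mapping a memory descriptor. md is an assumed, already-created IOMemoryDescriptor; whether the mode is honoured is platform dependent.]

    // Sketch only: map anywhere in the current task, write-combined.
    IOMemoryMap * map = md->map(current_task(), 0,
                                kIOMapAnywhere | kIOMapWriteCombineCache);
    if (map) {
        IOVirtualAddress va = map->getVirtualAddress();
        // ... streaming stores through va may be combined by the CPU ...
        map->release();
    }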
diff --git a/iokit/IOKit/IOUserClient.h b/iokit/IOKit/IOUserClient.h index 117db5259..ef62b0c46 100644 --- a/iokit/IOKit/IOUserClient.h +++ b/iokit/IOKit/IOUserClient.h
@@ -40,7 +40,7 @@ enum { kIOUCScalarIScalarO = 0, kIOUCScalarIStructO = 2, kIOUCStructIStructO = 3, - kIOUCScalarIStructI = 4, + kIOUCScalarIStructI = 4 }; typedef IOReturn (IOService::*IOMethod)(void * p1, void * p2, void * p3,
diff --git a/iokit/IOKit/Makefile b/iokit/IOKit/Makefile index 2ae91e0c7..f9a6e8586 100644 --- a/iokit/IOKit/Makefile +++ b/iokit/IOKit/Makefile
@@ -11,7 +11,6 @@ include $(MakeInc_cmd) include $(MakeInc_def) INSTINC_SUBDIRS = \ - adb \ nvram \ platform \ power \
@@ -29,7 +28,8 @@ EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} -NOT_EXPORT_HEADERS = +NOT_EXPORT_HEADERS = IOKitKeysPrivate.h + NOT_LOCAL_HEADERS = ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h))
@@ -38,7 +38,7 @@ INSTALL_MI_LIST = IOBSD.h IOKitKeys.h IOKitServer.h IOReturn.h\ IOSharedLock.h IOTypes.h OSMessageNotification.h\ IODataQueueShared.h IOMessage.h -INSTALL_MI_LCL_LIST = "" +INSTALL_MI_LCL_LIST = IOKitKeysPrivate.h INSTALL_MI_DIR = .
diff --git a/iokit/IOKit/adb/IOADBBus.h b/iokit/IOKit/adb/IOADBBus.h deleted file mode 100644 index 4f8c3fb48..000000000 --- a/iokit/IOKit/adb/IOADBBus.h +++ /dev/null
@@ -1,148 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 - * All Rights Reserved - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose and without fee is hereby granted, - * provided that the above copyright notice appears in all copies and - * that both the copyright notice and this permission notice appear in - * supporting documentation. - * - * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, - * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION - * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - * - */ -/* - * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 - * All Rights Reserved - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose and without fee is hereby granted, - * provided that the above copyright notice appears in all copies and - * that both the copyright notice and this permission notice appear in - * supporting documentation. - * - * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, - * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION - * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -/* - * MKLINUX-1.0DR2 - */ -/* - * 18 June 1998 sdouglas Start IOKit version.
- * 23 Nov 1998 suurballe Port to C++ - */ - -#ifndef _IOKIT_IOADBBUS_H -#define _IOKIT_IOADBBUS_H - -#include -#include - -class IOADBDevice; - -#define ADB_DEVICE_COUNT 16 - -#define ADB_FLAGS_PRESENT 0x00000001 /* Device is present */ -#define ADB_FLAGS_REGISTERED 0x00000002 /* Device has a handler */ -#define ADB_FLAGS_UNRESOLVED 0x00000004 /* Device has not been fully probed */ - -/* - * ADB Commands - */ - -#define ADB_DEVCMD_SELF_TEST 0xff -#define ADB_DEVCMD_CHANGE_ID 0xfe -#define ADB_DEVCMD_CHANGE_ID_AND_ACT 0xfd -#define ADB_DEVCMD_CHANGE_ID_AND_ENABLE 0x00 - -/* - * ADB IORegistryEntry properties - */ - -#define ADBaddressProperty "address" -#define ADBhandlerIDProperty "handler id" -#define ADBdefAddressProperty "default address" -#define ADBdefHandlerProperty "default handler id" -#define ADBnameProperty "name" - - -struct ADBDeviceControl { - IOADBAddress address; - IOADBAddress defaultAddress; - UInt8 handlerID; - UInt8 defaultHandlerID; - UInt32 flags; - IOService * owner; - ADB_callback_func handler; - IOADBDevice * nub; -}; - -typedef struct ADBDeviceControl ADBDeviceControl; - - -class IOADBBus: public IOService -{ -OSDeclareAbstractStructors(IOADBBus) - -public: - -ADBDeviceControl * adbDevices[ ADB_DEVICE_COUNT ]; - -virtual bool init ( OSDictionary * properties = 0 ); -virtual bool matchNubWithPropertyTable( IOService * device, OSDictionary * propertyTable ) = 0; -virtual IOReturn setOwner ( void * device, IOService * client, ADB_callback_func handler ) = 0; -virtual IOReturn clearOwner ( void * device ) = 0; -virtual IOReturn flush ( ADBDeviceControl * busRef ) = 0; -virtual IOReturn readRegister ( ADBDeviceControl * busRef, IOADBRegister adbRegister, - UInt8 * data, IOByteCount * length ) = 0; -virtual IOReturn writeRegister ( ADBDeviceControl * busRef, IOADBRegister adbRegister, - UInt8 * data, IOByteCount * length ) = 0; -virtual IOADBAddress address ( ADBDeviceControl * busRef ) = 0; -virtual IOADBAddress defaultAddress ( ADBDeviceControl * busRef ) = 0; -virtual UInt8 handlerID ( ADBDeviceControl * busRef ) = 0; -virtual UInt8 defaultHandlerID ( ADBDeviceControl * busRef ) = 0; -virtual IOReturn setHandlerID ( ADBDeviceControl * busRef, UInt8 handlerID ) = 0; - -}; - -#endif /* ! _IOKIT_IOADBBUS_H */ - diff --git a/iokit/IOKit/adb/IOADBController.h b/iokit/IOKit/adb/IOADBController.h deleted file mode 100644 index 886fb3fda..000000000 --- a/iokit/IOKit/adb/IOADBController.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * 18 June 1998 sdouglas Start IOKit version. - * 12 Nov 1998 suurballe Port objc protocol to c++ abstract class. - */ -#ifndef _IOKIT_ADBCONTROLLER_H -#define _IOKIT_ADBCONTROLLER_H - -#include -#include -#include - -extern "C" { -#include -} - -// referenced in subclasses: -void autopollHandler ( IOService *, UInt8, IOByteCount, UInt8 * ); - -class IOADBDevice; - -/* - * Results - */ - -#define ADB_RET_OK 0 /* Successful */ -#define ADB_RET_INUSE 1 /* ADB Device in use */ -#define ADB_RET_NOTPRESENT 2 /* ADB Device not present */ -#define ADB_RET_TIMEOUT 3 /* ADB Timeout */ -#define ADB_RET_UNEXPECTED_RESULT 4 /* Unknown result */ -#define ADB_RET_REQUEST_ERROR 5 /* Packet Request Error */ -#define ADB_RET_BUS_ERROR 6 /* ADB Bus Error */ - -class IOPMrootDomain; - -class IOADBController: public IOADBBus -{ -OSDeclareAbstractStructors(IOADBController) - -public: - - bool start ( IOService * nub ); - IOReturn setOwner ( void * device, IOService * client, ADB_callback_func handler ); - virtual IOReturn claimDevice ( unsigned long, IOService *, ADB_callback_func ); - virtual IOReturn releaseDevice ( unsigned long ); - virtual IOReturn readDeviceForUser(unsigned long, unsigned long, UInt8 *, IOByteCount *); - virtual IOReturn writeDeviceForUser(unsigned long, unsigned long, UInt8 *, IOByteCount *); - virtual IOReturn setAutoPollPeriod (int microseconds) = 0; - virtual IOReturn getAutoPollPeriod (int * microseconds) = 0; - virtual IOReturn setAutoPollList(UInt16 activeAddressMask) = 0; - virtual IOReturn getAutoPollList(UInt16 * activeAddressMask) = 0; - virtual IOReturn setAutoPollEnable(bool enable) = 0; - virtual IOReturn resetBus(void) = 0; - virtual IOReturn cancelAllIO(void) = 0; - virtual IOReturn flushDevice(IOADBAddress address) = 0; - virtual IOReturn readFromDevice(IOADBAddress address, IOADBRegister adbRegister, - UInt8 * data, IOByteCount * length) = 0; - virtual IOReturn writeToDevice(IOADBAddress address, IOADBRegister adbRegister, - UInt8 * data, IOByteCount * length) = 0; - void packet ( UInt8 * data, IOByteCount length, UInt8 adbCommand ); - - IOReturn flush ( ADBDeviceControl * busRef ); - IOReturn readRegister ( ADBDeviceControl * busRef, IOADBRegister adbRegister, - UInt8 * data, IOByteCount * length ); - IOReturn writeRegister ( ADBDeviceControl * busRef, IOADBRegister adbRegister, - UInt8 * data, IOByteCount * length ); - IOADBAddress address ( ADBDeviceControl * busRef ); - IOADBAddress defaultAddress ( ADBDeviceControl * busRef ); - UInt8 handlerID ( ADBDeviceControl * busRef ); - UInt8 defaultHandlerID ( ADBDeviceControl * busRef ); - IOReturn setHandlerID ( ADBDeviceControl * busRef, UInt8 handlerID ); - bool matchNubWithPropertyTable( IOService * device, OSDictionary * propertyTable ); - IOReturn newUserClient( task_t, void *, UInt32, IOUserClient ** ); - IOReturn powerStateWillChangeTo ( IOPMPowerFlags, unsigned long, IOService*); - IOReturn powerStateDidChangeTo ( IOPMPowerFlags, unsigned long, IOService*); - IOReturn probeBus ( void ); - -IOReturn clearOwner ( void * ); - -IOPMrootDomain * rootDomain; - -private: - - bool claimed_devices[16]; // true if a device has been claimed by user - - bool probeAddress ( IOADBAddress addr ); - bool moveDeviceFrom ( IOADBAddress from, IOADBAddress to, bool check ); - unsigned int firstBit ( unsigned int mask ); - int getURLComponentUnit ( IOService * device, char * path, int maxLen ); - bool busProbed; - thread_call_t probeThread; -}; - -#endif /* ! 
_IOKIT_ADBCONTROLLER_H */ diff --git a/iokit/IOKit/adb/IOADBDevice.h b/iokit/IOKit/adb/IOADBDevice.h deleted file mode 100644 index 71c697131..000000000 --- a/iokit/IOKit/adb/IOADBDevice.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -#ifndef IOADBDEVICE_H -#define IOADBDEVICE_H - -#include -#include -#include - -class IOADBBus; - - -class IOADBDevice : public IOService -{ -OSDeclareDefaultStructors(IOADBDevice) - -private: - -IOADBBus * bus; -ADBDeviceControl * fBusRef; - -public: - -bool init ( OSDictionary * regEntry, ADBDeviceControl * us ); -bool attach ( IOADBBus * controller ); -virtual bool matchPropertyTable( OSDictionary * table ); -bool seizeForClient ( IOService * client, ADB_callback_func handler ); -void releaseFromClient ( IORegistryEntry * client ); -IOReturn flush ( void ); -IOReturn readRegister ( IOADBRegister adbRegister, UInt8 * data, IOByteCount * length ); -IOReturn writeRegister ( IOADBRegister adbRegister, UInt8 * data, IOByteCount * length ); -IOADBAddress address ( void ); -IOADBAddress defaultAddress ( void ); -UInt8 handlerID ( void ); -UInt8 defaultHandlerID ( void ); -IOReturn setHandlerID ( UInt8 handlerID ); -void * busRef ( void ); - -}; - -#endif - diff --git a/iokit/IOKit/adb/Makefile b/iokit/IOKit/adb/Makefile deleted file mode 100644 index d6b386d64..000000000 --- a/iokit/IOKit/adb/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd -export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def -export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule -export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir - -IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A -export INCDIR = $(IOKIT_FRAMEDIR)/Headers -export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders - -include $(MakeInc_cmd) -include $(MakeInc_def) - -MI_DIR = adb -# NOT_EXPORT_HEADERS = adb.h - -INSTINC_SUBDIRS = -INSTINC_SUBDIRS_PPC = -INSTINC_SUBDIRS_I386 = - -EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} -EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} -EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} - -ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) - -INSTALL_MI_LIST = IOADBLib.h -INSTALL_MI_LCL_LIST = "" - -INSTALL_MI_DIR = $(MI_DIR) - -EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) - -EXPORT_MI_DIR = IOKit/$(MI_DIR) - -include $(MakeInc_rule) -include $(MakeInc_dir) diff --git a/iokit/IOKit/i386/IOSharedLockImp.h b/iokit/IOKit/i386/IOSharedLockImp.h index 0478bc429..20be6ebf5 100644 --- 
a/iokit/IOKit/i386/IOSharedLockImp.h +++ b/iokit/IOKit/i386/IOSharedLockImp.h @@ -75,16 +75,15 @@ #ifndef KERNEL LEAF(_ev_lock, 0) LEAF(_IOSpinLock, 0) - push %eax - push %ecx - movl $1, %ecx - movl 12(%esp), %eax -_spin: - xchgl %ecx,0(%eax) - cmp $0, %ecx - jne _spin - pop %ecx - pop %eax + movl 4(%esp), %ecx +0: + xorl %eax, %eax + rep + nop /* pause for hyperthreaded CPU's */ + lock + cmpxchgl %ecx, (%ecx) + jne 0b + ret END(_ev_lock) #endif @@ -97,11 +96,10 @@ END(_ev_lock) */ LEAF(_ev_unlock, 0) LEAF(_IOSpinUnlock, 0) - push %eax - movl 8(%esp),%eax - movl $0,0(%eax) + movl 4(%esp), %ecx + movl $0, (%ecx) ENABLE_PREEMPTION() - pop %eax + ret END(_ev_unlock) @@ -117,9 +115,11 @@ END(_ev_unlock) LEAF(_ev_try_lock, 0) LEAF(_IOTrySpinLock, 0) DISABLE_PREEMPTION() - movl 4(%esp), %eax - lock;bts $0, 0(%eax) - jb 1f + movl 4(%esp), %ecx + xorl %eax, %eax + lock + cmpxchgl %ecx, (%ecx) + jne 1f movl $1, %eax /* yes */ ret 1: diff --git a/iokit/IOKit/pci/IOPCIDevice.h b/iokit/IOKit/pci/IOPCIDevice.h index 0d340006e..63af58860 100644 --- a/iokit/IOKit/pci/IOPCIDevice.h +++ b/iokit/IOKit/pci/IOPCIDevice.h @@ -34,7 +34,7 @@ union IOPCIAddressSpace { UInt32 bits; struct { -#if __BIG_ENDIAN__ +#ifdef __BIG_ENDIAN__ unsigned int reloc:1; unsigned int prefetch:1; unsigned int t:1; @@ -44,7 +44,7 @@ union IOPCIAddressSpace { unsigned int deviceNum:5; unsigned int functionNum:3; unsigned int registerNum:8; -#elif __LITTLE_ENDIAN__ +#elif defined(__LITTLE_ENDIAN__) unsigned int registerNum:8; unsigned int functionNum:3; unsigned int deviceNum:5; diff --git a/iokit/IOKit/ppc/IODBDMA.h b/iokit/IOKit/ppc/IODBDMA.h index 4c12490f6..5a49f4b84 100644 --- a/iokit/IOKit/ppc/IODBDMA.h +++ b/iokit/IOKit/ppc/IODBDMA.h @@ -115,7 +115,7 @@ enum { kdbdmaStoreQuad = 4, kdbdmaLoadQuad = 5, kdbdmaNop = 6, - kdbdmaStop = 7, + kdbdmaStop = 7 }; diff --git a/iokit/IOKit/ppc/IOSharedLockImp.h b/iokit/IOKit/ppc/IOSharedLockImp.h index 44ae776b1..c6c062fd5 100644 --- a/iokit/IOKit/ppc/IOSharedLockImp.h +++ b/iokit/IOKit/ppc/IOSharedLockImp.h @@ -96,33 +96,47 @@ #ifndef KERNEL LEAF(_ev_lock) - li a6,1 // lock value - lwarx a7,0,a0 // CEMV10 -9: - sync - lwarx a7,0,a0 // read the lock - cmpwi cr0,a7,0 // is it busy? - bne- 9b // yes, spin - sync - stwcx. a6,0,a0 // try to get the lock - bne- 9b // failed, try again - isync - blr // got it, return + + li a6,1 // lock value + +8: lwz a7,0(a0) // Get lock word + mr. a7,a7 // Is it held? + bne-- 8b // Yup... + +9: lwarx a7,0,a0 // read the lock + mr. a7,a7 // Is it held? + bne-- 7f // yes, kill reservation + stwcx. a6,0,a0 // try to get the lock + bne-- 9b // failed, try again + isync + blr // got it, return + +7: li a7,-4 // Point to a spot in the red zone + stwcx. a7,a7,r1 // Kill reservation + b 8b // Go wait some more... + + END(_ev_lock) LEAF(_IOSpinLock) - li a6,1 // lock value - lwarx a7,0,a0 // CEMV10 -9: - sync - lwarx a7,0,a0 // read the lock - cmpwi cr0,a7,0 // is it busy? - bne- 9b // yes, spin - sync - stwcx. a6,0,a0 // try to get the lock - bne- 9b // failed, try again - isync - blr // got it, return + + li a6,1 // lock value + +8: lwz a7,0(a0) // Get lock word + mr. a7,a7 // Is it held? + bne-- 8b // Yup... + +9: lwarx a7,0,a0 // read the lock + mr. a7,a7 // Is it held? + bne-- 7f // yes, kill reservation + stwcx. a6,0,a0 // try to get the lock + bne-- 9b // failed, try again + isync + blr // got it, return + +7: li a7,-4 // Point to a spot in the red zone + stwcx. a7,a7,r1 // Kill reservation + b 8b // Go wait some more... 
END(_IOSpinLock) #endif
@@ -159,45 +173,61 @@ END(_IOSpinUnlock) */ LEAF(_ev_try_lock) - li a6,1 // lock value - DISABLE_PREEMPTION() - lwarx a7,0,a0 // CEMV10 -8: - sync - lwarx a7,0,a0 // read the lock - cmpwi cr0,a7,0 // is it busy? - bne- 9f // yes, give up - sync - stwcx. a6,0,a0 // try to get the lock - bne- 8b // failed, try again - li a0,1 // return TRUE - isync - blr -9: - ENABLE_PREEMPTION() - li a0,0 // return FALSE - blr + + DISABLE_PREEMPTION() + + li a6,1 // lock value + + lwz a7,0(a0) // Get lock word + mr. a7,a7 // Is it held? + bne-- 6f // Yup... + +9: lwarx a7,0,a0 // read the lock + mr. a7,a7 // Is it held? + bne-- 7f // yes, kill reservation + stwcx. a6,0,a0 // try to get the lock + bne-- 9b // failed, try again + li a0,1 // return TRUE + isync + blr // got it, return + +7: li a7,-4 // Point to a spot in the red zone + stwcx. a7,a7,r1 // Kill reservation + +6: + ENABLE_PREEMPTION() + li a0,0 // return FALSE + blr + END(_ev_try_lock) LEAF(_IOTrySpinLock) - li a6,1 // lock value - DISABLE_PREEMPTION() - lwarx a7,0,a0 // CEMV10 -8: - sync - lwarx a7,0,a0 // read the lock - cmpwi cr0,a7,0 // is it busy? - bne- 9f // yes, give up - sync - stwcx. a6,0,a0 // try to get the lock - bne- 8b // failed, try again - li a0,1 // return TRUE - isync - blr -9: - ENABLE_PREEMPTION() - li a0,0 // return FALSE - blr + + DISABLE_PREEMPTION() + + li a6,1 // lock value + + lwz a7,0(a0) // Get lock word + mr. a7,a7 // Is it held? + bne-- 6f // Yup... + +9: lwarx a7,0,a0 // read the lock + mr. a7,a7 // Is it held? + bne-- 7f // yes, kill reservation + stwcx. a6,0,a0 // try to get the lock + bne-- 9b // failed, try again + li a0,1 // return TRUE + isync + blr // got it, return + +7: li a7,-4 // Point to a spot in the red zone + stwcx. a7,a7,r1 // Kill reservation + +6: + ENABLE_PREEMPTION() + li a0,0 // return FALSE + blr + END(_IOTrySpinLock) #endif /* ! _IOKIT_IOSHAREDLOCKIMP_H */
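[Illustrative sketch, not part of the patch: the shape of the algorithm both rewritten lock implementations now share. They spin on plain loads and attempt the atomic update only once the lock looks free, keeping lock traffic off the bus while contended. compare_and_swap() below is pseudocode standing in for cmpxchgl on i386 and lwarx/stwcx. on PPC; the PPC paths additionally cancel their reservation (the stwcx. into the red zone) before resuming the plain-load spin.]

    // Pseudocode sketch, not literal kernel code.
    static void spin_lock_sketch(volatile int * l)
    {
        for (;;) {
            while (*l != 0)
                ;                            // spin on plain reads (i386 adds pause)
            if (compare_and_swap(l, 0, 1))   // single atomic attempt
                return;                      // acquired (PPC issues isync here)
            // lost the race: cancel any reservation, resume spinning
        }
    }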
diff --git a/iokit/IOKit/pwr_mgt/IOPM.h b/iokit/IOKit/pwr_mgt/IOPM.h index d873fa815..ed787c342 100644 --- a/iokit/IOKit/pwr_mgt/IOPM.h +++ b/iokit/IOKit/pwr_mgt/IOPM.h
@@ -285,6 +285,7 @@ enum { #define kAppleClamshellStateKey "AppleClamshellState" #define kIOREMSleepEnabledKey "REMSleepEnabled" +// Strings for deciphering the dictionary returned from IOPMCopyBatteryInfo #define kIOBatteryInfoKey "IOBatteryInfo" #define kIOBatteryCurrentChargeKey "Current" #define kIOBatteryCapacityKey "Capacity"
@@ -298,6 +299,15 @@ enum { kIOBatteryChargerConnect = (1 << 0) }; +// Private power management message indicating battery data has changed +// Indicates new data resides in the IORegistry +#define kIOPMMessageBatteryStatusHasChanged iokit_family_msg(sub_iokit_pmu, 0x100) + +// Apple private Legacy messages for re-routing AutoWake and AutoPower messages to the PMU +// through newer user space IOPMSchedulePowerEvent API +#define kIOPMUMessageLegacyAutoWake iokit_family_msg(sub_iokit_pmu, 0x200) +#define kIOPMUMessageLegacyAutoPower iokit_family_msg(sub_iokit_pmu, 0x210) + // These flags are deprecated. Use the version with the kIOPM prefix below. enum { kACInstalled = kIOBatteryChargerConnect,
@@ -328,6 +338,9 @@ enum { }; +// ********************************************** +// Internal power management data structures +// ********************************************** #if KERNEL && __cplusplus class IOService;
diff --git a/iokit/IOKit/pwr_mgt/IOPMPrivate.h b/iokit/IOKit/pwr_mgt/IOPMPrivate.h index ede7da358..9e7106ffe 100644 --- a/iokit/IOKit/pwr_mgt/IOPMPrivate.h +++ b/iokit/IOKit/pwr_mgt/IOPMPrivate.h
@@ -35,7 +35,7 @@ enum { // don't sleep on clamshell closure on a portable with AC connected kIOPMSetDesktopMode = (1<<17), // set state of AC adaptor connected - kIOPMSetACAdaptorConnected = (1<<18), + kIOPMSetACAdaptorConnected = (1<<18) }; #endif /* ! _IOKIT_IOPMPRIVATE_H */
diff --git a/iokit/IOKit/pwr_mgt/RootDomain.h b/iokit/IOKit/pwr_mgt/RootDomain.h index 880d63bd9..bd7270585 100644 --- a/iokit/IOKit/pwr_mgt/RootDomain.h +++ b/iokit/IOKit/pwr_mgt/RootDomain.h
@@ -28,6 +28,7 @@ #include #include +class IOPMPowerStateQueue; class RootDomainUserClient; #define kRootDomainSupportedFeatures "Supported Features"
@@ -61,7 +62,6 @@ public: static IOPMrootDomain * construct( void ); virtual bool start( IOService * provider ); - virtual IOReturn newUserClient ( task_t, void *, UInt32, IOUserClient ** ); virtual IOReturn setAggressiveness ( unsigned long, unsigned long ); virtual IOReturn youAreRoot ( void ); virtual IOReturn sleepSystem ( void );
@@ -77,7 +77,9 @@ public: void wakeFromDoze( void ); void broadcast_it (unsigned long, unsigned long ); void publishFeature( const char *feature ); - + void unIdleDevice( IOService *, unsigned long ); + void announcePowerSourceChange( void ); + // Override of these methods for logging purposes. virtual IOReturn changePowerStateTo ( unsigned long ordinal ); virtual IOReturn changePowerStateToPriv ( unsigned long ordinal );
@@ -113,7 +115,7 @@ private: void adjustPowerState( void ); void restoreUserSpinDownTimeout ( void ); - + IOPMPowerStateQueue *pmPowerStateQueue; unsigned int user_spindown; // User's selected disk spindown value unsigned int systemBooting:1;
diff --git a/iokit/IOKit/system.h b/iokit/IOKit/system.h index 75b6115b6..c7524f6b1 100644 --- a/iokit/IOKit/system.h +++ b/iokit/IOKit/system.h
@@ -86,6 +86,8 @@ void panic(const char * msg, ...); */ extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys); extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); +extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys); +extern void flush_dcache64(addr64_t addr, unsigned count, int phys); __END_DECLS
diff --git a/iokit/Kernel/IOBufferMemoryDescriptor.cpp b/iokit/Kernel/IOBufferMemoryDescriptor.cpp index 845c6e12e..c7a521d10 100644 --- a/iokit/Kernel/IOBufferMemoryDescriptor.cpp +++ b/iokit/Kernel/IOBufferMemoryDescriptor.cpp
@@ -31,9 +31,9 @@ __BEGIN_DECLS void ipc_port_release_send(ipc_port_t port); #include -__END_DECLS -extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address ); +vm_map_t IOPageableMapForAddress( vm_address_t address ); +__END_DECLS #define super IOGeneralMemoryDescriptor OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
@@ -90,6 +90,7 @@ bool IOBufferMemoryDescriptor::initWithOptions( task_t inTask) { vm_map_t map = 0; + IOOptionBits iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual; if (!capacity) return false;
@@ -100,6 +101,9 @@ bool IOBufferMemoryDescriptor::initWithOptions( _physSegCount = 0; _buffer = 0; + // Grab the direction and the Auto Prepare bits from the Buffer MD options + iomdOptions
|= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare); + if ((options & kIOMemorySharingTypeMask) && (alignment < page_size)) alignment = page_size;
@@ -109,6 +113,7 @@ bool IOBufferMemoryDescriptor::initWithOptions( _alignment = alignment; if (options & kIOMemoryPageable) { + iomdOptions |= kIOMemoryBufferPageable; if (inTask == kernel_task) { /* Allocate some kernel address space. */
@@ -128,19 +133,24 @@ bool IOBufferMemoryDescriptor::initWithOptions( map = get_task_map(inTask); vm_map_reference(map); reserved->map = map; - kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page(capacity), + kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page_32(capacity), VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) ); if( KERN_SUCCESS != kr) return( false ); // we have to make sure that these pages don't get copied on fork. - kr = vm_inherit( map, (vm_address_t) _buffer, round_page(capacity), VM_INHERIT_NONE); + kr = vm_inherit( map, (vm_address_t) _buffer, round_page_32(capacity), VM_INHERIT_NONE); if( KERN_SUCCESS != kr) return( false ); } } else { + // @@@ gvdl: Need to remove this + // Buffers should never auto prepare; they should be prepared explicitly. + // But that was never enforced, so what are you going to do? + iomdOptions |= kIOMemoryAutoPrepare; + /* Allocate a wired-down buffer inside kernel space. */ if (options & kIOMemoryPhysicallyContiguous) _buffer = IOMallocContiguous(capacity, alignment, 0);
@@ -156,26 +166,50 @@ bool IOBufferMemoryDescriptor::initWithOptions( _singleRange.v.address = (vm_address_t) _buffer; _singleRange.v.length = capacity; - if (!super::initWithRanges(&_singleRange.v, 1, - (IODirection) (options & kIOMemoryDirectionMask), - inTask, true)) + if (!super::initWithOptions(&_singleRange.v, 1, 0, + inTask, iomdOptions, /* System mapper */ 0)) return false; - if (options & kIOMemoryPageable) - { - _flags |= kIOMemoryRequiresWire; - + if (options & kIOMemoryPageable) { kern_return_t kr; ipc_port_t sharedMem = (ipc_port_t) _memEntry; - vm_size_t size = round_page(_ranges.v[0].length); + vm_size_t size = round_page_32(_ranges.v[0].length); // must create the entry before any pages are allocated if( 0 == sharedMem) { + + // set memory entry cache + vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE; + switch (options & kIOMapCacheMask) + { + case kIOMapInhibitCache: + SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode); + break; + + case kIOMapWriteThruCache: + SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode); + break; + + case kIOMapWriteCombineCache: + SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode); + break; + + case kIOMapCopybackCache: + SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode); + break; + + case kIOMapDefaultCache: + default: + SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode); + break; + } + kr = mach_make_memory_entry( map, &size, _ranges.v[0].address, - VM_PROT_READ | VM_PROT_WRITE, &sharedMem, + memEntryCacheMode, &sharedMem, NULL ); - if( (KERN_SUCCESS == kr) && (size != round_page(_ranges.v[0].length))) { + + if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) { ipc_port_release_send( sharedMem ); kr = kIOReturnVMError; }
@@ -184,23 +218,6 @@ bool IOBufferMemoryDescriptor::initWithOptions( _memEntry = (void *) sharedMem; } } - else - { - /* Precompute virtual-to-physical page mappings.
*/ - vm_address_t inBuffer = (vm_address_t) _buffer; - _physSegCount = atop(trunc_page(inBuffer + capacity - 1) - - trunc_page(inBuffer)) + 1; - _physAddrs = IONew(IOPhysicalAddress, _physSegCount); - if (!_physAddrs) - return false; - - inBuffer = trunc_page(inBuffer); - for (unsigned i = 0; i < _physSegCount; i++) { - _physAddrs[i] = pmap_extract(get_task_pmap(kernel_task), inBuffer); - assert(_physAddrs[i]); /* supposed to be wired */ - inBuffer += page_size; - } - } setLength(capacity);
@@ -211,7 +228,7 @@ IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions( task_t inTask, IOOptionBits options, vm_size_t capacity, - vm_offset_t alignment = 1) + vm_offset_t alignment) { IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
@@ -233,7 +250,7 @@ bool IOBufferMemoryDescriptor::initWithOptions( IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions( IOOptionBits options, vm_size_t capacity, - vm_offset_t alignment = 1) + vm_offset_t alignment) { IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
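[Illustrative sketch, not part of the patch: creating a pageable buffer with inTaskWithOptions(), whose alignment default now lives only in the header declaration. bufSize is an assumed size, and the cache mode is one of the options handled by the new switch in initWithOptions() above.]

    // Sketch only: a page-aligned, pageable kernel buffer.
    IOBufferMemoryDescriptor * bmd = IOBufferMemoryDescriptor::inTaskWithOptions(
        kernel_task,
        kIODirectionIn | kIOMemoryPageable | kIOMapWriteThruCache,
        bufSize, page_size);
    if (bmd) {
        void * p = bmd->getBytesNoCopy();
        // ... fill or consume p ...
        bmd->release();
    }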
@@ -316,15 +333,14 @@ IOBufferMemoryDescriptor::withBytes(const void * inBytes, */ void IOBufferMemoryDescriptor::free() { + // Cache all of the relevant information on the stack for use + // after we call super::free()! IOOptionBits options = _options; vm_size_t size = _capacity; void * buffer = _buffer; vm_map_t map = 0; vm_offset_t alignment = _alignment; - if (_physAddrs) - IODelete(_physAddrs, IOPhysicalAddress, _physSegCount); - if (reserved) { map = reserved->map;
@@ -339,7 +355,7 @@ void IOBufferMemoryDescriptor::free() if (options & kIOMemoryPageable) { if (map) - vm_deallocate(map, (vm_address_t) buffer, round_page(size)); + vm_deallocate(map, (vm_address_t) buffer, round_page_32(size)); else IOFreePageable(buffer, size); }
@@ -441,55 +457,6 @@ IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength) return 0; } -/* - * getPhysicalSegment: - * - * Get the physical address of the buffer, relative to the current position. - * If the current position is at the end of the buffer, a zero is returned. - */ -IOPhysicalAddress -IOBufferMemoryDescriptor::getPhysicalSegment(IOByteCount offset, - IOByteCount * lengthOfSegment) -{ - IOPhysicalAddress physAddr; - - if( offset != _position) - setPosition( offset ); - - assert(_position <= _length); - - /* Fail gracefully if the position is at (or past) the end-of-buffer. */ - if (_position >= _length) { - *lengthOfSegment = 0; - return 0; - } - - if (_options & kIOMemoryPageable) { - physAddr = super::getPhysicalSegment(offset, lengthOfSegment); - - } else { - /* Compute the largest contiguous physical length possible. */ - vm_address_t actualPos = _singleRange.v.address + _position; - vm_address_t actualPage = trunc_page(actualPos); - unsigned physInd = atop(actualPage-trunc_page(_singleRange.v.address)); - vm_size_t physicalLength = actualPage + page_size - actualPos; - for (unsigned index = physInd + 1; index < _physSegCount && - _physAddrs[index] == _physAddrs[index-1] + page_size; index++) { - physicalLength += page_size; - } - - /* Clip contiguous physical length at the end-of-buffer. */ - if (physicalLength > _length - _position) - physicalLength = _length - _position; - - *lengthOfSegment = physicalLength; - physAddr = _physAddrs[physInd] + (actualPos - actualPage); - } - - return physAddr; -} - OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0); OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1); OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
diff --git a/iokit/Kernel/IOCPU.cpp b/iokit/Kernel/IOCPU.cpp index 57017a819..b74cb79a5 100644 --- a/iokit/Kernel/IOCPU.cpp +++ b/iokit/Kernel/IOCPU.cpp
@@ -187,13 +187,14 @@ IOReturn IOCPU::setProperties(OSObject *properties) { OSDictionary *dict = OSDynamicCast(OSDictionary, properties); OSString *stateStr; + IOReturn result; if (dict == 0) return kIOReturnUnsupported; stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey)); if (stateStr != 0) { - if (!IOUserClient::clientHasPrivilege(current_task(), "root")) - return kIOReturnNotPrivileged; + result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); + if (result != kIOReturnSuccess) return result; if (_cpuNumber == 0) return kIOReturnUnsupported;
diff --git a/iokit/Kernel/IOCatalogue.cpp b/iokit/Kernel/IOCatalogue.cpp index 4c8e3b1b5..6e9f45c52 100644 --- a/iokit/Kernel/IOCatalogue.cpp +++ b/iokit/Kernel/IOCatalogue.cpp
@@ -78,17 +78,327 @@ void (*remove_startup_extension_function)(const char * name) = 0; */ int kernelLinkerPresent = 0; - -#define super OSObject #define kModuleKey "CFBundleIdentifier" +#define super OSObject OSDefineMetaClassAndStructors(IOCatalogue, OSObject) #define CATALOGTEST 0 -IOCatalogue * gIOCatalogue; -const OSSymbol * gIOClassKey; -const OSSymbol * gIOProbeScoreKey; +IOCatalogue * gIOCatalogue; +const OSSymbol * gIOClassKey; +const OSSymbol * gIOProbeScoreKey; +const OSSymbol * gIOModuleIdentifierKey; +OSSet * gIOCatalogModuleRequests; +OSSet * gIOCatalogCacheMisses; +OSSet * gIOCatalogROMMkexts; +IOLock * gIOCatalogLock; +IOLock * gIOKLDLock; + +/********************************************************************* +*********************************************************************/ + +OSArray * gIOPrelinkedModules = 0; + +extern "C" kern_return_t +kmod_create_internal( + kmod_info_t *info, + kmod_t *id); + +extern "C" kern_return_t +kmod_destroy_internal(kmod_t id); + +extern "C" kern_return_t +kmod_start_or_stop( + kmod_t id, + int start, + kmod_args_t *data, + mach_msg_type_number_t *dataCount); + +extern "C" kern_return_t kmod_retain(kmod_t id); +extern "C" kern_return_t kmod_release(kmod_t id); + +static +kern_return_t start_prelink_module(UInt32 moduleIndex) +{ + kern_return_t kr = KERN_SUCCESS; + UInt32 * togo; + SInt32 count, where, end; + UInt32 * prelink; + SInt32 next, lastDep; + OSData * data; + OSString * str; + OSDictionary * dict; + + OSArray * + prelinkedModules = gIOPrelinkedModules; + + togo = IONew(UInt32, prelinkedModules->getCount()); + togo[0] = moduleIndex; + count = 1; + + for (next = 0; next < count; next++) + { + dict = (OSDictionary *) prelinkedModules->getObject(togo[next]); + + data = OSDynamicCast(OSData, dict->getObject("OSBundlePrelink")); + if (!data) + { + // already started or no code + if (togo[next] == moduleIndex) + { + kr = KERN_FAILURE; + break; + } + continue; + } + prelink = (UInt32 *) data->getBytesNoCopy(); + lastDep = OSReadBigInt32(prelink, 12); + for (SInt32 idx = OSReadBigInt32(prelink, 8); idx < lastDep; idx += sizeof(UInt32)) + { + UInt32 depIdx = OSReadBigInt32(prelink, idx) - 1; + + for (where = next + 1; + (where <
count) && (togo[where] > depIdx); + where++) {} + + if (where != count) + { + if (togo[where] == depIdx) + continue; + for (end = count; end != where; end--) + togo[end] = togo[end - 1]; + } + count++; + togo[where] = depIdx; + } + } + + if (KERN_SUCCESS != kr) + return kr; + + for (next = (count - 1); next >= 0; next--) + { + dict = (OSDictionary *) prelinkedModules->getObject(togo[next]); + + data = OSDynamicCast(OSData, dict->getObject("OSBundlePrelink")); + if (!data) + continue; + prelink = (UInt32 *) data->getBytesNoCopy(); + + kmod_t id; + kmod_info_t * kmod_info = (kmod_info_t *) OSReadBigInt32(prelink, 0); + + kr = kmod_create_internal(kmod_info, &id); + if (KERN_SUCCESS != kr) + break; + + lastDep = OSReadBigInt32(prelink, 12); + for (SInt32 idx = OSReadBigInt32(prelink, 8); idx < lastDep; idx += sizeof(UInt32)) + { + OSDictionary * depDict; + kmod_info_t * depInfo; + + depDict = (OSDictionary *) prelinkedModules->getObject(OSReadBigInt32(prelink, idx) - 1); + str = OSDynamicCast(OSString, depDict->getObject(kModuleKey)); + depInfo = kmod_lookupbyname_locked(str->getCStringNoCopy()); + if (depInfo) + { + kr = kmod_retain(KMOD_PACK_IDS(id, depInfo->id)); + kfree((vm_offset_t) depInfo, sizeof(kmod_info_t)); + } else + IOLog("%s: NO DEP %s\n", kmod_info->name, str->getCStringNoCopy()); + } + dict->removeObject("OSBundlePrelink"); + + if (kmod_info->start) + kr = kmod_start_or_stop(kmod_info->id, 1, 0, 0); + } + + IODelete(togo, UInt32, prelinkedModules->getCount()); + + return kr; +} + +/********************************************************************* +* This is a function that IOCatalogue calls in order to load a kmod. +*********************************************************************/ + +static +kern_return_t kmod_load_from_cache_sym(const OSSymbol * kmod_name) +{ + OSArray * prelinkedModules = gIOPrelinkedModules; + kern_return_t result = KERN_FAILURE; + OSDictionary * dict; + OSObject * ident; + UInt32 idx; + + if (!gIOPrelinkedModules) + return KERN_FAILURE; + + for (idx = 0; + (dict = (OSDictionary *) prelinkedModules->getObject(idx)); + idx++) + { + if ((ident = dict->getObject(kModuleKey)) + && kmod_name->isEqualTo(ident)) + break; + } + if (dict) + { + if (kernelLinkerPresent && dict->getObject("OSBundleDefer")) + { + kmod_load_extension((char *) kmod_name->getCStringNoCopy()); + result = kIOReturnOffline; + } + else + result = start_prelink_module(idx); + } + + return result; +} + +extern "C" Boolean kmod_load_request(const char * moduleName, Boolean make_request) +{ + bool ret, cacheMiss = false; + kern_return_t kr; + const OSSymbol * sym = 0; + kmod_info_t * kmod_info; + + if (!moduleName) + return false; + + /* To make sure this operation completes even if a bad extension needs + * to be removed, take the kld lock for this whole block, spanning the + * kmod_load_function() and remove_startup_extension_function() calls. + */ + IOLockLock(gIOKLDLock); + do + { + // Is the module already loaded? + ret = (0 != (kmod_info = kmod_lookupbyname_locked((char *)moduleName))); + if (ret) { + kfree((vm_offset_t) kmod_info, sizeof(kmod_info_t)); + break; + } + sym = OSSymbol::withCString(moduleName); + if (!sym) { + ret = false; + break; + } + + kr = kmod_load_from_cache_sym(sym); + ret = (kIOReturnSuccess == kr); + cacheMiss = !ret; + if (ret || !make_request || (kr == kIOReturnOffline)) + break; + + // If the module hasn't been loaded, then load it. 
+ if (!kmod_load_function) { + IOLog("IOCatalogue: %s cannot be loaded " + "(kmod load function not set).\n", + moduleName); + break; + } + + kr = kmod_load_function((char *)moduleName); + + if (kr != kIOReturnSuccess) { + IOLog("IOCatalogue: %s cannot be loaded.\n", moduleName); + + /* If the extension couldn't be loaded this time, + * make it unavailable so that no more requests are + * made in vain. This also enables other matching + * extensions to have a chance. + */ + if (kernelLinkerPresent && remove_startup_extension_function) { + (*remove_startup_extension_function)(moduleName); + } + ret = false; + + } else if (kernelLinkerPresent) { + // If kern linker is here, the driver is actually loaded, + // so return true. + ret = true; + + } else { + // kern linker isn't here, a request has been queued + // but the module isn't necessarily loaded yet, so stall. + ret = false; + } + } + while (false); + + IOLockUnlock(gIOKLDLock); + + if (sym) + { + IOLockLock(gIOCatalogLock); + gIOCatalogModuleRequests->setObject(sym); + if (cacheMiss) + gIOCatalogCacheMisses->setObject(sym); + IOLockUnlock(gIOCatalogLock); + } + + return ret; +} + +extern "C" kern_return_t kmod_unload_cache(void) +{ + OSArray * prelinkedModules = gIOPrelinkedModules; + kern_return_t result = KERN_FAILURE; + OSDictionary * dict; + UInt32 idx; + UInt32 * prelink; + OSData * data; + + if (!gIOPrelinkedModules) + return KERN_SUCCESS; + + IOLockLock(gIOKLDLock); + for (idx = 0; + (dict = (OSDictionary *) prelinkedModules->getObject(idx)); + idx++) + { + data = OSDynamicCast(OSData, dict->getObject("OSBundlePrelink")); + if (!data) + continue; + prelink = (UInt32 *) data->getBytesNoCopy(); + + kmod_info_t * kmod_info = (kmod_info_t *) OSReadBigInt32(prelink, 0); + vm_offset_t + virt = ml_static_ptovirt(kmod_info->address); + if( virt) { + ml_static_mfree(virt, kmod_info->size); + } + } + + gIOPrelinkedModules->release(); + gIOPrelinkedModules = 0; + + IOLockUnlock(gIOKLDLock); + + return result; +} + +extern "C" kern_return_t kmod_load_from_cache(const char * kmod_name) +{ + kern_return_t kr; + const OSSymbol * sym = OSSymbol::withCStringNoCopy(kmod_name); + + if (sym) + { + kr = kmod_load_from_cache_sym(sym); + sym->release(); + } + else + kr = kIOReturnNoMemory; + + return kr; +} + +/********************************************************************* +*********************************************************************/ static void UniqueProperties( OSDictionary * dict ) {
@@ -126,9 +436,15 @@ void IOCatalogue::initialize( void ) errorString->release(); } - gIOClassKey = OSSymbol::withCStringNoCopy( kIOClassKey ); - gIOProbeScoreKey = OSSymbol::withCStringNoCopy( kIOProbeScoreKey ); - assert( array && gIOClassKey && gIOProbeScoreKey); + gIOClassKey = OSSymbol::withCStringNoCopy( kIOClassKey ); + gIOProbeScoreKey = OSSymbol::withCStringNoCopy( kIOProbeScoreKey ); + gIOModuleIdentifierKey = OSSymbol::withCStringNoCopy( kModuleKey ); + gIOCatalogModuleRequests = OSSet::withCapacity(16); + gIOCatalogCacheMisses = OSSet::withCapacity(16); + gIOCatalogROMMkexts = OSSet::withCapacity(4); + + assert( array && gIOClassKey && gIOProbeScoreKey + && gIOModuleIdentifierKey && gIOCatalogModuleRequests); gIOCatalogue = new IOCatalogue; assert(gIOCatalogue);
@@ -152,8 +468,11 @@ bool IOCatalogue::init(OSArray * initArray) array->retain(); kernelTables = OSCollectionIterator::withCollection( array ); - lock = IOLockAlloc(); - kld_lock = IOLockAlloc(); + gIOCatalogLock = IOLockAlloc(); + gIOKLDLock = IOLockAlloc(); + + lock = 
gIOCatalogLock; + kld_lock = gIOKLDLock; kernelTables->reset(); while( (dict = (OSDictionary *) kernelTables->getNextObject())) { @@ -204,7 +523,7 @@ void IOCatalogue::ping( thread_call_param_t arg, thread_call_param_t) set = OSOrderedSet::withCapacity( 1 ); - IOTakeLock( &self->lock ); + IOLockLock( &self->lock ); for( newLimit = 0; newLimit < kDriversPerIter; newLimit++) { table = (OSDictionary *) self->array->getObject( @@ -226,7 +545,7 @@ void IOCatalogue::ping( thread_call_param_t arg, thread_call_param_t) hackLimit += newLimit; self->generation++; - IOUnlock( &self->lock ); + IOLockUnlock( &self->lock ); if( kDriversPerIter == newLimit) { AbsoluteTime deadline; @@ -248,7 +567,7 @@ OSOrderedSet * IOCatalogue::findDrivers( IOService * service, if( !set ) return( 0 ); - IOTakeLock( lock ); + IOLockLock( lock ); kernelTables->reset(); #if CATALOGTEST @@ -267,7 +586,7 @@ OSOrderedSet * IOCatalogue::findDrivers( IOService * service, *generationCount = getGenerationCount(); - IOUnlock( lock ); + IOLockUnlock( lock ); return( set ); } @@ -284,7 +603,7 @@ OSOrderedSet * IOCatalogue::findDrivers( OSDictionary * matching, set = OSOrderedSet::withCapacity( 1, IOServiceOrdering, (void *)gIOProbeScoreKey ); - IOTakeLock( lock ); + IOLockLock( lock ); kernelTables->reset(); while ( (dict = (OSDictionary *) kernelTables->getNextObject()) ) { @@ -295,7 +614,7 @@ OSOrderedSet * IOCatalogue::findDrivers( OSDictionary * matching, set->setObject(dict); } *generationCount = getGenerationCount(); - IOUnlock( lock ); + IOLockUnlock( lock ); return set; } @@ -311,12 +630,13 @@ static void AddNewImports( OSOrderedSet * set, OSDictionary * dict ) // Add driver config tables to catalog and start matching process. bool IOCatalogue::addDrivers(OSArray * drivers, - bool doNubMatching = true ) + bool doNubMatching ) { OSCollectionIterator * iter; OSDictionary * dict; OSOrderedSet * set; OSArray * persons; + OSString * moduleName; bool ret; ret = true; @@ -335,44 +655,56 @@ bool IOCatalogue::addDrivers(OSArray * drivers, return false; } - IOTakeLock( lock ); - while ( (dict = (OSDictionary *) iter->getNextObject()) ) { - UInt count; - - UniqueProperties( dict ); - - // Add driver personality to catalogue. - count = array->getCount(); - while ( count-- ) { - OSDictionary * driver; - - // Be sure not to double up on personalities. - driver = (OSDictionary *)array->getObject(count); - - /* Unlike in other functions, this comparison must be exact! - * The catalogue must be able to contain personalities that - * are proper supersets of others. - * Do not compare just the properties present in one driver - * pesonality or the other. - */ - if ( dict->isEqualTo(driver) ) { - array->removeObject(count); - break; - } - } - - ret = array->setObject( dict ); - if ( !ret ) - break; - - AddNewImports( set, dict ); + IOLockLock( lock ); + while ( (dict = (OSDictionary *) iter->getNextObject()) ) + { + if ((moduleName = OSDynamicCast(OSString, dict->getObject("OSBundleModuleDemand")))) + { + IOLockUnlock( lock ); + ret = kmod_load_request(moduleName->getCStringNoCopy(), false); + IOLockLock( lock ); + ret = true; + } + else + { + SInt count; + + UniqueProperties( dict ); + + // Add driver personality to catalogue. + count = array->getCount(); + while ( count-- ) { + OSDictionary * driver; + + // Be sure not to double up on personalities. + driver = (OSDictionary *)array->getObject(count); + + /* Unlike in other functions, this comparison must be exact! 
+ * The catalogue must be able to contain personalities that + * are proper supersets of others. + * Do not compare just the properties present in one driver + * personality or the other. */ + if (dict->isEqualTo(driver)) + break; + } + if (count >= 0) + // it's a dup + continue; + + ret = array->setObject( dict ); + if (!ret) + break; + + AddNewImports( set, dict ); + } } // Start device matching. - if ( doNubMatching && (set->getCount() > 0) ) { + if (doNubMatching && (set->getCount() > 0)) { IOService::catalogNewDrivers( set ); generation++; } - IOUnlock( lock ); + IOLockUnlock( lock ); set->release(); iter->release(); @@ -383,7 +715,7 @@ bool IOCatalogue::addDrivers(OSArray * drivers, // Remove drivers from the catalog which match the // properties in the matching dictionary. bool IOCatalogue::removeDrivers( OSDictionary * matching, - bool doNubMatching = true) + bool doNubMatching) { OSCollectionIterator * tables; OSDictionary * dict; @@ -414,7 +746,7 @@ bool IOCatalogue::removeDrivers( OSDictionary * matching, UniqueProperties( matching ); - IOTakeLock( lock ); + IOLockLock( lock ); kernelTables->reset(); arrayCopy->merge(array); array->flushCollection(); @@ -436,7 +768,7 @@ bool IOCatalogue::removeDrivers( OSDictionary * matching, IOService::catalogNewDrivers(set); generation++; } - IOUnlock( lock ); + IOLockUnlock( lock ); set->release(); tables->release(); @@ -457,67 +789,7 @@ bool IOCatalogue::isModuleLoaded( OSString * moduleName ) const bool IOCatalogue::isModuleLoaded( const char * moduleName ) const { - kmod_info_t * k_info; - - if ( !moduleName ) - return false; - - // Is the module already loaded? - k_info = kmod_lookupbyname_locked((char *)moduleName); - if ( !k_info ) { - kern_return_t ret; - - /* To make sure this operation completes even if a bad extension needs - * to be removed, take the kld lock for this whole block, spanning the - * kmod_load_function() and remove_startup_extension_function() calls. - */ - IOLockLock(kld_lock); - - // If the module hasn't been loaded, then load it. - if (kmod_load_function != 0) { - - ret = kmod_load_function((char *)moduleName); - - if ( ret != kIOReturnSuccess ) { - IOLog("IOCatalogue: %s cannot be loaded.\n", moduleName); - - /* If the extension couldn't be loaded this time, - * make it unavailable so that no more requests are - * made in vain. This also enables other matching - * extensions to have a chance. - */ - if (kernelLinkerPresent && remove_startup_extension_function) { - (*remove_startup_extension_function)(moduleName); - } - IOLockUnlock(kld_lock); - return false; - } else if (kernelLinkerPresent) { - // If kern linker is here, the driver is actually loaded, - // so return true. - IOLockUnlock(kld_lock); - return true; - } else { - // kern linker isn't here, a request has been queued - // but the module isn't necessarily loaded yet, so stall. - IOLockUnlock(kld_lock); - return false; - } - } else { - IOLog("IOCatalogue: %s cannot be loaded " - "(kmod load function not set).\n", - moduleName); - } - - IOLockUnlock(kld_lock); - return false; - } - - if (k_info) { - kfree(k_info, sizeof(kmod_info_t)); - } - - /* Lock wasn't taken if we get here. */ - return true; + return (kmod_load_request(moduleName, true)); } // Check to see if module has been loaded already.
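As a usage sketch only (not part of the patch): the OSBundleModuleDemand branch added to addDrivers() above can be driven by a personality like the one below. The bundle identifier "com.example.driver" and the helper name are invented for illustration; the key string and the kmod_load_request(name, false) behaviour are taken directly from the hunk.

    // Hypothetical caller: queue a demand-load of a kext through the
    // catalogue instead of registering a matching personality.
    static bool queueDemandLoad(void)
    {
        bool           ok   = false;
        OSDictionary * dict = OSDictionary::withCapacity(1);
        OSString *     name = OSString::withCString("com.example.driver");

        if (dict && name && dict->setObject("OSBundleModuleDemand", name)) {
            OSArray * drivers = OSArray::withObjects((const OSObject **) &dict, 1);
            if (drivers) {
                // addDrivers() spots OSBundleModuleDemand, drops the
                // catalogue lock, and calls kmod_load_request(name, false);
                // the personality itself never enters the matching array.
                ok = gIOCatalogue->addDrivers(drivers, false);
                drivers->release();
            }
        }
        if (name) name->release();
        if (dict) dict->release();
        return ok;
    }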
@@ -528,7 +800,7 @@ bool IOCatalogue::isModuleLoaded( OSDictionary * driver ) const if ( !driver ) return false; - moduleName = OSDynamicCast(OSString, driver->getObject(kModuleKey)); + moduleName = OSDynamicCast(OSString, driver->getObject(gIOModuleIdentifierKey)); if ( moduleName ) return isModuleLoaded(moduleName); @@ -544,7 +816,7 @@ void IOCatalogue::moduleHasLoaded( OSString * moduleName ) OSDictionary * dict; dict = OSDictionary::withCapacity(2); - dict->setObject(kModuleKey, moduleName); + dict->setObject(gIOModuleIdentifierKey, moduleName); startMatching(dict); dict->release(); } @@ -572,7 +844,7 @@ IOReturn IOCatalogue::unloadModule( OSString * moduleName ) const if ( k_info->stop && !((ret = k_info->stop(k_info, 0)) == kIOReturnSuccess) ) { - kfree(k_info, sizeof(kmod_info_t)); + kfree((vm_offset_t) k_info, sizeof(kmod_info_t)); return ret; } @@ -581,7 +853,7 @@ IOReturn IOCatalogue::unloadModule( OSString * moduleName ) const } if (k_info) { - kfree(k_info, sizeof(kmod_info_t)); + kfree((vm_offset_t) k_info, sizeof(kmod_info_t)); } return ret; @@ -670,10 +942,10 @@ IOReturn IOCatalogue::terminateDrivers( OSDictionary * matching ) IOReturn ret; ret = kIOReturnSuccess; - IOTakeLock( lock ); + IOLockLock( lock ); ret = _terminateDrivers(array, matching); kernelTables->reset(); - IOUnlock( lock ); + IOLockUnlock( lock ); return ret; } @@ -689,9 +961,9 @@ IOReturn IOCatalogue::terminateDriversForModule( if ( !dict ) return kIOReturnNoMemory; - dict->setObject(kModuleKey, moduleName); + dict->setObject(gIOModuleIdentifierKey, moduleName); - IOTakeLock( lock ); + IOLockLock( lock ); ret = _terminateDrivers(array, dict); kernelTables->reset(); @@ -702,7 +974,7 @@ IOReturn IOCatalogue::terminateDriversForModule( ret = unloadModule(moduleName); } - IOUnlock( lock ); + IOLockUnlock( lock ); dict->release(); @@ -739,7 +1011,7 @@ bool IOCatalogue::startMatching( OSDictionary * matching ) if ( !set ) return false; - IOTakeLock( lock ); + IOLockLock( lock ); kernelTables->reset(); while ( (dict = (OSDictionary *)kernelTables->getNextObject()) ) { @@ -756,7 +1028,7 @@ bool IOCatalogue::startMatching( OSDictionary * matching ) generation++; } - IOUnlock( lock ); + IOLockUnlock( lock ); set->release(); @@ -765,44 +1037,75 @@ bool IOCatalogue::startMatching( OSDictionary * matching ) void IOCatalogue::reset(void) { - OSArray * tables; - OSDictionary * entry; - unsigned int count; - IOLog("Resetting IOCatalogue.\n"); - - IOTakeLock( lock ); - tables = OSArray::withArray(array); - array->flushCollection(); - - count = tables->getCount(); - while ( count-- ) { - entry = (OSDictionary *)tables->getObject(count); - if ( entry && !entry->getObject(kModuleKey) ) { - array->setObject(entry); - } - } - - kernelTables->reset(); - IOUnlock( lock ); - - tables->release(); } bool IOCatalogue::serialize(OSSerialize * s) const { - bool ret; + bool ret; if ( !s ) return false; - IOTakeLock( lock ); + IOLockLock( lock ); + ret = array->serialize(s); - IOUnlock( lock ); + + IOLockUnlock( lock ); return ret; } +bool IOCatalogue::serializeData(IOOptionBits kind, OSSerialize * s) const +{ + kern_return_t kr = kIOReturnSuccess; + + switch ( kind ) + { + case kIOCatalogGetContents: + if (!serialize(s)) + kr = kIOReturnNoMemory; + break; + + case kIOCatalogGetModuleDemandList: + IOLockLock( lock ); + if (!gIOCatalogModuleRequests->serialize(s)) + kr = kIOReturnNoMemory; + IOLockUnlock( lock ); + break; + + case kIOCatalogGetCacheMissList: + IOLockLock( lock ); + if (!gIOCatalogCacheMisses->serialize(s)) + kr = 
kIOReturnNoMemory; + IOLockUnlock( lock ); + break; + + case kIOCatalogGetROMMkextList: + IOLockLock( lock ); + + if (!gIOCatalogROMMkexts || !gIOCatalogROMMkexts->getCount()) + kr = kIOReturnNoResources; + else if (!gIOCatalogROMMkexts->serialize(s)) + kr = kIOReturnNoMemory; + + if (gIOCatalogROMMkexts) + { + gIOCatalogROMMkexts->release(); + gIOCatalogROMMkexts = 0; + } + + IOLockUnlock( lock ); + break; + + default: + kr = kIOReturnBadArgument; + break; + } + + return kr; +} + bool IOCatalogue::recordStartupExtensions(void) { bool result = false; @@ -823,18 +1126,43 @@ bool IOCatalogue::recordStartupExtensions(void) { /********************************************************************* *********************************************************************/ -bool IOCatalogue::addExtensionsFromArchive(OSData * mkext) { +bool IOCatalogue::addExtensionsFromArchive(OSData * mkext) +{ + OSData * copyData; bool result = false; + bool prelinked; - IOLockLock(kld_lock); - if (kernelLinkerPresent && add_from_mkext_function) { - result = (*add_from_mkext_function)(mkext); - } else { - IOLog("Can't add startup extensions from archive; " - "kernel linker is not present.\n"); - result = false; + /* The mkext we've been handed (or the data it references) can go away, + * so we need to make a local copy to keep around as long as it might + * be needed. + */ + copyData = OSData::withData(mkext); + if (copyData) + { + struct section * infosect; + + infosect = getsectbyname("__PRELINK", "__info"); + prelinked = (infosect && infosect->addr && infosect->size); + + IOLockLock(kld_lock); + + if (gIOCatalogROMMkexts) + gIOCatalogROMMkexts->setObject(copyData); + + if (prelinked) { + result = true; + } else if (kernelLinkerPresent && add_from_mkext_function) { + result = (*add_from_mkext_function)(copyData); + } else { + IOLog("Can't add startup extensions from archive; " + "kernel linker is not present.\n"); + result = false; + } + + IOLockUnlock(kld_lock); + + copyData->release(); } - IOLockUnlock(kld_lock); return result; } @@ -847,7 +1175,6 @@ bool IOCatalogue::addExtensionsFromArchive(OSData * mkext) { *********************************************************************/ kern_return_t IOCatalogue::removeKernelLinker(void) { kern_return_t result = KERN_SUCCESS; - extern struct mach_header _mh_execute_header; struct segment_command * segment; char * dt_segment_name; void * segment_paddress; @@ -885,19 +1212,19 @@ kern_return_t IOCatalogue::removeKernelLinker(void) { * memory so that any cross-dependencies (not that there * should be any) are handled. 
*/ - segment = getsegbynamefromheader( - &_mh_execute_header, "__KLD"); + segment = getsegbyname("__KLD"); if (!segment) { - IOLog("error removing kernel linker: can't find __KLD segment\n"); + IOLog("error removing kernel linker: can't find %s segment\n", + "__KLD"); result = KERN_FAILURE; goto finish; } OSRuntimeUnloadCPPForSegment(segment); - segment = getsegbynamefromheader( - &_mh_execute_header, "__LINKEDIT"); + segment = getsegbyname("__LINKEDIT"); if (!segment) { - IOLog("error removing kernel linker: can't find __LINKEDIT segment\n"); + IOLog("error removing kernel linker: can't find %s segment\n", + "__LINKEDIT"); result = KERN_FAILURE; goto finish; } @@ -918,6 +1245,16 @@ kern_return_t IOCatalogue::removeKernelLinker(void) { (int)segment_size); } + struct section * sect; + sect = getsectbyname("__PRELINK", "__symtab"); + if (sect && sect->addr) + { + vm_offset_t + virt = ml_static_ptovirt(sect->addr); + if( virt) { + ml_static_mfree(virt, sect->size); + } + } finish: diff --git a/iokit/Kernel/IOCommandGate.cpp b/iokit/Kernel/IOCommandGate.cpp index 73a7cc98e..6d24f8f29 100644 --- a/iokit/Kernel/IOCommandGate.cpp +++ b/iokit/Kernel/IOCommandGate.cpp @@ -41,26 +41,26 @@ OSMetaClassDefineReservedUnused(IOCommandGate, 7); bool IOCommandGate::checkForWork() { return false; } -bool IOCommandGate::init(OSObject *inOwner, Action inAction = 0) +bool IOCommandGate::init(OSObject *inOwner, Action inAction) { return super::init(inOwner, (IOEventSource::Action) inAction); } IOCommandGate * -IOCommandGate::commandGate(OSObject *inOwner, Action inAction = 0) +IOCommandGate::commandGate(OSObject *inOwner, Action inAction) { IOCommandGate *me = new IOCommandGate; if (me && !me->init(inOwner, inAction)) { - me->free(); + me->release(); return 0; } return me; } -IOReturn IOCommandGate::runCommand(void *arg0 = 0, void *arg1 = 0, - void *arg2 = 0, void *arg3 = 0) +IOReturn IOCommandGate::runCommand(void *arg0, void *arg1, + void *arg2, void *arg3) { IOReturn res; @@ -82,8 +82,8 @@ IOReturn IOCommandGate::runCommand(void *arg0 = 0, void *arg1 = 0, } IOReturn IOCommandGate::runAction(Action inAction, - void *arg0 = 0, void *arg1 = 0, - void *arg2 = 0, void *arg3 = 0) + void *arg0, void *arg1, + void *arg2, void *arg3) { IOReturn res; @@ -104,8 +104,8 @@ IOReturn IOCommandGate::runAction(Action inAction, return res; } -IOReturn IOCommandGate::attemptCommand(void *arg0 = 0, void *arg1 = 0, - void *arg2 = 0, void *arg3 = 0) +IOReturn IOCommandGate::attemptCommand(void *arg0, void *arg1, + void *arg2, void *arg3) { IOReturn res; @@ -130,8 +130,8 @@ IOReturn IOCommandGate::attemptCommand(void *arg0 = 0, void *arg1 = 0, } IOReturn IOCommandGate::attemptAction(Action inAction, - void *arg0 = 0, void *arg1 = 0, - void *arg2 = 0, void *arg3 = 0) + void *arg0, void *arg1, + void *arg2, void *arg3) { IOReturn res; @@ -156,8 +156,6 @@ IOReturn IOCommandGate::attemptAction(Action inAction, IOReturn IOCommandGate::commandSleep(void *event, UInt32 interruptible) { - IOReturn ret; - if (!workLoop->inGate()) return kIOReturnNotPermitted; diff --git a/iokit/Kernel/IOConditionLock.cpp b/iokit/Kernel/IOConditionLock.cpp index 680f0a6af..7d0916893 100644 --- a/iokit/Kernel/IOConditionLock.cpp +++ b/iokit/Kernel/IOConditionLock.cpp @@ -39,7 +39,7 @@ #define super OSObject OSDefineMetaClassAndStructors(IOConditionLock, OSObject) -bool IOConditionLock::initWithCondition(int inCondition, bool inIntr = true) +bool IOConditionLock::initWithCondition(int inCondition, bool inIntr) { if (!super::init()) return false; @@ -55,12 
+55,12 @@ bool IOConditionLock::initWithCondition(int inCondition, bool inIntr = true) return cond_interlock && sleep_interlock; } -IOConditionLock *IOConditionLock::withCondition(int condition, bool intr = true) +IOConditionLock *IOConditionLock::withCondition(int condition, bool intr) { IOConditionLock *me = new IOConditionLock; if (me && !me->initWithCondition(condition, intr)) { - me->free(); + me->release(); return 0; } diff --git a/iokit/Kernel/IODataQueue.cpp b/iokit/Kernel/IODataQueue.cpp index e73acfaab..3c7c3df10 100644 --- a/iokit/Kernel/IODataQueue.cpp +++ b/iokit/Kernel/IODataQueue.cpp @@ -74,7 +74,7 @@ Boolean IODataQueue::initWithCapacity(UInt32 size) return false; } - dataQueue = (IODataQueueMemory *)IOMallocAligned(round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE), PAGE_SIZE); + dataQueue = (IODataQueueMemory *)IOMallocAligned(round_page_32(size + DATA_QUEUE_MEMORY_HEADER_SIZE), PAGE_SIZE); if (dataQueue == 0) { return false; } @@ -94,7 +94,7 @@ Boolean IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize) void IODataQueue::free() { if (dataQueue) { - IOFreeAligned(dataQueue, round_page(dataQueue->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE)); + IOFreeAligned(dataQueue, round_page_32(dataQueue->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE)); } super::free(); diff --git a/iokit/Kernel/IODeviceMemory.cpp b/iokit/Kernel/IODeviceMemory.cpp index 2aa9725e5..dbd731921 100644 --- a/iokit/Kernel/IODeviceMemory.cpp +++ b/iokit/Kernel/IODeviceMemory.cpp @@ -22,13 +22,6 @@ * * @APPLE_LICENSE_HEADER_END@ */ -/* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. - * - * HISTORY - * 23 Nov 98 sdouglas created. - * 30 Sep 99 sdouglas, merged IODeviceMemory into IOMemoryDescriptor. - */ #include diff --git a/iokit/Kernel/IODeviceTreeSupport.cpp b/iokit/Kernel/IODeviceTreeSupport.cpp index 437dc4e58..e5b1a2172 100644 --- a/iokit/Kernel/IODeviceTreeSupport.cpp +++ b/iokit/Kernel/IODeviceTreeSupport.cpp @@ -24,6 +24,11 @@ */ /* * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 23 Nov 98 sdouglas, created from IODeviceTreeBus.m, & MacOS exp mgr. + * 05 Apr 99 sdouglas, add interrupt mapping. 
+ * */ #include @@ -35,7 +40,8 @@ #include #include -#include +#include + extern "C" { #include void DTInit( void * data ); @@ -94,8 +100,6 @@ IODeviceTreeAlloc( void * dtTop ) bool intMap; bool freeDT; - IOLog("IODeviceTreeSupport "); - gIODTPlane = IORegistryEntry::makePlane( kIODeviceTreePlane ); gIODTNameKey = OSSymbol::withCStringNoCopy( "name" ); @@ -146,7 +150,7 @@ IODeviceTreeAlloc( void * dtTop ) parent = MakeReferenceTable( (DTEntry)dtTop, freeDT ); - stack = OSArray::withObjects( & (const OSObject *) parent, 1, 10 ); + stack = OSArray::withObjects( (const OSObject **) &parent, 1, 10 ); DTCreateEntryIterator( (DTEntry)dtTop, &iter ); do { @@ -191,7 +195,7 @@ IODeviceTreeAlloc( void * dtTop ) // free original device tree DTInit(0); IODTFreeLoaderInfo( "DeviceTree", - (void *)dtMap[0], round_page(dtMap[1]) ); + (void *)dtMap[0], round_page_32(dtMap[1]) ); } // adjust tree @@ -230,8 +234,6 @@ IODeviceTreeAlloc( void * dtTop ) parent->setProperty( gIODTNWInterruptMappingKey, (OSObject *) gIODTNWInterruptMappingKey ); - IOLog("done\n"); - return( parent); } diff --git a/iokit/Kernel/IOEventSource.cpp b/iokit/Kernel/IOEventSource.cpp index 6ddbac302..bb260732d 100644 --- a/iokit/Kernel/IOEventSource.cpp +++ b/iokit/Kernel/IOEventSource.cpp @@ -57,7 +57,7 @@ void IOEventSource::wakeupGate(void *event, bool oneThread) { workLoop->wakeupGate(event, oneThread); } bool IOEventSource::init(OSObject *inOwner, - Action inAction = 0) + Action inAction) { if (!inOwner) return false; diff --git a/iokit/Kernel/IOFilterInterruptEventSource.cpp b/iokit/Kernel/IOFilterInterruptEventSource.cpp index 708b9c90c..6b516500f 100644 --- a/iokit/Kernel/IOFilterInterruptEventSource.cpp +++ b/iokit/Kernel/IOFilterInterruptEventSource.cpp @@ -77,9 +77,9 @@ OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 7); * Implement the call throughs for the private protection conversion */ bool IOFilterInterruptEventSource::init(OSObject *inOwner, - Action inAction = 0, - IOService *inProvider = 0, - int inIntIndex = 0) + Action inAction, + IOService *inProvider, + int inIntIndex) { return false; } @@ -98,7 +98,7 @@ IOFilterInterruptEventSource::init(OSObject *inOwner, Action inAction, Filter inFilterAction, IOService *inProvider, - int inIntIndex = 0) + int inIntIndex) { if ( !super::init(inOwner, inAction, inProvider, inIntIndex) ) return false; @@ -115,13 +115,13 @@ IOFilterInterruptEventSource *IOFilterInterruptEventSource Action inAction, Filter inFilterAction, IOService *inProvider, - int inIntIndex = 0) + int inIntIndex) { IOFilterInterruptEventSource *me = new IOFilterInterruptEventSource; if (me && !me->init(inOwner, inAction, inFilterAction, inProvider, inIntIndex)) { - me->free(); + me->release(); return 0; } diff --git a/iokit/Kernel/IOInterruptEventSource.cpp b/iokit/Kernel/IOInterruptEventSource.cpp index cbdd0aa6a..ddd980aef 100644 --- a/iokit/Kernel/IOInterruptEventSource.cpp +++ b/iokit/Kernel/IOInterruptEventSource.cpp @@ -75,9 +75,9 @@ OSMetaClassDefineReservedUnused(IOInterruptEventSource, 6); OSMetaClassDefineReservedUnused(IOInterruptEventSource, 7); bool IOInterruptEventSource::init(OSObject *inOwner, - Action inAction = 0, - IOService *inProvider = 0, - int inIntIndex = 0) + Action inAction, + IOService *inProvider, + int inIntIndex) { bool res = true; @@ -126,7 +126,7 @@ IOInterruptEventSource::interruptEventSource(OSObject *inOwner, IOInterruptEventSource *me = new IOInterruptEventSource; if (me && !me->init(inOwner, inAction, inProvider, inIntIndex)) { - me->free(); + me->release(); 
return 0; } diff --git a/iokit/Kernel/IOKitDebug.cpp b/iokit/Kernel/IOKitDebug.cpp index 54e70a468..c02798def 100644 --- a/iokit/Kernel/IOKitDebug.cpp +++ b/iokit/Kernel/IOKitDebug.cpp @@ -84,6 +84,61 @@ void IOPrintPlane( const IORegistryPlane * plane ) iter->release(); } +void dbugprintf(char *fmt, ...); +void db_dumpiojunk( const IORegistryPlane * plane ); + +void db_piokjunk(void) { + + dbugprintf("\nDT plane:\n"); + db_dumpiojunk( gIODTPlane ); + dbugprintf("\n\nService plane:\n"); + db_dumpiojunk( gIOServicePlane ); + dbugprintf("\n\n" + "ivar kalloc() 0x%08x\n" + "malloc() 0x%08x\n" + "containers kalloc() 0x%08x\n" + "IOMalloc() 0x%08x\n" + "----------------------------------------\n", + debug_ivars_size, + debug_malloc_size, + debug_container_malloc_size, + debug_iomalloc_size + ); + +} + + +void db_dumpiojunk( const IORegistryPlane * plane ) +{ + IORegistryEntry * next; + IORegistryIterator * iter; + OSOrderedSet * all; + char format[] = "%xxxs"; + IOService * service; + + iter = IORegistryIterator::iterateOver( plane ); + + all = iter->iterateAll(); + if( all) { + dbugprintf("Count %d\n", all->getCount() ); + all->release(); + } else dbugprintf("Empty\n"); + + iter->reset(); + while( (next = iter->getNextObjectRecursive())) { + sprintf( format + 1, "%ds", 2 * next->getDepth( plane )); + dbugprintf( format, ""); + dbugprintf( "%s", next->getName( plane )); + if( (next->getLocation( plane ))) + dbugprintf("@%s", next->getLocation( plane )); + dbugprintf(" <class %s", next->getMetaClass()->getClassName()); + if( (service = OSDynamicCast(IOService, next))) + dbugprintf(", busy %ld", service->getBusyState()); + dbugprintf( ">\n"); + } + iter->release(); +} + void IOPrintMemory( void ) { @@ -151,7 +206,7 @@ bool IOKitDiagnostics::serialize(OSSerialize *s) const updateOffset( dict, debug_container_malloc_size, "Container allocation" ); updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" ); - dict->setObject( "Classes", OSMetaClass::getClassDictionary() ); + OSMetaClass::serializeClassDictionary(dict); ok = dict->serialize( s ); diff --git a/iokit/Kernel/IOLib.c b/iokit/Kernel/IOLib.c index 28584b349..41394e11c 100644 --- a/iokit/Kernel/IOLib.c +++ b/iokit/Kernel/IOLib.c @@ -39,10 +39,14 @@ #include #include +#include #include mach_timespec_t IOZeroTvalspec = { 0, 0 }; +extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* * Global variables for use by iLogger @@ -61,13 +65,15 @@ void *_giDebugReserved2 = NULL; */ static IOThreadFunc threadArgFcn; -static void * threadArgArg; -static lock_t * threadArgLock; +static void * threadArgArg; +static lock_t * threadArgLock; +static queue_head_t gIOMallocContiguousEntries; +static mutex_t * gIOMallocContiguousEntriesLock; enum { kIOMaxPageableMaps = 16 }; enum { kIOPageableMapSize = 16 * 1024 * 1024 }; -enum { kIOPageableMaxMapSize = 64 * 1024 * 1024 }; +enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 }; typedef struct { vm_map_t map; @@ -82,6 +88,7 @@ static struct { mutex_t * lock; } gIOKitPageableSpace; +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ void IOLibInit(void) { @@ -109,6 +116,9 @@ void IOLibInit(void) gIOKitPageableSpace.hint = 0; gIOKitPageableSpace.count = 1; + gIOMallocContiguousEntriesLock = mutex_alloc( 0 ); + queue_init( &gIOMallocContiguousEntries ); + libInitialized = true; } @@ -275,6 +285,14 @@ void IOFreeAligned(void * address, vm_size_t size) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*/ +struct _IOMallocContiguousEntry +{ + void * virtual; + ppnum_t ioBase; + queue_chain_t link; +}; +typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry; + void * IOMallocContiguous(vm_size_t size, vm_size_t alignment, IOPhysicalAddress * physicalAddress) { @@ -283,6 +301,7 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment, vm_address_t allocationAddress; vm_size_t adjustedSize; vm_offset_t alignMask; + ppnum_t pagenum; if (size == 0) return 0; @@ -292,15 +311,24 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment, alignMask = alignment - 1; adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t); - if (adjustedSize >= page_size) { - - kr = kmem_alloc_contig(kernel_map, &address, size, - alignMask, 0); + if (adjustedSize >= page_size) + { + adjustedSize = size; + if (adjustedSize > page_size) + { + kr = kmem_alloc_contig(kernel_map, &address, size, + alignMask, 0); + } + else + { + kr = kernel_memory_allocate(kernel_map, &address, + size, alignMask, 0); + } if (KERN_SUCCESS != kr) address = 0; - - } else { - + } + else + { adjustedSize += alignMask; allocationAddress = (vm_address_t) kalloc(adjustedSize); @@ -310,8 +338,8 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment, + (sizeof(vm_size_t) + sizeof(vm_address_t))) & (~alignMask); - if (atop(address) != atop(address + size - 1)) - address = round_page(address); + if (atop_32(address) != atop_32(address + size - 1)) + address = round_page_32(address); *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t))) = adjustedSize; @@ -321,9 +349,49 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment, address = 0; } - if( address && physicalAddress) - *physicalAddress = (IOPhysicalAddress) pmap_extract( kernel_pmap, - address ); + /* Do we want a physical address? 
*/ + if (address && physicalAddress) + { + do + { + /* Get the physical page */ + pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address); + if(pagenum) + { + IOByteCount offset; + ppnum_t base; + + base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT); + if (base) + { + _IOMallocContiguousEntry * + entry = IONew(_IOMallocContiguousEntry, 1); + if (!entry) + { + IOFreeContiguous((void *) address, size); + address = 0; + break; + } + entry->virtual = (void *) address; + entry->ioBase = base; + mutex_lock(gIOMallocContiguousEntriesLock); + queue_enter( &gIOMallocContiguousEntries, entry, + _IOMallocContiguousEntry *, link ); + mutex_unlock(gIOMallocContiguousEntriesLock); + + *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK)); + for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++) + IOMapperInsertPage( base, offset, pagenum ); + } + else + *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK)); + } + else + /* Did not find, return 0 */ + *physicalAddress = (IOPhysicalAddress) 0; + } + while (false); + } assert(0 == (address & alignMask)); @@ -337,14 +405,35 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment, void IOFreeContiguous(void * address, vm_size_t size) { - vm_address_t allocationAddress; - vm_size_t adjustedSize; + vm_address_t allocationAddress; + vm_size_t adjustedSize; + _IOMallocContiguousEntry * entry; + ppnum_t base = 0; if( !address) return; assert(size); + mutex_lock(gIOMallocContiguousEntriesLock); + queue_iterate( &gIOMallocContiguousEntries, entry, + _IOMallocContiguousEntry *, link ) + { + if( entry->virtual == address ) { + base = entry->ioBase; + queue_remove( &gIOMallocContiguousEntries, entry, + _IOMallocContiguousEntry *, link ); + break; + } + } + mutex_unlock(gIOMallocContiguousEntriesLock); + + if (base) + { + IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT); + IODelete(entry, _IOMallocContiguousEntry, 1); + } + adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t); if (adjustedSize >= page_size) { @@ -469,7 +558,7 @@ void * IOMallocPageable(vm_size_t size, vm_size_t alignment) #if IOALLOCDEBUG if( ref.address) - debug_iomalloc_size += round_page(size); + debug_iomalloc_size += round_page_32(size); #endif return( (void *) ref.address ); @@ -502,7 +591,7 @@ void IOFreePageable(void * address, vm_size_t size) kmem_free( map, (vm_offset_t) address, size); #if IOALLOCDEBUG - debug_iomalloc_size -= round_page(size); + debug_iomalloc_size -= round_page_32(size); #endif } @@ -510,30 +599,34 @@ void IOFreePageable(void * address, vm_size_t size) extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa, vm_size_t length, unsigned int options); +extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length); IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address, IOByteCount length, IOOptionBits cacheMode ) { IOReturn ret = kIOReturnSuccess; - vm_offset_t physAddr; + ppnum_t pagenum; if( task != kernel_task) return( kIOReturnUnsupported ); - length = round_page(address + length) - trunc_page( address ); - address = trunc_page( address ); + length = round_page_32(address + length) - trunc_page_32( address ); + address = trunc_page_32( address ); // make map mode cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask; while( (kIOReturnSuccess == ret) && (length > 0) ) { - physAddr = pmap_extract( kernel_pmap, address ); - if( physAddr) - ret = IOMapPages( get_task_map(task), 
address, physAddr, page_size, cacheMode ); - else + // Get the physical page number + pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address); + if( pagenum) { + ret = IOUnmapPages( get_task_map(task), address, page_size ); + ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode ); + } else ret = kIOReturnVMError; + address += page_size; length -= page_size; } @@ -548,7 +641,7 @@ IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address, return( kIOReturnUnsupported ); #if __ppc__ - flush_dcache( (vm_offset_t) address, (unsigned) length, false ); + flush_dcache64( (addr64_t) address, (unsigned) length, false ); #endif return( kIOReturnSuccess ); diff --git a/iokit/Kernel/IOMapper.cpp b/iokit/Kernel/IOMapper.cpp new file mode 100644 index 000000000..c1795da6c --- /dev/null +++ b/iokit/Kernel/IOMapper.cpp @@ -0,0 +1,389 @@ +/* + * Copyright (c) 1998-2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +#define super IOService +OSDefineMetaClassAndAbstractStructors(IOMapper, IOService); + +OSMetaClassDefineReservedUnused(IOMapper, 0); +OSMetaClassDefineReservedUnused(IOMapper, 1); +OSMetaClassDefineReservedUnused(IOMapper, 2); +OSMetaClassDefineReservedUnused(IOMapper, 3); +OSMetaClassDefineReservedUnused(IOMapper, 4); +OSMetaClassDefineReservedUnused(IOMapper, 5); +OSMetaClassDefineReservedUnused(IOMapper, 6); +OSMetaClassDefineReservedUnused(IOMapper, 7); +OSMetaClassDefineReservedUnused(IOMapper, 8); +OSMetaClassDefineReservedUnused(IOMapper, 9); +OSMetaClassDefineReservedUnused(IOMapper, 10); +OSMetaClassDefineReservedUnused(IOMapper, 11); +OSMetaClassDefineReservedUnused(IOMapper, 12); +OSMetaClassDefineReservedUnused(IOMapper, 13); +OSMetaClassDefineReservedUnused(IOMapper, 14); +OSMetaClassDefineReservedUnused(IOMapper, 15); + +IOMapper * IOMapper::gSystem = (IOMapper *) IOMapper::kUnknown; + +class IOMapperLock { + IOLock *fWaitLock; +public: + IOMapperLock() { fWaitLock = IOLockAlloc(); }; + ~IOMapperLock() { IOLockFree(fWaitLock); }; + + void lock() { IOLockLock(fWaitLock); }; + void unlock() { IOLockUnlock(fWaitLock); }; + void sleep(void *event) { IOLockSleep(fWaitLock, event, THREAD_UNINT); }; + void wakeup(void *event) { IOLockWakeup(fWaitLock, event, false); }; +}; + +static IOMapperLock sMapperLock; + +bool IOMapper::start(IOService *provider) +{ + if (!super::start(provider)) + return false; + + if (!initHardware(provider)) + return false; + + if (fIsSystem) { + sMapperLock.lock(); + IOMapper::gSystem = this; + sMapperLock.wakeup(&IOMapper::gSystem); + sMapperLock.unlock(); + } + + return true; +} + +bool IOMapper::allocTable(IOByteCount size) +{ + assert(!fTable); + + fTableSize = size; + fTableHandle = NewARTTable(size, &fTable, &fTablePhys); + return fTableHandle != 0; +} + +void IOMapper::free() +{ + if (fTableHandle) { + FreeARTTable(fTableHandle, fTableSize); + fTableHandle = 0; + } + + super::free(); +} + +void IOMapper::setMapperRequired(bool hasMapper) +{ + if (hasMapper) + IOMapper::gSystem = (IOMapper *) kHasMapper; + else { + sMapperLock.lock(); + IOMapper::gSystem = (IOMapper *) kNoMapper; + sMapperLock.unlock(); + sMapperLock.wakeup(&IOMapper::gSystem); + } +} + +void IOMapper::waitForSystemMapper() +{ + sMapperLock.lock(); + while ((vm_address_t) IOMapper::gSystem & kWaitMask) + sMapperLock.sleep(&IOMapper::gSystem); + sMapperLock.unlock(); +} + +void IOMapper::iovmInsert(ppnum_t addr, IOItemCount offset, + ppnum_t *pageList, IOItemCount pageCount) +{ + while (pageCount--) + iovmInsert(addr, offset++, *pageList++); +} + +void IOMapper::iovmInsert(ppnum_t addr, IOItemCount offset, + upl_page_info_t *pageList, IOItemCount pageCount) +{ + for (IOItemCount i = 0; i < pageCount; i++) + iovmInsert(addr, offset + i, pageList[i].phys_addr); +} + +struct ARTTableData { + void *v; + upl_t u[0]; +}; +#define getARTDataP(data) ((ARTTableData *) (data)->getBytesNoCopy()) + +OSData * +IOMapper::NewARTTable(IOByteCount size, + void ** virtAddrP, ppnum_t *physAddrP) +{ + OSData *ret; + kern_return_t kr; + vm_address_t startUpl; + ARTTableData *dataP; + unsigned int dataSize; + upl_page_info_t *pl = 0; + + // Each UPL can deal with about one meg at the moment + size = round_page_32(size); + dataSize = sizeof(ARTTableData) + sizeof(upl_t) * size / (1024 * 1024); + ret = OSData::withCapacity(dataSize); + if (!ret) + return 0; + + // Append 0's to the buffer, in-other-words reset to nulls. 
+ ret->appendBytes(NULL, sizeof(ARTTableData)); + dataP = getARTDataP(ret); + + kr = kmem_alloc_contig(kernel_map, &startUpl, size, PAGE_MASK, 0); + if (kr) + return 0; + + dataP->v = (void *) startUpl; + + do { + upl_t iopl; + int upl_flags = UPL_SET_INTERNAL | UPL_SET_LITE + | UPL_SET_IO_WIRE | UPL_COPYOUT_FROM; + vm_size_t iopl_size = size; + + kr = vm_map_get_upl(kernel_map, + startUpl, + &iopl_size, + &iopl, + 0, + 0, + &upl_flags, + 0); + if (kr) { + panic("IOMapper:vm_map_get_upl returned 0x%x\n", kr); + goto bail; + } + + if (!ret->appendBytes(&iopl, sizeof(upl_t))) + goto bail; + + startUpl += iopl_size; + size -= iopl_size; + } while(size); + + // Need to re-establish the dataP as the OSData may have grown. + dataP = getARTDataP(ret); + + // Now grab the page entry of the first page and get its phys addr + pl = UPL_GET_INTERNAL_PAGE_LIST(dataP->u[0]); + *physAddrP = pl->phys_addr; + *virtAddrP = dataP->v; + + return ret; + +bail: + FreeARTTable(ret, size); + return 0; +} + +void IOMapper::FreeARTTable(OSData *artHandle, IOByteCount size) +{ + assert(artHandle); + + ARTTableData *dataP = getARTDataP(artHandle); + + int numupls = ((artHandle->getLength() - sizeof(*dataP)) / sizeof(upl_t)); + for (int i = 0; i < numupls; i++) + kernel_upl_abort(dataP->u[i], 0); + + if (dataP->v) { + size = round_page_32(size); + kmem_free(kernel_map, (vm_address_t) dataP->v, size); + } + artHandle->release(); +} + +__BEGIN_DECLS + +// These are C accessors to the system mapper for non-IOKit clients +ppnum_t IOMapperIOVMAlloc(unsigned pages) +{ + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) + return IOMapper::gSystem->iovmAlloc((IOItemCount) pages); + else + return 0; +} + +void IOMapperIOVMFree(ppnum_t addr, unsigned pages) +{ + if (IOMapper::gSystem) + IOMapper::gSystem->iovmFree(addr, (IOItemCount) pages); +} + +ppnum_t IOMapperInsertPage(ppnum_t addr, unsigned offset, ppnum_t page) +{ + if (IOMapper::gSystem) { + IOMapper::gSystem->iovmInsert(addr, (IOItemCount) offset, page); + return addr + offset; + } + else + return page; +} + +void IOMapperInsertPPNPages(ppnum_t addr, unsigned offset, + ppnum_t *pageList, unsigned pageCount) +{ + if (!IOMapper::gSystem) + panic("IOMapperInsertPPNPages no system mapper"); + else + assert(!((vm_address_t) IOMapper::gSystem & 3)); + + IOMapper::gSystem-> + iovmInsert(addr, (IOItemCount) offset, pageList, pageCount); +} + +void IOMapperInsertUPLPages(ppnum_t addr, unsigned offset, + upl_page_info_t *pageList, unsigned pageCount) +{ + if (!IOMapper::gSystem) + panic("IOMapperInsertUPLPages no system mapper"); + else + assert(!((vm_address_t) IOMapper::gSystem & 3)); + + IOMapper::gSystem->iovmInsert(addr, + (IOItemCount) offset, + pageList, + (IOItemCount) pageCount); +} + +///////////////////////////////////////////////////////////////////////////// +// +// +// IOLib.h APIs +// +// +///////////////////////////////////////////////////////////////////////////// + +#include + +UInt8 IOMappedRead8(IOPhysicalAddress address) +{ + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapAddr(address); + return (UInt8) ml_phys_read_byte_64(addr); + } + else + return (UInt8) ml_phys_read_byte((vm_offset_t) address); +} + +UInt16 IOMappedRead16(IOPhysicalAddress address) +{ + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapAddr(address); + return (UInt16) ml_phys_read_half_64(addr); + } + else + return (UInt16) ml_phys_read_half((vm_offset_t) address); +} +
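For context, a minimal sketch (hypothetical driver-side code, not in the patch) of how the C accessors defined earlier in this file fit together; IOMallocContiguous() in IOLib.c uses the same IOMapperIOVMAlloc/IOMapperInsertPage sequence when it hands out a physical address:

    // Map an already-wired page list for DMA through the system mapper.
    // Returns the bus-visible address in *dmaAddr; the caller keeps *base
    // around and later releases the range with IOMapperIOVMFree(*base, n).
    kern_return_t map_pages_for_dma(ppnum_t *pageList, unsigned pageCount,
                                    IOPhysicalAddress *dmaAddr, ppnum_t *base)
    {
        *base = IOMapperIOVMAlloc(pageCount);
        if (!*base)
            return KERN_RESOURCE_SHORTAGE;  // no system mapper, or no space

        // Point each I/O-virtual page at the corresponding physical page.
        for (unsigned i = 0; i < pageCount; i++)
            IOMapperInsertPage(*base, i, pageList[i]);

        *dmaAddr = (IOPhysicalAddress) (*base << PAGE_SHIFT);
        return KERN_SUCCESS;
    }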
+UInt32 IOMappedRead32(IOPhysicalAddress address) +{ + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapAddr(address); + return (UInt32) ml_phys_read_word_64(addr); + } + else + return (UInt32) ml_phys_read_word((vm_offset_t) address); +} + +UInt64 IOMappedRead64(IOPhysicalAddress address) +{ + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapAddr(address); + return (UInt64) ml_phys_read_double_64(addr); + } + else + return (UInt64) ml_phys_read_double((vm_offset_t) address); +} + +void IOMappedWrite8(IOPhysicalAddress address, UInt8 value) +{ + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapAddr(address); + ml_phys_write_byte_64(addr, value); + } + else + ml_phys_write_byte((vm_offset_t) address, value); +} + +void IOMappedWrite16(IOPhysicalAddress address, UInt16 value) +{ + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapAddr(address); + ml_phys_write_half_64(addr, value); + } + else + ml_phys_write_half((vm_offset_t) address, value); +} + +void IOMappedWrite32(IOPhysicalAddress address, UInt32 value) +{ + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapAddr(address); + ml_phys_write_word_64(addr, value); + } + else + ml_phys_write_word((vm_offset_t) address, value); +} + +void IOMappedWrite64(IOPhysicalAddress address, UInt64 value) +{ + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapAddr(address); + ml_phys_write_double_64(addr, value); + } + else + ml_phys_write_double((vm_offset_t) address, value); +} + +__END_DECLS diff --git a/iokit/Kernel/IOMemoryCursor.cpp b/iokit/Kernel/IOMemoryCursor.cpp index 1c4152321..27a095735 100644 --- a/iokit/Kernel/IOMemoryCursor.cpp +++ b/iokit/Kernel/IOMemoryCursor.cpp @@ -64,6 +64,18 @@ IOMemoryCursor::initWithSpecification(SegmentFunction inSegFunc, IOPhysicalLength inMaxTransferSize, IOPhysicalLength inAlignment) { +// @@@ gvdl: Remove me +#if 1 +static UInt sMaxDBDMASegment; +if (!sMaxDBDMASegment) { + sMaxDBDMASegment = (UInt) -1; + if (PE_parse_boot_arg("mseg", &sMaxDBDMASegment)) + IOLog("Setting MaxDBDMASegment to %d\n", sMaxDBDMASegment); +} + +if (inMaxSegmentSize > sMaxDBDMASegment) inMaxSegmentSize = sMaxDBDMASegment; +#endif + if (!super::init()) return false; @@ -107,22 +119,66 @@ IOMemoryCursor::genPhysicalSegments(IOMemoryDescriptor *inDescriptor, * If we finished cleanly return number of segments found * and update the position in the descriptor. 
*/ + PhysicalSegment curSeg = { 0 }; UInt curSegIndex = 0; UInt curTransferSize = 0; - PhysicalSegment seg; + IOByteCount inDescriptorLength = inDescriptor->getLength(); + PhysicalSegment seg = { 0 }; - while ((curSegIndex < inMaxSegments) - && (curTransferSize < inMaxTransferSize) - && (seg.location = inDescriptor->getPhysicalSegment( - fromPosition + curTransferSize, &seg.length))) + while ((seg.location) || (fromPosition < inDescriptorLength)) { - assert(seg.length); - seg.length = min(inMaxTransferSize-curTransferSize, - (min(seg.length, maxSegmentSize))); - (*outSeg)(seg, inSegments, curSegIndex++); - curTransferSize += seg.length; + if (!seg.location) + { + seg.location = inDescriptor->getPhysicalSegment( + fromPosition, &seg.length); + assert(seg.location); + assert(seg.length); + fromPosition += seg.length; + } + + if (!curSeg.location) + { + curTransferSize += seg.length; + curSeg = seg; + seg.location = 0; + } + else if ((curSeg.location + curSeg.length == seg.location)) + { + curTransferSize += seg.length; + curSeg.length += seg.length; + seg.location = 0; + } + + if (!seg.location) + { + if ((curSeg.length > maxSegmentSize)) + { + seg.location = curSeg.location + maxSegmentSize; + seg.length = curSeg.length - maxSegmentSize; + curTransferSize -= seg.length; + curSeg.length -= seg.length; + } + + if ((curTransferSize >= inMaxTransferSize)) + { + curSeg.length -= curTransferSize - inMaxTransferSize; + curTransferSize = inMaxTransferSize; + break; + } + } + + if (seg.location) + { + if ((curSegIndex + 1 == inMaxSegments)) + break; + (*outSeg)(curSeg, inSegments, curSegIndex++); + curSeg.location = 0; + } } + if (curSeg.location) + (*outSeg)(curSeg, inSegments, curSegIndex++); + if (outTransferSize) *outTransferSize = curTransferSize; diff --git a/iokit/Kernel/IOMemoryDescriptor.cpp b/iokit/Kernel/IOMemoryDescriptor.cpp index 8dc3f43c2..f36d55244 100644 --- a/iokit/Kernel/IOMemoryDescriptor.cpp +++ b/iokit/Kernel/IOMemoryDescriptor.cpp @@ -28,11 +28,15 @@ * HISTORY * */ +// 45678901234567890123456789012345678901234567890123456789012345678901234567890 +#include #include #include #include #include +#include +#include #include @@ -45,15 +49,17 @@ __BEGIN_DECLS #include +#include #include -void bcopy_phys(char *from, char *to, int size); -void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, - vm_prot_t prot, unsigned int flags, boolean_t wired); + #ifndef i386 -struct phys_entry *pmap_find_physentry(vm_offset_t pa); +struct phys_entry *pmap_find_physentry(ppnum_t pa); #endif +extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); void ipc_port_release_send(ipc_port_t port); -vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset); + +/* Copy between a physical page and a virtual address in the given vm_map */ +kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which); memory_object_t device_pager_setup( @@ -68,8 +74,17 @@ kern_return_t device_pager_populate_object( memory_object_t pager, vm_object_offset_t offset, - vm_offset_t phys_addr, + ppnum_t phys_addr, vm_size_t size); +kern_return_t +memory_object_iopl_request( + ipc_port_t port, + memory_object_offset_t offset, + vm_size_t *upl_size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + int *flags); /* * Page fault handling based on vm_map (or entries therein) @@ -83,26 +98,28 @@ extern kern_return_t vm_fault( pmap_t caller_pmap, vm_offset_t caller_pmap_addr); -__END_DECLS +unsigned int IOTranslateCacheBits(struct phys_entry *pp); -/* * * * * 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +vm_map_t IOPageableMapForAddress( vm_address_t address ); -OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject ) +typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref); -#define super IOMemoryDescriptor +kern_return_t IOIteratePageableMaps(vm_size_t size, + IOIteratePageableMapsCallback callback, void * ref); +__END_DECLS -OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor) +#define kIOMaximumMappedIOByteCount (512*1024*1024) -extern "C" { +static IOMapper * gIOSystemMapper; +static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount); -vm_map_t IOPageableMapForAddress( vm_address_t address ); +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref); +OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject ) -kern_return_t IOIteratePageableMaps(vm_size_t size, - IOIteratePageableMapsCallback callback, void * ref); +#define super IOMemoryDescriptor -} +OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -116,37 +133,7 @@ static IORecursiveLock * gIOMemoryLock; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address ) -{ - if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags)) - return( IOPageableMapForAddress( address ) ); - else - return( get_task_map( task )); -} - -inline vm_offset_t pmap_extract_safe(task_t task, vm_offset_t va) -{ - vm_offset_t pa = pmap_extract(get_task_pmap(task), va); - - if ( pa == 0 ) - { - pa = vm_map_get_phys_page(get_task_map(task), trunc_page(va)); - if ( pa ) pa += va - trunc_page(va); - } - - return pa; -} - -inline void bcopy_phys_safe(char * from, char * to, int size) -{ - boolean_t enabled = ml_set_interrupts_enabled(FALSE); - - bcopy_phys(from, to, size); - - ml_set_interrupts_enabled(enabled); -} - -#define next_page(a) ( trunc_page(a) + page_size ) +#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE ) extern "C" { @@ -209,13 +196,23 @@ kern_return_t device_close( */ IOMemoryDescriptor * IOMemoryDescriptor::withAddress(void * address, - IOByteCount withLength, - IODirection withDirection) + IOByteCount length, + IODirection direction) +{ + return IOMemoryDescriptor:: + withAddress((vm_address_t) address, length, direction, kernel_task); +} + +IOMemoryDescriptor * +IOMemoryDescriptor::withAddress(vm_address_t address, + IOByteCount length, + IODirection direction, + task_t task) { IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; if (that) { - if (that->initWithAddress(address, withLength, withDirection)) + if (that->initWithAddress(address, length, direction, task)) return that; that->release(); @@ -224,15 +221,32 @@ IOMemoryDescriptor::withAddress(void * address, } IOMemoryDescriptor * -IOMemoryDescriptor::withAddress(vm_address_t address, - IOByteCount withLength, - IODirection withDirection, - task_t withTask) +IOMemoryDescriptor::withPhysicalAddress( + IOPhysicalAddress address, + IOByteCount length, + IODirection direction ) +{ + IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; + if (self + && !self->initWithPhysicalAddress(address, length, direction)) { + self->release(); + return 0; + } + + return self; +} + +IOMemoryDescriptor 
* +IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, + UInt32 withCount, + IODirection direction, + task_t task, + bool asReference) { IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; if (that) { - if (that->initWithAddress(address, withLength, withDirection, withTask)) + if (that->initWithRanges(ranges, withCount, direction, task, asReference)) return that; that->release(); @@ -240,16 +254,6 @@ IOMemoryDescriptor::withAddress(vm_address_t address, return 0; } -IOMemoryDescriptor * -IOMemoryDescriptor::withPhysicalAddress( - IOPhysicalAddress address, - IOByteCount withLength, - IODirection withDirection ) -{ - return( IOMemoryDescriptor::withAddress( address, withLength, - withDirection, (task_t) 0 )); -} - /* * withRanges: @@ -260,33 +264,48 @@ IOMemoryDescriptor::withPhysicalAddress( * Passing the ranges as a reference will avoid an extra allocation. */ IOMemoryDescriptor * -IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, - UInt32 withCount, - IODirection withDirection, - task_t withTask, - bool asReference = false) +IOMemoryDescriptor::withOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits opts, + IOMapper * mapper) { - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; - if (that) - { - if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference)) - return that; + IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; - that->release(); + if (self + && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) + { + self->release(); + return 0; } + + return self; +} + +// Can't leave abstract but this should never be used directly, +bool IOMemoryDescriptor::initWithOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper) +{ + // @@@ gvdl: Should I panic? 
+ panic("IOMD::initWithOptions called\n"); return 0; } IOMemoryDescriptor * IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges, UInt32 withCount, - IODirection withDirection, - bool asReference = false) + IODirection direction, + bool asReference) { IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; if (that) { - if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference)) + if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) return that; that->release(); @@ -298,15 +317,15 @@ IOMemoryDescriptor * IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, IOByteCount offset, IOByteCount length, - IODirection withDirection) + IODirection direction) { - IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor; + IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor; - if (that && !that->initSubRange(of, offset, length, withDirection)) { - that->release(); - that = 0; + if (self && !self->initSubRange(of, offset, length, direction)) { + self->release(); + self = 0; } - return that; + return self; } /* @@ -356,29 +375,123 @@ IOGeneralMemoryDescriptor::initWithPhysicalAddress( return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true); } +bool +IOGeneralMemoryDescriptor::initWithPhysicalRanges( + IOPhysicalRange * ranges, + UInt32 count, + IODirection direction, + bool reference) +{ + IOOptionBits mdOpts = direction | kIOMemoryTypePhysical; + + if (reference) + mdOpts |= kIOMemoryAsReference; + + return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0); +} + +bool +IOGeneralMemoryDescriptor::initWithRanges( + IOVirtualRange * ranges, + UInt32 count, + IODirection direction, + task_t task, + bool reference) +{ + IOOptionBits mdOpts = direction; + + if (reference) + mdOpts |= kIOMemoryAsReference; + + if (task) { + mdOpts |= kIOMemoryTypeVirtual; + if (task == kernel_task) + mdOpts |= kIOMemoryAutoPrepare; + } + else + mdOpts |= kIOMemoryTypePhysical; + + // @@@ gvdl: Need to remove this + // Auto-prepare if this is a kernel memory descriptor as very few + // clients bother to prepare() kernel memory. + // But it has been enforced so what are you going to do? + + return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0); +} + /* - * initWithRanges: + * initWithOptions: * - * Initialize an IOMemoryDescriptor. The buffer is made up of several - * virtual address ranges, from a given task + * IOMemoryDescriptor. The buffer is made up of several virtual address ranges, + * from a given task or several physical ranges or finally an UPL from the ubc + * system. * * Passing the ranges as a reference will avoid an extra allocation. * - * An IOMemoryDescriptor can be re-used by calling initWithAddress or - * initWithRanges again on an existing instance -- note this behavior - * is not commonly supported in other I/O Kit classes, although it is - * supported here. + * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an + * existing instance -- note this behavior is not commonly supported in other + * I/O Kit classes, although it is supported here. 
*/ + +enum ioPLBlockFlags { + kIOPLOnDevice = 0x00000001, + kIOPLExternUPL = 0x00000002, +}; + +struct ioPLBlock { + upl_t fIOPL; + vm_address_t fIOMDOffset; // The offset of this iopl in descriptor + vm_offset_t fPageInfo; // Pointer to page list or index into it + ppnum_t fMappedBase; // Page number of first page in this iopl + unsigned int fPageOffset; // Offset within first page of iopl + unsigned int fFlags; // Flags +}; + +struct ioGMDData { + IOMapper *fMapper; + unsigned int fPageCnt; + upl_page_info_t fPageList[0]; // @@@ gvdl need to get rid of this + // should be able to use upl directly + ioPLBlock fBlocks[0]; +}; + +#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy()) +#define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt])) +#define getNumIOPL(d,len) \ + ((len - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)) +#define getPageList(d) (&(d->fPageList[0])) +#define computeDataSize(p, u) \ + (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) + bool -IOGeneralMemoryDescriptor::initWithRanges( - IOVirtualRange * ranges, - UInt32 withCount, - IODirection withDirection, - task_t withTask, - bool asReference = false) -{ - assert(ranges); - assert(withCount); +IOGeneralMemoryDescriptor::initWithOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper) +{ + + switch (options & kIOMemoryTypeMask) { + case kIOMemoryTypeVirtual: + assert(task); + if (!task) + return false; + else + break; + + case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task + mapper = kIOMapperNone; + case kIOMemoryTypeUPL: + assert(!task); + break; + default: +panic("IOGMD::iWO(): bad type"); // @@@ gvdl: for testing + return false; /* bad argument */ + } + + assert(buffers); + assert(count); /* * We can check the _initialized instance variable before having ever set @@ -386,20 +499,12 @@ IOGeneralMemoryDescriptor::initWithRanges( * variables are zeroed on an object's allocation. */ - if (_initialized == false) - { - if (super::init() == false) return false; - _initialized = true; - } - else - { + if (_initialized) { /* * An existing memory descriptor is being retargeted to point to * somewhere else. Clean up our present state. */ - assert(_wireCount == 0); - while (_wireCount) complete(); if (_kernPtrAligned) @@ -407,54 +512,173 @@ IOGeneralMemoryDescriptor::initWithRanges( if (_ranges.v && _rangesIsAllocated) IODelete(_ranges.v, IOVirtualRange, _rangesCount); } + else { + if (!super::init()) + return false; + _initialized = true; + } - /* - * Initialize the memory descriptor. - */ + // Grab the appropriate mapper + if (mapper == kIOMapperNone) + mapper = 0; // No Mapper + else if (!mapper) { + IOMapper::checkForSystemMapper(); + gIOSystemMapper = mapper = IOMapper::gSystem; + } - _ranges.v = 0; - _rangesCount = withCount; - _rangesIsAllocated = asReference ? 
false : true; - _direction = withDirection; - _length = 0; - _task = withTask; + _flags = options; + _task = task; + + // DEPRECATED variable initialisation + _direction = (IODirection) (_flags & kIOMemoryDirectionMask); _position = 0; - _positionAtIndex = 0; - _positionAtOffset = 0; _kernPtrAligned = 0; _cachedPhysicalAddress = 0; _cachedVirtualAddress = 0; - _flags = 0; - if (withTask && (withTask != kernel_task)) - _flags |= kIOMemoryRequiresWire; + if ( (options & kIOMemoryTypeMask) == kIOMemoryTypeUPL) { - if (asReference) - _ranges.v = ranges; - else - { - _ranges.v = IONew(IOVirtualRange, withCount); - if (_ranges.v == 0) return false; - bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange)); - } + ioGMDData *dataP; + unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1); - for (unsigned index = 0; index < _rangesCount; index++) - { - _length += _ranges.v[index].length; + if (!_memoryEntries) { + _memoryEntries = OSData::withCapacity(dataSize); + if (!_memoryEntries) + return false; + } + else if (!_memoryEntries->initWithCapacity(dataSize)) + return false; + + _memoryEntries->appendBytes(0, sizeof(ioGMDData)); + dataP = getDataP(_memoryEntries); + dataP->fMapper = mapper; + dataP->fPageCnt = 0; + + _wireCount++; // UPLs start out life wired + + _length = count; + _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset); + + ioPLBlock iopl; + upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers); + + iopl.fIOPL = (upl_t) buffers; + // Set the flag kIOPLOnDevice convieniently equal to 1 + iopl.fFlags = pageList->device | kIOPLExternUPL; + iopl.fIOMDOffset = 0; + if (!pageList->device) { + // @@@ gvdl: Ask JoeS are the pages contiguious with the list? + // or there a chance that we may be inserting 0 phys_addrs? + // Pre-compute the offset into the UPL's page list + pageList = &pageList[atop_32(offset)]; + offset &= PAGE_MASK; + if (mapper) { + iopl.fMappedBase = mapper->iovmAlloc(_pages); + mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages); + } + else + iopl.fMappedBase = 0; + } + else + iopl.fMappedBase = 0; + iopl.fPageInfo = (vm_address_t) pageList; + iopl.fPageOffset = offset; + + _memoryEntries->appendBytes(&iopl, sizeof(iopl)); } + else { /* kIOMemoryTypeVirtual | kIOMemoryTypePhysical */ + IOVirtualRange *ranges = (IOVirtualRange *) buffers; - return true; -} + /* + * Initialize the memory descriptor. + */ -bool -IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, - UInt32 withCount, - IODirection withDirection, - bool asReference = false) -{ -#warning assuming virtual, physical addresses same size - return( initWithRanges( (IOVirtualRange *) ranges, - withCount, withDirection, (task_t) 0, asReference )); + _length = 0; + _pages = 0; + for (unsigned ind = 0; ind < count; ind++) { + IOVirtualRange cur = ranges[ind]; + + _length += cur.length; + _pages += atop_32(cur.address + cur.length + PAGE_MASK) + - atop_32(cur.address); + } + + _ranges.v = 0; + _rangesIsAllocated = !(options & kIOMemoryAsReference); + _rangesCount = count; + + if (options & kIOMemoryAsReference) + _ranges.v = ranges; + else { + _ranges.v = IONew(IOVirtualRange, count); + if (!_ranges.v) + return false; + bcopy(/* from */ ranges, _ranges.v, + count * sizeof(IOVirtualRange)); + } + + // Auto-prepare memory at creation time. 
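The _pages accounting above uses the standard covering-page count: a byte range [addr, addr + len) touches atop(addr + len + PAGE_MASK) - atop(addr) pages. A self-contained check, assuming 4 KB pages for illustration:

    #include <cstdint>
    #include <cstdio>

    static const uint32_t kPageShift = 12;               // assumed page size: 4 KB
    static const uint32_t kPageMask  = (1u << kPageShift) - 1;
    static uint32_t atop(uint32_t addr) { return addr >> kPageShift; }

    int main()
    {
        uint32_t addr = 0x1ff0, len = 0x20;              // straddles a page boundary
        uint32_t pages = atop(addr + len + kPageMask) - atop(addr);
        printf("%u pages\n", pages);                     // prints 2: len < PAGE_SIZE,
        return 0;                                        // but two pages are touched
    }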
+    // Implied completion when descriptor is freed
+    if ( (options & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
+        _wireCount++;   // Physical MDs start out wired
+    else { /* kIOMemoryTypeVirtual */
+        ioGMDData *dataP;
+        unsigned int dataSize =
+            computeDataSize(_pages, /* upls */ _rangesCount * 2);
+
+        if (!_memoryEntries) {
+            _memoryEntries = OSData::withCapacity(dataSize);
+            if (!_memoryEntries)
+                return false;
+        }
+        else if (!_memoryEntries->initWithCapacity(dataSize))
+            return false;
+
+        _memoryEntries->appendBytes(0, sizeof(ioGMDData));
+        dataP = getDataP(_memoryEntries);
+        dataP->fMapper = mapper;
+        dataP->fPageCnt = _pages;
+
+        if (kIOMemoryPersistent & _flags)
+        {
+            kern_return_t error;
+            ipc_port_t sharedMem;
+
+            vm_size_t size = _pages << PAGE_SHIFT;
+            vm_address_t startPage;
+
+            startPage = trunc_page_32(_ranges.v[0].address);
+
+            vm_map_t theMap = ((_task == kernel_task) && (kIOMemoryBufferPageable & _flags))
+                ? IOPageableMapForAddress(startPage)
+                : get_task_map(_task);
+
+            vm_size_t actualSize = size;
+            error = mach_make_memory_entry( theMap,
+                        &actualSize, startPage,
+                        VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
+                        NULL );
+
+            if (KERN_SUCCESS == error) {
+                if (actualSize == round_page_32(size)) {
+                    _memEntry = (void *) sharedMem;
+                } else {
+#if IOASSERT
+                    IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n",
+                          startPage, (UInt32)actualSize, size);
+#endif
+                    ipc_port_release_send( sharedMem );
+                }
+            }
+        }
+
+        if ((_flags & kIOMemoryAutoPrepare)
+         && prepare() != kIOReturnSuccess)
+            return false;
+    }
+    }
+
+    return true;
 }
 
 /*
@@ -471,101 +695,33 @@ void IOGeneralMemoryDescriptor::free()
     while (_wireCount)
         complete();
+    if (_memoryEntries)
+        _memoryEntries->release();
+
     if (_kernPtrAligned)
         unmapFromKernel();
     if (_ranges.v && _rangesIsAllocated)
         IODelete(_ranges.v, IOVirtualRange, _rangesCount);
 
-    if( reserved && reserved->devicePager)
-        device_pager_deallocate( reserved->devicePager );
+    if (reserved && reserved->devicePager)
+        device_pager_deallocate( (memory_object_t) reserved->devicePager );
 
-    // memEntry holds a ref on the device pager which owns reserved (ExpansionData)
-    // so no reserved access after this point
-    if( _memEntry)
+    // memEntry holds a ref on the device pager which owns reserved
+    // (ExpansionData) so no reserved access after this point
+    if (_memEntry)
         ipc_port_release_send( (ipc_port_t) _memEntry );
+
     super::free();
 }
 
/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
/* DEPRECATED */ {
-/* DEPRECATED */     kern_return_t krtn;
-/* DEPRECATED */     vm_offset_t off;
-/* DEPRECATED */     // Pull the shared pages out of the task map
-/* DEPRECATED */     // Do we need to unwire it first?
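The free() implementation above completes any wirings still outstanding before releasing state, so a leaked prepare() degrades gracefully. The intended client pairing, as a hypothetical sketch (buf, len and task are placeholders):

    IOMemoryDescriptor *md =
        IOMemoryDescriptor::withAddress(buf, len, kIODirectionOut, task);
    if (md && md->prepare() == kIOReturnSuccess) {   // wire for the transfer
        // ... perform the I/O against md ...
        md->complete();                              // pair every prepare()
    }
    if (md)
        md->release();        // free() completes leftover wirings itself

Nesting is legal as well: _wireCount means only the first prepare() and the last complete() do real work.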
-/* DEPRECATED */ for ( off = 0; off < _kernSize; off += page_size ) -/* DEPRECATED */ { -/* DEPRECATED */ pmap_change_wiring( -/* DEPRECATED */ kernel_pmap, -/* DEPRECATED */ _kernPtrAligned + off, -/* DEPRECATED */ FALSE); -/* DEPRECATED */ -/* DEPRECATED */ pmap_remove( -/* DEPRECATED */ kernel_pmap, -/* DEPRECATED */ _kernPtrAligned + off, -/* DEPRECATED */ _kernPtrAligned + off + page_size); -/* DEPRECATED */ } -/* DEPRECATED */ // Free the former shmem area in the task -/* DEPRECATED */ krtn = vm_deallocate(kernel_map, -/* DEPRECATED */ _kernPtrAligned, -/* DEPRECATED */ _kernSize ); -/* DEPRECATED */ assert(krtn == KERN_SUCCESS); -/* DEPRECATED */ _kernPtrAligned = 0; + panic("IOGMD::unmapFromKernel deprecated"); /* DEPRECATED */ } /* DEPRECATED */ /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) /* DEPRECATED */ { -/* DEPRECATED */ kern_return_t krtn; -/* DEPRECATED */ vm_offset_t off; -/* DEPRECATED */ -/* DEPRECATED */ if (_kernPtrAligned) -/* DEPRECATED */ { -/* DEPRECATED */ if (_kernPtrAtIndex == rangeIndex) return; -/* DEPRECATED */ unmapFromKernel(); -/* DEPRECATED */ assert(_kernPtrAligned == 0); -/* DEPRECATED */ } -/* DEPRECATED */ -/* DEPRECATED */ vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address); -/* DEPRECATED */ -/* DEPRECATED */ _kernSize = trunc_page(_ranges.v[rangeIndex].address + -/* DEPRECATED */ _ranges.v[rangeIndex].length + -/* DEPRECATED */ page_size - 1) - srcAlign; -/* DEPRECATED */ -/* DEPRECATED */ /* Find some memory of the same size in kernel task. We use vm_allocate() */ -/* DEPRECATED */ /* to do this. vm_allocate inserts the found memory object in the */ -/* DEPRECATED */ /* target task's map as a side effect. */ -/* DEPRECATED */ krtn = vm_allocate( kernel_map, -/* DEPRECATED */ &_kernPtrAligned, -/* DEPRECATED */ _kernSize, -/* DEPRECATED */ VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) ); // Find first fit -/* DEPRECATED */ assert(krtn == KERN_SUCCESS); -/* DEPRECATED */ if(krtn) return; -/* DEPRECATED */ -/* DEPRECATED */ /* For each page in the area allocated from the kernel map, */ -/* DEPRECATED */ /* find the physical address of the page. */ -/* DEPRECATED */ /* Enter the page in the target task's pmap, at the */ -/* DEPRECATED */ /* appropriate target task virtual address. */ -/* DEPRECATED */ for ( off = 0; off < _kernSize; off += page_size ) -/* DEPRECATED */ { -/* DEPRECATED */ vm_offset_t kern_phys_addr, phys_addr; -/* DEPRECATED */ if( _task) -/* DEPRECATED */ phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off ); -/* DEPRECATED */ else -/* DEPRECATED */ phys_addr = srcAlign + off; -/* DEPRECATED */ assert(phys_addr); -/* DEPRECATED */ if(phys_addr == 0) return; -/* DEPRECATED */ -/* DEPRECATED */ // Check original state. 
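With the kernel-window helpers reduced to panics, the supported route to a kernel-visible pointer is the mapping API, per the "USE INSTEAD" note further down. A sketch for an already-prepared descriptor md:

    IOMemoryMap *map = md->map();        // map into kernel_task, anywhere
    if (map) {
        void *p = (void *) map->getVirtualAddress();
        // ... access the buffer through p ...
        map->release();                  // tears the mapping down
    }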
-/* DEPRECATED */ kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off ); -/* DEPRECATED */ // Set virtual page to point to the right physical one -/* DEPRECATED */ pmap_enter( -/* DEPRECATED */ kernel_pmap, -/* DEPRECATED */ _kernPtrAligned + off, -/* DEPRECATED */ phys_addr, -/* DEPRECATED */ VM_PROT_READ|VM_PROT_WRITE, -/* DEPRECATED */ VM_WIMG_USE_DEFAULT, -/* DEPRECATED */ TRUE); -/* DEPRECATED */ } -/* DEPRECATED */ _kernPtrAtIndex = rangeIndex; + panic("IOGMD::mapIntoKernel deprecated"); /* DEPRECATED */ } /* @@ -588,8 +744,7 @@ IOByteCount IOMemoryDescriptor::getLength() const return _length; } -void IOMemoryDescriptor::setTag( - IOOptionBits tag ) +void IOMemoryDescriptor::setTag( IOOptionBits tag ) { _tag = tag; } @@ -599,6 +754,7 @@ IOOptionBits IOMemoryDescriptor::getTag( void ) return( _tag); } +// @@@ gvdl: who is using this API? Seems like a wierd thing to implement. IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length ) { @@ -612,263 +768,282 @@ IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset, return( physAddr ); } -IOByteCount IOMemoryDescriptor::readBytes( IOByteCount offset, - void * bytes, - IOByteCount withLength ) +IOByteCount IOMemoryDescriptor::readBytes + (IOByteCount offset, void *bytes, IOByteCount length) { - IOByteCount bytesCopied = 0; + addr64_t dstAddr = (addr64_t) (UInt32) bytes; + IOByteCount remaining; - assert(offset <= _length); - assert(offset <= _length - withLength); + // Assert that this entire I/O is withing the available range + assert(offset < _length); + assert(offset + length <= _length); + if (offset >= _length) { +IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl + return 0; + } - if ( offset < _length ) - { - withLength = min(withLength, _length - offset); + remaining = length = min(length, _length - offset); + while (remaining) { // (process another target segment?) + addr64_t srcAddr64; + IOByteCount srcLen; - while ( withLength ) // (process another source segment?) - { - IOPhysicalAddress sourceSegment; - IOByteCount sourceSegmentLength; + srcAddr64 = getPhysicalSegment64(offset, &srcLen); + if (!srcAddr64) + break; - sourceSegment = getPhysicalSegment(offset, &sourceSegmentLength); - if ( sourceSegment == 0 ) goto readBytesErr; + // Clip segment length to remaining + if (srcLen > remaining) + srcLen = remaining; - sourceSegmentLength = min(sourceSegmentLength, withLength); + copypv(srcAddr64, dstAddr, srcLen, + cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap); - while ( sourceSegmentLength ) // (process another target segment?) 
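The readBytes() rewrite above is the canonical segment walk: advance an offset through the descriptor one physical segment at a time, clipping each segment to the bytes still outstanding. The skeleton, abstracted from the code above:

    IOByteCount offset = startOffset, remaining = length;
    while (remaining) {
        IOByteCount segLen;
        addr64_t    segAddr = md->getPhysicalSegment64(offset, &segLen);
        if (!segAddr)
            break;                   // ran off the end of the descriptor
        if (segLen > remaining)
            segLen = remaining;      // clip the segment to the I/O size
        // ... move segLen bytes at physical address segAddr (copypv above) ...
        offset    += segLen;
        remaining -= segLen;
    }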
- { - IOPhysicalAddress targetSegment; - IOByteCount targetSegmentLength; + dstAddr += srcLen; + offset += srcLen; + remaining -= srcLen; + } - targetSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes); - if ( targetSegment == 0 ) goto readBytesErr; + assert(!remaining); - targetSegmentLength = min(next_page(targetSegment) - targetSegment, sourceSegmentLength); + return length - remaining; +} - if ( sourceSegment + targetSegmentLength > next_page(sourceSegment) ) - { - IOByteCount pageLength; +IOByteCount IOMemoryDescriptor::writeBytes + (IOByteCount offset, const void *bytes, IOByteCount length) +{ + addr64_t srcAddr = (addr64_t) (UInt32) bytes; + IOByteCount remaining; - pageLength = next_page(sourceSegment) - sourceSegment; + // Assert that this entire I/O is withing the available range + assert(offset < _length); + assert(offset + length <= _length); - bcopy_phys_safe( /* from */ (char *) sourceSegment, - /* to */ (char *) targetSegment, - /* size */ (int ) pageLength ); + assert( !(kIOMemoryPreparedReadOnly & _flags) ); - ((UInt8 *) bytes) += pageLength; - bytesCopied += pageLength; - offset += pageLength; - sourceSegment += pageLength; - sourceSegmentLength -= pageLength; - targetSegment += pageLength; - targetSegmentLength -= pageLength; - withLength -= pageLength; - } + if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) { +IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl + return 0; + } - bcopy_phys_safe( /* from */ (char *) sourceSegment, - /* to */ (char *) targetSegment, - /* size */ (int ) targetSegmentLength ); + remaining = length = min(length, _length - offset); + while (remaining) { // (process another target segment?) + addr64_t dstAddr64; + IOByteCount dstLen; - ((UInt8 *) bytes) += targetSegmentLength; - bytesCopied += targetSegmentLength; - offset += targetSegmentLength; - sourceSegment += targetSegmentLength; - sourceSegmentLength -= targetSegmentLength; - withLength -= targetSegmentLength; - } - } - } + dstAddr64 = getPhysicalSegment64(offset, &dstLen); + if (!dstAddr64) + break; -readBytesErr: + // Clip segment length to remaining + if (dstLen > remaining) + dstLen = remaining; - if ( bytesCopied ) - { - // We mark the destination pages as modified, just - // in case they are made pageable later on in life. + copypv(srcAddr, (addr64_t) dstAddr64, dstLen, + cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); - pmap_modify_pages( /* pmap */ kernel_pmap, - /* start */ trunc_page(((vm_offset_t) bytes) - bytesCopied), - /* end */ round_page(((vm_offset_t) bytes)) ); + srcAddr += dstLen; + offset += dstLen; + remaining -= dstLen; } - return bytesCopied; + assert(!remaining); + + return length - remaining; } -IOByteCount IOMemoryDescriptor::writeBytes( IOByteCount offset, - const void * bytes, - IOByteCount withLength ) -{ - IOByteCount bytesCopied = 0; +// osfmk/device/iokit_rpc.c +extern "C" unsigned int IODefaultCacheBits(addr64_t pa); - assert(offset <= _length); - assert(offset <= _length - withLength); +/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) +/* DEPRECATED */ { + panic("IOGMD::setPosition deprecated"); +/* DEPRECATED */ } - if ( offset < _length ) - { - withLength = min(withLength, _length - offset); +IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment + (IOByteCount offset, IOByteCount *lengthOfSegment) +{ + IOPhysicalAddress address = 0; + IOPhysicalLength length = 0; - while ( withLength ) // (process another target segment?) 
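writeBytes() above is the mirror image, with copypv() told that the sink is physical and, via cppvNoModSnk, to leave the sink pages' modified bits untouched. A trivial usage sketch (md is a hypothetical prepared, writable descriptor):

    char magic[4] = { 'B', 'O', 'O', 'T' };
    IOByteCount done = md->writeBytes(/* offset */ 0, magic, sizeof(magic));
    assert(done == sizeof(magic));   // a short count means the offset or a
                                     // segment lookup failed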
- { - IOPhysicalAddress targetSegment; - IOByteCount targetSegmentLength; +// assert(offset <= _length); + if (offset < _length) // (within bounds?) + { + if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) { + unsigned int ind; - targetSegment = getPhysicalSegment(offset, &targetSegmentLength); - if ( targetSegment == 0 ) goto writeBytesErr; + // Physical address based memory descriptor - targetSegmentLength = min(targetSegmentLength, withLength); + // Find offset within descriptor and make it relative + // to the current _range. + for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ ) + offset -= _ranges.p[ind].length; + + IOPhysicalRange cur = _ranges.p[ind]; + address = cur.address + offset; + length = cur.length - offset; + + // see how far we can coalesce ranges + for (++ind; ind < _rangesCount; ind++) { + cur = _ranges.p[ind]; + + if (address + length != cur.address) + break; + + length += cur.length; + } - while ( targetSegmentLength ) // (process another source segment?) + // @@@ gvdl: should assert(address); + // but can't as NVidia GeForce creates a bogus physical mem { - IOPhysicalAddress sourceSegment; - IOByteCount sourceSegmentLength; + assert(address || /*nvidia*/(!_ranges.p[0].address && 1 == _rangesCount)); + } + assert(length); + } + else do { + // We need wiring & we are wired. + assert(_wireCount); - sourceSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes); - if ( sourceSegment == 0 ) goto writeBytesErr; + if (!_wireCount) + { + panic("IOGMD: not wired for getPhysicalSegment()"); + continue; + } - sourceSegmentLength = min(next_page(sourceSegment) - sourceSegment, targetSegmentLength); + assert(_memoryEntries); - if ( targetSegment + sourceSegmentLength > next_page(targetSegment) ) - { - IOByteCount pageLength; + ioGMDData * dataP = getDataP(_memoryEntries); + const ioPLBlock *ioplList = getIOPLList(dataP); + UInt ind, numIOPLs = getNumIOPL(dataP, _memoryEntries->getLength()); + upl_page_info_t *pageList = getPageList(dataP); - pageLength = next_page(targetSegment) - targetSegment; + assert(numIOPLs > 0); - bcopy_phys_safe( /* from */ (char *) sourceSegment, - /* to */ (char *) targetSegment, - /* size */ (int ) pageLength ); + // Scan through iopl info blocks looking for block containing offset + for (ind = 1; ind < numIOPLs; ind++) { + if (offset < ioplList[ind].fIOMDOffset) + break; + } - // We flush the data cache in case it is code we've copied, - // such that the instruction cache is in the know about it. + // Go back to actual range as search goes past it + ioPLBlock ioplInfo = ioplList[ind - 1]; - flush_dcache(targetSegment, pageLength, true); + if (ind < numIOPLs) + length = ioplList[ind].fIOMDOffset; + else + length = _length; + length -= offset; // Remainder within iopl - ((UInt8 *) bytes) += pageLength; - bytesCopied += pageLength; - offset += pageLength; - sourceSegment += pageLength; - sourceSegmentLength -= pageLength; - targetSegment += pageLength; - targetSegmentLength -= pageLength; - withLength -= pageLength; - } + // Subtract offset till this iopl in total list + offset -= ioplInfo.fIOMDOffset; - bcopy_phys_safe( /* from */ (char *) sourceSegment, - /* to */ (char *) targetSegment, - /* size */ (int ) sourceSegmentLength ); + // This is a mapped IOPL so we just need to compute an offset + // relative to the mapped base. 
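Locating the iopl that covers a descriptor offset, as done above, relies on fIOMDOffset recording where each block begins. In isolation:

    unsigned int ind;
    for (ind = 1; ind < numIOPLs; ind++)
        if (offset < ioplList[ind].fIOMDOffset)
            break;                        // next block starts past offset
    const ioPLBlock &found = ioplList[ind - 1];
    // found covers [found.fIOMDOffset, ioplList[ind].fIOMDOffset), or up to
    // _length for the last block, exactly as the length clamp above computes.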
+ if (ioplInfo.fMappedBase) { + offset += (ioplInfo.fPageOffset & PAGE_MASK); + address = ptoa_32(ioplInfo.fMappedBase) + offset; + continue; + } - // We flush the data cache in case it is code we've copied, - // such that the instruction cache is in the know about it. + // Currently the offset is rebased into the current iopl. + // Now add the iopl 1st page offset. + offset += ioplInfo.fPageOffset; - flush_dcache(targetSegment, sourceSegmentLength, true); + // For external UPLs the fPageInfo field points directly to + // the upl's upl_page_info_t array. + if (ioplInfo.fFlags & kIOPLExternUPL) + pageList = (upl_page_info_t *) ioplInfo.fPageInfo; + else + pageList = &pageList[ioplInfo.fPageInfo]; - ((UInt8 *) bytes) += sourceSegmentLength; - bytesCopied += sourceSegmentLength; - offset += sourceSegmentLength; - targetSegment += sourceSegmentLength; - targetSegmentLength -= sourceSegmentLength; - withLength -= sourceSegmentLength; + // Check for direct device non-paged memory + if ( ioplInfo.fFlags & kIOPLOnDevice ) { + address = ptoa_32(pageList->phys_addr) + offset; + continue; } - } - } -writeBytesErr: + // Now we need compute the index into the pageList + ind = atop_32(offset); + offset &= PAGE_MASK; + + IOPhysicalAddress pageAddr = pageList[ind].phys_addr; + address = ptoa_32(pageAddr) + offset; + + // Check for the remaining data in this upl being longer than the + // remainder on the current page. This should be checked for + // contiguous pages + if (length > PAGE_SIZE - offset) { + // See if the next page is contiguous. Stop looking when we hit + // the end of this upl, which is indicated by the + // contigLength >= length. + IOByteCount contigLength = PAGE_SIZE - offset; + + // Look for contiguous segment + while (contigLength < length + && ++pageAddr == pageList[++ind].phys_addr) { + contigLength += PAGE_SIZE; + } + if (length > contigLength) + length = contigLength; + } + + assert(address); + assert(length); - return bytesCopied; -} + } while (0); -extern "C" { -// osfmk/device/iokit_rpc.c -extern unsigned int IOTranslateCacheBits(struct phys_entry *pp); -}; + if (!address) + length = 0; + } -/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) -/* DEPRECATED */ { -/* DEPRECATED */ assert(position <= _length); -/* DEPRECATED */ -/* DEPRECATED */ if (position >= _length) -/* DEPRECATED */ { -/* DEPRECATED */ _position = _length; -/* DEPRECATED */ _positionAtIndex = _rangesCount; /* careful: out-of-bounds */ -/* DEPRECATED */ _positionAtOffset = 0; -/* DEPRECATED */ return; -/* DEPRECATED */ } -/* DEPRECATED */ -/* DEPRECATED */ if (position < _position) -/* DEPRECATED */ { -/* DEPRECATED */ _positionAtOffset = position; -/* DEPRECATED */ _positionAtIndex = 0; -/* DEPRECATED */ } -/* DEPRECATED */ else -/* DEPRECATED */ { -/* DEPRECATED */ _positionAtOffset += (position - _position); -/* DEPRECATED */ } -/* DEPRECATED */ _position = position; -/* DEPRECATED */ -/* DEPRECATED */ while (_positionAtOffset >= _ranges.v[_positionAtIndex].length) -/* DEPRECATED */ { -/* DEPRECATED */ _positionAtOffset -= _ranges.v[_positionAtIndex].length; -/* DEPRECATED */ _positionAtIndex++; -/* DEPRECATED */ } -/* DEPRECATED */ } + if (lengthOfSegment) + *lengthOfSegment = length; -IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment( IOByteCount offset, - IOByteCount * lengthOfSegment ) -{ - IOPhysicalAddress address = 0; - IOPhysicalLength length = 0; + return address; +} +addr64_t IOMemoryDescriptor::getPhysicalSegment64 + (IOByteCount offset, IOByteCount 
*lengthOfSegment) +{ + IOPhysicalAddress phys32; + IOByteCount length; + addr64_t phys64; -// assert(offset <= _length); + phys32 = getPhysicalSegment(offset, lengthOfSegment); + if (!phys32) + return 0; - if ( offset < _length ) // (within bounds?) + if (gIOSystemMapper) { - unsigned rangesIndex = 0; - - for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ ) - { - offset -= _ranges.v[rangesIndex].length; // (make offset relative) - } - - if ( _task == 0 ) // (physical memory?) - { - address = _ranges.v[rangesIndex].address + offset; - length = _ranges.v[rangesIndex].length - offset; - - for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) - { - if ( address + length != _ranges.v[rangesIndex].address ) break; - - length += _ranges.v[rangesIndex].length; // (coalesce ranges) - } - } - else // (virtual memory?) - { - vm_address_t addressVirtual = _ranges.v[rangesIndex].address + offset; - - assert((0 == (kIOMemoryRequiresWire & _flags)) || _wireCount); - - address = pmap_extract_safe(_task, addressVirtual); - length = next_page(addressVirtual) - addressVirtual; - length = min(_ranges.v[rangesIndex].length - offset, length); - } - - assert(address); - if ( address == 0 ) length = 0; + IOByteCount origLen; + + phys64 = gIOSystemMapper->mapAddr(phys32); + origLen = *lengthOfSegment; + length = page_size - (phys64 & (page_size - 1)); + while ((length < origLen) + && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length))) + length += page_size; + if (length > origLen) + length = origLen; + + *lengthOfSegment = length; } + else + phys64 = (addr64_t) phys32; - if ( lengthOfSegment ) *lengthOfSegment = length; - - return address; + return phys64; } -IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment( IOByteCount offset, - IOByteCount * lengthOfSegment ) +IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment + (IOByteCount offset, IOByteCount *lengthOfSegment) { IOPhysicalAddress address = 0; IOPhysicalLength length = 0; assert(offset <= _length); + if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeUPL) + return super::getSourceSegment( offset, lengthOfSegment ); + if ( offset < _length ) // (within bounds?) { unsigned rangesIndex = 0; @@ -901,193 +1076,270 @@ IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment( IOByteCount off /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, /* DEPRECATED */ IOByteCount * lengthOfSegment) /* DEPRECATED */ { -/* DEPRECATED */ if( offset != _position) -/* DEPRECATED */ setPosition( offset ); -/* DEPRECATED */ -/* DEPRECATED */ assert(_position <= _length); -/* DEPRECATED */ -/* DEPRECATED */ /* Fail gracefully if the position is at (or past) the end-of-buffer. */ -/* DEPRECATED */ if (_position >= _length) -/* DEPRECATED */ { -/* DEPRECATED */ *lengthOfSegment = 0; -/* DEPRECATED */ return 0; -/* DEPRECATED */ } -/* DEPRECATED */ -/* DEPRECATED */ /* Compute the relative length to the end of this virtual segment. */ -/* DEPRECATED */ *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset; -/* DEPRECATED */ -/* DEPRECATED */ /* Compute the relative address of this virtual segment. 
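The system-mapper path of getPhysicalSegment64() above can only report a segment as long as consecutive source pages stay adjacent after translation. The loop shape, isolated:

    addr64_t    phys64 = mapper->mapAddr(phys32);       // translate first page
    IOByteCount len    = page_size - (phys64 & (page_size - 1));
    while (len < origLen
           && (phys64 + len) == mapper->mapAddr(phys32 + len))
        len += page_size;                               // next page still abuts
    if (len > origLen)
        len = origLen;                                  // clip to the request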
*/ -/* DEPRECATED */ if (_task == kernel_task) -/* DEPRECATED */ return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset); -/* DEPRECATED */ else -/* DEPRECATED */ { -/* DEPRECATED */ vm_offset_t off; -/* DEPRECATED */ -/* DEPRECATED */ mapIntoKernel(_positionAtIndex); -/* DEPRECATED */ -/* DEPRECATED */ off = _ranges.v[_kernPtrAtIndex].address; -/* DEPRECATED */ off -= trunc_page(off); -/* DEPRECATED */ -/* DEPRECATED */ return (void *) (_kernPtrAligned + off + _positionAtOffset); -/* DEPRECATED */ } + if (_task == kernel_task) + return (void *) getSourceSegment(offset, lengthOfSegment); + else + panic("IOGMD::getVirtualSegment deprecated"); + + return 0; /* DEPRECATED */ } /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */ -/* - * prepare - * - * Prepare the memory for an I/O transfer. This involves paging in - * the memory, if necessary, and wiring it down for the duration of - * the transfer. The complete() method completes the processing of - * the memory after the I/O transfer finishes. This method needn't - * called for non-pageable memory. - */ -IOReturn IOGeneralMemoryDescriptor::prepare( - IODirection forDirection = kIODirectionNone) +#ifdef __ppc__ +extern vm_offset_t static_memory_end; +#define io_kernel_static_end static_memory_end +#else +extern vm_offset_t first_avail; +#define io_kernel_static_end first_avail +#endif + +static kern_return_t +io_get_kernel_static_upl( + vm_map_t map, + vm_address_t offset, + vm_size_t *upl_size, + upl_t *upl, + upl_page_info_array_t page_list, + unsigned int *count, + int *flags, + int force_data_sync) { - UInt rangeIndex = 0; + unsigned int pageCount, page; + ppnum_t phys; - if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) { - kern_return_t rc; + pageCount = atop_32(*upl_size); + if (pageCount > *count) + pageCount = *count; - if(forDirection == kIODirectionNone) - forDirection = _direction; + *upl = NULL; - vm_prot_t access; + for (page = 0; page < pageCount; page++) + { + phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page)); + if (!phys) + break; + page_list[page].phys_addr = phys; + page_list[page].pageout = 0; + page_list[page].absent = 0; + page_list[page].dirty = 0; + page_list[page].precious = 0; + page_list[page].device = 0; + } - switch (forDirection) - { - case kIODirectionIn: - access = VM_PROT_WRITE; - break; + return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError); +} - case kIODirectionOut: - access = VM_PROT_READ; - break; +IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) +{ + IOReturn error = kIOReturnNoMemory; + ioGMDData *dataP; + ppnum_t mapBase = 0; + IOMapper *mapper; + ipc_port_t sharedMem = (ipc_port_t) _memEntry; - default: - access = VM_PROT_READ | VM_PROT_WRITE; - break; - } + assert(!_wireCount); - // - // Check user read/write access to the data buffer. 
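io_get_kernel_static_upl() above synthesizes a page list for memory below io_kernel_static_end, which no VM object backs; translations come straight from the kernel pmap. Its core:

    for (page = 0; page < pageCount; page++) {
        ppnum_t phys = pmap_find_phys(kernel_pmap,
                                      ((addr64_t) offset) + ptoa_64(page));
        if (!phys)
            break;                         // unmapped hole: caller sees failure
        page_list[page].phys_addr = phys;  // all other page-state bits stay 0
    }
    // kIOReturnSuccess only if every requested page had a translation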
- // + if (_pages >= gIOMaximumMappedIOPageCount) + return kIOReturnNoResources; - for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) - { - vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address); - vm_size_t checkSize = round_page(_ranges.v[rangeIndex].length ); + dataP = getDataP(_memoryEntries); + mapper = dataP->fMapper; + if (mapper && _pages) + mapBase = mapper->iovmAlloc(_pages); - while (checkSize) - { - vm_region_basic_info_data_t regionInfo; - mach_msg_type_number_t regionInfoSize = sizeof(regionInfo); - vm_size_t regionSize; - - if ( (vm_region( - /* map */ getMapForTask(_task, checkBase), - /* address */ &checkBase, - /* size */ ®ionSize, - /* flavor */ VM_REGION_BASIC_INFO, - /* info */ (vm_region_info_t) ®ionInfo, - /* info size */ ®ionInfoSize, - /* object name */ 0 ) != KERN_SUCCESS ) || - ( (forDirection & kIODirectionIn ) && - !(regionInfo.protection & VM_PROT_WRITE) ) || - ( (forDirection & kIODirectionOut) && - !(regionInfo.protection & VM_PROT_READ ) ) ) - { - return kIOReturnVMError; - } + // Note that appendBytes(NULL) zeros the data up to the + // desired length. + _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t)); + dataP = 0; // May no longer be valid so lets not get tempted. - assert((regionSize & PAGE_MASK) == 0); - - regionSize = min(regionSize, checkSize); - checkSize -= regionSize; - checkBase += regionSize; - } // (for each vm region) - } // (for each io range) - - for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) { - - vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address); - IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address + - _ranges.v[rangeIndex].length + - page_size - 1); - - vm_map_t taskVMMap = getMapForTask(_task, srcAlign); - - // If this I/O is for a user land task then protect ourselves - // against COW and other vm_shenanigans - if (_task && _task != kernel_task) { - // setup a data object to hold the 'named' memory regions - // @@@ gvdl: If we fail to allocate an OSData we will just - // hope for the best for the time being. Lets not fail a - // prepare at this late stage in product release. - if (!_memoryEntries) - _memoryEntries = OSData::withCapacity(16); - if (_memoryEntries) { - vm_object_offset_t desiredSize = srcAlignEnd - srcAlign; - vm_object_offset_t entryStart = srcAlign; - ipc_port_t memHandle; - - do { - vm_object_offset_t actualSize = desiredSize; - - rc = mach_make_memory_entry_64 - (taskVMMap, &actualSize, entryStart, - forDirection, &memHandle, NULL); - if (KERN_SUCCESS != rc) { - IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc); - goto abortExit; - } - - _memoryEntries-> - appendBytes(&memHandle, sizeof(memHandle)); - desiredSize -= actualSize; - entryStart += actualSize; - } while (desiredSize); - } + if (forDirection == kIODirectionNone) + forDirection = _direction; + + int uplFlags; // This Mem Desc's default flags for upl creation + switch (forDirection) + { + case kIODirectionOut: + // Pages do not need to be marked as dirty on commit + uplFlags = UPL_COPYOUT_FROM; + _flags |= kIOMemoryPreparedReadOnly; + break; + + case kIODirectionIn: + default: + uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM + break; + } + uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; + + // + // Check user read/write access to the data buffer. 
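That direction switch is the policy heart of wireVirtual(): output-only transfers request UPL_COPYOUT_FROM, since pages a device only reads never need dirty marking, and the descriptor is tagged kIOMemoryPreparedReadOnly. Condensed:

    int uplFlags = (forDirection == kIODirectionOut)
                 ? UPL_COPYOUT_FROM        // device reads; no dirty marking
                 : 0;                      // device may write these pages
    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;   // wire, and use a lite UPL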
+ // + unsigned int pageIndex = 0; + IOByteCount mdOffset = 0; + vm_map_t curMap; + if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) + curMap = 0; + else + { curMap = get_task_map(_task); } + + for (UInt range = 0; range < _rangesCount; range++) { + ioPLBlock iopl; + IOVirtualRange curRange = _ranges.v[range]; + vm_address_t startPage; + IOByteCount numBytes; + + startPage = trunc_page_32(curRange.address); + iopl.fPageOffset = (short) curRange.address & PAGE_MASK; + if (mapper) + iopl.fMappedBase = mapBase + pageIndex; + else + iopl.fMappedBase = 0; + numBytes = iopl.fPageOffset + curRange.length; + + while (numBytes) { + dataP = getDataP(_memoryEntries); + vm_map_t theMap = + (curMap)? curMap + : IOPageableMapForAddress(startPage); + upl_page_info_array_t pageInfo = getPageList(dataP); + int ioplFlags = uplFlags; + upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; + + vm_size_t ioplSize = round_page_32(numBytes); + unsigned int numPageInfo = atop_32(ioplSize); + + if ((theMap == kernel_map) && (startPage < io_kernel_static_end)) + { + error = io_get_kernel_static_upl(theMap, + startPage, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags, + false); + + } else if (sharedMem && (kIOMemoryPersistent & _flags)) { + + error = memory_object_iopl_request(sharedMem, + ptoa_32(pageIndex), + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags); + + } else { + error = vm_map_get_upl(theMap, + startPage, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags, + false); } - rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE); - if (KERN_SUCCESS != rc) { - IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc); - goto abortExit; + assert(ioplSize); + if (error != KERN_SUCCESS) + goto abortExit; + + error = kIOReturnNoMemory; + + if (baseInfo->device) { + numPageInfo = 1; + iopl.fFlags = kIOPLOnDevice; + // Don't translate device memory at all + if (mapper && mapBase) { + mapper->iovmFree(mapBase, _pages); + mapBase = 0; + iopl.fMappedBase = 0; + } + } + else { + iopl.fFlags = 0; + if (mapper) + mapper->iovmInsert(mapBase, pageIndex, + baseInfo, numPageInfo); + } + + iopl.fIOMDOffset = mdOffset; + iopl.fPageInfo = pageIndex; + + if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL) + { + kernel_upl_commit(iopl.fIOPL, 0, 0); + iopl.fIOPL = 0; } + + if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { + // Clean up partial created and unsaved iopl + if (iopl.fIOPL) + kernel_upl_abort(iopl.fIOPL, 0); + goto abortExit; + } + + // Check for a multiple iopl's in one virtual range + pageIndex += numPageInfo; + mdOffset -= iopl.fPageOffset; + if (ioplSize < numBytes) { + numBytes -= ioplSize; + startPage += ioplSize; + mdOffset += ioplSize; + iopl.fPageOffset = 0; + if (mapper) + iopl.fMappedBase = mapBase + pageIndex; + } + else { + mdOffset += numBytes; + break; + } } } - _wireCount++; + return kIOReturnSuccess; abortExit: - UInt doneIndex; - - - for(doneIndex = 0; doneIndex < rangeIndex; doneIndex++) { - vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address); - IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address + - _ranges.v[doneIndex].length + - page_size - 1); + { + dataP = getDataP(_memoryEntries); + UInt done = getNumIOPL(dataP, _memoryEntries->getLength()); + ioPLBlock *ioplList = getIOPLList(dataP); + + for (UInt range = 0; range < done; range++) + { + if (ioplList[range].fIOPL) + kernel_upl_abort(ioplList[range].fIOPL, 0); + } - vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign, - 
srcAlignEnd, FALSE); + if (mapper && mapBase) + mapper->iovmFree(mapBase, _pages); } - if (_memoryEntries) { - ipc_port_t *handles, *handlesEnd; + return error; +} - handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy(); - handlesEnd = (ipc_port_t *) - ((vm_address_t) handles + _memoryEntries->getLength()); - while (handles < handlesEnd) - ipc_port_release_send(*handles++); - _memoryEntries->release(); - _memoryEntries = 0; +/* + * prepare + * + * Prepare the memory for an I/O transfer. This involves paging in + * the memory, if necessary, and wiring it down for the duration of + * the transfer. The complete() method completes the processing of + * the memory after the I/O transfer finishes. This method needn't + * called for non-pageable memory. + */ +IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) +{ + IOReturn error = kIOReturnSuccess; + + if (!_wireCount && (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) { + error = wireVirtual(forDirection); + if (error) + return error; } - return kIOReturnVMError; + _wireCount++; + + return kIOReturnSuccess; } /* @@ -1099,50 +1351,36 @@ abortExit: * before and after an I/O transfer involving pageable memory. */ -IOReturn IOGeneralMemoryDescriptor::complete( - IODirection forDirection = kIODirectionNone) +IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */) { assert(_wireCount); - if(0 == _wireCount) + if (!_wireCount) return kIOReturnSuccess; _wireCount--; - if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) { - UInt rangeIndex; - kern_return_t rc; - - if(forDirection == kIODirectionNone) - forDirection = _direction; - - for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) { - - vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address); - IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address + - _ranges.v[rangeIndex].length + - page_size - 1); - - if(forDirection == kIODirectionIn) - pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd); - - rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign, - srcAlignEnd, FALSE); - if(rc != KERN_SUCCESS) - IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc); + if (!_wireCount) { + if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) { + /* kIOMemoryTypePhysical */ + // DO NOTHING } + else { + ioGMDData * dataP = getDataP(_memoryEntries); + ioPLBlock *ioplList = getIOPLList(dataP); + UInt count = getNumIOPL(dataP, _memoryEntries->getLength()); + + if (dataP->fMapper && _pages && ioplList[0].fMappedBase) + dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages); + + // Only complete iopls that we created which are for TypeVirtual + if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) { + for (UInt ind = 0; ind < count; ind++) + if (ioplList[ind].fIOPL) + kernel_upl_commit(ioplList[ind].fIOPL, 0, 0); + } - if (_memoryEntries) { - ipc_port_t *handles, *handlesEnd; - - handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy(); - handlesEnd = (ipc_port_t *) - ((vm_address_t) handles + _memoryEntries->getLength()); - while (handles < handlesEnd) - ipc_port_release_send(*handles++); - - _memoryEntries->release(); - _memoryEntries = 0; - } + (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength() + } } return kIOReturnSuccess; } @@ -1151,8 +1389,8 @@ IOReturn IOGeneralMemoryDescriptor::doMap( vm_map_t addressMap, IOVirtualAddress * atAddress, IOOptionBits options, - IOByteCount sourceOffset = 0, - IOByteCount length = 0 ) + IOByteCount sourceOffset, + 
IOByteCount length ) { kern_return_t kr; ipc_port_t sharedMem = (ipc_port_t) _memEntry; @@ -1167,11 +1405,7 @@ IOReturn IOGeneralMemoryDescriptor::doMap( if( 0 == sharedMem) { - vm_size_t size = 0; - - for (unsigned index = 0; index < _rangesCount; index++) - size += round_page(_ranges.v[index].address + _ranges.v[index].length) - - trunc_page(_ranges.v[index].address); + vm_size_t size = _pages << PAGE_SHIFT; if( _task) { #ifndef i386 @@ -1181,9 +1415,9 @@ IOReturn IOGeneralMemoryDescriptor::doMap( VM_PROT_READ | VM_PROT_WRITE, &sharedMem, NULL ); - if( (KERN_SUCCESS == kr) && (actualSize != round_page(size))) { + if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) { #if IOASSERT - IOLog("mach_make_memory_entry_64 (%08lx) size (%08lx:%08lx)\n", + IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n", _ranges.v[0].address, (UInt32)actualSize, size); #endif kr = kIOReturnVMError; @@ -1196,13 +1430,12 @@ IOReturn IOGeneralMemoryDescriptor::doMap( } else do { - memory_object_t pager; - unsigned int flags=0; - struct phys_entry *pp; - IOPhysicalAddress pa; + memory_object_t pager; + unsigned int flags = 0; + addr64_t pa; IOPhysicalLength segLen; - pa = getPhysicalSegment( sourceOffset, &segLen ); + pa = getPhysicalSegment64( sourceOffset, &segLen ); if( !reserved) { reserved = IONew( ExpansionData, 1 ); @@ -1212,41 +1445,35 @@ IOReturn IOGeneralMemoryDescriptor::doMap( reserved->pagerContig = (1 == _rangesCount); reserved->memory = this; -#ifndef i386 - switch(options & kIOMapCacheMask ) { /*What cache mode do we need*/ + /*What cache mode do we need*/ + switch(options & kIOMapCacheMask ) { case kIOMapDefaultCache: default: - if((pp = pmap_find_physentry(pa))) {/* Find physical address */ - /* Use physical attributes as default */ - flags = IOTranslateCacheBits(pp); - - } - else { /* If no physical, just hard code attributes */ - flags = DEVICE_PAGER_CACHE_INHIB | - DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; - } - break; + flags = IODefaultCacheBits(pa); + break; case kIOMapInhibitCache: - flags = DEVICE_PAGER_CACHE_INHIB | - DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; - break; + flags = DEVICE_PAGER_CACHE_INHIB | + DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; + break; case kIOMapWriteThruCache: - flags = DEVICE_PAGER_WRITE_THROUGH | - DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; - break; + flags = DEVICE_PAGER_WRITE_THROUGH | + DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; + break; case kIOMapCopybackCache: - flags = DEVICE_PAGER_COHERENT; - break; + flags = DEVICE_PAGER_COHERENT; + break; + + case kIOMapWriteCombineCache: + flags = DEVICE_PAGER_CACHE_INHIB | + DEVICE_PAGER_COHERENT; + break; } flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0; -#else - flags = reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0; -#endif pager = device_pager_setup( (memory_object_t) 0, (int) reserved, size, flags); @@ -1292,7 +1519,7 @@ IOReturn IOGeneralMemoryDescriptor::doUnmap( IOByteCount length ) { // could be much better - if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount) + if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount) && (logical == _ranges.v[0].address) && (length <= _ranges.v[0].length) ) return( kIOReturnSuccess ); @@ -1484,7 +1711,40 @@ static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) if( ref->sharedMem) { vm_prot_t prot = VM_PROT_READ | ((ref->options & kIOMapReadOnly) ? 
0 : VM_PROT_WRITE); - + + // set memory entry cache + vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY; + switch (ref->options & kIOMapCacheMask) + { + case kIOMapInhibitCache: + SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode); + break; + + case kIOMapWriteThruCache: + SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode); + break; + + case kIOMapWriteCombineCache: + SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode); + break; + + case kIOMapCopybackCache: + SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode); + break; + + case kIOMapDefaultCache: + default: + SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode); + break; + } + + vm_size_t unused = 0; + + err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/, + memEntryCacheMode, NULL, ref->sharedMem ); + if (KERN_SUCCESS != err) + IOLog("MAP_MEM_ONLY failed %d\n", err); + err = vm_map( map, &ref->mapped, ref->size, 0 /* mask */, @@ -1495,7 +1755,7 @@ static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) prot, // cur prot, // max VM_INHERIT_NONE); - + if( KERN_SUCCESS != err) { ref->mapped = 0; continue; @@ -1527,8 +1787,8 @@ IOReturn IOMemoryDescriptor::doMap( vm_map_t addressMap, IOVirtualAddress * atAddress, IOOptionBits options, - IOByteCount sourceOffset = 0, - IOByteCount length = 0 ) + IOByteCount sourceOffset, + IOByteCount length ) { IOReturn err = kIOReturnSuccess; memory_object_t pager; @@ -1548,23 +1808,23 @@ IOReturn IOMemoryDescriptor::doMap( sourceAddr = getSourceSegment( sourceOffset, NULL ); assert( sourceAddr ); - pageOffset = sourceAddr - trunc_page( sourceAddr ); + pageOffset = sourceAddr - trunc_page_32( sourceAddr ); - ref.size = round_page( length + pageOffset ); + ref.size = round_page_32( length + pageOffset ); logical = *atAddress; if( options & kIOMapAnywhere) // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE ref.mapped = 0; else { - ref.mapped = trunc_page( logical ); + ref.mapped = trunc_page_32( logical ); if( (logical - ref.mapped) != pageOffset) { err = kIOReturnVMError; continue; } } - if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags)) + if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); else err = IOMemoryDescriptorMapAlloc( addressMap, &ref ); @@ -1610,8 +1870,9 @@ IOReturn IOMemoryDescriptor::handleFault( vm_size_t bytes; vm_size_t page; IOByteCount pageOffset; + IOByteCount pagerOffset; IOPhysicalLength segLen; - IOPhysicalAddress physAddr; + addr64_t physAddr; if( !addressMap) { @@ -1627,9 +1888,10 @@ IOReturn IOMemoryDescriptor::handleFault( return( kIOReturnSuccess ); } - physAddr = getPhysicalSegment( sourceOffset, &segLen ); + physAddr = getPhysicalSegment64( sourceOffset, &segLen ); assert( physAddr ); - pageOffset = physAddr - trunc_page( physAddr ); + pageOffset = physAddr - trunc_page_64( physAddr ); + pagerOffset = sourceOffset; size = length + pageOffset; physAddr -= pageOffset; @@ -1640,14 +1902,14 @@ IOReturn IOMemoryDescriptor::handleFault( // in the middle of the loop only map whole pages if( segLen >= bytes) segLen = bytes; - else if( segLen != trunc_page( segLen)) + else if( segLen != trunc_page_32( segLen)) err = kIOReturnVMError; - if( physAddr != trunc_page( physAddr)) + if( physAddr != trunc_page_64( physAddr)) err = kIOReturnBadArgument; #ifdef DEBUG if( kIOLogMapping & gIOKitDebug) - IOLog("_IOMemoryMap::map(%p) %08lx->%08lx:%08lx\n", + IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n", addressMap, address + 
pageOffset, physAddr + pageOffset, segLen - pageOffset); #endif @@ -1659,7 +1921,7 @@ IOReturn IOMemoryDescriptor::handleFault( #ifdef i386 /* i386 doesn't support faulting on device memory yet */ if( addressMap && (kIOReturnSuccess == err)) - err = IOMapPages( addressMap, address, physAddr, segLen, options ); + err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options ); assert( KERN_SUCCESS == err ); if( err) break; @@ -1668,19 +1930,20 @@ IOReturn IOMemoryDescriptor::handleFault( if( pager) { if( reserved && reserved->pagerContig) { IOPhysicalLength allLen; - IOPhysicalAddress allPhys; + addr64_t allPhys; - allPhys = getPhysicalSegment( 0, &allLen ); + allPhys = getPhysicalSegment64( 0, &allLen ); assert( allPhys ); - err = device_pager_populate_object( pager, 0, trunc_page(allPhys), round_page(allLen) ); + err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) ); } else { - for( page = 0; + for( page = 0; (page < segLen) && (KERN_SUCCESS == err); page += page_size) { - err = device_pager_populate_object( pager, sourceOffset + page, - physAddr + page, page_size ); + err = device_pager_populate_object(pager, pagerOffset, + (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size); + pagerOffset += page_size; } } assert( KERN_SUCCESS == err ); @@ -1718,7 +1981,7 @@ IOReturn IOMemoryDescriptor::handleFault( pageOffset = 0; } while( bytes - && (physAddr = getPhysicalSegment( sourceOffset, &segLen ))); + && (physAddr = getPhysicalSegment64( sourceOffset, &segLen ))); if( bytes) err = kIOReturnBadArgument; @@ -1741,7 +2004,7 @@ IOReturn IOMemoryDescriptor::doUnmap( if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) { - if( _memEntry && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags)) + if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) addressMap = IOPageableMapForAddress( logical ); err = vm_deallocate( addressMap, logical, length ); @@ -1863,9 +2126,11 @@ void _IOMemoryMap::taskDied( void ) // of a memory descriptors _mappings set. This means that we // always have 2 references on a mapping. When either of these mappings // are released we need to free ourselves. 
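Both doMap() and handleFault() above keep the sub-page offset separate from the page-aligned base. A worked example of the rounding, assuming 4 KB pages and hypothetical values:

    IOVirtualAddress sourceAddr = 0x12345678;
    IOByteCount      length     = 0x100;
    IOByteCount pageOffset = sourceAddr - trunc_page_32(sourceAddr); // 0x678
    vm_size_t   mapSize    = round_page_32(length + pageOffset);     // 0x1000
    // The region is mapped page-aligned; the client's usable address is
    // the mapped base plus pageOffset, as doMap() checks above.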
-void _IOMemoryMap::taggedRelease(const void *tag = 0) const +void _IOMemoryMap::taggedRelease(const void *tag) const { + LOCK; super::taggedRelease(tag, 2); + UNLOCK; } void _IOMemoryMap::free() @@ -1923,7 +2188,7 @@ _IOMemoryMap * _IOMemoryMap::copyCompatible( { _IOMemoryMap * mapping; - if( (!task) || (task != getAddressTask())) + if( (!task) || (!addressMap) || (addressMap != get_task_map(task))) return( 0 ); if( (options ^ _options) & kIOMapReadOnly) return( 0 ); @@ -1981,6 +2246,9 @@ void IOMemoryDescriptor::initialize( void ) { if( 0 == gIOMemoryLock) gIOMemoryLock = IORecursiveLockAlloc(); + + IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey, + ptoa_64(gIOMaximumMappedIOPageCount), 64); } void IOMemoryDescriptor::free( void ) @@ -1994,7 +2262,7 @@ void IOMemoryDescriptor::free( void ) IOMemoryMap * IOMemoryDescriptor::setMapping( task_t intoTask, IOVirtualAddress mapAddress, - IOOptionBits options = 0 ) + IOOptionBits options ) { _IOMemoryMap * map; @@ -2017,7 +2285,7 @@ IOMemoryMap * IOMemoryDescriptor::setMapping( } IOMemoryMap * IOMemoryDescriptor::map( - IOOptionBits options = 0 ) + IOOptionBits options ) { return( makeMapping( this, kernel_task, 0, @@ -2029,8 +2297,8 @@ IOMemoryMap * IOMemoryDescriptor::map( task_t intoTask, IOVirtualAddress toAddress, IOOptionBits options, - IOByteCount offset = 0, - IOByteCount length = 0 ) + IOByteCount offset, + IOByteCount length ) { if( 0 == length) length = getLength(); @@ -2123,22 +2391,38 @@ OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor) bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent, IOByteCount offset, IOByteCount length, - IODirection withDirection ) + IODirection direction ) { - if( !super::init()) - return( false ); - if( !parent) return( false); if( (offset + length) > parent->getLength()) return( false); + /* + * We can check the _parent instance variable before having ever set it + * to an initial value because I/O Kit guarantees that all our instance + * variables are zeroed on an object's allocation. + */ + + if( !_parent) { + if( !super::init()) + return( false ); + } else { + /* + * An existing memory descriptor is being retargeted to + * point to somewhere else. Clean up our present state. 
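initSubRange() below now handles retargeting as well as first-time initialisation. Typical creation of a window onto an existing descriptor (parent and bounds are hypothetical):

    IOMemoryDescriptor *sub =
        IOMemoryDescriptor::withSubRange(parentMD, /* offset */ 4096,
                                         /* length */ 8192, kIODirectionOutIn);
    if (sub) {
        sub->prepare();          // forwarded to the parent descriptor
        // ... I/O against bytes [4096, 12288) of parentMD ...
        sub->complete();
        sub->release();
    }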
+ */ + + _parent->release(); + _parent = 0; + } + parent->retain(); _parent = parent; _start = offset; _length = length; - _direction = withDirection; + _direction = direction; _tag = parent->getTag(); return( true ); @@ -2204,7 +2488,7 @@ void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset, } IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset, - void * bytes, IOByteCount withLength) + void * bytes, IOByteCount length) { IOByteCount byteCount; @@ -2215,14 +2499,14 @@ IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset, LOCK; byteCount = _parent->readBytes( _start + offset, bytes, - min(withLength, _length - offset) ); + min(length, _length - offset) ); UNLOCK; return( byteCount ); } IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset, - const void* bytes, IOByteCount withLength) + const void* bytes, IOByteCount length) { IOByteCount byteCount; @@ -2233,14 +2517,14 @@ IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset, LOCK; byteCount = _parent->writeBytes( _start + offset, bytes, - min(withLength, _length - offset) ); + min(length, _length - offset) ); UNLOCK; return( byteCount ); } IOReturn IOSubMemoryDescriptor::prepare( - IODirection forDirection = kIODirectionNone) + IODirection forDirection) { IOReturn err; @@ -2252,7 +2536,7 @@ IOReturn IOSubMemoryDescriptor::prepare( } IOReturn IOSubMemoryDescriptor::complete( - IODirection forDirection = kIODirectionNone) + IODirection forDirection) { IOReturn err; @@ -2296,17 +2580,17 @@ IOMemoryMap * IOSubMemoryDescriptor::makeMapping( bool IOSubMemoryDescriptor::initWithAddress(void * address, - IOByteCount withLength, - IODirection withDirection) + IOByteCount length, + IODirection direction) { return( false ); } bool IOSubMemoryDescriptor::initWithAddress(vm_address_t address, - IOByteCount withLength, - IODirection withDirection, - task_t withTask) + IOByteCount length, + IODirection direction, + task_t task) { return( false ); } @@ -2314,8 +2598,8 @@ IOSubMemoryDescriptor::initWithAddress(vm_address_t address, bool IOSubMemoryDescriptor::initWithPhysicalAddress( IOPhysicalAddress address, - IOByteCount withLength, - IODirection withDirection ) + IOByteCount length, + IODirection direction ) { return( false ); } @@ -2324,9 +2608,9 @@ bool IOSubMemoryDescriptor::initWithRanges( IOVirtualRange * ranges, UInt32 withCount, - IODirection withDirection, - task_t withTask, - bool asReference = false) + IODirection direction, + task_t task, + bool asReference) { return( false ); } @@ -2334,8 +2618,8 @@ IOSubMemoryDescriptor::initWithRanges( bool IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, UInt32 withCount, - IODirection withDirection, - bool asReference = false) + IODirection direction, + bool asReference) { return( false ); } @@ -2346,7 +2630,6 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const { OSSymbol const *keys[2]; OSObject *values[2]; - OSDictionary *dict; IOVirtualRange *vcopy; unsigned int index, nRanges; bool result; @@ -2477,8 +2760,8 @@ bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0); -OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1); -OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2); +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1); +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3); 
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5); @@ -2493,6 +2776,6 @@ OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15); -/* inline function implementation */ +/* ex-inline function implementation */ IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress() { return( getPhysicalSegment( 0, 0 )); } diff --git a/iokit/Kernel/IOMultiMemoryDescriptor.cpp b/iokit/Kernel/IOMultiMemoryDescriptor.cpp index 3e31f16b8..ff981beb8 100644 --- a/iokit/Kernel/IOMultiMemoryDescriptor.cpp +++ b/iokit/Kernel/IOMultiMemoryDescriptor.cpp @@ -90,7 +90,7 @@ IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors( IOMemoryDescriptor ** descriptors, UInt32 withCount, IODirection withDirection, - bool asReference = false ) + bool asReference ) { // // Create a new IOMultiMemoryDescriptor. The "buffer" is made up of several @@ -121,7 +121,7 @@ bool IOMultiMemoryDescriptor::initWithDescriptors( IOMemoryDescriptor ** descriptors, UInt32 withCount, IODirection withDirection, - bool asReference = false ) + bool asReference ) { // // Initialize an IOMultiMemoryDescriptor. The "buffer" is made up of several @@ -134,10 +134,19 @@ bool IOMultiMemoryDescriptor::initWithDescriptors( assert(descriptors); assert(withCount); - // Ask our superclass' opinion. - - if ( super::init() == false ) return false; + // Release existing descriptors, if any + if ( _descriptors ) + { + for ( unsigned index = 0; index < _descriptorsCount; index++ ) + _descriptors[index]->release(); + if ( _descriptorsIsAllocated ) + IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount); + } else { + // Ask our superclass' opinion. + if ( super::init() == false ) return false; + } + // Initialize our minimal state. _descriptors = 0; diff --git a/iokit/Kernel/IONVRAM.cpp b/iokit/Kernel/IONVRAM.cpp index fb6ceb5cd..3d859af68 100644 --- a/iokit/Kernel/IONVRAM.cpp +++ b/iokit/Kernel/IONVRAM.cpp @@ -261,7 +261,7 @@ OSObject *IODTNVRAM::getProperty(const OSSymbol *aKey) const if (_ofDict == 0) return 0; // Verify permissions. - result = IOUserClient::clientHasPrivilege(current_task(), "root"); + result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); if (result != kIOReturnSuccess) { variablePerm = getOFVariablePerm(aKey); if (variablePerm == kOFVariablePermRootOnly) return 0; @@ -294,7 +294,7 @@ bool IODTNVRAM::setProperty(const OSSymbol *aKey, OSObject *anObject) if (_ofDict == 0) return false; // Verify permissions. - result = IOUserClient::clientHasPrivilege(current_task(), "root"); + result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); if (result != kIOReturnSuccess) { propPerm = getOFVariablePerm(aKey); if (propPerm != kOFVariablePermUserWrite) return false; diff --git a/iokit/Kernel/IOPMPowerStateQueue.cpp b/iokit/Kernel/IOPMPowerStateQueue.cpp new file mode 100644 index 000000000..e137d290e --- /dev/null +++ b/iokit/Kernel/IOPMPowerStateQueue.cpp @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2001-2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "IOPMPowerStateQueue.h" +#undef super +#define super IOEventSource +OSDefineMetaClassAndStructors(IOPMPowerStateQueue, IOEventSource); + +IOPMPowerStateQueue *IOPMPowerStateQueue::PMPowerStateQueue(OSObject *inOwner) +{ + IOPMPowerStateQueue *me = new IOPMPowerStateQueue; + + if(me && !me->init(inOwner, 0) ) + { + me->release(); + return NULL; + } + + return me; +} + +bool IOPMPowerStateQueue::init(OSObject *owner, Action action) +{ + if(!(super::init(owner, (IOEventSource::Action) action))) return false; + + // Queue of powerstate changes + changes = NULL; + + return true; +} + + +bool IOPMPowerStateQueue::unIdleOccurred(IOService *inTarget, unsigned long inState) +{ + PowerChangeEntry *new_one = NULL; + + new_one = (PowerChangeEntry *)IOMalloc(sizeof(PowerChangeEntry)); + if(!new_one) return false; + + new_one->actionType = IOPMPowerStateQueue::kUnIdle; + new_one->state = inState; + new_one->target = inTarget; + + // Change to queue + OSEnqueueAtomic((void **)&changes, (void *)new_one, 0); + + signalWorkAvailable(); + + return true; +} + +// checkForWork() is called in a gated context +bool IOPMPowerStateQueue::checkForWork() +{ + PowerChangeEntry *theNode; + int theState; + IOService *theTarget; + UInt16 theAction; + + // Dequeue and process the state change request + if((theNode = (PowerChangeEntry *)OSDequeueAtomic((void **)&changes, 0))) + { + theState = theNode->state; + theTarget = theNode->target; + theAction = theNode->actionType; + IOFree((void *)theNode, sizeof(PowerChangeEntry)); + + switch (theAction) + { + case kUnIdle: + theTarget->command_received(theState, 0, 0, 0); + break; + } + } + + // Return true if there's more work to be done + if(changes) return true; + else return false; +} diff --git a/iokit/Kernel/IOPMPowerStateQueue.h b/iokit/Kernel/IOPMPowerStateQueue.h new file mode 100644 index 000000000..58fab241e --- /dev/null +++ b/iokit/Kernel/IOPMPowerStateQueue.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2001-2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
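The IOPMPowerStateQueue implementation above is a small producer/consumer event source: unIdleOccurred() pushes a PowerChangeEntry from arbitrary context with OSEnqueueAtomic() and signals the work loop, while checkForWork() runs in the gated work-loop context, pops one entry per call, and returns true while entries remain so the work loop re-invokes it. A user-space sketch of the same shape, with a mutex-guarded deque standing in for the kernel's atomic queue (all names hypothetical):

#include <cstdio>
#include <deque>
#include <mutex>

struct PowerChangeEntry { int state; const char *target; };

class PowerStateQueue {
    std::deque<PowerChangeEntry> changes;
    std::mutex                   lock;

public:
    // Producer side: may be called from any thread (the kernel code uses
    // OSEnqueueAtomic followed by signalWorkAvailable()).
    bool unIdleOccurred(const char *target, int state) {
        std::lock_guard<std::mutex> g(lock);
        changes.push_back({state, target});
        return true;   // real code would also wake the work loop here
    }

    // Consumer side: called repeatedly until it returns false
    // (the kernel runs this single-threaded behind the work-loop gate).
    bool checkForWork() {
        PowerChangeEntry e;
        {
            std::lock_guard<std::mutex> g(lock);
            if (changes.empty()) return false;
            e = changes.front();
            changes.pop_front();
        }
        printf("un-idle %s -> state %d\n", e.target, e.state);
        std::lock_guard<std::mutex> g(lock);
        return !changes.empty();   // true: more work, call me again
    }
};

int main() {
    PowerStateQueue q;
    q.unIdleOccurred("display", 1);
    q.unIdleOccurred("disk", 2);
    while (q.checkForWork()) {}
    return 0;
}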
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOPMPOWERSTATEQUEUE_H_ +#define _IOPMPOWERSTATEQUEUE_H_ + +#include +#include +#include +extern "C" { + #include +} + +class IOPMPowerStateQueue : public IOEventSource + { + OSDeclareDefaultStructors(IOPMPowerStateQueue); + +private: + enum { + kUnIdle = 0 + }; + + // Queue of requested states + struct PowerChangeEntry + { + void *next; + UInt16 actionType; + UInt16 state; + IOService *target; + }; + + void *changes; + +protected: + virtual bool checkForWork(void); + +public: + //typedef void (*Action)(IOService *target, unsigned long state); + + virtual bool init(OSObject *owner, Action action = 0); + + // static initialiser + static IOPMPowerStateQueue *PMPowerStateQueue(OSObject *owner); + + // Enqueues an activityTickle request to be executed on the workloop + virtual bool unIdleOccurred(IOService *, unsigned long); + }; + + #endif /* _IOPMPOWERSTATEQUEUE_H_ */ diff --git a/iokit/Kernel/IOPMchangeNoteList.cpp b/iokit/Kernel/IOPMchangeNoteList.cpp index 5d8d94c8b..78ebc20bf 100644 --- a/iokit/Kernel/IOPMchangeNoteList.cpp +++ b/iokit/Kernel/IOPMchangeNoteList.cpp @@ -24,6 +24,7 @@ */ #include #include +#include #define super OSObject OSDefineMetaClassAndStructors(IOPMchangeNoteList,OSObject) @@ -107,6 +108,13 @@ long IOPMchangeNoteList::latestChange ( void ) IOReturn IOPMchangeNoteList::releaseHeadChangeNote ( void ) { + IOPowerConnection *tmp; + + if(tmp = changeNote[firstInList].parent) { + changeNote[firstInList].parent = 0; + tmp->release(); + } + changeNote[firstInList].flags = IOPMNotInUse; firstInList = increment(firstInList); return IOPMNoErr; @@ -124,6 +132,13 @@ IOReturn IOPMchangeNoteList::releaseHeadChangeNote ( void ) IOReturn IOPMchangeNoteList::releaseTailChangeNote ( void ) { + IOPowerConnection *tmp; + + if(tmp = changeNote[firstInList].parent) { + changeNote[firstInList].parent = 0; + tmp->release(); + } + firstUnused = decrement(firstUnused); changeNote[firstUnused].flags = IOPMNotInUse; return IOPMNoErr; diff --git a/iokit/Drivers/platform/drvAppleRootDomain/RootDomain.cpp b/iokit/Kernel/IOPMrootDomain.cpp similarity index 82% rename from iokit/Drivers/platform/drvAppleRootDomain/RootDomain.cpp rename to iokit/Kernel/IOPMrootDomain.cpp index c265c7373..98d3ede23 100644 --- a/iokit/Drivers/platform/drvAppleRootDomain/RootDomain.cpp +++ b/iokit/Kernel/IOPMrootDomain.cpp @@ -33,6 +33,7 @@ #include #include "RootDomainUserClient.h" #include "IOKit/pwr_mgt/IOPowerConnection.h" +#include "IOPMPowerStateQueue.h" extern "C" void kprintf(const char *, ...); @@ -170,7 +171,7 @@ to be tickled)). 
IOPMrootDomain * IOPMrootDomain::construct( void ) { - IOPMrootDomain * root; + IOPMrootDomain *root; root = new IOPMrootDomain; if( root) @@ -183,8 +184,8 @@ IOPMrootDomain * IOPMrootDomain::construct( void ) static void disk_sync_callout(thread_call_param_t p0, thread_call_param_t p1) { - IOService * rootDomain = (IOService *) p0; - unsigned long pmRef = (unsigned long) p1; + IOService *rootDomain = (IOService *) p0; + unsigned long pmRef = (unsigned long) p1; sync_internal(); rootDomain->allowPowerChange(pmRef); @@ -200,7 +201,9 @@ static void disk_sync_callout(thread_call_param_t p0, thread_call_param_t p1) bool IOPMrootDomain::start ( IOService * nub ) { - OSDictionary *tmpDict; + OSDictionary *tmpDict; + + pmPowerStateQueue = 0; super::start(nub); @@ -223,12 +226,16 @@ bool IOPMrootDomain::start ( IOService * nub ) setProperty(kRootDomainSupportedFeatures, tmpDict); tmpDict->release(); - pm_vars->PMworkloop = IOWorkLoop::workLoop(); // make the workloop + pm_vars->PMworkloop = IOWorkLoop::workLoop(); + pmPowerStateQueue = IOPMPowerStateQueue::PMPowerStateQueue(this); + pm_vars->PMworkloop->addEventSource(pmPowerStateQueue); + extraSleepTimer = thread_call_allocate((thread_call_func_t)sleepTimerExpired, (thread_call_param_t) this); clamshellWakeupIgnore = thread_call_allocate((thread_call_func_t)wakeupClamshellTimerExpired, (thread_call_param_t) this); diskSyncCalloutEntry = thread_call_allocate(&disk_sync_callout, (thread_call_param_t) this); - patriarch = new IORootParent; // create our parent + // create our parent + patriarch = new IORootParent; patriarch->init(); patriarch->attach(this); patriarch->start(this); @@ -239,13 +246,19 @@ bool IOPMrootDomain::start ( IOService * nub ) registerPowerDriver(this,ourPowerStates,number_of_power_states); setPMRootDomain(this); - changePowerStateToPriv(ON_STATE); // set a clamp until we sleep + // set a clamp until we sleep + changePowerStateToPriv(ON_STATE); - registerPrioritySleepWakeInterest( &sysPowerDownHandler, this, 0); // install power change handler + // install power change handler + registerPrioritySleepWakeInterest( &sysPowerDownHandler, this, 0); // Register for a notification when IODisplayWrangler is published addNotification( gIOPublishNotification, serviceMatching("IODisplayWrangler"), &displayWranglerPublished, this, 0); + const OSSymbol *ucClassName = OSSymbol::withCStringNoCopy("RootDomainUserClient"); + setProperty(gIOUserClientClassKey, (OSMetaClassBase *) ucClassName); + ucClassName->release(); + registerService(); // let clients find us return true; @@ -259,16 +272,23 @@ bool IOPMrootDomain::start ( IOService * nub ) // ********************************************************************************** IOReturn IOPMrootDomain::setProperties ( OSObject *props_obj) { - OSDictionary *dict = OSDynamicCast(OSDictionary, props_obj); + OSDictionary *dict = OSDynamicCast(OSDictionary, props_obj); + OSBoolean *b; if(!dict) return kIOReturnBadArgument; - if(dict->getObject(OSString::withCString("System Boot Complete"))) { + if(systemBooting && dict->getObject(OSString::withCString("System Boot Complete"))) + { systemBooting = false; - kprintf("IOPM: received System Boot Complete property"); + //kprintf("IOPM: received System Boot Complete property\n"); adjustPowerState(); } + if(b = dict->getObject(OSString::withCString("DisablePowerButtonSleep"))) + { + setProperty(OSString::withCString("DisablePowerButtonSleep"), b); + } + return kIOReturnSuccess; } @@ -328,12 +348,15 @@ void IOPMrootDomain::broadcast_it (unsigned long type, 
unsigned long value) if ( type == kPMMinutesToSleep ) { if ( (sleepSlider == 0) && (value != 0) ) { sleepSlider = value; - adjustPowerState(); // idle sleep is now enabled, maybe sleep now + // idle sleep is now enabled, maybe sleep now + adjustPowerState(); } sleepSlider = value; if ( sleepSlider == 0 ) { - adjustPowerState(); // idle sleep is now disabled - patriarch->wakeSystem(); // make sure we're powered + // idle sleep is now disabled + adjustPowerState(); + // make sure we're powered + patriarch->wakeSystem(); } } if ( sleepSlider > longestNonSleepSlider ) { @@ -414,7 +437,7 @@ IOReturn IOPMrootDomain::setAggressiveness ( unsigned long type, unsigned long n // ********************************************************************************** IOReturn IOPMrootDomain::sleepSystem ( void ) { - kprintf("sleep demand received\n"); + //kprintf("sleep demand received\n"); if ( !systemBooting && allowSleep && sleepIsSupported ) { patriarch->sleepSystem(); return kIOReturnSuccess; @@ -433,8 +456,8 @@ IOReturn IOPMrootDomain::sleepSystem ( void ) // ********************************************************************************** IOReturn IOPMrootDomain::shutdownSystem ( void ) { - patriarch->shutDownSystem(); - return kIOReturnSuccess; + //patriarch->shutDownSystem(); + return kIOReturnUnsupported; } @@ -444,8 +467,8 @@ IOReturn IOPMrootDomain::shutdownSystem ( void ) // ********************************************************************************** IOReturn IOPMrootDomain::restartSystem ( void ) { - patriarch->restartSystem(); - return kIOReturnSuccess; + //patriarch->restartSystem(); + return kIOReturnUnsupported; } @@ -469,26 +492,44 @@ void IOPMrootDomain::powerChangeDone ( unsigned long previousState ) switch ( pm_vars->myCurrentState ) { case SLEEP_STATE: - if ( canSleep && sleepIsSupported ) { - idleSleepPending = false; // re-enable this timer for next sleep + if ( canSleep && sleepIsSupported ) + { + // re-enable this timer for next sleep + idleSleepPending = false; IOLog("System Sleep\n"); - pm_vars->thePlatform->sleepKernel(); // sleep now + pm_vars->thePlatform->sleepKernel(); - ioSPMTrace(IOPOWER_WAKE, * (int *) this); // now we're waking + // The CPU(s) are off at this point. When they're awakened by CPU interrupt, + // code will resume execution here. + + // Now we're waking... + ioSPMTrace(IOPOWER_WAKE, * (int *) this); - clock_interval_to_deadline(30, kSecondScale, &deadline); // stay awake for at least 30 seconds + // stay awake for at least 30 seconds + clock_interval_to_deadline(30, kSecondScale, &deadline); thread_call_enter_delayed(extraSleepTimer, deadline); - idleSleepPending = true; // this gets turned off when we sleep again + // this gets turned off when we sleep again + idleSleepPending = true; // Ignore closed clamshell during wakeup and for a few seconds // after wakeup is complete ignoringClamshellDuringWakeup = true; - gSleepOrShutdownPending = 0; // sleep transition complete - patriarch->wakeSystem(); // get us some power + // sleep transition complete + gSleepOrShutdownPending = 0; + + // trip the reset of the calendar clock + clock_wakeup_calendar(); + + // get us some power + patriarch->wakeSystem(); + // early stage wake notification + tellClients(kIOMessageSystemWillPowerOn); + + // tell the tree we're waking IOLog("System Wake\n"); - systemWake(); // tell the tree we're waking + systemWake(); // Allow drivers to request extra processing time before clamshell // sleep if kIOREMSleepEnabledKey is present.
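The wake path above holds the machine awake for a grace period: it computes an absolute deadline 30 seconds out, arms extraSleepTimer with it, and sets idleSleepPending so idle sleep is deferred until that timer fires. A sketch of the deadline bookkeeping, with std::chrono standing in for clock_interval_to_deadline() and thread_call_enter_delayed() (names hypothetical):

#include <chrono>
#include <cstdio>

using Clock = std::chrono::steady_clock;

struct WakeGrace {
    Clock::time_point deadline;
    bool idleSleepPending = false;

    // On wake: stay awake for at least 'secs' seconds
    // (kernel: clock_interval_to_deadline + thread_call_enter_delayed).
    void armExtraSleepTimer(int secs) {
        deadline = Clock::now() + std::chrono::seconds(secs);
        idleSleepPending = true;   // cleared again when we actually sleep
    }

    // Called by the idle machinery: may we consider sleeping yet?
    bool mayIdleSleep() const {
        return !idleSleepPending || Clock::now() >= deadline;
    }
};

int main() {
    WakeGrace g;
    g.armExtraSleepTimer(30);
    printf("may sleep immediately after wake? %s\n",
           g.mayIdleSleep() ? "yes" : "no");   // "no" for ~30 s
    return 0;
}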
@@ -498,37 +539,42 @@ void IOPMrootDomain::powerChangeDone ( unsigned long previousState ) clock_interval_to_deadline(5, kSecondScale, &deadline); if(clamshellWakeupIgnore) thread_call_enter_delayed(clamshellWakeupIgnore, deadline); } else ignoringClamshellDuringWakeup = false; - + + // Find out what woke us propertyPtr = OSDynamicCast(OSNumber,getProperty("WakeEvent")); - if ( propertyPtr ) { // find out what woke us + if ( propertyPtr ) { theProperty = propertyPtr->unsigned16BitValue(); IOLog("Wake event %04x\n",theProperty); if ( (theProperty & 0x0008) || //lid (theProperty & 0x0800) || // front panel button (theProperty & 0x0020) || // external keyboard (theProperty & 0x0001) ) { // internal keyboard - reportUserInput(); + // We've identified the wakeup event as UI driven + reportUserInput(); } + } else { + // Since we can't identify the wakeup event, treat it as UI activity + reportUserInput(); } - else { - IOLog("Unknown wake event\n"); - reportUserInput(); // don't know, call it user input then - } - - changePowerStateToPriv(ON_STATE); // wake for thirty seconds + + // Wake for thirty seconds + changePowerStateToPriv(ON_STATE); powerOverrideOffPriv(); - } - else { - patriarch->sleepToDoze(); // allow us to step up a power state - changePowerStateToPriv(DOZE_STATE); // and do it + } else { + // allow us to step up a power state + patriarch->sleepToDoze(); + // and do it + changePowerStateToPriv(DOZE_STATE); } break; case DOZE_STATE: - if ( previousState != DOZE_STATE ) { + if ( previousState != DOZE_STATE ) + { IOLog("System Doze\n"); } - idleSleepPending = false; // re-enable this timer for next sleep + // re-enable this timer for next sleep + idleSleepPending = false; gSleepOrShutdownPending = 0; break; @@ -554,10 +600,17 @@ void IOPMrootDomain::powerChangeDone ( unsigned long previousState ) // ********************************************************************************** void IOPMrootDomain::wakeFromDoze( void ) { - if ( pm_vars->myCurrentState == DOZE_STATE ) { - canSleep = true; // reset this till next attempt + if ( pm_vars->myCurrentState == DOZE_STATE ) + { + // reset this till next attempt + canSleep = true; powerOverrideOffPriv(); - patriarch->wakeSystem(); // allow us to wake if children so desire + + // early wake notification + tellClients(kIOMessageSystemWillPowerOn); + + // allow us to wake if children so desire + patriarch->wakeSystem(); } } @@ -576,29 +629,15 @@ void IOPMrootDomain::publishFeature( const char * feature ) features->setObject(feature, kOSBooleanTrue); } - -// ********************************************************************************** -// newUserClient -// -// ********************************************************************************** -IOReturn IOPMrootDomain::newUserClient( task_t owningTask, void * /* security_id */, UInt32 type, IOUserClient ** handler ) +void IOPMrootDomain::unIdleDevice( IOService *theDevice, unsigned long theState ) { - IOReturn err = kIOReturnSuccess; - RootDomainUserClient * client; - - client = RootDomainUserClient::withTask(owningTask); + if(pmPowerStateQueue) + pmPowerStateQueue->unIdleOccurred(theDevice, theState); +} - if( !client || (false == client->attach( this )) || - (false == client->start( this )) ) { - if(client) { - client->detach( this ); - client->release(); - client = NULL; - } - err = kIOReturnNoMemory; - } - *handler = client; - return err; +void IOPMrootDomain::announcePowerSourceChange( void ) +{ + messageClients(kIOPMMessageBatteryStatusHasChanged); } 
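The wake-event classification above ORs together a few known bits (0x0008 lid, 0x0800 front-panel button, 0x0020 external keyboard, 0x0001 internal keyboard) and, notably, now also treats an unidentifiable wake as user activity. A compact restatement of that predicate (the mask names are descriptive inventions; the values are the ones in the code above):

#include <cstdint>
#include <cstdio>

enum : uint16_t {
    kWakeLid              = 0x0008,
    kWakeFrontPanelButton = 0x0800,
    kWakeExternalKeyboard = 0x0020,
    kWakeInternalKeyboard = 0x0001,
};

// Returns true when the wake event should wake the display,
// i.e. when reportUserInput() should be called.
bool isUserDrivenWake(bool haveWakeEvent, uint16_t event) {
    if (!haveWakeEvent)
        return true;   // can't identify the source: treat it as UI activity
    return 0 != (event & (kWakeLid | kWakeFrontPanelButton |
                          kWakeExternalKeyboard | kWakeInternalKeyboard));
}

int main() {
    printf("%d %d %d\n",
           isUserDrivenWake(true, 0x0800),   // 1: front panel button
           isUserDrivenWake(true, 0x0100),   // 0: some non-UI source
           isUserDrivenWake(false, 0));      // 1: unknown -> user input
    return 0;
}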
//********************************************************************************* @@ -611,78 +650,100 @@ IOReturn IOPMrootDomain::newUserClient( task_t owningTask, void * /* security_ IOReturn IOPMrootDomain::receivePowerNotification (UInt32 msg) { - if (msg & kIOPMOverTemp) { + if (msg & kIOPMOverTemp) + { IOLog("Power Management received emergency overtemp signal. Going to sleep."); (void) sleepSystem (); } - if (msg & kIOPMSetDesktopMode) { + if (msg & kIOPMSetDesktopMode) + { desktopMode = (0 != (msg & kIOPMSetValue)); msg &= ~(kIOPMSetDesktopMode | kIOPMSetValue); } - if (msg & kIOPMSetACAdaptorConnected) { + if (msg & kIOPMSetACAdaptorConnected) + { acAdaptorConnect = (0 != (msg & kIOPMSetValue)); msg &= ~(kIOPMSetACAdaptorConnected | kIOPMSetValue); } - if (msg & kIOPMEnableClamshell) { + if (msg & kIOPMEnableClamshell) + { ignoringClamshell = false; } - if (msg & kIOPMDisableClamshell) { + if (msg & kIOPMDisableClamshell) + { ignoringClamshell = true; } - if (msg & kIOPMProcessorSpeedChange) { - IOService *pmu = waitForService(serviceMatching("ApplePMU")); - pmu->callPlatformFunction("prepareForSleep", false, 0, 0, 0, 0); + if (msg & kIOPMProcessorSpeedChange) + { + IOService *pmu = waitForService(serviceMatching("ApplePMU")); + pmu->callPlatformFunction("prepareForSleep", false, 0, 0, 0, 0); pm_vars->thePlatform->sleepKernel(); - pmu->callPlatformFunction("recoverFromSleep", false, 0, 0, 0, 0); + pmu->callPlatformFunction("recoverFromSleep", false, 0, 0, 0, 0); } - if (msg & kIOPMSleepNow) { + if (msg & kIOPMSleepNow) + { (void) sleepSystem (); } - if (msg & kIOPMPowerEmergency) { + if (msg & kIOPMPowerEmergency) + { (void) sleepSystem (); } - if (msg & kIOPMClamshellClosed) { + if (msg & kIOPMClamshellClosed) + { if ( !ignoringClamshell && !ignoringClamshellDuringWakeup - && (!desktopMode || !acAdaptorConnect) ) { + && (!desktopMode || !acAdaptorConnect) ) + { (void) sleepSystem (); } } - if (msg & kIOPMPowerButton) { // toggle state of sleep/wake - if ( pm_vars->myCurrentState == DOZE_STATE ) { // are we dozing? - systemWake(); // yes, tell the tree we're waking - reportUserInput(); // wake the Display Wrangler + if (msg & kIOPMPowerButton) + { + // toggle state of sleep/wake + // are we dozing? + if ( pm_vars->myCurrentState == DOZE_STATE ) + { + // yes, tell the tree we're waking + systemWake(); + // wake the Display Wrangler + reportUserInput(); } else { - (void) sleepSystem (); + // Check that power button sleep is enabled + if(kOSBooleanTrue != getProperty(OSString::withCString("DisablePowerButtonSleep"))) + sleepSystem(); } } // if the case has been closed, we allow // the machine to be put to sleep or to idle sleep - if ( (msg & kIOPMAllowSleep) && !allowSleep ) { - allowSleep = true; + if ( (msg & kIOPMAllowSleep) && !allowSleep ) + { + allowSleep = true; adjustPowerState(); } // if the case has been opened, we disallow sleep/doze if (msg & kIOPMPreventSleep) { - allowSleep = false; - if ( pm_vars->myCurrentState == DOZE_STATE ) { // are we dozing? - systemWake(); // yes, tell the tree we're waking + allowSleep = false; + // are we dozing? 
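receivePowerNotification(), reindented above and continuing below, treats its argument as a bit mask: several events can arrive in one call, each is tested independently, and the Set* messages (desktop mode, AC adaptor) carry their boolean payload in a separate value bit that is stripped along with the message bit once consumed. A sketch of that decode pattern (the flag values here are hypothetical, not the kernel's kIOPM* constants):

#include <cstdio>

// Hypothetical flag values for illustration only.
enum : unsigned {
    kSetDesktopMode      = 1u << 0,
    kSetACAdaptorConnect = 1u << 1,
    kClamshellClosed     = 1u << 2,
    kSetValue            = 1u << 31,   // payload bit for the Set* messages
};

struct PMState {
    bool desktopMode = false;
    bool acConnected = false;

    void receive(unsigned msg) {
        if (msg & kSetDesktopMode) {
            desktopMode = (0 != (msg & kSetValue));
            msg &= ~(kSetDesktopMode | kSetValue);   // consume message + payload
        }
        if (msg & kSetACAdaptorConnect) {
            acConnected = (0 != (msg & kSetValue));
            msg &= ~(kSetACAdaptorConnect | kSetValue);
        }
        if (msg & kClamshellClosed) {
            // sleep only if this isn't a desktop-mode machine on AC power
            if (!desktopMode || !acConnected)
                printf("clamshell closed -> sleepSystem()\n");
        }
    }
};

int main() {
    PMState pm;
    pm.receive(kSetDesktopMode | kSetValue);        // desktop mode on
    pm.receive(kSetACAdaptorConnect | kSetValue);   // AC connected
    pm.receive(kClamshellClosed);                   // ignored: desktop on AC
    return 0;
}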
+ if ( pm_vars->myCurrentState == DOZE_STATE ) { + // yes, tell the tree we're waking + systemWake(); adjustPowerState(); - reportUserInput(); // wake the Display Wrangler - } - else { + // wake the Display Wrangler + reportUserInput(); + } else { adjustPowerState(); - patriarch->wakeSystem(); // make sure we have power to clamp + // make sure we have power to clamp + patriarch->wakeSystem(); } } @@ -697,10 +758,10 @@ IOReturn IOPMrootDomain::receivePowerNotification (UInt32 msg) void IOPMrootDomain::setSleepSupported( IOOptionBits flags ) { - if ( flags & kPCICantSleep ) { + if ( flags & kPCICantSleep ) + { canSleep = false; - } - else { + } else { platformSleepSupport = flags; } @@ -720,34 +781,40 @@ void IOPMrootDomain::setSleepSupported( IOOptionBits flags ) IOReturn IOPMrootDomain::requestPowerDomainState ( IOPMPowerFlags desiredState, IOPowerConnection * whichChild, unsigned long specification ) { - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; - unsigned long powerRequestFlag = 0; - IOPMPowerFlags editedDesire = desiredState; - - if ( !(desiredState & kIOPMPreventIdleSleep) ) { // if they don't really need it, they don't get it + OSIterator *iter; + OSObject *next; + IOPowerConnection *connection; + unsigned long powerRequestFlag = 0; + IOPMPowerFlags editedDesire = desiredState; + + // if they don't really need it, they don't get it + if ( !(desiredState & kIOPMPreventIdleSleep) ) { editedDesire = 0; } - IOLockLock(pm_vars->childLock); // recompute sleepIsSupported - // and see if all children are asleep + IOLockLock(pm_vars->childLock); + + // recompute sleepIsSupported and see if all children are asleep iter = getChildIterator(gIOPowerPlane); sleepIsSupported = true; - - if ( iter ) { - while ( (next = iter->getNextObject()) ) { - if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { - if ( connection == whichChild ) { + if ( iter ) + { + while ( (next = iter->getNextObject()) ) + { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) + { + if ( connection == whichChild ) + { powerRequestFlag += editedDesire; - if ( desiredState & kIOPMPreventSystemSleep ) { + if ( desiredState & kIOPMPreventSystemSleep ) + { sleepIsSupported = false; } - } - else { + } else { powerRequestFlag += connection->getDesiredDomainState(); - if ( connection->getPreventSystemSleepFlag() ) { + if ( connection->getPreventSystemSleepFlag() ) + { sleepIsSupported = false; } } @@ -756,11 +823,13 @@ IOReturn IOPMrootDomain::requestPowerDomainState ( IOPMPowerFlags desiredState, iter->release(); } - if ( (extraSleepDelay == 0) && (powerRequestFlag == 0) ) { + if ( (extraSleepDelay == 0) && (powerRequestFlag == 0) ) + { sleepASAP = true; } - adjustPowerState(); // this may put the system to sleep + // this may put the system to sleep + adjustPowerState(); IOLockUnlock(pm_vars->childLock); @@ -799,7 +868,8 @@ bool IOPMrootDomain::tellChangeDown ( unsigned long stateNum ) case OFF_STATE: return super::tellClientsWithResponse(kIOMessageSystemWillPowerOff); } - return super::tellChangeDown(stateNum); // this shouldn't execute + // this shouldn't execute + return super::tellChangeDown(stateNum); } @@ -847,7 +917,8 @@ void IOPMrootDomain::tellNoChangeDown ( unsigned long ) void IOPMrootDomain::tellChangeUp ( unsigned long stateNum) { - if ( stateNum == ON_STATE ) { + if ( stateNum == ON_STATE ) + { return tellClients(kIOMessageSystemHasPoweredOn); } } @@ -861,9 +932,11 @@ void IOPMrootDomain::reportUserInput ( void ) { OSIterator * iter; - if(!wrangler) { + if(!wrangler) + { 
iter = getMatchingServices(serviceMatching("IODisplayWrangler")); - if(iter) { + if(iter) + { wrangler = (IOService *) iter->getNextObject(); iter->release(); } @@ -880,7 +953,6 @@ void IOPMrootDomain::reportUserInput ( void ) void IOPMrootDomain::setQuickSpinDownTimeout ( void ) { - //IOLog("setQuickSpinDownTimeout\n"); super::setAggressiveness((unsigned long)kPMMinutesToSpinDown,(unsigned long)1); } @@ -929,9 +1001,9 @@ IOReturn IOPMrootDomain::sysPowerDownHandler( void * target, void * refCon, UInt32 messageType, IOService * service, void * messageArgument, vm_size_t argSize ) { - IOReturn ret; - IOPowerStateChangeNotification * params = (IOPowerStateChangeNotification *) messageArgument; - IOPMrootDomain * rootDomain = OSDynamicCast(IOPMrootDomain, service); + IOReturn ret; + IOPowerStateChangeNotification *params = (IOPowerStateChangeNotification *) messageArgument; + IOPMrootDomain *rootDomain = OSDynamicCast(IOPMrootDomain, service); if(!rootDomain) return kIOReturnUnsupported; @@ -1017,15 +1089,13 @@ IOReturn IOPMrootDomain::displayWranglerNotification( void * target, void * refC deviceAlreadyPoweredOff = true; - if( rootDomain->extraSleepDelay ) { - + if( rootDomain->extraSleepDelay ) + { // start the extra sleep timer clock_interval_to_deadline(rootDomain->extraSleepDelay*60, kSecondScale, &deadline ); thread_call_enter_delayed(rootDomain->extraSleepTimer, deadline); rootDomain->idleSleepPending = true; - } else { - // accelerate disk spin down if spin down timer is non-zero (zero = never spin down) // and if system sleep is non-Never if( (0 != rootDomain->user_spindown) && (0 != rootDomain->sleepSlider) ) @@ -1042,7 +1112,8 @@ IOReturn IOPMrootDomain::displayWranglerNotification( void * target, void * refC // cancel any pending idle sleep - if(rootDomain->idleSleepPending) { + if(rootDomain->idleSleepPending) + { thread_call_cancel(rootDomain->extraSleepTimer); rootDomain->idleSleepPending = false; } @@ -1077,15 +1148,15 @@ bool IOPMrootDomain::displayWranglerPublished( void * target, void * refCon, if(!rootDomain) return false; - rootDomain->wrangler = newService; - - // we found the display wrangler, now install a handler - if( !rootDomain->wrangler->registerInterest( gIOGeneralInterest, &displayWranglerNotification, target, 0) ) { - IOLog("IOPMrootDomain::displayWranglerPublished registerInterest failed\n"); - return false; - } - - return true; + rootDomain->wrangler = newService; + + // we found the display wrangler, now install a handler + if( !rootDomain->wrangler->registerInterest( gIOGeneralInterest, &displayWranglerNotification, target, 0) ) { + IOLog("IOPMrootDomain::displayWranglerPublished registerInterest failed\n"); + return false; + } + + return true; } @@ -1117,14 +1188,14 @@ void IOPMrootDomain::adjustPowerState( void ) ! 
allowSleep || systemBooting ) { changePowerStateToPriv(ON_STATE); - } - else { - if ( sleepASAP ) { + } else { + if ( sleepASAP ) + { sleepASAP = false; - if ( sleepIsSupported ) { + if ( sleepIsSupported ) + { changePowerStateToPriv(SLEEP_STATE); - } - else { + } else { changePowerStateToPriv(DOZE_STATE); } } diff --git a/iokit/Kernel/IOPlatformExpert.cpp b/iokit/Kernel/IOPlatformExpert.cpp index 269ea125e..f9ec811d3 100644 --- a/iokit/Kernel/IOPlatformExpert.cpp +++ b/iokit/Kernel/IOPlatformExpert.cpp @@ -26,16 +26,20 @@ * HISTORY */ -#include -#include #include #include -#include -#include #include +#include +#include +#include +#include +#include #include #include -#include +#include + +#include + #include @@ -55,7 +59,7 @@ OSDefineMetaClassAndStructors(IOPlatformExpert, IOService) OSMetaClassDefineReservedUsed(IOPlatformExpert, 0); -OSMetaClassDefineReservedUnused(IOPlatformExpert, 1); +OSMetaClassDefineReservedUsed(IOPlatformExpert, 1); OSMetaClassDefineReservedUnused(IOPlatformExpert, 2); OSMetaClassDefineReservedUnused(IOPlatformExpert, 3); OSMetaClassDefineReservedUnused(IOPlatformExpert, 4); @@ -91,6 +95,23 @@ bool IOPlatformExpert::start( IOService * provider ) if (!super::start(provider)) return false; + + // Register the presence or lack thereof a system + // PCI address mapper with the IOMapper class + +#if 1 + IORegistryEntry * regEntry = IORegistryEntry::fromPath("/u3/dart", gIODTPlane); + if (!regEntry) + regEntry = IORegistryEntry::fromPath("/dart", gIODTPlane); + if (regEntry) { + int debugFlags; + if (!PE_parse_boot_arg("dart", &debugFlags) || debugFlags) + setProperty(kIOPlatformMapperPresentKey, kOSBooleanTrue); + regEntry->release(); + } +#endif + + IOMapper::setMapperRequired(0 != getProperty(kIOPlatformMapperPresentKey)); gIOInterruptControllers = OSDictionary::withCapacity(1); gIOInterruptControllersLock = IOLockAlloc(); @@ -112,6 +133,16 @@ bool IOPlatformExpert::start( IOService * provider ) PMInstantiatePowerDomains(); + // Parse the serial-number data and publish a user-readable string + OSData* mydata = (OSData*) (provider->getProperty("serial-number")); + if (mydata != NULL) { + OSString *serNoString = createSystemSerialNumberString(mydata); + if (serNoString != NULL) { + provider->setProperty(kIOPlatformSerialNumberKey, serNoString); + serNoString->release(); + } + } + return( configure(provider) ); } @@ -155,7 +186,7 @@ IOService * IOPlatformExpert::createNub( OSDictionary * from ) } bool IOPlatformExpert::compareNubName( const IOService * nub, - OSString * name, OSString ** matched = 0 ) const + OSString * name, OSString ** matched ) const { return( nub->IORegistryEntry::compareName( name, matched )); } @@ -205,6 +236,11 @@ bool IOPlatformExpert::getModelName( char * /*name*/, int /*maxLength*/) return( false ); } +OSString* IOPlatformExpert::createSystemSerialNumberString(OSData* myProperty) +{ + return NULL; +} + IORangeAllocator * IOPlatformExpert::getPhysicalRangeAllocator(void) { return(OSDynamicCast(IORangeAllocator, @@ -1049,6 +1085,39 @@ IOByteCount IODTPlatformExpert::savePanicInfo(UInt8 *buffer, IOByteCount length) return lengthSaved; } +OSString* IODTPlatformExpert::createSystemSerialNumberString(OSData* myProperty) { + UInt8* serialNumber; + unsigned int serialNumberSize; + short pos = 0; + char* temp; + char SerialNo[30]; + + if (myProperty != NULL) { + serialNumberSize = myProperty->getLength(); + serialNumber = (UInt8*)(myProperty->getBytesNoCopy()); + temp = serialNumber; + if (serialNumberSize > 0) { + // check to see if this is a CTO 
serial number... + while (pos < serialNumberSize && temp[pos] != '-') pos++; + + if (pos < serialNumberSize) { // there was a hyphen, so it's a CTO serial number + memcpy(SerialNo, serialNumber + 12, 8); + memcpy(&SerialNo[8], serialNumber, 3); + SerialNo[11] = '-'; + memcpy(&SerialNo[12], serialNumber + 3, 8); + SerialNo[20] = 0; + } else { // just a normal serial number + memcpy(SerialNo, serialNumber + 13, 8); + memcpy(&SerialNo[8], serialNumber, 3); + SerialNo[11] = 0; + } + return OSString::withCString(SerialNo); + } + } + return NULL; +} + + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #undef super @@ -1064,7 +1133,7 @@ OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 3); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ bool IOPlatformExpertDevice::compareName( OSString * name, - OSString ** matched = 0 ) const + OSString ** matched ) const { return( IODTCompareNubName( this, name, matched )); } @@ -1126,7 +1195,7 @@ OSMetaClassDefineReservedUnused(IOPlatformDevice, 3); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ bool IOPlatformDevice::compareName( OSString * name, - OSString ** matched = 0 ) const + OSString ** matched ) const { return( ((IOPlatformExpert *)getProvider())-> compareNubName( this, name, matched )); diff --git a/iokit/Kernel/IORangeAllocator.cpp b/iokit/Kernel/IORangeAllocator.cpp index 0dbea067d..8fc649fe2 100644 --- a/iokit/Kernel/IORangeAllocator.cpp +++ b/iokit/Kernel/IORangeAllocator.cpp @@ -90,9 +90,9 @@ bool IORangeAllocator::init( IORangeScalar endOfRange, IORangeAllocator * IORangeAllocator:: withRange( IORangeScalar endOfRange, - IORangeScalar defaultAlignment = 0, - UInt32 capacity = 0, - IOOptionBits options = 0 ) + IORangeScalar defaultAlignment, + UInt32 capacity, + IOOptionBits options ) { IORangeAllocator * thingy; @@ -180,7 +180,7 @@ void IORangeAllocator::deallocElement( UInt32 index ) bool IORangeAllocator::allocate( IORangeScalar size, IORangeScalar * result, - IORangeScalar alignment = 0 ) + IORangeScalar alignment ) { IORangeScalar data, dataEnd; IORangeScalar thisStart, thisEnd; diff --git a/iokit/Kernel/IORegistryEntry.cpp b/iokit/Kernel/IORegistryEntry.cpp index f95427287..eb6472a0c 100644 --- a/iokit/Kernel/IORegistryEntry.cpp +++ b/iokit/Kernel/IORegistryEntry.cpp @@ -396,7 +396,7 @@ bool IORegistryPlane::serialize(OSSerialize *s) const enum { kIORegCapacityIncrement = 4 }; -bool IORegistryEntry::init( OSDictionary * dict = 0 ) +bool IORegistryEntry::init( OSDictionary * dict ) { OSString * prop; @@ -523,19 +523,6 @@ void IORegistryEntry::setPropertyTable( OSDictionary * dict ) /* Wrappers to synchronize property table */ -#define wrap1(func, type, constant) \ -OSObject * \ -IORegistryEntry::func ## Property( type * aKey) constant \ -{ \ - OSObject * obj; \ - \ - PLOCK; \ - obj = getPropertyTable()->func ## Object( aKey ); \ - PUNLOCK; \ - \ - return( obj ); \ -} - #define wrap2(type, constant) \ OSObject * \ IORegistryEntry::copyProperty( type * aKey) constant \ @@ -551,15 +538,6 @@ IORegistryEntry::copyProperty( type * aKey) constant \ return( obj ); \ } -#define wrap3(func,type,constant) \ -void \ -IORegistryEntry::func ## Property( type * aKey) constant \ -{ \ - PLOCK; \ - getPropertyTable()->func ## Object( aKey ); \ - PUNLOCK; \ -} - #define wrap4(type,constant) \ OSObject * \ IORegistryEntry::getProperty( type * aKey, \ @@ -638,18 +616,10 @@ IOReturn IORegistryEntry::setProperties( OSObject * properties ) return( kIOReturnUnsupported ); } 
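The byte shuffling in createSystemSerialNumberString() above is easiest to see as a worked example. The raw layout of the OpenFirmware "serial-number" property is not documented here, so the sample blob below is invented purely to exercise the copies; the index arithmetic itself is taken directly from the code above, including its assumption that the blob is long enough for both memcpy sources:

#include <cstdio>
#include <cstring>

// Restatement of the CTO/non-CTO reordering above.
static void formatSerial(const char *raw, unsigned rawLen, char out[30]) {
    unsigned pos = 0;
    while (pos < rawLen && raw[pos] != '-') pos++;

    if (pos < rawLen) {                 // hyphen found: CTO serial number
        memcpy(out,      raw + 12, 8);
        memcpy(out + 8,  raw,      3);
        out[11] = '-';
        memcpy(out + 12, raw + 3,  8);
        out[20] = 0;
    } else {                            // just a normal serial number
        memcpy(out,      raw + 13, 8);
        memcpy(out + 8,  raw,      3);
        out[11] = 0;
    }
}

int main() {
    // Hypothetical non-CTO blob: 3-char prefix at offset 0,
    // 8-char body at offset 13, filler in between.
    const char normal[22] = "XYZ..........12345678";
    char out[30];
    formatSerial(normal, sizeof normal - 1, out);
    printf("%s\n", out);   // prints: 12345678XYZ
    return 0;
}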
-wrap1(get, const OSSymbol, const) // getProperty() definition -wrap1(get, const OSString, const) // getProperty() definition -wrap1(get, const char, const) // getProperty() definition - wrap2(const OSSymbol, const) // copyProperty() definition wrap2(const OSString, const) // copyProperty() definition wrap2(const char, const) // copyProperty() definition -wrap3(remove, const OSSymbol,) // removeProperty() definition -wrap3(remove, const OSString,) // removeProperty() definition -wrap3(remove, const char,) // removeProperty() definition - wrap4(const OSSymbol, const) // getProperty() w/plane definition wrap4(const OSString, const) // getProperty() w/plane definition wrap4(const char, const) // getProperty() w/plane definition @@ -659,6 +629,62 @@ wrap5(const OSString, const) // copyProperty() w/plane definition wrap5(const char, const) // copyProperty() w/plane definition +OSObject * +IORegistryEntry::getProperty( const OSSymbol * aKey) const +{ + OSObject * obj; + + PLOCK; + obj = getPropertyTable()->getObject( aKey ); + PUNLOCK; + + return( obj ); +} + +OSObject * +IORegistryEntry::getProperty( const OSString * aKey) const +{ + const OSSymbol * tmpKey = OSSymbol::withString( aKey ); + OSObject * obj = getProperty( tmpKey ); + + tmpKey->release(); + return( obj ); +} + +OSObject * +IORegistryEntry::getProperty( const char * aKey) const +{ + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + OSObject * obj = getProperty( tmpKey ); + + tmpKey->release(); + return( obj ); +} + +void +IORegistryEntry::removeProperty( const OSSymbol * aKey) +{ + PLOCK; + getPropertyTable()->removeObject( aKey ); + PUNLOCK; +} + +void +IORegistryEntry::removeProperty( const OSString * aKey) +{ + const OSSymbol * tmpKey = OSSymbol::withString( aKey ); + removeProperty( tmpKey ); + tmpKey->release(); +} + +void +IORegistryEntry::removeProperty( const char * aKey) +{ + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + removeProperty( tmpKey ); + tmpKey->release(); +} + bool IORegistryEntry::setProperty( const OSSymbol * aKey, OSObject * anObject) { @@ -673,22 +699,20 @@ IORegistryEntry::setProperty( const OSSymbol * aKey, OSObject * anObject) bool IORegistryEntry::setProperty( const OSString * aKey, OSObject * anObject) { - bool ret = false; - PLOCK; - ret = getPropertyTable()->setObject( aKey, anObject ); - PUNLOCK; + const OSSymbol * tmpKey = OSSymbol::withString( aKey ); + bool ret = setProperty( tmpKey, anObject ); + tmpKey->release(); return ret; } bool IORegistryEntry::setProperty( const char * aKey, OSObject * anObject) { - bool ret = false; - PLOCK; - ret = getPropertyTable()->setObject( aKey, anObject ); - PUNLOCK; - + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + bool ret = setProperty( tmpKey, anObject ); + + tmpKey->release(); return ret; } @@ -699,9 +723,10 @@ IORegistryEntry::setProperty(const char * aKey, const char * aString) OSSymbol * aSymbol = (OSSymbol *) OSSymbol::withCString( aString ); if( aSymbol) { - PLOCK; - ret = getPropertyTable()->setObject( aKey, aSymbol ); - PUNLOCK; + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + ret = setProperty( tmpKey, aSymbol ); + + tmpKey->release(); aSymbol->release(); } return( ret ); @@ -714,9 +739,10 @@ IORegistryEntry::setProperty(const char * aKey, bool aBoolean) OSBoolean * aBooleanObj = OSBoolean::withBoolean( aBoolean ); if( aBooleanObj) { - PLOCK; - ret = getPropertyTable()->setObject( aKey, aBooleanObj ); - PUNLOCK; + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + ret = setProperty( tmpKey, 
aBooleanObj ); + + tmpKey->release(); aBooleanObj->release(); } return( ret ); @@ -731,9 +757,10 @@ IORegistryEntry::setProperty( const char * aKey, OSNumber * anOffset = OSNumber::withNumber( aValue, aNumberOfBits ); if( anOffset) { - PLOCK; - ret = getPropertyTable()->setObject( aKey, anOffset ); - PUNLOCK; + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + ret = setProperty( tmpKey, anOffset ); + + tmpKey->release(); anOffset->release(); } return( ret ); @@ -748,9 +775,10 @@ IORegistryEntry::setProperty( const char * aKey, OSData * data = OSData::withBytes( bytes, length ); if( data) { - PLOCK; - ret = getPropertyTable()->setObject( aKey, data ); - PUNLOCK; + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + ret = setProperty( tmpKey, data ); + + tmpKey->release(); data->release(); } return( ret ); @@ -760,7 +788,7 @@ IORegistryEntry::setProperty( const char * aKey, /* Name, location, paths */ -const char * IORegistryEntry::getName( const IORegistryPlane * plane = 0 ) const +const char * IORegistryEntry::getName( const IORegistryPlane * plane ) const { OSSymbol * sym = 0; @@ -778,7 +806,7 @@ const char * IORegistryEntry::getName( const IORegistryPlane * plane = 0 ) const } const OSSymbol * IORegistryEntry::copyName( - const IORegistryPlane * plane = 0 ) const + const IORegistryPlane * plane ) const { OSSymbol * sym = 0; @@ -798,7 +826,7 @@ const OSSymbol * IORegistryEntry::copyName( } const OSSymbol * IORegistryEntry::copyLocation( - const IORegistryPlane * plane = 0 ) const + const IORegistryPlane * plane ) const { OSSymbol * sym = 0; @@ -814,7 +842,7 @@ const OSSymbol * IORegistryEntry::copyLocation( return( sym ); } -const char * IORegistryEntry::getLocation( const IORegistryPlane * plane = 0 ) const +const char * IORegistryEntry::getLocation( const IORegistryPlane * plane ) const { const OSSymbol * sym = copyLocation( plane ); const char * result = 0; @@ -828,7 +856,7 @@ const char * IORegistryEntry::getLocation( const IORegistryPlane * plane = 0 ) c } void IORegistryEntry::setName( const OSSymbol * name, - const IORegistryPlane * plane = 0 ) + const IORegistryPlane * plane ) { const OSSymbol * key; @@ -845,7 +873,7 @@ void IORegistryEntry::setName( const OSSymbol * name, } void IORegistryEntry::setName( const char * name, - const IORegistryPlane * plane = 0 ) + const IORegistryPlane * plane ) { OSSymbol * sym = (OSSymbol *)OSSymbol::withCString( name ); if ( sym ) { @@ -855,7 +883,7 @@ void IORegistryEntry::setName( const char * name, } void IORegistryEntry::setLocation( const OSSymbol * location, - const IORegistryPlane * plane = 0 ) + const IORegistryPlane * plane ) { const OSSymbol * key; @@ -872,7 +900,7 @@ void IORegistryEntry::setLocation( const OSSymbol * location, } void IORegistryEntry::setLocation( const char * location, - const IORegistryPlane * plane = 0 ) + const IORegistryPlane * plane ) { OSSymbol * sym = (OSSymbol *)OSSymbol::withCString( location ); if ( sym ) { @@ -882,7 +910,7 @@ void IORegistryEntry::setLocation( const char * location, } bool -IORegistryEntry::compareName( OSString * name, OSString ** matched = 0 ) const +IORegistryEntry::compareName( OSString * name, OSString ** matched ) const { const OSSymbol * sym = copyName(); bool isEqual; @@ -901,7 +929,7 @@ IORegistryEntry::compareName( OSString * name, OSString ** matched = 0 ) const } bool -IORegistryEntry::compareNames( OSObject * names, OSString ** matched = 0 ) const +IORegistryEntry::compareNames( OSObject * names, OSString ** matched ) const { OSString * string; OSCollection * 
collection; @@ -1068,39 +1096,38 @@ const char * IORegistryEntry::matchPathLocation( const char * cmp, const char * str; const char * result = 0; u_quad_t num1, num2; - char c1, c2; + char lastPathChar, lastLocationChar; str = getLocation( plane ); if( str) { - c2 = str[0]; + lastPathChar = cmp[0]; + lastLocationChar = str[0]; do { - num1 = strtouq( cmp, (char **) &cmp, 16 ); - if( c2) { + if( lastPathChar) { + num1 = strtouq( cmp, (char **) &cmp, 16 ); + lastPathChar = *cmp++; + } else + num1 = 0; + + if( lastLocationChar) { num2 = strtouq( str, (char **) &str, 16 ); - c2 = str[0]; + lastLocationChar = *str++; } else num2 = 0; if( num1 != num2) break; - c1 = *cmp++; - - if( (c2 == ':') && (c2 == c1)) { - str++; - continue; - } - - if( ',' != c1) { + if (!lastPathChar && !lastLocationChar) { result = cmp - 1; break; } - if( c2) { - if( c2 != ',') - break; - str++; - } + if( (',' != lastPathChar) && (':' != lastPathChar)) + lastPathChar = 0; + + if (lastPathChar && lastLocationChar && (lastPathChar != lastLocationChar)) + break; } while( true); } @@ -1156,7 +1183,7 @@ IORegistryEntry * IORegistryEntry::getChildFromComponent( const char ** opath, } const OSSymbol * IORegistryEntry::hasAlias( const IORegistryPlane * plane, - char * opath = 0, int * length = 0 ) const + char * opath, int * length ) const { IORegistryEntry * entry; IORegistryEntry * entry2; @@ -1238,10 +1265,10 @@ const char * IORegistryEntry::dealiasPath( IORegistryEntry * IORegistryEntry::fromPath( const char * path, - const IORegistryPlane * plane = 0, - char * opath = 0, - int * length = 0, - IORegistryEntry * fromEntry = 0 ) + const IORegistryPlane * plane, + char * opath, + int * length, + IORegistryEntry * fromEntry ) { IORegistryEntry * where = 0; IORegistryEntry * aliasEntry = 0; @@ -1334,9 +1361,9 @@ IORegistryEntry * IORegistryEntry::fromPath( IORegistryEntry * IORegistryEntry::childFromPath( const char * path, - const IORegistryPlane * plane = 0, - char * opath = 0, - int * len = 0 ) + const IORegistryPlane * plane, + char * opath, + int * len ) { return( IORegistryEntry::fromPath( path, plane, opath, len, this )); } @@ -1350,7 +1377,7 @@ IORegistryEntry * IORegistryEntry::childFromPath( inline bool IORegistryEntry::arrayMember( OSArray * set, const IORegistryEntry * member, - unsigned int * index = 0 ) const + unsigned int * index ) const { int i; OSObject * probeObject; @@ -1381,7 +1408,7 @@ bool IORegistryEntry::makeLink( IORegistryEntry * to, } else { - links = OSArray::withObjects( & (const OSObject *) to, 1, 1 ); + links = OSArray::withObjects( (const OSObject **) &to, 1, 1 ); result = (links != 0); if( result) { result = registryTable()->setObject( plane->keys[ relation ], @@ -1587,7 +1614,7 @@ void IORegistryEntry::applyToParents( IORegistryEntryApplierFunction applier, bool IORegistryEntry::isChild( IORegistryEntry * child, const IORegistryPlane * plane, - bool onlyChild = false ) const + bool onlyChild ) const { OSArray * links; bool ret = false; @@ -1608,7 +1635,7 @@ bool IORegistryEntry::isChild( IORegistryEntry * child, bool IORegistryEntry::isParent( IORegistryEntry * parent, const IORegistryPlane * plane, - bool onlyParent = false ) const + bool onlyParent ) const { OSArray * links; @@ -1839,7 +1866,7 @@ enum { kIORegistryIteratorInvalidFlag = 0x80000000 }; IORegistryIterator * IORegistryIterator::iterateOver( IORegistryEntry * root, const IORegistryPlane * plane, - IOOptionBits options = 0 ) + IOOptionBits options ) { IORegistryIterator * create; @@ -1869,7 +1896,7 @@ IORegistryIterator::iterateOver( 
IORegistryEntry * root, IORegistryIterator * IORegistryIterator::iterateOver( const IORegistryPlane * plane, - IOOptionBits options = 0 ) + IOOptionBits options ) { return( iterateOver( gRegistryRoot, plane, options )); } diff --git a/iokit/Kernel/IOService.cpp b/iokit/Kernel/IOService.cpp index e2fdab2c2..5dafe2216 100644 --- a/iokit/Kernel/IOService.cpp +++ b/iokit/Kernel/IOService.cpp @@ -22,17 +22,6 @@ * * @APPLE_LICENSE_HEADER_END@ */ -/* - * Copyright (c) 1991-1999 Apple Computer, Inc. All rights reserved. - * - * HISTORY - * - * 29-Jan-91 Portions from IODevice.m, Doug Mitchell at NeXT, Created. - * 18-Jun-98 start IOKit objc - * 10-Nov-98 start iokit cpp - * 25-Feb-99 sdouglas, add threads and locks to ensure deadlock - * - */ #include @@ -46,7 +35,7 @@ #include #include #include -#include +#include #include #include #include @@ -109,6 +98,10 @@ const OSSymbol * gIOKitDebugKey; const OSSymbol * gIOCommandPoolSizeKey; +const OSSymbol * gIOConsoleUsersKey; +const OSSymbol * gIOConsoleSessionUIDKey; +const OSSymbol * gIOConsoleUsersSeedKey; + static int gIOResourceGenerationCount; const OSSymbol * gIOServiceKey; @@ -144,6 +137,9 @@ static OSArray * gIOStopList; static OSArray * gIOStopProviderList; static OSArray * gIOFinalizeList; +static SInt32 gIOConsoleUsersSeed; +static OSData * gIOConsoleUsersSeedValue; + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #define LOCKREADNOTIFY() \ @@ -238,6 +234,11 @@ void IOService::initialize( void ) kIOTerminatedNotification ); gIOServiceKey = OSSymbol::withCStringNoCopy( kIOServiceClass); + gIOConsoleUsersKey = OSSymbol::withCStringNoCopy( kIOConsoleUsersKey); + gIOConsoleSessionUIDKey = OSSymbol::withCStringNoCopy( kIOConsoleSessionUIDKey); + gIOConsoleUsersSeedKey = OSSymbol::withCStringNoCopy( kIOConsoleUsersSeedKey); + gIOConsoleUsersSeedValue = OSData::withBytesNoCopy(&gIOConsoleUsersSeed, sizeof(gIOConsoleUsersSeed)); + gNotificationLock = IORecursiveLockAlloc(); assert( gIOServicePlane && gIODeviceMemoryKey @@ -246,7 +247,9 @@ void IOService::initialize( void ) && gIOProviderClassKey && gIONameMatchKey && gIONameMatchedKey && gIOMatchCategoryKey && gIODefaultMatchCategoryKey && gIOPublishNotification && gIOMatchedNotification - && gIOTerminatedNotification && gIOServiceKey ); + && gIOTerminatedNotification && gIOServiceKey + && gIOConsoleUsersKey && gIOConsoleSessionUIDKey + && gIOConsoleUsersSeedKey && gIOConsoleUsersSeedValue); gJobsLock = IOLockAlloc(); gJobs = OSOrderedSet::withCapacity( 10 ); @@ -401,7 +404,7 @@ void IOService::detach( IOService * provider ) * Register instance - publish it for matching */ -void IOService::registerService( IOOptionBits options = 0 ) +void IOService::registerService( IOOptionBits options ) { char * pathBuf; const char * path; @@ -454,7 +457,7 @@ void IOService::registerService( IOOptionBits options = 0 ) startMatching( options ); } -void IOService::startMatching( IOOptionBits options = 0 ) +void IOService::startMatching( IOOptionBits options ) { IOService * provider; UInt32 prevBusy = 0; @@ -570,7 +573,7 @@ IOReturn IOService::catalogNewDrivers( OSOrderedSet * newTables ) } _IOServiceJob * _IOServiceJob::startJob( IOService * nub, int type, - IOOptionBits options = 0 ) + IOOptionBits options ) { _IOServiceJob * job; @@ -815,7 +818,7 @@ void IOService::setPMRootDomain( class IOPMrootDomain * rootDomain) * Stacking change */ -bool IOService::lockForArbitration( bool isSuccessRequired = true ) +bool IOService::lockForArbitration( bool isSuccessRequired ) { bool found; bool 
success; @@ -1202,7 +1205,7 @@ void IOService::applyToClients( IOServiceApplierFunction applier, // send a message to a client or interested party of this service IOReturn IOService::messageClient( UInt32 type, OSObject * client, - void * argument = 0, vm_size_t argSize = 0 ) + void * argument, vm_size_t argSize ) { IOReturn ret; IOService * service; @@ -1268,7 +1271,7 @@ void IOService::applyToInterested( const OSSymbol * typeOfInterest, UNLOCKNOTIFY(); if( copyArray) { for( index = 0; - (next = array->getObject( index )); + (next = copyArray->getObject( index )); index++) { (*applier)(next, context); } @@ -1299,7 +1302,7 @@ static void messageClientsApplier( OSObject * object, void * ctx ) // send a message to all clients IOReturn IOService::messageClients( UInt32 type, - void * argument = 0, vm_size_t argSize = 0 ) + void * argument, vm_size_t argSize ) { MessageClientsContext context; @@ -1518,7 +1521,7 @@ bool IOService::requestTerminate( IOService * provider, IOOptionBits options ) return( ok ); } -bool IOService::terminatePhase1( IOOptionBits options = 0 ) +bool IOService::terminatePhase1( IOOptionBits options ) { IOService * victim; IOService * client; @@ -1595,7 +1598,7 @@ bool IOService::terminatePhase1( IOOptionBits options = 0 ) return( true ); } -void IOService::scheduleTerminatePhase2( IOOptionBits options = 0 ) +void IOService::scheduleTerminatePhase2( IOOptionBits options ) { AbsoluteTime deadline; int waitResult; @@ -1623,7 +1626,8 @@ void IOService::scheduleTerminatePhase2( IOOptionBits options = 0 ) gIOTerminateWork++; do { - terminateWorker( options ); + while( gIOTerminateWork ) + terminateWorker( options ); wait = (0 != (__state[1] & kIOServiceBusyStateMask)); if( wait) { // wait for the victim to go non-busy @@ -1638,17 +1642,21 @@ void IOService::scheduleTerminatePhase2( IOOptionBits options = 0 ) } else thread_cancel_timer(); } - } while( wait && (waitResult != THREAD_TIMED_OUT)); + } while(gIOTerminateWork || (wait && (waitResult != THREAD_TIMED_OUT))); - gIOTerminateThread = 0; - IOLockWakeup( gJobsLock, (event_t) &gIOTerminateThread, /* one-thread */ false); + gIOTerminateThread = 0; + IOLockWakeup( gJobsLock, (event_t) &gIOTerminateThread, /* one-thread */ false); } else { // ! 
kIOServiceSynchronous gIOTerminatePhase2List->setObject( this ); - if( 0 == gIOTerminateWork++) - gIOTerminateThread = IOCreateThread( &terminateThread, (void *) options ); + if( 0 == gIOTerminateWork++) { + if( !gIOTerminateThread) + gIOTerminateThread = IOCreateThread( &terminateThread, (void *) options ); + else + IOLockWakeup(gJobsLock, (event_t) &gIOTerminateWork, /* one-thread */ false ); + } } IOLockUnlock( gJobsLock ); @@ -1660,7 +1668,8 @@ void IOService::terminateThread( void * arg ) { IOLockLock( gJobsLock ); - terminateWorker( (IOOptionBits) arg ); + while (gIOTerminateWork) + terminateWorker( (IOOptionBits) arg ); gIOTerminateThread = 0; IOLockWakeup( gJobsLock, (event_t) &gIOTerminateThread, /* one-thread */ false); @@ -1965,8 +1974,8 @@ bool IOService::finalize( IOOptionBits options ) return( true ); } -#undef tailQ(o) -#undef headQ(o) +#undef tailQ +#undef headQ /* * Terminate @@ -1990,7 +1999,7 @@ bool IOService::terminateClient( IOService * client, IOOptionBits options ) return( ok ); } -bool IOService::terminate( IOOptionBits options = 0 ) +bool IOService::terminate( IOOptionBits options ) { options |= kIOServiceTerminate; @@ -2020,8 +2029,8 @@ static void serviceOpenMessageApplier( OSObject * object, void * ctx ) } bool IOService::open( IOService * forClient, - IOOptionBits options = 0, - void * arg = 0 ) + IOOptionBits options, + void * arg ) { bool ok; ServiceOpenMessageContext context; @@ -2047,7 +2056,7 @@ bool IOService::open( IOService * forClient, } void IOService::close( IOService * forClient, - IOOptionBits options = 0 ) + IOOptionBits options ) { bool wasClosed; bool last = false; @@ -2079,7 +2088,7 @@ void IOService::close( IOService * forClient, } } -bool IOService::isOpen( const IOService * forClient = 0 ) const +bool IOService::isOpen( const IOService * forClient ) const { IOService * self = (IOService *) this; bool ok; @@ -2547,9 +2556,28 @@ bool IOService::startCandidate( IOService * service ) checkResources(); // stall for any driver resources service->checkResources(); + + AbsoluteTime startTime; + AbsoluteTime endTime; + UInt64 nano; + + if (kIOLogStart & gIOKitDebug) + clock_get_uptime(&startTime); + ok = service->start(this); - ok = service->start( this ); + if (kIOLogStart & gIOKitDebug) + { + clock_get_uptime(&endTime); + + if (CMP_ABSOLUTETIME(&endTime, &startTime) > 0) + { + SUB_ABSOLUTETIME(&endTime, &startTime); + absolutetime_to_nanoseconds(endTime, &nano); + if (nano > 500000000ULL) + IOLog("%s::start took %ld ms\n", service->getName(), (UInt32)(nano / 1000000ULL)); + } + } if( !ok) service->detach( this ); } @@ -2561,7 +2589,7 @@ IOService * IOService::resources( void ) return( gIOResources ); } -void IOService::publishResource( const char * key, OSObject * value = 0 ) +void IOService::publishResource( const char * key, OSObject * value ) { const OSSymbol * sym; @@ -2571,13 +2599,16 @@ void IOService::publishResource( const char * key, OSObject * value = 0 ) } } -void IOService::publishResource( const OSSymbol * key, OSObject * value = 0 ) +void IOService::publishResource( const OSSymbol * key, OSObject * value ) { if( 0 == value) value = (OSObject *) gIOServiceKey; gIOResources->setProperty( key, value); + if( IORecursiveLockHaveLock( gNotificationLock)) + return; + gIOResourceGenerationCount++; gIOResources->registerService(); } @@ -2850,7 +2881,7 @@ UInt32 IOService::getBusyState( void ) } IOReturn IOService::waitForState( UInt32 mask, UInt32 value, - mach_timespec_t * timeout = 0 ) + mach_timespec_t * timeout ) { bool wait; int 
waitResult = THREAD_AWAKENED; @@ -2896,7 +2927,7 @@ IOReturn IOService::waitForState( UInt32 mask, UInt32 value, return( kIOReturnSuccess ); } -IOReturn IOService::waitQuiet( mach_timespec_t * timeout = 0 ) +IOReturn IOService::waitQuiet( mach_timespec_t * timeout ) { return( waitForState( kIOServiceBusyStateMask, 0, timeout )); } @@ -3052,7 +3083,7 @@ void _IOServiceJob::pingConfig( _IOServiceJob * job ) // internal - call with gNotificationLock OSObject * IOService::getExistingServices( OSDictionary * matching, - IOOptionBits inState, IOOptionBits options = 0 ) + IOOptionBits inState, IOOptionBits options ) { OSObject * current = 0; OSIterator * iter; @@ -3079,7 +3110,7 @@ OSObject * IOService::getExistingServices( OSDictionary * matching, ((OSSet *)current)->setObject( service ); else current = OSSet::withObjects( - & (const OSObject *) service, 1, 1 ); + (const OSObject **) &service, 1, 1 ); } } } while( !service && !iter->isValid()); @@ -3116,7 +3147,7 @@ OSIterator * IOService::getMatchingServices( OSDictionary * matching ) IONotifier * IOService::setNotification( const OSSymbol * type, OSDictionary * matching, IOServiceNotificationHandler handler, void * target, void * ref, - SInt32 priority = 0 ) + SInt32 priority ) { _IOServiceNotifier * notify = 0; OSOrderedSet * set; @@ -3220,8 +3251,8 @@ IONotifier * IOService::installNotification( IONotifier * IOService::addNotification( const OSSymbol * type, OSDictionary * matching, IOServiceNotificationHandler handler, - void * target, void * ref = 0, - SInt32 priority = 0 ) + void * target, void * ref, + SInt32 priority ) { OSIterator * existing; _IOServiceNotifier * notify; @@ -3267,7 +3298,7 @@ bool IOService::syncNotificationHandler( } IOService * IOService::waitForService( OSDictionary * matching, - mach_timespec_t * timeout = 0 ) + mach_timespec_t * timeout ) { IONotifier * notify = 0; // priority doesn't help us much since we need a thread wakeup @@ -3377,7 +3408,7 @@ IOOptionBits IOService::getState( void ) const */ OSDictionary * IOService::serviceMatching( const OSString * name, - OSDictionary * table = 0 ) + OSDictionary * table ) { if( !table) table = OSDictionary::withCapacity( 2 ); @@ -3388,7 +3419,7 @@ OSDictionary * IOService::serviceMatching( const OSString * name, } OSDictionary * IOService::serviceMatching( const char * name, - OSDictionary * table = 0 ) + OSDictionary * table ) { const OSString * str; @@ -3402,7 +3433,7 @@ OSDictionary * IOService::serviceMatching( const char * name, } OSDictionary * IOService::nameMatching( const OSString * name, - OSDictionary * table = 0 ) + OSDictionary * table ) { if( !table) table = OSDictionary::withCapacity( 2 ); @@ -3413,7 +3444,7 @@ OSDictionary * IOService::nameMatching( const OSString * name, } OSDictionary * IOService::nameMatching( const char * name, - OSDictionary * table = 0 ) + OSDictionary * table ) { const OSString * str; @@ -3427,7 +3458,7 @@ OSDictionary * IOService::nameMatching( const char * name, } OSDictionary * IOService::resourceMatching( const OSString * str, - OSDictionary * table = 0 ) + OSDictionary * table ) { table = serviceMatching( gIOResourcesKey, table ); if( table) @@ -3437,7 +3468,7 @@ OSDictionary * IOService::resourceMatching( const OSString * str, } OSDictionary * IOService::resourceMatching( const char * name, - OSDictionary * table = 0 ) + OSDictionary * table ) { const OSSymbol * str; @@ -3608,6 +3639,15 @@ IOReturn IOResources::setProperties( OSObject * properties ) return( kIOReturnBadArgument); while( (key = OSDynamicCast(OSSymbol, 
iter->getNextObject()))) { + + if (gIOConsoleUsersKey == key) + { + IORegistryEntry::getRegistryRoot()->setProperty(key, dict->getObject(key)); + OSIncrementAtomic( &gIOConsoleUsersSeed ); + publishResource( gIOConsoleUsersSeedKey, gIOConsoleUsersSeedValue ); + continue; + } + publishResource( key, dict->getObject(key) ); } @@ -4172,7 +4212,7 @@ IODeviceMemory * IOService::getDeviceMemoryWithIndex( unsigned int index ) } IOMemoryMap * IOService::mapDeviceMemoryWithIndex( unsigned int index, - IOOptionBits options = 0 ) + IOOptionBits options ) { IODeviceMemory * range; IOMemoryMap * map; diff --git a/iokit/Kernel/IOServicePM.cpp b/iokit/Kernel/IOServicePM.cpp index a8542bc7d..3e5822be1 100644 --- a/iokit/Kernel/IOServicePM.cpp +++ b/iokit/Kernel/IOServicePM.cpp @@ -33,6 +33,7 @@ #include #include #include +#include #include #include "IOKit/pwr_mgt/IOPMinformeeList.h" #include "IOKit/pwr_mgt/IOPMchangeNoteList.h" @@ -73,7 +74,6 @@ ioSPMTraceEnd(unsigned int csc, static void ack_timer_expired(thread_call_param_t); static void settle_timer_expired(thread_call_param_t); -IOReturn unIdleDevice ( OSObject *, void *, void *, void *, void * ); static void PM_idle_timer_expired(OSObject *, IOTimerEventSource *); static void c_PM_Clamp_Timer_Expired (OSObject * client,IOTimerEventSource *); void tellAppWithResponse ( OSObject * object, void * context); @@ -93,36 +93,38 @@ extern const IORegistryPlane * gIOPowerPlane; // Inputs are acks from interested parties, ack from the controlling driver, // ack timeouts, settle timeout, and powerStateDidChange from the parent. // These are the states: +enum { + kIOPM_OurChangeTellClientsPowerDown = 1, + kIOPM_OurChangeTellPriorityClientsPowerDown, + kIOPM_OurChangeNotifyInterestedDriversWillChange, + kIOPM_OurChangeSetPowerState, + kIOPM_OurChangeWaitForPowerSettle, + kIOPM_OurChangeNotifyInterestedDriversDidChange, + kIOPM_OurChangeFinish, + kIOPM_ParentDownTellPriorityClientsPowerDown_Immediate, + kIOPM_ParentDownNotifyInterestedDriversWillChange_Delayed, + kIOPM_ParentDownWaitForPowerSettleAndNotifyDidChange_Immediate, + kIOPM_ParentDownNotifyDidChangeAndAcknowledgeChange_Delayed, + kIOPM_ParentDownSetPowerState_Delayed, + kIOPM_ParentDownWaitForPowerSettle_Delayed, + kIOPM_ParentDownAcknowledgeChange_Delayed, + kIOPM_ParentUpSetPowerState_Delayed, + kIOPM_ParentUpSetPowerState_Immediate, + kIOPM_ParentUpWaitForSettleTime_Delayed, + kIOPM_ParentUpNotifyInterestedDriversDidChange_Delayed, + kIOPM_ParentUpAcknowledgePowerChange_Delayed, + kIOPM_Finished +}; +// values of outofbandparameter enum { - IOPMour_prechange_03 = 1, - IOPMour_prechange_04, - IOPMour_prechange_05, - IOPMour_prechange_1, - IOPMour_prechange_2, - IOPMour_prechange_3, - IOPMour_prechange_4, - IOPMparent_down_0, - IOPMparent_down_05, - IOPMparent_down_2, - IOPMparent_down_3, - IOPMparent_down_4, - IOPMparent_down_5, - IOPMparent_down_6, - IOPMparent_up_0, - IOPMparent_up_1, - IOPMparent_up_4, - IOPMparent_up_5, - IOPMparent_up_6, - IOPMfinished - }; - -enum { // values of outofbandparameter kNotifyApps, kNotifyPriority }; -struct context { // used for applyToInterested + +// used for applyToInterested +struct context { OSArray * responseFlags; UInt16 serialNumber; UInt16 counter; @@ -134,7 +136,7 @@ struct context { // used for applyToInterested IOPMPowerFlags stateFlags; }; - // five minutes in microseconds +// five minutes in microseconds #define FIVE_MINUTES 5*60*1000000 #define k30seconds 30*1000000 @@ -195,17 +197,17 @@ When the parent calls powerStateDidChange, we acknowledge the 
parent again, and to accommodate the child, or if our power-controlling driver calls changePowerStateTo, or if some other driver which is using our device calls makeUsable, or if a subclassed object calls changePowerStateToPriv. These are all power changes initiated by us, not forced upon us by the parent. We start by notifying interested parties. If they all acknowledge via return code, we can go - on to state "our_prechange_1". Otherwise, we start the ack timer and wait for the stragglers to acknowlege by calling - acknowledgePowerChange. We move on to state "our_prechange_1" when all the stragglers have acknowledged, - or when the ack timer expires on all those which didn't acknowledge. In "our_prechange_1" we call the power-controlling - driver to change the power state of the hardware. If it returns saying it has done so, we go on to state "our_prechange_2". + on to state "OurChangeSetPowerState". Otherwise, we start the ack timer and wait for the stragglers to acknowledge by calling + acknowledgePowerChange. We move on to state "OurChangeSetPowerState" when all the stragglers have acknowledged, + or when the ack timer expires on all those which didn't acknowledge. In "OurChangeSetPowerState" we call the power-controlling + driver to change the power state of the hardware. If it returns saying it has done so, we go on to state "OurChangeWaitForPowerSettle". Otherwise, we have to wait for it, so we set the ack timer and wait. When it calls acknowledgeSetPowerState, or when the - ack timer expires, we go on. In "our_prechange_2", we look in the power state array to see if there is any settle time required - when changing from our current state to the new state. If not, we go right away to "our_prechange_3". Otherwise, we - set the settle timer and wait. When it expires, we move on. In "our_prechange_3" state, we notify all our interested parties + ack timer expires, we go on. In "OurChangeWaitForPowerSettle", we look in the power state array to see if there is any settle time required + when changing from our current state to the new state. If not, we go right away to "OurChangeNotifyInterestedDriversDidChange". Otherwise, we + set the settle timer and wait. When it expires, we move on. In "OurChangeNotifyInterestedDriversDidChange" state, we notify all our interested parties via their powerStateDidChange methods that we have finished changing power state. If they all acknowledge via return - code, we move on to "our_prechange_4". Otherwise we set the ack timer and wait. When they have all acknowledged, or - when the ack timer has expired for those that didn't, we move on to "our_prechange_4", where we remove the used + code, we move on to "OurChangeFinish". Otherwise we set the ack timer and wait. When they have all acknowledged, or + when the ack timer has expired for those that didn't, we move on to "OurChangeFinish", where we remove the used change note from the head of the queue and start the next one if one exists. Parent-initiated changes are more complex in the state machine. First, power going up and power going down are handled @@ -213,13 +215,13 @@ When the parent calls powerStateDidChange, we acknowledge the parent again, and in two different ways, so each of the parent paths is really two. When the parent calls our powerDomainWillChange method, notifying us that it will lower power in the domain, we decide - what state that will put our device in. 
Then we embark on the state machine path "IOPMparent_down_1" - and "IOPMparent_down_2", in which we notify interested parties of the upcoming change, instruct our driver to make + what state that will put our device in. Then we embark on the state machine path "IOPMParentDownSetPowerState_Immediate" + and "kIOPM_ParentDownWaitForPowerSettleAndNotifyDidChange_Immediate", in which we notify interested parties of the upcoming change, instruct our driver to make the change, check for settle time, and notify interested parties of the completed change. If we get to the end of this path without stalling due to an interested party which didn't acknowledge via return code, due to the controlling driver not able to change state right away, or due to a non-zero settling time, then we return IOPMAckImplied to the parent, and we're done with the change. - If we do stall in any of those states, we return IOPMWillAckLater to the parent and enter the parallel path "IOPMparent_down_4" - "IOPMparent_down_5", and "IOPMparent_down_3", where we continue with the same processing, except that at the end we + If we do stall in any of those states, we return IOPMWillAckLater to the parent and enter the parallel path "kIOPM_ParentDownSetPowerState_Delayed" + "kIOPM_ParentDownWaitForPowerSettle_Delayed", and "kIOPM_ParentDownNotifyDidChangeAndAcknowledgeChange_Delayed", where we continue with the same processing, except that at the end we acknowledge the parent explicitly via acknowledgePowerChange, and we're done with the change. Then when the parent calls us at powerStateDidChange we acknowledge via return code, because we have already made the power change. In any case, when we are done we remove the used change note from the head of the queue and start on the next one. @@ -232,11 +234,11 @@ Then when the parent calls us at powerStateDidChange we acknowledging via return When the parent calls our powerDomainWillChange method, notifying us that it will raise power in the domain, we acknowledge via return code, because there's really nothing we can do until the power is actually raised in the domain. When the parent calls us at powerStateDidChange, we start by notifying our interested parties. If they all acknowledge via return code, - we go on to" IOPMparent_up_1" to instruct the driver to raise its power level. After that, we check for any - necessary settling time in "IOPMparent_up_2", and we notify all interested parties that power has changed - in "IOPMparent_up_3". If none of these operations stall, we acknowledge the parent via return code, release - the change note, and start the next, if there is one. If one of them does stall, we enter the parallel path "IOPMparent_up_0", - "IOPMparent_up_4", "IOPMparent_up_5", and "IOPMparent_up_6", which ends with + we go on to "kIOPM_ParentUpSetPowerState_Immediate" to instruct the driver to raise its power level. After that, we check for any + necessary settling time in "IOPMParentUpWaitForSettleTime_Immediate", and we notify all interested parties that power has changed + in "IOPMParentUpNotifyInterestedDriversDidChange_Immediate". If none of these operations stall, we acknowledge the parent via return code, release + the change note, and start the next, if there is one. 
If one of them does stall, we enter the parallel path "kIOPM_ParentUpSetPowerState_Delayed", + "kIOPM_ParentUpWaitForSettleTime_Delayed", "kIOPM_ParentUpNotifyInterestedDriversDidChange_Delayed", and "kIOPM_ParentUpAcknowledgePowerChange_Delayed", which ends with our explicit acknowledgement to the parent. */ @@ -250,16 +252,20 @@ void IOService::PMinit ( void ) { if ( ! initialized ) { - pm_vars = new IOPMprot; // make space for our variables + // make space for our variables + pm_vars = new IOPMprot; priv = new IOPMpriv; pm_vars->init(); priv->init(); - setProperty(prot_key, (OSObject *) pm_vars); // add these to the properties + + // add pm_vars & priv to the properties + setProperty(prot_key, (OSObject *) pm_vars); setProperty(priv_key, (OSObject *) priv); + // then initialize them priv->owner = this; - pm_vars->theNumberOfPowerStates = 0; // then initialize them + pm_vars->theNumberOfPowerStates = 0; priv->we_are_root = false; pm_vars->theControllingDriver = NULL; priv->our_lock = IOLockAlloc(); @@ -272,9 +278,10 @@ void IOService::PMinit ( void ) priv->changeList = new IOPMchangeNoteList; priv->changeList->initialize(); pm_vars->aggressiveness = 0; - for (unsigned int i = 0; i <= kMaxType; i++) { - pm_vars->current_aggressiveness_values[i] = 0; - pm_vars->current_aggressiveness_valid[i] = false; + for (unsigned int i = 0; i <= kMaxType; i++) + { + pm_vars->current_aggressiveness_values[i] = 0; + pm_vars->current_aggressiveness_valid[i] = false; } pm_vars->myCurrentState = 0; priv->imminentState = 0; @@ -287,7 +294,7 @@ void IOService::PMinit ( void ) priv->need_to_become_usable = false; priv->previousRequest = 0; priv->device_overrides = false; - priv->machine_state = IOPMfinished; + priv->machine_state = kIOPM_Finished; priv->timerEventSrc = NULL; priv->clampTimerEventSrc = NULL; pm_vars->PMworkloop = NULL; @@ -350,7 +357,8 @@ void IOService::PMfree ( void ) } priv->interestedDrivers->release(); priv->changeList->release(); - priv->release(); // remove instance variables + // remove instance variables + priv->release(); } if ( pm_vars ) { @@ -368,7 +376,8 @@ void IOService::PMfree ( void ) pm_vars->responseFlags->release(); pm_vars->responseFlags = NULL; } - pm_vars->release(); // remove instance variables + // remove instance variables + pm_vars->release(); } } @@ -386,16 +395,22 @@ void IOService::PMstop ( void ) IOService * theChild; IOService * theParent; - removeProperty(prot_key); // remove the properties + // remove the properties + removeProperty(prot_key); removeProperty(priv_key); - iter = getParentIterator(gIOPowerPlane); // detach parents + // detach parents + iter = getParentIterator(gIOPowerPlane); - if ( iter ) { - while ( (next = iter->getNextObject()) ) { - if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + if ( iter ) + { + while ( (next = iter->getNextObject()) ) + { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) + { theParent = (IOService *)connection->copyParentEntry(gIOPowerPlane); - if ( theParent ) { + if ( theParent ) + { theParent->removePowerChild(connection); theParent->release(); } @@ -403,21 +418,31 @@ void IOService::PMstop ( void ) } iter->release(); } - detachAbove( gIOPowerPlane ); // detach IOConnections + + // detach IOConnections + detachAbove( gIOPowerPlane ); - pm_vars->parentsKnowState = false; // no more power state changes + // no more power state changes + pm_vars->parentsKnowState = false; - iter = getChildIterator(gIOPowerPlane); // detach children + // detach children + iter = getChildIterator(gIOPowerPlane); 
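For context on how a driver reaches the PMinit()/PMstop() pair above: the usual adoption sequence ties them to joinPMtree() and registerPowerDriver(). A minimal sketch, assuming a hypothetical MyDriver with an illustrative two-entry state table (not part of this patch):

    // Hypothetical two-state table: entry 0 is "off", entry 1 is "on".
    // IOPMPowerState fields: version, capabilityFlags, outputPowerCharacter,
    // inputPowerRequirement, then eight power/timing fields left zero here.
    static IOPMPowerState ourPowerStates[ 2 ] = {
        { 1, 0,                0,           0,           0, 0, 0, 0, 0, 0, 0, 0 },
        { 1, IOPMDeviceUsable, IOPMPowerOn, IOPMPowerOn, 0, 0, 0, 0, 0, 0, 0, 0 }
    };

    bool MyDriver::start( IOService * provider )
    {
        if ( !super::start(provider) )
            return false;
        PMinit();                                        // sets up pm_vars/priv as shown above
        provider->joinPMtree( this );                    // attach below the provider in gIOPowerPlane
        registerPowerDriver( this, ourPowerStates, 2 );  // we are our own controlling driver
        return true;
    }

    void MyDriver::stop( IOService * provider )
    {
        PMstop();                                        // detaches parents and children as shown above
        super::stop( provider );
    }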
- if ( iter ) { - while ( (next = iter->getNextObject()) ) { - if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + if ( iter ) + { + while ( (next = iter->getNextObject()) ) + { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) + { theChild = ((IOService *)(connection->copyChildEntry(gIOPowerPlane))); - if ( theChild ) { - connection->detachFromChild(theChild,gIOPowerPlane); // detach nub from child + if ( theChild ) + { + // detach nub from child + connection->detachFromChild(theChild,gIOPowerPlane); theChild->release(); } - detachFromChild(connection,gIOPowerPlane); // detach us from nub + // detach us from nub + detachFromChild(connection,gIOPowerPlane); } } iter->release(); @@ -499,12 +524,19 @@ IOReturn IOService::setPowerParent ( IOPowerConnection * theParent, bool stateKn IOLockLock(pm_vars->parentLock); - if ( stateKnown && ((pm_vars->PMworkloop == NULL) || (pm_vars->PMcommandGate == NULL)) ) { - getPMworkloop(); // we have a path to the root - if ( pm_vars->PMworkloop != NULL ) { // find out the workloop - if ( pm_vars->PMcommandGate == NULL ) { // and make our command gate + if ( stateKnown && ((pm_vars->PMworkloop == NULL) || (pm_vars->PMcommandGate == NULL)) ) + { + // we have a path to the root + // find out the workloop + getPMworkloop(); + if ( pm_vars->PMworkloop != NULL ) + { + if ( pm_vars->PMcommandGate == NULL ) + { + // and make our command gate pm_vars->PMcommandGate = IOCommandGate::commandGate((OSObject *)this); - if ( pm_vars->PMcommandGate != NULL ) { + if ( pm_vars->PMcommandGate != NULL ) + { pm_vars->PMworkloop->addEventSource(pm_vars->PMcommandGate); } } @@ -513,17 +545,22 @@ IOReturn IOService::setPowerParent ( IOPowerConnection * theParent, bool stateKn IOLockUnlock(pm_vars->parentLock); - theParent->setParentCurrentPowerFlags(currentState); // set our connection data + // set our connection data + theParent->setParentCurrentPowerFlags(currentState); theParent->setParentKnowsState(stateKnown); - pm_vars->parentsKnowState = true; // combine parent knowledge + // combine parent knowledge + pm_vars->parentsKnowState = true; pm_vars->parentsCurrentPowerFlags = 0; iter = getParentIterator(gIOPowerPlane); - if ( iter ) { - while ( (next = iter->getNextObject()) ) { - if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + if ( iter ) + { + while ( (next = iter->getNextObject()) ) + { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) + { pm_vars->parentsKnowState &= connection->parentKnowsState(); pm_vars->parentsCurrentPowerFlags |= connection->parentCurrentPowerFlags(); } @@ -532,14 +569,17 @@ IOReturn IOService::setPowerParent ( IOPowerConnection * theParent, bool stateKn } if ( (pm_vars->theControllingDriver != NULL) && - (pm_vars->parentsKnowState) ) { + (pm_vars->parentsKnowState) ) + { pm_vars->maxCapability = pm_vars->theControllingDriver->maxCapabilityForDomainState(pm_vars->parentsCurrentPowerFlags); - tempDesire = priv->deviceDesire; // initially change into the state we are already in + // initially change into the state we are already in + tempDesire = priv->deviceDesire; priv->deviceDesire = pm_vars->theControllingDriver->initialPowerStateForDomainState(pm_vars->parentsCurrentPowerFlags); computeDesiredState(); priv->previousRequest = 0xffffffff; changeState(); - priv->deviceDesire = tempDesire; // put this back like before + // put this back like before + priv->deviceDesire = tempDesire; } return IOPMNoErr; @@ -553,45 +593,60 @@ IOReturn IOService::setPowerParent ( IOPowerConnection * theParent, bool 
stateKn //********************************************************************************* IOReturn IOService::addPowerChild ( IOService * theChild ) { - IOPowerConnection * connection; - unsigned int i; + IOPowerConnection *connection; + unsigned int i; - if ( ! initialized ) { - return IOPMNotYetInitialized; // we're not a power-managed IOService + if ( ! initialized ) + { + // we're not a power-managed IOService + return IOPMNotYetInitialized; } pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAddChild,0,0); - connection = new IOPowerConnection; // make a nub + // Put ourselves into a usable power state. + // We must be in an "on" power state, as our children must be able to access + // our hardware after joining the power plane. + makeUsable(); + + // make a nub + connection = new IOPowerConnection; connection->init(); connection->start(this); connection->setAwaitingAck(false); - - attachToChild( connection,gIOPowerPlane ); // connect it up + + // connect it up + attachToChild( connection,gIOPowerPlane ); connection->attachToChild( theChild,gIOPowerPlane ); connection->release(); - if ( (pm_vars->theControllingDriver == NULL) || // tell it the current state of the power domain - ! (inPlane(gIOPowerPlane)) || - ! (pm_vars->parentsKnowState) ) { + // tell it the current state of the power domain + if ( (pm_vars->theControllingDriver == NULL) || + ! (inPlane(gIOPowerPlane)) || + ! (pm_vars->parentsKnowState) ) + { theChild->setPowerParent(connection,false,0); - if ( inPlane(gIOPowerPlane) ) { + if ( inPlane(gIOPowerPlane) ) + { for (i = 0; i <= kMaxType; i++) { - if ( pm_vars->current_aggressiveness_valid[i] ) { + if ( pm_vars->current_aggressiveness_valid[i] ) + { theChild->setAggressiveness (i, pm_vars->current_aggressiveness_values[i]); } } } - } - else { + } else { theChild->setPowerParent(connection,true,pm_vars->thePowerStates[pm_vars->myCurrentState].outputPowerCharacter); - for (i = 0; i <= kMaxType; i++) { - if ( pm_vars->current_aggressiveness_valid[i] ) { + for (i = 0; i <= kMaxType; i++) + { + if ( pm_vars->current_aggressiveness_valid[i] ) + { theChild->setAggressiveness (i, pm_vars->current_aggressiveness_values[i]); } } - add_child_to_active_change(connection); // catch it up if change is in progress + // catch it up if change is in progress + add_child_to_active_change(connection); } return IOPMNoErr; @@ -604,34 +659,46 @@ IOReturn IOService::addPowerChild ( IOService * theChild ) //********************************************************************************* IOReturn IOService::removePowerChild ( IOPowerConnection * theNub ) { - IORegistryEntry * theChild; + IORegistryEntry *theChild; + OSIterator *iter; pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogRemoveChild,0,0); theNub->retain(); - theChild = theNub->copyChildEntry(gIOPowerPlane); // detach nub from child - if ( theChild ) { + // detach nub from child + theChild = theNub->copyChildEntry(gIOPowerPlane); + if ( theChild ) + { theNub->detachFromChild(theChild, gIOPowerPlane); theChild->release(); } - detachFromChild(theNub,gIOPowerPlane); // detach from the nub + // detach from the nub + detachFromChild(theNub,gIOPowerPlane); - if ( theNub->getAwaitingAck() ) { // are we awaiting an ack from this child? - theNub->setAwaitingAck(false); // yes, pretend we got one - if ( acquire_lock() ) { - if (priv->head_note_pendingAcks != 0 ) { - priv->head_note_pendingAcks -= 1; // that's one fewer ack to worry about - if ( priv->head_note_pendingAcks == 0 ) { // is that the last? 
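A note on the registerPowerDriver() path that follows: once registered, the controlling driver's setPowerState() may acknowledge synchronously by return code, or defer and ack later through acknowledgeSetPowerState(). A sketch under the same hypothetical-driver assumptions as above (kMyOnState and both helpers are illustrative):

    // Hypothetical controlling driver changing hardware state.
    IOReturn MyDriver::setPowerState( unsigned long powerStateOrdinal,
                                      IOService * whatDevice )
    {
        if ( powerStateOrdinal == kMyOnState ) {
            beginAsyncPowerUp();         // illustrative helper; its completion path
                                         // calls acknowledgeSetPowerState()
            return 10 * 1000 * 1000;     // promise an ack within 10 seconds (microseconds)
        }
        powerDownHardware();             // illustrative synchronous helper
        return IOPMAckImplied;           // done now; no separate ack needed
    }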
- stop_ack_timer(); // yes, stop the timer + // are we awaiting an ack from this child? + if ( theNub->getAwaitingAck() ) + { + // yes, pretend we got one + theNub->setAwaitingAck(false); + if ( acquire_lock() ) + { + if (priv->head_note_pendingAcks != 0 ) + { + // that's one fewer ack to worry about + priv->head_note_pendingAcks -= 1; + // is that the last? + if ( priv->head_note_pendingAcks == 0 ) + { + // yes, stop the timer + stop_ack_timer(); IOUnlock(priv->our_lock); - all_acked(); // and now we can continue our power change - } - else { + // and now we can continue our power change + all_acked(); + } else { IOUnlock(priv->our_lock); } - } - else { + } else { IOUnlock(priv->our_lock); } } @@ -639,18 +706,35 @@ IOReturn IOService::removePowerChild ( IOPowerConnection * theNub ) theNub->release(); - if ( (pm_vars->theControllingDriver == NULL) || // if not fully initialized - ! (inPlane(gIOPowerPlane)) || - ! (pm_vars->parentsKnowState) ) { - return IOPMNoErr; // we can do no more + // if not fully initialized + if ( (pm_vars->theControllingDriver == NULL) || + !(inPlane(gIOPowerPlane)) || + !(pm_vars->parentsKnowState) ) + { + // we can do no more + return IOPMNoErr; } // Perhaps the departing child was holding up idle or system sleep - we need to re-evaluate our // children's requests. Clear and re-calculate our kIOPMChildClamp and kIOPMChildClamp2 bits. rebuildChildClampBits(); - - computeDesiredState(); // this may be different now - changeState(); // change state if we can now tolerate lower power + + if(!priv->clampOn) + { + // count children + iter = getChildIterator(gIOPowerPlane); + if ( !iter || !iter->getNextObject() ) + { + // paired to match the makeUsable() call in addPowerChild() + changePowerStateToPriv(0); + } + if(iter) iter->release(); + } + + // this may be different now + computeDesiredState(); + // change state if we can now tolerate lower power + changeState(); return IOPMNoErr; } @@ -667,25 +751,33 @@ IOReturn IOService::removePowerChild ( IOPowerConnection * theNub ) IOReturn IOService::registerPowerDriver ( IOService * controllingDriver, IOPMPowerState* powerStates, unsigned long numberOfStates ) { - unsigned long i; - unsigned long tempDesire; - - if ( (numberOfStates > pm_vars->theNumberOfPowerStates) && (numberOfStates > 1) ) { - if ( priv->changeList->currentChange() == -1 ) { - if ( controllingDriver != NULL ) { - if ( numberOfStates <= IOPMMaxPowerStates ) { - switch ( powerStates[0].version ) { + unsigned long i; + unsigned long tempDesire; + + if ( (numberOfStates > pm_vars->theNumberOfPowerStates) + && (numberOfStates > 1) ) + { + if ( priv->changeList->currentChange() == -1 ) + { + if ( controllingDriver != NULL ) + { + if ( numberOfStates <= IOPMMaxPowerStates ) + { + switch ( powerStates[0].version ) + { case 1: pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogControllingDriver, - (unsigned long)numberOfStates, (unsigned long)powerStates[0].version); - for ( i = 0; i < numberOfStates; i++ ) { + (unsigned long)numberOfStates, (unsigned long)powerStates[0].version); + for ( i = 0; i < numberOfStates; i++ ) + { pm_vars->thePowerStates[i] = powerStates[i]; } break; case 2: pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogControllingDriver, - (unsigned long) numberOfStates,(unsigned long) powerStates[0].version); - for ( i = 0; i < numberOfStates; i++ ) { + (unsigned long) numberOfStates,(unsigned long) powerStates[0].version); + for ( i = 0; i < numberOfStates; i++ ) + { pm_vars->thePowerStates[i].version = powerStates[i].version; 
pm_vars->thePowerStates[i].capabilityFlags = powerStates[i].capabilityFlags; pm_vars->thePowerStates[i].outputPowerCharacter = powerStates[i].outputPowerCharacter; @@ -706,15 +798,18 @@ IOReturn IOService::registerPowerDriver ( IOService * controllingDriver, IOPMPow return IOPMNoErr; } - pm_vars->myCharacterFlags = 0; // make a mask of all the character bits we know about + // make a mask of all the character bits we know about + pm_vars->myCharacterFlags = 0; for ( i = 0; i < numberOfStates; i++ ) { pm_vars->myCharacterFlags |= pm_vars->thePowerStates[i].outputPowerCharacter; } pm_vars->theNumberOfPowerStates = numberOfStates; pm_vars->theControllingDriver = controllingDriver; - if ( priv->interestedDrivers->findItem(controllingDriver) == NULL ) { // register it as interested - registerInterestedDriver (controllingDriver ); // unless already done + if ( priv->interestedDrivers->findItem(controllingDriver) == NULL ) + { + // register it as interested, unless already done + registerInterestedDriver (controllingDriver ); } if ( priv->need_to_become_usable ) { priv->need_to_become_usable = false; @@ -724,18 +819,18 @@ IOReturn IOService::registerPowerDriver ( IOService * controllingDriver, IOPMPow if ( inPlane(gIOPowerPlane) && (pm_vars->parentsKnowState) ) { pm_vars->maxCapability = pm_vars->theControllingDriver->maxCapabilityForDomainState(pm_vars->parentsCurrentPowerFlags); - tempDesire = priv->deviceDesire; // initially change into the state we are already in + // initially change into the state we are already in + tempDesire = priv->deviceDesire; priv->deviceDesire = pm_vars->theControllingDriver->initialPowerStateForDomainState(pm_vars->parentsCurrentPowerFlags); computeDesiredState(); changeState(); - priv->deviceDesire = tempDesire; // put this back like before + // put this back like before + priv->deviceDesire = tempDesire; } - } - else { + } else { pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogControllingDriverErr2,(unsigned long)numberOfStates,0); } - } - else { + } else { pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogControllingDriverErr4,0,0); } } @@ -757,40 +852,50 @@ IOReturn IOService::registerPowerDriver ( IOService * controllingDriver, IOPMPow IOPMPowerFlags IOService::registerInterestedDriver ( IOService * theDriver ) { - IOPMinformee * newInformee; - IOPMPowerFlags futureCapability; + IOPMinformee *newInformee; + IOPMPowerFlags futureCapability; if (theDriver == NULL ) { return 0; } - newInformee = new IOPMinformee; // make new driver node + // make new driver node + newInformee = new IOPMinformee; newInformee->initialize(theDriver); - priv->interestedDrivers->addToList(newInformee); // add it to list of drivers + // add it to list of drivers + priv->interestedDrivers->addToList(newInformee); if ( (pm_vars->theControllingDriver == NULL) || - ! (inPlane(gIOPowerPlane)) || - ! (pm_vars->parentsKnowState) ) { + !(inPlane(gIOPowerPlane)) || + !(pm_vars->parentsKnowState) ) + { + // can't tell it a state yet pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogInterestedDriver,IOPMNotPowerManaged,0); - return IOPMNotPowerManaged; // can't tell it a state yet + return IOPMNotPowerManaged; } - switch (priv->machine_state) { // can we notify new driver of a change in progress? - case IOPMour_prechange_1: - case IOPMour_prechange_4: - case IOPMparent_down_4: - case IOPMparent_down_6: - case IOPMparent_up_0: - case IOPMparent_up_6: - futureCapability = priv->head_note_capabilityFlags; // yes, remember what we tell it + // can we notify new driver of a change in progress? 
+ switch (priv->machine_state) { + case kIOPM_OurChangeSetPowerState: + case kIOPM_OurChangeFinish: + case kIOPM_ParentDownSetPowerState_Delayed: + case kIOPM_ParentDownAcknowledgeChange_Delayed: + case kIOPM_ParentUpSetPowerState_Delayed: + case kIOPM_ParentUpAcknowledgePowerChange_Delayed: + // yes, remember what we tell it + futureCapability = priv->head_note_capabilityFlags; pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogInterestedDriver,(unsigned long)futureCapability,1); - add_driver_to_active_change(newInformee); // notify it - return futureCapability; // and return the same thing + // notify it + add_driver_to_active_change(newInformee); + // and return the same thing + return futureCapability; } pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogInterestedDriver, - (unsigned long) pm_vars->thePowerStates[pm_vars->myCurrentState].capabilityFlags,2); - return pm_vars->thePowerStates[pm_vars->myCurrentState].capabilityFlags; // no, return current capability + (unsigned long) pm_vars->thePowerStates[pm_vars->myCurrentState].capabilityFlags,2); + + // no, return current capability + return pm_vars->thePowerStates[pm_vars->myCurrentState].capabilityFlags; } @@ -802,7 +907,8 @@ IOReturn IOService::deRegisterInterestedDriver ( IOService * theDriver ) { pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogRemoveDriver,0,0); - priv->interestedDrivers->removeFromList(theDriver); // remove the departing driver + // remove the departing driver + priv->interestedDrivers->removeFromList(theDriver); return IOPMNoErr; } @@ -824,78 +930,100 @@ IOReturn IOService::deRegisterInterestedDriver ( IOService * theDriver ) IOReturn IOService::acknowledgePowerChange ( IOService * whichObject ) { - IOPMinformee * ackingObject; - unsigned long childPower = kIOPMUnknown; - IOService * theChild; - - ackingObject = priv->interestedDrivers->findItem(whichObject); // one of our interested drivers? - if ( ackingObject == NULL ) { - if ( ! isChild(whichObject,gIOPowerPlane) ) { - pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr1,0,0); - kprintf("errant driver: %s\n",whichObject->getName()); - return IOPMNoErr; // no, just return - } - else { - pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogChildAcknowledge,priv->head_note_pendingAcks,0); - } - } - else { - pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogDriverAcknowledge,priv->head_note_pendingAcks,0); - } - - if (! acquire_lock() ) { - return IOPMNoErr; - } + IOPMinformee *ackingObject; + unsigned long childPower = kIOPMUnknown; + IOService *theChild; - if (priv->head_note_pendingAcks != 0 ) { // yes, make sure we're expecting acks - if ( ackingObject != NULL ) { // it's an interested driver - if ( ackingObject->timer != 0 ) { // make sure we're expecting this ack - ackingObject->timer = 0; // mark it acked - priv->head_note_pendingAcks -= 1; // that's one fewer to worry about - if ( priv->head_note_pendingAcks == 0 ) { // is that the last? - stop_ack_timer(); // yes, stop the timer - IOUnlock(priv->our_lock); - all_acked(); // and now we can continue - return IOPMNoErr; - } - } - else { - pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr2,0,0); // this driver has already acked - kprintf("errant driver: %s\n",whichObject->getName()); - } - } - else { // it's a child - if ( ((IOPowerConnection *)whichObject)->getAwaitingAck() ) { // make sure we're expecting this ack - priv->head_note_pendingAcks -= 1; // that's one fewer to worry about + // one of our interested drivers? 
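The registerInterestedDriver()/acknowledgePowerChange() handshake seen here looks the same from the client side: an interested driver may ack by return code from its powerStateWillChangeTo()/powerStateDidChangeTo() notification, or return a timeout and ack later. A sketch (hypothetical MyClient and helpers, not from this patch):

    // Hypothetical interested driver, registered via registerInterestedDriver().
    IOReturn MyClient::powerStateWillChangeTo( IOPMPowerFlags capabilities,
                                               unsigned long stateNumber,
                                               IOService * whatDevice )
    {
        if ( canQuiesceImmediately() )           // illustrative helper
            return IOPMAckImplied;               // acked by return code
        beginQuiesce( whatDevice );              // illustrative; on completion it calls
                                                 // whatDevice->acknowledgePowerChange( this )
        return 5 * 1000 * 1000;                  // otherwise, promise an ack within 5 seconds
    }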
+ ackingObject = priv->interestedDrivers->findItem(whichObject); + if ( ackingObject == NULL ) + { + if ( ! isChild(whichObject,gIOPowerPlane) ) + { + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr1,0,0); + //kprintf("errant driver: %s\n",whichObject->getName()); + // no, just return + return IOPMNoErr; + } else { + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogChildAcknowledge,priv->head_note_pendingAcks,0); + } + } else { + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogDriverAcknowledge,priv->head_note_pendingAcks,0); + } + + if (! acquire_lock() ) + { + return IOPMNoErr; + } + + if (priv->head_note_pendingAcks != 0 ) + { + // yes, make sure we're expecting acks + if ( ackingObject != NULL ) + { + // it's an interested driver + // make sure we're expecting this ack + if ( ackingObject->timer != 0 ) + { + // mark it acked + ackingObject->timer = 0; + // that's one fewer to worry about + priv->head_note_pendingAcks -= 1; + // is that the last? + if ( priv->head_note_pendingAcks == 0 ) + { + // yes, stop the timer + stop_ack_timer(); + IOUnlock(priv->our_lock); + // and now we can continue + all_acked(); + return IOPMNoErr; + } + } else { + // this driver has already acked + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr2,0,0); + //kprintf("errant driver: %s\n",whichObject->getName()); + } + } else { + // it's a child + // make sure we're expecting this ack + if ( ((IOPowerConnection *)whichObject)->getAwaitingAck() ) + { + // that's one fewer to worry about + priv->head_note_pendingAcks -= 1; ((IOPowerConnection *)whichObject)->setAwaitingAck(false); theChild = (IOService *)whichObject->copyChildEntry(gIOPowerPlane); - if ( theChild ) { + if ( theChild ) + { childPower = theChild->currentPowerConsumption(); theChild->release(); } - if ( childPower == kIOPMUnknown ) { + if ( childPower == kIOPMUnknown ) + { pm_vars->thePowerStates[priv->head_note_state].staticPower = kIOPMUnknown; - } - else { - if ( pm_vars->thePowerStates[priv->head_note_state].staticPower != kIOPMUnknown ) { + } else { + if ( pm_vars->thePowerStates[priv->head_note_state].staticPower != kIOPMUnknown ) + { pm_vars->thePowerStates[priv->head_note_state].staticPower += childPower; } } - if ( priv->head_note_pendingAcks == 0 ) { // is that the last? - stop_ack_timer(); // yes, stop the timer + // is that the last? + if ( priv->head_note_pendingAcks == 0 ) { + // yes, stop the timer + stop_ack_timer(); IOUnlock(priv->our_lock); - all_acked(); // and now we can continue + // and now we can continue + all_acked(); return IOPMNoErr; } } - } + } + } else { + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr3,0,0); // not expecting anybody to ack + //kprintf("errant driver: %s\n",whichObject->getName()); } - else { - pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr3,0,0); // not expecting anybody to ack - kprintf("errant driver: %s\n",whichObject->getName()); - } - IOUnlock(priv->our_lock); - return IOPMNoErr; + IOUnlock(priv->our_lock); + return IOPMNoErr; } //********************************************************************************* @@ -908,26 +1036,31 @@ IOReturn IOService::acknowledgePowerChange ( IOService * whichObject ) IOReturn IOService::acknowledgeSetPowerState ( void ) { - if (! acquire_lock() ) { + if (! 
acquire_lock() ) + { return IOPMNoErr; } ioSPMTrace(IOPOWER_ACK, * (int *) this); - if ( priv->driver_timer == -1 ) { - priv->driver_timer = 0; // driver is acking instead of using return code - } - else { - if ( priv->driver_timer > 0 ) { // are we expecting this? - stop_ack_timer(); // yes, stop the timer + if ( priv->driver_timer == -1 ) + { + // driver is acking instead of using return code + priv->driver_timer = 0; + } else { + // are we expecting this? + if ( priv->driver_timer > 0 ) + { + // yes, stop the timer + stop_ack_timer(); priv->driver_timer = 0; IOUnlock(priv->our_lock); pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogDriverAcknowledgeSet,0,0); driver_acked(); return IOPMNoErr; - } - else { - pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr4,0,0); // no + } else { + // not expecting this + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr4,0,0); } } IOUnlock(priv->our_lock); @@ -946,14 +1079,14 @@ IOReturn IOService::acknowledgeSetPowerState ( void ) void IOService::driver_acked ( void ) { switch (priv->machine_state) { - case IOPMour_prechange_2: - our_prechange_2(); + case kIOPM_OurChangeWaitForPowerSettle: + OurChangeWaitForPowerSettle(); break; - case IOPMparent_down_5: - parent_down_5(); + case kIOPM_ParentDownWaitForPowerSettle_Delayed: + ParentDownWaitForPowerSettle_Delayed(); break; - case IOPMparent_up_4: - parent_up_4(); + case kIOPM_ParentUpWaitForSettleTime_Delayed: + ParentUpWaitForSettleTime_Delayed(); break; } } @@ -971,26 +1104,35 @@ void IOService::driver_acked ( void ) IOReturn IOService::powerDomainWillChangeTo ( IOPMPowerFlags newPowerStateFlags, IOPowerConnection * whichParent ) { - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; - unsigned long newStateNumber; - IOPMPowerFlags combinedPowerFlags; + OSIterator *iter; + OSObject *next; + IOPowerConnection *connection; + unsigned long newStateNumber; + IOPMPowerFlags combinedPowerFlags; pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogWillChange,(unsigned long)newPowerStateFlags,0); - if ( ! inPlane(gIOPowerPlane) ) { - return IOPMAckImplied; // somebody goofed + if ( ! 
inPlane(gIOPowerPlane) ) + { + // somebody goofed + return IOPMAckImplied; } IOLockLock(pm_vars->parentLock); - if ( (pm_vars->PMworkloop == NULL) || (pm_vars->PMcommandGate == NULL) ) { - getPMworkloop(); // we have a path to the root, - if ( pm_vars->PMworkloop != NULL ) { // so find out the workloop - if ( pm_vars->PMcommandGate == NULL ) { // and make our command gate + if ( (pm_vars->PMworkloop == NULL) || (pm_vars->PMcommandGate == NULL) ) + { + // we have a path to the root + getPMworkloop(); + // so find out the workloop + if ( pm_vars->PMworkloop != NULL ) + { + // and make our command gate + if ( pm_vars->PMcommandGate == NULL ) + { pm_vars->PMcommandGate = IOCommandGate::commandGate((OSObject *)this); - if ( pm_vars->PMcommandGate != NULL ) { + if ( pm_vars->PMcommandGate != NULL ) + { pm_vars->PMworkloop->addEventSource(pm_vars->PMcommandGate); } } @@ -999,17 +1141,21 @@ IOReturn IOService::powerDomainWillChangeTo ( IOPMPowerFlags newPowerStateFlags, IOLockUnlock(pm_vars->parentLock); - combinedPowerFlags = 0; // combine parents' power states + // combine parents' power states + // to determine our maximum state within the new power domain + combinedPowerFlags = 0; iter = getParentIterator(gIOPowerPlane); - if ( iter ) { - while ( (next = iter->getNextObject()) ) { - if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + if ( iter ) + { + while ( (next = iter->getNextObject()) ) + { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) + { if ( connection == whichParent ){ combinedPowerFlags |= newPowerStateFlags; - } - else { + } else { combinedPowerFlags |= connection->parentCurrentPowerFlags(); } } @@ -1017,12 +1163,15 @@ IOReturn IOService::powerDomainWillChangeTo ( IOPMPowerFlags newPowerStateFlags, iter->release(); } - if ( pm_vars->theControllingDriver == NULL ) { // we can't take any more action + if ( pm_vars->theControllingDriver == NULL ) + { + // we can't take any more action return IOPMAckImplied; } newStateNumber = pm_vars->theControllingDriver->maxCapabilityForDomainState(combinedPowerFlags); + // make the change return enqueuePowerChange(IOPMParentInitiated | IOPMDomainWillChange, - newStateNumber,combinedPowerFlags,whichParent,newPowerStateFlags); //make the change + newStateNumber,combinedPowerFlags,whichParent,newPowerStateFlags); } @@ -1049,8 +1198,9 @@ IOReturn IOService::powerDomainDidChangeTo ( IOPMPowerFlags newPowerStateFlags, } newStateNumber = pm_vars->theControllingDriver->maxCapabilityForDomainState(pm_vars->parentsCurrentPowerFlags); + // tell interested parties about it return enqueuePowerChange(IOPMParentInitiated | IOPMDomainDidChange, - newStateNumber,pm_vars->parentsCurrentPowerFlags,whichParent,0); // tell interested parties about it + newStateNumber,pm_vars->parentsCurrentPowerFlags,whichParent,0); } @@ -1063,23 +1213,28 @@ IOReturn IOService::powerDomainDidChangeTo ( IOPMPowerFlags newPowerStateFlags, void IOService::setParentInfo ( IOPMPowerFlags newPowerStateFlags, IOPowerConnection * whichParent ) { - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; + OSIterator *iter; + OSObject *next; + IOPowerConnection *connection; - whichParent->setParentCurrentPowerFlags(newPowerStateFlags); // set our connection data + // set our connection data + whichParent->setParentCurrentPowerFlags(newPowerStateFlags); whichParent->setParentKnowsState(true); IOLockLock(pm_vars->parentLock); - pm_vars->parentsCurrentPowerFlags = 0; // recompute our parent info + // recompute our parent info + 
pm_vars->parentsCurrentPowerFlags = 0; pm_vars->parentsKnowState = true; iter = getParentIterator(gIOPowerPlane); - if ( iter ) { - while ( (next = iter->getNextObject()) ) { - if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + if ( iter ) + { + while ( (next = iter->getNextObject()) ) + { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) + { pm_vars->parentsKnowState &= connection->parentKnowsState(); pm_vars->parentsCurrentPowerFlags |= connection->parentCurrentPowerFlags(); } @@ -1101,16 +1256,17 @@ void IOService::setParentInfo ( IOPMPowerFlags newPowerStateFlags, IOPowerConnec void IOService::rebuildChildClampBits(void) { - unsigned long i; - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; + unsigned long i; + OSIterator *iter; + OSObject *next; + IOPowerConnection *connection; // A child's desires have changed. We need to rebuild the child-clamp bits in our // power state array. Start by clearing the bits in each power state. - for ( i = 0; i < pm_vars->theNumberOfPowerStates; i++ ) { + for ( i = 0; i < pm_vars->theNumberOfPowerStates; i++ ) + { pm_vars->thePowerStates[i].capabilityFlags &= ~(kIOPMChildClamp | kIOPMChildClamp2); } @@ -1146,69 +1302,82 @@ void IOService::rebuildChildClampBits(void) //********************************************************************************* IOReturn IOService::requestPowerDomainState ( IOPMPowerFlags desiredState, IOPowerConnection * whichChild, unsigned long specification ) { - unsigned long i; - unsigned long computedState; - unsigned long theDesiredState = desiredState & ~(kIOPMPreventIdleSleep | kIOPMPreventSystemSleep); - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; + unsigned long i; + unsigned long computedState; + unsigned long theDesiredState = desiredState & ~(kIOPMPreventIdleSleep | kIOPMPreventSystemSleep); + OSIterator *iter; + OSObject *next; + IOPowerConnection *connection; pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogRequestDomain, (unsigned long)desiredState,(unsigned long)specification); - if ( pm_vars->theControllingDriver == NULL) { + if ( pm_vars->theControllingDriver == NULL) + { return IOPMNotYetInitialized; } switch (specification) { case IOPMLowestState: i = 0; - while ( i < pm_vars->theNumberOfPowerStates ) { - if ( ( pm_vars->thePowerStates[i].outputPowerCharacter & theDesiredState) == (theDesiredState & pm_vars->myCharacterFlags) ) { + while ( i < pm_vars->theNumberOfPowerStates ) + { + if ( ( pm_vars->thePowerStates[i].outputPowerCharacter & theDesiredState) == (theDesiredState & pm_vars->myCharacterFlags) ) + { break; } i++; } - if ( i >= pm_vars->theNumberOfPowerStates ) { + if ( i >= pm_vars->theNumberOfPowerStates ) + { return IOPMNoSuchState; - } + } break; case IOPMNextLowerState: i = pm_vars->myCurrentState - 1; - while ( i >= 0 ) { - if ( ( pm_vars->thePowerStates[i].outputPowerCharacter & theDesiredState) == (theDesiredState & pm_vars->myCharacterFlags) ) { + while ( i >= 0 ) + { + if ( ( pm_vars->thePowerStates[i].outputPowerCharacter & theDesiredState) == (theDesiredState & pm_vars->myCharacterFlags) ) + { break; } i--; } - if ( i < 0 ) { + if ( i < 0 ) + { return IOPMNoSuchState; } break; case IOPMHighestState: i = pm_vars->theNumberOfPowerStates; - while ( i >= 0 ) { + while ( i >= 0 ) + { i--; - if ( ( pm_vars->thePowerStates[i].outputPowerCharacter & theDesiredState) == (theDesiredState & 
pm_vars->myCharacterFlags) ) + { break; } } - if ( i < 0 ) { + if ( i < 0 ) + { return IOPMNoSuchState; } break; case IOPMNextHigherState: i = pm_vars->myCurrentState + 1; - while ( i < pm_vars->theNumberOfPowerStates ) { - if ( ( pm_vars->thePowerStates[i].outputPowerCharacter & theDesiredState) == (theDesiredState & pm_vars->myCharacterFlags) ) { + while ( i < pm_vars->theNumberOfPowerStates ) + { + if ( ( pm_vars->thePowerStates[i].outputPowerCharacter & theDesiredState) == (theDesiredState & pm_vars->myCharacterFlags) ) + { break; } - i++; + i++; } - if ( i == pm_vars->theNumberOfPowerStates ) { + if ( i == pm_vars->theNumberOfPowerStates ) + { return IOPMNoSuchState; } break; @@ -1225,10 +1394,14 @@ IOReturn IOService::requestPowerDomainState ( IOPMPowerFlags desiredState, IOPow // the computed state as this child's desire. iter = getChildIterator(gIOPowerPlane); - if ( iter ) { - while ( (next = iter->getNextObject()) ) { - if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { - if ( connection == whichChild ) { + if ( iter ) + { + while ( (next = iter->getNextObject()) ) + { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) + { + if ( connection == whichChild ) + { connection->setDesiredDomainState(computedState); connection->setPreventIdleSleepFlag(desiredState & kIOPMPreventIdleSleep); connection->setPreventSystemSleepFlag(desiredState & kIOPMPreventSystemSleep); @@ -1245,17 +1418,21 @@ IOReturn IOService::requestPowerDomainState ( IOPMPowerFlags desiredState, IOPow IOLockUnlock(pm_vars->childLock); - computeDesiredState(); // this may be different now + // this may be different now + computeDesiredState(); - if ( inPlane(gIOPowerPlane) && - (pm_vars->parentsKnowState) ) { - changeState(); // change state if all children can now tolerate lower power - } + if ( inPlane(gIOPowerPlane) && + (pm_vars->parentsKnowState) ) { + // change state if all children can now tolerate lower power + changeState(); + } - if ( priv->clampOn ) { // are we clamped on, waiting for this child? - priv->clampOn = false; // yes, remove the clamp - changePowerStateToPriv(0); - } + // are we clamped on, waiting for this child? 
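To make the specification cases above concrete: the power-change machinery invokes requestPowerDomainState() on the parent, passing the IOPowerConnection for the asking child plus the character flags the child needs, optionally OR'ed with the sleep-veto bits that get masked off into theDesiredState. Schematically (names illustrative, not a literal call site in this patch):

    // 'connection' is the IOPowerConnection nub between parent and child.
    // Ask for the cheapest parent state whose outputPowerCharacter supplies
    // IOPMPowerOn, and veto idle sleep while the request is outstanding.
    parent->requestPowerDomainState( IOPMPowerOn | kIOPMPreventIdleSleep,
                                     connection,
                                     IOPMLowestState );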
+ if ( priv->clampOn ) { + // yes, remove the clamp + priv->clampOn = false; + changePowerStateToPriv(0); + } return IOPMNoErr; } @@ -1294,13 +1471,15 @@ IOReturn IOService::makeUsable ( void ) { pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogMakeUsable,0,0); - if ( pm_vars->theControllingDriver == NULL ) { + if ( pm_vars->theControllingDriver == NULL ) + { priv->need_to_become_usable = true; return IOPMNoErr; } priv->deviceDesire = pm_vars->theNumberOfPowerStates - 1; computeDesiredState(); - if ( inPlane(gIOPowerPlane) && (pm_vars->parentsKnowState) ) { + if ( inPlane(gIOPowerPlane) && (pm_vars->parentsKnowState) ) + { return changeState(); } return IOPMNoErr; @@ -1314,10 +1493,10 @@ IOReturn IOService::makeUsable ( void ) IOPMPowerFlags IOService::currentCapability ( void ) { - if ( pm_vars->theControllingDriver == NULL ) { + if ( pm_vars->theControllingDriver == NULL ) + { return 0; - } - else { + } else { return pm_vars->thePowerStates[pm_vars->myCurrentState].capabilityFlags; } } @@ -1335,12 +1514,14 @@ IOReturn IOService::changePowerStateTo ( unsigned long ordinal ) { pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogChangeStateTo,ordinal,0); - if ( ordinal >= pm_vars->theNumberOfPowerStates ) { + if ( ordinal >= pm_vars->theNumberOfPowerStates ) + { return IOPMParameterError; } priv->driverDesire = ordinal; computeDesiredState(); - if ( inPlane(gIOPowerPlane) && (pm_vars->parentsKnowState) ) { + if ( inPlane(gIOPowerPlane) && (pm_vars->parentsKnowState) ) + { return changeState(); } @@ -1359,15 +1540,18 @@ IOReturn IOService::changePowerStateToPriv ( unsigned long ordinal ) { pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogChangeStateToPriv,ordinal,0); - if ( pm_vars->theControllingDriver == NULL) { + if ( pm_vars->theControllingDriver == NULL) + { return IOPMNotYetInitialized; } - if ( ordinal >= pm_vars->theNumberOfPowerStates ) { + if ( ordinal >= pm_vars->theNumberOfPowerStates ) + { return IOPMParameterError; } priv->deviceDesire = ordinal; computeDesiredState(); - if ( inPlane(gIOPowerPlane) && (pm_vars->parentsKnowState) ) { + if ( inPlane(gIOPowerPlane) && (pm_vars->parentsKnowState) ) + { return changeState(); } @@ -1382,20 +1566,24 @@ IOReturn IOService::changePowerStateToPriv ( unsigned long ordinal ) void IOService::computeDesiredState ( void ) { - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; - unsigned long newDesiredState = 0; + OSIterator *iter; + OSObject *next; + IOPowerConnection *connection; + unsigned long newDesiredState = 0; // Compute the maximum of our children's desires, our controlling driver's desire, and the subclass device's desire. - - if ( ! priv->device_overrides ) { + if ( ! 
priv->device_overrides ) + { iter = getChildIterator(gIOPowerPlane); - if ( iter ) { - while ( (next = iter->getNextObject()) ) { - if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { - if ( connection->getDesiredDomainState() > newDesiredState ) { + if ( iter ) + { + while ( (next = iter->getNextObject()) ) + { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) + { + if ( connection->getDesiredDomainState() > newDesiredState ) + { newDesiredState = connection->getDesiredDomainState(); } } @@ -1403,12 +1591,14 @@ void IOService::computeDesiredState ( void ) iter->release(); } - if ( priv->driverDesire > newDesiredState ) { + if ( priv->driverDesire > newDesiredState ) + { newDesiredState = priv->driverDesire; } } - if ( priv->deviceDesire > newDesiredState ) { + if ( priv->deviceDesire > newDesiredState ) + { newDesiredState = priv->deviceDesire; } @@ -1426,10 +1616,13 @@ void IOService::computeDesiredState ( void ) IOReturn IOService::changeState ( void ) { - if ( (pm_vars->theControllingDriver == NULL) || // if not fully initialized - ! (inPlane(gIOPowerPlane)) || - ! (pm_vars->parentsKnowState) ) { - return IOPMNoErr; // we can do no more + // if not fully initialized + if ( (pm_vars->theControllingDriver == NULL) || + !(inPlane(gIOPowerPlane)) || + !(pm_vars->parentsKnowState) ) + { + // we can do no more + return IOPMNoErr; } return enqueuePowerChange(IOPMWeInitiated,priv->ourDesiredPowerState,0,0,0); @@ -1443,10 +1636,12 @@ IOReturn IOService::changeState ( void ) unsigned long IOService::currentPowerConsumption ( void ) { - if ( pm_vars->theControllingDriver == NULL ) { + if ( pm_vars->theControllingDriver == NULL ) + { return kIOPMUnknown; } - if ( pm_vars->thePowerStates[pm_vars->myCurrentState].capabilityFlags & kIOPMStaticPowerValid ) { + if ( pm_vars->thePowerStates[pm_vars->myCurrentState].capabilityFlags & kIOPMStaticPowerValid ) + { return pm_vars->thePowerStates[pm_vars->myCurrentState].staticPower; } return kIOPMUnknown; @@ -1462,27 +1657,40 @@ unsigned long IOService::currentPowerConsumption ( void ) // powered down, it is powered up again. //********************************************************************************* -bool IOService::activityTickle ( unsigned long type, unsigned long stateNumber=0 ) +bool IOService::activityTickle ( unsigned long type, unsigned long stateNumber ) { - AbsoluteTime uptime; + IOPMrootDomain *pmRootDomain; + AbsoluteTime uptime; - if ( type == kIOPMSuperclassPolicy1 ) { - if ( (priv->activityLock == NULL) || - (pm_vars->theControllingDriver == NULL) ) { + if ( type == kIOPMSuperclassPolicy1 ) + { + if ( pm_vars->theControllingDriver == NULL ) + { return true; } + + if( priv->activityLock == NULL ) + { + priv->activityLock = IOLockAlloc(); + } + IOTakeLock(priv->activityLock); priv->device_active = true; clock_get_uptime(&uptime); priv->device_active_timestamp = uptime; - if ( pm_vars->myCurrentState >= stateNumber) { + if ( pm_vars->myCurrentState >= stateNumber) + { IOUnlock(priv->activityLock); return true; } IOUnlock(priv->activityLock); - pm_vars->PMcommandGate->runAction(unIdleDevice,(void *)stateNumber); + + // Transfer execution to the PM workloop + if( (pmRootDomain = getPMRootDomain()) ) + pmRootDomain->unIdleDevice(this, stateNumber); + return false; } return true; @@ -1497,18 +1705,24 @@ bool IOService::activityTickle ( unsigned long type, unsigned long stateNumber=0 IOWorkLoop * IOService::getPMworkloop ( void ) { -IOService * nub; -IOService * parent; + IOService *nub; + IOService *parent; - if ( ! 
inPlane(gIOPowerPlane) ) { + if ( ! inPlane(gIOPowerPlane) ) + { return NULL; } - if ( pm_vars->PMworkloop == NULL ) { // we have no workloop yet + // we have no workloop yet + if ( pm_vars->PMworkloop == NULL ) + { nub = (IOService *)copyParentEntry(gIOPowerPlane); - if ( nub ) { + if ( nub ) + { parent = (IOService *)nub->copyParentEntry(gIOPowerPlane); nub->release(); - if ( parent ) { // ask one of our parents for the workloop + // ask one of our parents for the workloop + if ( parent ) + { pm_vars->PMworkloop = parent->getPMworkloop(); parent->release(); } @@ -1533,21 +1747,27 @@ IOReturn IOService::setIdleTimerPeriod ( unsigned long period ) priv->idle_timer_period = period; - if ( period > 0 ) { - if ( getPMworkloop() == NULL ) { + if ( period > 0 ) + { + if ( getPMworkloop() == NULL ) + { return kIOReturnError; } - // make the timer event - if ( priv->timerEventSrc == NULL ) { + + // make the timer event + if ( priv->timerEventSrc == NULL ) + { priv->timerEventSrc = IOTimerEventSource::timerEventSource(this, PM_idle_timer_expired); - if ( ! priv->timerEventSrc || - ( pm_vars->PMworkloop->addEventSource( priv->timerEventSrc) != kIOReturnSuccess) ) { + if ((!priv->timerEventSrc) || + (pm_vars->PMworkloop->addEventSource(priv->timerEventSrc) != kIOReturnSuccess) ) + { return kIOReturnError; } } - if ( priv->activityLock == NULL ) { + if ( priv->activityLock == NULL ) + { priv->activityLock = IOLockAlloc(); } @@ -1564,29 +1784,27 @@ IOReturn IOService::setIdleTimerPeriod ( unsigned long period ) //********************************************************************************* void IOService::start_PM_idle_timer ( void ) { - AbsoluteTime uptime; - AbsoluteTime delta; - UInt64 delta_ns; - UInt64 delta_secs; - UInt64 delay_secs; + AbsoluteTime uptime; + AbsoluteTime delta; + UInt64 delta_ns; + UInt64 delta_secs; + UInt64 delay_secs; IOLockLock(priv->activityLock); clock_get_uptime(&uptime); - /* Calculate time difference using funky macro from clock.h. - */ + // Calculate time difference using funky macro from clock.h. delta = uptime; SUB_ABSOLUTETIME(&delta, &(priv->device_active_timestamp)); - /* Figure it in seconds. - */ + // Figure it in seconds. absolutetime_to_nanoseconds(delta, &delta_ns); delta_secs = delta_ns / NSEC_PER_SEC; - /* Be paranoid about delta somehow exceeding timer period. - */ - if (delta_secs < priv->idle_timer_period ) { + // Be paranoid about delta somehow exceeding timer period. + if (delta_secs < priv->idle_timer_period ) + { delay_secs = priv->idle_timer_period - delta_secs; } else { delay_secs = priv->idle_timer_period; @@ -1621,19 +1839,24 @@ void PM_idle_timer_expired(OSObject * ourSelves, IOTimerEventSource *) void IOService::PM_idle_timer_expiration ( void ) { - if ( ! initialized ) { - return; // we're unloading + if ( ! initialized ) + { + // we're unloading + return; } - if ( priv->idle_timer_period > 0 ) { + if ( priv->idle_timer_period > 0 ) + { IOTakeLock(priv->activityLock); - if ( priv->device_active ) { + if ( priv->device_active ) + { priv->device_active = false; IOUnlock(priv->activityLock); start_PM_idle_timer(); return; } - if ( pm_vars->myCurrentState > 0 ) { + if ( pm_vars->myCurrentState > 0 ) + { IOUnlock(priv->activityLock); changePowerStateToPriv(pm_vars->myCurrentState - 1); start_PM_idle_timer(); @@ -1645,33 +1868,26 @@ void IOService::PM_idle_timer_expiration ( void ) } - // ********************************************************************************** -// unIdleDevice +// command_received // -// We are behind the command gate. 
This serializes with respect to timer expiration. +// We are un-idling a device due to its activity tickle. This routine runs on the +// PM workloop, and is initiated by IOService::activityTickle. +// We process all activityTickle state requests on the list. // ********************************************************************************** -IOReturn unIdleDevice ( OSObject * theDriver, void * param1, void * param2, void * param3, void * param4 ) +void IOService::command_received ( void *statePtr , void *, void * , void * ) { - ((IOService *)theDriver)->command_received(param1,param2,param3,param4); - return kIOReturnSuccess; -} + unsigned long stateNumber; + stateNumber = (unsigned long)statePtr; -// ********************************************************************************** -// command_received -// -// We are un-idling a device due to its activity tickle. -// ********************************************************************************** -void IOService::command_received ( void * stateNumber, void *, void * , void * ) -{ - if ( ! initialized ) { - return; // we're unloading - } + // If not initialized, we're unloading + if ( ! initialized ) return; - if ( (pm_vars->myCurrentState < (unsigned long)stateNumber) && - (priv->imminentState < (unsigned long)stateNumber) ) { - changePowerStateToPriv((unsigned long)stateNumber); + if ( (pm_vars->myCurrentState < stateNumber) && + (priv->imminentState < stateNumber) ) + { + changePowerStateToPriv(stateNumber); } } @@ -1685,25 +1901,30 @@ void IOService::command_received ( void * stateNumber, void *, void * , void * ) IOReturn IOService::setAggressiveness ( unsigned long type, unsigned long newLevel ) { - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; - IOService * child; + OSIterator *iter; + OSObject *next; + IOPowerConnection *connection; + IOService *child; pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogSetAggressiveness,type, newLevel); - if ( type <= kMaxType ) { + if ( type <= kMaxType ) + { pm_vars->current_aggressiveness_values[type] = newLevel; pm_vars->current_aggressiveness_valid[type] = true; } iter = getChildIterator(gIOPowerPlane); - if ( iter ) { - while ( (next = iter->getNextObject()) ) { - if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + if ( iter ) + { + while ( (next = iter->getNextObject()) ) + { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) + { child = ((IOService *)(connection->copyChildEntry(gIOPowerPlane))); - if ( child ) { + if ( child ) + { child->setAggressiveness(type, newLevel); child->release(); } @@ -1723,9 +1944,14 @@ IOReturn IOService::setAggressiveness ( unsigned long type, unsigned long newLev IOReturn IOService::getAggressiveness ( unsigned long type, unsigned long * currentLevel ) { - if ( type <= kMaxType ) { - *currentLevel = pm_vars->current_aggressiveness_values[type]; - } +// if ( type > kMaxType ) +// return kIOReturnBadArgument; + + if ( !pm_vars->current_aggressiveness_valid[type] ) + return kIOReturnInvalid; + + *currentLevel = pm_vars->current_aggressiveness_values[type]; + return kIOReturnSuccess; } @@ -1738,30 +1964,36 @@ IOReturn IOService::getAggressiveness ( unsigned long type, unsigned long * curr IOReturn IOService::systemWake ( void ) { - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; - IOService * theChild; + OSIterator *iter; + OSObject *next; + IOPowerConnection *connection; + IOService *theChild; pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogSystemWake,0, 0); iter = 
getChildIterator(gIOPowerPlane); - if ( iter ) { - while ( (next = iter->getNextObject()) ) { - if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + if ( iter ) + { + while ( (next = iter->getNextObject()) ) + { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) + { theChild = (IOService *)connection->copyChildEntry(gIOPowerPlane); - if ( theChild ) { + if ( theChild ) + { theChild->systemWake(); - theChild->release(); + theChild->release(); } } } iter->release(); } - if ( pm_vars->theControllingDriver != NULL ) { - if ( pm_vars->theControllingDriver->didYouWakeSystem() ) { + if ( pm_vars->theControllingDriver != NULL ) + { + if ( pm_vars->theControllingDriver->didYouWakeSystem() ) + { makeUsable(); } } @@ -1777,17 +2009,20 @@ IOReturn IOService::systemWake ( void ) IOReturn IOService::temperatureCriticalForZone ( IOService * whichZone ) { - IOService * theParent; - IOService * theNub; + IOService *theParent; + IOService *theNub; pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogCriticalTemp,0,0); - if ( inPlane(gIOPowerPlane) && ! (priv->we_are_root) ) { + if ( inPlane(gIOPowerPlane) && !(priv->we_are_root) ) + { theNub = (IOService *)copyParentEntry(gIOPowerPlane); - if ( theNub ) { + if ( theNub ) + { theParent = (IOService *)theNub->copyParentEntry(gIOPowerPlane); theNub->release(); - if ( theParent ) { + if ( theParent ) + { theParent->temperatureCriticalForZone(whichZone); theParent->release(); } @@ -1807,9 +2042,12 @@ IOReturn IOService::powerOverrideOnPriv ( void ) { pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogOverrideOn,0,0); - priv->device_overrides = true; // turn on the override + // turn on the override + priv->device_overrides = true; computeDesiredState(); - return changeState(); // change state if that changed something + + // change state if that changed something + return changeState(); } @@ -1821,12 +2059,16 @@ IOReturn IOService::powerOverrideOffPriv ( void ) { pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogOverrideOff,0,0); - priv->device_overrides = false; // turn off the override + // turn off the override + priv->device_overrides = false; computeDesiredState(); if( priv->clampOn) + { return makeUsable(); - else - return changeState(); // change state if that changed something + } else { + // change state if that changed something + return changeState(); + } } @@ -1848,43 +2090,47 @@ IOReturn IOService::powerOverrideOffPriv ( void ) IOReturn IOService::enqueuePowerChange ( unsigned long flags, unsigned long whatStateOrdinal, unsigned long domainState, IOPowerConnection * whichParent, unsigned long singleParentState ) { - long newNote; - long previousNote; + long newNote; + long previousNote; -// Create and initialize the new change note + // Create and initialize the new change note IOLockLock(priv->queue_lock); newNote = priv->changeList->createChangeNote(); if ( newNote == -1 ) { + // uh-oh, our list is full IOLockUnlock(priv->queue_lock); pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogEnqueueErr,0,0); - return IOPMAckImplied; // uh-oh, our list is full + return IOPMAckImplied; } priv->changeList->changeNote[newNote].newStateNumber = whatStateOrdinal; - priv->changeList->changeNote[newNote].outputPowerCharacter = pm_vars->thePowerStates[whatStateOrdinal].outputPowerCharacter; - priv->changeList->changeNote[newNote].inputPowerRequirement = pm_vars->thePowerStates[whatStateOrdinal].inputPowerRequirement; - priv->changeList->changeNote[newNote].capabilityFlags = pm_vars->thePowerStates[whatStateOrdinal].capabilityFlags; + 
priv->changeList->changeNote[newNote].outputPowerCharacter = pm_vars->thePowerStates[whatStateOrdinal].outputPowerCharacter; + priv->changeList->changeNote[newNote].inputPowerRequirement = pm_vars->thePowerStates[whatStateOrdinal].inputPowerRequirement; + priv->changeList->changeNote[newNote].capabilityFlags = pm_vars->thePowerStates[whatStateOrdinal].capabilityFlags; priv->changeList->changeNote[newNote].flags = flags; - if (flags & IOPMParentInitiated ) { - priv->changeList->changeNote[newNote].domainState = domainState; - priv->changeList->changeNote[newNote].parent = whichParent; + priv->changeList->changeNote[newNote].parent = NULL; + if (flags & IOPMParentInitiated ) + { + priv->changeList->changeNote[newNote].domainState = domainState; + priv->changeList->changeNote[newNote].parent = whichParent; whichParent->retain(); - priv->changeList->changeNote[newNote].singleParentState = singleParentState; + priv->changeList->changeNote[newNote].singleParentState = singleParentState; } previousNote = priv->changeList->previousChangeNote(newNote); - if ( previousNote == -1 ) { + if ( previousNote == -1 ) + { // Queue is empty, we can start this change. - if (flags & IOPMWeInitiated ) { + if (flags & IOPMWeInitiated ) + { IOLockUnlock(priv->queue_lock); start_our_change(newNote); return 0; - } - else { + } else { IOLockUnlock(priv->queue_lock); return start_parent_change(newNote); } @@ -1897,7 +2143,8 @@ IOReturn IOService::enqueuePowerChange ( unsigned long flags, unsigned long wha // (A change is started iff it is at the head of the queue) while ( (previousNote != priv->head_note) && (previousNote != -1) && - (priv->changeList->changeNote[newNote].flags & priv->changeList->changeNote[previousNote].flags & IOPMWeInitiated) ) { + (priv->changeList->changeNote[newNote].flags & priv->changeList->changeNote[previousNote].flags & IOPMWeInitiated) ) + { priv->changeList->changeNote[previousNote].outputPowerCharacter = priv->changeList->changeNote[newNote].outputPowerCharacter; priv->changeList->changeNote[previousNote].inputPowerRequirement = priv->changeList->changeNote[newNote].inputPowerRequirement; priv->changeList->changeNote[previousNote].capabilityFlags =priv-> changeList->changeNote[newNote].capabilityFlags; @@ -1909,7 +2156,8 @@ IOReturn IOService::enqueuePowerChange ( unsigned long flags, unsigned long wha previousNote = priv->changeList->previousChangeNote(newNote); } IOLockUnlock(priv->queue_lock); - return IOPMWillAckLater; // in any case, we can't start yet + // in any case, we can't start yet + return IOPMWillAckLater; } //********************************************************************************* @@ -1936,10 +2184,12 @@ IOReturn IOService::notifyAll ( bool is_prechange ) // OK, we will go through the lists of interested drivers and power domain children // and notify each one of this change. - nextObject = priv->interestedDrivers->firstInList(); // notify interested drivers + + nextObject = priv->interestedDrivers->firstInList(); while ( nextObject != NULL ) { priv->head_note_pendingAcks +=1; - if (! inform(nextObject, is_prechange) ) { + if (! inform(nextObject, is_prechange) ) + { } nextObject = priv->interestedDrivers->nextInList(nextObject); } @@ -1947,18 +2197,26 @@ IOReturn IOService::notifyAll ( bool is_prechange ) if (! acquire_lock() ) { return IOPMNoErr; } - if ( priv->head_note_pendingAcks > 1 ) { // did they all ack? - pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0); // no + // did they all ack? 
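//*********************************************************************************
// Illustrative sketch: change-note coalescing
//
// enqueuePowerChange() above collapses bursts of self-initiated requests:
// walking back from the new note, each earlier pending IOPMWeInitiated note
// (other than the head note already executing) has its target state and power
// characteristics overwritten by the new request, so only the latest target
// survives. A minimal standalone sketch of the idea; the ChangeNote and
// ChangeQueue types are invented for illustration, not the kernel's
// change-note list.
//*********************************************************************************

#include <deque>

struct ChangeNote { unsigned long newStateNumber; bool weInitiated; };

struct ChangeQueue {
    std::deque<ChangeNote> notes;

    // Enqueue a self-initiated change. If the most recent pending note
    // (not the head note being executed, hence size() > 1) is also
    // self-initiated, overwrite it instead of growing the queue.
    void enqueueOurChange(unsigned long state) {
        if (notes.size() > 1 && notes.back().weInitiated)
            notes.back().newStateNumber = state;      // coalesce
        else
            notes.push_back(ChangeNote{state, true});
    }
};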
+ if ( priv->head_note_pendingAcks > 1 ) { + // no + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0); start_ack_timer(); } - IOUnlock(priv->our_lock); // either way + // either way + IOUnlock(priv->our_lock); - iter = getChildIterator(gIOPowerPlane); // notify children - pm_vars->thePowerStates[priv->head_note_state].staticPower = 0; // summing their power consumption + // notify children + iter = getChildIterator(gIOPowerPlane); + // summing their power consumption + pm_vars->thePowerStates[priv->head_note_state].staticPower = 0; - if ( iter ) { - while ( (next = iter->getNextObject()) ) { - if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + if ( iter ) + { + while ( (next = iter->getNextObject()) ) + { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) + { priv->head_note_pendingAcks +=1; notifyChild(connection, is_prechange); } @@ -1969,12 +2227,18 @@ IOReturn IOService::notifyAll ( bool is_prechange ) if (! acquire_lock() ) { return IOPMNoErr; } - priv->head_note_pendingAcks -= 1; // now make this real - if (priv->head_note_pendingAcks == 0 ) { // is it all acked? - IOUnlock(priv->our_lock); // yes - return IOPMAckImplied; // return ack to parent + // now make this real + priv->head_note_pendingAcks -= 1; + // is it all acked? + if (priv->head_note_pendingAcks == 0 ) { + // yes, all acked + IOUnlock(priv->our_lock); + // return ack to parent + return IOPMAckImplied; } - IOUnlock(priv->our_lock); // no + + // not all acked + IOUnlock(priv->our_lock); return IOPMWillAckLater; } @@ -1989,40 +2253,45 @@ IOReturn IOService::notifyAll ( bool is_prechange ) bool IOService::notifyChild ( IOPowerConnection * theNub, bool is_prechange ) { - IOReturn k = IOPMAckImplied; - unsigned long childPower; - IOService * theChild = (IOService *)(theNub->copyChildEntry(gIOPowerPlane)); + IOReturn k = IOPMAckImplied; + unsigned long childPower; + IOService *theChild = (IOService *)(theNub->copyChildEntry(gIOPowerPlane)); - theNub->setAwaitingAck(true); // in case they don't ack - - if ( ! theChild ) { + theNub->setAwaitingAck(true); // in case they don't ack + + if ( ! theChild ) + { return true; - } - - if ( is_prechange ) { - k = theChild->powerDomainWillChangeTo(priv->head_note_outputFlags,theNub); - } - else { - k = theChild->powerDomainDidChangeTo(priv->head_note_outputFlags,theNub); - } - - if ( k == IOPMAckImplied ) { // did the return code ack? - priv->head_note_pendingAcks -=1; // yes + } + + if ( is_prechange ) + { + k = theChild->powerDomainWillChangeTo(priv->head_note_outputFlags,theNub); + } else { + k = theChild->powerDomainDidChangeTo(priv->head_note_outputFlags,theNub); + } + + // did the return code ack? 
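//*********************************************************************************
// Illustrative sketch: guard-token ack counting
//
// notifyAll() above keeps one extra token in head_note_pendingAcks while the
// notification walk is in progress, so implied acknowledgements arriving
// mid-walk can never drive the counter to zero prematurely; the later
// "now make this real" decrement retires the token. A minimal sketch of the
// same idiom; the seeding of the counter to 1 is inferred from that final
// decrement, and all names here are invented.
//*********************************************************************************

#include <vector>

enum AckResult { kAckImplied, kWillAckLater };
struct Party { AckResult (*notify)(); };

// Returns true if every party acknowledged synchronously.
bool notifyAllParties(std::vector<Party> &parties, int &pendingAcks)
{
    pendingAcks = 1;                              // the guard token
    for (Party &p : parties) {
        ++pendingAcks;                            // one per notified party
        if (p.notify() == kAckImplied)
            --pendingAcks;                        // answered synchronously
    }
    return --pendingAcks == 0;                    // retire the token
}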
+ if ( k == IOPMAckImplied ) + { + // yes + priv->head_note_pendingAcks -=1; theNub->setAwaitingAck(false); childPower = theChild->currentPowerConsumption(); - if ( childPower == kIOPMUnknown ) { + if ( childPower == kIOPMUnknown ) + { pm_vars->thePowerStates[priv->head_note_state].staticPower = kIOPMUnknown; - } - else { - if ( pm_vars->thePowerStates[priv->head_note_state].staticPower != kIOPMUnknown ) { + } else { + if ( pm_vars->thePowerStates[priv->head_note_state].staticPower != kIOPMUnknown ) + { pm_vars->thePowerStates[priv->head_note_state].staticPower += childPower; } } theChild->release(); - return true; - } - theChild->release(); - return false; + return true; + } + theChild->release(); + return false; } @@ -2036,40 +2305,54 @@ bool IOService::notifyChild ( IOPowerConnection * theNub, bool is_prechange ) bool IOService::inform ( IOPMinformee * nextObject, bool is_prechange ) { - IOReturn k = IOPMAckImplied; - - nextObject->timer = -1; // initialize this + IOReturn k = IOPMAckImplied; - if ( is_prechange ) { - pm_vars->thePlatform->PMLog (pm_vars->ourName,PMlogInformDriverPreChange, + // initialize this + nextObject->timer = -1; + + if ( is_prechange ) + { + pm_vars->thePlatform->PMLog (pm_vars->ourName,PMlogInformDriverPreChange, (unsigned long)priv->head_note_capabilityFlags,(unsigned long)priv->head_note_state); - k = nextObject->whatObject->powerStateWillChangeTo( priv->head_note_capabilityFlags,priv->head_note_state,this); - } - else { - pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogInformDriverPostChange, - (unsigned long)priv->head_note_capabilityFlags,(unsigned long)priv->head_note_state); - k = nextObject->whatObject->powerStateDidChangeTo(priv->head_note_capabilityFlags,priv->head_note_state,this); - } - if ( nextObject->timer == 0 ) { // did it ack behind our back? - return true; // yes - } - if ( k ==IOPMAckImplied ) { // no, did the return code ack? - nextObject->timer = 0; // yes - priv->head_note_pendingAcks -= 1; - return true; - } - if ( k < 0 ) { - nextObject->timer = 0; // somebody goofed - priv-> head_note_pendingAcks -= 1; - return true; - } - nextObject->timer = (k * ns_per_us / ACK_TIMER_PERIOD) + 1; // no, it's a timer - return false; + k = nextObject->whatObject->powerStateWillChangeTo( priv->head_note_capabilityFlags,priv->head_note_state,this); + } else { + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogInformDriverPostChange, + (unsigned long)priv->head_note_capabilityFlags,(unsigned long)priv->head_note_state); + k = nextObject->whatObject->powerStateDidChangeTo(priv->head_note_capabilityFlags,priv->head_note_state,this); + } + + // did it ack behind our back? + if ( nextObject->timer == 0 ) + { + // yes + return true; + } + + // no, did the return code ack? + if ( k ==IOPMAckImplied ) + { + // yes + nextObject->timer = 0; + priv->head_note_pendingAcks -= 1; + return true; + } + if ( k<0 ) + { + // somebody goofed + nextObject->timer = 0; + priv-> head_note_pendingAcks -= 1; + return true; + } + + // no, it's a timer + nextObject->timer = (k / (ACK_TIMER_PERIOD / ns_per_us)) + 1; + + return false; } //********************************************************************************* -// our_prechange_03 +// OurChangeTellClientsPowerDown // // All registered applications and kernel clients have positively acknowledged our // intention of lowering power. Here we notify them all that we will definitely @@ -2077,17 +2360,24 @@ bool IOService::inform ( IOPMinformee * nextObject, bool is_prechange ) // carry on by notifying interested drivers. 
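//*********************************************************************************
// Illustrative check: the rewritten timeout arithmetic in inform()
//
// The driver's reply k is a timeout in microseconds; ACK_TIMER_PERIOD is in
// nanoseconds (it is passed to clock_interval_to_deadline() with
// kNanosecondScale below). The new expression divides k by the period
// expressed in microseconds instead of first multiplying k by ns_per_us,
// which keeps the intermediate value small; the two forms give identical
// results whenever ACK_TIMER_PERIOD is a multiple of ns_per_us. The constant
// values in this standalone check are assumptions for illustration.
//*********************************************************************************

#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t ns_per_us        = 1000;
    const uint64_t ACK_TIMER_PERIOD = 100000000;    // assume 100 ms, in ns
    const uint64_t k                = 250000;       // driver wants 250 ms

    uint64_t old_ticks = (k * ns_per_us / ACK_TIMER_PERIOD) + 1;    // 3
    uint64_t new_ticks = (k / (ACK_TIMER_PERIOD / ns_per_us)) + 1;  // 3
    assert(old_ticks == new_ticks);

    // In 32-bit arithmetic the old form overflows once k * 1000 exceeds
    // 2^32, i.e. for timeouts beyond roughly 4.3 seconds; the new form
    // never forms that product.
    return 0;
}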
Otherwise, we do wait. //********************************************************************************* -void IOService::our_prechange_03 ( void ) +void IOService::OurChangeTellClientsPowerDown ( void ) { - priv->machine_state = IOPMour_prechange_04; // next state - if ( tellChangeDown1(priv->head_note_state) ) { // are we waiting for responses? - our_prechange_04(); // no, notify priority clients + // next state + priv->machine_state = kIOPM_OurChangeTellPriorityClientsPowerDown; + + // are we waiting for responses? + if ( tellChangeDown1(priv->head_note_state) ) + { + // no, notify priority clients + OurChangeTellPriorityClientsPowerDown(); } + // If we are waiting for responses, execution will resume via + // allowCancelCommon() or ack timeout } //********************************************************************************* -// our_prechange_04 +// OurChangeTellPriorityClientsPowerDown // // All registered applications and kernel clients have positively acknowledged our // intention of lowering power. Here we notify "priority" clients that we are @@ -2095,17 +2385,23 @@ void IOService::our_prechange_03 ( void ) // carry on by notifying interested drivers. Otherwise, we do wait. //********************************************************************************* -void IOService::our_prechange_04 ( void ) +void IOService::OurChangeTellPriorityClientsPowerDown ( void ) { - priv->machine_state = IOPMour_prechange_05; // next state - if ( tellChangeDown2(priv->head_note_state) ) { // are we waiting for responses? - return our_prechange_05(); // no, notify interested drivers + // next state + priv->machine_state = kIOPM_OurChangeNotifyInterestedDriversWillChange; + // are we waiting for responses? + if ( tellChangeDown2(priv->head_note_state) ) + { + // no, notify interested drivers + return OurChangeNotifyInterestedDriversWillChange(); } + // If we are waiting for responses, execution will resume via + // allowCancelCommon() or ack timeout } //********************************************************************************* -// our_prechange_05 +// OurChangeNotifyInterestedDriversWillChange // // All registered applications and kernel clients have acknowledged our notification // that we are lowering power. Here we notify interested drivers. If we don't have @@ -2113,17 +2409,22 @@ void IOService::our_prechange_04 ( void ) // Otherwise, we do wait. //********************************************************************************* -void IOService::our_prechange_05 ( void ) +void IOService::OurChangeNotifyInterestedDriversWillChange ( void ) { - priv->machine_state = IOPMour_prechange_1; // no, in case they don't all ack - if ( notifyAll(true) == IOPMAckImplied ) { - our_prechange_1(); + // no, in case they don't all ack + priv->machine_state = kIOPM_OurChangeSetPowerState; + if ( notifyAll(true) == IOPMAckImplied ) + { + // not waiting for responses + OurChangeSetPowerState(); } + // If we are waiting for responses, execution will resume via + // all_acked() or ack timeout } //********************************************************************************* -// our_prechange_1 +// OurChangeSetPowerState // // All interested drivers have acknowledged our pre-change notification of a power // change we initiated. Here we instruct our controlling driver to make @@ -2132,21 +2433,25 @@ void IOService::our_prechange_05 ( void ) // If it doesn't, we have to wait for it to acknowledge and then continue. 
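//*********************************************************************************
// Illustrative sketch: record-then-maybe-wait state machine
//
// Each renamed handler above stores the *next* state in priv->machine_state
// before doing anything that may complete asynchronously; whichever
// completion path later fires (all_acked, settleTimerExpired, or the ack
// timeout) just dispatches on that value. A minimal sketch of the pattern;
// the enum values and methods are invented for illustration.
//*********************************************************************************

enum MachineState { kIdle, kWaitForAcks, kFinished };

struct PolicyMaker {
    MachineState machineState = kIdle;

    void startChange() {
        machineState = kWaitForAcks;    // record the resume point FIRST
        if (notifyEveryone())           // everyone acked inline?
            onAllAcked();               // else a callback resumes us later
    }

    void onAllAcked() {                 // called inline or from a callback
        switch (machineState) {
        case kWaitForAcks: machineState = kFinished; break;
        default:           /* spurious wakeup */     break;
        }
    }

    bool notifyEveryone() { return true; }   // stand-in for notifyAll()
};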
//********************************************************************************* -void IOService::our_prechange_1 ( void ) +void IOService::OurChangeSetPowerState ( void ) { - if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) { - our_prechange_2(); // it's done, carry on - } - else { - priv->machine_state = IOPMour_prechange_2; // it's not, wait for it + priv->machine_state = kIOPM_OurChangeWaitForPowerSettle; + + if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) + { + // it's done, carry on + OurChangeWaitForPowerSettle(); + } else { + // it's not, wait for it pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0); start_ack_timer(); + // execution will resume via ack_timer_ticked() } } //********************************************************************************* -// our_prechange_2 +// OurChangeWaitForPowerSettle // // Our controlling driver has changed power state on the hardware // during a power change we initiated. Here we see if we need to wait @@ -2155,21 +2460,21 @@ void IOService::our_prechange_1 ( void ) // continue later. //********************************************************************************* -void IOService::our_prechange_2 ( void ) +void IOService::OurChangeWaitForPowerSettle ( void ) { - priv->settle_time = compute_settle_time(); - if ( priv->settle_time == 0 ) { - our_prechange_3(); - } - else { - priv->machine_state = IOPMour_prechange_3; + priv->settle_time = compute_settle_time(); + if ( priv->settle_time == 0 ) + { + OurChangeNotifyInterestedDriversDidChange(); + } else { + priv->machine_state = kIOPM_OurChangeNotifyInterestedDriversDidChange; startSettleTimer(priv->settle_time); } } //********************************************************************************* -// our_prechange_3 +// OurChangeNotifyInterestedDriversDidChange // // Power has settled on a power change we initiated. Here we notify // all our interested parties post-change. If they all acknowledge, we're @@ -2177,31 +2482,36 @@ void IOService::our_prechange_2 ( void ) // Otherwise we have to wait for acknowledgements and finish up later. //********************************************************************************* -void IOService::our_prechange_3 ( void ) +void IOService::OurChangeNotifyInterestedDriversDidChange ( void ) { - priv->machine_state = IOPMour_prechange_4; // in case they don't all ack - if ( notifyAll(false) == IOPMAckImplied ) { - our_prechange_4(); + // in case they don't all ack + priv->machine_state = kIOPM_OurChangeFinish; + if ( notifyAll(false) == IOPMAckImplied ) + { + // not waiting for responses + OurChangeFinish(); } + // If we are waiting for responses, execution will resume via + // all_acked() or ack timeout } //********************************************************************************* -// our_prechange_4 +// OurChangeFinish // // Power has settled on a power change we initiated, and // all our interested parties have acknowledged. We're // done with this change note, and we can start on the next one. //********************************************************************************* -void IOService::our_prechange_4 ( void ) +void IOService::OurChangeFinish ( void ) { all_done(); } //********************************************************************************* -// parent_down_0 +// ParentDownTellPriorityClientsPowerDown_Immediate // // All applications and kernel clients have been notified of a power lowering // initiated by the parent and we didn't have to wait for any responses. 
Here @@ -2209,18 +2519,24 @@ void IOService::our_prechange_4 ( void ) // If at least one doesn't, we have to wait for it to acknowledge and then continue. //********************************************************************************* -IOReturn IOService::parent_down_0 ( void ) +IOReturn IOService::ParentDownTellPriorityClientsPowerDown_Immediate ( void ) { - priv->machine_state = IOPMparent_down_05; // in case they don't all ack - if ( tellChangeDown2(priv->head_note_state) ) { // are we waiting for responses? - return parent_down_02(); // no, notify interested drivers + // in case they don't all ack + priv->machine_state = kIOPM_ParentDownNotifyInterestedDriversWillChange_Delayed; + // are we waiting for responses? + if ( tellChangeDown2(priv->head_note_state) ) + { + // no, notify interested drivers + return ParentDownNotifyInterestedDriversWillChange_Immediate(); } - return IOPMWillAckLater; // they didn't + // If we are waiting for responses, execution will resume via + // allowCancelCommon() or ack timeout + return IOPMWillAckLater; } //********************************************************************************* -// parent_down_02 +// ParentDownTellPriorityClientsPowerDown_Immediate2 // // All priority kernel clients have been notified of a power lowering // initiated by the parent and we didn't have to wait for any responses. Here @@ -2229,18 +2545,23 @@ IOReturn IOService::parent_down_0 ( void ) // If at least one doesn't, we have to wait for it to acknowledge and then continue. //********************************************************************************* -IOReturn IOService::parent_down_02 ( void ) +IOReturn IOService::ParentDownNotifyInterestedDriversWillChange_Immediate ( void ) { - priv->machine_state = IOPMparent_down_4; // in case they don't all ack - if ( notifyAll(true) == IOPMAckImplied ) { - return parent_down_1(); // they did + // in case they don't all ack + priv->machine_state = kIOPM_ParentDownSetPowerState_Delayed; + if ( notifyAll(true) == IOPMAckImplied ) + { + // they did + return ParentDownSetPowerState_Immediate(); } - return IOPMWillAckLater; // they didn't + // If we are waiting for responses, execution will resume via + // all_acked() or ack timeout + return IOPMWillAckLater; } //********************************************************************************* -// parent_down_04 +// ParentDownTellPriorityClientsPowerDown_Immediate4 // // All applications and kernel clients have been notified of a power lowering // initiated by the parent and we had to wait for responses. Here @@ -2248,17 +2569,24 @@ IOReturn IOService::parent_down_02 ( void ) // If at least one doesn't, we have to wait for it to acknowledge and then continue. //********************************************************************************* -void IOService::parent_down_04 ( void ) +void IOService::ParentDownTellPriorityClientsPowerDown_Delayed ( void ) { - priv->machine_state = IOPMparent_down_05; // in case they don't all ack - if ( tellChangeDown2(priv->head_note_state) ) { // are we waiting for responses? - parent_down_05(); // no, notify interested drivers + // in case they don't all ack + priv->machine_state = kIOPM_ParentDownNotifyInterestedDriversWillChange_Delayed; + + // are we waiting for responses? 
+ if ( tellChangeDown2(priv->head_note_state) ) + { + // no, notify interested drivers + ParentDownNotifyInterestedDriversWillChange_Delayed(); } + // If we are waiting for responses, execution will resume via + // allowCancelCommon() or ack timeout } //********************************************************************************* -// parent_down_05 +// ParentDownTellPriorityClientsPowerDown_Immediate5 // // All applications and kernel clients have been notified of a power lowering // initiated by the parent and we had to wait for their responses. Here we notify @@ -2267,17 +2595,22 @@ void IOService::parent_down_04 ( void ) // If at least one doesn't, we have to wait for it to acknowledge and then continue. //********************************************************************************* -void IOService::parent_down_05 ( void ) +void IOService::ParentDownNotifyInterestedDriversWillChange_Delayed ( void ) { - priv->machine_state = IOPMparent_down_4; // in case they don't all ack - if ( notifyAll(true) == IOPMAckImplied ) { - parent_down_4(); // they did + // in case they don't all ack + priv->machine_state = kIOPM_ParentDownSetPowerState_Delayed; + if ( notifyAll(true) == IOPMAckImplied ) + { + // they did + ParentDownSetPowerState_Delayed(); } + // If we are waiting for responses, execution will resume via + // all_acked() or ack timeout } //********************************************************************************* -// parent_down_1 +// ParentDownSetPowerState_Immediate // // All parties have acknowledged our pre-change notification of a power // lowering initiated by the parent. Here we instruct our controlling driver @@ -2287,12 +2620,16 @@ void IOService::parent_down_05 ( void ) // If it doesn't, we have to wait for it to acknowledge and then continue. //********************************************************************************* -IOReturn IOService::parent_down_1 ( void ) +IOReturn IOService::ParentDownSetPowerState_Immediate ( void ) { - if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) { - return parent_down_2(); // it's done, carry on + priv->machine_state = kIOPM_ParentDownWaitForPowerSettle_Delayed; + + if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) + { + // it's done, carry on + return ParentDownWaitForPowerSettleAndNotifyDidChange_Immediate(); } - priv->machine_state = IOPMparent_down_5; // it's not, wait for it + // it's not, wait for it pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0); start_ack_timer(); return IOPMWillAckLater; @@ -2300,7 +2637,7 @@ IOReturn IOService::parent_down_1 ( void ) //********************************************************************************* -// parent_down_4 +// ParentDownSetPowerState_Delayed // // We had to wait for it, but all parties have acknowledged our pre-change // notification of a power lowering initiated by the parent. @@ -2311,13 +2648,16 @@ IOReturn IOService::parent_down_1 ( void ) // If it doesn't, we have to wait for it to acknowledge and then continue. 
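//*********************************************************************************
// Illustrative sketch: the _Immediate / _Delayed naming split
//
// The suffixes adopted in this rename encode how a step is reached. An
// _Immediate variant runs synchronously from the initiating call while the
// caller is still on the stack, so it reports back through an IOReturn
// (IOPMAckImplied or IOPMWillAckLater). A _Delayed variant runs later, from
// an ack or timer callback, with nobody left to return to, so it is void and
// any outcome is delivered by calling acknowledgePowerChange() on the parent.
// Roughly, as declarations (a sketch, with a stand-in IOReturn type):
//*********************************************************************************

typedef int IOReturn;                     // stand-in for the kernel type

// Synchronous leg: answer the caller directly.
IOReturn StepSetPowerState_Immediate();   // IOPMAckImplied / IOPMWillAckLater

// Asynchronous leg: resumed from all_acked() or a settle/ack timer; reports
// completion by acknowledging the parent instead of returning a value.
void StepSetPowerState_Delayed();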
//********************************************************************************* -void IOService::parent_down_4 ( void ) +void IOService::ParentDownSetPowerState_Delayed ( void ) { - if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) { - parent_down_5(); // it's done, carry on - } - else { - priv-> machine_state = IOPMparent_down_5; // it's not, wait for it + priv-> machine_state = kIOPM_ParentDownWaitForPowerSettle_Delayed; + + if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) + { + // it's done, carry on + ParentDownWaitForPowerSettle_Delayed(); + } else { + // it's not, wait for it pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0); start_ack_timer(); } @@ -2325,7 +2665,7 @@ void IOService::parent_down_4 ( void ) //********************************************************************************* -// parent_down_2 +// ParentDownWaitForPowerSettleAndNotifyDidChange_Immediate // // Our controlling driver has changed power state on the hardware // during a power change initiated by our parent. Here we see if we need @@ -2334,31 +2674,38 @@ void IOService::parent_down_4 ( void ) // If so, we wait and continue later. //********************************************************************************* -IOReturn IOService::parent_down_2 ( void ) +IOReturn IOService::ParentDownWaitForPowerSettleAndNotifyDidChange_Immediate ( void ) { IOService * nub; priv->settle_time = compute_settle_time(); - if ( priv->settle_time == 0 ) { - priv->machine_state = IOPMparent_down_6; // in case they don't all ack - if ( notifyAll(false) == IOPMAckImplied ) { + if ( priv->settle_time == 0 ) + { + // store current state in case they don't all ack + priv->machine_state = kIOPM_ParentDownAcknowledgeChange_Delayed; + if ( notifyAll(false) == IOPMAckImplied ) + { + // not waiting for responses nub = priv->head_note_parent; + nub->retain(); all_done(); nub->release(); return IOPMAckImplied; } - return IOPMWillAckLater; // they didn't - } - else { - priv->machine_state = IOPMparent_down_3; - startSettleTimer(priv->settle_time); - return IOPMWillAckLater; + // If we are waiting for responses, execution will resume via + // all_acked() or ack timeout + return IOPMWillAckLater; + } else { + // let settle time elapse, then notify interest drivers of our power state change in ParentDownNotifyDidChangeAndAcknowledgeChange_Delayed + priv->machine_state = kIOPM_ParentDownNotifyDidChangeAndAcknowledgeChange_Delayed; + startSettleTimer(priv->settle_time); + return IOPMWillAckLater; } } //********************************************************************************* -// parent_down_5 +// ParentDownWaitForPowerSettle_Delayed // // Our controlling driver has changed power state on the hardware // during a power change initiated by our parent. We have had to wait @@ -2369,34 +2716,36 @@ IOReturn IOService::parent_down_2 ( void ) // If so, we wait and continue later. 
//********************************************************************************* -void IOService::parent_down_5 ( void ) +void IOService::ParentDownWaitForPowerSettle_Delayed ( void ) { priv->settle_time = compute_settle_time(); - if ( priv->settle_time == 0 ) { - parent_down_3(); - } - else { - priv->machine_state = IOPMparent_down_3; + if ( priv->settle_time == 0 ) + { + ParentDownNotifyDidChangeAndAcknowledgeChange_Delayed(); + } else { + priv->machine_state = kIOPM_ParentDownNotifyDidChangeAndAcknowledgeChange_Delayed; startSettleTimer(priv->settle_time); } } //********************************************************************************* -// parent_down_3 +// ParentDownNotifyDidChangeAndAcknowledgeChange_Delayed // // Power has settled on a power change initiated by our parent. Here we // notify interested parties. //********************************************************************************* -void IOService::parent_down_3 ( void ) +void IOService::ParentDownNotifyDidChangeAndAcknowledgeChange_Delayed ( void ) { - IORegistryEntry * nub; - IOService * parent; + IORegistryEntry *nub; + IOService *parent; - priv->machine_state = IOPMparent_down_6; // in case they don't all ack + // in case they don't all ack + priv->machine_state = kIOPM_ParentDownAcknowledgeChange_Delayed; if ( notifyAll(false) == IOPMAckImplied ) { nub = priv->head_note_parent; + nub->retain(); all_done(); parent = (IOService *)nub->copyParentEntry(gIOPowerPlane); if ( parent ) { @@ -2405,11 +2754,14 @@ void IOService::parent_down_3 ( void ) } nub->release(); } + // If we are waiting for responses, execution will resume via + // all_acked() or ack timeout in ParentDownAcknowledgeChange_Delayed. + // Notice the duplication of code just above and in ParentDownAcknowledgeChange_Delayed. } //********************************************************************************* -// parent_down_6 +// ParentDownAcknowledgeChange_Delayed // // We had to wait for it, but all parties have acknowledged our post-change // notification of a power lowering initiated by the parent. @@ -2417,15 +2769,17 @@ void IOService::parent_down_3 ( void ) // We are done with this change note, and we can start on the next one. //********************************************************************************* -void IOService::parent_down_6 ( void ) +void IOService::ParentDownAcknowledgeChange_Delayed ( void ) { - IORegistryEntry * nub; - IOService * parent; + IORegistryEntry *nub; + IOService *parent; nub = priv->head_note_parent; + nub->retain(); all_done(); parent = (IOService *)nub->copyParentEntry(gIOPowerPlane); - if ( parent ) { + if ( parent ) + { parent->acknowledgePowerChange((IOService *)nub); parent->release(); } @@ -2434,7 +2788,7 @@ void IOService::parent_down_6 ( void ) //********************************************************************************* -// parent_up_0 +// ParentUpSetPowerState_Delayed // // Our parent has informed us via powerStateDidChange that it has // raised the power in our power domain, and we have had to wait @@ -2446,13 +2800,16 @@ void IOService::parent_down_6 ( void ) // If it doesn't, we have to wait for it to acknowledge and then continue. 
//********************************************************************************* -void IOService::parent_up_0 ( void ) +void IOService::ParentUpSetPowerState_Delayed ( void ) { - if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) { - parent_up_4(); // it did it, carry on - } - else { - priv->machine_state = IOPMparent_up_4; // it didn't, wait for it + priv->machine_state = kIOPM_ParentUpWaitForSettleTime_Delayed; + + if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) + { + // it did it, carry on + ParentUpWaitForSettleTime_Delayed(); + } else { + // it didn't, wait for it pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0); start_ack_timer(); } @@ -2460,7 +2817,7 @@ void IOService::parent_up_0 ( void ) //********************************************************************************* -// parent_up_1 +// ParentUpSetPowerState_Immediate // // Our parent has informed us via powerStateDidChange that it has // raised the power in our power domain. Here we instruct our controlling @@ -2470,13 +2827,17 @@ void IOService::parent_up_0 ( void ) // If it doesn't, we have to wait for it to acknowledge and then continue. //********************************************************************************* -IOReturn IOService::parent_up_1 ( void ) +IOReturn IOService::ParentUpSetPowerState_Immediate ( void ) { - if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) { - return parent_up_2(); // it did it, carry on + priv->machine_state = kIOPM_ParentUpWaitForSettleTime_Delayed; + + if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) + { + // it did it, carry on + return ParentUpWaitForSettleTime_Immediate(); } else { - priv->machine_state = IOPMparent_up_4; // it didn't, wait for it + // it didn't, wait for it pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0); start_ack_timer(); return IOPMWillAckLater; @@ -2485,7 +2846,7 @@ IOReturn IOService::parent_up_1 ( void ) //********************************************************************************* -// parent_up_2 +// ParentUpWaitForSettleTime_Immediate // // Our controlling driver has changed power state on the hardware // during a power raise initiated by the parent. Here we see if we need to wait @@ -2494,22 +2855,22 @@ IOReturn IOService::parent_up_1 ( void ) // continue later. //********************************************************************************* -IOReturn IOService::parent_up_2 ( void ) +IOReturn IOService::ParentUpWaitForSettleTime_Immediate ( void ) { priv->settle_time = compute_settle_time(); - if ( priv->settle_time == 0 ) { - return parent_up_3(); - } - else { - priv->machine_state = IOPMparent_up_5; - startSettleTimer(priv->settle_time); - return IOPMWillAckLater; - } + if ( priv->settle_time == 0 ) + { + return ParentUpNotifyInterestedDriversDidChange_Immediate(); + } else { + priv->machine_state = kIOPM_ParentUpNotifyInterestedDriversDidChange_Delayed; + startSettleTimer(priv->settle_time); + return IOPMWillAckLater; + } } //********************************************************************************* -// parent_up_4 +// ParentUpWaitForSettleTime_Delayed // // Our controlling driver has changed power state on the hardware // during a power raise initiated by the parent, but we had to wait for it. @@ -2518,21 +2879,21 @@ IOReturn IOService::parent_up_2 ( void ) // If so, we wait and continue later. 
//********************************************************************************* -void IOService::parent_up_4 ( void ) +void IOService::ParentUpWaitForSettleTime_Delayed ( void ) { priv->settle_time = compute_settle_time(); - if ( priv->settle_time == 0 ) { - parent_up_5(); - } - else { - priv->machine_state = IOPMparent_up_5; - startSettleTimer(priv->settle_time); - } + if ( priv->settle_time == 0 ) + { + ParentUpNotifyInterestedDriversDidChange_Delayed(); + } else { + priv->machine_state = kIOPM_ParentUpNotifyInterestedDriversDidChange_Delayed; + startSettleTimer(priv->settle_time); + } } //********************************************************************************* -// parent_up_3 +// ParentUpNotifyInterestedDriversDidChange_Immediate // // No power settling was required on a power raise initiated by the parent. // Here we notify all our interested parties post-change. If they all acknowledge, @@ -2540,23 +2901,28 @@ void IOService::parent_up_4 ( void ) // Otherwise we have to wait for acknowledgements and finish up later. //********************************************************************************* -IOReturn IOService::parent_up_3 ( void ) +IOReturn IOService::ParentUpNotifyInterestedDriversDidChange_Immediate ( void ) { IOService * nub; - priv->machine_state = IOPMparent_up_6; // in case they don't all ack - if ( notifyAll(false) == IOPMAckImplied ) { + // in case they don't all ack + priv->machine_state = kIOPM_ParentUpAcknowledgePowerChange_Delayed; + if ( notifyAll(false) == IOPMAckImplied ) + { nub = priv->head_note_parent; + nub->retain(); all_done(); nub->release(); return IOPMAckImplied; } - return IOPMWillAckLater; // they didn't + // If we are waiting for responses, execution will resume via + // all_acked() or ack timeout in ParentUpAcknowledgePowerChange_Delayed. + return IOPMWillAckLater; } //********************************************************************************* -// parent_up_5 +// ParentUpNotifyInterestedDriversDidChange_Delayed // // Power has settled on a power raise initiated by the parent. // Here we notify all our interested parties post-change. If they all acknowledge, @@ -2564,32 +2930,38 @@ IOReturn IOService::parent_up_3 ( void ) // Otherwise we have to wait for acknowledgements and finish up later. //********************************************************************************* -void IOService::parent_up_5 ( void ) +void IOService::ParentUpNotifyInterestedDriversDidChange_Delayed ( void ) { - priv->machine_state = IOPMparent_up_6; // in case they don't all ack - if ( notifyAll(false) == IOPMAckImplied ) { - parent_up_6(); + // in case they don't all ack + priv->machine_state = kIOPM_ParentUpAcknowledgePowerChange_Delayed; + if ( notifyAll(false) == IOPMAckImplied ) + { + ParentUpAcknowledgePowerChange_Delayed(); } + // If we are waiting for responses, execution will resume via + // all_acked() or ack timeout in ParentUpAcknowledgePowerChange_Delayed. } //********************************************************************************* -// parent_up_6 +// ParentUpAcknowledgePowerChange_Delayed // // All parties have acknowledged our post-change notification of a power // raising initiated by the parent. Here we acknowledge the parent. // We are done with this change note, and we can start on the next one. 
//********************************************************************************* -void IOService::parent_up_6 ( void ) +void IOService::ParentUpAcknowledgePowerChange_Delayed ( void ) { - IORegistryEntry * nub; - IOService * parent; + IORegistryEntry *nub; + IOService *parent; nub = priv->head_note_parent; + nub->retain(); all_done(); parent = (IOService *)nub->copyParentEntry(gIOPowerPlane); - if ( parent ) { + if ( parent ) + { parent->acknowledgePowerChange((IOService *)nub); parent->release(); } @@ -2607,70 +2979,93 @@ void IOService::parent_up_6 ( void ) void IOService::all_done ( void ) { - unsigned long previous_state; - IORegistryEntry * nub; - IOService * parent; + unsigned long previous_state; + IORegistryEntry *nub; + IOService *parent; - priv->machine_state = IOPMfinished; + priv->machine_state = kIOPM_Finished; - if ( priv->head_note_flags & IOPMWeInitiated ) { // our power change - if ( !( priv->head_note_flags & IOPMNotDone) ) { // could our driver switch to the new state? - if ( pm_vars->myCurrentState < priv->head_note_state ) { // yes, did power raise? - tellChangeUp (priv->head_note_state); // yes, inform clients and apps - } - else { - if ( ! priv->we_are_root ) { // no, if this lowers our - ask_parent(priv->head_note_state); // power requirements, tell the parent + // our power change + if ( priv->head_note_flags & IOPMWeInitiated ) + { + // could our driver switch to the new state? + if ( !( priv->head_note_flags & IOPMNotDone) ) + { + // yes, did power raise? + if ( pm_vars->myCurrentState < priv->head_note_state ) + { + // yes, inform clients and apps + tellChangeUp (priv->head_note_state); + } else { + // no, if this lowers our + if ( ! priv->we_are_root ) + { + // power requirements, tell the parent + ask_parent(priv->head_note_state); } } previous_state = pm_vars->myCurrentState; - pm_vars->myCurrentState = priv->head_note_state; // either way + // either way + pm_vars->myCurrentState = priv->head_note_state; priv->imminentState = pm_vars->myCurrentState; pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogChangeDone,(unsigned long)pm_vars->myCurrentState,0); - powerChangeDone(previous_state); // inform subclass policy-maker + // inform subclass policy-maker + powerChangeDone(previous_state); } -// else { // no -// pm_vars->myCurrentState = pm_vars->theControllingDriver->powerStateForDomainState(pm_vars->parentsCurrentPowerFlags); -// } } - if ( priv->head_note_flags & IOPMParentInitiated) { // parent's power change + + // parent's power change + if ( priv->head_note_flags & IOPMParentInitiated) + { if ( ((priv->head_note_flags & IOPMDomainWillChange) && (pm_vars->myCurrentState >= priv->head_note_state)) || - ((priv->head_note_flags & IOPMDomainDidChange) && (pm_vars->myCurrentState < priv->head_note_state)) ) { - if ( pm_vars->myCurrentState < priv->head_note_state ) { // did power raise? - tellChangeUp (priv->head_note_state); // yes, inform clients and apps + ((priv->head_note_flags & IOPMDomainDidChange) && (pm_vars->myCurrentState < priv->head_note_state)) ) + { + // did power raise? 
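//*********************************************************************************
// Illustrative sketch: why nub->retain() was added before all_done()
//
// all_done() releases the head change note, and with it the reference to the
// parent nub taken in enqueuePowerChange(). Without the added retain(), the
// subsequent acknowledgePowerChange()/release() could act on a freed object.
// A runnable toy model of the fix, using a hand-rolled retain/release type:
//*********************************************************************************

#include <cstdio>

struct Ref {                              // toy retain/release object
    int count = 1;
    void retain()  { ++count; }
    void release() { if (--count == 0) { std::printf("freed\n"); delete this; } }
};

Ref *headNoteParent;                      // reference owned by the change note

void all_done_toy() {                     // completing the note drops its ref
    headNoteParent->release();
}

int main()
{
    headNoteParent = new Ref;
    Ref *nub = headNoteParent;
    nub->retain();                        // the added retain: outlive all_done
    all_done_toy();
    // ... safe to keep using nub here ...
    nub->release();                       // now the object is really freed
    return 0;
}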
+ if ( pm_vars->myCurrentState < priv->head_note_state ) + { + // yes, inform clients and apps + tellChangeUp (priv->head_note_state); } - previous_state = pm_vars->myCurrentState; // either way + // either way + previous_state = pm_vars->myCurrentState; pm_vars->myCurrentState = priv->head_note_state; priv->imminentState = pm_vars->myCurrentState; pm_vars->maxCapability = pm_vars->theControllingDriver->maxCapabilityForDomainState(priv->head_note_domainState); pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogChangeDone,(unsigned long)pm_vars->myCurrentState,0); - powerChangeDone(previous_state); // inform subclass policy-maker + // inform subclass policy-maker + powerChangeDone(previous_state); } } IOLockLock(priv->queue_lock); - priv->changeList->releaseHeadChangeNote(); // we're done with this + // we're done with this + priv->changeList->releaseHeadChangeNote(); - priv->head_note = priv->changeList->currentChange(); // start next one in queue - if ( priv->head_note != -1 ) { + // start next one in queue + priv->head_note = priv->changeList->currentChange(); + if ( priv->head_note != -1 ) + { IOLockUnlock(priv->queue_lock); - if (priv->changeList->changeNote[priv->head_note].flags & IOPMWeInitiated ) { + if (priv->changeList->changeNote[priv->head_note].flags & IOPMWeInitiated ) + { start_our_change(priv->head_note); - } - else { + } else { nub = priv->changeList->changeNote[priv->head_note].parent; - if ( start_parent_change(priv->head_note) == IOPMAckImplied ) { + if ( start_parent_change(priv->head_note) == IOPMAckImplied ) + { parent = (IOService *)nub->copyParentEntry(gIOPowerPlane); - if ( parent ) { + if ( parent ) + { parent->acknowledgePowerChange((IOService *)nub); parent->release(); } } } + } else { + IOLockUnlock(priv->queue_lock); } - IOLockUnlock(priv->queue_lock); } @@ -2687,28 +3082,27 @@ void IOService::all_done ( void ) void IOService::all_acked ( void ) { switch (priv->machine_state) { - case IOPMour_prechange_1: - our_prechange_1(); + case kIOPM_OurChangeSetPowerState: + OurChangeSetPowerState(); break; - case IOPMour_prechange_4: - our_prechange_4(); + case kIOPM_OurChangeFinish: + OurChangeFinish(); break; - case IOPMparent_down_4: - parent_down_4(); + case kIOPM_ParentDownSetPowerState_Delayed: + ParentDownSetPowerState_Delayed(); break; - case IOPMparent_down_6: - parent_down_6(); + case kIOPM_ParentDownAcknowledgeChange_Delayed: + ParentDownAcknowledgeChange_Delayed(); break; - case IOPMparent_up_0: - parent_up_0(); + case kIOPM_ParentUpSetPowerState_Delayed: + ParentUpSetPowerState_Delayed(); break; - case IOPMparent_up_6: - parent_up_6(); + case kIOPM_ParentUpAcknowledgePowerChange_Delayed: + ParentUpAcknowledgePowerChange_Delayed(); break; } } - //********************************************************************************* // settleTimerExpired // @@ -2718,19 +3112,21 @@ void IOService::all_acked ( void ) void IOService::settleTimerExpired ( void ) { - if ( ! initialized ) { - return; // we're unloading + if ( ! 
initialized ) + { + // we're unloading + return; } switch (priv->machine_state) { - case IOPMour_prechange_3: - our_prechange_3(); + case kIOPM_OurChangeNotifyInterestedDriversDidChange: + OurChangeNotifyInterestedDriversDidChange(); break; - case IOPMparent_down_3: - parent_down_3(); + case kIOPM_ParentDownNotifyDidChangeAndAcknowledgeChange_Delayed: + ParentDownNotifyDidChangeAndAcknowledgeChange_Delayed(); break; - case IOPMparent_up_5: - parent_up_5(); + case kIOPM_ParentUpNotifyInterestedDriversDidChange_Delayed: + ParentUpNotifyInterestedDriversDidChange_Delayed(); break; } } @@ -2745,20 +3141,28 @@ void IOService::settleTimerExpired ( void ) unsigned long IOService::compute_settle_time ( void ) { - unsigned long totalTime; - unsigned long i; + unsigned long totalTime; + unsigned long i; - totalTime = 0; // compute total time to attain the new state + // compute total time to attain the new state + totalTime = 0; i = pm_vars->myCurrentState; - if ( priv->head_note_state < pm_vars->myCurrentState ) { // we're lowering power - while ( i > priv->head_note_state ) { + + // we're lowering power + if ( priv->head_note_state < pm_vars->myCurrentState ) + { + while ( i > priv->head_note_state ) + { totalTime += pm_vars->thePowerStates[i].settleDownTime; i--; } } - if ( priv->head_note_state > pm_vars->myCurrentState ) { // we're raising power - while ( i < priv->head_note_state ) { + // we're raising power + if ( priv->head_note_state > pm_vars->myCurrentState ) + { + while ( i < priv->head_note_state ) + { totalTime += pm_vars->thePowerStates[i+1].settleUpTime; i++; } @@ -2802,26 +3206,33 @@ void IOService::ack_timer_ticked ( void ) { IOPMinformee * nextObject; - if ( ! initialized ) { - return; // we're unloading + if ( ! initialized ) + { + // we're unloading + return; } - if (! acquire_lock() ) { + if (! acquire_lock() ) + { return; } switch (priv->machine_state) { - case IOPMour_prechange_2: - case IOPMparent_down_5: - case IOPMparent_up_4: - if ( priv->driver_timer != 0 ) { // are we waiting for our driver to make its change? - priv->driver_timer -= 1; // yes, tick once - if ( priv->driver_timer == 0 ) { // it's tardy, we'll go on without it + case kIOPM_OurChangeWaitForPowerSettle: + case kIOPM_ParentDownWaitForPowerSettle_Delayed: + case kIOPM_ParentUpWaitForSettleTime_Delayed: + // are we waiting for our driver to make its change? + if ( priv->driver_timer != 0 ) { + // yes, tick once + priv->driver_timer -= 1; + // it's tardy, we'll go on without it + if ( priv->driver_timer == 0 ) + { IOUnlock(priv->our_lock); pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogCtrlDriverTardy,0,0); driver_acked(); - } - else { // still waiting, set timer again + } else { + // still waiting, set timer again start_ack_timer(); IOUnlock(priv->our_lock); } @@ -2831,103 +3242,136 @@ void IOService::ack_timer_ticked ( void ) } break; - case IOPMour_prechange_1: - case IOPMour_prechange_4: - case IOPMparent_down_4: - case IOPMparent_down_6: - case IOPMparent_up_0: - case IOPMparent_up_6: - if (priv->head_note_pendingAcks != 0 ) { // are we waiting for interested parties to acknowledge? 
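//*********************************************************************************
// Illustrative sketch: compute_settle_time()
//
// The settle time is just the sum of the per-state settle times across every
// state the device will pass through: settleDownTime of each state left when
// lowering power, settleUpTime of each state entered when raising it. An
// equivalent standalone form; the PowerState fields mirror the source, the
// rest is invented for illustration.
//*********************************************************************************

struct PowerState { unsigned long settleDownTime, settleUpTime; };

unsigned long computeSettleTime(unsigned long current, unsigned long target,
                                const PowerState states[])
{
    unsigned long total = 0;
    for (unsigned long i = current; i > target; i--)    // lowering power
        total += states[i].settleDownTime;
    for (unsigned long i = current; i < target; i++)    // raising power
        total += states[i + 1].settleUpTime;
    return total;
}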
- nextObject = priv->interestedDrivers->firstInList(); // yes, go through the list of interested drivers - while ( nextObject != NULL ) { // and check each one - if ( nextObject->timer > 0 ) { + case kIOPM_OurChangeSetPowerState: + case kIOPM_OurChangeFinish: + case kIOPM_ParentDownSetPowerState_Delayed: + case kIOPM_ParentDownAcknowledgeChange_Delayed: + case kIOPM_ParentUpSetPowerState_Delayed: + case kIOPM_ParentUpAcknowledgePowerChange_Delayed: + // are we waiting for interested parties to acknowledge? + if (priv->head_note_pendingAcks != 0 ) + { + // yes, go through the list of interested drivers + nextObject = priv->interestedDrivers->firstInList(); + // and check each one + while ( nextObject != NULL ) + { + if ( nextObject->timer > 0 ) + { nextObject->timer -= 1; - if ( nextObject->timer == 0 ) { // this one should have acked by now + // this one should have acked by now + if ( nextObject->timer == 0 ) + { pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogIntDriverTardy,0,0); - kprintf("interested driver tardy: %s\n",nextObject->whatObject->getName()); + //kprintf("interested driver tardy: %s\n",nextObject->whatObject->getName()); priv->head_note_pendingAcks -= 1; } } nextObject = priv->interestedDrivers->nextInList(nextObject); } - if ( priv->head_note_pendingAcks == 0 ) { // is that the last? + + // is that the last? + if ( priv->head_note_pendingAcks == 0 ) + { IOUnlock(priv->our_lock); - all_acked(); // yes, we can continue - } - else { // no, set timer again + // yes, we can continue + all_acked(); + } else { + // no, set timer again start_ack_timer(); IOUnlock(priv->our_lock); } - } - else { + } else { IOUnlock(priv->our_lock); } break; - case IOPMparent_down_0: // apps didn't respond to parent-down notification + // apps didn't respond to parent-down notification + case kIOPM_ParentDownTellPriorityClientsPowerDown_Immediate: IOUnlock(priv->our_lock); IOLockLock(priv->flags_lock); - if (pm_vars->responseFlags) { - pm_vars->responseFlags->release(); // get rid of this stuff + if (pm_vars->responseFlags) + { + // get rid of this stuff + pm_vars->responseFlags->release(); pm_vars->responseFlags = NULL; } IOLockUnlock(priv->flags_lock); pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogClientTardy,0,5); - parent_down_04(); // carry on with the change + // carry on with the change + ParentDownTellPriorityClientsPowerDown_Delayed(); break; - case IOPMparent_down_05: + case kIOPM_ParentDownNotifyInterestedDriversWillChange_Delayed: IOUnlock(priv->our_lock); IOLockLock(priv->flags_lock); - if (pm_vars->responseFlags) { - pm_vars->responseFlags->release(); // get rid of this stuff + if (pm_vars->responseFlags) + { + // get rid of this stuff + pm_vars->responseFlags->release(); pm_vars->responseFlags = NULL; } IOLockUnlock(priv->flags_lock); pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogClientTardy,0,1); - parent_down_05(); // carry on with the change + // carry on with the change + ParentDownNotifyInterestedDriversWillChange_Delayed(); break; - case IOPMour_prechange_03: // apps didn't respond to our power-down request + case kIOPM_OurChangeTellClientsPowerDown: + // apps didn't respond to our power-down request IOUnlock(priv->our_lock); IOLockLock(priv->flags_lock); - if (pm_vars->responseFlags) { - pm_vars->responseFlags->release(); // get rid of this stuff + if (pm_vars->responseFlags) + { + // get rid of this stuff + pm_vars->responseFlags->release(); pm_vars->responseFlags = NULL; } IOLockUnlock(priv->flags_lock); 
pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogClientTardy,0,2); - tellNoChangeDown(priv->head_note_state); // rescind the request - priv->head_note_flags |= IOPMNotDone; // mark the change note un-actioned - all_done(); // and we're done + // rescind the request + tellNoChangeDown(priv->head_note_state); + // mark the change note un-actioned + priv->head_note_flags |= IOPMNotDone; + // and we're done + all_done(); break; - case IOPMour_prechange_04: // clients didn't respond to our power-down note + case kIOPM_OurChangeTellPriorityClientsPowerDown: + // clients didn't respond to our power-down note IOUnlock(priv->our_lock); IOLockLock(priv->flags_lock); - if (pm_vars->responseFlags) { - pm_vars->responseFlags->release(); // get rid of this stuff + if (pm_vars->responseFlags) + { + // get rid of this stuff + pm_vars->responseFlags->release(); pm_vars->responseFlags = NULL; } IOLockUnlock(priv->flags_lock); pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogClientTardy,0,4); - our_prechange_04(); // carry on with the change + // carry on with the change + OurChangeTellPriorityClientsPowerDown(); break; - case IOPMour_prechange_05: // apps didn't respond to our power-down notification + case kIOPM_OurChangeNotifyInterestedDriversWillChange: + // apps didn't respond to our power-down notification IOUnlock(priv->our_lock); IOLockLock(priv->flags_lock); - if (pm_vars->responseFlags) { - pm_vars->responseFlags->release(); // get rid of this stuff + if (pm_vars->responseFlags) + { + // get rid of this stuff + pm_vars->responseFlags->release(); pm_vars->responseFlags = NULL; } IOLockUnlock(priv->flags_lock); pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogClientTardy,0,3); - our_prechange_05(); // carry on with the change + // carry on with the change + OurChangeNotifyInterestedDriversWillChange(); break; default: - IOUnlock(priv->our_lock); // not waiting for acks + // not waiting for acks + IOUnlock(priv->our_lock); break; } } @@ -2940,7 +3384,7 @@ void IOService::ack_timer_ticked ( void ) void IOService::start_ack_timer ( void ) { - AbsoluteTime deadline; + AbsoluteTime deadline; clock_interval_to_deadline(ACK_TIMER_PERIOD, kNanosecondScale, &deadline); @@ -2987,42 +3431,62 @@ static void settle_timer_expired ( thread_call_param_t us) IOReturn IOService::add_child_to_active_change ( IOPowerConnection * newObject ) { - if (! acquire_lock() ) { + if (! acquire_lock() ) + { return IOPMNoErr; } - switch (priv->machine_state) { - case IOPMour_prechange_1: - case IOPMparent_down_4: - case IOPMparent_up_0: - priv->head_note_pendingAcks += 2; // one for this child and one to prevent - IOUnlock(priv->our_lock); // incoming acks from changing our state + switch (priv->machine_state) + { + case kIOPM_OurChangeSetPowerState: + case kIOPM_ParentDownSetPowerState_Delayed: + case kIOPM_ParentUpSetPowerState_Delayed: + // one for this child and one to prevent + priv->head_note_pendingAcks += 2; + // incoming acks from changing our state + IOUnlock(priv->our_lock); notifyChild(newObject, true); - if (! acquire_lock() ) { - --priv->head_note_pendingAcks; // put it back + if (! acquire_lock() ) + { + // put it back + --priv->head_note_pendingAcks; return IOPMNoErr; } - if ( --priv->head_note_pendingAcks == 0 ) { // are we still waiting for acks? - stop_ack_timer(); // no, stop the timer + // are we still waiting for acks? 
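//*********************************************************************************
// Illustrative sketch: the "+= 2" bump for late arrivals
//
// Adding a child or driver to a change already in flight bumps
// head_note_pendingAcks by two: one ack owed by the newcomer, and one guard
// so that acks arriving while the lock is dropped around the notification
// cannot drive the counter to zero and complete the change underneath us.
// The decrement after re-acquiring the lock retires the guard. In miniature
// (locking reduced to a std::mutex; names invented):
//*********************************************************************************

#include <mutex>

static std::mutex gLock;
static int gPendingAcks = 1;              // some change already in flight

void addLateParty(bool (*notifyAckedInline)())
{
    gLock.lock();
    gPendingAcks += 2;                    // one for the newcomer, one guard
    gLock.unlock();                       // notify may trigger reentrant acks
    bool ackedInline = notifyAckedInline();
    gLock.lock();
    if (ackedInline)
        --gPendingAcks;                   // newcomer answered synchronously
    if (--gPendingAcks == 0) {            // retire the guard; all acked?
        gLock.unlock();
        // continue the state machine here (all_acked() in the source)
        return;
    }
    gLock.unlock();
}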
+ if ( --priv->head_note_pendingAcks == 0 ) + { + // no, stop the timer + stop_ack_timer(); IOUnlock(priv->our_lock); - all_acked(); // and now we can continue + + // and now we can continue + all_acked(); return IOPMNoErr; } break; - case IOPMour_prechange_4: - case IOPMparent_down_6: - case IOPMparent_up_6: - priv->head_note_pendingAcks += 2; // one for this child and one to prevent - IOUnlock(priv->our_lock); // incoming acks from changing our state + case kIOPM_OurChangeFinish: + case kIOPM_ParentDownAcknowledgeChange_Delayed: + case kIOPM_ParentUpAcknowledgePowerChange_Delayed: + // one for this child and one to prevent + priv->head_note_pendingAcks += 2; + // incoming acks from changing our state + IOUnlock(priv->our_lock); notifyChild(newObject, false); - if (! acquire_lock() ) { - --priv->head_note_pendingAcks; // put it back + if (! acquire_lock() ) + { + // put it back + --priv->head_note_pendingAcks; return IOPMNoErr; } - if ( --priv->head_note_pendingAcks == 0 ) { // are we still waiting for acks? - stop_ack_timer(); // no, stop the timer + // are we still waiting for acks? + if ( --priv->head_note_pendingAcks == 0 ) + { + // no, stop the timer + stop_ack_timer(); IOUnlock(priv->our_lock); - all_acked(); // and now we can continue + + // and now we can continue + all_acked(); return IOPMNoErr; } break; @@ -3043,42 +3507,61 @@ IOReturn IOService::add_child_to_active_change ( IOPowerConnection * newObject ) IOReturn IOService::add_driver_to_active_change ( IOPMinformee * newObject ) { - if (! acquire_lock() ) { + if (! acquire_lock() ) + { return IOPMNoErr; } switch (priv->machine_state) { - case IOPMour_prechange_1: - case IOPMparent_down_4: - case IOPMparent_up_0: - priv->head_note_pendingAcks += 2; // one for this driver and one to prevent - IOUnlock(priv->our_lock); // incoming acks from changing our state - inform(newObject, true); // inform the driver - if (! acquire_lock() ) { - --priv->head_note_pendingAcks; // put it back + case kIOPM_OurChangeSetPowerState: + case kIOPM_ParentDownSetPowerState_Delayed: + case kIOPM_ParentUpSetPowerState_Delayed: + // one for this driver and one to prevent + priv->head_note_pendingAcks += 2; + // incoming acks from changing our state + IOUnlock(priv->our_lock); + // inform the driver + inform(newObject, true); + if (! acquire_lock() ) + { + // put it back + --priv->head_note_pendingAcks; return IOPMNoErr; } - if ( --priv->head_note_pendingAcks == 0 ) { // are we still waiting for acks? - stop_ack_timer(); // no, stop the timer + // are we still waiting for acks? + if ( --priv->head_note_pendingAcks == 0 ) + { + // no, stop the timer + stop_ack_timer(); IOUnlock(priv->our_lock); - all_acked(); // and now we can continue + + // and now we can continue + all_acked(); return IOPMNoErr; } break; - case IOPMour_prechange_4: - case IOPMparent_down_6: - case IOPMparent_up_6: - priv->head_note_pendingAcks += 2; // one for this driver and one to prevent - IOUnlock(priv->our_lock); // incoming acks from changing our state - inform(newObject, false); // inform the driver + case kIOPM_OurChangeFinish: + case kIOPM_ParentDownAcknowledgeChange_Delayed: + case kIOPM_ParentUpAcknowledgePowerChange_Delayed: + // one for this driver and one to prevent + priv->head_note_pendingAcks += 2; + // incoming acks from changing our state + IOUnlock(priv->our_lock); + // inform the driver + inform(newObject, false); if (! 
acquire_lock() ) { - --priv->head_note_pendingAcks; // put it back + // put it back + --priv->head_note_pendingAcks; return IOPMNoErr; } - if ( --priv->head_note_pendingAcks == 0 ) { // are we still waiting for acks? - stop_ack_timer(); // no, stop the timer + // are we still waiting for acks? + if ( --priv->head_note_pendingAcks == 0 ) { + // no, stop the timer + stop_ack_timer(); IOUnlock(priv->our_lock); - all_acked(); // and now we can continue + + // and now we can continue + all_acked(); return IOPMNoErr; } break; @@ -3118,30 +3601,44 @@ IOReturn IOService::start_parent_change ( unsigned long queue_head ) pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartParentChange, (unsigned long)priv->head_note_state,(unsigned long)pm_vars->myCurrentState); - ask_parent( priv->ourDesiredPowerState); // if we need something and haven't told the parent, do so + // if we need something and haven't told the parent, do so + ask_parent( priv->ourDesiredPowerState); - if ( priv->head_note_state < pm_vars->myCurrentState ) { // power domain is lowering + // power domain is lowering + if ( priv->head_note_state < pm_vars->myCurrentState ) + { setParentInfo(priv->changeList->changeNote[priv->head_note].singleParentState,priv->head_note_parent); - priv->initial_change = false; - priv->machine_state = IOPMparent_down_0; // tell apps and kernel clients - if ( tellChangeDown1(priv->head_note_state) ) { // are we waiting for responses? - return parent_down_0(); // no, notify priority clients + priv->initial_change = false; + // tell apps and kernel clients + priv->machine_state = kIOPM_ParentDownTellPriorityClientsPowerDown_Immediate; + + // are we waiting for responses? + if ( tellChangeDown1(priv->head_note_state) ) + { + // no, notify priority clients + return ParentDownTellPriorityClientsPowerDown_Immediate(); } - return IOPMWillAckLater; // yes + // yes + return IOPMWillAckLater; } - if ( priv->head_note_state > pm_vars->myCurrentState ) { // parent is raising power, we may or may not - if ( priv->ourDesiredPowerState > pm_vars->myCurrentState ) { - if ( priv->ourDesiredPowerState < priv->head_note_state ) { - priv->head_note_state = priv->ourDesiredPowerState; // we do, but not all the way - priv->imminentState = priv->head_note_state; - priv->head_note_outputFlags = pm_vars->thePowerStates[priv->head_note_state].outputPowerCharacter; - priv->head_note_capabilityFlags = pm_vars->thePowerStates[priv->head_note_state].capabilityFlags; - pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAmendParentChange,(unsigned long)priv->head_note_state,0); - } - } - else { - priv->head_note_state = pm_vars->myCurrentState; // we don't + // parent is raising power, we may or may not + if ( priv->head_note_state > pm_vars->myCurrentState ) + { + if ( priv->ourDesiredPowerState > pm_vars->myCurrentState ) + { + if ( priv->ourDesiredPowerState < priv->head_note_state ) + { + // we do, but not all the way + priv->head_note_state = priv->ourDesiredPowerState; + priv->imminentState = priv->head_note_state; + priv->head_note_outputFlags = pm_vars->thePowerStates[priv->head_note_state].outputPowerCharacter; + priv->head_note_capabilityFlags = pm_vars->thePowerStates[priv->head_note_state].capabilityFlags; + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAmendParentChange,(unsigned long)priv->head_note_state,0); + } + } else { + // we don't + priv->head_note_state = pm_vars->myCurrentState; priv->imminentState = priv->head_note_state; priv->head_note_outputFlags = 
pm_vars->thePowerStates[priv->head_note_state].outputPowerCharacter; priv->head_note_capabilityFlags = pm_vars->thePowerStates[priv->head_note_state].capabilityFlags; @@ -3150,17 +3647,21 @@ IOReturn IOService::start_parent_change ( unsigned long queue_head ) } if ( (priv->head_note_state > pm_vars->myCurrentState) && - (priv->head_note_flags & IOPMDomainDidChange) ) { // changing up + (priv->head_note_flags & IOPMDomainDidChange) ) + { + // changing up priv->initial_change = false; - priv->machine_state = IOPMparent_up_0; - if ( notifyAll(true) == IOPMAckImplied ) { - return parent_up_1(); - } - return IOPMWillAckLater; // they didn't all ack + priv->machine_state = kIOPM_ParentUpSetPowerState_Delayed; + if ( notifyAll(true) == IOPMAckImplied ) { + return ParentUpSetPowerState_Immediate(); + } + // they didn't all ack + return IOPMWillAckLater; } all_done(); - return IOPMAckImplied; // a null change or power will go up + // a null change or power will go up + return IOPMAckImplied; } @@ -3193,54 +3694,87 @@ void IOService::start_our_change ( unsigned long queue_head ) pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartDeviceChange, (unsigned long)priv->head_note_state,(unsigned long)pm_vars->myCurrentState); - if ( priv->head_note_capabilityFlags & IOPMNotAttainable ) { // can our driver switch to the new state? - if ( ! priv->we_are_root ) { // no, ask the parent to do it then + // can our driver switch to the new state? + if ( priv->head_note_capabilityFlags & IOPMNotAttainable ) + { + // no, ask the parent to do it then + if ( ! priv->we_are_root ) + { ask_parent(priv->head_note_state); } - priv-> head_note_flags |= IOPMNotDone; // mark the change note un-actioned - all_done(); // and we're done + // mark the change note un-actioned + priv-> head_note_flags |= IOPMNotDone; + // and we're done + all_done(); return; } - // is there enough power in the domain? - if ( (pm_vars->maxCapability < priv->head_note_state) && (! priv->we_are_root) ) { - if ( ! priv->we_are_root ) { // no, ask the parent to raise it + + // is there enough power in the domain? + if ( (pm_vars->maxCapability < priv->head_note_state) && (! priv->we_are_root) ) + { + // no, ask the parent to raise it + if ( ! priv->we_are_root ) + { ask_parent(priv->head_note_state); } - priv->head_note_flags |= IOPMNotDone; // no, mark the change note un-actioned - all_done(); // and we're done - return; // till the parent raises power + // no, mark the change note un-actioned + priv->head_note_flags |= IOPMNotDone; + // and we're done + // till the parent raises power + all_done(); + return; } - if ( ! priv->initial_change ) { - if ( priv->head_note_state == pm_vars->myCurrentState ) { - all_done(); // we initiated a null change; forget it + if ( ! priv->initial_change ) + { + if ( priv->head_note_state == pm_vars->myCurrentState ) + { + // we initiated a null change; forget it + all_done(); return; } } priv->initial_change = false; - if ( priv->head_note_state < pm_vars->myCurrentState ) { // dropping power? - priv->machine_state = IOPMour_prechange_03; // yes, in case we have to wait for acks + // dropping power? 
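//*********************************************************************************
// start_parent_change() and start_our_change() around this point share one
// continuation idiom: machine_state is set to the step the change must resume
// at *before* anyone is notified, so whichever path finishes last -- a
// synchronous notifyAll() returning IOPMAckImplied, a late ack, or the ack
// timer -- lands in the same continuation.  xnu splits each continuation into
// _Immediate/_Delayed variants; the stand-alone sketch below collapses them
// into one.  All names are hypothetical, and the two return codes merely
// stand in for IOPMAckImplied / IOPMWillAckLater.
//*********************************************************************************

enum MachineState { kIdle, kSetPowerState, kFinish };

struct ContinuationSketch {
    MachineState machine_state = kIdle;
    bool everyoneAckedSynchronously = true;   // stand-in for notifyAll()'s result

    void continueAtSetPowerState( void )
    {
        machine_state = kFinish;              // carry on with the change
    }

    // Delayed path: the last incoming ack (or the ack timer) resumes the
    // change wherever machine_state says it stopped.
    void lastAckArrived( void )
    {
        if ( machine_state == kSetPowerState )
            continueAtSetPowerState();
    }

    int start( void )
    {
        machine_state = kSetPowerState;       // record the resume point first
        if ( everyoneAckedSynchronously ) {
            continueAtSetPowerState();        // immediate path
            return 0;                         // ~ IOPMAckImplied
        }
        return 1;                             // ~ IOPMWillAckLater
    }
};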
+ if ( priv->head_note_state < pm_vars->myCurrentState ) + { + // yes, in case we have to wait for acks + priv->machine_state = kIOPM_OurChangeTellClientsPowerDown; pm_vars->doNotPowerDown = false; - pm_vars->outofbandparameter = kNotifyApps; // ask apps and kernel clients if we can drop power - if ( askChangeDown(priv->head_note_state) ) { - if ( pm_vars->doNotPowerDown ) { // don't have to wait, did any clients veto? - tellNoChangeDown(priv->head_note_state); // yes, rescind the warning - priv-> head_note_flags |= IOPMNotDone; // mark the change note un-actioned - all_done(); // and we're done - } - else { - our_prechange_03(); // no, tell'em we're dropping power + + // ask apps and kernel clients if we can drop power + pm_vars->outofbandparameter = kNotifyApps; + if ( askChangeDown(priv->head_note_state) ) + { + // don't have to wait, did any clients veto? + if ( pm_vars->doNotPowerDown ) + { + // yes, rescind the warning + tellNoChangeDown(priv->head_note_state); + // mark the change note un-actioned + priv-> head_note_flags |= IOPMNotDone; + // and we're done + all_done(); + } else { + // no, tell'em we're dropping power + OurChangeTellClientsPowerDown(); } } - } - else { - if ( ! priv->we_are_root ) { // we are raising power - ask_parent(priv->head_note_state); // if this changes our power requirement, tell the parent + } else { + // we are raising power + if ( ! priv->we_are_root ) + { + // if this changes our power requirement, tell the parent + ask_parent(priv->head_note_state); } - priv->machine_state = IOPMour_prechange_1; // in case they don't all ack - if ( notifyAll(true) == IOPMAckImplied ) { // notify interested drivers and children - our_prechange_1(); + // in case they don't all ack + priv->machine_state = kIOPM_OurChangeSetPowerState; + + // notify interested drivers and children + if ( notifyAll(true) == IOPMAckImplied ) + { + OurChangeSetPowerState(); } } } @@ -3255,16 +3789,18 @@ void IOService::start_our_change ( unsigned long queue_head ) IOReturn IOService::ask_parent ( unsigned long requestedState ) { - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; - IOService * parent; - unsigned long ourRequest = pm_vars->thePowerStates[requestedState].inputPowerRequirement; + OSIterator *iter; + OSObject *next; + IOPowerConnection *connection; + IOService *parent; + unsigned long ourRequest = pm_vars->thePowerStates[requestedState].inputPowerRequirement; - if ( pm_vars->thePowerStates[requestedState].capabilityFlags & (kIOPMChildClamp | kIOPMPreventIdleSleep) ) { + if ( pm_vars->thePowerStates[requestedState].capabilityFlags & (kIOPMChildClamp | kIOPMPreventIdleSleep) ) + { ourRequest |= kIOPMPreventIdleSleep; } - if ( pm_vars->thePowerStates[requestedState].capabilityFlags & (kIOPMChildClamp2 | kIOPMPreventSystemSleep) ) { + if ( pm_vars->thePowerStates[requestedState].capabilityFlags & (kIOPMChildClamp2 | kIOPMPreventSystemSleep) ) + { ourRequest |= kIOPMPreventSystemSleep; } @@ -3275,19 +3811,24 @@ IOReturn IOService::ask_parent ( unsigned long requestedState ) return IOPMNoErr; } - if ( priv->we_are_root ) { + if ( priv->we_are_root ) + { return IOPMNoErr; } priv->previousRequest = ourRequest; iter = getParentIterator(gIOPowerPlane); - if ( iter ) { - while ( (next = iter->getNextObject()) ) { - if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + if ( iter ) + { + while ( (next = iter->getNextObject()) ) + { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) + { parent = (IOService 
*)connection->copyParentEntry(gIOPowerPlane); if ( parent ) { - if ( parent->requestPowerDomainState(ourRequest,connection,IOPMLowestState)!= IOPMNoErr ) { + if ( parent->requestPowerDomainState(ourRequest,connection,IOPMLowestState)!= IOPMNoErr ) + { pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogRequestDenied, (unsigned long)priv->previousRequest,0); } @@ -3314,30 +3855,42 @@ IOReturn IOService::instruct_driver ( unsigned long newState ) { IOReturn return_code; - if ( pm_vars->thePowerStates[newState].capabilityFlags & IOPMNotAttainable ) { // can our driver switch to the desired state? - return IOPMAckImplied; // no, so don't try + // can our driver switch to the desired state? + if ( pm_vars->thePowerStates[newState].capabilityFlags & IOPMNotAttainable ) + { + // no, so don't try + return IOPMAckImplied; } + priv->driver_timer = -1; - pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogProgramHardware,newState,0); - ioSPMTraceStart(IOPOWER_STATE, * (int *) this, (int) newState); - return_code = pm_vars->theControllingDriver->setPowerState( newState,this ); // yes, instruct it + // yes, instruct it + ioSPMTraceStart(IOPOWER_STATE, * (int *) this, (int) newState); + return_code = pm_vars->theControllingDriver->setPowerState( newState,this ); ioSPMTraceEnd(IOPOWER_STATE, * (int *) this, (int) newState, (int) return_code); - if ( return_code == IOPMAckImplied ) { // it finished + // it finished + if ( return_code == IOPMAckImplied ) + { priv->driver_timer = 0; return IOPMAckImplied; } - if ( priv->driver_timer == 0 ) { // it acked behind our back + // it acked behind our back + if ( priv->driver_timer == 0 ) + { return IOPMAckImplied; } - if ( return_code < 0 ) { // somebody goofed + // somebody goofed + if ( return_code < 0 ) + { return IOPMAckImplied; } - priv->driver_timer = (return_code * ns_per_us / ACK_TIMER_PERIOD) + 1; // it didn't finish + + // it didn't finish + priv->driver_timer = (return_code / ( ACK_TIMER_PERIOD / ns_per_us )) + 1; return IOPMWillAckLater; } @@ -3364,11 +3917,13 @@ bool IOService::acquire_lock ( void ) } IOTakeLock(priv->our_lock); - if ( current_change_note == priv->head_note ) { + if ( current_change_note == priv->head_note ) + { return TRUE; - } - else { // we blocked and something changed radically - IOUnlock(priv->our_lock); // so there's nothing to do any more + } else { + // we blocked and something changed radically + // so there's nothing to do any more + IOUnlock(priv->our_lock); return FALSE; } } @@ -3452,9 +4007,9 @@ bool IOService::tellChangeDown ( unsigned long stateNum ) bool IOService::tellClientsWithResponse ( int messageType ) { - struct context theContext; - AbsoluteTime deadline; - OSBoolean * aBool; + struct context theContext; + AbsoluteTime deadline; + OSBoolean *aBool; pm_vars->responseFlags = OSArray::withCapacity( 1 ); pm_vars->serialNumber += 1; @@ -3470,8 +4025,11 @@ bool IOService::tellClientsWithResponse ( int messageType ) theContext.stateFlags = priv->head_note_capabilityFlags; IOLockLock(priv->flags_lock); - aBool = OSBoolean::withBoolean(false); // position zero is false to - theContext.responseFlags->setObject(0,aBool); // prevent allowCancelCommon from succeeding + + // position zero is false to + // prevent allowCancelCommon from succeeding + aBool = OSBoolean::withBoolean(false); + theContext.responseFlags->setObject(0,aBool); aBool->release(); IOLockUnlock(priv->flags_lock); @@ -3485,28 +4043,35 @@ bool IOService::tellClientsWithResponse ( int messageType ) break; } - if (! acquire_lock() ) { + if (! 
acquire_lock() ) + { return true; } IOLockLock(priv->flags_lock); - aBool = OSBoolean::withBoolean(true); // now fix position zero + // now fix position zero + aBool = OSBoolean::withBoolean(true); theContext.responseFlags->replaceObject(0,aBool); aBool->release(); IOLockUnlock(priv->flags_lock); - if ( ! checkForDone() ) { // we have to wait for somebody + // do we have to wait for somebody? + if ( ! checkForDone() ) + { + // yes, start the ackTimer pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,theContext.maxTimeRequested,0); clock_interval_to_deadline(theContext.maxTimeRequested / 1000, kMillisecondScale, &deadline); thread_call_enter_delayed(priv->ackTimer, deadline); - IOUnlock(priv->our_lock); // yes + IOUnlock(priv->our_lock); return false; } IOUnlock(priv->our_lock); IOLockLock(priv->flags_lock); - pm_vars->responseFlags->release(); // everybody responded + + // everybody responded + pm_vars->responseFlags->release(); pm_vars->responseFlags = NULL; IOLockUnlock(priv->flags_lock); @@ -3522,18 +4087,18 @@ bool IOService::tellClientsWithResponse ( int messageType ) //********************************************************************************* void tellAppWithResponse ( OSObject * object, void * context) { - struct context * theContext = (struct context *)context; - UInt32 refcon; - OSBoolean * aBool; + struct context *theContext = (struct context *)context; + UInt32 refcon; + OSBoolean *aBool; - if( OSDynamicCast( IOService, object) ) { + if( OSDynamicCast( IOService, object) ) + { IOLockLock(theContext->flags_lock); aBool = OSBoolean::withBoolean(true); theContext->responseFlags->setObject(theContext->counter,aBool); aBool->release(); IOLockUnlock(theContext->flags_lock); - } - else { + } else { refcon = ((theContext->serialNumber & 0xFFFF)<<16) + (theContext->counter & 0xFFFF); IOLockLock(theContext->flags_lock); aBool = OSBoolean::withBoolean(false); @@ -3541,7 +4106,8 @@ void tellAppWithResponse ( OSObject * object, void * context) aBool->release(); IOLockUnlock(theContext->flags_lock); theContext->us->messageClient(theContext->msgType,object,(void *)refcon); - if ( theContext->maxTimeRequested < k30seconds ) { + if ( theContext->maxTimeRequested < k30seconds ) + { theContext->maxTimeRequested = k30seconds; } } @@ -3561,12 +4127,12 @@ void tellAppWithResponse ( OSObject * object, void * context) //********************************************************************************* void tellClientWithResponse ( OSObject * object, void * context) { - struct context * theContext = (struct context *)context; - IOPowerStateChangeNotification notify; - UInt32 refcon; - IOReturn retCode; - OSBoolean * aBool; - OSObject * theFlag; + struct context *theContext = (struct context *)context; + IOPowerStateChangeNotification notify; + UInt32 refcon; + IOReturn retCode; + OSBoolean *aBool; + OSObject *theFlag; refcon = ((theContext->serialNumber & 0xFFFF)<<16) + (theContext->counter & 0xFFFF); IOLockLock(theContext->flags_lock); @@ -3580,30 +4146,40 @@ void tellClientWithResponse ( OSObject * object, void * context) notify.stateNumber = theContext->stateNumber; notify.stateFlags = theContext->stateFlags; retCode = theContext->us->messageClient(theContext->msgType,object,(void *)¬ify); - if ( retCode == kIOReturnSuccess ) { - if ( notify.returnValue == 0 ) { // client doesn't want time to respond + if ( retCode == kIOReturnSuccess ) + { + if ( notify.returnValue == 0 ) + { + // client doesn't want time to respond IOLockLock(theContext->flags_lock); aBool = 
OSBoolean::withBoolean(true); - theContext->responseFlags->replaceObject(theContext->counter,aBool); // so set its flag true + // so set its flag true + theContext->responseFlags->replaceObject(theContext->counter,aBool); aBool->release(); IOLockUnlock(theContext->flags_lock); - } - else { + } else { IOLockLock(theContext->flags_lock); - theFlag = theContext->responseFlags->getObject(theContext->counter); // it does want time, and it hasn't - if ( theFlag != 0 ) { // responded yet - if ( ((OSBoolean *)theFlag)->isFalse() ) { // so note its time requirement - if ( theContext->maxTimeRequested < notify.returnValue ) { + + // it does want time, and it hasn't responded yet + theFlag = theContext->responseFlags->getObject(theContext->counter); + if ( theFlag != 0 ) + { + if ( ((OSBoolean *)theFlag)->isFalse() ) + { + // so note its time requirement + if ( theContext->maxTimeRequested < notify.returnValue ) + { theContext->maxTimeRequested = notify.returnValue; } } } IOLockUnlock(theContext->flags_lock); } - } - else { // not a client of ours + } else { + // not a client of ours IOLockLock(theContext->flags_lock); - aBool = OSBoolean::withBoolean(true); // so we won't be waiting for response + // so we won't be waiting for response + aBool = OSBoolean::withBoolean(true); theContext->responseFlags->replaceObject(theContext->counter,aBool); aBool->release(); IOLockUnlock(theContext->flags_lock); @@ -3670,8 +4246,8 @@ void IOService::tellClients ( int messageType ) //********************************************************************************* void tellClient ( OSObject * object, void * context) { - struct context * theContext = (struct context *)context; - IOPowerStateChangeNotification notify; + struct context *theContext = (struct context *)context; + IOPowerStateChangeNotification notify; notify.powerRef = (void *) 0; notify.returnValue = 0; @@ -3688,20 +4264,25 @@ void tellClient ( OSObject * object, void * context) // ********************************************************************************** bool IOService::checkForDone ( void ) { - int i = 0; - OSObject * theFlag; + int i = 0; + OSObject *theFlag; IOLockLock(priv->flags_lock); - if ( pm_vars->responseFlags == NULL ) { + if ( pm_vars->responseFlags == NULL ) + { IOLockUnlock(priv->flags_lock); return true; } - for ( i = 0; ; i++ ) { + + for ( i = 0; ; i++ ) + { theFlag = pm_vars->responseFlags->getObject(i); - if ( theFlag == NULL ) { + if ( theFlag == NULL ) + { break; } - if ( ((OSBoolean *)theFlag)->isFalse() ) { + if ( ((OSBoolean *)theFlag)->isFalse() ) + { IOLockUnlock(priv->flags_lock); return false; } @@ -3726,24 +4307,28 @@ bool IOService::responseValid ( unsigned long x ) serialComponent = (refcon>>16) & 0xFFFF; ordinalComponent = refcon & 0xFFFF; - if ( serialComponent != pm_vars->serialNumber ) { + if ( serialComponent != pm_vars->serialNumber ) + { return false; } IOLockLock(priv->flags_lock); - if ( pm_vars->responseFlags == NULL ) { + if ( pm_vars->responseFlags == NULL ) + { IOLockUnlock(priv->flags_lock); return false; } theFlag = pm_vars->responseFlags->getObject(ordinalComponent); - if ( theFlag == 0 ) { + if ( theFlag == 0 ) + { IOLockUnlock(priv->flags_lock); return false; } - if ( ((OSBoolean *)theFlag)->isFalse() ) { + if ( ((OSBoolean *)theFlag)->isFalse() ) + { aBool = OSBoolean::withBoolean(true); pm_vars->responseFlags->replaceObject(ordinalComponent,aBool); aBool->release(); @@ -3766,8 +4351,10 @@ bool IOService::responseValid ( unsigned long x ) // 
********************************************************************************** IOReturn IOService::allowPowerChange ( unsigned long refcon ) { - if ( ! initialized ) { - return kIOReturnSuccess; // we're unloading + if ( ! initialized ) + { + // we're unloading + return kIOReturnSuccess; } return pm_vars->PMcommandGate->runAction(serializedAllowPowerChange,(void *)refcon); @@ -3781,9 +4368,12 @@ IOReturn serializedAllowPowerChange ( OSObject *owner, void * refcon, void *, vo IOReturn IOService::serializedAllowPowerChange2 ( unsigned long refcon ) { - if ( ! responseValid(refcon) ) { // response valid? + // response valid? + if ( ! responseValid(refcon) ) + { pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr5,refcon,0); - return kIOReturnSuccess; // no, just return + // no, just return + return kIOReturnSuccess; } pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogClientAcknowledge,refcon,0); @@ -3803,8 +4393,10 @@ IOReturn IOService::serializedAllowPowerChange2 ( unsigned long refcon ) // ********************************************************************************** IOReturn IOService::cancelPowerChange ( unsigned long refcon ) { - if ( ! initialized ) { - return kIOReturnSuccess; // we're unloading + if ( ! initialized ) + { + // we're unloading + return kIOReturnSuccess; } return pm_vars->PMcommandGate->runAction(serializedCancelPowerChange,(void *)refcon); @@ -3818,9 +4410,12 @@ IOReturn serializedCancelPowerChange ( OSObject *owner, void * refcon, void *, v IOReturn IOService::serializedCancelPowerChange2 ( unsigned long refcon ) { - if ( ! responseValid(refcon) ) { // response valid? + // response valid? + if ( ! responseValid(refcon) ) + { pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr5,refcon,0); - return kIOReturnSuccess; // no, just return + // no, just return + return kIOReturnSuccess; } pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogClientCancel,refcon,0); @@ -3836,46 +4431,62 @@ IOReturn IOService::serializedCancelPowerChange2 ( unsigned long refcon ) // ********************************************************************************** IOReturn IOService::allowCancelCommon ( void ) { - if (! acquire_lock() ) { + if (! acquire_lock() ) + { return kIOReturnSuccess; } - if ( checkForDone() ) { // is this the last response? - stop_ack_timer(); // yes, stop the timer + // is this the last response? + if ( checkForDone() ) + { + // yes, stop the timer + stop_ack_timer(); IOUnlock(priv->our_lock); IOLockLock(priv->flags_lock); - if ( pm_vars->responseFlags ) { + if ( pm_vars->responseFlags ) + { pm_vars->responseFlags->release(); pm_vars->responseFlags = NULL; } IOLockUnlock(priv->flags_lock); switch (priv->machine_state) { - case IOPMour_prechange_03: // our change, was it vetoed? - if ( ! pm_vars->doNotPowerDown ) { - our_prechange_03(); // no, we can continue - } - else { - tellNoChangeDown(priv->head_note_state); // yes, rescind the warning - priv->head_note_flags |= IOPMNotDone; // mark the change note un-actioned - all_done(); // and we're done + case kIOPM_OurChangeTellClientsPowerDown: + // our change, was it vetoed? + if ( ! 
pm_vars->doNotPowerDown ) + { + // no, we can continue + OurChangeTellClientsPowerDown(); + } else { + // yes, rescind the warning + tellNoChangeDown(priv->head_note_state); + // mark the change note un-actioned + priv->head_note_flags |= IOPMNotDone; + + // and we're done + all_done(); } break; - case IOPMour_prechange_04: - our_prechange_04(); + case kIOPM_OurChangeTellPriorityClientsPowerDown: + OurChangeTellPriorityClientsPowerDown(); break; - case IOPMour_prechange_05: - our_prechange_05(); // our change, continue + case kIOPM_OurChangeNotifyInterestedDriversWillChange: + // our change, continue + OurChangeNotifyInterestedDriversWillChange(); break; - case IOPMparent_down_0: - parent_down_04(); // parent change, continue + case kIOPM_ParentDownTellPriorityClientsPowerDown_Immediate: + // parent change, continue + ParentDownTellPriorityClientsPowerDown_Delayed(); break; - case IOPMparent_down_05: - parent_down_05(); // parent change, continue + case kIOPM_ParentDownNotifyInterestedDriversWillChange_Delayed: + // parent change, continue + ParentDownNotifyInterestedDriversWillChange_Delayed(); break; } + } else { + // not done yet + IOUnlock(priv->our_lock); } - IOUnlock(priv->our_lock); // not done yet return kIOReturnSuccess; } @@ -3917,8 +4528,10 @@ void IOService::clampPowerOn (unsigned long duration) void IOService::PM_Clamp_Timer_Expired (void) { - if ( ! initialized ) { - return; // we're unloading + if ( ! initialized ) + { + // we're unloading + return; } changePowerStateToPriv (0); @@ -3932,8 +4545,8 @@ void IOService::PM_Clamp_Timer_Expired (void) void c_PM_Clamp_Timer_Expired (OSObject * client, IOTimerEventSource *) { - if (client) - ((IOService *)client)->PM_Clamp_Timer_Expired (); + if (client) + ((IOService *)client)->PM_Clamp_Timer_Expired (); } @@ -3961,11 +4574,14 @@ unsigned long IOService::maxCapabilityForDomainState ( IOPMPowerFlags domainStat { int i; - if (pm_vars->theNumberOfPowerStates == 0 ) { + if (pm_vars->theNumberOfPowerStates == 0 ) + { return 0; } - for ( i = (pm_vars->theNumberOfPowerStates)-1; i >= 0; i-- ) { - if ( (domainState & pm_vars->thePowerStates[i].inputPowerRequirement) == pm_vars->thePowerStates[i].inputPowerRequirement ) { + for ( i = (pm_vars->theNumberOfPowerStates)-1; i >= 0; i-- ) + { + if ( (domainState & pm_vars->thePowerStates[i].inputPowerRequirement) == pm_vars->thePowerStates[i].inputPowerRequirement ) + { return i; } } @@ -3983,17 +4599,20 @@ unsigned long IOService::maxCapabilityForDomainState ( IOPMPowerFlags domainStat unsigned long IOService::initialPowerStateForDomainState ( IOPMPowerFlags domainState ) { - int i; + int i; - if (pm_vars->theNumberOfPowerStates == 0 ) { - return 0; - } - for ( i = (pm_vars->theNumberOfPowerStates)-1; i >= 0; i-- ) { - if ( (domainState & pm_vars->thePowerStates[i].inputPowerRequirement) == pm_vars->thePowerStates[i].inputPowerRequirement ) { - return i; - } - } - return 0; + if (pm_vars->theNumberOfPowerStates == 0 ) + { + return 0; + } + for ( i = (pm_vars->theNumberOfPowerStates)-1; i >= 0; i-- ) + { + if ( (domainState & pm_vars->thePowerStates[i].inputPowerRequirement) == pm_vars->thePowerStates[i].inputPowerRequirement ) + { + return i; + } + } + return 0; } @@ -4007,17 +4626,20 @@ unsigned long IOService::initialPowerStateForDomainState ( IOPMPowerFlags domain unsigned long IOService::powerStateForDomainState ( IOPMPowerFlags domainState ) { - int i; + int i; - if (pm_vars->theNumberOfPowerStates == 0 ) { - return 0; - } - for ( i = (pm_vars->theNumberOfPowerStates)-1; i >= 0; i-- ) { - 
if ( (domainState & pm_vars->thePowerStates[i].inputPowerRequirement) == pm_vars->thePowerStates[i].inputPowerRequirement ) { - return i; - } - } - return 0; + if (pm_vars->theNumberOfPowerStates == 0 ) + { + return 0; + } + for ( i = (pm_vars->theNumberOfPowerStates)-1; i >= 0; i-- ) + { + if ( (domainState & pm_vars->thePowerStates[i].inputPowerRequirement) == pm_vars->thePowerStates[i].inputPowerRequirement ) + { + return i; + } + } + return 0; } @@ -4172,7 +4794,7 @@ bool IOPMpriv::serialize(OSSerialize *s) const nextObject = interestedDrivers->nextInList(nextObject); } - if ( machine_state != IOPMfinished ) { + if ( machine_state != kIOPM_Finished ) { ptr += sprintf(ptr,"machine_state = %d, ",(unsigned int)machine_state); ptr += sprintf(ptr,"driver_timer = %d, ",(unsigned int)driver_timer); ptr += sprintf(ptr,"settle_time = %d, ",(unsigned int)settle_time); diff --git a/iokit/Kernel/IOServicePrivate.h b/iokit/Kernel/IOServicePrivate.h index d9a341f47..e079f765f 100644 --- a/iokit/Kernel/IOServicePrivate.h +++ b/iokit/Kernel/IOServicePrivate.h @@ -194,5 +194,8 @@ public: virtual OSObject * getNextObject(); }; +extern const OSSymbol * gIOConsoleUsersKey; +extern const OSSymbol * gIOConsoleSessionUIDKey; + #endif /* ! _IOKIT_IOSERVICEPRIVATE_H */ diff --git a/iokit/Kernel/IOStartIOKit.cpp b/iokit/Kernel/IOStartIOKit.cpp index 5336a3c73..9f5b2600b 100644 --- a/iokit/Kernel/IOStartIOKit.cpp +++ b/iokit/Kernel/IOStartIOKit.cpp @@ -49,69 +49,24 @@ extern void IOLibInit(void); #include -/*XXX power management hacks XXX*/ -#include -#include - -extern void *registerSleepWakeInterest( - void *callback, - void *target, - void *refCon); -/*XXX power management hacks XXX*/ - -static void -calend_wakeup_resynch( - thread_call_param_t p0, - thread_call_param_t p1) -{ - void IOKitResetTime(void); - - IOKitResetTime(); -} - -static thread_call_t calend_sleep_wake_call; - -static IOReturn -calend_sleep_wake_notif( - void *target, - void *refCon, - unsigned int messageType, - void *provider, - void *messageArg, - vm_size_t argSize) -{ - if (messageType != kIOMessageSystemHasPoweredOn) - return (kIOReturnUnsupported); - - if (calend_sleep_wake_call != NULL) - thread_call_enter(calend_sleep_wake_call); - - return (kIOReturnSuccess); -} - void IOKitResetTime( void ) { - mach_timespec_t t; + mach_timespec_t t; - t.tv_sec = 30; - t.tv_nsec = 0; - IOService::waitForService( - IOService::resourceMatching("IORTC"), &t ); + t.tv_sec = 30; + t.tv_nsec = 0; + IOService::waitForService( + IOService::resourceMatching("IORTC"), &t ); #ifndef i386 - IOService::waitForService( - IOService::resourceMatching("IONVRAM"), &t ); + IOService::waitForService( + IOService::resourceMatching("IONVRAM"), &t ); #endif - if (calend_sleep_wake_call == NULL) { - calend_sleep_wake_call = thread_call_allocate( - calend_wakeup_resynch, NULL); - - registerSleepWakeInterest((void *)calend_sleep_wake_notif, NULL, NULL); - } - clock_initialize_calendar(); } +// From +extern int debug_mode; void StartIOKit( void * p1, void * p2, void * p3, void * p4 ) { @@ -129,6 +84,11 @@ void StartIOKit( void * p1, void * p2, void * p3, void * p4 ) if( PE_parse_boot_arg( "io", &debugFlags )) gIOKitDebug = debugFlags; + + // Check for the log synchronous bit set in io + if (gIOKitDebug & kIOLogSynchronous) + debug_mode = true; + // // Have to start IOKit environment before we attempt to start // the C++ runtime environment. 
At some stage we have to clean up @@ -139,9 +99,6 @@ void StartIOKit( void * p1, void * p2, void * p3, void * p4 ) IOLibInit(); OSlibkernInit(); - IOLog("_cppInit done\n"); - - /***** * Declare the fake kmod_info structs for built-in components * that must be tracked as independent units for dependencies. diff --git a/iokit/Kernel/IOSyncer.cpp b/iokit/Kernel/IOSyncer.cpp index 2a9cf5465..c7772a938 100644 --- a/iokit/Kernel/IOSyncer.cpp +++ b/iokit/Kernel/IOSyncer.cpp @@ -79,7 +79,7 @@ void IOSyncer::free() OSObject::free(); } -IOReturn IOSyncer::wait(bool autoRelease = true) +IOReturn IOSyncer::wait(bool autoRelease) { IOInterruptState is = IOSimpleLockLockDisableInterrupt(guardLock); @@ -99,8 +99,7 @@ IOReturn IOSyncer::wait(bool autoRelease = true) return result; } -void IOSyncer::signal(IOReturn res = kIOReturnSuccess, - bool autoRelease = true) +void IOSyncer::signal(IOReturn res, bool autoRelease) { fResult = res; privateSignal(); diff --git a/iokit/Kernel/IOTimerEventSource.cpp b/iokit/Kernel/IOTimerEventSource.cpp index 3c86e6709..21dcf4691 100644 --- a/iokit/Kernel/IOTimerEventSource.cpp +++ b/iokit/Kernel/IOTimerEventSource.cpp @@ -106,7 +106,7 @@ IOTimerEventSource::timerEventSource(OSObject *inOwner, Action inAction) IOTimerEventSource *me = new IOTimerEventSource; if (me && !me->init(inOwner, inAction)) { - me->free(); + me->release(); return 0; } diff --git a/iokit/Kernel/IOUserClient.cpp b/iokit/Kernel/IOUserClient.cpp index 446932b6a..9739846e0 100644 --- a/iokit/Kernel/IOUserClient.cpp +++ b/iokit/Kernel/IOUserClient.cpp @@ -22,18 +22,12 @@ * * @APPLE_LICENSE_HEADER_END@ */ -/* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. - * - * HISTORY - * - * 14 Aug 98 sdouglas created. - * 08 Dec 98 sdouglas cpp. - */ + #include #include #include +#include #include #include #include @@ -41,6 +35,8 @@ #include +#include "IOServicePrivate.h" + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ // definitions we should get from osfmk @@ -89,6 +85,7 @@ public: OSObject * object; ipc_port_t port; UInt32 mscount; + UInt8 holdDestroy; static IOMachPort * portForObject( OSObject * obj, ipc_kobject_type_t type ); @@ -96,6 +93,8 @@ public: ipc_kobject_type_t type, mach_port_mscount_t * mscount ); static void releasePortForObject( OSObject * obj, ipc_kobject_type_t type ); + static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type ); + static OSDictionary * dictForType( ipc_kobject_type_t type ); static mach_port_name_t makeSendRightForTask( task_t task, @@ -196,7 +195,7 @@ bool IOMachPort::noMoreSendersForObject( OSObject * obj, dict->removeObject( (const OSSymbol *) obj ); else *mscount = machPort->mscount; - } + } obj->release(); } @@ -209,18 +208,37 @@ void IOMachPort::releasePortForObject( OSObject * obj, ipc_kobject_type_t type ) { OSDictionary * dict; + IOMachPort * machPort; IOTakeLock( gIOObjectPortLock); if( (dict = dictForType( type ))) { obj->retain(); - dict->removeObject( (const OSSymbol *) obj ); + machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj ); + if( machPort && !machPort->holdDestroy) + dict->removeObject( (const OSSymbol *) obj ); obj->release(); } IOUnlock( gIOObjectPortLock); } +void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type ) +{ + OSDictionary * dict; + IOMachPort * machPort; + + IOLockLock( gIOObjectPortLock ); + + if( (dict = dictForType( type ))) { + machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj ); + if( machPort) + machPort->holdDestroy = true; + } + + 
IOLockUnlock( gIOObjectPortLock ); +} + void IOUserClient::destroyUserReferences( OSObject * obj ) { IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT ); @@ -253,7 +271,6 @@ void IOUserClient::destroyUserReferences( OSObject * obj ) } obj->release(); IOUnlock( gIOObjectPortLock); - } mach_port_name_t IOMachPort::makeSendRightForTask( task_t task, @@ -271,6 +288,26 @@ void IOMachPort::free( void ) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +class IOUserNotification : public OSIterator +{ + OSDeclareDefaultStructors(IOUserNotification) + + IONotifier * holdNotify; + IOLock * lock; + +public: + + virtual bool init( void ); + virtual void free(); + + virtual void setNotification( IONotifier * obj ); + + virtual void reset(); + virtual bool isValid(); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + extern "C" { // functions called from osfmk/device/iokit_rpc.c @@ -315,15 +352,23 @@ iokit_client_died( io_object_t obj, ipc_port_t /* port */, { IOUserClient * client; IOMemoryMap * map; + IOUserNotification * notify; if( !IOMachPort::noMoreSendersForObject( obj, type, mscount )) return( kIOReturnNotReady ); - if( (IKOT_IOKIT_CONNECT == type) - && (client = OSDynamicCast( IOUserClient, obj ))) - client->clientDied(); - if( (map = OSDynamicCast( IOMemoryMap, obj ))) - map->taskDied(); + if( IKOT_IOKIT_CONNECT == type) + { + if( (client = OSDynamicCast( IOUserClient, obj ))) + client->clientDied(); + } + else if( IKOT_IOKIT_OBJECT == type) + { + if( (map = OSDynamicCast( IOMemoryMap, obj ))) + map->taskDied(); + else if( (notify = OSDynamicCast( IOUserNotification, obj ))) + notify->setNotification( 0 ); + } return( kIOReturnSuccess ); } @@ -332,39 +377,19 @@ iokit_client_died( io_object_t obj, ipc_port_t /* port */, /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -class IOUserNotification : public OSIterator +class IOServiceUserNotification : public IOUserNotification { - OSDeclareDefaultStructors(IOUserNotification) + OSDeclareDefaultStructors(IOServiceUserNotification) struct PingMsg { - mach_msg_header_t msgHdr; - OSNotificationHeader notifyHeader; + mach_msg_header_t msgHdr; + OSNotificationHeader notifyHeader; }; - PingMsg * pingMsg; - vm_size_t msgSize; - IONotifier * holdNotify; - IOLock * lock; - -public: - - virtual bool init( mach_port_t port, natural_t type, - OSAsyncReference reference, - vm_size_t messageSize ); - virtual void free(); - - virtual void setNotification( IONotifier * obj ); - - virtual void reset(); - virtual bool isValid(); -}; - -class IOServiceUserNotification : public IOUserNotification -{ - OSDeclareDefaultStructors(IOServiceUserNotification) - enum { kMaxOutstanding = 256 }; + PingMsg * pingMsg; + vm_size_t msgSize; OSArray * newSet; OSObject * lastEntry; bool armed; @@ -386,6 +411,16 @@ class IOServiceMessageUserNotification : public IOUserNotification { OSDeclareDefaultStructors(IOServiceMessageUserNotification) + struct PingMsg { + mach_msg_header_t msgHdr; + mach_msg_body_t msgBody; + mach_msg_port_descriptor_t ports[1]; + OSNotificationHeader notifyHeader; + }; + + PingMsg * pingMsg; + vm_size_t msgSize; + public: virtual bool init( mach_port_t port, natural_t type, @@ -411,8 +446,7 @@ OSDefineAbstractStructors( IOUserNotification, OSIterator ) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool IOUserNotification::init( mach_port_t port, natural_t type, - OSAsyncReference reference, vm_size_t extraSize ) +bool 
IOUserNotification::init( void ) { if( !super::init()) return( false ); @@ -421,24 +455,6 @@ bool IOUserNotification::init( mach_port_t port, natural_t type, if( !lock) return( false ); - msgSize = sizeof( PingMsg) + extraSize; - pingMsg = (PingMsg *) IOMalloc( msgSize); - if( !pingMsg) - return( false ); - - bzero( pingMsg, msgSize); - - pingMsg->msgHdr.msgh_remote_port = port; - pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS( - MACH_MSG_TYPE_COPY_SEND, - MACH_MSG_TYPE_MAKE_SEND ); - pingMsg->msgHdr.msgh_size = msgSize; - pingMsg->msgHdr.msgh_id = kOSNotificationMessageID; - - pingMsg->notifyHeader.size = extraSize; - pingMsg->notifyHeader.type = type; - bcopy( reference, pingMsg->notifyHeader.reference, sizeof(OSAsyncReference) ); - return( true ); } @@ -448,9 +464,6 @@ void IOUserNotification::free( void ) holdNotify->remove(); // can't be in handler now - if( pingMsg) - IOFree( pingMsg, msgSize); - if( lock) IOLockFree( lock ); @@ -460,10 +473,17 @@ void IOUserNotification::free( void ) void IOUserNotification::setNotification( IONotifier * notify ) { - if( holdNotify) - holdNotify->remove(); + IONotifier * previousNotify; + IOLockLock( gIOObjectPortLock); + + previousNotify = holdNotify; holdNotify = notify; + + IOLockUnlock( gIOObjectPortLock); + + if( previousNotify) + previousNotify->remove(); } void IOUserNotification::reset() @@ -491,18 +511,49 @@ bool IOServiceUserNotification::init( mach_port_t port, natural_t type, if( !newSet) return( false ); - return( super::init( port, type, reference, 0) ); + msgSize = sizeof( PingMsg) + 0; + pingMsg = (PingMsg *) IOMalloc( msgSize); + if( !pingMsg) + return( false ); + + bzero( pingMsg, msgSize); + + pingMsg->msgHdr.msgh_remote_port = port; + pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS( + MACH_MSG_TYPE_COPY_SEND /*remote*/, + MACH_MSG_TYPE_MAKE_SEND /*local*/); + pingMsg->msgHdr.msgh_size = msgSize; + pingMsg->msgHdr.msgh_id = kOSNotificationMessageID; + + pingMsg->notifyHeader.size = 0; + pingMsg->notifyHeader.type = type; + bcopy( reference, pingMsg->notifyHeader.reference, sizeof(OSAsyncReference) ); + + return( super::init() ); } void IOServiceUserNotification::free( void ) { - if( lastEntry) - lastEntry->release(); + PingMsg * _pingMsg; + vm_size_t _msgSize; + OSArray * _newSet; + OSObject * _lastEntry; - if( newSet) - newSet->release(); + _pingMsg = pingMsg; + _msgSize = msgSize; + _lastEntry = lastEntry; + _newSet = newSet; super::free(); + + if( _pingMsg && _msgSize) + IOFree( _pingMsg, _msgSize); + + if( _lastEntry) + _lastEntry->release(); + + if( _newSet) + _newSet->release(); } bool IOServiceUserNotification::_handler( void * target, @@ -511,7 +562,7 @@ bool IOServiceUserNotification::_handler( void * target, return( ((IOServiceUserNotification *) target)->handler( ref, newService )); } -bool IOServiceUserNotification::handler( void * /* ref */, +bool IOServiceUserNotification::handler( void * ref, IOService * newService ) { unsigned int count; @@ -531,6 +582,9 @@ bool IOServiceUserNotification::handler( void * /* ref */, IOUnlock( lock ); + if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type) + IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT ); + if( sendPing) { if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) )) pingMsg->msgHdr.msgh_local_port = port; @@ -584,13 +638,48 @@ OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotificati bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type, OSAsyncReference reference, vm_size_t extraSize ) { - 
return( super::init( port, type, reference, - sizeof(IOServiceInterestContent) + extraSize) ); + + extraSize += sizeof(IOServiceInterestContent); + msgSize = sizeof( PingMsg) + extraSize; + pingMsg = (PingMsg *) IOMalloc( msgSize); + if( !pingMsg) + return( false ); + + bzero( pingMsg, msgSize); + + pingMsg->msgHdr.msgh_remote_port = port; + pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX + | MACH_MSGH_BITS( + MACH_MSG_TYPE_COPY_SEND /*remote*/, + MACH_MSG_TYPE_MAKE_SEND /*local*/); + pingMsg->msgHdr.msgh_size = msgSize; + pingMsg->msgHdr.msgh_id = kOSNotificationMessageID; + + pingMsg->msgBody.msgh_descriptor_count = 1; + + pingMsg->ports[0].name = 0; + pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND; + pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR; + + pingMsg->notifyHeader.size = extraSize; + pingMsg->notifyHeader.type = type; + bcopy( reference, pingMsg->notifyHeader.reference, sizeof(OSAsyncReference) ); + + return( super::init() ); } void IOServiceMessageUserNotification::free( void ) { + PingMsg * _pingMsg; + vm_size_t _msgSize; + + _pingMsg = pingMsg; + _msgSize = msgSize; + super::free(); + + if( _pingMsg && _msgSize) + IOFree( _pingMsg, _msgSize); } IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref, @@ -606,7 +695,7 @@ IOReturn IOServiceMessageUserNotification::handler( void * ref, void * messageArgument, vm_size_t argSize ) { kern_return_t kr; - ipc_port_t port; + ipc_port_t thisPort, providerPort; IOServiceInterestContent * data = (IOServiceInterestContent *) pingMsg->notifyHeader.content; @@ -624,16 +713,16 @@ IOReturn IOServiceMessageUserNotification::handler( void * ref, - sizeof( data->messageArgument) + argSize; - if( (port = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT ) )) - pingMsg->msgHdr.msgh_local_port = port; - else - pingMsg->msgHdr.msgh_local_port = NULL; - + providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT ); + pingMsg->ports[0].name = providerPort; + thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ); + pingMsg->msgHdr.msgh_local_port = thisPort; kr = mach_msg_send_from_kernel( &pingMsg->msgHdr, - pingMsg->msgHdr.msgh_size); - - if( port) - iokit_release_port( port ); + pingMsg->msgHdr.msgh_size); + if( thisPort) + iokit_release_port( thisPort ); + if( providerPort) + iokit_release_port( providerPort ); if( KERN_SUCCESS != kr) IOLog("%s: mach_msg_send_from_kernel {%x}\n", __FILE__, kr ); @@ -675,17 +764,43 @@ IOReturn IOUserClient::clientHasPrivilege( void * securityToken, security_token_t token; mach_msg_type_number_t count; - if( 0 != strcmp( privilegeName, kIOClientPrivilegeAdministrator)) - return( kIOReturnUnsupported ); - count = TASK_SECURITY_TOKEN_COUNT; kr = task_info( (task_t) securityToken, TASK_SECURITY_TOKEN, - (task_info_t) &token, &count ); - if( (kr == kIOReturnSuccess) - && (0 != token.val[0])) - kr = kIOReturnNotPrivileged; + (task_info_t) &token, &count ); - return( kr ); + if (KERN_SUCCESS != kr) + {} + else if (!strcmp(privilegeName, kIOClientPrivilegeAdministrator)) + { + if (0 != token.val[0]) + kr = kIOReturnNotPrivileged; + } + else if (!strcmp(privilegeName, kIOClientPrivilegeLocalUser)) + { + OSArray * array; + OSDictionary * user = 0; + + if ((array = OSDynamicCast(OSArray, + IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) + { + for (unsigned int idx = 0; + (user = OSDynamicCast(OSDictionary, array->getObject(idx))); + idx++) + { + OSNumber * num; + if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey))) + && 
(token.val[0] == num->unsigned32BitValue())) + break; + } + array->release(); + } + if (!user) + kr = kIOReturnNotPrivileged; + } + else + kr = kIOReturnUnsupported; + + return (kr); } bool IOUserClient::initWithTask(task_t owningTask, @@ -866,7 +981,8 @@ IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference, if(numArgs > kMaxAsyncArgs) return kIOReturnMessageTooLarge; - replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,0); + replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/, + 0 /*local*/); replyMsg.msgHdr.msgh_size = sizeof(replyMsg) - (kMaxAsyncArgs-numArgs)*sizeof(void *); replyMsg.msgHdr.msgh_remote_port = replyPort; @@ -1001,6 +1117,29 @@ kern_return_t is_io_service_match_property_table( return( kr ); } +/* Routine io_service_match_property_table_ool */ +kern_return_t is_io_service_match_property_table_ool( + io_object_t service, + io_buf_ptr_t matching, + mach_msg_type_number_t matchingCnt, + natural_t *result, + boolean_t *matches ) +{ + kern_return_t kr; + vm_offset_t data; + + kr = vm_map_copyout( kernel_map, &data, (vm_map_copy_t) matching ); + + if( KERN_SUCCESS == kr) { + // must return success after vm_map_copyout() succeeds + *result = is_io_service_match_property_table( service, + (char *) data, matches ); + vm_deallocate( kernel_map, data, matchingCnt ); + } + + return( kr ); +} + /* Routine io_service_get_matching_services */ kern_return_t is_io_service_get_matching_services( mach_port_t master_port, @@ -1028,6 +1167,29 @@ kern_return_t is_io_service_get_matching_services( return( kr ); } +/* Routine io_service_get_matching_services_ool */ +kern_return_t is_io_service_get_matching_services_ool( + mach_port_t master_port, + io_buf_ptr_t matching, + mach_msg_type_number_t matchingCnt, + natural_t *result, + io_object_t *existing ) +{ + kern_return_t kr; + vm_offset_t data; + + kr = vm_map_copyout( kernel_map, &data, (vm_map_copy_t) matching ); + + if( KERN_SUCCESS == kr) { + // must return success after vm_map_copyout() succeeds + *result = is_io_service_get_matching_services( master_port, + (char *) data, existing ); + vm_deallocate( kernel_map, data, matchingCnt ); + } + + return( kr ); +} + /* Routine io_service_add_notification */ kern_return_t is_io_service_add_notification( mach_port_t master_port, @@ -1038,7 +1200,6 @@ kern_return_t is_io_service_add_notification( mach_msg_type_number_t referenceCnt, io_object_t * notification ) { - IOServiceUserNotification * userNotify = 0; IONotifier * notify = 0; const OSSymbol * sym; @@ -1103,6 +1264,34 @@ kern_return_t is_io_service_add_notification( return( err ); } +/* Routine io_service_add_notification_ool */ +kern_return_t is_io_service_add_notification_ool( + mach_port_t master_port, + io_name_t notification_type, + io_buf_ptr_t matching, + mach_msg_type_number_t matchingCnt, + mach_port_t wake_port, + io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + natural_t *result, + io_object_t *notification ) +{ + kern_return_t kr; + vm_offset_t data; + + kr = vm_map_copyout( kernel_map, &data, (vm_map_copy_t) matching ); + + if( KERN_SUCCESS == kr) { + // must return success after vm_map_copyout() succeeds + *result = is_io_service_add_notification( master_port, notification_type, + (char *) data, wake_port, reference, referenceCnt, notification ); + vm_deallocate( kernel_map, data, matchingCnt ); + } + + return( kr ); +} + + /* Routine io_service_add_notification_old */ kern_return_t is_io_service_add_notification_old( mach_port_t master_port, @@ -1155,6 
+1344,8 @@ kern_return_t is_io_service_add_interest_notification( } else err = kIOReturnUnsupported; + sym->release(); + } while( false ); return( err ); @@ -1421,7 +1612,7 @@ kern_return_t is_io_registry_entry_get_property_bytes( offsetBytes = off->unsigned64BitValue(); len = off->numberOfBytes(); bytes = &offsetBytes; -#if __BIG_ENDIAN__ +#ifdef __BIG_ENDIAN__ bytes = (const void *) (((UInt32) bytes) + (sizeof( UInt64) - len)); #endif @@ -1627,6 +1818,18 @@ kern_return_t is_io_service_get_busy_state( return( kIOReturnSuccess ); } +/* Routine io_service_get_state */ +kern_return_t is_io_service_get_state( + io_object_t _service, + uint64_t *state ) +{ + CHECK( IOService, _service, service ); + + *state = service->getState(); + + return( kIOReturnSuccess ); +} + /* Routine io_service_wait_quiet */ kern_return_t is_io_service_wait_quiet( io_object_t _service, @@ -1740,7 +1943,10 @@ kern_return_t is_io_connect_map_memory( if( task != current_task()) { // push a name out to the task owning the map, // so we can clean up maps - mach_port_name_t name = IOMachPort::makeSendRightForTask( +#if IOASSERT + mach_port_name_t name = +#endif + IOMachPort::makeSendRightForTask( task, map, IKOT_IOKIT_OBJECT ); assert( name ); @@ -2630,17 +2836,8 @@ kern_return_t is_io_catalog_get_data( return kIOReturnNoMemory; s->clearText(); - switch ( flag ) { - case kIOCatalogGetContents: - if ( !gIOCatalogue->serialize(s) ) { - kr = kIOReturnNoMemory; - } - break; - default: - kr = kIOReturnBadArgument; - break; - } + kr = gIOCatalogue->serializeData(flag, s); if ( kr == kIOReturnSuccess ) { vm_offset_t data; diff --git a/iokit/Kernel/IOWorkLoop.cpp b/iokit/Kernel/IOWorkLoop.cpp index a6bfd4673..79b96d202 100644 --- a/iokit/Kernel/IOWorkLoop.cpp +++ b/iokit/Kernel/IOWorkLoop.cpp @@ -60,14 +60,8 @@ static inline bool ISSETP(void *addr, unsigned int flag) #define fFlags loopRestart -extern "C" extern void stack_privilege( thread_t thread); - void IOWorkLoop::launchThreadMain(void *self) { - register thread_t mythread = current_thread(); - - // Make sure that this thread always has a kernel stack - stack_privilege(mythread); thread_set_cont_arg((int) self); threadMainContinuation(); } @@ -112,7 +106,7 @@ IOWorkLoop::workLoop() IOWorkLoop *me = new IOWorkLoop; if (me && !me->init()) { - me->free(); + me->release(); return 0; } @@ -376,8 +370,8 @@ void IOWorkLoop::wakeupGate(void *event, bool oneThread) } IOReturn IOWorkLoop::runAction(Action inAction, OSObject *target, - void *arg0 = 0, void *arg1 = 0, - void *arg2 = 0, void *arg3 = 0) + void *arg0, void *arg1, + void *arg2, void *arg3) { IOReturn res; diff --git a/iokit/Kernel/RootDomainUserClient.cpp b/iokit/Kernel/RootDomainUserClient.cpp new file mode 100644 index 000000000..28eab6b1f --- /dev/null +++ b/iokit/Kernel/RootDomainUserClient.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + */ + +#include +#include +#include +#include "RootDomainUserClient.h" +#include + +#define super IOUserClient + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSDefineMetaClassAndStructors(RootDomainUserClient, IOUserClient) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool RootDomainUserClient::start( IOService * provider ) +{ + assert(OSDynamicCast(IOPMrootDomain, provider)); + if(!super::start(provider)) + return false; + fOwner = (IOPMrootDomain *)provider; + + + return true; +} + + +IOReturn RootDomainUserClient::clientClose( void ) +{ + detach(fOwner); + return kIOReturnSuccess; +} + +IOExternalMethod * +RootDomainUserClient::getTargetAndMethodForIndex( IOService ** targetP, UInt32 index ) +{ + static IOExternalMethod sMethods[] = { + { // kPMSetAggressiveness, 0 + 0, (IOMethod)&IOPMrootDomain::setAggressiveness, kIOUCScalarIScalarO, 2, 0 + }, + { // kPMGetAggressiveness, 1 + 0, (IOMethod)&IOPMrootDomain::getAggressiveness, kIOUCScalarIScalarO, 1, 1 + }, + { // kPMSleepSystem, 2 + 0, (IOMethod)&IOPMrootDomain::sleepSystem, kIOUCScalarIScalarO, 0, 0 + }, + { // kPMAllowPowerChange, 3 + 0, (IOMethod)&IOPMrootDomain::allowPowerChange, kIOUCScalarIScalarO, 1, 0 + }, + { // kPMCancelPowerChange, 4 + 0, (IOMethod)&IOPMrootDomain::cancelPowerChange, kIOUCScalarIScalarO, 1, 0 + }, + { // kPMShutdownSystem, 5 + 0, (IOMethod)&IOPMrootDomain::shutdownSystem, kIOUCScalarIScalarO, 0, 0 + }, + { // kPMRestartSystem, 6 + 0, (IOMethod)&IOPMrootDomain::restartSystem, kIOUCScalarIScalarO, 0, 0 + }, + { // kPMSetPreventative, 7 + 1, (IOMethod) &RootDomainUserClient::setPreventative, kIOUCScalarIScalarO, 2, 0 + }, + }; + + if(index >= kNumPMMethods) + return NULL; + else { + if (sMethods[index].object) + *targetP = this; + else + *targetP = fOwner; + + return &sMethods[index]; + } +} + +void +RootDomainUserClient::setPreventative(UInt32 on_off, UInt32 types_of_sleep) +{ + return; +} + diff --git a/iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.h b/iokit/Kernel/RootDomainUserClient.h similarity index 83% rename from iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.h rename to iokit/Kernel/RootDomainUserClient.h index 98e0b1328..545d3b2cd 100644 --- a/iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.h +++ b/iokit/Kernel/RootDomainUserClient.h @@ -45,23 +45,17 @@ class RootDomainUserClient : public IOUserClient private: IOPMrootDomain * fOwner; - task_t fTask; - IOExternalMethod fMethods[ kNumPMMethods ]; public: - static RootDomainUserClient *withTask(task_t owningTask); - virtual IOReturn clientClose( void ); - virtual IOReturn clientDied( void ); - - virtual IOReturn registerNotificationPort ( mach_port_t port, UInt32 type ); - - virtual IOExternalMethod * getExternalMethodForIndex( UInt32 index ); + virtual IOExternalMethod * getTargetAndMethodForIndex( IOService ** targetP, UInt32 index ); virtual bool start( 
IOService * provider ); + void setPreventative(UInt32 on_off, UInt32 types_of_sleep); + }; #endif /* ! _IOKIT_ROOTDOMAINUSERCLIENT_H */ diff --git a/iokit/KernelConfigTables.cpp b/iokit/KernelConfigTables.cpp index 9f5048187..121b6e13f 100644 --- a/iokit/KernelConfigTables.cpp +++ b/iokit/KernelConfigTables.cpp @@ -26,90 +26,59 @@ /* This list is used in IOStartIOKit.cpp to declare fake kmod_info * structs for kext dependencies that are built into the kernel. - * See the SystemKEXT project for fuller information on these - * fake or pseudo-kexts, including their compatible versions. */ const char * gIOKernelKmods = -"{ - 'com.apple.kernel' = '6.8'; - 'com.apple.kernel.bsd' = '6.8'; - 'com.apple.kernel.iokit' = '6.8'; - 'com.apple.kernel.libkern' = '6.8'; - 'com.apple.kernel.mach' = '6.8'; - 'com.apple.iokit.IOADBFamily' = '6.8'; - 'com.apple.iokit.IONVRAMFamily' = '6.8'; - 'com.apple.iokit.IOSystemManagementFamily' = '6.8'; - 'com.apple.iokit.ApplePlatformFamily' = '6.8'; - 'com.apple.driver.AppleNMI' = '6.8'; -}"; +"{" +" 'com.apple.kernel' = '7.0';" +" 'com.apple.kpi.bsd' = '7.0';" +" 'com.apple.kpi.iokit' = '7.0';" +" 'com.apple.kpi.libkern' = '7.0';" +" 'com.apple.kpi.mach' = '7.0';" +" 'com.apple.iokit.IONVRAMFamily' = '7.0';" +" 'com.apple.driver.AppleNMI' = '7.0';" +" 'com.apple.iokit.IOSystemManagementFamily' = '7.0';" +" 'com.apple.iokit.ApplePlatformFamily' = '7.0';" +" 'com.apple.kernel.6.0' = '6.9.9';" +" 'com.apple.kernel.bsd' = '6.9.9';" +" 'com.apple.kernel.iokit' = '6.9.9';" +" 'com.apple.kernel.libkern' = '6.9.9';" +" 'com.apple.kernel.mach' = '6.9.9';" +"}"; const char * gIOKernelConfigTables = -"( - { - 'IOClass' = IOPanicPlatform; - 'IOProviderClass' = IOPlatformExpertDevice; - 'IOProbeScore' = '-1'; - } -" +"(" +" {" +" 'IOClass' = IOPanicPlatform;" +" 'IOProviderClass' = IOPlatformExpertDevice;" +" 'IOProbeScore' = '-1';" +" }" #ifdef PPC -" , - { - 'IOClass' = AppleCPU; - 'IOProviderClass' = IOPlatformDevice; - 'IONameMatch' = 'cpu'; - 'IOProbeScore' = 100:32; - }, - { - 'IOClass' = AppleNMI; - 'IOProviderClass' = AppleMacIODevice; - 'IONameMatch' = 'programmer-switch'; - }, - { - 'IOClass' = AppleNVRAM; - 'IOProviderClass' = AppleMacIODevice; - 'IONameMatch' = nvram; - }, - { - 'IOClass' = IOPMUADBController; - 'IOProviderClass' = AppleMacIODevice; - 'IONameMatch' = adb; - } -" +" ," +" {" +" 'IOClass' = AppleCPU;" +" 'IOProviderClass' = IOPlatformDevice;" +" 'IONameMatch' = 'cpu';" +" 'IOProbeScore' = 100:32;" +" }," +" {" +" 'IOClass' = AppleNMI;" +" 'IOProviderClass' = AppleMacIODevice;" +" 'IONameMatch' = 'programmer-switch';" +" }," +" {" +" 'IOClass' = AppleNVRAM;" +" 'IOProviderClass' = AppleMacIODevice;" +" 'IONameMatch' = nvram;" +" }" #endif /* PPC */ #ifdef i386 -" , - { - 'IOClass' = AppleI386PlatformExpert; - 'IOProviderClass' = IOPlatformExpertDevice; - 'top-level' = " - /* set of dicts to make into nubs */ - "[ - { IOName = cpu; }, - { IOName = intel-pic; }, - { IOName = intel-clock; }, - { IOName = ps2controller; }, - { IOName = pci; }, - { IOName = display; 'AAPL,boot-display' = Yes; } - ]; - }, - { - 'IOClass' = AppleI386CPU; - 'IOProviderClass' = IOPlatformDevice; - 'IONameMatch' = cpu; - 'IOProbeScore' = 100:32; - }, - { - 'IOClass' = AppleIntelClassicPIC; - 'IOProviderClass' = IOPlatformDevice; - 'IONameMatch' = intel-pic; - }, - { - 'IOClass' = AppleIntelClock; - 'IOProviderClass' = IOPlatformDevice; - 'IONameMatch' = intel-clock; - } -" +" ," +" {" +" 'IOClass' = AppleIntelClock;" +" 'IOProviderClass' = IOPlatformDevice;" +" 'IONameMatch' = 
intel-clock;" +" }" #endif /* i386 */ ")"; diff --git a/iokit/bsddev/IOKitBSDInit.cpp b/iokit/bsddev/IOKitBSDInit.cpp index 9ca242e8a..b6d87543d 100644 --- a/iokit/bsddev/IOKitBSDInit.cpp +++ b/iokit/bsddev/IOKitBSDInit.cpp @@ -39,12 +39,12 @@ extern "C" { // how long to wait for matching root device, secs #define ROOTDEVICETIMEOUT 60 +extern dev_t mdevadd(int devid, ppnum_t base, unsigned int size, int phys); +extern dev_t mdevlookup(int devid); kern_return_t IOKitBSDInit( void ) { - IOLog("IOKitBSDInit\n"); - IOService::publishResource("IOBSD"); return( kIOReturnSuccess ); @@ -78,26 +78,26 @@ OSDictionary * IOBSDNameMatching( const char * name ) return( 0 ); } -OSDictionary * IOCDMatching( const char * name ) +OSDictionary * IOCDMatching( void ) { OSDictionary * dict; const OSSymbol * str; - - dict = IOService::serviceMatching( "IOMedia" ); - if( dict == 0 ) { - IOLog("Unable to find IOMedia\n"); - return 0; - } - - str = OSSymbol::withCString( "CD_ROM_Mode_1" ); - if( str == 0 ) { - dict->release(); - return 0; - } - - dict->setObject( "Content", (OSObject *)str ); - str->release(); - return( dict ); + + dict = IOService::serviceMatching( "IOMedia" ); + if( dict == 0 ) { + IOLog("Unable to find IOMedia\n"); + return 0; + } + + str = OSSymbol::withCString( "CD_ROM_Mode_1" ); + if( str == 0 ) { + dict->release(); + return 0; + } + + dict->setObject( "Content Hint", (OSObject *)str ); + str->release(); + return( dict ); } OSDictionary * IONetworkMatching( const char * path, @@ -256,23 +256,42 @@ OSDictionary * IODiskMatching( const char * path, char * buf, int maxLen ) char * comp; long unit = -1; long partition = -1; + long lun = -1; char c; + const char * partitionSep = NULL; // scan the tail of the path for "@unit:partition" do { // Have to get the full path to the controller - an alias may // tell us next to nothing, like "hd:8" alias = IORegistryEntry::dealiasPath( &path, gIODTPlane ); - + look = path + strlen( path); c = ':'; while( look != path) { if( *(--look) == c) { if( c == ':') { partition = strtol( look + 1, 0, 0 ); + partitionSep = look; c = '@'; } else if( c == '@') { + int diff = -1; + unit = strtol( look + 1, 0, 16 ); + + diff = (int)partitionSep - (int)look; + if ( diff > 0 ) { + + for ( ; diff > 0; diff-- ) + { + if( look[diff] == ',' ) + { + lun = strtol ( &look[diff + 1], 0, 16 ); + break; + } + } + } + c = '/'; } else if( c == '/') { c = 0; @@ -288,29 +307,36 @@ OSDictionary * IODiskMatching( const char * path, char * buf, int maxLen ) } if( c || unit == -1 || partition == -1) continue; - + maxLen -= strlen( "{" kIOPathMatchKey "='" kIODeviceTreePlane ":" ); maxLen -= ( alias ? 
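The scan above walks the tail of an OpenFirmware path for "@unit,lun:partition" (unit and lun in hex, partition in decimal), and the sprintf calls just below assemble an IOPathMatch dictionary from the pieces. A standalone worked example with a hypothetical device path, not taken from this patch:

    #include <cstdio>

    int main()
    {
        char buf[256];
        // For "/pci@f4000000/scsi@3/@1,2:9" the scan yields unit 1,
        // lun 2, partition 9; 'controller' is the path up to the last '/'.
        const char *controller = "/pci@f4000000/scsi@3";
        long unit = 1, lun = 2, partition = 9;

        std::sprintf(buf, "{IOPathMatch='IODeviceTree:%s/@%lx,%lx:%ld';}",
                     controller, unit, lun, partition);
        std::puts(buf);   // {IOPathMatch='IODeviceTree:/pci@f4000000/scsi@3/@1,2:9';}
        return 0;
    }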
strlen( alias ) : 0 ) + (look - path); - maxLen -= strlen( "/@hhhhhhhh:dddddddddd';}" ); + maxLen -= strlen( "/@hhhhhhhh,hhhhhhhh:dddddddddd';}" ); if( maxLen > 0) { sprintf( buf, "{" kIOPathMatchKey "='" kIODeviceTreePlane ":" ); comp = buf + strlen( buf ); - + if( alias) { strcpy( comp, alias ); comp += strlen( alias ); } - + if ( (look - path)) { strncpy( comp, path, look - path); comp += look - path; } - - sprintf( comp, "/@%lx:%ld';}", unit, partition ); + + if ( lun != -1 ) + { + sprintf ( comp, "/@%lx,%lx:%ld';}", unit, lun, partition ); + } + else + { + sprintf( comp, "/@%lx:%ld';}", unit, partition ); + } } else continue; - + return( OSDynamicCast(OSDictionary, OSUnserialize( buf, 0 )) ); } while( false ); @@ -327,6 +353,44 @@ OSDictionary * IOOFPathMatching( const char * path, char * buf, int maxLen ) } +IOService * IOFindMatchingChild( IOService * service ) +{ + // find a matching child service + IOService * child = 0; + OSIterator * iter = service->getClientIterator(); + if ( iter ) { + while( ( child = (IOService *) iter->getNextObject() ) ) { + OSDictionary * dict = OSDictionary::withCapacity( 1 ); + if( dict == 0 ) { + iter->release(); + return 0; + } + const OSSymbol * str = OSSymbol::withCString( "Apple_HFS" ); + if( str == 0 ) { + dict->release(); + iter->release(); + return 0; + } + dict->setObject( "Content", (OSObject *)str ); + str->release(); + if ( child->compareProperty( dict, "Content" ) ) { + dict->release(); + break; + } + dict->release(); + IOService * subchild = IOFindMatchingChild( child ); + if ( subchild ) { + child = subchild; + break; + } + } + iter->release(); + } + return child; +} + +static int didRam = 0; + kern_return_t IOFindBSDRoot( char * rootName, dev_t * root, u_int32_t * oflags ) { @@ -337,9 +401,11 @@ kern_return_t IOFindBSDRoot( char * rootName, OSString * iostr; OSNumber * off; OSData * data = 0; + UInt32 *ramdParms = 0; UInt32 flags = 0; int minor, major; + bool findHFSChild = false; char * rdBootVar; enum { kMaxPathBuf = 512, kMaxBootVar = 128 }; char * str; @@ -349,6 +415,9 @@ kern_return_t IOFindBSDRoot( char * rootName, bool debugInfoPrintedOnce = false; static int mountAttempts = 0; + + int xchar, dchar; + if( mountAttempts++) IOSleep( 5 * 1000 ); @@ -364,17 +433,15 @@ kern_return_t IOFindBSDRoot( char * rootName, do { if( (regEntry = IORegistryEntry::fromPath( "/chosen", gIODTPlane ))) { - data = (OSData *) regEntry->getProperty( "rootpath" ); - regEntry->release(); - if( data) - continue; - } + data = (OSData *) regEntry->getProperty( "rootpath" ); + regEntry->release(); + if( data) continue; + } if( (regEntry = IORegistryEntry::fromPath( "/options", gIODTPlane ))) { - data = (OSData *) regEntry->getProperty( "boot-file" ); - regEntry->release(); - if( data) - continue; - } + data = (OSData *) regEntry->getProperty( "boot-file" ); + regEntry->release(); + if( data) continue; + } } while( false ); if( data) @@ -382,14 +449,65 @@ kern_return_t IOFindBSDRoot( char * rootName, if( rdBootVar[0] == '*') { look = rdBootVar + 1; - forceNet = false; + forceNet = false; } else { if( (regEntry = IORegistryEntry::fromPath( "/", gIODTPlane ))) { forceNet = (0 != regEntry->getProperty( "net-boot" )); - regEntry->release(); - } + regEntry->release(); + } } + + +// +// See if we have a RAMDisk property in /chosen/memory-map. If so, make it into a device. +// It will become /dev/mdx, where x is 0-f. +// + + if(!didRam) { /* Have we already build this ram disk? 
*/
+		didRam = 1;											/* Remember we did this */
+		if((regEntry = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane ))) {	/* Find the map node */
+			data = (OSData *)regEntry->getProperty("RAMDisk");	/* Find the ram disk, if there */
+			if(data) {										/* We found one */
+
+				ramdParms = (UInt32 *)data->getBytesNoCopy();	/* Point to the ram disk base and size */
+				(void)mdevadd(-1, ramdParms[0] >> 12, ramdParms[1] >> 12, 0);	/* Initialize it and pass back the device number */
+			}
+			regEntry->release();							/* Toss the entry */
+		}
+	}
+
+//
+//	Now check if we are trying to root on a memory device
+//
+
+	if((rdBootVar[0] == 'm') && (rdBootVar[1] == 'd') && (rdBootVar[3] == 0)) {
+		dchar = xchar = rdBootVar[2];						/* Get the actual device */
+		if((xchar >= '0') && (xchar <= '9')) xchar = xchar - '0';	/* If digit, convert */
+		else {
+			xchar = xchar & ~' ';							/* Fold to upper case */
+			if((xchar >= 'A') && (xchar <= 'F')) {			/* Is this a valid digit? */
+				xchar = (xchar & 0xF) + 9;					/* Convert the hex digit */
+				dchar = dchar | ' ';						/* Fold to lower case */
+			}
+			else xchar = -1;								/* Show bogus */
+		}
+		if(xchar >= 0) {									/* Do we have a valid memory device name? */
+			*root = mdevlookup(xchar);						/* Find the device number */
+			if(*root >= 0) {								/* Did we find one? */
+
+				rootName[0] = 'm';							/* Build root name */
+				rootName[1] = 'd';							/* Build root name */
+				rootName[2] = dchar;						/* Build root name */
+				rootName[3] = 0;							/* Build root name */
+				IOLog("BSD root: %s, major %d, minor %d\n", rootName, major(*root), minor(*root));
+				*oflags = 0;								/* Show that this is not network */
+				goto iofrootx;								/* Join common exit... */
+			}
+			panic("IOFindBSDRoot: specified root memory device, %s, has not been configured\n", rdBootVar);	/* Not there */
+		}
+	}
+
 	if( look) {
 	    // from OpenFirmware path
 	    IOLog("From path: \"%s\", ", look);
@@ -400,8 +518,8 @@ kern_return_t IOFindBSDRoot( char * rootName,
 	    matching = IODiskMatching( look, str, kMaxPathBuf );
 	}
     }
-
-    if( (!matching) && rdBootVar[0] ) {
+
+    if( (!matching) && rdBootVar[0] ) {
 	// by BSD name
 	look = rdBootVar;
 	if( look[0] == '*')
@@ -409,8 +527,9 @@
 	if ( strncmp( look, "en", strlen( "en" )) == 0 ) {
 	    matching = IONetworkNamePrefixMatching( "en" );
-	} else if ( strncmp( look, "cdrom", strlen( "cdrom" )) == 0 ) {
-	    matching = IOCDMatching( look );
+	} else if ( strncmp( look, "cdrom", strlen( "cdrom" )) == 0 ) {
+	    matching = IOCDMatching();
+	    findHFSChild = true;
 	} else {
 	    matching = IOBSDNameMatching( look );
 	}
@@ -418,9 +537,9 @@
     if( !matching) {
 	OSString * astring;
-	// any UFS
+	// any HFS
 	matching = IOService::serviceMatching( "IOMedia" );
-	astring = OSString::withCStringNoCopy("Apple_UFS");
+	astring = OSString::withCStringNoCopy("Apple_HFS");
 	if ( astring ) {
 	    matching->setObject("Content", astring);
 	    astring->release();
@@ -462,6 +581,23 @@
     } while( !service);
     matching->release();
+    if ( service && findHFSChild ) {
+        bool waiting = true;
+        // wait for children services to finish registering
+        while ( waiting ) {
+            t.tv_sec = ROOTDEVICETIMEOUT;
+            t.tv_nsec = 0;
+            if ( service->waitQuiet( &t ) == kIOReturnSuccess ) {
+                waiting = false;
+            } else {
+                IOLog( "Waiting for child registration\n" );
+            }
+        }
+        // look for a subservice with an Apple_HFS child
+        IOService * subservice = IOFindMatchingChild( service );
+        if ( subservice ) service = subservice;
+    }
+
     major = 0;
     minor = 0;
 
@@ -513,6 +649,7 @@ kern_return_t IOFindBSDRoot( char * 
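A standalone restatement of the rd=mdX digit handling above; the function name and test values are illustrative only:

    #include <cstdio>

    // Returns the 0-15 memory-device number for the third character of a
    // "mdX" root name, or -1 for a bogus digit (same folding as the patch).
    int mdDigitSketch(char c)
    {
        if (c >= '0' && c <= '9')
            return c - '0';
        c &= ~' ';                    // fold to upper case
        if (c >= 'A' && c <= 'F')
            return (c & 0xF) + 9;     // hex digit: 'A' -> 10 ... 'F' -> 15
        return -1;                    // bogus
    }

    int main()
    {
        std::printf("%d %d %d\n",
                    mdDigitSketch('1'), mdDigitSketch('a'), mdDigitSketch('G'));
        // prints: 1 10 -1  (so rd=md1 -> mdevlookup(1), rd=mda -> mdevlookup(10))
        return 0;
    }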
rootName, IOFree( str, kMaxPathBuf + kMaxBootVar ); +iofrootx: if( (gIOKitDebug & (kIOLogDTree | kIOLogServiceTree | kIOLogMemory)) && !debugInfoPrintedOnce) { IOService::getPlatform()->waitQuiet(); diff --git a/iokit/conf/MASTER.i386 b/iokit/conf/MASTER.i386 index 7e7bc20b9..5d128418b 100644 --- a/iokit/conf/MASTER.i386 +++ b/iokit/conf/MASTER.i386 @@ -5,7 +5,7 @@ # -------- ---- -------- --------------- # # RELEASE = [intel mach iokitcpp] -# PROFILE = [intel mach iokitcpp profile] +# PROFILE = [RELEASE profile] # DEBUG = [intel mach iokitcpp debug] # ###################################################################### diff --git a/iokit/conf/MASTER.ppc b/iokit/conf/MASTER.ppc index 001486d75..1bcd729c6 100644 --- a/iokit/conf/MASTER.ppc +++ b/iokit/conf/MASTER.ppc @@ -5,7 +5,7 @@ # -------- ---- -------- --------------- # # RELEASE = [ppc mach iokitcpp] -# PROFILE = [ppc mach iokitcpp profile] +# PROFILE = [ RELEASE profile ] # DEBUG = [ppc mach iokitcpp debug] # RELEASE_TRACE = [ RELEASE kdebug ] # DEBUG_TRACE = [ DEBUG kdebug ] diff --git a/iokit/conf/Makefile b/iokit/conf/Makefile index 970b3246f..34ca97c17 100644 --- a/iokit/conf/Makefile +++ b/iokit/conf/Makefile @@ -18,7 +18,7 @@ ifndef IOKIT_KERNEL_CONFIG export IOKIT_KERNEL_CONFIG = $(KERNEL_CONFIG) endif -COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) +export COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf: make build_setup @@ -53,6 +53,7 @@ do_all: do_setup_conf SOURCE=$${next_source} \ TARGET=$(TARGET) \ INCL_MAKEDEP=FALSE \ + KERNEL_CONFIG=$(IOKIT_KERNEL_CONFIG) \ build_all; \ echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(IOKIT_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; diff --git a/iokit/conf/Makefile.template b/iokit/conf/Makefile.template index 35daa9e5a..bf9b2d38b 100644 --- a/iokit/conf/Makefile.template +++ b/iokit/conf/Makefile.template @@ -28,9 +28,12 @@ include $(MakeInc_def) # CFLAGS+= -DKERNEL -DDRIVER_PRIVATE \ -Wall -Wno-four-char-constants -fno-common \ - -DIOMATCHDEBUG=1 -DIOALLOCDEBUG=1 -DIOASSERT=0 \ + -DIOMATCHDEBUG=1 -DIOALLOCDEBUG=1 \ #-DIOKITDEBUG=-1 +CFLAGS_RELEASE += -DIOASSERT=0 +CFLAGS_DEBUG += -DIOASSERT=1 + SFLAGS+= -DKERNEL # @@ -88,7 +91,7 @@ LDOBJS = $(OBJS) $(COMPONENT).o: $(LDOBJS) @echo "creating $(COMPONENT).o" $(RM) $(RMFLAGS) vers.c - $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/newvers \ + $(COMPOBJROOT)/newvers \ `$(CAT) ${VERSION_FILES}` ${COPYRIGHT_FILES} ${KCC} $(CFLAGS) $(INCLUDES) -c vers.c @echo [ updating $(COMPONENT).o ${IOKIT_KERNEL_CONFIG} ] diff --git a/iokit/conf/files b/iokit/conf/files index bdaf03c11..29fc19011 100644 --- a/iokit/conf/files +++ b/iokit/conf/files @@ -21,6 +21,7 @@ iokit/Kernel/IOServicePM.cpp optional iokitcpp iokit/Kernel/IOPMchangeNoteList.cpp optional iokitcpp iokit/Kernel/IOPMinformee.cpp optional iokitcpp iokit/Kernel/IOPMinformeeList.cpp optional iokitcpp +iokit/Kernel/IOPMPowerStateQueue.cpp optional iokitcpp iokit/Kernel/IOCatalogue.cpp optional iokitcpp iokit/Kernel/IOPMPowerSource.cpp optional iokitcpp iokit/Kernel/IOPMPowerSourceList.cpp optional iokitcpp @@ -37,6 +38,7 @@ iokit/Kernel/IOFilterInterruptEventSource.cpp optional iokitcpp iokit/Kernel/IOTimerEventSource.cpp optional iokitcpp iokit/Kernel/IODeviceMemory.cpp optional iokitcpp +iokit/Kernel/IOMapper.cpp optional iokitcpp iokit/Kernel/IOMemoryDescriptor.cpp optional iokitcpp iokit/Kernel/IOMemoryCursor.cpp optional iokitcpp iokit/Kernel/IOBufferMemoryDescriptor.cpp 
optional iokitcpp @@ -74,8 +76,8 @@ iokit/bsddev/IOKitBSDInit.cpp optional iokitcpp iokit/bsddev/DINetBootHook.cpp optional iokitcpp # Power Management -iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.cpp optional iokitcpp -iokit/Kernel/IOPowerConnection.cpp optional iokitcpp +iokit/Kernel/RootDomainUserClient.cpp optional iokitcpp +iokit/Kernel/IOPowerConnection.cpp optional iokitcpp # System Management iokit/Families/IOSystemManagement/IOWatchDogTimer.cpp optional iokitcpp diff --git a/iokit/conf/files.i386 b/iokit/conf/files.i386 index 57bbaba7e..a604778e1 100644 --- a/iokit/conf/files.i386 +++ b/iokit/conf/files.i386 @@ -1,19 +1,11 @@ -# Intel platform support - -iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.cpp optional iokitcpp -iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.cpp optional iokitcpp - # Shared lock iokit/Kernel/i386/IOSharedLock.s standard iokit/Kernel/i386/IOAsmSupport.s standard -# Interrupt Controller -iokit/Drivers/platform/drvAppleIntelClassicPIC/PIC8259.cpp optional iokitcpp - # Real Time Clock hack iokit/Drivers/platform/drvAppleIntelClock/IntelClock.cpp optional iokitcpp # Power Domains -iokit/Drivers/platform/drvAppleRootDomain/RootDomain.cpp optional iokitcpp +iokit/Kernel/IOPMrootDomain.cpp optional iokitcpp diff --git a/iokit/conf/files.ppc b/iokit/conf/files.ppc index b2589df0c..8d60fc863 100644 --- a/iokit/conf/files.ppc +++ b/iokit/conf/files.ppc @@ -3,22 +3,15 @@ iokit/Kernel/ppc/IOAsmSupport.s standard iokit/Kernel/ppc/IODBDMA.cpp optional iokitcpp iokit/Kernel/ppc/IOSharedLock.s standard -iokit/Families/IOADBBus/IOADBBus.cpp optional iokitcpp -iokit/Families/IOADBBus/IOADBController.cpp optional iokitcpp -iokit/Families/IOADBBus/IOADBDevice.cpp optional iokitcpp -iokit/Families/IOADBBus/IOADBControllerUserClient.cpp optional iokitcpp - iokit/Families/IONVRAM/IONVRAMController.cpp optional iokitcpp iokit/Drivers/platform/drvAppleNVRAM/AppleNVRAM.cpp optional iokitcpp -iokit/Drivers/platform/drvApplePMU/IOPMUADBController.cpp optional iokitcpp - # Apple Platform Expert iokit/Drivers/platform/drvApplePlatformExpert/ApplePlatformExpert.cpp optional iokitcpp iokit/Drivers/platform/drvApplePlatformExpert/AppleCPU.cpp optional iokitcpp # Power Domains -iokit/Drivers/platform/drvAppleRootDomain/RootDomain.cpp optional iokitcpp +iokit/Kernel/IOPMrootDomain.cpp optional iokitcpp # Apple Mac-IO driver iokit/Drivers/platform/drvAppleMacIO/AppleMacIO.cpp optional iokitcpp diff --git a/iokit/conf/version.major b/iokit/conf/version.major index 1e8b31496..7f8f011eb 100644 --- a/iokit/conf/version.major +++ b/iokit/conf/version.major @@ -1 +1 @@ -6 +7 diff --git a/iokit/conf/version.minor b/iokit/conf/version.minor index 45a4fb75d..573541ac9 100644 --- a/iokit/conf/version.minor +++ b/iokit/conf/version.minor @@ -1 +1 @@ -8 +0 diff --git a/iokit/conf/version.variant b/iokit/conf/version.variant index e69de29bb..573541ac9 100644 --- a/iokit/conf/version.variant +++ b/iokit/conf/version.variant @@ -0,0 +1 @@ +0 diff --git a/iokit/include/DeviceTree.h b/iokit/include/DeviceTree.h deleted file mode 100644 index 46700f581..000000000 --- a/iokit/include/DeviceTree.h +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. 
- * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -#ifndef __DEVICE_TREE__ -#define __DEVICE_TREE__ - -#ifdef __cplusplus -extern "C" { -#endif - -/* ------------------------------------------------------------------------------------- - Foundation Types ------------------------------------------------------------------------------------- -*/ -enum { - kDTPathNameSeparator = '/' /* 0x2F */ -}; - - -/* Property Name Definitions (Property Names are C-Strings)*/ -enum { - kDTMaxPropertyNameLength = 31 /* Max length of Property Name (terminator not included) */ -}; - -typedef char DTPropertyNameBuf[32]; - - -/* Entry Name Definitions (Entry Names are C-Strings)*/ -enum { - kDTMaxEntryNameLength = 31 /* Max length of a C-String Entry Name (terminator not included) */ -}; - -/* length of DTEntryNameBuf = kDTMaxEntryNameLength +1*/ -typedef char DTEntryNameBuf[32]; - - -/* Entry*/ -typedef struct OpaqueDTEntry* DTEntry; - -/* Entry Iterator*/ -typedef struct OpaqueDTEntryIterator* DTEntryIterator; - -/* Property Iterator*/ -typedef struct OpaqueDTPropertyIterator* DTPropertyIterator; - - -/* status values*/ -enum { - kError = -1, - kIterationDone = 0, - kSuccess = 1 -}; - -/* ------------------------------------------------------------------------------------- - Device Tree Calls ------------------------------------------------------------------------------------- -*/ - -/* ------------------------------------------------------------------------------------- - Entry Handling ------------------------------------------------------------------------------------- -*/ -/* Compare two Entry's for equality. */ -extern int DTEntryIsEqual(const DTEntry ref1, const DTEntry ref2); - -/* ------------------------------------------------------------------------------------- - LookUp Entry by Name ------------------------------------------------------------------------------------- -*/ -/* - Lookup Entry - Locates an entry given a specified subroot (searchPoint) and path name. If the - searchPoint pointer is NULL, the path name is assumed to be an absolute path - name rooted to the root of the device tree. -*/ -extern int DTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEntry); - -/* ------------------------------------------------------------------------------------- - Entry Iteration ------------------------------------------------------------------------------------- -*/ -/* - An Entry Iterator maintains three variables that are of interest to clients. - First is an "OutermostScope" which defines the outer boundry of the iteration. - This is defined by the starting entry and includes that entry plus all of it's - embedded entries. 
Second is a "currentScope" which is the entry the iterator is - currently in. And third is a "currentPosition" which is the last entry returned - during an iteration. - - Create Entry Iterator - Create the iterator structure. The outermostScope and currentScope of the iterator - are set to "startEntry". If "startEntry" = NULL, the outermostScope and - currentScope are set to the root entry. The currentPosition for the iterator is - set to "nil". -*/ -extern int DTCreateEntryIterator(const DTEntry startEntry, DTEntryIterator *iterator); - -/* Dispose Entry Iterator*/ -extern int DTDisposeEntryIterator(DTEntryIterator iterator); - -/* - Enter Child Entry - Move an Entry Iterator into the scope of a specified child entry. The - currentScope of the iterator is set to the entry specified in "childEntry". If - "childEntry" is nil, the currentScope is set to the entry specified by the - currentPosition of the iterator. -*/ -extern int DTEnterEntry(DTEntryIterator iterator, DTEntry childEntry); - -/* - Exit to Parent Entry - Move an Entry Iterator out of the current entry back into the scope of it's parent - entry. The currentPosition of the iterator is reset to the current entry (the - previous currentScope), so the next iteration call will continue where it left off. - This position is returned in parameter "currentPosition". -*/ -extern int DTExitEntry(DTEntryIterator iterator, DTEntry *currentPosition); - -/* - Iterate Entries - Iterate and return entries contained within the entry defined by the current - scope of the iterator. Entries are returned one at a time. When - int == kIterationDone, all entries have been exhausted, and the - value of nextEntry will be Nil. -*/ -extern int DTIterateEntries(DTEntryIterator iterator, DTEntry *nextEntry); - -/* - Restart Entry Iteration - Restart an iteration within the current scope. The iterator is reset such that - iteration of the contents of the currentScope entry can be restarted. The - outermostScope and currentScope of the iterator are unchanged. The currentPosition - for the iterator is set to "nil". -*/ -extern int DTRestartEntryIteration(DTEntryIterator iterator); - -/* ------------------------------------------------------------------------------------- - Get Property Values ------------------------------------------------------------------------------------- -*/ -/* - Get the value of the specified property for the specified entry. - - Get Property -*/ -extern int DTGetProperty(const DTEntry entry, const char *propertyName, void **propertyValue, int *propertySize); - -/* ------------------------------------------------------------------------------------- - Iterating Properties ------------------------------------------------------------------------------------- -*/ -/* - Create Property Iterator - Create the property iterator structure. The target entry is defined by entry. -*/ -extern int DTCreatePropertyIterator(const DTEntry entry, DTPropertyIterator *iterator); - -/* Dispose Property Iterator*/ -extern int DTDisposePropertyIterator(DTPropertyIterator iterator); - -/* - Iterate Properites - Iterate and return properties for given entry. - When int == kIterationDone, all properties have been exhausted. -*/ -extern int DTIterateProperties(DTPropertyIterator iterator, char **foundProperty); - -/* - Restart Property Iteration - Used to re-iterate over a list of properties. The Property Iterator is reset to - the beginning of the list of properties for an entry. 
-*/ -extern int DTRestartPropertyIteration(DTPropertyIterator iterator); - -#ifdef __cplusplus -} -#endif - -#endif /* __DEVICE_TREE__ */ - diff --git a/iokit/include/drivers/event_status_driver.h b/iokit/include/drivers/event_status_driver.h index bfaa8dffb..249232924 100644 --- a/iokit/include/drivers/event_status_driver.h +++ b/iokit/include/drivers/event_status_driver.h @@ -34,121 +34,10 @@ ******************************************************************************/ -#ifndef _DRIVERS_EVENT_STATUS_DRIVER_ -#define _DRIVERS_EVENT_STATUS_DRIVER_ +#warning include is going away use instead -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - - -/* - * Event System Handle: - * - * Information used by the system between calls to NXOpenEventSystem and - * NXCloseEventSystem. The application should not - * access any of the elements of this structure. - */ -typedef mach_port_t NXEventHandle; - -/* Open and Close */ -NXEventHandle NXOpenEventStatus(void); -void NXCloseEventStatus(NXEventHandle handle); - -/* Status */ -extern NXEventSystemInfoType NXEventSystemInfo(NXEventHandle handle, - char *flavor, - int *evs_info, - unsigned int *evs_info_cnt); -/* Keyboard */ -extern void NXSetKeyRepeatInterval(NXEventHandle handle, double seconds); -extern double NXKeyRepeatInterval(NXEventHandle handle); -extern void NXSetKeyRepeatThreshold(NXEventHandle handle, double threshold); -extern double NXKeyRepeatThreshold(NXEventHandle handle); -extern NXKeyMapping *NXSetKeyMapping(NXEventHandle h, NXKeyMapping *keymap); -extern int NXKeyMappingLength(NXEventHandle handle); -extern NXKeyMapping *NXGetKeyMapping(NXEventHandle h, NXKeyMapping *keymap); -extern void NXResetKeyboard(NXEventHandle handle); - -/* Mouse */ -extern void NXSetClickTime(NXEventHandle handle, double seconds); -extern double NXClickTime(NXEventHandle handle); -extern void NXSetClickSpace(NXEventHandle handle, _NXSize_ *area); -extern void NXGetClickSpace(NXEventHandle handle, _NXSize_ *area); -extern void NXSetMouseScaling(NXEventHandle handle, NXMouseScaling *scaling); -extern void NXGetMouseScaling(NXEventHandle handle, NXMouseScaling *scaling); -#ifdef _undef -extern void NXEnableMouseButton(NXEventHandle handle, NXMouseButton button); -extern NXMouseButton NXMouseButtonEnabled(NXEventHandle handle); -#endif -extern void NXResetMouse(NXEventHandle handle); - -/* Screen Brightness and Auto-dimming */ - -extern void NXSetAutoDimThreshold(NXEventHandle handle, double seconds); -extern double NXAutoDimThreshold(NXEventHandle handle); -extern double NXAutoDimTime(NXEventHandle handle); -extern double NXIdleTime(NXEventHandle handle); -extern void NXSetAutoDimState(NXEventHandle handle, boolean_t dimmed); -extern boolean_t NXAutoDimState(NXEventHandle handle); -extern void NXSetAutoDimBrightness(NXEventHandle handle, double level); -extern double NXAutoDimBrightness(NXEventHandle handle); -extern void NXSetScreenBrightness(NXEventHandle handle, double level); -extern double NXScreenBrightness(NXEventHandle handle); - -/* Speaker Volume */ -#ifdef _undef -extern void NXSetCurrentVolume(NXEventHandle handle, double volume); -extern double NXCurrentVolume(NXEventHandle handle); -#endif - -/* Wait Cursor */ -#ifdef _undef -extern void NXSetWaitCursorThreshold(NXEventHandle handle, double seconds); -extern double NXWaitCursorThreshold(NXEventHandle handle); -extern void NXSetWaitCursorSustain(NXEventHandle handle, double seconds); -extern double NXWaitCursorSustain(NXEventHandle handle); -extern void 
NXSetWaitCursorFrameInterval(NXEventHandle handle, double seconds); -extern double NXWaitCursorFrameInterval(NXEventHandle handle); -#endif - -/* - * Generic calls. Argument values are device and architecture dependent. - * This API is provided for the convenience of special device users. Code - * which is intended to be portable across multiple platforms and architectures - * should not use the following functions. - */ -#ifdef _undef -extern int NXEvSetParameterInt(NXEventHandle handle, - char *parameterName, - unsigned int *parameterArray, - unsigned int count); - -extern int NXEvSetParameterChar(NXEventHandle handle, - char *parameterName, - unsigned char *parameterArray, - unsigned int count); - -extern int NXEvGetParameterInt(NXEventHandle handle, - char *parameterName, - unsigned int maxCount, - unsigned int *parameterArray, - unsigned int *returnedCount); - -extern int NXEvGetParameterChar(NXEventHandle handle, - char *parameterName, - unsigned int maxCount, - unsigned char *parameterArray, - unsigned int *returnedCount); -#endif - -#ifdef __cplusplus -} /* extern "C" */ -#endif - -#endif /*_DRIVERS_EVENT_STATUS_DRIVER_ */ +#include +#ifdef __APPLE_API_OBSOLETE +#include +#endif /* __APPLE_API_OBSOLETE */ diff --git a/osfmk/.gdbinit b/kgmacros similarity index 83% rename from osfmk/.gdbinit rename to kgmacros index c560a9709..935ec81b3 100644 --- a/osfmk/.gdbinit +++ b/kgmacros @@ -6,16 +6,14 @@ # # All the convenience variables used by these macros begin with $kgm_ -set $kgm_vers = 2 - set $kgm_dummy = &proc0 set $kgm_dummy = &kmod echo Loading Kernel GDB Macros package. Type "help kgm" for more info.\n define kgm -printf "These are the kernel gdb macros version %d. ", $kgm_vers -echo Type "help kgm" for more info.\n +printf "" +echo These are the gdb macros for kernel debugging. 
Type "help kgm" for more info.\n
 end
 
 document kgm
@@ -27,44 +25,50 @@
 |
 | The following macros are available in this package:
 |
-| showalltasks	Display a summary listing of tasks
-| showallacts	Display a summary listing of all activations
-| showallstacks	Display the kernel stacks for all activations
+| showalltasks	Display a summary listing of all tasks
+| showallthreads	Display info about all threads in the system
+| showallstacks	Display the stack for each thread in the system
+| showcurrentthreads	Display info about the thread running on each cpu
+| showcurrentstacks	Display the stack for the thread running on each cpu
 | showallvm	Display a summary listing of all the vm maps
 | showallvme	Display a summary listing of all the vm map entries
 | showallipc	Display a summary listing of all the ipc spaces
 | showallrights	Display a summary listing of all the ipc rights
 | showallkmods	Display a summary listing of all the kernel modules
 |
-| showtask	Display status of the specified task
-| showtaskacts	Display the status of all activations in the task
-| showtaskstacks	Display all kernel stacks for all activations in the task
-| showtaskvm	Display status of the specified task's vm_map
-| showtaskvme	Display a summary list of the task's vm_map entries
-| showtaskipc	Display status of the specified task's ipc space
-| showtaskrights	Display a summary list of the task's ipc space entries
+| showtask	Display info about the specified task
+| showtaskthreads	Display info about the threads in the task
+| showtaskstacks	Display the stack for each thread in the task
+| showtaskvm	Display info about the specified task's vm_map
+| showtaskvme	Display info about the task's vm_map entries
+| showtaskipc	Display info about the specified task's ipc space
+| showtaskrights	Display info about the task's ipc space entries
 |
-| showact	Display status of the specified thread activation
-| showactstack	Display the kernel stack for the specified activation
+| showact	Display info about a thread specified by activation
+| showactstack	Display the stack for a thread specified by activation
 |
-| showmap	Display the status of the specified vm_map
+| showmap	Display info about the specified vm_map
 | showmapvme	Display a summary list of the specified vm_map's entries
 |
-| showipc	Display the status of the specified ipc space
+| showipc	Display info about the specified ipc space
 | showrights	Display a summary list of all the rights in an ipc space
 |
-| showpid	Display the status of the process identified by pid
-| showproc	Display the status of the process identified by a proc pointer
+| showpid	Display info about the process identified by pid
+| showproc	Display info about the process identified by proc struct
 |
-| showkmod	Display information about a kernel module
+| showkmod	Display info about a kernel module
 | showkmodaddr	Given an address, display the kernel module and offset
 |
-| zprint	Display zone information
-| paniclog	Display the panic log information
+| dumpcallqueue	Dump out all the entries given a queue head
+|
+| zprint	Display info about the memory zones
+| paniclog	Display the panic log info
 |
-| switchtoact	Switch thread context
-| switchtoctx	Switch context
+| switchtoact	Switch to different context specified by activation
+| switchtoctx	Switch to different context
 | resetctx	Reset context
+| resume_on	Resume when detaching from gdb
+| resume_off	Don't resume when detaching from gdb
 |
 | Type "help <macro>" for more specific help on a particular macro.
| Type "show user " to see what the macro is really doing. @@ -166,7 +170,7 @@ end define showactint printf " 0x%08x ", $arg0 - set $kgm_actp = *(struct thread_activation *)$arg0 + set $kgm_actp = *(struct thread *)$arg0 if $kgm_actp.thread set $kgm_thread = *$kgm_actp.thread printf "0x%08x ", $kgm_actp.thread @@ -200,20 +204,34 @@ define showactint end if $arg1 != 0 if ($kgm_thread.kernel_stack != 0) - if ($kgm_thread.stack_privilege != 0) - printf "\n\t\tstack_privilege=0x%08x", $kgm_thread.stack_privilege + if ($kgm_thread.reserved_stack != 0) + printf "\n\t\treserved_stack=0x%08x", $kgm_thread.reserved_stack end printf "\n\t\tkernel_stack=0x%08x", $kgm_thread.kernel_stack - set $mysp = $kgm_actp->mact.pcb->save_r1 + if (machine_slot[0].cpu_type == 18) + set $mysp = $kgm_actp->mact.pcb->save_r1 + else + set $kgm_statep = (struct i386_kernel_state *)($kgm_thread->kernel_stack + 0x4000 - sizeof(stru\ +ct i386_kernel_state)) + set $mysp = $kgm_statep->k_ebp + end set $prevsp = 0 printf "\n\t\tstacktop=0x%08x", $mysp while ($mysp != 0) && (($mysp & 0xf) == 0) && ($mysp < 0xb0000000) && ($mysp > $prevsp) printf "\n\t\t0x%08x ", $mysp - set $kgm_return = *($mysp + 8) - if (($kgm_return > end) && ($kgm_return < 0x40000000)) + if (machine_slot[0].cpu_type == 18) + set $kgm_return = *($mysp + 8) + else + set $kgm_return = *($mysp + 4) + end + if ($kgm_return > sectPRELINKB) showkmodaddr $kgm_return else + if (machine_slot[0].cpu_type == 18) output /a * ($mysp + 8) + else + output /a * ($mysp + 4) + end end set $prevsp = $mysp set $mysp = * $mysp @@ -235,7 +253,7 @@ define showact showactint $arg0 0 end document showact -| Routine to print out the state of a specific thread activation. +| Routine to print out the state of a specific thread. | The following is the syntax: | (gdb) showact end @@ -246,59 +264,102 @@ define showactstack showactint $arg0 1 end document showactstack -| Routine to print out the stack of a specific thread activation. +| Routine to print out the stack of a specific thread. | The following is the syntax: | (gdb) showactstack end -define showallacts +define showallthreads set $kgm_head_taskp = &default_pset.tasks set $kgm_taskp = (struct task *)($kgm_head_taskp->next) while $kgm_taskp != $kgm_head_taskp showtaskheader showtaskint $kgm_taskp showactheader - set $kgm_head_actp = &($kgm_taskp->thr_acts) - set $kgm_actp = (struct thread_activation *)($kgm_taskp->thr_acts.next) + set $kgm_head_actp = &($kgm_taskp->threads) + set $kgm_actp = (struct thread *)($kgm_taskp->threads.next) while $kgm_actp != $kgm_head_actp showactint $kgm_actp 0 - set $kgm_actp = (struct thread_activation *)($kgm_actp->thr_acts.next) + set $kgm_actp = (struct thread *)($kgm_actp->task_threads.next) end printf "\n" set $kgm_taskp = (struct task *)($kgm_taskp->pset_tasks.next) end end -document showallacts -| Routine to print out a summary listing of all the thread activations. +document showallthreads +| Routine to print out info about all threads in the system. 
+| The following is the syntax: +| (gdb) showallthreads +end + +define showcurrentthreads +set $kgm_ncpus = machine_info.max_cpus +set $kgm_i = 0 + while $kgm_i < $kgm_ncpus + set $kgm_prp = processor_ptr[$kgm_i] + if ($kgm_prp != 0) && (($kgm_prp)->active_thread != 0) + set $kgm_actp = (($kgm_prp)->active_thread)->top_act + showtaskheader + showtaskint ($kgm_actp)->task + showactheader + showactint $kgm_actp 0 + printf "\n" + end + set $kgm_i = $kgm_i + 1 + end +end +document showcurrentthreads +| Routine to print out info about the thread running on each cpu. | The following is the syntax: -| (gdb) showallacts +| (gdb) showcurrentthreads end - define showallstacks set $kgm_head_taskp = &default_pset.tasks set $kgm_taskp = (struct task *)($kgm_head_taskp->next) while $kgm_taskp != $kgm_head_taskp showtaskheader showtaskint $kgm_taskp - set $kgm_head_actp = &($kgm_taskp->thr_acts) - set $kgm_actp = (struct thread_activation *)($kgm_taskp->thr_acts.next) + set $kgm_head_actp = &($kgm_taskp->threads) + set $kgm_actp = (struct thread *)($kgm_taskp->threads.next) while $kgm_actp != $kgm_head_actp showactheader showactint $kgm_actp 1 - set $kgm_actp = (struct thread_activation *)($kgm_actp->thr_acts.next) + set $kgm_actp = (struct thread *)($kgm_actp->task_threads.next) end printf "\n" set $kgm_taskp = (struct task *)($kgm_taskp->pset_tasks.next) end end document showallstacks -| Routine to print out a summary listing of all the thread kernel stacks. +| Routine to print out the stack for each thread in the system. | The following is the syntax: | (gdb) showallstacks end +define showcurrentstacks +set $kgm_ncpus = machine_info.max_cpus +set $kgm_i = 0 + while $kgm_i < $kgm_ncpus + set $kgm_prp = processor_ptr[$kgm_i] + if ($kgm_prp != 0) && (($kgm_prp)->active_thread != 0) + set $kgm_actp = (($kgm_prp)->active_thread)->top_act + showtaskheader + showtaskint ($kgm_actp)->task + showactheader + showactint $kgm_actp 1 + printf "\n" + end + set $kgm_i = $kgm_i + 1 + end +end +document showcurrentstacks +| Routine to print out the thread running on each cpu (incl. 
its stack) +| The following is the syntax: +| (gdb) showcurrentstacks +end + define showwaiterheader printf "waiters activation " printf "thread pri state wait_queue wait_event\n" @@ -315,7 +376,7 @@ define showwaitqwaiters set $kgm_w_found = 1 showwaiterheader end - set $kgm_w_shuttle = (struct thread_shuttle *)$kgm_w_wqe + set $kgm_w_shuttle = (struct thread *)$kgm_w_wqe showactint $kgm_w_shuttle->top_act 0 end set $kgm_w_wqe = (struct wait_queue_element *)$kgm_w_wqe->wqe_links.next @@ -532,7 +593,7 @@ define showmap showvmint $arg0 0 end document showmap -| Routine to print out a summary description of a vm_map +| Routine to print out info about the specified vm_map | The following is the syntax: | (gdb) showmap end @@ -702,7 +763,7 @@ define showtaskipc showipcint $kgm_taskp->itk_space 0 end document showtaskipc -| Routine to print the status of the ipc space for a task +| Routine to print info about the ipc space for a task | The following is the syntax: | (gdb) showtaskipc end @@ -716,7 +777,7 @@ define showtaskrights showipcint $kgm_taskp->itk_space 1 end document showtaskrights -| Routine to print a summary listing of all the ipc rights for a task +| Routine to print info about the ipc rights for a task | The following is the syntax: | (gdb) showtaskrights end @@ -765,7 +826,7 @@ define showtaskvm showvmint $kgm_taskp->map 0 end document showtaskvm -| Routine to print out a summary description of a task's vm_map +| Routine to print out info about a task's vm_map | The following is the syntax: | (gdb) showtaskvm end @@ -778,7 +839,7 @@ define showtaskvme showvmint $kgm_taskp->map 1 end document showtaskvme -| Routine to print out a summary listing of a task's vm_map_entries +| Routine to print out info about a task's vm_map_entries | The following is the syntax: | (gdb) showtaskvme end @@ -795,7 +856,7 @@ define showtaskint printf "0x%08x ", $arg0 printf "0x%08x ", $kgm_task.map printf "0x%08x ", $kgm_task.itk_space - printf "%3d ", $kgm_task.thr_act_count + printf "%3d ", $kgm_task.thread_count showprocint $kgm_task.bsd_info end @@ -810,22 +871,22 @@ document showtask end -define showtaskacts +define showtaskthreads showtaskheader set $kgm_taskp = (struct task *)$arg0 showtaskint $kgm_taskp showactheader - set $kgm_head_actp = &($kgm_taskp->thr_acts) - set $kgm_actp = (struct thread_activation *)($kgm_taskp->thr_acts.next) + set $kgm_head_actp = &($kgm_taskp->threads) + set $kgm_actp = (struct thread *)($kgm_taskp->threads.next) while $kgm_actp != $kgm_head_actp showactint $kgm_actp 0 - set $kgm_actp = (struct thread_activation *)($kgm_actp->thr_acts.next) + set $kgm_actp = (struct thread *)($kgm_actp->task_threads.next) end end -document showtaskacts -| Routine to print a summary listing of the activations in a task +document showtaskthreads +| Routine to print info about the threads in a task. 
| The following is the syntax: -| (gdb) showtaskacts +| (gdb) showtaskthreads end @@ -833,16 +894,16 @@ define showtaskstacks showtaskheader set $kgm_taskp = (struct task *)$arg0 showtaskint $kgm_taskp - set $kgm_head_actp = &($kgm_taskp->thr_acts) - set $kgm_actp = (struct thread_activation *)($kgm_taskp->thr_acts.next) + set $kgm_head_actp = &($kgm_taskp->threads) + set $kgm_actp = (struct thread *)($kgm_taskp->threads.next) while $kgm_actp != $kgm_head_actp showactheader showactint $kgm_actp 1 - set $kgm_actp = (struct thread_activation *)($kgm_actp->thr_acts.next) + set $kgm_actp = (struct thread *)($kgm_actp->task_threads.next) end end document showtaskstacks -| Routine to print a summary listing of the activations in a task and their stacks +| Routine to print out the stack for each thread in a task. | The following is the syntax: | (gdb) showtaskstacks end @@ -1263,7 +1324,7 @@ define switchtoact set $kdpstate = (struct savearea *) kdp.saved_state end set $kdp_act_counter = $kdp_act_counter + 1 - set $newact = (struct thread_activation *) $arg0 + set $newact = (struct thread *) $arg0 if (($newact.thread)->kernel_stack == 0) echo This activation does not have a stack.\n echo continuation: @@ -1330,10 +1391,30 @@ document resetctx | or "switchtoctx" commands. end +define resume_on + set noresume_on_disconnect = 0 +end + +document resume_on +| Syntax: resume_on +| The target system will resume when detaching or exiting from gdb. +| This is the default behavior. +end + +define resume_off + set noresume_on_disconnect = 1 +end + +document resume_off +| Syntax: resume_off +| The target system won't resume after detaching from gdb and +| can be attached with a new gdb session +end + define paniclog set $kgm_panic_bufptr = debug_buf - set $kgm_panic_bufptr_max = debug_buf+debug_buf_size - while *$kgm_panic_bufptr && $kgm_panic_bufptr < $kgm_panic_bufptr_max + set $kgm_panic_bufptr_max = debug_buf_ptr + while $kgm_panic_bufptr < $kgm_panic_bufptr_max if *(char *)$kgm_panic_bufptr == 10 printf "\n" else @@ -1348,3 +1429,41 @@ document paniclog | Display the panic log information | end + +define dumpcallqueue + set $kgm_callhead = (queue_t)&$arg0 + set $kgm_call = (struct call_entry *)$kgm_callhead.next + set $kgm_i = 0 + while $kgm_call != $kgm_callhead + printf "0x%08x ", $kgm_call + printf "0x%08x 0x%08x ", $kgm_call->param0, $kgm_call->param1 + output $kgm_call->state + printf "\t" + output $kgm_call->deadline + printf "\t" + output $kgm_call->func + printf "\n" + set $kgm_i = $kgm_i + 1 + set $kgm_call = (struct call_entry *)$kgm_call->q_link.next + end + printf "%d entries\n", $kgm_i +end + +document dumpcallqueue +| Syntax: dumpcallqueue +| Displays the contents of the specified call_entry queue. +end + +define showtaskacts +showtaskthreads $arg0 +end +document showtaskacts +| See help showtaskthreads. +end + +define showallacts +showallthreads +end +document showallacts +| See help showallthreads. 
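All of the stack-display macros above reduce to one technique: follow the saved frame-pointer chain, reading the return address at a fixed offset from each frame (+8 on ppc, +4 on i386 in showactint), with some sanity checks on the pointer (the macro additionally bounds it below 0xb0000000). A sketch of that debugger-side walk in C++; the names are hypothetical:

    #include <cstdint>
    #include <cstdio>

    // 'retOffset' is 8 for ppc or 4 for i386, matching the macro.
    static void walkStackSketch(uintptr_t sp, unsigned retOffset)
    {
        uintptr_t prev = 0;
        while (sp != 0 && (sp & 0xf) == 0 && sp > prev) {
            uintptr_t ret = *(uintptr_t *)(sp + retOffset);   // saved return address
            std::printf("frame 0x%08lx  ret 0x%08lx\n",
                        (unsigned long)sp, (unsigned long)ret);
            prev = sp;
            sp = *(uintptr_t *)sp;                            // follow the back pointer
        }
    }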
+end diff --git a/libkern/Makefile b/libkern/Makefile index fdf3de1cc..7788f760d 100644 --- a/libkern/Makefile +++ b/libkern/Makefile @@ -4,6 +4,8 @@ export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir +export COMP_LDFLAGS_COMPONENT_PPC = -i_OSCompareAndSwap:_hw_compare_and_store + include $(MakeInc_cmd) include $(MakeInc_def) diff --git a/libkern/c++/OSArray.cpp b/libkern/c++/OSArray.cpp index 522548442..d76963047 100644 --- a/libkern/c++/OSArray.cpp +++ b/libkern/c++/OSArray.cpp @@ -75,7 +75,7 @@ bool OSArray::initWithCapacity(unsigned int inCapacity) bool OSArray::initWithObjects(const OSObject *objects[], unsigned int theCount, - unsigned int theCapacity = 0) + unsigned int theCapacity) { unsigned int capacity; @@ -103,7 +103,7 @@ bool OSArray::initWithObjects(const OSObject *objects[], } bool OSArray::initWithArray(const OSArray *anArray, - unsigned int theCapacity = 0) + unsigned int theCapacity) { if ( !anArray ) return false; @@ -117,7 +117,7 @@ OSArray *OSArray::withCapacity(unsigned int capacity) OSArray *me = new OSArray; if (me && !me->initWithCapacity(capacity)) { - me->free(); + me->release(); return 0; } @@ -126,12 +126,12 @@ OSArray *OSArray::withCapacity(unsigned int capacity) OSArray *OSArray::withObjects(const OSObject *objects[], unsigned int count, - unsigned int capacity = 0) + unsigned int capacity) { OSArray *me = new OSArray; if (me && !me->initWithObjects(objects, count, capacity)) { - me->free(); + me->release(); return 0; } @@ -139,12 +139,12 @@ OSArray *OSArray::withObjects(const OSObject *objects[], } OSArray *OSArray::withArray(const OSArray *array, - unsigned int capacity = 0) + unsigned int capacity) { OSArray *me = new OSArray; if (me && !me->initWithArray(array, capacity)) { - me->free(); + me->release(); return 0; } diff --git a/libkern/c++/OSCollectionIterator.cpp b/libkern/c++/OSCollectionIterator.cpp index f75f93651..ac9f6223b 100644 --- a/libkern/c++/OSCollectionIterator.cpp +++ b/libkern/c++/OSCollectionIterator.cpp @@ -63,7 +63,7 @@ OSCollectionIterator::withCollection(const OSCollection *inColl) OSCollectionIterator *me = new OSCollectionIterator; if (me && !me->initWithCollection(inColl)) { - me->free(); + me->release(); return 0; } diff --git a/libkern/c++/OSData.cpp b/libkern/c++/OSData.cpp index aae7bd2fc..6c5078a1d 100644 --- a/libkern/c++/OSData.cpp +++ b/libkern/c++/OSData.cpp @@ -47,9 +47,7 @@ OSMetaClassDefineReservedUnused(OSData, 7); #define EXTERNAL ((unsigned int) -1) #if OSALLOCDEBUG -extern "C" { - extern int debug_container_malloc_size; -}; +extern int debug_container_malloc_size; #define ACCUMSIZE(s) do { debug_container_malloc_size += (s); } while(0) #else #define ACCUMSIZE(s) @@ -60,19 +58,26 @@ bool OSData::initWithCapacity(unsigned int inCapacity) if (!super::init()) return false; - if(inCapacity) { + if (data && (!inCapacity || capacity < inCapacity) ) { + // clean out old data's storage if it isn't big enough + kfree((vm_address_t) data, capacity); + data = 0; + ACCUMSIZE(-capacity); + } + + if (inCapacity && !data) { data = (void *) kalloc(inCapacity); if (!data) return false; + capacity = inCapacity; + ACCUMSIZE(inCapacity); } length = 0; - capacity = inCapacity; - capacityIncrement = capacity; - if(!capacityIncrement) + if (inCapacity < 16) capacityIncrement = 16; - - ACCUMSIZE(capacity); + else + capacityIncrement = inCapacity; return true; } @@ -82,7 +87,8 @@ bool OSData::initWithBytes(const void *bytes, unsigned int inLength) if ((inLength && !bytes) || 
!initWithCapacity(inLength)) return false; - bcopy(bytes, data, inLength); + if (bytes != data) + bcopy(bytes, data, inLength); length = inLength; return true; @@ -121,7 +127,7 @@ OSData *OSData::withCapacity(unsigned int inCapacity) OSData *me = new OSData; if (me && !me->initWithCapacity(inCapacity)) { - me->free(); + me->release(); return 0; } @@ -133,7 +139,7 @@ OSData *OSData::withBytes(const void *bytes, unsigned int inLength) OSData *me = new OSData; if (me && !me->initWithBytes(bytes, inLength)) { - me->free(); + me->release(); return 0; } return me; @@ -144,7 +150,7 @@ OSData *OSData::withBytesNoCopy(void *bytes, unsigned int inLength) OSData *me = new OSData; if (me && !me->initWithBytesNoCopy(bytes, inLength)) { - me->free(); + me->release(); return 0; } @@ -156,7 +162,7 @@ OSData *OSData::withData(const OSData *inData) OSData *me = new OSData; if (me && !me->initWithData(inData)) { - me->free(); + me->release(); return 0; } @@ -169,7 +175,7 @@ OSData *OSData::withData(const OSData *inData, OSData *me = new OSData; if (me && !me->initWithData(inData, start, inLength)) { - me->free(); + me->release(); return 0; } @@ -228,7 +234,7 @@ bool OSData::appendBytes(const void *bytes, unsigned int inLength) { unsigned int newSize; - if (inLength == 0) + if (!inLength) return true; if (capacity == EXTERNAL) @@ -238,7 +244,11 @@ bool OSData::appendBytes(const void *bytes, unsigned int inLength) if ( (newSize > capacity) && newSize > ensureCapacity(newSize) ) return false; - bcopy(bytes, &((unsigned char *)data)[length], inLength); + if (bytes) + bcopy(bytes, &((unsigned char *)data)[length], inLength); + else + bzero(&((unsigned char *)data)[length], inLength); + length = newSize; return true; @@ -248,7 +258,7 @@ bool OSData::appendByte(unsigned char byte, unsigned int inLength) { unsigned int newSize; - if (inLength == 0) + if (!inLength) return true; if (capacity == EXTERNAL) @@ -271,7 +281,7 @@ bool OSData::appendBytes(const OSData *other) const void *OSData::getBytesNoCopy() const { - if (length == 0) + if (!length) return 0; else return data; @@ -326,7 +336,7 @@ bool OSData::isEqualTo(const OSString *obj) const unsigned int checkLen = length; unsigned int stringLen; - if (NULL == obj) + if (!obj) return false; stringLen = obj->getLength (); diff --git a/libkern/c++/OSDictionary.cpp b/libkern/c++/OSDictionary.cpp index d3c8f1bfb..63ccca6dc 100644 --- a/libkern/c++/OSDictionary.cpp +++ b/libkern/c++/OSDictionary.cpp @@ -79,7 +79,7 @@ bool OSDictionary::initWithCapacity(unsigned int inCapacity) bool OSDictionary::initWithObjects(const OSObject *objects[], const OSSymbol *keys[], unsigned int theCount, - unsigned int theCapacity = 0) + unsigned int theCapacity) { unsigned int capacity = theCount; @@ -109,7 +109,7 @@ bool OSDictionary::initWithObjects(const OSObject *objects[], bool OSDictionary::initWithObjects(const OSObject *objects[], const OSString *keys[], unsigned int theCount, - unsigned int theCapacity = 0) + unsigned int theCapacity) { unsigned int capacity = theCount; @@ -145,7 +145,7 @@ bool OSDictionary::initWithObjects(const OSObject *objects[], } bool OSDictionary::initWithDictionary(const OSDictionary *dict, - unsigned int theCapacity = 0) + unsigned int theCapacity) { unsigned int capacity; @@ -179,7 +179,7 @@ OSDictionary *OSDictionary::withCapacity(unsigned int capacity) OSDictionary *me = new OSDictionary; if (me && !me->initWithCapacity(capacity)) { - me->free(); + me->release(); return 0; } @@ -189,12 +189,12 @@ OSDictionary *OSDictionary::withCapacity(unsigned int 
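A pattern worth noting across these libkern diffs: every factory method's failure path changes from me->free() to me->release(). Plausibly this is so that teardown always funnels through taggedRelease(), which, in the OSObject changes later in this patch, marks the object as freeing before invoking free(). The canonical post-patch shape, shown with a hypothetical OSWidget class (not part of the patch):

    OSWidget *OSWidget::withThing(const OSThing *thing)   // hypothetical factory
    {
        OSWidget *me = new OSWidget;

        if (me && !me->initWithThing(thing)) {
            me->release();   // drops the sole reference; free() runs via the refcount path
            return 0;
        }
        return me;
    }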
capacity) OSDictionary *OSDictionary::withObjects(const OSObject *objects[], const OSSymbol *keys[], unsigned int count, - unsigned int capacity = 0) + unsigned int capacity) { OSDictionary *me = new OSDictionary; if (me && !me->initWithObjects(objects, keys, count, capacity)) { - me->free(); + me->release(); return 0; } @@ -204,12 +204,12 @@ OSDictionary *OSDictionary::withObjects(const OSObject *objects[], OSDictionary *OSDictionary::withObjects(const OSObject *objects[], const OSString *keys[], unsigned int count, - unsigned int capacity = 0) + unsigned int capacity) { OSDictionary *me = new OSDictionary; if (me && !me->initWithObjects(objects, keys, count, capacity)) { - me->free(); + me->release(); return 0; } @@ -217,12 +217,12 @@ OSDictionary *OSDictionary::withObjects(const OSObject *objects[], } OSDictionary *OSDictionary::withDictionary(const OSDictionary *dict, - unsigned int capacity = 0) + unsigned int capacity) { OSDictionary *me = new OSDictionary; if (me && !me->initWithDictionary(dict, capacity)) { - me->free(); + me->release(); return 0; } diff --git a/libkern/c++/OSMetaClass.cpp b/libkern/c++/OSMetaClass.cpp index c3019b599..4184b82b0 100644 --- a/libkern/c++/OSMetaClass.cpp +++ b/libkern/c++/OSMetaClass.cpp @@ -554,6 +554,8 @@ void OSMetaClass::reportModInstances(const char *kmodName) iter->release(); } +extern "C" kern_return_t kmod_unload_cache(void); + static void _OSMetaClassConsiderUnloads(thread_call_param_t p0, thread_call_param_t p1) { @@ -578,7 +580,7 @@ static void _OSMetaClassConsiderUnloads(thread_call_param_t p0, while ( (kmodName = (OSSymbol *) kmods->getNextObject()) ) { if (ki) { - kfree(ki, sizeof(kmod_info_t)); + kfree((vm_offset_t) ki, sizeof(kmod_info_t)); ki = 0; } @@ -614,6 +616,8 @@ static void _OSMetaClassConsiderUnloads(thread_call_param_t p0, } while (didUnload); mutex_unlock(loadLock); + + kmod_unload_cache(); } void OSMetaClass::considerUnloads() @@ -801,31 +805,50 @@ void OSMetaClass::printInstanceCounts() OSDictionary * OSMetaClass::getClassDictionary() { - return sAllClassesDict; + panic("OSMetaClass::getClassDictionary(): Obsoleted\n"); + return 0; } bool OSMetaClass::serialize(OSSerialize *s) const { - OSDictionary * dict; - OSNumber * off; - bool ok = false; + panic("OSMetaClass::serialize(): Obsoleted\n"); + return false; +} + +void OSMetaClass::serializeClassDictionary(OSDictionary *serializeDictionary) +{ + OSDictionary *classDict; + + classDict = OSDictionary::withCapacity(sAllClassesDict->getCount()); + if (!classDict) + return; - if (s->previouslySerialized(this)) return true; + mutex_lock(loadLock); + do { + OSCollectionIterator *classes; + const OSSymbol *className; - dict = 0;// IODictionary::withCapacity(2); - off = OSNumber::withNumber(getInstanceCount(), 32); + classes = OSCollectionIterator::withCollection(sAllClassesDict); + if (!classes) + break; + + while ((className = (const OSSymbol *) classes->getNextObject())) { + const OSMetaClass *meta; + OSNumber *count; + + meta = (OSMetaClass *) sAllClassesDict->getObject(className); + count = OSNumber::withNumber(meta->getInstanceCount(), 32); + if (count) { + classDict->setObject(className, count); + count->release(); + } + } + classes->release(); - if (dict) { - dict->setObject("InstanceCount", off ); - ok = dict->serialize(s); - } else if( off) - ok = off->serialize(s); + serializeDictionary->setObject("Classes", classDict); + } while (0); - if (dict) - dict->release(); - if (off) - off->release(); + mutex_unlock(loadLock); - return ok; + classDict->release(); } - diff --git 
a/libkern/c++/OSNumber.cpp b/libkern/c++/OSNumber.cpp index 4ac84eff2..e327230f9 100644 --- a/libkern/c++/OSNumber.cpp +++ b/libkern/c++/OSNumber.cpp @@ -85,7 +85,7 @@ OSNumber *OSNumber::withNumber(unsigned long long value, OSNumber *me = new OSNumber; if (me && !me->init(value, numberOfBits)) { - me->free(); + me->release(); return 0; } @@ -97,7 +97,7 @@ OSNumber *OSNumber::withNumber(const char *value, unsigned int numberOfBits) OSNumber *me = new OSNumber; if (me && !me->init(value, numberOfBits)) { - me->free(); + me->release(); return 0; } diff --git a/libkern/c++/OSObject.cpp b/libkern/c++/OSObject.cpp index ab5873891..3f7959a62 100644 --- a/libkern/c++/OSObject.cpp +++ b/libkern/c++/OSObject.cpp @@ -104,6 +104,12 @@ static const char *getClassName(const OSObject *obj) bool OSObject::init() { return true; } +#if (!__ppc__) || (__GNUC__ < 3) + +// Implemented in assembler in post gcc 3.x systems as we have a problem +// where the destructor in gcc2.95 gets 2 arguments. The second argument +// appears to be a flag argument. I have copied the assembler from Puma xnu +// to OSRuntimeSupport.c So for 2.95 builds use the C void OSObject::free() { const OSMetaClass *meta = getMetaClass(); @@ -112,6 +118,7 @@ void OSObject::free() meta->instanceDestructed(); delete this; } +#endif /* (!__ppc__) || (__GNUC__ < 3) */ int OSObject::getRetainCount() const { @@ -120,42 +127,44 @@ int OSObject::getRetainCount() const void OSObject::taggedRetain(const void *tag) const { -#if !DEBUG volatile UInt32 *countP = (volatile UInt32 *) &retainCount; UInt32 inc = 1; UInt32 origCount; UInt32 newCount; - // Increment the collecion bucket. + // Increment the collection bucket. if ((const void *) OSTypeID(OSCollection) == tag) inc |= (1UL<<16); do { origCount = *countP; - if (-1UL == origCount) - // @@@ Pinot: panic("Attempting to retain a freed object"); - return; - - newCount = origCount + inc; - } while (!OSCompareAndSwap(origCount, newCount, (UInt32 *) countP)); -#else - volatile UInt32 *countP = (volatile UInt32 *) &retainCount; - UInt32 inc = 1; - UInt32 origCount; - UInt32 newCount; - - // Increment the collecion bucket. - if ((const void *) OSTypeID(OSCollection) == tag) - inc |= (1UL<<16); + if ( ((UInt16) origCount | 0x1) == 0xffff ) { + const char *msg; + if (origCount & 0x1) { + // If count == 0xffff that means we are freeing now so we can + // just return obviously somebody is cleaning up dangling + // references. + msg = "Attempting to retain a freed object"; + } + else { + // If count == 0xfffe then we have wrapped our reference count. + // We should stop counting now as this reference must be + // leaked rather than accidently wrapping around the clock and + // freeing a very active object later. - do { - origCount = *countP; - if (-1UL == origCount) - return; // We are freeing so leave now. +#if !DEBUG + break; // Break out of update loop which pegs the reference +#else DEBUG + // @@@ gvdl: eventually need to make this panic optional + // based on a boot argument i.e. 
debug= boot flag + msg = "About to wrap the reference count, reference leak?"; +#endif /* !DEBUG */ + } + panic("OSObject::refcount: %s", msg); + } newCount = origCount + inc; } while (!OSCompareAndSwap(origCount, newCount, (UInt32 *) countP)); -#endif } void OSObject::taggedRelease(const void *tag) const @@ -165,25 +174,44 @@ void OSObject::taggedRelease(const void *tag) const void OSObject::taggedRelease(const void *tag, const int when) const { -#if !DEBUG volatile UInt32 *countP = (volatile UInt32 *) &retainCount; UInt32 dec = 1; UInt32 origCount; UInt32 newCount; UInt32 actualCount; - // Increment the collecion bucket. + // Increment the collection bucket. if ((const void *) OSTypeID(OSCollection) == tag) dec |= (1UL<<16); do { origCount = *countP; - if (-1UL == origCount) - return; // We are freeing already leave now. + + if ( ((UInt16) origCount | 0x1) == 0xffff ) { + if (origCount & 0x1) { + // If count == 0xffff that means we are freeing now so we can + // just return obviously somebody is cleaning up some dangling + // references. So we blow out immediately. + return; + } + else { + // If count == 0xfffe then we have wrapped our reference + // count. We should stop counting now as this reference must be + // leaked rather than accidently freeing an active object later. +#if !DEBUG + return; // return out of function which pegs the reference +#else DEBUG + // @@@ gvdl: eventually need to make this panic optional + // based on a boot argument i.e. debug= boot flag + panic("OSObject::refcount: %s", + "About to unreference a pegged object, reference leak?"); +#endif /* !DEBUG */ + } + } actualCount = origCount - dec; - if ((SInt16) actualCount < when) - newCount = (UInt32) -1; + if ((UInt16) actualCount < when) + newCount = 0xffff; else newCount = actualCount; @@ -202,49 +230,8 @@ void OSObject::taggedRelease(const void *tag, const int when) const getClassName(this)); // Check for a 'free' condition and that if we are first through - if ((UInt32) -1 == newCount) + if (newCount == 0xffff) ((OSObject *) this)->free(); -#else - // @@@ Pinot: Need to update the debug build release code. - volatile UInt32 *countP = (volatile UInt32 *) &retainCount; - UInt32 dec = 1; - UInt32 origCount; - UInt32 newCount; - - // Increment the collecion bucket. - if ((const void *) OSTypeID(OSCollection) == tag) - dec |= (1UL<<16); - - do { - origCount = *countP; - if (-1UL == origCount) - return; // We are freeing already leave now. - - newCount = origCount - dec; - } while (!OSCompareAndSwap(origCount, newCount, (UInt32 *) countP)); - - // - // This panic means that we have just attempted to release an object - // who's retain count has gone to less than the number of collections - // it is a member off. Take a panic immediately. - // In Fact the panic MAY not be a registry corruption but it is - // ALWAYS the wrong thing to do. I call it a registry corruption 'cause - // the registry is the biggest single use of a network of collections. 
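The retain/release rework above packs two counters into the object's single 32-bit retainCount word: the low 16 bits hold the ordinary retain count, the high 16 bits count references held by collections, a low half of 0xffff marks an object already being freed, and 0xfffe pegs a count that would otherwise wrap. A minimal sketch of that update loop, assuming a CAS primitive with OSCompareAndSwap's (old, new, address) returns-swapped contract; GCC's __sync_bool_compare_and_swap stands in here, and this is not xnu's actual code:

    #include <stdint.h>

    // Sketch only: mirrors the 16/16 split and the two pegged values above.
    static bool sketch_tagged_retain(volatile uint32_t *countP, bool fromCollection)
    {
        // Collections bump the high half as well as the low half.
        uint32_t inc = 1u | (fromCollection ? (1u << 16) : 0u);
        uint32_t oldCount, newCount;
        do {
            oldCount = *countP;
            uint16_t user = (uint16_t) oldCount;
            if (user == 0xffff) return false;  // already being freed
            if (user == 0xfffe) return true;   // pegged: leak rather than wrap
            newCount = oldCount + inc;
        } while (!__sync_bool_compare_and_swap(countP, oldCount, newCount));
        return true;
    }

Pegging at 0xfffe trades a small leak for never freeing a still-live object after a wrapped count, which is the design choice the comments in the hunk above spell out.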
- // - if ((UInt16) newCount < (newCount >> 16)) - panic("A driver releasing a(n) %s has corrupted the registry\n", - getClassName(this)); - - // Check for a release too many - if ((SInt16) newCount < 0) - panic("An object has had a release too many\n", - getClassName(this)); - - // Check for a 'free' condition and that if we are first through - if ((SInt16) newCount < when - && OSCompareAndSwap(newCount, -1UL, (UInt32 *) countP)) - ((OSObject *) this)->free(); -#endif } void OSObject::release() const diff --git a/libkern/c++/OSObjectAsm.s b/libkern/c++/OSObjectAsm.s new file mode 100644 index 000000000..52bab860e --- /dev/null +++ b/libkern/c++/OSObjectAsm.s @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#if __GNUC__ >= 3 + +; +; This function was generated by disassembling the 'OSObject::free(void)' +; function of the Panther7B7 kernel in gdb. +; +; Then add the 'li r4,3' flag taken fropm the Puma kernel OSObject::free' +; + .text + + .align 5 + .globl __ZN8OSObject4freeEv + +__ZN8OSObject4freeEv: + ; function prologue + stw r31,-4(r1) + mflr r0 + stw r0,8(r1) + mr r31,r3 + stwu r1,-80(r1) + + ; const OSMetaClass *meta = getMetaClass(); + lwz r9,0(r3) + lwz r12,32(r9) + mtctr r12 + bctrl + + ; if (meta) + ; meta->instanceDestructed(); + cmpwi r3,0 + beq delete_this + bl __ZNK11OSMetaClass18instanceDestructedEv + +delete_this: + ; delete this; + lwz r9,0(r31) + mr r3,r31 + li r4,0 ; Load up some sort of flags, for 2.95 destructors? + lwz r0,88(r1) + addi r1,r1,80 + lwz r12,8(r9) + mtlr r0 + lwz r31,-4(r1) + mtctr r12 + bctr + +#endif /* __GNUC__ >= 3 */ diff --git a/libkern/c++/OSOrderedSet.cpp b/libkern/c++/OSOrderedSet.cpp index b8a01f352..f90e68322 100644 --- a/libkern/c++/OSOrderedSet.cpp +++ b/libkern/c++/OSOrderedSet.cpp @@ -86,7 +86,7 @@ withCapacity(unsigned int capacity, OSOrderedSet *me = new OSOrderedSet; if (me && !me->initWithCapacity(capacity, ordering, orderingRef)) { - me->free(); + me->release(); me = 0; } diff --git a/libkern/c++/OSSerialize.cpp b/libkern/c++/OSSerialize.cpp index d71d2c7cd..e3eb5e2e5 100644 --- a/libkern/c++/OSSerialize.cpp +++ b/libkern/c++/OSSerialize.cpp @@ -151,7 +151,7 @@ bool OSSerialize::initWithCapacity(unsigned int inCapacity) tag = 0; length = 1; - capacity = (inCapacity) ? round_page(inCapacity) : round_page(1); + capacity = (inCapacity) ? 
round_page_32(inCapacity) : round_page_32(1); capacityIncrement = capacity; // allocate from the kernel map so that we can safely map this data @@ -176,7 +176,7 @@ OSSerialize *OSSerialize::withCapacity(unsigned int inCapacity) OSSerialize *me = new OSSerialize; if (me && !me->initWithCapacity(inCapacity)) { - me->free(); + me->release(); return 0; } @@ -200,7 +200,7 @@ unsigned int OSSerialize::ensureCapacity(unsigned int newCapacity) return capacity; // round up - newCapacity = round_page(newCapacity); + newCapacity = round_page_32(newCapacity); kern_return_t rc = kmem_realloc(kernel_map, (vm_offset_t)data, @@ -241,7 +241,7 @@ void OSSerialize::free() OSDefineMetaClassAndStructors(OSSerializer, OSObject) OSSerializer * OSSerializer::forTarget( void * target, - OSSerializerCallback callback, void * ref = 0 ) + OSSerializerCallback callback, void * ref ) { OSSerializer * thing; diff --git a/libkern/c++/OSSet.cpp b/libkern/c++/OSSet.cpp index 92db6e65e..96c67bccb 100644 --- a/libkern/c++/OSSet.cpp +++ b/libkern/c++/OSSet.cpp @@ -54,7 +54,7 @@ bool OSSet::initWithCapacity(unsigned int inCapacity) bool OSSet::initWithObjects(const OSObject *inObjects[], unsigned int inCount, - unsigned int inCapacity = 0) + unsigned int inCapacity) { unsigned int capacity = inCount; @@ -79,7 +79,7 @@ bool OSSet::initWithObjects(const OSObject *inObjects[], } bool OSSet::initWithArray(const OSArray *inArray, - unsigned int inCapacity = 0) + unsigned int inCapacity) { if ( !inArray ) return false; @@ -89,7 +89,7 @@ bool OSSet::initWithArray(const OSArray *inArray, } bool OSSet::initWithSet(const OSSet *inSet, - unsigned int inCapacity = 0) + unsigned int inCapacity) { return initWithArray(inSet->members, inCapacity); } @@ -99,7 +99,7 @@ OSSet *OSSet::withCapacity(unsigned int capacity) OSSet *me = new OSSet; if (me && !me->initWithCapacity(capacity)) { - me->free(); + me->release(); return 0; } @@ -108,12 +108,12 @@ OSSet *OSSet::withCapacity(unsigned int capacity) OSSet *OSSet::withObjects(const OSObject *objects[], unsigned int count, - unsigned int capacity = 0) + unsigned int capacity) { OSSet *me = new OSSet; if (me && !me->initWithObjects(objects, count, capacity)) { - me->free(); + me->release(); return 0; } @@ -121,12 +121,12 @@ OSSet *OSSet::withObjects(const OSObject *objects[], } OSSet *OSSet::withArray(const OSArray *array, - unsigned int capacity = 0) + unsigned int capacity) { OSSet *me = new OSSet; if (me && !me->initWithArray(array, capacity)) { - me->free(); + me->release(); return 0; } @@ -134,12 +134,12 @@ OSSet *OSSet::withArray(const OSArray *array, } OSSet *OSSet::withSet(const OSSet *set, - unsigned int capacity = 0) + unsigned int capacity) { OSSet *me = new OSSet; if (me && !me->initWithSet(set, capacity)) { - me->free(); + me->release(); return 0; } diff --git a/libkern/c++/OSString.cpp b/libkern/c++/OSString.cpp index 1c326e049..fa02c340e 100644 --- a/libkern/c++/OSString.cpp +++ b/libkern/c++/OSString.cpp @@ -101,7 +101,7 @@ OSString *OSString::withString(const OSString *aString) OSString *me = new OSString; if (me && !me->initWithString(aString)) { - me->free(); + me->release(); return 0; } @@ -113,7 +113,7 @@ OSString *OSString::withCString(const char *cString) OSString *me = new OSString; if (me && !me->initWithCString(cString)) { - me->free(); + me->release(); return 0; } @@ -125,7 +125,7 @@ OSString *OSString::withCStringNoCopy(const char *cString) OSString *me = new OSString; if (me && !me->initWithCStringNoCopy(cString)) { - me->free(); + me->release(); return 0; } diff --git 
a/libkern/c++/OSSymbol.cpp b/libkern/c++/OSSymbol.cpp index 10548e878..acb5d3141 100644 --- a/libkern/c++/OSSymbol.cpp +++ b/libkern/c++/OSSymbol.cpp @@ -96,7 +96,7 @@ public: inline void closeGate() { mutex_lock(poolGate); }; inline void openGate() { mutex_unlock(poolGate); }; - OSSymbol *findSymbol(const char *cString, OSSymbol ***replace) const; + OSSymbol *findSymbol(const char *cString) const; OSSymbol *insertSymbol(OSSymbol *sym); void removeSymbol(OSSymbol *sym); @@ -213,7 +213,7 @@ void OSSymbolPool::reconstructSymbols() insertSymbol(insert); } -OSSymbol *OSSymbolPool::findSymbol(const char *cString, OSSymbol ***replace) const +OSSymbol *OSSymbolPool::findSymbol(const char *cString) const { Bucket *thisBucket; unsigned int j, inLen, hash; @@ -223,8 +223,6 @@ OSSymbol *OSSymbolPool::findSymbol(const char *cString, OSSymbol ***replace) con thisBucket = &buckets[hash % nBuckets]; j = thisBucket->count; - *replace = NULL; - if (!j) return 0; @@ -232,28 +230,16 @@ OSSymbol *OSSymbolPool::findSymbol(const char *cString, OSSymbol ***replace) con probeSymbol = (OSSymbol *) thisBucket->symbolP; if (inLen == probeSymbol->length - && (strcmp(probeSymbol->string, cString) == 0)) { - probeSymbol->retain(); - if (probeSymbol->getRetainCount() != 0xffff) - return probeSymbol; - else - // replace this one - *replace = (OSSymbol **) &thisBucket->symbolP; - } + && (strcmp(probeSymbol->string, cString) == 0)) + return probeSymbol; return 0; } for (list = thisBucket->symbolP; j--; list++) { probeSymbol = *list; if (inLen == probeSymbol->length - && (strcmp(probeSymbol->string, cString) == 0)) { - probeSymbol->retain(); - if (probeSymbol->getRetainCount() != 0xffff) - return probeSymbol; - else - // replace this one - *replace = list; - } + && (strcmp(probeSymbol->string, cString) == 0)) + return probeSymbol; } return 0; @@ -274,7 +260,7 @@ OSSymbol *OSSymbolPool::insertSymbol(OSSymbol *sym) thisBucket->symbolP = (OSSymbol **) sym; thisBucket->count++; count++; - return 0; + return sym; } if (j == 1) { @@ -295,7 +281,7 @@ OSSymbol *OSSymbolPool::insertSymbol(OSSymbol *sym) if (count > nBuckets) reconstructSymbols(); - return 0; + return sym; } for (list = thisBucket->symbolP; j--; list++) { @@ -318,7 +304,7 @@ OSSymbol *OSSymbolPool::insertSymbol(OSSymbol *sym) if (count > nBuckets) reconstructSymbols(); - return 0; + return sym; } void OSSymbolPool::removeSymbol(OSSymbol *sym) @@ -442,48 +428,62 @@ const OSSymbol *OSSymbol::withString(const OSString *aString) const OSSymbol *OSSymbol::withCString(const char *cString) { - OSSymbol **replace; - pool->closeGate(); - OSSymbol *newSymb = pool->findSymbol(cString, &replace); - if (!newSymb && (newSymb = new OSSymbol) ) { - if (newSymb->OSString::initWithCString(cString)) { - if (replace) - *replace = newSymb; - else - pool->insertSymbol(newSymb); - } else { + OSSymbol *oldSymb = pool->findSymbol(cString); + if (!oldSymb) { + OSSymbol *newSymb = new OSSymbol; + if (!newSymb) { + pool->openGate(); + return newSymb; + } + + if (newSymb->OSString::initWithCString(cString)) + oldSymb = pool->insertSymbol(newSymb); + + if (newSymb == oldSymb) { + pool->openGate(); + return newSymb; // return the newly created & inserted symbol. + } + else + // Somebody else inserted the new symbol so free our copy newSymb->OSString::free(); - newSymb = 0; - } } - pool->openGate(); + + oldSymb->retain(); // Retain the old symbol before releasing the lock. 
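The withCString/withCStringNoCopy rewrite above turns findSymbol into a pure lookup and makes insertSymbol return whichever symbol actually ends up resident, so the caller retains the winning symbol while the pool gate is still closed and a losing copy is freed without ever being published. A self-contained sketch of that interning discipline, assuming std::mutex for the gate and shared_ptr ownership in place of the kernel's manual retain counts; the names are illustrative, not xnu's:

    #include <memory>
    #include <mutex>
    #include <string>
    #include <unordered_map>

    class SymbolPool {
        std::mutex gate;                      // closeGate()/openGate()
        std::unordered_map<std::string, std::shared_ptr<const std::string>> table;
    public:
        std::shared_ptr<const std::string> intern(const char *cString) {
            std::lock_guard<std::mutex> hold(gate);
            auto it = table.find(cString);
            if (it != table.end())
                return it->second;            // "retain" happens under the lock,
                                              // via the shared_ptr copy
            auto fresh = std::make_shared<const std::string>(cString);
            table.emplace(*fresh, fresh);     // like insertSymbol() returning the winner
            return fresh;
        }
    };

Holding the gate across the retain is the point of the change: a concurrent final release can no longer free a symbol between the lookup and the caller's retain, which is also why the taggedRelease(tag, when) override below re-takes the gate around the superclass release.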
- return newSymb; + pool->openGate(); + return oldSymb; } const OSSymbol *OSSymbol::withCStringNoCopy(const char *cString) { - OSSymbol **replace; - pool->closeGate(); - OSSymbol *newSymb = pool->findSymbol(cString, &replace); - if (!newSymb && (newSymb = new OSSymbol) ) { - if (newSymb->OSString::initWithCStringNoCopy(cString)) { - if (replace) - *replace = newSymb; - else - pool->insertSymbol(newSymb); - } else { + OSSymbol *oldSymb = pool->findSymbol(cString); + if (!oldSymb) { + OSSymbol *newSymb = new OSSymbol; + if (!newSymb) { + pool->openGate(); + return newSymb; + } + + if (newSymb->OSString::initWithCStringNoCopy(cString)) + oldSymb = pool->insertSymbol(newSymb); + + if (newSymb == oldSymb) { + pool->openGate(); + return newSymb; // return the newly created & inserted symbol. + } + else + // Somebody else inserted the new symbol so free our copy newSymb->OSString::free(); - newSymb = 0; - } } - pool->openGate(); + + oldSymb->retain(); // Retain the old symbol before releasing the lock. - return newSymb; + pool->openGate(); + return oldSymb; } void OSSymbol::checkForPageUnload(void *startAddr, void *endAddr) @@ -506,12 +506,21 @@ void OSSymbol::checkForPageUnload(void *startAddr, void *endAddr) pool->openGate(); } -void OSSymbol::free() +void OSSymbol::taggedRelease(const void *tag) const +{ + super::taggedRelease(tag); +} + +void OSSymbol::taggedRelease(const void *tag, const int when) const { pool->closeGate(); - pool->removeSymbol(this); + super::taggedRelease(tag, when); pool->openGate(); - +} + +void OSSymbol::free() +{ + pool->removeSymbol(this); super::free(); } diff --git a/libkern/c++/OSUnserializeXML.cpp b/libkern/c++/OSUnserializeXML.cpp index d53cb8d28..ac39813b9 100644 --- a/libkern/c++/OSUnserializeXML.cpp +++ b/libkern/c++/OSUnserializeXML.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -23,14 +23,18 @@ * @APPLE_LICENSE_HEADER_END@ */ -/* OSUnserializeXML.y created by rsulack on Tue Oct 12 1999 */ +/* + * HISTORY + * + * OSUnserializeXML.y created by rsulack on Tue Oct 12 1999 + */ -// XML parser for unserializing OSContainer objects +// parser for unserializing OSContainer objects serialized to XML // // to build : // bison -p OSUnserializeXML OSUnserializeXML.y // head -50 OSUnserializeXML.y > OSUnserializeXML.cpp -// sed -e "s/stdio.h/stddef.h/" < OSUnserializeXML.tab.c >> OSUnserializeXML.cpp +// sed -e "s/#include //" < OSUnserializeXML.tab.c >> OSUnserializeXML.cpp // // when changing code check in both OSUnserializeXML.y and OSUnserializeXML.cpp // @@ -38,8 +42,6 @@ // // // -// -// // DO NOT EDIT OSUnserializeXML.cpp! // // this means you! 
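The parser rewrite that follows removes every file-scope global (parseBuffer, parseBufferIndex, lineNumber, tags, parsedObject) in favor of a parser_state_t threaded through yyparse and yylex, making OSUnserializeXML reentrant. A minimal sketch of the pattern, with illustrative names rather than the parser's own:

    #include <stddef.h>

    // All scanner state lives in the struct, so concurrent parses cannot race.
    typedef struct scan_state {
        const char *buffer;   // text being parsed
        size_t      index;    // current position in buffer
        int         line;     // current line number, for error reporting
    } scan_state_t;

    static int scan_next_char(scan_state_t *s)
    {
        int c = s->buffer[s->index];
        if (c) {
            s->index++;
            if (c == '\n') s->line++;   // was a file-scope lineNumber++ before
        }
        return c;
    }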
@@ -49,8 +51,6 @@ // // // -// - /* A Bison parser, made from OSUnserializeXML.y by GNU Bison version 1.28 */ @@ -75,58 +75,76 @@ #define STRING 265 #define SYNTAX_ERROR 266 -#line 52 "OSUnserializeXML.y" +#line 55 "OSUnserializeXML.y" #include #include #include #include +#define YYSTYPE object_t * +#define YYPARSE_PARAM state +#define YYLEX_PARAM state + +// this is the internal struct used to hold objects on parser stack +// it represents objects both before and after they have been created typedef struct object { struct object *next; struct object *free; struct object *elements; OSObject *object; - const OSSymbol *key; // for dictionary + OSString *key; // for dictionary int size; - void *data; // for data - char *string; // for string & symbol - long long number; // for number + void *data; // for data + char *string; // for string & symbol + long long number; // for number int idref; } object_t; -static int yyparse(); -static int yyerror(char *s); -static int yylex(); - -static object_t * newObject(); -static void freeObject(object_t *o); - -static object_t *buildOSDictionary(object_t *); -static object_t *buildOSArray(object_t *); -static object_t *buildOSSet(object_t *); -static object_t *buildOSString(object_t *); -static object_t *buildKey(object_t *); -static object_t *buildOSData(object_t *); -static object_t *buildOSNumber(object_t *); -static object_t *buildOSBoolean(object_t *o); - -static void rememberObject(int, OSObject *); -static object_t *retrieveObject(int); - -// resultant object of parsed text -static OSObject *parsedObject; - -#define YYSTYPE object_t * +// this code is reentrant, this structure contains all +// state information for the parsing of a single buffer +typedef struct parser_state { + const char *parseBuffer; // start of text to be parsed + int parseBufferIndex; // current index into text + int lineNumber; // current line number + object_t *objects; // internal objects in use + object_t *freeObjects; // internal objects that are free + OSDictionary *tags; // used to remember "ID" tags + OSString **errorString; // parse error with line + OSObject *parsedObject; // resultant object of parsed text +} parser_state_t; + +#define STATE ((parser_state_t *)state) + +#undef yyerror +#define yyerror(s) OSUnserializeerror(STATE, (s)) +static int OSUnserializeerror(parser_state_t *state, char *s); + +static int yylex(YYSTYPE *lvalp, parser_state_t *state); +static int yyparse(void * state); + +static object_t *newObject(parser_state_t *state); +static void freeObject(parser_state_t *state, object_t *o); +static void rememberObject(parser_state_t *state, int tag, OSObject *o); +static object_t *retrieveObject(parser_state_t *state, int tag); +static void cleanupObjects(parser_state_t *state); + +static object_t *buildDictionary(parser_state_t *state, object_t *o); +static object_t *buildArray(parser_state_t *state, object_t *o); +static object_t *buildSet(parser_state_t *state, object_t *o); +static object_t *buildString(parser_state_t *state, object_t *o); +static object_t *buildData(parser_state_t *state, object_t *o); +static object_t *buildNumber(parser_state_t *state, object_t *o); +static object_t *buildBoolean(parser_state_t *state, object_t *o); extern "C" { -extern void *kern_os_malloc(size_t size); -extern void *kern_os_realloc(void * addr, size_t size); -extern void kern_os_free(void * addr); +extern void *kern_os_malloc(size_t size); +extern void *kern_os_realloc(void * addr, size_t size); +extern void kern_os_free(void * addr); //XXX shouldn't have to 
define these -extern long strtol(const char *, char **, int); -extern unsigned long strtoul(const char *, char **, int); +extern long strtol(const char *, char **, int); +extern unsigned long strtoul(const char *, char **, int); } /* extern "C" */ @@ -138,6 +156,7 @@ extern unsigned long strtoul(const char *, char **, int); #define YYSTYPE int #endif + #ifndef __cplusplus #ifndef __STDC__ #define const @@ -205,10 +224,10 @@ static const short yyrhs[] = { -1, #if YYDEBUG != 0 static const short yyrline[] = { 0, - 123, 124, 129, 135, 136, 137, 138, 139, 140, 141, - 142, 155, 158, 161, 164, 165, 170, 178, 183, 186, - 189, 192, 195, 198, 201, 204, 211, 214, 217, 220, - 223 + 144, 147, 152, 157, 158, 159, 160, 161, 162, 163, + 164, 177, 180, 183, 186, 187, 192, 201, 206, 209, + 212, 215, 218, 221, 224, 227, 234, 237, 240, 243, + 246 }; #endif @@ -291,6 +310,8 @@ static const short yycheck[] = { 0, 16, 17, 3, 4, 5, 6, 7, -1, 9, 10, 11, -1, 13, -1, 15, -1, 17 }; +#define YYPURE 1 + /* -*-C-*- Note some compilers choke on comments on `#line' lines. */ #line 3 "/usr/share/bison.simple" /* This file comes from bison-1.28. */ @@ -835,127 +856,129 @@ yyreduce: switch (yyn) { case 1: -#line 123 "OSUnserializeXML.y" -{ parsedObject = (OSObject *)NULL; YYACCEPT; ; +#line 144 "OSUnserializeXML.y" +{ yyerror("unexpected end of buffer"); + YYERROR; + ; break;} case 2: -#line 124 "OSUnserializeXML.y" -{ parsedObject = yyvsp[0]->object; +#line 147 "OSUnserializeXML.y" +{ STATE->parsedObject = yyvsp[0]->object; yyvsp[0]->object = 0; - freeObject(yyvsp[0]); + freeObject(STATE, yyvsp[0]); YYACCEPT; ; break;} case 3: -#line 129 "OSUnserializeXML.y" -{ - yyerror("syntax error"); +#line 152 "OSUnserializeXML.y" +{ yyerror("syntax error"); YYERROR; ; break;} case 4: -#line 135 "OSUnserializeXML.y" -{ yyval = buildOSDictionary(yyvsp[0]); ; +#line 157 "OSUnserializeXML.y" +{ yyval = buildDictionary(STATE, yyvsp[0]); ; break;} case 5: -#line 136 "OSUnserializeXML.y" -{ yyval = buildOSArray(yyvsp[0]); ; +#line 158 "OSUnserializeXML.y" +{ yyval = buildArray(STATE, yyvsp[0]); ; break;} case 6: -#line 137 "OSUnserializeXML.y" -{ yyval = buildOSSet(yyvsp[0]); ; +#line 159 "OSUnserializeXML.y" +{ yyval = buildSet(STATE, yyvsp[0]); ; break;} case 7: -#line 138 "OSUnserializeXML.y" -{ yyval = buildOSString(yyvsp[0]); ; +#line 160 "OSUnserializeXML.y" +{ yyval = buildString(STATE, yyvsp[0]); ; break;} case 8: -#line 139 "OSUnserializeXML.y" -{ yyval = buildOSData(yyvsp[0]); ; +#line 161 "OSUnserializeXML.y" +{ yyval = buildData(STATE, yyvsp[0]); ; break;} case 9: -#line 140 "OSUnserializeXML.y" -{ yyval = buildOSNumber(yyvsp[0]); ; +#line 162 "OSUnserializeXML.y" +{ yyval = buildNumber(STATE, yyvsp[0]); ; break;} case 10: -#line 141 "OSUnserializeXML.y" -{ yyval = buildOSBoolean(yyvsp[0]); ; +#line 163 "OSUnserializeXML.y" +{ yyval = buildBoolean(STATE, yyvsp[0]); ; break;} case 11: -#line 142 "OSUnserializeXML.y" -{ yyval = retrieveObject(yyvsp[0]->idref); +#line 164 "OSUnserializeXML.y" +{ yyval = retrieveObject(STATE, yyvsp[0]->idref); if (yyval) { yyval->object->retain(); } else { yyerror("forward reference detected"); YYERROR; } - freeObject(yyvsp[0]); + freeObject(STATE, yyvsp[0]); ; break;} case 12: -#line 155 "OSUnserializeXML.y" +#line 177 "OSUnserializeXML.y" { yyval = yyvsp[-1]; yyval->elements = NULL; ; break;} case 13: -#line 158 "OSUnserializeXML.y" +#line 180 "OSUnserializeXML.y" { yyval = yyvsp[-2]; yyval->elements = yyvsp[-1]; ; break;} case 16: -#line 165 "OSUnserializeXML.y" +#line 187 "OSUnserializeXML.y" { 
yyval = yyvsp[0]; yyval->next = yyvsp[-1]; ; break;} case 17: -#line 170 "OSUnserializeXML.y" +#line 192 "OSUnserializeXML.y" { yyval = yyvsp[-1]; - yyval->next = NULL; + yyval->key = yyval->object; yyval->object = yyvsp[0]->object; + yyval->next = NULL; yyvsp[0]->object = 0; - freeObject(yyvsp[0]); + freeObject(STATE, yyvsp[0]); ; break;} case 18: -#line 178 "OSUnserializeXML.y" -{ yyval = buildKey(yyvsp[0]); ; +#line 201 "OSUnserializeXML.y" +{ yyval = buildString(STATE, yyvsp[0]); ; break;} case 19: -#line 183 "OSUnserializeXML.y" +#line 206 "OSUnserializeXML.y" { yyval = yyvsp[-1]; yyval->elements = NULL; ; break;} case 20: -#line 186 "OSUnserializeXML.y" +#line 209 "OSUnserializeXML.y" { yyval = yyvsp[-2]; yyval->elements = yyvsp[-1]; ; break;} case 22: -#line 192 "OSUnserializeXML.y" +#line 215 "OSUnserializeXML.y" { yyval = yyvsp[-1]; yyval->elements = NULL; ; break;} case 23: -#line 195 "OSUnserializeXML.y" +#line 218 "OSUnserializeXML.y" { yyval = yyvsp[-2]; yyval->elements = yyvsp[-1]; ; break;} case 25: -#line 201 "OSUnserializeXML.y" +#line 224 "OSUnserializeXML.y" { yyval = yyvsp[0]; yyval->next = NULL; ; break;} case 26: -#line 204 "OSUnserializeXML.y" +#line 227 "OSUnserializeXML.y" { yyval = yyvsp[0]; yyval->next = yyvsp[-1]; ; @@ -1182,31 +1205,20 @@ yyerrhandle: } return 1; } -#line 226 "OSUnserializeXML.y" - - -static int lineNumber = 0; -static const char *parseBuffer; -static int parseBufferIndex; +#line 249 "OSUnserializeXML.y" -#define currentChar() (parseBuffer[parseBufferIndex]) -#define nextChar() (parseBuffer[++parseBufferIndex]) -#define prevChar() (parseBuffer[parseBufferIndex - 1]) - -#define isSpace(c) ((c) == ' ' || (c) == '\t') -#define isAlpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z')) -#define isDigit(c) ((c) >= '0' && (c) <= '9') -#define isAlphaDigit(c) ((c) >= 'a' && (c) <= 'f') -#define isHexDigit(c) (isDigit(c) || isAlphaDigit(c)) -#define isAlphaNumeric(c) (isAlpha(c) || isDigit(c) || ((c) == '-')) - -static char yyerror_message[128]; int -yyerror(char *s) /* Called by yyparse on error */ +OSUnserializeerror(parser_state_t * state, char *s) /* Called by yyparse on errors */ { - sprintf(yyerror_message, "OSUnserializeXML: %s near line %d\n", s, lineNumber); - return 0; + char tempString[128]; + + if (state->errorString) { + snprintf(tempString, 128, "OSUnserializeXML: %s near line %d\n", s, state->lineNumber); + *(state->errorString) = OSString::withCString(tempString); + } + + return 0; } #define TAG_MAX_LENGTH 32 @@ -1217,13 +1229,25 @@ yyerror(char *s) /* Called by yyparse on error */ #define TAG_EMPTY 3 #define TAG_COMMENT 4 +#define currentChar() (state->parseBuffer[state->parseBufferIndex]) +#define nextChar() (state->parseBuffer[++state->parseBufferIndex]) +#define prevChar() (state->parseBuffer[state->parseBufferIndex - 1]) + +#define isSpace(c) ((c) == ' ' || (c) == '\t') +#define isAlpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z')) +#define isDigit(c) ((c) >= '0' && (c) <= '9') +#define isAlphaDigit(c) ((c) >= 'a' && (c) <= 'f') +#define isHexDigit(c) (isDigit(c) || isAlphaDigit(c)) +#define isAlphaNumeric(c) (isAlpha(c) || isDigit(c) || ((c) == '-')) + static int -getTag(char tag[TAG_MAX_LENGTH], +getTag(parser_state_t *state, + char tag[TAG_MAX_LENGTH], int *attributeCount, char attributes[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH], char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH] ) { - int length = 0;; + int length = 0; int c = currentChar(); int tagType = TAG_START; @@ -1234,7 +1258,7 @@ getTag(char 
tag[TAG_MAX_LENGTH], if (c == '?' || c == '!') { while ((c = nextChar()) != 0) { - if (c == '\n') lineNumber++; + if (c == '\n') state->lineNumber++; if (c == '>') { (void)nextChar(); return TAG_COMMENT; @@ -1257,7 +1281,7 @@ getTag(char tag[TAG_MAX_LENGTH], tag[length] = 0; -//printf("tag %s, type %d\n", tag, tagType); +// printf("tag %s, type %d\n", tag, tagType); // look for attributes of the form attribute = "value" ... while ((c != '>') && (c != '/')) { @@ -1290,7 +1314,8 @@ getTag(char tag[TAG_MAX_LENGTH], c = nextChar(); // skip closing quote -//printf(" attribute '%s' = '%s', nextchar = '%c'\n", attributes[*attributeCount], values[*attributeCount], c); +// printf(" attribute '%s' = '%s', nextchar = '%c'\n", +// attributes[*attributeCount], values[*attributeCount], c); (*attributeCount)++; if (*attributeCount >= TAG_MAX_ATTRIBUTES) return TAG_BAD; @@ -1307,18 +1332,17 @@ getTag(char tag[TAG_MAX_LENGTH], } static char * -getString() +getString(parser_state_t *state) { int c = currentChar(); - - int start, length, i, j;; + int start, length, i, j; char * tempString; - start = parseBufferIndex; + start = state->parseBufferIndex; /* find end of string */ while (c != 0) { - if (c == '\n') lineNumber++; + if (c == '\n') state->lineNumber++; if (c == '<') { break; } @@ -1327,13 +1351,13 @@ getString() if (c != '<') return 0; - length = parseBufferIndex - start; + length = state->parseBufferIndex - start; /* copy to null terminated buffer */ tempString = (char *)malloc(length + 1); if (tempString == 0) { printf("OSUnserializeXML: can't alloc temp memory\n"); - return 0; + goto error; } // copy out string in tempString @@ -1341,29 +1365,29 @@ getString() i = j = 0; while (i < length) { - c = parseBuffer[start + i++]; + c = state->parseBuffer[start + i++]; if (c != '&') { tempString[j++] = c; } else { if ((i+3) > length) goto error; - c = parseBuffer[start + i++]; + c = state->parseBuffer[start + i++]; if (c == 'l') { - if (parseBuffer[start + i++] != 't') goto error; - if (parseBuffer[start + i++] != ';') goto error; + if (state->parseBuffer[start + i++] != 't') goto error; + if (state->parseBuffer[start + i++] != ';') goto error; tempString[j++] = '<'; continue; } if (c == 'g') { - if (parseBuffer[start + i++] != 't') goto error; - if (parseBuffer[start + i++] != ';') goto error; + if (state->parseBuffer[start + i++] != 't') goto error; + if (state->parseBuffer[start + i++] != ';') goto error; tempString[j++] = '>'; continue; } if ((i+3) > length) goto error; if (c == 'a') { - if (parseBuffer[start + i++] != 'm') goto error; - if (parseBuffer[start + i++] != 'p') goto error; - if (parseBuffer[start + i++] != ';') goto error; + if (state->parseBuffer[start + i++] != 'm') goto error; + if (state->parseBuffer[start + i++] != 'p') goto error; + if (state->parseBuffer[start + i++] != ';') goto error; tempString[j++] = '&'; continue; } @@ -1372,7 +1396,7 @@ getString() } tempString[j] = 0; -//printf("string %s\n", tempString); +// printf("string %s\n", tempString); return tempString; @@ -1382,7 +1406,7 @@ error: } static long long -getNumber() +getNumber(parser_state_t *state) { unsigned long long n = 0; int base = 10; @@ -1412,7 +1436,7 @@ getNumber() c = nextChar(); } } -//printf("number 0x%x\n", (unsigned long)n); +// printf("number 0x%x\n", (unsigned long)n); return n; } @@ -1437,14 +1461,14 @@ static const signed char __CFPLDataDecodeTable[128] = { /* 'x' */ 49, 50, 51, -1, -1, -1, -1, -1 }; -#define OSDATA_ALLOC_SIZE 4096 +#define DATA_ALLOC_SIZE 4096 static void * 
-getCFEncodedData(unsigned int *size) +getCFEncodedData(parser_state_t *state, unsigned int *size) { int numeq = 0, acc = 0, cntr = 0; int tmpbufpos = 0, tmpbuflen = 0; - unsigned char *tmpbuf = (unsigned char *)malloc(OSDATA_ALLOC_SIZE); + unsigned char *tmpbuf = (unsigned char *)malloc(DATA_ALLOC_SIZE); int c = currentChar(); *size = 0; @@ -1456,7 +1480,7 @@ getCFEncodedData(unsigned int *size) return 0; } if (c == '=') numeq++; else numeq = 0; - if (c == '\n') lineNumber++; + if (c == '\n') state->lineNumber++; if (__CFPLDataDecodeTable[c] < 0) { c = nextChar(); continue; @@ -1466,7 +1490,7 @@ getCFEncodedData(unsigned int *size) acc += __CFPLDataDecodeTable[c]; if (0 == (cntr & 0x3)) { if (tmpbuflen <= tmpbufpos + 2) { - tmpbuflen += OSDATA_ALLOC_SIZE; + tmpbuflen += DATA_ALLOC_SIZE; tmpbuf = (unsigned char *)realloc(tmpbuf, tmpbuflen); } tmpbuf[tmpbufpos++] = (acc >> 16) & 0xff; @@ -1478,23 +1502,27 @@ getCFEncodedData(unsigned int *size) c = nextChar(); } *size = tmpbufpos; + if (*size == 0) { + free(tmpbuf); + return 0; + } return tmpbuf; } static void * -getHexData(unsigned int *size) +getHexData(parser_state_t *state, unsigned int *size) { int c; unsigned char *d, *start, *lastStart; - start = lastStart = d = (unsigned char *)malloc(OSDATA_ALLOC_SIZE); + start = lastStart = d = (unsigned char *)malloc(DATA_ALLOC_SIZE); c = currentChar(); while (c != '<') { if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; if (c == '\n') { - lineNumber++; + state->lineNumber++; c = nextChar(); continue; } @@ -1519,9 +1547,9 @@ getHexData(unsigned int *size) } d++; - if ((d - lastStart) >= OSDATA_ALLOC_SIZE) { + if ((d - lastStart) >= DATA_ALLOC_SIZE) { int oldsize = d - start; - start = (unsigned char *)realloc(start, oldsize + OSDATA_ALLOC_SIZE); + start = (unsigned char *)realloc(start, oldsize + DATA_ALLOC_SIZE); d = lastStart = start + oldsize; } c = nextChar(); @@ -1538,16 +1566,15 @@ getHexData(unsigned int *size) } static int -yylex() +yylex(YYSTYPE *lvalp, parser_state_t *state) { - int c; + int c, i; int tagType; char tag[TAG_MAX_LENGTH]; int attributeCount; char attributes[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; - - if (parseBufferIndex == 0) lineNumber = 1; + object_t *object; top: c = currentChar(); @@ -1557,21 +1584,22 @@ yylex() /* keep track of line number, don't return \n's */ if (c == '\n') { - lineNumber++; + STATE->lineNumber++; (void)nextChar(); goto top; } - - if (!c) return c; - tagType = getTag(tag, &attributeCount, attributes, values); + // end of the buffer? 
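The getCFEncodedData() loop above is a streaming base64 decode: each table-mapped character contributes 6 bits to an accumulator, and every fourth character flushes 3 decoded bytes. A compact sketch of just that core, assuming a 128-entry table like __CFPLDataDecodeTable that maps base64 characters to 0-63 and everything else to -1, and ignoring the trailing '=' padding the kernel code handles at end-of-data:

    #include <vector>

    std::vector<unsigned char> decode_base64_core(const char *s,
                                                  const signed char table[128])
    {
        std::vector<unsigned char> out;
        unsigned int acc = 0, cntr = 0;
        for (; *s && *s != '<'; ++s) {               // '<' ends the <data> body
            unsigned char c = (unsigned char) *s;
            if (c >= 128 || table[c] < 0) continue;  // skip '=', whitespace, junk
            acc = (acc << 6) + table[c];
            if ((++cntr & 0x3) == 0) {               // 24 bits accumulated
                out.push_back((acc >> 16) & 0xff);
                out.push_back((acc >> 8) & 0xff);
                out.push_back(acc & 0xff);
            }
        }
        return out;
    }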
+ if (!c) return 0; + + tagType = getTag(STATE, tag, &attributeCount, attributes, values); if (tagType == TAG_BAD) return SYNTAX_ERROR; if (tagType == TAG_COMMENT) goto top; // handle allocation and check for "ID" and "IDREF" tags up front - yylval = newObject(); - yylval->idref = -1; - for (int i=0; i < attributeCount; i++) { + *lvalp = object = newObject(STATE); + object->idref = -1; + for (i=0; i < attributeCount; i++) { if (attributes[i][0] == 'I' && attributes[i][1] == 'D') { // check for idref's, note: we ignore the tag, for // this to work correctly, all idrefs must be unique @@ -1579,12 +1607,12 @@ yylex() if (attributes[i][2] == 'R' && attributes[i][3] == 'E' && attributes[i][4] == 'F' && !attributes[i][5]) { if (tagType != TAG_EMPTY) return SYNTAX_ERROR; - yylval->idref = strtol(values[i], NULL, 0); + object->idref = strtol(values[i], NULL, 0); return IDREF; } // check for id's if (!attributes[i][2]) { - yylval->idref = strtol(values[i], NULL, 0); + object->idref = strtol(values[i], NULL, 0); } else { return SYNTAX_ERROR; } @@ -1595,7 +1623,7 @@ yylex() case 'a': if (!strcmp(tag, "array")) { if (tagType == TAG_EMPTY) { - yylval->elements = NULL; + object->elements = NULL; return ARRAY; } return (tagType == TAG_START) ? '(' : ')'; @@ -1604,33 +1632,34 @@ yylex() case 'd': if (!strcmp(tag, "dict")) { if (tagType == TAG_EMPTY) { - yylval->elements = NULL; + object->elements = NULL; return DICTIONARY; } return (tagType == TAG_START) ? '{' : '}'; } if (!strcmp(tag, "data")) { unsigned int size; - int readable = 0; if (tagType == TAG_EMPTY) { - yylval->data = NULL; - yylval->size = 0; + object->data = NULL; + object->size = 0; return DATA; } + + bool isHexFormat = false; for (int i=0; i < attributeCount; i++) { if (!strcmp(attributes[i], "format") && !strcmp(values[i], "hex")) { - readable++; + isHexFormat = true; break; } } // CF encoded is the default form - if (readable) { - yylval->data = getHexData(&size); + if (isHexFormat) { + object->data = getHexData(STATE, &size); } else { - yylval->data = getCFEncodedData(&size); + object->data = getCFEncodedData(STATE, &size); } - yylval->size = size; - if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "data")) { + object->size = size; + if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "data")) { return SYNTAX_ERROR; } return DATA; @@ -1639,25 +1668,25 @@ yylex() case 'f': if (!strcmp(tag, "false")) { if (tagType == TAG_EMPTY) { - yylval->number = 0; + object->number = 0; return BOOLEAN; } } break; case 'i': if (!strcmp(tag, "integer")) { - yylval->size = 64; // default - for (int i=0; i < attributeCount; i++) { + object->size = 64; // default + for (i=0; i < attributeCount; i++) { if (!strcmp(attributes[i], "size")) { - yylval->size = strtoul(values[i], NULL, 0); + object->size = strtoul(values[i], NULL, 0); } } if (tagType == TAG_EMPTY) { - yylval->number = 0; + object->number = 0; return NUMBER; } - yylval->number = getNumber(); - if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "integer")) { + object->number = getNumber(STATE); + if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "integer")) { return SYNTAX_ERROR; } return NUMBER; @@ -1666,11 +1695,11 @@ yylex() case 'k': if (!strcmp(tag, "key")) { if (tagType == TAG_EMPTY) return SYNTAX_ERROR; - yylval->string = getString(); - if (!yylval->string) { + object->string = getString(STATE); + if (!object->string) { return SYNTAX_ERROR; } - if 
((getTag(tag, &attributeCount, attributes, values) != TAG_END) + if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "key")) { return SYNTAX_ERROR; } @@ -1679,22 +1708,22 @@ yylex() break; case 'p': if (!strcmp(tag, "plist")) { - freeObject(yylval); + freeObject(STATE, object); goto top; } break; case 's': if (!strcmp(tag, "string")) { if (tagType == TAG_EMPTY) { - yylval->string = (char *)malloc(1); - *yylval->string = 0; + object->string = (char *)malloc(1); + object->string[0] = 0; return STRING; } - yylval->string = getString(); - if (!yylval->string) { + object->string = getString(STATE); + if (!object->string) { return SYNTAX_ERROR; } - if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) + if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "string")) { return SYNTAX_ERROR; } @@ -1702,7 +1731,7 @@ yylex() } if (!strcmp(tag, "set")) { if (tagType == TAG_EMPTY) { - yylval->elements = NULL; + object->elements = NULL; return SET;; } if (tagType == TAG_START) { @@ -1715,19 +1744,14 @@ yylex() case 't': if (!strcmp(tag, "true")) { if (tagType == TAG_EMPTY) { - yylval->number = 1; + object->number = 1; return BOOLEAN; } } break; - - default: - // XXX should we ignore invalid tags? - return SYNTAX_ERROR; - break; } - return 0; + return SYNTAX_ERROR; } // !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# @@ -1736,94 +1760,96 @@ yylex() // "java" like allocation, if this code hits a syntax error in the // the middle of the parsed string we just bail with pointers hanging -// all over place, so this code helps keeps all together +// all over place, this code helps keeps it all together -static object_t *objects = 0; -static object_t *freeObjects = 0; +//static int object_count = 0; object_t * -newObject() +newObject(parser_state_t *state) { object_t *o; - if (freeObjects) { - o = freeObjects; - freeObjects = freeObjects->next; + if (state->freeObjects) { + o = state->freeObjects; + state->freeObjects = state->freeObjects->next; } else { o = (object_t *)malloc(sizeof(object_t)); +// object_count++; bzero(o, sizeof(object_t)); - o->free = objects; - objects = o; + o->free = state->objects; + state->objects = o; } return o; } void -freeObject(object_t *o) +freeObject(parser_state_t * state, object_t *o) { - o->next = freeObjects; - freeObjects = o; + o->next = state->freeObjects; + state->freeObjects = o; } void -cleanupObjects() +cleanupObjects(parser_state_t *state) { - object_t *t, *o = objects; + object_t *t, *o = state->objects; while (o) { if (o->object) { - printf("OSUnserializeXML: releasing object o=%x object=%x\n", (int)o, (int)o->object); +// printf("OSUnserializeXML: releasing object o=%x object=%x\n", (int)o, (int)o->object); o->object->release(); } if (o->data) { - printf("OSUnserializeXML: freeing object o=%x data=%x\n", (int)o, (int)o->data); +// printf("OSUnserializeXML: freeing object o=%x data=%x\n", (int)o, (int)o->data); free(o->data); } if (o->key) { - printf("OSUnserializeXML: releasing object o=%x key=%x\n", (int)o, (int)o->key); +// printf("OSUnserializeXML: releasing object o=%x key=%x\n", (int)o, (int)o->key); o->key->release(); } if (o->string) { - printf("OSUnserializeXML: freeing object o=%x string=%x\n", (int)o, (int)o->string); +// printf("OSUnserializeXML: freeing object o=%x string=%x\n", (int)o, (int)o->string); free(o->string); } t = o; o = o->free; free(t); +// object_count--; } +// printf("object_count = %d\n", object_count); } // 
!@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# // !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# // !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# -static OSDictionary *tags; - static void -rememberObject(int tag, OSObject *o) +rememberObject(parser_state_t *state, int tag, OSObject *o) { char key[16]; - sprintf(key, "%u", tag); + snprintf(key, 16, "%u", tag); -//printf("remember key %s\n", key); +// printf("remember key %s\n", key); - tags->setObject(key, o); + state->tags->setObject(key, o); } static object_t * -retrieveObject(int tag) +retrieveObject(parser_state_t *state, int tag) { + OSObject *ref; + object_t *o; char key[16]; - sprintf(key, "%u", tag); + snprintf(key, 16, "%u", tag); -//printf("retrieve key '%s'\n", key); +// printf("retrieve key '%s'\n", key); - OSObject *ref = tags->getObject(key); + ref = state->tags->getObject(key); if (!ref) return 0; - object_t *o = newObject(); + o = newObject(state); o->object = ref; return o; } @@ -1833,10 +1859,11 @@ retrieveObject(int tag) // !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# object_t * -buildOSDictionary(object_t * header) +buildDictionary(parser_state_t *state, object_t * header) { object_t *o, *t; int count = 0; + OSDictionary *dict; // get count and reverse order o = header->elements; @@ -1850,31 +1877,33 @@ buildOSDictionary(object_t * header) header->elements = t; } - OSDictionary *d = OSDictionary::withCapacity(count); - - if (header->idref >= 0) rememberObject(header->idref, d); + dict = OSDictionary::withCapacity(count); + if (header->idref >= 0) rememberObject(state, header->idref, dict); o = header->elements; while (o) { - d->setObject(o->key, o->object); - o->object->release(); - o->object = 0; + dict->setObject(o->key, o->object); + o->key->release(); + o->object->release(); o->key = 0; + o->object = 0; + t = o; o = o->next; - freeObject(t); + freeObject(state, t); } o = header; - o->object = d; + o->object = dict; return o; }; object_t * -buildOSArray(object_t * header) +buildArray(parser_state_t *state, object_t * header) { object_t *o, *t; int count = 0; + OSArray *array; // get count and reverse order o = header->elements; @@ -1888,140 +1917,120 @@ buildOSArray(object_t * header) header->elements = t; } - OSArray *a = OSArray::withCapacity(count); - - if (header->idref >= 0) rememberObject(header->idref, a); + array = OSArray::withCapacity(count); + if (header->idref >= 0) rememberObject(state, header->idref, array); o = header->elements; while (o) { - a->setObject(o->object); + array->setObject(o->object); + o->object->release(); o->object = 0; + t = o; o = o->next; - freeObject(t); + freeObject(state, t); } o = header; - o->object = a; + o->object = array; return o; }; object_t * -buildOSSet(object_t *o) +buildSet(parser_state_t *state, object_t *header) { - o = buildOSArray(o); - OSArray *a = (OSArray *)o->object; + object_t *o = buildArray(state, header); - OSSet *s = OSSet::withArray(a, a->getCapacity()); + OSArray *array = (OSArray *)o->object; + OSSet *set = OSSet::withArray(array, array->getCapacity()); - //write over reference created in array - if (o->idref >= 0) rememberObject(o->idref, s); + // write over the reference created in buildArray + if (header->idref >= 0) rememberObject(state, header->idref, set); - a->release(); - o->object = s; + array->release(); + o->object = set; return o; }; object_t * -buildOSString(object_t *o) +buildString(parser_state_t *state, object_t *o) { - OSString *s = OSString::withCString(o->string); - - if 
(o->idref >= 0) rememberObject(o->idref, s); - - free(o->string); - o->string = 0; - o->object = s; - - return o; -}; + OSString *string; -object_t * -buildKey(object_t *o) -{ - const OSSymbol *s = OSSymbol::withCString(o->string); + string = OSString::withCString(o->string); + if (o->idref >= 0) rememberObject(state, o->idref, string); free(o->string); o->string = 0; - o->key = s; + o->object = string; return o; }; object_t * -buildOSData(object_t *o) +buildData(parser_state_t *state, object_t *o) { - OSData *d; + OSData *data; if (o->size) { - d = OSData::withBytes(o->data, o->size); - free(o->data); + data = OSData::withBytes(o->data, o->size); } else { - d = OSData::withCapacity(0); + data = OSData::withCapacity(0); } - if (o->idref >= 0) rememberObject(o->idref, d); + if (o->idref >= 0) rememberObject(state, o->idref, data); + if (o->size) free(o->data); o->data = 0; - o->object = d; + o->object = data; return o; }; object_t * -buildOSNumber(object_t *o) +buildNumber(parser_state_t *state, object_t *o) { - OSNumber *n = OSNumber::withNumber(o->number, o->size); + OSNumber *number = OSNumber::withNumber(o->number, o->size); - if (o->idref >= 0) rememberObject(o->idref, n); + if (o->idref >= 0) rememberObject(state, o->idref, number); - o->object = n; + o->object = number; return o; }; object_t * -buildOSBoolean(object_t *o) +buildBoolean(parser_state_t *state, object_t *o) { - OSBoolean *b = OSBoolean::withBoolean(o->number != 0); - o->object = b; + o->object = ((o->number == 0) ? kOSBooleanFalse : kOSBooleanTrue); + o->object->retain(); return o; }; -__BEGIN_DECLS -#include -__END_DECLS - -static mutex_t *lock = 0; - OSObject* OSUnserializeXML(const char *buffer, OSString **errorString) { OSObject *object; + parser_state_t *state = (parser_state_t *)malloc(sizeof(parser_state_t)); - if (!lock) { - lock = mutex_alloc(ETAP_IO_AHA); - mutex_lock(lock); - } else { - mutex_lock(lock); + if ((!state) || (!buffer)) return 0; - } + // just in case + if (errorString) *errorString = NULL; - objects = 0; - freeObjects = 0; - yyerror_message[0] = 0; //just in case - parseBuffer = buffer; - parseBufferIndex = 0; - tags = OSDictionary::withCapacity(128); - if (yyparse() == 0) { - object = parsedObject; - if (errorString) *errorString = 0; - } else { - object = 0; - if (errorString) - *errorString = OSString::withCString(yyerror_message); - } + state->parseBuffer = buffer; + state->parseBufferIndex = 0; + state->lineNumber = 1; + state->objects = 0; + state->freeObjects = 0; + state->tags = OSDictionary::withCapacity(128); + state->errorString = errorString; + state->parsedObject = 0; + + (void)yyparse((void *)state); + + object = state->parsedObject; - cleanupObjects(); - tags->release(); - mutex_unlock(lock); + cleanupObjects(state); + state->tags->release(); + free(state); return object; } diff --git a/libkern/c++/OSUnserializeXML.y b/libkern/c++/OSUnserializeXML.y index 918c68304..bb36c159c 100644 --- a/libkern/c++/OSUnserializeXML.y +++ b/libkern/c++/OSUnserializeXML.y @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999-2002 Apple Computer, Inc. All rights reserved. 
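With the state heap-allocated per call and the old static mutex gone, the rewritten OSUnserializeXML() above can be entered from several threads at once. A usage sketch, assuming the declaration lives in libkern's OSUnserialize.h and that kernel printf is available; the surrounding code is illustrative:

    #include <libkern/c++/OSUnserialize.h>   // declares OSUnserializeXML (assumed)
    #include <libkern/c++/OSDictionary.h>
    #include <libkern/c++/OSString.h>

    static OSDictionary *parseDict(const char *xml)
    {
        OSString *errorString = 0;
        OSObject *obj = OSUnserializeXML(xml, &errorString);

        OSDictionary *dict = OSDynamicCast(OSDictionary, obj);
        if (!dict) {
            if (errorString) {
                printf("%s", errorString->getCStringNoCopy());
                errorString->release();      // caller owns the error string
            }
            if (obj) obj->release();         // parsed, but not a dictionary
            return 0;
        }
        return dict;                         // caller owns one reference
    }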
* * @APPLE_LICENSE_HEADER_START@ * @@ -20,14 +20,18 @@ * @APPLE_LICENSE_HEADER_END@ */ -/* OSUnserializeXML.y created by rsulack on Tue Oct 12 1999 */ +/* + * HISTORY + * + * OSUnserializeXML.y created by rsulack on Tue Oct 12 1999 + */ -// XML parser for unserializing OSContainer objects +// parser for unserializing OSContainer objects serialized to XML // // to build : // bison -p OSUnserializeXML OSUnserializeXML.y // head -50 OSUnserializeXML.y > OSUnserializeXML.cpp -// sed -e "s/stdio.h/stddef.h/" < OSUnserializeXML.tab.c >> OSUnserializeXML.cpp +// sed -e "s/#include //" < OSUnserializeXML.tab.c >> OSUnserializeXML.cpp // // when changing code check in both OSUnserializeXML.y and OSUnserializeXML.cpp // @@ -35,8 +39,6 @@ // // // -// -// // DO NOT EDIT OSUnserializeXML.cpp! // // this means you! @@ -46,60 +48,79 @@ // // // -// +%pure_parser + %{ #include #include #include #include +#define YYSTYPE object_t * +#define YYPARSE_PARAM state +#define YYLEX_PARAM state + +// this is the internal struct used to hold objects on parser stack +// it represents objects both before and after they have been created typedef struct object { struct object *next; struct object *free; struct object *elements; OSObject *object; - const OSSymbol *key; // for dictionary + OSString *key; // for dictionary int size; - void *data; // for data - char *string; // for string & symbol - long long number; // for number + void *data; // for data + char *string; // for string & symbol + long long number; // for number int idref; } object_t; -static int yyparse(); -static int yyerror(char *s); -static int yylex(); - -static object_t * newObject(); -static void freeObject(object_t *o); - -static object_t *buildOSDictionary(object_t *); -static object_t *buildOSArray(object_t *); -static object_t *buildOSSet(object_t *); -static object_t *buildOSString(object_t *); -static object_t *buildKey(object_t *); -static object_t *buildOSData(object_t *); -static object_t *buildOSNumber(object_t *); -static object_t *buildOSBoolean(object_t *o); - -static void rememberObject(int, OSObject *); -static object_t *retrieveObject(int); - -// resultant object of parsed text -static OSObject *parsedObject; - -#define YYSTYPE object_t * +// this code is reentrant, this structure contains all +// state information for the parsing of a single buffer +typedef struct parser_state { + const char *parseBuffer; // start of text to be parsed + int parseBufferIndex; // current index into text + int lineNumber; // current line number + object_t *objects; // internal objects in use + object_t *freeObjects; // internal objects that are free + OSDictionary *tags; // used to remember "ID" tags + OSString **errorString; // parse error with line + OSObject *parsedObject; // resultant object of parsed text +} parser_state_t; + +#define STATE ((parser_state_t *)state) + +#undef yyerror +#define yyerror(s) OSUnserializeerror(STATE, (s)) +static int OSUnserializeerror(parser_state_t *state, char *s); + +static int yylex(YYSTYPE *lvalp, parser_state_t *state); +static int yyparse(void * state); + +static object_t *newObject(parser_state_t *state); +static void freeObject(parser_state_t *state, object_t *o); +static void rememberObject(parser_state_t *state, int tag, OSObject *o); +static object_t *retrieveObject(parser_state_t *state, int tag); +static void cleanupObjects(parser_state_t *state); + +static object_t *buildDictionary(parser_state_t *state, object_t *o); +static object_t *buildArray(parser_state_t *state, object_t *o); +static object_t 
*buildSet(parser_state_t *state, object_t *o); +static object_t *buildString(parser_state_t *state, object_t *o); +static object_t *buildData(parser_state_t *state, object_t *o); +static object_t *buildNumber(parser_state_t *state, object_t *o); +static object_t *buildBoolean(parser_state_t *state, object_t *o); extern "C" { -extern void *kern_os_malloc(size_t size); -extern void *kern_os_realloc(void * addr, size_t size); -extern void kern_os_free(void * addr); +extern void *kern_os_malloc(size_t size); +extern void *kern_os_realloc(void * addr, size_t size); +extern void kern_os_free(void * addr); //XXX shouldn't have to define these -extern long strtol(const char *, char **, int); -extern unsigned long strtoul(const char *, char **, int); +extern long strtol(const char *, char **, int); +extern unsigned long strtoul(const char *, char **, int); } /* extern "C" */ @@ -120,33 +141,34 @@ extern unsigned long strtoul(const char *, char **, int); %token SYNTAX_ERROR %% /* Grammar rules and actions follow */ -input: /* empty */ { parsedObject = (OSObject *)NULL; YYACCEPT; } - | object { parsedObject = $1->object; +input: /* empty */ { yyerror("unexpected end of buffer"); + YYERROR; + } + | object { STATE->parsedObject = $1->object; $1->object = 0; - freeObject($1); + freeObject(STATE, $1); YYACCEPT; } - | SYNTAX_ERROR { - yyerror("syntax error"); + | SYNTAX_ERROR { yyerror("syntax error"); YYERROR; } ; -object: dict { $$ = buildOSDictionary($1); } - | array { $$ = buildOSArray($1); } - | set { $$ = buildOSSet($1); } - | string { $$ = buildOSString($1); } - | data { $$ = buildOSData($1); } - | number { $$ = buildOSNumber($1); } - | boolean { $$ = buildOSBoolean($1); } - | idref { $$ = retrieveObject($1->idref); +object: dict { $$ = buildDictionary(STATE, $1); } + | array { $$ = buildArray(STATE, $1); } + | set { $$ = buildSet(STATE, $1); } + | string { $$ = buildString(STATE, $1); } + | data { $$ = buildData(STATE, $1); } + | number { $$ = buildNumber(STATE, $1); } + | boolean { $$ = buildBoolean(STATE, $1); } + | idref { $$ = retrieveObject(STATE, $1->idref); if ($$) { $$->object->retain(); } else { yyerror("forward reference detected"); YYERROR; } - freeObject($1); + freeObject(STATE, $1); } ; @@ -168,14 +190,15 @@ pairs: pair ; pair: key object { $$ = $1; - $$->next = NULL; + $$->key = $$->object; $$->object = $2->object; + $$->next = NULL; $2->object = 0; - freeObject($2); + freeObject(STATE, $2); } ; -key: KEY { $$ = buildKey($1); } +key: KEY { $$ = buildString(STATE, $1); } ; //------------------------------------------------------------------------------ @@ -224,29 +247,18 @@ string: STRING ; %% - -static int lineNumber = 0; -static const char *parseBuffer; -static int parseBufferIndex; - -#define currentChar() (parseBuffer[parseBufferIndex]) -#define nextChar() (parseBuffer[++parseBufferIndex]) -#define prevChar() (parseBuffer[parseBufferIndex - 1]) - -#define isSpace(c) ((c) == ' ' || (c) == '\t') -#define isAlpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z')) -#define isDigit(c) ((c) >= '0' && (c) <= '9') -#define isAlphaDigit(c) ((c) >= 'a' && (c) <= 'f') -#define isHexDigit(c) (isDigit(c) || isAlphaDigit(c)) -#define isAlphaNumeric(c) (isAlpha(c) || isDigit(c) || ((c) == '-')) - -static char yyerror_message[128]; int -yyerror(char *s) /* Called by yyparse on error */ +OSUnserializeerror(parser_state_t * state, char *s) /* Called by yyparse on errors */ { - sprintf(yyerror_message, "OSUnserializeXML: %s near line %d\n", s, lineNumber); - return 0; + char 
tempString[128]; + + if (state->errorString) { + snprintf(tempString, 128, "OSUnserializeXML: %s near line %d\n", s, state->lineNumber); + *(state->errorString) = OSString::withCString(tempString); + } + + return 0; } #define TAG_MAX_LENGTH 32 @@ -257,13 +269,25 @@ yyerror(char *s) /* Called by yyparse on error */ #define TAG_EMPTY 3 #define TAG_COMMENT 4 +#define currentChar() (state->parseBuffer[state->parseBufferIndex]) +#define nextChar() (state->parseBuffer[++state->parseBufferIndex]) +#define prevChar() (state->parseBuffer[state->parseBufferIndex - 1]) + +#define isSpace(c) ((c) == ' ' || (c) == '\t') +#define isAlpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z')) +#define isDigit(c) ((c) >= '0' && (c) <= '9') +#define isAlphaDigit(c) ((c) >= 'a' && (c) <= 'f') +#define isHexDigit(c) (isDigit(c) || isAlphaDigit(c)) +#define isAlphaNumeric(c) (isAlpha(c) || isDigit(c) || ((c) == '-')) + static int -getTag(char tag[TAG_MAX_LENGTH], +getTag(parser_state_t *state, + char tag[TAG_MAX_LENGTH], int *attributeCount, char attributes[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH], char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH] ) { - int length = 0;; + int length = 0; int c = currentChar(); int tagType = TAG_START; @@ -274,7 +298,7 @@ getTag(char tag[TAG_MAX_LENGTH], if (c == '?' || c == '!') { while ((c = nextChar()) != 0) { - if (c == '\n') lineNumber++; + if (c == '\n') state->lineNumber++; if (c == '>') { (void)nextChar(); return TAG_COMMENT; @@ -297,7 +321,7 @@ getTag(char tag[TAG_MAX_LENGTH], tag[length] = 0; -//printf("tag %s, type %d\n", tag, tagType); +// printf("tag %s, type %d\n", tag, tagType); // look for attributes of the form attribute = "value" ... while ((c != '>') && (c != '/')) { @@ -330,7 +354,8 @@ getTag(char tag[TAG_MAX_LENGTH], c = nextChar(); // skip closing quote -//printf(" attribute '%s' = '%s', nextchar = '%c'\n", attributes[*attributeCount], values[*attributeCount], c); +// printf(" attribute '%s' = '%s', nextchar = '%c'\n", +// attributes[*attributeCount], values[*attributeCount], c); (*attributeCount)++; if (*attributeCount >= TAG_MAX_ATTRIBUTES) return TAG_BAD; @@ -347,18 +372,17 @@ getTag(char tag[TAG_MAX_LENGTH], } static char * -getString() +getString(parser_state_t *state) { int c = currentChar(); - - int start, length, i, j;; + int start, length, i, j; char * tempString; - start = parseBufferIndex; + start = state->parseBufferIndex; /* find end of string */ while (c != 0) { - if (c == '\n') lineNumber++; + if (c == '\n') state->lineNumber++; if (c == '<') { break; } @@ -367,13 +391,13 @@ getString() if (c != '<') return 0; - length = parseBufferIndex - start; + length = state->parseBufferIndex - start; /* copy to null terminated buffer */ tempString = (char *)malloc(length + 1); if (tempString == 0) { printf("OSUnserializeXML: can't alloc temp memory\n"); - return 0; + goto error; } // copy out string in tempString @@ -381,29 +405,29 @@ getString() i = j = 0; while (i < length) { - c = parseBuffer[start + i++]; + c = state->parseBuffer[start + i++]; if (c != '&') { tempString[j++] = c; } else { if ((i+3) > length) goto error; - c = parseBuffer[start + i++]; + c = state->parseBuffer[start + i++]; if (c == 'l') { - if (parseBuffer[start + i++] != 't') goto error; - if (parseBuffer[start + i++] != ';') goto error; + if (state->parseBuffer[start + i++] != 't') goto error; + if (state->parseBuffer[start + i++] != ';') goto error; tempString[j++] = '<'; continue; } if (c == 'g') { - if (parseBuffer[start + i++] != 't') goto error; - if (parseBuffer[start + 
i++] != ';') goto error; + if (state->parseBuffer[start + i++] != 't') goto error; + if (state->parseBuffer[start + i++] != ';') goto error; tempString[j++] = '>'; continue; } if ((i+3) > length) goto error; if (c == 'a') { - if (parseBuffer[start + i++] != 'm') goto error; - if (parseBuffer[start + i++] != 'p') goto error; - if (parseBuffer[start + i++] != ';') goto error; + if (state->parseBuffer[start + i++] != 'm') goto error; + if (state->parseBuffer[start + i++] != 'p') goto error; + if (state->parseBuffer[start + i++] != ';') goto error; tempString[j++] = '&'; continue; } @@ -412,7 +436,7 @@ getString() } tempString[j] = 0; -//printf("string %s\n", tempString); +// printf("string %s\n", tempString); return tempString; @@ -422,7 +446,7 @@ error: } static long long -getNumber() +getNumber(parser_state_t *state) { unsigned long long n = 0; int base = 10; @@ -452,7 +476,7 @@ getNumber() c = nextChar(); } } -//printf("number 0x%x\n", (unsigned long)n); +// printf("number 0x%x\n", (unsigned long)n); return n; } @@ -477,14 +501,14 @@ static const signed char __CFPLDataDecodeTable[128] = { /* 'x' */ 49, 50, 51, -1, -1, -1, -1, -1 }; -#define OSDATA_ALLOC_SIZE 4096 +#define DATA_ALLOC_SIZE 4096 static void * -getCFEncodedData(unsigned int *size) +getCFEncodedData(parser_state_t *state, unsigned int *size) { int numeq = 0, acc = 0, cntr = 0; int tmpbufpos = 0, tmpbuflen = 0; - unsigned char *tmpbuf = (unsigned char *)malloc(OSDATA_ALLOC_SIZE); + unsigned char *tmpbuf = (unsigned char *)malloc(DATA_ALLOC_SIZE); int c = currentChar(); *size = 0; @@ -496,7 +520,7 @@ getCFEncodedData(unsigned int *size) return 0; } if (c == '=') numeq++; else numeq = 0; - if (c == '\n') lineNumber++; + if (c == '\n') state->lineNumber++; if (__CFPLDataDecodeTable[c] < 0) { c = nextChar(); continue; @@ -506,7 +530,7 @@ getCFEncodedData(unsigned int *size) acc += __CFPLDataDecodeTable[c]; if (0 == (cntr & 0x3)) { if (tmpbuflen <= tmpbufpos + 2) { - tmpbuflen += OSDATA_ALLOC_SIZE; + tmpbuflen += DATA_ALLOC_SIZE; tmpbuf = (unsigned char *)realloc(tmpbuf, tmpbuflen); } tmpbuf[tmpbufpos++] = (acc >> 16) & 0xff; @@ -518,23 +542,27 @@ getCFEncodedData(unsigned int *size) c = nextChar(); } *size = tmpbufpos; + if (*size == 0) { + free(tmpbuf); + return 0; + } return tmpbuf; } static void * -getHexData(unsigned int *size) +getHexData(parser_state_t *state, unsigned int *size) { int c; unsigned char *d, *start, *lastStart; - start = lastStart = d = (unsigned char *)malloc(OSDATA_ALLOC_SIZE); + start = lastStart = d = (unsigned char *)malloc(DATA_ALLOC_SIZE); c = currentChar(); while (c != '<') { if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; if (c == '\n') { - lineNumber++; + state->lineNumber++; c = nextChar(); continue; } @@ -559,9 +587,9 @@ getHexData(unsigned int *size) } d++; - if ((d - lastStart) >= OSDATA_ALLOC_SIZE) { + if ((d - lastStart) >= DATA_ALLOC_SIZE) { int oldsize = d - start; - start = (unsigned char *)realloc(start, oldsize + OSDATA_ALLOC_SIZE); + start = (unsigned char *)realloc(start, oldsize + DATA_ALLOC_SIZE); d = lastStart = start + oldsize; } c = nextChar(); @@ -578,16 +606,15 @@ getHexData(unsigned int *size) } static int -yylex() +yylex(YYSTYPE *lvalp, parser_state_t *state) { - int c; + int c, i; int tagType; char tag[TAG_MAX_LENGTH]; int attributeCount; char attributes[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; - - if (parseBufferIndex == 0) lineNumber = 1; + object_t *object; top: c = currentChar(); @@ -597,21 +624,22 @@ yylex() /* 
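// ----------------------------------------------------------------------------
// Standalone sketch of the decode step getCFEncodedData() performs with
// __CFPLDataDecodeTable: each base64 character supplies 6 bits, every four
// characters flush a 24-bit accumulator as three bytes, and trailing '='
// padding trims the final flush. Illustrative user-space code, not the
// kernel routine itself.
#include <string.h>
#include <vector>

static std::vector<unsigned char>
decodeBase64(const char *s)
{
    static const char table[] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    std::vector<unsigned char> out;
    int acc = 0, cntr = 0, numeq = 0;

    for (const char *p = s; *p; p++) {
        int v;
        if (*p == '=') {                 // padding contributes zero bits
            numeq++;
            v = 0;
        } else {
            const char *q = strchr(table, *p);
            if (!q) continue;            // skip whitespace and other junk
            v = (int)(q - table);
        }
        acc = (acc << 6) | v;
        if ((++cntr & 0x3) == 0) {       // four characters in: emit three bytes
            out.push_back((acc >> 16) & 0xff);
            out.push_back((acc >> 8) & 0xff);
            out.push_back(acc & 0xff);
            acc = 0;
        }
    }
    while (numeq-- > 0 && !out.empty())  // drop one byte per '=' of padding
        out.pop_back();
    return out;
}
// decodeBase64("TWFjaA==") yields the four bytes of "Mach".
// ----------------------------------------------------------------------------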
keep track of line number, don't return \n's */ if (c == '\n') { - lineNumber++; + STATE->lineNumber++; (void)nextChar(); goto top; } - - if (!c) return c; - tagType = getTag(tag, &attributeCount, attributes, values); + // end of the buffer? + if (!c) return 0; + + tagType = getTag(STATE, tag, &attributeCount, attributes, values); if (tagType == TAG_BAD) return SYNTAX_ERROR; if (tagType == TAG_COMMENT) goto top; // handle allocation and check for "ID" and "IDREF" tags up front - yylval = newObject(); - yylval->idref = -1; - for (int i=0; i < attributeCount; i++) { + *lvalp = object = newObject(STATE); + object->idref = -1; + for (i=0; i < attributeCount; i++) { if (attributes[i][0] == 'I' && attributes[i][1] == 'D') { // check for idref's, note: we ignore the tag, for // this to work correctly, all idrefs must be unique @@ -619,12 +647,12 @@ yylex() if (attributes[i][2] == 'R' && attributes[i][3] == 'E' && attributes[i][4] == 'F' && !attributes[i][5]) { if (tagType != TAG_EMPTY) return SYNTAX_ERROR; - yylval->idref = strtol(values[i], NULL, 0); + object->idref = strtol(values[i], NULL, 0); return IDREF; } // check for id's if (!attributes[i][2]) { - yylval->idref = strtol(values[i], NULL, 0); + object->idref = strtol(values[i], NULL, 0); } else { return SYNTAX_ERROR; } @@ -635,7 +663,7 @@ yylex() case 'a': if (!strcmp(tag, "array")) { if (tagType == TAG_EMPTY) { - yylval->elements = NULL; + object->elements = NULL; return ARRAY; } return (tagType == TAG_START) ? '(' : ')'; @@ -644,33 +672,34 @@ yylex() case 'd': if (!strcmp(tag, "dict")) { if (tagType == TAG_EMPTY) { - yylval->elements = NULL; + object->elements = NULL; return DICTIONARY; } return (tagType == TAG_START) ? '{' : '}'; } if (!strcmp(tag, "data")) { unsigned int size; - int readable = 0; if (tagType == TAG_EMPTY) { - yylval->data = NULL; - yylval->size = 0; + object->data = NULL; + object->size = 0; return DATA; } + + bool isHexFormat = false; for (int i=0; i < attributeCount; i++) { if (!strcmp(attributes[i], "format") && !strcmp(values[i], "hex")) { - readable++; + isHexFormat = true; break; } } // CF encoded is the default form - if (readable) { - yylval->data = getHexData(&size); + if (isHexFormat) { + object->data = getHexData(STATE, &size); } else { - yylval->data = getCFEncodedData(&size); + object->data = getCFEncodedData(STATE, &size); } - yylval->size = size; - if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "data")) { + object->size = size; + if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "data")) { return SYNTAX_ERROR; } return DATA; @@ -679,25 +708,25 @@ yylex() case 'f': if (!strcmp(tag, "false")) { if (tagType == TAG_EMPTY) { - yylval->number = 0; + object->number = 0; return BOOLEAN; } } break; case 'i': if (!strcmp(tag, "integer")) { - yylval->size = 64; // default - for (int i=0; i < attributeCount; i++) { + object->size = 64; // default + for (i=0; i < attributeCount; i++) { if (!strcmp(attributes[i], "size")) { - yylval->size = strtoul(values[i], NULL, 0); + object->size = strtoul(values[i], NULL, 0); } } if (tagType == TAG_EMPTY) { - yylval->number = 0; + object->number = 0; return NUMBER; } - yylval->number = getNumber(); - if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "integer")) { + object->number = getNumber(STATE); + if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "integer")) { return SYNTAX_ERROR; } return NUMBER; @@ -706,11 +735,11 @@ yylex() case 'k': 
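// ----------------------------------------------------------------------------
// Illustrative input for the ID/IDREF handling above (example data, not taken
// from the patch): the first occurrence of an object may carry an ID
// attribute, and a later *empty* tag can refer back to it with IDREF. The tag
// name on the reference is ignored, IDs must be unique, and a reference that
// appears before its definition is rejected by the grammar ("forward
// reference detected").
//
//   <dict>
//       <key>original</key> <string ID="1">shared value</string>
//       <key>alias</key>    <string IDREF="1"/>
//   </dict>
//
// Both keys end up pointing at the same retained OSString.
// ----------------------------------------------------------------------------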
if (!strcmp(tag, "key")) { if (tagType == TAG_EMPTY) return SYNTAX_ERROR; - yylval->string = getString(); - if (!yylval->string) { + object->string = getString(STATE); + if (!object->string) { return SYNTAX_ERROR; } - if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) + if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "key")) { return SYNTAX_ERROR; } @@ -719,22 +748,22 @@ yylex() break; case 'p': if (!strcmp(tag, "plist")) { - freeObject(yylval); + freeObject(STATE, object); goto top; } break; case 's': if (!strcmp(tag, "string")) { if (tagType == TAG_EMPTY) { - yylval->string = (char *)malloc(1); - *yylval->string = 0; + object->string = (char *)malloc(1); + object->string[0] = 0; return STRING; } - yylval->string = getString(); - if (!yylval->string) { + object->string = getString(STATE); + if (!object->string) { return SYNTAX_ERROR; } - if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) + if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "string")) { return SYNTAX_ERROR; } @@ -742,7 +771,7 @@ yylex() } if (!strcmp(tag, "set")) { if (tagType == TAG_EMPTY) { - yylval->elements = NULL; + object->elements = NULL; return SET;; } if (tagType == TAG_START) { @@ -755,19 +784,14 @@ yylex() case 't': if (!strcmp(tag, "true")) { if (tagType == TAG_EMPTY) { - yylval->number = 1; + object->number = 1; return BOOLEAN; } } break; - - default: - // XXX should we ignore invalid tags? - return SYNTAX_ERROR; - break; } - return 0; + return SYNTAX_ERROR; } // !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# @@ -776,94 +800,96 @@ yylex() // "java" like allocation, if this code hits a syntax error in the // the middle of the parsed string we just bail with pointers hanging -// all over place, so this code helps keeps all together +// all over place, this code helps keeps it all together -static object_t *objects = 0; -static object_t *freeObjects = 0; +//static int object_count = 0; object_t * -newObject() +newObject(parser_state_t *state) { object_t *o; - if (freeObjects) { - o = freeObjects; - freeObjects = freeObjects->next; + if (state->freeObjects) { + o = state->freeObjects; + state->freeObjects = state->freeObjects->next; } else { o = (object_t *)malloc(sizeof(object_t)); +// object_count++; bzero(o, sizeof(object_t)); - o->free = objects; - objects = o; + o->free = state->objects; + state->objects = o; } return o; } void -freeObject(object_t *o) +freeObject(parser_state_t * state, object_t *o) { - o->next = freeObjects; - freeObjects = o; + o->next = state->freeObjects; + state->freeObjects = o; } void -cleanupObjects() +cleanupObjects(parser_state_t *state) { - object_t *t, *o = objects; + object_t *t, *o = state->objects; while (o) { if (o->object) { - printf("OSUnserializeXML: releasing object o=%x object=%x\n", (int)o, (int)o->object); +// printf("OSUnserializeXML: releasing object o=%x object=%x\n", (int)o, (int)o->object); o->object->release(); } if (o->data) { - printf("OSUnserializeXML: freeing object o=%x data=%x\n", (int)o, (int)o->data); +// printf("OSUnserializeXML: freeing object o=%x data=%x\n", (int)o, (int)o->data); free(o->data); } if (o->key) { - printf("OSUnserializeXML: releasing object o=%x key=%x\n", (int)o, (int)o->key); +// printf("OSUnserializeXML: releasing object o=%x key=%x\n", (int)o, (int)o->key); o->key->release(); } if (o->string) { - printf("OSUnserializeXML: freeing object o=%x string=%x\n", (int)o, (int)o->string); +// printf("OSUnserializeXML: 
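// ----------------------------------------------------------------------------
// Minimal sketch, with hypothetical names, of the recycling scheme used by
// newObject()/freeObject() above: freed nodes go onto a free list for reuse,
// while a second link chains every allocation so cleanup can reclaim whatever
// a failed parse left behind.
struct node_t {
    node_t *next;   // free-list link while recycled
    node_t *all;    // chains every node ever allocated
    // ... payload fields ...
};

struct pool_t {
    node_t *all;    // everything allocated, ever
    node_t *free;   // currently available for reuse
};

static node_t *
poolAlloc(pool_t *p)
{
    node_t *n;
    if (p->free) {              // reuse before allocating, like newObject()
        n = p->free;
        p->free = n->next;
    } else {
        n = new node_t();       // zero-initialized
        n->all = p->all;        // fresh nodes join the "all" chain once
        p->all = n;
    }
    return n;
}

static void
poolFree(pool_t *p, node_t *n)
{
    n->next = p->free;          // push onto the free list; stays on "all"
    p->free = n;
}

static void
poolCleanup(pool_t *p)
{
    node_t *n = p->all;
    while (n) {                 // walk the "all" chain, as cleanupObjects() does
        node_t *t = n;
        n = n->all;
        delete t;
    }
    p->all = p->free = 0;
}
// ----------------------------------------------------------------------------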
freeing object o=%x string=%x\n", (int)o, (int)o->string); free(o->string); } t = o; o = o->free; free(t); +// object_count--; } +// printf("object_count = %d\n", object_count); } // !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# // !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# // !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# -static OSDictionary *tags; - static void -rememberObject(int tag, OSObject *o) +rememberObject(parser_state_t *state, int tag, OSObject *o) { char key[16]; - sprintf(key, "%u", tag); + snprintf(key, 16, "%u", tag); -//printf("remember key %s\n", key); +// printf("remember key %s\n", key); - tags->setObject(key, o); + state->tags->setObject(key, o); } static object_t * -retrieveObject(int tag) +retrieveObject(parser_state_t *state, int tag) { + OSObject *ref; + object_t *o; char key[16]; - sprintf(key, "%u", tag); + snprintf(key, 16, "%u", tag); -//printf("retrieve key '%s'\n", key); +// printf("retrieve key '%s'\n", key); - OSObject *ref = tags->getObject(key); + ref = state->tags->getObject(key); if (!ref) return 0; - object_t *o = newObject(); + o = newObject(state); o->object = ref; return o; } @@ -873,10 +899,11 @@ retrieveObject(int tag) // !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# object_t * -buildOSDictionary(object_t * header) +buildDictionary(parser_state_t *state, object_t * header) { object_t *o, *t; int count = 0; + OSDictionary *dict; // get count and reverse order o = header->elements; @@ -890,31 +917,33 @@ buildOSDictionary(object_t * header) header->elements = t; } - OSDictionary *d = OSDictionary::withCapacity(count); - - if (header->idref >= 0) rememberObject(header->idref, d); + dict = OSDictionary::withCapacity(count); + if (header->idref >= 0) rememberObject(state, header->idref, dict); o = header->elements; while (o) { - d->setObject(o->key, o->object); - o->object->release(); - o->object = 0; + dict->setObject(o->key, o->object); + o->key->release(); + o->object->release(); o->key = 0; + o->object = 0; + t = o; o = o->next; - freeObject(t); + freeObject(state, t); } o = header; - o->object = d; + o->object = dict; return o; }; object_t * -buildOSArray(object_t * header) +buildArray(parser_state_t *state, object_t * header) { object_t *o, *t; int count = 0; + OSArray *array; // get count and reverse order o = header->elements; @@ -928,140 +957,120 @@ buildOSArray(object_t * header) header->elements = t; } - OSArray *a = OSArray::withCapacity(count); - - if (header->idref >= 0) rememberObject(header->idref, a); + array = OSArray::withCapacity(count); + if (header->idref >= 0) rememberObject(state, header->idref, array); o = header->elements; while (o) { - a->setObject(o->object); + array->setObject(o->object); + o->object->release(); o->object = 0; + t = o; o = o->next; - freeObject(t); + freeObject(state, t); } o = header; - o->object = a; + o->object = array; return o; }; object_t * -buildOSSet(object_t *o) +buildSet(parser_state_t *state, object_t *header) { - o = buildOSArray(o); - OSArray *a = (OSArray *)o->object; + object_t *o = buildArray(state, header); - OSSet *s = OSSet::withArray(a, a->getCapacity()); + OSArray *array = (OSArray *)o->object; + OSSet *set = OSSet::withArray(array, array->getCapacity()); - //write over reference created in array - if (o->idref >= 0) rememberObject(o->idref, s); + // write over the reference created in buildArray + if (header->idref >= 0) rememberObject(state, header->idref, set); - a->release(); - o->object = s; + array->release(); + 
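// ----------------------------------------------------------------------------
// Standalone sketch of the count-and-reverse pass at the top of
// buildDictionary()/buildArray(): the parser pushes each element onto the
// head of the list, so a single walk both counts the elements (to size the
// collection up front) and restores document order. Types are hypothetical.
struct elem_t {
    elem_t *next;
};

static int
countAndReverse(elem_t **head)
{
    elem_t *prev = 0, *o = *head;
    int count = 0;

    while (o) {
        count++;
        elem_t *n = o->next;    // classic in-place list reversal
        o->next = prev;
        prev = o;
        o = n;
    }
    *head = prev;               // list now runs in original document order
    return count;
}
// ----------------------------------------------------------------------------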
o->object = set; return o; }; object_t * -buildOSString(object_t *o) +buildString(parser_state_t *state, object_t *o) { - OSString *s = OSString::withCString(o->string); - - if (o->idref >= 0) rememberObject(o->idref, s); - - free(o->string); - o->string = 0; - o->object = s; - - return o; -}; + OSString *string; -object_t * -buildKey(object_t *o) -{ - const OSSymbol *s = OSSymbol::withCString(o->string); + string = OSString::withCString(o->string); + if (o->idref >= 0) rememberObject(state, o->idref, string); free(o->string); o->string = 0; - o->key = s; + o->object = string; return o; }; object_t * -buildOSData(object_t *o) +buildData(parser_state_t *state, object_t *o) { - OSData *d; + OSData *data; if (o->size) { - d = OSData::withBytes(o->data, o->size); - free(o->data); + data = OSData::withBytes(o->data, o->size); } else { - d = OSData::withCapacity(0); + data = OSData::withCapacity(0); } - if (o->idref >= 0) rememberObject(o->idref, d); + if (o->idref >= 0) rememberObject(state, o->idref, data); + if (o->size) free(o->data); o->data = 0; - o->object = d; + o->object = data; return o; }; object_t * -buildOSNumber(object_t *o) +buildNumber(parser_state_t *state, object_t *o) { - OSNumber *n = OSNumber::withNumber(o->number, o->size); + OSNumber *number = OSNumber::withNumber(o->number, o->size); - if (o->idref >= 0) rememberObject(o->idref, n); + if (o->idref >= 0) rememberObject(state, o->idref, number); - o->object = n; + o->object = number; return o; }; object_t * -buildOSBoolean(object_t *o) +buildBoolean(parser_state_t *state, object_t *o) { - OSBoolean *b = OSBoolean::withBoolean(o->number != 0); - o->object = b; + o->object = ((o->number == 0) ? kOSBooleanFalse : kOSBooleanTrue); + o->object->retain(); return o; }; -__BEGIN_DECLS -#include -__END_DECLS - -static mutex_t *lock = 0; - OSObject* OSUnserializeXML(const char *buffer, OSString **errorString) { OSObject *object; + parser_state_t *state = (parser_state_t *)malloc(sizeof(parser_state_t)); - if (!lock) { - lock = mutex_alloc(ETAP_IO_AHA); - _mutex_lock(lock); - } else { - _mutex_lock(lock); + if ((!state) || (!buffer)) return 0; - } + // just in case + if (errorString) *errorString = NULL; - objects = 0; - freeObjects = 0; - yyerror_message[0] = 0; //just in case - parseBuffer = buffer; - parseBufferIndex = 0; - tags = OSDictionary::withCapacity(128); - if (yyparse() == 0) { - object = parsedObject; - if (errorString) *errorString = 0; - } else { - object = 0; - if (errorString) - *errorString = OSString::withCString(yyerror_message); - } + state->parseBuffer = buffer; + state->parseBufferIndex = 0; + state->lineNumber = 1; + state->objects = 0; + state->freeObjects = 0; + state->tags = OSDictionary::withCapacity(128); + state->errorString = errorString; + state->parsedObject = 0; + + (void)yyparse((void *)state); + + object = state->parsedObject; - cleanupObjects(); - tags->release(); - mutex_unlock(lock); + cleanupObjects(state); + state->tags->release(); + free(state); return object; } diff --git a/libkern/c++/Tests/TestSerialization/CustomInfo.xml b/libkern/c++/Tests/TestSerialization/CustomInfo.xml deleted file mode 100644 index 2398ff4ce..000000000 --- a/libkern/c++/Tests/TestSerialization/CustomInfo.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - - - Name - TestSerialization - Vendor - Your-Company - Version - 0.1 - Date - October 13, 1999 - - - diff --git a/libkern/c++/Tests/TestSerialization/Makefile b/libkern/c++/Tests/TestSerialization/Makefile deleted file mode 100644 index 1998d67f6..000000000 --- 
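// ----------------------------------------------------------------------------
// Hedged usage sketch for the rewritten entry point (kernel-side caller; the
// buffer contents are illustrative). On failure the parser hands back an
// OSString naming the error and its line number; on success *errorString is
// left NULL.
static OSObject *
exampleUnserialize(void)
{
    OSString *errorString = NULL;
    OSObject *obj = OSUnserializeXML(
        "<dict><key>answer</key><integer size=\"32\">42</integer></dict>",
        &errorString);

    if (!obj && errorString) {
        printf("%s", errorString->getCStringNoCopy());
        errorString->release();
    }
    return obj;     // caller owns one reference, or NULL on error
}
// ----------------------------------------------------------------------------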
a/libkern/c++/Tests/TestSerialization/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# -# Generated by the Apple Project Builder. -# -# NOTE: Do NOT change this file -- Project Builder maintains it. -# -# Put all of your customizations in files called Makefile.preamble -# and Makefile.postamble (both optional), and Makefile will include them. -# - -NAME = TestSerialization - -PROJECTVERSION = 2.8 -PROJECT_TYPE = Kernel Extension - -TOOLS = test1.kmodproj test2.kmodproj - -OTHERSRCS = Makefile.preamble Makefile Makefile.postamble\ - CustomInfo.xml - -MAKEFILEDIR = $(MAKEFILEPATH)/pb_makefiles -CODE_GEN_STYLE = DYNAMIC -MAKEFILE = kext.make -NEXTSTEP_INSTALLDIR = /System/Library/Extensions -LIBS = -DEBUG_LIBS = $(LIBS) -PROF_LIBS = $(LIBS) -BUNDLE_EXTENSION = kext - - - - -NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc -NEXTSTEP_JAVA_COMPILER = /usr/bin/javac - -include $(MAKEFILEDIR)/platform.make - --include Makefile.preamble - -include $(MAKEFILEDIR)/$(MAKEFILE) - --include Makefile.postamble - --include Makefile.dependencies diff --git a/libkern/c++/Tests/TestSerialization/Makefile.postamble b/libkern/c++/Tests/TestSerialization/Makefile.postamble deleted file mode 100644 index 411cde671..000000000 --- a/libkern/c++/Tests/TestSerialization/Makefile.postamble +++ /dev/null @@ -1,100 +0,0 @@ -############################################################################### -# Makefile.postamble -# Copyright 1997, Apple Computer, Inc. -# -# Use this makefile, which is imported after all other makefiles, to -# override attributes for a project's Makefile environment. This allows you -# to take advantage of the environment set up by the other Makefiles. -# You can also define custom rules at the end of this file. -# -############################################################################### -# -# These variables are exported by the standard makefiles and can be -# used in any customizations you make. They are *outputs* of -# the Makefiles and should be used, not set. -# -# PRODUCTS: products to install. All of these products will be placed in -# the directory $(DSTROOT)$(INSTALLDIR) -# GLOBAL_RESOURCE_DIR: The directory to which resources are copied. -# LOCAL_RESOURCE_DIR: The directory to which localized resources are copied. -# OFILE_DIR: Directory into which .o object files are generated. -# DERIVED_SRC_DIR: Directory used for all other derived files -# -# ALL_CFLAGS: flags to pass when compiling .c files -# ALL_MFLAGS: flags to pass when compiling .m files -# ALL_CCFLAGS: flags to pass when compiling .cc, .cxx, and .C files -# ALL_MMFLAGS: flags to pass when compiling .mm, .mxx, and .M files -# ALL_PRECOMPFLAGS: flags to pass when precompiling .h files -# ALL_LDFLAGS: flags to pass when linking object files -# ALL_LIBTOOL_FLAGS: flags to pass when libtooling object files -# ALL_PSWFLAGS: flags to pass when processing .psw and .pswm (pswrap) files -# ALL_RPCFLAGS: flags to pass when processing .rpc (rpcgen) files -# ALL_YFLAGS: flags to pass when processing .y (yacc) files -# ALL_LFLAGS: flags to pass when processing .l (lex) files -# -# NAME: name of application, bundle, subproject, palette, etc. -# LANGUAGES: langages in which the project is written (default "English") -# English_RESOURCES: localized resources (e.g. 
nib's, images) of project -# GLOBAL_RESOURCES: non-localized resources of project -# -# SRCROOT: base directory in which to place the new source files -# SRCPATH: relative path from SRCROOT to present subdirectory -# -# INSTALLDIR: Directory the product will be installed into by 'install' target -# PUBLIC_HDR_INSTALLDIR: where to install public headers. Don't forget -# to prefix this with DSTROOT when you use it. -# PRIVATE_HDR_INSTALLDIR: where to install private headers. Don't forget -# to prefix this with DSTROOT when you use it. -# -# EXECUTABLE_EXT: Executable extension for the platform (i.e. .exe on Windows) -# -############################################################################### - -# Some compiler flags can be overridden here for certain build situations. -# -# WARNING_CFLAGS: flag used to set warning level (defaults to -Wmost) -# DEBUG_SYMBOLS_CFLAGS: debug-symbol flag passed to all builds (defaults -# to -g) -# DEBUG_BUILD_CFLAGS: flags passed during debug builds (defaults to -DDEBUG) -# OPTIMIZE_BUILD_CFLAGS: flags passed during optimized builds (defaults -# to -O) -# PROFILE_BUILD_CFLAGS: flags passed during profile builds (defaults -# to -pg -DPROFILE) -# LOCAL_DIR_INCLUDE_DIRECTIVE: flag used to add current directory to -# the include path (defaults to -I.) -# DEBUG_BUILD_LDFLAGS, OPTIMIZE_BUILD_LDFLAGS, PROFILE_BUILD_LDFLAGS: flags -# passed to ld/libtool (defaults to nothing) - - -# Library and Framework projects only: -# INSTALL_NAME_DIRECTIVE: This directive ensures that executables linked -# against the framework will run against the correct version even if -# the current version of the framework changes. You may override this -# to "" as an alternative to using the DYLD_LIBRARY_PATH during your -# development cycle, but be sure to restore it before installing. - - -# Ownership and permissions of files installed by 'install' target - -#INSTALL_AS_USER = root - # User/group ownership -#INSTALL_AS_GROUP = wheel - # (probably want to set both of these) -#INSTALL_PERMISSIONS = - # If set, 'install' chmod's executable to this - - -# Options to strip. Note: -S strips debugging symbols (executables can be stripped -# down further with -x or, if they load no bundles, with no options at all). - -#STRIPFLAGS = -S - - -######################################################################### -# Put rules to extend the behavior of the standard Makefiles here. Include them in -# the dependency tree via cvariables like AFTER_INSTALL in the Makefile.preamble. -# -# You should avoid redefining things like "install" or "app", as they are -# owned by the top-level Makefile API and no context has been set up for where -# derived files should go. -# diff --git a/libkern/c++/Tests/TestSerialization/Makefile.preamble b/libkern/c++/Tests/TestSerialization/Makefile.preamble deleted file mode 100644 index c1624b450..000000000 --- a/libkern/c++/Tests/TestSerialization/Makefile.preamble +++ /dev/null @@ -1,137 +0,0 @@ -############################################################################### -# Makefile.preamble -# Copyright 1997, Apple Computer, Inc. -# -# Use this makefile for configuring the standard application makefiles -# associated with ProjectBuilder. It is included before the main makefile. -# In Makefile.preamble you set attributes for a project, so they are available -# to the project's makefiles. In contrast, you typically write additional rules or -# override built-in behavior in the Makefile.postamble. 
-# -# Each directory in a project tree (main project plus subprojects) should -# have its own Makefile.preamble and Makefile.postamble. -############################################################################### -# -# Before the main makefile is included for this project, you may set: -# -# MAKEFILEDIR: Directory in which to find $(MAKEFILE) -# MAKEFILE: Top level mechanism Makefile (e.g., app.make, bundle.make) - -# Compiler/linker flags added to the defaults: The OTHER_* variables will be -# inherited by all nested sub-projects, but the LOCAL_ versions of the same -# variables will not. Put your -I, -D, -U, and -L flags in ProjectBuilder's -# Build Attributes inspector if at all possible. To override the default flags -# that get passed to ${CC} (e.g. change -O to -O2), see Makefile.postamble. The -# variables below are *inputs* to the build process and distinct from the override -# settings done (less often) in the Makefile.postamble. -# -# OTHER_CFLAGS, LOCAL_CFLAGS: additional flags to pass to the compiler -# Note that $(OTHER_CFLAGS) and $(LOCAL_CFLAGS) are used for .h, ...c, .m, -# .cc, .cxx, .C, and .M files. There is no need to respecify the -# flags in OTHER_MFLAGS, etc. -# OTHER_MFLAGS, LOCAL_MFLAGS: additional flags for .m files -# OTHER_CCFLAGS, LOCAL_CCFLAGS: additional flags for .cc, .cxx, and ...C files -# OTHER_MMFLAGS, LOCAL_MMFLAGS: additional flags for .mm and .M files -# OTHER_PRECOMPFLAGS, LOCAL_PRECOMPFLAGS: additional flags used when -# precompiling header files -# OTHER_LDFLAGS, LOCAL_LDFLAGS: additional flags passed to ld and libtool -# OTHER_PSWFLAGS, LOCAL_PSWFLAGS: additional flags passed to pswrap -# OTHER_RPCFLAGS, LOCAL_RPCFLAGS: additional flags passed to rpcgen -# OTHER_YFLAGS, LOCAL_YFLAGS: additional flags passed to yacc -# OTHER_LFLAGS, LOCAL_LFLAGS: additional flags passed to lex - -# These variables provide hooks enabling you to add behavior at almost every -# stage of the make: -# -# BEFORE_PREBUILD: targets to build before installing headers for a subproject -# AFTER_PREBUILD: targets to build after installing headers for a subproject -# BEFORE_BUILD_RECURSION: targets to make before building subprojects -# BEFORE_BUILD: targets to make before a build, but after subprojects -# AFTER_BUILD: targets to make after a build -# -# BEFORE_INSTALL: targets to build before installing the product -# AFTER_INSTALL: targets to build after installing the product -# BEFORE_POSTINSTALL: targets to build before postinstalling every subproject -# AFTER_POSTINSTALL: targts to build after postinstalling every subproject -# -# BEFORE_INSTALLHDRS: targets to build before installing headers for a -# subproject -# AFTER_INSTALLHDRS: targets to build after installing headers for a subproject -# BEFORE_INSTALLSRC: targets to build before installing source for a subproject -# AFTER_INSTALLSRC: targets to build after installing source for a subproject -# -# BEFORE_DEPEND: targets to build before building dependencies for a -# subproject -# AFTER_DEPEND: targets to build after building dependencies for a -# subproject -# -# AUTOMATIC_DEPENDENCY_INFO: if YES, then the dependency file is -# updated every time the project is built. If NO, the dependency -# file is only built when the depend target is invoked. - -# Framework-related variables: -# FRAMEWORK_DLL_INSTALLDIR: On Windows platforms, this variable indicates -# where to put the framework's DLL. 
This variable defaults to -# $(INSTALLDIR)/../Executables - -# Library-related variables: -# PUBLIC_HEADER_DIR: Determines where public exported header files -# should be installed. Do not include $(DSTROOT) in this value -- -# it is prefixed automatically. For library projects you should -# set this to something like /Developer/Headers/$(NAME). Do not set -# this variable for framework projects unless you do not want the -# header files included in the framework. -# PRIVATE_HEADER_DIR: Determines where private exported header files -# should be installed. Do not include $(DSTROOT) in this value -- -# it is prefixed automatically. -# LIBRARY_STYLE: This may be either STATIC or DYNAMIC, and determines -# whether the libraries produced are statically linked when they -# are used or if they are dynamically loadable. This defaults to -# DYNAMIC. -# LIBRARY_DLL_INSTALLDIR: On Windows platforms, this variable indicates -# where to put the library's DLL. This variable defaults to -# $(INSTALLDIR)/../Executables -# -# INSTALL_AS_USER: owner of the intalled products (default root) -# INSTALL_AS_GROUP: group of the installed products (default wheel) -# INSTALL_PERMISSIONS: permissions of the installed product (default o+rX) -# -# OTHER_RECURSIVE_VARIABLES: The names of variables which you want to be -# passed on the command line to recursive invocations of make. Note that -# the values in OTHER_*FLAGS are inherited by subprojects automatically -- -# you do not have to (and shouldn't) add OTHER_*FLAGS to -# OTHER_RECURSIVE_VARIABLES. - -# Additional headers to export beyond those in the PB.project: -# OTHER_PUBLIC_HEADERS -# OTHER_PROJECT_HEADERS -# OTHER_PRIVATE_HEADERS - -# Additional files for the project's product: <> -# OTHER_RESOURCES: (non-localized) resources for this project -# OTHER_OFILES: relocatables to be linked into this project -# OTHER_LIBS: more libraries to link against -# OTHER_PRODUCT_DEPENDS: other dependencies of this project -# OTHER_SOURCEFILES: other source files maintained by .pre/postamble -# OTHER_GARBAGE: additional files to be removed by `make clean' - -# Set this to YES if you don't want a final libtool call for a library/framework. -# BUILD_OFILES_LIST_ONLY - -# To include a version string, project source must exist in a directory named -# $(NAME).%d[.%d][.%d] and the following line must be uncommented. -# OTHER_GENERATED_OFILES = $(VERS_OFILE) - -# This definition will suppress stripping of debug symbols when an executable -# is installed. By default it is YES. -# STRIP_ON_INSTALL = NO - -# Uncomment to suppress generation of a KeyValueCoding index when installing -# frameworks (This index is used by WOB and IB to determine keys available -# for an object). Set to YES by default. -# PREINDEX_FRAMEWORK = NO - -# Change this definition to install projects somewhere other than the -# standard locations. NEXT_ROOT defaults to "C:/Apple" on Windows systems -# and "" on other systems. 
-DSTROOT = $(HOME) diff --git a/libkern/c++/Tests/TestSerialization/PB.project b/libkern/c++/Tests/TestSerialization/PB.project deleted file mode 100644 index a14195b01..000000000 --- a/libkern/c++/Tests/TestSerialization/PB.project +++ /dev/null @@ -1,17 +0,0 @@ -{ - BUNDLE_EXTENSION = kext; - DYNAMIC_CODE_GEN = YES; - FILESTABLE = { - OTHER_SOURCES = (Makefile.preamble, Makefile, Makefile.postamble, CustomInfo.xml); - SUBPROJECTS = (test1.kmodproj, test2.kmodproj); - }; - LANGUAGE = English; - MAKEFILEDIR = "$(MAKEFILEPATH)/pb_makefiles"; - NEXTSTEP_BUILDTOOL = /bin/gnumake; - NEXTSTEP_INSTALLDIR = /System/Library/Extensions; - NEXTSTEP_JAVA_COMPILER = /usr/bin/javac; - NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc; - PROJECTNAME = TestSerialization; - PROJECTTYPE = "Kernel Extension"; - PROJECTVERSION = 2.8; -} diff --git a/libkern/c++/Tests/TestSerialization/PBUserInfo/PBUserInfo_root.plist b/libkern/c++/Tests/TestSerialization/PBUserInfo/PBUserInfo_root.plist deleted file mode 100644 index 22b403e4f..000000000 --- a/libkern/c++/Tests/TestSerialization/PBUserInfo/PBUserInfo_root.plist +++ /dev/null @@ -1 +0,0 @@ -{NSMACHOperatingSystem = {Archs = "18 "; Target = extension; }; } diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/CustomInfo.xml b/libkern/c++/Tests/TestSerialization/test1.kmodproj/CustomInfo.xml deleted file mode 100644 index f3b0eeae1..000000000 --- a/libkern/c++/Tests/TestSerialization/test1.kmodproj/CustomInfo.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - - Module - - Version - 0.1 - Name - test1 - File - test1 - Initialize - test1_start - Finalize - test1_stop - Target - Kernel - Format - mach-o - - - - diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile b/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile deleted file mode 100644 index 5b476501e..000000000 --- a/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile +++ /dev/null @@ -1,49 +0,0 @@ -# -# Generated by the Apple Project Builder. -# -# NOTE: Do NOT change this file -- Project Builder maintains it. -# -# Put all of your customizations in files called Makefile.preamble -# and Makefile.postamble (both optional), and Makefile will include them. -# - -NAME = test1 - -PROJECTVERSION = 2.8 -PROJECT_TYPE = Kernel Module - -CPPFILES = test1_main.cpp - -HFILES = test1_main.h - -OTHERSRCS = Makefile.preamble Makefile Makefile.postamble\ - CustomInfo.xml - -MAKEFILEDIR = $(MAKEFILEPATH)/pb_makefiles -CODE_GEN_STYLE = DYNAMIC -MAKEFILE = kmod.make -NEXTSTEP_INSTALLDIR = /System/Library/Extensions -LIBS = -DEBUG_LIBS = $(LIBS) -PROF_LIBS = $(LIBS) - - -NEXTSTEP_PB_CFLAGS = -Wno-format - - -NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc -WINDOWS_OBJCPLUS_COMPILER = $(DEVDIR)/gcc -PDO_UNIX_OBJCPLUS_COMPILER = $(NEXTDEV_BIN)/gcc -NEXTSTEP_JAVA_COMPILER = /usr/bin/javac -WINDOWS_JAVA_COMPILER = $(JDKBINDIR)/javac.exe -PDO_UNIX_JAVA_COMPILER = $(JDKBINDIR)/javac - -include $(MAKEFILEDIR)/platform.make - --include Makefile.preamble - -include $(MAKEFILEDIR)/$(MAKEFILE) - --include Makefile.postamble - --include Makefile.dependencies diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.postamble b/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.postamble deleted file mode 100644 index 411cde671..000000000 --- a/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.postamble +++ /dev/null @@ -1,100 +0,0 @@ -############################################################################### -# Makefile.postamble -# Copyright 1997, Apple Computer, Inc. 
-# -# Use this makefile, which is imported after all other makefiles, to -# override attributes for a project's Makefile environment. This allows you -# to take advantage of the environment set up by the other Makefiles. -# You can also define custom rules at the end of this file. -# -############################################################################### -# -# These variables are exported by the standard makefiles and can be -# used in any customizations you make. They are *outputs* of -# the Makefiles and should be used, not set. -# -# PRODUCTS: products to install. All of these products will be placed in -# the directory $(DSTROOT)$(INSTALLDIR) -# GLOBAL_RESOURCE_DIR: The directory to which resources are copied. -# LOCAL_RESOURCE_DIR: The directory to which localized resources are copied. -# OFILE_DIR: Directory into which .o object files are generated. -# DERIVED_SRC_DIR: Directory used for all other derived files -# -# ALL_CFLAGS: flags to pass when compiling .c files -# ALL_MFLAGS: flags to pass when compiling .m files -# ALL_CCFLAGS: flags to pass when compiling .cc, .cxx, and .C files -# ALL_MMFLAGS: flags to pass when compiling .mm, .mxx, and .M files -# ALL_PRECOMPFLAGS: flags to pass when precompiling .h files -# ALL_LDFLAGS: flags to pass when linking object files -# ALL_LIBTOOL_FLAGS: flags to pass when libtooling object files -# ALL_PSWFLAGS: flags to pass when processing .psw and .pswm (pswrap) files -# ALL_RPCFLAGS: flags to pass when processing .rpc (rpcgen) files -# ALL_YFLAGS: flags to pass when processing .y (yacc) files -# ALL_LFLAGS: flags to pass when processing .l (lex) files -# -# NAME: name of application, bundle, subproject, palette, etc. -# LANGUAGES: langages in which the project is written (default "English") -# English_RESOURCES: localized resources (e.g. nib's, images) of project -# GLOBAL_RESOURCES: non-localized resources of project -# -# SRCROOT: base directory in which to place the new source files -# SRCPATH: relative path from SRCROOT to present subdirectory -# -# INSTALLDIR: Directory the product will be installed into by 'install' target -# PUBLIC_HDR_INSTALLDIR: where to install public headers. Don't forget -# to prefix this with DSTROOT when you use it. -# PRIVATE_HDR_INSTALLDIR: where to install private headers. Don't forget -# to prefix this with DSTROOT when you use it. -# -# EXECUTABLE_EXT: Executable extension for the platform (i.e. .exe on Windows) -# -############################################################################### - -# Some compiler flags can be overridden here for certain build situations. -# -# WARNING_CFLAGS: flag used to set warning level (defaults to -Wmost) -# DEBUG_SYMBOLS_CFLAGS: debug-symbol flag passed to all builds (defaults -# to -g) -# DEBUG_BUILD_CFLAGS: flags passed during debug builds (defaults to -DDEBUG) -# OPTIMIZE_BUILD_CFLAGS: flags passed during optimized builds (defaults -# to -O) -# PROFILE_BUILD_CFLAGS: flags passed during profile builds (defaults -# to -pg -DPROFILE) -# LOCAL_DIR_INCLUDE_DIRECTIVE: flag used to add current directory to -# the include path (defaults to -I.) -# DEBUG_BUILD_LDFLAGS, OPTIMIZE_BUILD_LDFLAGS, PROFILE_BUILD_LDFLAGS: flags -# passed to ld/libtool (defaults to nothing) - - -# Library and Framework projects only: -# INSTALL_NAME_DIRECTIVE: This directive ensures that executables linked -# against the framework will run against the correct version even if -# the current version of the framework changes. 
You may override this -# to "" as an alternative to using the DYLD_LIBRARY_PATH during your -# development cycle, but be sure to restore it before installing. - - -# Ownership and permissions of files installed by 'install' target - -#INSTALL_AS_USER = root - # User/group ownership -#INSTALL_AS_GROUP = wheel - # (probably want to set both of these) -#INSTALL_PERMISSIONS = - # If set, 'install' chmod's executable to this - - -# Options to strip. Note: -S strips debugging symbols (executables can be stripped -# down further with -x or, if they load no bundles, with no options at all). - -#STRIPFLAGS = -S - - -######################################################################### -# Put rules to extend the behavior of the standard Makefiles here. Include them in -# the dependency tree via cvariables like AFTER_INSTALL in the Makefile.preamble. -# -# You should avoid redefining things like "install" or "app", as they are -# owned by the top-level Makefile API and no context has been set up for where -# derived files should go. -# diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.preamble b/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.preamble deleted file mode 100644 index c1624b450..000000000 --- a/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.preamble +++ /dev/null @@ -1,137 +0,0 @@ -############################################################################### -# Makefile.preamble -# Copyright 1997, Apple Computer, Inc. -# -# Use this makefile for configuring the standard application makefiles -# associated with ProjectBuilder. It is included before the main makefile. -# In Makefile.preamble you set attributes for a project, so they are available -# to the project's makefiles. In contrast, you typically write additional rules or -# override built-in behavior in the Makefile.postamble. -# -# Each directory in a project tree (main project plus subprojects) should -# have its own Makefile.preamble and Makefile.postamble. -############################################################################### -# -# Before the main makefile is included for this project, you may set: -# -# MAKEFILEDIR: Directory in which to find $(MAKEFILE) -# MAKEFILE: Top level mechanism Makefile (e.g., app.make, bundle.make) - -# Compiler/linker flags added to the defaults: The OTHER_* variables will be -# inherited by all nested sub-projects, but the LOCAL_ versions of the same -# variables will not. Put your -I, -D, -U, and -L flags in ProjectBuilder's -# Build Attributes inspector if at all possible. To override the default flags -# that get passed to ${CC} (e.g. change -O to -O2), see Makefile.postamble. The -# variables below are *inputs* to the build process and distinct from the override -# settings done (less often) in the Makefile.postamble. -# -# OTHER_CFLAGS, LOCAL_CFLAGS: additional flags to pass to the compiler -# Note that $(OTHER_CFLAGS) and $(LOCAL_CFLAGS) are used for .h, ...c, .m, -# .cc, .cxx, .C, and .M files. There is no need to respecify the -# flags in OTHER_MFLAGS, etc. 
-# OTHER_MFLAGS, LOCAL_MFLAGS: additional flags for .m files -# OTHER_CCFLAGS, LOCAL_CCFLAGS: additional flags for .cc, .cxx, and ...C files -# OTHER_MMFLAGS, LOCAL_MMFLAGS: additional flags for .mm and .M files -# OTHER_PRECOMPFLAGS, LOCAL_PRECOMPFLAGS: additional flags used when -# precompiling header files -# OTHER_LDFLAGS, LOCAL_LDFLAGS: additional flags passed to ld and libtool -# OTHER_PSWFLAGS, LOCAL_PSWFLAGS: additional flags passed to pswrap -# OTHER_RPCFLAGS, LOCAL_RPCFLAGS: additional flags passed to rpcgen -# OTHER_YFLAGS, LOCAL_YFLAGS: additional flags passed to yacc -# OTHER_LFLAGS, LOCAL_LFLAGS: additional flags passed to lex - -# These variables provide hooks enabling you to add behavior at almost every -# stage of the make: -# -# BEFORE_PREBUILD: targets to build before installing headers for a subproject -# AFTER_PREBUILD: targets to build after installing headers for a subproject -# BEFORE_BUILD_RECURSION: targets to make before building subprojects -# BEFORE_BUILD: targets to make before a build, but after subprojects -# AFTER_BUILD: targets to make after a build -# -# BEFORE_INSTALL: targets to build before installing the product -# AFTER_INSTALL: targets to build after installing the product -# BEFORE_POSTINSTALL: targets to build before postinstalling every subproject -# AFTER_POSTINSTALL: targts to build after postinstalling every subproject -# -# BEFORE_INSTALLHDRS: targets to build before installing headers for a -# subproject -# AFTER_INSTALLHDRS: targets to build after installing headers for a subproject -# BEFORE_INSTALLSRC: targets to build before installing source for a subproject -# AFTER_INSTALLSRC: targets to build after installing source for a subproject -# -# BEFORE_DEPEND: targets to build before building dependencies for a -# subproject -# AFTER_DEPEND: targets to build after building dependencies for a -# subproject -# -# AUTOMATIC_DEPENDENCY_INFO: if YES, then the dependency file is -# updated every time the project is built. If NO, the dependency -# file is only built when the depend target is invoked. - -# Framework-related variables: -# FRAMEWORK_DLL_INSTALLDIR: On Windows platforms, this variable indicates -# where to put the framework's DLL. This variable defaults to -# $(INSTALLDIR)/../Executables - -# Library-related variables: -# PUBLIC_HEADER_DIR: Determines where public exported header files -# should be installed. Do not include $(DSTROOT) in this value -- -# it is prefixed automatically. For library projects you should -# set this to something like /Developer/Headers/$(NAME). Do not set -# this variable for framework projects unless you do not want the -# header files included in the framework. -# PRIVATE_HEADER_DIR: Determines where private exported header files -# should be installed. Do not include $(DSTROOT) in this value -- -# it is prefixed automatically. -# LIBRARY_STYLE: This may be either STATIC or DYNAMIC, and determines -# whether the libraries produced are statically linked when they -# are used or if they are dynamically loadable. This defaults to -# DYNAMIC. -# LIBRARY_DLL_INSTALLDIR: On Windows platforms, this variable indicates -# where to put the library's DLL. 
This variable defaults to -# $(INSTALLDIR)/../Executables -# -# INSTALL_AS_USER: owner of the intalled products (default root) -# INSTALL_AS_GROUP: group of the installed products (default wheel) -# INSTALL_PERMISSIONS: permissions of the installed product (default o+rX) -# -# OTHER_RECURSIVE_VARIABLES: The names of variables which you want to be -# passed on the command line to recursive invocations of make. Note that -# the values in OTHER_*FLAGS are inherited by subprojects automatically -- -# you do not have to (and shouldn't) add OTHER_*FLAGS to -# OTHER_RECURSIVE_VARIABLES. - -# Additional headers to export beyond those in the PB.project: -# OTHER_PUBLIC_HEADERS -# OTHER_PROJECT_HEADERS -# OTHER_PRIVATE_HEADERS - -# Additional files for the project's product: <> -# OTHER_RESOURCES: (non-localized) resources for this project -# OTHER_OFILES: relocatables to be linked into this project -# OTHER_LIBS: more libraries to link against -# OTHER_PRODUCT_DEPENDS: other dependencies of this project -# OTHER_SOURCEFILES: other source files maintained by .pre/postamble -# OTHER_GARBAGE: additional files to be removed by `make clean' - -# Set this to YES if you don't want a final libtool call for a library/framework. -# BUILD_OFILES_LIST_ONLY - -# To include a version string, project source must exist in a directory named -# $(NAME).%d[.%d][.%d] and the following line must be uncommented. -# OTHER_GENERATED_OFILES = $(VERS_OFILE) - -# This definition will suppress stripping of debug symbols when an executable -# is installed. By default it is YES. -# STRIP_ON_INSTALL = NO - -# Uncomment to suppress generation of a KeyValueCoding index when installing -# frameworks (This index is used by WOB and IB to determine keys available -# for an object). Set to YES by default. -# PREINDEX_FRAMEWORK = NO - -# Change this definition to install projects somewhere other than the -# standard locations. NEXT_ROOT defaults to "C:/Apple" on Windows systems -# and "" on other systems. -DSTROOT = $(HOME) diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/PB.project b/libkern/c++/Tests/TestSerialization/test1.kmodproj/PB.project deleted file mode 100644 index 771c5728e..000000000 --- a/libkern/c++/Tests/TestSerialization/test1.kmodproj/PB.project +++ /dev/null @@ -1,25 +0,0 @@ -{ - DYNAMIC_CODE_GEN = NO; - FILESTABLE = { - CLASSES = (test1_main.cpp); - H_FILES = (test1_main.h); - OTHER_SOURCES = (Makefile.preamble, Makefile, Makefile.postamble, CustomInfo.xml); - }; - LANGUAGE = English; - LOCALIZABLE_FILES = {}; - MAKEFILEDIR = "$(MAKEFILEPATH)/pb_makefiles"; - NEXTSTEP_BUILDTOOL = /bin/gnumake; - NEXTSTEP_COMPILEROPTIONS = "-Wno-format"; - NEXTSTEP_INSTALLDIR = /System/Library/Extensions; - NEXTSTEP_JAVA_COMPILER = /usr/bin/javac; - NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc; - PDO_UNIX_BUILDTOOL = $NEXT_ROOT/Developer/bin/make; - PDO_UNIX_JAVA_COMPILER = "$(JDKBINDIR)/javac"; - PDO_UNIX_OBJCPLUS_COMPILER = "$(NEXTDEV_BIN)/gcc"; - PROJECTNAME = test1; - PROJECTTYPE = "Kernel Module"; - PROJECTVERSION = 2.8; - WINDOWS_BUILDTOOL = $NEXT_ROOT/Developer/Executables/make; - WINDOWS_JAVA_COMPILER = "$(JDKBINDIR)/javac.exe"; - WINDOWS_OBJCPLUS_COMPILER = "$(DEVDIR)/gcc"; -} diff --git a/libkern/c++/Tests/TestSerialization/test1/test1.pbproj/project.pbxproj b/libkern/c++/Tests/TestSerialization/test1/test1.pbproj/project.pbxproj new file mode 100644 index 000000000..453a5a07d --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test1/test1.pbproj/project.pbxproj @@ -0,0 +1,260 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 38; + objects = { + 051B4E2F03823AF402CA299A = { + isa = PBXFileReference; + path = test1_main.cpp; + refType = 4; + }; + 051B4E3003823AF402CA299A = { + fileRef = 051B4E2F03823AF402CA299A; + isa = PBXBuildFile; + settings = { + }; + }; +//050 +//051 +//052 +//053 +//054 +//060 +//061 +//062 +//063 +//064 + 06AA1261FFB20DD611CA28AA = { + buildActionMask = 2147483647; + files = ( + ); + generatedFileNames = ( + ); + isa = PBXShellScriptBuildPhase; + neededFileNames = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "script=\"${SYSTEM_DEVELOPER_DIR}/ProjectBuilder Extras/Kernel Extension Support/KEXTPostprocess\";\nif [ -x \"$script\" ]; then\n . \"$script\"\nfi"; + }; + 06AA1262FFB20DD611CA28AA = { + buildRules = ( + ); + buildSettings = { + COPY_PHASE_STRIP = NO; + OPTIMIZATION_CFLAGS = "-O0"; + }; + isa = PBXBuildStyle; + name = Development; + }; + 06AA1263FFB20DD611CA28AA = { + buildRules = ( + ); + buildSettings = { + COPY_PHASE_STRIP = YES; + }; + isa = PBXBuildStyle; + name = Deployment; + }; + 06AA1268FFB211EB11CA28AA = { + buildActionMask = 2147483647; + files = ( + ); + generatedFileNames = ( + ); + isa = PBXShellScriptBuildPhase; + neededFileNames = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "script=\"${SYSTEM_DEVELOPER_DIR}/ProjectBuilder Extras/Kernel Extension Support/KEXTPreprocess\";\nif [ -x \"$script\" ]; then\n . \"$script\"\nfi"; + }; +//060 +//061 +//062 +//063 +//064 +//080 +//081 +//082 +//083 +//084 + 089C1669FE841209C02AAC07 = { + buildStyles = ( + 06AA1262FFB20DD611CA28AA, + 06AA1263FFB20DD611CA28AA, + ); + isa = PBXProject; + mainGroup = 089C166AFE841209C02AAC07; + projectDirPath = ""; + targets = ( + 089C1673FE841209C02AAC07, + ); + }; + 089C166AFE841209C02AAC07 = { + children = ( + 247142CAFF3F8F9811CA285C, + 19C28FB6FE9D52B211CA2CBB, + ); + isa = PBXGroup; + name = test1; + refType = 4; + }; + 089C1673FE841209C02AAC07 = { + buildPhases = ( + 06AA1268FFB211EB11CA28AA, + 089C1674FE841209C02AAC07, + 089C1675FE841209C02AAC07, + 089C1676FE841209C02AAC07, + 089C1677FE841209C02AAC07, + 089C1679FE841209C02AAC07, + 06AA1261FFB20DD611CA28AA, + ); + buildSettings = { + FRAMEWORK_SEARCH_PATHS = ""; + HEADER_SEARCH_PATHS = ""; + INSTALL_PATH = "$(SYSTEM_LIBRARY_DIR)/Extensions"; + KERNEL_MODULE = YES; + LIBRARY_SEARCH_PATHS = ""; + MODULE_NAME = com.MySoftwareCompany.kext.test1; + MODULE_START = test1_start; + MODULE_STOP = test1_stop; + MODULE_VERSION = 1.0.0d1; + OTHER_CFLAGS = ""; + OTHER_LDFLAGS = ""; + OTHER_REZFLAGS = ""; + PRODUCT_NAME = test1; + SECTORDER_FLAGS = ""; + WARNING_CFLAGS = "-Wmost -Wno-four-char-constants -Wno-unknown-pragmas"; + WRAPPER_EXTENSION = kext; + }; + dependencies = ( + ); + isa = PBXBundleTarget; + name = test1; + productInstallPath = "$(SYSTEM_LIBRARY_DIR)/Extensions"; + productName = test1; + productReference = 0A5A7D55FFB780D811CA28AA; + productSettingsXML = " + + + + CFBundleDevelopmentRegion + English + CFBundleExecutable + test1 + CFBundleIconFile + + CFBundleIdentifier + com.MySoftwareCompany.kext.test1 + CFBundleInfoDictionaryVersion + 6.0 + CFBundlePackageType + KEXT + CFBundleSignature + ???? 
+ CFBundleVersion + 1.0.0d1 + OSBundleLibraries + + com.apple.kernel.libkern + 1.1 + + + +"; + shouldUseHeadermap = 1; + }; + 089C1674FE841209C02AAC07 = { + buildActionMask = 2147483647; + files = ( + ); + isa = PBXHeadersBuildPhase; + runOnlyForDeploymentPostprocessing = 0; + }; + 089C1675FE841209C02AAC07 = { + buildActionMask = 2147483647; + files = ( + ); + isa = PBXResourcesBuildPhase; + runOnlyForDeploymentPostprocessing = 0; + }; + 089C1676FE841209C02AAC07 = { + buildActionMask = 2147483647; + files = ( + 051B4E3003823AF402CA299A, + ); + isa = PBXSourcesBuildPhase; + runOnlyForDeploymentPostprocessing = 0; + }; + 089C1677FE841209C02AAC07 = { + buildActionMask = 2147483647; + files = ( + ); + isa = PBXFrameworksBuildPhase; + runOnlyForDeploymentPostprocessing = 0; + }; + 089C1679FE841209C02AAC07 = { + buildActionMask = 2147483647; + files = ( + ); + isa = PBXRezBuildPhase; + runOnlyForDeploymentPostprocessing = 0; + }; +//080 +//081 +//082 +//083 +//084 +//0A0 +//0A1 +//0A2 +//0A3 +//0A4 + 0A5A7D55FFB780D811CA28AA = { + isa = PBXBundleReference; + path = test1.kext; + refType = 3; + }; +//0A0 +//0A1 +//0A2 +//0A3 +//0A4 +//190 +//191 +//192 +//193 +//194 + 19C28FB6FE9D52B211CA2CBB = { + children = ( + 0A5A7D55FFB780D811CA28AA, + ); + isa = PBXGroup; + name = Products; + refType = 4; + }; +//190 +//191 +//192 +//193 +//194 +//240 +//241 +//242 +//243 +//244 + 247142CAFF3F8F9811CA285C = { + children = ( + 051B4E2F03823AF402CA299A, + ); + isa = PBXGroup; + name = Source; + path = ""; + refType = 4; + }; + }; + rootObject = 089C1669FE841209C02AAC07; +} diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.cpp b/libkern/c++/Tests/TestSerialization/test1/test1_main.cpp similarity index 100% rename from libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.cpp rename to libkern/c++/Tests/TestSerialization/test1/test1_main.cpp diff --git a/libkern/c++/Tests/TestSerialization/test2.kmodproj/CustomInfo.xml b/libkern/c++/Tests/TestSerialization/test2.kmodproj/CustomInfo.xml deleted file mode 100755 index b7e7e2716..000000000 --- a/libkern/c++/Tests/TestSerialization/test2.kmodproj/CustomInfo.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - - Module - - Version - 0.1 - Name - test2 - File - test2 - Initialize - test2_start - Finalize - test2_stop - Target - Kernel - Format - mach-o - - - - diff --git a/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile b/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile deleted file mode 100644 index a34b54cb3..000000000 --- a/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile +++ /dev/null @@ -1,47 +0,0 @@ -# -# Generated by the Apple Project Builder. -# -# NOTE: Do NOT change this file -- Project Builder maintains it. -# -# Put all of your customizations in files called Makefile.preamble -# and Makefile.postamble (both optional), and Makefile will include them. 
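// ----------------------------------------------------------------------------
// The MODULE_START/MODULE_STOP settings in the new project.pbxproj above name
// the kext's entry points. A minimal matching pair would look like the sketch
// below; test1_main.cpp itself is carried over unchanged by the rename, so
// this is an assumption about its shape, not its actual contents.
#include <mach/mach_types.h>
#include <mach/kmod.h>

extern "C" kern_return_t test1_start(kmod_info_t *ki, void *data);
extern "C" kern_return_t test1_stop(kmod_info_t *ki, void *data);

extern "C" kern_return_t
test1_start(kmod_info_t *ki, void *data)
{
    return KERN_SUCCESS;        // module loaded
}

extern "C" kern_return_t
test1_stop(kmod_info_t *ki, void *data)
{
    return KERN_SUCCESS;        // safe to unload
}
// ----------------------------------------------------------------------------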
-# - -NAME = test2 - -PROJECTVERSION = 2.8 -PROJECT_TYPE = Kernel Module - -CPPFILES = test2_main.cpp - -OTHERSRCS = Makefile.preamble Makefile Makefile.postamble\ - CustomInfo.xml - -MAKEFILEDIR = $(MAKEFILEPATH)/pb_makefiles -CODE_GEN_STYLE = DYNAMIC -MAKEFILE = kmod.make -NEXTSTEP_INSTALLDIR = /System/Library/Extensions -LIBS = -DEBUG_LIBS = $(LIBS) -PROF_LIBS = $(LIBS) - - -NEXTSTEP_PB_CFLAGS = -Wno-format - - -NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc -WINDOWS_OBJCPLUS_COMPILER = $(DEVDIR)/gcc -PDO_UNIX_OBJCPLUS_COMPILER = $(NEXTDEV_BIN)/gcc -NEXTSTEP_JAVA_COMPILER = /usr/bin/javac -WINDOWS_JAVA_COMPILER = $(JDKBINDIR)/javac.exe -PDO_UNIX_JAVA_COMPILER = $(JDKBINDIR)/javac - -include $(MAKEFILEDIR)/platform.make - --include Makefile.preamble - -include $(MAKEFILEDIR)/$(MAKEFILE) - --include Makefile.postamble - --include Makefile.dependencies diff --git a/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.postamble b/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.postamble deleted file mode 100644 index 411cde671..000000000 --- a/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.postamble +++ /dev/null @@ -1,100 +0,0 @@ -############################################################################### -# Makefile.postamble -# Copyright 1997, Apple Computer, Inc. -# -# Use this makefile, which is imported after all other makefiles, to -# override attributes for a project's Makefile environment. This allows you -# to take advantage of the environment set up by the other Makefiles. -# You can also define custom rules at the end of this file. -# -############################################################################### -# -# These variables are exported by the standard makefiles and can be -# used in any customizations you make. They are *outputs* of -# the Makefiles and should be used, not set. -# -# PRODUCTS: products to install. All of these products will be placed in -# the directory $(DSTROOT)$(INSTALLDIR) -# GLOBAL_RESOURCE_DIR: The directory to which resources are copied. -# LOCAL_RESOURCE_DIR: The directory to which localized resources are copied. -# OFILE_DIR: Directory into which .o object files are generated. -# DERIVED_SRC_DIR: Directory used for all other derived files -# -# ALL_CFLAGS: flags to pass when compiling .c files -# ALL_MFLAGS: flags to pass when compiling .m files -# ALL_CCFLAGS: flags to pass when compiling .cc, .cxx, and .C files -# ALL_MMFLAGS: flags to pass when compiling .mm, .mxx, and .M files -# ALL_PRECOMPFLAGS: flags to pass when precompiling .h files -# ALL_LDFLAGS: flags to pass when linking object files -# ALL_LIBTOOL_FLAGS: flags to pass when libtooling object files -# ALL_PSWFLAGS: flags to pass when processing .psw and .pswm (pswrap) files -# ALL_RPCFLAGS: flags to pass when processing .rpc (rpcgen) files -# ALL_YFLAGS: flags to pass when processing .y (yacc) files -# ALL_LFLAGS: flags to pass when processing .l (lex) files -# -# NAME: name of application, bundle, subproject, palette, etc. -# LANGUAGES: langages in which the project is written (default "English") -# English_RESOURCES: localized resources (e.g. nib's, images) of project -# GLOBAL_RESOURCES: non-localized resources of project -# -# SRCROOT: base directory in which to place the new source files -# SRCPATH: relative path from SRCROOT to present subdirectory -# -# INSTALLDIR: Directory the product will be installed into by 'install' target -# PUBLIC_HDR_INSTALLDIR: where to install public headers. 
Don't forget -# to prefix this with DSTROOT when you use it. -# PRIVATE_HDR_INSTALLDIR: where to install private headers. Don't forget -# to prefix this with DSTROOT when you use it. -# -# EXECUTABLE_EXT: Executable extension for the platform (i.e. .exe on Windows) -# -############################################################################### - -# Some compiler flags can be overridden here for certain build situations. -# -# WARNING_CFLAGS: flag used to set warning level (defaults to -Wmost) -# DEBUG_SYMBOLS_CFLAGS: debug-symbol flag passed to all builds (defaults -# to -g) -# DEBUG_BUILD_CFLAGS: flags passed during debug builds (defaults to -DDEBUG) -# OPTIMIZE_BUILD_CFLAGS: flags passed during optimized builds (defaults -# to -O) -# PROFILE_BUILD_CFLAGS: flags passed during profile builds (defaults -# to -pg -DPROFILE) -# LOCAL_DIR_INCLUDE_DIRECTIVE: flag used to add current directory to -# the include path (defaults to -I.) -# DEBUG_BUILD_LDFLAGS, OPTIMIZE_BUILD_LDFLAGS, PROFILE_BUILD_LDFLAGS: flags -# passed to ld/libtool (defaults to nothing) - - -# Library and Framework projects only: -# INSTALL_NAME_DIRECTIVE: This directive ensures that executables linked -# against the framework will run against the correct version even if -# the current version of the framework changes. You may override this -# to "" as an alternative to using the DYLD_LIBRARY_PATH during your -# development cycle, but be sure to restore it before installing. - - -# Ownership and permissions of files installed by 'install' target - -#INSTALL_AS_USER = root - # User/group ownership -#INSTALL_AS_GROUP = wheel - # (probably want to set both of these) -#INSTALL_PERMISSIONS = - # If set, 'install' chmod's executable to this - - -# Options to strip. Note: -S strips debugging symbols (executables can be stripped -# down further with -x or, if they load no bundles, with no options at all). - -#STRIPFLAGS = -S - - -######################################################################### -# Put rules to extend the behavior of the standard Makefiles here. Include them in -# the dependency tree via cvariables like AFTER_INSTALL in the Makefile.preamble. -# -# You should avoid redefining things like "install" or "app", as they are -# owned by the top-level Makefile API and no context has been set up for where -# derived files should go. -# diff --git a/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.preamble b/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.preamble deleted file mode 100644 index c1624b450..000000000 --- a/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.preamble +++ /dev/null @@ -1,137 +0,0 @@ -############################################################################### -# Makefile.preamble -# Copyright 1997, Apple Computer, Inc. -# -# Use this makefile for configuring the standard application makefiles -# associated with ProjectBuilder. It is included before the main makefile. -# In Makefile.preamble you set attributes for a project, so they are available -# to the project's makefiles. In contrast, you typically write additional rules or -# override built-in behavior in the Makefile.postamble. -# -# Each directory in a project tree (main project plus subprojects) should -# have its own Makefile.preamble and Makefile.postamble. 
-############################################################################### -# -# Before the main makefile is included for this project, you may set: -# -# MAKEFILEDIR: Directory in which to find $(MAKEFILE) -# MAKEFILE: Top level mechanism Makefile (e.g., app.make, bundle.make) - -# Compiler/linker flags added to the defaults: The OTHER_* variables will be -# inherited by all nested sub-projects, but the LOCAL_ versions of the same -# variables will not. Put your -I, -D, -U, and -L flags in ProjectBuilder's -# Build Attributes inspector if at all possible. To override the default flags -# that get passed to ${CC} (e.g. change -O to -O2), see Makefile.postamble. The -# variables below are *inputs* to the build process and distinct from the override -# settings done (less often) in the Makefile.postamble. -# -# OTHER_CFLAGS, LOCAL_CFLAGS: additional flags to pass to the compiler -# Note that $(OTHER_CFLAGS) and $(LOCAL_CFLAGS) are used for .h, ...c, .m, -# .cc, .cxx, .C, and .M files. There is no need to respecify the -# flags in OTHER_MFLAGS, etc. -# OTHER_MFLAGS, LOCAL_MFLAGS: additional flags for .m files -# OTHER_CCFLAGS, LOCAL_CCFLAGS: additional flags for .cc, .cxx, and ...C files -# OTHER_MMFLAGS, LOCAL_MMFLAGS: additional flags for .mm and .M files -# OTHER_PRECOMPFLAGS, LOCAL_PRECOMPFLAGS: additional flags used when -# precompiling header files -# OTHER_LDFLAGS, LOCAL_LDFLAGS: additional flags passed to ld and libtool -# OTHER_PSWFLAGS, LOCAL_PSWFLAGS: additional flags passed to pswrap -# OTHER_RPCFLAGS, LOCAL_RPCFLAGS: additional flags passed to rpcgen -# OTHER_YFLAGS, LOCAL_YFLAGS: additional flags passed to yacc -# OTHER_LFLAGS, LOCAL_LFLAGS: additional flags passed to lex - -# These variables provide hooks enabling you to add behavior at almost every -# stage of the make: -# -# BEFORE_PREBUILD: targets to build before installing headers for a subproject -# AFTER_PREBUILD: targets to build after installing headers for a subproject -# BEFORE_BUILD_RECURSION: targets to make before building subprojects -# BEFORE_BUILD: targets to make before a build, but after subprojects -# AFTER_BUILD: targets to make after a build -# -# BEFORE_INSTALL: targets to build before installing the product -# AFTER_INSTALL: targets to build after installing the product -# BEFORE_POSTINSTALL: targets to build before postinstalling every subproject -# AFTER_POSTINSTALL: targts to build after postinstalling every subproject -# -# BEFORE_INSTALLHDRS: targets to build before installing headers for a -# subproject -# AFTER_INSTALLHDRS: targets to build after installing headers for a subproject -# BEFORE_INSTALLSRC: targets to build before installing source for a subproject -# AFTER_INSTALLSRC: targets to build after installing source for a subproject -# -# BEFORE_DEPEND: targets to build before building dependencies for a -# subproject -# AFTER_DEPEND: targets to build after building dependencies for a -# subproject -# -# AUTOMATIC_DEPENDENCY_INFO: if YES, then the dependency file is -# updated every time the project is built. If NO, the dependency -# file is only built when the depend target is invoked. - -# Framework-related variables: -# FRAMEWORK_DLL_INSTALLDIR: On Windows platforms, this variable indicates -# where to put the framework's DLL. This variable defaults to -# $(INSTALLDIR)/../Executables - -# Library-related variables: -# PUBLIC_HEADER_DIR: Determines where public exported header files -# should be installed. Do not include $(DSTROOT) in this value -- -# it is prefixed automatically. 
For library projects you should -# set this to something like /Developer/Headers/$(NAME). Do not set -# this variable for framework projects unless you do not want the -# header files included in the framework. -# PRIVATE_HEADER_DIR: Determines where private exported header files -# should be installed. Do not include $(DSTROOT) in this value -- -# it is prefixed automatically. -# LIBRARY_STYLE: This may be either STATIC or DYNAMIC, and determines -# whether the libraries produced are statically linked when they -# are used or if they are dynamically loadable. This defaults to -# DYNAMIC. -# LIBRARY_DLL_INSTALLDIR: On Windows platforms, this variable indicates -# where to put the library's DLL. This variable defaults to -# $(INSTALLDIR)/../Executables -# -# INSTALL_AS_USER: owner of the intalled products (default root) -# INSTALL_AS_GROUP: group of the installed products (default wheel) -# INSTALL_PERMISSIONS: permissions of the installed product (default o+rX) -# -# OTHER_RECURSIVE_VARIABLES: The names of variables which you want to be -# passed on the command line to recursive invocations of make. Note that -# the values in OTHER_*FLAGS are inherited by subprojects automatically -- -# you do not have to (and shouldn't) add OTHER_*FLAGS to -# OTHER_RECURSIVE_VARIABLES. - -# Additional headers to export beyond those in the PB.project: -# OTHER_PUBLIC_HEADERS -# OTHER_PROJECT_HEADERS -# OTHER_PRIVATE_HEADERS - -# Additional files for the project's product: <> -# OTHER_RESOURCES: (non-localized) resources for this project -# OTHER_OFILES: relocatables to be linked into this project -# OTHER_LIBS: more libraries to link against -# OTHER_PRODUCT_DEPENDS: other dependencies of this project -# OTHER_SOURCEFILES: other source files maintained by .pre/postamble -# OTHER_GARBAGE: additional files to be removed by `make clean' - -# Set this to YES if you don't want a final libtool call for a library/framework. -# BUILD_OFILES_LIST_ONLY - -# To include a version string, project source must exist in a directory named -# $(NAME).%d[.%d][.%d] and the following line must be uncommented. -# OTHER_GENERATED_OFILES = $(VERS_OFILE) - -# This definition will suppress stripping of debug symbols when an executable -# is installed. By default it is YES. -# STRIP_ON_INSTALL = NO - -# Uncomment to suppress generation of a KeyValueCoding index when installing -# frameworks (This index is used by WOB and IB to determine keys available -# for an object). Set to YES by default. -# PREINDEX_FRAMEWORK = NO - -# Change this definition to install projects somewhere other than the -# standard locations. NEXT_ROOT defaults to "C:/Apple" on Windows systems -# and "" on other systems. 
-DSTROOT = $(HOME) diff --git a/libkern/c++/Tests/TestSerialization/test2.kmodproj/PB.project b/libkern/c++/Tests/TestSerialization/test2.kmodproj/PB.project deleted file mode 100644 index 36dc5a113..000000000 --- a/libkern/c++/Tests/TestSerialization/test2.kmodproj/PB.project +++ /dev/null @@ -1,24 +0,0 @@ -{ - DYNAMIC_CODE_GEN = NO; - FILESTABLE = { - CLASSES = (test2_main.cpp); - OTHER_SOURCES = (Makefile.preamble, Makefile, Makefile.postamble, CustomInfo.xml); - }; - LANGUAGE = English; - LOCALIZABLE_FILES = {}; - MAKEFILEDIR = "$(MAKEFILEPATH)/pb_makefiles"; - NEXTSTEP_BUILDTOOL = /bin/gnumake; - NEXTSTEP_COMPILEROPTIONS = "-Wno-format"; - NEXTSTEP_INSTALLDIR = /System/Library/Extensions; - NEXTSTEP_JAVA_COMPILER = /usr/bin/javac; - NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc; - PDO_UNIX_BUILDTOOL = $NEXT_ROOT/Developer/bin/make; - PDO_UNIX_JAVA_COMPILER = "$(JDKBINDIR)/javac"; - PDO_UNIX_OBJCPLUS_COMPILER = "$(NEXTDEV_BIN)/gcc"; - PROJECTNAME = test2; - PROJECTTYPE = "Kernel Module"; - PROJECTVERSION = 2.8; - WINDOWS_BUILDTOOL = $NEXT_ROOT/Developer/Executables/make; - WINDOWS_JAVA_COMPILER = "$(JDKBINDIR)/javac.exe"; - WINDOWS_OBJCPLUS_COMPILER = "$(DEVDIR)/gcc"; -} diff --git a/libkern/c++/Tests/TestSerialization/test2/test2.pbproj/project.pbxproj b/libkern/c++/Tests/TestSerialization/test2/test2.pbproj/project.pbxproj new file mode 100644 index 000000000..9403d5361 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test2/test2.pbproj/project.pbxproj @@ -0,0 +1,260 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 38; + objects = { + 05D29F900382361902CA299A = { + isa = PBXFileReference; + path = test2_main.cpp; + refType = 4; + }; + 05D29F910382361902CA299A = { + fileRef = 05D29F900382361902CA299A; + isa = PBXBuildFile; + settings = { + }; + }; +//050 +//051 +//052 +//053 +//054 +//060 +//061 +//062 +//063 +//064 + 06AA1261FFB20DD611CA28AA = { + buildActionMask = 2147483647; + files = ( + ); + generatedFileNames = ( + ); + isa = PBXShellScriptBuildPhase; + neededFileNames = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "script=\"${SYSTEM_DEVELOPER_DIR}/ProjectBuilder Extras/Kernel Extension Support/KEXTPostprocess\";\nif [ -x \"$script\" ]; then\n . \"$script\"\nfi"; + }; + 06AA1262FFB20DD611CA28AA = { + buildRules = ( + ); + buildSettings = { + COPY_PHASE_STRIP = NO; + OPTIMIZATION_CFLAGS = "-O0"; + }; + isa = PBXBuildStyle; + name = Development; + }; + 06AA1263FFB20DD611CA28AA = { + buildRules = ( + ); + buildSettings = { + COPY_PHASE_STRIP = YES; + }; + isa = PBXBuildStyle; + name = Deployment; + }; + 06AA1268FFB211EB11CA28AA = { + buildActionMask = 2147483647; + files = ( + ); + generatedFileNames = ( + ); + isa = PBXShellScriptBuildPhase; + neededFileNames = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "script=\"${SYSTEM_DEVELOPER_DIR}/ProjectBuilder Extras/Kernel Extension Support/KEXTPreprocess\";\nif [ -x \"$script\" ]; then\n . 
\"$script\"\nfi"; + }; +//060 +//061 +//062 +//063 +//064 +//080 +//081 +//082 +//083 +//084 + 089C1669FE841209C02AAC07 = { + buildStyles = ( + 06AA1262FFB20DD611CA28AA, + 06AA1263FFB20DD611CA28AA, + ); + isa = PBXProject; + mainGroup = 089C166AFE841209C02AAC07; + projectDirPath = ""; + targets = ( + 089C1673FE841209C02AAC07, + ); + }; + 089C166AFE841209C02AAC07 = { + children = ( + 247142CAFF3F8F9811CA285C, + 19C28FB6FE9D52B211CA2CBB, + ); + isa = PBXGroup; + name = test2; + refType = 4; + }; + 089C1673FE841209C02AAC07 = { + buildPhases = ( + 06AA1268FFB211EB11CA28AA, + 089C1674FE841209C02AAC07, + 089C1675FE841209C02AAC07, + 089C1676FE841209C02AAC07, + 089C1677FE841209C02AAC07, + 089C1679FE841209C02AAC07, + 06AA1261FFB20DD611CA28AA, + ); + buildSettings = { + FRAMEWORK_SEARCH_PATHS = ""; + HEADER_SEARCH_PATHS = ""; + INSTALL_PATH = "$(SYSTEM_LIBRARY_DIR)/Extensions"; + KERNEL_MODULE = YES; + LIBRARY_SEARCH_PATHS = ""; + MODULE_NAME = com.MySoftwareCompany.kext.test2; + MODULE_START = test2_start; + MODULE_STOP = test2_stop; + MODULE_VERSION = 1.0.0d1; + OTHER_CFLAGS = ""; + OTHER_LDFLAGS = ""; + OTHER_REZFLAGS = ""; + PRODUCT_NAME = test2; + SECTORDER_FLAGS = ""; + WARNING_CFLAGS = "-Wmost -Wno-four-char-constants -Wno-unknown-pragmas"; + WRAPPER_EXTENSION = kext; + }; + dependencies = ( + ); + isa = PBXBundleTarget; + name = test2; + productInstallPath = "$(SYSTEM_LIBRARY_DIR)/Extensions"; + productName = test2; + productReference = 0A5A7D55FFB780D811CA28AA; + productSettingsXML = " + + + + CFBundleDevelopmentRegion + English + CFBundleExecutable + test2 + CFBundleIconFile + + CFBundleIdentifier + com.MySoftwareCompany.kext.test2 + CFBundleInfoDictionaryVersion + 6.0 + CFBundlePackageType + KEXT + CFBundleSignature + ???? + CFBundleVersion + 1.0.0d1 + OSBundleLibraries + + com.apple.kernel.libkern + 1.1 + + + +"; + shouldUseHeadermap = 1; + }; + 089C1674FE841209C02AAC07 = { + buildActionMask = 2147483647; + files = ( + ); + isa = PBXHeadersBuildPhase; + runOnlyForDeploymentPostprocessing = 0; + }; + 089C1675FE841209C02AAC07 = { + buildActionMask = 2147483647; + files = ( + ); + isa = PBXResourcesBuildPhase; + runOnlyForDeploymentPostprocessing = 0; + }; + 089C1676FE841209C02AAC07 = { + buildActionMask = 2147483647; + files = ( + 05D29F910382361902CA299A, + ); + isa = PBXSourcesBuildPhase; + runOnlyForDeploymentPostprocessing = 0; + }; + 089C1677FE841209C02AAC07 = { + buildActionMask = 2147483647; + files = ( + ); + isa = PBXFrameworksBuildPhase; + runOnlyForDeploymentPostprocessing = 0; + }; + 089C1679FE841209C02AAC07 = { + buildActionMask = 2147483647; + files = ( + ); + isa = PBXRezBuildPhase; + runOnlyForDeploymentPostprocessing = 0; + }; +//080 +//081 +//082 +//083 +//084 +//0A0 +//0A1 +//0A2 +//0A3 +//0A4 + 0A5A7D55FFB780D811CA28AA = { + isa = PBXBundleReference; + path = test2.kext; + refType = 3; + }; +//0A0 +//0A1 +//0A2 +//0A3 +//0A4 +//190 +//191 +//192 +//193 +//194 + 19C28FB6FE9D52B211CA2CBB = { + children = ( + 0A5A7D55FFB780D811CA28AA, + ); + isa = PBXGroup; + name = Products; + refType = 4; + }; +//190 +//191 +//192 +//193 +//194 +//240 +//241 +//242 +//243 +//244 + 247142CAFF3F8F9811CA285C = { + children = ( + 05D29F900382361902CA299A, + ); + isa = PBXGroup; + name = Source; + path = ""; + refType = 4; + }; + }; + rootObject = 089C1669FE841209C02AAC07; +} diff --git a/libkern/c++/Tests/TestSerialization/test2.kmodproj/test2_main.cpp b/libkern/c++/Tests/TestSerialization/test2/test2_main.cpp similarity index 100% rename from 
libkern/c++/Tests/TestSerialization/test2.kmodproj/test2_main.cpp rename to libkern/c++/Tests/TestSerialization/test2/test2_main.cpp diff --git a/libkern/conf/MASTER b/libkern/conf/MASTER index 3307faed8..b87e166d0 100644 --- a/libkern/conf/MASTER +++ b/libkern/conf/MASTER @@ -52,4 +52,5 @@ ident LIBKERN options KDEBUG # kernel tracing # +options GPROF # kernel profiling # options LIBKERNCPP # C++ implementation # diff --git a/libkern/conf/MASTER.i386 b/libkern/conf/MASTER.i386 index 83a06f878..b61a6e538 100644 --- a/libkern/conf/MASTER.i386 +++ b/libkern/conf/MASTER.i386 @@ -1,7 +1,7 @@ ###################################################################### # # RELEASE = [intel mach libkerncpp] -# PROFILE = [intel mach libkerncpp profile] +# PROFILE = [RELEASE profile] # DEBUG = [intel mach libkerncpp debug] # ###################################################################### diff --git a/libkern/conf/MASTER.ppc b/libkern/conf/MASTER.ppc index a2764000e..5c6b53d20 100644 --- a/libkern/conf/MASTER.ppc +++ b/libkern/conf/MASTER.ppc @@ -5,7 +5,7 @@ # -------- ---- -------- --------------- # # RELEASE = [ppc mach libkerncpp] -# PROFILE = [ppc mach libkerncpp profile] +# PROFILE = [RELEASE profile] # DEBUG = [ppc mach libkerncpp debug] # RELEASE_TRACE = [ RELEASE kdebug ] # DEBUG_TRACE = [ DEBUG kdebug ] diff --git a/libkern/conf/Makefile b/libkern/conf/Makefile index ffeab35e7..739d5d807 100644 --- a/libkern/conf/Makefile +++ b/libkern/conf/Makefile @@ -18,7 +18,7 @@ ifndef LIBKERN_KERNEL_CONFIG export LIBKERN_KERNEL_CONFIG = $(KERNEL_CONFIG) endif -COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) +export COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf: make build_setup @@ -53,6 +53,7 @@ do_all: do_setup_conf SOURCE=$${next_source} \ TARGET=$(TARGET) \ INCL_MAKEDEP=FALSE \ + KERNEL_CONFIG=$(LIBKERN_KERNEL_CONFIG) \ build_all; \ echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(LIBKERN_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; diff --git a/libkern/conf/Makefile.template b/libkern/conf/Makefile.template index 966e827c8..e56bdbb70 100644 --- a/libkern/conf/Makefile.template +++ b/libkern/conf/Makefile.template @@ -86,7 +86,7 @@ LDOBJS = $(OBJS) $(COMPONENT).o: $(LDOBJS) @echo "creating $(COMPONENT).o" $(RM) $(RMFLAGS) vers.c - $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/newvers \ + $(COMPOBJROOT)/newvers \ `$(CAT) ${VERSION_FILES}` ${COPYRIGHT_FILES} ${KCC} $(CFLAGS) $(INCLUDES) -c vers.c @echo [ updating $(COMPONENT).o ${LIBKERN_KERNEL_CONFIG} ] diff --git a/libkern/conf/files b/libkern/conf/files index c65902fad..ba3b178b9 100644 --- a/libkern/conf/files +++ b/libkern/conf/files @@ -2,6 +2,7 @@ OPTIONS/libkerncpp optional libkerncpp OPTIONS/kdebug optional kdebug +OPTIONS/gprof optional gprof # libkern diff --git a/libkern/conf/files.ppc b/libkern/conf/files.ppc index b3cfa98f9..0e495aa18 100644 --- a/libkern/conf/files.ppc +++ b/libkern/conf/files.ppc @@ -2,4 +2,5 @@ libkern/ppc/OSAtomic.s standard libkern/ppc/bcmp.s standard libkern/ppc/memcmp.s standard libkern/ppc/strlen.s standard +libkern/c++/OSObjectAsm.s optional libkerncpp diff --git a/libkern/conf/version.major b/libkern/conf/version.major index 1e8b31496..7f8f011eb 100644 --- a/libkern/conf/version.major +++ b/libkern/conf/version.major @@ -1 +1 @@ -6 +7 diff --git a/libkern/conf/version.minor b/libkern/conf/version.minor index 45a4fb75d..573541ac9 100644 --- a/libkern/conf/version.minor +++ 
b/libkern/conf/version.minor @@ -1 +1 @@ -8 +0 diff --git a/libkern/conf/version.variant b/libkern/conf/version.variant index e69de29bb..573541ac9 100644 --- a/libkern/conf/version.variant +++ b/libkern/conf/version.variant @@ -0,0 +1 @@ +0 diff --git a/libkern/libkern/OSByteOrder.h b/libkern/libkern/OSByteOrder.h index b0fd3dabc..c94053055 100644 --- a/libkern/libkern/OSByteOrder.h +++ b/libkern/libkern/OSByteOrder.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -32,11 +32,11 @@ #ifndef _OS_OSBYTEORDER_H #define _OS_OSBYTEORDER_H -#include <libkern/OSTypes.h> +#include <stdint.h> -#if defined(__ppc__) +#if defined(__GNUC__) && defined(__ppc__) #include <libkern/ppc/OSByteOrder.h> -#elif defined(__i386__) +#elif defined(__GNUC__) && defined(__i386__) #include <libkern/i386/OSByteOrder.h> #else #include <libkern/machine/OSByteOrder.h> @@ -49,148 +49,153 @@ enum { }; OS_INLINE -UInt32 +int32_t OSHostByteOrder(void) { - UInt32 x = (OSBigEndian << 24) | OSLittleEndian; - return (UInt32)*((UInt8 *)&x); +#if defined(__LITTLE_ENDIAN__) + return OSLittleEndian; +#elif defined(__BIG_ENDIAN__) + return OSBigEndian; +#else + return OSUnknownByteOrder; +#endif } /* Macros for swapping constant values in the preprocessing stage. */ -#define OSSwapConstInt16(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8)) +#define OSSwapConstInt16(x) ((((uint16_t)(x) & 0xff00) >> 8) | \ + (((uint16_t)(x) & 0x00ff) << 8)) + +#define OSSwapConstInt32(x) ((((uint32_t)(x) & 0xff000000) >> 24) | \ + (((uint32_t)(x) & 0x00ff0000) >> 8) | \ + (((uint32_t)(x) & 0x0000ff00) << 8) | \ + (((uint32_t)(x) & 0x000000ff) << 24)) + +#define OSSwapConstInt64(x) ((((uint64_t)(x) & 0xff00000000000000ULL) >> 56) | \ + (((uint64_t)(x) & 0x00ff000000000000ULL) >> 40) | \ + (((uint64_t)(x) & 0x0000ff0000000000ULL) >> 24) | \ + (((uint64_t)(x) & 0x000000ff00000000ULL) >> 8) | \ + (((uint64_t)(x) & 0x00000000ff000000ULL) << 8) | \ + (((uint64_t)(x) & 0x0000000000ff0000ULL) << 24) | \ + (((uint64_t)(x) & 0x000000000000ff00ULL) << 40) | \ + (((uint64_t)(x) & 0x00000000000000ffULL) << 56)) + +#if !defined(__GNUC__) +#define __builtin_constant_p(x) (0) +#endif -#define OSSwapConstInt32(x) ((OSSwapConstInt16(x) << 16) | \ - (OSSwapConstInt16((x) >> 16))) +#define OSSwapInt16(x) \ + (__builtin_constant_p(x) ? OSSwapConstInt16(x) : _OSSwapInt16(x)) -#define OSSwapConstInt64(x) ((OSSwapConstInt32(x) << 32) | \ - (OSSwapConstInt32((x) >> 32))) +#define OSSwapInt32(x) \ + (__builtin_constant_p(x) ? OSSwapConstInt32(x) : _OSSwapInt32(x)) + +#define OSSwapInt64(x) \ + (__builtin_constant_p(x) ? OSSwapConstInt64(x) : _OSSwapInt64(x)) + +#define OSReadBigInt(x, y) OSReadBigInt32(x, y) +#define OSWriteBigInt(x, y, z) OSWriteBigInt32(x, y, z) +#define OSSwapBigToHostInt(x) OSSwapBigToHostInt32(x) +#define OSSwapHostToBigInt(x) OSSwapHostToBigInt32(x) +#define OSReadLittleInt(x, y) OSReadLittleInt32(x, y) +#define OSWriteLittleInt(x, y, z) OSWriteLittleInt32(x, y, z) +#define OSSwapHostToLittleInt(x) OSSwapHostToLittleInt32(x) +#define OSSwapLittleToHostInt(x) OSSwapLittleToHostInt32(x) #if defined(__BIG_ENDIAN__) /* Functions for loading big endian to host endianess.
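The reworked OSSwapInt16/32/64 macros above pick an implementation with __builtin_constant_p: constant arguments fold at compile time through the OSSwapConstInt* preprocessor arithmetic, while runtime values fall through to the inline _OSSwapInt* routines supplied by the per-architecture headers. A small sketch of the effect (values invented for illustration; assumes <libkern/OSByteOrder.h> is on the include path):

    #include <libkern/OSByteOrder.h>

    // OSSwapConstInt32 is pure integer arithmetic, so it is usable where
    // a compile-time constant is required, e.g. a static initializer.
    static const uint32_t kBigEndianMagic = OSSwapConstInt32(0x12345678);

    uint32_t swap_runtime(uint32_t v)
    {
        // __builtin_constant_p(v) is false here, so OSSwapInt32 expands
        // to _OSSwapInt32 (bswap on i386, lwbrx on ppc, a byte-copying
        // union in the machine-generic header).
        return OSSwapInt32(v);
    }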
*/ OS_INLINE -UInt -OSReadBigInt( - volatile void * base, - UInt offset -) -{ - return *(volatile UInt *)((UInt8 *)base + offset); -} - -OS_INLINE -UInt16 +uint16_t OSReadBigInt16( volatile void * base, - UInt offset + uintptr_t offset ) { - return *(volatile UInt16 *)((UInt8 *)base + offset); + return *(volatile uint16_t *)((int8_t *)base + offset); } OS_INLINE -UInt32 +uint32_t OSReadBigInt32( volatile void * base, - UInt offset + uintptr_t offset ) { - return *(volatile UInt32 *)((UInt8 *)base + offset); + return *(volatile uint32_t *)((uintptr_t)base + offset); } OS_INLINE -UInt64 +uint64_t OSReadBigInt64( volatile void * base, - UInt offset + uintptr_t offset ) { - return *(volatile UInt64 *)((UInt8 *)base + offset); + return *(volatile uint64_t *)((uintptr_t)base + offset); } /* Functions for storing host endianess to big endian. */ -OS_INLINE -void -OSWriteBigInt( - volatile void * base, - UInt offset, - UInt data -) -{ - *(volatile UInt *)((UInt8 *)base + offset) = data; -} - OS_INLINE void OSWriteBigInt16( volatile void * base, - UInt offset, - UInt16 data + uintptr_t offset, + uint16_t data ) { - *(volatile UInt16 *)((UInt8 *)base + offset) = data; + *(volatile uint16_t *)((uintptr_t)base + offset) = data; } OS_INLINE void OSWriteBigInt32( volatile void * base, - UInt offset, - UInt32 data + uintptr_t offset, + uint32_t data ) { - *(volatile UInt32 *)((UInt8 *)base + offset) = data; + *(volatile uint32_t *)((uintptr_t)base + offset) = data; } OS_INLINE void OSWriteBigInt64( volatile void * base, - UInt offset, - UInt64 data + uintptr_t offset, + uint64_t data ) { - *(volatile UInt64 *)((UInt8 *)base + offset) = data; + *(volatile uint64_t *)((uintptr_t)base + offset) = data; } /* Functions for loading little endian to host endianess. */ OS_INLINE -UInt -OSReadLittleInt( - volatile void * base, - UInt offset -) -{ - return OSReadSwapInt(base, offset); -} - -OS_INLINE -UInt16 +uint16_t OSReadLittleInt16( volatile void * base, - UInt offset + uintptr_t offset ) { return OSReadSwapInt16(base, offset); } OS_INLINE -UInt32 +uint32_t OSReadLittleInt32( volatile void * base, - UInt offset + uintptr_t offset ) { return OSReadSwapInt32(base, offset); } OS_INLINE -UInt64 +uint64_t OSReadLittleInt64( volatile void * base, - UInt offset + uintptr_t offset ) { return OSReadSwapInt64(base, offset); @@ -198,23 +203,12 @@ OSReadLittleInt64( /* Functions for storing host endianess to little endian. */ -OS_INLINE -void -OSWriteLittleInt( - volatile void * base, - UInt offset, - UInt data -) -{ - OSWriteSwapInt(base, offset, data); -} - OS_INLINE void OSWriteLittleInt16( volatile void * base, - UInt offset, - UInt16 data + uintptr_t offset, + uint16_t data ) { OSWriteSwapInt16(base, offset, data); @@ -224,8 +218,8 @@ OS_INLINE void OSWriteLittleInt32( volatile void * base, - UInt offset, - UInt32 data + uintptr_t offset, + uint32_t data ) { OSWriteSwapInt32(base, offset, data); @@ -235,8 +229,8 @@ OS_INLINE void OSWriteLittleInt64( volatile void * base, - UInt offset, - UInt64 data + uintptr_t offset, + uint64_t data ) { OSWriteSwapInt64(base, offset, data); @@ -251,36 +245,27 @@ OSWriteLittleInt64( /* Generic host endianess to big endian byte swapping functions. 
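On a big-endian host the OSReadBigInt*/OSWriteBigInt* accessors above compile down to ordinary loads and stores, while the little-endian build further below routes them through the byte-reversing primitives; either way the caller sees host-order values. A usage sketch (the wire format here is invented for illustration):

    #include <libkern/OSByteOrder.h>

    // Read a big-endian (network-order) header: a 16-bit type at offset 0
    // and a 32-bit length at offset 2. Results are host order on any CPU.
    void parse_be_header(volatile void *buf, uint16_t *type, uint32_t *len)
    {
        *type = OSReadBigInt16(buf, 0);
        *len  = OSReadBigInt32(buf, 2);
        OSWriteBigInt32(buf, 2, *len + 4);  // stored back big-endian again
    }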
*/ OS_INLINE -UInt -OSSwapHostToBigInt( - UInt data -) -{ - return data; -} - -OS_INLINE -UInt16 +uint16_t OSSwapHostToBigInt16( - UInt16 data + uint16_t data ) { return data; } OS_INLINE -UInt32 +uint32_t OSSwapHostToBigInt32( - UInt32 data + uint32_t data ) { return data; } OS_INLINE -UInt64 +uint64_t OSSwapHostToBigInt64( - UInt64 data + uint64_t data ) { return data; @@ -294,41 +279,9 @@ OSSwapHostToBigInt64( /* Generic host endianess to little endian byte swapping functions. */ -OS_INLINE -UInt -OSSwapHostToLittleInt( - UInt data -) -{ - return OSSwapInt(data); -} - -OS_INLINE -UInt16 -OSSwapHostToLittleInt16( - UInt16 data -) -{ - return OSSwapInt16(data); -} - -OS_INLINE -UInt32 -OSSwapHostToLittleInt32( - UInt32 data -) -{ - return OSSwapInt32(data); -} - -OS_INLINE -UInt64 -OSSwapHostToLittleInt64( - UInt64 data -) -{ - return OSSwapInt64(data); -} +#define OSSwapHostToLittleInt16(x) OSSwapInt16(x) +#define OSSwapHostToLittleInt32(x) OSSwapInt32(x) +#define OSSwapHostToLittleInt64(x) OSSwapInt64(x) /* Big endian to host endianess byte swapping macros for constants. */ @@ -339,36 +292,27 @@ OSSwapHostToLittleInt64( /* Generic big endian to host endianess byte swapping functions. */ OS_INLINE -UInt -OSSwapBigToHostInt( - UInt data -) -{ - return data; -} - -OS_INLINE -UInt16 +uint16_t OSSwapBigToHostInt16( - UInt16 data + uint16_t data ) { return data; } OS_INLINE -UInt32 +uint32_t OSSwapBigToHostInt32( - UInt32 data + uint32_t data ) { return data; } OS_INLINE -UInt64 +uint64_t OSSwapBigToHostInt64( - UInt64 data + uint64_t data ) { return data; @@ -382,81 +326,39 @@ OSSwapBigToHostInt64( /* Generic little endian to host endianess byte swapping functions. */ -OS_INLINE -UInt -OSSwapLittleToHostInt( - UInt data -) -{ - return OSSwapInt(data); -} - -OS_INLINE -UInt16 -OSSwapLittleToHostInt16( - UInt16 data -) -{ - return OSSwapInt16(data); -} - -OS_INLINE -UInt32 -OSSwapLittleToHostInt32( - UInt32 data -) -{ - return OSSwapInt32(data); -} - -OS_INLINE -UInt64 -OSSwapLittleToHostInt64( - UInt64 data -) -{ - return OSSwapInt64(data); -} +#define OSSwapLittleToHostInt16(x) OSSwapInt16(x) +#define OSSwapLittleToHostInt32(x) OSSwapInt32(x) +#define OSSwapLittleToHostInt64(x) OSSwapInt64(x) #elif defined(__LITTLE_ENDIAN__) /* Functions for loading big endian to host endianess. */ OS_INLINE -UInt -OSReadBigInt( - volatile void * base, - UInt offset -) -{ - return OSReadSwapInt(base, offset); -} - -OS_INLINE -UInt16 +uint16_t OSReadBigInt16( volatile void * base, - UInt offset + uintptr_t offset ) { return OSReadSwapInt16(base, offset); } OS_INLINE -UInt32 +uint32_t OSReadBigInt32( volatile void * base, - UInt offset + uintptr_t offset ) { return OSReadSwapInt32(base, offset); } OS_INLINE -UInt64 +uint64_t OSReadBigInt64( volatile void * base, - UInt offset + uintptr_t offset ) { return OSReadSwapInt64(base, offset); @@ -464,23 +366,12 @@ OSReadBigInt64( /* Functions for storing host endianess to big endian. 
*/ -OS_INLINE -void -OSWriteBigInt( - volatile void * base, - UInt offset, - UInt data -) -{ - OSWriteSwapInt(base, offset, data); -} - OS_INLINE void OSWriteBigInt16( volatile void * base, - UInt offset, - UInt16 data + uintptr_t offset, + uint16_t data ) { OSWriteSwapInt16(base, offset, data); @@ -490,8 +381,8 @@ OS_INLINE void OSWriteBigInt32( volatile void * base, - UInt offset, - UInt32 data + uintptr_t offset, + uint32_t data ) { OSWriteSwapInt32(base, offset, data); @@ -501,8 +392,8 @@ OS_INLINE void OSWriteBigInt64( volatile void * base, - UInt offset, - UInt64 data + uintptr_t offset, + uint64_t data ) { OSWriteSwapInt64(base, offset, data); @@ -511,89 +402,68 @@ OSWriteBigInt64( /* Functions for loading little endian to host endianess. */ OS_INLINE -UInt -OSReadLittleInt( - volatile void * base, - UInt offset -) -{ - return *(volatile UInt *)((UInt8 *)base + offset); -} - -OS_INLINE -UInt16 +uint16_t OSReadLittleInt16( volatile void * base, - UInt offset + uintptr_t offset ) { - return *(volatile UInt16 *)((UInt8 *)base + offset); + return *(volatile uint16_t *)((uintptr_t)base + offset); } OS_INLINE -UInt32 +uint32_t OSReadLittleInt32( volatile void * base, - UInt offset + uintptr_t offset ) { - return *(volatile UInt32 *)((UInt8 *)base + offset); + return *(volatile uint32_t *)((uintptr_t)base + offset); } OS_INLINE -UInt64 +uint64_t OSReadLittleInt64( volatile void * base, - UInt offset + uintptr_t offset ) { - return *(volatile UInt64 *)((UInt8 *)base + offset); + return *(volatile uint64_t *)((uintptr_t)base + offset); } /* Functions for storing host endianess to little endian. */ -OS_INLINE -void -OSWriteLittleInt( - volatile void * base, - UInt offset, - UInt data -) -{ - *(volatile UInt *)((UInt8 *)base + offset) = data; -} - OS_INLINE void OSWriteLittleInt16( volatile void * base, - UInt offset, - UInt16 data + uintptr_t offset, + uint16_t data ) { - *(volatile UInt16 *)((UInt8 *)base + offset) = data; + *(volatile uint16_t *)((uintptr_t)base + offset) = data; } OS_INLINE void OSWriteLittleInt32( volatile void * base, - UInt offset, - UInt32 data + uintptr_t offset, + uint32_t data ) { - *(volatile UInt32 *)((UInt8 *)base + offset) = data; + *(volatile uint32_t *)((uintptr_t)base + offset) = data; } OS_INLINE void OSWriteLittleInt64( volatile void * base, - UInt offset, - UInt64 data + uintptr_t offset, + uint64_t data ) { - *(volatile UInt64 *)((UInt8 *)base + offset) = data; + *(volatile uint64_t *)((uintptr_t)base + offset) = data; } /* Host endianess to big endian byte swapping macros for constants. */ @@ -604,41 +474,9 @@ OSWriteLittleInt64( /* Generic host endianess to big endian byte swapping functions. */ -OS_INLINE -UInt -OSSwapHostToBigInt( - UInt data -) -{ - return OSSwapInt(data); -} - -OS_INLINE -UInt16 -OSSwapHostToBigInt16( - UInt16 data -) -{ - return OSSwapInt16(data); -} - -OS_INLINE -UInt32 -OSSwapHostToBigInt32( - UInt32 data -) -{ - return OSSwapInt32(data); -} - -OS_INLINE -UInt64 -OSSwapHostToBigInt64( - UInt64 data -) -{ - return OSSwapInt64(data); -} +#define OSSwapHostToBigInt16(x) OSSwapInt16(x) +#define OSSwapHostToBigInt32(x) OSSwapInt32(x) +#define OSSwapHostToBigInt64(x) OSSwapInt64(x) /* Host endianess to little endian byte swapping macros for constants. */ @@ -649,36 +487,27 @@ OSSwapHostToBigInt64( /* Generic host endianess to little endian byte swapping functions. 
*/ OS_INLINE -UInt -OSSwapHostToLittleInt( - UInt data -) -{ - return data; -} - -OS_INLINE -UInt16 +uint16_t OSSwapHostToLittleInt16( - UInt16 data + uint16_t data ) { return data; } OS_INLINE -UInt32 +uint32_t OSSwapHostToLittleInt32( - UInt32 data + uint32_t data ) { return data; } OS_INLINE -UInt64 +uint64_t OSSwapHostToLittleInt64( - UInt64 data + uint64_t data ) { return data; @@ -692,41 +521,9 @@ OSSwapHostToLittleInt64( /* Generic big endian to host endianess byte swapping functions. */ -OS_INLINE -UInt -OSSwapBigToHostInt( - UInt data -) -{ - return OSSwapInt(data); -} - -OS_INLINE -UInt16 -OSSwapBigToHostInt16( - UInt16 data -) -{ - return OSSwapInt16(data); -} - -OS_INLINE -UInt32 -OSSwapBigToHostInt32( - UInt32 data -) -{ - return OSSwapInt32(data); -} - -OS_INLINE -UInt64 -OSSwapBigToHostInt64( - UInt64 data -) -{ - return OSSwapInt64(data); -} +#define OSSwapBigToHostInt16(x) OSSwapInt16(x) +#define OSSwapBigToHostInt32(x) OSSwapInt32(x) +#define OSSwapBigToHostInt64(x) OSSwapInt64(x) /* Little endian to host endianess byte swapping macros for constants. */ @@ -737,36 +534,27 @@ OSSwapBigToHostInt64( /* Generic little endian to host endianess byte swapping functions. */ OS_INLINE -UInt -OSSwapLittleToHostInt( - UInt data -) -{ - return data; -} - -OS_INLINE -UInt16 +uint16_t OSSwapLittleToHostInt16( - UInt16 data + uint16_t data ) { return data; } OS_INLINE -UInt32 +uint32_t OSSwapLittleToHostInt32( - UInt32 data + uint32_t data ) { return data; } OS_INLINE -UInt64 +uint64_t OSSwapLittleToHostInt64( - UInt64 data + uint64_t data ) { return data; diff --git a/libkern/libkern/OSTypes.h b/libkern/libkern/OSTypes.h index 6a24b78c5..44eb075d0 100644 --- a/libkern/libkern/OSTypes.h +++ b/libkern/libkern/OSTypes.h @@ -84,11 +84,7 @@ typedef unsigned char Boolean; #endif /* __MACTYPES__ */ #if !defined(OS_INLINE) - #if defined(__GNUC__) - #define OS_INLINE static __inline__ - #elif defined(__MWERKS__) || defined(__cplusplus) - #define OS_INLINE static inline - #endif +# define OS_INLINE static inline #endif #endif /* _OS_OSTYPES_H */ diff --git a/libkern/libkern/c++/OSData.h b/libkern/libkern/c++/OSData.h index b306a247a..6f84b50ed 100644 --- a/libkern/libkern/c++/OSData.h +++ b/libkern/libkern/c++/OSData.h @@ -95,13 +95,12 @@ public: unsigned int start, unsigned int inLength); /*! - @function initWithBytes - @abstract A member function to initialize an instance of OSData with the provided data. - @param bytes A pointer to a block of data to be copied. - @param inLength The length of the block of data. + @function initWithCapacity + @abstract A member function to initialize an instance of OSData with a minimum capacity of at least the given size. If this function is called on an object that has been previously used then the length is set down to 0 and a new block of data is allocated if necessary to ensure the given capacity. + @param capacity The length of the allocated block of data. @result Returns true if initialization was successful, false otherwise. */ - virtual bool initWithCapacity(unsigned int inCapacity); + virtual bool initWithCapacity(unsigned int capacity); /*! @function initWithBytes @abstract A member function to initialize an instance of OSData which references a block of data. @@ -176,7 +175,7 @@ public: /*! @function appendBytes @abstract A member function which appends a buffer of data onto the end of the object's internal data buffer. - @param bytes A pointer to the block of data. + @param bytes A pointer to the block of data.
If the value is 0 then append zeroed memory to the data object. @param inLength The length of the data block. @result Returns true if the object was able to append the new data, false otherwise. */ @@ -252,6 +251,8 @@ public: virtual bool appendByte(unsigned char byte, unsigned int inCount); + +private: OSMetaClassDeclareReservedUnused(OSData, 0); OSMetaClassDeclareReservedUnused(OSData, 1); OSMetaClassDeclareReservedUnused(OSData, 2); diff --git a/libkern/libkern/c++/OSMetaClass.h b/libkern/libkern/c++/OSMetaClass.h index 4225bc8a5..719949ec3 100644 --- a/libkern/libkern/c++/OSMetaClass.h +++ b/libkern/libkern/c++/OSMetaClass.h @@ -590,6 +590,10 @@ void classname ::_RESERVED ## classname ## index () \ // IOKit debug internal routines. static void printInstanceCounts(); + static void serializeClassDictionary(OSDictionary *dict); + +private: + // Obsolete APIs static OSDictionary *getClassDictionary(); virtual bool serialize(OSSerialize *s) const; diff --git a/libkern/libkern/c++/OSSymbol.h b/libkern/libkern/c++/OSSymbol.h index d5db609a9..fbae7ae13 100644 --- a/libkern/libkern/c++/OSSymbol.h +++ b/libkern/libkern/c++/OSSymbol.h @@ -59,14 +59,29 @@ private: virtual bool initWithCStringNoCopy(const char *cString); protected: + /*! + @function taggedRelease + @abstract Overridden superclass release method so we can synchronise with the symbol pool. + @discussion When we release a symbol we need to synchronise the destruction of the object with any potential searches that may be occurring through the family factory methods. See OSObject::taggedRelease + */ + virtual void taggedRelease(const void *tag, const int when) const; + /*! @function free - @abstract A member function to release all resources created or used by the OSString object. - @discussion This function should not be called directly, use release() instead. + @abstract Overridden superclass release method so we can synchronise with the symbol pool. + @discussion When we release a symbol we need to synchronise the destruction of the object with any potential searches that may be occurring through the family factory methods. See OSObject::free */ virtual void free(); public: + /*! + @function taggedRelease + @abstract Release a tag. + @discussion The C++ language has forced me to override this method even though I have implemented it as { super::taggedRelease(tag) }. It seems that C++ is confused about the appearance of the protected taggedRelease with 2 args and refuses to only inherit one function. See OSObject::taggedRelease + */ + virtual void taggedRelease(const void *tag) const; + + /*! @function withString @abstract A static constructor function to create an OSSymbol instance from an OSString object or returns an existing OSSymbol object based on the OSString object given. diff --git a/libkern/libkern/i386/OSByteOrder.h b/libkern/libkern/i386/OSByteOrder.h index 0aa2270f1..87b0a9b48 100644 --- a/libkern/libkern/i386/OSByteOrder.h +++ b/libkern/libkern/i386/OSByteOrder.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -22,191 +22,139 @@ * * @APPLE_LICENSE_HEADER_END@ */ -/* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. - * - * HISTORY - * - */ #ifndef _OS_OSBYTEORDERI386_H #define _OS_OSBYTEORDERI386_H -#include <libkern/OSTypes.h> +#include <stdint.h> -/* Functions for byte reversed loads.
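The OSData::appendBytes change documented earlier in this hunk (a 0 bytes pointer appends zeroed memory) is easy to overlook. A minimal usage sketch against the libkern C++ API; makeZeroPadded is an invented name:

    #include <libkern/c++/OSData.h>

    // Copy 'len' bytes from 'src' and pad the tail with 'pad' zero bytes,
    // relying on appendBytes(0, n) appending zeroed memory.
    static OSData *makeZeroPadded(const void *src, unsigned int len,
                                  unsigned int pad)
    {
        OSData *d = OSData::withCapacity(len + pad);
        if (!d) return 0;
        if (!d->appendBytes(src, len) || !d->appendBytes(0, pad)) {
            d->release();
            return 0;
        }
        return d;
    }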
*/ - -OS_INLINE -UInt16 -OSReadSwapInt16( - volatile void * base, - UInt offset -) -{ - UInt16 result; +#if !defined(OS_INLINE) +# define OS_INLINE static inline +#endif - result = *(volatile UInt16 *)((UInt8 *)base + offset); - __asm__ volatile("xchgb %b0,%h0" - : "=q" (result) - : "0" (result)); - return result; -} +/* Generic byte swapping functions. */ OS_INLINE -UInt32 -OSReadSwapInt32( - volatile void * base, - UInt offset +uint16_t +_OSSwapInt16( + uint16_t data ) { - UInt32 result; - - result = *(volatile UInt32 *)((UInt8 *)base + offset); - __asm__ volatile("bswap %0" - : "=r" (result) - : "0" (result)); - return result; + __asm__ ("xchgb %b0, %h0" : "+q" (data)); + return data; } OS_INLINE -UInt64 -OSReadSwapInt64( - volatile void * base, - UInt offset +uint32_t +_OSSwapInt32( + uint32_t data ) { - UInt64 * inp; - union ullc { - UInt64 ull; - UInt ul[2]; - } outv; - - inp = (UInt64 *)base; - outv.ul[0] = OSReadSwapInt32(inp, offset + 4); - outv.ul[1] = OSReadSwapInt32(inp, offset); - return outv.ull; + __asm__ ("bswap %0" : "+r" (data)); + return data; } OS_INLINE -UInt -OSReadSwapInt( - volatile void * base, - UInt offset +uint64_t +_OSSwapInt64( + uint64_t data ) { - UInt result; - - result = *(volatile UInt *)((UInt8 *)base + offset); - __asm__ volatile("bswap %0" - : "=r" (result) - : "0" (result)); - return result; + union { + uint64_t ull; + uint32_t ul[2]; + } u; + + /* This actually generates the best code */ + u.ul[0] = data >> 32; + u.ul[1] = data & 0xffffffff; + u.ul[0] = _OSSwapInt32(u.ul[0]); + u.ul[1] = _OSSwapInt32(u.ul[1]); + return u.ull; } -/* Functions for byte reversed stores. */ +/* Functions for byte reversed loads. */ OS_INLINE -void -OSWriteSwapInt16( - volatile void * base, - UInt offset, - UInt16 data +uint16_t +OSReadSwapInt16( + volatile void * base, + uintptr_t offset ) { - __asm__ volatile("xchgb %b0,%h0" - : "=q" (data) - : "0" (data)); - *(volatile UInt16 *)((UInt8 *)base + offset) = data; + uint16_t result; + + result = *(uint16_t *)((uintptr_t)base + offset); + return _OSSwapInt16(result); } OS_INLINE -void -OSWriteSwapInt32( - volatile void * base, - UInt offset, - UInt32 data +uint32_t +OSReadSwapInt32( + volatile void * base, + uintptr_t offset ) { - __asm__ volatile("bswap %0" - : "=r" (data) - : "0" (data)); - *(volatile UInt32 *)((UInt8 *)base + offset) = data; + uint32_t result; + + result = *(uint32_t *)((uintptr_t)base + offset); + return _OSSwapInt32(result); } OS_INLINE -void -OSWriteSwapInt64( - volatile void * base, - UInt offset, - UInt64 data +uint64_t +OSReadSwapInt64( + volatile void * base, + uintptr_t offset ) { - UInt64 * outp; + uint32_t * inp; union ullc { - UInt64 ull; - UInt ul[2]; - } *inp; - - outp = (UInt64 *)base; - inp = (union ullc *)&data; - OSWriteSwapInt32(outp, offset, inp->ul[1]); - OSWriteSwapInt32(outp, offset + 4, inp->ul[0]); -} + uint64_t ull; + uint32_t ul[2]; + } outv; -OS_INLINE -void -OSWriteSwapInt( - volatile void * base, - UInt offset, - UInt data -) -{ - __asm__ volatile("bswap %0" - : "=r" (data) - : "0" (data)); - *(volatile UInt *)((UInt8 *)base + offset) = data; + inp = (uint32_t *)((uintptr_t)base + offset); + outv.ul[0] = inp[1]; + outv.ul[1] = inp[0]; + outv.ul[0] = _OSSwapInt32(outv.ul[0]); + outv.ul[1] = _OSSwapInt32(outv.ul[1]); + return outv.ull; } -/* Generic byte swapping functions. */ - -OS_INLINE -UInt16 -OSSwapInt16( - UInt16 data -) -{ - UInt16 temp = data; - return OSReadSwapInt16(&temp, 0); -} +/* Functions for byte reversed stores. 
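The i386 _OSSwapInt64 above splits the value into two 32-bit halves, swaps each with bswap, and recombines them through a union. A quick property check for any such implementation is that swapping twice is the identity and that the 64-bit swap agrees with swapping and exchanging the two halves; sketched here as a self-test (illustrative only):

    #include <libkern/OSByteOrder.h>

    static bool swapSelfTest(void)
    {
        uint64_t v = 0x0102030405060708ULL;
        if (OSSwapInt64(OSSwapInt64(v)) != v) return false;  // involution
        uint64_t manual =
            ((uint64_t)OSSwapInt32((uint32_t)(v & 0xffffffffULL)) << 32) |
            OSSwapInt32((uint32_t)(v >> 32));
        return OSSwapInt64(v) == manual;                     // halves agree
    }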
*/ OS_INLINE -UInt32 -OSSwapInt32( - UInt32 data +void +OSWriteSwapInt16( + volatile void * base, + uintptr_t offset, + uint16_t data ) { - UInt32 temp = data; - return OSReadSwapInt32(&temp, 0); + *(uint16_t *)((uintptr_t)base + offset) = _OSSwapInt16(data); } OS_INLINE -UInt64 -OSSwapInt64( - UInt64 data +void +OSWriteSwapInt32( + volatile void * base, + uintptr_t offset, + uint32_t data ) { - UInt64 temp = data; - return OSReadSwapInt64(&temp, 0); + *(uint32_t *)((uintptr_t)base + offset) = _OSSwapInt32(data); } OS_INLINE -UInt -OSSwapInt( - UInt data +void +OSWriteSwapInt64( + volatile void * base, + uintptr_t offset, + uint64_t data ) { - UInt temp = data; - return OSReadSwapInt(&temp, 0); + *(uint64_t *)((uintptr_t)base + offset) = _OSSwapInt64(data); } #endif /* ! _OS_OSBYTEORDERI386_H */ diff --git a/libkern/libkern/machine/OSByteOrder.h b/libkern/libkern/machine/OSByteOrder.h index b85d69b9a..fde044988 100644 --- a/libkern/libkern/machine/OSByteOrder.h +++ b/libkern/libkern/machine/OSByteOrder.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -33,39 +33,43 @@ #ifndef _OS_OSBYTEORDERMACHINE_H #define _OS_OSBYTEORDERMACHINE_H -#include +#include + +#if !defined(OS_INLINE) +# define OS_INLINE static inline +#endif /* Functions for byte reversed loads. */ OS_INLINE -UInt16 +uint16_t OSReadSwapInt16( volatile void * base, - UInt offset + uintptr_t offset ) { union sconv { - UInt16 us; - UInt8 uc[2]; + uint16_t us; + uint8_t uc[2]; } *inp, outv; - inp = (union sconv *)((UInt8 *)base + offset); + inp = (union sconv *)((uint8_t *)base + offset); outv.uc[0] = inp->uc[1]; outv.uc[1] = inp->uc[0]; return (outv.us); } OS_INLINE -UInt32 +uint32_t OSReadSwapInt32( volatile void * base, - UInt offset + uintptr_t offset ) { union lconv { - UInt32 ul; - UInt8 uc[4]; + uint32_t ul; + uint8_t uc[4]; } *inp, outv; - inp = (union lconv *)((UInt8 *)base + offset); + inp = (union lconv *)((uint8_t *)base + offset); outv.uc[0] = inp->uc[3]; outv.uc[1] = inp->uc[2]; outv.uc[2] = inp->uc[1]; @@ -74,17 +78,17 @@ OSReadSwapInt32( } OS_INLINE -UInt64 +uint64_t OSReadSwapInt64( volatile void * base, - UInt offset + uintptr_t offset ) { union llconv { - UInt64 ull; - UInt8 uc[8]; + uint64_t ull; + uint8_t uc[8]; } *inp, outv; - inp = (union llconv *)((UInt8 *)base + offset); + inp = (union llconv *)((uint8_t *)base + offset); outv.uc[0] = inp->uc[7]; outv.uc[1] = inp->uc[6]; outv.uc[2] = inp->uc[5]; @@ -96,31 +100,21 @@ OSReadSwapInt64( return (outv.ull); } -OS_INLINE -UInt -OSReadSwapInt( - volatile void * base, - UInt offset -) -{ - return (UInt)OSReadSwapInt32(base, offset); -} - /* Functions for byte reversed stores. 
*/ OS_INLINE void OSWriteSwapInt16( volatile void * base, - UInt offset, - UInt16 data + uintptr_t offset, + uint16_t data ) { union sconv { - UInt16 us; - UInt8 uc[2]; + uint16_t us; + uint8_t uc[2]; } *inp, *outp; - inp = (union sconv *)((UInt8 *)base + offset); + inp = (union sconv *)((uint8_t *)base + offset); outp = (union sconv *)&data; outp->uc[0] = inp->uc[1]; outp->uc[1] = inp->uc[0]; @@ -130,15 +124,15 @@ OS_INLINE void OSWriteSwapInt32( volatile void * base, - UInt offset, - UInt32 data + uintptr_t offset, + uint32_t data ) { union lconv { - UInt32 ul; - UInt8 uc[4]; + uint32_t ul; + uint8_t uc[4]; } *inp, *outp; - inp = (union lconv *)((UInt8 *)base + offset); + inp = (union lconv *)((uint8_t *)base + offset); outp = (union lconv *)&data; outp->uc[0] = inp->uc[3]; outp->uc[1] = inp->uc[2]; @@ -150,15 +144,15 @@ OS_INLINE void OSWriteSwapInt64( volatile void * base, - UInt offset, - UInt64 data + uintptr_t offset, + uint64_t data ) { union llconv { - UInt64 ull; - UInt8 uc[8]; + uint64_t ull; + uint8_t uc[8]; } *inp, *outp; - inp = (union llconv *)((UInt8 *)base + offset); + inp = (union llconv *)((uint8_t *)base + offset); outp = (union llconv *)&data; outp->uc[0] = inp->uc[7]; outp->uc[1] = inp->uc[6]; @@ -170,57 +164,36 @@ OSWriteSwapInt64( outp->uc[7] = inp->uc[0]; } -OS_INLINE -void -OSWriteSwapInt( - volatile void * base, - UInt offset, - UInt data -) -{ - OSWriteSwapInt32(base, offset, (UInt32)data); -} - /* Generic byte swapping functions. */ OS_INLINE -UInt16 -OSSwapInt16( - UInt16 data +uint16_t +_OSSwapInt16( + uint16_t data ) { - UInt16 temp = data; + uint16_t temp = data; return OSReadSwapInt16(&temp, 0); } OS_INLINE -UInt32 -OSSwapInt32( - UInt32 data +uint32_t +_OSSwapInt32( + uint32_t data ) { - UInt32 temp = data; + uint32_t temp = data; return OSReadSwapInt32(&temp, 0); } OS_INLINE -UInt64 -OSSwapInt64( - UInt64 data +uint64_t +_OSSwapInt64( + uint64_t data ) { - UInt64 temp = data; + uint64_t temp = data; return OSReadSwapInt64(&temp, 0); } -OS_INLINE -UInt -OSSwapInt( - UInt data -) -{ - UInt temp = data; - return OSReadSwapInt(&temp, 0); -} - #endif /* ! _OS_OSBYTEORDERMACHINE_H */ diff --git a/libkern/libkern/ppc/OSByteOrder.h b/libkern/libkern/ppc/OSByteOrder.h index 28ff91dc2..e3c509a17 100644 --- a/libkern/libkern/ppc/OSByteOrder.h +++ b/libkern/libkern/ppc/OSByteOrder.h @@ -33,87 +33,76 @@ #ifndef _OS_OSBYTEORDERPPC_H #define _OS_OSBYTEORDERPPC_H -#include +#include + +#if !defined(OS_INLINE) +# define OS_INLINE static inline +#endif /* Functions for byte reversed loads. 
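The machine-generic header just above avoids inline assembly entirely: it reverses bytes by type-punning through small unions (sconv, lconv, llconv), which any compiler can build. The same technique in isolation, for a 32-bit value:

    #include <stdint.h>

    // Stand-alone rendition of the union-based reversal used by
    // libkern/machine/OSByteOrder.h. GCC defines the behaviour of this
    // kind of union type-punning, which is what the kernel relies on.
    static inline uint32_t swap32_via_union(uint32_t v)
    {
        union { uint32_t ul; uint8_t uc[4]; } in, out;
        in.ul = v;
        out.uc[0] = in.uc[3];
        out.uc[1] = in.uc[2];
        out.uc[2] = in.uc[1];
        out.uc[3] = in.uc[0];
        return out.ul;
    }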
*/ OS_INLINE -UInt16 +uint16_t OSReadSwapInt16( volatile void * base, - UInt offset + uintptr_t offset ) { - UInt16 result; + uint16_t result; __asm__ volatile("lhbrx %0, %1, %2" : "=r" (result) - : "b" (base), "r" (offset) + : "b%" (base), "r" (offset) : "memory"); return result; } OS_INLINE -UInt32 +uint32_t OSReadSwapInt32( volatile void * base, - UInt offset + uintptr_t offset ) { - UInt32 result; + uint32_t result; __asm__ volatile("lwbrx %0, %1, %2" : "=r" (result) - : "b" (base), "r" (offset) + : "b%" (base), "r" (offset) : "memory"); return result; } OS_INLINE -UInt64 +uint64_t OSReadSwapInt64( volatile void * base, - UInt offset + uintptr_t offset ) { - UInt64 * inp; + uint64_t * inp; union ullc { - UInt64 ull; - UInt ul[2]; + uint64_t ull; + uint32_t ul[2]; } outv; - inp = (UInt64 *)base; + inp = (uint64_t *)base; outv.ul[0] = OSReadSwapInt32(inp, offset + 4); outv.ul[1] = OSReadSwapInt32(inp, offset); return outv.ull; } -OS_INLINE -UInt -OSReadSwapInt( - volatile void * base, - UInt offset -) -{ - UInt result; - __asm__ volatile("lwbrx %0, %1, %2" - : "=r" (result) - : "b" (base), "r" (offset) - : "memory"); - return result; -} - /* Functions for byte reversed stores. */ OS_INLINE void OSWriteSwapInt16( volatile void * base, - UInt offset, - UInt16 data + uintptr_t offset, + uint16_t data ) { __asm__ volatile("sthbrx %0, %1, %2" : - : "r" (data), "b" (base), "r" (offset) + : "r" (data), "b%" (base), "r" (offset) : "memory"); } @@ -121,13 +110,13 @@ OS_INLINE void OSWriteSwapInt32( volatile void * base, - UInt offset, - UInt32 data + uintptr_t offset, + uint32_t data ) { __asm__ volatile("stwbrx %0, %1, %2" : - : "r" (data), "b" (base), "r" (offset) + : "r" (data), "b%" (base), "r" (offset) : "memory" ); } @@ -135,76 +124,52 @@ OS_INLINE void OSWriteSwapInt64( volatile void * base, - UInt offset, - UInt64 data + uintptr_t offset, + uint64_t data ) { - UInt64 * outp; + uint64_t * outp; union ullc { - UInt64 ull; - UInt ul[2]; + uint64_t ull; + uint32_t ul[2]; } *inp; - outp = (UInt64 *)base; + outp = (uint64_t *)base; inp = (union ullc *)&data; OSWriteSwapInt32(outp, offset, inp->ul[1]); OSWriteSwapInt32(outp, offset + 4, inp->ul[0]); } -OS_INLINE -void -OSWriteSwapInt( - volatile void * base, - UInt offset, - UInt data -) -{ - __asm__ volatile("stwbrx %0, %1, %2" - : - : "r" (data), "b" (base), "r" (offset) - : "memory" ); -} - /* Generic byte swapping functions. */ OS_INLINE -UInt16 -OSSwapInt16( - UInt16 data +uint16_t +_OSSwapInt16( + uint16_t data ) { - UInt16 temp = data; + uint16_t temp = data; return OSReadSwapInt16(&temp, 0); } OS_INLINE -UInt32 -OSSwapInt32( - UInt32 data +uint32_t +_OSSwapInt32( + uint32_t data ) { - UInt32 temp = data; + uint32_t temp = data; return OSReadSwapInt32(&temp, 0); } OS_INLINE -UInt64 -OSSwapInt64( - UInt64 data +uint64_t +_OSSwapInt64( + uint64_t data ) { - UInt64 temp = data; + uint64_t temp = data; return OSReadSwapInt64(&temp, 0); } -OS_INLINE -UInt -OSSwapInt( - UInt data -) -{ - UInt temp = data; - return OSReadSwapInt(&temp, 0); -} - #endif /* ! 
_OS_OSBYTEORDERPPC_H */ diff --git a/libkern/ppc/OSAtomic.s b/libkern/ppc/OSAtomic.s index cc282177e..eaaab74ba 100644 --- a/libkern/ppc/OSAtomic.s +++ b/libkern/ppc/OSAtomic.s @@ -44,9 +44,9 @@ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; .macro ENTRY - .text - .align 2 - .globl $0 + .text + .align 2 + .globl $0 $0: .endmacro @@ -54,50 +54,41 @@ $0: /* int OSCompareAndSwap( UInt32 oldVal, UInt32 newVal, UInt32 * addr ) +This is now an alias to hw_compare_and_store, see xnu/libkern/Makefile */ - - ENTRY _OSCompareAndSwap -.L_CASretry: - lwarx r6, 0,r5 - cmpw r6, r3 - bne- .L_CASfail - stwcx. r4, 0,r5 - bne- .L_CASretry - isync - li r3, 1 - blr -.L_CASfail: - li r3, 0 - blr - - +/* +Note: We can not use the hw_atomic routines provided by osfmk/ppc as +the return the result of the addition not the original value. +*/ /* SInt32 OSDecrementAtomic(SInt32 * value) */ - ENTRY _OSDecrementAtomic - mr r4, r3 - li r3, -1 - b _OSAddAtomic + ENTRY _OSDecrementAtomic + mr r4, r3 + li r3, -1 + b _OSAddAtomic /* SInt32 OSIncrementAtomic(SInt32 * value) */ - ENTRY _OSIncrementAtomic - mr r4, r3 - li r3, 1 + .align 5 + + ENTRY _OSIncrementAtomic + mr r4, r3 + li r3, 1 /* SInt32 OSAddAtomic(SInt32 amount, SInt32 * value) */ - ENTRY _OSAddAtomic + ENTRY _OSAddAtomic - mr r5,r3 /* Save the increment */ + mr r5,r3 /* Save the increment */ .L_AAretry: - lwarx r3, 0, r4 /* Grab the area value */ - add r6, r3, r5 /* Add the value */ - stwcx. r6, 0, r4 /* Try to save the new value */ - bne- .L_AAretry /* Didn't get it, try again... */ - blr /* Return the original value */ + lwarx r3, 0, r4 /* Grab the area value */ + add r6, r3, r5 /* Add the value */ + stwcx. r6, 0, r4 /* Try to save the new value */ + bne- .L_AAretry /* Didn't get it, try again... */ + blr /* Return the original value */ diff --git a/libsa/bootstrap.cpp b/libsa/bootstrap.cpp index 12936fdcc..2cf8df3be 100644 --- a/libsa/bootstrap.cpp +++ b/libsa/bootstrap.cpp @@ -26,15 +26,12 @@ #include #include -#include +#include #include -extern "C" { #include -}; #include "kld_patch.h" -extern "C" { /***** * This function is used by IOCatalogue to load a kernel * extension. libsa initially sets it to be a function @@ -54,7 +51,6 @@ extern void (*remove_startup_extension_function)(const char * name); * about loading and matching drivers. 
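Two behavioural points from the OSAtomic.s changes above matter to callers: OSCompareAndSwap is now an alias for hw_compare_and_store, and OSAddAtomic still returns the value the counter held before the addition (the comment notes the osfmk hw_atomic routines could not be reused because they return the result of the addition instead). A hedged usage sketch, assuming the C declarations in <libkern/OSAtomic.h> match the signatures quoted in the assembly comments:

    #include <libkern/OSAtomic.h>

    // OSAddAtomic returns the *old* value, so derive the new one locally.
    static SInt32 bumpAndGet(SInt32 *counter)
    {
        return OSAddAtomic(1, counter) + 1;
    }

    // Classic compare-and-swap retry loop: atomically raise *addr to
    // 'candidate' if it is larger, tolerating concurrent updates.
    static void atomicStoreMax(UInt32 *addr, UInt32 candidate)
    {
        UInt32 old;
        do {
            old = *addr;
            if (old >= candidate) return;
        } while (!OSCompareAndSwap(old, candidate, addr));
    }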
*/ extern int kernelLinkerPresent; -}; class KLDBootstrap { @@ -72,6 +68,8 @@ static KLDBootstrap bootstrap_obj; */ KLDBootstrap::KLDBootstrap() { + malloc_init(); + kmod_load_function = &load_kernel_extension; record_startup_extensions_function = &recordStartupExtensions; diff --git a/libsa/catalogue.cpp b/libsa/catalogue.cpp index 35c88255f..82ae948b0 100644 --- a/libsa/catalogue.cpp +++ b/libsa/catalogue.cpp @@ -36,6 +36,7 @@ extern "C" { #include #include #include +#include }; #include @@ -49,30 +50,37 @@ extern kern_return_t host_info(host_t host, host_info_t info, mach_msg_type_number_t *count); extern int check_cpu_subtype(cpu_subtype_t cpu_subtype); +extern struct section * +getsectbyname( + char *segname, + char *sectname); +extern struct segment_command * +getsegbyname(char *seg_name); }; - #define LOG_DELAY() +#if 0 #define VTYELLOW "\033[33m" #define VTRESET "\033[0m" - +#else +#define VTYELLOW "" +#define VTRESET "" +#endif /********************************************************************* *********************************************************************/ static OSDictionary * gStartupExtensions = 0; static OSArray * gBootLoaderObjects = 0; +extern OSArray * gIOPrelinkedModules; OSDictionary * getStartupExtensions(void) { if (gStartupExtensions) { return gStartupExtensions; } gStartupExtensions = OSDictionary::withCapacity(1); - if (!gStartupExtensions) { - IOLog("Error: Couldn't allocate " - "startup extensions dictionary.\n"); - LOG_DELAY(); - } + assert (gStartupExtensions); + return gStartupExtensions; } @@ -87,60 +95,307 @@ OSArray * getBootLoaderObjects(void) { return gBootLoaderObjects; } gBootLoaderObjects = OSArray::withCapacity(1); - if (! gBootLoaderObjects) { - IOLog("Error: Couldn't allocate " - "bootstrap objects array.\n"); - LOG_DELAY(); - } + assert (gBootLoaderObjects); + return gBootLoaderObjects; } - /********************************************************************* * This function checks that a driver dict has all the required * entries and does a little bit of value checking too. +* +* index is nonnegative if the dict is an entry from an mkext +* archive.
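Read together, the checks in the validation routine that follows mean the smallest info dictionary that passes carries just three keys. A sketch of building one with the libkern collection classes (identifier and version invented for illustration; setStr is a helper defined here, not part of the patch):

    #include <libkern/c++/OSDictionary.h>
    #include <libkern/c++/OSString.h>

    static bool setStr(OSDictionary *d, const char *key, const char *val)
    {
        OSString *s = OSString::withCString(val);
        if (!s) return false;
        bool ok = d->setObject(key, s);  // the dictionary retains the value
        s->release();
        return ok;
    }

    // A CFBundleIdentifier shorter than KMOD_MAX_NAME, a package type of
    // "KEXT", and a parseable CFBundleVersion satisfy the required checks.
    static OSDictionary *minimalValidExtension(void)
    {
        OSDictionary *dict = OSDictionary::withCapacity(3);
        if (!dict) return 0;
        if (!setStr(dict, "CFBundleIdentifier", "com.example.driver") ||
            !setStr(dict, "CFBundlePackageType", "KEXT") ||
            !setStr(dict, "CFBundleVersion", "1.0.0")) {
            dict->release();
            return 0;
        }
        return dict;
    }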
*********************************************************************/ -bool validateExtensionDict(OSDictionary * extension) { +bool validateExtensionDict(OSDictionary * extension, int index) { bool result = true; - OSString * name; // do not release - OSString * stringValue; // do not release - UInt32 vers; + bool not_a_dict = false; + bool id_missing = false; + bool is_kernel_resource = false; + bool has_executable = false; + OSString * bundleIdentifier = NULL; // do not release + OSObject * rawValue = NULL; // do not release + OSString * stringValue = NULL; // do not release + OSBoolean * booleanValue = NULL; // do not release + OSDictionary * personalities = NULL; // do not release + OSDictionary * libraries = NULL; // do not release + OSCollectionIterator * keyIterator = NULL; // must release + OSString * key = NULL; // do not release + VERS_version vers; + VERS_version compatible_vers; - name = OSDynamicCast(OSString, + // Info dict is a dictionary + if (!OSDynamicCast(OSDictionary, extension)) { + not_a_dict = true; + result = false; + goto finish; + } + + // CFBundleIdentifier is a string - REQUIRED + bundleIdentifier = OSDynamicCast(OSString, extension->getObject("CFBundleIdentifier")); - if (!name) { - IOLog(VTYELLOW "Extension has no \"CFBundleIdentifier\" property.\n" - VTRESET); - LOG_DELAY(); + if (!bundleIdentifier) { + id_missing = true; + result = false; + goto finish; + } + + // Length of CFBundleIdentifier is not >= KMOD_MAX_NAME + if (bundleIdentifier->getLength() >= KMOD_MAX_NAME) { + result = false; + goto finish; + } + + // CFBundlePackageType is "KEXT" - REQUIRED + stringValue = OSDynamicCast(OSString, + extension->getObject("CFBundlePackageType")); + if (!stringValue) { + result = false; + goto finish; + } + if (!stringValue->isEqualTo("KEXT")) { result = false; goto finish; } + // CFBundleVersion is a string - REQUIRED stringValue = OSDynamicCast(OSString, extension->getObject("CFBundleVersion")); if (!stringValue) { - IOLog(VTYELLOW "Extension \"%s\" has no \"CFBundleVersion\" " - "property.\n" VTRESET, - name->getCStringNoCopy()); - LOG_DELAY(); result = false; goto finish; } - if (!VERS_parse_string(stringValue->getCStringNoCopy(), - &vers)) { - IOLog(VTYELLOW "Extension \"%s\" has an invalid " - "\"CFBundleVersion\" property.\n" VTRESET, - name->getCStringNoCopy()); - LOG_DELAY(); + // CFBundleVersion is of valid form + vers = VERS_parse_string(stringValue->getCStringNoCopy()); + if (vers < 0) { + result = false; + goto finish; + } + + // OSBundleCompatibleVersion is a string - OPTIONAL + rawValue = extension->getObject("OSBundleCompatibleVersion"); + if (rawValue) { + stringValue = OSDynamicCast(OSString, rawValue); + if (!stringValue) { + result = false; + goto finish; + } + + // OSBundleCompatibleVersion is of valid form + compatible_vers = VERS_parse_string(stringValue->getCStringNoCopy()); + if (compatible_vers < 0) { + result = false; + goto finish; + } + + // OSBundleCompatibleVersion <= CFBundleVersion + if (compatible_vers > vers) { + result = false; + goto finish; + } + } + + // CFBundleExecutable is a string - OPTIONAL + rawValue = extension->getObject("CFBundleExecutable"); + if (rawValue) { + stringValue = OSDynamicCast(OSString, rawValue); + if (!stringValue || stringValue->getLength() == 0) { + result = false; + goto finish; + } + has_executable = true; + } + + // OSKernelResource is a boolean value - OPTIONAL + rawValue = extension->getObject("OSKernelResource"); + if (rawValue) { + booleanValue = OSDynamicCast(OSBoolean, rawValue); + if 
(!booleanValue) { + result = false; + goto finish; + } + is_kernel_resource = booleanValue->isTrue(); + } + + // IOKitPersonalities is a dictionary - OPTIONAL + rawValue = extension->getObject("IOKitPersonalities"); + if (rawValue) { + personalities = OSDynamicCast(OSDictionary, rawValue); + if (!personalities) { + result = false; + goto finish; + } + + keyIterator = OSCollectionIterator::withCollection(personalities); + if (!keyIterator) { + IOLog("Error: Failed to allocate iterator for personalities.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + while ((key = OSDynamicCast(OSString, keyIterator->getNextObject()))) { + OSDictionary * personality = NULL; // do not release + + // Each personality is a dictionary + personality = OSDynamicCast(OSDictionary, + personalities->getObject(key)); + if (!personality) { + result = false; + goto finish; + } + + // IOClass exists as a string - REQUIRED + if (!OSDynamicCast(OSString, personality->getObject("IOClass"))) { + result = false; + goto finish; + } + + // IOProviderClass exists as a string - REQUIRED + if (!OSDynamicCast(OSString, + personality->getObject("IOProviderClass"))) { + + result = false; + goto finish; + } + + // CFBundleIdentifier is a string - OPTIONAL - INSERT IF ABSENT! + rawValue = personality->getObject("CFBundleIdentifier"); + if (!rawValue) { + personality->setObject("CFBundleIdentifier", bundleIdentifier); + } else { + OSString * personalityID = NULL; // do not release + personalityID = OSDynamicCast(OSString, rawValue); + if (!personalityID) { + result = false; + goto finish; + } else { + // Length of CFBundleIdentifier is not >= KMOD_MAX_NAME + if (personalityID->getLength() >= KMOD_MAX_NAME) { + result = false; + goto finish; + } + } + } + + // IOKitDebug is a number - OPTIONAL + rawValue = personality->getObject("IOKitDebug"); + if (rawValue && !OSDynamicCast(OSNumber, rawValue)) { + result = false; + goto finish; + } + } + + keyIterator->release(); + keyIterator = NULL; + } + + + // OSBundleLibraries is a dictionary - REQUIRED if + // not kernel resource & has executable + // + rawValue = extension->getObject("OSBundleLibraries"); + if (!rawValue && !is_kernel_resource && has_executable) { result = false; goto finish; } + if (rawValue) { + libraries = OSDynamicCast(OSDictionary, rawValue); + if (!libraries) { + result = false; + goto finish; + } + + keyIterator = OSCollectionIterator::withCollection(libraries); + if (!keyIterator) { + IOLog("Error: Failed to allocate iterator for libraries.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + while ((key = OSDynamicCast(OSString, + keyIterator->getNextObject()))) { + + OSString * libraryVersion = NULL; // do not release + + // Each key's length is not >= KMOD_MAX_NAME + if (key->getLength() >= KMOD_MAX_NAME) { + result = false; + goto finish; + } + + libraryVersion = OSDynamicCast(OSString, + libraries->getObject(key)); + if (!libraryVersion) { + result = false; + goto finish; + } + + // Each value is a valid version string + vers = VERS_parse_string(libraryVersion->getCStringNoCopy()); + if (vers < 0) { + result = false; + goto finish; + } + } + + keyIterator->release(); + keyIterator = NULL; + } + + // OSBundleRequired is a legal value - *not* required at boot time + // so we can do install CDs and the like with mkext files containing + // all normally-used drivers. 
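(Aside: every version check in this validator funnels through the reworked VERS_parse_string(), which this patch changes from the old boolean, out-parameter form into a call that returns a signed VERS_version, negative on parse failure. A minimal sketch of the new pattern, assuming the declarations from libsa's vers_rsrc.h; the version literals are illustrative only:

    VERS_version vers, compatible_vers;

    vers = VERS_parse_string("1.2.3");          /* e.g. CFBundleVersion */
    compatible_vers = VERS_parse_string("1.0"); /* e.g. OSBundleCompatibleVersion */

    if (vers < 0 || compatible_vers < 0) {
        /* reject: malformed version string */
    } else if (compatible_vers > vers) {
        /* reject: a kext cannot claim compatibility past its own version */
    }

compareExtensionVersions() below and verifyCompatibility() in kext.cpp follow the same convention. The OSBundleRequired check then continues:)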
+ rawValue = extension->getObject("OSBundleRequired"); + if (rawValue) { + stringValue = OSDynamicCast(OSString, rawValue); + if (!stringValue) { + result = false; + goto finish; + } + if (!stringValue->isEqualTo("Root") && + !stringValue->isEqualTo("Local-Root") && + !stringValue->isEqualTo("Network-Root") && + !stringValue->isEqualTo("Safe Boot") && + !stringValue->isEqualTo("Console")) { + + result = false; + goto finish; + } + + } + finish: - // FIXME: Make return real result after kext conversion - return true; + if (keyIterator) keyIterator->release(); + + if (!result) { + if (not_a_dict) { + if (index > -1) { + IOLog(VTYELLOW "mkext entry %d:." VTRESET, index); + } else { + IOLog(VTYELLOW "kernel extension" VTRESET); + } + IOLog(VTYELLOW "info dictionary isn't a dictionary\n" + VTRESET); + } else if (id_missing) { + if (index > -1) { + IOLog(VTYELLOW "mkext entry %d:." VTRESET, index); + } else { + IOLog(VTYELLOW "kernel extension" VTRESET); + } + IOLog(VTYELLOW "\"CFBundleIdentifier\" property is " + "missing or not a string\n" + VTRESET); + } else { + IOLog(VTYELLOW "kernel extension \"%s\": info dictionary is invalid\n" + VTRESET, bundleIdentifier->getCStringNoCopy()); + } + LOG_DELAY(); + } return result; } @@ -160,8 +415,8 @@ OSDictionary * compareExtensionVersions( OSString * candidateName = NULL; OSString * incumbentVersionString = NULL; OSString * candidateVersionString = NULL; - UInt32 incumbent_vers = 0; - UInt32 candidate_vers = 0; + VERS_version incumbent_vers = 0; + VERS_version candidate_vers = 0; incumbentPlist = OSDynamicCast(OSDictionary, incumbent->getObject("plist")); @@ -207,8 +462,8 @@ OSDictionary * compareExtensionVersions( goto finish; } - if (!VERS_parse_string(incumbentVersionString->getCStringNoCopy(), - &incumbent_vers)) { + incumbent_vers = VERS_parse_string(incumbentVersionString->getCStringNoCopy()); + if (incumbent_vers < 0) { IOLog(VTYELLOW "Error parsing version string for extension %s (%s)\n" VTRESET, @@ -219,8 +474,8 @@ OSDictionary * compareExtensionVersions( goto finish; } - if (!VERS_parse_string(candidateVersionString->getCStringNoCopy(), - &candidate_vers)) { + candidate_vers = VERS_parse_string(candidateVersionString->getCStringNoCopy()); + if (candidate_vers < 0) { IOLog(VTYELLOW "Error parsing version string for extension %s (%s)\n" VTRESET, @@ -503,10 +758,8 @@ OSDictionary * readExtension(OSDictionary * propertyDict, goto finish; } - if (!validateExtensionDict(driverPlist)) { - IOLog("Error: Failed to validate property list " - "for device tree entry \"%s\".\n", memory_map_name); - LOG_DELAY(); + if (!validateExtensionDict(driverPlist, -1)) { + // validateExtensionsDict() logs an error error = 1; goto finish; } @@ -542,7 +795,7 @@ OSDictionary * readExtension(OSDictionary * propertyDict, finish: if (loaded_kmod) { - kfree(loaded_kmod, sizeof(kmod_info_t)); + kfree((unsigned int)loaded_kmod, sizeof(kmod_info_t)); } // do not release bootxDriverDataObject @@ -758,11 +1011,15 @@ bool extractExtensionsFromArchive(MemoryMapFileInfo * mkext_file_info, i++) { if (loaded_kmod) { - kfree(loaded_kmod, sizeof(kmod_info_t)); + kfree((unsigned int)loaded_kmod, sizeof(kmod_info_t)); loaded_kmod = 0; } if (driverPlistDataObject) { + kmem_free(kernel_map, + (unsigned int)driverPlistDataObject->getBytesNoCopy(), + driverPlistDataObject->getLength()); + driverPlistDataObject->release(); driverPlistDataObject = NULL; } @@ -818,10 +1075,8 @@ bool extractExtensionsFromArchive(MemoryMapFileInfo * mkext_file_info, continue; } - if 
(!validateExtensionDict(driverPlist)) { - IOLog("Error: Failed to validate property list " - "for multikext archive entry %d.\n", i); - LOG_DELAY(); + if (!validateExtensionDict(driverPlist, i)) { + // validateExtensionsDict() logs an error continue; } @@ -868,10 +1123,10 @@ bool extractExtensionsFromArchive(MemoryMapFileInfo * mkext_file_info, * compressed binary module, if there is one. If all four fields * of the module entry are zero, there isn't one. */ - if (OSSwapBigToHostInt32(module_file->offset) || + if (!(loaded_kmod && loaded_kmod->address) && (OSSwapBigToHostInt32(module_file->offset) || OSSwapBigToHostInt32(module_file->compsize) || OSSwapBigToHostInt32(module_file->realsize) || - OSSwapBigToHostInt32(module_file->modifiedsecs)) { + OSSwapBigToHostInt32(module_file->modifiedsecs))) { moduleInfo = OSData::withCapacity(sizeof(MkextEntryInfo)); if (!moduleInfo) { @@ -934,8 +1189,13 @@ bool extractExtensionsFromArchive(MemoryMapFileInfo * mkext_file_info, finish: - if (loaded_kmod) kfree(loaded_kmod, sizeof(kmod_info_t)); - if (driverPlistDataObject) driverPlistDataObject->release(); + if (loaded_kmod) kfree((unsigned int)loaded_kmod, sizeof(kmod_info_t)); + if (driverPlistDataObject) { + kmem_free(kernel_map, + (unsigned int)driverPlistDataObject->getBytesNoCopy(), + driverPlistDataObject->getLength()); + driverPlistDataObject->release(); + } if (driverPlist) driverPlist->release(); if (driverCode) driverCode->release(); if (moduleInfo) moduleInfo->release(); @@ -1082,7 +1342,6 @@ bool addExtensionsFromArchive(OSData * mkextDataObject) { OSDictionary * startupExtensions = NULL; // don't release OSArray * bootLoaderObjects = NULL; // don't release - OSData * localMkextDataObject = NULL; // don't release OSDictionary * extensions = NULL; // must release MemoryMapFileInfo mkext_file_info; OSCollectionIterator * keyIterator = NULL; // must release @@ -1114,25 +1373,12 @@ bool addExtensionsFromArchive(OSData * mkextDataObject) { goto finish; } - /* The mkext we've been handed (or the data it references) can go away, - * so we need to make a local copy to keep around as long as it might - * be needed. - */ - localMkextDataObject = OSData::withData(mkextDataObject); - if (!localMkextDataObject) { - IOLog("Error: Couldn't copy extension archive.\n"); - LOG_DELAY(); - result = false; - goto finish; - } - - mkext_file_info.paddr = (UInt32)localMkextDataObject->getBytesNoCopy(); - mkext_file_info.length = localMkextDataObject->getLength(); + mkext_file_info.paddr = (UInt32)mkextDataObject->getBytesNoCopy(); + mkext_file_info.length = mkextDataObject->getLength(); /* Save the local mkext data object so that we can deallocate it later. */ - bootLoaderObjects->setObject(localMkextDataObject); - localMkextDataObject->release(); + bootLoaderObjects->setObject(mkextDataObject); result = extractExtensionsFromArchive(&mkext_file_info, extensions); if (!result) { @@ -1201,6 +1447,7 @@ finish: * a single extension is not considered fatal, and this function * will simply skip the problematic extension to try the next one. 
*********************************************************************/ + bool recordStartupExtensions(void) { bool result = true; OSDictionary * startupExtensions = NULL; // must release @@ -1214,8 +1461,9 @@ bool recordStartupExtensions(void) { OSDictionary * newDriverDict = NULL; // must release OSDictionary * driverPlist = NULL; // don't release - IOLog("Recording startup extensions.\n"); - LOG_DELAY(); + struct section * infosect; + struct section * symsect; + unsigned int prelinkedCount = 0; existingExtensions = getStartupExtensions(); if (!existingExtensions) { @@ -1234,6 +1482,92 @@ bool recordStartupExtensions(void) { goto finish; } + // -- + // add any prelinked modules as startup extensions + + infosect = getsectbyname("__PRELINK", "__info"); + symsect = getsectbyname("__PRELINK", "__symtab"); + if (infosect && infosect->addr && infosect->size + && symsect && symsect->addr && symsect->size) do + { + gIOPrelinkedModules = OSDynamicCast(OSArray, + OSUnserializeXML((const char *) infosect->addr, NULL)); + + if (!gIOPrelinkedModules) + break; + for( unsigned int idx = 0; + (propertyDict = OSDynamicCast(OSDictionary, gIOPrelinkedModules->getObject(idx))); + idx++) + { + enum { kPrelinkReservedCount = 4 }; + + /* Get the extension's module name. This is used to record + * the extension. Do *not* release the moduleName. + */ + OSString * moduleName = OSDynamicCast(OSString, + propertyDict->getObject("CFBundleIdentifier")); + if (!moduleName) { + IOLog("Error: Prelinked module entry has " + "no \"CFBundleIdentifier\" property.\n"); + LOG_DELAY(); + continue; + } + + /* Add the kext, & its plist. + */ + newDriverDict = OSDictionary::withCapacity(4); + assert(newDriverDict); + newDriverDict->setObject("plist", propertyDict); + startupExtensions->setObject(moduleName, newDriverDict); + newDriverDict->release(); + + /* Add the code if present. + */ + OSData * data = OSDynamicCast(OSData, propertyDict->getObject("OSBundlePrelink")); + if (data) { + if (data->getLength() < (kPrelinkReservedCount * sizeof(UInt32))) { + IOLog("Error: Prelinked module entry has " + "invalid \"OSBundlePrelink\" property.\n"); + LOG_DELAY(); + continue; + } + UInt32 * prelink; + prelink = (UInt32 *) data->getBytesNoCopy(); + kmod_info_t * kmod_info = (kmod_info_t *) OSReadBigInt32(prelink, 0); + // end of "file" is end of symbol sect + data = OSData::withBytesNoCopy((void *) kmod_info->address, + symsect->addr + symsect->size - kmod_info->address); + newDriverDict->setObject("code", data); + data->release(); + prelinkedCount++; + continue; + } + /* Add the symbols if present. 
+ */ + OSNumber * num = OSDynamicCast(OSNumber, propertyDict->getObject("OSBundlePrelinkSymbols")); + if (num) { + UInt32 offset = num->unsigned32BitValue(); + data = OSData::withBytesNoCopy((void *) (symsect->addr + offset), symsect->size - offset); + newDriverDict->setObject("code", data); + data->release(); + prelinkedCount++; + continue; + } + } + if (gIOPrelinkedModules) + IOLog("%d prelinked modules\n", prelinkedCount); + + // free __info + vm_offset_t + virt = ml_static_ptovirt(infosect->addr); + if( virt) { + ml_static_mfree(virt, infosect->size); + } + newDriverDict = NULL; + } + while (false); + // -- + bootxMemoryMap = IORegistryEntry::fromPath( "/chosen/memory-map", // path @@ -1438,7 +1772,7 @@ finish: keyIterator->release(); keyIterator = 0; } -#endif DEBUG +#endif /* DEBUG */ } if (newDriverDict) newDriverDict->release(); diff --git a/libsa/conf/MASTER b/libsa/conf/MASTER index 62e57f7ec..09d80d909 100644 --- a/libsa/conf/MASTER +++ b/libsa/conf/MASTER @@ -52,4 +52,4 @@ ident LIBSA options KDEBUG # kernel tracing # - +options GPROF # kernel profiling # diff --git a/libsa/conf/MASTER.i386 b/libsa/conf/MASTER.i386 index 83a06f878..b61a6e538 100644 --- a/libsa/conf/MASTER.i386 +++ b/libsa/conf/MASTER.i386 @@ -1,7 +1,7 @@ ###################################################################### # # RELEASE = [intel mach libkerncpp] -# PROFILE = [intel mach libkerncpp profile] +# PROFILE = [RELEASE profile] # DEBUG = [intel mach libkerncpp debug] # ###################################################################### diff --git a/libsa/conf/MASTER.ppc b/libsa/conf/MASTER.ppc index a2764000e..5c6b53d20 100644 --- a/libsa/conf/MASTER.ppc +++ b/libsa/conf/MASTER.ppc @@ -5,7 +5,7 @@ # -------- ---- -------- --------------- # # RELEASE = [ppc mach libkerncpp] -# PROFILE = [ppc mach libkerncpp profile] +# PROFILE = [RELEASE profile] # DEBUG = [ppc mach libkerncpp debug] # RELEASE_TRACE = [ RELEASE kdebug ] # DEBUG_TRACE = [ DEBUG kdebug ] diff --git a/libsa/conf/Makefile b/libsa/conf/Makefile index ffeab35e7..fc29125a6 100644 --- a/libsa/conf/Makefile +++ b/libsa/conf/Makefile @@ -14,47 +14,48 @@ COMP_SUBDIRS = INST_SUBDIRS = -ifndef LIBKERN_KERNEL_CONFIG -export LIBKERN_KERNEL_CONFIG = $(KERNEL_CONFIG) +ifndef LIBSA_KERNEL_CONFIG +export LIBSA_KERNEL_CONFIG = $(KERNEL_CONFIG) endif -COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) +export COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf: make build_setup -$(COMPOBJROOT)/$(LIBKERN_KERNEL_CONFIG)/Makefile : $(SOURCE)/MASTER \ +$(COMPOBJROOT)/$(LIBSA_KERNEL_CONFIG)/Makefile : $(SOURCE)/MASTER \ $(SOURCE)/MASTER.$(ARCH_CONFIG_LC) \ $(SOURCE)/Makefile.template \ $(SOURCE)/Makefile.$(ARCH_CONFIG_LC) \ $(SOURCE)/files \ $(SOURCE)/files.$(ARCH_CONFIG_LC) \ $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf - @echo "Running doconf for $(LIBKERN_KERNEL_CONFIG)"; + @echo "Running doconf for $(LIBSA_KERNEL_CONFIG)"; (doconf_target=$(addsuffix /conf, $(TARGET)); \ echo $${doconf_target};\ $(MKDIR) $${doconf_target}; \ cd $${doconf_target}; \ rm -f $(notdir $?); \ cp $? 
$${doconf_target}; \ - $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf -c -cpu $(ARCH_CONFIG_LC) -d $(TARGET)/$(LIBKERN_KERNEL_CONFIG) $(LIBKERN_KERNEL_CONFIG); \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf -c -cpu $(ARCH_CONFIG_LC) -d $(TARGET)/$(LIBSA_KERNEL_CONFIG) $(LIBSA_KERNEL_CONFIG); \ ); -.ORDER: $(COMPOBJROOT)/$(LIBKERN_KERNEL_CONFIG)/Makefile +.ORDER: $(COMPOBJROOT)/$(LIBSA_KERNEL_CONFIG)/Makefile do_setup_conf: $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf \ - $(COMPOBJROOT)/$(LIBKERN_KERNEL_CONFIG)/Makefile + $(COMPOBJROOT)/$(LIBSA_KERNEL_CONFIG)/Makefile do_all: do_setup_conf - @echo "[ $(SOURCE) ] Starting do_all $(COMPONENT) $(LIBKERN_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + @echo "[ $(SOURCE) ] Starting do_all $(COMPONENT) $(LIBSA_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ next_source=$(subst conf/,,$(SOURCE)); \ - ${MAKE} -C $(COMPOBJROOT)/$(LIBKERN_KERNEL_CONFIG) \ - MAKEFILES=$(TARGET)/$(LIBKERN_KERNEL_CONFIG)/Makefile \ + ${MAKE} -C $(COMPOBJROOT)/$(LIBSA_KERNEL_CONFIG) \ + MAKEFILES=$(TARGET)/$(LIBSA_KERNEL_CONFIG)/Makefile \ SOURCE=$${next_source} \ TARGET=$(TARGET) \ INCL_MAKEDEP=FALSE \ + KERNEL_CONFIG=$(LIBSA_KERNEL_CONFIG) \ build_all; \ - echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(LIBKERN_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(LIBSA_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; do_build_all: do_all diff --git a/libsa/conf/Makefile.template b/libsa/conf/Makefile.template index 816da3ef1..a8d3252d6 100644 --- a/libsa/conf/Makefile.template +++ b/libsa/conf/Makefile.template @@ -27,7 +27,7 @@ include $(MakeInc_def) # # XXX: CFLAGS # -CFLAGS+= -DKERNEL -DLIBKERN_KERNEL_PRIVATE \ +CFLAGS+= -DKERNEL -DLIBSA_KERNEL_PRIVATE \ -Wall -Wno-four-char-constants -fno-common SFLAGS+= -DKERNEL @@ -87,10 +87,10 @@ LDOBJS = $(OBJS) $(COMPONENT).o: $(LDOBJS) @echo "creating $(COMPONENT).o" $(RM) $(RMFLAGS) vers.c - $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/newvers \ + $(COMPOBJROOT)/newvers \ `$(CAT) ${VERSION_FILES}` ${COPYRIGHT_FILES} ${KCC} $(CFLAGS) $(INCLUDES) -c vers.c - @echo [ updating $(COMPONENT).o ${LIBKERN_KERNEL_CONFIG} ] + @echo [ updating $(COMPONENT).o ${LIBSA_KERNEL_CONFIG} ] $(LD) $(LDFLAGS_COMPONENT) -o $(COMPONENT).o ${LDOBJS} vers.o /usr/local/lib/libkld.a $(SEG_HACK) __KLD $(COMPONENT).o -o $(COMPONENT)_kld.o mv $(COMPONENT)_kld.o $(COMPONENT).o diff --git a/libsa/conf/files b/libsa/conf/files index 5bb1c0bad..a6c36b9ce 100644 --- a/libsa/conf/files +++ b/libsa/conf/files @@ -2,14 +2,17 @@ # OPTIONS/libkerncpp optional libkerncpp # OPTIONS/kdebug optional kdebug +# OPTIONS/gprof optional gprof # libsa libsa/bootstrap.cpp standard libsa/catalogue.cpp standard libsa/c++rem3.c standard +libsa/dgraph.c standard libsa/kld_patch.c standard -libsa/kmod.cpp standard +libsa/kext.cpp standard +libsa/load.c standard libsa/mach.c standard libsa/misc.c standard libsa/mkext.c standard diff --git a/libsa/conf/version.major b/libsa/conf/version.major index 1e8b31496..7f8f011eb 100644 --- a/libsa/conf/version.major +++ b/libsa/conf/version.major @@ -1 +1 @@ -6 +7 diff --git a/libsa/conf/version.minor b/libsa/conf/version.minor index 45a4fb75d..573541ac9 100644 --- a/libsa/conf/version.minor +++ b/libsa/conf/version.minor @@ -1 +1 @@ -8 +0 diff --git a/libsa/conf/version.variant b/libsa/conf/version.variant index e69de29bb..573541ac9 100644 --- a/libsa/conf/version.variant +++ b/libsa/conf/version.variant @@ -0,0 +1 @@ +0 diff --git 
a/libsa/dgraph.c b/libsa/dgraph.c new file mode 100644 index 000000000..50abd38cb --- /dev/null +++ b/libsa/dgraph.c @@ -0,0 +1,747 @@ +#ifdef KERNEL +#include +#else +#include +#include +#include +#include +#include +#include + +#include "KXKext.h" +#include "vers_rsrc.h" +#endif /* KERNEL */ + +#include "dgraph.h" +#include "load.h" + + +static void __dgraph_entry_free(dgraph_entry_t * entry); + +#ifdef KERNEL +/******************************************************************************* +* +*******************************************************************************/ +char * strdup(const char * string) +{ + char * dup = 0; + unsigned int length; + + length = strlen(string); + dup = (char *)malloc((1+length) * sizeof(char)); + if (!dup) { + return NULL; + } + strcpy(dup, string); + return dup; +} + +#endif /* KERNEL */ + +/******************************************************************************* +* +*******************************************************************************/ +dgraph_error_t dgraph_init(dgraph_t * dgraph) +{ + bzero(dgraph, sizeof(dgraph_t)); + + dgraph->capacity = (5); // pulled from a hat + + /* Make sure list is big enough & graph has a good start size. + */ + dgraph->graph = (dgraph_entry_t **)malloc( + dgraph->capacity * sizeof(dgraph_entry_t *)); + + if (!dgraph->graph) { + return dgraph_error; + } + + return dgraph_valid; +} + +#ifndef KERNEL +/******************************************************************************* +* +*******************************************************************************/ +dgraph_error_t dgraph_init_with_arglist( + dgraph_t * dgraph, + int expect_addresses, + const char * dependency_delimiter, + const char * kernel_dependency_delimiter, + int argc, + char * argv[]) +{ + dgraph_error_t result = dgraph_valid; + unsigned int i; + int found_zero_load_address = 0; + int found_nonzero_load_address = 0; + dgraph_entry_t * current_dependent = NULL; + char kernel_dependencies = 0; + + result = dgraph_init(dgraph); + if (result != dgraph_valid) { + return result; + } + + for (i = 0; i < argc; i++) { + vm_address_t load_address = 0; + + if (0 == strcmp(argv[i], dependency_delimiter)) { + kernel_dependencies = 0; + current_dependent = NULL; + continue; + } else if (0 == strcmp(argv[i], kernel_dependency_delimiter)) { + kernel_dependencies = 1; + current_dependent = NULL; + continue; + } + + if (expect_addresses) { + char * address = rindex(argv[i], '@'); + if (address) { + *address++ = 0; // snip the address from the filename + load_address = strtoul(address, NULL, 0); + } + } + + if (!current_dependent) { + current_dependent = dgraph_add_dependent(dgraph, argv[i], + /* expected kmod name */ NULL, /* expected vers */ 0, + load_address, 0); + if (!current_dependent) { + return dgraph_error; + } + } else { + if (!dgraph_add_dependency(dgraph, current_dependent, argv[i], + /* expected kmod name */ NULL, /* expected vers */ 0, + load_address, kernel_dependencies)) { + + return dgraph_error; + } + } + } + + dgraph->root = dgraph_find_root(dgraph); + dgraph_establish_load_order(dgraph); + + if (!dgraph->root) { + kload_log_error("dependency graph has no root" KNL); + return dgraph_invalid; + } + + if (dgraph->root->is_kernel_component && !dgraph->root->is_symbol_set) { + kload_log_error("dependency graph root is a kernel component" KNL); + return dgraph_invalid; + } + + for (i = 0; i < dgraph->length; i++) { + if (dgraph->graph[i]->loaded_address == 0) { + found_zero_load_address = 1; + } else { + found_nonzero_load_address = 1; + } 
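(Aside: the argv walk above is only one way to populate a dgraph; the same calls can be driven by hand. A minimal sketch against the non-KERNEL signatures declared in dgraph.h, using illustrative kext names and versions:

    #include "dgraph.h"

    static void build_example_graph(void)
    {
        dgraph_t g;
        dgraph_entry_t * dependent;

        if (dgraph_init(&g) != dgraph_valid) {
            return;                            /* malloc failure */
        }
        /* Register the dependent first, then one direct dependency. */
        dependent = dgraph_add_dependent(&g, "foo.kext",
            "com.example.foo", "1.0",
            /* load_address */ 0, /* is_kernel_component */ 0);
        if (dependent) {
            dgraph_add_dependency(&g, dependent, "bar.kext",
                "com.example.bar", "1.0", 0, 0);
        }
        g.root = dgraph_find_root(&g);         /* the root must be unique */
        dgraph_establish_load_order(&g);       /* dependencies sort first */
        dgraph_free(&g, 0 /* g itself is not heap-allocated */);
    }

The consistency scan over the parsed load addresses resumes:)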
+ if ( (i > 0) && + (found_zero_load_address && found_nonzero_load_address)) { + + kload_log_error( + "load addresses must be specified for all module files" KNL); + return dgraph_invalid; + } + } + + return dgraph_valid; +} +#endif /* not KERNEL */ + +/******************************************************************************* +* +*******************************************************************************/ +static void __dgraph_entry_free(dgraph_entry_t * entry) +{ + if (entry->name) { + free(entry->name); + entry->name = NULL; + } + if (entry->expected_kmod_name) { + free(entry->expected_kmod_name); + entry->expected_kmod_name = NULL; + } + if (entry->expected_kmod_vers) { + free(entry->expected_kmod_vers); + entry->expected_kmod_vers = NULL; + } + if (entry->dependencies) { + free(entry->dependencies); + entry->dependencies = NULL; + } + if (entry->symbols_malloc) { + free((void *) entry->symbols_malloc); + entry->symbols_malloc = NULL; + } + free(entry); + return; +} + +/******************************************************************************* +* +*******************************************************************************/ +void dgraph_free( + dgraph_t * dgraph, + int free_graph) +{ + unsigned int entry_index; + + if (!dgraph) { + return; + } + + for (entry_index = 0; entry_index < dgraph->length; entry_index++) { + dgraph_entry_t * current = dgraph->graph[entry_index]; + __dgraph_entry_free(current); + } + + if (dgraph->graph) { + free(dgraph->graph); + dgraph->graph = NULL; + } + + if (dgraph->load_order) { + free(dgraph->load_order); + dgraph->load_order = NULL; + } + + if (free_graph && dgraph) { + free(dgraph); + } + + return; +} + + +/******************************************************************************* +* +*******************************************************************************/ +dgraph_entry_t * dgraph_find_root(dgraph_t * dgraph) { + dgraph_entry_t * root = NULL; + dgraph_entry_t * candidate = NULL; + unsigned int candidate_index; + unsigned int scan_index; + unsigned int dep_index; + + + /* Scan each entry in the graph for one that isn't in any other entry's + * dependencies. + */ + for (candidate_index = 0; candidate_index < dgraph->length; + candidate_index++) { + + candidate = dgraph->graph[candidate_index]; + + for (scan_index = 0; scan_index < dgraph->length; scan_index++) { + + dgraph_entry_t * scan_entry = dgraph->graph[scan_index]; + if (candidate == scan_entry) { + // don't check yourself + continue; + } + for (dep_index = 0; dep_index < scan_entry->num_dependencies; + dep_index++) { + + /* If the dependency being checked is the candidate, + * then the candidate can't be the root. + */ + dgraph_entry_t * check = scan_entry->dependencies[dep_index]; + + if (check == candidate) { + candidate = NULL; + break; + } + } + + /* If the candidate was rejected, then hop out of this loop. + */ + if (!candidate) { + break; + } + } + + /* If we got here, the candidate is a valid one. However, if we already + * found another, that means we have two possible roots (or more), which + * is NOT ALLOWED. 
+ */ + if (candidate) { + if (root) { + kload_log_error("dependency graph has multiple roots " + "(%s and %s)" KNL, root->name, candidate->name); + return NULL; // two valid roots, illegal + } else { + root = candidate; + } + } + } + + if (!root) { + kload_log_error("dependency graph has no root node" KNL); + } + + return root; +} + +/******************************************************************************* +* +*******************************************************************************/ +dgraph_entry_t ** fill_backward_load_order( + dgraph_entry_t ** backward_load_order, + unsigned int * list_length, + dgraph_entry_t * first_entry, + unsigned int * last_index /* out param */) +{ + int i; + unsigned int scan_index = 0; + unsigned int add_index = 0; + dgraph_entry_t * scan_entry; + + if (*list_length == 0) { + if (backward_load_order) { + free(backward_load_order); + backward_load_order = NULL; + } + goto finish; + } + + backward_load_order[add_index++] = first_entry; + + while (scan_index < add_index) { + + if (add_index > 255) { + kload_log_error( + "dependency list for %s ridiculously long; probably a loop" KNL, + first_entry->name); + if (backward_load_order) { + free(backward_load_order); + backward_load_order = NULL; + } + goto finish; + } + + scan_entry = backward_load_order[scan_index++]; + + /* Increase the load order list if needed. + */ + if (add_index + scan_entry->num_dependencies > (*list_length)) { + (*list_length) *= 2; + backward_load_order = (dgraph_entry_t **)realloc( + backward_load_order, + (*list_length) * sizeof(dgraph_entry_t *)); + if (!backward_load_order) { + goto finish; + } + } + + /* Put the dependencies of the scanning entry into the list. + */ + for (i = 0; i < scan_entry->num_dependencies; i++) { + backward_load_order[add_index++] = + scan_entry->dependencies[i]; + } + } + +finish: + + if (last_index) { + *last_index = add_index; + } + return backward_load_order; +} + +/******************************************************************************* +* +*******************************************************************************/ +int dgraph_establish_load_order(dgraph_t * dgraph) { + unsigned int total_dependencies; + unsigned int entry_index; + unsigned int list_index; + unsigned int backward_index; + unsigned int forward_index; + size_t load_order_size; + size_t backward_load_order_size; + dgraph_entry_t ** backward_load_order; + + /* Lose the old load_order list. Size can change, so it's easier to just + * recreate from scratch. + */ + if (dgraph->load_order) { + free(dgraph->load_order); + dgraph->load_order = NULL; + } + + /* Figure how long the list needs to be to accommodate the max possible + * entries from the graph. Duplicates get weeded out, but the list + * initially has to accommodate them all. + */ + total_dependencies = dgraph->length; + + for (entry_index = 0; entry_index < dgraph->length; entry_index ++) { + dgraph_entry_t * curdep = dgraph->graph[entry_index]; + total_dependencies += curdep->num_dependencies; + } + + /* Hmm, nothing to do! 
+ */ + if (!total_dependencies) { + return 1; + } + + backward_load_order_size = total_dependencies * sizeof(dgraph_entry_t *); + + backward_load_order = (dgraph_entry_t **)malloc(backward_load_order_size); + if (!backward_load_order) { + kload_log_error("malloc failure" KNL); + return 0; + } + bzero(backward_load_order, backward_load_order_size); + + backward_load_order = fill_backward_load_order(backward_load_order, + &total_dependencies, dgraph->root, &list_index); + if (!backward_load_order) { + kload_log_error("error establishing load order" KNL); + return 0; + } + + load_order_size = dgraph->length * sizeof(dgraph_entry_t *); + dgraph->load_order = (dgraph_entry_t **)malloc(load_order_size); + if (!dgraph->load_order) { + kload_log_error("malloc failure" KNL); + return 0; + } + bzero(dgraph->load_order, load_order_size); + + + /* Reverse the list into the dgraph's load_order list, + * removing any duplicates. + */ + backward_index = list_index; + // + // the required 1 is taken off in loop below! + + forward_index = 0; + do { + dgraph_entry_t * current_entry; + unsigned int already_got_it = 0; + + backward_index--; + + /* Get the entry to check. + */ + current_entry = backward_load_order[backward_index]; + + /* Did we already get it? + */ + for (list_index = 0; list_index < forward_index; list_index++) { + if (current_entry == dgraph->load_order[list_index]) { + already_got_it = 1; + break; + } + } + + if (already_got_it) { + continue; + } + + /* Haven't seen it before; tack it onto the load-order list. + */ + dgraph->load_order[forward_index++] = current_entry; + + } while (backward_index > 0); + + free(backward_load_order); + + return 1; +} + +/******************************************************************************* +* +*******************************************************************************/ +void dgraph_log(dgraph_t * depgraph) +{ + unsigned int i, j; + + kload_log_message("flattened dependency list: " KNL); + for (i = 0; i < depgraph->length; i++) { + dgraph_entry_t * current = depgraph->graph[i]; + + kload_log_message(" %s" KNL, current->name); + kload_log_message(" is kernel component: %s" KNL, + current->is_kernel_component ? 
"yes" : "no"); + kload_log_message(" expected kmod name: [%s]" KNL, + current->expected_kmod_name); + kload_log_message(" expected kmod vers: [%s]" KNL, + current->expected_kmod_vers); + } + kload_log_message("" KNL); + + kload_log_message("load order dependency list: " KNL); + for (i = 0; i < depgraph->length; i++) { + dgraph_entry_t * current = depgraph->load_order[i]; + kload_log_message(" %s" KNL, current->name); + } + kload_log_message("" KNL); + + kload_log_message("dependency graph: " KNL); + for (i = 0; i < depgraph->length; i++) { + dgraph_entry_t * current = depgraph->graph[i]; + for (j = 0; j < current->num_dependencies; j++) { + dgraph_entry_t * cdep = current->dependencies[j]; + kload_log_message(" %s -> %s" KNL, current->name, cdep->name); + } + } + kload_log_message("" KNL); + + return; +} + +/******************************************************************************* +* +*******************************************************************************/ +dgraph_entry_t * dgraph_find_dependent(dgraph_t * dgraph, const char * name) +{ + unsigned int i; + + for (i = 0; i < dgraph->length; i++) { + dgraph_entry_t * current_entry = dgraph->graph[i]; + if (0 == strcmp(name, current_entry->name)) { + return current_entry; + } + } + + return NULL; +} + +/******************************************************************************* +* +*******************************************************************************/ +dgraph_entry_t * dgraph_add_dependent( + dgraph_t * dgraph, + const char * name, +#ifdef KERNEL + void * object, + size_t object_length, + bool object_is_kmem, +#endif /* KERNEL */ + const char * expected_kmod_name, + const char * expected_kmod_vers, + vm_address_t load_address, + char is_kernel_component) +{ + int error = 0; + dgraph_entry_t * found_entry = NULL; + dgraph_entry_t * new_entry = NULL; // free on error + dgraph_entry_t * the_entry = NULL; // returned + + /* Already got it? Great! + */ + found_entry = dgraph_find_dependent(dgraph, name); + if (found_entry) { + if (found_entry->is_kernel_component != is_kernel_component) { + kload_log_error( + "%s is already defined as a %skernel component" KNL, + name, found_entry->is_kernel_component ? "" : "non-"); + error = 1; + goto finish; + } + + if (load_address != 0) { + if (found_entry->loaded_address == 0) { + found_entry->do_load = 0; + found_entry->loaded_address = load_address; + } else if (found_entry->loaded_address != load_address) { + kload_log_error( + "%s has been assigned two different addresses (0x%x, 0x%x) KNL", + found_entry->name, + found_entry->loaded_address, + load_address); + error = 1; + goto finish; + } + } + the_entry = found_entry; + goto finish; + } + + /* If the graph is full, make it bigger. + */ + if (dgraph->length == dgraph->capacity) { + unsigned int old_capacity = dgraph->capacity; + dgraph_entry_t ** newgraph; + + dgraph->capacity *= 2; + newgraph = (dgraph_entry_t **)malloc(dgraph->capacity * + sizeof(dgraph_entry_t *)); + if (!newgraph) { + return NULL; + } + memcpy(newgraph, dgraph->graph, old_capacity * sizeof(dgraph_entry_t *)); + free(dgraph->graph); + dgraph->graph = newgraph; + } + + if (strlen(expected_kmod_name) > KMOD_MAX_NAME - 1) { + kload_log_error("expected kmod name \"%s\" is too long" KNL, + expected_kmod_name); + error = 1; + goto finish; + } + + /* Fill it. 
+ */ + new_entry = (dgraph_entry_t *)malloc(sizeof(dgraph_entry_t)); + if (!new_entry) { + error = 1; + goto finish; + } + bzero(new_entry, sizeof(dgraph_entry_t)); + new_entry->expected_kmod_name = strdup(expected_kmod_name); + if (!new_entry->expected_kmod_name) { + error = 1; + goto finish; + } + new_entry->expected_kmod_vers = strdup(expected_kmod_vers); + if (!new_entry->expected_kmod_vers) { + error = 1; + goto finish; + } + new_entry->is_kernel_component = is_kernel_component; + + // /hacks + new_entry->is_symbol_set = (2 & is_kernel_component); + new_entry->opaques = !strncmp(new_entry->expected_kmod_name, + "com.apple.kpi", strlen("com.apple.kpi")); + // hacks/ + + dgraph->has_symbol_sets |= new_entry->is_symbol_set; + + new_entry->do_load = !is_kernel_component; + +#ifndef KERNEL + new_entry->object = NULL; // provided elsewhere in userland + new_entry->object_length = 0; +#else + new_entry->object = object; + new_entry->object_length = object_length; + new_entry->object_is_kmem = object_is_kmem; +#endif /* KERNEL */ + new_entry->name = strdup(name); + if (!new_entry->name) { + error = 1; + goto finish; + } + dgraph->graph[dgraph->length++] = new_entry; + + + /* Create a dependency list for the entry. Start with 5 slots. + */ + new_entry->dependencies_capacity = 5; + new_entry->num_dependencies = 0; + new_entry->dependencies = (dgraph_entry_t **)malloc( + new_entry->dependencies_capacity * sizeof(dgraph_entry_t *)); + if (!new_entry->dependencies) { + error = 1; + goto finish; + } + + if (new_entry->loaded_address == 0) { + new_entry->loaded_address = load_address; + if (load_address != 0) { + new_entry->do_load = 0; + } + } + + the_entry = new_entry; + +finish: + if (error) { + if (new_entry) __dgraph_entry_free(new_entry); + the_entry = new_entry = NULL; + } + return the_entry; +} + +/******************************************************************************* +* +*******************************************************************************/ +dgraph_entry_t * dgraph_add_dependency( + dgraph_t * dgraph, + dgraph_entry_t * current_dependent, + const char * name, +#ifdef KERNEL + void * object, + size_t object_length, + bool object_is_kmem, +#endif /* KERNEL */ + const char * expected_kmod_name, + const char * expected_kmod_vers, + vm_address_t load_address, + char is_kernel_component) +{ + dgraph_entry_t * dependency = NULL; + unsigned int i = 0; + + /* If the dependent's dependency list is full, make it bigger. + */ + if (current_dependent->num_dependencies == + current_dependent->dependencies_capacity) { + + unsigned int old_capacity = current_dependent->dependencies_capacity; + dgraph_entry_t ** newlist; + + current_dependent->dependencies_capacity *= 2; + newlist = (dgraph_entry_t **)malloc( + (current_dependent->dependencies_capacity * + sizeof(dgraph_entry_t *)) ); + + if (!newlist) { + return NULL; + } + memcpy(newlist, current_dependent->dependencies, + old_capacity * sizeof(dgraph_entry_t *)); + free(current_dependent->dependencies); + current_dependent->dependencies = newlist; + } + + + /* Find or add the entry for the new dependency. 
+ */ + dependency = dgraph_add_dependent(dgraph, name, +#ifdef KERNEL + object, object_length, object_is_kmem, +#endif /* KERNEL */ + expected_kmod_name, expected_kmod_vers, load_address, + is_kernel_component); + if (!dependency) { + return NULL; + } + + if (dependency == current_dependent) { + kload_log_error("attempt to set dependency on itself: %s" KNL, + current_dependent->name); + return NULL; + } + + for (i = 0; i < current_dependent->num_dependencies; i++) { + dgraph_entry_t * this_dependency = current_dependent->dependencies[i]; + if (this_dependency == dependency) { + return dependency; + } + } + + /* Fill in the dependency. + */ + current_dependent->dependencies[current_dependent->num_dependencies] = + dependency; + current_dependent->num_dependencies++; + + current_dependent->opaque_link |= dependency->opaques; + dgraph->has_opaque_links |= current_dependent->opaque_link; + + return dependency; +} diff --git a/libsa/dgraph.h b/libsa/dgraph.h new file mode 100644 index 000000000..17e26b963 --- /dev/null +++ b/libsa/dgraph.h @@ -0,0 +1,171 @@ +#ifndef __DGRAPH_H__ +#define __DGRAPH_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef KERNEL +#include +#include +#else +#include +#include +#endif /* KERNEL */ + +typedef struct dgraph_entry_t { + + char is_kernel_component; // means that name is a CFBundleIdentifier!!! + char is_symbol_set; + char opaques; + char opaque_link; + + // What we have to start from + char * name; // filename if user space, bundleid if kernel or kernel comp. + + void * object; // In kernel we keep track of the object file + size_t object_length; // we don't own this, however; it's just a ref +#ifdef KERNEL + bool object_is_kmem; // Only used when mapping a file! +#endif /* KERNEL */ + + /* If is_kernel_component is true then the do_load field is cleared and + * the kmod_id field gets set. + */ + + // Immediate dependencies of this entry + unsigned int dependencies_capacity; + unsigned int num_dependencies; + struct dgraph_entry_t ** dependencies; + + // These are filled in when the entry is created, and are written into + // the kmod linked image at load time. 
+ char * expected_kmod_name; + char * expected_kmod_vers; + + bool is_mapped; // kld_file_map() has been called for this entry + + // For tracking already-loaded kmods or for doing symbol generation only + int do_load; // actually loading + vm_address_t loaded_address; // address loaded at or being faked at for symbol generation +#ifndef KERNEL + char * link_output_file; + bool link_output_file_alloc; +#endif + struct mach_header * linked_image; + vm_size_t linked_image_length; + + vm_address_t symbols; + vm_size_t symbols_length; + vm_address_t symbols_malloc; + + // for loading into kernel + vm_address_t kernel_alloc_address; + unsigned long kernel_alloc_size; + vm_address_t kernel_load_address; + unsigned long kernel_load_size; + unsigned long kernel_hdr_size; + unsigned long kernel_hdr_pad; + int need_cleanup; // true if load failed with kernel memory allocated + kmod_t kmod_id; // the id assigned by the kernel to a loaded kmod + +} dgraph_entry_t; + +typedef struct { + unsigned int capacity; + unsigned int length; + dgraph_entry_t ** graph; + dgraph_entry_t ** load_order; + dgraph_entry_t * root; + char have_loaded_symbols; + char has_symbol_sets; + char has_opaque_links; + vm_address_t opaque_base_image; + vm_size_t opaque_base_length; +} dgraph_t; + +typedef enum { + dgraph_error = -1, + dgraph_invalid = 0, + dgraph_valid = 1 +} dgraph_error_t; + + +dgraph_error_t dgraph_init(dgraph_t * dgraph); + +#ifndef KERNEL +/********** + * Initialize a dependency graph passed in. Returns nonzero on success, zero + * on failure. + * + * dependency_graph: a pointer to the dgraph to initialize. + * argc: the number of arguments in argv + * argv: an array of strings defining the dependency graph. This is a + * series of dependency lists, delimited by "-d" (except before + * the first list, naturally). Each list has as its first entry + * the dependent, followed by any number of DIRECT dependencies. + * The lists may be given in any order, but the first item in each + * list must be the dependent. Also, there can only be one root + * item (an item with no dependents upon it), and it must not be + * a kernel component. + */ +dgraph_error_t dgraph_init_with_arglist( + dgraph_t * dgraph, + int expect_addresses, + const char * dependency_delimiter, + const char * kernel_dependency_delimiter, + int argc, + char * argv[]); +#endif /* not KERNEL */ + +void dgraph_free( + dgraph_t * dgraph, + int free_graph); + +dgraph_entry_t * dgraph_find_root(dgraph_t * dgraph); + +int dgraph_establish_load_order(dgraph_t * dgraph); + +#ifndef KERNEL +void dgraph_print(dgraph_t * dgraph); +#endif /* not kernel */ +void dgraph_log(dgraph_t * depgraph); + + +/***** + * These functions are useful for hand-building a dgraph. 
+ */ +dgraph_entry_t * dgraph_find_dependent(dgraph_t * dgraph, const char * name); + +dgraph_entry_t * dgraph_add_dependent( + dgraph_t * dgraph, + const char * name, +#ifdef KERNEL + void * object, + size_t object_length, + bool object_is_kmem, +#endif /* KERNEL */ + const char * expected_kmod_name, + const char * expected_kmod_vers, + vm_address_t load_address, + char is_kernel_component); + +dgraph_entry_t * dgraph_add_dependency( + dgraph_t * dgraph, + dgraph_entry_t * current_dependent, + const char * name, +#ifdef KERNEL + void * object, + size_t object_length, + bool object_is_kmem, +#endif /* KERNEL */ + const char * expected_kmod_name, + const char * expected_kmod_vers, + vm_address_t load_address, + char is_kernel_component); + +#ifdef __cplusplus +} +#endif + +#endif /* __DGRAPH_H__ */ diff --git a/libsa/kext.cpp b/libsa/kext.cpp new file mode 100644 index 000000000..e4a465508 --- /dev/null +++ b/libsa/kext.cpp @@ -0,0 +1,746 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include + +extern "C" { +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kld_patch.h" +#include "dgraph.h" +#include "load.h" +}; + + +extern "C" { +extern kern_return_t +kmod_create_internal( + kmod_info_t *info, + kmod_t *id); + +extern kern_return_t +kmod_destroy_internal(kmod_t id); + +extern kern_return_t +kmod_start_or_stop( + kmod_t id, + int start, + kmod_args_t *data, + mach_msg_type_number_t *dataCount); + +extern kern_return_t kmod_retain(kmod_t id); +extern kern_return_t kmod_release(kmod_t id); + +extern void flush_dcache(vm_offset_t addr, unsigned cnt, int phys); +extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys); +}; + +#define DEBUG +#ifdef DEBUG +#define LOG_DELAY(x) IODelay((x) * 1000000) +#define VTYELLOW "\033[33m" +#define VTRESET "\033[0m" +#else +#define LOG_DELAY(x) +#define VTYELLOW +#define VTRESET +#endif /* DEBUG */ + +/********************************************************************* +* +*********************************************************************/ +static +bool getKext( + const char * bundleid, + OSDictionary ** plist, + unsigned char ** code, + unsigned long * code_size, + bool * caller_owns_code) +{ + bool result = true; + OSDictionary * extensionsDict; // don't release + OSDictionary * extDict; // don't release + OSDictionary * extPlist; // don't release + unsigned long code_size_local; + + /* Get the dictionary of startup extensions. 
+ * This is keyed by module name. + */ + extensionsDict = getStartupExtensions(); + if (!extensionsDict) { + IOLog("startup extensions dictionary is missing\n"); + result = false; + goto finish; + } + + /* Get the requested extension's dictionary entry and its property + * list, containing module dependencies. + */ + extDict = OSDynamicCast(OSDictionary, + extensionsDict->getObject(bundleid)); + + if (!extDict) { + IOLog("extension \"%s\" cannot be found\n", + bundleid); + result = false; + goto finish; + } + + if (plist) { + extPlist = OSDynamicCast(OSDictionary, extDict->getObject("plist")); + if (!extPlist) { + IOLog("extension \"%s\" has no info dictionary\n", + bundleid); + result = false; + goto finish; + } + *plist = extPlist; + } + + if (code) { + + /* If asking for code, the caller must provide a return buffer + * for ownership! + */ + if (!caller_owns_code) { + IOLog("getKext(): invalid usage (caller_owns_code not provided)\n"); + result = false; + goto finish; + } + + *code = 0; + if (code_size) { + *code_size = 0; + } + *caller_owns_code = false; + + *code = (unsigned char *)kld_file_getaddr(bundleid, + (long *)&code_size_local); + if (*code) { + if (code_size) { + *code_size = code_size_local; + } + } else { + OSData * driverCode = 0; // release only if uncompressing! + + driverCode = OSDynamicCast(OSData, extDict->getObject("code")); + if (driverCode) { + *code = (unsigned char *)driverCode->getBytesNoCopy(); + if (code_size) { + *code_size = driverCode->getLength(); + } + } else { // Look for compressed code and uncompress it + OSData * compressedCode = 0; + compressedCode = OSDynamicCast(OSData, + extDict->getObject("compressedCode")); + if (compressedCode) { + if (!uncompressModule(compressedCode, &driverCode)) { + IOLog("extension \"%s\": couldn't uncompress code\n", + bundleid); + LOG_DELAY(1); + result = false; + goto finish; + } + *caller_owns_code = true; + *code = (unsigned char *)driverCode->getBytesNoCopy(); + if (code_size) { + *code_size = driverCode->getLength(); + } + driverCode->release(); + } + } + } + } + +finish: + + return result; +} + + +/********************************************************************* +* +*********************************************************************/ +static +bool verifyCompatibility(OSString * extName, OSString * requiredVersion) +{ + OSDictionary * extPlist; // don't release + OSString * extVersion; // don't release + OSString * extCompatVersion; // don't release + VERS_version ext_version; + VERS_version ext_compat_version; + VERS_version required_version; + + if (!getKext(extName->getCStringNoCopy(), &extPlist, NULL, NULL, NULL)) { + return false; + } + + extVersion = OSDynamicCast(OSString, + extPlist->getObject("CFBundleVersion")); + if (!extVersion) { + IOLog("verifyCompatibility(): " + "Extension \"%s\" has no \"CFBundleVersion\" property.\n", + extName->getCStringNoCopy()); + return false; + } + + extCompatVersion = OSDynamicCast(OSString, + extPlist->getObject("OSBundleCompatibleVersion")); + if (!extCompatVersion) { + IOLog("verifyCompatibility(): " + "Extension \"%s\" has no \"OSBundleCompatibleVersion\" property.\n", + extName->getCStringNoCopy()); + return false; + } + + required_version = VERS_parse_string(requiredVersion->getCStringNoCopy()); + if (required_version < 0) { + IOLog("verifyCompatibility(): " + "Can't parse required version \"%s\" of dependency %s.\n", + requiredVersion->getCStringNoCopy(), + extName->getCStringNoCopy()); + return false; + } + ext_version = 
VERS_parse_string(extVersion->getCStringNoCopy()); + if (ext_version < 0) { + IOLog("verifyCompatibility(): " + "Can't parse version \"%s\" of dependency %s.\n", + extVersion->getCStringNoCopy(), + extName->getCStringNoCopy()); + return false; + } + ext_compat_version = VERS_parse_string(extCompatVersion->getCStringNoCopy()); + if (ext_compat_version < 0) { + IOLog("verifyCompatibility(): " + "Can't parse compatible version \"%s\" of dependency %s.\n", + extCompatVersion->getCStringNoCopy(), + extName->getCStringNoCopy()); + return false; + } + + if (required_version > ext_version || required_version < ext_compat_version) { + return false; + } + + return true; +} + +/********************************************************************* +*********************************************************************/ +static +bool kextIsDependency(const char * kext_name, char * is_kernel) { + bool result = true; + OSDictionary * extensionsDict = 0; // don't release + OSDictionary * extDict = 0; // don't release + OSDictionary * extPlist = 0; // don't release + OSBoolean * isKernelResourceObj = 0; // don't release + OSData * driverCode = 0; // don't release + OSData * compressedCode = 0; // don't release + + if (is_kernel) { + *is_kernel = false; + } + + /* Get the dictionary of startup extensions. + * This is keyed by module name. + */ + extensionsDict = getStartupExtensions(); + if (!extensionsDict) { + IOLog("startup extensions dictionary is missing\n"); + result = false; + goto finish; + } + + /* Get the requested extension's dictionary entry and its property + * list, containing module dependencies. + */ + extDict = OSDynamicCast(OSDictionary, + extensionsDict->getObject(kext_name)); + + if (!extDict) { + IOLog("extension \"%s\" cannot be found\n", + kext_name); + result = false; + goto finish; + } + + extPlist = OSDynamicCast(OSDictionary, extDict->getObject("plist")); + if (!extPlist) { + IOLog("extension \"%s\" has no info dictionary\n", + kext_name); + result = false; + goto finish; + } + + /* A kext that is a kernel component is still a dependency, as there + * are fake kmod entries for them. 
+ */ + isKernelResourceObj = OSDynamicCast(OSBoolean, + extPlist->getObject("OSKernelResource")); + if (isKernelResourceObj && isKernelResourceObj->isTrue()) { + if (is_kernel) { + *is_kernel = true; + } + } + + driverCode = OSDynamicCast(OSData, extDict->getObject("code")); + compressedCode = OSDynamicCast(OSData, + extDict->getObject("compressedCode")); + + if ((driverCode || compressedCode) && is_kernel && *is_kernel) { + *is_kernel = 2; + } + + if (!driverCode && !compressedCode && !isKernelResourceObj) { + result = false; + goto finish; + } + +finish: + + return result; +} + +/********************************************************************* +*********************************************************************/ +static bool +figureDependenciesForKext(OSDictionary * kextPlist, + OSDictionary * dependencies, + OSString * trueParent) +{ + bool result = true; + OSString * kextName = 0; // don't release + OSDictionary * libraries = 0; // don't release + OSCollectionIterator * keyIterator = 0; // must release + OSString * libraryName = 0; // don't release + + kextName = OSDynamicCast(OSString, + kextPlist->getObject("CFBundleIdentifier")); + if (!kextName) { + // XXX: Add log message + result = false; + goto finish; + } + + libraries = OSDynamicCast(OSDictionary, + kextPlist->getObject("OSBundleLibraries")); + if (!libraries) { + result = true; + goto finish; + } + + keyIterator = OSCollectionIterator::withCollection(libraries); + if (!keyIterator) { + // XXX: Add log message + result = false; + goto finish; + } + + while ( (libraryName = OSDynamicCast(OSString, + keyIterator->getNextObject())) ) { + + OSString * libraryVersion = OSDynamicCast(OSString, + libraries->getObject(libraryName)); + if (!libraryVersion) { + // XXX: Add log message + result = false; + goto finish; + } + if (!verifyCompatibility(libraryName, libraryVersion)) { + result = false; + goto finish; + } else { + dependencies->setObject(libraryName, + trueParent ? 
trueParent : kextName); + } + } + +finish: + if (keyIterator) keyIterator->release(); + return result; +} + +/********************************************************************* +*********************************************************************/ +static +bool getVersionForKext(OSDictionary * kextPlist, char ** version) +{ + OSString * kextName = 0; // don't release + OSString * kextVersion; // don't release + + kextName = OSDynamicCast(OSString, + kextPlist->getObject("CFBundleIdentifier")); + if (!kextName) { + // XXX: Add log message + return false; + } + + kextVersion = OSDynamicCast(OSString, + kextPlist->getObject("CFBundleVersion")); + if (!kextVersion) { + IOLog("getVersionForKext(): " + "Extension \"%s\" has no \"CFBundleVersion\" property.\n", + kextName->getCStringNoCopy()); + return false; + } + + if (version) { + *version = (char *)kextVersion->getCStringNoCopy(); + } + + return true; +} + +/********************************************************************* +*********************************************************************/ +static +bool add_dependencies_for_kmod(const char * kmod_name, dgraph_t * dgraph) +{ + bool result = true; + OSDictionary * kextPlist = 0; // don't release + OSDictionary * workingDependencies = 0; // must release + OSDictionary * pendingDependencies = 0; // must release + OSDictionary * swapDict = 0; // don't release + OSString * dependentName = 0; // don't release + const char * dependent_name = 0; // don't free + OSString * libraryName = 0; // don't release + const char * library_name = 0; // don't free + OSCollectionIterator * dependencyIterator = 0; // must release + unsigned char * code = 0; + unsigned long code_length = 0; + bool code_is_kmem = false; + char * kmod_vers = 0; // from plist, don't free + char is_kernel_component = false; + dgraph_entry_t * dgraph_entry = 0; // don't free + dgraph_entry_t * dgraph_dependency = 0; // don't free + unsigned int graph_depth = 0; + bool kext_is_dependency = true; + + if (!getKext(kmod_name, &kextPlist, &code, &code_length, + &code_is_kmem)) { + IOLog("can't find extension %s\n", kmod_name); + result = false; + goto finish; + } + + if (!kextIsDependency(kmod_name, &is_kernel_component)) { + IOLog("extension %s is not loadable\n", kmod_name); + result = false; + goto finish; + } + + if (!getVersionForKext(kextPlist, &kmod_vers)) { + IOLog("can't get version for extension %s\n", kmod_name); + result = false; + goto finish; + } + + dgraph_entry = dgraph_add_dependent(dgraph, kmod_name, + code, code_length, code_is_kmem, + kmod_name, kmod_vers, + 0 /* load_address not yet known */, is_kernel_component); + if (!dgraph_entry) { + IOLog("can't record %s in dependency graph\n", kmod_name); + result = false; + // kmem_alloc()ed code is freed in finish: block. + goto finish; + } + + // pass ownership of code to kld patcher + if (code) + { + if (kload_map_entry(dgraph_entry) != kload_error_none) { + IOLog("can't map %s in preparation for loading\n", kmod_name); + result = false; + // kmem_alloc()ed code is freed in finish: block. 
+ goto finish; + } + } + // clear local record of code + code = 0; + code_length = 0; + code_is_kmem = false; + + workingDependencies = OSDictionary::withCapacity(5); + if (!workingDependencies) { + IOLog("memory allocation failure\n"); + result = false; + goto finish; + } + + pendingDependencies = OSDictionary::withCapacity(5); + if (!pendingDependencies) { + IOLog("memory allocation failure\n"); + result = false; + goto finish; + } + + if (!figureDependenciesForKext(kextPlist, workingDependencies, NULL)) { + IOLog("can't determine immediate dependencies for extension %s\n", + kmod_name); + result = false; + goto finish; + } + + graph_depth = 0; + while (workingDependencies->getCount()) { + if (graph_depth > 255) { + IOLog("extension dependency graph ridiculously long, indicating a loop\n"); + result = false; + goto finish; + } + + if (dependencyIterator) { + dependencyIterator->release(); + dependencyIterator = 0; + } + + dependencyIterator = OSCollectionIterator::withCollection( + workingDependencies); + if (!dependencyIterator) { + IOLog("memory allocation failure\n"); + result = false; + goto finish; + } + + while ( (libraryName = + OSDynamicCast(OSString, dependencyIterator->getNextObject())) ) { + + library_name = libraryName->getCStringNoCopy(); + + dependentName = OSDynamicCast(OSString, + workingDependencies->getObject(libraryName)); + + dependent_name = dependentName->getCStringNoCopy(); + + if (!getKext(library_name, &kextPlist, NULL, NULL, NULL)) { + IOLog("can't find extension %s\n", library_name); + result = false; + goto finish; + } + + OSString * string; + if ((string = OSDynamicCast(OSString, + kextPlist->getObject("OSBundleSharedExecutableIdentifier")))) + { + library_name = string->getCStringNoCopy(); + if (!getKext(library_name, &kextPlist, NULL, NULL, NULL)) { + IOLog("can't find extension %s\n", library_name); + result = false; + goto finish; + } + } + + kext_is_dependency = kextIsDependency(library_name, + &is_kernel_component); + + if (!kext_is_dependency) { + + /* For binaryless kexts, add a new pending dependency from the + * original dependent onto the dependencies of the current, + * binaryless, dependency. + */ + if (!figureDependenciesForKext(kextPlist, pendingDependencies, + dependentName)) { + + IOLog("can't determine immediate dependencies for extension %s\n", + library_name); + result = false; + goto finish; + } + continue; + } else { + dgraph_entry = dgraph_find_dependent(dgraph, dependent_name); + if (!dgraph_entry) { + IOLog("internal error with dependency graph\n"); + LOG_DELAY(1); + result = false; + goto finish; + } + + if (!getVersionForKext(kextPlist, &kmod_vers)) { + IOLog("can't get version for extension %s\n", library_name); + result = false; + goto finish; + } + + /* It's okay for code to be zero, as for a pseudokext + * representing a kernel component. + */ + if (!getKext(library_name, NULL /* already got it */, + &code, &code_length, &code_is_kmem)) { + IOLog("can't find extension %s\n", library_name); + result = false; + goto finish; + } + + dgraph_dependency = dgraph_add_dependency(dgraph, dgraph_entry, + library_name, code, code_length, code_is_kmem, + library_name, kmod_vers, + 0 /* load_address not yet known */, is_kernel_component); + + if (!dgraph_dependency) { + IOLog("can't record dependency %s -> %s\n", dependent_name, + library_name); + result = false; + // kmem_alloc()ed code is freed in finish: block. 
+ goto finish; + } + + // pass ownership of code to kld patcher + if (code) { + if (kload_map_entry(dgraph_dependency) != kload_error_none) { + IOLog("can't map %s in preparation for loading\n", library_name); + result = false; + // kmem_alloc()ed code is freed in finish: block. + goto finish; + } + } + // clear local record of code + code = 0; + code_length = 0; + code_is_kmem = false; + } + + /* Now put the library's dependencies onto the pending set. + */ + if (!figureDependenciesForKext(kextPlist, pendingDependencies, + NULL)) { + + IOLog("can't determine immediate dependencies for extension %s\n", + library_name); + result = false; + goto finish; + } + } + + dependencyIterator->release(); + dependencyIterator = 0; + + workingDependencies->flushCollection(); + swapDict = workingDependencies; + workingDependencies = pendingDependencies; + pendingDependencies = swapDict; + graph_depth++; + } + +finish: + if (code && code_is_kmem) { + kmem_free(kernel_map, (unsigned int)code, code_length); + } + if (workingDependencies) workingDependencies->release(); + if (pendingDependencies) pendingDependencies->release(); + if (dependencyIterator) dependencyIterator->release(); + return result; +} + +/********************************************************************* +* This is the function that IOCatalogue calls in order to load a kmod. +* It first checks whether the kmod is already loaded. If the kmod +* isn't loaded, this function builds a dependency list and calls +* load_kmod() repeatedly to guarantee that each dependency is in fact +* loaded. +*********************************************************************/ +__private_extern__ +kern_return_t load_kernel_extension(char * kmod_name) +{ + kern_return_t result = KERN_SUCCESS; + kload_error load_result = kload_error_none; + dgraph_t dgraph; + bool free_dgraph = false; + kmod_info_t * kmod_info; + +// Put this in for lots of messages about kext loading. +#if 0 + kload_set_log_level(kload_log_level_load_details); +#endif + + /* See if the kmod is already loaded. + */ + if ((kmod_info = kmod_lookupbyname_locked(kmod_name))) { + kfree((vm_offset_t) kmod_info, sizeof(kmod_info_t)); + return KERN_SUCCESS; + } + + if (dgraph_init(&dgraph) != dgraph_valid) { + IOLog("Can't initialize dependency graph to load %s.\n", + kmod_name); + result = KERN_FAILURE; + goto finish; + } + + free_dgraph = true; + if (!add_dependencies_for_kmod(kmod_name, &dgraph)) { + IOLog("Can't determine dependencies for %s.\n", + kmod_name); + result = KERN_FAILURE; + goto finish; + } + + dgraph.root = dgraph_find_root(&dgraph); + + if (!dgraph.root) { + IOLog("Dependency graph to load %s has no root.\n", + kmod_name); + result = KERN_FAILURE; + goto finish; + } + + /* A kernel component is built in and need not be loaded. 
+ */ + if (dgraph.root->is_kernel_component) { + result = KERN_SUCCESS; + goto finish; + } + + dgraph_establish_load_order(&dgraph); + + load_result = kload_load_dgraph(&dgraph); + if (load_result != kload_error_none && + load_result != kload_error_already_loaded) { + + IOLog(VTYELLOW "Failed to load extension %s.\n" VTRESET, kmod_name); + + result = KERN_FAILURE; + goto finish; + } + +finish: + + if (free_dgraph) { + dgraph_free(&dgraph, 0 /* don't free dgraph itself */); + } + return result; +} diff --git a/libsa/kld_patch.c b/libsa/kld_patch.c index 3c723e860..2119db37d 100644 --- a/libsa/kld_patch.c +++ b/libsa/kld_patch.c @@ -32,6 +32,9 @@ #include #include #include +#if !KERNEL +#include +#endif #if KERNEL @@ -179,6 +182,7 @@ enum patchState { struct patchRecord { struct nlist *fSymbol; + const struct fileRecord *fFile; enum patchState fType; }; @@ -206,6 +210,7 @@ struct fileRecord { DataRef fSym2Strings; struct symtab_command *fSymtab; struct sectionRecord *fSections; + vm_offset_t fVMAddr, fVMEnd; struct segment_command *fLinkEditSeg; const char **fSymbToStringTable; char *fStringBase; @@ -213,19 +218,27 @@ struct fileRecord { const struct nlist *fLocalSyms; unsigned int fNSects; int fNLocal; - Boolean fIsKernel, fNoKernelExecutable, fIsKmem; + Boolean fIsKernel, fIsReloc, fIsIncrLink, fNoKernelExecutable, fIsKmem; Boolean fImageDirty, fSymbolsDirty; Boolean fRemangled, fFoundOSObject; Boolean fIgnoreFile; +#if !KERNEL + Boolean fSwapped; +#endif const char fPath[1]; }; static DataRef sFilesTable; static struct fileRecord *sKernelFile; -static DataRef sMergedFiles; -static DataRef sMergeMetaClasses; -static Boolean sMergedKernel; +static DataRef sMergedFiles; +static DataRef sMergeMetaClasses; +static Boolean sMergedKernel; +#if !KERNEL +static const NXArchInfo * sPreferArchInfo; +#endif +static const struct nlist * +findSymbolByName(struct fileRecord *file, const char *symname); static void errprintf(const char *fmt, ...) 
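Taken together, load_kernel_extension() above reduces to a short pipeline, exactly as its banner comment describes. A condensed sketch of that control flow, with stub predicates standing in for the real helpers (none of these stub names appear in the patch):

#include <stdio.h>

typedef int kern_return_t;
enum { KERN_SUCCESS = 0, KERN_FAILURE = 5 };

/* Stubs standing in for kmod_lookupbyname_locked(), dgraph_init() plus
 * add_dependencies_for_kmod(), the is_kernel_component check, and
 * dgraph_establish_load_order() plus kload_load_dgraph(). */
static int already_loaded(const char *name)      { (void)name; return 0; }
static int build_dependency_graph(const char *n) { (void)n; return 1; }
static int root_is_kernel_component(void)        { return 0; }
static int load_graph_in_order(void)             { return 1; }

static kern_return_t load_kernel_extension_sketch(const char *kmod_name)
{
    if (already_loaded(kmod_name))
        return KERN_SUCCESS;            /* nothing to do */
    if (!build_dependency_graph(kmod_name))
        return KERN_FAILURE;            /* can't even plan the load */
    if (root_is_kernel_component())
        return KERN_SUCCESS;            /* built in, never loaded */
    return load_graph_in_order() ? KERN_SUCCESS : KERN_FAILURE;
}

int main(void) { return load_kernel_extension_sketch("com.example.driver"); }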
{ @@ -586,6 +599,108 @@ mapObjectFile(struct fileRecord *file, const char *pathName) close(fd); return result; } + +void +kld_set_architecture(const NXArchInfo * arch) +{ + sPreferArchInfo = arch; +} + +Boolean +kld_macho_swap(struct mach_header * mh) +{ + struct segment_command * seg; + struct section * section; + CFIndex ncmds, cmd, sect; + enum NXByteOrder hostOrder = NXHostByteOrder(); + + if (MH_CIGAM != mh->magic) + return (false); + + swap_mach_header(mh, hostOrder); + + ncmds = mh->ncmds; + seg = (struct segment_command *)(mh + 1); + for (cmd = 0; + cmd < ncmds; + cmd++, seg = (struct segment_command *)(((vm_offset_t)seg) + seg->cmdsize)) + { + if (NXSwapLong(LC_SYMTAB) == seg->cmd) { + swap_symtab_command((struct symtab_command *) seg, hostOrder); + swap_nlist((struct nlist *) (((vm_offset_t) mh) + ((struct symtab_command *) seg)->symoff), + ((struct symtab_command *) seg)->nsyms, hostOrder); + continue; + } + if (NXSwapLong(LC_SEGMENT) != seg->cmd) { + swap_load_command((struct load_command *) seg, hostOrder); + continue; + } + swap_segment_command(seg, hostOrder); + swap_section((struct section *) (seg + 1), seg->nsects, hostOrder); + + section = (struct section *) (seg + 1); + for (sect = 0; sect < seg->nsects; sect++, section++) { + if (section->nreloc) + swap_relocation_info((struct relocation_info *) (((vm_offset_t) mh) + section->reloff), + section->nreloc, hostOrder); + } + } + + return (true); +} + +void +kld_macho_unswap(struct mach_header * mh, Boolean didSwap, int symbols) +{ + // symbols == 0 => everything + // symbols == 1 => just nlists + // symbols == -1 => everything but nlists + + struct segment_command * seg; + struct section * section; + unsigned long cmdsize; + CFIndex ncmds, cmd, sect; + enum NXByteOrder hostOrder = (NXHostByteOrder() == NX_LittleEndian) + ? 
NX_BigEndian : NX_LittleEndian; + if (!didSwap) + return; + + ncmds = mh->ncmds; + seg = (struct segment_command *)(mh + 1); + for (cmd = 0; + cmd < ncmds; + cmd++, seg = (struct segment_command *)(((vm_offset_t)seg) + cmdsize)) + { + cmdsize = seg->cmdsize; + if (LC_SYMTAB == seg->cmd) { + if (symbols >= 0) + swap_nlist((struct nlist *) (((vm_offset_t) mh) + ((struct symtab_command *) seg)->symoff), + ((struct symtab_command *) seg)->nsyms, hostOrder); + if (symbols > 0) + break; + swap_symtab_command((struct symtab_command *) seg, hostOrder); + continue; + } + if (symbols > 0) + continue; + if (LC_SEGMENT != seg->cmd) { + swap_load_command((struct load_command *) seg, hostOrder); + continue; + } + + section = (struct section *) (seg + 1); + for (sect = 0; sect < seg->nsects; sect++, section++) { + if (section->nreloc) + swap_relocation_info((struct relocation_info *) (((vm_offset_t) mh) + section->reloff), + section->nreloc, hostOrder); + } + swap_section((struct section *) (seg + 1), seg->nsects, hostOrder); + swap_segment_command(seg, hostOrder); + } + if (symbols <= 0) + swap_mach_header(mh, hostOrder); +} + #endif /* !KERNEL */ static Boolean findBestArch(struct fileRecord *file, const char *pathName) @@ -655,7 +770,11 @@ static Boolean findBestArch(struct fileRecord *file, const char *pathName) return_if(file->fMapSize < fatsize, false, ("%s isn't a valid fat file\n", pathName)); - myArch = NXGetLocalArchInfo(); + if (sPreferArchInfo) + myArch = sPreferArchInfo; + else + myArch = NXGetLocalArchInfo(); + arch = NXFindBestFatArch(myArch->cputype, myArch->cpusubtype, (struct fat_arch *) &fat[1], fat->nfat_arch); return_if(!arch, @@ -667,6 +786,10 @@ static Boolean findBestArch(struct fileRecord *file, const char *pathName) magic = ((const struct mach_header *) file->fMachO)->magic; } + file->fSwapped = kld_macho_swap((struct mach_header *) file->fMachO); + if (file->fSwapped) + magic = ((const struct mach_header *) file->fMachO)->magic; + #endif /* KERNEL */ return_if(magic != MH_MAGIC, @@ -700,7 +823,10 @@ parseSegments(struct fileRecord *file, struct segment_command *seg) sections = &file->fSections[file->fNSects]; file->fNSects += nsects; for (i = 0, segMap = (struct segmentMap *) seg; i < nsects; i++) + { sections[i].fSection = &segMap->sect[i]; + file->fIsReloc |= (0 != segMap->sect[i].nreloc); + } return true; } @@ -783,7 +909,7 @@ static Boolean parseSymtab(struct fileRecord *file, const char *pathName) unsigned int i, firstlocal, nsyms; unsigned long strsize; const char *strbase; - Boolean foundOSObject, found295CPP; + Boolean foundOSObject, found295CPP, havelocal; // we found a link edit segment so recompute the bases if (file->fLinkEditSeg) { @@ -825,6 +951,7 @@ static Boolean parseSymtab(struct fileRecord *file, const char *pathName) strsize = file->fSymtab->strsize; strbase = file->fStringBase; firstlocal = 0; + havelocal = false; found295CPP = foundOSObject = false; for (i = 0, sym = file->fSymbolBase; i < nsyms; i++, sym++) { long strx = sym->n_un.n_strx; @@ -833,6 +960,54 @@ static Boolean parseSymtab(struct fileRecord *file, const char *pathName) return_if(((unsigned long) strx > strsize), false, ("%s has an illegal string offset in symbol %d\n", pathName, i)); +#if 0 + // Make all syms abs + if (file->fIsIncrLink) { + if ( (sym->n_type & N_TYPE) == N_SECT) { + sym->n_sect = NO_SECT; + sym->n_type = (sym->n_type & ~N_TYPE) | N_ABS; + } + } +#endif + + if (file->fIsIncrLink && !file->fNSects) + { + // symbol set + struct nlist *patchsym = (struct nlist *) sym; + const 
char * lookname; + const struct nlist * realsym; + + if ( (patchsym->n_type & N_TYPE) == N_INDR) + lookname = strbase + patchsym->n_value; + else + lookname = symname; + realsym = findSymbolByName(sKernelFile, lookname); + + patchsym->n_sect = NO_SECT; + if (realsym) + { + patchsym->n_type = realsym->n_type; + patchsym->n_desc = realsym->n_desc; + patchsym->n_value = realsym->n_value; + if ((patchsym->n_type & N_TYPE) == N_SECT) + patchsym->n_type = (patchsym->n_type & ~N_TYPE) | N_ABS; + } + else + { + errprintf("%s: Undefined in symbol set: %s\n", pathName, symname); + patchsym->n_type = N_ABS; + patchsym->n_desc = 0; + patchsym->n_value = 0; + patchsym->n_un.n_strx = 0; + } + + if (!havelocal && (patchsym->n_type & N_EXT)) { + firstlocal = i; + havelocal = true; + file->fLocalSyms = patchsym; + } + continue; + } /* symbol set */ // Load up lookup symbol look table with sym names file->fSymbToStringTable[i] = symname; @@ -842,6 +1017,7 @@ static Boolean parseSymtab(struct fileRecord *file, const char *pathName) // Find the first exported symbol if ( !firstlocal && (n_type & N_EXT) ) { firstlocal = i; + havelocal = true; file->fLocalSyms = sym; } @@ -880,10 +1056,11 @@ static Boolean parseSymtab(struct fileRecord *file, const char *pathName) // Finally just check if we need to remangle symname++; // skip leading '__' while (*symname) { - if ('_' == *symname++ && '_' == *symname++) { + if ('_' == symname[0] && '_' == symname[1]) { found295CPP = true; break; } + symname++; } } } @@ -894,10 +1071,11 @@ static Boolean parseSymtab(struct fileRecord *file, const char *pathName) if (!found295CPP) { symname++; // Skip possible second '_' at start. while (*symname) { - if ('_' == *symname++ && '_' == *symname++) { + if ('_' == symname[0] && '_' == symname[1]) { found295CPP = true; break; } + symname++; } } } @@ -951,6 +1129,34 @@ findSymbolByAddress(const struct fileRecord *file, void *entry) return NULL; } +static const struct nlist * +findSymbolByAddressInAllFiles(const struct fileRecord * fromFile, + void *entry, const struct fileRecord **resultFile) +{ + int i, nfiles = 0; + struct fileRecord **files; + + if (sFilesTable) { + + // Check to see if we have already merged this file + nfiles = DataGetLength(sFilesTable) / sizeof(struct fileRecord *); + files = (struct fileRecord **) DataGetPtr(sFilesTable); + for (i = 0; i < nfiles; i++) { + if ((((vm_offset_t)entry) >= files[i]->fVMAddr) + && (((vm_offset_t)entry) < files[i]->fVMEnd)) + { + const struct nlist * result; + if (resultFile) + *resultFile = files[i]; + result = findSymbolByAddress(files[i], entry); + return result; + } + } + } + + return NULL; +} + struct searchContext { const char *fSymname; const struct fileRecord *fFile; @@ -961,7 +1167,7 @@ static int symbolSearch(const void *vKey, const void *vSym) const struct searchContext *key = (const struct searchContext *) vKey; const struct nlist *sym = (const struct nlist *) vSym; - return strcmp(key->fSymname + 1, symbolname(key->fFile, sym) + 1); + return strcmp(key->fSymname, symbolname(key->fFile, sym)); } static const struct nlist * @@ -975,7 +1181,7 @@ findSymbolByName(struct fileRecord *file, const char *symname) int nLocal = file->fNLocal + i; for (sym = file->fLocalSyms; i < nLocal; i++, sym++) - if (!strcmp(symNameByIndex(file, i) + 1, symname + 1)) + if (!strcmp(symNameByIndex(file, i), symname)) return sym; return NULL; } @@ -1081,7 +1287,12 @@ relocateSection(const struct fileRecord *file, struct sectionRecord *sectionRec) ("Invalid relocation entry in %s - local\n", 
file->fPath)); // Find the symbol, if any, that backs this entry - symbol = findSymbolByAddress(file, *entry); + void * addr = *entry; +#if !KERNEL + if (file->fSwapped) + addr = (void *) NXSwapLong((long) addr); +#endif + symbol = findSymbolByAddress(file, addr); } rec->fValue = *entry; // Save the previous value @@ -1099,11 +1310,24 @@ relocateSection(const struct fileRecord *file, struct sectionRecord *sectionRec) static const struct nlist * findSymbolRefAtLocation(const struct fileRecord *file, - struct sectionRecord *sctn, void **loc) + struct sectionRecord *sctn, void **loc, const struct fileRecord **foundInFile) { - if (file->fIsKernel) { - if (*loc) - return findSymbolByAddress(file, *loc); + const struct nlist * result; + + *foundInFile = file; + + if (!file->fIsReloc) { + if (*loc) { + void * addr = *loc; +#if !KERNEL + if (file->fSwapped) + addr = (void *) NXSwapLong((long) addr); +#endif + result = findSymbolByAddress(file, addr); + if (!result) + result = findSymbolByAddressInAllFiles(file, addr, foundInFile); + return result; + } } else if (sctn->fRelocCache || relocateSection(file, sctn)) { struct relocRecord *reloc = (struct relocRecord *) *loc; @@ -1192,11 +1416,12 @@ recordClass(struct fileRecord *file, const char *cname, const struct nlist *sym) char strbuffer[1024]; // Only do the work to find the super class if we are - // not currently working on the kernel. The kernel is the end + // not currently working on the kernel. The kernel is the end // of all superclass chains by definition as the kernel must be binary // compatible with itself. - if (!file->fIsKernel) { + if (file->fIsReloc) { const char *suffix; + const struct fileRecord *superfile; const struct nlist *supersym; const struct section *section; struct sectionRecord *sectionRec; @@ -1217,15 +1442,15 @@ recordClass(struct fileRecord *file, const char *cname, const struct nlist *sym) section = sectionRec->fSection; location = (void **) ( file->fMachO + section->offset + sym->n_value - section->addr ); - - supersym = findSymbolRefAtLocation(file, sectionRec, location); + + supersym = findSymbolRefAtLocation(file, sectionRec, location, &superfile); if (!supersym) { result = true; // No superclass symbol then it isn't an OSObject. goto finish; } // Find string in file and skip leading '_' and then find the suffix - superstr = symbolname(file, supersym) + 1; + superstr = symbolname(superfile, supersym) + 1; suffix = superstr + strlen(superstr) - sizeof(kGMetaSuffix) + 1; if (suffix <= superstr || strcmp(suffix, kGMetaSuffix)) { result = true; // Not an OSObject superclass so ignore it.. @@ -1409,7 +1634,7 @@ getSectionForSymbol(const struct fileRecord *file, const struct nlist *symb, unsigned char *base; sectind = symb->n_sect; // Default to symbols section - if ((symb->n_type & N_TYPE) == N_ABS && file->fIsKernel) { + if ((symb->n_type & N_TYPE) == N_ABS && !file->fIsReloc) { // Absolute symbol so we have to iterate over our sections for (sectind = 1; sectind <= file->fNSects; sectind++) { unsigned long start, end; @@ -1464,8 +1689,8 @@ static Boolean resolveKernelVTable(struct metaClassRecord *metaClass) // however we don't need to check the superclass in the kernel // as the kernel vtables are always correct wrt themselves. // Note this ends the superclass chain recursion. 
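Two mechanics recur through these hunks: stored addresses are byte-swapped back to host order when the image was cross-endian (file->fSwapped), and a miss in the current file falls through to findSymbolByAddressInAllFiles(), which picks the file whose [fVMAddr, fVMEnd) range covers the address. A small sketch of that range lookup, with invented paths and ranges:

#include <stdio.h>

/* Simplified stand-in for struct fileRecord and the global file table. */
struct file_range { const char *path; unsigned long vmaddr, vmend; };

static struct file_range g_files[] = {
    { "kernel",             0x00001000UL, 0x00400000UL },
    { "com.example.family", 0x00400000UL, 0x00410000UL },
};

/* Mirrors findSymbolByAddressInAllFiles(): the first file whose
 * [fVMAddr, fVMEnd) range covers the address claims the symbol. */
static const struct file_range *file_for_address(unsigned long addr)
{
    int n = sizeof(g_files) / sizeof(g_files[0]);
    for (int i = 0; i < n; i++)
        if (addr >= g_files[i].vmaddr && addr < g_files[i].vmend)
            return &g_files[i];
    return NULL;
}

int main(void)
{
    const struct file_range *f = file_for_address(0x00404000UL);
    printf("0x404000 -> %s\n", f ? f->path : "(not found)");
    return 0;
}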
- return_if(!file->fIsKernel, - false, ("Internal error - resolveKernelVTable not kernel\n")); + return_if(file->fIsReloc, + false, ("Internal error - resolveKernelVTable is relocateable\n")); if (file->fNoKernelExecutable) { // Oh dear attempt to map the kernel's VM into my memory space @@ -1493,9 +1718,29 @@ static Boolean resolveKernelVTable(struct metaClassRecord *metaClass) curPatch = patchedVTable; curEntry = vtableEntries + kVTablePreambleLen; for (; *curEntry; curEntry++, curPatch++) { + void * addr = *curEntry; +#if !KERNEL + if (file->fSwapped) + addr = (void *) NXSwapLong((long) addr); +#endif curPatch->fSymbol = (struct nlist *) - findSymbolByAddress(file, *curEntry); - curPatch->fType = kSymbolLocal; + findSymbolByAddress(file, addr); + if (curPatch->fSymbol) + { + curPatch->fType = kSymbolLocal; + curPatch->fFile = file; + } + else + { + curPatch->fSymbol = (struct nlist *) + findSymbolByAddressInAllFiles(file, addr, &curPatch->fFile); + if (!curPatch->fSymbol) { + errprintf("%s: !findSymbolByAddressInAllFiles(%p)\n", + file->fPath, addr); + return false; + } + curPatch->fType = kSymbolLocal; + } } // Tag the end of the patch vtable @@ -1575,12 +1820,28 @@ getNewSymbol(struct fileRecord *file, } } - // Assert that this is a vaild symbol. I need this condition to be true - // for the later code to make non-zero. So the first time through I'd - // better make sure that it is 0. - return_if(reloc->fSymbol->n_sect, NULL, - ("Undefined symbol entry with non-zero section %s:%s\n", - file->fPath, symbolname(file, reloc->fSymbol))); + if (reloc->fSymbol->n_un.n_strx >= 0) { + // This symbol has not been previously processed, so assert that it + // is a valid non-local symbol. I need this condition to be true for + // the later code to set to -1. Now, being the first time through, + // I'd better make sure that n_sect is NO_SECT. + + return_if(reloc->fSymbol->n_sect != NO_SECT, NULL, + ("Undefined symbol entry with non-zero section %s:%s\n", + file->fPath, symbolname(file, reloc->fSymbol))); + + // Mark the original symbol entry as having been processed. + // This means that we wont attempt to create the symbol again + // in the future if we come through a different path. + ((struct nlist *) reloc->fSymbol)->n_un.n_strx = + -reloc->fSymbol->n_un.n_strx; + + // Mark the old symbol as being potentially deletable I can use the + // n_sect field as the input symbol must be of type N_UNDF which means + // that the n_sect field must be set to NO_SECT otherwise it is an + // invalid input file. + ((struct nlist *) reloc->fSymbol)->n_sect = (unsigned char) -1; + } // If we are here we didn't find the symbol so create a new one now msym = (struct nlist *) malloc(sizeof(struct nlist)); @@ -1592,6 +1853,7 @@ getNewSymbol(struct fileRecord *file, newStr = addNewString(file, supername, strlen(supername)); if (!newStr) return NULL; + // If we are here we didn't find the symbol so create a new one now return_if(!DataAppendBytes(file->fSym2Strings, &newStr, sizeof(newStr)), NULL, ("Unable to grow symbol table for %s\n", file->fPath)); @@ -1605,20 +1867,6 @@ getNewSymbol(struct fileRecord *file, msym->n_desc = 0; msym->n_value = (unsigned long) newStr; - // Mark the old symbol as being potentially deletable I can use the - // n_sect field as the input symbol must be of type N_UNDF which means - // that the n_sect field must be set to NO_SECT otherwise it is an - // invalid input file. - // - // However the symbol may have been just inserted by the fixOldSymbol path. 
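The n_strx bookkeeping above leans on a sign-bit trick: a live string-table offset is never negative, so negating it marks the symbol as processed without extra storage, and n_sect is recycled as a "potentially deletable" flag (0xff), which is safe because an N_UNDF input symbol must carry NO_SECT anyway. A toy version of the trick, with a simplified nlist (field names follow <mach-o/nlist.h>):

#include <stdio.h>

struct toy_nlist { long n_strx; unsigned char n_sect; };

static void mark_processed(struct toy_nlist *sym)
{
    if (sym->n_strx >= 0) {                /* first visit only */
        sym->n_strx = -sym->n_strx;        /* flip sign: processed */
        sym->n_sect = (unsigned char) -1;  /* 0xff: potentially deletable */
    }
    /* note: offset 0 cannot be tagged this way; Mach-O reserves string
     * offset 0 for nameless symbols, so nothing is lost */
}

static long real_strx(const struct toy_nlist *sym)
{
    return sym->n_strx < 0 ? -sym->n_strx : sym->n_strx;
}

int main(void)
{
    struct toy_nlist sym = { 42, 0 };
    mark_processed(&sym);
    mark_processed(&sym);                  /* second call is a no-op */
    printf("strx=%ld sect=0x%x\n", real_strx(&sym), sym.n_sect);
    return 0;
}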
- // If this is the case then we know it is in use and we don't have to - // mark it as a deletable symbol. - if (reloc->fSymbol->n_un.n_strx >= 0) { - ((struct nlist *) reloc->fSymbol)->n_un.n_strx - = -reloc->fSymbol->n_un.n_strx; - ((struct nlist *) reloc->fSymbol)->n_sect = (unsigned char) -1; - } - rinfo->r_symbolnum = i + file->fSymtab->nsyms; file->fSymbolsDirty = true; return msym; @@ -1708,13 +1956,17 @@ static Boolean patchVTable(struct metaClassRecord *metaClass) file = metaClass->fFile; - // If the metaClass we are being to ask is in the kernel then we - // need to do a quick scan to grab the fPatchList in a reliable format - // however we don't need to check the superclass in the kernel - // as the kernel vtables are always correct wrt themselves. - // Note this ends the superclass chain recursion. - return_if(file->fIsKernel, - false, ("Internal error - patchVTable shouldn't used for kernel\n")); + if (!file->fIsReloc) + { + // If the metaClass we are being to ask is already relocated then we + // need to do a quick scan to grab the fPatchList in a reliable format + // however we don't need to check the superclass in the already linked + // modules as the vtables are always correct wrt themselves. + // Note this ends the superclass chain recursion. + Boolean res; + res = resolveKernelVTable(metaClass); + return res; + } if (!metaClass->fSuperName) return false; @@ -1728,11 +1980,7 @@ static Boolean patchVTable(struct metaClassRecord *metaClass) // Superclass recursion if necessary if (!super->fPatchedVTable) { Boolean res; - - if (super->fFile->fIsKernel) - res = resolveKernelVTable(super); - else - res = patchVTable(super); + res = patchVTable(super); if (!res) return false; } @@ -1776,7 +2024,7 @@ static Boolean patchVTable(struct metaClassRecord *metaClass) for ( ; spp->fSymbol; curReloc++, spp++, curPatch++) { const char *supername = - symbolname(super->fFile, spp->fSymbol); + symbolname(spp->fFile, spp->fSymbol); symbol = (struct nlist *) (*curReloc)->fSymbol; @@ -1807,6 +2055,7 @@ static Boolean patchVTable(struct metaClassRecord *metaClass) if (symbol) { curPatch->fSymbol = symbol; (*curReloc)->fSymbol = symbol; + curPatch->fFile = file; } else goto abortPatch; @@ -1818,6 +2067,7 @@ static Boolean patchVTable(struct metaClassRecord *metaClass) // Local reloc symbols curPatch->fType = kSymbolLocal; curPatch->fSymbol = (struct nlist *) (*curReloc)->fSymbol; + curPatch->fFile = file; } // Tag the end of the patch vtable @@ -1853,13 +2103,13 @@ static Boolean growImage(struct fileRecord *file, vm_size_t delta) endMap = (vm_address_t) file->fMap + file->fMapSize; // Do we have room in the current mapped image - if (endMachO < round_page(endMap)) { + if (endMachO < round_page_32(endMap)) { file->fMachOSize += delta; return true; } newsize = endMachO - startMachO; - if (newsize < round_page(file->fMapSize)) { + if (newsize < round_page_32(file->fMapSize)) { DEBUG_LOG(("Growing image %s by moving\n", file->fPath)); // We have room in the map if we shift the macho image within the @@ -1979,8 +2229,15 @@ prepareFileForLink(struct fileRecord *file) // If we didn't even do a pseudo 'relocate' and dirty the image // then we can just return now. 
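The reshaped recursion in the patchVTable() hunk above is easy to miss in diff form: the base case widens from "is the kernel" to "is any already-linked image", handled by delegating to resolveKernelVTable(), so superclass chains recurse until they hit linked code. A toy model of that recursion and its base case:

#include <stdio.h>

struct toy_class {
    const char *name;
    struct toy_class *super;   /* NULL only for the root class */
    int is_reloc;              /* still-relocatable (unlinked) object? */
    int patched;
};

/* Sketch of the recursion: already-linked images are the base case
 * (their vtables are trusted as-is); everything else patches its
 * superclass first, then itself. */
static int patch_vtable(struct toy_class *c)
{
    if (!c->is_reloc) {        /* resolveKernelVTable() stands here */
        c->patched = 1;
        return 1;
    }
    if (c->super && !c->super->patched && !patch_vtable(c->super))
        return 0;
    printf("patching %s against %s\n", c->name,
           c->super ? c->super->name : "(none)");
    c->patched = 1;
    return 1;
}

int main(void)
{
    struct toy_class osobject = { "OSObject", NULL, 0, 0 };
    struct toy_class driver   = { "com_example_Driver", &osobject, 1, 0 };
    return patch_vtable(&driver) ? 0 : 1;
}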
- if (!file->fImageDirty) + if (!file->fImageDirty) { +#if !KERNEL + if (file->fSwapped) { + kld_macho_unswap((struct mach_header *) file->fMachO, file->fSwapped, false); + file->fSwapped = false; + } +#endif return true; + } DEBUG_LOG(("Linking 2 %s\n", file->fPath)); // @@@ gvdl: @@ -2025,8 +2282,15 @@ DEBUG_LOG(("Linking 2 %s\n", file->fPath)); // @@@ gvdl: file->fImageDirty = false; // Image is clean // If we didn't dirty the symbol table then just return - if (!file->fSymbolsDirty) + if (!file->fSymbolsDirty) { +#if !KERNEL + if (file->fSwapped) { + kld_macho_unswap((struct mach_header *) file->fMachO, file->fSwapped, false); + file->fSwapped = false; + } +#endif return true; + } // calculate total file size increase and check against padding if (file->fNewSymbols) { @@ -2092,8 +2356,14 @@ DEBUG_LOG(("Linking 2 %s\n", file->fPath)); // @@@ gvdl: } // Don't need the new strings any more - last = DataGetLength(file->fNewStringBlocks) / sizeof(DataRef); - stringBlocks = (DataRef *) DataGetPtr(file->fNewStringBlocks); + if (file->fNewStringBlocks){ + last = DataGetLength(file->fNewStringBlocks) / sizeof(DataRef); + stringBlocks = (DataRef *) DataGetPtr(file->fNewStringBlocks); + } + else{ + last =0; + stringBlocks=0; + } for (i = 0; i < last; i++) DataRelease(stringBlocks[i]); @@ -2138,7 +2408,12 @@ DEBUG_LOG(("Linking 2 %s\n", file->fPath)); // @@@ gvdl: } file->fSymbolsDirty = false; - +#if !KERNEL + if (file->fSwapped) { + kld_macho_unswap((struct mach_header *) file->fMachO, file->fSwapped, false); + file->fSwapped = false; + } +#endif return true; } @@ -2176,6 +2451,7 @@ kld_file_map(const char *pathName) struct load_command c[1]; } *machO; const struct load_command *cmd; + boolean_t lookVMRange; int i; if (!findBestArch(&file, pathName)) @@ -2185,22 +2461,38 @@ kld_file_map(const char *pathName) if (file.fMachOSize < machO->h.sizeofcmds) break; - file.fIsKernel = (MH_EXECUTE == machO->h.filetype); - // If the file type is MH_EXECUTE then this must be a kernel // as all Kernel extensions must be of type MH_OBJECT - for (i = 0, cmd = &machO->c[0]; i < machO->h.ncmds; i++) { + file.fIsKernel = (MH_EXECUTE == machO->h.filetype); + + for (i = 0, cmd = &machO->c[0], lookVMRange = true; i < machO->h.ncmds; i++) { if (cmd->cmd == LC_SYMTAB) file.fSymtab = (struct symtab_command *) cmd; else if (cmd->cmd == LC_SEGMENT) { struct segment_command *seg = (struct segment_command *) cmd; int nsects = seg->nsects; + if (lookVMRange) { + if (!strcmp("__PRELINK", seg->segname)) + // segments following __PRELINK are going to move, so ignore them + lookVMRange = false; + else if (!file.fVMAddr && !file.fVMEnd) { + file.fVMAddr = seg->vmaddr; + file.fVMEnd = seg->vmaddr + seg->vmsize; + } else { + if (seg->vmaddr < file.fVMAddr) + file.fVMAddr = seg->vmaddr; + if ((seg->vmaddr + seg->vmsize) > file.fVMEnd) + file.fVMEnd = seg->vmaddr + seg->vmsize; + } + } + if (nsects) return_if(!parseSegments(&file, seg), false, ("%s isn't a valid mach-o, bad segment", pathName)); - else if (file.fIsKernel) { + + if (file.fIsKernel) { #if KERNEL // We don't need to look for the LinkEdit segment unless // we are running in the kernel environment. 
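The kld_file_map() hunk above computes the file's overall VM range by folding min/max over its LC_SEGMENT commands, and stops widening once __PRELINK is seen, since segments after it will move. A self-contained sketch of that fold, with invented segment values:

#include <stdio.h>
#include <string.h>

struct toy_seg { char segname[16]; unsigned long vmaddr, vmsize; };

/* Mirrors the fVMAddr/fVMEnd accumulation, including its zero-initialized
 * "not yet set" check, which matches the patch's !fVMAddr && !fVMEnd test. */
static void vm_range(const struct toy_seg *segs, int nsegs,
                     unsigned long *addr, unsigned long *end)
{
    int look = 1;
    *addr = *end = 0;
    for (int i = 0; i < nsegs && look; i++) {
        if (!strcmp(segs[i].segname, "__PRELINK")) { look = 0; continue; }
        unsigned long lo = segs[i].vmaddr, hi = lo + segs[i].vmsize;
        if (!*addr && !*end) { *addr = lo; *end = hi; }
        else {
            if (lo < *addr) *addr = lo;
            if (hi > *end)  *end = hi;
        }
    }
}

int main(void)
{
    struct toy_seg segs[] = {
        { "__TEXT",    0x1000, 0x3000 },
        { "__DATA",    0x4000, 0x1000 },
        { "__PRELINK", 0x9000, 0x1000 },   /* ignored, ends the scan */
    };
    unsigned long a, e;
    vm_range(segs, 3, &a, &e);
    printf("range 0x%lx..0x%lx\n", a, e);  /* expect 0x1000..0x5000 */
    return 0;
}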
@@ -2209,12 +2501,33 @@ kld_file_map(const char *pathName) #endif } } - cmd = (struct load_command *) ((UInt8 *) cmd + cmd->cmdsize); } break_if(!file.fSymtab, ("%s isn't a valid mach-o, no symbols\n", pathName)); + if (machO->h.flags & MH_INCRLINK) { + + file.fIsIncrLink = true; + ((struct machOMapping *) machO)->h.flags &= ~MH_INCRLINK; + +#if !KERNEL + // the symtab fileoffset is the end of seg0's vmsize, + // which can be (rarely) unaligned. + unsigned int + align = file.fSymtab->symoff % sizeof(long); + if (align != 0) { + align = sizeof(long) - align; + growImage(&file, align); + bcopy(file.fMachO + file.fSymtab->symoff, + file.fMachO + file.fSymtab->symoff + align, + file.fSymtab->stroff + file.fSymtab->strsize - file.fSymtab->symoff); + file.fSymtab->symoff += align; + file.fSymtab->stroff += align; + } +#endif + } + if (!parseSymtab(&file, pathName)) break; diff --git a/libsa/kmod.cpp b/libsa/kmod.cpp index 301fa5db2..0c2bca87d 100644 --- a/libsa/kmod.cpp +++ b/libsa/kmod.cpp @@ -62,8 +62,8 @@ kmod_start_or_stop( extern kern_return_t kmod_retain(kmod_t id); extern kern_return_t kmod_release(kmod_t id); -extern void flush_dcache(vm_offset_t addr, unsigned cnt, int phys); -extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys); +extern void flush_dcache64(addr64_t addr, unsigned cnt, int phys); +extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys); }; @@ -170,6 +170,72 @@ bool verifyCompatibility(OSString * extName, OSString * requiredVersion) return true; } +/********************************************************************* +*********************************************************************/ +static +Boolean kextIsADependency(OSString * name) { + Boolean result = true; + OSDictionary * extensionsDict = 0; // don't release + OSDictionary * extDict = 0; // don't release + OSDictionary * extPlist = 0; // don't release + OSBoolean * isKernelResourceObj = 0; // don't release + OSData * driverCode = 0; // don't release + OSData * compressedCode = 0; // don't release + + extensionsDict = getStartupExtensions(); + if (!extensionsDict) { + IOLog("kextIsADependency(): No extensions dictionary.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + + extDict = OSDynamicCast(OSDictionary, + extensionsDict->getObject(name)); + if (!extDict) { + IOLog("kextIsADependency(): " + "Extension \"%s\" cannot be found.\n", + name->getCStringNoCopy()); + LOG_DELAY(); + result = false; + goto finish; + } + + extPlist = OSDynamicCast(OSDictionary, extDict->getObject("plist")); + if (!extPlist) { + IOLog("getDependencyListForKmod(): " + "Extension \"%s\" has no property list.\n", + name->getCStringNoCopy()); + LOG_DELAY(); + result = false; + goto finish; + } + + /* A kext that is a kernel component is still a dependency, as there + * are fake kmod entries for them. + */ + isKernelResourceObj = OSDynamicCast(OSBoolean, + extPlist->getObject("OSKernelResource")); + if (isKernelResourceObj && isKernelResourceObj->isTrue()) { + result = true; + goto finish; + } + + driverCode = OSDynamicCast(OSData, extDict->getObject("code")); + compressedCode = OSDynamicCast(OSData, + extDict->getObject("compressedCode")); + + if (!driverCode && !compressedCode) { + result = false; + goto finish; + } + +finish: + + return result; +} + /********************************************************************* * This function builds a uniqued, in-order list of modules that need * to be loaded in order for kmod_name to be successfully loaded. 
This @@ -185,9 +251,6 @@ OSArray * getDependencyListForKmod(const char * kmod_name) { OSDictionary * extPlist; // don't release OSString * extName; // don't release OSArray * dependencyList = NULL; // return value, caller releases - OSBoolean * isKernelResourceObj = 0; // don't release - bool isKernelResource = false; - bool declaresExecutable = false; unsigned int i; /* These are used to remove duplicates from the dependency list. @@ -258,27 +321,6 @@ OSArray * getDependencyListForKmod(const char * kmod_name) { goto finish; } - /* A kext that's not a kernel extension and declares no executable has nothing - * to load, so just return an empty array. - */ - isKernelResourceObj = OSDynamicCast(OSBoolean, - extPlist->getObject("OSKernelResource")); - if (isKernelResourceObj && isKernelResourceObj->isTrue()) { - isKernelResource = true; - } else { - isKernelResource = false; - } - - if (extPlist->getObject("CFBundleExecutable")) { - declaresExecutable = true; - } else { - declaresExecutable = false; - } - - if (!isKernelResource && !declaresExecutable) { - error = 0; - goto finish; - } /* Okay, let's get started. */ @@ -371,28 +413,6 @@ OSArray * getDependencyListForKmod(const char * kmod_name) { goto finish; } - /* Don't add any entries that are not kernel resources and that declare no - * executable. Such kexts have nothing to load and so don't belong in the - * dependency list. Entries that are kernel resource *do* get added, - * however, because such kexts get fake kmod entries for reference counting. - */ - isKernelResourceObj = OSDynamicCast(OSBoolean, - curExtPlist->getObject("OSKernelResource")); - if (isKernelResourceObj && isKernelResourceObj->isTrue()) { - isKernelResource = true; - } else { - isKernelResource = false; - } - if (curExtPlist->getObject("CFBundleExecutable")) { - declaresExecutable = true; - } else { - declaresExecutable = false; - } - - if (!isKernelResource && !declaresExecutable) { - continue; - } - dependencyList->setObject(curDepName); } @@ -429,7 +449,9 @@ OSArray * getDependencyListForKmod(const char * kmod_name) { /* Go backward through the original list, using the encounteredNames * dictionary to check for duplicates. We put originalList in as the - * value because we need some non-NULL value. + * value because we need some non-NULL value. Here we also drop any + * extensions that aren't proper dependencies (that is, any that are + * nonkernel kexts without code). */ i = originalList->getCount(); @@ -440,7 +462,9 @@ OSArray * getDependencyListForKmod(const char * kmod_name) { OSString * item = OSDynamicCast(OSString, originalList->getObject(i)); - if ( ! 
encounteredNames->getObject(item) ) { + if ( (!encounteredNames->getObject(item)) && + kextIsADependency(item)) { + encounteredNames->setObject(item, originalList); dependencyList->setObject(item); } @@ -525,7 +549,7 @@ unsigned long address_for_loaded_kmod( return 0; } - round_headers_size = round_page(headers_size); + round_headers_size = round_page_32(headers_size); headers_pad = round_headers_size - headers_size; link_load_address = (unsigned long)g_current_kmod_info->address + @@ -561,8 +585,8 @@ unsigned long alloc_for_kmod( unsigned long round_size; unsigned long headers_pad; - round_headers_size = round_page(headers_size); - round_segments_size = round_page(size - headers_size); + round_headers_size = round_page_32(headers_size); + round_segments_size = round_page_32(size - headers_size); round_size = round_headers_size + round_segments_size; headers_pad = round_headers_size - headers_size; @@ -996,7 +1020,7 @@ kern_return_t load_kmod(OSArray * dependencyList) { // bcopy() is (from, to, length) bcopy((char *)kmod_header, (char *)link_buffer_address, link_header_size); bcopy((char *)kmod_header + link_header_size, - (char *)link_buffer_address + round_page(link_header_size), + (char *)link_buffer_address + round_page_32(link_header_size), link_load_size - link_header_size); @@ -1024,13 +1048,13 @@ kern_return_t load_kmod(OSArray * dependencyList) { */ kmod_info->address = link_buffer_address; kmod_info->size = link_buffer_size; - kmod_info->hdr_size = round_page(link_header_size); + kmod_info->hdr_size = round_page_32(link_header_size); /* We've written data and instructions, so *flush* the data cache * and *invalidate* the instruction cache. */ - flush_dcache(link_buffer_address, link_buffer_size, false); - invalidate_icache(link_buffer_address, link_buffer_size, false); + flush_dcache64((addr64_t)link_buffer_address, link_buffer_size, false); + invalidate_icache64((addr64_t)link_buffer_address, link_buffer_size, false); /* Register the new kmod with the kernel proper. @@ -1047,7 +1071,7 @@ kern_return_t load_kmod(OSArray * dependencyList) { IOLog("kmod id %d successfully created at 0x%lx, size %ld.\n", (unsigned int)kmod_id, link_buffer_address, link_buffer_size); LOG_DELAY(); -#endif DEBUG +#endif /* DEBUG */ /* Record dependencies for the newly-loaded kmod. */ @@ -1082,7 +1106,7 @@ kern_return_t load_kmod(OSArray * dependencyList) { finish: if (kmod_info_freeme) { - kfree(kmod_info_freeme, sizeof(kmod_info_t)); + kfree((unsigned int)kmod_info_freeme, sizeof(kmod_info_t)); } /* Only do a kld_unload_all() if at least one load happened. 
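The cache calls above deserve a pause: after the linked code is bcopy()ed into its final buffer, the data cache is flushed and then the instruction cache invalidated, in that order, so the CPU cannot fetch stale bytes; this hunk also widens the primitives to 64-bit addresses. A sketch of the ordering, with no-op stubs in place of the machine-dependent flush_dcache64/invalidate_icache64:

#include <string.h>

typedef unsigned long long addr64_t;

/* No-op stubs standing in for the machine-dependent kernel primitives. */
static void flush_dcache64(addr64_t a, unsigned c, int p) { (void)a; (void)c; (void)p; }
static void invalidate_icache64(addr64_t a, unsigned c, int p) { (void)a; (void)c; (void)p; }

/* Publish freshly written code: push the writes out of the data cache
 * first, then invalidate the instruction cache. Reversing the order could
 * let the I-cache refill from memory that still holds old bytes. */
static void publish_code(void *dst, const void *src, unsigned len)
{
    memcpy(dst, src, len);
    flush_dcache64((addr64_t)(unsigned long)dst, len, 0);
    invalidate_icache64((addr64_t)(unsigned long)dst, len, 0);
}

int main(void)
{
    char src[16] = "machine code";
    char dst[16];
    publish_code(dst, src, sizeof(src));
    return 0;
}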
@@ -1100,7 +1124,7 @@ finish: if (kmod_dependencies) { for (i = 0; i < num_dependencies; i++) { if (kmod_dependencies[i]) { - kfree(kmod_dependencies[i], sizeof(kmod_info_t)); + kfree((unsigned int)kmod_dependencies[i], sizeof(kmod_info_t)); } } kfree((unsigned int)kmod_dependencies, @@ -1194,7 +1218,7 @@ kern_return_t load_kernel_extension(char * kmod_name) { finish: if (kmod_info) { - kfree(kmod_info, sizeof(kmod_info_t)); + kfree((unsigned int)kmod_info, sizeof(kmod_info_t)); } if (dependencyList) { diff --git a/libsa/libsa/catalogue.h b/libsa/libsa/catalogue.h index e21a0d22d..0eb1e8200 100644 --- a/libsa/libsa/catalogue.h +++ b/libsa/libsa/catalogue.h @@ -3,6 +3,7 @@ extern bool addExtensionsFromArchive(OSData * mkext); extern void removeStartupExtension(const char * extensionName); extern OSDictionary * getStartupExtensions(void); +extern OSArray * getPrelinkedModules(void); extern void clearStartupExtensionsAndLoaderInfo(void); diff --git a/libsa/libsa/kmod.h b/libsa/libsa/kext.h similarity index 76% rename from libsa/libsa/kmod.h rename to libsa/libsa/kext.h index 5b31356d8..f734469df 100644 --- a/libsa/libsa/kmod.h +++ b/libsa/libsa/kext.h @@ -1,6 +1,6 @@ #ifdef __cplusplus extern "C" { -#endif __cplusplus +#endif /* __cplusplus */ #include @@ -8,4 +8,4 @@ __private_extern__ kern_return_t load_kernel_extension(char * kmod_name); #ifdef __cplusplus }; -#endif __cplusplus +#endif /* __cplusplus */ diff --git a/libsa/libsa/malloc.h b/libsa/libsa/malloc.h index 44638f139..0f70d2a39 100644 --- a/libsa/libsa/malloc.h +++ b/libsa/libsa/malloc.h @@ -1,8 +1,10 @@ #ifndef _LIBSA_MALLOC_H_ #define _LIBSA_MALLOC_H_ +#include #include "stdlib.h" +__BEGIN_DECLS /***** * These functions are the minimum necessary for use @@ -12,6 +14,7 @@ void * malloc(size_t size); void * realloc(void * address, size_t new_size); void free(void * address); +void malloc_init(void); void malloc_reset(void); // Destroy all memory regions @@ -39,4 +42,6 @@ int malloc_sanity_check(void); #endif /* DEBUG */ #endif /* 0 */ +__END_DECLS + #endif /* defined _LIBSA_MALLOC_H_ */ diff --git a/libsa/libsa/stdlib.h b/libsa/libsa/stdlib.h index 56dbcbd4d..6b2c48a07 100644 --- a/libsa/libsa/stdlib.h +++ b/libsa/libsa/stdlib.h @@ -1,6 +1,7 @@ #ifndef _LIBSA_STDLIB_H_ #define _LIBSA_STDLIB_H_ +#include #ifndef _BSD_SIZE_T_DEFINED_ #define _BSD_SIZE_T_DEFINED_ @@ -15,6 +16,9 @@ typedef __SIZE_TYPE__ size_t; __private_extern__ char *kld_basefile_name; +__BEGIN_DECLS + + __private_extern__ void * malloc(size_t size); __private_extern__ void free(void * address); __private_extern__ void free_all(void); // "Free" all memory blocks @@ -42,4 +46,6 @@ __private_extern__ void * bsearch( extern long strtol(const char *, char **, int); extern unsigned long strtoul(const char *, char **, int); +__END_DECLS + #endif /* _LIBSA_STDLIB_H_ */ diff --git a/libsa/libsa/vers_rsrc.h b/libsa/libsa/vers_rsrc.h index dde8efa71..9ef27afa0 100644 --- a/libsa/libsa/vers_rsrc.h +++ b/libsa/libsa/vers_rsrc.h @@ -1,29 +1,24 @@ #ifndef _LIBSA_VERS_H_ #define _LIBSA_VERS_H_ -#include - -typedef union { - UInt32 vnum; - UInt8 bytes[4]; -} VERS_version; +#ifdef __cplusplus +extern "C" { +#endif -typedef enum { - VERS_development = 0x20, - VERS_alpha = 0x40, - VERS_beta = 0x60, - VERS_candidate = 0x70, // for interim usage only! 
- VERS_release = 0x80, - VERS_invalid = 0xff -} VERS_revision; - -#define BCD_combine(l, r) ( (((l) & 0xf) << 4) | ((r) & 0xf) ) -#define BCD_get_left(p) ( ((p) >> 4) & 0xf ) -#define BCD_get_right(p) ( (p) & 0xf ) +#ifndef KERNEL +#include +#include +#include +#else +#include +#endif KERNEL -#define BCD_illegal (0xff) // full byte, 11111111 +typedef SInt64 VERS_version; +VERS_version VERS_parse_string(const char * vers_string); +int VERS_string(char * buffer, UInt32 length, VERS_version vers); -int VERS_parse_string(const char * vers_string, UInt32 * version_num); -int VERS_string(char * buffer, UInt32 length, UInt32 vers); +#ifdef __cplusplus +} +#endif -#endif /* _LIBSA_VERS_H_ */ +#endif _LIBSA_VERS_H_ diff --git a/libsa/load.c b/libsa/load.c new file mode 100644 index 000000000..b9c703d99 --- /dev/null +++ b/libsa/load.c @@ -0,0 +1,2749 @@ +/*************** +* HEADERS +***************/ +#ifndef KERNEL + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vers_rsrc.h" + +#else + +#include +#include +#include +#include + +#endif /* not KERNEL */ + +#include "load.h" +#include "dgraph.h" +#include "kld_patch.h" + +/*************** +* MACROS +***************/ + +#ifndef KERNEL + +#define PRIV_EXT + +#else + +#define PRIV_EXT __private_extern__ + +#ifdef DEBUG +#define LOG_DELAY(x) IODelay((x) * 1000000) +#define VTYELLOW "\033[33m" +#define VTRESET "\033[0m" +#else +#define LOG_DELAY() +#define VTYELLOW +#define VTRESET +#endif /* DEBUG */ + +#endif /* not KERNEL */ + +/*************** +* FUNCTION PROTOS +***************/ + +#ifdef KERNEL +extern kern_return_t +kmod_create_internal( + kmod_info_t *info, + kmod_t *id); + +extern kern_return_t +kmod_destroy_internal(kmod_t id); + +extern kern_return_t +kmod_start_or_stop( + kmod_t id, + int start, + kmod_args_t *data, + mach_msg_type_number_t *dataCount); + +extern kern_return_t kmod_retain(kmod_t id); +extern kern_return_t kmod_release(kmod_t id); + +extern void flush_dcache(vm_offset_t addr, unsigned cnt, int phys); +extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys); +#endif /* KERNEL */ + + +// Used to pass info between kld library and callbacks +static dgraph_entry_t * G_current_load_entry = NULL; + +#ifndef KERNEL +static mach_port_t G_kernel_port = PORT_NULL; +static mach_port_t G_kernel_priv_port = PORT_NULL; +static int G_syms_only; + +static kload_error +register_prelink(dgraph_entry_t * entry, + kmod_info_t * local_kmod_info, vm_offset_t kernel_kmod_info); + +struct PrelinkState +{ + kmod_info_t modules[1]; +}; +struct PrelinkState * G_prelink; +CFMutableDataRef G_prelink_data; +CFMutableDataRef G_prelink_dependencies; + +#endif /* not KERNEL */ + +// used by dgraph.c so can't be static +kload_log_level log_level = 0; + +#ifndef KERNEL +static void __kload_null_log(const char * format, ...); +static void __kload_null_err_log(const char * format, ...); +static int __kload_null_approve(int default_answer, const char * format, ...); +static int __kload_null_veto(int default_answer, const char * format, ...); +static const char * __kload_null_input(const char * format, ...); + +void (*__kload_log_func)(const char * format, ...) = + &__kload_null_log; +void (*__kload_err_log_func)(const char * format, ...) = &__kload_null_err_log; +int (*__kload_approve_func)(int default_answer, + const char * format, ...) 
= &__kload_null_approve; +int (*__kload_veto_func)(int default_answer, + const char * format, ...) = &__kload_null_veto; +const char * (*__kload_input_func)(const char * format, ...) = + &__kload_null_input; +#endif /* not KERNEL */ + +static unsigned long __kload_linkedit_address( + unsigned long size, + unsigned long headers_size); +static void __kload_clean_up_entry(dgraph_entry_t * entry); +static void __kload_clear_kld_globals(void); +static kload_error __kload_patch_dgraph(dgraph_t * dgraph +#ifndef KERNEL + , + const char * kernel_file +#endif /* not KERNEL */ + ); +static kload_error __kload_load_modules(dgraph_t * dgraph +#ifndef KERNEL + , + const char * kernel_file, + const char * patch_file, const char * patch_dir, + const char * symbol_file, const char * symbol_dir, + int do_load, int do_start_kmod, int do_prelink, + int interactive_level, + int ask_overwrite_symbols, int overwrite_symbols +#endif /* not KERNEL */ + ); + +static kload_error __kload_check_module_loaded( + dgraph_t * dgraph, + dgraph_entry_t * entry, +#ifndef KERNEL + kmod_info_t * kmod_list, +#endif /* not KERNEL */ + int log_if_already); + +static kload_error __kload_load_module(dgraph_t * dgraph, + dgraph_entry_t * entry, + int is_root +#ifndef KERNEL + , + const char * symbol_file, + const char * symbol_dir, + int do_load, + int interactive_level, + int ask_overwrite_symbols, + int overwrite_symbols +#endif /* not KERNEL */ + ); +static kload_error __kload_set_module_dependencies(dgraph_entry_t * entry); +static kload_error __kload_start_module(dgraph_entry_t * entry); + +#ifndef KERNEL +static kload_error __kload_output_patches( + dgraph_t * dgraph, + const char * patch_file, + const char * patch_dir, + int ask_overwrite_symbols, + int overwrite_symbols); + +Boolean _IOReadBytesFromFile(CFAllocatorRef alloc, const char *path, void **bytes, + CFIndex *length, CFIndex maxLength); +Boolean _IOWriteBytesToFile(const char *path, const void *bytes, CFIndex length); + +#endif /* not KERNEL */ + +/******************************************************************************* +* +*******************************************************************************/ +PRIV_EXT +kload_error kload_load_dgraph(dgraph_t * dgraph +#ifndef KERNEL + , + const char * kernel_file, + const char * patch_file, const char * patch_dir, + const char * symbol_file, const char * symbol_dir, + int do_load, int do_start_kmod, int do_prelink, + int interactive_level, + int ask_overwrite_symbols, int overwrite_symbols +#endif /* not KERNEL */ + ) +{ + kload_error result = kload_error_none; + int one_has_address = 0; + int one_lacks_address = 0; + unsigned int i; +#ifndef KERNEL + int syms_only; + + syms_only = (!do_load) && (symbol_dir || symbol_file); + + if (log_level >= kload_log_level_load_details) { + kload_log_message("loading dependency graph:" KNL); + dgraph_log(dgraph); + } + + if (syms_only && log_level >= kload_log_level_load_details) { + kload_log_message("loading for symbol generation only" KNL); + } + + /***** + * If we're not loading and have no request to emit a symbol + * or patch file, there's nothing to do! + */ + if (!do_load && !symbol_dir && !symbol_file && + !patch_dir && !patch_file) { + + if (syms_only && log_level >= kload_log_level_load_details) { + kload_log_message("loader has no work to do" KNL); + } + + result = kload_error_none; // fixme: should this be USAGE error? + goto finish; + } + + /***** + * If we're doing symbols only, then all entries in the dgraph must + * have addresses assigned, or none must. 
+ */ + if (syms_only) { + if (log_level >= kload_log_level_load_details) { + kload_log_message("checking whether modules have addresses assigned" KNL); + } + for (i = 0; i < dgraph->length; i++) { + struct dgraph_entry_t * entry = dgraph->load_order[i]; + if (entry->is_kernel_component) { + continue; + } + if (entry->loaded_address != 0) { + one_has_address = 1; + } else { + one_lacks_address = 1; + } + } + } +#endif /* not KERNEL */ + + if (one_has_address && one_lacks_address) { + kload_log_error( + "either all modules must have addresses set to nonzero values or " + "none must" KNL); + result = kload_error_invalid_argument; + goto finish; + } + +#ifndef KERNEL + /* we need the priv port to check/load modules in the kernel. + */ + if (PORT_NULL == G_kernel_priv_port) { + G_kernel_priv_port = mach_host_self(); /* if we are privileged */ + } +#endif /* not KERNEL */ + +/***** + * In the kernel, ALWAYS get load addresses of existing loaded kmods. + */ +#ifndef KERNEL + /***** + * If we don't have addresses, then get them from the kernel. + */ + if (!one_has_address && !do_prelink && (do_load || symbol_file || symbol_dir)) { +#endif /* not KERNEL */ + if (log_level >= kload_log_level_load_details) { + kload_log_message("getting module addresses from kernel" KNL); + } +#ifndef KERNEL + result = kload_set_load_addresses_from_kernel(dgraph, kernel_file, + do_load); +#else + result = kload_set_load_addresses_from_kernel(dgraph); +#endif /* not KERNEL */ + if (result == kload_error_already_loaded) { + +#ifndef KERNEL + if (do_load) { + goto finish; + } +#else + goto finish; +#endif /* not KERNEL */ + + } else if (result != kload_error_none) { + kload_log_error("can't check load addresses of modules" KNL); + goto finish; + } +#ifndef KERNEL + } +#endif /* not KERNEL */ + +#ifndef KERNEL + /***** + * At this point, if we're doing symbols only, it's an error to not + * have a load address for every module. 
+ */ + if (syms_only && !do_prelink) { + if (log_level >= kload_log_level_load_details) { + kload_log_message("checking that all modules have addresses assigned" KNL); + } + for (i = 0; i < dgraph->length; i++) { + struct dgraph_entry_t * entry = dgraph->load_order[i]; + if (entry->is_kernel_component) { + continue; + } + if (!entry->loaded_address) { + kload_log_error( + "missing load address during symbol generation: %s" KNL, + entry->name); + result = kload_error_unspecified; + goto finish; + } + } + } + + if (do_prelink) + { + void * bytes; + CFIndex length; + CFAllocatorRef alloc; + + // We need a real allocator to pass to _IOReadBytesFromFile + alloc = CFRetain(CFAllocatorGetDefault()); + if (_IOReadBytesFromFile(alloc, "prelinkstate", &bytes, &length, 0)) + { + G_prelink_data = CFDataCreateMutable(alloc, 0); + CFDataAppendBytes(G_prelink_data, (UInt8 *) bytes, length); + CFAllocatorDeallocate(alloc, bytes); + } + G_prelink_dependencies = CFDataCreateMutable(alloc, 0); + if (_IOReadBytesFromFile(alloc, "prelinkdependencies", &bytes, &length, 0)) + { + CFDataAppendBytes(G_prelink_dependencies, (UInt8 *) bytes, length); + CFAllocatorDeallocate(alloc, bytes); + } + CFRelease(alloc); + + if (!G_prelink_data) { + kload_log_error( + "can't get load address for prelink %s" KNL, kernel_file); + result = kload_error_link_load; + goto finish; + } + else + G_prelink = (struct PrelinkState *) CFDataGetMutableBytePtr(G_prelink_data); + } + else + G_prelink = 0; +#endif /* not KERNEL */ + +#ifndef KERNEL + + result = __kload_load_modules(dgraph, kernel_file, + patch_file, patch_dir, symbol_file, symbol_dir, + do_load, do_start_kmod, do_prelink, interactive_level, + ask_overwrite_symbols, overwrite_symbols); +#else + result = __kload_load_modules(dgraph); +#endif /* not KERNEL */ + +finish: + +#ifndef KERNEL + /* Dispose of the host port to prevent security breaches and port + * leaks. We don't care about the kern_return_t value of this + * call for now as there's nothing we can do if it fails. + */ + if (PORT_NULL != G_kernel_priv_port) { + mach_port_deallocate(mach_task_self(), G_kernel_priv_port); + G_kernel_priv_port = PORT_NULL; + } +#endif /* not KERNEL */ + + for (i = 0; i < dgraph->length; i++) { + dgraph_entry_t * current_entry = dgraph->graph[i]; + __kload_clean_up_entry(current_entry); + } + +#ifndef KERNEL + if (G_prelink) + { + SInt32 length; + const void * bytes; + Boolean success; + + length = CFDataGetLength(G_prelink_data); + bytes = (0 == length) ? (const void *)"" : CFDataGetBytePtr(G_prelink_data); + success = _IOWriteBytesToFile("prelinkstate", bytes, length); + if (!success) + { + kload_log_error("write prelinkstate" KNL); + result = kload_error_link_load; + } + length = CFDataGetLength(G_prelink_dependencies); + bytes = (0 == length) ? (const void *)"" : CFDataGetBytePtr(G_prelink_dependencies); + success = _IOWriteBytesToFile("prelinkdependencies", bytes, length); + if (!success) + { + kload_log_error("write prelinkdependencies" KNL); + result = kload_error_link_load; + } + } +#endif /* not KERNEL */ + + return result; +} + +#ifndef KERNEL +/******************************************************************************* +* This function claims the option flags d and D for object file dependencies +* and in-kernel dependencies, respectively. 
+*******************************************************************************/ +kload_error kload_load_with_arglist( + int argc, char **argv, + const char * kernel_file, + const char * patch_file, const char * patch_dir, + const char * symbol_file, const char * symbol_dir, + int do_load, int do_start_kmod, + int interactive_level, + int ask_overwrite_symbols, int overwrite_symbols) +{ + kload_error result = kload_error_none; + dgraph_error_t dgraph_result; + int syms_only = (!do_load) && (symbol_file || symbol_dir); + + static dgraph_t dependency_graph; + + /* Zero out fields in dependency graph for proper error handling later. + */ + bzero(&dependency_graph, sizeof(dependency_graph)); + + dgraph_result = dgraph_init_with_arglist(&dependency_graph, + syms_only, "-d", "-D", argc, argv); + if (dgraph_result == dgraph_error) { + kload_log_error("error processing dependency list" KNL); + result = kload_error_unspecified; + goto finish; + } else if (dgraph_result == dgraph_invalid) { + // anything to print here, or did init call print something? + result = kload_error_invalid_argument; + goto finish; + } + + result = kload_load_dgraph(&dependency_graph, kernel_file, + patch_file, patch_dir, symbol_file, symbol_dir, + do_load, do_start_kmod, false /* do_prelink */, interactive_level, + ask_overwrite_symbols, overwrite_symbols); + +finish: + return result; +} +#endif /* not KERNEL */ +/******************************************************************************* +* +*******************************************************************************/ +static +kload_error __kload_keep_symbols(dgraph_entry_t * entry) +{ + struct mach_header * hdr; + struct segment_command * seg; + struct nlist * sym; + struct symtab_command * symcmd; + unsigned long idx, ncmds; + vm_size_t size; + vm_address_t mem; + + if (entry->symbols) + return kload_error_none; + + hdr = entry->linked_image; + ncmds = hdr->ncmds; + seg = (struct segment_command *)(hdr + 1); + for (idx = 0; + idx < ncmds; + idx++, seg = (struct segment_command *)(((vm_offset_t)seg) + seg->cmdsize)) + { + if (LC_SYMTAB == seg->cmd) + break; + } + if (idx >= ncmds) + { + kload_log_error("no LC_SYMTAB" KNL); + return kload_error_unspecified; + } + + symcmd = (struct symtab_command *) seg; + + struct load_cmds { + struct mach_header hdr; + struct segment_command seg; + struct symtab_command symcmd; + }; + struct load_cmds * cmd; + unsigned int symtabsize; + + symtabsize = symcmd->stroff + symcmd->strsize - symcmd->symoff; + + size = sizeof(struct load_cmds) + symtabsize; + + mem = (vm_offset_t) malloc(size); + + cmd = (struct load_cmds *) mem; + sym = (struct nlist *) (cmd + 1); + + cmd->hdr = *hdr; + cmd->symcmd = *symcmd; + bcopy((const void *) (((vm_offset_t) hdr) + symcmd->symoff), + sym, + symtabsize); + + hdr = (struct mach_header *) mem; + cmd->hdr.ncmds = 2; + cmd->hdr.sizeofcmds = sizeof(struct load_cmds); + cmd->hdr.flags &= ~MH_INCRLINK; + + cmd->symcmd.stroff -= (symcmd->symoff - sizeof(struct load_cmds)); + cmd->symcmd.symoff = sizeof(struct load_cmds); + + cmd->seg.cmd = LC_SEGMENT; + cmd->seg.cmdsize = sizeof(struct segment_command); + strcpy(cmd->seg.segname, SEG_LINKEDIT); + cmd->seg.vmaddr = 0; + cmd->seg.vmsize = 0; + cmd->seg.fileoff = cmd->symcmd.symoff; + cmd->seg.filesize = symtabsize; + cmd->seg.maxprot = 7; + cmd->seg.initprot = 1; + cmd->seg.nsects = 0; + cmd->seg.flags = 0; + + sym = (struct nlist *) (cmd + 1); + for (idx = 0; idx < symcmd->nsyms; idx++, sym++) + { + if ( (sym->n_type & N_TYPE) == N_SECT) { + sym->n_sect = 
NO_SECT; + sym->n_type = (sym->n_type & ~N_TYPE) | N_ABS; + } + } + if (log_level >= kload_log_level_load_details) + { + kload_log_message("__kload_keep_symbols %s, nsyms %ld, 0x%x bytes" KNL, + entry->name, symcmd->nsyms, size); + } + + entry->symbols = mem; + entry->symbols_malloc = mem; + entry->symbols_length = size; + + return kload_error_none; +} + + +static +kload_error __kload_make_opaque_basefile(dgraph_t * dgraph, struct mach_header * hdr) +{ + struct segment_command * seg; + struct segment_command * data_seg; + struct segment_command * text_seg; + struct section * sec; + int j; + vm_offset_t offset; + unsigned long idx, ncmds; + vm_size_t size; + vm_address_t mem, out; + static vm_address_t keep_base_image; + static vm_size_t keep_base_size; + + if (dgraph->opaque_base_image) + return kload_error_none; + + if (keep_base_image) + { + dgraph->opaque_base_image = keep_base_image; + dgraph->opaque_base_length = keep_base_size; + return kload_error_none; + } + + data_seg = text_seg = NULL; + ncmds = hdr->ncmds; + seg = (struct segment_command *)(hdr + 1); + for (idx = 0; + idx < ncmds; + idx++, seg = (struct segment_command *)(((vm_offset_t)seg) + seg->cmdsize)) + { + if (LC_SEGMENT != seg->cmd) + continue; + if (!strcmp(SEG_TEXT, seg->segname)) + text_seg = seg; + else if (!strcmp(SEG_DATA, seg->segname)) + data_seg = seg; + } + if (!text_seg || !data_seg) + { + kload_log_error("no SEG_TEXT or SEG_DATA" KNL); + return kload_error_unspecified; + } + + size = sizeof(struct mach_header) + text_seg->cmdsize + data_seg->cmdsize; + mem = (vm_offset_t) malloc(size); + + out = mem; + bcopy(hdr, (void *) out, sizeof(struct mach_header)); + hdr = (struct mach_header *) out; + out += sizeof(struct mach_header); + + bcopy(text_seg, (void *) out, text_seg->cmdsize); + text_seg = (struct segment_command *) out; + out += text_seg->cmdsize; + + bcopy(data_seg, (void *) out, data_seg->cmdsize); + data_seg = (struct segment_command *) out; + out += data_seg->cmdsize; + + hdr->ncmds = 2; + hdr->sizeofcmds = text_seg->cmdsize + data_seg->cmdsize; + + offset = hdr->sizeofcmds; + + text_seg->fileoff = offset; + text_seg->filesize = 0; + + sec = (struct section *)(text_seg + 1); + for (j = 0; j < text_seg->nsects; j++, sec++) + { +// sec->addr = (unsigned long) addr; + sec->size = 0; + sec->offset = offset; + sec->nreloc = 0; + } + + data_seg->fileoff = offset; + data_seg->filesize = 0; + + sec = (struct section *)(data_seg + 1); + for (j = 0; j < data_seg->nsects; j++, sec++) + { +// sec->addr = (unsigned long) addr; + sec->size = 0; + sec->offset = offset; + sec->nreloc = 0; + } + + dgraph->opaque_base_image = mem; + dgraph->opaque_base_length = size; + keep_base_image = mem; + keep_base_size = size; + + return kload_error_none; +} + +/******************************************************************************* +* +*******************************************************************************/ +static +kload_error __kload_load_modules(dgraph_t * dgraph +#ifndef KERNEL + , + const char * kernel_file, + const char * patch_file, + const char * patch_dir, + const char * symbol_file, + const char * symbol_dir, + int do_load, + int do_start_kmod, + int do_prelink, + int interactive_level, + int ask_overwrite_symbols, + int overwrite_symbols +#endif /* not KERNEL */ + ) +{ + kload_error result = kload_error_none; +#ifndef KERNEL + long int kernel_size = 0; + kern_return_t mach_result = KERN_SUCCESS; +#endif /* not KERNEL */ + char * kernel_base_addr = 0; + int kld_result; + Boolean cleanup_kld_loader = 
false; + unsigned int i; + + /* We have to map all object files to get their CFBundleIdentifier + * names. + */ +#ifndef KERNEL + result = kload_map_dgraph(dgraph, kernel_file); +#else + result = kload_map_dgraph(dgraph); +#endif /* not KERNEL */ + if (result != kload_error_none) { + kload_log_error("error mapping object files" KNL); + goto finish; + } + +#ifndef KERNEL + result = __kload_patch_dgraph(dgraph, kernel_file); +#else + result = __kload_patch_dgraph(dgraph); +#endif /* not KERNEL */ + if (result != kload_error_none) { + // FIXME: print an error message here? + goto finish; + } + +#ifndef KERNEL + // FIXME: check error return + __kload_output_patches(dgraph, patch_file, patch_dir, + ask_overwrite_symbols, overwrite_symbols); + + /***** + * If we're not loading or writing symbols, we're done. + */ + if (!do_load && !do_prelink && !symbol_file && !symbol_dir) { + goto finish; + } + + if (do_load && PORT_NULL == G_kernel_port) { + mach_result = task_for_pid(mach_task_self(), 0, &G_kernel_port); + if (mach_result != KERN_SUCCESS) { + kload_log_error("unable to get kernel task port: %s" KNL, + mach_error_string(mach_result)); + kload_log_error("you must be running as root to load " + "modules into the kernel" KNL); + result = kload_error_kernel_permission; + goto finish; + } + } +#endif /* not KERNEL */ + + kld_address_func(&__kload_linkedit_address); + +#ifndef KERNEL + G_syms_only = (!do_load) && (symbol_file || symbol_dir || patch_dir); + + kernel_base_addr = kld_file_getaddr(kernel_file, &kernel_size); + if (!kernel_base_addr) { + kload_log_error( + "can't get load address for kernel %s" KNL, kernel_file); + result = kload_error_link_load; + goto finish; + } +#else /* KERNEL */ + + const char * kernel_file = "(kernel)"; + extern struct mach_header _mh_execute_header; + kernel_base_addr = (char *) &_mh_execute_header; + +#endif /* not KERNEL */ + + kld_result = true; + if (dgraph->has_symbol_sets) + { + result = __kload_make_opaque_basefile(dgraph, (struct mach_header *) kernel_base_addr); + if (result != kload_error_none) { + kload_log_error("can't construct opaque base image from %s" KNL, kernel_file); + goto finish; + } + + kld_result = kld_load_basefile_from_memory(kernel_file, + (char *) dgraph->opaque_base_image, + dgraph->opaque_base_length); + } +#ifndef KERNEL + else + kld_result = kld_load_basefile_from_memory(kernel_file, + (char *) kernel_base_addr, kernel_size); +#endif /* not KERNEL */ + + if (!kld_result) { + kload_log_error("can't link base image %s" KNL, kernel_file); + result = kload_error_link_load; + goto finish; + } + + cleanup_kld_loader = true; + bool opaque_now = false; + + for (i = 0; i < dgraph->length; i++) { + dgraph_entry_t * current_entry = dgraph->load_order[i]; + + opaque_now |= current_entry->opaque_link; + + if (opaque_now) + { + unsigned int k, j; + + if (log_level >= kload_log_level_load_details) + { + kload_log_message("opaque link for %s" KNL, current_entry->name); + } + + kld_set_link_options(KLD_STRIP_ALL); // KLD_STRIP_NONE + + if (dgraph->have_loaded_symbols) + { + kld_unload_all(1); + kld_result = kld_load_basefile_from_memory(kernel_file, + (char *) dgraph->opaque_base_image, + dgraph->opaque_base_length); + if (!kld_result) { + kload_log_error("can't link base image %s" KNL, kernel_file); + result = kload_error_link_load; + goto finish; + } + } + + dgraph->have_loaded_symbols = false; + + for (j = 0; j < dgraph->length; j++) + { + for (k = 0; + (k < current_entry->num_dependencies) + && (current_entry->dependencies[k] != 
dgraph->load_order[j]);
+                     k++) {}
+
+                if (k == current_entry->num_dependencies)
+                    continue;
+
+                dgraph_entry_t * image_dep = current_entry->dependencies[k];
+                if (!image_dep->symbols)
+                {
+                    kload_log_error("internal error; no dependent symbols" KNL);
+                    result = kload_error_link_load;
+                    goto finish;
+                }
+                else
+                {
+                    struct mach_header * kld_header;
+
+#ifndef KERNEL
+                    kld_result = kld_load_from_memory(&kld_header, image_dep->name,
+                        (char *) image_dep->symbols, image_dep->symbols_length, NULL);
+#else
+                    kld_result = kld_load_from_memory(&kld_header, image_dep->name,
+                        (char *) image_dep->symbols, image_dep->symbols_length);
+#endif /* not KERNEL */
+                    if (!kld_result) {
+                        kload_log_error("can't link dependent image %s" KNL, image_dep->name);
+                        result = kload_error_link_load;
+                        goto finish;
+                    }
+                    kld_forget_symbol("_kmod_info");
+                    dgraph->have_loaded_symbols = true;
+                }
+            }
+        } /* opaque_now */
+
+        if (dgraph->has_opaque_links
+#ifndef KERNEL
+            || symbol_file || symbol_dir
+#endif
+            )
+            kld_set_link_options(KLD_STRIP_NONE);
+        else
+            kld_set_link_options(KLD_STRIP_ALL);
+
+#ifndef KERNEL
+        result = __kload_load_module(dgraph, current_entry,
+            (current_entry == dgraph->root),
+            symbol_file, symbol_dir, do_load,
+            interactive_level, ask_overwrite_symbols, overwrite_symbols);
+#else
+        result = __kload_load_module(dgraph, current_entry,
+            (current_entry == dgraph->root));
+#endif /* not KERNEL */
+        if (result != kload_error_none) {
+            goto finish;
+        }
+
+        if (dgraph->has_opaque_links && (current_entry != dgraph->root))
+        {
+            result = __kload_keep_symbols(current_entry);
+            if (result != kload_error_none) {
+                kload_log_error("__kload_keep_symbols() failed for module %s" KNL,
+                    current_entry->name);
+                goto finish;
+            }
+        }
+
+#ifndef KERNEL
+        if (do_load && current_entry->do_load) {
+#else
+        if (current_entry->do_load) {
+#endif /* not KERNEL */
+            result = __kload_set_module_dependencies(current_entry);
+            if ( ! (result == kload_error_none ||
+                    result == kload_error_already_loaded) ) {
+                goto finish;
+            }
+
+#ifndef KERNEL
+            if ( (interactive_level == 1 && current_entry == dgraph->root) ||
+                 (interactive_level == 2) ) {
+
+                int approve = (*__kload_approve_func)(1,
+                    "\nStart module %s (answering no will abort the load)",
+                    current_entry->name);
+
+                if (approve > 0) {
+                    do_start_kmod = true; // override 'cause user said so
+                } else {
+                    kern_return_t mach_result;
+                    if (approve < 0) {
+                        kload_log_message("error reading user response; "
+                            "destroying loaded module" KNL);
+                    } else {
+                        kload_log_message("user canceled module start; "
+                            "destroying loaded module" KNL);
+                    }
+                    mach_result = kmod_destroy(G_kernel_priv_port, current_entry->kmod_id);
+                    if (mach_result != KERN_SUCCESS) {
+                        kload_log_error("kmod_destroy() failed" KNL);
+                    }
+                    if (approve < 0) {
+                        result = kload_error_unspecified;
+                        goto finish;
+                    } else {
+                        result = kload_error_user_abort;
+                        goto finish;
+                    }
+                }
+            }
+#endif /* not KERNEL */
+
+#ifndef KERNEL
+            if (current_entry != dgraph->root ||
+                (current_entry == dgraph->root && do_start_kmod)) {
+#endif /* not KERNEL */
+
+                result = __kload_start_module(current_entry);
+                if ( ! (result == kload_error_none ||
+                        result == kload_error_already_loaded) ) {
+                    goto finish;
+#ifndef KERNEL
+                } else if (interactive_level ||
+                    log_level >= kload_log_level_load_details) {
+#else
+                } else if (log_level >= kload_log_level_load_details) {
+#endif /* not KERNEL */
+
+                    kload_log_message("started module %s" KNL,
+                        current_entry->name);
+                } /* log_level */
+#ifndef KERNEL
+            } /* current_entry...
*/ +#endif /* not KERNEL */ + + +#ifndef KERNEL + } /* if do_load */ +#else + } /* if do_load */ +#endif /* not KERNEL */ + } /* for i, dgraph->length */ + +finish: + +#ifndef KERNEL + /* Dispose of the kernel port to prevent security breaches and port + * leaks. We don't care about the kern_return_t value of this + * call for now as there's nothing we can do if it fails. + */ + if (PORT_NULL != G_kernel_port) { + mach_port_deallocate(mach_task_self(), G_kernel_port); + G_kernel_port = PORT_NULL; + } +#endif /* not KERNEL */ + + if (cleanup_kld_loader) { + kld_unload_all(1); + } + + return result; +} + + +/******************************************************************************* +* +*******************************************************************************/ + +#ifndef KERNEL +#define __KLOAD_SYMBOL_EXTENSION ".sym" +#endif /* not KERNEL */ + +static +kload_error __kload_load_module(dgraph_t * dgraph, + dgraph_entry_t * entry, + int is_root +#ifndef KERNEL + , + const char * symbol_file, + const char * symbol_dir, + int do_load, + int interactive_level, + int ask_overwrite_symbols, + int overwrite_symbols + #endif /* not KERNEL */ + ) +{ + kload_error result = kload_error_none; + + int kld_result; + int mach_result; + struct mach_header * kld_header; + const char * kmod_symbol = "_kmod_info"; + unsigned long kernel_kmod_info; + kmod_info_t * local_kmod_info = NULL; + char * dest_address = 0; +#ifndef KERNEL + char * allocated_filename = NULL; + char * symbol_filename = NULL; + int file_check; + vm_address_t vm_buffer = 0; +#endif /* not KERNEL */ + + /* A kernel component is by nature already linked and loaded and has + * no work to be done upon it. + */ + if (entry->is_kernel_component && !entry->is_symbol_set) { + result = kload_error_none; + goto finish; + } + + G_current_load_entry = entry; + + if (log_level >= kload_log_level_load_basic) { +#ifndef KERNEL + if (do_load) { +#endif /* not KERNEL */ + kload_log_message("link/loading file %s" KNL, entry->name); +#ifndef KERNEL + } else { + kload_log_message("linking file %s" KNL, entry->name); + } +#endif /* not KERNEL */ + } + +#ifndef KERNEL + if (entry->link_output_file != entry->name) { + symbol_filename = entry->link_output_file; + } + + if (symbol_filename) { + file_check = kload_file_exists(symbol_filename); + if (file_check < 0) { + kload_log_error("error checking existence of file %s" KNL, + symbol_filename); + } else if (file_check > 0 && !overwrite_symbols) { + + if (!ask_overwrite_symbols) { + kload_log_message("symbol file %s exists; not overwriting" KNL, + symbol_filename); + symbol_filename = NULL; + } else { + int approve = (*__kload_approve_func)(1, + "\nSymbol file %s exists; overwrite", symbol_filename); + + if (approve < 0) { + result = kload_error_unspecified; + goto finish; + } else if (approve == 0) { + if (allocated_filename) free(allocated_filename); + allocated_filename = NULL; + symbol_filename = NULL; + } + } + } + } + + if (symbol_filename && + (interactive_level || + log_level >= kload_log_level_basic) ) { + + kload_log_message("writing symbol file %s" KNL, symbol_filename); + } + + if (do_load) { + if (interactive_level && entry->loaded_address) { + kload_log_message( + "module %s is already loaded as %s at address 0x%08x" KNL, + entry->name, entry->expected_kmod_name, + entry->loaded_address); + } else if ( (interactive_level == 1 && is_root) || + (interactive_level == 2) ) { + + int approve = (*__kload_approve_func)(1, + "\nLoad module %s", entry->name); + + if (approve < 0) { + result = 
kload_error_unspecified; + goto finish; + } else if (approve == 0) { + result = kload_error_user_abort; + goto finish; + } + } + } +#endif /* not KERNEL */ + + entry->object = kld_file_getaddr(entry->name, &entry->object_length); + if (!entry->object) { + kload_log_error("kld_file_getaddr() failed for module %s" KNL, + entry->name); + __kload_clear_kld_globals(); + result = kload_error_link_load; + goto finish; + } + + if (entry->is_symbol_set) { + entry->symbols = (vm_address_t) entry->object; + entry->symbols_length = entry->object_length; + +#ifndef KERNEL + if (symbol_filename) { + if (!_IOWriteBytesToFile(symbol_filename, (void *) entry->symbols, entry->symbols_length)) { + kload_log_error("write symbol file failed for module %s" KNL, + entry->name); + __kload_clear_kld_globals(); + result = kload_error_link_load; + goto finish; + } + symbol_filename = 0; + if (G_prelink && (entry->name != entry->link_output_file)) + { + kload_log_error("prelink %s %s %s" KNL, + entry->name, entry->link_output_file, entry->expected_kmod_name); + register_prelink(entry, NULL, NULL); + } + } +#endif /* not KERNEL */ + if (entry->opaques) { + result = kload_error_none; + goto finish; + } + } + +#ifndef KERNEL + kld_result = kld_load_from_memory(&kld_header, entry->name, + entry->object, entry->object_length, symbol_filename); +#else + kld_result = kld_load_from_memory(&kld_header, entry->name, + entry->object, entry->object_length); +#endif /* not KERNEL */ + +#ifndef KERNEL + fflush(stdout); + fflush(stderr); +#endif /* not KERNEL */ + + dgraph->have_loaded_symbols = true; + + if (!kld_result || !entry->kernel_load_address) { + kload_log_error("kld_load_from_memory() failed for module %s" KNL, + entry->name); + __kload_clear_kld_globals(); + entry->need_cleanup = 1; + result = kload_error_link_load; + goto finish; + } + + if (entry->is_symbol_set) { + result = kload_error_none; + goto finish; + } + + entry->linked_image = kld_header; + entry->linked_image_length = -1; // unknown! + +/* If we're in the kernel and not loading (as when handling an + * already-loaded dependency), we don't need to waste any CPU + * cycles looking up the kmod_info struct. + */ +#ifdef KERNEL + if (entry->do_load) { +#endif /* KERNEL */ + + kld_result = kld_lookup(kmod_symbol, &kernel_kmod_info); + if (!kld_result) { + kload_log_error("kld_lookup(\"%s\") failed for module %s" KNL, + kmod_symbol, entry->name); + entry->need_cleanup = 1; + result = kload_error_link_load; + goto finish; + } + +#ifdef KERNEL + } +#endif /* KERNEL */ + + kld_result = kld_forget_symbol(kmod_symbol); +#ifndef KERNEL + fflush(stdout); + fflush(stderr); +#endif /* not KERNEL */ + if (!kld_result) { + kload_log_error("kld_forget_symbol(\"%s\") failed for module %s" KNL, + kmod_symbol, entry->name); + entry->need_cleanup = 1; + result = kload_error_link_load; + goto finish; + } + +/* This section is always done in userland, but in kernel space + * only if we're loading the kext, because what we have in kernel + * space for an already-loaded kext is the kext itself, which + * must not be touched again after it's been loaded and started. + */ +#ifdef KERNEL + if (entry->do_load) +#endif /* KERNEL */ + { + + + /* Get the linked image's kmod_info by translating from the + * destined kernel-space address at kernel_kmod_info to an + * offset from kld_header. 
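+     * Illustrative example (addresses hypothetical): if the module were
+     * destined for a kernel_load_address of 0x10000000 while kld linked
+     * it locally at kld_header, a kernel-space address A inside the
+     * module would map to the local address
+     * A - 0x10000000 + (unsigned long)kld_header,
+     * which is exactly the computation applied to kernel_kmod_info below.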
+     */
+    local_kmod_info = (kmod_info_t *)((unsigned long)kernel_kmod_info -
+        (unsigned long)G_current_load_entry->kernel_load_address +
+        (unsigned long)kld_header);
+
+    /* Stamp the bundle ID and version from the entry over anything
+     * resident inside the kmod.
+     */
+    bzero(local_kmod_info->name, sizeof(local_kmod_info->name));
+    strcpy(local_kmod_info->name, entry->expected_kmod_name);
+
+    bzero(local_kmod_info->version, sizeof(local_kmod_info->version));
+    strcpy(local_kmod_info->version, entry->expected_kmod_vers);
+
+    if (log_level >= kload_log_level_details) {
+        kload_log_message("kmod name: %s" KNL, local_kmod_info->name);
+        kload_log_message("kmod start @ 0x%x (offset 0x%lx)" KNL,
+            (vm_address_t)local_kmod_info->start,
+            (unsigned long)local_kmod_info->start - (unsigned long)G_current_load_entry->kernel_load_address);
+        kload_log_message("kmod stop @ 0x%x (offset 0x%lx)" KNL,
+            (vm_address_t)local_kmod_info->stop,
+            (unsigned long)local_kmod_info->stop - (unsigned long)G_current_load_entry->kernel_load_address);
+    }
+
+    if (!local_kmod_info->start || !local_kmod_info->stop) {
+        kload_log_error(
+            "error for module file %s; start or stop address is zero" KNL,
+            entry->name);
+        entry->need_cleanup = 1;
+        result = kload_error_link_load;
+        goto finish;
+    }
+
+    /* Record link info into kmod_info struct, rounding the hdr_size
+     * to fit the adjustment that was made in __kload_linkedit_address().
+     */
+    if (entry->kernel_alloc_address) {
+        local_kmod_info->address = entry->kernel_alloc_address;
+    } else {
+        local_kmod_info->address = entry->loaded_address;
+    }
+    local_kmod_info->size = entry->kernel_alloc_size;
+    local_kmod_info->hdr_size = round_page(entry->kernel_hdr_size);
+
+    }
+
+#ifndef KERNEL
+    if (G_prelink && (entry->name != entry->link_output_file))
+    {
+        register_prelink(entry, local_kmod_info, kernel_kmod_info);
+    }
+
+    if (do_load && entry->do_load) {
+        mach_result = vm_allocate(mach_task_self(), &vm_buffer,
+            entry->kernel_alloc_size, TRUE);
+        if (mach_result != KERN_SUCCESS) {
+            kload_log_error("unable to vm_allocate() copy buffer" KNL);
+            entry->need_cleanup = 1;
+            result = kload_error_no_memory; // FIXME: kernel error?
+            goto finish;
+        }
+
+        dest_address = (char *)vm_buffer;
+
+        memcpy(dest_address, kld_header, entry->kernel_hdr_size);
+        memcpy(dest_address + round_page(entry->kernel_hdr_size),
+            (void *)((unsigned long)kld_header + entry->kernel_hdr_size),
+            entry->kernel_load_size - entry->kernel_hdr_size);
+
+        mach_result = vm_write(G_kernel_port, entry->kernel_alloc_address,
+            vm_buffer, entry->kernel_alloc_size);
+        if (mach_result != KERN_SUCCESS) {
+            kload_log_error("unable to write module to kernel memory" KNL);
+            entry->need_cleanup = 1;
+            result = kload_error_kernel_error;
+            goto finish;
+        }
+
+        mach_result = kmod_create(G_kernel_priv_port,
+            (vm_address_t)kernel_kmod_info, &(entry->kmod_id));
+
+#else
+    if (entry->do_load) {
+        dest_address = (char *)entry->kernel_alloc_address;
+        memcpy(dest_address, kld_header, entry->kernel_hdr_size);
+        memcpy(dest_address + round_page(entry->kernel_hdr_size),
+            (void *)((unsigned long)kld_header + entry->kernel_hdr_size),
+            entry->kernel_load_size - entry->kernel_hdr_size);
+
+        /* We've written data & instructions into kernel memory, so flush
+         * the data cache and invalidate the instruction cache.
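+         * (On CPUs whose instruction fetch does not snoop the data cache,
+         * PowerPC among them, freshly copied code must be flushed from the
+         * d-cache to memory and stale i-cache lines discarded before the
+         * module can safely execute.)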
+ */ + flush_dcache(entry->kernel_alloc_address, entry->kernel_alloc_size, false); + invalidate_icache(entry->kernel_alloc_address, entry->kernel_alloc_size, false); + + mach_result = kmod_create_internal( + (kmod_info_t *)kernel_kmod_info, &(entry->kmod_id)); + +#endif /* not KERNEL */ + + if (mach_result != KERN_SUCCESS) { + kload_log_error("unable to register module with kernel" KNL); + entry->need_cleanup = 1; + result = kload_error_kernel_error; + goto finish; + } + +#ifndef KERNEL + if (interactive_level || log_level >= kload_log_level_load_basic) { +#else + if (log_level >= kload_log_level_load_basic) { +#endif /* not KERNEL */ + kload_log_message( + "module %s created as # %d at address 0x%x, size %ld" KNL, + entry->expected_kmod_name, entry->kmod_id, + entry->kernel_alloc_address, + entry->kernel_alloc_size); + +#ifndef KERNEL + } +#else + } +#endif /* not KERNEL */ + +#ifndef KERNEL + if (interactive_level) { + kload_log_message( + "You can now break to the debugger and set breakpoints " + " for this extension." KNL); + } +#endif /* not KERNEL */ + +#ifndef KERNEL + } +#else + } +#endif /* not KERNEL */ + +finish: + +#ifndef KERNEL + if (allocated_filename) { + free(allocated_filename); + } + if (vm_buffer) { + vm_deallocate(mach_task_self(), vm_buffer, entry->kernel_alloc_size); + } +#endif /* not KERNEL */ + __kload_clear_kld_globals(); + + return result; +} + +/******************************************************************************* +*******************************************************************************/ + +#ifndef KERNEL +static kload_error +register_prelink(dgraph_entry_t * entry, + kmod_info_t * local_kmod_info, vm_offset_t kernel_kmod_info) +{ + CFIndex i, j, depoffset; + Boolean exists; + kmod_info_t desc; + + depoffset = CFDataGetLength(G_prelink_dependencies) / sizeof(CFIndex); + + for (i = 0; i < entry->num_dependencies; i++) + { + exists = false; + for (j = 1; (j < (1 + G_prelink->modules[0].id)); j++) + { + exists = (0 == strcmp(entry->dependencies[i]->expected_kmod_name, + G_prelink->modules[j].name)); + if (exists) + break; + } + if (!exists) + { + bzero(&desc, sizeof(desc)); + strcpy(desc.name, entry->dependencies[i]->expected_kmod_name); + + if (log_level >= kload_log_level_basic) { + kload_log_message("[%d] (dep)\n %s" KNL, + G_prelink->modules[0].id + 1, desc.name); + } + G_prelink->modules[0].id++; + CFDataAppendBytes(G_prelink_data, (UInt8 *) &desc, sizeof(desc)); + G_prelink = (struct PrelinkState *) CFDataGetMutableBytePtr(G_prelink_data); + } + + G_prelink->modules[0].reference_count++; + OSWriteBigInt32(&j, 0, j); + CFDataAppendBytes(G_prelink_dependencies, (UInt8 *) &j, sizeof(j)); + } + if (log_level >= kload_log_level_basic) { + kload_log_message("[%d] 0x%08x info 0x%08x\n %s,\n %s" KNL, + G_prelink->modules[0].id + 1, entry->kernel_load_address, + kernel_kmod_info, entry->link_output_file, entry->name); + } + + if (local_kmod_info) + desc = *local_kmod_info; + else + { + bzero(&desc, sizeof(desc)); + desc.size = entry->symbols_length; + } + + desc.id = kernel_kmod_info; + desc.reference_count = entry->num_dependencies; + desc.reference_list = (kmod_reference_t *) depoffset; + + /* Stamp the bundle ID and version from the entry over anything + * resident inside the kmod. 
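+     * (kmod_info's name and version fields are fixed-size character
+     * arrays, so the bzero()/strcpy() pairs below clear any stale bytes
+     * before installing the entry's identifier and version.)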
+ */ + bzero(desc.name, sizeof(local_kmod_info->name)); + strcpy(desc.name, entry->expected_kmod_name); + bzero(desc.version, sizeof(local_kmod_info->version)); + strcpy(desc.version, entry->expected_kmod_vers); + + G_prelink->modules[0].id++; + CFDataAppendBytes(G_prelink_data, (UInt8 *) &desc, sizeof(desc)); + G_prelink = (struct PrelinkState *) CFDataGetMutableBytePtr(G_prelink_data); + + return kload_error_none; +} + +#endif + +/******************************************************************************* +* +*******************************************************************************/ +PRIV_EXT +#ifndef KERNEL +kload_error kload_map_dgraph( + dgraph_t * dgraph, + const char * kernel_file) +#else +kload_error kload_map_dgraph( + dgraph_t * dgraph) +#endif /* not KERNEL */ +{ + kload_error result = kload_error_none; + int i; + + if (log_level >= kload_log_level_load_details) { +#ifndef KERNEL + kload_log_message("mapping the kernel file %s" KNL, kernel_file); +#else + kload_log_message("mapping the kernel" KNL); +#endif /* not KERNEL */ + } + +#ifndef KERNEL + if (!kld_file_map(kernel_file)) { + result = kload_error_link_load; + goto finish; + } +#endif /* not KERNEL */ + + for (i = 0; i < dgraph->length; i++) { + dgraph_entry_t * entry = dgraph->load_order[i]; + + if (entry->is_kernel_component && !entry->is_symbol_set) { + continue; + } + + result = kload_map_entry(entry); + if (result != kload_error_none) { + goto finish; + } + } + +finish: + return result; + +} + +/******************************************************************************* +* +*******************************************************************************/ +PRIV_EXT +kload_error kload_map_entry(dgraph_entry_t * entry) +{ + kload_error result = kload_error_none; + + if (entry->is_kernel_component && !entry->is_symbol_set) { + kload_log_error("attempt to map kernel component %s" KNL, entry->name); + result = kload_error_invalid_argument; + goto finish; + } + + if (log_level >= kload_log_level_load_details) { + kload_log_message("mapping module file %s" KNL, entry->name); + } + + if (kld_file_getaddr(entry->name, NULL)) { + if (log_level >= kload_log_level_load_details) { + kload_log_message("module file %s is already mapped" KNL, entry->name); + } + result = kload_error_none; + goto finish; + } + +#ifndef KERNEL + if (!kld_file_map(entry->name)) { +#else + if (!kld_file_map(entry->name, entry->object, entry->object_length, + entry->object_is_kmem)) { +#endif /* not KERNEL */ + kload_log_error("error mapping module file %s" KNL, entry->name); + + result = kload_error_link_load; + goto finish; +#ifndef KERNEL + } +#else + } +#endif /* not KERNEL */ + + entry->is_mapped = true; + + /* Clear these bits now, as the kld patch module now owns the info + * and it is subject to change. We reset them in the entry from the + * kld patch module as needed. + */ + entry->object = 0; + entry->object_length = 0; +#ifdef KERNEL + entry->object_is_kmem = false; +#endif /* KERNEL */ + + // FIXME: Stop using this symbol; have the info passed in by + // FIXME: ...the kext management library. 
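+    /* A sketch of the check below (userland only): a loadable kext is
+     * expected to define the _kmod_info symbol, so a failed
+     * kld_file_lookupsymbol() marks the file as not containing kernel
+     * extension code.
+     */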
+#ifndef KERNEL
+    if (!entry->is_kernel_component && !kld_file_lookupsymbol(entry->name, "_kmod_info")) {
+        kload_log_error("%s does not contain kernel extension code" KNL,
+            entry->name);
+        result = kload_error_executable_bad;
+        goto finish;
+    }
+#endif /* not KERNEL */
+
+finish:
+    return result;
+}
+
+#ifndef KERNEL
+/*******************************************************************************
+*
+*******************************************************************************/
+kload_error kload_request_load_addresses(
+    dgraph_t * dgraph,
+    const char * kernel_file)
+{
+    kload_error result = kload_error_none;
+    int i;
+    const char * user_response = NULL; // must free
+    int scan_result;
+    unsigned int address;
+
+    /* We have to map all object files to get their CFBundleIdentifier
+     * names.
+     */
+    result = kload_map_dgraph(dgraph, kernel_file);
+    if (result != kload_error_none) {
+        kload_log_error("error mapping object files" KNL);
+        goto finish;
+    }
+
+    // fixme: this shouldn't be printf, should it?
+    printf("enter the hexadecimal load addresses for these modules:\n");
+
+    for (i = 0; i < dgraph->length; i++) {
+        dgraph_entry_t * entry = dgraph->load_order[i];
+
+        if (!entry) {
+            result = kload_error_unspecified;
+            goto finish;
+        }
+
+        if (entry->is_kernel_component) {
+            continue;
+        }
+
+        if (!entry->is_mapped) {
+            result = kload_error_unspecified;
+            goto finish;
+        }
+
+        user_response = __kload_input_func("%s:",
+            entry->expected_kmod_name);
+        if (!user_response) {
+            result = kload_error_unspecified;
+            goto finish;
+        }
+        scan_result = sscanf(user_response, "%x", &address);
+        if (scan_result < 1 || scan_result == EOF) {
+            result = kload_error_unspecified;
+            goto finish;
+        }
+        entry->loaded_address = address;
+    }
+
+finish:
+    return result;
+
+}
+
+/*******************************************************************************
+* addresses is a NULL-terminated list of strings of the form "module_id@address"
+*******************************************************************************/
+kload_error kload_set_load_addresses_from_args(
+    dgraph_t * dgraph,
+    const char * kernel_file,
+    char ** addresses)
+{
+    kload_error result = kload_error_none;
+    int i, j;
+
+
+    /* We have to map all object files to get their CFBundleIdentifier
+     * names.
+     */
+    result = kload_map_dgraph(dgraph, kernel_file);
+    if (result != kload_error_none) {
+        kload_log_error("error mapping object files" KNL);
+        goto finish;
+    }
+
+    /*****
+     * Run through and assign all addresses to their relevant module
+     * entries.
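+     * For a hypothetical argument "com.example.driver@0x00a0b000", the
+     * text before the '@' is matched against expected_kmod_name and the
+     * text after it is parsed as a hexadecimal address with strtoul().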
+ */ + for (i = 0; i < dgraph->length; i++) { + dgraph_entry_t * entry = dgraph->load_order[i]; + + if (!entry) { + result = kload_error_unspecified; + goto finish; + } + + if (entry->is_kernel_component) { + continue; + } + + if (!entry->is_mapped) { + result = kload_error_unspecified; + goto finish; + } + + for (j = 0; addresses[j]; j++) { + char * this_addr = addresses[j]; + char * address_string = NULL; + unsigned int address; + unsigned int module_namelen = strlen(entry->expected_kmod_name); + + if (!this_addr) { + result = kload_error_unspecified; + goto finish; + } + + if (strncmp(this_addr, entry->expected_kmod_name, module_namelen)) { + continue; + } + if (this_addr[module_namelen] != '@') { + continue; + } + + address_string = index(this_addr, '@'); + if (!address_string) { + result = kload_error_unspecified; + goto finish; + } + address_string++; + address = strtoul(address_string, NULL, 16); + entry->loaded_address = address; + } + } + + /***** + * Now that we've done that see that all non-kernel modules do have + * addresses set. If even one doesn't, we can't complete the link + * relocation of symbols, so return a usage error. + */ + for (i = 0; i < dgraph->length; i++) { + dgraph_entry_t * entry = dgraph->load_order[i]; + + if (entry->is_kernel_component) { + continue; + } + + if (!entry->loaded_address) { + result = kload_error_invalid_argument; + goto finish; + } + } + +finish: + return result; + +} + +/******************************************************************************* +* This function requires G_kernel_priv_port to be set before it will work. +*******************************************************************************/ +kload_error kload_set_load_addresses_from_kernel( + dgraph_t * dgraph, + const char * kernel_file, + int do_load) +{ + kload_error result = kload_error_none; + int mach_result; + kmod_info_t * loaded_modules = NULL; + int loaded_bytecount = 0; + unsigned int i; + + + /***** + * We have to map the dgraph's modules before checking whether they've + * been loaded. + */ + result = kload_map_dgraph(dgraph, kernel_file); + if (result != kload_error_none) { + kload_log_error("can't map module files" KNL); + goto finish; + } + + + /* First clear all the load addresses. + */ + for (i = 0; i < dgraph->length; i++) { + struct dgraph_entry_t * entry = dgraph->load_order[i]; + entry->loaded_address = 0; + } + + mach_result = kmod_get_info(G_kernel_priv_port, + (void *)&loaded_modules, &loaded_bytecount); + if (mach_result != KERN_SUCCESS) { + kload_log_error("kmod_get_info() failed" KNL); + result = kload_error_kernel_error; + goto finish; + } + + /***** + * Find out which modules have already been loaded & verify + * that loaded versions are same as requested. + */ + for (i = 0; i < dgraph->length; i++) { + kload_error cresult; + dgraph_entry_t * current_entry = dgraph->load_order[i]; + + /* If necessary, check whether the current module is already loaded. + * (We already did the root module above.) + */ + cresult = __kload_check_module_loaded(dgraph, current_entry, + loaded_modules, do_load); + if ( ! 
(cresult == kload_error_none || + cresult == kload_error_already_loaded) ) { + goto finish; + } + if (current_entry == dgraph->root && + cresult == kload_error_already_loaded) { + + result = cresult; + } + } + +finish: + + if (loaded_modules) { + vm_deallocate(mach_task_self(), (vm_address_t)loaded_modules, + loaded_bytecount); + loaded_modules = 0; + } + + return result; +} + +#else +/******************************************************************************* +* +*******************************************************************************/ +PRIV_EXT +kload_error kload_set_load_addresses_from_kernel( + dgraph_t * dgraph) +{ + kload_error result = kload_error_none; +#ifndef KERNEL + int mach_result; + kmod_info_t * loaded_modules = NULL; + int loaded_bytecount = 0; +#endif /* not KERNEL */ + unsigned int i; + + + /***** + * We have to map the dgraph's modules before checking whether they've + * been loaded. + */ + result = kload_map_dgraph(dgraph); + if (result != kload_error_none) { + kload_log_error("can't map module files" KNL); + goto finish; + } + + + /* First clear all the load addresses. + */ + for (i = 0; i < dgraph->length; i++) { + struct dgraph_entry_t * entry = dgraph->load_order[i]; + entry->loaded_address = 0; + } + + /***** + * Find out which modules have already been loaded & verify + * that loaded versions are same as requested. + */ + for (i = 0; i < dgraph->length; i++) { + kload_error cresult; + dgraph_entry_t * current_entry = dgraph->load_order[i]; + + /* If necessary, check whether the current module is already loaded. + * (We already did the root module above.) + */ + cresult = __kload_check_module_loaded(dgraph, current_entry, false); + if ( ! (cresult == kload_error_none || + cresult == kload_error_already_loaded) ) { + goto finish; + } + if (current_entry == dgraph->root && + cresult == kload_error_already_loaded) { + + result = cresult; + } + } + +finish: + + return result; +} +#endif /* not KERNEL */ + +/******************************************************************************* +* +*******************************************************************************/ +#ifdef KERNEL +extern kern_return_t kmod_load_from_cache(const char * kmod_name); +#endif /* KERNEL */ + +static kmod_info_t * __kload_find_kmod_info(const char * kmod_name +#ifndef KERNEL + , + kmod_info_t * kmod_list +#endif /* not KERNEL */ + ) +{ +#ifndef KERNEL + unsigned int i; + + for (i = 0; ; i++) { + kmod_info_t * current_kmod = &(kmod_list[i]); + if (0 == strcmp(current_kmod->name, kmod_name)) { + return current_kmod; + } + if (kmod_list[i].next == 0) { + break; + } + } + return NULL; +#else + kmod_info_t * info; + info = kmod_lookupbyname_locked(kmod_name); + if (!info && (KERN_SUCCESS == kmod_load_from_cache(kmod_name))) { + info = kmod_lookupbyname_locked(kmod_name); + } + return info; +#endif /* not KERNEL */ +} + +/******************************************************************************* +* +*******************************************************************************/ +static +kload_error __kload_check_module_loaded( + dgraph_t * dgraph, + dgraph_entry_t * entry, +#ifndef KERNEL + kmod_info_t * kmod_list, +#endif /* not KERNEL */ + int log_if_already) +{ + kload_error result = kload_error_none; + const char * kmod_name; + kmod_info_t * current_kmod = 0; + + VERS_version entry_vers; + VERS_version loaded_vers; + + if (false && entry->is_kernel_component) { + kmod_name = entry->name; + } else { + kmod_name = entry->expected_kmod_name; + if (log_level >= 
kload_log_level_load_details) {
+            kload_log_message("checking whether module file %s is already loaded" KNL,
+                kmod_name);
+        }
+    }
+
+#ifndef KERNEL
+    current_kmod = __kload_find_kmod_info(kmod_name, kmod_list);
+#else
+    current_kmod = __kload_find_kmod_info(kmod_name);
+#endif /* not KERNEL */
+
+    if (!current_kmod) {
+        goto finish;
+    }
+
+    entry->do_load = 0;
+    entry->kmod_id = current_kmod->id;
+    entry->loaded_address = current_kmod->address;
+
+    if (entry->is_kernel_component) {
+        goto finish;
+    }
+
+    if (log_level >= kload_log_level_load_details) {
+        kload_log_message("module file %s is loaded; checking status" KNL,
+            kmod_name);
+    }
+
+    // We really want to move away from having this info in a kmod....
+    //
+    loaded_vers = VERS_parse_string(current_kmod->version);
+    if (loaded_vers < 0) {
+        kload_log_error(
+            "can't parse version string \"%s\" of loaded module %s" KNL,
+            current_kmod->version,
+            current_kmod->name);
+        result = kload_error_unspecified;
+        goto finish;
+    }
+
+    entry_vers = VERS_parse_string(entry->expected_kmod_vers);
+    if (entry_vers < 0) {
+        kload_log_error(
+            "can't parse version string \"%s\" of module file %s" KNL,
+            entry->expected_kmod_vers,
+            kmod_name);
+        result = kload_error_unspecified;
+        goto finish;
+    }
+
+    if (loaded_vers != entry_vers) {
+        kload_log_error(
+            "loaded version %s of module %s differs from "
+            "requested version %s" KNL,
+            current_kmod->version,
+            current_kmod->name,
+            entry->expected_kmod_vers);
+        if (entry == dgraph->root) {
+            result = kload_error_loaded_version_differs;
+        } else {
+            result = kload_error_dependency_loaded_version_differs;
+        }
+        goto finish;
+    } else {
+
+        if (log_if_already && log_level >=
+            kload_log_level_load_basic) {
+
+            kload_log_message(
+                "module %s (identifier %s) is already loaded" KNL,
+                entry->name, kmod_name);
+        }
+        result = kload_error_already_loaded;
+        goto finish;
+    }
+
+finish:
+#ifdef KERNEL
+    // Do this ONLY if in the kernel!
+    if (current_kmod) {
+        kfree((unsigned int)current_kmod, sizeof(kmod_info_t));
+    }
+#endif /* KERNEL */
+    return result;
+}
+
+/*******************************************************************************
+*
+*******************************************************************************/
+PRIV_EXT
+kload_error __kload_patch_dgraph(dgraph_t * dgraph
+#ifndef KERNEL
+    ,
+    const char * kernel_file
+#endif /* not KERNEL */
+    )
+{
+    kload_error result = kload_error_none;
+    unsigned int i;
+
+#ifndef KERNEL
+    if (!kld_file_merge_OSObjects(kernel_file)) {
+        result = kload_error_link_load;
+        goto finish;
+    }
+#endif /* not KERNEL */
+
+    for (i = 0; i < dgraph->length; i++) {
+        dgraph_entry_t * current_entry = dgraph->load_order[i];
+
+        /* The kernel has already been patched.
+         */
+        if (current_entry->is_kernel_component) {
+            continue;
+        }
+
+        if (log_level >= kload_log_level_load_details) {
+            kload_log_message("patching C++ code in module %s" KNL,
+                current_entry->name);
+        }
+
+#ifndef KERNEL
+        /* In userland, we call the patch function for all kmods,
+         * loaded or not, because we don't have all the info that
+         * the kernel environment has.
+         */
+        if (!kld_file_patch_OSObjects(current_entry->name)) {
+            result = kload_error_link_load; // FIXME: need a "patch" error?
+            goto finish;
+        }
+#else
+        /* In the kernel, we call the merge function for already-loaded
+         * kmods, since the kld patch environment retains info for kmods
+         * that have already been patched. The patch function does a little
+         * more work, and is only for kmods that haven't been processed yet.
+         * NOTE: We are depending here on kload_check_module_loaded()
+         * having been called; that is guaranteed when kload_load_dgraph()
+         * is used, but not when its subroutines (such as
+         * __kload_load_modules()) are called directly.
+         */
+        if (current_entry->loaded_address) {
+            if (!kld_file_merge_OSObjects(current_entry->name)) {
+                result = kload_error_link_load; // FIXME: need a "patch" error?
+                goto finish;
+            }
+        } else {
+            if (!kld_file_patch_OSObjects(current_entry->name)) {
+                result = kload_error_link_load; // FIXME: need a "patch" error?
+                goto finish;
+            }
+        }
+#endif /* not KERNEL */
+
+    }
+
+    if (!kld_file_prepare_for_link()) {
+        result = kload_error_link_load; // FIXME: need more specific error?
+        goto finish;
+    }
+
+finish:
+    return result;
+}
+
+#ifndef KERNEL
+/*******************************************************************************
+*
+*******************************************************************************/
+#define __KLOAD_PATCH_EXTENSION ".patch"
+
+kload_error __kload_output_patches(
+    dgraph_t * dgraph,
+    const char * patch_file,
+    const char * patch_dir,
+    int ask_overwrite_symbols,
+    int overwrite_symbols)
+{
+    kload_error result = kload_error_none;
+    unsigned int i;
+    char * allocated_filename = NULL;
+    char * patch_filename = NULL;
+    int file_check;
+    int output_patch;
+
+    if (patch_dir) {
+
+        for (i = 0; i < dgraph->length; i++) {
+
+            struct dgraph_entry_t * entry = dgraph->load_order[i];
+            unsigned long length;
+
+            if (entry->is_kernel_component) {
+                continue;
+            }
+
+            length = strlen(patch_dir) +
+                strlen(entry->expected_kmod_name) +
+                strlen(__KLOAD_PATCH_EXTENSION) +
+                1 + 1; // 1 for '/' added, 1 for terminating null
+            if (length >= MAXPATHLEN) {
+                kload_log_error(
+                    "output filename \"%s/%s%s\" would be too long" KNL,
+                    patch_dir, entry->expected_kmod_name,
+                    __KLOAD_PATCH_EXTENSION);
+                result = kload_error_invalid_argument;
+                goto finish;
+            }
+
+            allocated_filename = (char *)malloc(length);
+            if (!
allocated_filename) {
+                kload_log_error("malloc failure" KNL);
+                result = kload_error_no_memory;
+                goto finish;
+            }
+
+            patch_filename = allocated_filename;
+            strcpy(patch_filename, patch_dir);
+            strcat(patch_filename, "/");
+            strcat(patch_filename, entry->expected_kmod_name);
+            strcat(patch_filename, __KLOAD_PATCH_EXTENSION);
+
+            output_patch = 1;
+            file_check = kload_file_exists(patch_filename);
+
+            if (file_check < 0) {
+                kload_log_error("error checking existence of file %s" KNL,
+                    patch_filename);
+            } else if (file_check > 0 && !overwrite_symbols) {
+                if (!ask_overwrite_symbols) {
+                    kload_log_error(
+                        "patch file %s exists; not overwriting" KNL,
+                        patch_filename);
+                    output_patch = 0;
+                } else {
+                    int approve = (*__kload_approve_func)(1,
+                        "\nPatch file %s exists; overwrite", patch_filename);
+
+                    if (approve < 0) {
+                        result = kload_error_unspecified;
+                        goto finish;
+                    } else {
+                        output_patch = approve;
+                    }
+                }
+            }
+
+            if (output_patch) {
+                if (log_level >= kload_log_level_basic) {
+                    kload_log_message("writing patch file %s" KNL, patch_filename);
+                }
+                kld_file_debug_dump(entry->name, patch_filename);
+            }
+
+            if (allocated_filename) free(allocated_filename);
+            allocated_filename = NULL;
+        }
+
+    } else if (patch_file) {
+        output_patch = 1;
+        file_check = kload_file_exists(patch_file);
+
+        if (file_check < 0) {
+            kload_log_error("error checking existence of file %s" KNL,
+                patch_file);
+        } else if (file_check > 0 && !overwrite_symbols) {
+            if (!ask_overwrite_symbols) {
+                kload_log_error("patch file %s exists; not overwriting" KNL,
+                    patch_file);
+                output_patch = 0;
+            } else {
+                int approve = (*__kload_approve_func)(1,
+                    "\nPatch file %s exists; overwrite", patch_file);
+
+                if (approve < 0) {
+                    result = kload_error_unspecified;
+                    goto finish;
+                } else {
+                    output_patch = approve;
+                }
+            }
+        }
+
+        if (output_patch) {
+            if (log_level >= kload_log_level_basic) {
+                kload_log_message("writing patch file %s" KNL, patch_file);
+            }
+            kld_file_debug_dump(dgraph->root->name, patch_file);
+        }
+    }
+
+finish:
+    if (allocated_filename) free(allocated_filename);
+
+    return result;
+}
+#endif /* not KERNEL */
+
+/*******************************************************************************
+*
+*******************************************************************************/
+PRIV_EXT
+kload_error __kload_set_module_dependencies(dgraph_entry_t * entry) {
+    kload_error result = kload_error_none;
+    int mach_result;
+#ifndef KERNEL
+    void * kmod_control_args = 0;
+    int num_args = 0;
+#endif /* not KERNEL */
+    kmod_t packed_id;
+    unsigned int i;
+    dgraph_entry_t * current_dep = NULL;
+
+    if (!entry->do_load) {
+        result = kload_error_already_loaded;
+        goto finish;
+    }
+
+    for (i = 0; i < entry->num_dependencies; i++) {
+        current_dep = entry->dependencies[i];
+
+        if (log_level >= kload_log_level_load_details) {
+            kload_log_message("adding reference from %s (%d) to %s (%d)" KNL,
+                entry->expected_kmod_name, entry->kmod_id,
+                current_dep->expected_kmod_name, current_dep->kmod_id);
+        }
+
+        packed_id = KMOD_PACK_IDS(entry->kmod_id, current_dep->kmod_id);
+#ifndef KERNEL
+        mach_result = kmod_control(G_kernel_priv_port,
+            packed_id, KMOD_CNTL_RETAIN, &kmod_control_args, &num_args);
+#else
+        mach_result = kmod_retain(packed_id);
+#endif /* not KERNEL */
+        if (mach_result != KERN_SUCCESS) {
+            kload_log_error(
+                "kmod retain failed for %s; destroying kmod" KNL,
+                entry->expected_kmod_name);
+#ifndef KERNEL
+            mach_result = kmod_destroy(G_kernel_priv_port, entry->kmod_id);
+#else
+            mach_result =
kmod_destroy_internal(entry->kmod_id); +#endif /* not KERNEL */ + if (mach_result != KERN_SUCCESS) { + kload_log_error("kmod destroy failed" KNL); + } + result = kload_error_link_load; + goto finish; + } + } + + if (log_level >= kload_log_level_load_basic) { + kload_log_message("module # %d reference counts incremented" KNL, + entry->kmod_id); + } + +finish: + return result; +} + +/******************************************************************************* +* +*******************************************************************************/ +PRIV_EXT +kload_error __kload_start_module(dgraph_entry_t * entry) { + kload_error result = kload_error_none; + int mach_result; +#ifndef KERNEL + void * kmod_control_args = 0; + int num_args = 0; +#endif /* not KERNEL */ + + if (!entry->do_load) { + result = kload_error_already_loaded; + goto finish; + } + +#ifndef KERNEL + mach_result = kmod_control(G_kernel_priv_port, + entry->kmod_id, KMOD_CNTL_START, &kmod_control_args, &num_args); +#else + mach_result = kmod_start_or_stop(entry->kmod_id, 1, 0, 0); +#endif /* not KERNEL */ + + if (mach_result != KERN_SUCCESS) { + kload_log_error( + "kmod_control/start failed for %s; destroying kmod" KNL, + entry->expected_kmod_name); +#ifndef KERNEL + mach_result = kmod_destroy(G_kernel_priv_port, entry->kmod_id); +#else + mach_result = kmod_destroy_internal(entry->kmod_id); +#endif /* not KERNEL */ + if (mach_result != KERN_SUCCESS) { + kload_log_error("kmod destroy failed" KNL); + } + result = kload_error_link_load; + goto finish; + } + + if (log_level >= kload_log_level_load_basic) { + kload_log_message("module # %d started" KNL, + entry->kmod_id); + } + +finish: + return result; +} + +/******************************************************************************* +*******************************************************************************/ + +/******************************************************************************* +* +*******************************************************************************/ +static +unsigned long __kload_linkedit_address( + unsigned long size, + unsigned long headers_size) +{ + unsigned long round_segments_size; + unsigned long round_headers_size; + unsigned long round_size; + int mach_result; + const struct machOMapping { + struct mach_header h; + struct segment_command seg[1]; + } *machO; + + if (!G_current_load_entry) { + return 0; + } + + // the actual size allocated by kld_load_from_memory() + G_current_load_entry->kernel_load_size = size; + + round_headers_size = round_page(headers_size); + round_segments_size = round_page(size - headers_size); + round_size = round_headers_size + round_segments_size; + + G_current_load_entry->kernel_alloc_size = round_size; + + // will need to be rounded *after* load/link + G_current_load_entry->kernel_hdr_size = headers_size; + G_current_load_entry->kernel_hdr_pad = round_headers_size - headers_size; + + if (G_current_load_entry->loaded_address) { + G_current_load_entry->kernel_load_address = + G_current_load_entry->loaded_address + + G_current_load_entry->kernel_hdr_pad; + if (log_level >= kload_log_level_load_basic) { + kload_log_message( + "using %s load address 0x%x (0x%x with header pad)" KNL, + G_current_load_entry->kmod_id ? 
"existing" : "provided", + G_current_load_entry->loaded_address, + G_current_load_entry->kernel_load_address); + } + return G_current_load_entry->kernel_load_address; + } + + machO = (const struct machOMapping *) G_current_load_entry->object; + if (machO->seg[0].vmaddr) + { + G_current_load_entry->loaded_address = trunc_page(machO->seg[0].vmaddr - machO->seg[0].fileoff); + + G_current_load_entry->kernel_load_address = G_current_load_entry->loaded_address + + G_current_load_entry->kernel_hdr_pad; + + return G_current_load_entry->kernel_load_address; + } + +#ifndef KERNEL + if (G_prelink) { + G_current_load_entry->kernel_alloc_address = G_prelink->modules[0].address; + G_prelink->modules[0].address += round_page(G_current_load_entry->kernel_alloc_size); + mach_result = KERN_SUCCESS; + + } else if (G_syms_only) { + kload_log_error( + "internal error; asked to allocate kernel memory" KNL); + // FIXME: no provision for cleanup here + return kload_error_unspecified; + + } else +#endif /* not KERNEL */ + + { +#ifndef KERNEL + mach_result = vm_allocate(G_kernel_port, + &G_current_load_entry->kernel_alloc_address, + G_current_load_entry->kernel_alloc_size, TRUE); +#else + mach_result = vm_allocate(kernel_map, + &G_current_load_entry->kernel_alloc_address, + G_current_load_entry->kernel_alloc_size, TRUE); +#endif /* not KERNEL */ + } + + if (mach_result != KERN_SUCCESS) { + kload_log_error("can't allocate kernel memory" KNL); + // FIXME: no provision for cleanup here + return kload_error_kernel_error; + } + + if (log_level >= kload_log_level_load_basic) { + kload_log_message("allocated %ld bytes in kernel space at 0x%x" KNL, + G_current_load_entry->kernel_alloc_size, + G_current_load_entry->kernel_alloc_address); + } + + G_current_load_entry->kernel_load_address = + G_current_load_entry->kernel_alloc_address + + G_current_load_entry->kernel_hdr_pad; + + G_current_load_entry->loaded_address = G_current_load_entry->kernel_alloc_address; + + if (log_level >= kload_log_level_load_basic) { + kload_log_message( + "using load address of 0x%x" KNL, + G_current_load_entry->kernel_alloc_address); + } + + return G_current_load_entry->kernel_load_address; +} + +/******************************************************************************* +* +*******************************************************************************/ +static +void __kload_clear_kld_globals(void) { + G_current_load_entry = NULL; + return; +} + +/******************************************************************************* +* +*******************************************************************************/ +static +void __kload_clean_up_entry(dgraph_entry_t * entry) { + int mach_result; + + if (entry->need_cleanup && entry->kernel_alloc_address) { +#ifndef KERNEL + if (G_prelink) { + + if ((entry->kernel_alloc_address + entry->kernel_alloc_size) == G_prelink->modules[0].address) { + G_prelink->modules[0].address = entry->kernel_alloc_address; + } else { + kload_log_error( + "bad free load address of 0x%x (last 0x%x)" KNL, + entry->kernel_alloc_address, G_prelink->modules[0].address); + } + } else { + mach_result = vm_deallocate(G_kernel_port, entry->kernel_alloc_address, + entry->kernel_alloc_size); + } +#else + mach_result = vm_deallocate(kernel_map, entry->kernel_alloc_address, + entry->kernel_alloc_size); +#endif /* not KERNEL */ + entry->kernel_alloc_address = 0; + } + return; +} + +#ifndef KERNEL +/******************************************************************************* +* 
+*******************************************************************************/ +int kload_file_exists(const char * path) +{ + int result = 0; // assume it doesn't exist + struct stat stat_buf; + + if (stat(path, &stat_buf) == 0) { + result = 1; // the file does exist; we don't care beyond that + goto finish; + } + + switch (errno) { + case ENOENT: + result = 0; // the file doesn't exist + goto finish; + break; + default: + result = -1; // unknown error + goto finish; + break; + } + +finish: + return result; +} +#endif /* not KERNEL */ + +/******************************************************************************* +* +*******************************************************************************/ +PRIV_EXT +void kload_set_log_level(kload_log_level level) +{ + log_level = level; + return; +} + +#ifndef KERNEL +/******************************************************************************* +* +*******************************************************************************/ +void kload_set_log_function( + void (*func)(const char * format, ...)) +{ + if (!func) { + __kload_log_func = &__kload_null_log; + } else { + __kload_log_func = func; + } + return; +} + +/******************************************************************************* +* +*******************************************************************************/ +void kload_set_error_log_function( + void (*func)(const char * format, ...)) +{ + if (!func) { + __kload_err_log_func = &__kload_null_err_log; + } else { + __kload_err_log_func = func; + } + return; +} + +/******************************************************************************* +* +*******************************************************************************/ +void kload_set_user_approve_function( + int (*func)(int default_answer, const char * format, ...)) +{ + if (!func) { + __kload_approve_func = &__kload_null_approve; + } else { + __kload_approve_func = func; + } + return; +} + +/******************************************************************************* +* +*******************************************************************************/ +void kload_set_user_veto_function( + int (*func)(int default_answer, const char * format, ...)) +{ + if (!func) { + __kload_veto_func = &__kload_null_veto; + } else { + __kload_veto_func = func; + } + return; +} + +/******************************************************************************* +* +*******************************************************************************/ +void kload_set_user_input_function( + const char * (*func)(const char * format, ...)) +{ + if (!func) { + __kload_input_func = &__kload_null_input; + } else { + __kload_input_func = func; + } + return; +} + +/******************************************************************************* +* +*******************************************************************************/ +PRIV_EXT +void kload_log_message(const char * format, ...) 
+{ + va_list ap; + char fake_buffer[2]; + int output_length; + char * output_string; + + if (log_level <= kload_log_level_silent) { + return; + } + + va_start(ap, format); + output_length = vsnprintf(fake_buffer, 1, format, ap); + va_end(ap); + + output_string = (char *)malloc(output_length + 1); + if (!output_string) { + return; + } + + va_start(ap, format); + vsprintf(output_string, format, ap); + va_end(ap); + + __kload_log_func(output_string); + free(output_string); + + return; +} + +/******************************************************************************* +* +*******************************************************************************/ +PRIV_EXT +void kload_log_error(const char * format, ...) +{ + va_list ap; + char fake_buffer[2]; + int output_length; + char * output_string; + + if (log_level <= kload_log_level_silent) { + return; + } + + va_start(ap, format); + output_length = vsnprintf(fake_buffer, 1, format, ap); + va_end(ap); + + output_string = (char *)malloc(output_length + 1); + if (!output_string) { + return; + } + + va_start(ap, format); + vsprintf(output_string, format, ap); + va_end(ap); + + __kload_err_log_func(output_string); + free(output_string); + + return; +} +/******************************************************************************* +* +*******************************************************************************/ +void __kload_null_log(const char * format, ...) +{ + return; +} + +/******************************************************************************* +* +*******************************************************************************/ +void __kload_null_err_log(const char * format, ...) +{ + return; +} + +/******************************************************************************* +* +*******************************************************************************/ +int __kload_null_approve(int default_answer, const char * format, ...) +{ + return 0; +} + +/******************************************************************************* +* +*******************************************************************************/ +int __kload_null_veto(int default_answer, const char * format, ...) +{ + return 1; +} + +/******************************************************************************* +* +*******************************************************************************/ +const char * __kload_null_input(const char * format, ...) +{ + return NULL; +} + +/******************************************************************************* +* The kld_patch.c module uses this function, if defined, to print errors. In +* the kernel this function is defined in libsa/misc.c. 
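+* This userland version honors the current log level (silent suppresses all
+* output) and writes to stderr.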
+*******************************************************************************/ +void kld_error_vprintf(const char * format, va_list ap) { + if (log_level <= kload_log_level_silent) return; + vfprintf(stderr, format, ap); + return; +} + +#endif /* not KERNEL */ diff --git a/libsa/load.h b/libsa/load.h new file mode 100644 index 000000000..8a79050ad --- /dev/null +++ b/libsa/load.h @@ -0,0 +1,162 @@ +#ifndef __LOAD_H__ +#define __LOAD_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dgraph.h" + +#ifdef KERNEL +#else +#include "KXKext.h" +#endif /* KERNEL */ + +#ifndef KERNEL +typedef KXKextManagerError kload_error; +enum { + kload_error_none = kKXKextManagerErrorNone, + kload_error_unspecified = kKXKextManagerErrorUnspecified, + kload_error_invalid_argument = kKXKextManagerErrorInvalidArgument, + kload_error_no_memory = kKXKextManagerErrorNoMemory, + + kload_error_user_abort = kKXKextManagerErrorUserAbort, + kload_error_kernel_error = kKXKextManagerErrorKernelError, + kload_error_kernel_permission = kKXKextManagerErrorKernelPermission, + + kload_error_executable_bad = kKXKextManagerErrorLoadExecutableBad, + kload_error_already_loaded = kKXKextManagerErrorAlreadyLoaded, + kload_error_loaded_version_differs = kKXKextManagerErrorLoadedVersionDiffers, + kload_error_dependency_loaded_version_differs = kKXKextManagerErrorDependencyLoadedVersionDiffers, + kload_error_link_load = kKXKextManagerErrorLinkLoad +}; + +typedef KXKextManagerLogLevel kload_log_level; +enum { + kload_log_level_silent = kKXKextManagerLogLevelSilent, + kload_log_level_errors_only = kKXKextManagerLogLevelErrorsOnly, + kload_log_level_default = kKXKextManagerLogLevelDefault, + kload_log_level_basic = kKXKextManagerLogLevelBasic, + kload_log_level_load_basic = kKXKextManagerLogLevelLoadBasic, + kload_log_level_details = kKXKextManagerLogLevelDetails, + kload_log_level_kexts = kKXKextManagerLogLevelKexts, + kload_log_level_kext_details = kKXKextManagerLogLevelKextDetails, + kload_log_level_load_details = kKXKextManagerLogLevelLoadDetails +}; +#else + +typedef enum { + kload_error_none, + kload_error_unspecified, + kload_error_invalid_argument, + kload_error_no_memory, + + kload_error_user_abort, + kload_error_kernel_error, + kload_error_kernel_permission, + + kload_error_executable_bad, + kload_error_already_loaded, + kload_error_loaded_version_differs, + kload_error_dependency_loaded_version_differs, + kload_error_link_load +} kload_error; + +typedef enum { + kload_log_level_silent = -2, // no notices, no errors + kload_log_level_errors_only = -1, + kload_log_level_default = 0, + kload_log_level_basic = 1, + kload_log_level_load_basic = 2, + kload_log_level_details = 3, + kload_log_level_kexts = 4, + kload_log_level_kext_details = 5, + kload_log_level_load_details = 6 +} kload_log_level; + +#endif /* KERNEL */ + + +kload_error kload_load_dgraph(dgraph_t * dgraph +#ifndef KERNEL + , + const char * kernel_file, + const char * patch_file, const char * patch_dir, + const char * symbol_file, const char * symbol_dir, + int do_load, int do_start_kmod, int do_prelink, + int interactive_level, + int ask_overwrite_symbols, int overwrite_symbols +#endif /* not KERNEL */ + ); + +#ifndef KERNEL +kload_error kload_load_with_arglist( + int argc, char **argv, + const char * kernel_file, + const char * patch_file, const char * patch_dir, + const char * symbol_file, const char * symbol_dir, + int do_load, int do_start_kmod, + int interactive_level, + int ask_overwrite_symbols, int overwrite_symbols); +#endif /* not KERNEL */ + 
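+/* Illustrative call of kload_load_with_arglist() (not from the original
+ * sources; the kernel path and flag values here are hypothetical):
+ *
+ *     kload_error err = kload_load_with_arglist(argc, argv,
+ *         "/mach_kernel",   // kernel_file
+ *         NULL, NULL,       // patch_file, patch_dir
+ *         NULL, NULL,       // symbol_file, symbol_dir
+ *         1, 1,             // do_load, do_start_kmod
+ *         0,                // interactive_level
+ *         0, 0);            // ask_overwrite_symbols, overwrite_symbols
+ */
+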
+kload_error kload_map_dgraph(dgraph_t * dgraph +#ifndef KERNEL + , + const char * kernel_file +#endif /* not KERNEL */ + ); +kload_error kload_map_entry(dgraph_entry_t * entry); + +#ifndef KERNEL +int kload_file_exists(const char * path); +kload_error kload_request_load_addresses( + dgraph_t * dgraph, + const char * kernel_file); +kload_error kload_set_load_addresses_from_args( + dgraph_t * dgraph, + const char * kernel_file, + char ** addresses); +#endif /* not KERNEL */ + +kload_error kload_set_load_addresses_from_kernel( + dgraph_t * dgraph +#ifndef KERNEL + , + const char * kernel_file, + int do_load +#endif /* not KERNEL */ + ); + +void kload_set_log_level(kload_log_level level); +#ifndef KERNEL +void kload_set_log_function( + void (*)(const char * format, ...)); +void kload_set_error_log_function( + void (*)(const char * format, ...)); +void kload_set_user_approve_function( + int (*)(int default_answer, const char * format, ...)); +void kload_set_user_veto_function( + int (*)(int default_answer, const char * format, ...)); +void kload_set_user_input_function( + const char * (*)(const char * format, ...)); + +void kload_log_message(const char * format, ...); +void kload_log_error(const char * format, ...); +#define KNL "" + +#else +#define kload_log_message IOLog +#define kload_log_error IOLog +#define KNL "\n" + +#endif /* not KERNEL */ + + + +#endif /* __LOAD_H__ */ + +#ifdef __cplusplus +} +#endif + diff --git a/libsa/malloc.c b/libsa/malloc.c index c5bcb1ec9..00a94e54e 100644 --- a/libsa/malloc.c +++ b/libsa/malloc.c @@ -22,43 +22,17 @@ * * @APPLE_LICENSE_HEADER_END@ */ -#include -#include #include -#include - -#undef CLIENT_DEBUG - - -/********************************************************************* -* I'm not sure this is really necessary.... -*********************************************************************/ -static inline size_t round_to_long(size_t size) { - return (size + sizeof(long int)) & ~(sizeof(long int) - 1); -} - - -typedef struct queue_entry queue_entry; - -/********************************************************************* -* Structure for an allocation region. Each one is created using -* kmem_alloc(), and the whole list of these is destroyed by calling -* malloc_reset(). Client blocks are allocated from a linked list of these -* regions, on a first-fit basis, and are never freed. -*********************************************************************/ -typedef struct malloc_region { - queue_entry links; // Uses queue.h for linked list - vm_size_t region_size; // total size w/ this bookeeping info - - queue_entry block_list; // list of allocated blocks; uses queue.h - - vm_size_t free_size; // size of unused area - void * free_address; // points at the unused area +#include +#include +#include +#include +#include - char buffer[0]; // beginning of useable area -} malloc_region; +#include "libsa/malloc.h" +extern void panic(const char *string, ...); /********************************************************************* * Structure for a client memory block. Contains linked-list pointers, @@ -67,171 +41,49 @@ typedef struct malloc_region { * field is guaranteed to lie on a 16-byte boundary. 
*********************************************************************/ typedef struct malloc_block { - queue_entry links; // Uses queue.h for linked list - malloc_region * region; -#ifdef CLIENT_DEBUG - size_t request_size; -#endif /* CLIENT_DEBUG */ - size_t block_size; // total size w/ all bookeeping info - - // the client's memory block - char buffer[0] __attribute__((aligned(16))); -} malloc_block; - -/********************************************************************* -* Private functions. -* -* malloc_create_region() -* size - The size in bytes of the region. This is rounded up -* to a multiple of the VM page size. -* Returns a pointer to the new region. -* -* malloc_free_region() -* region - The region to free. -* Returns whatever vm_deallocate() returns. -* -* malloc_create_block_in_region() -* region - The region to alloate a block from. -* size - The total size, including the header, of the block to -* allocate. -* Returns a pointer to the block, or NULL on failure. -* -* malloc_find_block() -* address - The address of the client buffer to find a block for. -* block (out) - The block header for the address. -* region (out) - The region the block was found in, or NULL. -*********************************************************************/ -static malloc_region * malloc_create_region(vm_size_t size); -static kern_return_t malloc_free_region(malloc_region * region); -static malloc_block * malloc_create_block_in_region( - malloc_region * region, - size_t size); -static void malloc_find_block( - void * address, - malloc_block ** block, - malloc_region ** region); -static void malloc_get_free_block( - size_t size, - malloc_block ** block, - malloc_region ** region); - - -/********************************************************************* -* Pointers to the linked list of VM-allocated regions, and a high -* water mark used in testing/debugging. -*********************************************************************/ -static queue_entry malloc_region_list = { - &malloc_region_list, // the "next" field - &malloc_region_list // the "prev" field -}; - -static queue_entry sorted_free_block_list = { - &sorted_free_block_list, - &sorted_free_block_list -}; - -#ifdef CLIENT_DEBUG -static size_t malloc_hiwater_mark = 0; -static long int num_regions = 0; + struct malloc_block *malFwd; + struct malloc_block *malBwd; + unsigned int malSize; + unsigned int malActl; +} malloc_block; -static size_t current_block_total = 0; -static double peak_usage = 0.0; -static double min_usage = 100.0; -#endif /* CLIENT_DEBUG */ +static malloc_block malAnchor = {&malAnchor, &malAnchor, 0, 0}; +static int malInited = 0; +static mutex_t *malloc_lock; -/********************************************************************* -* malloc() -*********************************************************************/ __private_extern__ void * malloc(size_t size) { - size_t need_size; - malloc_region * cur_region = NULL; - malloc_region * use_region = NULL; - malloc_block * client_block = NULL; - void * client_buffer = NULL; - - /* Add the size of the block header to the request size. - */ - need_size = round_to_long(size + sizeof(malloc_block)); - - - /* See if there's a previously-freed block that we can reuse. - */ - malloc_get_free_block(need_size, - &client_block, &use_region); - - /* If we found a free block that we can reuse, then reuse it. - */ - if (client_block != NULL) { - - /* Remove the found block from the list of free blocks - * and tack it onto the list of allocated blocks. 
- */ - queue_remove(&sorted_free_block_list, client_block, malloc_block *, links); - queue_enter(&use_region->block_list, client_block, malloc_block *, links); - - client_buffer = client_block->buffer; - // Don't return here! There's bookkeeping done below. - - } else { - - /* Didn't find a freed block to reuse. */ - - /* Look for a region with enough unused space to carve out a new block. - */ - queue_iterate(&malloc_region_list, cur_region, malloc_region *, links) { - if (use_region == NULL && cur_region->free_size >= need_size) { - use_region = cur_region; - break; - } - } - - - /* If we haven't found a region with room, create a new one and - * put it at the end of the list of regions. - */ - if (use_region == NULL) { - use_region = malloc_create_region(need_size); - if (use_region == NULL) { - return NULL; - // FIXME: panic? - } - } - - /* Create a new block in the found/created region. - */ - client_block = malloc_create_block_in_region(use_region, need_size); - if (client_block != NULL) { - client_buffer = client_block->buffer; - // Don't return here! There's bookkeeping done below. - } - } - -#ifdef CLIENT_DEBUG - if (client_block != NULL) { - size_t region_usage = malloc_region_usage(); - double current_usage; - - current_block_total += client_block->block_size; - if (region_usage > 0) { - current_usage = (double)current_block_total / (double)malloc_region_usage(); - if (current_usage > peak_usage) { - peak_usage = current_usage; - } - - if (current_usage < min_usage) { - min_usage = current_usage; - } - } - - client_block->request_size = size; - } -#endif /* CLIENT_DEBUG */ - - return client_buffer; + unsigned int nsize; + unsigned int nmem, rmem; + malloc_block *amem; + + assert(malInited); + + nsize = size + sizeof(malloc_block) + 15; /* Make sure we get enough to fit */ + + nmem = (unsigned int)kalloc(nsize); /* Get some */ + if(!nmem) { /* Got any? */ + panic("malloc: no memory for a %08X sized request\n", nsize); + } + + rmem = (nmem + 15) & -16; /* Round to 16 byte boundary */ + amem = (malloc_block *)rmem; /* Point to the block */ + amem->malActl = (unsigned int)nmem; /* Set the actual address */ + amem->malSize = nsize; /* Size */ + + mutex_lock(malloc_lock); + + amem->malFwd = malAnchor.malFwd; /* Move anchor to our forward */ + amem->malBwd = &malAnchor; /* We point back to anchor */ + malAnchor.malFwd->malBwd = amem; /* The old forward's back points to us */ + malAnchor.malFwd = amem; /* Now we point the anchor to us */ + + mutex_unlock(malloc_lock); /* Unlock now */ + + return (void *)(rmem + 16); /* Return the block */ } /* malloc() */ @@ -239,56 +91,45 @@ void * malloc(size_t size) { /********************************************************************* * free() * -* Moves a block from the allocated list to the free list. Neither -* list is kept sorted! *********************************************************************/ __private_extern__ void free(void * address) { - malloc_region * found_region = NULL; - malloc_block * found_block = NULL; - malloc_block * cur_block = NULL; - - /* Find the block and region for the given address. - */ - malloc_find_block(address, &found_block, &found_region); - - if (found_block == NULL) { - return; - // FIXME: panic? - } - - - /* Remove the found block from the list of allocated blocks - * and tack it onto the list of free blocks. 
- */ - queue_remove(&found_region->block_list, found_block, malloc_block *, links); - found_block->links.next = NULL; - queue_iterate(&sorted_free_block_list, cur_block, malloc_block *, links) { - if (cur_block->block_size > found_block->block_size) { - queue_insert_before(&sorted_free_block_list, found_block, cur_block, - malloc_block *, links); - break; - } - } + malloc_block *amem, *fore, *aft; + + if(!(unsigned int)address) return; /* Leave if they try to free nothing */ + + + amem = (malloc_block *)((unsigned int)address - sizeof(malloc_block)); /* Point to the header */ - /* If the "next" link is still NULL, then either the list is empty or the - * freed block has to go on the end, so just tack it on. - */ - if (found_block->links.next == NULL) { - queue_enter(&sorted_free_block_list, found_block, malloc_block *, links); - } + mutex_lock(malloc_lock); + fore = amem->malFwd; /* Get the guy in front */ + aft = amem->malBwd; /* And the guy behind */ + fore->malBwd = aft; /* The next guy's previous is now my previous */ + aft->malFwd = fore; /* The previous guy's forward is now mine */ -#ifdef CLIENT_DEBUG - current_block_total -= found_block->block_size; -#endif /* CLIENT_DEBUG */ + mutex_unlock(malloc_lock); /* Unlock now */ + + kfree(amem->malActl, amem->malSize); /* Toss it */ - return; + return; } /* free() */ +/********************************************************************* +* malloc_init() +* +* Allocate the mutual exclusion lock that protects malloc's data. +*********************************************************************/ +__private_extern__ void +malloc_init(void) +{ + malloc_lock = mutex_alloc(ETAP_IO_AHA); + malInited = 1; +} + /********************************************************************* * malloc_reset() @@ -299,20 +140,27 @@ void free(void * address) { *********************************************************************/ __private_extern__ void malloc_reset(void) { - malloc_region * cur_region; - - while (! queue_empty(&malloc_region_list)) { - kern_return_t kern_result; - queue_remove_first(&malloc_region_list, cur_region, - malloc_region *, links); - kern_result = malloc_free_region(cur_region); - if (kern_result != KERN_SUCCESS) { - // what sort of error checking can we even do here? - // printf("malloc_free_region() failed.\n"); - // panic(); - } - } - + + malloc_block *amem, *bmem; + + mutex_lock(malloc_lock); + + amem = malAnchor.malFwd; /* Get the first one */ + + while(amem != &malAnchor) { /* Go until we hit the anchor */ + + bmem = amem->malFwd; /* Next one */ + kfree(amem->malActl, amem->malSize); /* Toss it */ + amem = bmem; /* Skip to it */ + + } + + malAnchor.malFwd = (struct malloc_block *) 0x666; /* Cause a fault if we try again */ + malAnchor.malBwd = (struct malloc_block *) 0x666; /* Cause a fault if we try again */ + + mutex_unlock(malloc_lock); /* Unlock now */ + + mutex_free(malloc_lock); return; } /* malloc_reset() */ @@ -327,270 +175,22 @@ void malloc_reset(void) { *********************************************************************/ __private_extern__ void * realloc(void * address, size_t new_client_size) { - malloc_region * found_region = NULL; - malloc_block * found_block = NULL; void * new_address; - size_t new_block_size; - size_t copy_bytecount; - - - malloc_find_block(address, &found_block, &found_region); - - - /* If we couldn't find the requested block, - * the caller is in error so return NULL. - */ - if (found_block == NULL) { - // printf("realloc() called with invalid block.\n"); - return NULL; - // FIXME: panic?
- } - - - /* Figure out how much memory is actually needed. - */ - new_block_size = new_client_size + sizeof(malloc_block); - - - /* If the new size is <= the current size, don't bother. - */ - if (new_block_size <= found_block->block_size) { -#ifdef CLIENT_DEBUG - if (new_client_size > found_block->request_size) { - found_block->request_size = new_client_size; - } -#endif /* CLIENT_DEBUG */ - return address; - } - - - /* Create a new block of the requested size. - */ - new_address = malloc(new_client_size); - - if (new_address == NULL) { - // printf("error in realloc()\n"); - return NULL; - // FIXME: panic? - } - - - /* Copy the data from the old block to the new one. - * Make sure to copy only the lesser of the existing and - * requested new size. (Note: The code above currently - * screens out a realloc to a smaller size, but it might - * not always do that.) - */ - copy_bytecount = found_block->block_size - sizeof(malloc_block); - - if (new_client_size < copy_bytecount) { - copy_bytecount = new_client_size; - } - - memcpy(new_address, address, copy_bytecount); - - - /* Free the old block. - */ - free(address); - - return (void *)new_address; + malloc_block *amem; + unsigned int copy_size; + + amem = (malloc_block *)((unsigned int)address - sizeof(malloc_block)); /* Point to allocation block */ + + new_address = malloc(new_client_size); /* get a new one */ + if(!new_address) { /* Did we get it? */ + panic("realloc: can not reallocate one of %08X size\n", new_client_size); + } + + copy_size = amem->malSize - sizeof(malloc_block) - 15; /* Recover the size of the old request */ + if (copy_size > new_client_size) copy_size = new_client_size; /* Copy no more than the new block holds */ + memcpy(new_address, address, copy_size); /* Copy the lesser of the old and new sizes */ + + free(address); /* Toss the old one */ + + return new_address; } /* realloc() */ -/********************************************************************* -********************************************************************** -***** PACKAGE-INTERNAL FUNCTIONS BELOW HERE ***** -********************************************************************** -*********************************************************************/ - - - -/********************************************************************* -* malloc_create_region() -* -* Package-internal function. VM-allocates a new region and adds it to -* the given region list. -*********************************************************************/ -__private_extern__ -malloc_region * malloc_create_region(vm_size_t block_size) { - - malloc_region * new_region; - vm_address_t vm_address; - vm_size_t region_size; - kern_return_t kern_result; - - - /* Figure out how big the region needs to be and allocate it. - */ - region_size = block_size + sizeof(malloc_region); - region_size = round_page(region_size); - - kern_result = kmem_alloc(kernel_map, - &vm_address, region_size); - - if (kern_result != KERN_SUCCESS) { - // printf("kmem_alloc() failed in malloc_create_region()\n"); - return NULL; - // panic(); - } - - - /* Cast the allocated pointer to a region header. - */ - new_region = (malloc_region *)vm_address; - - - /* Initialize the region header fields and link it onto - * the previous region. - */ - new_region->region_size = region_size; - queue_init(&new_region->block_list); -// queue_init(&new_region->free_list); - - new_region->free_size = region_size - sizeof(malloc_region); - new_region->free_address = &new_region->buffer; - - queue_enter(&malloc_region_list, new_region, malloc_region *, links); - - /* If debugging, add the new region's size to the total.
- */ -#ifdef CLIENT_DEBUG - malloc_hiwater_mark += region_size; - num_regions++; -#endif /* CLIENT_DEBUG */ - - return new_region; - -} /* malloc_create_region() */ - - -/********************************************************************* -* malloc_free_region() -* -* Package-internal function. VM-deallocates the given region. -*********************************************************************/ -__private_extern__ -kern_return_t malloc_free_region(malloc_region * region) { - - kmem_free(kernel_map, - (vm_address_t)region, - region->region_size); - -#ifdef CLIENT_DEBUG - num_regions--; -#endif /* CLIENT_DEBUG */ - return KERN_SUCCESS; - -} /* malloc_free_region() */ - - -/********************************************************************* -* malloc_create_block_in_region() -* -* Package-internal function. Allocates a new block out of the given -* region. The requested size must include the block header. If the -* size requested is larger than the region's free size, returns NULL. -*********************************************************************/ -__private_extern__ -malloc_block * malloc_create_block_in_region( - malloc_region * region, - size_t block_size) { - - malloc_block * new_block = NULL; - - - /* Sanity checking. - */ - if (block_size > region->free_size) { - return NULL; - // FIXME: panic? - } - - - /* Carve out a new block. - */ - new_block = (malloc_block *)region->free_address; - region->free_address = (char *)region->free_address + block_size; - region->free_size -= block_size; - - memset(new_block, 0, sizeof(malloc_block)); - - new_block->region = region; - new_block->block_size = block_size; - - /* Record the new block as the last one in the region. - */ - queue_enter(®ion->block_list, new_block, malloc_block *, links); - - return new_block; - -} /* malloc_create_block_in_region() */ - - -/********************************************************************* -* malloc_find_block() -* -* Package-internal function. Given a client buffer address, find the -* malloc_block for it. -*********************************************************************/ -__private_extern__ -void malloc_find_block(void * address, - malloc_block ** block, - malloc_region ** region) { - - malloc_region * cur_region; - - *block = NULL; - *region = NULL; - - queue_iterate(&malloc_region_list, cur_region, malloc_region *, links) { - - malloc_block * cur_block; - - queue_iterate(&cur_region->block_list, cur_block, malloc_block *, links) { - if (cur_block->buffer == address) { - *block = cur_block; - *region = cur_region; - return; - } - } - } - - return; - -} /* malloc_find_block() */ - - -/********************************************************************* -* malloc_get_free_block() -*********************************************************************/ -__private_extern__ -void malloc_get_free_block( - size_t size, - malloc_block ** block, - malloc_region ** region) { - - malloc_block * cur_block; - size_t fit_threshold = 512; - - *block = NULL; - *region = NULL; - - queue_iterate(&sorted_free_block_list, cur_block, malloc_block *, links) { - - /* If we find a block large enough, but not too large to waste memory, - * pull it out and return it, along with its region. 
- */ - if (cur_block->block_size >= size && - cur_block->block_size < (size + fit_threshold)) { - - queue_remove(&sorted_free_block_list, cur_block, malloc_block *, links); - *block = cur_block; - *region = cur_block->region; - return; - } - } - return; -} diff --git a/libsa/vers_rsrc.c b/libsa/vers_rsrc.c index 9f9f52e6e..409b71644 100644 --- a/libsa/vers_rsrc.c +++ b/libsa/vers_rsrc.c @@ -1,29 +1,53 @@ -#include +#ifndef KERNEL +#include +#include "vers_rsrc.h" +#else #include -#include - - -int isdigit(char c) { +#include +#endif /* not KERNEL */ + +#ifndef KERNEL +#define PRIV_EXT +#else +#define PRIV_EXT __private_extern__ +#endif /* not KERNEL */ + +#define VERS_MAJOR_DIGITS (4) +#define VERS_MINOR_DIGITS (2) +#define VERS_REVISION_DIGITS (2) +#define VERS_STAGE_DIGITS (1) +#define VERS_STAGE_LEVEL_DIGITS (3) + +#define VERS_MAJOR_MULT (100000000) +#define VERS_MINOR_MULT (1000000) +#define VERS_REVISION_MULT (10000) +#define VERS_STAGE_MULT (1000) + +typedef enum { + VERS_invalid = 0, + VERS_development = 1, + VERS_alpha = 3, + VERS_beta = 5, + VERS_candidate = 7, + VERS_release = 9, +} VERS_stage; + + +static int __vers_isdigit(char c) { return (c == '0' || c == '1' || c == '2' || c == '3' || c == '4' || c == '5' || c == '6' || c == '7' || c == '8' || c == '9'); } -int isspace(char c) { +static int __vers_isspace(char c) { return (c == ' ' || c == '\t' || c == '\r' || c == '\n'); } - -int isreleasestate(char c) { - return (c == 'd' || c == 'a' || c == 'b' || c == 'f'); -} - - -UInt8 BCD_digit_for_char(char c) { +static int __vers_digit_for_char(char c) { switch (c) { case '0': return 0; break; case '1': return 1; break; @@ -35,32 +59,19 @@ UInt8 BCD_digit_for_char(char c) { case '7': return 7; break; case '8': return 8; break; case '9': return 9; break; - default: return BCD_illegal; break; + default: return -1; break; } - return BCD_illegal; -} + return -1; +} -char BCD_char_for_digit(UInt8 digit) { - switch (digit) { - case 0: return '0'; break; - case 1: return '1'; break; - case 2: return '2'; break; - case 3: return '3'; break; - case 4: return '4'; break; - case 5: return '5'; break; - case 6: return '6'; break; - case 7: return '7'; break; - case 8: return '8'; break; - case 9: return '9'; break; - default: return '?'; break; - } - return '?'; +static int __VERS_isreleasestate(char c) { + return (c == 'd' || c == 'a' || c == 'b' || c == 'f'); } -VERS_revision VERS_revision_for_string(const char ** string_p) { - const char * string; +static VERS_stage __VERS_stage_for_string(char ** string_p) { + char * string; if (!string_p || !*string_p) { return VERS_invalid; @@ -68,33 +79,33 @@ VERS_revision VERS_revision_for_string(const char ** string_p) { string = *string_p; - if (isspace(string[0]) || string[0] == '\0') { + if (__vers_isspace(string[0]) || string[0] == '\0') { return VERS_release; } else { switch (string[0]) { case 'd': - if (isdigit(string[1])) { + if (__vers_isdigit(string[1])) { *string_p = &string[1]; return VERS_development; } break; case 'a': - if (isdigit(string[1])) { + if (__vers_isdigit(string[1])) { *string_p = &string[1]; return VERS_alpha; } break; case 'b': - if (isdigit(string[1])) { + if (__vers_isdigit(string[1])) { *string_p = &string[1]; return VERS_beta; } break; case 'f': - if (isdigit(string[1])) { + if (__vers_isdigit(string[1])) { *string_p = &string[1]; return VERS_candidate; - } else if (string[1] == 'c' && isdigit(string[2])) { + } else if (string[1] == 'c' && __vers_isdigit(string[2])) { *string_p = &string[2]; return VERS_candidate; } else 
{ @@ -110,321 +121,297 @@ VERS_revision VERS_revision_for_string(const char ** string_p) { return VERS_invalid; } +static char * __VERS_string_for_stage(VERS_stage stage) { + switch (stage) { + case VERS_invalid: return "?"; break; + case VERS_development: return "d"; break; + case VERS_alpha: return "a"; break; + case VERS_beta: return "b"; break; + case VERS_candidate: return "f"; break; + case VERS_release: return ""; break; + } -int VERS_parse_string(const char * vers_string, UInt32 * version_num) { - int result = 1; - VERS_version vers; - const char * current_char_p; - UInt8 scratch; + return "?"; +} + +PRIV_EXT +VERS_version VERS_parse_string(const char * vers_string) { + VERS_version result = -1; + int vers_digit = -1; + int num_digits_scanned = 0; + VERS_version vers_major = 0; + VERS_version vers_minor = 0; + VERS_version vers_revision = 0; + VERS_version vers_stage = 0; + VERS_version vers_stage_level = 0; + char * current_char_p; if (!vers_string || *vers_string == '\0') { - return 0; + return -1; } - vers.vnum = 0; - - current_char_p = &vers_string[0]; - + current_char_p = (char *)&vers_string[0]; /***** * Check for an initial digit of the major release number. */ - vers.bytes[0] = BCD_digit_for_char(*current_char_p); - if (vers.bytes[0] == BCD_illegal) { - return 0; + vers_major = __vers_digit_for_char(*current_char_p); + if (vers_major < 0) { + return -1; } current_char_p++; + num_digits_scanned = 1; - - /***** - * Check for a second digit of the major release number. + /* Complete scan for major version number. Legal characters are + * any digit, period, any buildstage letter. */ - if (*current_char_p == '\0') { - vers.bytes[2] = VERS_release; - vers.bytes[3] = 0xff; - goto finish; - } else if (isdigit(*current_char_p)) { - scratch = BCD_digit_for_char(*current_char_p); - if (scratch == BCD_illegal) { - return 0; - } - vers.bytes[0] = BCD_combine(vers.bytes[0], scratch); - current_char_p++; - - if (*current_char_p == '\0') { - vers.bytes[2] = VERS_release; - vers.bytes[3] = 0xff; + while (num_digits_scanned < VERS_MAJOR_DIGITS) { + if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { + vers_stage = VERS_release; goto finish; - } else if (isreleasestate(*current_char_p)) { + } else if (__vers_isdigit(*current_char_p)) { + vers_digit = __vers_digit_for_char(*current_char_p); + if (vers_digit < 0) { + return -1; + } + vers_major = (vers_major) * 10 + vers_digit; + current_char_p++; + num_digits_scanned++; + } else if (__VERS_isreleasestate(*current_char_p)) { goto release_state; } else if (*current_char_p == '.') { current_char_p++; + goto minor_version; } else { - return 0; + return -1; } - } else if (isreleasestate(*current_char_p)) { - goto release_state; - } else if (*current_char_p == '.') { - current_char_p++; - } else { - return 0; } - - /***** - * Check for the minor release number. + /* Check for too many digits. */ - if (*current_char_p == '\0') { - vers.bytes[2] = VERS_release; - vers.bytes[3] = 0xff; - goto finish; - } else if (isdigit(*current_char_p)) { - vers.bytes[1] = BCD_digit_for_char(*current_char_p); - if (vers.bytes[1] == BCD_illegal) { - return 0; + if (num_digits_scanned == VERS_MAJOR_DIGITS) { + if (*current_char_p == '.') { + current_char_p++; + } else if (__vers_isdigit(*current_char_p)) { + return -1; } + } - // Make sure its the first nibble of byte 1! 
- vers.bytes[1] = BCD_combine(vers.bytes[1], 0); +minor_version: - current_char_p++; + num_digits_scanned = 0; - if (*current_char_p == '\0') { - vers.bytes[2] = VERS_release; - vers.bytes[3] = 0xff; + /* Scan for minor version number. Legal characters are + * any digit, period, any buildstage letter. + */ + while (num_digits_scanned < VERS_MINOR_DIGITS) { + if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { + vers_stage = VERS_release; goto finish; - } else if (isreleasestate(*current_char_p)) { + } else if (__vers_isdigit(*current_char_p)) { + vers_digit = __vers_digit_for_char(*current_char_p); + if (vers_digit < 0) { + return -1; + } + vers_minor = (vers_minor) * 10 + vers_digit; + current_char_p++; + num_digits_scanned++; + } else if (__VERS_isreleasestate(*current_char_p)) { goto release_state; } else if (*current_char_p == '.') { current_char_p++; + goto revision; } else { - return 0; + return -1; } - } else { - return 0; } - - /***** - * Check for the bugfix number. + /* Check for too many digits. */ - if (*current_char_p == '\0') { - vers.bytes[2] = VERS_release; - vers.bytes[3] = 0xff; - goto finish; - } else if (isdigit(*current_char_p)) { - scratch = BCD_digit_for_char(*current_char_p); - if (scratch == BCD_illegal) { - return 0; + if (num_digits_scanned == VERS_MINOR_DIGITS) { + if (*current_char_p == '.') { + current_char_p++; + } else if (__vers_isdigit(*current_char_p)) { + return -1; } + } - /* vers.bytes[1] has its left nibble set already */ - vers.bytes[1] = vers.bytes[1] | scratch; +revision: - current_char_p++; + num_digits_scanned = 0; - if (*current_char_p == '\0') { - vers.bytes[2] = VERS_release; - vers.bytes[3] = 0xff; + /* Scan for revision version number. Legal characters are + * any digit, any buildstage letter (NOT PERIOD). + */ + while (num_digits_scanned < VERS_REVISION_DIGITS) { + if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { + vers_stage = VERS_release; goto finish; - } else if (isreleasestate(*current_char_p)) { + } else if (__vers_isdigit(*current_char_p)) { + vers_digit = __vers_digit_for_char(*current_char_p); + if (vers_digit < 0) { + return -1; + } + vers_revision = (vers_revision) * 10 + vers_digit; + current_char_p++; + num_digits_scanned++; + } else if (__VERS_isreleasestate(*current_char_p)) { goto release_state; } else { - return 0; + return -1; } - } else { - return 0; } + /* Check for too many digits. + */ + if (num_digits_scanned == VERS_REVISION_DIGITS) { + if (*current_char_p == '.') { + current_char_p++; + } else if (__vers_isdigit(*current_char_p)) { + return -1; + } + } release_state: /***** * Check for the release state. */ - if (*current_char_p == '\0') { - vers.bytes[2] = VERS_release; - vers.bytes[3] = 0xff; + if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { + vers_stage = VERS_release; goto finish; } else { - vers.bytes[2] = VERS_revision_for_string(¤t_char_p); - if (vers.bytes[2] == VERS_invalid) { - return 0; + vers_stage = __VERS_stage_for_string(¤t_char_p); + if (vers_stage == VERS_invalid) { + return -1; } } - /***** - * Get the nonrelease revision number (0..255). 
- */ - if (vers.bytes[2] != VERS_release) { - UInt32 revision_num = 0; - int i; +// stage level - if (*current_char_p == '\0' || !isdigit(*current_char_p)) { - return 0; - } - for (i = 0; i < 3 && *current_char_p != '\0'; i++, current_char_p++) { - UInt8 scratch_digit; - scratch_digit = BCD_digit_for_char(*current_char_p); - if (scratch_digit == BCD_illegal) { - return 0; - } - revision_num *= 10; - revision_num += scratch_digit; - } - if (isdigit(*current_char_p) || revision_num > 255) { - return 0; - } - vers.bytes[3] = (UInt8)revision_num; - } + num_digits_scanned = 0; - if (vers.bytes[2] == VERS_release) { - vers.bytes[3] = 0xff; - } else { - if (vers.bytes[2] == VERS_candidate) { - if (vers.bytes[3] == 0) { - return 0; + /* Scan for stage level number. Legal characters are + * any digit only. + */ + while (num_digits_scanned < VERS_STAGE_LEVEL_DIGITS) { + if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { + if (num_digits_scanned) { + goto finish; } else { - vers.bytes[2] = VERS_release; - vers.bytes[3]--; + return -1; + } + } else if (__vers_isdigit(*current_char_p)) { + vers_digit = __vers_digit_for_char(*current_char_p); + if (vers_digit < 0) { + return -1; } + vers_stage_level = (vers_stage_level) * 10 + vers_digit; + current_char_p++; + num_digits_scanned++; + } else { + return -1; } } -finish: - *version_num = OSSwapBigToHostInt32(vers.vnum); - return result; -} + /* Check for too many digits. + */ + if ((num_digits_scanned == VERS_STAGE_LEVEL_DIGITS) && + ! (__vers_isspace(*current_char_p) || (*current_char_p == '\0'))) { + return -1; + } -#define VERS_STRING_MAX_LEN (12) + if (vers_stage_level > 255) { + return -1; + } -int VERS_string(char * buffer, UInt32 length, UInt32 vers) { - VERS_version version; - int cpos = 0; - int result = 1; +finish: - char major1; - char major2; - char minor; - char bugfix; + if (vers_stage == VERS_candidate && vers_stage_level == 0) { + return -1; + } - version.vnum = OSSwapHostToBigInt32(vers); + result = (vers_major * VERS_MAJOR_MULT) + + (vers_minor * VERS_MINOR_MULT) + + (vers_revision * VERS_REVISION_MULT) + + (vers_stage * VERS_STAGE_MULT) + + vers_stage_level; - /* No buffer, length less than longest possible vers string, + return result; +} + +#define VERS_STRING_MAX_LEN (16) + +PRIV_EXT +int VERS_string(char * buffer, UInt32 length, VERS_version vers) { + int cpos = 0; + VERS_version vers_major = 0; + VERS_version vers_minor = 0; + VERS_version vers_revision = 0; + VERS_version vers_stage = 0; + VERS_version vers_stage_level = 0; + char * stage_string = NULL; // don't free + + /* No buffer or length less than longest possible vers string, * return 0. */ if (!buffer || length < VERS_STRING_MAX_LEN) { - result = -1; - goto finish; + return 0; } bzero(buffer, length * sizeof(char)); - - /***** - * Major version number. - */ - major1 = BCD_char_for_digit(BCD_get_left(version.bytes[0])); - if (major1 == '?') { - result = 0; - } /* this is not an 'else' situation */ - if (major1 != '0') { - buffer[cpos] = major1; - cpos++; + if (vers < 0) { + strcpy(buffer, "(invalid)"); + return 1; } - major2 = BCD_char_for_digit(BCD_get_right(version.bytes[0])); - if (major2 == '?') { - result = 0; - } + vers_major = vers / VERS_MAJOR_MULT; - buffer[cpos] = major2; - cpos++; + vers_minor = vers - (vers_major * VERS_MAJOR_MULT); + vers_minor /= VERS_MINOR_MULT; + vers_revision = vers - + ( (vers_major * VERS_MAJOR_MULT) + (vers_minor * VERS_MINOR_MULT) ); + vers_revision /= VERS_REVISION_MULT; - /***** - * Minor & bug-fix version numbers. 
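With the multipliers defined above, the new VERS_parse_string() packs a version string into one decimal-weighted integer: major*100000000 + minor*1000000 + revision*10000 + stage*1000 + stage_level, where the stage codes are d=1, a=3, b=5, f/fc=7 and release=9. A small standalone check of that encoding, assuming only the arithmetic shown above (it mirrors the formula rather than calling the libsa routines):

#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* "1.2.3b45": major 1, minor 2, revision 3, beta stage (5), level 45 */
    long vers = 1L * 100000000 + 2L * 1000000 + 3L * 10000 + 5L * 1000 + 45;
    assert(vers == 102035045);

    /* Unpack with divide/modulo, equivalent to the subtract-and-divide
     * decomposition that VERS_string() performs. */
    long major    = vers / 100000000;
    long minor    = (vers / 1000000) % 100;
    long revision = (vers / 10000) % 100;
    long stage    = (vers / 1000) % 10;
    long level    = vers % 1000;
    printf("%ld.%ld.%ldb%ld (stage code %ld)\n",
        major, minor, revision, level, stage);
    return 0;
}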
- */ - minor = BCD_char_for_digit(BCD_get_left(version.bytes[1])); - if (minor == '?') { - result = 0; - } - bugfix = BCD_char_for_digit(BCD_get_right(version.bytes[1])); - if (bugfix == '?') { - result = 0; - } + vers_stage = vers - + ( (vers_major * VERS_MAJOR_MULT) + (vers_minor * VERS_MINOR_MULT) + + (vers_revision * VERS_REVISION_MULT)); + vers_stage /= VERS_STAGE_MULT; + + vers_stage_level = vers - + ( (vers_major * VERS_MAJOR_MULT) + (vers_minor * VERS_MINOR_MULT) + + (vers_revision * VERS_REVISION_MULT) + (vers_stage * VERS_STAGE_MULT)); + cpos = sprintf(buffer, "%lu", (UInt32)vers_major); - /* Always display the minor version number. + /* Always include the minor version; it just looks weird without. */ buffer[cpos] = '.'; cpos++; - buffer[cpos] = minor; - cpos++; + cpos += sprintf(buffer+cpos, "%lu", (UInt32)vers_minor); - - /* Only display the bugfix version number if it's nonzero. + /* The revision is displayed only if nonzero. */ - if (bugfix != '0') { + if (vers_revision) { buffer[cpos] = '.'; cpos++; - buffer[cpos] = bugfix; - cpos++; - } - - - /* If the release state is final, we're done! - */ - if (version.bytes[2] == VERS_release && version.bytes[3] == 255) { - result = 0; - goto finish; + cpos += sprintf(buffer+cpos, "%lu", (UInt32)vers_revision); } - - /***** - * Do the release state and update level. - */ - switch (version.bytes[2]) { - case VERS_development: - buffer[cpos] = 'd'; - cpos++; - break; - case VERS_alpha: - buffer[cpos] = 'a'; - cpos++; - break; - case VERS_beta: - buffer[cpos] = 'b'; - cpos++; - break; - case VERS_release: - if (version.bytes[3] < 255) { - buffer[cpos] = 'f'; - buffer[cpos+1] = 'c'; - cpos += 2; - } else { - result = 1; - goto finish; - } - break; - default: - result = 0; - buffer[cpos] = '?'; - cpos++; - break; + stage_string = __VERS_string_for_stage(vers_stage); + if (stage_string && stage_string[0]) { + strcat(buffer, stage_string); + cpos += strlen(stage_string); } - if (version.bytes[2] != VERS_release) { - sprintf(&buffer[cpos], "%d", version.bytes[3]); - } else { - if (version.bytes[3] < 255) { - sprintf(&buffer[cpos], "%d", version.bytes[3] + 1); - } + if (vers_stage < VERS_release) { + sprintf(buffer+cpos, "%lu", (UInt32)vers_stage_level); } -finish: - return result; + return 1; } diff --git a/makedefs/MakeInc.def b/makedefs/MakeInc.def index fb14d0d33..7a705ed5d 100644 --- a/makedefs/MakeInc.def +++ b/makedefs/MakeInc.def @@ -118,20 +118,23 @@ endif export CFLAGS_GEN = -static -g -nostdinc -nostdlib -no-cpp-precomp \ -fno-builtin -finline -fno-keep-inline-functions -msoft-float \ - -fsigned-bitfields -Wpointer-arith $(OTHER_CFLAGS) -fpermissive + -fsigned-bitfields -Wpointer-arith $(OTHER_CFLAGS) export CFLAGS_RELEASE = -export CFLAGS_DEBUG = -fno-omit-frame-pointer -export CFLAGS_PROFILE = +export CFLAGS_DEBUG = +export CFLAGS_PROFILE = -pg -export CFLAGS_PPC = -arch ppc -Dppc -DPPC -D__PPC__ -D_BIG_ENDIAN=__BIG_ENDIAN__ -export CFLAGS_I386 = -arch i386 -Di386 -DI386 -D__I386__ -D_BIG_ENDIAN=__LITTLE_ENDIAN__ +export CFLAGS_PPC = -arch ppc -Dppc -DPPC -D__PPC__ -DPAGE_SIZE_FIXED +export CFLAGS_I386 = -arch i386 -Di386 -DI386 -D__I386__ \ + -march=i686 -mpreferred-stack-boundary=2 -falign-functions=4 -mcpu=pentium4 export CFLAGS_RELEASEPPC = -O2 -mcpu=750 -mmultiple -fschedule-insns export CFLAGS_RELEASE_TRACEPPC = -O2 -mcpu=750 -mmultiple -fschedule-insns -export CFLAGS_DEBUGPPC = -O1 -mcpu=750 -mmultiple -fschedule-insns +export CFLAGS_DEBUGPPC = -O2 -mcpu=750 -mmultiple -fschedule-insns +export CFLAGS_PROFILEPPC = -O2 
-mcpu=750 -mmultiple -fschedule-insns export CFLAGS_RELEASEI386 = -O2 export CFLAGS_DEBUGI386 = -O2 +export CFLAGS_PROFILEI386 = -O2 export CFLAGS = $(CFLAGS_GEN) \ $($(addsuffix $(ARCH_CONFIG),CFLAGS_)) \ @@ -139,16 +142,13 @@ export CFLAGS = $(CFLAGS_GEN) \ $($(addsuffix $(ARCH_CONFIG), $(addsuffix $(KERNEL_CONFIG),CFLAGS_))) \ $(DEFINES) -# Default CCFLAGS -#(we do not call it CPPFLAGS as that has a special meaning in unix tradition -# and in gcc: CPPFLAGS is for C Pre-Processor flags. CCFLAGS has precedent -# in ProjectBuilder because of the .cc extension) +# Default C++ flags # -CPPFLAGS_GEN = -fno-rtti -fno-exceptions -fcheck-new -fapple-kext +CXXFLAGS_GEN = -fno-rtti -fno-exceptions -fcheck-new -fapple-kext -fpermissive -CPPFLAGS = $(CPPFLAGS_GEN) \ - $($(addsuffix $(ARCH_CONFIG),CCFLAGS_)) \ - $($(addsuffix $(KERNEL_CONFIG),CCFLAGS_)) +CXXFLAGS = $(CXXFLAGS_GEN) \ + $($(addsuffix $(ARCH_CONFIG),CXXFLAGS_)) \ + $($(addsuffix $(KERNEL_CONFIG),CXXFLAGS_)) # # Assembler command @@ -181,14 +181,14 @@ LD = /usr/bin/ld # # Default LDFLAGS # -export LDFLAGS_COMPONENT_GEN = -static -r +export LDFLAGS_COMPONENT_GEN = -static -r $(COMP_LDFLAGS_COMPONENT_GEN) -export LDFLAGS_COMPONENT_RELEASE = -export LDFLAGS_COMPONENT_DEBUG = -export LDFLAGS_COMPONENT_PROFILE = +export LDFLAGS_COMPONENT_RELEASE = $(COMP_LDFLAGS_COMPONENT_RELEASE) +export LDFLAGS_COMPONENT_DEBUG = $(COMP_LDFLAGS_COMPONENT_DEBUG) +export LDFLAGS_COMPONENT_PROFILE = $(COMP_LDFLAGS_COMPONENT_PROFILE) -export LDFLAGS_COMPONENT_PPC = -arch ppc -export LDFLAGS_COMPONENT_I386 = -arch i386 +export LDFLAGS_COMPONENT_PPC = -arch ppc $(COMP_LDFLAGS_COMPONENT_PPC) +export LDFLAGS_COMPONENT_I386 = -arch i386 $(COMP_LDFLAGS_COMPONENT_i386) export LDFLAGS_COMPONENT = $(LDFLAGS_COMPONENT_GEN) \ $($(addsuffix $(ARCH_CONFIG),LDFLAGS_COMPONENT_)) \ @@ -201,8 +201,8 @@ export LDFLAGS_KERNEL_RELEASE = export LDFLAGS_KERNEL_DEBUG = export LDFLAGS_KERNEL_PROFILE = -export LDFLAGS_KERNEL_PPC = -arch ppc -segaddr __VECTORS 0x0 -segaddr __TEXT 0x11000 -e __start -sectalign __DATA __common 0x1000 -sectalign __DATA __bss 0x1000 -export LDFLAGS_KERNEL_I386 = -arch i386 -segaddr __TEXT 0x100000 -e _pstart +export LDFLAGS_KERNEL_PPC = -arch ppc -segaddr __VECTORS 0x0 -segaddr __TEXT 0x7000 -e __start -sectalign __TEXT __text 0x1000 -sectalign __DATA __common 0x1000 -sectalign __DATA __bss 0x1000 -sectcreate __PRELINK __text /dev/null -sectcreate __PRELINK __symtab /dev/null -sectcreate __PRELINK __info /dev/null +export LDFLAGS_KERNEL_I386 = -arch i386 -segaddr __TEXT 0x100000 -e _pstart -sectcreate __PRELINK __text /dev/null -sectcreate __PRELINK __symtab /dev/null -sectcreate __PRELINK __info /dev/null export LDFLAGS_KERNEL = $(LDFLAGS_KERNEL_GEN) \ $($(addsuffix $(ARCH_CONFIG),LDFLAGS_KERNEL_)) \ @@ -274,10 +274,11 @@ EXPDIR = EXPORT_HDRS/$(COMPONENT) # # Strip Flags # -export STRIP_FLAGS_RELEASE = -S -export STRIP_FLAGS_RELEASE_TRACE = -S +export STRIP_FLAGS_RELEASE = -S -x +export STRIP_FLAGS_RELEASE_TRACE = -S -x export STRIP_FLAGS_DEBUG = -S -export STRIP_FLAGS_PROFILE = -S +export STRIP_FLAGS_DEBUG_TRACE = -S +export STRIP_FLAGS_PROFILE = -S -x export STRIP_FLAGS = $($(addsuffix $(KERNEL_CONFIG),STRIP_FLAGS_)) diff --git a/makedefs/MakeInc.dir b/makedefs/MakeInc.dir index dbeb55c17..a810d4517 100644 --- a/makedefs/MakeInc.dir +++ b/makedefs/MakeInc.dir @@ -272,7 +272,18 @@ build_all: TARGET=$${TARGET} \ build_all; \ done; \ - ${MAKE} ${MAKEJOBS} INCL_MAKEDEP=TRUE TARGET=$${TARGET} do_build_all; + ${MAKE} ${MAKEJOBS} INCL_MAKEDEP=TRUE 
TARGET=$${TARGET} do_build_all; \ + _TMP_comp_subdir="$(CONFIG_SUBDIRS) $($(addprefix CONFIG_SUBDIRS_, $(ARCH_CONFIG)))"; \ + for comp_subdir in $${_TMP_comp_subdir}; \ + do \ + $(MKDIR) $${comp_subdir}; \ + ${MAKE} -C $${comp_subdir} \ + MAKEFILES=${SOURCE}/$${comp_subdir}/Makefile \ + SOURCE=${SOURCE}$${comp_subdir}/ \ + TARGET=$${TARGET} \ + build_all; \ + done; \ + # # Build all architectures for all Configuration/Architecture options @@ -304,6 +315,7 @@ build_mach_kernel: @echo "[ $(SOURCE) ] make build_mach_kernel $(COMPONENT) $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ ${MAKE} ${MAKEJOBS} do_build_mach_kernel; + # # # Install dependencies order diff --git a/makedefs/MakeInc.rule b/makedefs/MakeInc.rule index 2f844ffa3..86d80e70c 100644 --- a/makedefs/MakeInc.rule +++ b/makedefs/MakeInc.rule @@ -421,7 +421,7 @@ COMP_SOBJ_FILES = $(addprefix $(TARGET)$(COMP_OBJ_DIR), $(COMP_SOBJ_LIST)) $(COMP_SOBJ_FILES): $(TARGET)$(COMP_OBJ_DIR)%.o : %.s ${S_KCC} -E -MD ${SFLAGS} -DASSEMBLER $(INCFLAGS) $< > $(patsubst %.o, %.pp, ${@}); sed '/^\#/d' $(patsubst %.o, %.pp, ${@}) > $(patsubst %.o, %.s, ${@}); - ${S_KCC} ${SFLAGS} -m${ARCH_CONFIG_LC} ${_HOST_AS_FLAGS} -c $(patsubst %.o, %.s, ${@}); + ${S_KCC} ${SFLAGS} ${_HOST_AS_FLAGS} -c $(patsubst %.o, %.s, ${@}); ${RM} ${_RMFLAGS_} $(patsubst %.o, %.pp, ${@}) $(patsubst %.o,%.s,${@}); S_RULE_1A=@ls / @@ -429,7 +429,7 @@ S_RULE_1B= ${patsubst %.o,%.s,${@}} > /dev/null S_RULE_2= ${S_KCC} -E -MD ${SFLAGS} -DASSEMBLER $(INCFLAGS) $< \ > $(patsubst %.o, %.pp, ${@}); \ sed '/^\#/d' $(patsubst %.o, %.pp, ${@}) > $(patsubst %.o, %.s, ${@}); -S_RULE_3= ${S_KCC} ${SFLAGS} -m${ARCH_CONFIG_LC} ${_HOST_AS_FLAGS} -c $(patsubst %.o, %.s, ${@});\ +S_RULE_3= ${S_KCC} ${SFLAGS} ${_HOST_AS_FLAGS} -c $(patsubst %.o, %.s, ${@});\ ${RM} ${_RMFLAGS_} $(patsubst %.o, %.pp, ${@}) $(patsubst %.o,%.s,${@}) # @@ -438,12 +438,12 @@ S_RULE_3= ${S_KCC} ${SFLAGS} -m${ARCH_CONFIG_LC} ${_HOST_AS_FLAGS} -c $(patsubs COMP_COBJ_FILES = $(addprefix $(TARGET)$(COMP_OBJ_DIR), $(COMP_COBJ_LIST)) $(COMP_COBJ_FILES): $(TARGET)$(COMP_OBJ_DIR)%.o : %.c - ${KCC} -c ${CFLAGS} -MD ${${join $@,_CFLAGS}} ${INCFLAGS} ${${join $@,_INCFLAGS}} $< + ${KCC} -c ${filter-out ${${join $@,_CFLAGS_RM}}, ${CFLAGS}} -MD ${${join $@,_CFLAGS_ADD}} ${INCFLAGS} ${${join $@,_INCFLAGS}} $< # # Compilation rules to generate .o from .c for normal files # -C_RULE_1A=${KCC} -c ${CFLAGS} -MD ${${join $@,_CFLAGS}} ${INCFLAGS} ${${join $@,_INCFLAGS}} +C_RULE_1A=${KCC} -c ${filter-out ${${join $@,_CFLAGS_RM}}, ${CFLAGS}} -MD ${${join $@,_CFLAGS_ADD}} ${INCFLAGS} ${${join $@,_INCFLAGS}} C_RULE_1B=$*.c C_RULE_2= C_RULE_3= @@ -461,7 +461,7 @@ C_RULE_4_D=${C_RULE_4} # # Compilation rules to generate .o from .m # -M_RULE_1A=${KCC} -c ${CFLAGS} -MD ${${join $@,_CFLAGS}} ${INCFLAGS} ${${join $@,_INCFLAGS}} +M_RULE_1A=${KCC} -c ${filter-out ${${join $@,_CFLAGS_RM}}, ${CFLAGS}} -MD ${${join $@,_CFLAGS_ADD}} ${INCFLAGS} ${${join $@,_INCFLAGS}} M_RULE_1B=$*.m M_RULE_2= M_RULE_3= @@ -472,7 +472,7 @@ M_RULE_4= # The config tool slickly changes the last source filename char to 'o' # for the object filename. 
# -P_RULE_1A=${KC++} -o $@ -c ${CPPFLAGS} ${CFLAGS} -MD ${${join $@,_CFLAGS}} ${INCFLAGS} ${${join $@,_INCFLAGS}} +P_RULE_1A=${KC++} -o $@ -c ${CXXFLAGS} ${filter-out ${${join $@,_CFLAGS_RM}}, ${CFLAGS}} -MD ${${join $@,_CFLAGS_ADD}} ${INCFLAGS} ${${join $@,_INCFLAGS}} P_RULE_1B=$( $(@:.cpo=.d~) && mv $(@:.cpo=.d~) $(@:.cpo=.d) P_RULE_3= @@ -503,15 +503,19 @@ endif # # mach_kernel building rules # -do_build_mach_kernel: +do_build_mach_kernel: $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/kgmacros @echo "[ building mach_kernel ]"; $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/kernel_newvers \ "`${CAT} $(SRCROOT)/osfmk/conf/kernelversion.major`" \ "`${CAT} $(SRCROOT)/osfmk/conf/kernelversion.minor`" \ "`${CAT} $(SRCROOT)/osfmk/conf/kernelversion.variant`"; \ ${KCC} $(CFLAGS) $(INCLUDES) -c kernel_vers.c; \ - $(LD) $(LDFLAGS_KERNEL) $(addprefix $(TARGET)/,$(foreach component,$(COMPONENT_LIST), $(addprefix $(component)/$(firstword $($(addsuffix _KERNEL_CONFIG, $(component))) $(KERNEL_CONFIG))/, $(addsuffix .o, $(component))))) kernel_vers.o -o $(TARGET)/mach_kernel.sys $(LD_KERNEL_LIBS); \ + $(LD) $(LDFLAGS_KERNEL) $(addprefix $(TARGET)/,$(foreach component,$(COMPONENT_LIST), $(addprefix $(component)/$(firstword $($(addsuffix _KERNEL_CONFIG, $(shell echo -n $(component) | tr a-z A-Z))) $(KERNEL_CONFIG))/, $(addsuffix .o, $(component))))) kernel_vers.o -o $(TARGET)/mach_kernel.sys $(LD_KERNEL_LIBS); \ $(STRIP) $(STRIP_FLAGS) $(TARGET)/mach_kernel.sys -o $(TARGET)/mach_kernel; + +$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/kgmacros: $(SRCROOT)/kgmacros + cp $? $@ + # # Generic Install rules # @@ -521,17 +525,19 @@ force_file_install: $(INSTALL_FILE_FILES): $(DSTROOT)$(INSTALL_FILE_DIR)% : $(TARGET)/% force_file_install @echo Installing $< in $@; - @$(MKDIR) $(DSTROOT)$(INSTALL_FILE_DIR); \ + @if [ ! -e $(DSTROOT)$(INSTALL_FILE_DIR) ]; then \ + $(MKDIR) $(DSTROOT)$(INSTALL_FILE_DIR); \ + fi; \ if [ "`echo $(INSTALL_ARCHS_LC) | wc -w`" -eq 1 ]; then \ $(RM) $(RMFLAGS) $@; \ install $(FILE_INSTALL_FLAGS) $< $(dir $@); \ else \ if [ ! -e $@ ]; then \ - echo >empty_file; \ - lipo_arg="$(subst _empty_file, empty_file,$(foreach lipo_arch,$(INSTALL_ARCHS_LC), $(addprefix -arch , $(addsuffix _empty_file, $(lipo_arch)))))"; \ + echo >empty_file_$(notdir $@); \ + lipo_arg="$(subst _empty_file, empty_file_$(notdir $@),$(foreach lipo_arch,$(INSTALL_ARCHS_LC), $(addprefix -arch , $(addsuffix _empty_file, $(lipo_arch)))))"; \ $(LIPO) $${lipo_arg} -create -output $@; \ - $(RM) $(RMFLAGS) empty_file; \ - fi; \ + $(RM) $(RMFLAGS) empty_file_$(notdir $@); \ + fi; \ $(LIPO) $@ -replace $(ARCH_CONFIG_LC) $< -o $@; \ fi @@ -541,19 +547,22 @@ force_filesys_install: $(INSTALL_FILESYS_FILES): $(SYMROOT)$(INSTALL_FILE_DIR)% : $(TARGET)/%.sys force_filesys_install @echo Installing $< in $@; - @$(MKDIR) $(SYMROOT)$(INSTALL_FILE_DIR); \ + @if [ ! -e $(SYMROOT)$(INSTALL_FILE_DIR) ]; then \ + $(MKDIR) $(SYMROOT)$(INSTALL_FILE_DIR); \ + fi; \ if [ "`echo $(INSTALL_ARCHS_LC) | wc -w`" -eq 1 ]; then \ $(RM) $(RMFLAGS) $@; \ install $(INSTALL_FLAGS) $< $(dir $@); \ else \ if [ ! 
-e $@ ]; then \ - echo >empty_file; \ - lipo_arg="$(subst _empty_file, empty_file,$(foreach lipo_arch,$(INSTALL_ARCHS_LC), $(addprefix -arch , $(addsuffix _empty_file, $(lipo_arch)))))"; \ + echo >empty_filesys_$(notdir $@); \ + lipo_arg="$(subst _empty_file, empty_filesys_$(notdir $@),$(foreach lipo_arch,$(INSTALL_ARCHS_LC), $(addprefix -arch , $(addsuffix _empty_file, $(lipo_arch)))))"; \ $(LIPO) $${lipo_arg} -create -output $@; \ - $(RM) $(RMFLAGS) empty_file; \ - fi; \ + $(RM) $(RMFLAGS) empty_filesys_$(notdir $@); \ + fi; \ $(LIPO) $@ -replace $(ARCH_CONFIG_LC) $< -o $@; \ fi + cp $(SOURCE)kgmacros $(SYMROOT)$(INSTALL_FILE_DIR) INSTALL_DATA_FILES = $(addprefix $(DSTROOT)$(INSTALL_DATA_DIR), $(INSTALL_DATA_LIST)) diff --git a/osfmk/UserNotification/KUNCUserNotifications.c b/osfmk/UserNotification/KUNCUserNotifications.c index 8007e03b1..b53ff7aa6 100644 --- a/osfmk/UserNotification/KUNCUserNotifications.c +++ b/osfmk/UserNotification/KUNCUserNotifications.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -47,9 +47,6 @@ * DEFINES AND STRUCTURES */ -UNDServerRef gUNDServer = UND_SERVER_NULL; - - struct UNDReply { decl_mutex_data(,lock) /* UNDReply lock */ int userLandNotificationKey; @@ -67,8 +64,10 @@ UNDReply_deallocate( UNDReplyRef reply) { ipc_port_t port; + UNDReply_lock(reply); port = reply->self_port; + assert(IP_VALID(port)); ipc_kobject_set(port, IKO_NULL, IKOT_NONE); reply->self_port = IP_NULL; UNDReply_unlock(reply); @@ -78,6 +77,25 @@ UNDReply_deallocate( return; } +static UNDServerRef +UNDServer_reference(void) +{ + UNDServerRef UNDServer; + kern_return_t kr; + + kr = host_get_user_notification_port(host_priv_self(), &UNDServer); + assert(kr == KERN_SUCCESS); + return UNDServer; +} + +static void +UNDServer_deallocate( + UNDServerRef UNDServer) +{ + if (IP_VALID(UNDServer)) + ipc_port_release_send(UNDServer); +} + /* * UND Mig Callbacks */ @@ -184,7 +202,17 @@ KUNCGetNotificationID() kern_return_t KUNCExecute(char executionPath[1024], int uid, int gid) { - return UNDExecute_rpc(gUNDServer, executionPath, uid, gid); + + UNDServerRef UNDServer; + + UNDServer = UNDServer_reference(); + if (IP_VALID(UNDServer)) { + kern_return_t kr; + kr = UNDExecute_rpc(UNDServer, executionPath, uid, gid); + UNDServer_deallocate(UNDServer); + return kr; + } + return MACH_SEND_INVALID_DEST; } kern_return_t KUNCUserNotificationCancel( @@ -205,9 +233,17 @@ kern_return_t KUNCUserNotificationCancel( reply->inprogress = FALSE; if (ulkey = reply->userLandNotificationKey) { + UNDServerRef UNDServer; + reply->userLandNotificationKey = 0; UNDReply_unlock(reply); - kr = UNDCancelNotification_rpc(gUNDServer,ulkey); + + UNDServer = UNDServer_reference(); + if (IP_VALID(UNDServer)) { + kr = UNDCancelNotification_rpc(UNDServer,ulkey); + UNDServer_deallocate(UNDServer); + } else + kr = MACH_SEND_INVALID_DEST; } else { UNDReply_unlock(reply); kr = KERN_SUCCESS; @@ -227,8 +263,12 @@ KUNCUserNotificationDisplayNotice( char *alertMessage, char *defaultButtonTitle) { - kern_return_t kr; - kr = UNDDisplayNoticeSimple_rpc(gUNDServer, + UNDServerRef UNDServer; + + UNDServer = UNDServer_reference(); + if (IP_VALID(UNDServer)) { + kern_return_t kr; + kr = UNDDisplayNoticeSimple_rpc(UNDServer, timeout, flags, iconPath, @@ -237,7 +277,10 @@ KUNCUserNotificationDisplayNotice( alertHeader, alertMessage, defaultButtonTitle); - return kr; + UNDServer_deallocate(UNDServer); + return 
kr; + } + return MACH_SEND_INVALID_DEST; } kern_return_t @@ -254,9 +297,12 @@ KUNCUserNotificationDisplayAlert( char *otherButtonTitle, unsigned *responseFlags) { - kern_return_t kr; + UNDServerRef UNDServer; - kr = UNDDisplayAlertSimple_rpc(gUNDServer, + UNDServer = UNDServer_reference(); + if (IP_VALID(UNDServer)) { + kern_return_t kr; + kr = UNDDisplayAlertSimple_rpc(UNDServer, timeout, flags, iconPath, @@ -268,7 +314,10 @@ KUNCUserNotificationDisplayAlert( alternateButtonTitle, otherButtonTitle, responseFlags); - return kr; + UNDServer_deallocate(UNDServer); + return kr; + } + return MACH_SEND_INVALID_DEST; } kern_return_t @@ -283,8 +332,8 @@ KUNCUserNotificationDisplayFromBundle( int contextKey) { UNDReplyRef reply = (UNDReplyRef)id; + UNDServerRef UNDServer; ipc_port_t reply_port; - kern_return_t kr; if (reply == UND_REPLY_NULL) return KERN_INVALID_ARGUMENT; @@ -298,14 +347,21 @@ KUNCUserNotificationDisplayFromBundle( reply_port = ipc_port_make_send(reply->self_port); UNDReply_unlock(reply); - kr = UNDDisplayCustomFromBundle_rpc(gUNDServer, + UNDServer = UNDServer_reference(); + if (IP_VALID(UNDServer)) { + kern_return_t kr; + + kr = UNDDisplayCustomFromBundle_rpc(UNDServer, reply_port, bundlePath, fileName, fileExtension, messageKey, tokenString); - return kr; + UNDServer_deallocate(UNDServer); + return kr; + } + return MACH_SEND_INVALID_DEST; } /* @@ -332,6 +388,7 @@ convert_port_to_UNDReply( reply = (UNDReplyRef) port->ip_kobject; assert(reply != UND_REPLY_NULL); ip_unlock(port); + return reply; } return UND_REPLY_NULL; } @@ -345,13 +402,7 @@ host_set_UNDServer( host_priv_t host_priv, UNDServerRef server) { - - if (host_priv == HOST_PRIV_NULL || server == UND_SERVER_NULL) - return KERN_INVALID_ARGUMENT; - if (gUNDServer != UND_SERVER_NULL) - ipc_port_dealloc_kernel(gUNDServer); - gUNDServer = server; - return KERN_SUCCESS; + return (host_set_user_notification_port(host_priv, server)); } /* @@ -361,11 +412,7 @@ host_set_UNDServer( kern_return_t host_get_UNDServer( host_priv_t host_priv, - UNDServerRef *server) + UNDServerRef *serverp) { - if (host_priv == HOST_PRIV_NULL) - return KERN_INVALID_ARGUMENT; - *server = gUNDServer; - return KERN_SUCCESS; + return (host_get_user_notification_port(host_priv, serverp)); } - diff --git a/osfmk/conf/MASTER b/osfmk/conf/MASTER index 025c0c4af..ba9c83411 100644 --- a/osfmk/conf/MASTER +++ b/osfmk/conf/MASTER @@ -58,7 +58,6 @@ options MACH_KERNEL options MACH_PAGEMAP options MACH_LOAD options MACH_RT -options THREAD_SWAPPER # options TASK_SWAPPER # pseudo-device test_device 1 options ADVISORY_PAGEOUT diff --git a/osfmk/conf/MASTER.i386 b/osfmk/conf/MASTER.i386 index 6afea0041..7ce7d8f0a 100644 --- a/osfmk/conf/MASTER.i386 +++ b/osfmk/conf/MASTER.i386 @@ -6,43 +6,6 @@ # ###################################################################### # -# Master Apple MacOS X configuration file -# (see the master machine independent -# configuration file for a description of the file format). 
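The KUNCUserNotifications.c hunks above drop the cached gUNDServer global in favor of a per-call UNDServer_reference()/UNDServer_deallocate() pair, so each RPC holds its own send right and a notification port replaced at runtime is never used stale. A minimal sketch of that acquire/use/release discipline; port_t and the acquire_port()/release_port()/notify() helpers are illustrative stand-ins, not Mach API:

#include <stdio.h>

/* Stand-in for a Mach port with a send-right reference count. */
typedef struct { int refs; } port_t;

static port_t current_port = { 1 };   /* reference held by the host object */

static port_t * acquire_port(void)    /* cf. UNDServer_reference() */
{
    current_port.refs++;              /* take our own send right */
    return &current_port;
}

static void release_port(port_t * p)  /* cf. UNDServer_deallocate() */
{
    p->refs--;
}

static int notify(port_t * p, const char * msg)
{
    printf("notify (refs=%d): %s\n", p->refs, msg);
    return 0;                         /* cf. KERN_SUCCESS */
}

int main(void)
{
    port_t * server = acquire_port(); /* acquired per call, never cached */
    if (server) {
        int kr = notify(server, "hello");
        release_port(server);         /* drop the right as soon as done */
        return kr;
    }
    return 1;                         /* cf. MACH_SEND_INVALID_DEST */
}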
-# -###################################################################### -# -# NeXT (PSEUDO-)DEVICES (select any combination) -# ex = Excelan EXOS 202 Ethernet interface -# ip = Interphase V/SMD 3200 disk controller -# od = Canon OMD-1 Optical Disk -# rd = RAM disk -# sd = SCSI disk -# sg = Generic SCSI Device -# st = SCSI tape -# fd = Floppy Disk -# en = Integrated Ethernet controller -# dsp = DSP560001 digital signal processor -# iplmeas = ipl time measurement -# nextp = NeXT Laser Printer -# sound = sound I/O -# vol = removable volume support device -# venip = virtual Ethernet/IP network interface -# zs = Serial device -# -# MULTIPROCESSOR SUPPORT (select exactly one) -# multi = support 4 processors -# uni = supports single processor -# -# SPECIAL CHARACTERISTICS (select any combination) -# gdb = GNU kernel debugger -# posix_kern = POSIX support -# -# CPU TYPE (select exactly one) -# NeXT = FIXME -# -###################################################################### -# # Standard Apple MacOS X Configurations: # -------- ---- -------- --------------- # @@ -50,13 +13,16 @@ # RELEASE = [intel pc iokit mach_pe mach mach_kdp small event vol hd pst gdb fixpri simple_clock mkernserv uxpr kernstack ipc_compat ipc_debug fb mk30 mk30_i386] # DEBUG_KDP = [intel pc iokit mach_pe mach mach_kdp small event vol hd pst gdb fixpri simple_clock mkernserv uxpr kernstack ipc_compat ipc_debug fb mk30 mk30_i386 osf_debug debug] # DEBUG= [intel pc iokit mach_pe mach mach_kdp small event vol hd pst gdb fixpri simple_clock mkernserv uxpr kernstack ipc_compat ipc_debug fb mk30 mk30_i386 osf_debug debug] +# PROFILE = [ RELEASE profile ] # ###################################################################### # machine "i386" # cpu "i386" # -pseudo-device cpus 2 +pseudo-device cpus 4 +pseudo-device com 2 +pseudo-device vc 1 # choices for platform_bus are pci at386 sqt and kkt makeoptions OSFMK_MACHINE = "i386" # @@ -71,10 +37,13 @@ options SHOW_SPACE # print size of structures # options EVENTMETER # event meter support # options FP_EMUL # floating point emulation # options PC_SUPPORT # virtual PC support # +options PROFILE # kernel profiling # options UXPR # user-level XPR package # options STAT_TIME # time stats config mach_kernel swap generic # +options GPROF # kgmon profiling # + options EVENT # options MACH_BSD @@ -84,8 +53,3 @@ options MACH_PE # # #options DDB # Inline debugger # options MACH_KDP # KDP # -# SMP -options MP_V1_1 - -# FIXME -pseudo-device com 2 diff --git a/osfmk/conf/MASTER.ppc b/osfmk/conf/MASTER.ppc index b2f060952..7511c465e 100644 --- a/osfmk/conf/MASTER.ppc +++ b/osfmk/conf/MASTER.ppc @@ -13,7 +13,7 @@ # RELEASE_TRACE = [ RELEASE kdebug ] # DEBUG = [ RELEASE mach_kdb debug ] # DEBUG_TRACE = [ DEBUG kdebug ] -# PROFILE = [ RELEASE ] +# PROFILE = [ RELEASE profile ] # ###################################################################### # @@ -38,6 +38,8 @@ options MACH_PROF # # # options DEBUG # # +options PROFILE # kernel profiling # + machine "ppc" cpu "ppc" pseudo-device cpus 2 diff --git a/osfmk/conf/Makefile b/osfmk/conf/Makefile index e9eacc5e9..131bd8c56 100644 --- a/osfmk/conf/Makefile +++ b/osfmk/conf/Makefile @@ -18,7 +18,7 @@ ifndef OSFMK_KERNEL_CONFIG export OSFMK_KERNEL_CONFIG = $(KERNEL_CONFIG) endif -COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) +export COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf: make build_setup @@ -58,6 +58,7 @@ do_all: do_setup_conf 
SOURCE=$${next_source} \ TARGET=$(TARGET) \ INCL_MAKEDEP=FALSE \ + KERNEL_CONFIG=$(OSFMK_KERNEL_CONFIG) \ build_all; \ echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(OSFMK_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; diff --git a/osfmk/conf/Makefile.ppc b/osfmk/conf/Makefile.ppc index ac2face46..eca596885 100644 --- a/osfmk/conf/Makefile.ppc +++ b/osfmk/conf/Makefile.ppc @@ -9,7 +9,7 @@ makedis: $(SRCROOT)/osfmk/ddb/makedis.c $(CC) -o $@ $< -ppc_disasm.o_CFLAGS = -Dperror=db_printf -Dexit=db_error -Dmalloc=db_disasm_malloc +ppc_disasm.o_CFLAGS_ADD = -Dperror=db_printf -Dexit=db_error -Dmalloc=db_disasm_malloc ppc_disasm : $(SRCROOT)/osfmk/ppc/ppc_disasm.i makedis ./makedis -w -h ./ppc_disasm.h $(SOURCE_DIR)/osfmk/ppc/ppc_disasm.i > ./ppc_disasm.c diff --git a/osfmk/conf/Makefile.template b/osfmk/conf/Makefile.template index 70ae5e94f..7b918321e 100644 --- a/osfmk/conf/Makefile.template +++ b/osfmk/conf/Makefile.template @@ -92,7 +92,7 @@ LDOBJS = $(OBJS) $(COMPONENT).o: $(LDOBJS) assym.s @echo "[ creating $(COMPONENT).o ]" $(RM) $(RMFLAGS) vers.c - $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/newvers \ + $(COMPOBJROOT)/newvers \ `$(CAT) ${VERSION_FILES}` ${COPYRIGHT_FILES} ${KCC} $(CFLAGS) $(INCLUDES) -c vers.c @echo [ updating $(COMPONENT).o ${OSFMK_KERNEL_CONFIG} ] diff --git a/osfmk/conf/files b/osfmk/conf/files index e4041ccdb..f4d913c62 100644 --- a/osfmk/conf/files +++ b/osfmk/conf/files @@ -70,7 +70,6 @@ OPTIONS/mach_tr optional mach_tr OPTIONS/mach_vm_debug optional mach_vm_debug OPTIONS/mach_page_hash_stats optional mach_page_hash_stats OPTIONS/mig_debug optional mig_debug -OPTIONS/hw_footprint optional hw_footprint OPTIONS/simple_clock optional simple_clock OPTIONS/stat_time optional stat_time OPTIONS/time_stamp optional time_stamp @@ -81,7 +80,6 @@ OPTIONS/power_save optional power_save OPTIONS/zone_debug optional zone_debug OPTIONS/vm_cpm optional vm_cpm OPTIONS/task_swapper optional task_swapper -OPTIONS/thread_swapper optional thread_swapper OPTIONS/stack_usage optional stack_usage # Default pager and system pager files, to be moved to separate component @@ -147,6 +145,7 @@ osfmk/kern/exception.c standard osfmk/kern/etap.c standard osfmk/kern/etap_pool.c optional etap osfmk/kern/host.c standard +osfmk/kern/host_notify.c standard osfmk/kern/ipc_clock.c standard osfmk/kern/ipc_host.c standard osfmk/kern/ipc_kobject.c standard @@ -199,6 +198,7 @@ osfmk/kern/bsd_kern.c optional mach_bsd ./mach/ledger_server.c standard ./mach/lock_set_server.c standard ./mach/mach_host_server.c standard +./mach/mach_notify_user.c standard ./mach/mach_port_server.c standard ./mach/memory_object_server.c standard ./mach/memory_object_control_server.c standard @@ -206,11 +206,8 @@ osfmk/kern/bsd_kern.c optional mach_bsd ./mach/memory_object_name_server.c standard ./mach/upl_server.c standard # -# JMM- right now we don't use the MIG-generated client interface -# for notifications, instead we hand create them. We need to switch -# when we can (i.e. when we can get the send-always behavior covered -# even with MIG-generated code). 
-# ./mach/notify_user.c standard +# For now, no external pagers +# # ./mach/memory_object_user.c standard # ./mach/memory_object_default_user.c standard # diff --git a/osfmk/conf/files.i386 b/osfmk/conf/files.i386 index 70255775e..8b4b58d37 100644 --- a/osfmk/conf/files.i386 +++ b/osfmk/conf/files.i386 @@ -15,7 +15,6 @@ OPTIONS/db_machine_commands optional db_machine_commands OPTIONS/dynamic_num_nodes optional dynamic_num_nodes OPTIONS/vtoc_compat optional vtoc_compat OPTIONS/fddi optional fddi -OPTIONS/mp_v1_1 optional mp_v1_1 @@ -56,15 +55,26 @@ osfmk/i386/start.s standard osfmk/i386/cswitch.s standard osfmk/i386/machine_routines.c standard osfmk/i386/machine_routines_asm.s standard -osfmk/i386/mp_desc.c optional mp_v1_1 +osfmk/i386/mcount.s optional profile +osfmk/i386/mp_desc.c standard osfmk/i386/ntoh.s standard osfmk/i386/pcb.c standard osfmk/i386/phys.c standard osfmk/i386/rtclock.c standard osfmk/i386/trap.c standard osfmk/i386/user_ldt.c standard +osfmk/i386/i386_init.c standard +osfmk/i386/i386_vm_init.c standard osfmk/i386/commpage/commpage.c standard +osfmk/i386/commpage/commpage_mach_absolute_time.s standard +osfmk/i386/commpage/spinlocks.s standard +osfmk/i386/commpage/pthreads.s standard +osfmk/i386/commpage/cacheflush.s standard +osfmk/i386/commpage/commpage_gettimeofday.s standard +osfmk/i386/commpage/bcopy_scalar.s standard +osfmk/i386/commpage/bzero_scalar.s standard +osfmk/i386/commpage/commpage_sigs.s standard osfmk/i386/AT386/autoconf.c standard osfmk/i386/AT386/bbclock.c standard @@ -73,24 +83,30 @@ osfmk/i386/AT386/himem.c optional himem osfmk/i386/AT386/model_dep.c standard osfmk/i386/AT386/physmem.c optional physmem device-driver -osfmk/i386/AT386/mp/mp.c optional mp_v1_1 -osfmk/i386/AT386/mp/mp_v1_1.c optional mp_v1_1 +osfmk/i386/mp.c standard +osfmk/i386/mp_slave_boot.s standard -osfmk/i386/AT386/video_console.c standard +osfmk/console/i386/serial_console.c optional com device-driver + +osfmk/console/i386/kdasm.s optional vc device-driver +osfmk/console/i386/text_console.c optional vc device-driver + +osfmk/console/panic_dialog.c optional vc device-driver +osfmk/console/video_console.c optional vc device-driver +osfmk/console/i386/video_scroll.c optional vc device-driver osfmk/kern/etap_map.c optional etap device-driver -osfmk/profiling/i386/profile-md.c optional gprof profiling-routine -osfmk/profiling/i386/profile-asm.s optional gprof profiling-routine -osfmk/profiling/profile-kgmon.c optional gprof profiling-routine -osfmk/profiling/profile-mk.c optional gprof profiling-routine +osfmk/profiling/i386/profile-md.c optional gprof +osfmk/profiling/i386/profile-asm.s optional gprof +osfmk/profiling/profile-kgmon.c optional gprof +#osfmk/profiling/profile-mk.c optional gprof osfmk/kdp/ml/i386/kdp_machdep.c optional mach_kdp osfmk/kdp/ml/i386/kdp_vm.c optional mach_kdp -# -# Dummy devices for now +# DUMMIES TO FORCE GENERATION OF .h FILES osfmk/OPTIONS/ln optional ln osfmk/OPTIONS/eisa optional eisa osfmk/OPTIONS/himem optional himem diff --git a/osfmk/conf/files.ppc b/osfmk/conf/files.ppc index 57d753237..96fb09479 100644 --- a/osfmk/conf/files.ppc +++ b/osfmk/conf/files.ppc @@ -13,29 +13,29 @@ OPTIONS/mp optional mp # that the file is placed at the front of the line -osfmk/ddb/db_aout.c optional mach_kdb -./ppc_disasm.c optional mach_kdb -osfmk/ppc/db_disasm.c optional mach_kdb -osfmk/ppc/db_asm.s optional mach_kdb +osfmk/ddb/db_aout.c optional mach_kdb +./ppc_disasm.c optional mach_kdb +osfmk/ppc/db_disasm.c optional mach_kdb osfmk/ppc/db_interface.c optional 
mach_kdb osfmk/ppc/db_trace.c optional mach_kdb osfmk/ppc/db_low_trace.c optional mach_kdb +osfmk/ppc/bcopytest.c optional mach_kdb osfmk/ppc/lowmem_vectors.s standard osfmk/ppc/start.s standard osfmk/ppc/_setjmp.s standard +osfmk/ppc/mcount.s optional profile -osfmk/ppc/cpu.c standard +osfmk/ppc/cpu.c standard osfmk/ppc/ppc_init.c standard osfmk/ppc/ppc_vm_init.c standard -osfmk/ppc/bat_init.c standard osfmk/ppc/model_dep.c standard -osfmk/ppc/mem.c standard osfmk/ppc/pmap.c standard osfmk/ppc/mappings.c standard osfmk/ppc/savearea.c standard osfmk/ppc/savearea_asm.s standard osfmk/ppc/hw_vm.s standard +osfmk/ppc/skiplists.s standard osfmk/ppc/hw_lock.s standard osfmk/ppc/misc_asm.s standard osfmk/ppc/status.c standard @@ -55,42 +55,55 @@ osfmk/ppc/misc.c standard osfmk/ppc/interrupt.c standard osfmk/ppc/machine_routines.c standard osfmk/ppc/machine_routines_asm.s standard -#osfmk/ppc/Performance.s standard osfmk/ppc/Emulate.s standard +osfmk/ppc/Emulate64.s standard osfmk/ppc/AltiAssist.s standard osfmk/ppc/conf.c standard osfmk/ppc/rtclock.c standard osfmk/ppc/Diagnostics.c standard osfmk/ppc/PPCcalls.c standard osfmk/ppc/vmachmon.c standard -osfmk/ppc/vmachmon_asm.s standard +osfmk/ppc/vmachmon_asm.s standard -#osfmk/ppc/POWERMAC/ser16550.c standard -osfmk/ppc/POWERMAC/autoconf.c optional xxx -osfmk/ppc/POWERMAC/pci.c optional xxx -osfmk/ppc/POWERMAC/pci_probe.c optional xxx -osfmk/ppc/POWERMAC/pci_compat.c optional xxx +osfmk/ppc/Firmware.s standard +osfmk/ppc/FirmwareC.c standard -osfmk/ppc/Firmware.s standard -osfmk/ppc/FirmwareC.c standard +osfmk/ppc/aligned_data.s standard -osfmk/ppc/aligned_data.s standard +osfmk/ppc/hw_perfmon.c standard osfmk/ppc/commpage/commpage.c standard osfmk/ppc/commpage/commpage_asm.s standard +osfmk/ppc/commpage/bcopy_g3.s standard +osfmk/ppc/commpage/bcopy_g4.s standard +osfmk/ppc/commpage/bcopy_970.s standard +osfmk/ppc/commpage/bcopy_64.s standard +osfmk/ppc/commpage/bzero_32.s standard +osfmk/ppc/commpage/bzero_128.s standard +osfmk/ppc/commpage/cacheflush.s standard +osfmk/ppc/commpage/gettimeofday.s standard +osfmk/ppc/commpage/mach_absolute_time.s standard +osfmk/ppc/commpage/pthread.s standard +osfmk/ppc/commpage/spinlocks.s standard +osfmk/ppc/commpage/bigcopy_970.s standard + +osfmk/ppc/chud/chud_osfmk_callback.c standard +osfmk/ppc/chud/chud_cpu.c standard +osfmk/ppc/chud/chud_cpu_asm.s standard +osfmk/ppc/chud/chud_memory.c standard +osfmk/ppc/chud/chud_thread.c standard +osfmk/ppc/chud/chud_glue.c standard osfmk/kdp/ml/ppc/kdp_machdep.c optional mach_kdp osfmk/kdp/ml/ppc/kdp_vm.c optional mach_kdp osfmk/kdp/ml/ppc/kdp_misc.s optional mach_kdp -#osfmk/kdp/pe/POWERMAC/kdp_mace.c optional mach_kdp - -osfmk/ppc/serial_console.c optional scc device-driver -osfmk/ppc/POWERMAC/serial_io.c optional scc device-driver -osfmk/ppc/POWERMAC/video_console.c optional vc device-driver -osfmk/ppc/POWERMAC/video_scroll.s optional vc device-driver +osfmk/console/ppc/serial_console.c optional scc device-driver +osfmk/ppc/serial_io.c optional scc device-driver -osfmk/ppc/POWERMAC/dbdma.c standard +osfmk/console/panic_dialog.c optional vc device-driver +osfmk/console/video_console.c optional vc device-driver +osfmk/console/ppc/video_scroll.s optional vc device-driver # DUMMIES TO FORCE GENERATION OF .h FILES OPTIONS/hi_res_clock optional hi_res_clock diff --git a/osfmk/conf/kernelversion.major b/osfmk/conf/kernelversion.major index 1e8b31496..7f8f011eb 100644 --- a/osfmk/conf/kernelversion.major +++ b/osfmk/conf/kernelversion.major @@ -1 +1 @@ -6 +7 diff --git 
a/osfmk/conf/kernelversion.minor b/osfmk/conf/kernelversion.minor index 45a4fb75d..573541ac9 100644 --- a/osfmk/conf/kernelversion.minor +++ b/osfmk/conf/kernelversion.minor @@ -1 +1 @@ -8 +0 diff --git a/osfmk/conf/kernelversion.variant b/osfmk/conf/kernelversion.variant index e69de29bb..573541ac9 100644 --- a/osfmk/conf/kernelversion.variant +++ b/osfmk/conf/kernelversion.variant @@ -0,0 +1 @@ +0 diff --git a/osfmk/conf/version.major b/osfmk/conf/version.major index 1e8b31496..7f8f011eb 100644 --- a/osfmk/conf/version.major +++ b/osfmk/conf/version.major @@ -1 +1 @@ -6 +7 diff --git a/osfmk/conf/version.minor b/osfmk/conf/version.minor index 45a4fb75d..573541ac9 100644 --- a/osfmk/conf/version.minor +++ b/osfmk/conf/version.minor @@ -1 +1 @@ -8 +0 diff --git a/osfmk/conf/version.variant b/osfmk/conf/version.variant index 8b1378917..573541ac9 100644 --- a/osfmk/conf/version.variant +++ b/osfmk/conf/version.variant @@ -1 +1 @@ - +0 diff --git a/pexpert/i386/kdasm.s b/osfmk/console/i386/kdasm.s similarity index 100% rename from pexpert/i386/kdasm.s rename to osfmk/console/i386/kdasm.s diff --git a/osfmk/console/i386/serial_console.c b/osfmk/console/i386/serial_console.c new file mode 100644 index 000000000..d3b53bc43 --- /dev/null +++ b/osfmk/console/i386/serial_console.c @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include + +void +cnputc(char c) +{ + boolean_t nolock = mp_kdp_trap || !ml_get_interrupts_enabled(); + + /* + * Note: this lock prevents other cpus interfering with the + * output of this one character to the console (screen). It + * does not prevent multiple printfs being interleaved - that's + * the responsibility of the caller. Without this lock, + * an unreadable black-on-black or white-on-white display may result. + * We avoid taking this lock, however, if we're in the debugger or + * at interrupt level. + */ + if (!nolock) + simple_lock(&mp_putc_lock); + vcputc(0, 0, c); + if (c == '\n') + vcputc(0, 0,'\r'); + if (!nolock) + simple_unlock(&mp_putc_lock); +} diff --git a/pexpert/i386/text_console.c b/osfmk/console/i386/text_console.c similarity index 90% rename from pexpert/i386/text_console.c rename to osfmk/console/i386/text_console.c index bbad4ebd6..37154d4f7 100644 --- a/pexpert/i386/text_console.c +++ b/osfmk/console/i386/text_console.c @@ -29,10 +29,8 @@ * VGA text console support. */ -#include -#include -#include -#include "video_console.h" +#include +#include /* * Macros and typedefs.
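cnputc above takes mp_putc_lock only when it is safe to block: once the debugger has quiesced the other cpus (mp_kdp_trap) or interrupts are off, spinning on a lock whose holder may never run again would deadlock. A minimal sketch of that conditional-locking idiom, with a hypothetical my_dev_lock and emit_char() standing in for the console lock and output primitive:

	void
	serialized_putc(char c)
	{
		/* skip the lock when a preempted holder could never release it */
		boolean_t nolock = !ml_get_interrupts_enabled();

		if (!nolock)
			simple_lock(&my_dev_lock);
		emit_char(c);			/* hypothetical output primitive */
		if (c == '\n')
			emit_char('\r');	/* text consoles want CR after LF */
		if (!nolock)
			simple_unlock(&my_dev_lock);
	}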
@@ -80,9 +78,9 @@ static char * vram_start = 0; /* VM start of VGA frame buffer */ /* * Functions in kdasm.s. */ -extern void kd_slmwd(u_char * pos, int count, u_short val); -extern void kd_slmscu(u_char * from, u_char * to, int count); -extern void kd_slmscd(u_char * from, u_char * to, int count); +extern void kd_slmwd(unsigned char * pos, int count, unsigned short val); +extern void kd_slmscu(unsigned char * from, unsigned char * to, int count); +extern void kd_slmscd(unsigned char * from, unsigned char * to, int count); /* * move_up @@ -138,10 +136,10 @@ set_cursor_position( csrpos_t newpos ) curpos = newpos / ONE_SPACE; outb(vga_idx_reg, VGA_C_HIGH); - outb(vga_io_reg, (u_char)(curpos >> 8)); + outb(vga_io_reg, (unsigned char)(curpos >> 8)); outb(vga_idx_reg, VGA_C_LOW); - outb(vga_io_reg, (u_char)(curpos & 0xff)); + outb(vga_io_reg, (unsigned char)(curpos & 0xff)); } /* @@ -178,12 +176,12 @@ vga_init(int cols, int rows, unsigned char * addr) } /* - * tc_scrollup + * tc_scroll_up * * Scroll the screen up 'n' character lines. */ void -tc_scrollup( int lines ) +tc_scroll_up( int lines, int top, int bottom ) { csrpos_t to; csrpos_t from; @@ -202,12 +200,12 @@ tc_scrollup( int lines ) } /* - * tc_scrolldown + * tc_scroll_down * * Scrolls the screen down 'n' character lines. */ void -tc_scrolldown( int lines ) +tc_scroll_down( int lines, int top, int bottom ) { csrpos_t to; csrpos_t from; @@ -309,7 +307,7 @@ tc_hide_cursor( int x, int y ) * relative to the current cursor position. */ void -tc_clear_screen(int x, int y, int operation) +tc_clear_screen(int x, int y, int top, int bottom, int operation) { csrpos_t start; int count; @@ -334,13 +332,13 @@ tc_clear_screen(int x, int y, int operation) } /* - * tc_putchar + * tc_paint_char * * Display a character on screen with the given coordinates, * and attributes. */ void -tc_putchar( unsigned char ch, int x, int y, int attrs ) +tc_paint_char( int x, int y, unsigned char ch, int attrs, unsigned char ch_previous, int attrs_previous ) { char my_attr = vga_attr; @@ -349,6 +347,17 @@ tc_putchar( unsigned char ch, int x, int y, int attrs ) display_char( XY_TO_CSRPOS(x, y), ch, vga_attr ); } +/* + * tc_enable + * + * Enable / disable the console. 
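+ * (A no-op for the VGA text console.)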
+ */ +void +tc_enable(boolean_t enable) +{ + +} + /* * tc_initialize * diff --git a/osfmk/ppc/POWERMAC/mp/mp.h b/osfmk/console/i386/text_console.h similarity index 64% rename from osfmk/ppc/POWERMAC/mp/mp.h rename to osfmk/console/i386/text_console.h index d5b02d4a4..c44122a6d 100644 --- a/osfmk/ppc/POWERMAC/mp/mp.h +++ b/osfmk/console/i386/text_console.h @@ -22,29 +22,20 @@ * * @APPLE_LICENSE_HEADER_END@ */ -/* - * @OSF_COPYRIGHT@ - */ - -#ifndef _PPC_POWERMAC_MP_MP_H_ -#define _PPC_POWERMAC_MP_MP_H_ - -#include - -#if NCPUS > 1 -#ifndef ASSEMBLER -#include -extern int real_ncpus; /* real number of cpus */ -extern int wncpu; /* wanted number of cpus */ -decl_simple_lock_data(extern, debugger_lock) /* debugger lock */ +#ifndef _TEXT_CONSOLE_H_ +#define _TEXT_CONSOLE_H_ -extern int debugger_cpu; /* current cpu running debugger */ -extern int debugger_debug; -extern int debugger_is_slave[]; -extern int debugger_active[]; -#endif /* ASSEMBLER */ +#define TEXT_MODE 0 -#endif /* NCPUS > 1 */ +void tc_paint_char(int x, int y, unsigned char ch, int attrs, unsigned char ch_previous, int attrs_previous); +void tc_scroll_down(int lines, int top, int bottom); +void tc_scroll_up(int lines, int top, int bottom); +void tc_clear_screen(int x, int y, int top, int bottom, int operation); +void tc_show_cursor(int x, int y); +void tc_hide_cursor(int x, int y); +void tc_enable(boolean_t enable); +void tc_initialize(struct vc_info * vinfo_p); +void tc_update_color(int color, int fore); -#endif /* _PPC_POWERMAC_MP_MP_H_ */ +#endif /* !_TEXT_CONSOLE_H_ */ diff --git a/osfmk/i386/AT386/mp/mp_events.h b/osfmk/console/i386/video_scroll.c similarity index 67% rename from osfmk/i386/AT386/mp/mp_events.h rename to osfmk/console/i386/video_scroll.c index 4fa8081c6..542976ad5 100644 --- a/osfmk/i386/AT386/mp/mp_events.h +++ b/osfmk/console/i386/video_scroll.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -22,22 +22,17 @@ * * @APPLE_LICENSE_HEADER_END@ */ -#ifndef __AT386_MP_EVENTS__ -#define __AT386_MP_EVENTS__ -/* Interrupt types */ +void video_scroll_up(unsigned long start, + unsigned long end, + unsigned long dest) +{ + bcopy(start, dest, (end - start) << 2); +} -#define MP_TLB_FLUSH 0x00 -#define MP_CLOCK 0x01 -#define MP_KDB 0x02 -#define MP_AST 0x03 -#define MP_SOFTCLOCK 0x04 -#define MP_INT_AVAIL 0x05 -#define MP_AST_URGENT 0x06 -#define MP_TLB_RELOAD 0x07 - -#ifndef ASSEMBLER -extern void i386_signal_cpus(int event); -#endif - -#endif +void video_scroll_down(unsigned long start, /* HIGH addr */ + unsigned long end, /* LOW addr */ + unsigned long dest) /* HIGH addr */ +{ + bcopy(end, dest, (start - end) << 2); +} diff --git a/osfmk/ppc/iso_font.h b/osfmk/console/iso_font.c similarity index 99% rename from osfmk/ppc/iso_font.h rename to osfmk/console/iso_font.c index 26a96c403..a3ffb5fda 100644 --- a/osfmk/ppc/iso_font.h +++ b/osfmk/console/iso_font.c @@ -302,4 +302,5 @@ unsigned char iso_font[256*16] = { #define ISO_CHAR_MIN 0x00 #define ISO_CHAR_MAX 0xFF +#define ISO_CHAR_WIDTH 8 #define ISO_CHAR_HEIGHT 16 diff --git a/osfmk/console/panic_dialog.c b/osfmk/console/panic_dialog.c new file mode 100644 index 000000000..18c98a5c7 --- /dev/null +++ b/osfmk/console/panic_dialog.c @@ -0,0 +1,631 @@ +/* + * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. 
+ * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include + +#include +#include +#include + +#include "panic_image.c" +#include "rendered_numbers.c" + +extern struct vc_info vinfo; +extern boolean_t panicDialogDesired; + +/* panic image clut */ +static const unsigned char *clut = NULL; +extern void panic_ui_initialize(const unsigned char * system_clut); + +/* We use this standard MacOS clut as a fallback */ +static const unsigned char appleClut8[ 256 * 3 ] = { +// 00 + 0xFF,0xFF,0xFF, 0xFF,0xFF,0xCC, 0xFF,0xFF,0x99, 0xFF,0xFF,0x66, + 0xFF,0xFF,0x33, 0xFF,0xFF,0x00, 0xFF,0xCC,0xFF, 0xFF,0xCC,0xCC, + 0xFF,0xCC,0x99, 0xFF,0xCC,0x66, 0xFF,0xCC,0x33, 0xFF,0xCC,0x00, + 0xFF,0x99,0xFF, 0xFF,0x99,0xCC, 0xFF,0x99,0x99, 0xFF,0x99,0x66, +// 10 + 0xFF,0x99,0x33, 0xFF,0x99,0x00, 0xFF,0x66,0xFF, 0xFF,0x66,0xCC, + 0xFF,0x66,0x99, 0xFF,0x66,0x66, 0xFF,0x66,0x33, 0xFF,0x66,0x00, + 0xFF,0x33,0xFF, 0xFF,0x33,0xCC, 0xFF,0x33,0x99, 0xFF,0x33,0x66, + 0xFF,0x33,0x33, 0xFF,0x33,0x00, 0xFF,0x00,0xFF, 0xFF,0x00,0xCC, +// 20 + 0xFF,0x00,0x99, 0xFF,0x00,0x66, 0xFF,0x00,0x33, 0xFF,0x00,0x00, + 0xCC,0xFF,0xFF, 0xCC,0xFF,0xCC, 0xCC,0xFF,0x99, 0xCC,0xFF,0x66, + 0xCC,0xFF,0x33, 0xCC,0xFF,0x00, 0xCC,0xCC,0xFF, 0xCC,0xCC,0xCC, + 0xCC,0xCC,0x99, 0xCC,0xCC,0x66, 0xCC,0xCC,0x33, 0xCC,0xCC,0x00, +// 30 + 0xCC,0x99,0xFF, 0xCC,0x99,0xCC, 0xCC,0x99,0x99, 0xCC,0x99,0x66, + 0xCC,0x99,0x33, 0xCC,0x99,0x00, 0xCC,0x66,0xFF, 0xCC,0x66,0xCC, + 0xCC,0x66,0x99, 0xCC,0x66,0x66, 0xCC,0x66,0x33, 0xCC,0x66,0x00, + 0xCC,0x33,0xFF, 0xCC,0x33,0xCC, 0xCC,0x33,0x99, 0xCC,0x33,0x66, +// 40 + 0xCC,0x33,0x33, 0xCC,0x33,0x00, 0xCC,0x00,0xFF, 0xCC,0x00,0xCC, + 0xCC,0x00,0x99, 0xCC,0x00,0x66, 0xCC,0x00,0x33, 0xCC,0x00,0x00, + 0x99,0xFF,0xFF, 0x99,0xFF,0xCC, 0x99,0xFF,0x99, 0x99,0xFF,0x66, + 0x99,0xFF,0x33, 0x99,0xFF,0x00, 0x99,0xCC,0xFF, 0x99,0xCC,0xCC, +// 50 + 0x99,0xCC,0x99, 0x99,0xCC,0x66, 0x99,0xCC,0x33, 0x99,0xCC,0x00, + 0x99,0x99,0xFF, 0x99,0x99,0xCC, 0x99,0x99,0x99, 0x99,0x99,0x66, + 0x99,0x99,0x33, 0x99,0x99,0x00, 0x99,0x66,0xFF, 0x99,0x66,0xCC, + 0x99,0x66,0x99, 0x99,0x66,0x66, 0x99,0x66,0x33, 0x99,0x66,0x00, +// 60 + 0x99,0x33,0xFF, 0x99,0x33,0xCC, 0x99,0x33,0x99, 0x99,0x33,0x66, + 0x99,0x33,0x33, 0x99,0x33,0x00, 0x99,0x00,0xFF, 0x99,0x00,0xCC, + 0x99,0x00,0x99, 0x99,0x00,0x66, 0x99,0x00,0x33, 0x99,0x00,0x00, + 0x66,0xFF,0xFF, 0x66,0xFF,0xCC, 0x66,0xFF,0x99, 0x66,0xFF,0x66, +// 70 + 0x66,0xFF,0x33, 0x66,0xFF,0x00, 0x66,0xCC,0xFF, 0x66,0xCC,0xCC, + 0x66,0xCC,0x99, 0x66,0xCC,0x66, 0x66,0xCC,0x33, 0x66,0xCC,0x00, + 0x66,0x99,0xFF, 0x66,0x99,0xCC, 0x66,0x99,0x99, 0x66,0x99,0x66, + 0x66,0x99,0x33, 0x66,0x99,0x00, 0x66,0x66,0xFF, 0x66,0x66,0xCC, +// 80 + 0x66,0x66,0x99, 0x66,0x66,0x66, 0x66,0x66,0x33, 0x66,0x66,0x00, + 
0x66,0x33,0xFF, 0x66,0x33,0xCC, 0x66,0x33,0x99, 0x66,0x33,0x66, + 0x66,0x33,0x33, 0x66,0x33,0x00, 0x66,0x00,0xFF, 0x66,0x00,0xCC, + 0x66,0x00,0x99, 0x66,0x00,0x66, 0x66,0x00,0x33, 0x66,0x00,0x00, +// 90 + 0x33,0xFF,0xFF, 0x33,0xFF,0xCC, 0x33,0xFF,0x99, 0x33,0xFF,0x66, + 0x33,0xFF,0x33, 0x33,0xFF,0x00, 0x33,0xCC,0xFF, 0x33,0xCC,0xCC, + 0x33,0xCC,0x99, 0x33,0xCC,0x66, 0x33,0xCC,0x33, 0x33,0xCC,0x00, + 0x33,0x99,0xFF, 0x33,0x99,0xCC, 0x33,0x99,0x99, 0x33,0x99,0x66, +// a0 + 0x33,0x99,0x33, 0x33,0x99,0x00, 0x33,0x66,0xFF, 0x33,0x66,0xCC, + 0x33,0x66,0x99, 0x33,0x66,0x66, 0x33,0x66,0x33, 0x33,0x66,0x00, + 0x33,0x33,0xFF, 0x33,0x33,0xCC, 0x33,0x33,0x99, 0x33,0x33,0x66, + 0x33,0x33,0x33, 0x33,0x33,0x00, 0x33,0x00,0xFF, 0x33,0x00,0xCC, +// b0 + 0x33,0x00,0x99, 0x33,0x00,0x66, 0x33,0x00,0x33, 0x33,0x00,0x00, + 0x00,0xFF,0xFF, 0x00,0xFF,0xCC, 0x00,0xFF,0x99, 0x00,0xFF,0x66, + 0x00,0xFF,0x33, 0x00,0xFF,0x00, 0x00,0xCC,0xFF, 0x00,0xCC,0xCC, + 0x00,0xCC,0x99, 0x00,0xCC,0x66, 0x00,0xCC,0x33, 0x00,0xCC,0x00, +// c0 + 0x00,0x99,0xFF, 0x00,0x99,0xCC, 0x00,0x99,0x99, 0x00,0x99,0x66, + 0x00,0x99,0x33, 0x00,0x99,0x00, 0x00,0x66,0xFF, 0x00,0x66,0xCC, + 0x00,0x66,0x99, 0x00,0x66,0x66, 0x00,0x66,0x33, 0x00,0x66,0x00, + 0x00,0x33,0xFF, 0x00,0x33,0xCC, 0x00,0x33,0x99, 0x00,0x33,0x66, +// d0 + 0x00,0x33,0x33, 0x00,0x33,0x00, 0x00,0x00,0xFF, 0x00,0x00,0xCC, + 0x00,0x00,0x99, 0x00,0x00,0x66, 0x00,0x00,0x33, 0xEE,0x00,0x00, + 0xDD,0x00,0x00, 0xBB,0x00,0x00, 0xAA,0x00,0x00, 0x88,0x00,0x00, + 0x77,0x00,0x00, 0x55,0x00,0x00, 0x44,0x00,0x00, 0x22,0x00,0x00, +// e0 + 0x11,0x00,0x00, 0x00,0xEE,0x00, 0x00,0xDD,0x00, 0x00,0xBB,0x00, + 0x00,0xAA,0x00, 0x00,0x88,0x00, 0x00,0x77,0x00, 0x00,0x55,0x00, + 0x00,0x44,0x00, 0x00,0x22,0x00, 0x00,0x11,0x00, 0x00,0x00,0xEE, + 0x00,0x00,0xDD, 0x00,0x00,0xBB, 0x00,0x00,0xAA, 0x00,0x00,0x88, +// f0 + 0x00,0x00,0x77, 0x00,0x00,0x55, 0x00,0x00,0x44, 0x00,0x00,0x22, + 0x00,0x00,0x11, 0xEE,0xEE,0xEE, 0xDD,0xDD,0xDD, 0xBB,0xBB,0xBB, + 0xAA,0xAA,0xAA, 0x88,0x88,0x88, 0x77,0x77,0x77, 0x55,0x55,0x55, + 0x44,0x44,0x44, 0x22,0x22,0x22, 0x11,0x11,0x11, 0x00,0x00,0x00 +}; + + +/* panic dialog and info saving */ +static int mac_addr_digit_x; +static int mac_addr_digit_y; +static void blit_digit( int digit ); +static boolean_t panicDialogDrawn = FALSE; + +static void +panic_blit_rect( unsigned int x, unsigned int y, + unsigned int width, unsigned int height, + int transparent, unsigned char * dataPtr ); + +static void +panic_blit_rect_8( unsigned int x, unsigned int y, + unsigned int width, unsigned int height, + int transparent, unsigned char * dataPtr ); + +static void +panic_blit_rect_16( unsigned int x, unsigned int y, + unsigned int width, unsigned int height, + int transparent, unsigned char * dataPtr ); + +static void +panic_blit_rect_32( unsigned int x, unsigned int y, + unsigned int width, unsigned int height, + int transparent, unsigned char * dataPtr ); + +static void +dim_screen(void); + +static void +dim_screen_16(void); + +static void +dim_screen_32(void); + +static int +decode_rle( unsigned char * dataPtr, unsigned int * quantity, unsigned int * value ); + +void +panic_ui_initialize(const unsigned char * system_clut) +{ + clut = system_clut; +} + +void +draw_panic_dialog( void ) +{ + int pd_x,pd_y, iconx, icony, tx_line, tx_col; + int line_width = 1; + int f1, f2, d1, d2, d3, rem; + char *pair = "ff"; + int count = 0; + char digit; + int nibble; + char colon = ':'; + char dot = '.'; + struct ether_addr kdp_mac_addr = kdp_get_mac_addr(); + unsigned int ip_addr = (unsigned int) 
ntohl(kdp_get_ip_address()); + + if (!panicDialogDrawn && panicDialogDesired) + { + if ( !logPanicDataToScreen ) + { + + /* dim the screen 50% before putting up panic dialog */ + dim_screen(); + + /* set up to draw background box */ + pd_x = (vinfo.v_width/2) - panic_dialog.pd_width/2; + pd_y = (vinfo.v_height/2) - panic_dialog.pd_height/2; + + /* draw image */ + panic_blit_rect( pd_x, pd_y, panic_dialog.pd_width, panic_dialog.pd_height, 0, (unsigned char*) panic_dialog.image_pixel_data); + + /* do not display the mac and ip addresses if the machine isn't attachable. */ + /* there's no sense in possibly confusing people. */ + if (panicDebugging) + { + + /* offset for mac address text */ + mac_addr_digit_x = (vinfo.v_width/2) - 130; /* use 62 if no ip */ + mac_addr_digit_y = (vinfo.v_height/2) + panic_dialog.pd_height/2 - 20; + + if(kdp_mac_addr.ether_addr_octet[0] || kdp_mac_addr.ether_addr_octet[1]|| kdp_mac_addr.ether_addr_octet[2] + || kdp_mac_addr.ether_addr_octet[3] || kdp_mac_addr.ether_addr_octet[4] || kdp_mac_addr.ether_addr_octet[5]) + { + /* blit the digits for mac address */ + for (count = 0; count < 6; count++ ) + { + nibble = (kdp_mac_addr.ether_addr_octet[count] & 0xf0) >> 4; + digit = nibble < 10 ? nibble + '0':nibble - 10 + 'a'; + blit_digit(digit); + + nibble = kdp_mac_addr.ether_addr_octet[count] & 0xf; + digit = nibble < 10 ? nibble + '0':nibble - 10 + 'a'; + blit_digit(digit); + if( count < 5 ) + blit_digit( colon ); + } + } + else /* blit the ff's */ + { + for( count = 0; count < 6; count++ ) + { + digit = pair[0]; + blit_digit(digit); + digit = pair[1]; + blit_digit(digit); + if( count < 5 ) + blit_digit( colon ); + } + } + /* now print the ip address */ + mac_addr_digit_x = (vinfo.v_width/2) + 10; + if(ip_addr != 0) + { + /* blit the digits for ip address */ + for (count = 0; count < 4; count++ ) + { + nibble = (ip_addr & 0xff000000 ) >> 24; + + d3 = (nibble % 0xa) + '0'; + nibble = nibble/0xa; + d2 = (nibble % 0xa) + '0'; + nibble = nibble /0xa; + d1 = (nibble % 0xa) + '0'; + + if( d1 != '0' ) blit_digit(d1); + blit_digit(d2); + blit_digit(d3); + if( count < 3 ) + blit_digit(dot); + + d1= d2 = d3 = 0; + ip_addr = ip_addr << 8; + } + } + } + } + } + panicDialogDrawn = TRUE; + panicDialogDesired = FALSE; + +} + +static void +blit_digit( int digit ) +{ + switch( digit ) + { + case '0': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_0.num_w, num_0.num_h, 255, (unsigned char*) num_0.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_0.num_w - 1; + break; + } + case '1': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_1.num_w, num_1.num_h, 255, (unsigned char*) num_1.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_1.num_w ; + break; + } + case '2': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_2.num_w, num_2.num_h, 255, (unsigned char*) num_2.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_2.num_w ; + break; + } + case '3': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_3.num_w, num_3.num_h, 255, (unsigned char*) num_3.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_3.num_w ; + break; + } + case '4': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_4.num_w, num_4.num_h, 255, (unsigned char*) num_4.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_4.num_w ; + break; + } + case '5': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_5.num_w, num_5.num_h, 255, (unsigned char*) num_5.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + 
num_5.num_w ; + break; + } + case '6': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_6.num_w, num_6.num_h, 255, (unsigned char*) num_6.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_6.num_w ; + break; + } + case '7': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_7.num_w, num_7.num_h, 255, (unsigned char*) num_7.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_7.num_w ; + break; + } + case '8': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_8.num_w, num_8.num_h, 255, (unsigned char*) num_8.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_8.num_w ; + break; + } + case '9': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_9.num_w, num_9.num_h, 255, (unsigned char*) num_9.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_9.num_w ; + break; + } + case 'a': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_a.num_w, num_a.num_h, 255, (unsigned char*) num_a.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_a.num_w ; + break; + } + case 'b': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_b.num_w, num_b.num_h, 255, (unsigned char*) num_b.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_b.num_w ; + break; + } + case 'c': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_c.num_w, num_c.num_h, 255, (unsigned char*) num_c.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_c.num_w ; + break; + } + case 'd': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_d.num_w, num_d.num_h, 255, (unsigned char*) num_d.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_d.num_w ; + break; + } + case 'e': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_e.num_w, num_e.num_h, 255, (unsigned char*) num_e.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_e.num_w ; + break; + } + case 'f': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_f.num_w, num_f.num_h, 255, (unsigned char*) num_f.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_f.num_w ; + break; + } + case ':': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_colon.num_w, num_colon.num_h, 255, (unsigned char*) num_colon.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_colon.num_w; + break; + } + case '.': { + panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y + (num_colon.num_h/2), num_colon.num_w, num_colon.num_h/2, 255, (unsigned char*) num_colon.num_pixel_data); + mac_addr_digit_x = mac_addr_digit_x + num_colon.num_w; + break; + } + default: + break; + + } +} + +static void +panic_blit_rect( unsigned int x, unsigned int y, + unsigned int width, unsigned int height, + int transparent, unsigned char * dataPtr ) +{ + if(!vinfo.v_depth) + return; + + switch( vinfo.v_depth) { + case 8: + panic_blit_rect_8( x, y, width, height, transparent, dataPtr); + break; + case 16: + panic_blit_rect_16( x, y, width, height, transparent, dataPtr); + break; + case 32: + panic_blit_rect_32( x, y, width, height, transparent, dataPtr); + break; + } +} + +/* panic_blit_rect_8 uses the built in clut for drawing. 
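+ Each decoded run value is already a clut index, so it is stored into the 8-bpp framebuffer unchanged, one byte per pixel; the 16- and 32-bpp variants below expand the index through the clut instead.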
+ +*/ +static void +panic_blit_rect_8( unsigned int x, unsigned int y, + unsigned int width, unsigned int height, + int transparent, unsigned char * dataPtr ) +{ + volatile unsigned char * dst; + int line, col; + unsigned int data, quantity, value; + + dst = (volatile unsigned char *) (vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + x); + + quantity = 0; + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + if (quantity == 0) { + dataPtr += decode_rle(dataPtr, &quantity, &value); + } + + data = value; + *(dst + col) = data; + quantity--; + } + + dst = (volatile unsigned char *) (((int)dst) + vinfo.v_rowbytes); + } +} + +/* panic_blit_rect_16 draws using a clut. + + panic_blit_rect_16 decodes the RLE encoded image data on the fly, looks up the + color by indexing into the clut, uses the top 5 bits to fill in each of the three + pixel values (RGB) and writes each pixel to the screen. +*/ + static void + panic_blit_rect_16( unsigned int x, unsigned int y, + unsigned int width, unsigned int height, + int transparent, unsigned char * dataPtr ) + { + volatile unsigned short * dst; + int line, col; + unsigned int quantity, index, value, data; + + /* If our clut has disappeared, use the standard MacOS 8-bit clut */ + if(!clut) { + clut = appleClut8; + } + + dst = (volatile unsigned short *) (vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x * 2)); + + quantity = 0; + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + + if (quantity == 0) { + dataPtr += decode_rle(dataPtr, &quantity, &value); + index = value * 3; + } + + data = ( (unsigned short) (0xf8 & (clut[index + 0])) << 7) + | ( (unsigned short) (0xf8 & (clut[index + 1])) << 2) + | ( (unsigned short) (0xf8 & (clut[index + 2])) >> 3); + + *(dst + col) = data; + quantity--; + } + + dst = (volatile unsigned short *) (((int)dst) + vinfo.v_rowbytes); + } + + } + + /* + panic_blit_rect_32 decodes the RLE encoded image data on the fly, and fills + in each of the three pixel values from the clut (RGB) for each pixel and + writes it to the screen. + */ + static void + panic_blit_rect_32( unsigned int x, unsigned int y, + unsigned int width, unsigned int height, + int transparent, unsigned char * dataPtr ) + { + volatile unsigned int * dst; + int line, col; + unsigned int value, quantity, index, data; + + + /* If our clut has disappeared, use the standard MacOS 8-bit clut */ + if(!clut) { + clut = appleClut8; + } + + dst = (volatile unsigned int *) (vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x * 4)); + + quantity = 0; + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + if (quantity == 0) { + dataPtr += decode_rle(dataPtr, &quantity, &value); + index = value * 3; + } + + data = ( (unsigned int) clut[index + 0] << 16) + | ( (unsigned int) clut[index + 1] << 8) + | ( (unsigned int) clut[index + 2]); + + *(dst + col) = data; + quantity--; + } + + dst = (volatile unsigned int *) (((int)dst) + vinfo.v_rowbytes); + } +} + +/* + decode_rle decodes a single quantity/value pair of a "modified-RLE" encoded + image. The encoding works as follows: + + The quantity and value will be described by either two or three bytes. If the + most significant bit of the first byte is a 0, then the next seven bits are + the quantity (run-length) and the following 8 bits are the value (index into + a clut, in this case). If the msb of the first byte is a 1, then the next 15 bits + are the quantity and the following 8 are the value. 
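+ For example, the pair 0x05 0xfd encodes a run of five pixels of clut index 0xfd, while the triple 0x83 0x12 0x34 encodes 0x0312 (786) pixels of value 0x34.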
Visually, the two possible + encodings are: (q = quantity, v = value) + + Byte 1 Byte 2 Byte 3 + case 1: [ 0 q6 q5 q4 q3 q2 q1 q0 ] [ v7 v6 v5 v4 v3 v2 v1 v0 ] [ ] + case 2: [ 1 q14 q13 q12 q11 q10 q9 q8 ] [ q7 q6 q5 q4 q3 q2 q1 q0 ] [ v7 v6 v5 v4 v3 v2 v1 v0 ] +*/ +static int +decode_rle( unsigned char * dataPtr, unsigned int * quantity, unsigned int * value ) +{ + unsigned char byte1 = *dataPtr++; + unsigned char byte2 = *dataPtr++; + int num_slots = 0; + + /* if the most-significant bit is 0, then the first byte is quantity, the second is value */ + if ((byte1 >> 7) == 0) { + *quantity = (unsigned int) byte1; + *value = (unsigned int) byte2; + num_slots = 2; + } else { + /* clear the leading 1 */ + byte1 ^= 0x80; + + /* the first two bytes are the quantity, the third is value */ + *quantity = (unsigned int) byte1 << 8 | byte2; + *value = *dataPtr++; + num_slots = 3; + } + + return num_slots; +} + +static void +dim_screen(void) +{ + if(!vinfo.v_depth) + return; + + switch( vinfo.v_depth) { + case 16: + dim_screen_16(); + break; + case 32: + dim_screen_32(); + break; + } +} + +static void +dim_screen_16(void) +{ + unsigned long *p, *endp, *row; + int col; + int rowline, rowlongs; + unsigned long value, tmp; + + rowline = vinfo.v_rowscanbytes / 4; + rowlongs = vinfo.v_rowbytes / 4; + + p = (unsigned long*) vinfo.v_baseaddr; + endp = (unsigned long*) vinfo.v_baseaddr; + + endp += rowlongs * vinfo.v_height; + + for (row = p ; row < endp ; row += rowlongs) { + for (col = 0; col < rowline; col++) { + value = *(row+col); + tmp = ((value & 0x7C007C00) >> 1) & 0x3C003C00; + tmp |= ((value & 0x03E003E0) >> 1) & 0x01E001E0; + tmp |= ((value & 0x001F001F) >> 1) & 0x000F000F; + *(row+col) = tmp; /* each RGB field halved: 50% dim */ + } + + } + +} + +static void +dim_screen_32(void) +{ + unsigned long *p, *endp, *row; + int col; + int rowline, rowlongs; + unsigned long value, tmp; + + rowline = vinfo.v_rowscanbytes / 4; + rowlongs = vinfo.v_rowbytes / 4; + + p = (unsigned long*) vinfo.v_baseaddr; + endp = (unsigned long*) vinfo.v_baseaddr; + + endp += rowlongs * vinfo.v_height; + + for (row = p ; row < endp ; row += rowlongs) { + for (col = 0; col < rowline; col++) { + value = *(row+col); + tmp = ((value & 0x00FF0000) >> 1) & 0x007F0000; + tmp |= ((value & 0x0000FF00) >> 1) & 0x00007F00; + tmp |= (value & 0x000000FF) >> 1; + *(row+col) = tmp; /* each RGB field halved: 50% dim */
+ } + + } + +} diff --git a/osfmk/console/panic_image.c b/osfmk/console/panic_image.c new file mode 100644 index 000000000..8933773fe --- /dev/null +++ b/osfmk/console/panic_image.c @@ -0,0 +1,1953 @@ +/* generated c file */ + +static const struct { + unsigned int pd_width; + unsigned int pd_height; + unsigned int bytes_per_pixel; /* 1: CLUT, 3:RGB, 4:RGBA */ + unsigned char image_pixel_data[0x880a]; +} panic_dialog = { + 472, 255, 1, +0xae,0x87,0xfd, 0x01,0x6c, 0x01,0x55, 0x80,0xbb,0xfd, 0x01,0x55, 0x01,0x6c, 0x06,0xfd, +0x01,0x6c, 0x01,0x55, 0x0b,0xfd, 0x01,0x41, 0x01,0x83, 0x24,0xfd, 0x01,0x83, 0x01,0x41, 0x80,0xa6,0xfd, +0x02,0x2b, 0x04,0xfd, 0x01,0x2b, 0x01,0x19, 0x30,0xfd, 0x01,0x2b, 0x01,0x00, 0x80,0xa9,0xfd, +0x01,0x55, 0x01,0x00, 0x04,0xfd, 0x01,0x19, 0x01,0x2b, 0x0a,0xfd, 0x01,0x00, 0x01,0x2b, 0x06,0xfd, +0x01,0x2b, 0x01,0x00, 0x0b,0xfd, 0x01,0x00, 0x01,0x41, 0x24,0xfd, 0x01,0x41, 0x01,0x00, 0x12,0xfd, +0x04,0x00, 0x01,0x07, 0x80,0x8f,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x55, +0x01,0x00, 0x01,0x83, 0x30,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x01,0x55, 0x01,0x6c, 0x22,0xfd, +0x01,0x6c, 0x01,0x55, 0x10,0xfd, 0x01,0x41, 0x01,0x83, 0x52,0xfd, 0x01,0x55, 0x01,0x6c, 0x17,0xfd, +0x01,0x41, 0x01,0x00, 0x04,0xfd, 0x01,0x2b, 0x01,0x00, 0x0a,0xfd, 0x01,0x00, 0x01,0x2b, 0x06,0xfd, +0x01,0x2b, 0x01,0x00, 0x0b,0xfd, 0x01,0x00, 0x01,0x41, 0x1f,0xfd, 0x01,0x83, 0x01,0x41, 0x03,0xfd, +0x01,0x41, 0x01,0x00, 0x12,0xfd, 0x01,0x00, 0x01,0x07, 0x01,0x41, 0x01,0x07, 0x01,0x00, 0x01,0x2b, +0x80,0x8f,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x19, 0x31,0xfd, 0x01,0x2b, +0x01,0x00, 0x06,0xfd, 0x01,0x00, 0x01,0x2b, 0x22,0xfd, 0x01,0x2b, 0x01,0x00, 0x10,0xfd, 0x01,0x00, +0x01,0x41, 0x52,0xfd, 0x01,0x00, 0x01,0x2b, 0x17,0xfd, 0x01,0x41, 0x01,0x00, 0x04,0xfd, 0x01,0x2b, +0x01,0x00, 0x0a,0xfd, 0x01,0x00, 0x01,0x2b, 0x06,0xfd, 0x01,0x2b, 0x01,0x00, 0x0b,0xfd, 0x01,0x00, +0x01,0x41, 0x1f,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x12,0xfd, 0x01,0x00, +0x01,0x41, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x80,0x8f,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x07, +0x01,0x19, 0x01,0x00, 0x03,0xfd, 0x01,0x83, 0x04,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x19, 0x02,0x00, 0x01,0x19, +0x03,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x19, 0x03,0x00, 0x01,0x83, 0x02,0xfd, +0x01,0x55, 0x05,0x00, 0x05,0xfd, 0x01,0x2b, 0x03,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x83, 0x04,0x00, +0x01,0x83, 0x06,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0x00, 0x02,0xfd, 0x01,0x19, 0x03,0x00, 0x01,0x83, +0x02,0xfd, 0x01,0x2b, 0x03,0x00, 0x01,0xfd, 0x01,0x41, 0x03,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x2e, +0x03,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x04,0x00, +0x04,0xfd, 0x01,0x19, 0x01,0x00, 0x04,0xfd, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x19, 0x03,0x00, +0x01,0x19, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, +0x01,0x00, 0x01,0x19, 0x01,0x00, 0x01,0x2b, 0x05,0xfd, 0x01,0x83, 0x01,0x2b, 0x03,0x00, 0x02,0xfd, +0x01,0x19, 0x03,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x2e, 0x02,0x00, 0x01,0x2b, +0x01,0x83, 0x03,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x00, 0x01,0x07, 0x03,0x00, 0x01,0x55, 0x02,0xfd, +0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x2b, 0x03,0x00, 0x01,0x41, +0x01,0xfd, 0x01,0x55, 0x03,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0x00, 0x09,0xfd, +0x01,0x41, 0x01,0x00, 0x04,0xfd, 0x01,0x2b, 
0x01,0x00, 0x02,0xfd, 0x01,0x83, 0x04,0x00, 0x01,0x83, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x55, 0x05,0x00, 0x07,0xfd, 0x01,0x2b, 0x02,0x00, +0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x2b, 0x03,0x00, 0x01,0x55, 0x01,0xfd, 0x02,0x2b, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0xfd, 0x01,0x2b, +0x01,0x00, 0x01,0x19, 0x02,0x00, 0x01,0x19, 0x06,0xfd, 0x04,0x00, 0x01,0xfd, 0x01,0x41, 0x01,0x00, +0x01,0x55, 0x02,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x19, 0x03,0x00, 0x01,0x83, 0x06,0xfd, 0x01,0x00, +0x01,0x41, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x83, 0x04,0x00, 0x01,0x83, 0x01,0xfd, +0x01,0x00, 0x01,0x19, 0x02,0xfd, 0x02,0x00, 0x02,0xfd, 0x01,0x07, 0x01,0x2b, 0x01,0xfd, 0x01,0x83, +0x03,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x71,0xfd, +0x01,0x19, 0x02,0x00, 0x01,0x55, 0x03,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0x83, 0x01,0x2b, 0x01,0x00, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0x2b, 0x01,0x83, 0x01,0x07, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0x83, 0x01,0x00, +0x01,0x19, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, +0x01,0x83, 0x01,0x00, 0x01,0x07, 0x01,0xfd, 0x01,0x41, 0x02,0x00, 0x06,0xfd, 0x01,0x00, 0x01,0x2b, +0x03,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0x83, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x02,0x00, 0x01,0x41, +0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x55, +0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x6c, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, 0x02,0x55, +0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x07, 0x03,0xfd, +0x01,0x00, 0x01,0x41, 0x06,0xfd, 0x01,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x19, 0x01,0x00, 0x01,0xfd, +0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, +0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x2b, 0x01,0x83, +0x06,0xfd, 0x02,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x6c, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x41, +0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x02,0x00, 0x01,0x6c, 0x01,0x55, +0x02,0x00, 0x01,0x41, 0x01,0x83, 0x01,0x00, 0x01,0x07, 0x02,0xfd, 0x02,0x00, 0x01,0x41, 0x01,0xfd, +0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, +0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x19, 0x01,0xfd, 0x01,0x07, 0x01,0x00, +0x02,0xfd, 0x02,0x00, 0x01,0x41, 0x0a,0xfd, 0x01,0x41, 0x07,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, +0x02,0x83, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x83, 0x01,0x00, +0x01,0x07, 0x01,0xfd, 0x01,0x41, 0x02,0x00, 0x06,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x6c, 0x01,0x83, +0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x07, +0x01,0x00, 0x01,0x83, 0x01,0x19, 0x01,0x00, 0x02,0xfd, 0x02,0x00, 0x01,0x19, 0x01,0xfd, 0x01,0x07, +0x01,0x2b, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x01,0x07, 0x01,0x00, 0x06,0xfd, +0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x41, 0x02,0x00, 0x01,0x6c, 0x01,0x55, 0x01,0x00, 0x01,0x55, +0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x06,0xfd, 0x01,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0x83, +0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x02,0x2b, 0x01,0xfd, 0x01,0x6c, 0x02,0x00, 0x01,0x6c, 0x01,0xfd, 
+0x01,0x00, 0x01,0x2e, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x6c, +0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x07, 0x74,0xfd, 0x01,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x6c, +0x01,0x00, 0x01,0x6c, 0x02,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0x41, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, +0x02,0x41, 0x01,0x07, 0x01,0x00, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, +0x06,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x6c, 0x02,0xfd, 0x01,0x6c, +0x01,0x00, 0x01,0x55, 0x05,0xfd, 0x01,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0x41, +0x01,0x07, 0x01,0x00, 0x01,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2b, 0x01,0x6c, 0x03,0xfd, 0x01,0x2b, +0x01,0x00, 0x07,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x04,0xfd, 0x01,0x00, +0x01,0x41, 0x06,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2e, 0x01,0xfd, 0x02,0x2b, +0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x41, +0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x83, 0x06,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x6c, 0x04,0xfd, +0x02,0x2b, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x83, 0x02,0xfd, +0x01,0x00, 0x01,0x2e, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, +0x01,0x83, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x19, 0x01,0x00, 0x03,0x41, 0x01,0x00, 0x01,0x55, +0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x0b,0xfd, 0x01,0x41, 0x01,0x00, 0x04,0x41, 0x01,0x2b, 0x01,0x00, +0x01,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x6c, 0x02,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x55, 0x01,0xfd, +0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, +0x01,0x00, 0x01,0x19, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x00, 0x01,0x19, 0x03,0xfd, +0x01,0x00, 0x01,0x07, 0x01,0x6c, 0x01,0x00, 0x01,0x6c, 0x01,0x55, 0x01,0x00, 0x02,0x2b, 0x01,0xfd, +0x01,0x00, 0x01,0x2e, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x06,0xfd, +0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x00, 0x01,0x41, +0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0x41, 0x01,0x07, 0x01,0x00, 0x06,0xfd, 0x04,0x00, 0x01,0x2e, +0x01,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x6c, 0x02,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x55, 0x01,0x19, +0x01,0x00, 0x01,0xfd, 0x01,0x07, 0x01,0x2b, 0x01,0x00, 0x01,0x19, 0x01,0xfd, 0x01,0x00, 0x01,0x83, +0x01,0x6c, 0x01,0x00, 0x01,0x2e, 0x02,0x41, 0x01,0x00, 0x01,0x07, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, +0x75,0xfd, 0x01,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x04,0xfd, 0x01,0x00, 0x01,0x41, +0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x01,0x2b, 0x01,0x00, +0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x05,0x00, 0x01,0x2b, 0x01,0xfd, 0x06,0x00, 0x01,0xfd, +0x01,0x2b, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, +0x01,0x41, 0x01,0x00, 0x04,0xfd, 0x01,0x00, 0x01,0x41, 0x05,0xfd, 0x01,0x00, 0x01,0x2b, 0x03,0xfd, +0x06,0x00, 0x02,0xfd, 0x01,0x41, 0x02,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, +0x01,0x6c, 0x04,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x04,0xfd, 0x01,0x00, 0x01,0x41, +0x06,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x6c, 
[Several hundred added lines of run-length-encoded 8-bit image data elided: byte pairs of the form (run length, pixel value), e.g. "0x75,0xfd" for a run of 0x75 pixels of value 0xfd. Sequences whose first byte has the high bit set (e.g. "0x80,0xf2,0xfd") appear to carry a two-byte run length.]
0x01,0x6c, 0x01,0xfd, 0x01,0x00, +0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, +0x02,0x00, 0x01,0x2e, 0x02,0x00, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, 0x01,0x41, 0x01,0x00, +0x04,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x2e, 0x02,0x00, 0x01,0xfd, 0x01,0x83, 0x01,0x2b, 0x01,0x00, +0x02,0x2b, 0x01,0x00, 0x03,0xfd, 0x01,0x07, 0x03,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x8b, 0x02,0xfb, +0x01,0x22, 0x04,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, +0x01,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x36, 0x03,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x00, 0x01,0x22, +0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x70, 0x01,0x53, 0x03,0x00, 0x01,0x2b, 0x02,0xfd, +0x01,0x07, 0x03,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x41, 0x04,0x00, 0x06,0xfb, 0x05,0x00, 0x01,0x36, +0x03,0xfb, 0x01,0x2b, 0x03,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x00, +0x01,0x3e, 0x02,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x83, +0x03,0x00, 0x01,0x12, 0x02,0x00, 0x01,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, +0x02,0xfb, 0x01,0x36, 0x02,0x00, 0x01,0x22, 0x05,0xfb, 0x01,0x00, 0x01,0x2b, 0x03,0x00, 0x01,0x83, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x19, 0x02,0x00, 0x01,0x19, 0x01,0x00, 0x01,0x2b, +0x01,0xfd, 0x01,0x6c, 0x03,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x2b, +0x03,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x02,0x00, 0x01,0x36, 0x01,0x00, 0x01,0x22, 0x01,0xfb, +0x01,0x22, 0x01,0x00, 0x04,0xfb, 0x04,0x00, 0x01,0x59, 0x04,0xfb, 0x01,0x8b, 0x01,0x2e, 0x03,0x00, +0x01,0x19, 0x03,0xfd, 0x01,0x07, 0x03,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2e, 0x03,0x00, 0x01,0x2e, +0x02,0xfd, 0x01,0x2b, 0x03,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, +0x01,0x00, 0x02,0xfd, 0x01,0x83, 0x03,0x00, 0x01,0x2e, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x41, +0x04,0x00, 0x01,0xfd, 0x01,0x6c, 0x03,0x00, 0x01,0x2b, 0x07,0xfd, 0x01,0x2b, 0x03,0x00, 0x01,0x55, +0x02,0xfd, 0x01,0x83, 0x03,0x00, 0x01,0x2e, 0x01,0x00, 0x06,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x2b, +0x02,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x41, 0x04,0x00, 0x02,0xfd, +0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x80,0xa9,0xfd, 0x01,0x00, 0x01,0x2b, +0x0b,0xfd, 0x01,0x84, 0x25,0xfb, 0x01,0x8b, 0x11,0xfd, 0x01,0x87, 0x07,0xfb, 0x01,0x00, 0x01,0x2b, +0x13,0xfb, 0x0c,0xfd, 0x17,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x87, 0x11,0xfd, 0x01,0x8b, +0x26,0xfb, 0x01,0x84, 0x81,0x12,0xfd, 0x01,0x00, 0x02,0x2b, 0x02,0x00, 0x01,0x6c, 0x0b,0xfd, +0x01,0x7a, 0x25,0xfb, 0x11,0xfd, 0x01,0x8b, 0x08,0xfb, 0x01,0x00, 0x01,0x2b, 0x13,0xfb, 0x0c,0xfd, +0x17,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x8b, 0x11,0xfd, 0x26,0xfb, 0x01,0x7a, 0x81,0x12,0xfd, +0x01,0x6c, 0x01,0x07, 0x01,0x2b, 0x01,0x07, 0x01,0x83, 0x0c,0xfd, 0x01,0x6f, 0x24,0xfb, 0x01,0x8b, +0x10,0xfd, 0x01,0x8b, 0x09,0xfb, 0x01,0x03, 0x01,0x22, 0x13,0xfb, 0x0c,0xfd, 0x17,0xfb, 0x01,0x2b, +0x01,0x47, 0x03,0xfb, 0x01,0x8b, 0x10,0xfd, 0x01,0x8b, 0x25,0xfb, 0x01,0x6f, 0x81,0x22,0xfd, +0x01,0x8b, 0x24,0xfb, 0x01,0x78, 0x10,0xfd, 0x01,0x8b, 0x1f,0xfb, 0x0c,0xfd, 0x1d,0xfb, 0x01,0x8b, +0x10,0xfd, 0x01,0x78, 0x25,0xfb, 0x01,0x8b, 0x81,0x21,0xfd, 0x01,0x82, 0x24,0xfb, 0x01,0x8b, +0x10,0xfd, 0x01,0x80, 0x1f,0xfb, 0x0c,0xfd, 0x1d,0xfb, 0x01,0x80, 0x10,0xfd, 0x01,0x8b, 0x25,0xfb, +0x01,0x82, 0x81,0x1a,0xfd, 0x01,0x6c, 0x01,0x55, 0x05,0xfd, 0x01,0x79, 0x07,0xfb, 0x01,0x59, +0x01,0x22, 0x1b,0xfb, 0x10,0xfd, 0x01,0x8b, 
0x08,0xfb, 0x01,0x47, 0x01,0x36, 0x15,0xfb, 0x01,0x59, +0x01,0x3e, 0x0b,0xfd, 0x17,0xfb, 0x01,0x47, 0x01,0x36, 0x05,0xfb, 0x01,0x8b, 0x10,0xfd, 0x25,0xfb, +0x01,0x79, 0x81,0x1a,0xfd, 0x01,0x2b, 0x01,0x00, 0x05,0xfd, 0x01,0x70, 0x07,0xfb, 0x01,0x22, +0x01,0x00, 0x1a,0xfb, 0x01,0x7d, 0x10,0xfd, 0x09,0xfb, 0x01,0x2b, 0x01,0x00, 0x15,0xfb, 0x01,0x00, +0x01,0x16, 0x03,0xfd, 0x01,0x00, 0x01,0x55, 0x06,0xfd, 0x04,0xfb, 0x01,0x00, 0x01,0x36, 0x06,0xfb, +0x01,0x03, 0x01,0x2b, 0x09,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, 0x01,0x03, 0x02,0xfb, +0x10,0xfd, 0x01,0x7d, 0x01,0xfb, 0x01,0x36, 0x01,0x00, 0x21,0xfb, 0x01,0x70, 0x81,0x1a,0xfd, +0x01,0x2b, 0x01,0x00, 0x04,0xfd, 0x01,0x8b, 0x08,0xfb, 0x01,0x22, 0x01,0x00, 0x17,0xfb, 0x01,0x22, +0x01,0x59, 0x01,0xfb, 0x01,0x8b, 0x0f,0xfd, 0x01,0x8b, 0x09,0xfb, 0x01,0x2b, 0x01,0x00, 0x14,0xfb, +0x02,0x03, 0x04,0xfd, 0x01,0x2b, 0x01,0x6c, 0x06,0xfd, 0x04,0xfb, 0x01,0x2b, 0x01,0x47, 0x01,0xfb, +0x01,0x59, 0x01,0x22, 0x03,0xfb, 0x01,0x22, 0x01,0x03, 0x09,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, +0x01,0x03, 0x01,0x22, 0x02,0xfb, 0x01,0x8b, 0x0c,0xfd, 0x01,0x83, 0x01,0x41, 0x01,0xfd, 0x01,0x8b, +0x01,0xfb, 0x01,0x47, 0x01,0x2b, 0x22,0xfb, 0x01,0x8b, 0x81,0x19,0xfd, 0x01,0x2b, 0x01,0x00, +0x04,0xfd, 0x01,0x88, 0x08,0xfb, 0x01,0x22, 0x01,0x00, 0x17,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0x7e, +0x10,0xfd, 0x0a,0xfb, 0x01,0x2b, 0x01,0x00, 0x16,0xfb, 0x0c,0xfd, 0x07,0xfb, 0x01,0x22, 0x01,0x00, +0x0e,0xfb, 0x01,0x2b, 0x01,0x00, 0x07,0xfb, 0x0c,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x7e, +0x24,0xfb, 0x01,0x88, 0x80,0xc8,0xfd, 0x01,0x2b, 0x03,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x2b, +0x01,0x00, 0x01,0x2b, 0x02,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x02,0x00, +0x01,0x2b, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0x19, 0x01,0x00, +0x04,0xfd, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x19, 0x03,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x41, +0x05,0x00, 0x01,0x2b, 0x05,0xfd, 0x01,0x55, 0x03,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x00, 0x01,0x41, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0x00, 0x05,0xfd, 0x01,0x2b, +0x01,0x00, 0x02,0xfd, 0x01,0x83, 0x03,0x00, 0x01,0x03, 0x06,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0x12, +0x03,0x00, 0x01,0x59, 0x02,0xfb, 0x01,0x36, 0x03,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x22, 0x01,0x00, +0x03,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x04,0x00, 0x02,0xfd, 0x01,0x19, 0x03,0x00, 0x01,0x19, +0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x55, 0x02,0x00, 0x01,0x2b, 0x01,0x80, 0x06,0xfb, 0x01,0x36, +0x05,0x00, 0x02,0xfb, 0x01,0x59, 0x03,0x00, 0x01,0x03, 0x06,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, +0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x03, 0x03,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x00, 0x01,0x41, +0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x55, 0x02,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x00, 0x01,0x22, +0x01,0xfb, 0x04,0x00, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, 0x03,0x00, 0x01,0x03, +0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x03,0x00, +0x02,0xfd, 0x01,0x2e, 0x03,0x00, 0x01,0x2b, 0x02,0xfd, 0x04,0x00, 0x01,0x8b, 0x01,0x22, 0x01,0x00, +0x03,0xfb, 0x01,0x2b, 0x03,0x00, 0x01,0x36, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0x03, 0x02,0x00, +0x01,0x03, 0x12,0xfb, 0x01,0x80, 0x80,0xc8,0xfd, 0x01,0x41, 0x01,0x6c, 0x01,0xfd, 0x01,0x07, +0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x01,0x6c, 0x01,0x00, 0x01,0x19, +0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x01,0x6c, 0x01,0x00, 0x01,0x19, 0x01,0xfd, +0x01,0x41, 0x01,0x00, 
0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x00, 0x01,0x19, 0x02,0xfd, +0x01,0x19, 0x01,0x00, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, +0x05,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x55, 0x05,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x6c, +0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x02,0x00, 0x01,0x41, +0x06,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0x7a, 0x01,0x36, 0x01,0x00, +0x01,0x47, 0x05,0xfb, 0x01,0x22, 0x02,0x00, 0x01,0x47, 0x01,0x59, 0x02,0x00, 0x01,0xfb, 0x01,0x59, +0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x47, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x22, 0x01,0x00, +0x03,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfd, 0x01,0x41, 0x01,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x02,0x00, 0x01,0x6c, +0x01,0x55, 0x01,0x00, 0x01,0x36, 0x05,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x22, +0x02,0x00, 0x02,0xfb, 0x02,0x00, 0x01,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0x47, 0x05,0xfb, 0x01,0x2b, +0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0x36, 0x01,0xfb, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x02,0x00, 0x01,0x6c, 0x01,0x52, 0x01,0x00, +0x01,0x36, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x2b, +0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x47, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, +0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0x47, 0x01,0xfd, +0x01,0x6c, 0x02,0xfd, 0x02,0x55, 0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x41, +0x01,0x00, 0x03,0xfd, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0x47, 0x01,0xfb, +0x01,0x2b, 0x01,0x00, 0x01,0x59, 0x01,0xfb, 0x01,0x2b, 0x02,0x00, 0x01,0x59, 0x01,0x2b, 0x01,0x00, +0x12,0xfb, 0x01,0x7a, 0x80,0xcb,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, +0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, +0x02,0xfd, 0x01,0x00, 0x01,0x2e, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0x41, 0x01,0x07, 0x01,0x00, +0x04,0xfd, 0x01,0x19, 0x01,0x00, 0x01,0x55, 0x06,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x2e, 0x04,0xfd, +0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x07,0xfd, +0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2e, 0x01,0x28, 0x01,0x22, 0x01,0x00, +0x01,0x2b, 0x05,0xfb, 0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x12, 0x01,0x00, 0x01,0xfb, 0x01,0x03, +0x01,0x00, 0x03,0xfb, 0x01,0x03, 0x01,0x00, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x00, +0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfd, 0x02,0x2b, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x00, 0x01,0x22, 0x05,0xfb, 0x01,0x03, +0x01,0x00, 0x03,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0xfb, 0x01,0x47, 0x01,0x00, 0x01,0x12, 0x02,0x22, +0x01,0x00, 0x01,0x2b, 0x05,0xfb, 0x01,0x2b, 0x01,0x00, 0x03,0xfb, 0x02,0x00, 0x02,0x22, 0x01,0xf7, +0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x83, 0x02,0xfd, +0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x03,0xfb, +0x01,0x2b, 0x01,0x00, 0x05,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, +0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x47, 0x01,0x00, 0x01,0x2b, 0x01,0x5d, 
0x07,0xfd, 0x01,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x00, +0x01,0x03, 0x03,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, +0x01,0x00, 0x12,0xfb, 0x01,0x73, 0x80,0xc8,0xfd, 0x01,0x41, 0x04,0x00, 0x02,0xfd, 0x01,0x2b, +0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, +0x01,0x00, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x55, +0x01,0x00, 0x01,0x6c, 0x01,0x55, 0x01,0x00, 0x02,0xfd, 0x06,0x00, 0x03,0xfd, 0x01,0x2e, 0x01,0x00, +0x01,0x2e, 0x07,0xfd, 0x01,0x83, 0x01,0x2b, 0x02,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x00, 0x01,0x41, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x07,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0xfd, 0x01,0x41, 0x05,0x00, 0x01,0x03, 0x05,0xfb, 0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x22, +0x01,0x00, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x03,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0xfb, 0x01,0x22, +0x01,0x00, 0x03,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x39, 0x02,0xfd, 0x01,0x00, +0x01,0x2b, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, +0x01,0x22, 0x05,0xfb, 0x01,0x2b, 0x01,0x00, 0x03,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0xfb, 0x01,0x22, +0x05,0x00, 0x01,0x03, 0x05,0xfb, 0x01,0x2b, 0x01,0x00, 0x03,0xfb, 0x06,0x00, 0x02,0xfd, 0x01,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, +0x01,0x22, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, +0x04,0x00, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x22, +0x02,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x6c, 0x04,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, +0x03,0xfd, 0x01,0x39, 0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x03,0xfb, 0x01,0x00, 0x01,0x2b, +0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x13,0xfb, 0x80,0xc7,0xfd, +0x01,0x6c, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, +0x03,0xfd, 0x02,0x2b, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, 0x02,0x2b, 0x01,0xfd, 0x01,0x41, +0x01,0x00, 0x02,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x02,0x2b, 0x01,0x07, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x19, 0x0a,0xfd, 0x01,0x83, +0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, +0x01,0x00, 0x01,0x2b, 0x07,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x4d, +0x09,0xfb, 0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x03, 0x01,0x00, 0x01,0xfb, 0x01,0x03, 0x01,0x00, +0x03,0xfb, 0x01,0x03, 0x01,0x00, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x59, 0x01,0x00, +0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x40, 0x02,0xfd, 0x02,0x2b, 0x03,0xfd, 0x02,0x2b, 0x01,0xfd, +0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x87, 0x01,0x00, 0x01,0x22, 0x05,0xfb, 0x01,0x2b, 0x01,0x00, +0x03,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0xfb, 0x01,0x47, 0x01,0x00, 0x01,0x36, 0x09,0xfb, 0x01,0x2b, +0x01,0x00, 0x03,0xfb, 0x02,0x00, 0x02,0xfb, 0x04,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, +0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x22, +0x01,0x00, 0x03,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0xfb, 0x01,0x47, 0x01,0x00, 0x01,0x36, 0x01,0xfb, +0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x04,0xfb, +0x01,0x22, 0x01,0x00, 0x01,0x06, 0x01,0xfd, 
0x01,0x00, 0x01,0x07, 0x02,0xfd, 0x01,0x00, 0x01,0x41, +0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x40, 0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x03, +0x03,0xfb, 0x01,0x00, 0x01,0x03, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, +0x13,0xfb, 0x01,0x8b, 0x80,0xc6,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x55, 0x01,0x83, 0x01,0x2b, +0x01,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x19, 0x01,0xfd, 0x01,0x41, 0x01,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x19, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x41, +0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x55, 0x01,0x6c, 0x02,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x19, +0x02,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x07, 0x02,0xfd, 0x01,0x2e, 0x01,0xfd, +0x01,0x55, 0x01,0x00, 0x01,0x2b, 0x09,0xfd, 0x01,0x41, 0x01,0x6c, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, +0x02,0xfd, 0x01,0x00, 0x01,0x07, 0x01,0x83, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, +0x01,0x2b, 0x07,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x02,0x00, 0x01,0x47, 0x01,0xfb, 0x01,0x47, +0x01,0x36, 0x05,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0x2b, 0x01,0x59, 0x01,0x47, 0x01,0x00, 0x01,0x2b, +0x01,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x47, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, +0x01,0x36, 0x01,0x00, 0x01,0x36, 0x01,0x47, 0x02,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x07, +0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0xfd, +0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0xfb, 0x01,0x00, 0x01,0x22, 0x05,0xfb, 0x01,0x47, 0x01,0x00, +0x01,0x12, 0x01,0xfb, 0x01,0x03, 0x02,0x00, 0x02,0xfb, 0x02,0x00, 0x01,0x47, 0x01,0xfb, 0x01,0x47, +0x01,0x36, 0x05,0xfb, 0x01,0x2b, 0x01,0x00, 0x03,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, +0x01,0xfd, 0x01,0x2e, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, +0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0x36, +0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0x36, 0x01,0x59, 0x01,0x2b, +0x01,0x00, 0x01,0x59, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, +0x01,0x47, 0x01,0x22, 0x01,0xfb, 0x01,0x47, 0x01,0x00, 0x01,0x16, 0x01,0xfd, 0x01,0x00, 0x01,0x07, +0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x01,0x2e, 0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x55, 0x02,0xfd, +0x01,0x41, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0x47, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, +0x01,0x59, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x22, +0x01,0x2b, 0x0f,0xfb, 0x01,0x8b, 0x80,0xc7,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x2e, 0x02,0x00, +0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x02,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0x2b, 0x02,0x00, 0x01,0x19, 0x03,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x55, 0x01,0x00, 0x01,0x41, +0x02,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x2b, 0x04,0xfd, 0x01,0x41, 0x04,0x00, 0x01,0xfd, 0x01,0x2b, +0x05,0x00, 0x01,0x2b, 0x05,0xfd, 0x01,0x2b, 0x03,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x19, 0x02,0x00, +0x01,0x19, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x07,0xfd, 0x01,0x2b, 0x01,0x00, +0x02,0xfd, 0x01,0x83, 0x01,0x2b, 0x03,0x00, 0x01,0x12, 0x05,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0x2b, +0x02,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x36, 0x03,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x2b, 0x02,0x00, +0x01,0x36, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x09, 0x02,0x00, 0x02,0xfd, 0x01,0x19, 0x03,0x00, +0x01,0x19, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0xfb, 0x01,0x00, 0x01,0x22, 0x06,0xfb, +0x01,0x03, 
0x03,0x00, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x59, 0x01,0x2b, 0x03,0x00, 0x01,0x12, +0x05,0xfb, 0x01,0x2b, 0x01,0x00, 0x04,0xfb, 0x01,0x22, 0x04,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x41, +0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x22, +0x02,0xfb, 0x01,0x2b, 0x02,0x00, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, 0x02,0x00, +0x01,0x12, 0x02,0x00, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, +0x01,0x47, 0x03,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x19, 0x02,0x00, 0x02,0x2b, 0x01,0x00, 0x01,0x55, +0x01,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfb, 0x01,0x2b, 0x03,0x00, +0x01,0x36, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, +0x01,0x00, 0x0f,0xfb, 0x01,0x87, 0x80,0xce,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x01,0x2b, +0x01,0x00, 0x10,0xfd, 0x01,0x00, 0x01,0x41, 0x33,0xfd, 0x01,0x82, 0x22,0xfb, 0x01,0x82, 0x0e,0xfd, +0x01,0x8b, 0x25,0xfb, 0x0c,0xfd, 0x23,0xfb, 0x01,0x8b, 0x0e,0xfd, 0x01,0x82, 0x23,0xfb, 0x01,0x82, +0x80,0xce,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x01,0x2b, 0x01,0x00, 0x0f,0xfd, 0x01,0x19, +0x01,0x00, 0x34,0xfd, 0x01,0x7d, 0x22,0xfb, 0x01,0x8b, 0x0e,0xfd, 0x26,0xfb, 0x0c,0xfd, 0x24,0xfb, +0x0e,0xfd, 0x01,0x8b, 0x23,0xfb, 0x01,0x7d, 0x80,0xce,0xfd, 0x01,0x41, 0x01,0x19, 0x06,0xfd, +0x01,0x41, 0x01,0x19, 0x0f,0xfd, 0x01,0x2e, 0x01,0x41, 0x34,0xfd, 0x01,0x7a, 0x22,0xfb, 0x01,0x8b, +0x0e,0xfd, 0x26,0xfb, 0x0c,0xfd, 0x24,0xfb, 0x0e,0xfd, 0x01,0x8b, 0x23,0xfb, 0x01,0x7a, 0x81,0x1d,0xfd, +0x01,0x77, 0x22,0xfb, 0x0e,0xfd, 0x01,0x89, 0x26,0xfb, 0x0c,0xfd, 0x24,0xfb, 0x01,0x89, 0x0e,0xfd, +0x23,0xfb, 0x01,0x77, 0x81,0x1d,0xfd, 0x01,0x74, 0x21,0xfb, 0x01,0x71, 0x0e,0xfd, 0x01,0x7d, +0x26,0xfb, 0x0c,0xfd, 0x24,0xfb, 0x01,0x7d, 0x0e,0xfd, 0x01,0x71, 0x22,0xfb, 0x01,0x74, 0x81,0x1d,0xfd, +0x01,0x71, 0x21,0xfb, 0x01,0x7d, 0x0e,0xfd, 0x01,0x73, 0x26,0xfb, 0x0c,0xfd, 0x24,0xfb, 0x01,0x73, +0x0e,0xfd, 0x01,0x7d, 0x22,0xfb, 0x01,0x71, 0x81,0x1d,0xfd, 0x01,0x6f, 0x21,0xfb, 0x01,0x84, +0x0e,0xfd, 0x27,0xfb, 0x0c,0xfd, 0x25,0xfb, 0x0e,0xfd, 0x01,0x84, 0x22,0xfb, 0x01,0x6f, 0x81,0x1d,0xfd, +0x01,0x6e, 0x21,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, 0x27,0xfb, 0x0c,0xfd, 0x25,0xfb, 0x01,0x8b, +0x0d,0xfd, 0x01,0x8b, 0x22,0xfb, 0x01,0x6e, 0x81,0x1d,0xfd, 0x01,0x6d, 0x21,0xfb, 0x01,0x8b, +0x0d,0xfd, 0x01,0x8b, 0x27,0xfb, 0x01,0x82, 0x0a,0xfd, 0x01,0x82, 0x25,0xfb, 0x01,0x8b, 0x0d,0xfd, +0x01,0x8b, 0x22,0xfb, 0x01,0x6d, 0x81,0x1d,0xfd, 0x22,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, +0x28,0xfb, 0x0a,0xfd, 0x26,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, 0x23,0xfb, 0x81,0x1d,0xfd, +0x22,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, 0x28,0xfb, 0x01,0x75, 0x08,0xfd, 0x01,0x75, 0x26,0xfb, +0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, 0x23,0xfb, 0x81,0x1d,0xfd, 0x22,0xfb, 0x01,0x8b, 0x0d,0xfd, +0x01,0x8b, 0x2a,0xfb, 0x01,0x82, 0x04,0xfd, 0x01,0x82, 0x28,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, +0x23,0xfb, 0x81,0x1d,0xfd, 0x22,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x89, 0x58,0xfb, 0x01,0x89, +0x0d,0xfd, 0x01,0x8b, 0x23,0xfb, 0x81,0x1d,0xfd, 0x22,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x89, +0x58,0xfb, 0x01,0x89, 0x0d,0xfd, 0x01,0x8b, 0x23,0xfb, 0x81,0x1d,0xfd, 0x01,0x6d, 0x21,0xfb, +0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, 0x58,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, 0x22,0xfb, 0x01,0x6d, +0x81,0x1d,0xfd, 0x01,0x6e, 0x21,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, 0x58,0xfb, 0x01,0x8b, +0x0d,0xfd, 0x01,0x8b, 0x22,0xfb, 0x01,0x6e, 0x81,0x1d,0xfd, 0x01,0x6f, 0x21,0xfb, 0x01,0x8b, +0x0d,0xfd, 0x01,0x8b, 0x58,0xfb, 
0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, 0x22,0xfb, 0x01,0x6f, 0x81,0x1d,0xfd, +0x01,0x71, 0x21,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, 0x58,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, +0x22,0xfb, 0x01,0x71, 0x81,0x1d,0xfd, 0x01,0x74, 0x21,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, +0x58,0xfb, 0x01,0x8b, 0x0d,0xfd, 0x01,0x8b, 0x22,0xfb, 0x01,0x74, 0x81,0x16,0xfd, 0x01,0x41, +0x01,0x83, 0x05,0xfd, 0x01,0x77, 0x21,0xfb, 0x01,0x84, 0x0e,0xfd, 0x58,0xfb, 0x0e,0xfd, 0x01,0x84, +0x22,0xfb, 0x01,0x77, 0x02,0xfd, 0x01,0x55, 0x01,0x6c, 0x35,0xfd, 0x01,0x41, 0x01,0x83, 0x80,0x8c,0xfd, +0x01,0x6c, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x2e, 0x02,0xfd, 0x01,0x2b, 0x01,0x19, 0x1a,0xfd, +0x02,0x6c, 0x01,0xfd, 0x01,0x41, 0x24,0xfd, 0x01,0x2b, 0x01,0x19, 0x02,0xfd, 0x01,0x00, 0x01,0x41, +0x05,0xfd, 0x01,0x7a, 0x1a,0xfb, 0x01,0x22, 0x03,0x00, 0x01,0x2b, 0x01,0x47, 0x01,0xfb, 0x01,0x7d, +0x0e,0xfd, 0x01,0x73, 0x56,0xfb, 0x01,0x73, 0x0e,0xfd, 0x01,0x7d, 0x14,0xfb, 0x01,0x03, 0x01,0x2b, +0x04,0xfb, 0x01,0x2b, 0x01,0x03, 0x06,0xfb, 0x01,0x7a, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x1a,0xfd, +0x01,0x6c, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x2e, 0x02,0xfd, 0x01,0x2b, 0x01,0x19, 0x12,0xfd, +0x01,0x00, 0x01,0x41, 0x80,0x8c,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0x41, 0x01,0x19, 0x01,0x2b, +0x02,0xfd, 0x01,0x19, 0x01,0x41, 0x1a,0xfd, 0x02,0x19, 0x01,0xfd, 0x01,0x00, 0x24,0xfd, 0x01,0x00, +0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x05,0xfd, 0x01,0x7d, 0x19,0xfb, 0x01,0x2b, 0x01,0x00, +0x01,0x2b, 0x01,0x22, 0x01,0x12, 0x01,0x2b, 0x01,0x00, 0x01,0xfb, 0x01,0x71, 0x0e,0xfd, 0x01,0x7d, +0x15,0xfb, 0x01,0x36, 0x01,0x47, 0x35,0xfb, 0x01,0x59, 0x01,0x22, 0x08,0xfb, 0x01,0x7d, 0x07,0xfd, +0x01,0x55, 0x01,0x6c, 0x05,0xfd, 0x01,0x71, 0x14,0xfb, 0x01,0x2b, 0x01,0x00, 0x04,0xfb, 0x01,0x00, +0x01,0x2b, 0x06,0xfb, 0x01,0x7d, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x41, 0x01,0x83, +0x16,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0x41, 0x01,0x19, 0x01,0x2b, 0x02,0xfd, 0x01,0x19, 0x01,0x41, +0x12,0xfd, 0x01,0x00, 0x01,0x41, 0x80,0x8b,0xfd, 0x01,0x41, 0x01,0x00, 0x4a,0xfd, 0x01,0x00, +0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x05,0xfd, 0x01,0x82, 0x18,0xfb, 0x01,0x36, 0x01,0x00, +0x01,0x36, 0x07,0xfb, 0x0e,0xfd, 0x01,0x89, 0x15,0xfb, 0x01,0x00, 0x01,0x2b, 0x35,0xfb, 0x01,0x22, +0x01,0x00, 0x08,0xfb, 0x01,0x89, 0x07,0xfd, 0x01,0x00, 0x01,0x2b, 0x05,0xfd, 0x15,0xfb, 0x01,0x2b, +0x01,0x00, 0x04,0xfb, 0x01,0x00, 0x01,0x2b, 0x06,0xfb, 0x01,0x82, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x15,0xfd, 0x01,0x41, 0x01,0x00, 0x1a,0xfd, 0x01,0x00, 0x01,0x41, +0x80,0x8b,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2b, 0x05,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, +0x01,0x55, 0x03,0x00, 0x01,0x55, 0x06,0xfd, 0x01,0x00, 0x02,0x2b, 0x02,0x00, 0x01,0x55, 0x01,0x19, +0x02,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, +0x01,0x55, 0x03,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x2e, 0x03,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x19, +0x03,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x00, 0x01,0x2e, 0x03,0x00, 0x01,0x83, 0x06,0xfd, 0x01,0x00, +0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x2e, 0x03,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x87, 0x01,0x00, +0x01,0x2b, 0x02,0x00, 0x02,0xfb, 0x01,0x2b, 0x02,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, +0x01,0x03, 0x02,0x00, 0x01,0x03, 0x06,0xfb, 0x01,0x2b, 0x01,0x00, 0x08,0xfb, 0x01,0x13, 0x03,0x00, +0x01,0x19, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x2e, 0x02,0x00, 0x01,0x2b, 0x01,0x83, 0x03,0x00, +0x01,0x59, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x03,0x00, 0x01,0x36, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 
+0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x2b, 0x03,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x36, +0x03,0x00, 0x01,0x36, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0x00, 0x05,0xfb, 0x01,0x22, 0x01,0x00, +0x01,0x36, 0x02,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x03, 0x03,0x00, 0x01,0x59, 0x02,0xfb, 0x01,0x00, +0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x06,0xfb, 0x01,0x12, 0x03,0x00, 0x01,0x36, 0x01,0xfb, +0x04,0x00, 0x01,0xfb, 0x01,0x47, 0x04,0x00, 0x01,0xfc, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x19, +0x01,0x00, 0x02,0x2b, 0x03,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x55, 0x03,0x00, 0x01,0x36, 0x02,0xfb, +0x01,0x00, 0x02,0x2b, 0x02,0x00, 0x01,0x36, 0x0a,0xfb, 0x01,0x2b, 0x01,0x00, 0x04,0xfb, 0x01,0x00, +0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x03,0x00, 0x01,0x0e, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, +0x04,0x00, 0x02,0xfd, 0x01,0x55, 0x03,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x00, 0x02,0x2b, 0x02,0x00, +0x01,0x55, 0x05,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2b, 0x05,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, +0x01,0x55, 0x03,0x00, 0x01,0x55, 0x07,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x2b, 0x01,0x00, 0x01,0x41, +0x01,0xfd, 0x01,0x6c, 0x04,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x2b, 0x05,0x00, 0x01,0x41, 0x01,0xfd, +0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x76,0xfd, 0x01,0x19, 0x02,0x00, 0x01,0x41, +0x03,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x19, 0x01,0xfd, 0x01,0x07, +0x01,0x00, 0x06,0xfd, 0x02,0x00, 0x01,0x07, 0x01,0xfd, 0x02,0x00, 0x01,0x2b, 0x01,0x83, 0x01,0x07, +0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x00, +0x01,0x2b, 0x02,0xfd, 0x01,0x6c, 0x02,0xfd, 0x01,0x00, 0x01,0x19, 0x01,0xfd, 0x02,0x83, 0x01,0xfd, +0x01,0x41, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x02,0x00, 0x01,0x41, +0x01,0x83, 0x01,0x00, 0x01,0x07, 0x06,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x02,0x00, 0x01,0x41, +0x01,0x83, 0x01,0x00, 0x01,0x07, 0x01,0xfd, 0x01,0x8b, 0x02,0x00, 0x01,0x47, 0x02,0xfb, 0x01,0x2b, +0x01,0x00, 0x02,0x59, 0x01,0x00, 0x01,0x03, 0x01,0xfb, 0x01,0x2b, 0x02,0x00, 0x01,0x59, 0x01,0x2b, +0x01,0x00, 0x06,0xfb, 0x01,0x00, 0x01,0x2b, 0x07,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0x41, 0x01,0xfd, +0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x02,0x00, 0x01,0x6c, 0x01,0x55, 0x02,0x00, +0x01,0x3e, 0x01,0x59, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x02,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x2b, +0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x00, +0x01,0x2b, 0x02,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0x03, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, +0x02,0x00, 0x01,0x22, 0x06,0xfb, 0x01,0x22, 0x02,0x00, 0x01,0x47, 0x01,0x36, 0x01,0x00, 0x01,0x36, +0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0x36, 0x01,0xfb, 0x02,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x22, +0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x06,0xfb, 0x01,0x00, 0x01,0x03, 0x01,0xfb, 0x02,0x59, 0x01,0xfb, +0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x47, 0x01,0x22, 0x01,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0xf7, +0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, +0x01,0x83, 0x01,0x00, 0x01,0x11, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x02,0x00, 0x01,0x2b, +0x01,0xfb, 0x02,0x00, 0x0a,0xfb, 0x01,0x2b, 0x06,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x22, 0x01,0x47, +0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x41, +0x02,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x19, 0x01,0xfd, 0x01,0x07, 0x01,0x00, 0x02,0xfd, 0x02,0x00, +0x01,0x07, 0x01,0xfd, 0x02,0x00, 0x06,0xfd, 
0x01,0x19, 0x02,0x00, 0x01,0x41, 0x03,0xfd, 0x01,0x00, +0x01,0x2b, 0x01,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x19, 0x01,0xfd, 0x01,0x07, 0x01,0x00, 0x06,0xfd, +0x01,0x2b, 0x01,0x00, 0x01,0x6c, 0x01,0x83, 0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x6c, +0x01,0x41, 0x01,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x07, 0x05,0xfd, 0x02,0x00, 0x01,0x83, 0x01,0xfd, +0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x77,0xfd, 0x01,0x6c, 0x02,0x00, 0x01,0x19, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x03,0x41, 0x01,0x00, 0x01,0x55, +0x05,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0x2e, 0x04,0xfd, 0x02,0x00, 0x01,0x55, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0x41, 0x01,0x07, +0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2e, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x06,0xfd, 0x01,0x00, +0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x2e, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x8b, +0x01,0x00, 0x01,0x12, 0x03,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0x22, 0x02,0x00, 0x01,0xfb, 0x01,0x2b, +0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x06,0xfb, 0x01,0x00, 0x01,0x2b, 0x07,0xfb, 0x01,0x00, +0x01,0x2b, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x83, 0x02,0xfd, +0x01,0x00, 0x01,0x2e, 0x01,0xfd, 0x01,0x8b, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x00, 0x01,0x22, +0x02,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0x36, 0x01,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, +0x01,0x2b, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x03, 0x01,0x00, 0x03,0x22, 0x01,0x00, +0x01,0x36, 0x01,0xfb, 0x01,0x00, 0x01,0x2b, 0x07,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0x59, 0x02,0xfb, +0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x02,0x00, 0x02,0x22, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x00, +0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x06,0xfb, 0x02,0x00, 0x01,0x36, 0x03,0xfb, 0x01,0x22, +0x01,0x00, 0x07,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x83, 0x03,0xfd, +0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x19, 0x01,0x00, 0x01,0x33, 0x02,0x22, 0x01,0x00, 0x01,0x36, +0x01,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x0a,0xfb, 0x01,0x2b, 0x01,0x00, +0x04,0x22, 0x01,0x00, 0x01,0x2b, 0x05,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x19, 0x01,0x00, 0x03,0x41, 0x01,0x00, 0x01,0x55, +0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x07,0xfd, 0x01,0x6c, 0x02,0x00, +0x01,0x19, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x03,0x41, 0x01,0x00, +0x01,0x55, 0x05,0xfd, 0x01,0x00, 0x01,0x19, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x05,0xfd, 0x01,0x00, +0x01,0x2b, 0x04,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, +0x01,0x2b, 0x01,0x00, 0x79,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, +0x01,0xfd, 0x01,0x2b, 0x05,0x00, 0x01,0x55, 0x05,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, +0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, +0x01,0x00, 0x02,0xfd, 0x01,0x83, 0x01,0x2b, 0x02,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x6c, 0x01,0x2b, +0x02,0x00, 0x01,0x83, 0x01,0xfd, 0x06,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, +0x01,0x2b, 0x06,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, +0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x22, 0x03,0xfb, 0x05,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x2b, 
+0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x06,0xfb, 0x02,0x00, 0x07,0xfb, 0x01,0x00, 0x01,0x2b, +0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x41, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x03,0xfb, 0x01,0x00, 0x01,0x22, +0x01,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, +0x02,0xfb, 0x01,0x2b, 0x05,0x00, 0x01,0x36, 0x01,0xfb, 0x01,0x00, 0x01,0x2b, 0x07,0xfb, 0x01,0x22, +0x01,0x00, 0x03,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x06,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x22, +0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x06,0xfb, 0x01,0x47, 0x03,0x00, 0x01,0x59, 0x01,0xfb, 0x01,0x22, +0x01,0x00, 0x03,0xfb, 0x01,0x59, 0x01,0x2b, 0x03,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x41, 0x01,0x00, +0x04,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x05,0x00, 0x01,0x36, 0x01,0xfb, 0x01,0x00, +0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x0a,0xfb, 0x01,0x2b, 0x01,0x00, 0x04,0xfb, 0x01,0x00, +0x01,0x2b, 0x02,0xfb, 0x01,0x22, 0x04,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, +0x01,0x41, 0x02,0xfd, 0x01,0x2b, 0x05,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, +0x01,0x2b, 0x01,0x00, 0x09,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, +0x01,0xfd, 0x01,0x2b, 0x05,0x00, 0x01,0x55, 0x05,0xfd, 0x01,0x00, 0x01,0x41, 0x03,0xfd, 0x01,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x83, 0x01,0x2b, 0x03,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0x6c, 0x03,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x7a,0xfd, 0x01,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x0a,0xfd, 0x01,0x00, +0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, +0x01,0x00, 0x02,0xfd, 0x01,0x2e, 0x01,0x00, 0x04,0xfd, 0x01,0x83, 0x01,0x2b, 0x01,0x00, 0x04,0xfd, +0x01,0x6c, 0x02,0x00, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, +0x01,0x00, 0x01,0x2b, 0x06,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, +0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x22, 0x03,0xfb, 0x01,0x00, 0x01,0x2b, 0x05,0xfb, +0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x06,0xfb, 0x01,0x03, 0x01,0x00, 0x01,0x47, +0x06,0xfb, 0x02,0x00, 0x03,0xfd, 0x02,0x2b, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, +0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x47, +0x01,0x00, 0x01,0x47, 0x01,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, +0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x03, 0x01,0x00, 0x06,0xfb, 0x01,0x00, 0x01,0x2b, 0x07,0xfb, +0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x02,0x00, 0x06,0xfb, 0x01,0x00, +0x01,0x22, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x08,0xfb, 0x01,0x47, 0x02,0x00, 0x01,0xfb, 0x01,0x22, +0x01,0x00, 0x03,0xfb, 0x02,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x41, 0x01,0x00, +0x04,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x19, 0x01,0x00, 0x01,0x70, 0x05,0xfb, 0x01,0x00, +0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x0a,0xfb, 0x01,0x2b, 0x01,0x00, 0x04,0xfb, 0x01,0x00, +0x01,0x2b, 0x01,0xfb, 0x01,0x47, 0x01,0x00, 0x01,0x36, 0x01,0xfb, 0x01,0x28, 0x01,0x00, 0x02,0xfd, +0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x19, 0x01,0x00, 0x06,0xfd, +0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x0a,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, +0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x19, 
0x01,0x00, 0x0a,0xfd, 0x01,0x00, 0x01,0x2e, 0x03,0xfd, +0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, +0x01,0x19, 0x01,0x00, 0x01,0x55, 0x04,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, +0x75,0xfd, 0x01,0x55, 0x01,0x19, 0x01,0x6c, 0x01,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x83, 0x01,0xfd, +0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x01,0xfd, 0x01,0x55, +0x01,0x6c, 0x05,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, +0x01,0x00, 0x02,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x83, 0x01,0x41, 0x02,0x00, 0x02,0xfd, 0x01,0x41, +0x01,0x6c, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x2e, 0x02,0x83, 0x01,0x00, 0x01,0x2b, +0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x07, 0x02,0xfd, 0x01,0x2e, 0x02,0xfd, 0x01,0x00, 0x01,0x41, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x06,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x41, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x22, 0x03,0xfb, 0x01,0x2b, 0x01,0x00, +0x01,0x22, 0x01,0xfb, 0x01,0x59, 0x01,0x22, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, +0x01,0x00, 0x07,0xfb, 0x02,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x12, 0x01,0x2b, 0x01,0xfb, 0x01,0x22, +0x01,0x00, 0x01,0x3e, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, +0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x02,0x00, 0x01,0x47, +0x01,0x59, 0x02,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0x59, 0x02,0x00, 0x01,0x2b, 0x02,0xfb, +0x02,0x00, 0x02,0xfb, 0x01,0x59, 0x02,0x00, 0x01,0x59, 0x01,0xfb, 0x01,0x36, 0x01,0x47, 0x01,0xfb, +0x01,0x00, 0x01,0x2b, 0x07,0xfb, 0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, +0x01,0x36, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x12, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0x59, +0x02,0x00, 0x01,0x2b, 0x06,0xfb, 0x01,0x12, 0x02,0x59, 0x02,0x00, 0x01,0xfb, 0x01,0x36, 0x01,0x00, +0x01,0x36, 0x02,0xfb, 0x02,0x00, 0x01,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x41, +0x01,0x00, 0x04,0xfd, 0x02,0x00, 0x02,0xfd, 0x01,0x83, 0x02,0x00, 0x01,0x59, 0x01,0xfb, 0x01,0x36, +0x01,0x47, 0x01,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x02,0x03, +0x06,0xfb, 0x01,0x2b, 0x01,0x00, 0x04,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x36, 0x01,0x00, +0x01,0x36, 0x01,0x59, 0x01,0x2b, 0x01,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, +0x01,0x00, 0x01,0x07, 0x02,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x01,0xfd, 0x01,0x55, +0x01,0x6c, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x05,0xfd, 0x01,0x55, +0x01,0x19, 0x01,0x6c, 0x01,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, +0x01,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x01,0xfd, 0x01,0x55, 0x01,0x6c, 0x05,0xfd, +0x01,0x2b, 0x01,0x00, 0x01,0x83, 0x01,0x6c, 0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x02,0x00, +0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x01,0x2e, 0x05,0xfd, +0x02,0x00, 0x01,0xfd, 0x01,0x07, 0x02,0x00, 0x75,0xfd, 0x01,0x83, 0x01,0x2b, 0x03,0x00, 0x01,0x55, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x6c, 0x04,0x00, 0x01,0x6c, 0x05,0xfd, 0x01,0x00, +0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x83, +0x03,0x00, 0x01,0x2e, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x03,0x00, 0x01,0x55, 0x02,0xfd, 0x04,0x00, +0x01,0x83, 0x02,0xfd, 0x01,0x41, 0x04,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 
+0x01,0x2b, 0x06,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, +0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x22, 0x04,0xfb, 0x01,0x2b, 0x03,0x00, 0x01,0x2b, 0x01,0xfb, +0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x07,0xfb, 0x01,0x59, 0x01,0x2b, 0x04,0x00, +0x01,0x12, 0x02,0xfb, 0x01,0x03, 0x03,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, +0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x03,0x00, +0x01,0x59, 0x02,0xfb, 0x01,0x03, 0x02,0x00, 0x01,0x03, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x36, +0x02,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x47, 0x04,0x00, 0x01,0x47, 0x01,0xfb, 0x01,0x00, 0x01,0x2b, +0x07,0xfb, 0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x22, 0x04,0x00, +0x02,0xfb, 0x01,0x03, 0x02,0x00, 0x01,0x03, 0x01,0x00, 0x01,0x2b, 0x06,0xfb, 0x04,0x00, 0x01,0x59, +0x02,0xfb, 0x01,0x2b, 0x02,0x00, 0x01,0xfb, 0x01,0x36, 0x02,0x00, 0x01,0x2b, 0x01,0x19, 0x01,0x00, +0x01,0x07, 0x01,0x41, 0x01,0x00, 0x04,0xfd, 0x01,0x55, 0x02,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x5d, +0x04,0x00, 0x01,0x47, 0x01,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, +0x02,0x00, 0x06,0xfb, 0x01,0x2b, 0x01,0x00, 0x04,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x2b, +0x02,0x00, 0x01,0x1d, 0x02,0x00, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x19, 0x02,0x00, +0x02,0xfd, 0x01,0x6c, 0x04,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, +0x01,0x00, 0x05,0xfd, 0x01,0x83, 0x01,0x2b, 0x03,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x6c, 0x04,0x00, 0x01,0x6c, 0x05,0xfd, 0x01,0x83, 0x03,0x00, 0x01,0x2e, 0x01,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x55, 0x02,0x00, 0x01,0x2b, 0x01,0x19, 0x01,0x00, 0x01,0x07, 0x06,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x55, 0x02,0x00, 0x02,0x2b, 0x01,0x00, 0x80,0xcd,0xfd, 0x01,0x88, +0x23,0xfb, 0x0f,0xfd, 0x01,0x87, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x4c,0xfb, 0x01,0x87, 0x0f,0xfd, +0x24,0xfb, 0x01,0x88, 0x81,0x1f,0xfd, 0x01,0x8b, 0x23,0xfb, 0x0f,0xfd, 0x01,0x8b, 0x02,0xfb, +0x01,0x00, 0x01,0x22, 0x4c,0xfb, 0x01,0x8b, 0x0f,0xfd, 0x24,0xfb, 0x01,0x8b, 0x81,0x20,0xfd, +0x01,0x70, 0x22,0xfb, 0x01,0x8b, 0x0f,0xfd, 0x01,0x80, 0x01,0xfb, 0x01,0x2b, 0x01,0x47, 0x4b,0xfb, +0x01,0x80, 0x0f,0xfd, 0x01,0x8b, 0x23,0xfb, 0x01,0x70, 0x81,0x21,0xfd, 0x01,0x79, 0x22,0xfb, +0x01,0x7d, 0x10,0xfd, 0x4e,0xfb, 0x10,0xfd, 0x01,0x7d, 0x23,0xfb, 0x01,0x79, 0x81,0x21,0xfd, +0x01,0x82, 0x23,0xfb, 0x01,0x8b, 0x0f,0xfd, 0x01,0x89, 0x4c,0xfb, 0x01,0x89, 0x0f,0xfd, 0x01,0x8b, +0x24,0xfb, 0x01,0x82, 0x80,0xcd,0xfd, 0x01,0x83, 0x01,0x41, 0x31,0xfd, 0x01,0x83, 0x01,0x41, +0x0e,0xfd, 0x01,0x41, 0x01,0x83, 0x0f,0xfd, 0x01,0x8b, 0x23,0xfb, 0x01,0x7d, 0x10,0xfd, 0x1e,0xfb, +0x01,0x22, 0x01,0x59, 0x1a,0xfb, 0x01,0x36, 0x01,0x47, 0x10,0xfb, 0x10,0xfd, 0x01,0x7d, 0x08,0xfb, +0x01,0x36, 0x01,0x47, 0x16,0xfb, 0x01,0x22, 0x01,0x59, 0x02,0xfb, 0x01,0x8b, 0x18,0xfd, 0x01,0x41, +0x01,0x83, 0x16,0xfd, 0x01,0x83, 0x01,0x41, 0x16,0xfd, 0x01,0x6c, 0x01,0x55, 0x80,0x83,0xfd, +0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x19, 0x01,0x2b, 0x0d,0xfd, 0x01,0x19, 0x04,0x00, 0x01,0x2b, +0x01,0xfd, 0x01,0x19, 0x01,0x2b, 0x17,0xfd, 0x01,0x41, 0x01,0x00, 0x0e,0xfd, 0x01,0x00, 0x01,0x41, +0x10,0xfd, 0x01,0x6f, 0x1f,0xfb, 0x01,0x36, 0x01,0x00, 0x02,0xfb, 0x08,0xfd, 0x01,0x19, 0x01,0x2b, +0x06,0xfd, 0x01,0x8b, 0x0f,0xfb, 0x01,0x03, 0x02,0x00, 0x01,0x2b, 0x01,0x59, 0x09,0xfb, 0x01,0x00, +0x01,0x22, 0x1a,0xfb, 0x01,0x00, 0x01,0x2b, 0x0f,0xfb, 0x01,0x8b, 0x10,0xfd, 0x09,0xfb, 
0x01,0x00, +0x01,0x2b, 0x08,0xfb, 0x01,0x36, 0x02,0x59, 0x01,0x36, 0x0a,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, +0x01,0x6f, 0x19,0xfd, 0x01,0x00, 0x01,0x41, 0x16,0xfd, 0x01,0x41, 0x01,0x00, 0x08,0xfd, 0x01,0x83, +0x01,0x55, 0x01,0xfd, 0x01,0x55, 0x01,0x83, 0x09,0xfd, 0x01,0x2b, 0x01,0x00, 0x80,0x83,0xfd, +0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x19, 0x0d,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0x41, +0x01,0x55, 0x01,0xfd, 0x01,0x41, 0x01,0x19, 0x17,0xfd, 0x01,0x41, 0x01,0x00, 0x0e,0xfd, 0x01,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x83, 0x01,0x41, 0x03,0xfd, 0x01,0x6c, 0x01,0x55, 0x08,0xfd, 0x01,0x7a, +0x08,0xfb, 0x01,0x36, 0x01,0x47, 0x15,0xfb, 0x01,0x47, 0x01,0x2b, 0x02,0xfb, 0x01,0x8b, 0x07,0xfd, +0x01,0x41, 0x01,0x19, 0x07,0xfd, 0x01,0x7e, 0x0d,0xfb, 0x01,0x03, 0x01,0x00, 0x01,0x12, 0x01,0x22, +0x01,0x2b, 0x01,0x36, 0x09,0xfb, 0x01,0x00, 0x01,0x22, 0x1a,0xfb, 0x01,0x00, 0x01,0x2b, 0x0e,0xfb, +0x01,0x7e, 0x10,0xfd, 0x01,0x8b, 0x09,0xfb, 0x01,0x00, 0x01,0x2b, 0x08,0xfb, 0x01,0x2b, 0x02,0x36, +0x01,0x2b, 0x0a,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x7a, 0x03,0xfd, 0x01,0x6c, 0x01,0x55, +0x14,0xfd, 0x01,0x00, 0x01,0x41, 0x16,0xfd, 0x01,0x41, 0x01,0x00, 0x08,0xfd, 0x01,0x55, 0x01,0x2b, +0x01,0xfd, 0x01,0x2b, 0x01,0x55, 0x09,0xfd, 0x01,0x2b, 0x01,0x00, 0x80,0x83,0xfd, 0x01,0x41, +0x01,0x00, 0x11,0xfd, 0x01,0x2b, 0x01,0x00, 0x1e,0xfd, 0x01,0x41, 0x01,0x00, 0x0e,0xfd, 0x01,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x08,0xfd, 0x01,0x84, +0x08,0xfb, 0x01,0x00, 0x01,0x2b, 0x19,0xfb, 0x01,0x73, 0x10,0xfd, 0x01,0x8b, 0x0d,0xfb, 0x01,0x00, +0x01,0x2b, 0x0d,0xfb, 0x01,0x00, 0x01,0x22, 0x1a,0xfb, 0x01,0x00, 0x01,0x2b, 0x0e,0xfb, 0x01,0x8b, +0x10,0xfd, 0x01,0x73, 0x09,0xfb, 0x01,0x00, 0x01,0x2b, 0x16,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, +0x01,0x84, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x14,0xfd, 0x01,0x00, 0x01,0x41, 0x16,0xfd, 0x01,0x41, +0x01,0x00, 0x16,0xfd, 0x01,0x2b, 0x01,0x00, 0x7f,0xfd, 0x01,0x83, 0x03,0x00, 0x01,0x2b, 0x01,0x00, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x83, 0x03,0x00, 0x01,0x19, 0x06,0xfd, 0x01,0x2b, +0x01,0x00, 0x05,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x02,0x2b, 0x02,0x00, 0x01,0x55, +0x02,0xfd, 0x01,0x55, 0x03,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x55, 0x03,0x00, 0x01,0x2b, 0x01,0xfd, +0x01,0x41, 0x01,0x00, 0x01,0x55, 0x02,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x6c, 0x04,0x00, 0x01,0x83, +0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x04,0x00, 0x01,0x41, 0x03,0x00, 0x01,0x2b, 0x01,0xfd, +0x01,0x2e, 0x03,0x00, 0x01,0x2b, 0x01,0x8b, 0x01,0xfb, 0x01,0x36, 0x03,0x00, 0x01,0x03, 0x01,0xfb, +0x01,0x2b, 0x03,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x36, 0x03,0x00, 0x01,0x36, 0x07,0xfb, 0x01,0x2b, +0x02,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0x17, +0x02,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x83, 0x05,0x00, 0x01,0x8b, +0x02,0xfb, 0x01,0x2b, 0x02,0x00, 0x01,0x2b, 0x06,0xfb, 0x02,0x00, 0x01,0x36, 0x05,0xfb, 0x01,0x03, +0x03,0x00, 0x01,0x59, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0x36, +0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, +0x01,0x36, 0x02,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x03, 0x04,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x36, +0x03,0x00, 0x01,0x36, 0x02,0xfb, 0x01,0x00, 0x02,0x2b, 0x02,0x00, 0x01,0x54, 0x06,0xfd, 0x01,0x83, +0x05,0x00, 0x03,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x03, 0x04,0x00, 0x01,0x2b, +0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0x03, 0x01,0x00, 
0x01,0x2b, 0x01,0xfb, 0x01,0x00, 0x01,0x2b, +0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x59, 0x04,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x22, +0x01,0xfb, 0x01,0xfa, 0x01,0x00, 0x01,0x55, 0x01,0x41, 0x03,0x00, 0x01,0x2b, 0x06,0xfd, 0x01,0x2b, +0x03,0x00, 0x01,0x55, 0x03,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x02,0xfd, +0x01,0x19, 0x03,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x00, 0x01,0x07, 0x02,0x00, 0x05,0xfd, 0x01,0x83, +0x03,0x00, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x00, 0x02,0x41, +0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x55, 0x03,0x00, 0x01,0x2b, 0x01,0xfd, +0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x02,0x2b, 0x02,0xfd, 0x01,0x83, 0x03,0x00, 0x01,0x19, 0x02,0xfd, +0x01,0x2b, 0x01,0x00, 0x01,0x19, 0x02,0x00, 0x01,0x19, 0x6c,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0x83, +0x01,0x6c, 0x02,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, +0x01,0x55, 0x01,0x00, 0x01,0x6c, 0x05,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0x2b, 0x02,0xfd, 0x01,0x2b, +0x01,0x00, 0x02,0xfd, 0x02,0x00, 0x01,0x07, 0x01,0xfd, 0x02,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x6c, 0x01,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x01,0xfd, 0x01,0x6c, +0x01,0xfd, 0x01,0x41, 0x02,0x00, 0x01,0x6c, 0x01,0x55, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x6c, +0x01,0x41, 0x01,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x07, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, +0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, 0x02,0x55, 0x01,0xfd, 0x01,0x55, +0x01,0x00, 0x01,0x54, 0x01,0x75, 0x02,0x00, 0x02,0xfb, 0x01,0x47, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, +0x02,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0x03, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x06,0xfb, 0x01,0x2b, +0x01,0x00, 0x02,0x59, 0x01,0x00, 0x01,0x03, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, +0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x01,0x07, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, +0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x01,0x6c, 0x02,0x00, 0x01,0xfd, 0x01,0x87, 0x01,0x2b, 0x01,0x00, +0x02,0x59, 0x01,0x00, 0x01,0x03, 0x05,0xfb, 0x01,0x59, 0x02,0x00, 0x01,0x2b, 0x01,0x59, 0x02,0xfb, +0x01,0x22, 0x01,0x00, 0x01,0x36, 0x01,0xfb, 0x02,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0x47, +0x01,0x00, 0x01,0x36, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, +0x01,0x22, 0x02,0x00, 0x01,0x47, 0x01,0x36, 0x01,0x00, 0x01,0x36, 0x01,0xfb, 0x01,0x22, 0x01,0x00, +0x01,0x22, 0x01,0xfb, 0x01,0x03, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0x03, +0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x02,0x00, 0x01,0x2b, 0x01,0x87, 0x02,0x00, 0x06,0xfd, +0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x01,0x6c, 0x02,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0x59, +0x01,0x00, 0x01,0x03, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x03, 0x01,0x00, +0x01,0x2b, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0x2b, 0x01,0x59, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, +0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x02,0x00, 0x01,0x47, 0x01,0xfb, 0x01,0x47, 0x02,0xfb, +0x01,0x00, 0x01,0x22, 0x01,0x4d, 0x01,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x07,0xfd, +0x01,0x2b, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x2b, +0x01,0x00, 0x01,0x6c, 0x01,0x83, 0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, +0x01,0x55, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x02,0x00, 0x01,0x6c, 0x06,0xfd, 0x01,0x00, +0x01,0x2b, 0x01,0x83, 0x01,0x6c, 0x02,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x07, 0x02,0xfd, 
+0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x2b, +0x01,0x83, 0x01,0xfd, 0x01,0x6c, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x02,0x2b, 0x03,0xfd, +0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0x2b, 0x01,0x83, 0x01,0x07, 0x01,0x00, 0x6b,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x6c, 0x02,0xfd, +0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2e, +0x02,0x41, 0x01,0x00, 0x01,0x07, 0x05,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0x2b, 0x02,0xfd, 0x01,0x2b, +0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, +0x01,0x00, 0x01,0x2e, 0x03,0xfd, 0x01,0x07, 0x01,0x00, 0x05,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x83, +0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x05,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x41, +0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x07,0xfd, 0x01,0x00, 0x01,0x41, +0x01,0x80, 0x02,0x00, 0x01,0x12, 0x04,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x03, 0x01,0x00, +0x03,0x22, 0x01,0x00, 0x01,0x36, 0x05,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0x22, 0x02,0x00, 0x01,0xfb, +0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0x8b, 0x01,0xfd, 0x01,0x41, 0x01,0x00, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x6c, 0x02,0xfd, 0x01,0x2b, +0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0x22, 0x02,0x00, 0x07,0xfb, 0x01,0x2b, 0x02,0x00, +0x01,0x59, 0x01,0xfb, 0x02,0x00, 0x02,0x22, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x12, +0x01,0x00, 0x01,0x03, 0x03,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, +0x01,0x22, 0x01,0x00, 0x01,0x59, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x02,0x00, 0x03,0xfb, +0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x03, 0x01,0x00, 0x03,0x22, 0x01,0x00, 0x01,0x36, 0x01,0xfb, +0x01,0x00, 0x01,0x2b, 0x01,0x87, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x05,0xfd, 0x01,0x55, 0x01,0x00, +0x01,0x6c, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0x22, 0x02,0x00, +0x01,0xfb, 0x02,0x00, 0x03,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0x59, +0x03,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0xfb, 0x01,0x36, 0x01,0x00, +0x01,0x47, 0x05,0xfb, 0x01,0x00, 0x01,0x12, 0x01,0x00, 0x01,0x19, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, +0x07,0xfd, 0x01,0x00, 0x01,0x19, 0x03,0xfd, 0x01,0x00, 0x01,0x07, 0x01,0xfd, 0x01,0x00, 0x01,0x19, +0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0x41, 0x01,0x07, 0x01,0x00, +0x02,0xfd, 0x01,0x00, 0x01,0x2e, 0x06,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x6c, 0x02,0xfd, 0x01,0x41, +0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x07, 0x01,0x00, 0x05,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x19, 0x01,0x00, +0x01,0x83, 0x02,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2e, 0x02,0x41, 0x01,0x00, 0x01,0x07, 0x01,0xfd, +0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x6b,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, +0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x41, 0x05,0x00, 0x01,0x19, +0x05,0xfd, 0x01,0x2b, 0x01,0x00, 0x05,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x83, 0x01,0x2b, 0x02,0x00, 0x01,0x55, 0x01,0xfd, +0x01,0x2b, 0x01,0x00, 0x05,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, +0x01,0x83, 0x01,0x2b, 0x03,0x00, 0x01,0x2b, 
0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, +0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, 0x01,0x6c, 0x04,0x00, 0x01,0x41, 0x01,0x8b, +0x01,0x59, 0x01,0x2b, 0x02,0x00, 0x01,0x36, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x2b, +0x05,0x00, 0x01,0x36, 0x05,0xfb, 0x05,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, +0x01,0x2b, 0x01,0x00, 0x01,0x71, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x05,0x00, 0x01,0x2b, +0x08,0xfb, 0x01,0x59, 0x02,0x00, 0x01,0xfb, 0x06,0x00, 0x02,0xfb, 0x03,0x00, 0x01,0x59, 0x03,0xfb, +0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x03,0xfb, +0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, +0x01,0x2b, 0x05,0x00, 0x01,0x36, 0x01,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, +0x05,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x05,0x00, 0x01,0x2b, +0x01,0xfb, 0x01,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x22, 0x01,0x00, +0x04,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0xfb, 0x01,0x22, 0x01,0x00, +0x06,0xfb, 0x03,0x00, 0x01,0x83, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x07,0xfd, 0x01,0x00, 0x01,0x41, +0x03,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x00, 0x01,0x41, 0x03,0xfd, 0x01,0x00, 0x01,0x41, +0x01,0xfd, 0x06,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x06,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, +0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, +0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x05,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x07, +0x03,0xfd, 0x01,0x41, 0x05,0x00, 0x01,0x19, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, +0x01,0x00, 0x6b,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, +0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x55, 0x09,0xfd, 0x01,0x2b, 0x01,0x00, +0x05,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, +0x04,0xfd, 0x01,0x83, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x05,0xfd, 0x01,0x41, +0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, +0x01,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, +0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x07, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x75, +0x01,0xfb, 0x01,0x59, 0x02,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x03, 0x01,0x00, +0x0a,0xfb, 0x01,0x00, 0x01,0x2b, 0x05,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, +0x01,0xfb, 0x01,0x8b, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x55, +0x01,0x00, 0x01,0x6c, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0xf7, 0x01,0x84, +0x0c,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0xfb, 0x02,0x00, 0x06,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0x00, +0x01,0x59, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x12, 0x01,0x00, 0x02,0xfb, 0x01,0x22, +0x01,0x00, 0x03,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x00, +0x01,0x2b, 0x01,0xfb, 0x01,0x03, 0x01,0x00, 0x06,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, +0x01,0x00, 0x05,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x6c, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, +0x01,0x8b, 0x01,0x00, 0x01,0x2b, 0x05,0xfb, 0x01,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x00, 0x01,0x2b, 
+0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x04,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, +0x01,0xfb, 0x01,0x47, 0x01,0x00, 0x01,0x47, 0x05,0xfb, 0x01,0x00, 0x01,0x28, 0x02,0x00, 0x01,0x83, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x07,0xfd, 0x01,0x00, 0x01,0x19, 0x03,0xfd, 0x01,0x00, 0x01,0x19, +0x01,0xfd, 0x01,0x00, 0x01,0x2e, 0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, +0x06,0xfd, 0x01,0x00, 0x01,0x41, 0x06,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x41, +0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x83, +0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x05,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x2e, +0x01,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x55, 0x05,0xfd, 0x01,0x2b, 0x01,0x00, +0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x6c,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x41, 0x02,0x00, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x02,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x6c, 0x01,0x55, +0x05,0xfd, 0x01,0x2b, 0x01,0x00, 0x05,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x6c, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x2b, 0x02,0x83, 0x01,0x41, 0x01,0xfd, 0x01,0x41, 0x01,0x00, +0x03,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x02,0x00, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x07, +0x01,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x00, 0x01,0x07, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x01,0x2e, +0x01,0xfd, 0x01,0x32, 0x01,0x47, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x02,0x00, 0x02,0xfb, +0x01,0x59, 0x02,0x00, 0x01,0x59, 0x01,0xfb, 0x01,0x36, 0x01,0x47, 0x05,0xfb, 0x01,0x2b, 0x01,0x00, +0x01,0x22, 0x01,0xfb, 0x01,0x59, 0x01,0x22, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, +0x01,0x00, 0x02,0xfb, 0x01,0x3e, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x02,0x00, +0x01,0x41, 0x01,0x2b, 0x02,0x00, 0x02,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x41, 0x01,0x82, 0x01,0x59, +0x01,0x22, 0x05,0xfb, 0x01,0x2b, 0x01,0x22, 0x01,0xfb, 0x01,0x47, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, +0x01,0x36, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x12, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0x59, +0x02,0x00, 0x02,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0x59, 0x01,0x22, 0x02,0x00, 0x02,0xfb, 0x01,0x22, +0x01,0x00, 0x03,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x03, 0x01,0x00, 0x01,0x47, 0x01,0x59, +0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x59, 0x02,0x00, 0x01,0x59, 0x01,0xfb, 0x01,0x36, +0x01,0x47, 0x01,0x82, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x02,0x00, +0x01,0x41, 0x01,0x2b, 0x02,0x00, 0x01,0xfd, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0x22, 0x01,0xfb, +0x01,0x59, 0x01,0x22, 0x01,0xfb, 0x01,0x03, 0x01,0x00, 0x01,0x47, 0x01,0x59, 0x01,0x2b, 0x01,0x00, +0x01,0x2b, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x04,0xfb, 0x02,0x00, 0x01,0xfb, 0x01,0x2b, 0x02,0x00, +0x02,0xfb, 0x02,0x00, 0x01,0x47, 0x01,0xfb, 0x01,0x12, 0x02,0xfb, 0x01,0x00, 0x01,0x32, 0x01,0x83, +0x02,0x00, 0x02,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x83, 0x06,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x6c, +0x01,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x83, 0x01,0x6c, +0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x07, 0x02,0xfd, 0x01,0x2e, +0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x07,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x41, 0x02,0x00, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, 
0x01,0x55, 0x01,0x00, 0x01,0x55, 0x01,0x6c, 0x02,0x00, +0x01,0x41, 0x01,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x2b, 0x02,0x83, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, +0x01,0x00, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x01,0x55, 0x02,0xfd, 0x02,0x00, 0x01,0x6c, 0x01,0xfd, +0x01,0x6c, 0x01,0x55, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x6c,0xfd, +0x01,0x55, 0x03,0x00, 0x01,0x07, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x83, +0x01,0x2b, 0x03,0x00, 0x01,0x2e, 0x05,0xfd, 0x01,0x2b, 0x05,0x00, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x03,0x00, +0x01,0x55, 0x02,0xfd, 0x01,0x55, 0x03,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, +0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x55, 0x02,0x00, 0x01,0x2b, 0x01,0x19, 0x01,0x00, 0x01,0x07, +0x01,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0xfd, 0x01,0x83, 0x02,0x00, +0x01,0x2b, 0x01,0xfd, 0x01,0x19, 0x02,0x00, 0x02,0x2b, 0x01,0x00, 0x01,0x55, 0x01,0x2b, 0x03,0x00, +0x01,0x36, 0x02,0xfb, 0x01,0x36, 0x02,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x47, 0x04,0x00, 0x01,0x47, +0x06,0xfb, 0x01,0x2b, 0x03,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x01,0x2b, +0x01,0x00, 0x02,0xfb, 0x01,0x2e, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x83, +0x01,0x2b, 0x01,0x00, 0x02,0x2b, 0x01,0x00, 0x03,0xfd, 0x01,0x07, 0x03,0x00, 0x01,0x2b, 0x05,0xfb, +0x01,0x12, 0x03,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x22, 0x04,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x22, +0x01,0xfb, 0x01,0x59, 0x02,0x00, 0x01,0xfb, 0x01,0x59, 0x03,0x00, 0x01,0x12, 0x01,0x00, 0x02,0xfb, +0x01,0x22, 0x01,0x00, 0x03,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x2b, 0x02,0x00, 0x01,0x2b, +0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x47, 0x04,0x00, 0x01,0x56, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x06,0xfd, 0x01,0x83, 0x01,0x2b, 0x01,0x00, 0x02,0x2b, 0x01,0x00, +0x01,0x7c, 0x02,0xfb, 0x01,0x2b, 0x03,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x2b, 0x02,0x00, 0x01,0x2b, +0x01,0x00, 0x01,0x2b, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x04,0xfb, 0x01,0x36, 0x02,0x00, 0x02,0x2b, +0x01,0x00, 0x02,0xfb, 0x01,0x59, 0x04,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x39, 0x01,0xfd, 0x01,0x83, +0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x83, 0x02,0x00, 0x01,0x2b, 0x06,0xfd, 0x01,0x2b, 0x03,0x00, +0x01,0x55, 0x02,0xfd, 0x01,0x83, 0x03,0x00, 0x01,0x2e, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x41, +0x04,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x07,0xfd, 0x01,0x55, 0x03,0x00, 0x01,0x07, 0x01,0x00, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x04,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x55, 0x01,0x00, 0x01,0x41, +0x02,0xfd, 0x01,0x55, 0x03,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x07, +0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x83, 0x01,0x2b, 0x03,0x00, 0x01,0x2e, 0x01,0xfd, 0x01,0x2b, +0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x80,0xc8,0xfd, 0x01,0x79, 0x25,0xfb, 0x01,0x82, +0x0a,0xfd, 0x01,0x00, 0x01,0x2b, 0x07,0xfd, 0x01,0x87, 0x38,0xfb, 0x01,0x87, 0x12,0xfd, 0x01,0x00, +0x01,0x2b, 0x26,0xfb, 0x01,0x79, 0x81,0x29,0xfd, 0x01,0x86, 0x26,0xfb, 0x01,0x8b, 0x05,0xfd, +0x01,0x00, 0x02,0x2b, 0x02,0x00, 0x01,0x6c, 0x08,0xfd, 0x01,0x87, 0x36,0xfb, 0x01,0x87, 0x0f,0xfd, +0x01,0x00, 0x02,0x2b, 0x02,0x00, 0x01,0x47, 0x26,0xfb, 0x01,0x86, 0x81,0x2a,0xfd, 0x01,0x72, +0x25,0xfb, 0x01,0x7c, 0x05,0xfd, 0x01,0x6c, 0x01,0x07, 0x01,0x2b, 0x01,0x07, 0x01,0x83, 0x0a,0xfd, +0x01,0x8b, 0x34,0xfb, 0x01,0x8b, 0x10,0xfd, 0x01,0x6c, 0x01,0x07, 0x01,0x2b, 0x01,0x07, 0x01,0x66, +0x26,0xfb, 
0x01,0x72, 0x81,0x2b,0xfd, 0x01,0x80, 0x26,0xfb, 0x01,0x87, 0x14,0xfd, 0x01,0x8b, +0x01,0x7e, 0x30,0xfb, 0x01,0x7e, 0x01,0x8b, 0x14,0xfd, 0x01,0x87, 0x27,0xfb, 0x01,0x80, 0x81,0x2b,0xfd, +0x01,0x8b, 0x01,0x6d, 0x26,0xfb, 0x01,0x8b, 0x15,0xfd, 0x01,0x8b, 0x2e,0xfb, 0x01,0x8b, 0x15,0xfd, +0x01,0x8b, 0x27,0xfb, 0x01,0x6d, 0x01,0x8b, 0x80,0xe8,0xfd, 0x01,0x83, 0x01,0x41, 0x42,0xfd, +0x01,0xfc, 0x27,0xfb, 0x01,0x8b, 0x16,0xfd, 0x01,0x89, 0x2a,0xfb, 0x01,0x89, 0x16,0xfd, 0x01,0x8b, +0x28,0xfb, 0x01,0xfc, 0x80,0xcf,0xfd, 0x01,0x6c, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x2e, +0x02,0xfd, 0x01,0x2b, 0x01,0x19, 0x11,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x19, 0x01,0x2b, +0x0d,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x6c, 0x03,0xfd, 0x01,0x55, 0x01,0x2b, 0x29,0xfd, 0x01,0x8b, +0x0b,0xfb, 0x07,0x00, 0x01,0x2b, 0x15,0xfb, 0x01,0x8b, 0x17,0xfd, 0x01,0x80, 0x26,0xfb, 0x01,0x80, +0x17,0xfd, 0x01,0x8b, 0x29,0xfb, 0x01,0x8b, 0x80,0xcf,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0x41, +0x01,0x19, 0x01,0x2b, 0x02,0xfd, 0x01,0x19, 0x01,0x41, 0x11,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, +0x01,0x41, 0x01,0x19, 0x0d,0xfd, 0x01,0x41, 0x02,0x00, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x18,0xfd, +0x01,0x55, 0x01,0x6c, 0x0f,0xfd, 0x01,0x6b, 0x01,0x3f, 0x0a,0xfb, 0x03,0x22, 0x02,0x00, 0x02,0x22, +0x01,0x36, 0x0f,0xfb, 0x01,0x22, 0x01,0x59, 0x05,0xfb, 0x01,0x8b, 0x17,0xfd, 0x01,0x8b, 0x01,0x87, +0x22,0xfb, 0x01,0x87, 0x01,0x8b, 0x17,0xfd, 0x01,0x8b, 0x29,0xfb, 0x01,0x7b, 0x80,0xcf,0xfd, +0x01,0x41, 0x01,0x00, 0x19,0xfd, 0x01,0x41, 0x01,0x00, 0x11,0xfd, 0x01,0x41, 0x02,0x00, 0x01,0x2e, +0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x18,0xfd, 0x01,0x00, 0x01,0x2b, 0x0f,0xfd, 0x01,0x2b, 0x01,0x00, +0x0d,0xfb, 0x01,0x00, 0x01,0x2b, 0x12,0xfb, 0x01,0x00, 0x01,0x22, 0x05,0xfb, 0x01,0x6e, 0x01,0x8b, +0x19,0xfd, 0x01,0x89, 0x1e,0xfb, 0x01,0x89, 0x19,0xfd, 0x01,0x8b, 0x01,0x6e, 0x29,0xfb, 0x01,0x8b, +0x80,0xcf,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2b, 0x05,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, +0x01,0x55, 0x03,0x00, 0x01,0x55, 0x06,0xfd, 0x01,0x83, 0x03,0x00, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, +0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x83, 0x03,0x00, 0x01,0x19, 0x06,0xfd, 0x01,0x41, 0x01,0x00, +0x01,0x2b, 0x01,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x83, 0x03,0x00, +0x01,0x19, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x55, +0x03,0x00, 0x01,0x19, 0x01,0xfd, 0x01,0x2b, 0x03,0x00, 0x01,0x41, 0x01,0xfd, 0x04,0x00, 0x01,0x55, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0x00, 0x01,0x41, 0x03,0x00, 0x01,0x2b, 0x0b,0xfb, 0x01,0x00, +0x01,0x2b, 0x04,0xfb, 0x01,0x2b, 0x03,0x00, 0x01,0x03, 0x02,0xfb, 0x01,0x12, 0x03,0x00, 0x01,0x36, +0x01,0xfb, 0x04,0x00, 0x02,0xfb, 0x01,0x03, 0x03,0x00, 0x01,0x7a, 0x1b,0xfd, 0x01,0x8b, 0x18,0xfb, +0x01,0x8b, 0x1b,0xfd, 0x01,0x8b, 0x2a,0xfb, 0x01,0x7c, 0x80,0xd1,0xfd, 0x01,0x19, 0x02,0x00, +0x01,0x41, 0x03,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x19, 0x01,0xfd, +0x01,0x07, 0x01,0x00, 0x06,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x01,0x6c, 0x02,0x00, 0x02,0xfd, +0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x6c, +0x05,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x41, 0x01,0x00, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x2b, +0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x6c, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x2e, 0x01,0x83, 0x01,0xfd, 0x02,0x00, 0x02,0xfd, 
+0x02,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x8b, 0x0c,0xfb, 0x01,0x00, 0x01,0x2b, +0x04,0xfb, 0x01,0x22, 0x01,0x47, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x03, +0x01,0xfb, 0x02,0x59, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x01,0x36, +0x01,0xfb, 0x02,0x00, 0x01,0x8b, 0x1c,0xfd, 0x01,0x8b, 0x01,0x89, 0x01,0x7c, 0x10,0xfb, 0x01,0x7c, +0x01,0x89, 0x01,0x8b, 0x1c,0xfd, 0x01,0x8b, 0x2b,0xfb, 0x01,0x8b, 0x80,0xd2,0xfd, 0x01,0x6c, +0x02,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x03,0x41, +0x01,0x00, 0x01,0x55, 0x04,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x6c, 0x02,0xfd, 0x01,0x41, 0x01,0x00, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2e, 0x02,0x41, 0x01,0x00, +0x01,0x07, 0x05,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x55, 0x01,0x41, +0x01,0x00, 0x01,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2e, 0x02,0x41, 0x01,0x00, 0x01,0x07, 0x01,0xfd, +0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x2e, +0x04,0xfd, 0x01,0x00, 0x01,0x2b, 0x06,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, +0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x7d, 0x01,0xfb, 0x01,0x36, 0x04,0x22, 0x01,0x36, +0x04,0xfb, 0x01,0x00, 0x01,0x2b, 0x07,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x02,0x00, 0x01,0x36, +0x04,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x02,0x00, 0x02,0x22, 0x01,0x2b, 0x01,0x00, 0x01,0xfb, +0x01,0x8b, 0x20,0xfd, 0x04,0x8b, 0x04,0x89, 0x04,0x8b, 0x20,0xfd, 0x01,0x8b, 0x2b,0xfb, 0x01,0x7d, +0x80,0xd5,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, +0x01,0x2b, 0x05,0x00, 0x01,0x55, 0x04,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x41, 0x01,0x00, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x41, 0x05,0x00, 0x01,0x19, 0x05,0xfd, 0x01,0x41, +0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x01,0x00, 0x01,0xfd, 0x01,0x41, 0x05,0x00, +0x01,0x19, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x83, +0x01,0x2b, 0x02,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x07, 0x04,0x00, +0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x8b, 0x01,0x70, +0x01,0x2b, 0x04,0x00, 0x01,0x2b, 0x04,0xfb, 0x01,0x00, 0x01,0x2b, 0x04,0xfb, 0x01,0x22, 0x04,0x00, +0x02,0xfb, 0x01,0x47, 0x03,0x00, 0x01,0x59, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x06,0x00, +0x02,0xfb, 0x01,0x8b, 0x4a,0xfd, 0x01,0x8b, 0x2b,0xfb, 0x01,0x70, 0x01,0x8b, 0x80,0xd6,0xfd, +0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x09,0xfd, +0x01,0x55, 0x01,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x55, 0x09,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x83, +0x03,0x00, 0x01,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x55, 0x05,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, +0x01,0x2e, 0x01,0x00, 0x04,0xfd, 0x01,0x83, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, +0x02,0xfd, 0x01,0x19, 0x01,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, +0x01,0x2b, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x81, 0x0a,0xfb, 0x01,0x00, 0x01,0x2b, +0x03,0xfb, 0x01,0x47, 0x01,0x00, 0x01,0x36, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x04,0xfb, 0x01,0x47, +0x02,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x02,0xfb, 0x02,0x00, 0x07,0xfb, 0x01,0x87, 0x48,0xfd, +0x01,0x87, 0x2c,0xfb, 0x01,0x81, 0x80,0xd2,0xfd, 0x01,0x55, 0x01,0x19, 
0x01,0x6c, 0x01,0xfd, +0x01,0x07, 0x01,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x83, 0x01,0x00, +0x01,0x2b, 0x01,0x83, 0x01,0xfd, 0x01,0x55, 0x01,0x6c, 0x05,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, +0x01,0x41, 0x02,0x00, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x02,0x00, 0x01,0x6c, 0x01,0xfd, +0x01,0x6c, 0x01,0x55, 0x05,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x2e, 0x02,0x00, 0x02,0xfd, +0x02,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x6c, 0x01,0x55, 0x01,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x83, +0x01,0x41, 0x02,0x00, 0x02,0xfd, 0x01,0x41, 0x01,0x6c, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, +0x02,0x00, 0x02,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x83, 0x01,0x6c, 0x02,0x00, 0x02,0xfd, 0x01,0x00, +0x01,0x2b, 0x03,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x8b, 0x01,0x75, 0x09,0xfb, +0x01,0x00, 0x01,0x2b, 0x03,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0x36, 0x01,0x59, 0x01,0x2b, 0x01,0x00, +0x01,0x59, 0x01,0xfb, 0x01,0x12, 0x02,0x59, 0x02,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, +0x01,0x36, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, 0x01,0x12, 0x02,0xfb, 0x01,0x2b, 0x01,0x22, 0x01,0x7a, +0x01,0x8b, 0x44,0xfd, 0x01,0x8b, 0x01,0x7a, 0x2c,0xfb, 0x01,0x75, 0x01,0x8b, 0x80,0xd2,0xfd, +0x01,0x83, 0x01,0x2b, 0x03,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x6c, +0x04,0x00, 0x01,0x6c, 0x05,0xfd, 0x01,0x55, 0x03,0x00, 0x01,0x07, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, +0x01,0x00, 0x02,0xfd, 0x01,0x83, 0x01,0x2b, 0x03,0x00, 0x01,0x2e, 0x05,0xfd, 0x01,0x41, 0x01,0x00, +0x04,0xfd, 0x02,0x00, 0x02,0xfd, 0x01,0x83, 0x01,0x2b, 0x03,0x00, 0x01,0x2e, 0x01,0xfd, 0x01,0x83, +0x03,0x00, 0x01,0x2e, 0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x03,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x55, +0x02,0x00, 0x01,0x41, 0x01,0x83, 0x03,0x00, 0x01,0x2e, 0x02,0x00, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, +0x03,0xfd, 0x01,0x83, 0x02,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x88, 0x09,0xfb, 0x01,0x00, 0x01,0x2b, +0x04,0xfb, 0x01,0x2b, 0x02,0x00, 0x01,0x12, 0x02,0x00, 0x01,0xfb, 0x04,0x00, 0x01,0x59, 0x02,0xfb, +0x01,0x03, 0x02,0x00, 0x02,0xfb, 0x01,0x22, 0x04,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x2b, 0x02,0xfb, +0x01,0x82, 0x42,0xfd, 0x01,0x82, 0x2e,0xfb, 0x01,0x88, 0x81,0x38,0xfd, 0x01,0xfc, 0x2d,0xfb, +0x01,0x7c, 0x40,0xfd, 0x01,0x7c, 0x2e,0xfb, 0x01,0xfc, 0x81,0x39,0xfd, 0x01,0x8b, 0x01,0x72, +0x2e,0xfb, 0x01,0x85, 0x3c,0xfd, 0x01,0x85, 0x2f,0xfb, 0x01,0x72, 0x01,0x8b, 0x81,0x3a,0xfd, +0x01,0x85, 0x2f,0xfb, 0x01,0x6e, 0x01,0x8b, 0x38,0xfd, 0x01,0x8b, 0x01,0x6e, 0x30,0xfb, 0x01,0x85, +0x81,0x3c,0xfd, 0x01,0xfc, 0x31,0xfb, 0x01,0x8b, 0x34,0xfd, 0x01,0x8b, 0x32,0xfb, 0x01,0xfc, +0x81,0x3d,0xfd, 0x01,0x8b, 0x01,0x74, 0x31,0xfb, 0x01,0x73, 0x01,0x8b, 0x30,0xfd, 0x01,0x8b, +0x01,0x73, 0x32,0xfb, 0x01,0x74, 0x01,0x8b, 0x81,0x3e,0xfd, 0x01,0x88, 0x34,0xfb, 0x01,0x7c, +0x01,0x8b, 0x2a,0xfd, 0x01,0x8b, 0x01,0x7c, 0x35,0xfb, 0x01,0x88, 0x81,0x40,0xfd, 0x01,0x81, +0x35,0xfb, 0x01,0x7d, 0x01,0x8b, 0x26,0xfd, 0x01,0x8b, 0x01,0x7d, 0x36,0xfb, 0x01,0x81, 0x81,0x42,0xfd, +0x01,0x7b, 0x38,0xfb, 0x01,0x87, 0x20,0xfd, 0x01,0x87, 0x39,0xfb, 0x01,0x7b, 0x81,0x43,0xfd, +0x01,0x8b, 0x01,0x75, 0x3a,0xfb, 0x01,0x77, 0x01,0x80, 0x02,0x8b, 0x14,0xfd, 0x02,0x8b, 0x01,0x80, +0x01,0x77, 0x3b,0xfb, 0x01,0x75, 0x01,0x8b, 0x81,0x44,0xfd, 0x01,0x8b, 0x01,0x70, 0x3e,0xfb, +0x01,0x71, 0x01,0x7d, 0x01,0x84, 0x0c,0x8b, 0x01,0x84, 0x01,0x7d, 0x01,0x71, 0x3f,0xfb, 0x01,0x70, +0x01,0x8b, 0x81,0x46,0xfd, 0x01,0x86, 0x80,0x8f,0xfb, 0x01,0x86, 0x81,0x48,0xfd, +0x01,0x82, 0x80,0x8d,0xfb, 0x01,0x82, 0x81,0x4a,0xfd, 0x01,0x7f, 0x80,0x8b,0xfb, +0x01,0x7f, 
0x81,0x4c,0xfd, 0x01,0x7d, 0x80,0x89,0xfb, 0x01,0x7d, 0x81,0x4e,0xfd, +0x01,0x7c, 0x80,0x87,0xfb, 0x01,0x7c, 0x81,0x4f,0xfd, 0x01,0x8b, 0x01,0x7b, 0x80,0x85,0xfb, +0x01,0x7b, 0x01,0x8b, 0x81,0x50,0xfd, 0x01,0x8b, 0x01,0x7b, 0x80,0x83,0xfb, 0x01,0x7b, +0x01,0x8b, 0x81,0x52,0xfd, 0x01,0x8b, 0x01,0x7c, 0x80,0x81,0xfb, 0x01,0x7c, 0x01,0x8b, +0x81,0x55,0xfd, 0x01,0x7d, 0x7f,0xfb, 0x01,0x7d, 0x81,0x58,0xfd, 0x01,0x7f, 0x7d,0xfb, +0x01,0x7f, 0x81,0x08,0xfd, 0x02,0x2b, 0x01,0x6c, 0x1f,0xfd, 0x01,0x07, 0x01,0x55, 0x0b,0xfd, +0x01,0x41, 0x01,0x55, 0x06,0xfd, 0x01,0x83, 0x0b,0x41, 0x01,0x83, 0x03,0xfd, 0x01,0x19, 0x01,0x55, +0x09,0xfd, 0x01,0x82, 0x02,0xfb, 0x01,0x22, 0x01,0x2b, 0x01,0x03, 0x01,0xfb, 0x01,0x47, 0x01,0x12, +0x0a,0xfb, 0x01,0x2b, 0x16,0xfb, 0x01,0x00, 0x01,0x03, 0x07,0xfb, 0x01,0x59, 0x0b,0x22, 0x01,0x59, +0x04,0xfb, 0x01,0x22, 0x01,0x59, 0x04,0xfb, 0x01,0x59, 0x01,0x2b, 0x05,0xfb, 0x02,0x22, 0x0a,0xfb, +0x02,0x47, 0x0e,0xfb, 0x01,0x03, 0x01,0x47, 0x0c,0xfb, 0x01,0x2b, 0x01,0x82, 0x3e,0xfd, 0x01,0x41, +0x02,0xfd, 0x01,0x2e, 0x01,0x41, 0x01,0x00, 0x06,0xfd, 0x01,0x07, 0x01,0x55, 0x18,0xfd, 0x01,0x41, +0x01,0x55, 0x80,0x8e,0xfd, 0x01,0x83, 0x0c,0xfd, 0x01,0x41, 0x01,0x00, 0x05,0xfd, 0x01,0x41, +0x01,0x19, 0x01,0x6c, 0x01,0x2b, 0x1e,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x83, 0x0b,0xfd, 0x01,0x00, +0x01,0x6c, 0x02,0xfd, 0x01,0x83, 0x03,0xfd, 0x01,0x6c, 0x05,0x2b, 0x01,0x00, 0x05,0x2b, 0x01,0x6c, +0x03,0xfd, 0x01,0x2b, 0x01,0x41, 0x01,0xfd, 0x01,0x55, 0x04,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, +0x03,0x00, 0x01,0x22, 0x02,0xfb, 0x01,0x22, 0x01,0x2b, 0x0a,0xfb, 0x01,0x00, 0x08,0xfb, 0x01,0x2b, +0x05,0x00, 0x01,0x36, 0x07,0xfb, 0x01,0x47, 0x02,0x00, 0x01,0x22, 0x01,0xfb, 0x01,0x2b, 0x01,0x36, +0x02,0xfb, 0x01,0x47, 0x03,0x2b, 0x02,0x00, 0x01,0x2b, 0x02,0x00, 0x03,0x2b, 0x01,0x47, 0x04,0xfb, +0x01,0x00, 0x04,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0x2b, 0x01,0x12, 0x04,0xfb, 0x02,0x2b, 0x02,0xfb, +0x01,0x59, 0x01,0x22, 0x01,0x47, 0x05,0xfb, 0x01,0x2b, 0x01,0x12, 0x01,0xfb, 0x01,0x03, 0x01,0x2b, +0x01,0x22, 0x0a,0xfb, 0x01,0x00, 0x01,0x22, 0x0b,0xfb, 0x01,0x70, 0x01,0x00, 0x15,0xfd, 0x01,0x83, +0x01,0x41, 0x02,0xfd, 0x01,0x6c, 0x01,0x55, 0x01,0x41, 0x01,0x2b, 0x01,0x00, 0x01,0x83, 0x02,0xfd, +0x01,0x55, 0x08,0x41, 0x01,0x83, 0x14,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x2b, +0x01,0x41, 0x04,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x83, 0x08,0xfd, 0x01,0x83, 0x0f,0xfd, 0x01,0x00, +0x01,0x6c, 0x02,0xfd, 0x01,0x83, 0x7e,0xfd, 0x09,0x00, 0x04,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0x83, +0x0a,0xfd, 0x01,0x41, 0x01,0x00, 0x05,0xfd, 0x01,0x6c, 0x02,0x2b, 0x01,0x6c, 0x1e,0xfd, 0x01,0x2b, +0x06,0x00, 0x03,0xfd, 0x01,0x55, 0x08,0x00, 0x01,0x55, 0x08,0xfd, 0x01,0x00, 0x07,0xfd, 0x01,0x2b, +0x04,0x00, 0x01,0x55, 0x03,0xfd, 0x01,0x41, 0x01,0x2b, 0x01,0xfd, 0x01,0x6c, 0x03,0x2b, 0x01,0x00, +0x02,0x2b, 0x01,0xfb, 0x01,0x22, 0x01,0x2b, 0x04,0xfb, 0x01,0x03, 0x03,0x2b, 0x06,0x00, 0x01,0x2b, +0x04,0xfb, 0x01,0x36, 0x01,0x22, 0x01,0x36, 0x01,0xfb, 0x02,0x00, 0x0a,0xfb, 0x01,0x47, 0x01,0x00, +0x01,0x59, 0x01,0x00, 0x01,0x59, 0x03,0xfb, 0x01,0x47, 0x02,0x22, 0x01,0x00, 0x01,0x2b, 0x01,0x22, +0x01,0x00, 0x01,0x2b, 0x02,0x22, 0x01,0x59, 0x04,0xfb, 0x01,0x59, 0x01,0x00, 0x04,0xfb, 0x01,0x59, +0x01,0x2b, 0x01,0x03, 0x01,0x22, 0x01,0xfb, 0x01,0x22, 0x07,0x00, 0x01,0x2b, 0x01,0x47, 0x05,0xfb, +0x01,0x00, 0x01,0x36, 0x01,0x00, 0x01,0x2b, 0x01,0x22, 0x01,0x00, 0x01,0x03, 0x04,0xfb, 0x01,0x36, +0x01,0x00, 0x03,0x2b, 0x05,0x00, 0x03,0xfb, 0x01,0x03, 0x03,0x2b, 0x06,0x00, 0x01,0x2b, 0x11,0xfd, +0x01,0x41, 0x01,0x00, 0x02,0xfd, 
0x01,0x41, 0x01,0x00, 0x01,0x07, 0x01,0x55, 0x01,0x07, 0x01,0x55, +0x02,0xfd, 0x01,0x00, 0x07,0x2b, 0x01,0x00, 0x01,0x2e, 0x14,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x2e, +0x06,0xfd, 0x01,0x2b, 0x06,0x00, 0x04,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x09,0xfd, 0x01,0x55, +0x08,0x00, 0x01,0x55, 0x80,0x84,0xfd, 0x01,0x41, 0x01,0x00, 0x05,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0x55, 0x04,0xfd, 0x01,0x2e, 0x01,0x19, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x83, +0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x1f,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x83, 0x03,0xfd, 0x01,0x07, +0x01,0x2b, 0x06,0xfd, 0x01,0x00, 0x01,0x6c, 0x09,0xfd, 0x09,0x00, 0x05,0xfd, 0x01,0x2b, 0x01,0x41, +0x05,0xfd, 0x01,0x41, 0x01,0x2b, 0x01,0xfd, 0x01,0x83, 0x02,0x41, 0x01,0x07, 0x01,0x2b, 0x01,0x2e, +0x01,0x22, 0x01,0x2b, 0x04,0x00, 0x02,0xfb, 0x05,0x22, 0x01,0x47, 0x01,0x00, 0x03,0xfb, 0x01,0x59, +0x07,0xfb, 0x01,0x00, 0x01,0x2b, 0x09,0xfb, 0x01,0x00, 0x03,0xfb, 0x02,0x2b, 0x04,0xfb, 0x01,0x2b, +0x01,0x00, 0x01,0x2b, 0x02,0x00, 0x01,0x2b, 0x02,0x00, 0x01,0x2b, 0x01,0x00, 0x01,0x22, 0x02,0xfb, +0x01,0x59, 0x01,0x22, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x03, 0x01,0xfb, 0x01,0x59, 0x01,0x00, +0x01,0x22, 0x06,0xfb, 0x01,0x2b, 0x01,0x22, 0x02,0xfb, 0x01,0x2b, 0x07,0xfb, 0x01,0x00, 0x01,0x2b, +0x01,0x03, 0x02,0xfb, 0x01,0x47, 0x01,0x00, 0x05,0xfb, 0x01,0x36, 0x03,0x22, 0x01,0x00, 0x01,0x2b, +0x01,0x47, 0x05,0xfb, 0x03,0x22, 0x01,0x2e, 0x01,0x3a, 0x01,0x6c, 0x01,0x00, 0x03,0xfd, 0x01,0x83, +0x11,0xfd, 0x01,0x2e, 0x01,0x2b, 0x03,0xfd, 0x01,0x00, 0x01,0x83, 0x01,0x07, 0x01,0x19, 0x03,0xfd, +0x01,0x00, 0x01,0x41, 0x06,0xfd, 0x01,0x00, 0x01,0x83, 0x0f,0xfd, 0x01,0x2b, 0x09,0x00, 0x01,0x2b, +0x04,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x83, 0x03,0xfd, 0x01,0x07, 0x01,0x2b, 0x05,0xfd, 0x01,0x2b, +0x01,0x00, 0x01,0x55, 0x04,0xfd, 0x01,0x2e, 0x01,0x19, 0x05,0xfd, 0x01,0x00, 0x01,0x6c, 0x80,0x89,0xfd, +0x01,0x41, 0x01,0x00, 0x06,0xfd, 0x01,0x19, 0x01,0x2b, 0x04,0xfd, 0x01,0x00, 0x01,0x19, 0x03,0xfd, +0x01,0x41, 0x01,0x00, 0x01,0x83, 0x01,0x2e, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x1f,0xfd, +0x01,0x55, 0x01,0x00, 0x02,0x55, 0x03,0xfd, 0x01,0x00, 0x01,0x55, 0x05,0xfd, 0x02,0x2b, 0x01,0x07, +0x01,0x2b, 0x01,0x83, 0x02,0xfd, 0x01,0x83, 0x04,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x03,0xfd, +0x01,0x00, 0x02,0xfd, 0x01,0x6c, 0x02,0x2b, 0x01,0x00, 0x02,0x2b, 0x01,0x41, 0x04,0x00, 0x01,0x2b, +0x02,0xfd, 0x06,0x00, 0x01,0xfb, 0x01,0x22, 0x01,0x2b, 0x01,0xfb, 0x01,0x00, 0x05,0xfb, 0x01,0x03, +0x01,0x00, 0x01,0x2b, 0x01,0x00, 0x09,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0x03, 0x08,0xfb, 0x01,0x2b, +0x01,0xfb, 0x01,0x00, 0x02,0xfb, 0x01,0x47, 0x01,0x00, 0x01,0x47, 0x01,0x22, 0x03,0xfb, 0x01,0x2b, +0x01,0x22, 0x01,0xfb, 0x01,0x2b, 0x01,0x22, 0x01,0xfb, 0x01,0x2b, 0x01,0x22, 0x01,0xfb, 0x01,0x2b, +0x01,0x22, 0x01,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x22, 0x01,0x2b, +0x01,0x00, 0x01,0xfb, 0x01,0x36, 0x01,0x00, 0x06,0xfb, 0x04,0x2b, 0x01,0x00, 0x07,0xfb, 0x02,0x00, +0x04,0xfb, 0x01,0x00, 0x01,0x36, 0x04,0xfb, 0x01,0x47, 0x03,0xfb, 0x01,0x00, 0x03,0x22, 0x01,0x03, +0x05,0xfb, 0x01,0x81, 0x01,0x15, 0x01,0x00, 0x01,0x2b, 0x01,0x00, 0x15,0xfd, 0x01,0x2b, 0x01,0x07, +0x03,0xfd, 0x01,0x00, 0x01,0x6c, 0x05,0xfd, 0x01,0x00, 0x01,0x41, 0x05,0xfd, 0x01,0x6c, 0x01,0x00, +0x15,0xfd, 0x01,0x00, 0x08,0xfd, 0x01,0x55, 0x01,0x00, 0x02,0x55, 0x03,0xfd, 0x01,0x00, 0x01,0x55, +0x06,0xfd, 0x01,0x19, 0x01,0x2b, 0x04,0xfd, 0x01,0x00, 0x01,0x19, 0x04,0xfd, 0x02,0x2b, 0x01,0x07, +0x01,0x2b, 0x01,0x83, 0x02,0xfd, 0x01,0x83, 0x80,0x84,0xfd, 0x01,0x41, 0x01,0x00, 
0x0b,0xfd, +0x01,0x2e, 0x01,0x00, 0x04,0xfd, 0x01,0x41, 0x02,0x00, 0x01,0x2b, 0x01,0x2e, 0x09,0xfd, 0x01,0x2b, +0x05,0x00, 0x05,0xfd, 0x01,0x19, 0x09,0x2b, 0x01,0x19, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0x55, +0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x05,0xfd, 0x01,0x55, 0x02,0x00, 0x01,0x2e, +0x01,0x19, 0x01,0x00, 0x01,0x2e, 0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x03,0xfd, 0x09,0x00, 0x02,0xfd, +0x01,0x83, 0x02,0x41, 0x01,0x2b, 0x01,0x07, 0x01,0x41, 0x01,0x6c, 0x01,0x00, 0x06,0xfd, 0x01,0x00, +0x02,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x00, 0x01,0x88, 0x02,0x2b, 0x01,0xfb, 0x01,0x00, 0x04,0xfb, +0x01,0x22, 0x01,0x00, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x08,0xfb, 0x01,0x59, 0x05,0x00, 0x01,0x03, +0x04,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0xfb, 0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x22, 0x01,0x59, +0x01,0x00, 0x01,0x59, 0x02,0xfb, 0x01,0x2b, 0x09,0x00, 0x01,0x22, 0x04,0xfb, 0x01,0x00, 0x01,0x47, +0x02,0xfb, 0x01,0x00, 0x02,0xfb, 0x01,0x00, 0x01,0x03, 0x04,0xfb, 0x01,0x2b, 0x02,0x00, 0x01,0x22, +0x03,0x00, 0x01,0x03, 0x05,0xfb, 0x01,0x00, 0x01,0x12, 0x04,0xfb, 0x01,0x00, 0x01,0x22, 0x04,0xfb, +0x06,0x00, 0x02,0x2b, 0x01,0x22, 0x03,0xfb, 0x01,0x74, 0x01,0x88, 0x01,0x41, 0x01,0x2b, 0x01,0xfd, +0x01,0x41, 0x01,0x00, 0x15,0xfd, 0x01,0x00, 0x01,0x55, 0x03,0xfd, 0x01,0x2b, 0x01,0x07, 0x05,0xfd, +0x01,0x2b, 0x01,0x55, 0x05,0xfd, 0x01,0x19, 0x01,0x00, 0x03,0xfd, 0x01,0x19, 0x09,0x2b, 0x01,0x19, +0x04,0xfd, 0x01,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x00, 0x01,0xfd, 0x01,0x83, 0x01,0x00, 0x04,0xfd, +0x01,0x41, 0x01,0x00, 0x02,0x55, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x0c,0xfd, +0x01,0x2e, 0x01,0x00, 0x04,0xfd, 0x01,0x55, 0x02,0x00, 0x01,0x2e, 0x01,0x19, 0x01,0x00, 0x01,0x2e, +0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x80,0x83,0xfd, 0x01,0x41, 0x01,0x00, 0x0a,0xfd, 0x01,0x55, +0x01,0x00, 0x01,0x55, 0x04,0xfd, 0x01,0x41, 0x01,0x00, 0x10,0xfd, 0x01,0x41, 0x01,0x00, 0x05,0xfd, +0x01,0x19, 0x09,0x2b, 0x01,0x19, 0x03,0xfd, 0x01,0x6c, 0x03,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x55, +0x04,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x2e, 0x01,0xfd, 0x01,0x83, 0x01,0x07, 0x01,0x00, 0x01,0x2b, +0x01,0x55, 0x05,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, +0x01,0x2e, 0x01,0x2b, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x83, 0x02,0xfd, 0x01,0x00, +0x01,0x41, 0x01,0x07, 0x01,0x2b, 0x01,0x41, 0x01,0x00, 0x01,0xfd, 0x01,0x2b, 0x01,0x2f, 0x01,0xfb, +0x01,0x00, 0x04,0xfb, 0x01,0x2b, 0x01,0x22, 0x01,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0x36, 0x06,0xfb, +0x01,0x36, 0x02,0x00, 0x01,0x03, 0x01,0x59, 0x01,0xfb, 0x01,0x47, 0x02,0x00, 0x03,0xfb, 0x02,0x2b, +0x01,0xfb, 0x01,0x00, 0x01,0xfb, 0x02,0x2b, 0x02,0xfb, 0x01,0x03, 0x01,0x2b, 0x05,0xfb, 0x01,0x47, +0x01,0x00, 0x01,0x59, 0x08,0xfb, 0x01,0x59, 0x01,0x00, 0x03,0xfb, 0x01,0x00, 0x02,0xfb, 0x01,0x47, +0x01,0x00, 0x03,0xfb, 0x01,0x00, 0x02,0x2b, 0x01,0x22, 0x01,0x59, 0x01,0x00, 0x01,0xfb, 0x01,0x59, +0x01,0x00, 0x01,0x36, 0x04,0xfb, 0x01,0x00, 0x01,0x59, 0x04,0xfb, 0x01,0x00, 0x01,0x22, 0x08,0xfb, +0x01,0x2b, 0x01,0x22, 0x05,0xfb, 0x01,0xfc, 0x01,0x8b, 0x01,0xfd, 0x01,0x2b, 0x01,0x41, 0x01,0xfd, +0x01,0x83, 0x01,0x00, 0x01,0x55, 0x13,0xfd, 0x01,0x6c, 0x01,0x00, 0x04,0xfd, 0x01,0x55, 0x01,0x00, +0x0c,0xfd, 0x01,0x00, 0x01,0x41, 0x03,0xfd, 0x01,0x19, 0x09,0x2b, 0x01,0x19, 0x03,0xfd, 0x01,0x55, +0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x04,0xfd, 0x01,0x6c, 0x03,0xfd, +0x01,0x2b, 0x02,0x00, 0x01,0x55, 0x0b,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x55, 0x03,0xfd, 0x01,0x41, +0x01,0x00, 0x01,0x2e, 0x01,0xfd, 0x01,0x83, 
0x01,0x07, 0x01,0x00, 0x01,0x2b, 0x01,0x55, 0x80,0x85,0xfd, +0x01,0x41, 0x01,0x00, 0x09,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x19, 0x05,0xfd, 0x01,0x41, 0x01,0x00, +0x10,0xfd, 0x01,0x2e, 0x01,0x2b, 0x18,0xfd, 0x01,0x00, 0x01,0x2b, 0x05,0xfd, 0x01,0x55, 0x01,0x41, +0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x07, 0x01,0x00, 0x05,0xfd, 0x01,0x6c, 0x01,0x41, 0x01,0x00, +0x03,0x41, 0x01,0x00, 0x03,0x41, 0x01,0x00, 0x01,0x41, 0x01,0x83, 0x01,0xfd, 0x01,0x2b, 0x01,0x41, +0x01,0x2b, 0x02,0x00, 0x01,0x19, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, +0x04,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x00, 0x01,0x69, 0x01,0x85, 0x01,0x00, 0x04,0xfb, 0x01,0x22, +0x01,0x00, 0x01,0x22, 0x02,0x00, 0x01,0x22, 0x05,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0x22, 0x06,0xfb, +0x01,0x00, 0x01,0x36, 0x02,0xfb, 0x01,0x00, 0x01,0x47, 0x01,0xfb, 0x01,0x00, 0x01,0x03, 0x01,0x00, +0x03,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0xfb, 0x01,0x03, 0x0b,0x00, 0x01,0x36, 0x02,0xfb, 0x02,0x2b, +0x03,0xfb, 0x01,0x00, 0x03,0xfb, 0x01,0x2b, 0x01,0x59, 0x01,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0xfb, +0x01,0x2b, 0x01,0x22, 0x01,0x00, 0x01,0x12, 0x02,0xfb, 0x02,0x2b, 0x04,0xfb, 0x01,0x00, 0x04,0xfb, +0x01,0x59, 0x01,0x00, 0x06,0xfb, 0x01,0x36, 0x02,0x2b, 0x01,0x00, 0x01,0x03, 0x03,0xfb, 0x01,0x72, +0x01,0x85, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x13,0xfd, +0x02,0x2b, 0x05,0xfd, 0x01,0x00, 0x01,0x41, 0x0a,0xfd, 0x01,0x07, 0x01,0x00, 0x12,0xfd, 0x01,0x2b, +0x01,0x07, 0x02,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x2e, 0x01,0x2b, 0x09,0xfd, 0x01,0x00, 0x01,0x2b, +0x0b,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x19, 0x04,0xfd, 0x01,0x55, 0x01,0x41, 0x01,0xfd, 0x01,0x55, +0x01,0x00, 0x01,0x07, 0x01,0x00, 0x80,0x87,0xfd, 0x01,0x41, 0x01,0x00, 0x07,0xfd, 0x01,0x6c, +0x01,0x2b, 0x01,0x00, 0x01,0x55, 0x06,0xfd, 0x01,0x41, 0x01,0x00, 0x10,0xfd, 0x01,0x2b, 0x01,0x07, +0x17,0xfd, 0x02,0x2b, 0x08,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x00, 0x05,0xfd, +0x01,0x41, 0x01,0x2b, 0x01,0x00, 0x07,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x6c, 0x01,0xfd, 0x03,0x2b, +0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x03,0x2b, 0x01,0x00, +0x01,0x2b, 0x01,0x19, 0x01,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0xfc, 0x04,0xfb, 0x01,0x22, +0x01,0x2b, 0x01,0x22, 0x01,0x00, 0x01,0x59, 0x06,0xfb, 0x01,0x47, 0x01,0xfb, 0x03,0x00, 0x01,0x36, +0x02,0xfb, 0x01,0x00, 0x01,0x36, 0x01,0xfb, 0x01,0x22, 0x01,0x00, 0x02,0xfb, 0x02,0x00, 0x03,0xfb, +0x01,0x00, 0x01,0xfb, 0x01,0x00, 0x01,0x12, 0x02,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0x12, 0x02,0xfb, +0x01,0x36, 0x01,0x00, 0x06,0xfb, 0x01,0x00, 0x01,0x47, 0x02,0xfb, 0x01,0x22, 0x01,0x00, 0x06,0xfb, +0x01,0x2b, 0x01,0x22, 0x01,0xfb, 0x01,0x2b, 0x02,0x00, 0x03,0xfb, 0x01,0x00, 0x01,0x03, 0x09,0xfb, +0x01,0x2b, 0x01,0x00, 0x05,0xfb, 0x02,0x00, 0x01,0x12, 0x01,0x22, 0x03,0x00, 0x01,0x22, 0x01,0xfc, +0x01,0x8b, 0x05,0xfd, 0x01,0x41, 0x01,0x2b, 0x01,0x41, 0x01,0x00, 0x01,0x83, 0x05,0xfd, 0x01,0x2b, +0x02,0x00, 0x01,0x83, 0x09,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x83, 0x05,0xfd, 0x01,0x41, 0x01,0x00, +0x01,0x83, 0x08,0xfd, 0x01,0x19, 0x01,0x00, 0x01,0x6c, 0x11,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x83, +0x02,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x55, 0x07,0xfd, 0x02,0x2b, 0x0a,0xfd, 0x01,0x6c, +0x01,0x2b, 0x01,0x00, 0x01,0x55, 0x07,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x00, +0x80,0x80,0xfd, 0x09,0x00, 0x03,0xfd, 0x01,0x83, 0x01,0x41, 0x01,0x19, 0x02,0x00, 0x01,0x2b, +0x01,0x83, 0x07,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x55, 0x03,0xfd, 0x01,0x6c, 0x01,0x41, 0x01,0x2e, 
+0x09,0xfd, 0x01,0x00, 0x01,0x41, 0x15,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2b, 0x09,0xfd, 0x01,0x41, +0x01,0x00, 0x02,0xfd, 0x01,0x55, 0x07,0xfd, 0x01,0x00, 0x07,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x00, +0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x2b, 0x03,0x00, 0x01,0x2b, 0x02,0xfd, 0x02,0x41, +0x01,0x07, 0x01,0x2b, 0x01,0x41, 0x01,0x2e, 0x01,0x19, 0x01,0x2b, 0x01,0xfd, 0x01,0x55, 0x01,0x00, +0x01,0xfd, 0x01,0x88, 0x01,0x75, 0x04,0xfb, 0x01,0x00, 0x01,0x2b, 0x08,0xfb, 0x01,0x22, 0x01,0x2b, +0x01,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0xfb, 0x01,0x59, 0x01,0x36, +0x01,0x47, 0x02,0x00, 0x04,0xfb, 0x01,0x00, 0x01,0xfb, 0x01,0x22, 0x01,0x36, 0x02,0xfb, 0x01,0x36, +0x01,0x2b, 0x02,0x00, 0x01,0x2b, 0x01,0x00, 0x01,0x59, 0x05,0xfb, 0x01,0x03, 0x01,0x00, 0x02,0x59, +0x01,0xfb, 0x02,0x2b, 0x06,0xfb, 0x02,0x2b, 0x01,0x59, 0x02,0x00, 0x03,0xfb, 0x01,0x2b, 0x01,0x00, +0x09,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0x59, 0x05,0xfb, 0x01,0x00, 0x03,0xfb, 0x01,0x2b, 0x01,0x22, +0x01,0x3c, 0x02,0x00, 0x01,0x83, 0x07,0xfd, 0x02,0x2b, 0x05,0xfd, 0x01,0x55, 0x01,0x2b, 0x01,0xfd, +0x01,0x19, 0x01,0x07, 0x08,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2e, 0x07,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0x83, 0x05,0xfd, 0x01,0x6c, 0x01,0x2b, 0x01,0x00, 0x01,0x55, 0x11,0xfd, 0x01,0x83, 0x01,0x00, +0x01,0x2e, 0x03,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x83, 0x04,0xfd, 0x01,0x6c, +0x01,0x00, 0x01,0x2b, 0x07,0xfd, 0x01,0x83, 0x01,0x41, 0x01,0x19, 0x02,0x00, 0x01,0x2b, 0x01,0x83, +0x08,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x55, 0x80,0x87,0xfd, 0x01,0x55, 0x01,0x2b, +0x04,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0x2e, 0x0b,0xfd, 0x01,0x2e, 0x05,0x00, 0x01,0x2b, 0x01,0x19, +0x04,0xfd, 0x08,0x00, 0x01,0x2b, 0x11,0xfd, 0x01,0x6c, 0x01,0x2b, 0x01,0x00, 0x01,0x2e, 0x0b,0xfd, +0x01,0x2b, 0x01,0x00, 0x03,0x2b, 0x02,0x00, 0x04,0xfd, 0x01,0x00, 0x07,0xfd, 0x01,0x00, 0x02,0xfd, +0x01,0x41, 0x01,0x2b, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x07, 0x06,0x41, 0x01,0x2e, 0x01,0x6c, +0x01,0x2b, 0x04,0x00, 0x01,0x07, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x19, 0x01,0x2b, 0x02,0xfd, +0x01,0x8b, 0x01,0x81, 0x01,0x70, 0x01,0x12, 0x01,0x00, 0x01,0x2b, 0x0a,0xfb, 0x02,0x00, 0x01,0x2b, +0x03,0x00, 0x01,0x59, 0x02,0xfb, 0x01,0x47, 0x01,0x2b, 0x01,0x00, 0x01,0x22, 0x01,0x00, 0x03,0xfb, +0x01,0x22, 0x01,0x00, 0x05,0xfb, 0x01,0x59, 0x01,0x22, 0x01,0x2b, 0x01,0x00, 0x02,0x2b, 0x02,0x00, +0x01,0x22, 0x02,0xfb, 0x01,0x59, 0x01,0x00, 0x01,0x47, 0x01,0x36, 0x03,0x00, 0x01,0x47, 0x06,0xfb, +0x01,0x59, 0x02,0x00, 0x01,0x03, 0x01,0xfb, 0x01,0x12, 0x02,0x00, 0x01,0x2b, 0x01,0x59, 0x06,0xfb, +0x01,0x22, 0x01,0x2b, 0x02,0x00, 0x01,0x59, 0x06,0xfb, 0x01,0x2b, 0x01,0x00, 0x02,0x2b, 0x01,0x00, +0x01,0x6c, 0x01,0x8b, 0x01,0xfd, 0x01,0x2b, 0x01,0x83, 0x05,0xfd, 0x01,0x2e, 0x01,0x00, 0x01,0x2b, +0x06,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x83, 0x01,0x2b, 0x01,0x19, 0x08,0xfd, 0x01,0x83, 0x01,0x07, +0x08,0xfd, 0x01,0x83, 0x01,0x2b, 0x05,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x2b, 0x01,0x83, 0x13,0xfd, +0x01,0x41, 0x04,0xfd, 0x01,0x00, 0x04,0xfd, 0x01,0x2e, 0x03,0xfd, 0x01,0x6c, 0x01,0x2b, 0x01,0x00, +0x01,0x2e, 0x09,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0x2e, 0x0c,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0x2b, +0x02,0x00, 0x80,0xbe,0xfd, 0x01,0x83, 0x01,0x2b, 0x01,0x83, 0x0d,0xfd, 0x01,0x83, 0x03,0x41, +0x01,0x6c, 0x05,0xfd, 0x01,0x2b, 0x04,0xfd, 0x01,0x55, 0x02,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x41, +0x01,0x55, 0x02,0xfd, 0x01,0x83, 0x01,0x41, 0x06,0x2b, 0x01,0x41, 0x01,0x83, 0x01,0x41, 0x04,0xfd, +0x01,0x2b, 0x01,0x2e, 0x01,0x41, 0x02,0x00, 0x01,0x6c, 0x04,0xfd, 
0x01,0x8b, 0x01,0x01, 0x01,0x22, +0x0c,0xfb, 0x01,0x36, 0x01,0x22, 0x01,0x36, 0x05,0xfb, 0x01,0x59, 0x01,0x2b, 0x01,0x59, 0x01,0xfb, +0x01,0x2b, 0x04,0x00, 0x01,0x36, 0x03,0xfb, 0x01,0x22, 0x02,0x00, 0x01,0x2b, 0x01,0x12, 0x04,0xfb, +0x01,0x22, 0x02,0x00, 0x02,0xfb, 0x01,0x36, 0x03,0xfb, 0x01,0x22, 0x01,0x47, 0x0c,0xfb, 0x01,0x59, +0x01,0x22, 0x09,0xfb, 0x01,0x47, 0x01,0x2b, 0x01,0x47, 0x09,0xfb, 0x01,0x59, 0x01,0x22, 0x01,0x30, +0x01,0x8b, 0x0a,0xfd, 0x01,0x07, 0x01,0x41, 0x08,0xfd, 0x01,0x2e, 0x01,0x00, 0x01,0x2b, 0x1b,0xfd, +0x01,0x83, 0x1a,0xfd, 0x01,0x2b, 0x08,0xfd, 0x01,0x83, 0x01,0x2b, 0x01,0x83, 0x1a,0xfd, 0x01,0x83, +0x03,0x41, 0x01,0x6c, 0x81,0x03,0xfd, 0x01,0x8b, 0x01,0x7c, 0x57,0xfb, 0x01,0x7c, 0x01,0x8b, +0x81,0x7f,0xfd, 0x01,0x8b, 0x01,0x7b, 0x53,0xfb, 0x01,0x7b, 0x01,0x8b, 0x81,0x83,0xfd, +0x01,0x8b, 0x01,0xfc, 0x01,0x6d, 0x4d,0xfb, 0x01,0x6d, 0x01,0xfc, 0x01,0x8b, 0x81,0x87,0xfd, +0x01,0x8b, 0x01,0x80, 0x01,0x72, 0x49,0xfb, 0x01,0x72, 0x01,0x80, 0x01,0x8b, 0x80,0xfd,0xfd, +0x01,0x19, 0x01,0xfd, 0x01,0x19, 0x02,0x55, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, 0x01,0x83, 0x05,0xfd, +0x01,0x83, 0x01,0x19, 0x01,0x00, 0x03,0xfd, 0x01,0x2b, 0x05,0xfd, 0x01,0x55, 0x03,0x41, 0x01,0x55, +0x01,0xfd, 0x04,0x41, 0x01,0x55, 0x03,0xfd, 0x01,0x2b, 0x02,0xfd, 0x06,0x41, 0x01,0x55, 0x04,0xfd, +0x01,0x41, 0x01,0x83, 0x0a,0xfd, 0x01,0x41, 0x01,0x19, 0x04,0xfd, 0x01,0x2b, 0x06,0xfd, 0x01,0x6c, +0x05,0xfd, 0x01,0x6c, 0x01,0x55, 0x14,0xfd, 0x01,0x19, 0x01,0x55, 0x22,0xfd, 0x01,0x86, 0x01,0x79, +0x1b,0xfb, 0x01,0x22, 0x0e,0xfb, 0x01,0x22, 0x02,0xfb, 0x01,0x12, 0x01,0x22, 0x01,0x00, 0x06,0xfb, +0x01,0x2b, 0x01,0x36, 0x0d,0xfb, 0x01,0x79, 0x01,0x86, 0x09,0xfd, 0x01,0x41, 0x01,0x55, 0x08,0xfd, +0x01,0x2b, 0x02,0xfd, 0x06,0x41, 0x01,0x55, 0x04,0xfd, 0x01,0x41, 0x01,0x83, 0x1c,0xfd, 0x01,0x83, +0x01,0x55, 0x08,0xfd, 0x01,0x19, 0x04,0xfd, 0x01,0x6c, 0x01,0x2b, 0x08,0xfd, 0x01,0x07, 0x80,0xa7,0xfd, +0x01,0x19, 0x02,0x2b, 0x01,0x41, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x00, 0x04,0xfd, 0x01,0x83, +0x02,0x00, 0x01,0x2b, 0x01,0x83, 0x03,0xfd, 0x01,0x00, 0x05,0xfd, 0x01,0x00, 0x03,0x2b, 0x01,0x00, +0x01,0xfd, 0x01,0x00, 0x03,0x2b, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x02,0x2b, +0x01,0x00, 0x02,0x2b, 0x01,0x00, 0x04,0xfd, 0x01,0x00, 0x01,0x41, 0x0a,0xfd, 0x01,0x2b, 0x02,0x6c, +0x03,0x2b, 0x01,0x00, 0x03,0x2b, 0x03,0xfd, 0x01,0x2b, 0x01,0x19, 0x04,0xfd, 0x01,0x19, 0x01,0x2b, +0x06,0xfd, 0x01,0x2b, 0x05,0x00, 0x01,0x55, 0x07,0xfd, 0x01,0x00, 0x01,0x41, 0x17,0xfd, 0x01,0x00, +0x04,0xfd, 0x01,0x41, 0x01,0x00, 0x06,0xfd, 0x01,0x8b, 0x01,0x00, 0x01,0x75, 0x18,0xfb, 0x01,0x00, +0x0e,0xfb, 0x01,0x00, 0x02,0xfb, 0x01,0x36, 0x01,0x00, 0x01,0x2b, 0x01,0x22, 0x04,0xfb, 0x01,0x59, +0x01,0x00, 0x01,0x59, 0x08,0xfb, 0x01,0x59, 0x01,0xfb, 0x01,0x75, 0x01,0x81, 0x01,0x8b, 0x0b,0xfd, +0x01,0x00, 0x01,0x6c, 0x02,0xfd, 0x01,0x83, 0x05,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x02,0x2b, +0x01,0x00, 0x02,0x2b, 0x01,0x00, 0x04,0xfd, 0x01,0x00, 0x01,0x41, 0x13,0xfd, 0x01,0x6c, 0x07,0xfd, +0x01,0x83, 0x01,0x00, 0x01,0x2b, 0x07,0xfd, 0x01,0x83, 0x01,0x00, 0x03,0xfd, 0x01,0x55, 0x01,0x00, +0x01,0x2b, 0x01,0x07, 0x07,0xfd, 0x01,0x00, 0x01,0x2e, 0x06,0xfd, 0x01,0x83, 0x01,0x55, 0x80,0x9d,0xfd, +0x01,0x83, 0x01,0x41, 0x04,0x2b, 0x01,0x55, 0x01,0x2e, 0x01,0x00, 0x03,0x41, 0x01,0x6c, 0x02,0xfd, +0x01,0x41, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, 0x01,0xfd, 0x01,0x00, 0x01,0xfd, 0x01,0x00, 0x03,0xfd, +0x05,0x00, 0x01,0xfd, 0x05,0x00, 0x01,0xfd, 0x01,0x6c, 0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0x6c, +0x01,0x00, 0x02,0xfd, 0x01,0x00, 
0x02,0xfd, 0x01,0x00, 0x04,0xfd, 0x01,0x00, 0x01,0x83, 0x08,0xfd, +0x01,0x6c, 0x01,0x00, 0x01,0x2b, 0x01,0x41, 0x01,0x2b, 0x03,0x41, 0x01,0x00, 0x03,0x41, 0x03,0xfd, +0x01,0x00, 0x01,0x55, 0x04,0xfd, 0x01,0x41, 0x01,0x00, 0x06,0xfd, 0x01,0x55, 0x01,0x41, 0x01,0x55, +0x01,0xfd, 0x02,0x2b, 0x08,0xfd, 0x01,0x00, 0x04,0xfd, 0x01,0x2e, 0x13,0xfd, 0x01,0x00, 0x04,0xfd, +0x01,0x41, 0x01,0x00, 0x07,0xfd, 0x01,0x00, 0x01,0xfd, 0x01,0x8b, 0x01,0x80, 0x01,0x75, 0x15,0xfb, +0x01,0x00, 0x0e,0xfb, 0x01,0x00, 0x03,0xfb, 0x01,0x12, 0x06,0xfb, 0x01,0x2b, 0x06,0x00, 0x03,0xfb, +0x01,0x75, 0x01,0x00, 0x01,0x2b, 0x01,0x7f, 0x09,0xfd, 0x01,0x55, 0x08,0x00, 0x01,0x55, 0x02,0xfd, +0x01,0x6c, 0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0x6c, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x02,0xfd, +0x01,0x00, 0x04,0xfd, 0x01,0x00, 0x01,0x83, 0x09,0xfd, 0x01,0x19, 0x01,0x2b, 0x08,0x00, 0x01,0x19, +0x06,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x2b, 0x08,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0x19, +0x02,0xfd, 0x01,0x2b, 0x01,0x41, 0x01,0x6c, 0x07,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0x2b, +0x01,0x00, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x05,0xfd, 0x01,0x41, 0x01,0x55, 0x80,0x96,0xfd, +0x01,0x6c, 0x02,0x2b, 0x02,0x00, 0x01,0x2b, 0x01,0x19, 0x03,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x41, +0x01,0x6c, 0x02,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x00, 0x01,0xfd, 0x01,0x00, 0x01,0xfd, +0x01,0x2b, 0x01,0x55, 0x02,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0xfd, 0x01,0x00, 0x03,0xfd, +0x01,0x00, 0x01,0xfd, 0x01,0x41, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x41, 0x01,0x00, 0x02,0x41, +0x01,0x00, 0x02,0x41, 0x01,0x00, 0x04,0xfd, 0x01,0x00, 0x0a,0xfd, 0x01,0x19, 0x01,0x2b, 0x01,0x00, +0x01,0x83, 0x01,0x19, 0x02,0x2b, 0x01,0x00, 0x02,0x2b, 0x01,0x19, 0x03,0xfd, 0x01,0x00, 0x01,0xfd, +0x01,0x6c, 0x03,0x41, 0x01,0x07, 0x01,0x00, 0x02,0x2b, 0x07,0xfd, 0x02,0x2b, 0x06,0xfd, 0x01,0x41, +0x02,0x2b, 0x03,0x00, 0x01,0x41, 0x01,0xfd, 0x02,0x2b, 0x12,0xfd, 0x01,0x00, 0x04,0xfd, 0x01,0x41, +0x01,0x00, 0x07,0xfd, 0x01,0x00, 0x01,0xfd, 0x01,0x55, 0x01,0x41, 0x01,0xf7, 0x01,0x2b, 0x01,0x00, +0x01,0x09, 0x01,0x6f, 0x03,0xfb, 0x01,0x59, 0x01,0xfb, 0x01,0x2b, 0x01,0x59, 0x02,0xfb, 0x01,0x36, +0x01,0x47, 0x06,0xfb, 0x01,0x00, 0x09,0xfb, 0x01,0x2b, 0x09,0x00, 0x01,0x2b, 0x04,0xfb, 0x01,0x22, +0x01,0x00, 0x01,0x59, 0x03,0xfb, 0x01,0x2b, 0x01,0x00, 0x01,0x7a, 0x01,0x84, 0x01,0x8b, 0x02,0xfd, +0x01,0x2b, 0x01,0x00, 0x01,0x55, 0x04,0xfd, 0x01,0x2e, 0x01,0x19, 0x05,0xfd, 0x01,0x00, 0x01,0x6c, +0x07,0xfd, 0x01,0x41, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x41, 0x01,0x00, 0x02,0x41, 0x01,0x00, +0x02,0x41, 0x01,0x00, 0x04,0xfd, 0x01,0x00, 0x0a,0xfd, 0x01,0x2e, 0x02,0x41, 0x01,0x83, 0x02,0xfd, +0x01,0x2b, 0x01,0x00, 0x01,0x6c, 0x07,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x19, 0x07,0xfd, 0x01,0x2b, +0x03,0x00, 0x02,0x2b, 0x08,0xfd, 0x06,0x00, 0x01,0x2b, 0x01,0x2e, 0x01,0x83, 0x03,0xfd, 0x01,0x41, +0x01,0x00, 0x05,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x83, 0x80,0x96,0xfd, 0x01,0x83, 0x01,0x00, +0x02,0x2b, 0x01,0x00, 0x01,0x55, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x2b, 0x01,0x41, 0x01,0xfd, +0x01,0x83, 0x01,0x41, 0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x01,0x55, 0x01,0x2b, 0x01,0xfd, 0x01,0x00, +0x01,0xfd, 0x01,0x41, 0x01,0x2b, 0x02,0xfd, 0x04,0x00, 0x01,0x2b, 0x01,0xfd, 0x05,0x00, 0x03,0xfd, +0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x02,0x2b, 0x01,0x00, 0x02,0x2b, 0x01,0x00, 0x04,0xfd, 0x01,0x00, +0x0b,0xfd, 0x01,0x00, 0x01,0x07, 0x01,0x2e, 0x01,0x55, 0x05,0x41, 0x01,0x55, 0x03,0xfd, 0x01,0x00, +0x01,0xfd, 0x01,0x6c, 0x04,0x2b, 0x01,0x00, 0x01,0x41, 0x06,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x19, 
+0x07,0xfd, 0x01,0x07, 0x01,0x19, 0x02,0x2b, 0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x83, 0x01,0xfd, +0x01,0x00, 0x01,0x41, 0x11,0xfd, 0x01,0x00, 0x04,0xfd, 0x01,0x41, 0x01,0x00, 0x04,0xfd, 0x02,0x2b, +0x04,0x00, 0x01,0x2b, 0x01,0x07, 0x01,0x41, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x8b, 0x01,0x82, +0x01,0x08, 0x01,0x00, 0x01,0xfb, 0x02,0x2b, 0x02,0xfb, 0x01,0x2b, 0x01,0x03, 0x06,0xfb, 0x01,0x00, +0x01,0x03, 0x01,0x59, 0x0c,0xfb, 0x01,0x00, 0x08,0xfb, 0x01,0x36, 0x01,0x00, 0x02,0x36, 0x01,0x70, +0x01,0x79, 0x01,0x82, 0x01,0x00, 0x01,0x50, 0x06,0xfd, 0x01,0x19, 0x01,0x2b, 0x04,0xfd, 0x01,0x00, +0x01,0x19, 0x04,0xfd, 0x02,0x2b, 0x01,0x07, 0x01,0x2b, 0x01,0x83, 0x02,0xfd, 0x01,0x83, 0x04,0xfd, +0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x02,0x2b, 0x01,0x00, 0x02,0x2b, 0x01,0x00, 0x04,0xfd, 0x01,0x00, +0x0f,0xfd, 0x01,0x2b, 0x01,0x19, 0x08,0xfd, 0x01,0x19, 0x01,0x00, 0x01,0x41, 0x0a,0xfd, 0x01,0x2b, +0x01,0x41, 0x01,0xfd, 0x01,0x55, 0x01,0x41, 0x03,0x2b, 0x01,0x6c, 0x08,0xfd, 0x01,0x19, 0x01,0x2b, +0x05,0xfd, 0x01,0x6c, 0x01,0x00, 0x06,0xfd, 0x02,0x2b, 0x80,0x95,0xfd, 0x01,0x55, 0x01,0x00, +0x01,0x55, 0x01,0x19, 0x02,0x55, 0x01,0x00, 0x01,0x2b, 0x01,0x07, 0x01,0xfd, 0x01,0x00, 0x01,0x83, +0x03,0xfd, 0x02,0x00, 0x01,0x6c, 0x01,0x2b, 0x01,0x19, 0x01,0xfd, 0x01,0x00, 0x01,0xfd, 0x01,0x83, +0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x00, 0x02,0xfd, 0x05,0x41, 0x02,0xfd, 0x01,0x00, 0x03,0xfd, +0x01,0x00, 0x01,0x83, 0x01,0x55, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x04,0xfd, +0x01,0x00, 0x0a,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x41, 0x01,0x00, 0x01,0x2b, 0x06,0x00, 0x03,0xfd, +0x01,0x00, 0x05,0xfd, 0x01,0x41, 0x01,0x00, 0x06,0xfd, 0x01,0x83, 0x02,0x00, 0x01,0x2b, 0x02,0x00, +0x01,0x19, 0x06,0xfd, 0x01,0x2b, 0x01,0x19, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x01,0xfd, 0x01,0x41, +0x01,0x00, 0x11,0xfd, 0x01,0x00, 0x04,0xfd, 0x01,0x41, 0x01,0x00, 0x04,0xfd, 0x01,0x2e, 0x01,0x41, +0x01,0x83, 0x01,0x00, 0x04,0xfd, 0x02,0x2b, 0x05,0xfd, 0x01,0x00, 0x01,0x79, 0x01,0x71, 0x01,0x00, +0x01,0x7a, 0x01,0x73, 0x01,0x00, 0x01,0x47, 0x06,0xfb, 0x03,0x00, 0x01,0x2b, 0x01,0x47, 0x07,0xfb, +0x01,0x00, 0x01,0x59, 0x01,0xfb, 0x01,0x00, 0x01,0xfb, 0x01,0x59, 0x01,0x00, 0x04,0xfb, 0x01,0x28, +0x01,0x00, 0x01,0x43, 0x01,0x49, 0x01,0x00, 0x01,0x2b, 0x01,0xfd, 0x01,0x19, 0x01,0x00, 0x0c,0xfd, +0x01,0x2e, 0x01,0x00, 0x04,0xfd, 0x01,0x55, 0x02,0x00, 0x01,0x2e, 0x01,0x19, 0x01,0x00, 0x01,0x2e, +0x01,0x2b, 0x01,0x00, 0x01,0x41, 0x03,0xfd, 0x01,0x00, 0x01,0x83, 0x01,0x55, 0x01,0x00, 0x02,0xfd, +0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x04,0xfd, 0x01,0x00, 0x0e,0xfd, 0x02,0x2b, 0x08,0xfd, 0x01,0x55, +0x01,0x00, 0x01,0x83, 0x0b,0xfd, 0x01,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x19, 0x01,0x2b, 0x03,0x41, +0x01,0x83, 0x09,0xfd, 0x01,0x00, 0x01,0x55, 0x05,0xfd, 0x01,0x00, 0x06,0xfd, 0x01,0x83, 0x01,0x00, +0x01,0x83, 0x80,0x95,0xfd, 0x02,0x83, 0x01,0x00, 0x02,0xfd, 0x01,0x55, 0x01,0x83, 0x01,0x00, +0x01,0x83, 0x01,0x00, 0x03,0xfd, 0x01,0x41, 0x02,0x2b, 0x02,0x00, 0x01,0x83, 0x01,0xfd, 0x01,0x00, +0x01,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x03,0x2b, 0x01,0x00, +0x02,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x55, 0x02,0x00, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x00, +0x02,0xfd, 0x01,0x00, 0x04,0xfd, 0x01,0x00, 0x06,0xfd, 0x01,0x2b, 0x01,0x6c, 0x01,0xfd, 0x01,0x55, +0x01,0x00, 0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, 0x01,0x55, 0x01,0x83, 0x01,0x55, +0x01,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0xfd, 0x01,0x6c, 0x03,0xfd, 0x01,0x41, 0x01,0x00, +0x05,0xfd, 0x01,0x55, 0x02,0x00, 0x01,0x19, 0x01,0x83, 0x01,0xfd, 
0x01,0x6c, 0x01,0x2b, 0x01,0x00, +0x05,0xfd, 0x01,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x00, 0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x01,0x55, +0x10,0xfd, 0x01,0x2b, 0x04,0xfd, 0x01,0x19, 0x01,0x2b, 0x07,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x83, +0x02,0x2b, 0x06,0xfd, 0x01,0x2b, 0x01,0x19, 0x01,0xfd, 0x01,0x41, 0x01,0x83, 0x01,0x41, 0x01,0x00, +0x02,0x8b, 0x01,0x87, 0x01,0x82, 0x01,0x7d, 0x01,0x7a, 0x01,0x77, 0x01,0x00, 0x01,0x71, 0x01,0x5c, +0x01,0x2b, 0x01,0x00, 0x01,0x2b, 0x01,0x59, 0x03,0xfb, 0x01,0x6d, 0x01,0x38, 0x01,0x00, 0x01,0x71, +0x01,0x74, 0x01,0x00, 0x01,0x7a, 0x01,0x7d, 0x01,0x00, 0x01,0x35, 0x02,0x8b, 0x02,0xfd, 0x01,0x6c, +0x03,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x55, 0x0b,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x55, 0x03,0xfd, +0x01,0x41, 0x01,0x00, 0x01,0x2e, 0x01,0xfd, 0x01,0x83, 0x01,0x07, 0x01,0x00, 0x01,0x2b, 0x01,0x55, +0x04,0xfd, 0x01,0x55, 0x02,0x00, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x00, +0x04,0xfd, 0x01,0x00, 0x06,0xfd, 0x01,0x2b, 0x01,0x6c, 0x06,0xfd, 0x01,0x00, 0x01,0x6c, 0x08,0xfd, +0x01,0x83, 0x01,0x00, 0x01,0x19, 0x0a,0xfd, 0x01,0x6c, 0x01,0x00, 0x0c,0xfd, 0x01,0x6c, 0x01,0x2b, +0x03,0x00, 0x01,0x2b, 0x01,0x00, 0x05,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x83, 0x04,0xfd, 0x01,0x2b, +0x01,0x07, 0x80,0x94,0xfd, 0x01,0x55, 0x06,0x00, 0x01,0xfd, 0x01,0x00, 0x02,0x2b, 0x02,0xfd, +0x01,0x83, 0x01,0x00, 0x01,0x2e, 0x01,0x2b, 0x01,0x6c, 0x01,0xfd, 0x01,0x41, 0x02,0x00, 0x01,0x41, +0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x03,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, +0x01,0xfd, 0x01,0x07, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, 0x07,0x00, 0x04,0xfd, 0x01,0x00, 0x05,0xfd, +0x01,0x83, 0x01,0x00, 0x03,0xfd, 0x01,0x07, 0x01,0x41, 0x01,0x2b, 0x01,0x07, 0x01,0x19, 0x01,0xfd, +0x01,0x00, 0x01,0xfd, 0x01,0x00, 0x01,0xfd, 0x01,0x19, 0x03,0xfd, 0x01,0x00, 0x01,0x19, 0x01,0x2b, +0x03,0xfd, 0x01,0x19, 0x01,0x2b, 0x04,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x41, 0x06,0xfd, 0x01,0x00, +0x01,0x55, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x2e, +0x01,0x6c, 0x15,0xfd, 0x01,0x2b, 0x01,0x07, 0x07,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x55, 0x01,0x2b, +0x0c,0xfd, 0x01,0x00, 0x01,0x41, 0x07,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2b, +0x04,0xfd, 0x01,0x2b, 0x01,0x07, 0x02,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x2e, 0x01,0x2b, 0x09,0xfd, +0x01,0x00, 0x01,0x2b, 0x0b,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x19, 0x04,0xfd, 0x01,0x55, 0x01,0x41, +0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x07, 0x01,0x00, 0x05,0xfd, 0x01,0x07, 0x01,0x2b, 0x01,0x00, +0x02,0xfd, 0x07,0x00, 0x04,0xfd, 0x01,0x00, 0x05,0xfd, 0x01,0x83, 0x01,0x00, 0x07,0xfd, 0x01,0x00, +0x0a,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x2b, 0x09,0xfd, 0x01,0x07, 0x01,0x2b, 0x02,0xfd, 0x01,0x2b, +0x01,0x83, 0x07,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x2e, 0x02,0xfd, 0x01,0x83, 0x01,0x2e, 0x01,0x00, +0x01,0x55, 0x04,0xfd, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x07, 0x01,0x19, 0x03,0xfd, 0x01,0x41, +0x01,0x00, 0x80,0x95,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0xfd, 0x01,0x19, 0x01,0x07, 0x02,0xfd, +0x01,0x2e, 0x01,0x00, 0x01,0x83, 0x02,0xfd, 0x01,0x07, 0x01,0x2e, 0x01,0x41, 0x01,0x2b, 0x04,0xfd, +0x01,0x83, 0x01,0x00, 0x01,0x55, 0x03,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x03,0x41, 0x01,0x00, +0x02,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x19, 0x02,0xfd, 0x01,0x00, 0x02,0xfd, +0x01,0x19, 0x04,0xfd, 0x01,0x00, 0x01,0x83, 0x04,0xfd, 0x02,0x2b, 0x03,0xfd, 0x01,0x00, 0x01,0x41, +0x02,0x2b, 0x02,0xfd, 0x01,0x00, 0x01,0xfd, 0x01,0x00, 0x05,0xfd, 0x02,0x00, 0x01,0x83, 0x03,0xfd, +0x01,0x2b, 0x01,0x19, 0x05,0xfd, 
0x01,0x6c, 0x01,0xfd, 0x03,0x00, 0x01,0x55, 0x02,0xfd, 0x01,0x00, +0x01,0x55, 0x03,0xfd, 0x01,0x2b, 0x01,0x2e, 0x03,0xfd, 0x01,0x00, 0x05,0xfd, 0x01,0x83, 0x01,0x2e, +0x12,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x83, 0x07,0xfd, 0x01,0x00, 0x0f,0xfd, 0x01,0x07, 0x01,0x2b, +0x08,0xfd, 0x01,0x00, 0x05,0xfd, 0x01,0x83, 0x03,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x83, 0x02,0xfd, +0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x01,0x55, 0x07,0xfd, 0x02,0x2b, 0x0a,0xfd, 0x01,0x6c, 0x01,0x2b, +0x01,0x00, 0x01,0x55, 0x07,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x00, 0x07,0xfd, +0x01,0x00, 0x02,0xfd, 0x01,0x19, 0x02,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x19, 0x04,0xfd, 0x01,0x00, +0x01,0x83, 0x04,0xfd, 0x02,0x2b, 0x07,0xfd, 0x01,0x00, 0x01,0x07, 0x0b,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0x83, 0x07,0xfd, 0x01,0x00, 0x01,0x55, 0x01,0xfd, 0x01,0x19, 0x01,0x2b, 0x08,0xfd, 0x01,0x00, +0x01,0x2e, 0x0b,0xfd, 0x01,0x07, 0x01,0x2b, 0x01,0xfd, 0x01,0x00, 0x01,0x6c, 0x03,0xfd, 0x01,0x83, +0x01,0x00, 0x02,0xfd, 0x01,0x2b, 0x02,0x00, 0x01,0x83, 0x80,0x8f,0xfd, 0x01,0x6c, 0x01,0x2b, +0x02,0x00, 0x01,0x83, 0x02,0xfd, 0x02,0x00, 0x01,0x07, 0x02,0xfd, 0x01,0x83, 0x01,0xfd, 0x01,0x41, +0x01,0x2b, 0x03,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x19, 0x04,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x00, +0x03,0x2b, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x00, 0x05,0xfd, 0x01,0x00, 0x07,0xfd, +0x01,0x00, 0x01,0x07, 0x02,0xfd, 0x01,0x83, 0x01,0x2b, 0x01,0x00, 0x01,0x83, 0x03,0xfd, 0x01,0x00, +0x01,0x41, 0x02,0x2b, 0x01,0xfd, 0x01,0x19, 0x01,0x2b, 0x01,0xfd, 0x01,0x00, 0x01,0xfd, 0x01,0x07, +0x01,0x41, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x19, 0x01,0x00, 0x07,0xfd, 0x01,0x41, +0x01,0x2b, 0x01,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0xfd, 0x01,0x07, 0x01,0x00, 0x03,0xfd, 0x01,0x6c, +0x01,0x00, 0x01,0xfd, 0x01,0x6c, 0x01,0xfd, 0x01,0x2e, 0x01,0x00, 0x05,0xfd, 0x01,0x6c, 0x01,0x00, +0x01,0x19, 0x10,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x55, 0x08,0xfd, 0x01,0x00, 0x01,0x41, 0x03,0xfd, +0x01,0x55, 0x01,0x2e, 0x08,0xfd, 0x01,0x2b, 0x01,0x00, 0x09,0xfd, 0x01,0x00, 0x08,0xfd, 0x01,0x83, +0x01,0x00, 0x01,0x2e, 0x03,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x83, 0x04,0xfd, +0x01,0x6c, 0x01,0x00, 0x01,0x2b, 0x07,0xfd, 0x01,0x83, 0x01,0x41, 0x01,0x19, 0x02,0x00, 0x01,0x2b, +0x01,0x83, 0x08,0xfd, 0x01,0x41, 0x01,0x00, 0x02,0xfd, 0x01,0x55, 0x07,0xfd, 0x01,0x00, 0x05,0xfd, +0x01,0x00, 0x07,0xfd, 0x01,0x00, 0x01,0x07, 0x02,0xfd, 0x01,0x83, 0x01,0x2b, 0x01,0x00, 0x01,0x83, +0x07,0xfd, 0x01,0x55, 0x01,0x00, 0x01,0x2b, 0x01,0x6c, 0x0a,0xfd, 0x01,0x07, 0x01,0x00, 0x01,0x6c, +0x05,0xfd, 0x01,0x55, 0x01,0x00, 0x02,0xfd, 0x01,0x07, 0x01,0x00, 0x04,0xfd, 0x01,0x83, 0x03,0xfd, +0x01,0x2b, 0x01,0x07, 0x05,0xfd, 0x01,0x6c, 0x05,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x2b, 0x01,0x00, +0x05,0xfd, 0x01,0x83, 0x01,0xfd, 0x01,0x55, 0x01,0x2b, 0x01,0xfd, 0x01,0x19, 0x01,0x07, 0x80,0x90,0xfd, +0x01,0x19, 0x01,0x00, 0x01,0x2b, 0x01,0x00, 0x01,0x6c, 0x01,0x00, 0x01,0x19, 0x01,0xfd, 0x02,0x2b, +0x01,0x83, 0x02,0xfd, 0x01,0x41, 0x01,0x2b, 0x01,0xfd, 0x01,0x6c, 0x01,0x2b, 0x01,0x00, 0x01,0x41, +0x05,0xfd, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x04,0x41, 0x02,0xfd, 0x01,0x00, 0x03,0xfd, 0x01,0x00, +0x05,0xfd, 0x01,0x00, 0x07,0xfd, 0x01,0x83, 0x04,0x00, 0x01,0x2b, 0x01,0x83, 0x03,0xfd, 0x01,0x55, +0x01,0x2b, 0x01,0x41, 0x01,0x2b, 0x01,0x83, 0x01,0x41, 0x01,0x00, 0x01,0x6c, 0x01,0xfd, 0x01,0x00, +0x01,0xfd, 0x01,0x2b, 0x01,0x41, 0x02,0xfd, 0x01,0x2b, 0x01,0x07, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, +0x01,0x55, 0x08,0xfd, 0x01,0x00, 0x02,0x2b, 0x02,0x00, 0x01,0x2b, 0x01,0x83, 0x03,0xfd, 
0x01,0x00,
+0x01,0x19, 0x01,0xfd, 0x03,0x00, 0x01,0x2e, 0x06,0xfd, 0x01,0x83, 0x01,0x00, 0x01,0x19, 0x0d,0xfd,
+0x01,0x2b, 0x02,0x00, 0x01,0x6c, 0x09,0xfd, 0x01,0x2e, 0x06,0x00, 0x06,0xfd, 0x01,0x2e, 0x01,0x00,
+0x01,0x2b, 0x0a,0xfd, 0x01,0x00, 0x09,0xfd, 0x01,0x41, 0x04,0xfd, 0x01,0x00, 0x04,0xfd, 0x01,0x2e,
+0x03,0xfd, 0x01,0x6c, 0x01,0x2b, 0x01,0x00, 0x01,0x2e, 0x09,0xfd, 0x01,0x00, 0x01,0x2b, 0x01,0x2e,
+0x0c,0xfd, 0x01,0x2b, 0x01,0x00, 0x03,0x2b, 0x02,0x00, 0x04,0xfd, 0x01,0x00, 0x05,0xfd, 0x01,0x00,
+0x07,0xfd, 0x01,0x83, 0x04,0x00, 0x01,0x2b, 0x01,0x83, 0x09,0xfd, 0x01,0x6c, 0x01,0x2b, 0x02,0x00,
+0x0a,0xfd, 0x01,0x19, 0x01,0x00, 0x01,0x55, 0x04,0xfd, 0x01,0x00, 0x01,0x19, 0x02,0xfd, 0x01,0x83,
+0x01,0x2b, 0x04,0x00, 0x01,0x2b, 0x03,0xfd, 0x01,0x6c, 0x02,0x00, 0x03,0x2b, 0x02,0x00, 0x06,0xfd,
+0x01,0x55, 0x01,0x2b, 0x01,0x83, 0x07,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x83, 0x01,0x2b, 0x01,0x19,
+0x80,0x8e,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0x41, 0x01,0x00, 0x01,0x41,
+0x03,0xfd, 0x02,0x2b, 0x02,0xfd, 0x01,0x41, 0x01,0x2b, 0x01,0x19, 0x01,0x00, 0x01,0x19, 0x01,0x83,
+0x06,0xfd, 0x01,0x00, 0x07,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x19, 0x01,0xfd, 0x01,0x6c, 0x01,0x00,
+0x01,0x2b, 0x05,0xfd, 0x01,0x00, 0x09,0xfd, 0x01,0x55, 0x01,0x41, 0x01,0x83, 0x06,0xfd, 0x01,0x6c,
+0x01,0x41, 0x01,0x2b, 0x01,0x41, 0x01,0x00, 0x01,0x6c, 0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x2b,
+0x07,0xfd, 0x01,0x41, 0x01,0x83, 0x0a,0xfd, 0x01,0x55, 0x01,0x41, 0x01,0x55, 0x06,0xfd, 0x01,0x55,
+0x03,0xfd, 0x02,0x55, 0x08,0xfd, 0x01,0x55, 0x01,0x2b, 0x0d,0xfd, 0x01,0x55, 0x01,0x6c, 0x18,0xfd,
+0x01,0x55, 0x01,0x6c, 0x0b,0xfd, 0x01,0x2b, 0x0e,0xfd, 0x01,0x2b, 0x08,0xfd, 0x01,0x83, 0x01,0x2b,
+0x01,0x83, 0x1a,0xfd, 0x01,0x83, 0x03,0x41, 0x01,0x6c, 0x03,0xfd, 0x01,0x6c, 0x01,0x00, 0x01,0x2b,
+0x05,0xfd, 0x01,0x00, 0x09,0xfd, 0x01,0x55, 0x01,0x41, 0x01,0x83, 0x0e,0xfd, 0x01,0x83, 0x0b,0xfd,
+0x01,0x19, 0x01,0x83, 0x04,0xfd, 0x01,0x6c, 0x01,0x83, 0x0e,0xfd, 0x01,0x83, 0x03,0x41, 0x13,0xfd,
+0x01,0x2e, 0x01,0x00, 0x01,0x2b, 0x80,0x95,0xfd, 0x01,0x83, 0x3c,0xfd, 0x01,0x83, 0x01,0x6c,
+0xb6,0xd5,0xfd
+};
diff --git a/osfmk/ppc/serial_console.c b/osfmk/console/ppc/serial_console.c
similarity index 98%
rename from osfmk/ppc/serial_console.c
rename to osfmk/console/ppc/serial_console.c
index 19b49d159..9d506ac11 100644
--- a/osfmk/ppc/serial_console.c
+++ b/osfmk/console/ppc/serial_console.c
@@ -36,10 +36,9 @@
 #include
 #include /* spl definitions */
 #include
-#include
+#include
 #include
-#include
-#include
+#include
 #include
 #include
 #include
@@ -102,6 +101,7 @@
 unsigned int killprint = 0;
 unsigned int debcnputc = 0;
 extern unsigned int mappingdeb0;
 extern int debugger_holdoff[NCPUS];
+extern int debugger_cpu;
 static void _cnputc(char c)
 {
diff --git a/osfmk/ppc/POWERMAC/video_scroll.s b/osfmk/console/ppc/video_scroll.s
similarity index 93%
rename from osfmk/ppc/POWERMAC/video_scroll.s
rename to osfmk/console/ppc/video_scroll.s
index 20824be8e..a15f42ec8 100644
--- a/osfmk/ppc/POWERMAC/video_scroll.s
+++ b/osfmk/console/ppc/video_scroll.s
@@ -44,8 +44,6 @@ ENTRY(video_scroll_up, TAG_NO_FRAME_USED)
 mfmsr r0 /* Get the MSR */
-rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
-rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
 mflr r6 /* Get the LR */
 ori r7,r0,1<<(31-MSR_FP_BIT) /* Turn on floating point */
 stwu r1,-(FM_SIZE+16)(r1) /* Get space for a couple of registers on stack */
@@ -99,8 +97,6 @@ ENTRY(video_scroll_down, TAG_NO_FRAME_USED)
 mfmsr r0 /* Get the MSR */
-rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
-rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
 mflr r6 /* Get the LR */
 ori r7,r0,1<<(31-MSR_FP_BIT) /* Turn on floating point */
 stwu r1,-(FM_SIZE+16)(r1) /* Get space for a couple of registers on stack */
diff --git a/osfmk/console/rendered_numbers.c b/osfmk/console/rendered_numbers.c
new file mode 100644
index 000000000..c2d571ef9
--- /dev/null
+++ b/osfmk/console/rendered_numbers.c
@@ -0,0 +1,376 @@
+ /* generated c file */
+
+static const struct {
+ unsigned int num_w;
+ unsigned int num_h;
+ unsigned char num_pixel_data[0x82];
+} num_0 = {
+/* w */ 9,
+/* h */ 11,
+/* pixel_data */
+0x09,0xfd,
+0x02,0xfd, 0x01,0x81, 0x01,0x2b, 0x02,0x00, 0x01,0x26, 0x02,0xfd,
+0x02,0xfd, 0x01,0x2b, 0x01,0x01, 0x01,0xfd, 0x01,0x81, 0x01,0x00, 0x01,0x69, 0x01,0xfd,
+0x02,0xfd, 0x01,0x00, 0x01,0x52, 0x02,0xfd, 0x01,0x00, 0x01,0x01, 0x01,0xfd,
+0x01,0xfd, 0x01,0xf9, 0x01,0x00, 0x03,0xfd, 0x01,0x01, 0x01,0x00, 0x01,0xfd,
+0x01,0xfd, 0x01,0xf9, 0x01,0x00, 0x03,0xfd, 0x01,0x01, 0x01,0x00, 0x01,0xfd,
+0x01,0xfd, 0x01,0xf9, 0x01,0x00, 0x03,0xfd, 0x01,0x01, 0x01,0x00, 0x01,0xfd,
+0x02,0xfd, 0x01,0x00, 0x01,0x52, 0x02,0xfd, 0x01,0x00, 0x01,0x01, 0x01,0xfd,
+0x02,0xfd, 0x01,0x2b, 0x01,0x01, 0x01,0xfd, 0x01,0x69, 0x01,0x00, 0x01,0x69, 0x01,0xfd,
+0x02,0xfd, 0x01,0x81, 0x01,0x2b, 0x02,0x00, 0x01,0x26, 0x02,0xfd,
+0x09,0xfd
+};
+
+
+static const struct {
+ unsigned int num_w;
+ unsigned int num_h;
+ unsigned char num_pixel_data[0x54];
+} num_1 = {
+/* w */ 7,
+/* h */ 11,
+/* pixel_data */
+0x07,0xfd,
+0x01,0xfd, 0x01,0xf9, 0x01,0x2b, 0x01,0x00, 0x01,0xf9, 0x02,0xfd,
+0x01,0xfd, 0x01,0x26, 0x01,0x52, 0x01,0x00, 0x01,0xf9, 0x02,0xfd,
+0x03,0xfd, 0x01,0x00, 0x01,0xf9, 0x02,0xfd,
+0x03,0xfd, 0x01,0x00, 0x01,0xf9, 0x02,0xfd,
+0x03,0xfd, 0x01,0x00, 0x01,0xf9, 0x02,0xfd,
+0x03,0xfd, 0x01,0x00, 0x01,0xf9, 0x02,0xfd,
+0x03,0xfd, 0x01,0x00, 0x01,0xf9, 0x02,0xfd,
+0x03,0xfd, 0x01,0x00, 0x01,0xf9, 0x02,0xfd,
+0x01,0xfd, 0x01,0x2b, 0x04,0x00, 0x01,0xfd,
+0x07,0xfd
+};
+
+
+static const struct {
+ unsigned int num_w;
+ unsigned int num_h;
+ unsigned char num_pixel_data[0x5c];
+} num_2 = {
+/* w */ 8,
+/* h */ 11,
+/* pixel_data */
+0x08,0xfd,
+0x01,0xfd, 0x01,0x52, 0x03,0x00, 0x01,0x01, 0x02,0xfd,
+0x01,0xfd, 0x01,0x52, 0x01,0x81, 0x01,0xfd, 0x01,0x81, 0x01,0x00, 0x01,0x12, 0x01,0xfd,
+0x05,0xfd, 0x01,0x00, 0x01,0x01, 0x01,0xfd,
+0x04,0xfd, 0x01,0x81, 0x01,0x00, 0x01,0xf9, 0x01,0xfd,
+0x03,0xfd, 0x01,0x81, 0x01,0x00, 0x01,0x2b, 0x02,0xfd,
+0x02,0xfd, 0x01,0x81, 0x01,0x00, 0x01,0x01, 0x03,0xfd,
+0x02,0xfd, 0x01,0x00, 0x01,0x12, 0x04,0xfd,
+0x01,0xfd, 0x01,0x01, 0x01,0x2b, 0x05,0xfd,
+0x01,0xfd, 0x05,0x00, 0x01,0x01, 0x01,0xfd,
+0x08,0xfd
+};
+
+
+static const struct {
+ unsigned int num_w;
+ unsigned int num_h;
+ unsigned char num_pixel_data[0x46];
+} num_3 = {
+/* w */ 6,
+/* h */ 11,
+/* pixel_data */
+0x06,0xfd,
+0x01,0xfd, 0x04,0x00, 0x01,0x52,
+0x01,0xfd, 0x01,0x81, 0x02,0xfd, 0x02,0x00,
+0x04,0xfd, 0x01,0x01, 0x01,0x00,
+0x03,0xfd, 0x01,0xf9, 0x01,0x00, 0x01,0x52,
+0x01,0xfd, 0x01,0x52, 0x02,0x00, 0x01,0x2b, 0x01,0x81,
+0x04,0xfd, 0x01,0x2b, 0x01,0x00,
+0x04,0xfd, 0x01,0xf9, 0x01,0x00,
+0x01,0xfd, 0x01,0x81, 0x02,0xfd, 0x01,0x2b, 0x01,0x00,
+0x01,0xfd, 0x04,0x00, 0x01,0x52,
+0x06,0xfd
+};
+
+
+static const struct {
+ unsigned int num_w;
+ unsigned int num_h;
+ unsigned char num_pixel_data[0x64];
+} num_4 = {
+/* w */ 9,
+/* h */ 11,
+/* pixel_data */
+0x09,0xfd,
+0x05,0xfd, 0x02,0x00, 0x02,0xfd,
+0x04,0xfd, 0x01,0x01, 0x02,0x00, 0x02,0xfd,
+0x03,0xfd, 0x01,0x52, 0x01,0x2b, 0x01,0x01, 0x01,0x00, 0x02,0xfd, +0x03,0xfd, 0x01,0x00, 0x01,0x81, 0x01,0x01, 0x01,0x00, 0x02,0xfd, +0x02,0xfd, 0x01,0x01, 0x01,0xf9, 0x01,0xfd, 0x01,0x01, 0x01,0x00, 0x02,0xfd, +0x01,0xfd, 0x01,0x69, 0x01,0x00, 0x02,0xf9, 0x01,0x2b, 0x01,0x00, 0x01,0xf9, 0x01,0xfd, +0x01,0xfd, 0x01,0x69, 0x03,0x01, 0x02,0x00, 0x01,0x01, 0x01,0xfd, +0x05,0xfd, 0x01,0x01, 0x01,0x00, 0x02,0xfd, +0x05,0xfd, 0x01,0x01, 0x01,0x00, 0x02,0xfd, +0x09,0xfd +}; + + +static const struct { + unsigned int num_w; + unsigned int num_h; + unsigned char num_pixel_data[0x58]; +} num_5 = { +/* w */ 7, +/* h */ 11, +/* pixel_data */ +0x07,0xfd, +0x01,0xfd, 0x01,0xf9, 0x04,0x00, 0x01,0xfd, +0x01,0xfd, 0x01,0xf9, 0x01,0x00, 0x04,0xfd, +0x01,0xfd, 0x01,0xf9, 0x01,0x00, 0x04,0xfd, +0x01,0xfd, 0x01,0xf9, 0x01,0x00, 0x01,0x01, 0x01,0x69, 0x02,0xfd, +0x01,0xfd, 0x01,0x81, 0x01,0xf9, 0x01,0x12, 0x01,0x00, 0x01,0x12, 0x01,0xfd, +0x04,0xfd, 0x01,0x12, 0x01,0x00, 0x01,0xfd, +0x04,0xfd, 0x01,0xf9, 0x01,0x00, 0x01,0xfd, +0x04,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, +0x01,0xfd, 0x01,0x12, 0x03,0x00, 0x01,0x81, 0x01,0xfd, +0x07,0xfd +}; + + +static const struct { + unsigned int num_w; + unsigned int num_h; + unsigned char num_pixel_data[0x72]; +} num_6 = { +/* w */ 8, +/* h */ 11, +/* pixel_data */ +0x08,0xfd, +0x02,0xfd, 0x01,0x52, 0x03,0x00, 0x01,0x01, 0x01,0xfd, +0x01,0xfd, 0x01,0x81, 0x01,0x00, 0x01,0x52, 0x02,0xfd, 0x01,0x81, 0x01,0xfd, +0x01,0xfd, 0x01,0x01, 0x01,0x00, 0x05,0xfd, +0x01,0xfd, 0x01,0x00, 0x02,0x01, 0x01,0x00, 0x01,0x2b, 0x01,0x69, 0x01,0xfd, +0x01,0xfd, 0x02,0x00, 0x01,0xf9, 0x01,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x02,0xfd, 0x01,0x81, 0x01,0x00, 0x01,0xf9, +0x01,0xfd, 0x02,0x2b, 0x03,0xfd, 0x01,0x00, 0x01,0xf9, +0x01,0xfd, 0x01,0x69, 0x01,0x00, 0x01,0x81, 0x01,0xfd, 0x01,0x12, 0x01,0x00, 0x01,0xfd, +0x02,0xfd, 0x01,0x26, 0x03,0x00, 0x01,0x52, 0x01,0xfd, +0x08,0xfd +}; + + +static const struct { + unsigned int num_w; + unsigned int num_h; + unsigned char num_pixel_data[0x4a]; +} num_7 = { +/* w */ 7, +/* h */ 11, +/* pixel_data */ +0x07,0xfd, +0x01,0xfd, 0x06,0x00, +0x05,0xfd, 0x01,0x2b, 0x01,0x01, +0x04,0xfd, 0x01,0xf9, 0x01,0x00, 0x01,0xfd, +0x04,0xfd, 0x01,0x00, 0x01,0xf9, 0x01,0xfd, +0x03,0xfd, 0x01,0x12, 0x01,0x00, 0x02,0xfd, +0x02,0xfd, 0x01,0x81, 0x01,0x00, 0x01,0x69, 0x02,0xfd, +0x02,0xfd, 0x01,0x01, 0x01,0x00, 0x03,0xfd, +0x02,0xfd, 0x01,0x00, 0x01,0x12, 0x03,0xfd, +0x01,0xfd, 0x01,0x52, 0x01,0x00, 0x01,0x81, 0x03,0xfd, +0x07,0xfd +}; + + +static const struct { + unsigned int num_w; + unsigned int num_h; + unsigned char num_pixel_data[0x76]; +} num_8 = { +/* w */ 8, +/* h */ 11, +/* pixel_data */ +0x08,0xfd, +0x02,0xfd, 0x01,0x52, 0x03,0x00, 0x01,0x12, 0x01,0xfd, +0x02,0xfd, 0x01,0x00, 0x01,0x26, 0x01,0xfd, 0x01,0x26, 0x01,0x00, 0x01,0xfd, +0x02,0xfd, 0x01,0x00, 0x01,0x01, 0x01,0xfd, 0x01,0x26, 0x01,0x00, 0x01,0xfd, +0x02,0xfd, 0x01,0x12, 0x01,0x00, 0x01,0x01, 0x01,0x00, 0x01,0x52, 0x01,0xfd, +0x02,0xfd, 0x01,0x12, 0x03,0x00, 0x01,0x52, 0x01,0xfd, +0x01,0xfd, 0x01,0x52, 0x01,0x00, 0x02,0x81, 0x02,0x00, 0x01,0x81, +0x01,0xfd, 0x01,0x01, 0x01,0x00, 0x02,0xfd, 0x01,0x81, 0x01,0x00, 0x01,0xf9, +0x01,0xfd, 0x01,0x12, 0x01,0x00, 0x01,0x81, 0x01,0xfd, 0x01,0x52, 0x01,0x00, 0x01,0x81, +0x02,0xfd, 0x01,0x01, 0x03,0x00, 0x01,0xf9, 0x01,0xfd, +0x08,0xfd +}; + + +static const struct { + unsigned int num_w; + unsigned int num_h; + unsigned char num_pixel_data[0x66]; +} num_9 = { +/* w */ 7, +/* h */ 11, +/* pixel_data */ +0x07,0xfd, +0x02,0xfd, 
0x01,0x01, 0x02,0x00, 0x01,0x2b, 0x01,0xfd, +0x01,0xfd, 0x01,0x12, 0x01,0x00, 0x02,0xfd, 0x01,0x00, 0x01,0x12, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x02,0xfd, 0x01,0x26, 0x01,0x00, +0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, 0x01,0xf9, 0x01,0x00, +0x01,0xfd, 0x01,0x26, 0x01,0x00, 0x01,0x69, 0x01,0x81, 0x02,0x00, +0x02,0xfd, 0x01,0x26, 0x01,0x00, 0x01,0x2b, 0x01,0x26, 0x01,0x00, +0x05,0xfd, 0x01,0x01, 0x01,0x2b, +0x01,0xfd, 0x01,0x81, 0x02,0xfd, 0x01,0x81, 0x01,0x00, 0x01,0x52, +0x01,0xfd, 0x01,0x69, 0x03,0x00, 0x01,0x26, 0x01,0xfd, +0x07,0xfd +}; + + +static const struct { + unsigned int num_w; + unsigned int num_h; + unsigned char num_pixel_data[0x76]; +} num_a = { +/* w */ 10, +/* h */ 11, +/* pixel_data */ +0x0a,0xfd, +0x04,0xfd, 0x01,0x01, 0x01,0x00, 0x01,0x81, 0x03,0xfd, +0x04,0xfd, 0x02,0x00, 0x01,0x12, 0x03,0xfd, +0x03,0xfd, 0x01,0xf9, 0x01,0x00, 0x01,0x2b, 0x01,0x00, 0x03,0xfd, +0x03,0xfd, 0x01,0x2b, 0x01,0x12, 0x01,0x69, 0x01,0x00, 0x01,0x52, 0x02,0xfd, +0x02,0xfd, 0x01,0x81, 0x01,0x00, 0x01,0x81, 0x01,0xfd, 0x01,0x00, 0x01,0x2b, 0x02,0xfd, +0x02,0xfd, 0x01,0x12, 0x01,0x00, 0x02,0x01, 0x02,0x00, 0x02,0xfd, +0x02,0xfd, 0x01,0x00, 0x01,0x12, 0x03,0xf9, 0x01,0x00, 0x01,0x26, 0x01,0xfd, +0x01,0xfd, 0x01,0x52, 0x01,0x00, 0x04,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, +0x01,0xfd, 0x01,0x2b, 0x01,0x01, 0x04,0xfd, 0x01,0xf9, 0x01,0x00, 0x01,0x81, +0x0a,0xfd +}; + + +static const struct { + unsigned int num_w; + unsigned int num_h; + unsigned char num_pixel_data[0x68]; +} num_b = { +/* w */ 7, +/* h */ 11, +/* pixel_data */ +0x07,0xfd, +0x01,0xfd, 0x04,0x00, 0x01,0x2b, 0x01,0x81, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x01,0xfd, 0x01,0x81, 0x01,0x00, 0x01,0x01, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x02,0xfd, 0x01,0x00, 0x01,0x01, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x01,0xfd, 0x01,0x01, 0x01,0x00, 0x01,0x81, +0x01,0xfd, 0x04,0x00, 0x01,0xf9, 0x01,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x01,0xfd, 0x01,0x52, 0x01,0x00, 0x01,0x26, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x02,0xfd, 0x01,0x01, 0x01,0x00, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x02,0xfd, 0x01,0x00, 0x01,0x2b, +0x01,0xfd, 0x05,0x00, 0x01,0x81, +0x07,0xfd +}; + + +static const struct { + unsigned int num_w; + unsigned int num_h; + unsigned char num_pixel_data[0x5a]; +} num_c = { +/* w */ 9, +/* h */ 11, +/* pixel_data */ +0x09,0xfd, +0x03,0xfd, 0x01,0x01, 0x04,0x00, 0x01,0xf9, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x81, 0x03,0xfd, 0x01,0x69, +0x01,0xfd, 0x01,0x52, 0x01,0x00, 0x01,0x69, 0x05,0xfd, +0x01,0xfd, 0x01,0x01, 0x01,0x00, 0x06,0xfd, +0x01,0xfd, 0x01,0x01, 0x01,0x00, 0x06,0xfd, +0x01,0xfd, 0x01,0x01, 0x01,0x00, 0x06,0xfd, +0x01,0xfd, 0x01,0x52, 0x01,0x00, 0x01,0x69, 0x05,0xfd, +0x02,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x81, 0x02,0xfd, 0x01,0x81, 0x01,0x52, +0x03,0xfd, 0x01,0x01, 0x04,0x00, 0x01,0x52, +0x09,0xfd +}; + + +static const struct { + unsigned int num_w; + unsigned int num_h; + unsigned char num_pixel_data[0x7c]; +} num_d = { +/* w */ 10, +/* h */ 11, +/* pixel_data */ +0x0a,0xfd, +0x01,0xfd, 0x05,0x00, 0x01,0x2b, 0x01,0x81, 0x02,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0x81, 0x01,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x04,0xfd, 0x01,0x00, 0x01,0x01, 0x01,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x04,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x04,0xfd, 0x01,0x01, 0x01,0x00, 0x01,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x04,0xfd, 0x01,0x2b, 0x01,0x00, 0x01,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 0x04,0xfd, 0x01,0x00, 0x01,0x12, 0x01,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0x01, 
0x03,0xfd, 0x01,0x2b, 0x01,0x00, 0x02,0xfd, +0x01,0xfd, 0x05,0x00, 0x01,0x01, 0x01,0x81, 0x02,0xfd, +0x0a,0xfd +}; + + +static const struct { + unsigned int num_w; + unsigned int num_h; + unsigned char num_pixel_data[0x48]; +} num_e = { +/* w */ 7, +/* h */ 11, +/* pixel_data */ +0x07,0xfd, +0x01,0xfd, 0x05,0x00, 0x01,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x04,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x04,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x04,0xfd, +0x01,0xfd, 0x04,0x00, 0x01,0x12, 0x01,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x04,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x04,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x04,0xfd, +0x01,0xfd, 0x05,0x00, 0x01,0x52, +0x07,0xfd +}; + + +static const struct { + unsigned int num_w; + unsigned int num_h; + unsigned char num_pixel_data[0x46]; +} num_f = { +/* w */ 6, +/* h */ 11, +/* pixel_data */ +0x06,0xfd, +0x01,0xfd, 0x05,0x00, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x03,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x03,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x03,0xfd, +0x01,0xfd, 0x04,0x00, 0x01,0x12, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x03,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x03,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x03,0xfd, +0x01,0xfd, 0x01,0x00, 0x01,0xf9, 0x03,0xfd, +0x06,0xfd +}; + + +static const struct { + unsigned int num_w; + unsigned int num_h; + unsigned char num_pixel_data[0x2e]; +} num_colon = { +/* w */ 4, +/* h */ 11, +/* pixel_data */ +0x04,0xfd, +0x04,0xfd, +0x04,0xfd, +0x01,0xfd, 0x01,0x69, 0x01,0x01, 0x01,0xfd, +0x01,0xfd, 0x01,0x52, 0x01,0x00, 0x01,0xfd, +0x04,0xfd, +0x04,0xfd, +0x04,0xfd, +0x01,0xfd, 0x01,0x81, 0x01,0xf9, 0x01,0xfd, +0x01,0xfd, 0x01,0xf9, 0x01,0x00, 0x01,0xfd, +0x04,0xfd +}; + + diff --git a/osfmk/console/video_console.c b/osfmk/console/video_console.c new file mode 100644 index 000000000..ad15fd553 --- /dev/null +++ b/osfmk/console/video_console.c @@ -0,0 +1,2412 @@ +/* + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + * + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +/* + * NetBSD: ite.c,v 1.16 1995/07/17 01:24:34 briggs Exp + * + * Copyright (c) 1988 University of Utah. + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: Utah $Hdr: ite.c 1.28 92/12/20$ + * + * @(#)ite.c 8.2 (Berkeley) 1/12/94 + */ + +/* + * ite.c + * + * The ite module handles the system console; that is, stuff printed + * by the kernel and by user programs while "desktop" and X aren't + * running. Some (very small) parts are based on hp300's 4.4 ite.c, + * hence the above copyright. 
+ * + * -- Brad and Lawrence, June 26th, 1994 + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "iso_font.c" + +/* + * Generic Console (Front-End) + * --------------------------- + */ + +struct vc_info vinfo; +/* if panicDialogDesired is true then we use the panic dialog when its */ +/* allowed otherwise we won't use the panic dialog even if it is allowed */ +boolean_t panicDialogDesired; + + +extern int disableConsoleOutput; +static boolean_t gc_enabled = FALSE; +static boolean_t gc_initialized = FALSE; +static boolean_t vm_initialized = FALSE; + +static struct { + void (*initialize)(struct vc_info * info); + void (*enable)(boolean_t enable); + void (*paint_char)(int xx, int yy, unsigned char ch, int attrs, unsigned char ch_previous, int attrs_previous); + void (*clear_screen)(int xx, int yy, int top, int bottom, int which); + void (*scroll_down)(int num, int top, int bottom); + void (*scroll_up)(int num, int top, int bottom); + void (*hide_cursor)(int xx, int yy); + void (*show_cursor)(int xx, int yy); + void (*update_color)(int color, boolean_t fore); +} gc_ops; + +static unsigned char * gc_buffer_attributes = NULL; +static unsigned char * gc_buffer_characters = NULL; +static unsigned char * gc_buffer_colorcodes = NULL; +static unsigned long gc_buffer_columns = 0; +static unsigned long gc_buffer_rows = 0; +static unsigned long gc_buffer_size = 0; +decl_simple_lock_data(,gc_buffer_lock) + +/* +# Attribute codes: +# 00=none 01=bold 04=underscore 05=blink 07=reverse 08=concealed +# Text color codes: +# 30=black 31=red 32=green 33=yellow 34=blue 35=magenta 36=cyan 37=white +# Background color codes: +# 40=black 41=red 42=green 43=yellow 44=blue 45=magenta 46=cyan 47=white +*/ + +#define ATTR_NONE 0 +#define ATTR_BOLD 1 +#define ATTR_UNDER 2 +#define ATTR_REVERSE 4 + +#define COLOR_BACKGROUND 0 +#define COLOR_FOREGROUND 7 + +#define COLOR_CODE_GET(code, fore) (((code) & ((fore) ? 0xF0 : 0x0F)) >> ((fore) ? 4 : 0)) +#define COLOR_CODE_SET(code, color, fore) (((code) & ((fore) ? 0x0F : 0xF0)) | ((color) << ((fore) ? 4 : 0))) + +static unsigned char gc_color_code = 0; + +/* VT100 state: */ +#define MAXPARS 16 +static int gc_x = 0, gc_y = 0, gc_savex, gc_savey; +static int gc_par[MAXPARS], gc_numpars, gc_hanging_cursor, gc_attr, gc_saveattr; + +/* VT100 tab stops & scroll region */ +static char gc_tab_stops[255]; +static int gc_scrreg_top, gc_scrreg_bottom; + +enum vt100state_e { + ESnormal, /* Nothing yet */ + ESesc, /* Got ESC */ + ESsquare, /* Got ESC [ */ + ESgetpars, /* About to get or getting the parameters */ + ESgotpars, /* Finished getting the parameters */ + ESfunckey, /* Function key */ + EShash, /* DEC-specific stuff (screen align, etc.) 
*/ + ESsetG0, /* Specify the G0 character set */ + ESsetG1, /* Specify the G1 character set */ + ESask, + EScharsize, + ESignore /* Ignore this sequence */ +} gc_vt100state = ESnormal; + +static int gc_wrap_mode = 1, gc_relative_origin = 0; +static int gc_charset_select = 0, gc_save_charset_s = 0; +static int gc_charset[2] = { 0, 0 }; +static int gc_charset_save[2] = { 0, 0 }; + +static void gc_clear_line(int xx, int yy, int which); +static void gc_clear_screen(int xx, int yy, int top, int bottom, int which); +static void gc_enable(boolean_t enable); +static void gc_hide_cursor(int xx, int yy); +static void gc_initialize(struct vc_info * info); +static void gc_paint_char(int xx, int yy, unsigned char ch, int attrs); +static void gc_putchar(char ch); +static void gc_putc_askcmd(unsigned char ch); +static void gc_putc_charsetcmd(int charset, unsigned char ch); +static void gc_putc_charsizecmd(unsigned char ch); +static void gc_putc_esc(unsigned char ch); +static void gc_putc_getpars(unsigned char ch); +static void gc_putc_gotpars(unsigned char ch); +static void gc_putc_normal(unsigned char ch); +static void gc_putc_square(unsigned char ch); +static void gc_refresh_screen(void); +static void gc_reset_screen(void); +static void gc_reset_tabs(void); +static void gc_reset_vt100(void); +static void gc_scroll_down(int num, int top, int bottom); +static void gc_scroll_up(int num, int top, int bottom); +static void gc_show_cursor(int xx, int yy); +static void gc_update_color(int color, boolean_t fore); +extern int vcputc(int l, int u, int c); + +static void +gc_clear_line(int xx, int yy, int which) +{ + int start, end, i; + + /* + * This routine runs extremely slowly. I don't think it's + * used all that often, except for To end of line. I'll go + * back and speed this up when I speed up the whole vc + * module. 
--LK + */ + + switch (which) { + case 0: /* To end of line */ + start = xx; + end = vinfo.v_columns-1; + break; + case 1: /* To start of line */ + start = 0; + end = xx; + break; + case 2: /* Whole line */ + start = 0; + end = vinfo.v_columns-1; + break; + } + + for (i = start; i <= end; i++) { + gc_paint_char(i, yy, ' ', ATTR_NONE); + } + +} + +static void +gc_clear_screen(int xx, int yy, int top, int bottom, int which) +{ + spl_t s; + + s = splhigh(); + simple_lock(&gc_buffer_lock); + + if ( xx < gc_buffer_columns && yy < gc_buffer_rows && bottom <= gc_buffer_rows ) + { + unsigned long start, end; + + switch (which) { + case 0: /* To end of screen */ + start = (yy * gc_buffer_columns) + xx; + end = (bottom * gc_buffer_columns) - 1; + break; + case 1: /* To start of screen */ + start = (top * gc_buffer_columns); + end = (yy * gc_buffer_columns) + xx; + break; + case 2: /* Whole screen */ + start = (top * gc_buffer_columns); + end = (bottom * gc_buffer_columns) - 1; + break; + } + + memset(gc_buffer_attributes + start, 0x00, end - start + 1); + memset(gc_buffer_characters + start, 0x00, end - start + 1); + memset(gc_buffer_colorcodes + start, gc_color_code, end - start + 1); + } + + simple_unlock(&gc_buffer_lock); + splx(s); + + gc_ops.clear_screen(xx, yy, top, bottom, which); +} + +static void +gc_enable( boolean_t enable ) +{ + unsigned char * buffer_attributes; + unsigned char * buffer_characters; + unsigned char * buffer_colorcodes; + unsigned long buffer_columns; + unsigned long buffer_rows; + unsigned long buffer_size; + + spl_t s; + + if ( enable == FALSE ) + { + disableConsoleOutput = TRUE; + gc_enabled = FALSE; + gc_ops.enable(FALSE); + } + + s = splhigh( ); + simple_lock( &gc_buffer_lock ); + + if ( gc_buffer_size ) + { + buffer_attributes = gc_buffer_attributes; + buffer_characters = gc_buffer_characters; + buffer_colorcodes = gc_buffer_colorcodes; + buffer_size = gc_buffer_size; + + gc_buffer_attributes = NULL; + gc_buffer_characters = NULL; + gc_buffer_colorcodes = NULL; + gc_buffer_columns = 0; + gc_buffer_rows = 0; + gc_buffer_size = 0; + + simple_unlock( &gc_buffer_lock ); + splx( s ); + + kfree( (vm_offset_t)buffer_attributes, buffer_size ); + kfree( (vm_offset_t)buffer_characters, buffer_size ); + kfree( (vm_offset_t)buffer_colorcodes, buffer_size ); + } + else + { + simple_unlock( &gc_buffer_lock ); + splx( s ); + } + + if ( enable ) + { + if ( vm_initialized ) + { + buffer_columns = vinfo.v_columns; + buffer_rows = vinfo.v_rows; + buffer_size = buffer_columns * buffer_rows; + + if ( buffer_size ) + { + buffer_attributes = (unsigned char *) kalloc( buffer_size ); + buffer_characters = (unsigned char *) kalloc( buffer_size ); + buffer_colorcodes = (unsigned char *) kalloc( buffer_size ); + + if ( buffer_attributes == NULL || + buffer_characters == NULL || + buffer_colorcodes == NULL ) + { + if ( buffer_attributes ) kfree( (vm_offset_t)buffer_attributes, buffer_size ); + if ( buffer_characters ) kfree( (vm_offset_t)buffer_characters, buffer_size ); + if ( buffer_colorcodes ) kfree( (vm_offset_t)buffer_colorcodes, buffer_size ); + + buffer_columns = 0; + buffer_rows = 0; + buffer_size = 0; + } + else + { + memset( buffer_attributes, 0x00, buffer_size ); + memset( buffer_characters, 0x00, buffer_size ); + memset( buffer_colorcodes, 0x0F, buffer_size ); + } + } + } + + s = splhigh( ); + simple_lock( &gc_buffer_lock ); + + gc_buffer_attributes = buffer_attributes; + gc_buffer_characters = buffer_characters; + gc_buffer_colorcodes = buffer_colorcodes; + gc_buffer_columns = 
buffer_columns; + gc_buffer_rows = buffer_rows; + gc_buffer_size = buffer_size; + + simple_unlock( &gc_buffer_lock ); + splx( s ); + + gc_reset_screen(); + + gc_ops.enable(TRUE); + gc_enabled = TRUE; + disableConsoleOutput = FALSE; + } +} + +static void +gc_hide_cursor(int xx, int yy) +{ + spl_t s; + + s = splhigh(); + simple_lock(&gc_buffer_lock); + + if ( xx < gc_buffer_columns && yy < gc_buffer_rows ) + { + unsigned long index = (yy * gc_buffer_columns) + xx; + unsigned char attribute = gc_buffer_attributes[index]; + unsigned char character = gc_buffer_characters[index]; + unsigned char colorcode = gc_buffer_colorcodes[index]; + unsigned char colorcodesave = gc_color_code; + + simple_unlock(&gc_buffer_lock); + splx(s); + + gc_update_color(COLOR_CODE_GET(colorcode, TRUE ), TRUE ); + gc_update_color(COLOR_CODE_GET(colorcode, FALSE), FALSE); + + gc_ops.paint_char(xx, yy, character, attribute, 0, 0); + + gc_update_color(COLOR_CODE_GET(colorcodesave, TRUE ), TRUE ); + gc_update_color(COLOR_CODE_GET(colorcodesave, FALSE), FALSE); + } + else + { + simple_unlock(&gc_buffer_lock); + splx(s); + + gc_ops.hide_cursor(xx, yy); + } +} + +static void +gc_initialize(struct vc_info * info) +{ + if ( gc_initialized == FALSE ) + { + /* Init our lock */ + simple_lock_init(&gc_buffer_lock, ETAP_IO_TTY); + + gc_initialized = TRUE; + } + + gc_ops.initialize(info); + + gc_reset_vt100(); + gc_x = gc_y = 0; +} + +static void +gc_paint_char(int xx, int yy, unsigned char ch, int attrs) +{ + spl_t s; + + s = splhigh(); + simple_lock(&gc_buffer_lock); + + if ( xx < gc_buffer_columns && yy < gc_buffer_rows ) + { + unsigned long index = (yy * gc_buffer_columns) + xx; + + gc_buffer_attributes[index] = attrs; + gc_buffer_characters[index] = ch; + gc_buffer_colorcodes[index] = gc_color_code; + } + + simple_unlock(&gc_buffer_lock); + splx(s); + + gc_ops.paint_char(xx, yy, ch, attrs, 0, 0); +} + +static void +gc_putchar(char ch) +{ + if (!ch) { + return; /* ignore null characters */ + } + switch (gc_vt100state) { + default:gc_vt100state = ESnormal; /* FALLTHROUGH */ + case ESnormal: + gc_putc_normal(ch); + break; + case ESesc: + gc_putc_esc(ch); + break; + case ESsquare: + gc_putc_square(ch); + break; + case ESgetpars: + gc_putc_getpars(ch); + break; + case ESgotpars: + gc_putc_gotpars(ch); + break; + case ESask: + gc_putc_askcmd(ch); + break; + case EScharsize: + gc_putc_charsizecmd(ch); + break; + case ESsetG0: + gc_putc_charsetcmd(0, ch); + break; + case ESsetG1: + gc_putc_charsetcmd(1, ch); + break; + } + + if (gc_x >= vinfo.v_columns) { + gc_x = vinfo.v_columns - 1; + } + if (gc_x < 0) { + gc_x = 0; + } + if (gc_y >= vinfo.v_rows) { + gc_y = vinfo.v_rows - 1; + } + if (gc_y < 0) { + gc_y = 0; + } + +} + +static void +gc_putc_askcmd(unsigned char ch) +{ + if (ch >= '0' && ch <= '9') { + gc_par[gc_numpars] = (10*gc_par[gc_numpars]) + (ch-'0'); + return; + } + gc_vt100state = ESnormal; + + switch (gc_par[0]) { + case 6: + gc_relative_origin = ch == 'h'; + break; + case 7: /* wrap around mode h=1, l=0*/ + gc_wrap_mode = ch == 'h'; + break; + default: + break; + } + +} + +static void +gc_putc_charsetcmd(int charset, unsigned char ch) +{ + gc_vt100state = ESnormal; + + switch (ch) { + case 'A' : + case 'B' : + default: + gc_charset[charset] = 0; + break; + case '0' : /* Graphic characters */ + case '2' : + gc_charset[charset] = 0x21; + break; + } + +} + +static void +gc_putc_charsizecmd(unsigned char ch) +{ + gc_vt100state = ESnormal; + + switch (ch) { + case '3' : + case '4' : + case '5' : + case '6' : + break; + case '8' 
: /* fill 'E's */ + { + int xx, yy; + for (yy = 0; yy < vinfo.v_rows; yy++) + for (xx = 0; xx < vinfo.v_columns; xx++) + gc_paint_char(xx, yy, 'E', ATTR_NONE); + } + break; + } + +} + +static void +gc_putc_esc(unsigned char ch) +{ + gc_vt100state = ESnormal; + + switch (ch) { + case '[': + gc_vt100state = ESsquare; + break; + case 'c': /* Reset terminal */ + gc_reset_vt100(); + gc_clear_screen(gc_x, gc_y, 0, vinfo.v_rows, 2); + gc_x = gc_y = 0; + break; + case 'D': /* Line feed */ + case 'E': + if (gc_y >= gc_scrreg_bottom -1) { + gc_scroll_up(1, gc_scrreg_top, gc_scrreg_bottom); + gc_y = gc_scrreg_bottom - 1; + } else { + gc_y++; + } + if (ch == 'E') gc_x = 0; + break; + case 'H': /* Set tab stop */ + gc_tab_stops[gc_x] = 1; + break; + case 'M': /* Cursor up */ + if (gc_y <= gc_scrreg_top) { + gc_scroll_down(1, gc_scrreg_top, gc_scrreg_bottom); + gc_y = gc_scrreg_top; + } else { + gc_y--; + } + break; + case '>': + gc_reset_vt100(); + break; + case '7': /* Save cursor */ + gc_savex = gc_x; + gc_savey = gc_y; + gc_saveattr = gc_attr; + gc_save_charset_s = gc_charset_select; + gc_charset_save[0] = gc_charset[0]; + gc_charset_save[1] = gc_charset[1]; + break; + case '8': /* Restore cursor */ + gc_x = gc_savex; + gc_y = gc_savey; + gc_attr = gc_saveattr; + gc_charset_select = gc_save_charset_s; + gc_charset[0] = gc_charset_save[0]; + gc_charset[1] = gc_charset_save[1]; + break; + case 'Z': /* return terminal ID */ + break; + case '#': /* change characters height */ + gc_vt100state = EScharsize; + break; + case '(': + gc_vt100state = ESsetG0; + break; + case ')': /* character set sequence */ + gc_vt100state = ESsetG1; + break; + case '=': + break; + default: + /* Rest not supported */ + break; + } + +} + +static void +gc_putc_getpars(unsigned char ch) +{ + if (ch == '?') { + gc_vt100state = ESask; + return; + } + if (ch == '[') { + gc_vt100state = ESnormal; + /* Not supported */ + return; + } + if (ch == ';' && gc_numpars < MAXPARS - 1) { + gc_numpars++; + } else + if (ch >= '0' && ch <= '9') { + gc_par[gc_numpars] *= 10; + gc_par[gc_numpars] += ch - '0'; + } else { + gc_numpars++; + gc_vt100state = ESgotpars; + gc_putc_gotpars(ch); + } +} + +static void +gc_putc_gotpars(unsigned char ch) +{ + int i; + + if (ch < ' ') { + /* special case for vttest for handling cursor + movement in escape sequences */ + gc_putc_normal(ch); + gc_vt100state = ESgotpars; + return; + } + gc_vt100state = ESnormal; + switch (ch) { + case 'A': /* Up */ + gc_y -= gc_par[0] ? gc_par[0] : 1; + if (gc_y < gc_scrreg_top) + gc_y = gc_scrreg_top; + break; + case 'B': /* Down */ + gc_y += gc_par[0] ? gc_par[0] : 1; + if (gc_y >= gc_scrreg_bottom) + gc_y = gc_scrreg_bottom - 1; + break; + case 'C': /* Right */ + gc_x += gc_par[0] ? gc_par[0] : 1; + if (gc_x >= vinfo.v_columns) + gc_x = vinfo.v_columns-1; + break; + case 'D': /* Left */ + gc_x -= gc_par[0] ? gc_par[0] : 1; + if (gc_x < 0) + gc_x = 0; + break; + case 'H': /* Set cursor position */ + case 'f': + gc_x = gc_par[1] ? gc_par[1] - 1 : 0; + gc_y = gc_par[0] ? 
gc_par[0] - 1 : 0; + if (gc_relative_origin) + gc_y += gc_scrreg_top; + gc_hanging_cursor = 0; + break; + case 'X': /* clear p1 characters */ + if (gc_numpars) { + int i; + for (i = gc_x; i < gc_x + gc_par[0]; i++) + gc_paint_char(i, gc_y, ' ', ATTR_NONE); + } + break; + case 'J': /* Clear part of screen */ + gc_clear_screen(gc_x, gc_y, 0, vinfo.v_rows, gc_par[0]); + break; + case 'K': /* Clear part of line */ + gc_clear_line(gc_x, gc_y, gc_par[0]); + break; + case 'g': /* tab stops */ + switch (gc_par[0]) { + case 1: + case 2: /* reset tab stops */ + /* gc_reset_tabs(); */ + break; + case 3: /* Clear every tabs */ + { + int i; + + for (i = 0; i <= vinfo.v_columns; i++) + gc_tab_stops[i] = 0; + } + break; + case 0: + gc_tab_stops[gc_x] = 0; + break; + } + break; + case 'm': /* Set attribute */ + for (i = 0; i < gc_numpars; i++) { + switch (gc_par[i]) { + case 0: + gc_attr = ATTR_NONE; + gc_update_color(COLOR_BACKGROUND, FALSE); + gc_update_color(COLOR_FOREGROUND, TRUE ); + break; + case 1: + gc_attr |= ATTR_BOLD; + break; + case 4: + gc_attr |= ATTR_UNDER; + break; + case 7: + gc_attr |= ATTR_REVERSE; + break; + case 22: + gc_attr &= ~ATTR_BOLD; + break; + case 24: + gc_attr &= ~ATTR_UNDER; + break; + case 27: + gc_attr &= ~ATTR_REVERSE; + break; + case 5: + case 25: /* blink/no blink */ + break; + default: + if (gc_par[i] >= 30 && gc_par[i] <= 37) + gc_update_color(gc_par[i] - 30, TRUE); + if (gc_par[i] >= 40 && gc_par[i] <= 47) + gc_update_color(gc_par[i] - 40, FALSE); + break; + } + } + break; + case 'r': /* Set scroll region */ + gc_x = gc_y = 0; + /* ensure top < bottom, and both within limits */ + if ((gc_numpars > 0) && (gc_par[0] < vinfo.v_rows)) { + gc_scrreg_top = gc_par[0] ? gc_par[0] - 1 : 0; + if (gc_scrreg_top < 0) + gc_scrreg_top = 0; + } else { + gc_scrreg_top = 0; + } + if ((gc_numpars > 1) && (gc_par[1] <= vinfo.v_rows) && (gc_par[1] > gc_par[0])) { + gc_scrreg_bottom = gc_par[1]; + if (gc_scrreg_bottom > vinfo.v_rows) + gc_scrreg_bottom = vinfo.v_rows; + } else { + gc_scrreg_bottom = vinfo.v_rows; + } + if (gc_relative_origin) + gc_y = gc_scrreg_top; + break; + } + +} + +static void +gc_putc_normal(unsigned char ch) +{ + switch (ch) { + case '\a': /* Beep */ + break; + case 127: /* Delete */ + case '\b': /* Backspace */ + if (gc_hanging_cursor) { + gc_hanging_cursor = 0; + } else + if (gc_x > 0) { + gc_x--; + } + break; + case '\t': /* Tab */ + while (gc_x < vinfo.v_columns && !gc_tab_stops[++gc_x]); + if (gc_x >= vinfo.v_columns) + gc_x = vinfo.v_columns-1; + break; + case 0x0b: + case 0x0c: + case '\n': /* Line feed */ + if (gc_y >= gc_scrreg_bottom -1 ) { + gc_scroll_up(1, gc_scrreg_top, gc_scrreg_bottom); + gc_y = gc_scrreg_bottom - 1; + } else { + gc_y++; + } + break; + case '\r': /* Carriage return */ + gc_x = 0; + gc_hanging_cursor = 0; + break; + case 0x0e: /* Select G1 charset (Control-N) */ + gc_charset_select = 1; + break; + case 0x0f: /* Select G0 charset (Control-O) */ + gc_charset_select = 0; + break; + case 0x18 : /* CAN : cancel */ + case 0x1A : /* like cancel */ + /* well, i do nothing here, may be later */ + break; + case '\033': /* Escape */ + gc_vt100state = ESesc; + gc_hanging_cursor = 0; + break; + default: + if (ch >= ' ') { + if (gc_hanging_cursor) { + gc_x = 0; + if (gc_y >= gc_scrreg_bottom -1 ) { + gc_scroll_up(1, gc_scrreg_top, gc_scrreg_bottom); + gc_y = gc_scrreg_bottom - 1; + } else { + gc_y++; + } + gc_hanging_cursor = 0; + } + gc_paint_char(gc_x, gc_y, (ch >= 0x60 && ch <= 0x7f) ? 
ch + gc_charset[gc_charset_select] + : ch, gc_attr); + if (gc_x == vinfo.v_columns - 1) { + gc_hanging_cursor = gc_wrap_mode; + } else { + gc_x++; + } + } + break; + } + +} + +static void +gc_putc_square(unsigned char ch) +{ + int i; + + for (i = 0; i < MAXPARS; i++) { + gc_par[i] = 0; + } + + gc_numpars = 0; + gc_vt100state = ESgetpars; + + gc_putc_getpars(ch); + +} + +static void +gc_refresh_screen(void) +{ + spl_t s; + + s = splhigh(); + simple_lock(&gc_buffer_lock); + + if ( gc_buffer_size ) + { + unsigned char colorcodesave = gc_color_code; + unsigned long column, row; + unsigned long index; + + for ( index = 0, row = 0 ; row < gc_buffer_rows ; row++ ) + { + for ( column = 0 ; column < gc_buffer_columns ; index++, column++ ) + { + if ( gc_buffer_colorcodes[index] != gc_color_code ) + { + gc_update_color(COLOR_CODE_GET(gc_buffer_colorcodes[index], TRUE ), TRUE ); + gc_update_color(COLOR_CODE_GET(gc_buffer_colorcodes[index], FALSE), FALSE); + } + + gc_ops.paint_char(column, row, gc_buffer_characters[index], gc_buffer_attributes[index], 0, 0); + } + } + + if ( colorcodesave != gc_color_code ) + { + gc_update_color(COLOR_CODE_GET(colorcodesave, TRUE ), TRUE ); + gc_update_color(COLOR_CODE_GET(colorcodesave, FALSE), FALSE); + } + } + + simple_unlock(&gc_buffer_lock); + splx(s); +} + +static void +gc_reset_screen(void) +{ + gc_hide_cursor(gc_x, gc_y); + gc_reset_vt100(); + gc_x = gc_y = 0; + gc_clear_screen(gc_x, gc_y, 0, vinfo.v_rows, 2); + gc_show_cursor(gc_x, gc_y); +} + +static void +gc_reset_tabs(void) +{ + int i; + + for (i = 0; i<= vinfo.v_columns; i++) { + gc_tab_stops[i] = ((i % 8) == 0); + } + +} + +static void +gc_reset_vt100(void) +{ + gc_reset_tabs(); + gc_scrreg_top = 0; + gc_scrreg_bottom = vinfo.v_rows; + gc_attr = ATTR_NONE; + gc_charset[0] = gc_charset[1] = 0; + gc_charset_select = 0; + gc_wrap_mode = 1; + gc_relative_origin = 0; + gc_update_color(COLOR_BACKGROUND, FALSE); + gc_update_color(COLOR_FOREGROUND, TRUE); +} + +static void +gc_scroll_down(int num, int top, int bottom) +{ + spl_t s; + + s = splhigh(); + simple_lock(&gc_buffer_lock); + + if ( bottom <= gc_buffer_rows ) + { + unsigned char colorcodesave = gc_color_code; + unsigned long column, row; + unsigned long index, jump; + + jump = num * gc_buffer_columns; + + for ( row = bottom - 1 ; row >= top + num ; row-- ) + { + index = row * gc_buffer_columns; + + for ( column = 0 ; column < gc_buffer_columns ; index++, column++ ) + { + if ( gc_buffer_attributes[index] != gc_buffer_attributes[index - jump] || + gc_buffer_characters[index] != gc_buffer_characters[index - jump] || + gc_buffer_colorcodes[index] != gc_buffer_colorcodes[index - jump] ) + { + if ( gc_color_code != gc_buffer_colorcodes[index - jump] ) + { + gc_update_color(COLOR_CODE_GET(gc_buffer_colorcodes[index - jump], TRUE ), TRUE ); + gc_update_color(COLOR_CODE_GET(gc_buffer_colorcodes[index - jump], FALSE), FALSE); + } + + if ( gc_buffer_colorcodes[index] != gc_buffer_colorcodes[index - jump] ) + { + gc_ops.paint_char( /* xx */ column, + /* yy */ row, + /* ch */ gc_buffer_characters[index - jump], + /* attrs */ gc_buffer_attributes[index - jump], + /* ch_previous */ 0, + /* attrs_previous */ 0 ); + } + else + { + gc_ops.paint_char( /* xx */ column, + /* yy */ row, + /* ch */ gc_buffer_characters[index - jump], + /* attrs */ gc_buffer_attributes[index - jump], + /* ch_previous */ gc_buffer_characters[index], + /* attrs_previous */ gc_buffer_attributes[index] ); + } + + gc_buffer_attributes[index] = gc_buffer_attributes[index - jump]; + 
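/*
 * The scroll is performed against the shadow buffer: jump is the scroll
 * distance in cells (num rows * gc_buffer_columns), each destination
 * cell is compared with its source num rows away, and only cells that
 * actually differ get repainted before the adjacent shadow-buffer
 * assignments bring the buffer up to date.  Passing the old character
 * and attributes as ch_previous/attrs_previous lets the back-end paint
 * incrementally when the color code is unchanged.
 */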
gc_buffer_characters[index] = gc_buffer_characters[index - jump]; + gc_buffer_colorcodes[index] = gc_buffer_colorcodes[index - jump]; + } + } + } + + if ( colorcodesave != gc_color_code ) + { + gc_update_color(COLOR_CODE_GET(colorcodesave, TRUE ), TRUE ); + gc_update_color(COLOR_CODE_GET(colorcodesave, FALSE), FALSE); + } + + simple_unlock(&gc_buffer_lock); + splx(s); + } + else + { + simple_unlock(&gc_buffer_lock); + splx(s); + + gc_ops.scroll_down(num, top, bottom); + } + + /* Now set the freed up lines to the background colour */ + + gc_clear_screen(vinfo.v_columns - 1, top + num - 1, top, bottom, 1); +} + +static void +gc_scroll_up(int num, int top, int bottom) +{ + spl_t s; + + s = splhigh(); + simple_lock(&gc_buffer_lock); + + if ( bottom <= gc_buffer_rows ) + { + unsigned char colorcodesave = gc_color_code; + unsigned long column, row; + unsigned long index, jump; + + jump = num * gc_buffer_columns; + + for ( row = top ; row < bottom - num ; row++ ) + { + index = row * gc_buffer_columns; + + for ( column = 0 ; column < gc_buffer_columns ; index++, column++ ) + { + if ( gc_buffer_attributes[index] != gc_buffer_attributes[index + jump] || + gc_buffer_characters[index] != gc_buffer_characters[index + jump] || + gc_buffer_colorcodes[index] != gc_buffer_colorcodes[index + jump] ) + { + if ( gc_color_code != gc_buffer_colorcodes[index + jump] ) + { + gc_update_color(COLOR_CODE_GET(gc_buffer_colorcodes[index + jump], TRUE ), TRUE ); + gc_update_color(COLOR_CODE_GET(gc_buffer_colorcodes[index + jump], FALSE), FALSE); + } + + if ( gc_buffer_colorcodes[index] != gc_buffer_colorcodes[index + jump] ) + { + gc_ops.paint_char( /* xx */ column, + /* yy */ row, + /* ch */ gc_buffer_characters[index + jump], + /* attrs */ gc_buffer_attributes[index + jump], + /* ch_previous */ 0, + /* attrs_previous */ 0 ); + } + else + { + gc_ops.paint_char( /* xx */ column, + /* yy */ row, + /* ch */ gc_buffer_characters[index + jump], + /* attrs */ gc_buffer_attributes[index + jump], + /* ch_previous */ gc_buffer_characters[index], + /* attrs_previous */ gc_buffer_attributes[index] ); + } + + gc_buffer_attributes[index] = gc_buffer_attributes[index + jump]; + gc_buffer_characters[index] = gc_buffer_characters[index + jump]; + gc_buffer_colorcodes[index] = gc_buffer_colorcodes[index + jump]; + + } + } + } + + if ( colorcodesave != gc_color_code ) + { + gc_update_color(COLOR_CODE_GET(colorcodesave, TRUE ), TRUE ); + gc_update_color(COLOR_CODE_GET(colorcodesave, FALSE), FALSE); + } + + simple_unlock(&gc_buffer_lock); + splx(s); + } + else + { + simple_unlock(&gc_buffer_lock); + splx(s); + + gc_ops.scroll_up(num, top, bottom); + } + + /* Now set the freed up lines to the background colour */ + + gc_clear_screen(0, bottom - num, top, bottom, 0); +} + +static void +gc_show_cursor(int xx, int yy) +{ + spl_t s; + + s = splhigh(); + simple_lock(&gc_buffer_lock); + + if ( xx < gc_buffer_columns && yy < gc_buffer_rows ) + { + unsigned long index = (yy * gc_buffer_columns) + xx; + unsigned char attribute = gc_buffer_attributes[index]; + unsigned char character = gc_buffer_characters[index]; + unsigned char colorcode = gc_buffer_colorcodes[index]; + unsigned char colorcodesave = gc_color_code; + + simple_unlock(&gc_buffer_lock); + splx(s); + + gc_update_color(COLOR_CODE_GET(colorcode, FALSE), TRUE ); + gc_update_color(COLOR_CODE_GET(colorcode, TRUE ), FALSE); + + gc_ops.paint_char(xx, yy, character, attribute, 0, 0); + + gc_update_color(COLOR_CODE_GET(colorcodesave, TRUE ), TRUE ); + 
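/*
 * Reverse-video cursor: the paint above used the cell's background
 * color as the foreground and its foreground as the background (note
 * the swapped FALSE/TRUE arguments to COLOR_CODE_GET), so the cursor
 * is simply the character redrawn with its own colors exchanged; the
 * global gc_color_code is then restored from colorcodesave.
 */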
gc_update_color(COLOR_CODE_GET(colorcodesave, FALSE), FALSE); + } + else + { + simple_unlock(&gc_buffer_lock); + splx(s); + + gc_ops.show_cursor(xx, yy); + } +} + +static void +gc_update_color(int color, boolean_t fore) +{ + gc_color_code = COLOR_CODE_SET(gc_color_code, color, fore); + gc_ops.update_color(color, fore); +} + +int +vcputc(int l, int u, int c) +{ + if ( gc_enabled || debug_mode ) + { + gc_hide_cursor(gc_x, gc_y); + gc_putchar(c); + gc_show_cursor(gc_x, gc_y); + } + + return 0; +} + +/* + * Video Console (Back-End) + * ------------------------ + */ + +/* + * For the color support (Michel Pollet) + */ +static unsigned char vc_color_index_table[33] = + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 }; + +static unsigned long vc_colors[8][3] = { + { 0xFFFFFFFF, 0x00000000, 0x00000000 }, /* black */ + { 0x23232323, 0x7C007C00, 0x00FF0000 }, /* red */ + { 0xb9b9b9b9, 0x03e003e0, 0x0000FF00 }, /* green */ + { 0x05050505, 0x7FE07FE0, 0x00FFFF00 }, /* yellow */ + { 0xd2d2d2d2, 0x001f001f, 0x000000FF}, /* blue */ +// { 0x80808080, 0x31933193, 0x00666699 }, /* blue */ + { 0x18181818, 0x7C1F7C1F, 0x00FF00FF }, /* magenta */ + { 0xb4b4b4b4, 0x03FF03FF, 0x0000FFFF }, /* cyan */ + { 0x00000000, 0x7FFF7FFF, 0x00FFFFFF } /* white */ +}; + +static unsigned long vc_color_fore = 0; +static unsigned long vc_color_back = 0; + +/* + * New Rendering code from Michel Pollet + */ + +/* Rendered Font Buffer */ +static unsigned char *vc_rendered_font = NULL; + +/* Rendered Font Size */ +static unsigned long vc_rendered_font_size = 0; + +/* Size of a character in the table (bytes) */ +static int vc_rendered_char_size = 0; + +#define REN_MAX_DEPTH 32 +static unsigned char vc_rendered_char[ISO_CHAR_HEIGHT * ((REN_MAX_DEPTH / 8) * ISO_CHAR_WIDTH)]; + +static void vc_clear_screen(int xx, int yy, int scrreg_top, int scrreg_bottom, int which); +static void vc_enable(boolean_t enable); +static void vc_initialize(struct vc_info * vinfo_p); +static void vc_paint_char(int xx, int yy, unsigned char ch, int attrs, unsigned char ch_previous, int attrs_previous); +static void vc_paint_char_8(int xx, int yy, unsigned char ch, int attrs, unsigned char ch_previous, int attrs_previous); +static void vc_paint_char_16(int xx, int yy, unsigned char ch, int attrs, unsigned char ch_previous, int attrs_previous); +static void vc_paint_char_32(int xx, int yy, unsigned char ch, int attrs, unsigned char ch_previous, int attrs_previous); +static void vc_render_char(unsigned char ch, unsigned char *renderptr, short newdepth); +static void vc_render_font(short newdepth); +static void vc_reverse_cursor(int xx, int yy); +static void vc_scroll_down(int num, int scrreg_top, int scrreg_bottom); +static void vc_scroll_up(int num, int scrreg_top, int scrreg_bottom); +static void vc_update_color(int color, boolean_t fore); + +static void +vc_clear_screen(int xx, int yy, int scrreg_top, int scrreg_bottom, int which) +{ + unsigned long *p, *endp, *row; + int linelongs, col; + int rowline, rowlongs; + + if(!vinfo.v_depth) + return; + + linelongs = vinfo.v_rowbytes * (ISO_CHAR_HEIGHT >> 2); + rowline = vinfo.v_rowscanbytes >> 2; + rowlongs = vinfo.v_rowbytes >> 2; + + p = (unsigned long*) vinfo.v_baseaddr; + endp = (unsigned long*) vinfo.v_baseaddr; + + switch (which) { + case 0: /* To end of screen */ + gc_clear_line(xx, yy, 0); + if (yy < scrreg_bottom - 1) { + p += (yy + 1) * linelongs; + endp += scrreg_bottom * linelongs; + } + break; + case 1: /* To start of screen */ + 
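/*
 * Clear to start of screen: gc_clear_line wipes the partial row
 * containing the cursor, and the fill loop below blanks whole rows
 * from scrreg_top down to, but not including, row yy.  (rowline
 * counts only the visible words per scan line, v_rowscanbytes >> 2,
 * while rowlongs is the full row stride.)
 */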
gc_clear_line(xx, yy, 1); + if (yy > scrreg_top) { + p += scrreg_top * linelongs; + endp += yy * linelongs; + } + break; + case 2: /* Whole screen */ + p += scrreg_top * linelongs; + if (scrreg_bottom == vinfo.v_rows) { + endp += rowlongs * vinfo.v_height; + } else { + endp += scrreg_bottom * linelongs; + } + break; + } + + for (row = p ; row < endp ; row += rowlongs) { + for (col = 0; col < rowline; col++) + *(row+col) = vc_color_back; + } +} + +static void +vc_enable(boolean_t enable) +{ + if ( enable ) + { + vc_render_font(vinfo.v_depth); + } +} + +static void +vc_initialize(struct vc_info * vinfo_p) +{ + vinfo.v_rows = vinfo.v_height / ISO_CHAR_HEIGHT; + vinfo.v_columns = vinfo.v_width / ISO_CHAR_WIDTH; + vinfo.v_rowscanbytes = (vinfo.v_depth / 8) * vinfo.v_width; +} + +static void +vc_paint_char(int xx, int yy, unsigned char ch, int attrs, unsigned char ch_previous, int attrs_previous) +{ + if( !vinfo.v_depth) + return; + + switch( vinfo.v_depth) { + case 8: + vc_paint_char_8(xx, yy, ch, attrs, ch_previous, attrs_previous); + break; + case 16: + vc_paint_char_16(xx, yy, ch, attrs, ch_previous, attrs_previous); + break; + case 32: + vc_paint_char_32(xx, yy, ch, attrs, ch_previous, attrs_previous); + break; + } +} + +static void +vc_paint_char_8(int xx, int yy, unsigned char ch, int attrs, unsigned char ch_previous, int attrs_previous) +{ + unsigned long *theChar; + unsigned long *where; + int i; + + if (vc_rendered_font) { + theChar = (unsigned long*)(vc_rendered_font + (ch * vc_rendered_char_size)); + } else { + vc_render_char(ch, vc_rendered_char, 8); + theChar = (unsigned long*)(vc_rendered_char); + } + where = (unsigned long*)(vinfo.v_baseaddr + + (yy * ISO_CHAR_HEIGHT * vinfo.v_rowbytes) + + (xx * ISO_CHAR_WIDTH)); + + if (!attrs) for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* No attr? FLY !*/ + unsigned long *store = where; + int x; + for (x = 0; x < 2; x++) { + unsigned long val = *theChar++; + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* a little slower */ + unsigned long *store = where, lastpixel = 0; + int x; + for (x = 0 ; x < 2; x++) { + unsigned long val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + if (lastpixel && !(save & 0xFF000000)) + val |= 0xff000000; + if ((save & 0xFFFF0000) == 0xFF000000) + val |= 0x00FF0000; + if ((save & 0x00FFFF00) == 0x00FF0000) + val |= 0x0000FF00; + if ((save & 0x0000FFFF) == 0x0000FF00) + val |= 0x000000FF; + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == ISO_CHAR_HEIGHT-1) val = ~val; + + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + lastpixel = save & 0xff; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} + +static void +vc_paint_char_16(int xx, int yy, unsigned char ch, int attrs, unsigned char ch_previous, int attrs_previous) +{ + unsigned long *theChar; + unsigned long *where; + int i; + + if (vc_rendered_font) { + theChar = (unsigned long*)(vc_rendered_font + (ch * vc_rendered_char_size)); + } else { + vc_render_char(ch, vc_rendered_char, 16); + theChar = (unsigned long*)(vc_rendered_char); + } + where = (unsigned long*)(vinfo.v_baseaddr + + (yy * ISO_CHAR_HEIGHT * vinfo.v_rowbytes) + + (xx * ISO_CHAR_WIDTH * 2)); + + if (!attrs) for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* No attrs ? FLY ! 
*/ + unsigned long *store = where; + int x; + for (x = 0; x < 4; x++) { + unsigned long val = *theChar++; + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* a little bit slower */ + unsigned long *store = where, lastpixel = 0; + int x; + for (x = 0 ; x < 4; x++) { + unsigned long val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + if (save == 0xFFFF0000) val |= 0xFFFF; + else if (lastpixel && !(save & 0xFFFF0000)) + val |= 0xFFFF0000; + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == ISO_CHAR_HEIGHT-1) val = ~val; + + val = (vc_color_back & ~val) | (vc_color_fore & val); + + *store++ = val; + lastpixel = save & 0x7fff; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} + +static void +vc_paint_char_32(int xx, int yy, unsigned char ch, int attrs, unsigned char ch_previous, int attrs_previous) +{ + unsigned long *theChar; + unsigned long *theCharPrevious; + unsigned long *where; + int i; + + if (vc_rendered_font) { + theChar = (unsigned long*)(vc_rendered_font + (ch * vc_rendered_char_size)); + theCharPrevious = (unsigned long*)(vc_rendered_font + (ch_previous * vc_rendered_char_size)); + } else { + vc_render_char(ch, vc_rendered_char, 32); + theChar = (unsigned long*)(vc_rendered_char); + theCharPrevious = NULL; + } + if (!ch_previous) { + theCharPrevious = NULL; + } + if (attrs_previous) { + theCharPrevious = NULL; + } + where = (unsigned long*)(vinfo.v_baseaddr + + (yy * ISO_CHAR_HEIGHT * vinfo.v_rowbytes) + + (xx * ISO_CHAR_WIDTH * 4)); + + if (!attrs) for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* No attrs ? FLY ! */ + unsigned long *store = where; + int x; + for (x = 0; x < 8; x++) { + unsigned long val = *theChar++; + if (theCharPrevious == NULL || val != *theCharPrevious++ ) { + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + } else { + store++; + } + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* a little slower */ + unsigned long *store = where, lastpixel = 0; + int x; + for (x = 0 ; x < 8; x++) { + unsigned long val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + if (lastpixel && !save) + val = 0xFFFFFFFF; + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == ISO_CHAR_HEIGHT-1) val = ~val; + + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + lastpixel = save; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} + +static void +vc_render_char(unsigned char ch, unsigned char *renderptr, short newdepth) +{ + union { + unsigned char *charptr; + unsigned short *shortptr; + unsigned long *longptr; + } current; /* current place in rendered font, multiple types. */ + + unsigned char *theChar; /* current char in iso_font */ + + int line; + + current.charptr = renderptr; + theChar = iso_font + (ch * ISO_CHAR_HEIGHT); + + for (line = 0; line < ISO_CHAR_HEIGHT; line++) { + unsigned char mask = 1; + do { + switch (newdepth) { + case 8: + *current.charptr++ = (*theChar & mask) ? 0xFF : 0; + break; + case 16: + *current.shortptr++ = (*theChar & mask) ? 0xFFFF : 0; + break; + + case 32: + *current.longptr++ = (*theChar & mask) ? 
0xFFFFFFFF : 0; + break; + } + mask <<= 1; + } while (mask); /* while the single bit drops to the right */ + theChar++; + } +} + +static void +vc_render_font(short newdepth) +{ + static short olddepth = 0; + + int charindex; /* index in ISO font */ + + if (vm_initialized == FALSE) { + return; /* nothing to do */ + } + if (olddepth == newdepth && vc_rendered_font) { + return; /* nothing to do */ + } + if (vc_rendered_font) { + kfree((vm_offset_t)vc_rendered_font, vc_rendered_font_size); + } + + vc_rendered_char_size = ISO_CHAR_HEIGHT * ((newdepth / 8) * ISO_CHAR_WIDTH); + vc_rendered_font_size = (ISO_CHAR_MAX-ISO_CHAR_MIN+1) * vc_rendered_char_size; + vc_rendered_font = (unsigned char *) kalloc(vc_rendered_font_size); + + if (vc_rendered_font == NULL) { + vc_rendered_font_size = 0; + return; + } + + for (charindex = ISO_CHAR_MIN; charindex <= ISO_CHAR_MAX; charindex++) { + vc_render_char(charindex, vc_rendered_font + (charindex * vc_rendered_char_size), newdepth); + } + + olddepth = newdepth; +} + +static void +vc_reverse_cursor(int xx, int yy) +{ + unsigned long *where; + int line, col; + + if(!vinfo.v_depth) + return; + + where = (unsigned long*)(vinfo.v_baseaddr + + (yy * ISO_CHAR_HEIGHT * vinfo.v_rowbytes) + + (xx /** ISO_CHAR_WIDTH*/ * vinfo.v_depth)); + for (line = 0; line < ISO_CHAR_HEIGHT; line++) { + switch (vinfo.v_depth) { + case 8: + where[0] = ~where[0]; + where[1] = ~where[1]; + break; + case 16: + for (col = 0; col < 4; col++) + where[col] = ~where[col]; + break; + case 32: + for (col = 0; col < 8; col++) + where[col] = ~where[col]; + break; + } + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } +} + +static void +vc_scroll_down(int num, int scrreg_top, int scrreg_bottom) +{ + unsigned long *from, *to, linelongs, i, line, rowline, rowscanline; + + if(!vinfo.v_depth) + return; + + linelongs = vinfo.v_rowbytes * (ISO_CHAR_HEIGHT >> 2); + rowline = vinfo.v_rowbytes >> 2; + rowscanline = vinfo.v_rowscanbytes >> 2; + + to = (unsigned long *) vinfo.v_baseaddr + (linelongs * scrreg_bottom) + - (rowline - rowscanline); + from = to - (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ + + i = (scrreg_bottom - scrreg_top) - num; + + while (i-- > 0) { + for (line = 0; line < ISO_CHAR_HEIGHT; line++) { + /* + * Only copy what is displayed + */ + video_scroll_down((unsigned int) from, + (unsigned int) (from-(vinfo.v_rowscanbytes >> 2)), + (unsigned int) to); + + from -= rowline; + to -= rowline; + } + } +} + +static void +vc_scroll_up(int num, int scrreg_top, int scrreg_bottom) +{ + unsigned long *from, *to, linelongs, i, line, rowline, rowscanline; + + if(!vinfo.v_depth) + return; + + linelongs = vinfo.v_rowbytes * (ISO_CHAR_HEIGHT >> 2); + rowline = vinfo.v_rowbytes >> 2; + rowscanline = vinfo.v_rowscanbytes >> 2; + + to = (unsigned long *) vinfo.v_baseaddr + (scrreg_top * linelongs); + from = to + (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ + + i = (scrreg_bottom - scrreg_top) - num; + + while (i-- > 0) { + for (line = 0; line < ISO_CHAR_HEIGHT; line++) { + /* + * Only copy what is displayed + */ + video_scroll_up((unsigned int) from, + (unsigned int) (from+(vinfo.v_rowscanbytes >> 2)), + (unsigned int) to); + + from += rowline; + to += rowline; + } + } +} + +static void +vc_update_color(int color, boolean_t fore) +{ + if (!vinfo.v_depth) + return; + if (fore) { + vc_color_fore = vc_colors[color][vc_color_index_table[vinfo.v_depth]]; + } else { + vc_color_back = vc_colors[color][vc_color_index_table[vinfo.v_depth]]; + } 
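/*
 * Color plumbing, front to back: the generic layer packs both colors
 * of a cell into one byte -- COLOR_CODE_SET stores the foreground in
 * the high nibble and the background in the low nibble, so white on
 * blue is (7 << 4) | 4 == 0x74, and COLOR_CODE_GET(0x74, TRUE) == 7.
 * This routine then turns an ANSI color number into a pixel pattern:
 * vc_color_index_table maps the depth (8, 16, or 32) to a column of
 * vc_colors -- 8-bit CLUT values, 15-bit 5:5:5 values, and 24-bit RGB
 * respectively -- each replicated to fill 32 bits so the paint
 * routines can mask whole words at a time.
 */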
+} + +/* + * Video Console (Back-End): Icon Control + * -------------------------------------- + */ + +struct vc_progress_element { + unsigned int version; + unsigned int flags; + unsigned int time; + unsigned char count; + unsigned char res[3]; + int width; + int height; + int dx; + int dy; + int transparent; + unsigned int res2[3]; + unsigned char data[0]; +}; +typedef struct vc_progress_element vc_progress_element; + +static vc_progress_element * vc_progress; +static const unsigned char * vc_progress_data; +static const unsigned char * vc_progress_alpha; +static boolean_t vc_progress_enable; +static const unsigned char * vc_clut; +static const unsigned char * vc_clut8; +static unsigned char vc_revclut8[256]; +static unsigned int vc_progress_tick; +static boolean_t vc_needsave; +static vm_address_t vc_saveunder; +static vm_size_t vc_saveunder_len; +decl_simple_lock_data(,vc_progress_lock) + +static void vc_blit_rect( int x, int y, int width, int height, + const unsigned char * dataPtr, const unsigned char * alphaPtr, + vm_address_t backBuffer, boolean_t save, boolean_t static_alpha ); +static void vc_blit_rect_8( int x, int y, int width, int height, + const unsigned char * dataPtr, const unsigned char * alphaPtr, + unsigned char * backBuffer, boolean_t save, boolean_t static_alpha ); +static void vc_blit_rect_16( int x, int y, int width, int height, + const unsigned char * dataPtr, const unsigned char * alphaPtr, + unsigned short * backBuffer, boolean_t save, boolean_t static_alpha ); +static void vc_blit_rect_32( int x, int y, int width, int height, + const unsigned char * dataPtr, const unsigned char * alphaPtr, + unsigned int * backBuffer, boolean_t save, boolean_t static_alpha ); +extern void vc_display_icon( vc_progress_element * desc, const unsigned char * data ); +extern void vc_progress_initialize( vc_progress_element * desc, const unsigned char * data, const unsigned char * clut ); +static void vc_progress_set( boolean_t enable, unsigned int initial_tick ); +static void vc_progress_task( void * arg ); + +static void vc_blit_rect( int x, int y, + int width, int height, + const unsigned char * dataPtr, + const unsigned char * alphaPtr, + vm_address_t backBuffer, + boolean_t save, boolean_t static_alpha ) +{ + if(!vinfo.v_depth) + return; + + switch( vinfo.v_depth) { + case 8: + if( vc_clut8 == vc_clut) + vc_blit_rect_8( x, y, width, height, dataPtr, alphaPtr, (unsigned char *) backBuffer, save, static_alpha ); + break; + case 16: + vc_blit_rect_16( x, y, width, height, dataPtr, alphaPtr, (unsigned short *) backBuffer, save, static_alpha ); + break; + case 32: + vc_blit_rect_32( x, y, width, height, dataPtr, alphaPtr, (unsigned int *) backBuffer, save, static_alpha ); + break; + } +} + +static void vc_blit_rect_8( int x, int y, + int width, int height, + const unsigned char * dataPtr, + const unsigned char * alphaPtr, + unsigned char * backPtr, + boolean_t save, boolean_t static_alpha ) +{ + volatile unsigned char * dst; + int line, col; + unsigned int data; + + dst = (unsigned char *)(vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x)); + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + data = 0; + if( dataPtr != 0) data = *dataPtr++; + else if( alphaPtr != 0) data = vc_revclut8[*alphaPtr++]; + *(dst + col) = data; + } + dst = (volatile unsigned char *) (((int)dst) + vinfo.v_rowbytes); + } +} + +static void vc_blit_rect_16( int x, int y, + int width, int height, + const unsigned char * dataPtr, + const unsigned char * alphaPtr, + unsigned short * 
backPtr, + boolean_t save, boolean_t static_alpha ) +{ + volatile unsigned short * dst; + int line, col; + unsigned int data, index, alpha, back; + + dst = (volatile unsigned short *)(vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x * 2)); + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + if( dataPtr != 0) { + index = *dataPtr++; + index *= 3; + } + + if( alphaPtr && backPtr) { + + alpha = *alphaPtr++; + data = 0; + if( dataPtr != 0) { + if( vc_clut[index + 0] > alpha) + data |= (((vc_clut[index + 0] - alpha) & 0xf8) << 7); + if( vc_clut[index + 1] > alpha) + data |= (((vc_clut[index + 1] - alpha) & 0xf8) << 2); + if( vc_clut[index + 2] > alpha) + data |= (((vc_clut[index + 2] - alpha) & 0xf8) >> 3); + } + + if( save) { + back = *(dst + col); + if ( !static_alpha) + *backPtr++ = back; + back = (((((back & 0x7c00) * alpha) + 0x3fc00) >> 8) & 0x7c00) + | (((((back & 0x03e0) * alpha) + 0x01fe0) >> 8) & 0x03e0) + | (((((back & 0x001f) * alpha) + 0x000ff) >> 8) & 0x001f); + if ( static_alpha) + *backPtr++ = back; + } else { + back = *backPtr++; + if ( !static_alpha) { + back = (((((back & 0x7c00) * alpha) + 0x3fc00) >> 8) & 0x7c00) + | (((((back & 0x03e0) * alpha) + 0x01fe0) >> 8) & 0x03e0) + | (((((back & 0x001f) * alpha) + 0x000ff) >> 8) & 0x001f); + } + } + + data += back; + + } else + if( dataPtr != 0) { + data = ( (0xf8 & (vc_clut[index + 0])) << 7) + | ( (0xf8 & (vc_clut[index + 1])) << 2) + | ( (0xf8 & (vc_clut[index + 2])) >> 3); + } + + *(dst + col) = data; + } + dst = (volatile unsigned short *) (((int)dst) + vinfo.v_rowbytes); + } +} + +static void vc_blit_rect_32( int x, int y, + int width, int height, + const unsigned char * dataPtr, + const unsigned char * alphaPtr, + unsigned int * backPtr, + boolean_t save, boolean_t static_alpha ) +{ + volatile unsigned int * dst; + int line, col; + unsigned int data, index, alpha, back; + + dst = (volatile unsigned int *) (vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x * 4)); + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + if( dataPtr != 0) { + index = *dataPtr++; + index *= 3; + } + + if( alphaPtr && backPtr) { + + alpha = *alphaPtr++; + data = 0; + if( dataPtr != 0) { + if( vc_clut[index + 0] > alpha) + data |= ((vc_clut[index + 0] - alpha) << 16); + if( vc_clut[index + 1] > alpha) + data |= ((vc_clut[index + 1] - alpha) << 8); + if( vc_clut[index + 2] > alpha) + data |= ((vc_clut[index + 2] - alpha)); + } + + if( save) { + back = *(dst + col); + if ( !static_alpha) + *backPtr++ = back; + back = (((((back & 0x00ff00ff) * alpha) + 0x00ff00ff) >> 8) & 0x00ff00ff) + | (((((back & 0x0000ff00) * alpha) + 0x0000ff00) >> 8) & 0x0000ff00); + if ( static_alpha) + *backPtr++ = back; + } else { + back = *backPtr++; + if ( !static_alpha) { + back = (((((back & 0x00ff00ff) * alpha) + 0x00ff00ff) >> 8) & 0x00ff00ff) + | (((((back & 0x0000ff00) * alpha) + 0x0000ff00) >> 8) & 0x0000ff00); + } + } + + data += back; + + } else + if( dataPtr != 0) { + data = (vc_clut[index + 0] << 16) + | (vc_clut[index + 1] << 8) + | (vc_clut[index + 2]); + } + + *(dst + col) = data; + } + dst = (volatile unsigned int *) (((int)dst) + vinfo.v_rowbytes); + } +} + +void vc_display_icon( vc_progress_element * desc, + const unsigned char * data ) +{ + int x, y, width, height; + + if( vc_progress_enable && vc_clut) { + + width = desc->width; + height = desc->height; + x = desc->dx; + y = desc->dy; + if( 1 & desc->flags) { + x += ((vinfo.v_width - width) / 2); + y += ((vinfo.v_height - height) / 2); + } + 
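/*
 * desc->flags bit 0 centers the icon on screen (applied just above);
 * bit 1 means a per-pixel alpha plane follows the pixel data (tested
 * in vc_progress_initialize below); bit 2 appears to select whether
 * the saved background must be re-blended on every animation frame.
 * The 16- and 32-bit blitters scale the saved background by alpha in
 * place, per 8-bit channel:
 *
 *	c' = ((c * alpha) + 0xff) >> 8
 *
 * with red and blue sharing a single multiply in vc_blit_rect_32 (the
 * 0x00ff00ff mask) and the rounding bias 0xff shifted into each 5:5:5
 * field in vc_blit_rect_16 (0x3fc00, 0x01fe0, 0x000ff).
 */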
vc_blit_rect( x, y, width, height, data, NULL, (vm_address_t) NULL, FALSE, TRUE ); + } +} + +void +vc_progress_initialize( vc_progress_element * desc, + const unsigned char * data, + const unsigned char * clut ) +{ + if( (!clut) || (!desc) || (!data)) + return; + vc_clut = clut; + vc_clut8 = clut; + + vc_progress = desc; + vc_progress_data = data; + if( 2 & vc_progress->flags) + vc_progress_alpha = vc_progress_data + + vc_progress->count * vc_progress->width * vc_progress->height; + else + vc_progress_alpha = NULL; + vc_progress_tick = vc_progress->time * hz / 1000; + + simple_lock_init(&vc_progress_lock, ETAP_IO_TTY); +} + +static void +vc_progress_set( boolean_t enable, unsigned int initial_tick ) +{ + spl_t s; + vm_address_t saveBuf = 0; + vm_size_t saveLen = 0; + unsigned int count; + unsigned int index; + unsigned char pdata8; + unsigned short pdata16; + unsigned short * buf16; + unsigned int pdata32; + unsigned int * buf32; + + if( !vc_progress) + return; + + if( enable) { + saveLen = vc_progress->width * vc_progress->height * vinfo.v_depth / 8; + saveBuf = kalloc( saveLen ); + + switch( vinfo.v_depth) { + case 8 : + for( count = 0; count < 256; count++) { + vc_revclut8[count] = vc_clut[0x01 * 3]; + pdata8 = (vc_clut[0x01 * 3] * count + 0x0ff) >> 8; + for( index = 0; index < 256; index++) { + if( (pdata8 == vc_clut[index * 3 + 0]) && + (pdata8 == vc_clut[index * 3 + 1]) && + (pdata8 == vc_clut[index * 3 + 2])) { + vc_revclut8[count] = index; + break; + } + } + } + memset( (void *) saveBuf, 0x01, saveLen ); + break; + + case 16 : + buf16 = (unsigned short *) saveBuf; + pdata16 = ((vc_clut[0x01 * 3 + 0] & 0xf8) << 7) + | ((vc_clut[0x01 * 3 + 0] & 0xf8) << 2) + | ((vc_clut[0x01 * 3 + 0] & 0xf8) >> 3); + for( count = 0; count < saveLen / 2; count++) + buf16[count] = pdata16; + break; + + case 32 : + buf32 = (unsigned int *) saveBuf; + pdata32 = ((vc_clut[0x01 * 3 + 0] & 0xff) << 16) + | ((vc_clut[0x01 * 3 + 1] & 0xff) << 8) + | ((vc_clut[0x01 * 3 + 2] & 0xff) << 0); + for( count = 0; count < saveLen / 4; count++) + buf32[count] = pdata32; + break; + } + } + + s = splhigh(); + simple_lock(&vc_progress_lock); + + if( vc_progress_enable != enable) { + vc_progress_enable = enable; + if( enable) { + vc_needsave = TRUE; + vc_saveunder = saveBuf; + vc_saveunder_len = saveLen; + saveBuf = 0; + saveLen = 0; + timeout(vc_progress_task, (void *) 0, + initial_tick ); + } else { + if( vc_saveunder) { + saveBuf = vc_saveunder; + saveLen = vc_saveunder_len; + vc_saveunder = 0; + vc_saveunder_len = 0; + } + untimeout( vc_progress_task, (void *) 0 ); + } + } + + simple_unlock(&vc_progress_lock); + splx(s); + + if( saveBuf) + kfree( saveBuf, saveLen ); +} + +static void vc_progress_task( void * arg ) +{ + spl_t s; + int count = (int) arg; + int x, y, width, height; + const unsigned char * data; + + s = splhigh(); + simple_lock(&vc_progress_lock); + + if( vc_progress_enable) { + + count++; + if( count >= vc_progress->count) + count = 0; + + width = vc_progress->width; + height = vc_progress->height; + x = vc_progress->dx; + y = vc_progress->dy; + data = vc_progress_data; + data += count * width * height; + if( 1 & vc_progress->flags) { + x += ((vinfo.v_width - width) / 2); + y += ((vinfo.v_height - height) / 2); + } + vc_blit_rect( x, y, width, height, + NULL, data, vc_saveunder, + vc_needsave, (0 == (4 & vc_progress->flags)) ); + vc_needsave = FALSE; + + timeout( vc_progress_task, (void *) count, + vc_progress_tick ); + } + simple_unlock(&vc_progress_lock); + splx(s); +} + +/* + * Generic Console 
(Front-End): Master Control + * ------------------------------------------- + */ + +#ifdef __i386__ +#include +#endif /* __i386__ */ + +static boolean_t gc_acquired = FALSE; +static boolean_t gc_graphics_boot = FALSE; + +static unsigned int lastVideoPhys = 0; +static unsigned int lastVideoVirt = 0; +static unsigned int lastVideoSize = 0; + +#ifdef __i386__ +void +initialize_screen(Boot_Video * boot_vinfo, unsigned int op) +{ + if ( boot_vinfo ) + { + vinfo.v_name[0] = 0; + vinfo.v_width = boot_vinfo->v_width; + vinfo.v_height = boot_vinfo->v_height; + vinfo.v_depth = boot_vinfo->v_depth; + vinfo.v_rowbytes = boot_vinfo->v_rowBytes; + vinfo.v_physaddr = boot_vinfo->v_baseAddr; + vinfo.v_baseaddr = vinfo.v_physaddr; + vinfo.v_type = boot_vinfo->v_display; + + if ( (vinfo.v_type == TEXT_MODE) ) + { + // Text mode setup by the booter. + gc_ops.initialize = tc_initialize; + gc_ops.enable = tc_enable; + gc_ops.paint_char = tc_paint_char; + gc_ops.clear_screen = tc_clear_screen; + gc_ops.scroll_down = tc_scroll_down; + gc_ops.scroll_up = tc_scroll_up; + gc_ops.hide_cursor = tc_hide_cursor; + gc_ops.show_cursor = tc_show_cursor; + gc_ops.update_color = tc_update_color; + } + else + + { + // Graphics mode setup by the booter. + gc_ops.initialize = vc_initialize; + gc_ops.enable = vc_enable; + gc_ops.paint_char = vc_paint_char; + gc_ops.scroll_down = vc_scroll_down; + gc_ops.scroll_up = vc_scroll_up; + gc_ops.clear_screen = vc_clear_screen; + gc_ops.hide_cursor = vc_reverse_cursor; + gc_ops.show_cursor = vc_reverse_cursor; + gc_ops.update_color = vc_update_color; + } + + gc_initialize(&vinfo); + +#ifdef GRATEFULDEBUGGER + GratefulDebInit((bootBumbleC *)boot_vinfo); /* Re-initialize GratefulDeb */ +#endif /* GRATEFULDEBUGGER */ + } + + switch ( op ) + { + case kPEGraphicsMode: + panicDialogDesired = TRUE; + gc_graphics_boot = TRUE; + break; + + case kPETextMode: + panicDialogDesired = FALSE; + gc_graphics_boot = FALSE; + break; + + case kPEAcquireScreen: + if ( gc_acquired ) break; + + vc_progress_set( gc_graphics_boot, 2 * hz ); + gc_enable( !gc_graphics_boot ); + gc_acquired = TRUE; + break; + + case kPEEnableScreen: + /* deprecated */ + break; + + case kPETextScreen: + panicDialogDesired = FALSE; + if ( gc_acquired == FALSE ) break; + if ( gc_graphics_boot == FALSE ) break; + + vc_progress_set( FALSE, 0 ); + gc_enable( TRUE ); + break; + + case kPEDisableScreen: + /* deprecated */ + /* skip break */ + + case kPEReleaseScreen: + gc_acquired = FALSE; + gc_enable( FALSE ); + vc_progress_set( FALSE, 0 ); + + vc_clut8 = NULL; +#ifdef GRATEFULDEBUGGER + GratefulDebInit(0); /* Stop grateful debugger */ +#endif /* GRATEFULDEBUGGER */ + break; + } +#ifdef GRATEFULDEBUGGER + if ( boot_vinfo ) GratefulDebInit((bootBumbleC *)boot_vinfo); /* Re initialize GratefulDeb */ +#endif /* GRATEFULDEBUGGER */ +} +#else +void +initialize_screen(Boot_Video * boot_vinfo, unsigned int op) +{ + unsigned int fbsize; + unsigned int newVideoVirt; + ppnum_t fbppage; + + if ( boot_vinfo ) + { +// bcopy((const void *)boot_vinfo, (void *)&boot_video_info, sizeof(boot_video_info)); + + /* + * First, check if we are changing the size and/or location of the framebuffer + */ + + vinfo.v_name[0] = 0; + vinfo.v_width = boot_vinfo->v_width; + vinfo.v_height = boot_vinfo->v_height; + vinfo.v_depth = boot_vinfo->v_depth; + vinfo.v_rowbytes = boot_vinfo->v_rowBytes; + vinfo.v_physaddr = boot_vinfo->v_baseAddr; /* Get the physical address */ + + kprintf("initialize_screen: b=%08X, w=%08X, h=%08X, r=%08X\n", /* (BRINGUP) */ + 
vinfo.v_physaddr, vinfo.v_width, vinfo.v_height, vinfo.v_rowbytes); /* (BRINGUP) */ + + if (!vinfo.v_physaddr) /* Check to see if we have a framebuffer */ + { + kprintf("initialize_screen: No video - forcing serial mode\n"); /* (BRINGUP) */ + vinfo.v_depth = 0; /* vc routines are nop */ + (void)switch_to_serial_console(); /* Switch into serial mode */ + gc_graphics_boot = FALSE; /* Say we are not in graphics mode */ + disableConsoleOutput = FALSE; /* Allow printfs to happen */ + gc_acquired = TRUE; + } + else + { + /* + * Note that for the first time only, boot_vinfo->v_baseAddr is physical. + */ + + if (kernel_map != VM_MAP_NULL) /* If VM is up, we are given a virtual address */ + { + fbppage = pmap_find_phys(kernel_pmap, (addr64_t)boot_vinfo->v_baseAddr); /* Get the physical address of frame buffer */ + if(!fbppage) /* Did we find it? */ + { + panic("initialize_screen: Strange framebuffer - addr = %08X\n", boot_vinfo->v_baseAddr); + } + vinfo.v_physaddr = (fbppage << 12) | (boot_vinfo->v_baseAddr & PAGE_MASK); /* Get the physical address */ + } + + vinfo.v_type = 0; + + fbsize = round_page_32(vinfo.v_height * vinfo.v_rowbytes); /* Remember size */ + + if ((lastVideoPhys != vinfo.v_physaddr) || (fbsize > lastVideoSize)) /* Did framebuffer change location or get bigger? */ + { + newVideoVirt = io_map_spec((vm_offset_t)vinfo.v_physaddr, fbsize); /* Allocate address space for framebuffer */ + + if (lastVideoVirt) /* Was the framebuffer mapped before? */ + { + pmap_remove(kernel_pmap, trunc_page_64(lastVideoVirt), + round_page_64(lastVideoVirt + lastVideoSize)); /* Toss mappings */ + + if(lastVideoVirt <= vm_last_addr) /* Was this not a special pre-VM mapping? */ + { + kmem_free(kernel_map, lastVideoVirt, lastVideoSize); /* Toss kernel addresses */ + } + } + + lastVideoPhys = vinfo.v_physaddr; /* Remember the framebuffer address */ + lastVideoSize = fbsize; /* Remember the size */ + lastVideoVirt = newVideoVirt; /* Remember the virtual framebuffer address */ + } + } + + vinfo.v_baseaddr = lastVideoVirt; /* Set the new framebuffer address */ + +#ifdef __i386__ + if ( (vinfo.v_type == TEXT_MODE) ) + { + // Text mode setup by the booter. + + gc_ops.initialize = tc_initialize; + gc_ops.enable = tc_enable; + gc_ops.paint_char = tc_paint_char; + gc_ops.clear_screen = tc_clear_screen; + gc_ops.scroll_down = tc_scroll_down; + gc_ops.scroll_up = tc_scroll_up; + gc_ops.hide_cursor = tc_hide_cursor; + gc_ops.show_cursor = tc_show_cursor; + gc_ops.update_color = tc_update_color; + } + else +#endif /* __i386__ */ + { + // Graphics mode setup by the booter. 
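+		/*
+		 * gc_ops is a function-pointer table: the generic console
+		 * front end (gc_*) calls through it, so the same callers
+		 * drive either the booter's text mode back end (tc_*, the
+		 * TEXT_MODE branch above) or the pixel-based video console
+		 * (vc_*). hide_cursor and show_cursor both point at
+		 * vc_reverse_cursor because reversing the character cell a
+		 * second time restores it; refresh_screen below, for
+		 * instance, presumably reaches vc_reverse_cursor through
+		 * gc_show_cursor.
+		 */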
+ + gc_ops.initialize = vc_initialize; + gc_ops.enable = vc_enable; + gc_ops.paint_char = vc_paint_char; + gc_ops.scroll_down = vc_scroll_down; + gc_ops.scroll_up = vc_scroll_up; + gc_ops.clear_screen = vc_clear_screen; + gc_ops.hide_cursor = vc_reverse_cursor; + gc_ops.show_cursor = vc_reverse_cursor; + gc_ops.update_color = vc_update_color; + } + + gc_initialize(&vinfo); + +#ifdef GRATEFULDEBUGGER + GratefulDebInit((bootBumbleC *)boot_vinfo); /* Re-initialize GratefulDeb */ +#endif /* GRATEFULDEBUGGER */ + } + + switch ( op ) + { + case kPEGraphicsMode: + panicDialogDesired = TRUE; + gc_graphics_boot = TRUE; + break; + + case kPETextMode: + panicDialogDesired = FALSE; + gc_graphics_boot = FALSE; + break; + + case kPEAcquireScreen: + if ( gc_acquired ) break; + + vc_progress_set( gc_graphics_boot, 2 * hz ); + gc_enable( !gc_graphics_boot ); + gc_acquired = TRUE; + break; + + case kPEEnableScreen: + /* deprecated */ + break; + + case kPETextScreen: + panicDialogDesired = FALSE; + if ( gc_acquired == FALSE ) break; + if ( gc_graphics_boot == FALSE ) break; + + vc_progress_set( FALSE, 0 ); + gc_enable( TRUE ); + break; + + case kPEDisableScreen: + /* deprecated */ + /* skip break */ + + case kPEReleaseScreen: + gc_acquired = FALSE; + gc_enable( FALSE ); + vc_progress_set( FALSE, 0 ); + + vc_clut8 = NULL; +#ifdef GRATEFULDEBUGGER + GratefulDebInit(0); /* Stop grateful debugger */ +#endif /* GRATEFULDEBUGGER */ + break; + } +#ifdef GRATEFULDEBUGGER + if ( boot_vinfo ) GratefulDebInit((bootBumbleC *)boot_vinfo); /* Re initialize GratefulDeb */ +#endif /* GRATEFULDEBUGGER */ +} +#endif + +void +refresh_screen(void) +{ + if ( gc_enabled ) + { + gc_refresh_screen(); + gc_show_cursor(gc_x, gc_y); + } +} + +void +vcattach(void) +{ + extern struct { long msg_magic; long msg_bufx; long msg_bufr; char msg_bufc[]; } * msgbufp; + + vm_initialized = TRUE; + + if ( gc_graphics_boot == FALSE ) + { + unsigned int index; + + if ( gc_acquired ) + { + initialize_screen( 0, kPEReleaseScreen ); + } + + initialize_screen( 0, kPEAcquireScreen ); + + for ( index = 0 ; index < msgbufp->msg_bufx ; index++ ) + { + vcputc( 0, 0, msgbufp->msg_bufc[index] ); + + if ( msgbufp->msg_bufc[index] == '\n' ) + { + vcputc( 0, 0,'\r' ); + } + } + } +} diff --git a/osfmk/console/video_console.h b/osfmk/console/video_console.h new file mode 100644 index 000000000..13ac301f2 --- /dev/null +++ b/osfmk/console/video_console.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * @OSF_COPYRIGHT@
+ */
+/*
+ * @APPLE_FREE_COPYRIGHT@
+ */
+
+#ifndef _VIDEO_CONSOLE_H_
+#define _VIDEO_CONSOLE_H_
+
+#include
+
+int vcputc( int l,
+            int u,
+            int c );
+
+int vcgetc( int l,
+            int u,
+            boolean_t wait,
+            boolean_t raw );
+
+void video_scroll_up( unsigned long start,
+                      unsigned long end,
+                      unsigned long dest );
+
+void video_scroll_down( unsigned long start,  /* HIGH addr */
+                        unsigned long end,    /* LOW addr */
+                        unsigned long dest ); /* HIGH addr */
+
+struct vc_info
+{
+	unsigned long	v_height;	/* pixels */
+	unsigned long	v_width;	/* pixels */
+	unsigned long	v_depth;
+	unsigned long	v_rowbytes;
+	unsigned long	v_baseaddr;
+	unsigned long	v_type;
+	char		v_name[32];
+	unsigned long	v_physaddr;
+	unsigned long	v_rows;		/* characters */
+	unsigned long	v_columns;	/* characters */
+	unsigned long	v_rowscanbytes;	/* Actual number of bytes used for display per row */
+	unsigned long	v_reserved[5];
+};
+
+#endif /* _VIDEO_CONSOLE_H_ */
diff --git a/osfmk/ddb/db_access.c b/osfmk/ddb/db_access.c
index bd29b746d..d803b7e54 100644
--- a/osfmk/ddb/db_access.c
+++ b/osfmk/ddb/db_access.c
@@ -72,33 +72,6 @@
 int db_access_level = DB_ACCESS_LEVEL;
 
-/*
- * This table is for sign-extending things.
- * Therefore its entries are signed, and yes
- * they are in fact negative numbers.
- * So don't put Us in it. Or Ls either.
- * Otherwise there is no point having it, n'est pas ?
- */
-static int db_extend[sizeof(long)+1] = {	/* table for sign-extending */
-#if defined(__arch64__)
-	0,
-	0xFFFFFFFFFFFFFF80,
-	0xFFFFFFFFFFFF8000,
-	0xFFFFFFFFFF800000,
-	0xFFFFFFFF80000000,
-	0xFFFFFF8000000000,
-	0xFFFF800000000000,
-	0xFF80000000000000,
-	0x8000000000000000,
-#else	/* !defined(__arch64__) */
-	0,
-	0xFFFFFF80,
-	0xFFFF8000,
-	0xFF800000,
-	0x80000000
-#endif	/* defined(__arch64__) */
-};
-
 db_expr_t
 db_get_task_value(
 	db_addr_t	addr,
@@ -109,6 +82,9 @@ db_get_task_value(
 	char		data[sizeof(db_expr_t)];
 	register db_expr_t value;
 	register int	i;
+	uint64_t	signx;
+
+	if(size == 0) return 0;
 
 	db_read_bytes((vm_offset_t)addr, size, data, task);
 
@@ -121,11 +97,13 @@ db_get_task_value(
 	{
 	    value = (value << 8) + (data[i] & 0xFF);
 	}
-
-	if (size <= sizeof(int)) {
-	    if (is_signed && (value & db_extend[size]) != 0)
-		value |= db_extend[size];
-	}
+
+	if(!is_signed) return value;
+
+	/* signx has 1s at and above the sign bit, e.g. size == 2 yields 0xFFFFFFFFFFFF8000 */
+	signx = 0xFFFFFFFFFFFFFFFFULL << ((size << 3) - 1);
+
+	if(value & signx) value |= signx;	/* Add 1s to front if sign bit is on */
+
 	return (value);
 }
diff --git a/osfmk/ddb/db_break.c b/osfmk/ddb/db_break.c
index aa96f95bf..585ef8c1b 100644
--- a/osfmk/ddb/db_break.c
+++ b/osfmk/ddb/db_break.c
@@ -25,195 +25,6 @@
 /*
  * @OSF_COPYRIGHT@
  */
-/*
- * HISTORY
- *
- * Revision 1.1.1.1 1998/09/22 21:05:47 wsanchez
- * Import of Mac OS X kernel (~semeria)
- *
- * Revision 1.2 1998/04/29 17:35:26 mburg
- * MK7.3 merger
- *
- * Revision 1.2.47.1 1998/02/03 09:23:57 gdt
- * Merge up to MK7.3
- * [1998/02/03 09:10:14 gdt]
- *
- * Revision 1.2.45.1 1997/03/27 18:46:16 barbou
- * ri-osc CR1557: re-enable thread-specific breakpoints.
- * [1995/09/20 15:23:46 bolinger]
- * [97/02/25 barbou]
- *
- * Revision 1.2.21.6 1996/01/09 19:15:21 devrcs
- * Changed declarations of 'register foo' to 'register int foo'
- * Fixed printfs which print addresses.
- * [1995/12/01 21:41:51 jfraser]
- *
- * Merged '64-bit safe' changes from DEC alpha port.
- * [1995/11/21 18:02:40 jfraser]
- *
- * Revision 1.2.21.5 1995/04/07 18:52:54 barbou
- * Allow breakpoints on non-resident pages.
The breakpoint will - * actually be set when the page is paged in. - * [93/09/23 barbou] - * [95/03/08 barbou] - * - * Revision 1.2.21.4 1995/02/23 21:43:19 alanl - * Merged with DIPC2_SHARED. - * [1995/01/04 20:15:04 alanl] - * - * Revision 1.2.28.1 1994/11/04 09:52:15 dwm - * mk6 CR668 - 1.3b26 merge - * * Revision 1.2.4.5 1994/05/06 18:38:52 tmt - * Merged osc1.3dec/shared with osc1.3b19 - * Moved struct db_breakpoint from here to db_break.h. - * Merge Alpha changes into osc1.312b source code. - * 64bit cleanup. - * * End1.3merge - * [1994/11/04 08:49:10 dwm] - * - * Revision 1.2.21.2 1994/09/23 01:17:57 ezf - * change marker to not FREE - * [1994/09/22 21:09:19 ezf] - * - * Revision 1.2.21.1 1994/06/11 21:11:24 bolinger - * Merge up to NMK17.2. - * [1994/06/11 20:01:06 bolinger] - * - * Revision 1.2.25.2 1994/10/28 18:56:21 rwd - * Delint. - * [94/10/28 rwd] - * - * Revision 1.2.25.1 1994/08/04 01:42:15 mmp - * 23-Jun-94 Stan Smith (stans@ssd.intel.com) - * Let d * delete all breakpoints. - * [1994/06/28 13:54:00 sjs] - * - * Revision 1.2.19.2 1994/04/11 09:34:22 bernadat - * Moved db_breakpoint struct declaration to db_break.h - * [94/03/16 bernadat] - * - * Revision 1.2.19.1 1994/02/08 10:57:22 bernadat - * When setting a breakpoint, force user_space if breakpoint is - * outside kernel_space (like in the case of an emulator). - * [93/09/27 paire] - * - * Changed silly decimal display to hex (to match input conventions). - * Change from NORMA_MK14.6 [93/01/09 sjs] - * [93/07/16 bernadat] - * [94/02/07 bernadat] - * - * Revision 1.2.4.3 1993/07/27 18:26:48 elliston - * Add ANSI prototypes. CR #9523. - * [1993/07/27 18:10:54 elliston] - * - * Revision 1.2.4.2 1993/06/09 02:19:39 gm - * Added to OSF/1 R1.3 from NMK15.0. - * [1993/06/02 20:55:42 jeffc] - * - * Revision 1.2 1993/04/19 16:01:31 devrcs - * Changes from MK78: - * Removed unused variable from db_delete_cmd(). - * Added declaration for arg 'count' of db_add_thread_breakpoint(). - * [92/05/18 jfriedl] - * Fixed b/tu to b/Tu work if the specified address is valid in the - * target address space but not the current user space. Explicit - * user space breakpoints (b/u, b/Tu, etc) will no longer get - * inserted into the kernel if the specified address is invalid. - * [92/04/18 danner] - * [92/12/18 bruel] - * - * Revision 1.1 1992/09/30 02:00:52 robert - * Initial revision - * - * $EndLog$ - */ -/* CMU_HIST */ -/* - * Revision 2.11.3.1 92/03/03 16:13:20 jeffreyh - * Pick up changes from TRUNK - * [92/02/26 10:58:37 jeffreyh] - * - * Revision 2.12 92/02/19 16:46:24 elf - * Removed one of the many user-unfriendlinesses. - * [92/02/10 17:48:25 af] - * - * Revision 2.11 91/11/12 11:50:24 rvb - * Fixed db_delete_cmd so that just "d" works in user space. - * [91/10/31 rpd] - * Fixed db_delete_thread_breakpoint for zero task_thd. - * [91/10/30 rpd] - * - * Revision 2.10 91/10/09 15:57:41 af - * Supported thread-oriented break points. - * [91/08/29 tak] - * - * Revision 2.9 91/07/09 23:15:39 danner - * Conditionalized db_map_addr to work right on the luna. Used a - * ifdef luna88k. This is evil, and needs to be fixed. - * [91/07/08 danner] - * - * Revision 2.2 91/04/10 22:54:50 mbj - * Grabbed 3.0 copyright/disclaimer since ddb comes from 3.0. - * [91/04/09 rvb] - * - * Revision 2.7 91/02/05 17:06:00 mrt - * Changed to new Mach copyright - * [91/01/31 16:17:01 mrt] - * - * Revision 2.6 91/01/08 15:09:03 rpd - * Added db_map_equal, db_map_current, db_map_addr. 
- * [90/11/10 rpd] - * - * Revision 2.5 90/11/05 14:26:32 rpd - * Initialize db_breakpoints_inserted to TRUE. - * [90/11/04 rpd] - * - * Revision 2.4 90/10/25 14:43:33 rwd - * Added map field to breakpoints. - * Added map argument to db_set_breakpoint, db_delete_breakpoint, - * db_find_breakpoint. Added db_find_breakpoint_here. - * [90/10/18 rpd] - * - * Revision 2.3 90/09/28 16:57:07 jsb - * Fixed db_breakpoint_free. - * [90/09/18 rpd] - * - * Revision 2.2 90/08/27 21:49:53 dbg - * Reflected changes in db_printsym()'s calling seq. - * [90/08/20 af] - * Clear breakpoints only if inserted. - * Reduce lint. - * [90/08/07 dbg] - * Created. - * [90/07/25 dbg] - * - */ -/* CMU_ENDHIST */ -/* - * Mach Operating System - * Copyright (c) 1991,1990 Carnegie Mellon University - * All Rights Reserved. - * - * Permission to use, copy, modify and distribute this software and its - * documentation is hereby granted, provided that both the copyright - * notice and this permission notice appear in all copies of the - * software, derivative works or modified versions, and any portions - * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" - * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR - * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * - * Carnegie Mellon requests users of this software to return to - * - * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU - * School of Computer Science - * Carnegie Mellon University - * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon - * the rights to redistribute these changes. - */ /* */ /* @@ -388,7 +199,7 @@ db_find_thread_breakpoint( { register db_thread_breakpoint_t tp; register task_t task = - (thr_act == THR_ACT_NULL || thr_act->kernel_loaded) + (thr_act == THR_ACT_NULL) ? TASK_NULL : thr_act->task; for (tp = bkpt->threads; tp; tp = tp->tb_next) { @@ -630,7 +441,7 @@ db_set_breakpoints(void) db_expr_t inst; thread_act_t cur_act = current_act(); task_t cur_task = - (cur_act && !cur_act->kernel_loaded) ? + (cur_act) ? cur_act->task : TASK_NULL; boolean_t inserted = TRUE; @@ -672,7 +483,7 @@ db_clear_breakpoints(void) register task_t task; db_expr_t inst; thread_act_t cur_act = current_act(); - task_t cur_task = (cur_act && !cur_act->kernel_loaded) ? + task_t cur_task = (cur_act) ? 
cur_act->task: TASK_NULL; if (db_breakpoints_inserted) { @@ -969,7 +780,11 @@ db_breakpoint_cmd( db_error("Invalid user space address\n"); user_space = TRUE; db_printf("%#X is in user space\n", addr); +#ifdef ppc + db_printf("kernel is from %#X to %#x\n", VM_MIN_KERNEL_ADDRESS, vm_last_addr); +#else db_printf("kernel is from %#X to %#x\n", VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS); +#endif } if (db_option(modif, 't') || task_bpt) { for (n = 0; db_get_next_act(&thr_act, n); n++) { diff --git a/osfmk/ddb/db_command.c b/osfmk/ddb/db_command.c index 62945c9c7..3dc1af941 100644 --- a/osfmk/ddb/db_command.c +++ b/osfmk/ddb/db_command.c @@ -465,7 +465,6 @@ struct db_command db_show_cmds[] = { { "simple_lock", db_show_one_simple_lock, 0, 0 }, { "thread_log", (db_func)db_show_thread_log, 0, 0 }, { "shuttle", db_show_shuttle, 0, 0 }, - { "etap_log", db_show_etap_log, 0, 0 }, { (char *)0, } }; @@ -519,14 +518,19 @@ struct db_command db_command_table[] = { #if defined(__ppc__) { "lt", db_low_trace, CS_MORE|CS_SET_DOT, 0 }, { "dl", db_display_long, CS_MORE|CS_SET_DOT, 0 }, + { "dc", db_display_char, CS_MORE|CS_SET_DOT, 0 }, { "dr", db_display_real, CS_MORE|CS_SET_DOT, 0 }, { "dv", db_display_virtual, CS_MORE|CS_SET_DOT, 0 }, { "dm", db_display_mappings, CS_MORE|CS_SET_DOT, 0 }, + { "dh", db_display_hash, CS_MORE|CS_SET_DOT, 0 }, { "dp", db_display_pmap, CS_MORE, 0 }, + { "di", db_display_iokit, CS_MORE, 0 }, { "ds", db_display_save, CS_MORE|CS_SET_DOT, 0 }, { "dx", db_display_xregs, CS_MORE|CS_SET_DOT, 0 }, { "dk", db_display_kmod, CS_MORE, 0 }, { "gs", db_gsnoop, CS_MORE, 0 }, + { "cm", db_check_mappings, CS_MORE, 0 }, + { "cp", db_check_pmaps, CS_MORE, 0 }, #endif { (char *)0, } }; @@ -619,16 +623,6 @@ db_error(char *s) { extern int db_macro_level; -#if defined(__alpha) -# if KDEBUG - extern boolean_t kdebug_mode; - if (kdebug_mode) { - if (s) kprintf(DBG_DEBUG, s); - return; - } -# endif /* KDEBUG */ -#endif /* defined(__alpha) */ - db_macro_level = 0; if (db_recover) { if (s > (char *)1) @@ -654,10 +648,11 @@ db_fncall(void) { db_expr_t fn_addr; #define MAXARGS 11 - db_expr_t args[MAXARGS]; + uint32_t args[MAXARGS]; + db_expr_t argwork; int nargs = 0; - db_expr_t retval; - db_expr_t (*func)(db_expr_t, ...); + uint32_t retval; + uint32_t (*func)(uint32_t, ...); int t; if (!db_expression(&fn_addr)) { @@ -665,31 +660,33 @@ db_fncall(void) db_flush_lex(); return; } - func = (db_expr_t (*) (db_expr_t, ...)) fn_addr; + func = (uint32_t (*) (uint32_t, ...)) fn_addr; t = db_read_token(); if (t == tLPAREN) { - if (db_expression(&args[0])) { - nargs++; - while ((t = db_read_token()) == tCOMMA) { - if (nargs == MAXARGS) { - db_printf("Too many arguments\n"); - db_flush_lex(); - return; - } - if (!db_expression(&args[nargs])) { - db_printf("Argument missing\n"); - db_flush_lex(); - return; - } - nargs++; - } - db_unread_token(t); + if (db_expression(&argwork)) { + args[nargs] = (uint32_t)argwork; + nargs++; + while ((t = db_read_token()) == tCOMMA) { + if (nargs == MAXARGS) { + db_printf("Too many arguments\n"); + db_flush_lex(); + return; + } + if (!db_expression(&argwork)) { + db_printf("Argument missing\n"); + db_flush_lex(); + return; + } + args[nargs] = (uint32_t)argwork; + nargs++; + } + db_unread_token(t); } if (db_read_token() != tRPAREN) { - db_printf("?\n"); - db_flush_lex(); - return; + db_printf("?\n"); + db_flush_lex(); + return; } } while (nargs < MAXARGS) { diff --git a/osfmk/ddb/db_examine.c b/osfmk/ddb/db_examine.c index 7026e36f4..93e4cce67 100644 --- a/osfmk/ddb/db_examine.c +++ 
b/osfmk/ddb/db_examine.c @@ -25,185 +25,6 @@ /* * @OSF_COPYRIGHT@ */ -/* - * HISTORY - * - * Revision 1.1.1.1 1998/09/22 21:05:47 wsanchez - * Import of Mac OS X kernel (~semeria) - * - * Revision 1.2 1998/04/24 19:34:23 semeria - * KDP and KDB support - * - * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez - * Import of OSF Mach kernel (~mburg) - * - * Revision 1.2.42.2 1997/09/12 17:15:15 stephen - * make x/x do zero fill right justified hex display - * [1997/09/12 16:31:04 stephen] - * - * Revision 1.2.42.1 1997/03/27 18:46:31 barbou - * Add 'p' option to the "examine" command - values in - * memory treated as addresses and rendered as sym+offset - * [1995/12/29 21:32:33 mod] - * ri-osc CR1560: make search command output address of any matching - * data it finds (so user knows it did something). - * [1995/09/20 15:24:55 bolinger] - * [97/02/25 barbou] - * - * Revision 1.2.25.5 1996/01/09 19:15:38 devrcs - * Add db_print_loc() & db_print_inst() functions. - * Make 'l' display 32 bits and new 'q' to display 64 bits. - * Allow 'u' to display unsigned decimal values (same as 'U'). - * Changed declarations of 'register foo' to 'register int foo'. - * [1995/12/01 21:42:03 jfraser] - * - * Merged '64-bit safe' changes from DEC alpha port. - * [1995/11/21 18:02:58 jfraser] - * - * Revision 1.2.25.4 1995/06/13 18:21:27 sjs - * Merge with flipc_shared. - * [95/05/22 sjs] - * - * Revision 1.2.30.1 1995/04/03 17:35:17 randys - * Minor change; allow a repeat count to work properly when multiple - * modifier flags are given to the ddb 'x' command. This allows, - * for instance, examination of multiple words in activations other - * than the current one. - * [95/04/03 randys] - * - * Revision 1.2.25.3 1995/01/06 19:10:09 devrcs - * mk6 CR668 - 1.3b26 merge - * * Revision 1.2.6.7 1994/05/06 18:39:09 tmt - * Merged osc1.3dec/shared with osc1.3b19 - * Merge Alpha changes into osc1.312b source code. - * 64bit cleanup. - * * End1.3merge - * [1994/11/04 08:49:22 dwm] - * - * Revision 1.2.25.2 1994/09/23 01:18:44 ezf - * change marker to not FREE - * [1994/09/22 21:09:44 ezf] - * - * Revision 1.2.25.1 1994/06/11 21:11:43 bolinger - * Merge up to NMK17.2. - * [1994/06/11 20:01:31 bolinger] - * - * Revision 1.2.23.1 1994/02/08 10:57:47 bernadat - * Fixed output of an examine command to have a power of 2 - * number of fields. - * [93/09/29 paire] - * - * Added dump of hexadecimal address in each line of examine command. - * Fixed beginning of line to be always located at position 0. - * [93/08/11 paire] - * [94/02/07 bernadat] - * - * Revision 1.2.21.4 1994/03/17 22:35:27 dwm - * The infamous name change: thread_activation + thread_shuttle = thread. - * [1994/03/17 21:25:43 dwm] - * - * Revision 1.2.21.3 1994/01/12 17:50:40 dwm - * Coloc: initial restructuring to follow Utah model. - * [1994/01/12 17:13:08 dwm] - * - * Revision 1.2.21.2 1993/10/12 16:38:58 dwm - * Print '\n' in x/s statements. [rwd] - * [1993/10/12 16:14:41 dwm] - * - * Revision 1.2.6.5 1993/08/11 20:37:37 elliston - * Add ANSI Prototypes. CR #9523. - * [1993/08/11 03:33:05 elliston] - * - * Revision 1.2.6.4 1993/08/09 19:34:42 dswartz - * Add ANSI prototypes - CR#9523 - * [1993/08/06 15:47:32 dswartz] - * - * Revision 1.2.6.3 1993/07/27 18:27:07 elliston - * Add ANSI prototypes. CR #9523. - * [1993/07/27 18:11:21 elliston] - * - * Revision 1.2.6.2 1993/06/09 02:20:00 gm - * Added to OSF/1 R1.3 from NMK15.0. 
- * [1993/06/02 20:56:10 jeffc] - * - * Revision 1.2 1993/04/19 16:01:58 devrcs - * Changes from mk78: - * Added void type to functions that needed it. - * Added init to 'size' in db_search_cmd(). Removed unused variables. - * Other cleanup to quiet gcc warnings. - * [92/05/16 jfriedl] - * x/u now examines current user space. x/t still examines user - * space of the the specified thread. x/tu is redundant. - * To examine an value as unsigned decimal, use x/U. - * [92/04/18 danner] - * [93/02/02 bruel] - * - * Remember count argument when repeating commands instead of the - * default command, also apply all the formats to current address - * instead of incrementing addresses when switching to next format. - * [barbou@gr.osf.org] - * - * Support 'A' format for print 'p' command [barbou@gr.osf.org] - * [92/12/03 bernadat] - * - * Revision 1.1 1992/09/30 02:01:01 robert - * Initial revision - * - * $EndLog$ - */ -/* CMU_HIST */ -/* - * Revision 2.7 91/10/09 15:59:28 af - * Revision 2.6.1.1 91/10/05 13:05:49 jeffreyh - * Supported non current task space data examination and search. - * Added 'm' format and db_xcdump to print with hex and characters. - * Added db_examine_{forward, backward}. - * Changed db_print_cmd to support variable number of parameters - * including string constant. - * Included "db_access.h". - * [91/08/29 tak] - * - * Revision 2.6.1.1 91/10/05 13:05:49 jeffreyh - * Supported non current task space data examination and search. - * Added 'm' format and db_xcdump to print with hex and characters. - * Added db_examine_{forward, backward}. - * Changed db_print_cmd to support variable number of parameters - * including string constant. - * Included "db_access.h". - * [91/08/29 tak] - * - * Revision 2.6 91/08/28 11:11:01 jsb - * Added 'A' flag to examine: just like 'a' (address), but prints addr - * as a procedure type, thus printing file/line info if available. - * Useful when called as 'x/Ai'. - * [91/08/13 18:14:55 jsb] - * - * Revision 2.5 91/05/14 15:33:31 mrt - * Correcting copyright - * - * Revision 2.4 91/02/05 17:06:20 mrt - * Changed to new Mach copyright - * [91/01/31 16:17:37 mrt] - * - * Revision 2.3 90/11/07 16:49:23 rpd - * Added db_search_cmd, db_search. - * [90/11/06 rpd] - * - * Revision 2.2 90/08/27 21:50:38 dbg - * Add 'r', 'z' to print and examine formats. - * Change calling sequence of db_disasm. - * db_examine sets db_prev and db_next instead of explicitly - * advancing dot. - * [90/08/20 dbg] - * Reflected changes in db_printsym()'s calling seq. - * [90/08/20 af] - * Reduce lint. - * [90/08/07 dbg] - * Created. 
- * [90/07/25 dbg] - * - */ -/* CMU_ENDHIST */ /* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University @@ -448,10 +269,10 @@ db_examine( sizeof(db_expr_t), FALSE, task ); db_find_task_sym_and_offset( value, &symName, &offset, task); - db_printf("\n\t*%8x(%8X) = %s", + db_printf("\n\t*%8llX(%8llX) = %s", next_addr, value, symName ); if( offset ) { - db_printf("+%X", offset ); + db_printf("+%llX", offset ); } next_addr += size; } @@ -467,7 +288,7 @@ db_examine( value = db_get_task_value(next_addr, sizeof (db_expr_t), TRUE,task); - db_printf("%-*r", width, value); + db_printf("%-*llr", width, value); next_addr += sizeof (db_expr_t); } if (sz > 0) { @@ -477,13 +298,11 @@ db_examine( } value = db_get_task_value(next_addr, sz, TRUE, task); - db_printf("%-*R", width, value); + db_printf("%-*llR", width, value); next_addr += sz; } break; -#ifdef APPLE case 'X': /* unsigned hex */ -#endif case 'x': /* unsigned hex */ for (sz = size, next_addr = addr; sz >= sizeof (db_expr_t); @@ -495,14 +314,10 @@ db_examine( value = db_get_task_value(next_addr, sizeof (db_expr_t), FALSE,task); -#ifdef APPLE if ( c == 'X') - db_printf("%0*X ", 2*size, value); + db_printf("%0*llX ", 2*size, value); else - db_printf("%-*x", width, value); -#else - db_printf("%-*x", width, value); -#endif + db_printf("%-*llx", width, value); next_addr += sizeof (db_expr_t); } if (sz > 0) { @@ -512,14 +327,10 @@ db_examine( } value = db_get_task_value(next_addr, sz, FALSE, task); -#ifdef APPLE if ( c == 'X') - db_printf("%0*X ", 2*size, value); + db_printf("%0*llX ", 2*size, value); else - db_printf("%-*X", width, value); -#else - db_printf("%-*X", width, value); -#endif + db_printf("%-*llX", width, value); next_addr += sz; } break; @@ -534,7 +345,7 @@ db_examine( value = db_get_task_value(next_addr, sizeof (db_expr_t), TRUE, task); - db_printf("%-*z", width, value); + db_printf("%-*llz", width, value); next_addr += sizeof (db_expr_t); } if (sz > 0) { @@ -544,7 +355,7 @@ db_examine( } value = db_get_task_value(next_addr,sz, TRUE,task); - db_printf("%-*Z", width, value); + db_printf("%-*llZ", width, value); next_addr += sz; } break; @@ -559,7 +370,7 @@ db_examine( value = db_get_task_value(next_addr, sizeof (db_expr_t), TRUE,task); - db_printf("%-*d", width, value); + db_printf("%-*lld", width, value); next_addr += sizeof (db_expr_t); } if (sz > 0) { @@ -569,7 +380,7 @@ db_examine( } value = db_get_task_value(next_addr, sz, TRUE, task); - db_printf("%-*D", width, value); + db_printf("%-*llD", width, value); next_addr += sz; } break; @@ -585,7 +396,7 @@ db_examine( value = db_get_task_value(next_addr, sizeof (db_expr_t), FALSE,task); - db_printf("%-*u", width, value); + db_printf("%-*llu", width, value); next_addr += sizeof (db_expr_t); } if (sz > 0) { @@ -595,7 +406,7 @@ db_examine( } value = db_get_task_value(next_addr, sz, FALSE, task); - db_printf("%-*U", width, value); + db_printf("%-*llU", width, value); next_addr += sz; } break; @@ -610,7 +421,7 @@ db_examine( value = db_get_task_value(next_addr, sizeof (db_expr_t), FALSE,task); - db_printf("%-*o", width, value); + db_printf("%-*llo", width, value); next_addr += sizeof (db_expr_t); } if (sz > 0) { @@ -620,7 +431,7 @@ db_examine( } value = db_get_task_value(next_addr, sz, FALSE, task); - db_printf("%-*o", width, value); + db_printf("%-*llo", width, value); next_addr += sz; } break; @@ -633,9 +444,9 @@ db_examine( if ((value >= ' ' && value <= '~') || value == '\n' || value == '\t') - db_printf("%c", value); + db_printf("%llc", value); else - 
db_printf("\\%03o", value); + db_printf("\\%03llo", value); } break; case 's': /* null-terminated string */ @@ -648,9 +459,9 @@ db_examine( if (value == 0) break; if (value >= ' ' && value <= '~') - db_printf("%c", value); + db_printf("%llc", value); else - db_printf("\\%03o", value); + db_printf("\\%03llo", value); } break; case 'i': /* instruction */ @@ -721,29 +532,32 @@ db_print_cmd(void) task); break; case 'r': - db_printf("%11r", value); + db_printf("%11llr", value); + break; + case 'X': + db_printf("%016llX", value); break; case 'x': - db_printf("%08x", value); + db_printf("%016llx", value); break; case 'z': - db_printf("%8z", value); + db_printf("%16llz", value); break; case 'd': - db_printf("%11d", value); + db_printf("%11lld", value); break; case 'u': - db_printf("%11u", value); + db_printf("%11llu", value); break; case 'o': - db_printf("%16o", value); + db_printf("%16llo", value); break; case 'c': value = value & 0xFF; if (value >= ' ' && value <= '~') - db_printf("%c", value); + db_printf("%llc", value); else - db_printf("\\%03o", value); + db_printf("\\%03llo", value); break; default: db_printf("Unknown format %c\n", db_print_format); @@ -906,11 +720,11 @@ db_xcdump( db_printf("%s:\n", name); off = -1; } - db_printf("%0*X:%s", 2*sizeof(db_addr_t), addr, + db_printf("%0*llX:%s", 2*sizeof(db_addr_t), addr, (size != 1) ? " " : "" ); bcount = ((n > DB_XCDUMP_NC)? DB_XCDUMP_NC: n); - if (trunc_page(addr) != trunc_page(addr+bcount-1)) { - db_addr_t next_page_addr = trunc_page(addr+bcount-1); + if (trunc_page_32(addr) != trunc_page_32(addr+bcount-1)) { + db_addr_t next_page_addr = trunc_page_32(addr+bcount-1); if (!DB_CHECK_ACCESS(next_page_addr, sizeof(int), task)) bcount = next_page_addr - addr; } @@ -919,7 +733,7 @@ db_xcdump( if (i % 4 == 0) db_printf(" "); value = db_get_task_value(addr, size, FALSE, task); - db_printf("%0*x ", size*2, value); + db_printf("%0*llX ", size*2, value); addr += size; db_find_task_sym_and_offset(addr, &name, &off, task); } @@ -930,7 +744,7 @@ db_xcdump( db_printf("%s*", (size != 1)? " ": ""); for (i = 0; i < bcount; i++) { value = data[i]; - db_printf("%c", (value >= ' ' && value <= '~')? value: '.'); + db_printf("%llc", (value >= ' ' && value <= '~')? value: '.'); } db_printf("*\n"); } diff --git a/osfmk/ddb/db_expr.c b/osfmk/ddb/db_expr.c index dc49ab9bd..28079f5a3 100644 --- a/osfmk/ddb/db_expr.c +++ b/osfmk/ddb/db_expr.c @@ -25,102 +25,6 @@ /* * @OSF_COPYRIGHT@ */ -/* - * HISTORY - * - * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez - * Import of Mac OS X kernel (~semeria) - * - * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez - * Import of OSF Mach kernel (~mburg) - * - * Revision 1.2.19.1 1997/03/27 18:46:35 barbou - * ri-osc CR1561: make operators "logical and", "logical or" - * lex correctly. - * [1995/09/20 15:26:38 bolinger] - * [97/02/25 barbou] - * - * Revision 1.2.10.2 1995/01/06 19:10:13 devrcs - * mk6 CR668 - 1.3b26 merge - * * Revision 1.2.3.5 1994/05/06 18:39:16 tmt - * Merged osc1.3dec/shared with osc1.3b19 - * Merge Alpha changes into osc1.312b source code. - * 64bit cleanup. - * * End1.3merge - * [1994/11/04 08:49:27 dwm] - * - * Revision 1.2.10.1 1994/09/23 01:19:06 ezf - * change marker to not FREE - * [1994/09/22 21:09:53 ezf] - * - * Revision 1.2.3.3 1993/07/27 18:27:15 elliston - * Add ANSI prototypes. CR #9523. - * [1993/07/27 18:11:36 elliston] - * - * Revision 1.2.3.2 1993/06/09 02:20:06 gm - * Added to OSF/1 R1.3 from NMK15.0. 
- * [1993/06/02 20:56:16 jeffc]
- *
- * Revision 1.2 1993/04/19 16:02:09 devrcs
- * Allow unprefixed (0x) hexadecimal constants starting by a letter:
- * unknown symbols are tentatively interpreted as hexadecimal constants,
- * and ambiguities are reported.
- * [93/03/24 barbou]
- *
- * Changes from mk78:
- * Removed unused variable from db_unary().
- * [92/05/16 jfriedl]
- * [93/02/02 bruel]
- *
- * Added string format arguments [barbou@gr.osf.org]
- * [92/12/03 bernadat]
- *
- * Revision 1.1 1992/09/30 02:01:04 robert
- * Initial revision
- *
- * $EndLog$
- */
-/* CMU_HIST */
-/*
- * Revision 2.5 91/10/09 15:59:46 af
- * Revision 2.4.3.1 91/10/05 13:06:04 jeffreyh
- * Added relational expression etc. to support condition expression.
- * Supported modifier after indirect expression to specify size,
- * sign extention and non current task space indirection.
- * Changed error messages to print more information.
- * [91/08/29 tak]
- *
- * Revision 2.4.3.1 91/10/05 13:06:04 jeffreyh
- * Added relational expression etc. to support condition expression.
- * Supported modifier after indirect expression to specify size,
- * sign extention and non current task space indirection.
- * Changed error messages to print more information.
- * [91/08/29 tak]
- *
- * Revision 2.4 91/05/14 15:33:45 mrt
- * Correcting copyright
- *
- * Revision 2.3 91/02/05 17:06:25 mrt
- * Changed to new Mach copyright
- * [91/01/31 16:17:46 mrt]
- *
- * Revision 2.2 90/08/27 21:50:57 dbg
- * Use '..' instead of '$$' for db_prev.
- * Use '+' for db_next.
- * [90/08/22 dbg]
- *
- * Allow repeated unary operators.
- * [90/08/20 dbg]
- *
- * Reflected back rename of db_symbol_value->db_value_of_name
- * [90/08/20 af]
- * Reduce lint.
- * [90/08/07 dbg]
- * Created.
- * [90/07/25 dbg]
- *
- */
-/* CMU_ENDHIST */
 /*
  * Mach Operating System
  * Copyright (c) 1991,1990 Carnegie Mellon University
@@ -465,7 +369,7 @@ db_shift_expr(db_expr_t *valuep)
 	    lhs <<= rhs;
 	else {
 	    /* Shift right is unsigned */
-	    lhs = (natural_t) lhs >> rhs;
+	    lhs = (uint64_t) lhs >> rhs;
 	}
 	t = db_read_token();
 }
diff --git a/osfmk/ddb/db_ext_symtab.c b/osfmk/ddb/db_ext_symtab.c
index b4976334f..4549a76e6 100644
--- a/osfmk/ddb/db_ext_symtab.c
+++ b/osfmk/ddb/db_ext_symtab.c
@@ -74,6 +74,9 @@
 * Loads a symbol table for an external file into the kernel debugger.
 * The symbol table data is an array of characters. It is assumed that
 * the caller and the kernel debugger agree on its format.
+
+ * This has never been and will never be supported on Mac OS X. The only reason I don't remove
+ * it entirely is that it is an exported symbol.
 */
 kern_return_t
 host_load_symbol_table(
@@ -83,69 +86,5 @@ host_load_symbol_table(
 	pointer_t	symtab,
 	mach_msg_type_number_t	symtab_count)
 {
-#if	!MACH_DEBUG || !MACH_KDB
 	return KERN_FAILURE;
-#else
-	kern_return_t	result;
-	vm_offset_t	symtab_start;
-	vm_offset_t	symtab_end;
-	vm_map_t	map;
-	vm_map_copy_t	symtab_copy_object;
-
-	if (host_priv == HOST_PRIV_NULL)
-		return (KERN_INVALID_ARGUMENT);
-
-	/*
-	 * Copy the symbol table array into the kernel.
-	 * We make a copy of the copy object, and clear
-	 * the old one, so that returning error will not
-	 * deallocate the data twice.
-	 */
-	symtab_copy_object = (vm_map_copy_t) symtab;
-	result = vm_map_copyout(
-			kernel_map,
-			&symtab_start,
-			vm_map_copy_copy(symtab_copy_object));
-	if (result != KERN_SUCCESS)
-		return (result);
-
-	symtab_end = symtab_start + symtab_count;
-
-	/*
-	 * Add the symbol table.
-	 * Do not keep a reference for the task map.
XXX - */ - if (task == TASK_NULL) - map = VM_MAP_NULL; - else - map = task->map; - if (!X_db_sym_init((char *)symtab_start, - (char *)symtab_end, - name, - (char *)map)) - { - /* - * Not enough room for symbol table - failure. - */ - (void) vm_deallocate(kernel_map, - symtab_start, - symtab_count); - return (KERN_FAILURE); - } - - /* - * Wire down the symbol table - */ - (void) vm_map_wire(kernel_map, - symtab_start, - round_page(symtab_end), - VM_PROT_READ|VM_PROT_WRITE, FALSE); - - /* - * Discard the original copy object - */ - vm_map_copy_discard(symtab_copy_object); - - return (KERN_SUCCESS); -#endif /* MACH_DEBUG && MACH_KDB */ } diff --git a/osfmk/ddb/db_macro.c b/osfmk/ddb/db_macro.c index 7e33231e7..c82deb92d 100644 --- a/osfmk/ddb/db_macro.c +++ b/osfmk/ddb/db_macro.c @@ -25,80 +25,6 @@ /* * @OSF_COPYRIGHT@ */ -/* - * HISTORY - * - * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez - * Import of Mac OS X kernel (~semeria) - * - * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez - * Import of OSF Mach kernel (~mburg) - * - * Revision 1.2.10.4 1996/01/09 19:15:54 devrcs - * Change 'register foo' to 'register int foo'. - * [1995/12/01 21:42:14 jfraser] - * - * Merged '64-bit safe' changes from DEC alpha port. - * [1995/11/21 18:03:15 jfraser] - * - * Revision 1.2.10.3 1995/01/06 19:10:28 devrcs - * mk6 CR668 - 1.3b26 merge - * fix typing - * [1994/11/04 08:49:38 dwm] - * - * Revision 1.2.10.2 1994/09/23 01:20:19 ezf - * change marker to not FREE - * [1994/09/22 21:10:23 ezf] - * - * Revision 1.2.10.1 1994/06/11 21:11:52 bolinger - * Merge up to NMK17.2. - * [1994/06/11 20:01:51 bolinger] - * - * Revision 1.2.8.1 1994/02/08 10:58:03 bernadat - * Fixed reinitialization of db_macro_level to -1. - * Put DB_MACRO_LEVEL and DB_NARGS macros to . - * Changed name of DB_NARGS to DB_MACRO_NARGS. - * Added support of DB_VAR_SHOW. - * [93/08/12 paire] - * [94/02/07 bernadat] - * - * Revision 1.2.2.4 1993/08/11 20:37:58 elliston - * Add ANSI Prototypes. CR #9523. - * [1993/08/11 03:33:33 elliston] - * - * Revision 1.2.2.3 1993/07/27 18:27:42 elliston - * Add ANSI prototypes. CR #9523. - * [1993/07/27 18:12:24 elliston] - * - * Revision 1.2.2.2 1993/06/09 02:20:18 gm - * Added to OSF/1 R1.3 from NMK15.0. - * [1993/06/02 20:56:40 jeffc] - * - * Revision 1.2 1993/04/19 16:02:25 devrcs - * Changes from mk78: - * Removed unused variable from db_exec_macro(). - * Added include of . - * [92/05/16 jfriedl] - * [93/02/02 bruel] - * - * Revision 1.1 1992/09/30 02:01:12 robert - * Initial revision - * - * $EndLog$ - */ -/* CMU_HIST */ -/* - * Revision 2.2 91/10/09 16:01:09 af - * Revision 2.1.3.1 91/10/05 13:06:40 jeffreyh - * Created for macro support. - * [91/08/29 tak] - * - * Revision 2.1.3.1 91/10/05 13:06:40 jeffreyh - * Created for macro support. 
- * [91/08/29 tak] - * - */ -/* CMU_ENDHIST */ /* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University @@ -278,7 +204,7 @@ db_arg_variable( if (flag == DB_VAR_SHOW) { value = db_macro_args[ap->hidden_level][ap->suffix[0]-1]; - db_printf("%#n", value); + db_printf("%#lln", value); db_find_xtrn_task_sym_and_offset(value, &name, &offset, TASK_NULL); if (name != (char *)0 && offset <= db_maxoff && offset != value) { db_printf("\t%s", name); diff --git a/osfmk/ddb/db_output.c b/osfmk/ddb/db_output.c index 32d7f4239..972b0e6d8 100644 --- a/osfmk/ddb/db_output.c +++ b/osfmk/ddb/db_output.c @@ -85,7 +85,7 @@ */ #ifndef DB_MAX_LINE -#define DB_MAX_LINE 24 /* maximum line */ +#define DB_MAX_LINE 43 /* maximum line */ #define DB_MAX_WIDTH 132 /* maximum width */ #endif /* DB_MAX_LINE */ @@ -147,10 +147,6 @@ db_more(void) register char *p; boolean_t quit_output = FALSE; -#if defined(__alpha) - extern boolean_t kdebug_mode; - if (kdebug_mode) return; -#endif /* defined(__alpha) */ for (p = "--db_more--"; *p; p++) cnputc(*p); switch(cngetc()) { @@ -292,9 +288,6 @@ db_printf(char *fmt, ...) { va_list listp; -#ifdef luna88k - db_printing(); -#endif va_start(listp, fmt); _doprnt(fmt, &listp, db_putchar, db_radix); va_end(listp); @@ -343,9 +336,7 @@ void db_output_prompt(void) { db_printf("db%s", (db_default_act) ? "t": ""); -#if NCPUS > 1 db_printf("{%d}", cpu_number()); -#endif db_printf("> "); } diff --git a/osfmk/ddb/db_print.c b/osfmk/ddb/db_print.c index bbb0fb10f..29b5bea6b 100644 --- a/osfmk/ddb/db_print.c +++ b/osfmk/ddb/db_print.c @@ -190,7 +190,7 @@ db_show_regs( 12-strlen(regp->name)-((i<10)?1:2), ""); else db_printf("%-12s", regp->name); - db_printf("%#*N", 2+2*sizeof(vm_offset_t), value); + db_printf("%#*llN", 2+2*sizeof(db_expr_t), value); db_find_xtrn_task_sym_and_offset((db_addr_t)value, &name, &offset, task); if (name != 0 && offset <= db_maxoff && offset != value) { @@ -434,7 +434,7 @@ db_print_task( db_printf("%3d: %0*X %0*X %3d %3d %3d %2d %c ", task_id, 2*sizeof(vm_offset_t), task, 2*sizeof(vm_offset_t), task->map, - task->thr_act_count, task->res_act_count, + task->thread_count, task->res_thread_count, task->suspend_count, task->priority, sstate); @@ -443,10 +443,10 @@ db_print_task( if (flag & OPTION_TASK_TITLE) flag |= OPTION_THREAD_TITLE; db_printf("\n"); - } else if (task->thr_act_count <= 1) + } else if (task->thread_count <= 1) flag &= ~OPTION_INDENT; act_id = 0; - queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) { db_print_act(thr_act, act_id, flag); flag &= ~OPTION_THREAD_TITLE; act_id++; @@ -457,22 +457,22 @@ db_print_task( if (flag & OPTION_LONG) { if (flag & OPTION_TASK_TITLE) { db_printf(" TASK ACT\n"); - if (task->thr_act_count > 1) + if (task->thread_count > 1) flag |= OPTION_THREAD_TITLE; } } db_printf("%3d (%0*X): ", task_id, 2*sizeof(vm_offset_t), task); - if (task->thr_act_count == 0) { + if (task->thread_count == 0) { db_printf("no threads\n"); } else { - if (task->thr_act_count > 1) { - db_printf("%d threads: \n", task->thr_act_count); + if (task->thread_count > 1) { + db_printf("%d threads: \n", task->thread_count); flag |= OPTION_INDENT; } else flag &= ~OPTION_INDENT; act_id = 0; - queue_iterate(&task->thr_acts, thr_act, - thread_act_t, thr_acts) { + queue_iterate(&task->threads, thr_act, + thread_act_t, task_threads) { db_print_act(thr_act, act_id++, flag); flag &= ~OPTION_THREAD_TITLE; } @@ -487,7 +487,7 @@ db_print_space( int flag) { ipc_space_t space; 
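+	/*
+	 * task->threads is the task's list of activations, linked through
+	 * each thread's task_threads field and counted by thread_count and
+	 * res_thread_count; the hunks here and in db_task_thread.c below
+	 * mechanically retire the old thr_acts / thr_act_count /
+	 * res_act_count names. This is the "infamous name change" merging
+	 * thread_activation and thread_shuttle into thread that the deleted
+	 * history entries mention; db_show_shuttle below likewise trades
+	 * thread_shuttle_t for thread_t.
+	 */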
- thread_act_t act = (thread_act_t)queue_first(&task->thr_acts); + thread_act_t act = (thread_act_t)queue_first(&task->threads); int count; count = 0; @@ -722,7 +722,7 @@ db_show_one_act( thr_act = (thread_act_t) addr; if ((act_id = db_lookup_act(thr_act)) < 0) { - db_printf("bad thr_act address %#x\n", addr); + db_printf("bad thr_act address %#llX\n", addr); db_error(0); /*NOTREACHED*/ } @@ -774,7 +774,7 @@ db_show_one_task( task = (task_t) addr; if ((task_id = db_lookup_task(task)) < 0) { - db_printf("bad task address 0x%x\n", addr); + db_printf("bad task address 0x%llX\n", addr); db_error(0); /*NOTREACHED*/ } @@ -789,11 +789,11 @@ db_show_shuttle( db_expr_t count, char * modif) { - thread_shuttle_t shuttle; + thread_t shuttle; thread_act_t thr_act; if (have_addr) - shuttle = (thread_shuttle_t) addr; + shuttle = (thread_t) addr; else { thr_act = current_act(); if (thr_act == THR_ACT_NULL) { @@ -972,7 +972,7 @@ db_show_port_id( } else thr_act = (thread_act_t) addr; if (db_lookup_act(thr_act) < 0) { - db_printf("Bad thr_act address 0x%x\n", addr); + db_printf("Bad thr_act address 0x%llX\n", addr); db_error(0); /*NOTREACHED*/ } diff --git a/osfmk/ddb/db_sym.c b/osfmk/ddb/db_sym.c index c0d3f5f12..2578ef347 100644 --- a/osfmk/ddb/db_sym.c +++ b/osfmk/ddb/db_sym.c @@ -25,322 +25,6 @@ /* * @OSF_COPYRIGHT@ */ -/* - * HISTORY - * - * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez - * Import of Mac OS X kernel (~semeria) - * - * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez - * Import of OSF Mach kernel (~mburg) - * - * Revision 1.3.22.8 1996/07/31 09:07:24 paire - * Merged with nmk20b7_shared (1.3.47.1) - * [96/07/24 paire] - * - * Revision 1.3.47.1 1996/06/13 12:36:08 bernadat - * Do not assume anymore that VM_MIN_KERNEL_ADDRESS - * is greater or equal than VM_MAX_ADDRESS. - * [96/05/23 bernadat] - * - * Revision 1.3.22.7 1996/01/09 19:16:15 devrcs - * Added db_task_getlinenum() function. (steved) - * Make db_maxval & db_minval long int's for Alpha. - * Changed declarations of 'register foo' to 'register int foo'. - * [1995/12/01 21:42:29 jfraser] - * - * Merged '64-bit safe' changes from DEC alpha port. - * [1995/11/21 18:03:41 jfraser] - * - * Revision 1.3.22.6 1995/02/28 01:58:46 dwm - * Merged with changes from 1.3.22.5 - * [1995/02/28 01:53:47 dwm] - * - * mk6 CR1120 - Merge mk6pro_shared into cnmk_shared - * remove a couple local protos, now in .h file (for better or worse) - * [1995/02/28 01:12:51 dwm] - * - * Revision 1.3.22.5 1995/02/23 21:43:43 alanl - * Move TR_INIT to model_dep.c (MACH_TR and MACH_KDB shouldn't - * be bound). - * [95/02/16 travos] - * - * Prepend a "db_" to qsort and qsort_limit_search - * (collisions with the real qsort in stdlib.h) - * [95/02/14 travos] - * - * Added X_db_init for object independent formats. - * [95/01/24 sjs] - * - * Merge with DIPC2_SHARED. - * [1995/01/05 13:32:53 alanl] - * - * Revision 1.3.30.2 1994/12/22 20:36:15 bolinger - * Fix ri-osc CR881: enable freer use of symbol table of collocated - * tasks. No point in requiring task to be named for symbols to be - * usable. Also fixed glitch in use of symtab cloning. - * [1994/12/22 20:34:55 bolinger] - * - * Revision 1.3.30.1 1994/11/04 09:53:14 dwm - * mk6 CR668 - 1.3b26 merge - * add arg to *_db_search_by_addr() from mk6 - * * Revision 1.3.4.9 1994/05/13 15:57:14 tmt - * Add hooks for catching calls to uninstalled symbol tables. - * Add XXX_search_by_addr() vectors. - * * Revision 1.3.4.8 1994/05/12 21:59:00 tmt - * Fix numerous db_sym_t/char * mixups. 
- * Fix and enable db_qualify_ambiguous_names. - * Make dif and newdiff unsigned in symbol searches. - * * Revision 1.3.4.7 1994/05/06 18:39:52 tmt - * Merged osc1.3dec/shared with osc1.3b19 - * Fix function prototype declarations. - * Merge Alpha changes into osc1.312b source code. - * String protos. - * Handle multiple, coexisting symbol table types. - * 64bit cleanup. - * Revision 1.3.4.5 1993/10/20 18:58:55 gm - * CR9704: Removed symbol load printf. - * * End1.3merge - * [1994/11/04 08:50:02 dwm] - * - * Revision 1.3.22.5 1995/02/23 21:43:43 alanl - * Move TR_INIT to model_dep.c (MACH_TR and MACH_KDB shouldn't - * be bound). - * [95/02/16 travos] - * - * Prepend a "db_" to qsort and qsort_limit_search - * (collisions with the real qsort in stdlib.h) - * [95/02/14 travos] - * - * Added X_db_init for object independent formats. - * [95/01/24 sjs] - * - * Merge with DIPC2_SHARED. - * [1995/01/05 13:32:53 alanl] - * - * Revision 1.3.30.2 1994/12/22 20:36:15 bolinger - * Fix ri-osc CR881: enable freer use of symbol table of collocated - * tasks. No point in requiring task to be named for symbols to be - * usable. Also fixed glitch in use of symtab cloning. - * [1994/12/22 20:34:55 bolinger] - * - * Revision 1.3.30.1 1994/11/04 09:53:14 dwm - * mk6 CR668 - 1.3b26 merge - * add arg to *_db_search_by_addr() from mk6 - * * Revision 1.3.4.9 1994/05/13 15:57:14 tmt - * Add hooks for catching calls to uninstalled symbol tables. - * Add XXX_search_by_addr() vectors. - * * Revision 1.3.4.8 1994/05/12 21:59:00 tmt - * Fix numerous db_sym_t/char * mixups. - * Fix and enable db_qualify_ambiguous_names. - * Make dif and newdiff unsigned in symbol searches. - * * Revision 1.3.4.7 1994/05/06 18:39:52 tmt - * Merged osc1.3dec/shared with osc1.3b19 - * Fix function prototype declarations. - * Merge Alpha changes into osc1.312b source code. - * String protos. - * Handle multiple, coexisting symbol table types. - * 64bit cleanup. - * Revision 1.3.4.5 1993/10/20 18:58:55 gm - * CR9704: Removed symbol load printf. - * * End1.3merge - * [1994/11/04 08:50:02 dwm] - * - * Revision 1.3.22.3 1994/09/23 01:21:37 ezf - * change marker to not FREE - * [1994/09/22 21:10:58 ezf] - * - * Revision 1.3.22.2 1994/06/26 22:58:24 bolinger - * Suppress symbol table range output when table is unsorted, since output - * is meaningless in this case. - * [1994/06/23 20:19:02 bolinger] - * - * Revision 1.3.22.1 1994/06/11 21:12:19 bolinger - * Merge up to NMK17.2. - * [1994/06/11 20:02:31 bolinger] - * - * Revision 1.3.17.1 1994/02/08 10:58:40 bernadat - * Check result of X_db_line_at_pc() before - * invoking db_shorten_filename(). - * [93/11/30 bernadat] - * - * Installed ddb_init() routine in a symbol-independent file to call - * symbol-dependent and machine-dependent initialization routines. - * [93/08/27 paire] - * - * Fixed db_shorten_filename() to gobble the last slash. - * Modified db_search_task_symbol_and_line() interface to return - * the number of a function arguments. - * [93/08/19 paire] - * - * Added new arguments to db_sym_print_completion() call. - * [93/08/18 paire] - * - * Added db_lookup_incomplete(), db_sym_parse_and_lookup_incomplete(), - * db_sym_print_completion() and db_completion_print() for support of - * symbol completion. - * [93/08/14 paire] - * [94/02/07 bernadat] - * - * Revision 1.3.15.4 1994/06/08 19:11:23 dswartz - * Preemption merge. 
- * [1994/06/08 19:10:24 dswartz] - * - * Revision 1.3.20.2 1994/06/01 21:34:39 klj - * Initial preemption code base merge - * - * Revision 1.3.15.3 1994/02/10 02:28:15 bolinger - * Fix db_add_symbol_table() to increase db_maxval if highest-addressed - * symbol in new symtab is greater than its current value. - * [1994/02/09 21:42:12 bolinger] - * - * Revision 1.3.15.2 1994/02/03 21:44:23 bolinger - * Update db_maxval when a symbol table is cloned for kernel-loaded - * server. - * [1994/02/03 20:47:22 bolinger] - * - * Revision 1.3.15.1 1994/02/03 02:41:58 dwm - * Add short-term kludge to provide symbolic info on INKServer. - * [1994/02/03 02:31:17 dwm] - * - * Revision 1.3.4.4 1993/08/11 20:38:11 elliston - * Add ANSI Prototypes. CR #9523. - * [1993/08/11 03:33:59 elliston] - * - * Revision 1.3.4.3 1993/07/27 18:28:09 elliston - * Add ANSI prototypes. CR #9523. - * [1993/07/27 18:12:57 elliston] - * - * Revision 1.3.4.2 1993/06/09 02:20:50 gm - * CR9176 - ANSI C violations: trailing tokens on CPP - * directives, extra semicolons after decl_ ..., asm keywords - * [1993/06/07 18:57:31 jeffc] - * - * Added to OSF/1 R1.3 from NMK15.0. - * [1993/06/02 20:57:10 jeffc] - * - * Revision 1.3 1993/04/19 16:03:09 devrcs - * Protect db_line_at_pc() against null db_last_symtab. - * [1993/02/11 15:37:16 barbou] - * - * Changes from MK78: - * Upped MAXNOSYMTABS from 3 to 5. Now there is space for kernel, - * bootstrap, server, and emulator symbols - plus one for future - * expansion. - * [92/03/21 danner] - * Changed CHAR arg of db_eqname to UNSIGNED. - * Made arg types proper for db_line_at_pc(). - * [92/05/16 jfriedl] - * [92/12/18 bruel] - * - * Sort large symbol tables to speedup lookup. - * Improved symbol lookup (use of max_offset, dichotomic search) - * [barbou@gr.osf.org] - * - * db_add_symbol_table now takes 3 additional arguments. Machine - * dependant modules must provide them. [barbou@gr.osf.org] - * [92/12/03 bernadat] - * - * Revision 1.2 1992/11/25 01:04:42 robert - * integrate changes below for norma_14 - * [1992/11/13 19:22:44 robert] - * - * Revision 1.1 1992/09/30 02:01:25 robert - * Initial revision - * - * $EndLog$ - */ -/* CMU_HIST */ -/* - * Revision 2.10.4.1 92/02/18 18:38:53 jeffreyh - * Added db_get_sym(). Simple interface to get symbol names - * knowing the offset. - * [91/12/20 bernadat] - * - * Do not look for symbol names if address - * is to small or to large, otherwise get - * random names like INCLUDE_VERSION+?? - * [91/06/25 bernadat] - * - * Revision 2.10 91/10/09 16:02:30 af - * Revision 2.9.2.1 91/10/05 13:07:27 jeffreyh - * Changed symbol table name qualification syntax from "xxx:yyy" - * to "xxx::yyy" to allow "file:func:line" in "yyy" part. - * "db_sym_parse_and_lookup" is also added for "yyy" part parsing. - * Replaced db_search_symbol with db_search_task_symbol, and moved - * it to "db_sym.h" as a macro. - * Added db_task_printsym, and changed db_printsym to call it. - * Added include "db_task_thread.h". - * Fixed infinite recursion of db_symbol_values. - * [91/08/29 tak] - * - * Revision 2.9.2.1 91/10/05 13:07:27 jeffreyh - * Changed symbol table name qualification syntax from "xxx:yyy" - * to "xxx::yyy" to allow "file:func:line" in "yyy" part. - * "db_sym_parse_and_lookup" is also added for "yyy" part parsing. - * Replaced db_search_symbol with db_search_task_symbol, and moved - * it to "db_sym.h" as a macro. - * Added db_task_printsym, and changed db_printsym to call it. - * Added include "db_task_thread.h". - * Fixed infinite recursion of db_symbol_values. 
- * [91/08/29 tak] - * - * Revision 2.9 91/07/31 17:31:14 dbg - * Add task pointer and space for string storage to symbol table - * descriptor. - * [91/07/31 dbg] - * - * Revision 2.8 91/07/09 23:16:08 danner - * Changed a printf. - * [91/07/08 danner] - * - * Revision 2.7 91/05/14 15:35:54 mrt - * Correcting copyright - * - * Revision 2.6 91/03/16 14:42:40 rpd - * Changed the default db_maxoff to 4K. - * [91/03/10 rpd] - * - * Revision 2.5 91/02/05 17:07:07 mrt - * Changed to new Mach copyright - * [91/01/31 16:19:17 mrt] - * - * Revision 2.4 90/10/25 14:44:05 rwd - * Changed db_printsym to print unsigned. - * [90/10/19 rpd] - * - * Revision 2.3 90/09/09 23:19:56 rpd - * Avoid totally incorrect guesses of symbol names for small values. - * [90/08/30 17:39:48 af] - * - * Revision 2.2 90/08/27 21:52:18 dbg - * Removed nlist.h. Fixed some type declarations. - * Qualifier character is ':'. - * [90/08/20 dbg] - * Modularized symtab info into a new db_symtab_t type. - * Modified db_add_symbol_table and others accordingly. - * Defined db_sym_t, a new (opaque) type used to represent - * symbols. This should support all sort of future symtable - * formats. Functions like db_qualify take a db_sym_t now. - * New db_symbol_values() function to explode the content - * of a db_sym_t. - * db_search_symbol() replaces db_find_sym_and_offset(), which is - * now a macro defined in our (new) header file. This new - * function accepts more restrictive searches, which are - * entirely delegated to the symtab-specific code. - * Accordingly, db_printsym() accepts a strategy parameter. - * New db_line_at_pc() function. - * Renamed misleading db_eqsym into db_eqname. - * [90/08/20 10:47:06 af] - * - * Created. - * [90/07/25 dbg] - * - * Revision 2.1 90/07/26 16:43:52 dbg - * Created. - * - */ -/* CMU_ENDHIST */ /* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University @@ -1635,7 +1319,7 @@ db_clone_symtabXXX( } /* alloc new symbols */ size = (vm_size_t)(st_src->end - st_src->private); - memp = (char *)kalloc( round_page(size) ); + memp = (char *)kalloc( round_page_32(size) ); if (!memp) { db_printf("db_clone_symtab: no memory for symtab\n"); return; diff --git a/osfmk/ddb/db_task_thread.c b/osfmk/ddb/db_task_thread.c index 765382c97..6ff2066a1 100644 --- a/osfmk/ddb/db_task_thread.c +++ b/osfmk/ddb/db_task_thread.c @@ -25,75 +25,6 @@ /* * @OSF_COPYRIGHT@ */ -/* - * HISTORY - * - * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez - * Import of Mac OS X kernel (~semeria) - * - * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez - * Import of OSF Mach kernel (~mburg) - * - * Revision 1.1.16.3 1996/01/09 19:16:26 devrcs - * Make db_lookup_task_id() globally available (remove static). - * Changed declarations of 'register foo' to 'register int foo'. - * [1995/12/01 21:42:37 jfraser] - * - * Merged '64-bit safe' changes from DEC alpha port. - * [1995/11/21 18:03:48 jfraser] - * - * Revision 1.1.16.2 1994/09/23 01:21:59 ezf - * change marker to not FREE - * [1994/09/22 21:11:09 ezf] - * - * Revision 1.1.16.1 1994/06/11 21:12:29 bolinger - * Merge up to NMK17.2. - * [1994/06/11 20:02:43 bolinger] - * - * Revision 1.1.14.1 1994/02/08 10:59:02 bernadat - * Added support of DB_VAR_SHOW. - * [93/08/12 paire] - * [94/02/08 bernadat] - * - * Revision 1.1.12.3 1994/03/17 22:35:35 dwm - * The infamous name change: thread_activation + thread_shuttle = thread. 
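The round_page() → round_page_32() substitution in the db_clone_symtabXXX hunk above recurs throughout this patch (trunc_page_32/round_page_32 in db_watch.c below, _64 variants later in iokit_rpc.c): page-boundary arithmetic is being made explicit about operand width. A minimal sketch of what such width-split macros look like, assuming a 4 KB page; the names mirror the patch, but the definitions here are illustrative rather than xnu's own:

#include <stdint.h>

#define PAGE_SHIFT 12                    /* assumed 4 KB pages */
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Round down / up to a page boundary on 32-bit quantities. */
#define trunc_page_32(x) ((uint32_t)(x) & ~(uint32_t)(PAGE_SIZE - 1))
#define round_page_32(x) trunc_page_32((uint32_t)(x) + PAGE_SIZE - 1)

/* The same operations on 64-bit quantities; only the mask width differs. */
#define trunc_page_64(x) ((uint64_t)(x) & ~(uint64_t)(PAGE_SIZE - 1))
#define round_page_64(x) trunc_page_64((uint64_t)(x) + PAGE_SIZE - 1)

Masking a 64-bit address with a 32-bit mask would silently clear the upper word, which is what the explicit suffixes at each call site guard against.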
- * [1994/03/17 21:25:50 dwm] - * - * Revision 1.1.12.2 1994/01/17 18:08:54 dwm - * Add patchable integer force_act_lookup to force successful - * lookup, to allow stack trace on orphaned act/thread pairs. - * [1994/01/17 16:06:50 dwm] - * - * Revision 1.1.12.1 1994/01/12 17:50:52 dwm - * Coloc: initial restructuring to follow Utah model. - * [1994/01/12 17:13:23 dwm] - * - * Revision 1.1.3.3 1993/07/27 18:28:15 elliston - * Add ANSI prototypes. CR #9523. - * [1993/07/27 18:13:06 elliston] - * - * Revision 1.1.3.2 1993/06/02 23:12:39 jeffc - * Added to OSF/1 R1.3 from NMK15.0. - * [1993/06/02 20:57:24 jeffc] - * - * Revision 1.1 1992/09/30 02:01:27 robert - * Initial revision - * - * $EndLog$ - */ -/* CMU_HIST */ -/* - * Revision 2.2 91/10/09 16:03:04 af - * Revision 2.1.3.1 91/10/05 13:07:50 jeffreyh - * Created for task/thread handling. - * [91/08/29 tak] - * - * Revision 2.1.3.1 91/10/05 13:07:50 jeffreyh - * Created for task/thread handling. - * [91/08/29 tak] - * - */ -/* CMU_ENDHIST */ /* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University @@ -193,9 +124,9 @@ db_lookup_task_act( register int act_id; act_id = 0; - if (queue_first(&task->thr_acts) == 0) + if (queue_first(&task->threads) == 0) return(-1); - queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) { if (target_act == thr_act) return(act_id); if (act_id++ >= DB_MAX_THREADID) @@ -224,7 +155,7 @@ db_lookup_act(thread_act_t target_act) queue_iterate(&pset->tasks, task, task_t, pset_tasks) { if (ntask++ > DB_MAX_TASKID) return(-1); - if (task->thr_act_count == 0) + if (task->thread_count == 0) continue; act_id = db_lookup_task_act(task, target_act); if (act_id >= 0) @@ -284,9 +215,9 @@ db_lookup_act_id( if (act_id > DB_MAX_THREADID) return(THR_ACT_NULL); - if (queue_first(&task->thr_acts) == 0) + if (queue_first(&task->threads) == 0) return(THR_ACT_NULL); - queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) { if (act_id-- <= 0) return(thr_act); } diff --git a/osfmk/ddb/db_task_thread.h b/osfmk/ddb/db_task_thread.h index 398e2c270..a11263178 100644 --- a/osfmk/ddb/db_task_thread.h +++ b/osfmk/ddb/db_task_thread.h @@ -25,68 +25,6 @@ /* * @OSF_COPYRIGHT@ */ -/* - * HISTORY - * - * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez - * Import of Mac OS X kernel (~semeria) - * - * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez - * Import of OSF Mach kernel (~mburg) - * - * Revision 1.1.9.1 1994/09/23 01:22:09 ezf - * change marker to not FREE - * [1994/09/22 21:11:13 ezf] - * - * Revision 1.1.7.4 1994/03/17 22:35:38 dwm - * The infamous name change: thread_activation + thread_shuttle = thread. - * [1994/03/17 21:25:53 dwm] - * - * Revision 1.1.7.3 1994/02/03 21:44:27 bolinger - * Change a surviving current_thread() to current_act(). - * [1994/02/03 20:48:03 bolinger] - * - * Revision 1.1.7.2 1994/01/12 17:50:56 dwm - * Coloc: initial restructuring to follow Utah model. - * [1994/01/12 17:13:27 dwm] - * - * Revision 1.1.7.1 1994/01/05 19:28:18 bolinger - * Separate notions of "address space" and "task" (i.e., symbol table), - * via new macros db_current_space() and db_is_current_space(); also update - * db_target_space() to treat kernel-loaded tasks correctly. - * [1994/01/04 17:41:47 bolinger] - * - * Revision 1.1.2.4 1993/07/27 18:28:17 elliston - * Add ANSI prototypes. CR #9523. 
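The db_lookup_task_act()/db_lookup_act_id() hunks above rename the per-task activation queue (thr_acts → threads, chain field thr_acts → task_threads) but leave the queue_iterate() walk itself unchanged. A sketch of what that walk amounts to, an intrusive circular doubly linked list where each element embeds its own link field; the type and field names here are illustrative, not the kernel's:

#include <stddef.h>

struct queue_entry {
    struct queue_entry *next, *prev;
};

struct thread_act {
    struct queue_entry task_threads;  /* chain field named in queue_iterate() */
    int id;
};

/* Recover the enclosing element from its embedded link field. */
#define qe_element(qe, type, field) \
    ((type *)((char *)(qe) - offsetof(type, field)))

/* Return the index of target in the queue headed by `head`, or -1. */
static int lookup_act(struct queue_entry *head, struct thread_act *target)
{
    int act_id = 0;
    for (struct queue_entry *qe = head->next; qe != head; qe = qe->next) {
        struct thread_act *t = qe_element(qe, struct thread_act, task_threads);
        if (t == target)
            return act_id;
        act_id++;
    }
    return -1;
}

The head is a sentinel, so an empty queue is simply head->next == head; the kernel code's queue_first(...) == 0 check additionally catches a never-initialized queue.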
- * [1993/07/27 18:13:10 elliston] - * - * Revision 1.1.2.3 1993/06/07 22:06:58 jeffc - * CR9176 - ANSI C violations: trailing tokens on CPP - * directives, extra semicolons after decl_ ..., asm keywords - * [1993/06/07 18:57:35 jeffc] - * - * Revision 1.1.2.2 1993/06/02 23:12:46 jeffc - * Added to OSF/1 R1.3 from NMK15.0. - * [1993/06/02 20:57:32 jeffc] - * - * Revision 1.1 1992/09/30 02:24:23 robert - * Initial revision - * - * $EndLog$ - */ -/* CMU_HIST */ -/* - * Revision 2.2 91/10/09 16:03:18 af - * Revision 2.1.3.1 91/10/05 13:08:07 jeffreyh - * Created for task/thread handling. - * [91/08/29 tak] - * - * Revision 2.1.3.1 91/10/05 13:08:07 jeffreyh - * Created for task/thread handling. - * [91/08/29 tak] - * - */ -/* CMU_ENDHIST */ /* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University @@ -133,10 +71,10 @@ #define db_current_task() \ ((current_act())? current_act()->task: TASK_NULL) #define db_current_space() \ - ((current_act() && !current_act()->kernel_loaded)?\ + ((current_act())?\ current_act()->task: TASK_NULL) #define db_target_space(thr_act, user_space) \ - ((!(user_space) || ((thr_act) && (thr_act)->kernel_loaded))?\ + ((!(user_space) || ((thr_act)))?\ TASK_NULL: \ (thr_act)? \ (thr_act)->task: db_current_space()) diff --git a/osfmk/ddb/db_variables.c b/osfmk/ddb/db_variables.c index ba7192c60..b30a42221 100644 --- a/osfmk/ddb/db_variables.c +++ b/osfmk/ddb/db_variables.c @@ -25,132 +25,6 @@ /* * @OSF_COPYRIGHT@ */ -/* - * HISTORY - * - * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez - * Import of Mac OS X kernel (~semeria) - * - * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez - * Import of OSF Mach kernel (~mburg) - * - * Revision 1.2.18.5 1996/01/09 19:16:34 devrcs - * Search the alternate register names if configured - * Changed declarations of 'register foo' to 'register int foo'. - * [1995/12/01 21:42:42 jfraser] - * - * Merged '64-bit safe' changes from DEC alpha port. - * [1995/11/21 18:03:56 jfraser] - * - * Revision 1.2.18.4 1995/02/23 21:43:56 alanl - * Merged with DIPC2_SHARED. - * [1995/01/05 13:35:55 alanl] - * - * Revision 1.2.21.1 1994/11/04 09:53:26 dwm - * mk6 CR668 - 1.3b26 merge - * * Revision 1.2.4.6 1994/05/06 18:40:13 tmt - * Merged osc1.3dec/shared with osc1.3b19 - * Merge Alpha changes into osc1.312b source code. - * 64bit cleanup. - * * End1.3merge - * [1994/11/04 08:50:12 dwm] - * - * Revision 1.2.18.2 1994/09/23 01:22:35 ezf - * change marker to not FREE - * [1994/09/22 21:11:24 ezf] - * - * Revision 1.2.18.1 1994/06/11 21:12:37 bolinger - * Merge up to NMK17.2. - * [1994/06/11 20:03:04 bolinger] - * - * Revision 1.2.23.1 1994/12/06 19:43:18 alanl - * Intel merge, Oct 94 code drop. - * Added db_find_reg_name (came from db_print.c). - * [94/11/28 mmp] - * - * Revision 1.2.16.1 1994/02/08 10:59:08 bernadat - * Added completion variable. - * [93/08/17 paire] - * - * Set up new fields (hidden_xxx) of db_vars[] array that are supposed - * to be helpful to display variables depending on an internal value - * like db_macro_level for macro arguments. - * Added db_auto_wrap as new variable. - * Added "set help" for listing all available variables. - * Added db_show_variable() and db_show_one_variable() - * to print variable values. - * [93/08/12 paire] - * [94/02/08 bernadat] - * - * Revision 1.2.4.4 1993/08/11 20:38:20 elliston - * Add ANSI Prototypes. CR #9523. - * [1993/08/11 03:34:13 elliston] - * - * Revision 1.2.4.3 1993/07/27 18:28:27 elliston - * Add ANSI prototypes. CR #9523. 
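The db_variables.c hunks just below widen several db_printf conversions from %#n to %#lln. Assuming db_expr_t is a 64-bit (long long) quantity in this configuration, which is what the ll modifier implies, the fix matters because varargs pass the full 64-bit value while a plain int conversion reads only part of it, which is undefined behavior. The ddb-style kernel printf treats %n as "print in the current radix" rather than standard C's %n, but the length-modifier rule is the same one standard printf has; a user-space analogue:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t value = 0x100000000ULL;            /* needs more than 32 bits */

    /* printf("%#8x\n", value);                    wrong: int conversion
                                                   with a 64-bit argument (UB) */

    printf("%#8llx\n", (unsigned long long)value);  /* modifier matches */
    printf("%#8" PRIx64 "\n", value);               /* portable spelling */
    return 0;
}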
- * [1993/07/27 18:13:22 elliston] - * - * Revision 1.2.4.2 1993/06/09 02:21:02 gm - * Added to OSF/1 R1.3 from NMK15.0. - * [1993/06/02 20:57:43 jeffc] - * - * Revision 1.2 1993/04/19 16:03:25 devrcs - * Changes from mk78: - * Added void to db_read_write_variable(). - * Removed unused variable 'func' from db_set_cmd(). - * [92/05/16 jfriedl] - * [93/02/02 bruel] - * - * Print old value when changing register values. - * [barbou@gr.osf.org] - * [92/12/03 bernadat] - * - * Revision 1.1 1992/09/30 02:01:31 robert - * Initial revision - * - * $EndLog$ - */ -/* CMU_HIST */ -/* - * Revision 2.5 91/10/09 16:03:59 af - * Revision 2.4.3.1 91/10/05 13:08:27 jeffreyh - * Added suffix handling and thread handling of variables. - * Added new variables: lines, task, thread, work and arg. - * Moved db_read_variable and db_write_variable to db_variables.h - * as macros, and added db_read_write_variable instead. - * Changed some error messages. - * [91/08/29 tak] - * - * Revision 2.4.3.1 91/10/05 13:08:27 jeffreyh - * Added suffix handling and thread handling of variables. - * Added new variables: lines, task, thread, work and arg. - * Moved db_read_variable and db_write_variable to db_variables.h - * as macros, and added db_read_write_variable instead. - * Changed some error messages. - * [91/08/29 tak] - * - * Revision 2.4 91/05/14 15:36:57 mrt - * Correcting copyright - * - * Revision 2.3 91/02/05 17:07:19 mrt - * Changed to new Mach copyright - * [91/01/31 16:19:46 mrt] - * - * Revision 2.2 90/08/27 21:53:24 dbg - * New db_read/write_variable functions. Should be used instead - * of dereferencing valuep directly, which might not be a true - * pointer if there is an fcn() access function. - * [90/08/20 af] - * - * Fix declarations. - * Check for trailing garbage after last expression on command line. - * [90/08/10 14:34:54 dbg] - * - * Created. - * [90/07/25 dbg] - * - */ -/* CMU_ENDHIST */ /* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University @@ -390,7 +264,7 @@ db_read_write_variable( } else (*func)(vp, valuep, rw_flag, ap); if (rw_flag == DB_VAR_SET && vp->precious) - db_printf("\t$%s:%s<%#x>\t%#8n\t=\t%#8n\n", vp->name, + db_printf("\t$%s:%s<%#x>\t%#8lln\t=\t%#8lln\n", vp->name, ap->modif, ap->thr_act, old_value, *valuep); } @@ -633,7 +507,7 @@ db_show_one_variable(void) aux_param.suffix[0] = i; (*cur->fcn)(cur, (db_expr_t *)0, DB_VAR_SHOW, &aux_param); } else { - db_printf("%#n", *(cur->valuep + i)); + db_printf("%#lln", *(cur->valuep + i)); db_find_xtrn_task_sym_and_offset(*(cur->valuep + i), &name, &offset, TASK_NULL); if (name != (char *)0 && offset <= db_maxoff && @@ -779,7 +653,7 @@ db_show_variable(void) aux_param.suffix[0] = i; (*cur->fcn)(cur, (db_expr_t *)0, DB_VAR_SHOW, &aux_param); } else { - db_printf("%#n", *(cur->valuep + i)); + db_printf("%#lln", *(cur->valuep + i)); db_find_xtrn_task_sym_and_offset(*(cur->valuep + i), &name, &offset, TASK_NULL); if (name != (char *)0 && offset <= db_maxoff && diff --git a/osfmk/ddb/db_watch.c b/osfmk/ddb/db_watch.c index c032fbc82..f6de79c76 100644 --- a/osfmk/ddb/db_watch.c +++ b/osfmk/ddb/db_watch.c @@ -397,8 +397,8 @@ db_set_watchpoints(void) for (watch = db_watchpoint_list; watch != 0; watch = watch->link) { map = (watch->task)? 
watch->task->map: kernel_map; pmap_protect(map->pmap, - trunc_page(watch->loaddr), - round_page(watch->hiaddr), + trunc_page_32(watch->loaddr), + round_page_32(watch->hiaddr), VM_PROT_READ); } db_watchpoints_inserted = TRUE; @@ -427,8 +427,8 @@ db_find_watchpoint( if (watch->task == task_space) { if ((watch->loaddr <= addr) && (addr < watch->hiaddr)) return (TRUE); - else if ((trunc_page(watch->loaddr) <= addr) && - (addr < round_page(watch->hiaddr))) + else if ((trunc_page_32(watch->loaddr) <= addr) && + (addr < round_page_32(watch->hiaddr))) found = watch; } } diff --git a/osfmk/default_pager/default_pager.c b/osfmk/default_pager/default_pager.c index 50e0f7969..3a6f75c5a 100644 --- a/osfmk/default_pager/default_pager.c +++ b/osfmk/default_pager/default_pager.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -273,8 +273,8 @@ start_def_pager(char *bs_device) /* MACH_PORT_FACE master_device_port; */ - MACH_PORT_FACE security_port; /* + MACH_PORT_FACE security_port; MACH_PORT_FACE root_ledger_wired; MACH_PORT_FACE root_ledger_paged; */ @@ -288,8 +288,8 @@ start_def_pager(char *bs_device) master_device_port = ipc_port_make_send(master_device_port); root_ledger_wired = ipc_port_make_send(root_wired_ledger_port); root_ledger_paged = ipc_port_make_send(root_paged_ledger_port); -*/ security_port = ipc_port_make_send(realhost.host_security_self); +*/ #if NORMA_VM @@ -302,6 +302,9 @@ start_def_pager(char *bs_device) /* setup read buffers, etc */ default_pager_initialize(); default_pager(); + + /* start the backing store monitor, it runs on a callout thread */ + thread_call_func(default_pager_backing_store_monitor, NULL, FALSE); } /* @@ -332,8 +335,8 @@ default_pager_info( bs_global_info(&pages_total, &pages_free); - infop->dpi_total_space = ptoa(pages_total); - infop->dpi_free_space = ptoa(pages_free); + infop->dpi_total_space = ptoa_32(pages_total); + infop->dpi_free_space = ptoa_32(pages_free); infop->dpi_page_size = vm_page_size; return KERN_SUCCESS; diff --git a/osfmk/default_pager/default_pager_internal.h b/osfmk/default_pager/default_pager_internal.h index 170b0a49c..0665447f9 100644 --- a/osfmk/default_pager/default_pager_internal.h +++ b/osfmk/default_pager/default_pager_internal.h @@ -812,4 +812,6 @@ extern boolean_t bs_set_default_clsize(unsigned int); extern boolean_t verbose; +extern void default_pager_backing_store_monitor(thread_call_param_t, thread_call_param_t); + #endif /* _DEFAULT_PAGER_INTERNAL_H_ */ diff --git a/osfmk/default_pager/default_pager_types.defs b/osfmk/default_pager/default_pager_types.defs index 2ff5f7112..038d1cc1f 100644 --- a/osfmk/default_pager/default_pager_types.defs +++ b/osfmk/default_pager/default_pager_types.defs @@ -28,6 +28,22 @@ /* * HISTORY * $Log: default_pager_types.defs,v $ + * Revision 1.4 2002/11/23 05:08:24 lindak + * Merged PR-3107160-3107168 into ZZ100 + * 3107160 Panther Kernel builds spew tons of compiler warnings about default + * argument + * 3107168 Kernel warnings about "extra tokens at end of #endif directive" + * Kernel + * + * Revision 1.3.1930.1 2002/11/21 22:11:29 sarcone + * + * Bug #:3107160,3107168 + * Submitted by: Chris Sarcone + * Reviewed by: Simon Douglas + * + * Fixed a bunch of compiler warnings about default arguments and + * extra tokens at end of #endif directives. 
+ * * Revision 1.3 2000/01/26 05:56:23 wsanchez * Add APSL * @@ -113,4 +129,4 @@ type backing_store_info_t = array[*:20] of integer_t; import ; -#endif _MACH_DEFAULT_PAGER_TYPES_DEFS_ +#endif /* _MACH_DEFAULT_PAGER_TYPES_DEFS_ */ diff --git a/osfmk/default_pager/dp_backing_store.c b/osfmk/default_pager/dp_backing_store.c index 8d0163625..2361da663 100644 --- a/osfmk/default_pager/dp_backing_store.c +++ b/osfmk/default_pager/dp_backing_store.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -552,7 +552,7 @@ default_pager_backing_store_create( priority = BS_MINPRI; bs->bs_priority = priority; - bs->bs_clsize = bs_get_global_clsize(atop(clsize)); + bs->bs_clsize = bs_get_global_clsize(atop_32(clsize)); BSL_LOCK(); queue_enter(&backing_store_list.bsl_queue, bs, backing_store_t, @@ -616,7 +616,7 @@ default_pager_backing_store_info( basic->bs_pages_out_fail= bs->bs_pages_out_fail; basic->bs_priority = bs->bs_priority; - basic->bs_clsize = ptoa(bs->bs_clsize); /* in bytes */ + basic->bs_clsize = ptoa_32(bs->bs_clsize); /* in bytes */ BS_UNLOCK(bs); @@ -1227,7 +1227,7 @@ ps_vstruct_create( vs->vs_errors = 0; vs->vs_clshift = local_log2(bs_get_global_clsize(0)); - vs->vs_size = ((atop(round_page(size)) - 1) >> vs->vs_clshift) + 1; + vs->vs_size = ((atop_32(round_page_32(size)) - 1) >> vs->vs_clshift) + 1; vs->vs_async_pending = 0; /* @@ -1453,6 +1453,9 @@ ps_allocate_cluster( ps = use_ps; PSL_LOCK(); PS_LOCK(ps); + + ASSERT(ps->ps_clcount != 0); + ps->ps_clcount--; dp_pages_free -= 1 << ps->ps_clshift; if(min_pages_trigger_port && @@ -1492,7 +1495,6 @@ ps_allocate_cluster( } return (vm_offset_t) -1; } - ASSERT(ps->ps_clcount != 0); /* * Look for an available cluster. At the end of the loop, @@ -1530,7 +1532,6 @@ ps_deallocate_cluster( paging_segment_t ps, vm_offset_t cluster) { - ipc_port_t trigger = IP_NULL; if (cluster >= (vm_offset_t) ps->ps_ncls) panic("ps_deallocate_cluster: Invalid cluster number"); @@ -1544,12 +1545,6 @@ ps_deallocate_cluster( clrbit(ps->ps_bmap, cluster); ++ps->ps_clcount; dp_pages_free += 1 << ps->ps_clshift; - if(max_pages_trigger_port - && (backing_store_release_trigger_disable == 0) - && (dp_pages_free > maximum_pages_free)) { - trigger = max_pages_trigger_port; - max_pages_trigger_port = NULL; - } PSL_UNLOCK(); /* @@ -1570,21 +1565,6 @@ ps_deallocate_cluster( ps_select_array[ps->ps_bs->bs_priority] = 0; PSL_UNLOCK(); - if (trigger != IP_NULL) { - VSL_LOCK(); - if(backing_store_release_trigger_disable != 0) { - assert_wait((event_t) - &backing_store_release_trigger_disable, - THREAD_UNINT); - VSL_UNLOCK(); - thread_block(THREAD_CONTINUE_NULL); - } else { - VSL_UNLOCK(); - } - default_pager_space_alert(trigger, LO_WAT_ALERT); - ipc_port_release_send(trigger); - } - return; } @@ -1773,7 +1753,7 @@ ps_clmap( VS_MAP_LOCK(vs); ASSERT(vs->vs_dmap); - cluster = atop(offset) >> vs->vs_clshift; + cluster = atop_32(offset) >> vs->vs_clshift; /* * Initialize cluster error value @@ -1889,14 +1869,14 @@ ps_clmap( * relatively quick. */ ASSERT(trunc_page(offset) == offset); - newcl = ptoa(newcl) << vs->vs_clshift; + newcl = ptoa_32(newcl) << vs->vs_clshift; newoff = offset & ((1<<(vm_page_shift + vs->vs_clshift)) - 1); if (flag == CL_ALLOC) { /* * set bits in the allocation bitmap according to which * pages were requested. size is in bytes. 
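The atop → atop_32 and ptoa → ptoa_32 renames running through dp_backing_store.c are byte↔page conversions made explicit about width, and ps_clmap()'s cluster = atop_32(offset) >> vs_clshift just shifts the page number down again by log2(pages per cluster). A sketch under the same assumed 4 KB page as above; the names follow the patch, the definitions are illustrative:

#include <stdint.h>

#define PAGE_SHIFT 12   /* assumed */

/* address-to-pages and pages-to-address */
static inline uint32_t atop_32(uint32_t addr)  { return addr >> PAGE_SHIFT; }
static inline uint32_t ptoa_32(uint32_t pages) { return pages << PAGE_SHIFT; }

/* Cluster index of a byte offset, as computed in ps_clmap()/ps_clunmap(). */
static inline uint32_t cluster_of(uint32_t offset, int clshift)
{
    return atop_32(offset) >> clshift;
}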
*/ - i = atop(newoff); + i = atop_32(newoff); while ((size > 0) && (i < VSCLSIZE(vs))) { VSM_SETALLOC(*vsmap, i); i++; @@ -1909,7 +1889,7 @@ ps_clmap( * Offset is not cluster aligned, so number of pages * and bitmaps must be adjusted */ - clmap->cl_numpages -= atop(newoff); + clmap->cl_numpages -= atop_32(newoff); CLMAP_SHIFT(clmap, vs); CLMAP_SHIFTALLOC(clmap, vs); } @@ -1938,7 +1918,7 @@ ps_clmap( } else { BS_STAT(clmap->cl_ps->ps_bs, clmap->cl_ps->ps_bs->bs_pages_out_fail += - atop(size)); + atop_32(size)); off = VSM_CLOFF(*vsmap); VSM_SETERR(*vsmap, error); } @@ -1985,7 +1965,7 @@ ps_clunmap( vm_offset_t newoff; int i; - cluster = atop(offset) >> vs->vs_clshift; + cluster = atop_32(offset) >> vs->vs_clshift; if (vs->vs_indirect) /* indirect map */ vsmap = vs->vs_imap[cluster/CLMAP_ENTRIES]; else @@ -2010,7 +1990,7 @@ ps_clunmap( * Not cluster aligned. */ ASSERT(trunc_page(newoff) == newoff); - i = atop(newoff); + i = atop_32(newoff); } else i = 0; while ((i < VSCLSIZE(vs)) && (length > 0)) { @@ -2081,7 +2061,7 @@ vs_cl_write_complete( dprintf(("write failed error = 0x%x\n", error)); /* add upl_abort code here */ } else - GSTAT(global_stats.gs_pages_out += atop(size)); + GSTAT(global_stats.gs_pages_out += atop_32(size)); /* * Notify the vstruct mapping code, so it can do its accounting. */ @@ -2237,7 +2217,7 @@ ps_read_device( default_pager_thread_t *dpt = NULL; device = dev_port_lookup(ps->ps_device); - clustered_reads[atop(size)]++; + clustered_reads[atop_32(size)]++; dev_offset = (ps->ps_offset + (offset >> (vm_page_shift - ps->ps_record_shift))); @@ -2369,7 +2349,7 @@ ps_write_device( - clustered_writes[atop(size)]++; + clustered_writes[atop_32(size)]++; dev_offset = (ps->ps_offset + (offset >> (vm_page_shift - ps->ps_record_shift))); @@ -2406,7 +2386,7 @@ ps_write_device( "device_write_request returned ", kr, addr, size, offset)); BS_STAT(ps->ps_bs, - ps->ps_bs->bs_pages_out_fail += atop(size)); + ps->ps_bs->bs_pages_out_fail += atop_32(size)); /* do the completion notification to free resources */ device_write_reply(reply_port, kr, 0); return PAGER_ERROR; @@ -2432,7 +2412,7 @@ ps_write_device( "device_write returned ", kr, addr, size, offset)); BS_STAT(ps->ps_bs, - ps->ps_bs->bs_pages_out_fail += atop(size)); + ps->ps_bs->bs_pages_out_fail += atop_32(size)); return PAGER_ERROR; } if (bytes_written & ((vm_page_size >> ps->ps_record_shift) - 1)) @@ -2494,7 +2474,7 @@ pvs_object_data_provided( upl, offset, size)); ASSERT(size > 0); - GSTAT(global_stats.gs_pages_in += atop(size)); + GSTAT(global_stats.gs_pages_in += atop_32(size)); #if USE_PRECIOUS @@ -2623,7 +2603,7 @@ pvs_cluster_read( /* * Let VM system know about holes in clusters. */ - GSTAT(global_stats.gs_pages_unavail += atop(abort_size)); + GSTAT(global_stats.gs_pages_unavail += atop_32(abort_size)); page_list_count = 0; memory_object_super_upl_request( @@ -2669,7 +2649,7 @@ pvs_cluster_read( while (cl_index < pages_in_cl && xfer_size < size) { /* - * accumulate allocated pages within + * accumulate allocated pages within * a physical segment */ if (CLMAP_ISSET(clmap, cl_index)) { @@ -2685,7 +2665,7 @@ pvs_cluster_read( if (cl_index < pages_in_cl || xfer_size >= size) { /* - * we've hit an unallocated page or + * we've hit an unallocated page or * the end of this request... 
go fire * the I/O */ @@ -2693,23 +2673,23 @@ pvs_cluster_read( } /* * we've hit the end of the current physical - * segment and there's more to do, so try + * segment and there's more to do, so try * moving to the next one */ seg_index++; ps_offset[seg_index] = - ps_clmap(vs, - cur_offset & ~cl_mask, + ps_clmap(vs, + cur_offset & ~cl_mask, &clmap, CL_FIND, 0, 0); psp[seg_index] = CLMAP_PS(clmap); ps_info_valid = 1; if ((ps_offset[seg_index - 1] != (ps_offset[seg_index] - cl_size)) || (psp[seg_index - 1] != psp[seg_index])) { /* - * if the physical segment we're about - * to step into is not contiguous to - * the one we're currently in, or it's + * if the physical segment we're about + * to step into is not contiguous to + * the one we're currently in, or it's * in a different paging file, or * it hasn't been allocated.... * we stop here and generate the I/O @@ -2718,7 +2698,7 @@ pvs_cluster_read( } /* * start with first page of the next physical - * segment + * segment */ cl_index = 0; } @@ -2734,10 +2714,10 @@ pvs_cluster_read( &upl, NULL, &page_list_count, request_flags | UPL_SET_INTERNAL); - error = ps_read_file(psp[beg_pseg], + error = ps_read_file(psp[beg_pseg], upl, (vm_offset_t) 0, - ps_offset[beg_pseg] + - (beg_indx * vm_page_size), + ps_offset[beg_pseg] + + (beg_indx * vm_page_size), xfer_size, &residual, 0); } else continue; @@ -2745,22 +2725,22 @@ pvs_cluster_read( failed_size = 0; /* - * Adjust counts and send response to VM. Optimize + * Adjust counts and send response to VM. Optimize * for the common case, i.e. no error and/or partial - * data. If there was an error, then we need to error + * data. If there was an error, then we need to error * the entire range, even if some data was successfully - * read. If there was a partial read we may supply some + * read. If there was a partial read we may supply some * data and may error some as well. In all cases the - * VM must receive some notification for every page in the - * range. + * VM must receive some notification for every page + * in the range. */ if ((error == KERN_SUCCESS) && (residual == 0)) { /* * Got everything we asked for, supply the data - * to the VM. Note that as a side effect of - * supplying * the data, the buffer holding the - * supplied data is * deallocated from the pager's - * address space. + * to the VM. Note that as a side effect of + * supplying the data, the buffer holding the + * supplied data is deallocated from the pager's + * address space. */ pvs_object_data_provided( vs, upl, vs_offset, xfer_size); @@ -2792,10 +2772,10 @@ pvs_cluster_read( fill = residual & ~vm_page_size; - lsize = (xfer_size - residual) + lsize = (xfer_size - residual) + fill; pvs_object_data_provided( - vs, upl, + vs, upl, vs_offset, lsize); if (lsize < xfer_size) { @@ -2809,12 +2789,12 @@ pvs_cluster_read( /* * If there was an error in any part of the range, tell * the VM. Note that error is explicitly checked again - * since it can be modified above. + * since it can be modified above. 
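The loop structure above (and the write-side version that follows) batches consecutive pages into a single transfer and cuts the I/O whenever the next physical segment is not contiguous with the current one or lives in a different paging file. A standalone sketch of that coalescing rule with hypothetical types; this is the shape of the logic, not the pager's code:

#include <stdbool.h>
#include <stddef.h>

struct extent { int seg; size_t seg_offset; };   /* where one page lives */

typedef void (*io_fn)(int seg, size_t offset, size_t len);

static void issue_coalesced(const struct extent *pages, size_t npages,
                            size_t page_size, io_fn io)
{
    size_t start = 0;
    for (size_t i = 1; i <= npages; i++) {
        bool cut = (i == npages) ||
                   pages[i].seg != pages[i - 1].seg ||          /* other paging file */
                   pages[i].seg_offset !=
                       pages[i - 1].seg_offset + page_size;     /* not contiguous */
        if (cut) {
            io(pages[start].seg, pages[start].seg_offset,
               (i - start) * page_size);
            start = i;
        }
    }
}

Cutting on either condition keeps each device request within one physically contiguous run, so the common case still goes out as one large I/O.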
*/ if (error != KERN_SUCCESS) { BS_STAT(psp[beg_pseg]->ps_bs, psp[beg_pseg]->ps_bs->bs_pages_in_fail - += atop(failed_size)); + += atop_32(failed_size)); } size -= xfer_size; vs_offset += xfer_size; @@ -2854,7 +2834,13 @@ vs_cluster_write( upl_page_info_t *pl; int page_index; int list_size; + int pages_in_cl; int cl_size; + int base_index; + int seg_size; + + pages_in_cl = 1 << vs->vs_clshift; + cl_size = pages_in_cl * vm_page_size; if (!dp_internal) { int page_list_count; @@ -2864,17 +2850,12 @@ vs_cluster_write( int num_dirty; int num_of_pages; int seg_index; - int pages_in_cl; - int must_abort; vm_offset_t upl_offset; vm_offset_t seg_offset; - vm_offset_t ps_offset[(VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_DEF_CLSHIFT]; - paging_segment_t psp[(VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_DEF_CLSHIFT]; + vm_offset_t ps_offset[((VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_DEF_CLSHIFT) + 1]; + paging_segment_t psp[((VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_DEF_CLSHIFT) + 1]; - pages_in_cl = 1 << vs->vs_clshift; - cl_size = pages_in_cl * vm_page_size; - if (bs_low) { super_size = cl_size; @@ -2894,18 +2875,20 @@ vs_cluster_write( (memory_object_offset_t)offset, cnt, super_size, &upl, NULL, &page_list_count, - request_flags | UPL_PAGEOUT); + request_flags | UPL_FOR_PAGEOUT); pl = UPL_GET_INTERNAL_PAGE_LIST(upl); + seg_size = cl_size - (upl->offset % cl_size); + upl_offset = upl->offset & ~(cl_size - 1); + for (seg_index = 0, transfer_size = upl->size; transfer_size > 0; ) { - ps_offset[seg_index] = - ps_clmap(vs, upl->offset + (seg_index * cl_size), - &clmap, CL_ALLOC, - transfer_size < cl_size ? - transfer_size : cl_size, 0); + ps_clmap(vs, + upl_offset, + &clmap, CL_ALLOC, + cl_size, 0); if (ps_offset[seg_index] == (vm_offset_t) -1) { upl_abort(upl, 0); @@ -2916,30 +2899,41 @@ vs_cluster_write( } psp[seg_index] = CLMAP_PS(clmap); - if (transfer_size > cl_size) { - transfer_size -= cl_size; + if (transfer_size > seg_size) { + transfer_size -= seg_size; + upl_offset += cl_size; + seg_size = cl_size; seg_index++; } else transfer_size = 0; } - for (page_index = 0, - num_of_pages = upl->size / vm_page_size; - page_index < num_of_pages; ) { + /* + * Ignore any non-present pages at the end of the + * UPL. + */ + for (page_index = upl->size / vm_page_size; page_index > 0;) + if (UPL_PAGE_PRESENT(pl, --page_index)) + break; + num_of_pages = page_index + 1; + + base_index = (upl->offset % cl_size) / PAGE_SIZE; + + for (page_index = 0; page_index < num_of_pages; ) { /* * skip over non-dirty pages */ for ( ; page_index < num_of_pages; page_index++) { - if (UPL_DIRTY_PAGE(pl, page_index) + if (UPL_DIRTY_PAGE(pl, page_index) || UPL_PRECIOUS_PAGE(pl, page_index)) /* * this is a page we need to write - * go see if we can buddy it up with + * go see if we can buddy it up with * others that are contiguous to it */ break; /* * if the page is not-dirty, but present we - * need to commit it... This is an unusual + * need to commit it... This is an unusual * case since we only asked for dirty pages */ if (UPL_PAGE_PRESENT(pl, page_index)) { @@ -2951,8 +2945,11 @@ vs_cluster_write( pl, page_list_count, &empty); - if (empty) + if (empty) { + assert(page_index == + num_of_pages - 1); upl_deallocate(upl); + } } } if (page_index == num_of_pages) @@ -2962,15 +2959,15 @@ vs_cluster_write( break; /* - * gather up contiguous dirty pages... we have at - * least 1 otherwise we would have bailed above + * gather up contiguous dirty pages... 
we have at + * least 1 * otherwise we would have bailed above * make sure that each physical segment that we step * into is contiguous to the one we're currently in * if it's not, we have to stop and write what we have */ - for (first_dirty = page_index; + for (first_dirty = page_index; page_index < num_of_pages; ) { - if ( !UPL_DIRTY_PAGE(pl, page_index) + if ( !UPL_DIRTY_PAGE(pl, page_index) && !UPL_PRECIOUS_PAGE(pl, page_index)) break; page_index++; @@ -2983,19 +2980,18 @@ vs_cluster_write( int cur_seg; int nxt_seg; - cur_seg = - (page_index - 1) / pages_in_cl; - nxt_seg = page_index / pages_in_cl; + cur_seg = (base_index + (page_index - 1))/pages_in_cl; + nxt_seg = (base_index + page_index)/pages_in_cl; if (cur_seg != nxt_seg) { if ((ps_offset[cur_seg] != (ps_offset[nxt_seg] - cl_size)) || (psp[cur_seg] != psp[nxt_seg])) - /* - * if the segment we're about - * to step into is not - * contiguous to the one we're - * currently in, or it's in a + /* + * if the segment we're about + * to step into is not + * contiguous to the one we're + * currently in, or it's in a * different paging file.... - * we stop here and generate + * we stop here and generate * the I/O */ break; @@ -3003,20 +2999,15 @@ vs_cluster_write( } } num_dirty = page_index - first_dirty; - must_abort = 1; if (num_dirty) { upl_offset = first_dirty * vm_page_size; - seg_index = first_dirty / pages_in_cl; - seg_offset = upl_offset - (seg_index * cl_size); transfer_size = num_dirty * vm_page_size; - while (transfer_size) { - int seg_size; if ((seg_size = cl_size - - (upl_offset % cl_size)) + ((upl->offset + upl_offset) % cl_size)) > transfer_size) seg_size = transfer_size; @@ -3029,22 +3020,26 @@ vs_cluster_write( } upl_offset = first_dirty * vm_page_size; transfer_size = num_dirty * vm_page_size; + + seg_index = (base_index + first_dirty) / pages_in_cl; + seg_offset = (upl->offset + upl_offset) % cl_size; + error = ps_write_file(psp[seg_index], upl, upl_offset, ps_offset[seg_index] + seg_offset, transfer_size, flags); - must_abort = 0; - } - if (must_abort) { + } else { boolean_t empty = FALSE; upl_abort_range(upl, first_dirty * vm_page_size, num_dirty * vm_page_size, UPL_ABORT_NOTIFY_EMPTY, &empty); - if (empty) + if (empty) { + assert(page_index == num_of_pages); upl_deallocate(upl); + } } } @@ -3083,7 +3078,7 @@ vs_cluster_write( cnt, flags); if (error) break; - } + } if (error) break; actual_offset += cnt; @@ -3145,7 +3140,7 @@ ps_vstruct_allocated_size( } } - return ptoa(num_pages); + return ptoa_32(num_pages); } size_t @@ -3354,7 +3349,7 @@ vs_get_map_entry( struct vs_map *vsmap; vm_offset_t cluster; - cluster = atop(offset) >> vs->vs_clshift; + cluster = atop_32(offset) >> vs->vs_clshift; if (vs->vs_indirect) { long ind_block = cluster/CLMAP_ENTRIES; @@ -3719,7 +3714,7 @@ ps_read_file( int result; - clustered_reads[atop(size)]++; + clustered_reads[atop_32(size)]++; f_offset = (vm_object_offset_t)(ps->ps_offset + offset); @@ -3757,7 +3752,7 @@ ps_write_file( int error = 0; - clustered_writes[atop(size)]++; + clustered_writes[atop_32(size)]++; f_offset = (vm_object_offset_t)(ps->ps_offset + offset); if (vnode_pageout(ps->ps_vnode, @@ -3802,3 +3797,73 @@ default_pager_triggers(MACH_PORT_FACE default_pager, return kr; } + +/* + * Monitor the amount of available backing store vs. the amount of + * required backing store, notify a listener (if present) when + * backing store may safely be removed. 
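This comment describes the policy behind the monitor added below, and why the immediate high-water checks were deleted from ps_deallocate_cluster() and dp_memory_object_deallocate() earlier in this patch: rather than firing an alert the moment dp_pages_free crosses maximum_pages_free, the callout samples every PF_INTERVAL seconds and notifies only after PF_LATENCY consecutive samples past the threshold. A user-space sketch of the same debounce with made-up names; the kernel version re-arms itself via thread_call_func_delayed() instead of looping:

#include <stdbool.h>

#define LATENCY 10      /* consecutive samples required (cf. PF_LATENCY) */

static int low_count;   /* cf. dp_pages_free_low_count */

/* Called once per sampling interval (cf. PF_INTERVAL seconds). */
bool monitor_tick(long level, long threshold)
{
    if (level > threshold)
        low_count++;
    else
        low_count = 0;

    if (low_count > LATENCY) {
        low_count = 0;   /* notification sent; start the count over */
        return true;     /* safe to release some backing store */
    }
    return false;
}

A single dip below the threshold resets the count, so a brief spike in free pages cannot trigger the en-masse release the comment warns about.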
+ * + * We attempt to avoid the situation where backing store is + * discarded en masse, as this can lead to thrashing as the + * backing store is compacted. + */ + +#define PF_INTERVAL 3 /* time between free level checks */ +#define PF_LATENCY 10 /* number of intervals before release */ + +static int dp_pages_free_low_count = 0; + +void +default_pager_backing_store_monitor(thread_call_param_t p1, thread_call_param_t p2) +{ + unsigned long long average; + ipc_port_t trigger; + uint64_t deadline; + + /* + * We determine whether it will be safe to release some + * backing store by watching the free page level. If + * it remains below the maximum_pages_free threshold for + * at least PF_LATENCY checks (taken at PF_INTERVAL seconds) + * then we deem it safe. + * + * Note that this establishes a maximum rate at which backing + * store will be released, as each notification (currently) + * only results in a single backing store object being + * released. + */ + if (dp_pages_free > maximum_pages_free) { + dp_pages_free_low_count++; + } else { + dp_pages_free_low_count = 0; + } + + /* decide whether to send notification */ + trigger = IP_NULL; + if (max_pages_trigger_port && + (backing_store_release_trigger_disable == 0) && + (dp_pages_free_low_count > PF_LATENCY)) { + trigger = max_pages_trigger_port; + max_pages_trigger_port = NULL; + } + + /* send notification */ + if (trigger != IP_NULL) { + VSL_LOCK(); + if(backing_store_release_trigger_disable != 0) { + assert_wait((event_t) + &backing_store_release_trigger_disable, + THREAD_UNINT); + VSL_UNLOCK(); + thread_block(THREAD_CONTINUE_NULL); + } else { + VSL_UNLOCK(); + } + default_pager_space_alert(trigger, LO_WAT_ALERT); + ipc_port_release_send(trigger); + dp_pages_free_low_count = 0; + } + + clock_interval_to_deadline(PF_INTERVAL, NSEC_PER_SEC, &deadline); + thread_call_func_delayed(default_pager_backing_store_monitor, NULL, deadline); +} diff --git a/osfmk/default_pager/dp_memory_object.c b/osfmk/default_pager/dp_memory_object.c index 7c21f3d6c..9a0a2e5c2 100644 --- a/osfmk/default_pager/dp_memory_object.c +++ b/osfmk/default_pager/dp_memory_object.c @@ -473,7 +473,6 @@ dp_memory_object_deallocate( { vstruct_t vs; mach_port_seqno_t seqno; - ipc_port_t trigger; /* * Because we don't give out multiple first references @@ -555,22 +554,6 @@ dp_memory_object_deallocate( thread_wakeup((event_t)&backing_store_release_trigger_disable); } VSL_UNLOCK(); - - PSL_LOCK(); - if(max_pages_trigger_port - && (backing_store_release_trigger_disable == 0) - && (dp_pages_free > maximum_pages_free)) { - trigger = max_pages_trigger_port; - max_pages_trigger_port = NULL; - } else - trigger = IP_NULL; - PSL_UNLOCK(); - - if (trigger != IP_NULL) { - default_pager_space_alert(trigger, LO_WAT_ALERT); - ipc_port_release_send(trigger); - } - } kern_return_t @@ -659,7 +642,7 @@ dp_memory_object_data_initialize( DEBUG(DEBUG_MO_EXTERNAL, ("mem_obj=0x%x,offset=0x%x,cnt=0x%x\n", (int)mem_obj, (int)offset, (int)size)); - GSTAT(global_stats.gs_pages_init += atop(size)); + GSTAT(global_stats.gs_pages_init += atop_32(size)); vs_lookup(mem_obj, vs); vs_lock(vs); @@ -900,10 +883,10 @@ default_pager_objects( if (kr != KERN_SUCCESS) return kr; - osize = round_page(*ocountp * sizeof * objects); + osize = round_page_32(*ocountp * sizeof * objects); kr = vm_map_wire(ipc_kernel_map, - trunc_page((vm_offset_t)objects), - round_page(((vm_offset_t)objects) + osize), + trunc_page_32((vm_offset_t)objects), + round_page_32(((vm_offset_t)objects) + osize), VM_PROT_READ|VM_PROT_WRITE, FALSE); 
osize=0; @@ -929,7 +912,7 @@ default_pager_objects( vm_offset_t newaddr; vm_size_t newsize; - newsize = 2 * round_page(actual * sizeof * objects); + newsize = 2 * round_page_32(actual * sizeof * objects); kr = vm_allocate(kernel_map, &newaddr, newsize, TRUE); if (kr != KERN_SUCCESS) @@ -945,7 +928,7 @@ default_pager_objects( vm_offset_t newaddr; vm_size_t newsize; - newsize = 2 * round_page(actual * sizeof * pagers); + newsize = 2 * round_page_32(actual * sizeof * pagers); kr = vm_allocate(kernel_map, &newaddr, newsize, TRUE); if (kr != KERN_SUCCESS) @@ -1043,7 +1026,7 @@ default_pager_objects( } else { vm_offset_t used; - used = round_page(actual * sizeof * objects); + used = round_page_32(actual * sizeof * objects); if (used != osize) (void) vm_deallocate(kernel_map, @@ -1069,7 +1052,7 @@ default_pager_objects( } else { vm_offset_t used; - used = round_page(actual * sizeof * pagers); + used = round_page_32(actual * sizeof * pagers); if (used != psize) (void) vm_deallocate(kernel_map, @@ -1125,10 +1108,10 @@ default_pager_object_pages( if (kr != KERN_SUCCESS) return kr; - size = round_page(*countp * sizeof * pages); + size = round_page_32(*countp * sizeof * pages); kr = vm_map_wire(ipc_kernel_map, - trunc_page((vm_offset_t)pages), - round_page(((vm_offset_t)pages) + size), + trunc_page_32((vm_offset_t)pages), + round_page_32(((vm_offset_t)pages) + size), VM_PROT_READ|VM_PROT_WRITE, FALSE); size=0; @@ -1184,7 +1167,7 @@ default_pager_object_pages( if (pages != *pagesp) (void) vm_deallocate(kernel_map, addr, size); - size = round_page(actual * sizeof * pages); + size = round_page_32(actual * sizeof * pages); kr = vm_allocate(kernel_map, &addr, size, TRUE); if (kr != KERN_SUCCESS) return kr; @@ -1213,7 +1196,7 @@ default_pager_object_pages( } else { vm_offset_t used; - used = round_page(actual * sizeof * pages); + used = round_page_32(actual * sizeof * pages); if (used != size) (void) vm_deallocate(kernel_map, diff --git a/osfmk/device/device.defs b/osfmk/device/device.defs index 01f3f1dce..5f0d6b5ac 100644 --- a/osfmk/device/device.defs +++ b/osfmk/device/device.defs @@ -452,6 +452,37 @@ routine io_registry_entry_get_property_recursively( out properties : io_buf_ptr_t, physicalcopy ); + +routine io_service_get_state( + service : io_object_t; + out state : uint64_t + ); + +routine io_service_get_matching_services_ool( + master_port : mach_port_t; + in matching : io_buf_ptr_t, physicalcopy; + out result : natural_t; + out existing : io_object_t + ); + +routine io_service_match_property_table_ool( + service : io_object_t; + in matching : io_buf_ptr_t, physicalcopy; + out result : natural_t; + out matches : boolean_t + ); + +routine io_service_add_notification_ool( + master_port : mach_port_t; + in notification_type : io_name_t; + in matching : io_buf_ptr_t, physicalcopy; + in wake_port : mach_port_make_send_t; + in reference : io_async_ref_t; + out result : natural_t; + out notification : io_object_t + ); + + #endif diff --git a/osfmk/device/device_init.c b/osfmk/device/device_init.c index ac1823984..4200dda48 100644 --- a/osfmk/device/device_init.c +++ b/osfmk/device/device_init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
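The new *_ool routines added to device.defs above take their matching dictionary as io_buf_ptr_t with the physicalcopy disposition, so arbitrarily large property tables no longer have to fit inline in the message. What MIG generates for such a parameter boils down to an out-of-line descriptor in a complex Mach message; a hand-rolled user-space sketch of that shape (the helper and its arguments are hypothetical, the message-layout types are the standard Mach ones):

#include <mach/mach.h>
#include <string.h>

typedef struct {
    mach_msg_header_t         header;
    mach_msg_body_t           body;   /* descriptor count */
    mach_msg_ool_descriptor_t data;   /* the out-of-line payload */
} ool_msg_t;

/* Send `len` bytes out-of-line to `dest` (hypothetical helper). */
kern_return_t send_ool(mach_port_t dest, void *buf, mach_msg_size_t len)
{
    ool_msg_t msg;

    memset(&msg, 0, sizeof(msg));
    msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0) |
                           MACH_MSGH_BITS_COMPLEX;
    msg.header.msgh_size        = sizeof(msg);
    msg.header.msgh_remote_port = dest;

    msg.body.msgh_descriptor_count = 1;

    msg.data.address    = buf;
    msg.data.size       = len;
    msg.data.deallocate = FALSE;
    msg.data.copy       = MACH_MSG_PHYSICAL_COPY;  /* cf. "physicalcopy" */
    msg.data.type       = MACH_MSG_OOL_DESCRIPTOR;

    return mach_msg(&msg.header, MACH_MSG_SEND_MSG, sizeof(msg), 0,
                    MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}

With MACH_MSG_PHYSICAL_COPY the kernel copies the pages at send time rather than mapping them copy-on-write, which matches the semantics the .defs annotation requests.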
* * @APPLE_LICENSE_HEADER_START@ * @@ -75,8 +75,8 @@ device_service_create(void) panic("can't allocate master device port"); ipc_kobject_set(master_device_port, 1, IKOT_MASTER_DEVICE); - host_set_io_master(host_priv_self(), - ipc_port_make_send(master_device_port)); + kernel_set_special_port(host_priv_self(), HOST_IO_MASTER_PORT, + ipc_port_make_send(master_device_port)); #if 0 ds_init(); diff --git a/osfmk/device/iokit_rpc.c b/osfmk/device/iokit_rpc.c index 008f828fd..0662600fe 100644 --- a/osfmk/device/iokit_rpc.c +++ b/osfmk/device/iokit_rpc.c @@ -64,7 +64,6 @@ #ifdef __ppc__ #include -#include #endif #include @@ -83,7 +82,7 @@ extern ipc_port_t iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type ); extern kern_return_t iokit_client_died( io_object_t obj, - ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t mscount ); + ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount ); extern kern_return_t iokit_client_memory_for_type( @@ -390,67 +389,80 @@ iokit_notify( mach_msg_header_t * msg ) } } -#ifndef i386 -unsigned int IOTranslateCacheBits(struct phys_entry *pp) +/* need to create a pmap function to generalize */ +unsigned int IODefaultCacheBits(addr64_t pa) { - unsigned int flags; - unsigned int memattr; + unsigned int flags; +#ifndef i386 + struct phys_entry * pp; + + // Find physical address + if ((pp = pmap_find_physentry(pa >> 12))) { + // Use physical attributes as default + // NOTE: DEVICE_PAGER_FLAGS are made to line up + flags = VM_MEM_COHERENT; /* We only support coherent memory */ + if(pp->ppLink & ppG) flags |= VM_MEM_GUARDED; /* Add in guarded if it is */ + if(pp->ppLink & ppI) flags |= VM_MEM_NOT_CACHEABLE; /* Add in cache inhibited if so */ + } else + // If no physical, just hard code attributes + flags = VM_WIMG_IO; +#else + extern vm_offset_t avail_end; - /* need to create a pmap function to generalize */ - memattr = ((pp->pte1 & 0x00000078) >> 3); + if (pa < avail_end) + flags = VM_WIMG_COPYBACK; + else + flags = VM_WIMG_IO; +#endif - /* NOTE: DEVICE_PAGER_FLAGS are made to line up */ - flags = memattr & VM_WIMG_MASK; - return flags; + return flags; } -#endif kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa, vm_size_t length, unsigned int options) { vm_size_t off; vm_prot_t prot; - int memattr; - struct phys_entry *pp; - pmap_t pmap = map->pmap; + unsigned int flags; + pmap_t pmap = map->pmap; prot = (options & kIOMapReadOnly) ? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE); + switch(options & kIOMapCacheMask ) { /* What cache mode do we need? */ + + case kIOMapDefaultCache: + default: + flags = IODefaultCacheBits(pa); + break; + + case kIOMapInhibitCache: + flags = VM_WIMG_IO; + break; + + case kIOMapWriteThruCache: + flags = VM_WIMG_WTHRU; + break; + + case kIOWriteCombineCache: + flags = VM_WIMG_WCOMB; + break; + + case kIOMapCopybackCache: + flags = VM_WIMG_COPYBACK; + break; + } #if __ppc__ - switch(options & kIOMapCacheMask ) { /* What cache mode do we need? 
*/ - - case kIOMapDefaultCache: - default: - if(pp = pmap_find_physentry(pa)) { /* Find physical address */ - memattr = ((pp->pte1 & 0x00000078) >> 3); /* Use physical attributes as default */ - } - else { /* If no physical, just hard code attributes */ - memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED; - } - break; - - case kIOMapInhibitCache: - memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED; - break; - - case kIOMapWriteThruCache: - memattr = PTE_WIMG_WT_CACHED_COHERENT_GUARDED; - break; - - case kIOMapCopybackCache: - memattr = PTE_WIMG_CB_CACHED_COHERENT; - break; - } + // Set up a block mapped area + pmap_map_block(pmap, (addr64_t)va, (ppnum_t)(pa >> 12), length, prot, flags, 0); - pmap_map_block(pmap, va, pa, length, prot, memattr, 0); /* Set up a block mapped area */ - #else -// enter each page's physical address in the target map - for (off = 0; off < length; off += page_size) { /* Loop for the whole length */ - pmap_enter(pmap, va + off, pa + off, prot, VM_WIMG_USE_DEFAULT, TRUE); /* Map it in */ - } +// enter each page's physical address in the target map + + for (off = 0; off < length; off += page_size) + pmap_enter(pmap, va + off, (pa + off) >> 12, prot, flags, TRUE); + #endif return( KERN_SUCCESS ); @@ -460,7 +472,7 @@ kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length) { pmap_t pmap = map->pmap; - pmap_remove(pmap, trunc_page(va), round_page(va + length)); + pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length)); return( KERN_SUCCESS ); } @@ -468,6 +480,5 @@ kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length) void IOGetTime( mach_timespec_t * clock_time); void IOGetTime( mach_timespec_t * clock_time) { - *clock_time = clock_get_system_value(); + clock_get_system_nanotime(&clock_time->tv_sec, &clock_time->tv_nsec); } - diff --git a/osfmk/i386/AT386/asm_startup.h b/osfmk/i386/AT386/asm_startup.h index 4bc3ef027..a951c108c 100644 --- a/osfmk/i386/AT386/asm_startup.h +++ b/osfmk/i386/AT386/asm_startup.h @@ -266,6 +266,7 @@ addl $MEM_BASE,%ebx /* translate */ 1: #else + movl %ebx,PA(EXT(boot_args_start)) /* Save KERNBOOTSTRUCT */ cld call PA(EXT(i386_preinit)) movl %eax,%ebx diff --git a/osfmk/i386/AT386/bbclock.c b/osfmk/i386/AT386/bbclock.c index 25564bb58..4d3afcf63 100644 --- a/osfmk/i386/AT386/bbclock.c +++ b/osfmk/i386/AT386/bbclock.c @@ -204,7 +204,7 @@ bbc_settime( rtclk.rtc_hr = dectohexdec(n/60); n = (new_time->tv_sec - diff) / (3600 * 24); /* days */ rtclk.rtc_dow = (n + 4) % 7; /* 1/1/70 is Thursday */ - for (j = 1970; n >= (i = yeartoday(j)); j++) + for (j = 70; n >= (i = yeartoday(j)); j++) n -= i; rtclk.rtc_yr = dectohexdec(j % 100); if (yeartoday(j) == 366) @@ -289,7 +289,7 @@ int yeartoday( int year) { - year += 1900; + year += 1900; return((year % 4) ? 365 : ((year % 100) ? 366 : ((year % 400) ? 365: 366))); } diff --git a/osfmk/i386/AT386/iso_scan_font.h b/osfmk/i386/AT386/iso_scan_font.h deleted file mode 100644 index 26a96c403..000000000 --- a/osfmk/i386/AT386/iso_scan_font.h +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. 
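The bbclock.c change above fixes a unit mismatch: bbc_settime() now counts years from 70 (years since 1900, the RTC's convention) because yeartoday() adds 1900 before applying the Gregorian leap rule, whereas the old loop starting at j = 1970 had yeartoday() evaluating years like 3870. A standalone restatement of the arithmetic, as a sketch:

/* Days in a year, taking years-since-1900 as yeartoday() does. */
static int days_in_year(int years_since_1900)
{
    int y = years_since_1900 + 1900;
    if (y % 4)   return 365;        /* not divisible by 4 */
    if (y % 100) return 366;        /* divisible by 4, not by 100 */
    if (y % 400) return 365;        /* divisible by 100, not by 400 */
    return 366;                     /* divisible by 400 */
}

/* Day 0 is 1 Jan 1970; walk forward exactly as bbc_settime() does. */
static int year_of_epoch_day(long n)
{
    int j;
    for (j = 70; n >= days_in_year(j); j++)
        n -= days_in_year(j);
    return j + 1900;   /* e.g. n = 365 lands in 1971, since 1970 is not a leap year */
}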
Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_COPYRIGHT@ - */ - -/* - * ISO Latin-1 Font - * - * Copyright (c) 2000 - * Ka-Ping Yee - * - * This font may be freely used for any purpose. - */ - -/* - * adjusted 'A' 'V' to improve their dense appearance (ie. lightened) - * adjusted 'i' 'l' to improve their flow within a word (ie. widened) - * adjusted 'E' 'F' '#' - */ - -unsigned char iso_font[256*16] = { -/* 0 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 1 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 2 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 3 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 4 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 5 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 6 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 7 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 8 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 9 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 10 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 11 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 12 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 13 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 14 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 15 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 16 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 17 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 18 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 19 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 20 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 21 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 22 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 23 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 24 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 25 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 26 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 27 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 
28 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 29 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 30 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 31 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 32 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 33 */ 0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00, -/* 34 */ 0x00,0x00,0x6c,0x6c,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 35 */ 0x00,0x00,0x00,0x36,0x36,0x7f,0x36,0x36,0x7f,0x36,0x36,0x00,0x00,0x00,0x00,0x00, -/* 36 */ 0x00,0x08,0x08,0x3e,0x6b,0x0b,0x0b,0x3e,0x68,0x68,0x6b,0x3e,0x08,0x08,0x00,0x00, -/* 37 */ 0x00,0x00,0x00,0x33,0x13,0x18,0x08,0x0c,0x04,0x06,0x32,0x33,0x00,0x00,0x00,0x00, -/* 38 */ 0x00,0x00,0x1c,0x36,0x36,0x1c,0x6c,0x3e,0x33,0x33,0x7b,0xce,0x00,0x00,0x00,0x00, -/* 39 */ 0x00,0x00,0x18,0x18,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 40 */ 0x00,0x00,0x30,0x18,0x18,0x0c,0x0c,0x0c,0x0c,0x0c,0x18,0x18,0x30,0x00,0x00,0x00, -/* 41 */ 0x00,0x00,0x0c,0x18,0x18,0x30,0x30,0x30,0x30,0x30,0x18,0x18,0x0c,0x00,0x00,0x00, -/* 42 */ 0x00,0x00,0x00,0x00,0x00,0x36,0x1c,0x7f,0x1c,0x36,0x00,0x00,0x00,0x00,0x00,0x00, -/* 43 */ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00, -/* 44 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x0c,0x00,0x00,0x00, -/* 45 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 46 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00, -/* 47 */ 0x00,0x00,0x60,0x20,0x30,0x10,0x18,0x08,0x0c,0x04,0x06,0x02,0x03,0x00,0x00,0x00, -/* 48 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x6b,0x6b,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 49 */ 0x00,0x00,0x18,0x1e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, -/* 50 */ 0x00,0x00,0x3e,0x63,0x60,0x60,0x30,0x18,0x0c,0x06,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 51 */ 0x00,0x00,0x3e,0x63,0x60,0x60,0x3c,0x60,0x60,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 52 */ 0x00,0x00,0x30,0x38,0x3c,0x36,0x33,0x7f,0x30,0x30,0x30,0x30,0x00,0x00,0x00,0x00, -/* 53 */ 0x00,0x00,0x7f,0x03,0x03,0x3f,0x60,0x60,0x60,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 54 */ 0x00,0x00,0x3c,0x06,0x03,0x03,0x3f,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 55 */ 0x00,0x00,0x7f,0x60,0x30,0x30,0x18,0x18,0x18,0x0c,0x0c,0x0c,0x00,0x00,0x00,0x00, -/* 56 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x3e,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 57 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x7e,0x60,0x60,0x60,0x30,0x1e,0x00,0x00,0x00,0x00, -/* 58 */ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00, -/* 59 */ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x0c,0x00,0x00,0x00, -/* 60 */ 0x00,0x00,0x60,0x30,0x18,0x0c,0x06,0x06,0x0c,0x18,0x30,0x60,0x00,0x00,0x00,0x00, -/* 61 */ 0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 62 */ 0x00,0x00,0x06,0x0c,0x18,0x30,0x60,0x60,0x30,0x18,0x0c,0x06,0x00,0x00,0x00,0x00, -/* 63 */ 0x00,0x00,0x3e,0x63,0x60,0x30,0x30,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00, -/* 64 */ 0x00,0x00,0x3c,0x66,0x73,0x7b,0x6b,0x6b,0x7b,0x33,0x06,0x3c,0x00,0x00,0x00,0x00, -/* 65 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 66 */ 0x00,0x00,0x3f,0x63,0x63,0x63,0x3f,0x63,0x63,0x63,0x63,0x3f,0x00,0x00,0x00,0x00, -/* 67 */ 
0x00,0x00,0x3c,0x66,0x03,0x03,0x03,0x03,0x03,0x03,0x66,0x3c,0x00,0x00,0x00,0x00, -/* 68 */ 0x00,0x00,0x1f,0x33,0x63,0x63,0x63,0x63,0x63,0x63,0x33,0x1f,0x00,0x00,0x00,0x00, -/* 69 */ 0x00,0x00,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 70 */ 0x00,0x00,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00, -/* 71 */ 0x00,0x00,0x3c,0x66,0x03,0x03,0x03,0x73,0x63,0x63,0x66,0x7c,0x00,0x00,0x00,0x00, -/* 72 */ 0x00,0x00,0x63,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 73 */ 0x00,0x00,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, -/* 74 */ 0x00,0x00,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x33,0x1e,0x00,0x00,0x00,0x00, -/* 75 */ 0x00,0x00,0x63,0x33,0x1b,0x0f,0x07,0x07,0x0f,0x1b,0x33,0x63,0x00,0x00,0x00,0x00, -/* 76 */ 0x00,0x00,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 77 */ 0x00,0x00,0x63,0x63,0x77,0x7f,0x7f,0x6b,0x6b,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 78 */ 0x00,0x00,0x63,0x63,0x67,0x6f,0x6f,0x7b,0x7b,0x73,0x63,0x63,0x00,0x00,0x00,0x00, -/* 79 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 80 */ 0x00,0x00,0x3f,0x63,0x63,0x63,0x63,0x3f,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00, -/* 81 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x6f,0x7b,0x3e,0x30,0x60,0x00,0x00, -/* 82 */ 0x00,0x00,0x3f,0x63,0x63,0x63,0x63,0x3f,0x1b,0x33,0x63,0x63,0x00,0x00,0x00,0x00, -/* 83 */ 0x00,0x00,0x3e,0x63,0x03,0x03,0x0e,0x38,0x60,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 84 */ 0x00,0x00,0x7e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, -/* 85 */ 0x00,0x00,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 86 */ 0x00,0x00,0x63,0x63,0x63,0x63,0x63,0x36,0x36,0x1c,0x1c,0x08,0x00,0x00,0x00,0x00, -/* 87 */ 0x00,0x00,0x63,0x63,0x6b,0x6b,0x6b,0x6b,0x7f,0x36,0x36,0x36,0x00,0x00,0x00,0x00, -/* 88 */ 0x00,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x36,0x36,0x63,0x63,0x00,0x00,0x00,0x00, -/* 89 */ 0x00,0x00,0xc3,0xc3,0x66,0x66,0x3c,0x3c,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, -/* 90 */ 0x00,0x00,0x7f,0x30,0x30,0x18,0x18,0x0c,0x0c,0x06,0x06,0x7f,0x00,0x00,0x00,0x00, -/* 91 */ 0x00,0x00,0x3c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x3c,0x00,0x00,0x00,0x00, -/* 92 */ 0x00,0x00,0x03,0x02,0x06,0x04,0x0c,0x08,0x18,0x10,0x30,0x20,0x60,0x00,0x00,0x00, -/* 93 */ 0x00,0x00,0x3c,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x3c,0x00,0x00,0x00,0x00, -/* 94 */ 0x00,0x08,0x1c,0x36,0x63,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 95 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x00,0x00, -/* 96 */ 0x00,0x00,0x0c,0x0c,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 97 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 98 */ 0x00,0x00,0x03,0x03,0x03,0x3b,0x67,0x63,0x63,0x63,0x67,0x3b,0x00,0x00,0x00,0x00, -/* 99 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x03,0x03,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 100 */ 0x00,0x00,0x60,0x60,0x60,0x6e,0x73,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 101 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 102 */ 0x00,0x00,0x3c,0x66,0x06,0x1f,0x06,0x06,0x06,0x06,0x06,0x06,0x00,0x00,0x00,0x00, -/* 103 */ 0x00,0x00,0x00,0x00,0x00,0x6e,0x73,0x63,0x63,0x63,0x73,0x6e,0x60,0x63,0x3e,0x00, -/* 104 */ 0x00,0x00,0x03,0x03,0x03,0x3b,0x67,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 105 */ 0x00,0x00,0x0c,0x0c,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, -/* 106 
*/ 0x00,0x00,0x30,0x30,0x00,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x33,0x1e,0x00, -/* 107 */ 0x00,0x00,0x03,0x03,0x03,0x63,0x33,0x1b,0x0f,0x1f,0x33,0x63,0x00,0x00,0x00,0x00, -/* 108 */ 0x00,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, -/* 109 */ 0x00,0x00,0x00,0x00,0x00,0x35,0x6b,0x6b,0x6b,0x6b,0x6b,0x6b,0x00,0x00,0x00,0x00, -/* 110 */ 0x00,0x00,0x00,0x00,0x00,0x3b,0x67,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 111 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 112 */ 0x00,0x00,0x00,0x00,0x00,0x3b,0x67,0x63,0x63,0x63,0x67,0x3b,0x03,0x03,0x03,0x00, -/* 113 */ 0x00,0x00,0x00,0x00,0x00,0x6e,0x73,0x63,0x63,0x63,0x73,0x6e,0x60,0xe0,0x60,0x00, -/* 114 */ 0x00,0x00,0x00,0x00,0x00,0x3b,0x67,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00, -/* 115 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x0e,0x38,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 116 */ 0x00,0x00,0x00,0x0c,0x0c,0x3e,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, -/* 117 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 118 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x08,0x00,0x00,0x00,0x00, -/* 119 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x6b,0x6b,0x6b,0x3e,0x36,0x36,0x00,0x00,0x00,0x00, -/* 120 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x36,0x1c,0x1c,0x1c,0x36,0x63,0x00,0x00,0x00,0x00, -/* 121 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x0c,0x0c,0x06,0x03,0x00, -/* 122 */ 0x00,0x00,0x00,0x00,0x00,0x7f,0x60,0x30,0x18,0x0c,0x06,0x7f,0x00,0x00,0x00,0x00, -/* 123 */ 0x00,0x00,0x70,0x18,0x18,0x18,0x18,0x0e,0x18,0x18,0x18,0x18,0x70,0x00,0x00,0x00, -/* 124 */ 0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00, -/* 125 */ 0x00,0x00,0x0e,0x18,0x18,0x18,0x18,0x70,0x18,0x18,0x18,0x18,0x0e,0x00,0x00,0x00, -/* 126 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x6e,0x3b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 127 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 128 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 129 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 130 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 131 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 132 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 133 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 134 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 135 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 136 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 137 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 138 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 139 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 140 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 141 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 142 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 143 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 144 */ 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 145 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 146 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 147 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 148 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 149 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 150 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 151 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 152 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 153 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 154 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 155 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 156 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 157 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 158 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 159 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 160 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 161 */ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00, -/* 162 */ 0x00,0x00,0x00,0x08,0x08,0x3e,0x6b,0x0b,0x0b,0x0b,0x6b,0x3e,0x08,0x08,0x00,0x00, -/* 163 */ 0x00,0x00,0x1c,0x36,0x06,0x06,0x1f,0x06,0x06,0x07,0x6f,0x3b,0x00,0x00,0x00,0x00, -/* 164 */ 0x00,0x00,0x00,0x00,0x66,0x3c,0x66,0x66,0x66,0x3c,0x66,0x00,0x00,0x00,0x00,0x00, -/* 165 */ 0x00,0x00,0xc3,0xc3,0x66,0x66,0x3c,0x7e,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00, -/* 166 */ 0x00,0x00,0x18,0x18,0x18,0x18,0x00,0x00,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, -/* 167 */ 0x00,0x3c,0x66,0x0c,0x1e,0x33,0x63,0x66,0x3c,0x18,0x33,0x1e,0x00,0x00,0x00,0x00, -/* 168 */ 0x00,0x00,0x36,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 169 */ 0x00,0x00,0x3c,0x42,0x99,0xa5,0x85,0xa5,0x99,0x42,0x3c,0x00,0x00,0x00,0x00,0x00, -/* 170 */ 0x00,0x1e,0x30,0x3e,0x33,0x3b,0x36,0x00,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 171 */ 0x00,0x00,0x00,0x00,0x00,0x6c,0x36,0x1b,0x1b,0x36,0x6c,0x00,0x00,0x00,0x00,0x00, -/* 172 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x7f,0x60,0x60,0x60,0x00,0x00,0x00,0x00,0x00,0x00, -/* 173 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 174 */ 0x00,0x00,0x3c,0x42,0x9d,0xa5,0x9d,0xa5,0xa5,0x42,0x3c,0x00,0x00,0x00,0x00,0x00, -/* 175 */ 0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 176 */ 0x00,0x00,0x1c,0x36,0x36,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 177 */ 0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x7e,0x00,0x00,0x00,0x00,0x00, -/* 178 */ 0x00,0x1e,0x33,0x18,0x0c,0x06,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 179 */ 0x00,0x1e,0x33,0x18,0x30,0x33,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 180 */ 0x00,0x30,0x18,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 181 */ 0x00,0x00,0x00,0x00,0x00,0x66,0x66,0x66,0x66,0x66,0x76,0x6e,0x06,0x06,0x03,0x00, -/* 182 */ 
0x00,0x00,0x7e,0x2f,0x2f,0x2f,0x2e,0x28,0x28,0x28,0x28,0x28,0x00,0x00,0x00,0x00, -/* 183 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 184 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x30,0x1e,0x00, -/* 185 */ 0x00,0x0c,0x0e,0x0c,0x0c,0x0c,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 186 */ 0x00,0x1e,0x33,0x33,0x33,0x33,0x1e,0x00,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 187 */ 0x00,0x00,0x00,0x00,0x00,0x1b,0x36,0x6c,0x6c,0x36,0x1b,0x00,0x00,0x00,0x00,0x00, -/* 188 */ 0x00,0x10,0x1c,0x18,0x18,0x18,0x00,0x7f,0x00,0x18,0x1c,0x1a,0x3e,0x18,0x00,0x00, -/* 189 */ 0x00,0x10,0x1c,0x18,0x18,0x18,0x00,0x7f,0x00,0x1c,0x36,0x18,0x0c,0x3e,0x00,0x00, -/* 190 */ 0x00,0x1c,0x36,0x18,0x36,0x1c,0x00,0x7f,0x00,0x18,0x1c,0x1a,0x3e,0x18,0x00,0x00, -/* 191 */ 0x00,0x00,0x00,0x00,0x0c,0x0c,0x00,0x0c,0x0c,0x06,0x06,0x03,0x63,0x3e,0x00,0x00, -/* 192 */ 0x0c,0x18,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 193 */ 0x18,0x0c,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 194 */ 0x08,0x14,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 195 */ 0x6e,0x3b,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 196 */ 0x36,0x00,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 197 */ 0x1c,0x36,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 198 */ 0x00,0x00,0xfe,0x33,0x33,0x33,0xff,0x33,0x33,0x33,0x33,0xf3,0x00,0x00,0x00,0x00, -/* 199 */ 0x00,0x00,0x3c,0x66,0x03,0x03,0x03,0x03,0x03,0x03,0x66,0x3c,0x18,0x30,0x1e,0x00, -/* 200 */ 0x0c,0x18,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 201 */ 0x18,0x0c,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 202 */ 0x08,0x14,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 203 */ 0x36,0x00,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 204 */ 0x0c,0x18,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, -/* 205 */ 0x30,0x18,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, -/* 206 */ 0x18,0x24,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, -/* 207 */ 0x66,0x00,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, -/* 208 */ 0x00,0x00,0x1e,0x36,0x66,0x66,0x6f,0x66,0x66,0x66,0x36,0x1e,0x00,0x00,0x00,0x00, -/* 209 */ 0x6e,0x3b,0x63,0x63,0x67,0x6f,0x6f,0x7b,0x7b,0x73,0x63,0x63,0x00,0x00,0x00,0x00, -/* 210 */ 0x06,0x0c,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 211 */ 0x30,0x18,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 212 */ 0x08,0x14,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 213 */ 0x6e,0x3b,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 214 */ 0x36,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 215 */ 0x00,0x00,0x00,0x00,0x00,0x66,0x3c,0x18,0x3c,0x66,0x00,0x00,0x00,0x00,0x00,0x00, -/* 216 */ 0x00,0x20,0x3e,0x73,0x73,0x6b,0x6b,0x6b,0x6b,0x67,0x67,0x3e,0x02,0x00,0x00,0x00, -/* 217 */ 0x0c,0x18,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 218 */ 0x18,0x0c,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 219 */ 0x08,0x14,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 220 */ 
0x36,0x00,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00,
-/* 221 */	0x30,0x18,0xc3,0xc3,0x66,0x66,0x3c,0x3c,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00,
-/* 222 */	0x00,0x00,0x0f,0x06,0x3e,0x66,0x66,0x66,0x66,0x3e,0x06,0x0f,0x00,0x00,0x00,0x00,
-/* 223 */	0x00,0x00,0x1e,0x33,0x33,0x1b,0x33,0x63,0x63,0x63,0x63,0x3b,0x00,0x00,0x00,0x00,
-/* 224 */	0x00,0x0c,0x18,0x30,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00,
-/* 225 */	0x00,0x30,0x18,0x0c,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00,
-/* 226 */	0x00,0x08,0x1c,0x36,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00,
-/* 227 */	0x00,0x00,0x6e,0x3b,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00,
-/* 228 */	0x00,0x00,0x36,0x36,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00,
-/* 229 */	0x00,0x1c,0x36,0x1c,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00,
-/* 230 */	0x00,0x00,0x00,0x00,0x00,0x6e,0xdb,0xd8,0xfe,0x1b,0xdb,0x76,0x00,0x00,0x00,0x00,
-/* 231 */	0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x03,0x03,0x03,0x63,0x3e,0x18,0x30,0x1e,0x00,
-/* 232 */	0x00,0x0c,0x18,0x30,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00,
-/* 233 */	0x00,0x30,0x18,0x0c,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00,
-/* 234 */	0x00,0x08,0x1c,0x36,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00,
-/* 235 */	0x00,0x00,0x36,0x36,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00,
-/* 236 */	0x00,0x06,0x0c,0x18,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00,
-/* 237 */	0x00,0x18,0x0c,0x06,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00,
-/* 238 */	0x00,0x08,0x1c,0x36,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00,
-/* 239 */	0x00,0x00,0x36,0x36,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00,
-/* 240 */	0x00,0x00,0x2c,0x18,0x34,0x60,0x7c,0x66,0x66,0x66,0x66,0x3c,0x00,0x00,0x00,0x00,
-/* 241 */	0x00,0x00,0x6e,0x3b,0x00,0x3b,0x67,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00,
-/* 242 */	0x00,0x06,0x0c,0x18,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00,
-/* 243 */	0x00,0x30,0x18,0x0c,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00,
-/* 244 */	0x00,0x08,0x1c,0x36,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00,
-/* 245 */	0x00,0x00,0x6e,0x3b,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00,
-/* 246 */	0x00,0x00,0x36,0x36,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00,
-/* 247 */	0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x7e,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,
-/* 248 */	0x00,0x00,0x00,0x00,0x20,0x3e,0x73,0x6b,0x6b,0x6b,0x67,0x3e,0x02,0x00,0x00,0x00,
-/* 249 */	0x00,0x06,0x0c,0x18,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00,
-/* 250 */	0x00,0x30,0x18,0x0c,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00,
-/* 251 */	0x00,0x08,0x1c,0x36,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00,
-/* 252 */	0x00,0x00,0x36,0x36,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00,
-/* 253 */	0x00,0x30,0x18,0x0c,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x0c,0x0c,0x06,0x03,0x00,
-/* 254 */	0x00,0x00,0x0f,0x06,0x06,0x3e,0x66,0x66,0x66,0x66,0x66,0x3e,0x06,0x06,0x0f,0x00,
-/* 255 */	0x00,0x00,0x36,0x36,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x0c,0x0c,0x06,0x03,0x00
-};
-
-#define ISO_CHAR_MIN 0x00
-#define ISO_CHAR_MAX 0xFF
-#define ISO_CHAR_HEIGHT 16
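(The table deleted above is a classic bitmap console font: with ISO_CHAR_MIN 0x00 through ISO_CHAR_MAX 0xFF and ISO_CHAR_HEIGHT 16, each of the 256 glyphs is 16 consecutive bytes, one byte per scanline of an 8x16 cell. Judging by the renderers in video_console.c, deleted later in this patch, bit 0 of each byte appears to map to the leftmost pixel. A hypothetical debugging helper, not part of xnu, that prints one glyph as ASCII art under those assumptions:

    #include <stdio.h>

    #define ISO_CHAR_HEIGHT 16   /* bytes (scanlines) per glyph, as above */
    #define CHARWIDTH 8          /* pixels per scanline */

    /* Print glyph 'ch' from a 256-entry table laid out like iso_font. */
    static void dump_glyph(const unsigned char *font, unsigned char ch)
    {
        const unsigned char *glyph = font + ch * ISO_CHAR_HEIGHT;
        int line, bit;

        for (line = 0; line < ISO_CHAR_HEIGHT; line++) {
            for (bit = 0; bit < CHARWIDTH; bit++)
                putchar((glyph[line] >> bit) & 1 ? '#' : '.');
            putchar('\n');
        }
    }

For example, dump_glyph(iso_font, '0') would print the ten-scanline digit zero pattern stored at rows 48 in the table.)

diff --git a/osfmk/i386/AT386/kernBootStruct.h b/osfmk/i386/AT386/kernBootStruct.h
deleted file mode 100644
index d9cc8bebe..000000000
--- 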
a/osfmk/i386/AT386/kernBootStruct.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * kernBootStruct.h - * What the booter leaves behind for the kernel. - */ - -/* - * Maximum number of boot drivers that can be loaded. - */ -#define NDRIVERS 500 - -/* - * Types of boot driver that may be loaded by the booter. - */ -enum { - kBootDriverTypeInvalid = 0, - kBootDriverTypeKEXT = 1, - kBootDriverTypeMKEXT = 2 -}; - -typedef struct { - unsigned long address; // address where driver was loaded - unsigned long size; // number of bytes - unsigned long type; // driver type -} driver_config_t; - -/* - * APM BIOS information. - */ -typedef struct { - unsigned short major_vers; // == 0 if not present - unsigned short minor_vers; - unsigned long cs32_base; - unsigned long cs16_base; - unsigned long ds_base; - unsigned long cs_length; - unsigned long ds_length; - unsigned long entry_offset; - union { - struct { - unsigned long mode_16 :1; - unsigned long mode_32 :1; - unsigned long idle_slows_cpu :1; - unsigned long reserved :29; - } f; - unsigned long data; - } flags; - unsigned long connected; -} APM_config_t; - -/* - * PCI bus information. - */ -typedef struct _PCI_bus_info_t { - union { - struct { - unsigned char configMethod1 :1; - unsigned char configMethod2 :1; - unsigned char :2; - unsigned char specialCycle1 :1; - unsigned char specialCycle2 :1; - } s; - unsigned char d; - } u_bus; - unsigned char maxBusNum; - unsigned char majorVersion; - unsigned char minorVersion; - unsigned char BIOSPresent; -} PCI_bus_info_t; - -/* - * Video information. - */ -struct boot_video { - unsigned long v_baseAddr; // Base address of video memory - unsigned long v_display; // Display Code (if Applicable - unsigned long v_rowBytes; // Number of bytes per pixel row - unsigned long v_width; // Width - unsigned long v_height; // Height - unsigned long v_depth; // Pixel Depth -}; - -typedef struct boot_video boot_video; - -#define GRAPHICS_MODE 1 -#define TEXT_MODE 0 - -#define BOOT_STRING_LEN 160 -#define CONFIG_SIZE (12 * 4096) - -typedef struct { - short version; - char bootString[BOOT_STRING_LEN]; // boot arguments - int magicCookie; // KERNBOOTMAGIC - int numIDEs; // number of IDE drives - int rootdev; // root device - int convmem; // conventional memory - int extmem; // extended memory - char bootFile[128]; // kernel file name - int firstAddr0; // first address for kern convmem - int diskInfo[4]; // info for bios dev 80-83 - int graphicsMode; // booted in graphics mode? 
- int kernDev; // device kernel was fetched from - int numBootDrivers; // number of drivers loaded - char * configEnd; // pointer to end of config files - int kaddr; // kernel load address - int ksize; // size of kernel - driver_config_t driverConfig[NDRIVERS]; - char _reserved[2052]; - boot_video video; - PCI_bus_info_t pciInfo; - APM_config_t apmConfig; - char config[CONFIG_SIZE]; -} KERNBOOTSTRUCT; - -#define KERNSTRUCT_ADDR ((KERNBOOTSTRUCT *) 0x11000) -#define KERNBOOTMAGIC 0xa7a7a7a7 - -#ifndef KERNEL -extern KERNBOOTSTRUCT * kernBootStruct; -#endif diff --git a/osfmk/i386/AT386/misc_protos.h b/osfmk/i386/AT386/misc_protos.h index ab1001441..cfe486c33 100644 --- a/osfmk/i386/AT386/misc_protos.h +++ b/osfmk/i386/AT386/misc_protos.h @@ -29,14 +29,14 @@ #ifndef _AT386_MISC_PROTOS_H_ #define _AT386_MISC_PROTOS_H_ -#include /* for dev_t */ -#include /* for vm_offset_t */ +#include /* for KernelBootArgs_t */ /* * i386/AT386/model_dep.c */ extern void i386_init(void); +extern void i386_vm_init(unsigned int maxmem, KernelBootArgs_t *args); extern void machine_init(void); extern void machine_startup(void); diff --git a/osfmk/i386/AT386/model_dep.c b/osfmk/i386/AT386/model_dep.c index a1afdfa9f..c4c220264 100644 --- a/osfmk/i386/AT386/model_dep.c +++ b/osfmk/i386/AT386/model_dep.c @@ -65,7 +65,6 @@ #include #include -#include #include #include #include @@ -77,7 +76,6 @@ #include #include #include -#include #include #include #include @@ -85,272 +83,122 @@ #include #include #include -#include #include -#include -#include -#include #include -#include #include #include #include -#include #include -#include +#include +#include #if MACH_KDB #include #endif /* MACH_KDB */ -#include -#ifdef __MACHO__ -#include -#include -#include -#endif #if NCPUS > 1 #include #endif /* NCPUS */ -#if MP_V1_1 -#include -#endif /* MP_V1_1 */ +#if NCPUS > 1 +#include +#endif /* NCPUS > 1 */ #include -vm_size_t mem_size = 0; -uint64_t max_mem; -vm_offset_t first_addr = 0; /* set by start.s - keep out of bss */ -vm_offset_t first_avail = 0;/* first after page tables */ -vm_offset_t last_addr; - -vm_offset_t avail_start, avail_end; -vm_offset_t virtual_avail, virtual_end; -vm_offset_t hole_start, hole_end; -vm_offset_t avail_next; -unsigned int avail_remaining; - -/* parameters passed from bootstrap loader */ -int cnvmem = 0; /* must be in .data section */ -int extmem = 0; - -/* FIXME!! 
REMOVE WHEN OSFMK DEVICES ARE COMPLETELY PULLED OUT */
-int dev_name_count = 0;
-int dev_name_list = 0;
-
-#ifndef __MACHO__
-extern char edata, end;
-#endif
-
-extern char version[];
-
-void parse_arguments(void);
-const char *getenv(const char *);
-
-#define BOOT_LINE_LENGTH 160
-char boot_string_store[BOOT_LINE_LENGTH] = {0};
-char *boot_string = (char *)0;
-int boot_string_sz = BOOT_LINE_LENGTH;
-int boottype = 0;
-
-#if __MACHO__
-#include 
-vm_offset_t edata, etext, end;
-
-extern struct mach_header _mh_execute_header;
-void *sectTEXTB; int sectSizeTEXT;
-void *sectDATAB; int sectSizeDATA;
-void *sectOBJCB; int sectSizeOBJC;
-void *sectLINKB; int sectSizeLINK;
-
-/* Kernel boot information */
-KERNBOOTSTRUCT kernBootStructData;
-KERNBOOTSTRUCT *kernBootStruct;
-#endif
-
-vm_offset_t kern_args_start = 0;  /* kernel arguments */
-vm_size_t kern_args_size = 0;     /* size of kernel arguments */
-
-#ifdef __MACHO__
-
-unsigned long
-i386_preinit()
-{
-	int i;
-	struct segment_command *sgp;
-	struct section *sp;
-
-	sgp = (struct segment_command *) getsegbyname("__DATA");
-	if (sgp) {
-		sp = (struct section *) firstsect(sgp);
-		if (sp) {
-			do {
-				if (sp->flags & S_ZEROFILL)
-					bzero((char *) sp->addr, sp->size);
-			} while (sp = (struct section *)nextsect(sgp, sp));
-		}
-	}
-
-	bcopy((char *) KERNSTRUCT_ADDR, (char *) &kernBootStructData,
-	      sizeof(kernBootStructData));
-
-	kernBootStruct = &kernBootStructData;
-
-	end = round_page( kernBootStruct->kaddr + kernBootStruct->ksize );
-
-	return end;
-}
-#endif
+static void machine_conf(void);
+#include 
-/*
- * Cpu initialization.  Running virtual, but without MACH VM
- * set up.  First C routine called.
- */
 void
-machine_startup(void)
+machine_startup()
 {
+	int	boot_arg;
-#ifdef __MACHO__
-	/* Now copy over various bits.. */
-	cnvmem = kernBootStruct->convmem;
-	extmem = kernBootStruct->extmem;
-	kern_args_start = (vm_offset_t) kernBootStruct->bootString;
-	kern_args_size = strlen(kernBootStruct->bootString);
-	boottype = kernBootStruct->rootdev;
-
-	/* Now retrieve addresses for end, edata, and etext
-	 * from MACH-O headers.
-	 */
-
-	sectTEXTB = (void *) getsegdatafromheader(
-		&_mh_execute_header, "__TEXT", &sectSizeTEXT);
-	sectDATAB = (void *) getsegdatafromheader(
-		&_mh_execute_header, "__DATA", &sectSizeDATA);
-	sectOBJCB = (void *) getsegdatafromheader(
-		&_mh_execute_header, "__OBJC", &sectSizeOBJC);
-	sectLINKB = (void *) getsegdatafromheader(
-		&_mh_execute_header, "__LINKEDIT", &sectSizeLINK);
-
-	etext = (vm_offset_t) sectTEXTB + sectSizeTEXT;
-	edata = (vm_offset_t) sectDATAB + sectSizeDATA;
+#if 0
+	if( PE_get_hotkey( kPEControlKey ))
+		halt_in_debugger = halt_in_debugger ? 0 : 1;
 #endif
-
-	printf_init();	/* Init this in case we need debugger */
-	panic_init();	/* Init this in case we need debugger */
-
-	PE_init_platform(FALSE, kernBootStruct);
-	PE_init_kprintf(FALSE);
-	PE_init_printf(FALSE);
-
-	/*
-	 * Parse startup arguments
-	 */
-	parse_arguments();
-
-	/*
-	 * Set up initial thread so current_thread() works early on
-	 */
-	pageout_thread.top_act = &pageout_act;
-	pageout_act.thread = &pageout_thread;
-	thread_machine_set_current(&pageout_thread);
-
-	/*
-	 * Do basic VM initialization
-	 */
-	i386_init();
+	if (PE_parse_boot_arg("debug", &boot_arg)) {
+		if (boot_arg & DB_HALT) halt_in_debugger=1;
+		if (boot_arg & DB_PRT) disableDebugOuput=FALSE;
+		if (boot_arg & DB_SLOG) systemLogDiags=TRUE;
+		if (boot_arg & DB_NMI) panicDebugging=TRUE;
+		if (boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE;
+	}
-	PE_init_platform(TRUE, kernBootStruct);
-	PE_init_kprintf(TRUE);
-	PE_init_printf(TRUE);
+#if NOTYET
+	hw_lock_init(&debugger_lock);	/* initialize debugger lock */
+	hw_lock_init(&pbtlock);		/* initialize print backtrace lock */
+#endif

 #if	MACH_KDB
 	/*
-	 * Initialize the kernel debugger.
+	 * Initialize KDB
 	 */
+#if	DB_MACHINE_COMMANDS
+	db_machine_commands_install(ppc_db_commands);
+#endif	/* DB_MACHINE_COMMANDS */
 	ddb_init();
+	if (boot_arg & DB_KDB)
+		current_debugger = KDB_CUR_DB;
+
 	/*
 	 * Cause a breakpoint trap to the debugger before proceeding
 	 * any further if the proper option bit was specified in
 	 * the boot flags.
 	 */
+	if (halt_in_debugger && (current_debugger == KDB_CUR_DB)) {
+		Debugger("inline call to debugger(machine_startup)");
+		halt_in_debugger = 0;
+		active_debugger =1;
+	}
+#endif /* MACH_KDB */
+
+	if (PE_parse_boot_arg("preempt", &boot_arg)) {
+		extern int default_preemption_rate;
-	if (halt_in_debugger) {
-		printf("inline call to debugger(machine_startup)\n");
-		Debugger("inline call");
+		default_preemption_rate = boot_arg;
 	}
-#endif	/* MACH_KDB */
+	if (PE_parse_boot_arg("unsafe", &boot_arg)) {
+		extern int max_unsafe_quanta;
-	TR_INIT();
+		max_unsafe_quanta = boot_arg;
+	}
+	if (PE_parse_boot_arg("poll", &boot_arg)) {
+		extern int max_poll_quanta;
-	printf(version);
+		max_poll_quanta = boot_arg;
+	}
+	if (PE_parse_boot_arg("yield", &boot_arg)) {
+		extern int sched_poll_yield_shift;
-	machine_slot[0].is_cpu = TRUE;
-	machine_slot[0].running = TRUE;
-#ifdef	MACH_BSD
-	/* FIXME */
-	machine_slot[0].cpu_type = CPU_TYPE_I386;
-	machine_slot[0].cpu_subtype = CPU_SUBTYPE_PENTPRO;
-#else
-	machine_slot[0].cpu_type = cpuid_cputype(0);
-	machine_slot[0].cpu_subtype = CPU_SUBTYPE_AT386;
+		sched_poll_yield_shift = boot_arg;
+	}
+
+	machine_conf();
+
+#if NOTYET
+	ml_thrm_init();		/* Start thermal monitoring on this processor */
 #endif

 	/*
 	 * Start the system.
 	 */
-#if	NCPUS > 1
-	mp_desc_init(0);
-#endif	/* NCPUS */
-	setup_main();
-}
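(The rewritten machine_startup() above replaces the old getenv()-style argument parsing with PE_parse_boot_arg() lookups against the boot-args string the booter hands over; on a name match the parsed value is stored through the supplied pointer and TRUE is returned. A minimal sketch of the same pattern, using a hypothetical tunable; my_quantum is not a real xnu boot-arg:

    #include <pexpert/pexpert.h>    /* PE_parse_boot_arg() */

    static int my_quantum = 10;     /* hypothetical default */

    static void
    my_conf(void)
    {
        int boot_arg;

        /* booting with "my_quantum=5" in boot-args would override the default */
        if (PE_parse_boot_arg("my_quantum", &boot_arg))
            my_quantum = boot_arg;
    }

The same mechanism backs the debug, preempt, unsafe, poll, and yield tunables added in this hunk.)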
-
-
-vm_offset_t env_start = 0;	/* environment */
-vm_size_t env_size = 0;		/* size of environment */
-
-/*
- * Parse command line arguments.
- */
-void
-parse_arguments(void)
-{
-	unsigned int boot_arg;
-
-	if (PE_parse_boot_arg("maxmem", &boot_arg))
-	{
-		mem_size = boot_arg * (1024 * 1024);
-	}
-
-	if (PE_parse_boot_arg("debug", &boot_arg))
-	{
-		if (boot_arg & DB_HALT) halt_in_debugger = 1;
-		if (boot_arg & DB_PRT) disableDebugOuput = FALSE;
-	}
+	/* Should never return */
 }
-
-const char *
-getenv(const char *name)
+
+static void
+machine_conf(void)
 {
-	int len = strlen(name);
-	const char *p = (const char *)env_start;
-	const char *endp = p + env_size;
-
-	while (p < endp) {
-		if (len >= endp - p)
-			break;
-		if (strncmp(name, p, len) == 0 && *(p + len) == '=')
-			return p + len + 1;
-		while (*p++)
-			;
-	}
-	return NULL;
+	machine_info.max_cpus = NCPUS;
+	machine_info.avail_cpus = 1;
+	machine_info.memory_size = mem_size;
 }
-
-extern void
-calibrate_delay(void);
-
 /*
  * Find devices.  The system is alive.
  */
@@ -361,33 +209,22 @@
 machine_init(void)
 {
 	const char *p;
 	int n;
-
-	/*
-	 * Adjust delay count before entering drivers
-	 */
-
-	calibrate_delay();
-
 	/*
 	 * Display CPU identification
 	 */
 	cpuid_cpu_display("CPU identification", 0);
-	cpuid_cache_display("CPU configuration", 0);
+	cpuid_feature_display("CPU features", 0);

-#if	MP_V1_1
-	mp_v1_1_init();
-#endif	/* MP_V1_1 */
+
+#if	NCPUS > 1
+	smp_init();
+#endif

 	/*
 	 * Set up to use floating point.
 	 */
 	init_fpu();

-#if 0
-#if	NPCI > 0
-	dma_zones_init();
-#endif	/* NPCI > 0 */
-#endif
-
 	/*
 	 * Configure clock devices.
 	 */
@@ -411,183 +248,20 @@
 int reset_mem_on_reboot = 1;
 void
 halt_all_cpus(boolean_t reboot)
 {
-	if (reboot)
-	{
-		/*
-		 * Tell the BIOS not to clear and test memory.
-		 */
-		if (!reset_mem_on_reboot)
-			*(unsigned short *)phystokv(0x472) = 0x1234;
-
-		printf("MACH Reboot\n");
-		PEHaltRestart( kPERestartCPU );
-	}
-	else
-	{
-		printf("CPU halted\n");
-		PEHaltRestart( kPEHaltCPU );
-	}
-	while(1);
-}
-
-/*
- * Basic VM initialization.
- */
-
-void
-i386_init(void)
-{
-	int i,j;	/* Standard index vars. */
-	vm_size_t bios_hole_size;
-
-#ifndef	__MACHO__
-	/*
-	 * Zero the BSS.
-	 */
-
-	bzero((char *)&edata,(unsigned)(&end - &edata));
-#endif
-
-	boot_string = &boot_string_store[0];
-
-	/*
-	 * Initialize the pic prior to any possible call to an spl.
-	 */
-
-	set_cpu_model();
-	vm_set_page_size();
-
-	/*
-	 * Initialize the Event Trace Analysis Package
-	 * Static Phase: 1 of 2
-	 */
-	etap_init_phase1();
-
-	/*
-	 * Compute the memory size.
-	 */
-
-#if 1
-	/* FIXME
-	 * fdisk needs to change to use a sysctl instead of
-	 * opening /dev/kmem and reading out the kernboot structure
-	 */
-
-	first_addr = (char *)(KERNSTRUCT_ADDR) + sizeof(KERNBOOTSTRUCT);
-#else
-#if	NCPUS > 1
-	first_addr = 0x1000;
-#else
-	/* First two pages are used to boot the other cpus. */
-	/* TODO - reclaim pages after all cpus have booted */
-
-	first_addr = 0x3000;
-#endif
-#endif
-
-	/* BIOS leaves data in low memory */
-	last_addr = 1024*1024 + extmem*1024;
-
-	/* extended memory starts at 1MB */
-
-	bios_hole_size = 1024*1024 - trunc_page((vm_offset_t)(1024 * cnvmem));
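(For concreteness, the sizing math deleted above, worked with illustrative BIOS values that are not taken from the patch: cnvmem = 640, in KB of conventional memory, and extmem = 130048, in KB of extended memory above 1MB, give

    last_addr      = 1024*1024 + 130048*1024          = 128MB
    bios_hole_size = 1024*1024 - trunc_page(640*1024) = 384KB
    mem_size       = last_addr - bios_hole_size       (set just below)

so the legacy 640KB..1MB hole is carved out of the usable total.)

-
-	/*
-	 * Initialize for pmap_free_pages and pmap_next_page.
-	 * These guys should be page-aligned.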
- */ - - hole_start = trunc_page((vm_offset_t)(1024 * cnvmem)); - hole_end = round_page((vm_offset_t)first_avail); - - /* - * compute mem_size - */ - - if (mem_size != 0) { - if (mem_size < (last_addr) - bios_hole_size) - last_addr = mem_size + bios_hole_size; - } - - first_addr = round_page(first_addr); - last_addr = trunc_page(last_addr); - mem_size = last_addr - bios_hole_size; - max_mem = mem_size; - - avail_start = first_addr; - avail_end = last_addr; - avail_next = avail_start; - - /* - * Initialize kernel physical map, mapping the - * region from loadpt to avail_start. - * Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS. - */ - - -#if NCPUS > 1 && AT386 - /* - * Must Allocate interrupt stacks before kdb is called and also - * before vm is initialized. Must find out number of cpus first. - */ - /* - * Get number of cpus to boot, passed as an optional argument - * boot: mach [-sah#] # from 0 to 9 is the number of cpus to boot - */ - if (wncpu == -1) { + if (reboot) { /* - * "-1" check above is to allow for old boot loader to pass - * wncpu through boothowto. New boot loader uses environment. + * Tell the BIOS not to clear and test memory. */ - const char *cpus; - if ((cpus = getenv("cpus")) != NULL) { - /* only a single digit for now */ - if ((*cpus > '0') && (*cpus <= '9')) - wncpu = *cpus - '0'; - } else - wncpu = NCPUS; + if (!reset_mem_on_reboot) + *(unsigned short *)phystokv(0x472) = 0x1234; + + printf("MACH Reboot\n"); + PEHaltRestart( kPERestartCPU ); + } else { + printf("CPU halted\n"); + PEHaltRestart( kPEHaltCPU ); } - mp_probe_cpus(); - interrupt_stack_alloc(); - -#endif /* NCPUS > 1 && AT386 */ - - pmap_bootstrap(0); - - avail_remaining = atop((avail_end - avail_start) - - (hole_end - hole_start)); -} - -unsigned int -pmap_free_pages(void) -{ - return avail_remaining; -} - -boolean_t -pmap_next_page( - vm_offset_t *addrp) -{ - if (avail_next == avail_end) - return FALSE; - - /* skip the hole */ - - if (avail_next == hole_start) - avail_next = hole_end; - - *addrp = avail_next; - avail_next += PAGE_SIZE; - avail_remaining--; - - return TRUE; -} - -boolean_t -pmap_valid_page( - vm_offset_t x) -{ - return ((avail_start <= x) && (x < avail_end)); + while(1); } /*XXX*/ @@ -606,6 +280,8 @@ Debugger( { printf("Debugger called: <%s>\n", message); + draw_panic_dialog(); + __asm__("int3"); } @@ -615,7 +291,7 @@ display_syscall(int syscall) printf("System call happened %d\n", syscall); } -#if XPR_DEBUG && (NCPUS == 1 || MP_V1_1) +#if XPR_DEBUG && (NCPUS == 1) extern kern_return_t sysclk_gettime_interrupts_disabled( mach_timespec_t *cur_time); @@ -627,7 +303,7 @@ int xpr_time(void) sysclk_gettime_interrupts_disabled(&time); return(time.tv_sec*1000000 + time.tv_nsec/1000); } -#endif /* XPR_DEBUG && (NCPUS == 1 || MP_V1_1) */ +#endif /* XPR_DEBUG && (NCPUS == 1) */ enable_bluebox() { diff --git a/osfmk/i386/AT386/mp/mp.c b/osfmk/i386/AT386/mp/mp.c deleted file mode 100644 index 4ea476827..000000000 --- a/osfmk/i386/AT386/mp/mp.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. 
Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_COPYRIGHT@ - */ -/* - * Mach Operating System - * Copyright (c) 1991,1990 Carnegie Mellon University - * All Rights Reserved. - * - * Permission to use, copy, modify and distribute this software and its - * documentation is hereby granted, provided that both the copyright - * notice and this permission notice appear in all copies of the - * software, derivative works or modified versions, and any portions - * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" - * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR - * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * - * Carnegie Mellon requests users of this software to return to - * - * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU - * School of Computer Science - * Carnegie Mellon University - * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon - * the rights to redistribute these changes. - */ - -/* - */ - -#include -#include - -#if NCPUS > 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -int cpu_int_word[NCPUS]; - -extern void cpu_interrupt(int cpu); -extern int get_ncpus(void); - -/* - * Generate a clock interrupt on next running cpu - * - * Instead of having the master processor interrupt - * all active processors, each processor in turn interrupts - * the next active one. This avoids all slave processors - * accessing the same R/W data simultaneously. - */ - -void -slave_clock(void) -{ -} - -void -i386_signal_cpus(int event) -{ -} - -/*ARGSUSED*/ -void -init_ast_check( - processor_t processor) -{ -} - -void -cause_ast_check( - processor_t processor) -{ -} - -/*ARGSUSED*/ -kern_return_t -cpu_start( - int slot_num) -{ - printf("cpu_start not implemented\n"); - return (KERN_FAILURE); -} - - -int real_ncpus; -int wncpu = -1; - -/* - * Find out how many cpus will run - */ - -void -mp_probe_cpus(void) -{ - int i; - - /* - * get real number of cpus - */ - - real_ncpus = get_ncpus(); - - if (wncpu <= 0) - wncpu = NCPUS; - - /* - * Ignore real number of cpus it if number of requested cpus - * is smaller. - * Keep it if number of requested cpu is null or larger. - */ - - if (real_ncpus < wncpu) - wncpu = real_ncpus; -#if MP_V1_1 - { - extern void validate_cpus(int); - - /* - * We do NOT have CPUS numbered contiguously. 
- */ - - validate_cpus(wncpu); - } -#else - for (i=0; i < wncpu; i++) - machine_slot[i].is_cpu = TRUE; -#endif -} - -/* - * invoke kdb on slave processors - */ - -void -remote_kdb(void) -{ -} - -/* - * Clear kdb interrupt - */ - -void -clear_kdb_intr(void) -{ -} -#else /* NCPUS > 1 */ -int cpu_int_word[NCPUS]; -#endif /* NCPUS > 1 */ diff --git a/osfmk/i386/AT386/mp/mp_v1_1.c b/osfmk/i386/AT386/mp/mp_v1_1.c deleted file mode 100644 index bc5a1c4d9..000000000 --- a/osfmk/i386/AT386/mp/mp_v1_1.c +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_COPYRIGHT@ - */ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MP_DEBUG 1 - -#if MP_DEBUG -vm_offset_t bios_start; -#endif /* MP_DEBUG */ - -unsigned int lapic_id_initdata = 0; -int lapic_id = (int)&lapic_id_initdata; -vm_offset_t lapic_start; - -void lapic_init(void); -int get_ncpus(void); -void validate_cpus(int ncpus); -void cpu_interrupt(int cpu); -void slave_boot(int cpu); - -boolean_t mp_v1_1_initialized = FALSE; - -void -mp_v1_1_init(void) -{ - /*WILL BE REMOVED IN FUTURE REVISION!!! !*/ - /* SIMPLY COMMENTED OUT FOR THE MOMENT */ - return; -} - -void -lapic_init(void) -{ -} - -void -cpu_interrupt( - int cpu) -{ -} - -#if NCPUS > 1 -void -slave_boot( - int cpu) -{ -} - -void -start_other_cpus(void) -{ -} - -void -validate_cpus(int ncpus) -{ - int i; - for(i=0;i 1 */ - -#if MACH_KDB -#include - -#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */ - - -#if TRAP_DEBUG -#define MTRAPS 100 -struct mp_trap_hist_struct { - unsigned char type; - unsigned char data[5]; -} trap_hist[MTRAPS], *cur_trap_hist = trap_hist, - *max_trap_hist = &trap_hist[MTRAPS]; - -void db_trap_hist(void); - -/* - * SPL: - * 1: new spl - * 2: old spl - * 3: new tpr - * 4: old tpr - * INT: - * 1: int vec - * 2: old spl - * 3: new spl - * 4: post eoi tpr - * 5: exit tpr - */ - -void -db_trap_hist(void) -{ - int i,j; - for(i=0;i=cur_trap_hist)?"*":" ", - (trap_hist[i].type == 1)?"SPL":"INT"); - for(j=0;j<5;j++) - db_printf(" %02x", trap_hist[i].data[j]); - db_printf("\n"); - } - -} -#endif /* TRAP_DEBUG */ - -void db_lapic(int cpu); -unsigned int db_remote_read(int cpu, int reg); -void db_ioapic(unsigned int); -void kdb_console(void); - -void -kdb_console(void) -{ -} - -#define BOOLP(a) ((a)?' 
':'!') - -static char *DM[8] = { - "Fixed", - "Lowest Priority", - "Invalid", - "Invalid", - "NMI", - "Reset", - "Invalid", - "ExtINT"}; - -unsigned int -db_remote_read(int cpu, int reg) -{ - return -1; -} - -void -db_lapic(int cpu) -{ -} - -void -db_ioapic(unsigned int ind) -{ -} - -#endif /* MACH_KDB */ diff --git a/osfmk/i386/AT386/mp/mp_v1_1.h b/osfmk/i386/AT386/mp/mp_v1_1.h deleted file mode 100644 index 86e9d1c00..000000000 --- a/osfmk/i386/AT386/mp/mp_v1_1.h +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_COPYRIGHT@ - */ - -#ifndef _MP_MP_V1_1_H_ -#define _MP_MP_V1_1_H_ - -#include -#include -#include - -struct MP_Config_EntryP { - unsigned char Entry_Type; - unsigned char Local_Apic_Id; - unsigned char Local_Apic_Version; - unsigned char CPU_Flags; - unsigned int CPU_Signature; - unsigned int Feature_Flags; - unsigned int Reserved[2]; -}; - -/* Entry types */ - -#define MP_CPU_ENTRY 0 /* Processor entry */ -#define MP_BUS_ENTRY 1 /* bus entry */ -#define MP_IO_APIC_ENTRY 2 /* I/O APIC entry */ -#define MP_IO_INT_ENTRY 3 /* I/O Interrupt assignment */ -#define MP_LOC_INT_ENTRY 4 /* Local Interrupt assignment */ - -struct MP_Config_EntryB { - unsigned char Entry_Type; - unsigned char Bus_Id; - char Ident[6]; -}; - -struct MP_Config_EntryA { - unsigned char Entry_Type; - unsigned char IO_Apic_Id; - unsigned char IO_Apic_Version; - unsigned char IO_Apic_Flags; - vm_offset_t IO_Apic_Address; -}; - -struct MP_Config_EntryI { - unsigned char Entry_Type; - unsigned char Int_Type; - unsigned short Int_Flag; - unsigned char Source_Bus; - unsigned char Source_IRQ; - unsigned char Dest_IO_Apic; - unsigned char Dest_INTIN; -}; -struct MP_Config_EntryL { - unsigned char Entry_Type; - unsigned char Int_Type; - unsigned short Int_Flag; - unsigned char Source_Bus; - unsigned char Source_IRQ; - unsigned char Dest_Local_Apic; - unsigned char Dest_INTIN; -}; - -struct MP_FPS_struct { - unsigned int Signature; - vm_offset_t Config_Ptr; - unsigned char Length; - unsigned char Spec_Rev; - unsigned char CheckSum; - unsigned char Feature[5]; -}; - -struct MP_Config_Table { - unsigned int Signature; - unsigned short Length; - unsigned char Spec_Rev; - unsigned char CheckSum; - char OEM[8]; - char PROD[12]; - vm_offset_t OEM_Ptr; - unsigned short OEM_Size; - unsigned short Entries; - vm_offset_t Local_Apic; - unsigned int Reserved; -}; - -#define IMCR_ADDRESS 0x22 -#define IMCR_DATA 0x23 -#define IMCR_SELECT 0x70 -#define IMCR_APIC_ENABLE 0x01 - -#if 0 -extern 
boolean_t mp_v1_1_take_irq(int pic, - int unit, - int spl, - i386_intr_t intr); - -extern boolean_t mp_v1_1_reset_irq(int pic, - int *unit, - int *spl, - i386_intr_t *intr); - -#endif - -void mp_v1_1_init(void); -boolean_t mp_v1_1_io_lock(int, struct processor **); -void mp_v1_1_io_unlock(struct processor *); - -/* Intel default Configurations */ - -#define MP_PROPRIETARY_CONF 0 -#define MP_ISA_CONF 1 -#define MP_EISA_1_CONF 2 -#define MP_EISA_2_CONF 3 -#define MP_MCA_CONF 4 -#define MP_ISA_PCI_CONF 5 -#define MP_EISA_PCI_CONF 6 -#define MP_MCA_PCI_CONF 7 - -#if NCPUS > 1 -#define at386_io_lock_state() panic("at386_io_lock_state called") -#define at386_io_lock(x) panic("at386_io_lock called"); -#define at386_io_unlock() panic("at386_io_unlock") -#endif /* NCPUS > 1 */ - -#endif /* _MP_MP_V1_1_H_ */ diff --git a/osfmk/i386/AT386/video_console.c b/osfmk/i386/AT386/video_console.c deleted file mode 100644 index 6dc5f0438..000000000 --- a/osfmk/i386/AT386/video_console.c +++ /dev/null @@ -1,1996 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_FREE_COPYRIGHT@ - * - */ -/* - * @APPLE_FREE_COPYRIGHT@ - */ -/* MACH PPC - video_console.c - * - * Original based on NetBSD's mac68k/dev/ite.c driver - * - * This driver differs in - * - MACH driver"ized" - * - Uses phys_copy and flush_cache to in several places - * for performance optimizations - * - 7x15 font - * - Black background and white (character) foreground - * - Assumes 6100/7100/8100 class of machine - * - * The original header follows... - * - * - * NetBSD: ite.c,v 1.16 1995/07/17 01:24:34 briggs Exp - * - * Copyright (c) 1988 University of Utah. - * Copyright (c) 1990, 1993 - * The Regents of the University of California. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * the Systems Programming Group of the University of Utah Computer - * Science Department. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: Utah $Hdr: ite.c 1.28 92/12/20$ - * - * @(#)ite.c 8.2 (Berkeley) 1/12/94 - */ - -/* - * ite.c - * - * The ite module handles the system console; that is, stuff printed - * by the kernel and by user programs while "desktop" and X aren't - * running. Some (very small) parts are based on hp300's 4.4 ite.c, - * hence the above copyright. - * - * -- Brad and Lawrence, June 26th, 1994 - * - */ -#include -#include /* spl definitions */ -#include "iso_scan_font.h" -#include -#include -#include -#include -#include "video_console.h" - -#define CHARWIDTH 8 -#define CHARHEIGHT 16 - -#define ATTR_NONE 0 -#define ATTR_BOLD 1 -#define ATTR_UNDER 2 -#define ATTR_REVERSE 4 - -enum vt100state_e { - ESnormal, /* Nothing yet */ - ESesc, /* Got ESC */ - ESsquare, /* Got ESC [ */ - ESgetpars, /* About to get or getting the parameters */ - ESgotpars, /* Finished getting the parameters */ - ESfunckey, /* Function key */ - EShash, /* DEC-specific stuff (screen align, etc.) 
*/ - ESsetG0, /* Specify the G0 character set */ - ESsetG1, /* Specify the G1 character set */ - ESask, - EScharsize, - ESignore /* Ignore this sequence */ -} vt100state = ESnormal; - -static struct vc_info vinfo; -#define IS_TEXT_MODE (vinfo.v_type == TEXT_MODE) - -/* Calculated in vccninit(): */ -static int vc_wrap_mode = 1, vc_relative_origin = 0; -static int vc_charset_select = 0, vc_save_charset_s = 0; -static int vc_charset[2] = { 0, 0 }; -static int vc_charset_save[2] = { 0, 0 }; - -/* VT100 state: */ -#define MAXPARS 16 -static int x = 0, y = 0, savex, savey; -static int par[MAXPARS], numpars, hanging_cursor, attr, saveattr; - -/* VT100 tab stops & scroll region */ -static char tab_stops[255]; -static int scrreg_top, scrreg_bottom; - -/* Misc */ -void vc_flush_forward_buffer(void); -void vc_store_char(unsigned char); - -/* - * For the color support (Michel Pollet) - */ -unsigned char vc_color_index_table[33] = - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 }; - -unsigned long vc_color_depth_masks[4] = - { 0x000000FF, 0x00007FFF, 0x00FFFFFF }; - -unsigned long vc_colors[8][3] = { - { 0xFFFFFFFF, 0x00000000, 0x00000000 }, /* black */ - { 0x23232323, 0x7C007C00, 0x00FF0000 }, /* red */ - { 0xb9b9b9b9, 0x03e003e0, 0x0000FF00 }, /* green */ - { 0x05050505, 0x7FE07FE0, 0x00FFFF00 }, /* yellow */ - { 0xd2d2d2d2, 0x001f001f, 0x000000FF}, /* blue */ -// { 0x80808080, 0x31933193, 0x00666699 }, /* blue */ - { 0x18181818, 0x7C1F7C1F, 0x00FF00FF }, /* magenta */ - { 0xb4b4b4b4, 0x03FF03FF, 0x0000FFFF }, /* cyan */ - { 0x00000000, 0x7FFF7FFF, 0x00FFFFFF } /* white */ -}; - -unsigned long vc_color_mask = 0; -unsigned long vc_color_fore = 0; -unsigned long vc_color_back = 0; -int vc_normal_background = 1; - -/* - * For the jump scroll and buffering (Michel Pollet) - * 80*22 means on a 80*24 screen, the screen will - * scroll jump almost a full screen - * keeping only what's necessary for you to be able to read ;-) - */ -#define VC_MAX_FORWARD_SIZE (80*22) - -/* - * Delay between console updates in clock hz units, the larger the - * delay the fuller the jump-scroll buffer will be and so the faster the - * (scrolling) output. The smaller the delay, the less jerky the - * display. Heuristics show that at 10 touch-typists (Mike!) complain - */ -#define VC_CONSOLE_UPDATE_TIMEOUT 5 - -static unsigned char vc_forward_buffer[VC_MAX_FORWARD_SIZE]; -static long vc_forward_buffer_size = 0; -decl_simple_lock_data(,vc_forward_lock) - -/* Set to 1 by initialize_screen() */ -static int vc_initialized = 0; - -/* Function pointers initialized via initialize_screen() */ -static struct { - void (*initialize)(struct vc_info * vinfo_p); - void (*paintchar)(unsigned char c, int x, int y, int attrs); - void (*scrolldown)(int num); - void (*scrollup)(int num); - void (*clear_screen)(int xx, int yy, int which); - void (*show_cursor)(int x, int y); - void (*hide_cursor)(int x, int y); - void (*update_color)(int color, int fore); -} vc_ops; - -/* - * New Rendering code from Michel Pollet - */ - -#define REN_MAX_DEPTH 32 -/* that's the size for a 32 bits buffer... 
 */
-#define REN_MAX_SIZE (128L*1024)
-unsigned char renderedFont[REN_MAX_SIZE];
-
-/* Rendered Font Size */
-unsigned long vc_rendered_font_size = REN_MAX_SIZE;
-long vc_rendered_error = 0;
-
-/* If the one bit table was reversed */
-short vc_one_bit_reversed = 0;
-
-/* Size of a character in the table (bytes) */
-int	vc_rendered_char_size = 0;
-
-/*
-# Attribute codes:
-# 00=none 01=bold 04=underscore 05=blink 07=reverse 08=concealed
-# Text color codes:
-# 30=black 31=red 32=green 33=yellow 34=blue 35=magenta 36=cyan 37=white
-# Background color codes:
-# 40=black 41=red 42=green 43=yellow 44=blue 45=magenta 46=cyan 47=white
-*/
-
-#define VC_RESET_BACKGROUND 40
-#define VC_RESET_FOREGROUND 37
-
-static void vc_color_set(int color)
-{
-	if (vinfo.v_depth < 8)
-		return;
-	if (color >= 30 && color <= 37) {
-		vc_color_fore = vc_colors[color-30][vc_color_index_table[vinfo.v_depth]];
-		if ( vc_ops.update_color ) vc_ops.update_color(color - 30, 1);
-	}
-	if (color >= 40 && color <= 47) {
-		vc_color_back = vc_colors[color-40][vc_color_index_table[vinfo.v_depth]];
-		if ( vc_ops.update_color ) vc_ops.update_color(color - 40, 0);
-		vc_normal_background = color == 40;
-	}
-}
-
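(vc_color_set() above is fed ANSI SGR numbers, the codes listed in the comment: 30..37 select the text color, 40..47 the background, and resolves them to pixel fill patterns through vc_colors[] and vc_color_index_table[]. A sketch of just that lookup; sgr_to_pixels() is a hypothetical name, but the table indexing matches the deleted code:

    /* Resolve an SGR color code to a fill pattern for the given pixel depth. */
    static unsigned long
    sgr_to_pixels(int code, int depth)
    {
        /* column of vc_colors[]: depth 8 -> 0, depth 16 -> 1, depth 32 -> 2 */
        int col = vc_color_index_table[depth];

        if (code >= 30 && code <= 37)        /* foreground */
            return vc_colors[code - 30][col];
        if (code >= 40 && code <= 47)        /* background */
            return vc_colors[code - 40][col];
        return 0;                            /* not a color code */
    }

Each vc_colors entry repeats the color across a 32-bit word, four 8-bit pixels, two 15-bit pixels, or one 32-bit pixel, so the paint routines below can fill with whole-word stores.)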
-static void vc_render_font(short olddepth, short newdepth)
-{
-	int charIndex;	/* index in ISO font */
-	union {
-		unsigned char  *charptr;
-		unsigned short *shortptr;
-		unsigned long  *longptr;
-	} current; 	/* current place in rendered font, multiple types. */
-
-	unsigned char *theChar;	/* current char in iso_font */
-
-	if (olddepth == newdepth)
-		return;	/* nothing to do */
-
-	vc_rendered_font_size = REN_MAX_SIZE;
-	if (newdepth == 1) {
-		vc_rendered_char_size = 16;
-		if (!vc_one_bit_reversed) {	/* reverse the font for the blitter */
-			int i;
-			for (i = 0; i < ((ISO_CHAR_MAX-ISO_CHAR_MIN+1) * vc_rendered_char_size); i++) {
-				if (iso_font[i]) {
-					unsigned char mask1 = 0x80;
-					unsigned char mask2 = 0x01;
-					unsigned char val = 0;
-					while (mask1) {
-						if (iso_font[i] & mask1)
-							val |= mask2;
-						mask1 >>= 1;
-						mask2 <<= 1;
-					}
-					renderedFont[i] = ~val;
-				} else renderedFont[i] = 0xff;
-			}
-			vc_one_bit_reversed = 1;
-		}
-		return;
-	}
-	{
-		long csize = newdepth / 8;	/* bytes per pixel */
-		vc_rendered_char_size = csize ? CHARHEIGHT * (csize * CHARWIDTH) :
-			/* for 2 & 4 */	CHARHEIGHT * (CHARWIDTH/(6-newdepth));
-		csize = (ISO_CHAR_MAX-ISO_CHAR_MIN+1) * vc_rendered_char_size;
-		if (csize > vc_rendered_font_size) {
-			vc_rendered_error = csize;
-			return;
-		} else
-			vc_rendered_font_size = csize;
-	}
-
-	current.charptr = renderedFont;
-	theChar = iso_font;
-	for (charIndex = ISO_CHAR_MIN; charIndex <= ISO_CHAR_MAX; charIndex++) {
-		int line;
-		for (line = 0; line < CHARHEIGHT; line++) {
-			unsigned char mask = 1;
-			do {
-				switch (newdepth) {
-				case 2: {
-					unsigned char value = 0;
-					if (*theChar & mask) value |= 0xC0; mask <<= 1;
-					if (*theChar & mask) value |= 0x30; mask <<= 1;
-					if (*theChar & mask) value |= 0x0C; mask <<= 1;
-					if (*theChar & mask) value |= 0x03;
-					value = ~value;
-					*current.charptr++ = value;
-				}
-					break;
-				case 4:
-				{
-					unsigned char value = 0;
-					if (*theChar & mask) value |= 0xF0; mask <<= 1;
-					if (*theChar & mask) value |= 0x0F;
-					value = ~value;
-					*current.charptr++ = value;
-				}
-					break;
-				case 8:
-					*current.charptr++ = (*theChar & mask) ? 0xff : 0;
-					break;
-				case 16:
-					*current.shortptr++ = (*theChar & mask) ? 0xFFFF : 0;
-					break;
-
-				case 32:
-					*current.longptr++ = (*theChar & mask) ? 0xFFFFFFFF : 0;
-					break;
-				}
-				mask <<= 1;
-			} while (mask);	/* while the single bit drops to the right */
-			theChar++;
-		}
-	}
-}
-
-static void vc_paint_char1(unsigned char ch, int xx, int yy, int attrs)
-{
-	unsigned char *theChar;
-	unsigned char *where;
-	int i;
-
-	theChar = (unsigned char*)(renderedFont + (ch * vc_rendered_char_size));
-	where = (unsigned char*)(vinfo.v_baseaddr +
-				 (yy * CHARHEIGHT * vinfo.v_rowbytes) +
-				 (xx));
-
-	if (!attrs) for (i = 0; i < CHARHEIGHT; i++) {	/* No attributes ? FLY !!!! */
-		*where = *theChar++;
-
-		where = (unsigned char*)(((unsigned char*)where)+vinfo.v_rowbytes);
-	} else for (i = 0; i < CHARHEIGHT; i++) {	/* a little bit slower */
-		unsigned char val = *theChar++, save = val;
-		if (attrs & ATTR_BOLD) {	/* bold support */
-			unsigned char mask1 = 0xC0, mask2 = 0x40;
-			int bit = 0;
-			for (bit = 0; bit < 7; bit++) {
-				if ((save & mask1) == mask2)
-					val &= ~mask2;
-				mask1 >>= 1;
-				mask2 >>= 1;
-			}
-		}
-		if (attrs & ATTR_REVERSE) val = ~val;
-		if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val;
-		*where = val;
-
-		where = (unsigned char*)(((unsigned char*)where)+vinfo.v_rowbytes);
-	}
-
-}
-
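(vc_paint_char1() above applies the three cell attributes per scanline: ATTR_BOLD thickens strokes by examining adjacent bit pairs (on the 1bpp data, which vc_render_font pre-inverted for the blitter, it clears bits to add ink), ATTR_REVERSE inverts the whole row, and ATTR_UNDER inverts the bottom scanline. A simplified sketch of the same per-row logic for non-inverted 1bpp data, assuming bit 0 is the leftmost pixel; this is a hypothetical helper, not the patch's code:

    static unsigned char
    apply_attrs_1bpp(unsigned char row, int attrs, int is_last_row)
    {
        if (attrs & ATTR_BOLD)
            row |= row << 1;    /* also set the pixel right of every set pixel */
        if (attrs & ATTR_REVERSE)
            row = ~row;         /* swap foreground and background */
        if ((attrs & ATTR_UNDER) && is_last_row)
            row = ~row;         /* underline by inverting the bottom row */
        return row;
    }

The wider-depth painters that follow implement the same idea with one word per pixel group instead of one bit per pixel.)

-static void vc_paint_char2(unsigned char ch, int xx, int yy, int attrs)
-{
-	unsigned short *theChar;
-	unsigned short *where;
-	int i;
-
-	theChar = (unsigned short*)(renderedFont + (ch * vc_rendered_char_size));
-	where = (unsigned short*)(vinfo.v_baseaddr +
-				  (yy * CHARHEIGHT * vinfo.v_rowbytes) +
-				  (xx * 2));
-	if (!attrs) for (i = 0; i < CHARHEIGHT; i++) {	/* No attributes ? FLY !!!! 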
*/ - *where = *theChar++; - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ - unsigned long val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - unsigned long mask1 = 0xff000000, mask2 = 0x0F000000; - int bit = 0; - for (bit = 0; bit < 7; bit++) { - if ((save & mask1) == mask2) - val &= ~mask2; - mask1 >>= 4; - mask2 >>= 4; - } - } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; - *where = val; - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } - -} - -static void vc_paint_char8c(unsigned char ch, int xx, int yy, int attrs) -{ - unsigned long *theChar; - unsigned long *where; - int i; - - theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); - where = (unsigned long*)(vinfo.v_baseaddr + - (yy * CHARHEIGHT * vinfo.v_rowbytes) + - (xx * CHARWIDTH)); - - if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attr? FLY !*/ - unsigned long *store = where; - int x; - for (x = 0; x < 2; x++) { - unsigned long val = *theChar++; - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - } - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < CHARHEIGHT; i++) { /* a little slower */ - unsigned long *store = where, lastpixel = 0; - int x; - for (x = 0 ; x < 2; x++) { - unsigned long val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - if (lastpixel && !(save & 0xFF000000)) - val |= 0xff000000; - if ((save & 0xFFFF0000) == 0xFF000000) - val |= 0x00FF0000; - if ((save & 0x00FFFF00) == 0x00FF0000) - val |= 0x0000FF00; - if ((save & 0x0000FFFF) == 0x0000FF00) - val |= 0x000000FF; - } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; - - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - lastpixel = save & 0xff; - } - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } - -} -static void vc_paint_char16c(unsigned char ch, int xx, int yy, int attrs) -{ - unsigned long *theChar; - unsigned long *where; - int i; - - theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); - where = (unsigned long*)(vinfo.v_baseaddr + - (yy * CHARHEIGHT * vinfo.v_rowbytes) + - (xx * CHARWIDTH * 2)); - - if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attrs ? FLY ! 
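[The colorized paint routines (vc_paint_char8c and friends) treat the pre-rendered glyph as a selection mask, so each word of pixels is colored without a per-pixel branch. A sketch of that select step, assuming fore and back already hold the color replicated across every pixel of the word; this is exactly the val = (vc_color_back & ~val) | (vc_color_fore & val) line in the fast paths.]

#include <stdint.h>

/* Merge foreground and background through the glyph mask: wherever a
 * mask bit is set the foreground color wins, elsewhere the background.
 */
static uint32_t select_pixels(uint32_t mask, uint32_t fore, uint32_t back)
{
    return (back & ~mask) | (fore & mask);
}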
*/ - unsigned long *store = where; - int x; - for (x = 0; x < 4; x++) { - unsigned long val = *theChar++; - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - } - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ - unsigned long *store = where, lastpixel = 0; - int x; - for (x = 0 ; x < 4; x++) { - unsigned long val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - if (save == 0xFFFF0000) val |= 0xFFFF; - else if (lastpixel && !(save & 0xFFFF0000)) - val |= 0xFFFF0000; - } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; - - val = (vc_color_back & ~val) | (vc_color_fore & val); - - *store++ = val; - lastpixel = save & 0x7fff; - } - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } - -} -static void vc_paint_char32c(unsigned char ch, int xx, int yy, int attrs) -{ - unsigned long *theChar; - unsigned long *where; - int i; - - theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); - where = (unsigned long*)(vinfo.v_baseaddr + - (yy * CHARHEIGHT * vinfo.v_rowbytes) + - (xx * CHARWIDTH * 4)); - - if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attrs ? FLY ! */ - unsigned long *store = where; - int x; - for (x = 0; x < 8; x++) { - unsigned long val = *theChar++; - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - } - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < CHARHEIGHT; i++) { /* a little slower */ - unsigned long *store = where, lastpixel = 0; - int x; - for (x = 0 ; x < 8; x++) { - unsigned long val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - if (lastpixel && !save) - val = 0xFFFFFFFF; - } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; - - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - lastpixel = save; - } - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } - -} - -/* - * That's a plain dumb reverse of the cursor position - * It do a binary reverse, so it will not looks good when we have - * color support. we'll see that later - */ -static void reversecursor(int xx, int yy) -{ - union { - unsigned char *charptr; - unsigned short *shortptr; - unsigned long *longptr; - } where; - int line, col; - - where.longptr = (unsigned long*)(vinfo.v_baseaddr + - (y * CHARHEIGHT * vinfo.v_rowbytes) + - (x /** CHARWIDTH*/ * vinfo.v_depth)); - for (line = 0; line < CHARHEIGHT; line++) { - switch (vinfo.v_depth) { - case 1: - *where.charptr = ~*where.charptr; - break; - case 2: - *where.shortptr = ~*where.shortptr; - break; - case 4: - *where.longptr = ~*where.longptr; - break; -/* that code still exists because since characters on the screen are - * of different colors that reverse function may not work if the - * cursor is on a character that is in a different color that the - * current one. When we have buffering, things will work better. 
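[reversecursor draws the cursor by inverting the character cell in place, which is why the same routine both shows and hides it: XOR-ing twice restores the original pixels. A stand-alone sketch under the assumption that rows are 4-byte aligned; names are illustrative.]

#include <stdint.h>
#include <stddef.h>

/* Invert every word of a character cell; a second pass restores it. */
static void xor_cell(uint32_t *cell, size_t row_words, int cell_words, int lines)
{
    for (int line = 0; line < lines; line++) {
        for (int col = 0; col < cell_words; col++)
            cell[col] = ~cell[col];
        cell += row_words;   /* next scanline of the cell */
    }
}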
MP - */ -#if 1 /*VC_BINARY_REVERSE*/ - case 8: - where.longptr[0] = ~where.longptr[0]; - where.longptr[1] = ~where.longptr[1]; - break; - case 16: - for (col = 0; col < 4; col++) - where.longptr[col] = ~where.longptr[col]; - break; - case 32: - for (col = 0; col < 8; col++) - where.longptr[col] = ~where.longptr[col]; - break; -#else - case 8: - for (col = 0; col < 8; col++) - where.charptr[col] = where.charptr[col] != (vc_color_fore & vc_color_mask) ? - vc_color_fore & vc_color_mask : vc_color_back & vc_color_mask; - break; - case 16: - for (col = 0; col < 8; col++) - where.shortptr[col] = where.shortptr[col] != (vc_color_fore & vc_color_mask) ? - vc_color_fore & vc_color_mask : vc_color_back & vc_color_mask; - break; - case 32: - for (col = 0; col < 8; col++) - where.longptr[col] = where.longptr[col] != (vc_color_fore & vc_color_mask) ? - vc_color_fore & vc_color_mask : vc_color_back & vc_color_mask; - break; -#endif - } - where.charptr += vinfo.v_rowbytes; - } -} - - -static void -scrollup(int num) -{ - unsigned long *from, *to, linelongs, i, line, rowline, rowscanline; - - linelongs = vinfo.v_rowbytes * CHARHEIGHT / 4; - rowline = vinfo.v_rowbytes / 4; - rowscanline = vinfo.v_rowscanbytes / 4; - - to = (unsigned long *) vinfo.v_baseaddr + (scrreg_top * linelongs); - from = to + (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ - - i = (scrreg_bottom - scrreg_top) - num; - - while (i-- > 0) { - for (line = 0; line < CHARHEIGHT; line++) { - /* - * Only copy what is displayed - */ -#if 1 - bcopy((unsigned int) from, (unsigned int) to, - vinfo.v_rowscanbytes); -#else - video_scroll_up((unsigned int) from, - (unsigned int) (from+(vinfo.v_rowscanbytes/4)), - (unsigned int) to); -#endif - - from += rowline; - to += rowline; - } - } - - /* Now set the freed up lines to the background colour */ - - - to = ((unsigned long *) vinfo.v_baseaddr + (scrreg_top * linelongs)) - + ((scrreg_bottom - scrreg_top - num) * linelongs); - - for (linelongs = CHARHEIGHT * num; linelongs-- > 0;) { - from = to; - for (i = 0; i < rowscanline; i++) - *to++ = vc_color_back; - - to = from + rowline; - } - -} - -static void -scrolldown(int num) -{ - unsigned long *from, *to, linelongs, i, line, rowline, rowscanline; - - linelongs = vinfo.v_rowbytes * CHARHEIGHT / 4; - rowline = vinfo.v_rowbytes / 4; - rowscanline = vinfo.v_rowscanbytes / 4; - - - to = (unsigned long *) vinfo.v_baseaddr + (linelongs * scrreg_bottom) - - (rowline - rowscanline); - from = to - (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ - - i = (scrreg_bottom - scrreg_top) - num; - - while (i-- > 0) { - for (line = 0; line < CHARHEIGHT; line++) { - /* - * Only copy what is displayed - */ -#if 1 - bcopy(from-(vinfo.v_rowscanbytes/4), to, - vinfo.v_rowscanbytes); -#else - - video_scroll_down((unsigned int) from, - (unsigned int) (from-(vinfo.v_rowscanbytes/4)), - (unsigned int) to); -#endif - - from -= rowline; - to -= rowline; - } - } - - /* Now set the freed up lines to the background colour */ - - to = (unsigned long *) vinfo.v_baseaddr + (linelongs * scrreg_top); - - for (line = CHARHEIGHT * num; line > 0; line--) { - from = to; - - for (i = 0; i < rowscanline; i++) - *(to++) = vc_color_back; - - to = from + rowline; - } - -} - - -static void -clear_line(int which) -{ - int start, end, i; - - /* - * This routine runs extremely slowly. I don't think it's - * used all that often, except for To end of line. I'll go - * back and speed this up when I speed up the whole vc - * module. 
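[scrollup and scrolldown copy only v_rowscanbytes (the visible part of each scanline) and then repaint the freed character rows with the background color. A C sketch of the scroll-up shape under the same assumptions: rowbytes is the full pitch, base and pitch are 4-byte aligned, and scrolling moves whole character cells. All names here are illustrative.]

#include <stdint.h>
#include <string.h>

static void fb_scrollup(uint8_t *base, size_t rowbytes, size_t rowscanbytes,
                        int char_height, int top, int bottom, int num,
                        uint32_t back)
{
    uint8_t *to   = base + (size_t)top * char_height * rowbytes;
    uint8_t *from = to + (size_t)num * char_height * rowbytes;
    int lines = (bottom - top - num) * char_height;

    while (lines-- > 0) {               /* copy only what is displayed */
        memcpy(to, from, rowscanbytes);
        to += rowbytes;
        from += rowbytes;
    }
    lines = num * char_height;          /* 'to' now sits on the freed rows */
    while (lines-- > 0) {
        uint32_t *p = (uint32_t *)(void *)to;
        for (size_t i = 0; i < rowscanbytes / 4; i++)
            p[i] = back;                /* background color, a long at a time */
        to += rowbytes;
    }
}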
--LK - */ - - switch (which) { - case 0: /* To end of line */ - start = x; - end = vinfo.v_columns-1; - break; - case 1: /* To start of line */ - start = 0; - end = x; - break; - default: - case 2: /* Whole line */ - start = 0; - end = vinfo.v_columns-1; - break; - } - - for (i = start; i <= end; i++) { - vc_ops.paintchar(' ', i, y, ATTR_NONE); - } - -} - -static void -clear_screen(int xx, int yy, int which) -{ - unsigned long *p, *endp, *row; - int linelongs, col; - int rowline, rowlongs; - - rowline = vinfo.v_rowscanbytes / 4; - rowlongs = vinfo.v_rowbytes / 4; - - p = (unsigned long*) vinfo.v_baseaddr;; - endp = (unsigned long*) vinfo.v_baseaddr; - - linelongs = vinfo.v_rowbytes * CHARHEIGHT / 4; - - switch (which) { - case 0: /* To end of screen */ - clear_line(0); - if (y < vinfo.v_rows - 1) { - p += (y + 1) * linelongs; - endp += rowlongs * vinfo.v_height; - } - break; - case 1: /* To start of screen */ - clear_line(1); - if (y > 1) { - endp += (y + 1) * linelongs; - } - break; - case 2: /* Whole screen */ - endp += rowlongs * vinfo.v_height; - break; - } - - for (row = p ; row < endp ; row += rowlongs) { - for (col = 0; col < rowline; col++) - *(row+col) = vc_color_back; - } - -} - -static void -reset_tabs(void) -{ - int i; - - for (i = 0; i<= vinfo.v_columns; i++) { - tab_stops[i] = ((i % 8) == 0); - } - -} - -static void -vt100_reset(void) -{ - reset_tabs(); - scrreg_top = 0; - scrreg_bottom = vinfo.v_rows; - attr = ATTR_NONE; - vc_charset[0] = vc_charset[1] = 0; - vc_charset_select = 0; - vc_wrap_mode = 1; - vc_relative_origin = 0; - vc_color_set(VC_RESET_BACKGROUND); - vc_color_set(VC_RESET_FOREGROUND); - -} - -static void -putc_normal(unsigned char ch) -{ - switch (ch) { - case '\a': /* Beep */ - { - if ( FALSE && !IS_TEXT_MODE ) { - /* - * No sound hardware, invert the screen twice instead - */ - unsigned long *ptr; - int i, j; - /* XOR the screen twice */ - for (i = 0; i < 2 ; i++) { - /* For each row, xor the scanbytes */ - for (ptr = (unsigned long*)vinfo.v_baseaddr; - ptr < (unsigned long*)(vinfo.v_baseaddr + - (vinfo.v_height * vinfo.v_rowbytes)); - ptr += (vinfo.v_rowbytes / - sizeof (unsigned long*))) - for (j = 0; - j < vinfo.v_rowscanbytes / - sizeof (unsigned long*); - j++) - *(ptr+j) =~*(ptr+j); - } - } - } - break; - - case 127: /* Delete */ - case '\b': /* Backspace */ - if (hanging_cursor) { - hanging_cursor = 0; - } else - if (x > 0) { - x--; - } - break; - case '\t': /* Tab */ - while (x < vinfo.v_columns && !tab_stops[++x]); - if (x >= vinfo.v_columns) - x = vinfo.v_columns-1; - break; - case 0x0b: - case 0x0c: - case '\n': /* Line feed */ - if (y >= scrreg_bottom -1 ) { - vc_ops.scrollup(1); - y = scrreg_bottom - 1; - } else { - y++; - } - /*break; Pass thru */ - case '\r': /* Carriage return */ - x = 0; - hanging_cursor = 0; - break; - case 0x0e: /* Select G1 charset (Control-N) */ - vc_charset_select = 1; - break; - case 0x0f: /* Select G0 charset (Control-O) */ - vc_charset_select = 0; - break; - case 0x18 : /* CAN : cancel */ - case 0x1A : /* like cancel */ - /* well, i do nothing here, may be later */ - break; - case '\033': /* Escape */ - vt100state = ESesc; - hanging_cursor = 0; - break; - default: - if (ch >= ' ') { - if (hanging_cursor) { - x = 0; - if (y >= scrreg_bottom -1 ) { - vc_ops.scrollup(1); - y = scrreg_bottom - 1; - } else { - y++; - } - hanging_cursor = 0; - } - vc_ops.paintchar((ch >= 0x60 && ch <= 0x7f) ? 
ch + vc_charset[vc_charset_select] - : ch, x, y, attr); - if (x == vinfo.v_columns - 1) { - hanging_cursor = vc_wrap_mode; - } else { - x++; - } - } - break; - } - -} - -static void -putc_esc(unsigned char ch) -{ - vt100state = ESnormal; - - switch (ch) { - case '[': - vt100state = ESsquare; - break; - case 'c': /* Reset terminal */ - vt100_reset(); - vc_ops.clear_screen(x, y, 2); - x = y = 0; - break; - case 'D': /* Line feed */ - case 'E': - if (y >= scrreg_bottom -1) { - vc_ops.scrollup(1); - y = scrreg_bottom - 1; - } else { - y++; - } - if (ch == 'E') x = 0; - break; - case 'H': /* Set tab stop */ - tab_stops[x] = 1; - break; - case 'M': /* Cursor up */ - if (y <= scrreg_top) { - vc_ops.scrolldown(1); - y = scrreg_top; - } else { - y--; - } - break; - case '>': - vt100_reset(); - break; - case '7': /* Save cursor */ - savex = x; - savey = y; - saveattr = attr; - vc_save_charset_s = vc_charset_select; - vc_charset_save[0] = vc_charset[0]; - vc_charset_save[1] = vc_charset[1]; - break; - case '8': /* Restore cursor */ - x = savex; - y = savey; - attr = saveattr; - vc_charset_select = vc_save_charset_s; - vc_charset[0] = vc_charset_save[0]; - vc_charset[1] = vc_charset_save[1]; - break; - case 'Z': /* return terminal ID */ - break; - case '#': /* change characters height */ - vt100state = EScharsize; - break; - case '(': - vt100state = ESsetG0; - break; - case ')': /* character set sequence */ - vt100state = ESsetG1; - break; - case '=': - break; - default: - /* Rest not supported */ - break; - } - -} - -static void -putc_askcmd(unsigned char ch) -{ - if (ch >= '0' && ch <= '9') { - par[numpars] = (10*par[numpars]) + (ch-'0'); - return; - } - vt100state = ESnormal; - - switch (par[0]) { - case 6: - vc_relative_origin = ch == 'h'; - break; - case 7: /* wrap around mode h=1, l=0*/ - vc_wrap_mode = ch == 'h'; - break; - default: - break; - } - -} - -static void -putc_charsizecmd(unsigned char ch) -{ - vt100state = ESnormal; - - switch (ch) { - case '3' : - case '4' : - case '5' : - case '6' : - break; - case '8' : /* fill 'E's */ - { - int xx, yy; - for (yy = 0; yy < vinfo.v_rows; yy++) - for (xx = 0; xx < vinfo.v_columns; xx++) - vc_ops.paintchar('E', xx, yy, ATTR_NONE); - } - break; - } - -} - -static void -putc_charsetcmd(int charset, unsigned char ch) -{ - vt100state = ESnormal; - - switch (ch) { - case 'A' : - case 'B' : - default: - vc_charset[charset] = 0; - break; - case '0' : /* Graphic characters */ - case '2' : - vc_charset[charset] = 0x21; - break; - } - -} - -static void -putc_gotpars(unsigned char ch) -{ - int i; - - if (ch < ' ') { - /* special case for vttest for handling cursor - movement in escape sequences */ - putc_normal(ch); - vt100state = ESgotpars; - return; - } - vt100state = ESnormal; - switch (ch) { - case 'A': /* Up */ - y -= par[0] ? par[0] : 1; - if (y < scrreg_top) - y = scrreg_top; - break; - case 'B': /* Down */ - y += par[0] ? par[0] : 1; - if (y >= scrreg_bottom) - y = scrreg_bottom - 1; - break; - case 'C': /* Right */ - x += par[0] ? par[0] : 1; - if (x >= vinfo.v_columns) - x = vinfo.v_columns-1; - break; - case 'D': /* Left */ - x -= par[0] ? par[0] : 1; - if (x < 0) - x = 0; - break; - case 'H': /* Set cursor position */ - case 'f': - x = par[1] ? par[1] - 1 : 0; - y = par[0] ? 
par[0] - 1 : 0; - if (vc_relative_origin) - y += scrreg_top; - hanging_cursor = 0; - break; - case 'X': /* clear p1 characters */ - if (numpars) { - int i; - for (i = x; i < x + par[0]; i++) - vc_ops.paintchar(' ', i, y, ATTR_NONE); - } - break; - case 'J': /* Clear part of screen */ - vc_ops.clear_screen(x, y, par[0]); - break; - case 'K': /* Clear part of line */ - clear_line(par[0]); - break; - case 'g': /* tab stops */ - switch (par[0]) { - case 1: - case 2: /* reset tab stops */ - /* reset_tabs(); */ - break; - case 3: /* Clear every tabs */ - { - int i; - - for (i = 0; i <= vinfo.v_columns; i++) - tab_stops[i] = 0; - } - break; - case 0: - tab_stops[x] = 0; - break; - } - break; - case 'm': /* Set attribute */ - for (i = 0; i < numpars; i++) { - switch (par[i]) { - case 0: - attr = ATTR_NONE; - vc_color_set(VC_RESET_BACKGROUND); - vc_color_set(VC_RESET_FOREGROUND); - break; - case 1: - attr |= ATTR_BOLD; - break; - case 4: - attr |= ATTR_UNDER; - break; - case 7: - attr |= ATTR_REVERSE; - break; - case 22: - attr &= ~ATTR_BOLD; - break; - case 24: - attr &= ~ATTR_UNDER; - break; - case 27: - attr &= ~ATTR_REVERSE; - break; - case 5: - case 25: /* blink/no blink */ - break; - default: - vc_color_set(par[i]); - break; - } - } - break; - case 'r': /* Set scroll region */ - x = y = 0; - /* ensure top < bottom, and both within limits */ - if ((numpars > 0) && (par[0] < vinfo.v_rows)) { - scrreg_top = par[0] ? par[0] - 1 : 0; - if (scrreg_top < 0) - scrreg_top = 0; - } else { - scrreg_top = 0; - } - if ((numpars > 1) && (par[1] <= vinfo.v_rows) && (par[1] > par[0])) { - scrreg_bottom = par[1]; - if (scrreg_bottom > vinfo.v_rows) - scrreg_bottom = vinfo.v_rows; - } else { - scrreg_bottom = vinfo.v_rows; - } - if (vc_relative_origin) - y = scrreg_top; - break; - } - -} - -static void -putc_getpars(unsigned char ch) -{ - if (ch == '?') { - vt100state = ESask; - return; - } - if (ch == '[') { - vt100state = ESnormal; - /* Not supported */ - return; - } - if (ch == ';' && numpars < MAXPARS - 1) { - numpars++; - } else - if (ch >= '0' && ch <= '9') { - par[numpars] *= 10; - par[numpars] += ch - '0'; - } else { - numpars++; - vt100state = ESgotpars; - putc_gotpars(ch); - } -} - -static void -putc_square(unsigned char ch) -{ - int i; - - for (i = 0; i < MAXPARS; i++) { - par[i] = 0; - } - - numpars = 0; - vt100state = ESgetpars; - - putc_getpars(ch); - -} - -void -vc_putchar(char ch) -{ - if (!ch) { - return; /* ignore null characters */ - } - - switch (vt100state) { - default:vt100state = ESnormal; /* FALLTHROUGH */ - case ESnormal: - putc_normal(ch); - break; - case ESesc: - putc_esc(ch); - break; - case ESsquare: - putc_square(ch); - break; - case ESgetpars: - putc_getpars(ch); - break; - case ESgotpars: - putc_gotpars(ch); - break; - case ESask: - putc_askcmd(ch); - break; - case EScharsize: - putc_charsizecmd(ch); - break; - case ESsetG0: - putc_charsetcmd(0, ch); - break; - case ESsetG1: - putc_charsetcmd(1, ch); - break; - } - - if (x >= vinfo.v_columns) { - x = vinfo.v_columns - 1; - } - if (x < 0) { - x = 0; - } - if (y >= vinfo.v_rows) { - y = vinfo.v_rows - 1; - } - if (y < 0) { - y = 0; - } - -} - -/* - * Actually draws the buffer, handle the jump scroll - */ -void vc_flush_forward_buffer(void) -{ - if (vc_forward_buffer_size) { - int start = 0; - vc_ops.hide_cursor(x, y); - do { - int i; - int plaintext = 1; - int drawlen = start; - int jump = 0; - int param = 0, changebackground = 0; - enum vt100state_e vtState = vt100state; - /* - * In simple words, here we're pre-parsing the text 
to look for - * + Newlines, for computing jump scroll - * + /\033\[[0-9;]*]m/ to continue on - * any other sequence will stop. We don't want to have cursor - * movement escape sequences while we're trying to pre-scroll - * the screen. - * We have to be extra carefull about the sequences that changes - * the background color to prevent scrolling in those - * particular cases. - * That parsing was added to speed up 'man' and 'color-ls' a - * zillion time (at least). It's worth it, trust me. - * (mail Nick Stephen for a True Performance Graph) - * Michel Pollet - */ - for (i = start; i < vc_forward_buffer_size && plaintext; i++) { - drawlen++; - switch (vtState) { - case ESnormal: - switch (vc_forward_buffer[i]) { - case '\033': - vtState = ESesc; - break; - case '\n': - jump++; - break; - } - break; - case ESesc: - switch (vc_forward_buffer[i]) { - case '[': - vtState = ESgetpars; - param = 0; - changebackground = 0; - break; - default: - plaintext = 0; - break; - } - break; - case ESgetpars: - if ((vc_forward_buffer[i] >= '0' && - vc_forward_buffer[i] <= '9') || - vc_forward_buffer[i] == ';') { - if (vc_forward_buffer[i] >= '0' && - vc_forward_buffer[i] <= '9') - param = (param*10)+(vc_forward_buffer[i]-'0'); - else { - if (param >= 40 && param <= 47) - changebackground = 1; - if (!vc_normal_background && - !param) - changebackground = 1; - param = 0; - } - break; /* continue on */ - } - vtState = ESgotpars; - /* fall */ - case ESgotpars: - switch (vc_forward_buffer[i]) { - case 'm': - vtState = ESnormal; - if (param >= 40 && param <= 47) - changebackground = 1; - if (!vc_normal_background && - !param) - changebackground = 1; - if (changebackground) { - plaintext = 0; - jump = 0; - /* REALLY don't jump */ - } - /* Yup ! we've got it */ - break; - default: - plaintext = 0; - break; - } - break; - default: - plaintext = 0; - break; - } - - } - - /* - * Then we look if it would be appropriate to forward jump - * the screen before drawing - */ - if (jump && (scrreg_bottom - scrreg_top) > 2) { - jump -= scrreg_bottom - y - 1; - if (jump > 0 ) { - if (jump >= scrreg_bottom - scrreg_top) - jump = scrreg_bottom - scrreg_top -1; - y -= jump; - vc_ops.scrollup(jump); - } - } - /* - * and we draw what we've found to the parser - */ - for (i = start; i < drawlen; i++) - vc_putchar(vc_forward_buffer[start++]); - /* - * Continue sending characters to the parser until we're sure we're - * back on normal characters. - */ - for (i = start; i < vc_forward_buffer_size && - vt100state != ESnormal ; i++) - vc_putchar(vc_forward_buffer[start++]); - /* Then loop again if there still things to draw */ - } while (start < vc_forward_buffer_size); - vc_forward_buffer_size = 0; - vc_ops.show_cursor(x, y); - } -} - -int -vcputc(int l, int u, int c) -{ - if ( vc_initialized ) - { - vc_store_char(c); - vc_flush_forward_buffer(); - } - return 0; -} - -/* - * Immediate character display.. kernel printf uses this. Make sure - * pre-clock printfs get flushed and that panics get fully displayed. - */ - -void cnputc(char ch) -{ - vcputc(0, 0, ch); -} - -/* - * Store characters to be drawn 'later', handle overflows - */ - -void -vc_store_char(unsigned char c) -{ - - /* Either we're really buffering stuff or we're not yet because - * the probe hasn't been done. 
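[The pre-parse above exists so a burst of buffered text can trigger one multi-line scroll ("jump scroll") instead of scrolling once per newline; it deliberately lets harmless SGR sequences (ESC [ ... m) pass and refuses to jump when the background color changes, since pre-scrolling would then clear with the wrong color. A much-reduced sketch of the newline-counting idea; the real parser above is the authority on which sequences are safe.]

/* Count how many newlines a pending run of plain text will emit,
 * stopping at the first escape byte since sequences may move the
 * cursor or change colors.  The caller can then scroll once by the
 * returned amount before drawing.
 */
static int count_plain_newlines(const unsigned char *buf, int len)
{
    int jump = 0;
    int i;

    for (i = 0; i < len; i++) {
        if (buf[i] == 0x1B)   /* ESC: stop pre-scanning here */
            break;
        if (buf[i] == '\n')
            jump++;
    }
    return jump;
}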
If we're not, then we can only - * ever have a maximum of one character in the buffer waiting to - * be flushed - */ - - vc_forward_buffer[vc_forward_buffer_size++] = (unsigned char)c; - - switch (vc_forward_buffer_size) { - case 1: - /* If we're adding the first character to the buffer, - * start the timer, otherwise it is already running. - */ - break; - case VC_MAX_FORWARD_SIZE: - vc_flush_forward_buffer(); - break; - default: - /* - * the character will be flushed on timeout - */ - break; - } -} - -static void -vc_initialize(struct vc_info * vinfo_p) -{ - vinfo.v_rows = vinfo.v_height / CHARHEIGHT; - vinfo.v_columns = vinfo.v_width / CHARWIDTH; - - if (vinfo.v_depth >= 8) { - vinfo.v_rowscanbytes = (vinfo.v_depth / 8) * vinfo.v_width; - } else { - vinfo.v_rowscanbytes = vinfo.v_width / (8 / vinfo.v_depth); - } - - vc_render_font(1, vinfo.v_depth); - vc_color_mask = vc_color_depth_masks[vc_color_index_table[vinfo.v_depth]]; - vt100_reset(); - switch (vinfo.v_depth) { - default: - case 1: - vc_ops.paintchar = vc_paint_char1; - break; - case 2: - vc_ops.paintchar = vc_paint_char2; - break; - case 4: - vc_ops.paintchar = vc_paint_char4; - break; - case 8: - vc_ops.paintchar = vc_paint_char8c; - break; - case 16: - vc_ops.paintchar = vc_paint_char16c; - break; - case 32: - vc_ops.paintchar = vc_paint_char32c; - break; - } -} - -void -vcattach(void) -{ - if (vinfo.v_depth >= 8) - printf("\033[31mC\033[32mO\033[33mL\033[34mO\033[35mR\033[0m "); - printf("video console at 0x%lx (%ldx%ldx%ld)\n", vinfo.v_baseaddr, - vinfo.v_width, vinfo.v_height, vinfo.v_depth); - -#if 0 // XXX - FIXME - /* - * Added for the buffering and jump scrolling - */ - /* Init our lock */ - simple_lock_init(&vc_forward_lock, ETAP_IO_TTY); - - vc_forward_buffer_enabled = 1; -#else // FIXME TOO!!! 
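[vc_initialize derives the text geometry from the pixel mode: rows and columns come from the character cell, and v_rowscanbytes is the number of bytes actually displayed per scanline. A sketch of that math, assuming the same 8x16 cell (CHARWIDTH x CHARHEIGHT) this console uses; names are illustrative.]

struct text_geom {
    unsigned long rows, columns, rowscanbytes;
};

static struct text_geom vc_geometry(unsigned long width,
                                    unsigned long height,
                                    unsigned long depth)
{
    struct text_geom g;

    g.rows    = height / 16;                   /* CHARHEIGHT */
    g.columns = width / 8;                     /* CHARWIDTH  */
    if (depth >= 8)
        g.rowscanbytes = (depth / 8) * width;  /* whole bytes per pixel */
    else
        g.rowscanbytes = width / (8 / depth);  /* packed sub-byte pixels */
    return g;
}

[For a 1024x768x32 mode this yields 128 columns, 48 rows, and 4096 visible bytes per scanline.]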
- /* Init our lock */ - simple_lock_init(&vc_forward_lock, ETAP_IO_TTY); -#endif -} - - -struct vc_progress_element { - unsigned int version; - unsigned int flags; - unsigned int time; - unsigned char count; - unsigned char res[3]; - int width; - int height; - int dx; - int dy; - int transparent; - unsigned int res2[3]; - unsigned char data[0]; -}; -typedef struct vc_progress_element vc_progress_element; - -static vc_progress_element * vc_progress; -static const unsigned char * vc_progress_data; -static const unsigned char * vc_progress_alpha; -static boolean_t vc_progress_enable; -static const unsigned char * vc_clut; -static const unsigned char * vc_clut8; -static unsigned int vc_progress_tick; -static boolean_t vc_graphics_mode; -static boolean_t vc_acquired; -static boolean_t vc_need_clear; -static boolean_t vc_needsave; -static vm_address_t vc_saveunder; -static vm_size_t vc_saveunder_len; - -void vc_blit_rect_8c( int x, int y, - int width, int height, - const unsigned char * dataPtr, const unsigned char * alphaPtr, - unsigned char * backPtr, boolean_t save ) -{ - volatile unsigned char * dst; - int line, col; - unsigned char data; - - dst = (unsigned char *)(vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x)); - - dst = (unsigned char *)(vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x)); - - for( line = 0; line < height; line++) { - for( col = 0; col < width; col++) - *(dst + col) = *dataPtr++; - dst = (volatile unsigned char *) (((int)dst) + vinfo.v_rowbytes); - } -} - -void vc_blit_rect_16( int x, int y, - int width, int height, - const unsigned char * dataPtr, const unsigned char * alphaPtr, - unsigned short * backPtr, boolean_t save ) -{ - volatile unsigned short * dst; - int line, col; - unsigned int data, index, alpha, back; - - dst = (volatile unsigned short *)(vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x * 2)); - - for( line = 0; line < height; line++) { - for( col = 0; col < width; col++) { - index = *dataPtr++; - index *= 3; - - if( alphaPtr && backPtr && (alpha = *alphaPtr++)) { - - data = 0; - if( vc_clut[index + 0] > alpha) - data |= (((vc_clut[index + 0] - alpha) & 0xf8) << 7); - if( vc_clut[index + 1] > alpha) - data |= (((vc_clut[index + 1] - alpha) & 0xf8) << 2); - if( vc_clut[index + 2] > alpha) - data |= (((vc_clut[index + 2] - alpha) & 0xf8) >> 3); - - if( save) { - back = *(dst + col); - alpha >>= 3; - back = (((((back & 0x7c1f) * alpha) + 0x7c1f) >> 5) & 0x7c1f) - | (((((back & 0x03e0) * alpha) + 0x03e0) >> 5) & 0x03e0); - *backPtr++ = back; - } else - back = *backPtr++; - - data += back; - - } else - data = ( (0xf8 & (vc_clut[index + 0])) << 7) - | ( (0xf8 & (vc_clut[index + 1])) << 2) - | ( (0xf8 & (vc_clut[index + 2])) >> 3); - - *(dst + col) = data; - } - dst = (volatile unsigned short *) (((int)dst) + vinfo.v_rowbytes); - } -} - -void vc_blit_rect_32( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - const unsigned char * dataPtr, const unsigned char * alphaPtr, - unsigned int * backPtr, boolean_t save ) -{ - volatile unsigned int * dst; - int line, col; - unsigned int data, index, alpha, back; - - dst = (volatile unsigned int *) (vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x * 4)); - - for( line = 0; line < height; line++) { - for( col = 0; col < width; col++) { - index = *dataPtr++; - index *= 3; - if( alphaPtr && backPtr && (alpha = *alphaPtr++)) { - - data = 0; - if( vc_clut[index + 0] > alpha) - data |= ((vc_clut[index + 0] - alpha) << 16); - if( vc_clut[index + 1] > alpha) - data |= ((vc_clut[index + 1] - 
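[vc_blit_rect_16 packs 8-bit CLUT components into x-5-5-5 pixels by truncating each channel to its top five bits. A sketch of the no-alpha path's packing; the helper name is illustrative.]

#include <stdint.h>

/* Pack 8-bit R/G/B into a 15-bit x-5-5-5 pixel: red lands in bits
 * 14..10, green in 9..5, blue in 4..0.
 */
static uint16_t pack_x555(uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t)(((r & 0xF8) << 7) |
                      ((g & 0xF8) << 2) |
                      ((b & 0xF8) >> 3));
}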
alpha) << 8); - if( vc_clut[index + 2] > alpha) - data |= ((vc_clut[index + 2] - alpha)); - - if( save) { - back = *(dst + col); - back = (((((back & 0x00ff00ff) * alpha) + 0x00ff00ff) >> 8) & 0x00ff00ff) - | (((((back & 0x0000ff00) * alpha) + 0x0000ff00) >> 8) & 0x0000ff00); - *backPtr++ = back; - } else - back = *backPtr++; - - data += back; - - } else - data = (vc_clut[index + 0] << 16) - | (vc_clut[index + 1] << 8) - | (vc_clut[index + 2]); - - *(dst + col) = data; - } - dst = (volatile unsigned int *) (((int)dst) + vinfo.v_rowbytes); - } -} - -void vc_blit_rect( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - const unsigned char * dataPtr, const unsigned char * alphaPtr, - vm_address_t backBuffer, boolean_t save ) -{ - if(!vinfo.v_baseaddr) - return; - - switch( vinfo.v_depth) { - case 8: - if( vc_clut8 == vc_clut) - vc_blit_rect_8c( x, y, width, height, dataPtr, alphaPtr, (unsigned char *) backBuffer, save ); - break; - case 16: - vc_blit_rect_16( x, y, width, height, dataPtr, alphaPtr, (unsigned short *) backBuffer, save ); - break; - case 32: - vc_blit_rect_32( x, y, width, height, dataPtr, alphaPtr, (unsigned int *) backBuffer, save ); - break; - } -} - -void vc_progress_task( void * arg ) -{ - spl_t s; - int count = (int) arg; - int x, y, width, height; - const unsigned char * data; - - s = splhigh(); - simple_lock(&vc_forward_lock); - - if( vc_progress_enable) { - - count++; - if( count >= vc_progress->count) - count = 0; - - width = vc_progress->width; - height = vc_progress->height; - x = vc_progress->dx; - y = vc_progress->dy; - data = vc_progress_data; - data += count * width * height; - if( 1 & vc_progress->flags) { - x += ((vinfo.v_width - width) / 2); - y += ((vinfo.v_height - height) / 2); - } - vc_blit_rect( x, y, width, height, - data, vc_progress_alpha, vc_saveunder, vc_needsave ); - vc_needsave = FALSE; - - timeout( vc_progress_task, (void *) count, - vc_progress_tick ); - } - simple_unlock(&vc_forward_lock); - splx(s); -} - -void vc_display_icon( vc_progress_element * desc, - const unsigned char * data ) -{ - int x, y, width, height; - - if( vc_acquired && vc_graphics_mode && vc_clut) { - - width = desc->width; - height = desc->height; - x = desc->dx; - y = desc->dy; - if( 1 & desc->flags) { - x += ((vinfo.v_width - width) / 2); - y += ((vinfo.v_height - height) / 2); - } - vc_blit_rect( x, y, width, height, data, NULL, (vm_address_t) NULL, FALSE ); - } -} - -boolean_t -vc_progress_set( boolean_t enable, unsigned int initial_tick ) -{ - spl_t s; - vm_address_t saveBuf = 0; - vm_size_t saveLen = 0; - - if( !vc_progress) - return( FALSE ); - - if( enable) { - saveLen = vc_progress->width * vc_progress->height * vinfo.v_depth / 8; - saveBuf = kalloc( saveLen ); - } - - s = splhigh(); - simple_lock(&vc_forward_lock); - - if( vc_progress_enable != enable) { - vc_progress_enable = enable; - if( enable) { - vc_needsave = TRUE; - vc_saveunder = saveBuf; - vc_saveunder_len = saveLen; - saveBuf = 0; - saveLen = 0; - timeout(vc_progress_task, (void *) 0, - initial_tick ); - } - else { - if( vc_saveunder) { - saveBuf = vc_saveunder; - saveLen = vc_saveunder_len; - vc_saveunder = 0; - vc_saveunder_len = 0; - } - untimeout( vc_progress_task, (void *) 0 ); - } - } - - simple_unlock(&vc_forward_lock); - splx(s); - - if( saveBuf) - kfree( saveBuf, saveLen ); - - return( TRUE ); -} - - -boolean_t -vc_progress_initialize( vc_progress_element * desc, - const unsigned char * data, - const unsigned char * clut ) -{ - if( (!clut) || (!desc) || (!data)) - 
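[The save path in vc_blit_rect_32 scales two 8-bit channels per multiply by keeping them a byte apart in the word: the 0x00ff00ff / 0x0000ff00 addends round up, and the mask after the shift discards the cross-channel carries. The same arithmetic restated as a stand-alone C helper.]

#include <stdint.h>

/* Scale red+blue (held a byte apart in one word) and green by an
 * 8-bit alpha using one multiply per pair, as the 32 bpp blit does
 * when saving the pixels under the progress animation.
 */
static uint32_t scale_rb_and_g(uint32_t back, uint32_t alpha /* 0..255 */)
{
    uint32_t rb = (((back & 0x00FF00FFu) * alpha) + 0x00FF00FFu) >> 8;
    uint32_t g  = (((back & 0x0000FF00u) * alpha) + 0x0000FF00u) >> 8;

    return (rb & 0x00FF00FFu) | (g & 0x0000FF00u);
}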
return( FALSE ); - vc_clut = clut; - vc_clut8 = clut; - - vc_progress = desc; - vc_progress_data = data; - if( 2 & vc_progress->flags) - vc_progress_alpha = vc_progress_data - + vc_progress->count * vc_progress->width * vc_progress->height; - else - vc_progress_alpha = NULL; - vc_progress_tick = vc_progress->time * hz / 1000; - - return( TRUE ); -} - -extern int disableConsoleOutput; - -void vc_clear_screen( void ) -{ - vc_ops.hide_cursor(x, y); - vt100_reset(); - x = y = 0; - vc_ops.clear_screen(x, y, 2); - vc_ops.show_cursor(x, y); -}; - -void -initialize_screen(Boot_Video * boot_vinfo, int op) -{ - if ( boot_vinfo ) - { - vinfo.v_width = boot_vinfo->v_width; - vinfo.v_height = boot_vinfo->v_height; - vinfo.v_depth = boot_vinfo->v_depth; - vinfo.v_rowbytes = boot_vinfo->v_rowBytes; - vinfo.v_baseaddr = boot_vinfo->v_baseAddr; - vinfo.v_type = boot_vinfo->v_display; - - if ( IS_TEXT_MODE ) - { - // Text mode setup by the booter. - - vc_ops.initialize = tc_initialize; - vc_ops.paintchar = tc_putchar; - vc_ops.scrolldown = tc_scrolldown; - vc_ops.scrollup = tc_scrollup; - vc_ops.clear_screen = tc_clear_screen; - vc_ops.hide_cursor = tc_hide_cursor; - vc_ops.show_cursor = tc_show_cursor; - vc_ops.update_color = tc_update_color; - } - else - { - // Graphics mode setup by the booter. - - vc_ops.initialize = vc_initialize; - vc_ops.paintchar = 0; - vc_ops.scrolldown = scrolldown; - vc_ops.scrollup = scrollup; - vc_ops.clear_screen = clear_screen; - vc_ops.hide_cursor = reversecursor; - vc_ops.show_cursor = reversecursor; - vc_ops.update_color = 0; - } - - vc_ops.initialize(&vinfo); - - // vc_clear_screen(); - - vc_initialized = 1; - } - - switch ( op ) { - - case kPEGraphicsMode: - vc_graphics_mode = TRUE; - disableConsoleOutput = TRUE; - vc_acquired = TRUE; - break; - - case kPETextMode: - vc_graphics_mode = FALSE; - disableConsoleOutput = FALSE; - vc_acquired = TRUE; - vc_clear_screen(); - break; - - case kPETextScreen: - vc_progress_set( FALSE, 0 ); - disableConsoleOutput = FALSE; - if( vc_need_clear) { - vc_need_clear = FALSE; - vc_clear_screen(); - } - break; - - case kPEEnableScreen: - if ( vc_acquired) { - if( vc_graphics_mode) - vc_progress_set( TRUE, vc_progress_tick ); - else - vc_clear_screen(); - } - break; - - case kPEDisableScreen: - vc_progress_set( FALSE, 0 ); - break; - - case kPEAcquireScreen: - vc_need_clear = (FALSE == vc_acquired); - vc_acquired = TRUE; - vc_progress_set( vc_graphics_mode, vc_need_clear ? 2 * hz : 0 ); - disableConsoleOutput = vc_graphics_mode; - if( vc_need_clear && !vc_graphics_mode) { - vc_need_clear = FALSE; - vc_clear_screen(); - } - break; - - case kPEReleaseScreen: - vc_acquired = FALSE; - vc_progress_set( FALSE, 0 ); - vc_clut8 = NULL; - disableConsoleOutput = TRUE; - break; - } -} diff --git a/osfmk/i386/AT386/video_console.h b/osfmk/i386/AT386/video_console.h deleted file mode 100644 index 130956005..000000000 --- a/osfmk/i386/AT386/video_console.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. 
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __PEXPERT_VIDEO_CONSOLE_H -#define __PEXPERT_VIDEO_CONSOLE_H - -/* - * Video console properties. - */ -struct vc_info { - unsigned long v_height; /* pixels */ - unsigned long v_width; /* pixels */ - unsigned long v_depth; - unsigned long v_rowbytes; - unsigned long v_baseaddr; - unsigned long v_type; - char v_name[32]; - unsigned long v_physaddr; - unsigned long v_rows; /* characters */ - unsigned long v_columns; /* characters */ - unsigned long v_rowscanbytes; /* Actualy number of bytes used for display per row */ - unsigned long v_reserved[5]; -}; - -/* - * From text_console.c - */ -extern void tc_putchar(unsigned char ch, int x, int y, int attrs); -extern void tc_scrolldown(int lines); -extern void tc_scrollup(int lines); -extern void tc_clear_screen(int x, int y, int operation); -extern void tc_show_cursor(int x, int y); -extern void tc_hide_cursor(int x, int y); -extern void tc_initialize(struct vc_info * vinfo_p); -extern void tc_update_color(int color, int fore); - -#endif /* !__PEXPERT_VIDEO_CONSOLE_H */ diff --git a/osfmk/i386/Makefile b/osfmk/i386/Makefile index cb8a18ef7..869a3f0cd 100644 --- a/osfmk/i386/Makefile +++ b/osfmk/i386/Makefile @@ -13,7 +13,12 @@ EXPORT_ONLY_FILES = \ hw_lock_types.h \ io_map_entries.h \ lock.h \ - machine_routines.h + machine_routines.h \ + machine_cpu.h \ + mp.h \ + mp_events.h \ + apic.h \ + cpuid.h INSTALL_MD_DIR = i386 diff --git a/osfmk/i386/apic.h b/osfmk/i386/apic.h index f18199a24..d7e684655 100644 --- a/osfmk/i386/apic.h +++ b/osfmk/i386/apic.h @@ -83,10 +83,12 @@ #define LAPIC_ICRD 0x00000310 #define LAPIC_ICRD_DEST_SHIFT 24 #define LAPIC_LVT_TIMER 0x00000320 +#define LAPIC_LVT_THERMAL 0x00000330 +#define LAPIC_LVT_PERFCNT 0x00000340 #define LAPIC_LVT_LINT0 0x00000350 #define LAPIC_LVT_LINT1 0x00000360 #define LAPIC_LVT_ERROR 0x00000370 -#define LAPIC_LVT_VECTOR_MASK 0x0000F +#define LAPIC_LVT_VECTOR_MASK 0x000FF #define LAPIC_LVT_DM_SHIFT 8 #define LAPIC_LVT_DM_MASK 0x00007 #define LAPIC_LVT_DM_FIXED 0x00000 diff --git a/osfmk/i386/asm.h b/osfmk/i386/asm.h index 6f2928ccf..62cc04096 100644 --- a/osfmk/i386/asm.h +++ b/osfmk/i386/asm.h @@ -85,7 +85,7 @@ /* There is another definition of ALIGN for .c sources */ #ifdef ASSEMBLER -#define ALIGN 2 +#define ALIGN 2,0x90 #endif /* ASSEMBLER */ #ifndef FALIGN @@ -158,7 +158,7 @@ .align ALIGN;\ LBc(x, 8) .long 0;\ .text;\ - movl $LBb(x,8),%edx;\ + movl LBb(x,8),%edx;\ call *EXT(_mcount_ptr); #endif /* GPROF */ diff --git a/osfmk/i386/bsd_i386.c b/osfmk/i386/bsd_i386.c index ad24633c2..b1d3cd11b 100644 --- a/osfmk/i386/bsd_i386.c +++ b/osfmk/i386/bsd_i386.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -83,18 +84,15 @@ struct i386_saved_state * get_user_regs( thread_act_t); -void -act_thread_dup( - thread_act_t, - thread_act_t -); - unsigned int get_msr_exportmask(void); unsigned int get_msr_nbits(void); unsigned int get_msr_rbits(void); +kern_return_t +thread_compose_cthread_desc(unsigned int addr, pcb_t pcb); + /* * thread_userstack: * @@ 
-208,8 +206,8 @@ get_user_regs(thread_act_t th) * Duplicate parent state in child * for U**X fork. */ -void -act_thread_dup( +kern_return_t +machine_thread_dup( thread_act_t parent, thread_act_t child ) @@ -225,12 +223,8 @@ act_thread_dup( } #endif - if (child->mact.pcb == NULL - || parent->mact.pcb == NULL) { - panic("[thread_dup, child (%x) or parent (%x) is NULL!]", - child->mact.pcb, parent->mact.pcb); - return; - } + if (child->mact.pcb == NULL || parent->mact.pcb == NULL) + return (KERN_FAILURE); /* Copy over the i386_saved_state registers */ child->mact.pcb->iss = parent->mact.pcb->iss; @@ -248,6 +242,8 @@ act_thread_dup( /* FIXME - should a user specified LDT, TSS and V86 info * be duplicated as well?? - probably not. */ + + return (KERN_SUCCESS); } /* @@ -362,10 +358,8 @@ unix_syscall_return(int error) KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, error, rval[0], rval[1], 0, 0); - if (callp->sy_funnel != NO_FUNNEL) { - assert(thread_funnel_get() == THR_FUNNEL_NULL); + if (callp->sy_funnel != NO_FUNNEL) (void) thread_funnel_set(current_thread()->funnel_lock, FALSE); - } thread_exception_return(); /* NOTREACHED */ @@ -433,7 +427,7 @@ unix_syscall(struct i386_saved_state *regs) *ip, *(ip+1), *(ip+2), *(ip+3), 0); } - error = (*(callp->sy_call))(p, (void *) vt, rval); + error = (*(callp->sy_call))(p, (void *) vt, (int *) &rval[0]); #if 0 /* May be needed with vfork changes */ @@ -499,34 +493,55 @@ machdep_syscall( struct i386_saved_state *regs) /* NOTREACHED */ } - asm volatile(" - 1: - mov (%2),%%eax; - pushl %%eax; - sub $4,%2; - dec %1; - jne 1b; - mov %3,%%eax; - call *%%eax; - mov %%eax,%0" - - : "=r" (regs->eax) - : "r" (nargs), - "r" (&args[nargs - 1]), - "g" (entry->routine) - : "ax", "cx", "dx", "sp"); + switch (nargs) { + case 1: + regs->eax = (*entry->routine)(args[0]); + break; + case 2: + regs->eax = (*entry->routine)(args[0],args[1]); + break; + case 3: + regs->eax = (*entry->routine)(args[0],args[1],args[2]); + break; + case 4: + regs->eax = (*entry->routine)(args[0],args[1],args[2],args[3]); + break; + default: + panic("machdep_syscall(): too many args"); + } } else - regs->eax = (unsigned int)(*entry->routine)(); + regs->eax = (*entry->routine)(); - if (current_thread()->funnel_lock) - (void) thread_funnel_set(current_thread()->funnel_lock, FALSE); + if (current_thread()->funnel_lock) + (void) thread_funnel_set(current_thread()->funnel_lock, FALSE); thread_exception_return(); /* NOTREACHED */ } +kern_return_t +thread_compose_cthread_desc(unsigned int addr, pcb_t pcb) +{ + struct real_descriptor desc; + extern struct fake_descriptor *mp_ldt[]; + struct real_descriptor *ldtp; + int mycpu = cpu_number(); + + ldtp = (struct real_descriptor *)mp_ldt[mycpu]; + desc.limit_low = 1; + desc.limit_high = 0; + desc.base_low = addr & 0xffff; + desc.base_med = (addr >> 16) & 0xff; + desc.base_high = (addr >> 24) & 0xff; + desc.access = ACC_P|ACC_PL_U|ACC_DATA_W; + desc.granularity = SZ_32|SZ_G; + pcb->cthread_desc = desc; + ldtp[sel_idx(USER_CTHREAD)] = desc; + return(KERN_SUCCESS); +} + kern_return_t thread_set_cthread_self(int self) { @@ -541,6 +556,16 @@ thread_get_cthread_self(void) return ((kern_return_t)current_act()->mact.pcb->cthread_self); } +kern_return_t +thread_fast_set_cthread_self(int self) +{ + pcb_t pcb; + pcb = (pcb_t)current_act()->mact.pcb; + thread_compose_cthread_desc((unsigned int)self, pcb); + pcb->cthread_self = (unsigned int)self; /* preserve old func too */ + return (USER_CTHREAD); +} + void mach25_syscall(struct i386_saved_state 
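[thread_compose_cthread_desc above builds a small user-writable data descriptor whose base is the new cthread self pointer, then patches it into the per-CPU LDT. A stand-alone sketch of just the base-address split; the struct mirrors the spirit of real_descriptor, with field widths shown for illustration only.]

#include <stdint.h>

struct desc_base {
    uint16_t base_low;    /* address bits  0..15 */
    uint8_t  base_med;    /* address bits 16..23 */
    uint8_t  base_high;   /* address bits 24..31 */
};

static struct desc_base split_base(uint32_t addr)
{
    struct desc_base d;

    d.base_low  = (uint16_t)(addr & 0xFFFF);
    d.base_med  = (uint8_t)((addr >> 16) & 0xFF);
    d.base_high = (uint8_t)((addr >> 24) & 0xFF);
    return d;
}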
*regs) { @@ -548,12 +573,52 @@ mach25_syscall(struct i386_saved_state *regs) regs->eip, regs->eax, -regs->eax); panic("FIXME!"); } - #endif /* MACH_BSD */ -#undef current_thread -thread_t -current_thread(void) + +/* This routine is called from assembly before each and every mach trap. + */ + +extern unsigned int mach_call_start(unsigned int, unsigned int *); + +__private_extern__ +unsigned int +mach_call_start(unsigned int call_number, unsigned int *args) { - return(current_thread_fast()); + int i, argc; + unsigned int kdarg[3]; + +/* Always prepare to trace mach system calls */ + + kdarg[0]=0; + kdarg[1]=0; + kdarg[2]=0; + + argc = mach_trap_table[call_number>>4].mach_trap_arg_count; + + if (argc > 3) + argc = 3; + + for (i=0; i < argc; i++) + kdarg[i] = (int)*(args + i); + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number>>4)) | DBG_FUNC_START, + kdarg[0], kdarg[1], kdarg[2], 0, 0); + + return call_number; /* pass this back thru */ +} + +/* This routine is called from assembly after each mach system call + */ + +extern unsigned int mach_call_end(unsigned int, unsigned int); + +__private_extern__ +unsigned int +mach_call_end(unsigned int call_number, unsigned int retval) +{ + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number>>4)) | DBG_FUNC_END, + retval, 0, 0, 0, 0); + return retval; /* pass this back thru */ } + diff --git a/bsd/ufs/mfs/mfs_extern.h b/osfmk/i386/commpage/bcopy_scalar.s similarity index 57% rename from bsd/ufs/mfs/mfs_extern.h rename to osfmk/i386/commpage/bcopy_scalar.s index 88fbd483b..81db7da6f 100644 --- a/bsd/ufs/mfs/mfs_extern.h +++ b/osfmk/i386/commpage/bcopy_scalar.s @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -22,14 +22,16 @@ * * @APPLE_LICENSE_HEADER_END@ */ -/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ + /*- - * Copyright (c) 1991, 1993 - * The Regents of the University of California. All rights reserved. + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from locore.s. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions - * are met: + * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright @@ -37,8 +39,8 @@ * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. + * This product includes software developed by the University of + * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. @@ -54,39 +56,74 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
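[mach_call_start traces at most three user arguments per mach trap, zeroing the unused slots, before passing the call number back through to the trap path. A sketch of that clipping step; the helper name is illustrative.]

/* Copy up to three trap arguments into a kdebug-style record; slots
 * beyond argc stay zero so short traps trace cleanly.
 */
static void clip_trace_args(const unsigned int *args, int argc,
                            unsigned int kdarg[3])
{
    int i;

    kdarg[0] = kdarg[1] = kdarg[2] = 0;
    if (argc > 3)
        argc = 3;
    for (i = 0; i < argc; i++)
        kdarg[i] = args[i];
}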
- * - * @(#)mfs_extern.h 8.2 (Berkeley) 6/16/94 */ -#ifndef __UFS_MFS_MFS_EXTERN_H__ -#define __UFS_MFS_MFS_EXTERN_H__ - + #include +#include +#include +#include -#ifdef __APPLE_API_OBSOLETE -struct buf; -struct mount; -struct nameidata; -struct proc; -struct statfs; -struct ucred; -struct vnode; + /* + * (ov)bcopy (src,dst,cnt) + * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800 + */ + +.text +.align 5, 0x90 +Lbcopy_scalar: + pushl %esi + pushl %edi + movl 12(%esp),%esi + movl 16(%esp),%edi + jmp 1f +/* +** These need to be 32 bytes from Lbcopy_scalar +*/ +.align 5, 0x90 +Lmemcpy_scalar: +Lmemmove_scalar: + pushl %esi + pushl %edi + movl 12(%esp),%edi + movl 16(%esp),%esi + movl %edi,%eax +1: + movl 20(%esp),%ecx + movl %edi,%edx + subl %esi,%edx + cmpl %ecx,%edx /* overlapping? */ + jb 2f + cld /* nope, copy forwards. */ + movl %ecx,%edx + shrl $2,%ecx /* copy by words */ + rep + movsl + movl %edx,%ecx + andl $3,%ecx /* any bytes left? */ + rep + movsb + popl %edi + popl %esi + ret +2: + addl %ecx,%edi /* copy backwards. */ + addl %ecx,%esi + std + movl %ecx,%edx + andl $3,%ecx /* any fractional bytes? */ + decl %edi + decl %esi + rep + movsb + movl %edx,%ecx /* copy remainder by words */ + shrl $2,%ecx + subl $3,%esi + subl $3,%edi + rep + movsl + popl %edi + popl %esi + cld + ret -__BEGIN_DECLS -int mfs_badop __P((void)); -int mfs_bmap __P((struct vop_bmap_args *)); -int mfs_close __P((struct vop_close_args *)); -void mfs_doio __P((struct buf *bp, caddr_t base)); -int mfs_inactive __P((struct vop_inactive_args *)); /* XXX */ -int mfs_reclaim __P((struct vop_reclaim_args *)); -int mfs_init __P((void)); -int mfs_ioctl __P((struct vop_ioctl_args *)); -int mfs_mount __P((struct mount *mp, - char *path, caddr_t data, struct nameidata *ndp, struct proc *p)); -int mfs_open __P((struct vop_open_args *)); -int mfs_print __P((struct vop_print_args *)); /* XXX */ -int mfs_start __P((struct mount *mp, int flags, struct proc *p)); -int mfs_statfs __P((struct mount *mp, struct statfs *sbp, struct proc *p)); -int mfs_strategy __P((struct vop_strategy_args *)); /* XXX */ -__END_DECLS -#endif /* __APPLE_API_OBSOLETE */ -#endif /* __UFS_MFS_MFS_EXTERN_H__ */ + COMMPAGE_DESCRIPTOR(bcopy_scalar,_COMM_PAGE_BCOPY,0,0) diff --git a/osfmk/i386/commpage/bzero_scalar.s b/osfmk/i386/commpage/bzero_scalar.s new file mode 100644 index 000000000..099871eee --- /dev/null +++ b/osfmk/i386/commpage/bzero_scalar.s @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
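[The commpage bcopy above decides direction with a single unsigned compare: if (dst - src), taken modulo 2^32, is at least the length, a forward copy cannot clobber unread source bytes; otherwise it copies backwards (the std path). The same test in C, strictly as a flat-memory sketch (pointer subtraction across unrelated objects is not portable ISO C); the helper name is illustrative.]

#include <stddef.h>

/* Overlap-safe copy: one unsigned compare picks the direction, exactly
 * as Lbcopy_scalar's "subl %esi,%edx; cmpl %ecx,%edx; jb" does.
 */
static void *overlap_safe_copy(void *dstv, const void *srcv, size_t len)
{
    char *dst = dstv;
    const char *src = srcv;

    if ((size_t)(dst - src) >= len) {
        while (len--) *dst++ = *src++;   /* forward is safe */
    } else {
        dst += len;
        src += len;
        while (len--) *--dst = *--src;   /* copy backwards */
    }
    return dstv;
}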
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1993 Winning Strategies, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Winning Strategies, Inc. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software withough specific prior written permission + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include + +/* + * bzero (void *b, size_t len) + * write len zero bytes to the string b. + * + * Written by: + * J.T. Conklin (jtc@wimsey.com), Winning Strategies, Inc. + */ + +.text +.align 5, 0x90 +Lbzero_scalar: + pushl %edi + pushl %ebx + movl 12(%esp),%edi + movl 16(%esp),%ecx + + cld /* set fill direction forward */ + xorl %eax,%eax /* set fill data to 0 */ + + /* + * if the string is too short, it's really not worth the overhead + * of aligning to word boundries, etc. So we jump to a plain + * unaligned set. + */ + cmpl $0x0f,%ecx + jle L1 + + movl %edi,%edx /* compute misalignment */ + negl %edx + andl $3,%edx + movl %ecx,%ebx + subl %edx,%ebx + + movl %edx,%ecx /* zero until word aligned */ + rep + stosb + + movl %ebx,%ecx /* zero by words */ + shrl $2,%ecx + rep + stosl + + movl %ebx,%ecx + andl $3,%ecx /* zero remainder by bytes */ +L1: rep + stosb + + popl %ebx + popl %edi + ret + + COMMPAGE_DESCRIPTOR(bzero_scalar,_COMM_PAGE_BZERO,0,0) diff --git a/osfmk/i386/commpage/cacheflush.s b/osfmk/i386/commpage/cacheflush.s new file mode 100644 index 000000000..5cf689ff3 --- /dev/null +++ b/osfmk/i386/commpage/cacheflush.s @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
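[Lbzero_scalar above skips alignment for fills of 15 bytes or fewer; otherwise it zeroes bytes up to a 4-byte boundary, fills whole words (rep stosl), and mops up the tail by bytes. A C rendering of the same strategy, assuming flat addressing; the name is illustrative.]

#include <stdint.h>
#include <stddef.h>

static void bzero_sketch(void *p, size_t len)
{
    unsigned char *b = p;

    if (len > 0x0F) {                    /* worth aligning */
        size_t head = (size_t)(-(uintptr_t)b & 3);  /* bytes to a word boundary */
        uint32_t *w;
        size_t words;

        len -= head;
        while (head--) *b++ = 0;         /* align by bytes */

        w = (uint32_t *)(void *)b;
        words = len / 4;
        while (words--) *w++ = 0;        /* fill by words */
        b = (unsigned char *)w;
        len &= 3;
    }
    while (len--) *b++ = 0;              /* tail, or the short case */
}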
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + + .text + .align 2, 0x90 + +Lsys_flush_dcache: + ret + + COMMPAGE_DESCRIPTOR(sys_flush_dcache,_COMM_PAGE_FLUSH_DCACHE,0,0) + +Lsys_icache_invalidate: + ret + + COMMPAGE_DESCRIPTOR(sys_icache_invalidate,_COMM_PAGE_FLUSH_ICACHE,0,0) diff --git a/osfmk/i386/commpage/commpage.c b/osfmk/i386/commpage/commpage.c index 2c41d6662..209d6e32b 100644 --- a/osfmk/i386/commpage/commpage.c +++ b/osfmk/i386/commpage/commpage.c @@ -23,13 +23,306 @@ * @APPLE_LICENSE_HEADER_END@ */ +/* + * Here's what to do if you want to add a new routine to the comm page: + * + * 1. Add a definition for it's address in osfmk/ppc/cpu_capabilities.h, + * being careful to reserve room for future expansion. + * + * 2. Write one or more versions of the routine, each with it's own + * commpage_descriptor. The tricky part is getting the "special", + * "musthave", and "canthave" fields right, so that exactly one + * version of the routine is selected for every machine. + * The source files should be in osfmk/ppc/commpage/. + * + * 3. Add a ptr to your new commpage_descriptor(s) in the "routines" + * array in commpage_populate(). Of course, you'll also have to + * declare them "extern" in commpage_populate(). + * + * 4. Write the code in Libc to use the new routine. + */ + +#include +#include +#include #include #include +#include +#include +#include + +static uintptr_t next = 0; // next available byte in comm page +static int cur_routine = 0; // comm page address of "current" routine +static int matched; // true if we've found a match for "current" routine + +int _cpu_capabilities = 0; // define the capability vector + +char *commPagePtr = NULL; // virtual address of comm page in kernel map + +/* Allocate the commpage and add to the shared submap created by vm: + * 1. allocate a page in the kernel map (RW) + * 2. wire it down + * 3. make a memory entry out of it + * 4. map that entry into the shared comm region map (R-only) + */ + +static void* +commpage_allocate( void ) +{ + extern vm_map_t com_region_map; // the shared submap, set up in vm init + vm_offset_t kernel_addr; // address of commpage in kernel map + vm_offset_t zero = 0; + vm_size_t size = _COMM_PAGE_AREA_LENGTH; + ipc_port_t handle; + + if (com_region_map == NULL) + panic("commpage map is null"); + + if (vm_allocate(kernel_map,&kernel_addr,_COMM_PAGE_AREA_LENGTH,VM_FLAGS_ANYWHERE)) + panic("cannot allocate commpage"); + + if (vm_map_wire(kernel_map,kernel_addr,kernel_addr+_COMM_PAGE_AREA_LENGTH,VM_PROT_DEFAULT,FALSE)) + panic("cannot wire commpage"); + + if (mach_make_memory_entry( kernel_map, // target map + &size, // size + kernel_addr, // offset (address in kernel map) + VM_PROT_DEFAULT, // map it RW + &handle, // this is the object handle we get + NULL )) // parent_entry (what is this?) 
+ panic("cannot make entry for commpage"); + + if (vm_map_64( com_region_map, // target map (shared submap) + &zero, // address (map into 1st page in submap) + _COMM_PAGE_AREA_LENGTH, // size + 0, // mask + VM_FLAGS_FIXED, // flags (it must be 1st page in submap) + handle, // port is the memory entry we just made + 0, // offset (map 1st page in memory entry) + FALSE, // copy + VM_PROT_READ, // cur_protection (R-only in user map) + VM_PROT_READ, // max_protection + VM_INHERIT_SHARE )) // inheritance + panic("cannot map commpage"); + + ipc_port_release(handle); + + return (void*) kernel_addr; // return address in kernel map +} + +/* Get address (in kernel map) of a commpage field. */ + +static void* +commpage_addr_of( + int addr_at_runtime ) +{ + return (void*) ((uintptr_t)commPagePtr + addr_at_runtime - _COMM_PAGE_BASE_ADDRESS); +} + +/* Determine number of CPUs on this system. We cannot rely on + * machine_info.max_cpus this early in the boot. + */ +static int +commpage_cpus( void ) +{ + int cpus; + + cpus = ml_get_max_cpus(); // NB: this call can block + + if (cpus == 0) + panic("commpage cpus==0"); + if (cpus > 0xFF) + cpus = 0xFF; + + return cpus; +} -int _cpu_capabilities = 0; /* define the capability vector */ +/* Initialize kernel version of _cpu_capabilities vector (used by KEXTs.) */ -void commpage_populate( void ) { +static void +commpage_init_cpu_capabilities( void ) +{ + int bits; + int cpus; + ml_cpu_info_t cpu_info; - /* no commpage on Intel yet */ + bits = 0; + ml_cpu_get_info(&cpu_info); + + switch (cpu_info.vector_unit) { + case 5: + bits |= kHasPNI; + /* fall thru */ + case 4: + bits |= kHasSSE2; + /* fall thru */ + case 3: + bits |= kHasSSE; + /* fall thru */ + case 2: + bits |= kHasMMX; + default: + break; + } + switch (cpu_info.cache_line_size) { + case 128: + bits |= kCache128; + break; + case 64: + bits |= kCache64; + break; + case 32: + bits |= kCache32; + break; + default: + break; + } + cpus = commpage_cpus(); // how many CPUs do we have + + if (cpus == 1) + bits |= kUP; + + bits |= (cpus << kNumCPUsShift); + + _cpu_capabilities = bits; // set kernel version for use by drivers etc +} + +/* Copy data into commpage. */ + +static void +commpage_stuff( + int address, + void *source, + int length ) +{ + void *dest = commpage_addr_of(address); + + if ((uintptr_t)dest < next) + panic("commpage overlap"); + + bcopy(source,dest,length); + next = ((uintptr_t)dest + length); +} + +/* Copy a routine into comm page if it matches running machine. + */ +static void +commpage_stuff_routine( + commpage_descriptor *rd ) +{ + int must,cant; + + if (rd->commpage_address != cur_routine) { + if ((cur_routine!=0) && (matched==0)) + panic("commpage no match"); + cur_routine = rd->commpage_address; + matched = 0; + } + + must = _cpu_capabilities & rd->musthave; + cant = _cpu_capabilities & rd->canthave; + + if ((must == rd->musthave) && (cant == 0)) { + if (matched) + panic("commpage duplicate matches"); + matched = 1; + + commpage_stuff(rd->commpage_address,rd->code_address,rd->code_length); + } +} + +/* Fill in commpage: called once, during kernel initialization, from the + * startup thread before user-mode code is running. + * See the top of this file for a list of what you have to do to add + * a new routine to the commpage. 
+ */ + +void +commpage_populate( void ) +{ + commpage_descriptor **rd; + short version = _COMM_PAGE_THIS_VERSION; + void *sig_addr; + + extern char commpage_sigs_begin[]; + extern char commpage_sigs_end[]; + + extern commpage_descriptor commpage_mach_absolute_time; + extern commpage_descriptor commpage_spin_lock_try_mp; + extern commpage_descriptor commpage_spin_lock_try_up; + extern commpage_descriptor commpage_spin_lock_mp; + extern commpage_descriptor commpage_spin_lock_up; + extern commpage_descriptor commpage_spin_unlock; + extern commpage_descriptor commpage_pthread_getspecific; + extern commpage_descriptor commpage_gettimeofday; + extern commpage_descriptor commpage_sys_flush_dcache; + extern commpage_descriptor commpage_sys_icache_invalidate; + extern commpage_descriptor commpage_pthread_self; + extern commpage_descriptor commpage_relinquish; + extern commpage_descriptor commpage_bzero_scalar; + extern commpage_descriptor commpage_bcopy_scalar; + + static commpage_descriptor *routines[] = { + &commpage_mach_absolute_time, + &commpage_spin_lock_try_mp, + &commpage_spin_lock_try_up, + &commpage_spin_lock_mp, + &commpage_spin_lock_up, + &commpage_spin_unlock, + &commpage_pthread_getspecific, + &commpage_gettimeofday, + &commpage_sys_flush_dcache, + &commpage_sys_icache_invalidate, + &commpage_pthread_self, + &commpage_relinquish, + &commpage_bzero_scalar, + &commpage_bcopy_scalar, + NULL + }; + + commPagePtr = (char *)commpage_allocate(); + + commpage_init_cpu_capabilities(); + + /* Stuff in the constants. We move things into the comm page in strictly + * ascending order, so we can check for overlap and panic if so. + */ + + commpage_stuff(_COMM_PAGE_VERSION,&version,sizeof(short)); + commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities, + sizeof(int)); + + for( rd = routines; *rd != NULL ; rd++ ) + commpage_stuff_routine(*rd); + + if (!matched) + panic("commpage no match on last routine"); + + if (next > ((uintptr_t)commPagePtr + PAGE_SIZE)) + panic("commpage overflow"); + +#define STUFF_SIG(addr, func) \ + extern char commpage_sig_ ## func []; \ + sig_addr = (void *)( (uintptr_t)_COMM_PAGE_BASE_ADDRESS + \ + (uintptr_t)_COMM_PAGE_SIGS_OFFSET + 0x1000 + \ + (uintptr_t)&commpage_sig_ ## func - \ + (uintptr_t)&commpage_sigs_begin ); \ + commpage_stuff(addr + _COMM_PAGE_SIGS_OFFSET, &sig_addr, sizeof(void *)); + + STUFF_SIG(_COMM_PAGE_ABSOLUTE_TIME, mach_absolute_time); + STUFF_SIG(_COMM_PAGE_SPINLOCK_TRY, spin_lock_try); + STUFF_SIG(_COMM_PAGE_SPINLOCK_LOCK, spin_lock); + STUFF_SIG(_COMM_PAGE_SPINLOCK_UNLOCK, spin_unlock); + STUFF_SIG(_COMM_PAGE_PTHREAD_GETSPECIFIC, pthread_getspecific); + STUFF_SIG(_COMM_PAGE_GETTIMEOFDAY, gettimeofday); + STUFF_SIG(_COMM_PAGE_FLUSH_DCACHE, sys_dcache_flush); + STUFF_SIG(_COMM_PAGE_FLUSH_ICACHE, sys_icache_invalidate); + STUFF_SIG(_COMM_PAGE_PTHREAD_SELF, pthread_self); + STUFF_SIG(_COMM_PAGE_BZERO, bzero); + STUFF_SIG(_COMM_PAGE_BCOPY, bcopy); + STUFF_SIG(_COMM_PAGE_MEMCPY, memmove); + + commpage_stuff(_COMM_PAGE_BASE_ADDRESS + _COMM_PAGE_SIGS_OFFSET + 0x1000, &commpage_sigs_begin, + (uintptr_t)&commpage_sigs_end - (uintptr_t)&commpage_sigs_begin); } diff --git a/osfmk/i386/commpage/commpage.h b/osfmk/i386/commpage/commpage.h index c16d89942..a7c8d80c4 100644 --- a/osfmk/i386/commpage/commpage.h +++ b/osfmk/i386/commpage/commpage.h @@ -26,6 +26,46 @@ #ifndef _I386_COMMPAGE_H #define _I386_COMMPAGE_H -/* we don't have a comm page on Intel, yet */ +#ifndef __ASSEMBLER__ +#include +#endif /* __ASSEMBLER__ */ + +#ifdef __ASSEMBLER__ +#include + 
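+/* Assembly-side counterpart of the commpage_descriptor struct defined in
+ * the C branch below: it lays down the five 32-bit fields (code address,
+ * code length, commpage address, musthave bits, canthave bits) for the
+ * routine whose code ends at this point.
+ */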
+#define COMMPAGE_DESCRIPTOR(label,address,must,cant) \ +L ## label ## _end: ;\ +.const_data ;\ +L ## label ## _size = L ## label ## _end - L ## label ;\ +.private_extern _commpage_ ## label ;\ +_commpage_ ## label ## : ;\ + .long L ## label ;\ + .long L ## label ## _size ;\ + .long address ;\ + .long must ;\ + .long cant ;\ +.text + +#else /* __ASSEMBLER__ */ + +/* Each potential commpage routine is described by one of these. + * Note that the COMMPAGE_DESCRIPTOR macro (above), used in + * assembly language, must agree with this. + */ + +typedef struct commpage_descriptor { + void *code_address; // address of code + long code_length; // length in bytes + long commpage_address; // put at this address (_COMM_PAGE_BCOPY etc) + long musthave; // _cpu_capability bits we must have + long canthave; // _cpu_capability bits we can't have +} commpage_descriptor; + + +extern char *commPagePtr; // virt address of commpage in kernel map + +extern void commpage_set_timestamp(uint64_t tbr,uint32_t secs,uint32_t usecs,uint32_t ticks_per_sec); + +#endif /* __ASSEMBLER__ */ #endif /* _I386_COMMPAGE_H */ diff --git a/osfmk/ppc/POWERMAC/mp/mp.c b/osfmk/i386/commpage/commpage_gettimeofday.s similarity index 76% rename from osfmk/ppc/POWERMAC/mp/mp.c rename to osfmk/i386/commpage/commpage_gettimeofday.s index 097d419b5..08271a7b2 100644 --- a/osfmk/ppc/POWERMAC/mp/mp.c +++ b/osfmk/i386/commpage/commpage_gettimeofday.s @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -22,14 +22,16 @@ * * @APPLE_LICENSE_HEADER_END@ */ -/* - * @OSF_FREE_COPYRIGHT@ - */ -/* - * @APPLE_FREE_COPYRIGHT@ - */ -#include -#include -MPPlugInSpec MPspec; /* An area for the MP interfaces */ -MPEntryPts MPEntries; /* Real addresses of plugin routines */ +#include +#include +#include + + .text + .align 2, 0x90 + +Lgettimeofday: + int $0x3 + ret + + COMMPAGE_DESCRIPTOR(gettimeofday,_COMM_PAGE_GETTIMEOFDAY,0,0) diff --git a/osfmk/i386/commpage/commpage_mach_absolute_time.s b/osfmk/i386/commpage/commpage_mach_absolute_time.s new file mode 100644 index 000000000..85969ce93 --- /dev/null +++ b/osfmk/i386/commpage/commpage_mach_absolute_time.s @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + + .text + .align 2, 0x90 + +Lmach_absolute_time: + int $0x3 + ret + + COMMPAGE_DESCRIPTOR(mach_absolute_time,_COMM_PAGE_ABSOLUTE_TIME,0,0) diff --git a/osfmk/i386/commpage/commpage_sigs.h b/osfmk/i386/commpage/commpage_sigs.h new file mode 100644 index 000000000..0251ee075 --- /dev/null +++ b/osfmk/i386/commpage/commpage_sigs.h @@ -0,0 +1,57 @@ +#define BSWAP_32(x) \ + ((x & 0x000000ff) << 24) | \ + ((x & 0x0000ff00) << 8) | \ + ((x & 0x00ff0000) >> 8) | \ + ((x & 0xff000000) >> 24) + +#define COMMPAGE_SIGS_BEGIN \ +.const_data ; \ +.align 2 ; \ +.private_extern _commpage_sigs_begin ; \ +_commpage_sigs_begin: + +#define COMMPAGE_SIGS_DONE \ +.private_extern _commpage_sigs_end ; \ +_commpage_sigs_end: ; \ + +#define COMMPAGE_SIG_START(x) \ +.private_extern _commpage_sig ## x ; \ +_commpage_sig ## x ## : ; \ + .long BSWAP_32(0x14400000) ; \ + .long BSWAP_32(0x00000001) ; \ + .asciz # x ; \ + .align 2 ; \ + .long BSWAP_32(0x14400000) + +#define COMMPAGE_SIG_END(x) \ + .long BSWAP_32(0x4e800020) ; \ + .long BSWAP_32(0x14400000) ; \ + .long BSWAP_32(0x00000000) ; \ + .asciz # x ; \ + .align 2 ; \ + .long BSWAP_32(0x14400000) + +#define ARG(n) \ + ((((n * 2) + 6) << 20) + 4) + +#define COMMPAGE_SIG_ARG(n) \ + .long BSWAP_32(0x14400001) ; \ + .long BSWAP_32(ARG(n)) ; \ + .long BSWAP_32(0x14400001) + +#define COMMPAGE_SIG_CALL(x, n) \ + .long BSWAP_32(0x14400002) ; \ + .long BSWAP_32(n) ; \ + .long BSWAP_32(0x00000000) ; \ + .asciz # x ; \ + .align 2 ; \ + .long BSWAP_32(0x14400002) + +#define COMMPAGE_SIG_CALL_VOID(x) \ + COMMPAGE_SIG_CALL(x, 0) + +#define COMMPAGE_SIG_CALL_RET0(x) \ + COMMPAGE_SIG_CALL(x, ARG(0)) + +#define COMMPAGE_SIG_CALL_RET1(x) \ + COMMPAGE_SIG_CALL(x, ARG(1)) diff --git a/osfmk/i386/commpage/commpage_sigs.s b/osfmk/i386/commpage/commpage_sigs.s new file mode 100644 index 000000000..573c5f7e6 --- /dev/null +++ b/osfmk/i386/commpage/commpage_sigs.s @@ -0,0 +1,69 @@ +#include "commpage_sigs.h" + +COMMPAGE_SIGS_BEGIN + +COMMPAGE_SIG_START(_mach_absolute_time) +COMMPAGE_SIG_CALL_RET0(_mach_absolute_time_high) +COMMPAGE_SIG_CALL_RET1(_mach_absolute_time_low) +COMMPAGE_SIG_END(_mach_absolute_time) + +COMMPAGE_SIG_START(_spin_lock_try) +COMMPAGE_SIG_ARG(0) +COMMPAGE_SIG_CALL_RET0(_spin_lock_try_wrapper) +COMMPAGE_SIG_END(_spin_lock_try) + +COMMPAGE_SIG_START(_spin_lock) +COMMPAGE_SIG_ARG(0) +COMMPAGE_SIG_CALL_VOID(_spin_lock) +COMMPAGE_SIG_END(_spin_lock) + +COMMPAGE_SIG_START(_spin_unlock) +COMMPAGE_SIG_ARG(0) +COMMPAGE_SIG_CALL_VOID(_spin_unlock) +COMMPAGE_SIG_END(_spin_unlock) + +COMMPAGE_SIG_START(_pthread_getspecific) +COMMPAGE_SIG_END(_pthread_getspecific) + +COMMPAGE_SIG_START(_gettimeofday) +COMMPAGE_SIG_ARG(0) +COMMPAGE_SIG_CALL_RET0(_gettimeofday_wrapper) +COMMPAGE_SIG_END(_gettimeofday) + +COMMPAGE_SIG_START(_sys_dcache_flush) +COMMPAGE_SIG_ARG(0) +COMMPAGE_SIG_ARG(1) +COMMPAGE_SIG_CALL_VOID(_sys_dcache_flush) +COMMPAGE_SIG_END(_sys_dcache_flush) + +COMMPAGE_SIG_START(_sys_icache_invalidate) +COMMPAGE_SIG_ARG(0) +COMMPAGE_SIG_ARG(1) +COMMPAGE_SIG_CALL_VOID(_sys_icache_invalidate_wrapper) +COMMPAGE_SIG_END(_sys_icache_invalidate) + +COMMPAGE_SIG_START(_pthread_self) +COMMPAGE_SIG_END(_pthread_self) + +COMMPAGE_SIG_START(_bzero) +COMMPAGE_SIG_ARG(0) +COMMPAGE_SIG_ARG(1) +COMMPAGE_SIG_CALL_VOID(_bzero) +COMMPAGE_SIG_END(_bzero) + +COMMPAGE_SIG_START(_bcopy) +COMMPAGE_SIG_ARG(0) +COMMPAGE_SIG_ARG(1) +COMMPAGE_SIG_ARG(2) +COMMPAGE_SIG_CALL_VOID(_bcopy) +COMMPAGE_SIG_END(_bcopy) + 
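+/* Each signature block above follows the same shape: COMMPAGE_SIG_START
+ * opens the record, one COMMPAGE_SIG_ARG per argument describes the
+ * routine's arguments, an optional COMMPAGE_SIG_CALL names the helper
+ * that backs the routine, and COMMPAGE_SIG_END closes the record. */
+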
+COMMPAGE_SIG_START(_memmove) +COMMPAGE_SIG_ARG(0) +COMMPAGE_SIG_ARG(1) +COMMPAGE_SIG_ARG(2) +COMMPAGE_SIG_CALL_VOID(_memmove) +COMMPAGE_SIG_END(_memmove) + +COMMPAGE_SIGS_DONE + diff --git a/osfmk/i386/commpage/pthreads.s b/osfmk/i386/commpage/pthreads.s new file mode 100644 index 000000000..bec8c6dd4 --- /dev/null +++ b/osfmk/i386/commpage/pthreads.s @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + +#define _PTHREAD_TSD_OFFSET 0x48 + + .text + .align 2, 0x90 + +Lpthread_getspecific: + movl 4(%esp), %eax + movl %gs:_PTHREAD_TSD_OFFSET(,%eax,4), %eax + ret + + COMMPAGE_DESCRIPTOR(pthread_getspecific,_COMM_PAGE_PTHREAD_GETSPECIFIC,0,0) + +Lpthread_self: + movl 4(%esp), %eax + movl %gs:_PTHREAD_TSD_OFFSET, %eax + ret + + COMMPAGE_DESCRIPTOR(pthread_self,_COMM_PAGE_PTHREAD_SELF,0,0) diff --git a/osfmk/i386/commpage/spinlocks.s b/osfmk/i386/commpage/spinlocks.s new file mode 100644 index 000000000..6e3b0e64b --- /dev/null +++ b/osfmk/i386/commpage/spinlocks.s @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + +/* + * We need a relative branch within the comm page, and don't want the linker + * to relocate it, so we have to hand-code the instructions. LEN is to account + * for the length of a .long, since the jmp is relative to the next instruction. 
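+ * For example, "JNZ target - . - LEN" assembles to 0f 85 <rel32>: the
+ * 0x0f,0x85 bytes are the jnz rel32 opcode and the .long holds the
+ * displacement. Because "." is evaluated at the start of the .long rather
+ * than at the end of the instruction, LEN (the 4 bytes of the .long
+ * itself) must be subtracted from the target expression.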
+ */ + +#define JNZ .byte 0x0f, 0x85; .long +#define JMP .byte 0xe9; .long +#define LEN 4 + +/* + * Branch prediction prefixes + */ + +#define LIKELY .byte 0x3e +#define UNLIKELY .byte 0x2e + +#define MP_SPIN_TRIES 1024 + + .text + .align 4, 0x90 + +Lspin_lock_try_up: + movl 4(%esp), %ecx + xorl %eax, %eax + cmpxchgl %ecx, (%ecx) + setz %dl + movzbl %dl, %eax + ret + + COMMPAGE_DESCRIPTOR(spin_lock_try_up,_COMM_PAGE_SPINLOCK_TRY,kUP,0) + + .align 4, 0x90 +Lspin_lock_try_mp: + movl 4(%esp), %ecx + xorl %eax, %eax + lock + cmpxchgl %ecx, (%ecx) + setz %dl + movzbl %dl, %eax + ret + + COMMPAGE_DESCRIPTOR(spin_lock_try_mp,_COMM_PAGE_SPINLOCK_TRY,0,kUP) + +.set Lrelinquish_off, _COMM_PAGE_RELINQUISH - _COMM_PAGE_SPINLOCK_LOCK + + .align 4, 0x90 +Lspin_lock_up: + movl 4(%esp), %ecx + xorl %eax, %eax +.set Lretry, . - Lspin_lock_up + cmpxchgl %ecx, (%ecx) + UNLIKELY + JNZ Lrelinquish_off - . + Lspin_lock_up - LEN + ret + + COMMPAGE_DESCRIPTOR(spin_lock_up,_COMM_PAGE_SPINLOCK_LOCK,kUP,0) + + .align 4, 0x90 +Lspin_lock_mp: + movl 4(%esp), %ecx + xorl %eax, %eax +0: + lock + cmpxchgl %ecx, (%ecx) + UNLIKELY + jnz 1f + ret +1: + xorl %eax, %eax + movl $(MP_SPIN_TRIES), %edx +2: + pause + cmpl %eax, (%ecx) + LIKELY + jz 0b + decl %edx + LIKELY + jnz 2b + JMP Lrelinquish_off - . + Lspin_lock_mp - LEN + + COMMPAGE_DESCRIPTOR(spin_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,0,kUP) + + .align 4, 0x90 +Lspin_unlock: + movl 4(%esp), %ecx + movl $0, (%ecx) + ret + + COMMPAGE_DESCRIPTOR(spin_unlock,_COMM_PAGE_SPINLOCK_UNLOCK,0,0) + + .align 4, 0x90 +Lrelinquish: /* relinquish the processor */ + pushl $1 /* 1 ms */ + pushl $1 /* SWITCH_OPTION_DEPRESS */ + pushl $0 /* THREAD_NULL */ + movl $-61, %eax /* syscall_thread_switch */ + lcall $7, $0 + popl %eax /* set %eax to 0 again */ + popl %edx /* use %edx as scratch */ + popl %edx /* reg to fixup stack */ + JMP Lretry - Lrelinquish_off - . 
+		Lrelinquish - LEN
+
+	COMMPAGE_DESCRIPTOR(relinquish,_COMM_PAGE_RELINQUISH,0,0)
diff --git a/osfmk/i386/cpu.c b/osfmk/i386/cpu.c
index dea338a3c..3ef513a27 100644
--- a/osfmk/i386/cpu.c
+++ b/osfmk/i386/cpu.c
@@ -31,10 +31,16 @@
 #include
 #include
 #include
+#include
 #include
 #include
+#include
+#include
+#include
 
 cpu_data_t cpu_data[NCPUS];
+int real_ncpus = 0;
+int wncpu = NCPUS;
 
 /*ARGSUSED*/
 kern_return_t
@@ -74,3 +80,103 @@ cpu_sleep()
 {
 	printf("cpu_sleep not implemented\n");
 }
+
+void
+cpu_init()
+{
+	int	my_cpu = get_cpu_number();
+
+	machine_slot[my_cpu].is_cpu = TRUE;
+	machine_slot[my_cpu].running = TRUE;
+#ifdef MACH_BSD
+	/* FIXME */
+	machine_slot[my_cpu].cpu_type = CPU_TYPE_I386;
+	machine_slot[my_cpu].cpu_subtype = CPU_SUBTYPE_PENTPRO;
+#else
+	machine_slot[my_cpu].cpu_type = cpuid_cputype(0);
+	machine_slot[my_cpu].cpu_subtype = CPU_SUBTYPE_AT386;
+#endif
+
+#if NCPUS > 1
+	mp_desc_init(my_cpu);
+#endif /* NCPUS > 1 */
+}
+
+kern_return_t
+cpu_register(
+	int *target_cpu)
+{
+	int cpu;
+
+	if (real_ncpus == 0) {
+		/*
+		 * Special case for the boot processor;
+		 * it has been pre-registered by cpu_init().
+		 */
+		*target_cpu = 0;
+		real_ncpus++;
+		return KERN_SUCCESS;
+	}
+
+	/*
+	 * TODO:
+	 * - Run cpu_register() under mutual exclusion
+	 */
+
+	*target_cpu = -1;
+	for(cpu=0; cpu < wncpu; cpu++) {
+		if(!machine_slot[cpu].is_cpu) {
+			machine_slot[cpu].is_cpu = TRUE;
+#ifdef MACH_BSD
+			/* FIXME */
+			machine_slot[cpu].cpu_type = CPU_TYPE_I386;
+			machine_slot[cpu].cpu_subtype = CPU_SUBTYPE_PENTPRO;
+#else
+			machine_slot[cpu].cpu_type = cpuid_cputype(0);
+			machine_slot[cpu].cpu_subtype = CPU_SUBTYPE_AT386;
+#endif
+			*target_cpu = cpu;
+			break;
+		}
+	}
+
+	if (*target_cpu != -1) {
+		real_ncpus++;
+		return KERN_SUCCESS;
+	} else
+		return KERN_FAILURE;
+}
+
+kern_return_t
+cpu_start(
+	int cpu)
+{
+	kern_return_t	ret;
+
+	if (cpu == cpu_number()) {
+		PE_cpu_machine_init(cpu_data[cpu].cpu_id, TRUE);
+		ml_init_interrupt();
+		cpu_data[cpu].cpu_status = 1;
+		return KERN_SUCCESS;
+	} else {
+		/*
+		 * Should call out through PE.
+		 * But take the shortcut here.
+		 */
+		ret = intel_startCPU(cpu);
+		return(ret);
+	}
+}
+
+void
+cpu_machine_init(
+	void)
+{
+	int	cpu;
+
+	cpu = get_cpu_number();
+	PE_cpu_machine_init(cpu_data[cpu].cpu_id, TRUE);
+	ml_init_interrupt();
+	cpu_data[cpu].cpu_status = 1;
+}
+
diff --git a/osfmk/i386/cpu_capabilities.h b/osfmk/i386/cpu_capabilities.h
index bd0297633..cf5eaa44e 100644
--- a/osfmk/i386/cpu_capabilities.h
+++ b/osfmk/i386/cpu_capabilities.h
@@ -34,26 +34,144 @@ #define __APPLE_API_PRIVATE
 #endif /* __APPLE_API_PRIVATE */
 #endif /* _APPLE_API_PRIVATE */
-
+
 #ifndef __APPLE_API_PRIVATE
 #error cpu_capabilities.h is for Apple Internal use only
 #else /* __APPLE_API_PRIVATE */
 
-/* _cpu_capabilities
- *
+/*
  * This is the authoritative way to determine from user mode what
  * implementation-specific processor features are available.
  * This API is only supported for Apple internal use.
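 * For example, (_get_cpu_capabilities() & kHasSSE2) tests for SSE2
 * support, and _NumCPUs() (below) extracts the CPU count from the
 * kNumCPUs field of the vector.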
* */ +/* Bit definitions for _cpu_capabilities: */ + +#define kHasMMX 0x00000001 +#define kHasSSE 0x00000002 +#define kHasSSE2 0x00000004 +#define kHasPNI 0x00000008 // Prescott New Instructions +#define kCache32 0x00000010 // cache line size is 32 bytes +#define kCache64 0x00000020 +#define kCache128 0x00000040 + +#define kUP 0x00008000 // set if (kNumCPUs == 1) +#define kNumCPUs 0x00FF0000 // number of CPUs (see _NumCPUs() below) + +#define kNumCPUsShift 16 // see _NumCPUs() below + #ifndef __ASSEMBLER__ -extern int _cpu_capabilities; +extern uint32_t _get_cpu_capabilities( void ); + +inline static +int _NumCPUs( void ) +{ + return (_get_cpu_capabilities() & kNumCPUs) >> kNumCPUsShift; +} + +#endif /* __ASSEMBLER__ */ + + +/* + * The shared kernel/user "comm page(s)": + * + * The last eight pages of every address space are reserved for the kernel/user + * "comm area". During system initialization, the kernel populates the comm page with + * code customized for the particular processor and platform. + * + * Because Mach VM cannot map the last page of an address space, the max length of + * the comm area is seven pages. + */ + +#define _COMM_PAGE_BASE_ADDRESS 0xBFFF9000 // VM_MAX_ADDRESS - 7 * 4096 +#define _COMM_PAGE_SIGS_OFFSET 0x4000 // offset to routine signatures +#define _COMM_PAGE_AREA_LENGTH ( 7*4096) // reserved length of entire comm area + +/* data in the comm page */ -#endif +#define _COMM_PAGE_SIGNATURE (_COMM_PAGE_BASE_ADDRESS+0x000) // first few bytes are a signature +#define _COMM_PAGE_VERSION (_COMM_PAGE_BASE_ADDRESS+0x01E) // 16-bit version# +#define _COMM_PAGE_THIS_VERSION 1 // this is version 1 of the commarea format + +#define _COMM_PAGE_CPU_CAPABILITIES (_COMM_PAGE_BASE_ADDRESS+0x020) // uint32_t _cpu_capabilities +#define _COMM_PAGE_NCPUS (_COMM_PAGE_BASE_ADDRESS+0x021) // uint8_t number of configured CPUs +#define _COMM_PAGE_VECTOR_FLAVOR (_COMM_PAGE_BASE_ADDRESS+0x024) // uint8_t SSE/SSE2/PNI +#define _COMM_PAGE_CACHE_LINESIZE (_COMM_PAGE_BASE_ADDRESS+0x026) // uint16_t cache line size + +#define _COMM_PAGE_UNUSED1 (_COMM_PAGE_BASE_ADDRESS+0x030) // 16 unused bytes + +#define _COMM_PAGE_2_TO_52 (_COMM_PAGE_BASE_ADDRESS+0x040) // double float constant 2**52 +#define _COMM_PAGE_10_TO_6 (_COMM_PAGE_BASE_ADDRESS+0x048) // double float constant 10**6 + +#define _COMM_PAGE_UNUSED2 (_COMM_PAGE_BASE_ADDRESS+0x050) // 16 unused bytes + +#define _COMM_PAGE_TIMEBASE (_COMM_PAGE_BASE_ADDRESS+0x060) // used by gettimeofday() +#define _COMM_PAGE_TIMESTAMP (_COMM_PAGE_BASE_ADDRESS+0x068) // used by gettimeofday() +#define _COMM_PAGE_SEC_PER_TICK (_COMM_PAGE_BASE_ADDRESS+0x070) // used by gettimeofday() + +#define _COMM_PAGE_UNUSED3 (_COMM_PAGE_BASE_ADDRESS+0x080) // 384 unused bytes + + /* jump table (bla to this address, which may be a branch to the actual code somewhere else) */ + /* When new jump table entries are added, corresponding symbols should be added below */ + +#define _COMM_PAGE_ABSOLUTE_TIME (_COMM_PAGE_BASE_ADDRESS+0x200) // mach_absolute_time() +#define _COMM_PAGE_SPINLOCK_TRY (_COMM_PAGE_BASE_ADDRESS+0x220) // spinlock_try() +#define _COMM_PAGE_SPINLOCK_LOCK (_COMM_PAGE_BASE_ADDRESS+0x260) // spinlock_lock() +#define _COMM_PAGE_SPINLOCK_UNLOCK (_COMM_PAGE_BASE_ADDRESS+0x2a0) // spinlock_unlock() +#define _COMM_PAGE_PTHREAD_GETSPECIFIC (_COMM_PAGE_BASE_ADDRESS+0x2c0) // pthread_getspecific() +#define _COMM_PAGE_GETTIMEOFDAY (_COMM_PAGE_BASE_ADDRESS+0x2e0) // used by gettimeofday() +#define _COMM_PAGE_FLUSH_DCACHE (_COMM_PAGE_BASE_ADDRESS+0x4e0) // 
sys_dcache_flush() +#define _COMM_PAGE_FLUSH_ICACHE (_COMM_PAGE_BASE_ADDRESS+0x520) // sys_icache_invalidate() +#define _COMM_PAGE_PTHREAD_SELF (_COMM_PAGE_BASE_ADDRESS+0x580) // pthread_self() +#define _COMM_PAGE_UNUSED4 (_COMM_PAGE_BASE_ADDRESS+0x5a0) // 32 unused bytes +#define _COMM_PAGE_RELINQUISH (_COMM_PAGE_BASE_ADDRESS+0x5c0) // used by spinlocks + +#define _COMM_PAGE_UNUSED5 (_COMM_PAGE_BASE_ADDRESS+0x5e0) // 32 unused bytes + +#define _COMM_PAGE_BZERO (_COMM_PAGE_BASE_ADDRESS+0x600) // bzero() +#define _COMM_PAGE_BCOPY (_COMM_PAGE_BASE_ADDRESS+0x780) // bcopy() +#define _COMM_PAGE_MEMCPY (_COMM_PAGE_BASE_ADDRESS+0x7a0) // memcpy() +#define _COMM_PAGE_MEMMOVE (_COMM_PAGE_BASE_ADDRESS+0x7a0) // memmove() -/* Bit definitions for _cpu_capabilities: */ +#define _COMM_PAGE_UNUSED6 (_COMM_PAGE_BASE_ADDRESS+0xF80) // 128 unused bytes + +#define _COMM_PAGE_BIGCOPY (_COMM_PAGE_BASE_ADDRESS+0x1000)// very-long-operand copies + +#define _COMM_PAGE_END (_COMM_PAGE_BASE_ADDRESS+0x1600)// end of common page + +#ifdef __ASSEMBLER__ +#ifdef __COMM_PAGE_SYMBOLS + +#define CREATE_COMM_PAGE_SYMBOL(symbol_name, symbol_address) \ + .org (symbol_address - (_COMM_PAGE_BASE_ADDRESS & 0xFFFFE000)) ;\ +symbol_name: nop + + .text // Required to make a well behaved symbol file + + CREATE_COMM_PAGE_SYMBOL(___mach_absolute_time, _COMM_PAGE_ABSOLUTE_TIME) + CREATE_COMM_PAGE_SYMBOL(___spin_lock_try, _COMM_PAGE_SPINLOCK_TRY) + CREATE_COMM_PAGE_SYMBOL(___spin_lock, _COMM_PAGE_SPINLOCK_LOCK) + CREATE_COMM_PAGE_SYMBOL(___spin_unlock, _COMM_PAGE_SPINLOCK_UNLOCK) + CREATE_COMM_PAGE_SYMBOL(___pthread_getspecific, _COMM_PAGE_PTHREAD_GETSPECIFIC) + CREATE_COMM_PAGE_SYMBOL(___gettimeofday, _COMM_PAGE_GETTIMEOFDAY) + CREATE_COMM_PAGE_SYMBOL(___sys_dcache_flush, _COMM_PAGE_FLUSH_DCACHE) + CREATE_COMM_PAGE_SYMBOL(___sys_icache_invalidate, _COMM_PAGE_FLUSH_ICACHE) + CREATE_COMM_PAGE_SYMBOL(___pthread_self, _COMM_PAGE_PTHREAD_SELF) + CREATE_COMM_PAGE_SYMBOL(___spin_lock_relinquish, _COMM_PAGE_RELINQUISH) + CREATE_COMM_PAGE_SYMBOL(___bzero, _COMM_PAGE_BZERO) + CREATE_COMM_PAGE_SYMBOL(___bcopy, _COMM_PAGE_BCOPY) + CREATE_COMM_PAGE_SYMBOL(___memcpy, _COMM_PAGE_MEMCPY) +// CREATE_COMM_PAGE_SYMBOL(___memmove, _COMM_PAGE_MEMMOVE) + CREATE_COMM_PAGE_SYMBOL(___bigcopy, _COMM_PAGE_BIGCOPY) + CREATE_COMM_PAGE_SYMBOL(___end_comm_page, _COMM_PAGE_END) + + .data // Required to make a well behaved symbol file + .long 0 // Required to make a well behaved symbol file + +#endif /* __COMM_PAGE_SYMBOLS */ +#endif /* __ASSEMBLER__ */ #endif /* __APPLE_API_PRIVATE */ #endif /* _I386_CPU_CAPABILITIES_H */ diff --git a/osfmk/i386/cpu_data.h b/osfmk/i386/cpu_data.h index e4ba6fd33..878789bb2 100644 --- a/osfmk/i386/cpu_data.h +++ b/osfmk/i386/cpu_data.h @@ -37,103 +37,99 @@ #include #include +#include -#if 0 -#ifndef __OPTIMIZE__ -#define extern static -#endif -#endif +typedef struct +{ + thread_act_t *active_thread; + int preemption_level; + int simple_lock_count; + int interrupt_level; + int cpu_number; /* Logical CPU number */ + int cpu_phys_number; /* Physical CPU Number */ + cpu_id_t cpu_id; /* Platform Expert handle */ + int cpu_status; /* Boot Status */ + int cpu_signals; /* IPI events */ + int mcount_off; /* mcount recursion flag */ +} cpu_data_t; extern cpu_data_t cpu_data[NCPUS]; -#define get_cpu_data() &cpu_data[cpu_number()] +/* Macro to generate inline bodies to retrieve per-cpu data fields. 
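 * For example, get_cpu_number() below expands to roughly
 *	int ret;
 *	__asm__ volatile ("movl %%gs:%P1,%0" : "=r" (ret)
 *		: "i" (offsetof(cpu_data_t,cpu_number)));
 *	return ret;
 * i.e. a single %gs-relative load of the field for the executing CPU.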
*/ +#define offsetof(TYPE,MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#define CPU_DATA_GET(field,type) \ + type ret; \ + __asm__ volatile ("movl %%gs:%P1,%0" \ + : "=r" (ret) \ + : "i" (offsetof(cpu_data_t,field))); \ + return ret; /* * Everyone within the osfmk part of the kernel can use the fast * inline versions of these routines. Everyone outside, must call * the real thing, */ -extern thread_t __inline__ current_thread_fast(void); -extern thread_t __inline__ current_thread_fast(void) +extern thread_act_t __inline__ get_active_thread(void) { - register thread_t ct; - register int idx = (int)&((cpu_data_t *)0)->active_thread; - - __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (ct) : "r" (idx)); - - return (ct); + CPU_DATA_GET(active_thread,thread_act_t) } +#define current_act_fast() get_active_thread() +#define current_act() current_act_fast() +#define current_thread() current_act_fast()->thread -#define current_thread() current_thread_fast() - -extern int __inline__ get_preemption_level(void); -extern void __inline__ disable_preemption(void); -extern void __inline__ enable_preemption(void); -extern void __inline__ enable_preemption_no_check(void); -extern void __inline__ mp_disable_preemption(void); -extern void __inline__ mp_enable_preemption(void); -extern void __inline__ mp_enable_preemption_no_check(void); -extern int __inline__ get_simple_lock_count(void); -extern int __inline__ get_interrupt_level(void); - -extern int __inline__ get_preemption_level(void) +extern int __inline__ get_preemption_level(void) { - register int idx = (int)&((cpu_data_t *)0)->preemption_level; - register int pl; - - __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx)); - - return (pl); + CPU_DATA_GET(preemption_level,int) +} +extern int __inline__ get_simple_lock_count(void) +{ + CPU_DATA_GET(simple_lock_count,int) +} +extern int __inline__ get_interrupt_level(void) +{ + CPU_DATA_GET(interrupt_level,int) +} +extern int __inline__ get_cpu_number(void) +{ + CPU_DATA_GET(cpu_number,int) +} +extern int __inline__ get_cpu_phys_number(void) +{ + CPU_DATA_GET(cpu_phys_number,int) } extern void __inline__ disable_preemption(void) { -#if MACH_ASSERT - extern void _disable_preemption(void); - - _disable_preemption(); -#else /* MACH_ASSERT */ register int idx = (int)&((cpu_data_t *)0)->preemption_level; __asm__ volatile (" incl %%gs:(%0)" : : "r" (idx)); -#endif /* MACH_ASSERT */ } extern void __inline__ enable_preemption(void) { -#if MACH_ASSERT - extern void _enable_preemption(void); - - assert(get_preemption_level() > 0); - _enable_preemption(); -#else /* MACH_ASSERT */ extern void kernel_preempt_check (void); register int idx = (int)&((cpu_data_t *)0)->preemption_level; register void (*kpc)(void)= kernel_preempt_check; + assert(get_preemption_level() > 0); + __asm__ volatile ("decl %%gs:(%0); jne 1f; \ call %1; 1:" : /* no outputs */ : "r" (idx), "r" (kpc) : "%eax", "%ecx", "%edx", "cc", "memory"); -#endif /* MACH_ASSERT */ } extern void __inline__ enable_preemption_no_check(void) { -#if MACH_ASSERT - extern void _enable_preemption_no_check(void); + register int idx = (int)&((cpu_data_t *)0)->preemption_level; assert(get_preemption_level() > 0); - _enable_preemption_no_check(); -#else /* MACH_ASSERT */ - register int idx = (int)&((cpu_data_t *)0)->preemption_level; __asm__ volatile ("decl %%gs:(%0)" : /* no outputs */ : "r" (idx) : "cc", "memory"); -#endif /* MACH_ASSERT */ } extern void __inline__ mp_disable_preemption(void) @@ -157,26 +153,6 @@ extern void __inline__ mp_enable_preemption_no_check(void) 
#endif /* NCPUS > 1 */ } -extern int __inline__ get_simple_lock_count(void) -{ - register int idx = (int)&((cpu_data_t *)0)->simple_lock_count; - register int pl; - - __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx)); - - return (pl); -} - -extern int __inline__ get_interrupt_level(void) -{ - register int idx = (int)&((cpu_data_t *)0)->interrupt_level; - register int pl; - - __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx)); - - return (pl); -} - #if 0 #ifndef __OPTIMIZE__ #undef extern diff --git a/osfmk/i386/cpu_number.h b/osfmk/i386/cpu_number.h index b1da54868..97b7c0d3d 100644 --- a/osfmk/i386/cpu_number.h +++ b/osfmk/i386/cpu_number.h @@ -61,69 +61,17 @@ #ifndef _I386_CPU_NUMBER_H_ #define _I386_CPU_NUMBER_H_ -#include - -#ifdef __APPLE_API_UNSTABLE -extern int cpu_number(void); - -#ifdef MACH_KERNEL_PRIVATE - -#include -#include - -#include - #if MP_V1_1 -#include -#include - -extern int lapic_id; - -extern __inline__ int cpu_number(void) -{ - register int cpu; - __asm__ volatile ("movl " CC_SYM_PREFIX "lapic_id, %0\n" - " movl 0(%0), %0\n" - " shrl %1, %0\n" - " andl %2, %0" - : "=r" (cpu) - : "i" (LAPIC_ID_SHIFT), "i" (LAPIC_ID_MASK)); +/* Get the cpu number directly from the pre-processor data area */ +#include +#define cpu_number() get_cpu_number() - return(cpu); -} #else /* MP_V1_1 */ -/* - * At least one corollary cpu type does not have local memory at all. - * The only way I found to store the cpu number was in some 386/486 - * system register. cr3 has bits 0, 1, 2 and 5, 6, 7, 8, 9, 10, 11 - * available. Right now we use 0, 1 and 2. So we are limited to 8 cpus. - * For more cpus, we could use bits 5 - 11 with a shift. - * - * Even for other machines, like COMPAQ this is much faster the inb/outb - * 4 cycles instead of 10 to 30. - */ -#if defined(__GNUC__) -#if NCPUS > 8 -#error cpu_number() definition only works for #cpus <= 8 -#else - -extern __inline__ int cpu_number(void) -{ - register int cpu; - __asm__ volatile ("movl %%cr3, %0\n" - " andl $0x7, %0" - : "=r" (cpu)); - return(cpu); -} -#endif -#endif /* defined(__GNUC__) */ +/* Use a function to do this less directly. */ +extern int cpu_number(void); #endif /* MP_V1_1 */ -#endif /* MACH_KERNEL_PRIVATE */ - -#endif /* __APPLE_API_UNSTABLE */ - #endif /* _I386_CPU_NUMBER_H_ */ diff --git a/osfmk/i386/cpuid.c b/osfmk/i386/cpuid.c index 8ad0060bc..395ed4b6f 100644 --- a/osfmk/i386/cpuid.c +++ b/osfmk/i386/cpuid.c @@ -26,387 +26,431 @@ * @OSF_COPYRIGHT@ */ -/* - * Values from http://einstein.et.tudelft.nl/~offerman/chiplist.html - * (dated 18 Oct 1995) - */ +#include "cpuid.h" -#include -#include +#define min(a,b) ((a) < (b) ? (a) : (b)) /* - * Generic product array (before CPUID) + * CPU identification routines. + * + * Note that this code assumes a processor that supports the + * 'cpuid' instruction. 
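+ * The do_cpuid() wrapper (see cpuid.h) executes the instruction with
+ * eax = selector and returns eax..edx in data[0..3]; e.g. leaf 0 yields
+ * the maximum basic leaf in data[0] and the vendor string, in
+ * ebx:edx:ecx order, across data[1..3].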
*/ -unsigned int cpuid_i386_freq[] = { 12, 16, 20, 25, 33, 0 }; -unsigned int cpuid_i486_freq[] = { 20, 25, 33, 50, 0 }; - -struct cpuid_product cpuid_generic[] = { - { - 0, CPUID_FAMILY_386, 0, - 80, cpuid_i386_freq, "i386" - }, - { - 0, CPUID_FAMILY_486, 0, - 240, cpuid_i486_freq, "i486" - }, -}; -/* - * INTEL product array - */ -unsigned int cpuid_i486_dx_freq[] = { 20, 25, 33, 0 }; -unsigned int cpuid_i486_dx_s_freq[] = { 50, 0 }; -unsigned int cpuid_i486_sx_freq[] = { 16, 20, 25, 33, 0 }; -unsigned int cpuid_i486_dx2_freq[] = { 32, 40, 50, 66, 0 }; -unsigned int cpuid_i486_sl_freq[] = { 25, 33, 0 }; -unsigned int cpuid_i486_sx2_freq[] = { 50, 0 }; -unsigned int cpuid_i486_dx2wb_freq[] = { 50, 66, 0 }; -unsigned int cpuid_i486_dx4_freq[] = { 90, 100, 0 }; - -unsigned int cpuid_i486_dx2wb_od_freq[] = { 32, 40, 50, 66, 0 }; -unsigned int cpuid_i486_dx4_od_freq[] = { 75, 99, 0 }; - -unsigned int cpuid_p5_freq[] = { 60, 66, 0 }; -unsigned int cpuid_p54_freq[] = { 60, 66, 75, 90, 100, 120, 133, 166, 200, 0 }; - -unsigned int cpuid_p24t_freq[] = { 25, 33, 0 }; -unsigned int cpuid_p24ct_freq[] = { 63, 83, 0 }; - -unsigned int cpuid_pii_freq[] = { 300, 0 }; - -struct cpuid_product cpuid_intel[] = { - { - CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_DX, - 240, cpuid_i486_dx_freq, "Intel 486DX" - }, - { - CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_DX_S, - 240, cpuid_i486_dx_s_freq, "Intel 486DX-S" - }, - { - CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_SX, - 240, cpuid_i486_sx_freq, "Intel 486SX" - }, - { - CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_DX2, - 240, cpuid_i486_dx2_freq, "Intel 486DX2" - }, - { - CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_SL, - 240, cpuid_i486_sl_freq, "Intel 486SL" - }, - { - CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_SX2, - 240, cpuid_i486_sx2_freq, "Intel 486SX2" - }, - { - CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_DX2WB, - 240, cpuid_i486_dx2wb_freq, "Intel 486DX2WB" - }, - { - CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_DX4, - 240, cpuid_i486_dx4_freq, "Intel 486DX4" - }, - { - CPUID_TYPE_OVERDRIVE, CPUID_FAMILY_486, CPUID_MODEL_I486_DX2, - 240, cpuid_i486_dx2_freq, "Intel 486DX2 OverDrive" - }, - { - CPUID_TYPE_OVERDRIVE, CPUID_FAMILY_486, CPUID_MODEL_I486_DX2WB, - 240, cpuid_i486_dx2wb_od_freq, "Intel 486DX2WB OverDrive" - }, - { - CPUID_TYPE_OVERDRIVE, CPUID_FAMILY_486, CPUID_MODEL_I486_DX4, - 240, cpuid_i486_dx4_od_freq, "Intel 486DX4 OverDrive" - }, - { - CPUID_TYPE_OVERDRIVE, CPUID_FAMILY_P5, CPUID_MODEL_P24T, - 208, cpuid_p24t_freq, "Intel Pentium P24T OverDrive" - }, - { - CPUID_TYPE_OVERDRIVE, CPUID_FAMILY_P5, CPUID_MODEL_P54, - 207, cpuid_p24ct_freq, "Intel Pentium P24CT OverDrive" - }, - { - CPUID_TYPE_OEM, CPUID_FAMILY_P5, CPUID_MODEL_P5A, - 207, cpuid_p5_freq, "Intel Pentium P5 rev A" - }, - { - CPUID_TYPE_OEM, CPUID_FAMILY_P5, CPUID_MODEL_P5, - 207, cpuid_p5_freq, "Intel Pentium P5" - }, - { - CPUID_TYPE_OEM, CPUID_FAMILY_P5, CPUID_MODEL_P54, - 207, cpuid_p54_freq, "Intel Pentium P54" - }, - { - CPUID_TYPE_OEM, CPUID_FAMILY_PPRO, CPUID_MODEL_PII, - 480, cpuid_pii_freq, "Intel Pentium II" - } -}; -unsigned int cpuid_intel_size = sizeof (cpuid_intel) / sizeof (cpuid_intel[0]); +static unsigned int cpuid_maxcpuid; -/* - * AMD product arrays - */ -unsigned int cpuid_am486_dx_freq[] = { 33, 40, 0 }; -unsigned int cpuid_am486_dx2_freq[] = { 50, 66, 80, 99, 0 }; -unsigned int cpuid_am486_dx4_freq[] = { 99, 120, 133, 0 }; -unsigned int cpuid_am486_dx4wb_freq[] = { 99, 120, 133, 0 }; +static 
i386_cpu_info_t cpuid_cpu_info; -/* - * UMC product array - */ -unsigned int cpuid_u5sd_freq[] = { 25, 33, 40, 0 }; -unsigned int cpuid_u5s_freq[] = { 25, 33, 40, 0 }; +uint32_t cpuid_feature; /* XXX obsolescent for compat */ /* - * Vendor ID array + * We only identify Intel CPUs here. Adding support + * for others would be straightforward. */ -struct cpuid_name cpuid_name[] = { - { CPUID_VID_INTEL, - cpuid_intel, sizeof (cpuid_intel) / sizeof (cpuid_intel[0]) - }, - { CPUID_VID_UMC, - (struct cpuid_product *)0, - }, - { CPUID_VID_AMD, - (struct cpuid_product *)0, - }, - { CPUID_VID_CYRIX, - (struct cpuid_product *)0, - }, - { CPUID_VID_NEXTGEN, - (struct cpuid_product *)0 - }, - { "", - cpuid_generic, sizeof (cpuid_generic) / sizeof (cpuid_generic[0]) - }, - { (char *)0, - } +static void set_cpu_intel(i386_cpu_info_t *); +static void set_cpu_unknown(i386_cpu_info_t *); + +struct { + char *vendor; + void (* func)(i386_cpu_info_t *); +} cpu_vendors[] = { + {CPUID_VID_INTEL, set_cpu_intel}, + {0, set_cpu_unknown} }; +void +cpuid_get_info(i386_cpu_info_t *info_p) +{ + uint32_t cpuid_result[4]; + int i; + + bzero((void *)info_p, sizeof(i386_cpu_info_t)); + + /* do cpuid 0 to get vendor */ + do_cpuid(0, cpuid_result); + cpuid_maxcpuid = cpuid_result[0]; + bcopy((char *)&cpuid_result[1], &info_p->cpuid_vendor[0], 4); /* ugh */ + bcopy((char *)&cpuid_result[2], &info_p->cpuid_vendor[8], 4); + bcopy((char *)&cpuid_result[3], &info_p->cpuid_vendor[4], 4); + info_p->cpuid_vendor[12] = 0; + + /* look up vendor */ + for (i = 0; ; i++) { + if ((cpu_vendors[i].vendor == 0) || + (!strcmp(cpu_vendors[i].vendor, info_p->cpuid_vendor))) { + cpu_vendors[i].func(info_p); + break; + } + } +} + /* - * Feature Flag values + * A useful model name string takes some decoding. */ -char *cpuid_flag[] = { - "FPU", /* Floating point unit on-chip */ - "VME", /* Virtual Mode Extension */ - "DE", /* Debugging Extension */ - "PSE", /* Page Size Extension */ - "TSC", /* Time Stamp Counter */ - "MSR", /* Model Specific Registers */ - "PAE", /* Physical Address Extension */ - "MCE", /* Machine Check Exception */ - "CX8", /* CMPXCHG8 Instruction sSupported */ - "APIC", /* Local APIC Supported */ - "(bit 10)", - "(bit 11)", - "MTRR", /* Machine Type Range Register */ - "PGE", /* Page Global Enable */ - "MCA", /* Machine Check Architecture */ - "CMOV", /* Conditional Move Instruction Supported */ - "(bit 16)", - "(bit 17)", - "(bit 18)", - "(bit 19)", - "(bit 20)", - "(bit 21)", - "(bit 22)", - "MMX", /* Supports MMX instructions */ - "(bit 24)", - "(bit 25)", - "(bit 26)", - "(bit 27)", - "(bit 28)", - "(bit 29)", - "(bit 30)", - "(bit 31)", -}; +char * +cpuid_intel_get_model_name( + uint8_t brand, + uint8_t family, + uint8_t model, + uint32_t signature) +{ + /* check for brand id */ + switch(brand) { + case 0: + /* brand ID not supported; use alternate method. 
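+		 * The family/model fields of the signature select the
+		 * name instead; e.g. family 6 (CPUID_FAMILY_PPRO) with
+		 * model 3 decodes as "Pentium II" below.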
*/ + switch(family) { + case CPUID_FAMILY_486: + return "486"; + case CPUID_FAMILY_P5: + return "Pentium"; + case CPUID_FAMILY_PPRO: + switch(model) { + case CPUID_MODEL_P6: + return "Pentium Pro"; + case CPUID_MODEL_PII: + return "Pentium II"; + case CPUID_MODEL_P65: + case CPUID_MODEL_P66: + return "Celeron"; + case CPUID_MODEL_P67: + case CPUID_MODEL_P68: + case CPUID_MODEL_P6A: + case CPUID_MODEL_P6B: + return "Pentium III"; + default: + return "Unknown P6 Family"; + } + case CPUID_FAMILY_PENTIUM4: + return "Pentium 4"; + default: + return "Unknown Family"; + } + case 0x01: + return "Celeron"; + case 0x02: + case 0x04: + return "Pentium III"; + case 0x03: + if (signature == 0x6B1) + return "Celeron"; + else + return "Pentium III Xeon"; + case 0x06: + return "Mobile Pentium III"; + case 0x07: + return "Mobile Celeron"; + case 0x08: + if (signature >= 0xF20) + return "Genuine Intel"; + else + return "Pentium 4"; + case 0x09: + return "Pentium 4"; + case 0x0b: + return "Xeon"; + case 0x0e: + case 0x0f: + return "Mobile Pentium 4"; + default: + return "Unknown Pentium"; + } +} /* - * Cache description array + * Cache descriptor table. Each row has the form: + * (descriptor_value, cache, size, linesize, + * description) + * Note: the CACHE_DESC macro does not expand description text in the kernel. */ -struct cpuid_cache_desc cpuid_cache_desc[] = { - { CPUID_CACHE_ITLB_4K, - "Instruction TBL, 4K, pages 4-way set associative, 64 entries" - }, - { CPUID_CACHE_ITLB_4M, - "Instruction TBL, 4M, pages 4-way set associative, 4 entries" - }, - { CPUID_CACHE_DTLB_4K, - "Data TBL, 4K pages, 4-way set associative, 64 entries" - }, - { CPUID_CACHE_DTLB_4M, - "Data TBL, 4M pages, 4-way set associative, 4 entries" - }, - { CPUID_CACHE_ICACHE_8K, - "Instruction L1 cache, 8K, 4-way set associative, 32byte line size" - }, - { CPUID_CACHE_DCACHE_8K, - "Data L1 cache, 8K, 2-way set associative, 32byte line size" - }, - { CPUID_CACHE_UCACHE_128K, - "Unified L2 cache, 128K, 4-way set associative, 32byte line size" - }, - { CPUID_CACHE_UCACHE_256K, - "Unified L2 cache, 256K, 4-way set associative, 32byte line size" - }, - { CPUID_CACHE_UCACHE_512K, - "Unified L2 cache, 512K, 4-way set associative, 32byte line size" - }, - { CPUID_CACHE_NULL, - (char *)0 - } +static cpuid_cache_desc_t cpuid_cache_desc_tab[] = { +CACHE_DESC(CPUID_CACHE_ITLB_4K, Lnone, 0, 0, \ + "Instruction TLB, 4K, pages 4-way set associative, 64 entries"), +CACHE_DESC(CPUID_CACHE_ITLB_4M, Lnone, 0, 0, \ + "Instruction TLB, 4M, pages 4-way set associative, 4 entries"), +CACHE_DESC(CPUID_CACHE_DTLB_4K, Lnone, 0, 0, \ + "Data TLB, 4K pages, 4-way set associative, 64 entries"), +CACHE_DESC(CPUID_CACHE_DTLB_4M, Lnone, 0, 0, \ + "Data TLB, 4M pages, 4-way set associative, 4 entries"), +CACHE_DESC(CPUID_CACHE_ITLB_64, Lnone, 0, 0, \ + "Instruction TLB, 4K and 2M or 4M pages, 64 entries"), +CACHE_DESC(CPUID_CACHE_ITLB_128, Lnone, 0, 0, \ + "Instruction TLB, 4K and 2M or 4M pages, 128 entries"), +CACHE_DESC(CPUID_CACHE_ITLB_256, Lnone, 0, 0, \ + "Instruction TLB, 4K and 2M or 4M pages, 256 entries"), +CACHE_DESC(CPUID_CACHE_DTLB_64, Lnone, 0, 0, \ + "Data TLB, 4K and 4M pages, 64 entries"), +CACHE_DESC(CPUID_CACHE_DTLB_128, Lnone, 0, 0, \ + "Data TLB, 4K and 4M pages, 128 entries"), +CACHE_DESC(CPUID_CACHE_DTLB_256, Lnone, 0, 0, \ + "Data TLB, 4K and 4M pages, 256 entries"), +CACHE_DESC(CPUID_CACHE_ICACHE_8K, L1I, 8*1024, 32, \ + "Instruction L1 cache, 8K, 4-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_DCACHE_8K, L1D, 8*1024, 32, \ + 
"Data L1 cache, 8K, 2-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_ICACHE_16K, L1I, 16*1024, 32, \ + "Instruction L1 cache, 16K, 4-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_DCACHE_16K, L1D, 16*1024, 32, \ + "Data L1 cache, 16K, 4-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_DCACHE_8K_64, L1D, 8*1024, 64, \ + "Data L1 cache, 8K, 4-way set associative, 64byte line size"), +CACHE_DESC(CPUID_CACHE_DCACHE_16K_64, L1D, 16*1024, 64, \ + "Data L1 cache, 16K, 4-way set associative, 64byte line size"), +CACHE_DESC(CPUID_CACHE_DCACHE_32K_64, L1D, 32*1024, 64, \ + "Data L1 cache, 32K, 4-way set associative, 64byte line size"), +CACHE_DESC(CPUID_CACHE_TRACE_12K, L1I, 12*1024, 64, \ + "Trace cache, 12K-uop, 8-way set associative"), +CACHE_DESC(CPUID_CACHE_TRACE_12K, L1I, 16*1024, 64, \ + "Trace cache, 16K-uop, 8-way set associative"), +CACHE_DESC(CPUID_CACHE_TRACE_12K, L1I, 32*1024, 64, \ + "Trace cache, 32K-uop, 8-way set associative"), +CACHE_DESC(CPUID_CACHE_UCACHE_128K, L2U, 128*1024, 32, \ + "Unified L2 cache, 128K, 4-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_UCACHE_256K, L2U, 128*1024, 32, \ + "Unified L2 cache, 256K, 4-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_UCACHE_512K, L2U, 512*1024, 32, \ + "Unified L2 cache, 512K, 4-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_UCACHE_1M, L2U, 1*1024*1024, 32, \ + "Unified L2 cache, 1M, 4-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_UCACHE_2M, L2U, 2*1024*1024, 32, \ + "Unified L2 cache, 2M, 4-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_UCACHE_128K_64, L2U, 128*1024, 64, \ + "Unified L2 cache, 128K, 8-way set associative, 64byte line size"), +CACHE_DESC(CPUID_CACHE_UCACHE_256K_64, L2U, 256*1024, 64, \ + "Unified L2 cache, 256K, 8-way set associative, 64byte line size"), +CACHE_DESC(CPUID_CACHE_UCACHE_512K_64, L2U, 512*1024, 64, \ + "Unified L2 cache, 512K, 8-way set associative, 64byte line size"), +CACHE_DESC(CPUID_CACHE_UCACHE_1M_64, L2U, 1*1024*1024, 64, \ + "Unified L2 cache, 1M, 8-way set associative, 64byte line size"), +CACHE_DESC(CPUID_CACHE_UCACHE_256K_32, L2U, 256*1024, 32, \ + "Unified L2 cache, 256K, 8-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_UCACHE_512K_32, L2U, 512*1024, 32, \ + "Unified L2 cache, 512K, 8-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_UCACHE_1M_32, L2U, 1*1024*1024, 32, \ + "Unified L2 cache, 1M, 8-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_UCACHE_2M_32, L2U, 2*1024*1024, 32, \ + "Unified L2 cache, 2M, 8-way set associative, 32byte line size"), +CACHE_DESC(CPUID_CACHE_NULL, Lnone, 0, 0, \ + (char *)0), }; + +static void +set_cpu_intel(i386_cpu_info_t *info_p) +{ + uint32_t cpuid_result[4]; + uint32_t max_extid; + char str[128], *p; + char *model; + int i; + int j; + + /* get extended cpuid results */ + do_cpuid(0x80000000, cpuid_result); + max_extid = cpuid_result[0]; + + /* check to see if we can get brand string */ + if (max_extid > 0x80000000) { + /* + * The brand string 48 bytes (max), guaranteed to + * be NUL terminated. 
+ */ + do_cpuid(0x80000002, cpuid_result); + bcopy((char *)cpuid_result, &str[0], 16); + do_cpuid(0x80000003, cpuid_result); + bcopy((char *)cpuid_result, &str[16], 16); + do_cpuid(0x80000004, cpuid_result); + bcopy((char *)cpuid_result, &str[32], 16); + for (p = str; *p != '\0'; p++) { + if (*p != ' ') break; + } + strncpy(info_p->cpuid_brand_string, + p, sizeof(info_p->cpuid_brand_string)-1); + info_p->cpuid_brand_string[sizeof(info_p->cpuid_brand_string)-1] = '\0'; + } -/* - * CPU identification - */ -unsigned int cpuid_value; -unsigned char cpuid_type; -unsigned char cpuid_family; -unsigned char cpuid_model; -unsigned char cpuid_stepping; -unsigned int cpuid_feature; -char cpuid_vid[CPUID_VID_SIZE + 1]; -unsigned char cpuid_cache[CPUID_CACHE_SIZE]; + /* get processor signature and decode */ + do_cpuid(1, cpuid_result); + info_p->cpuid_signature = cpuid_result[0]; + info_p->cpuid_stepping = cpuid_result[0] & 0x0f; + info_p->cpuid_model = (cpuid_result[0] >> 4) & 0x0f; + info_p->cpuid_family = (cpuid_result[0] >> 8) & 0x0f; + info_p->cpuid_type = (cpuid_result[0] >> 12) & 0x03; + info_p->cpuid_extmodel = (cpuid_result[0] >> 16) & 0x0f; + info_p->cpuid_extfamily = (cpuid_result[0] >> 20) & 0xff; + info_p->cpuid_brand = cpuid_result[1] & 0xff; + info_p->cpuid_features = cpuid_result[3]; -/* - * Return correct CPU_TYPE - */ -/*ARGSUSED*/ -cpu_type_t -cpuid_cputype( - int my_cpu) + /* decode family/model/type */ + switch (info_p->cpuid_type) { + case CPUID_TYPE_OVERDRIVE: + strcat(info_p->model_string, "Overdrive "); + break; + case CPUID_TYPE_DUAL: + strcat(info_p->model_string, "Dual "); + break; + } + strcat(info_p->model_string, + cpuid_intel_get_model_name(info_p->cpuid_brand, + info_p->cpuid_family, + info_p->cpuid_model, + info_p->cpuid_signature)); + info_p->model_string[sizeof(info_p->model_string)-1] = '\0'; + + /* get processor cache descriptor info */ + do_cpuid(2, cpuid_result); + for (j = 0; j < 4; j++) { + if ((cpuid_result[j] >> 31) == 1) /* bit31 is validity */ + continue; + ((uint32_t *) info_p->cache_info)[j] = cpuid_result[j]; + } + /* first byte gives number of cpuid calls to get all descriptors */ + for (i = 1; i < info_p->cache_info[0]; i++) { + if (i*16 > sizeof(info_p->cache_info)) + break; + do_cpuid(2, cpuid_result); + for (j = 0; j < 4; j++) { + if ((cpuid_result[j] >> 31) == 1) + continue; + ((uint32_t *) info_p->cache_info)[4*i+j] = + cpuid_result[j]; + } + } + + /* decode the descriptors looking for L1/L2/L3 size info */ + for (i = 1; i < sizeof(info_p->cache_info); i++) { + cpuid_cache_desc_t *descp; + uint8_t desc = info_p->cache_info[i]; + + if (desc == CPUID_CACHE_NULL) + continue; + for (descp = cpuid_cache_desc_tab; + descp->value != CPUID_CACHE_NULL; descp++) { + if (descp->value != desc) + continue; + info_p->cache_size[descp->type] = descp->size; + if (descp->type == L2U) + info_p->cache_linesize = descp->linesize; + break; + } + } + /* For P-IIIs, L2 could be 256k or 512k but we can't tell */ + if (info_p->cache_size[L2U] == 0 && + info_p->cpuid_family == 0x6 && info_p->cpuid_model == 0xb) { + info_p->cache_size[L2U] = 256*1024; + info_p->cache_linesize = 32; + } + + return; +} + +static void +set_cpu_unknown(i386_cpu_info_t *info_p) { -#ifndef MACH_BSD /* FIXME - add more family/chip types */ - switch (cpuid_family) { - case CPUID_FAMILY_PPRO: - return (CPU_TYPE_PENTIUMPRO); - case CPUID_FAMILY_P5: - return (CPU_TYPE_PENTIUM); - case CPUID_FAMILY_486: - return (CPU_TYPE_I486); - default: - break; - } -#endif - return (CPU_TYPE_I386); + 
strcat(info_p->model_string, "Unknown"); +} + + +static struct { + uint32_t mask; + char *name; +} feature_names[] = { + {CPUID_FEATURE_FPU, "FPU",}, + {CPUID_FEATURE_VME, "VME",}, + {CPUID_FEATURE_DE, "DE",}, + {CPUID_FEATURE_PSE, "PSE",}, + {CPUID_FEATURE_TSC, "TSC",}, + {CPUID_FEATURE_MSR, "MSR",}, + {CPUID_FEATURE_PAE, "PAE",}, + {CPUID_FEATURE_MCE, "MCE",}, + {CPUID_FEATURE_CX8, "CX8",}, + {CPUID_FEATURE_APIC, "APIC",}, + {CPUID_FEATURE_SEP, "SEP",}, + {CPUID_FEATURE_MTRR, "MTRR",}, + {CPUID_FEATURE_PGE, "PGE",}, + {CPUID_FEATURE_MCA, "MCA",}, + {CPUID_FEATURE_CMOV, "CMOV",}, + {CPUID_FEATURE_PAT, "PAT",}, + {CPUID_FEATURE_PSE36, "PSE36",}, + {CPUID_FEATURE_PSN, "PSN",}, + {CPUID_FEATURE_CLFSH, "CLFSH",}, + {CPUID_FEATURE_DS, "DS",}, + {CPUID_FEATURE_ACPI, "ACPI",}, + {CPUID_FEATURE_MMX, "MMX",}, + {CPUID_FEATURE_FXSR, "FXSR",}, + {CPUID_FEATURE_SSE, "SSE",}, + {CPUID_FEATURE_SSE2, "SSE2",}, + {CPUID_FEATURE_SS, "SS",}, + {CPUID_FEATURE_HTT, "HTT",}, + {CPUID_FEATURE_TM, "TM",}, + {0, 0} +}; + +char * +cpuid_get_feature_names(uint32_t feature, char *buf, unsigned buf_len) +{ + int i; + int len; + char *p = buf; + + for (i = 0; feature_names[i].mask != 0; i++) { + if ((feature & feature_names[i].mask) == 0) + continue; + if (i > 0) + *p++ = ' '; + len = min(strlen(feature_names[i].name), (buf_len-1) - (p-buf)); + if (len == 0) + break; + bcopy(feature_names[i].name, p, len); + p += len; + } + *p = '\0'; + return buf; +} + +void +cpuid_feature_display( + char *header, + int my_cpu) +{ + char buf[256]; + + printf("%s: %s\n", header, + cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf))); } -/* - * Display processor signature - */ -/*ARGSUSED*/ void cpuid_cpu_display( - char *header, - int my_cpu) + char *header, + int my_cpu) { - struct cpuid_name *name; - unsigned int i; - unsigned int *freq; - unsigned int mhz; - unsigned int feature; - char **flag; - extern unsigned int delaycount; - - /* - * Identify vendor ID - */ - for (name = cpuid_name; name->name != (char *)0; name++) { - char *p = name->name; - char *q = cpuid_vid; - while (*p == *q && *p != 0) { - p++; - q++; - } - if (*p == '\0' && *q == '\0') - break; - } - if (name->name == (char *)0) { - printf("Unrecognized processor vendor id = '%s'\n", cpuid_vid); - return; - } - - /* - * Identify Product ID - */ - for (i = 0; i < name->size; i++) - if (name->product[i].type == cpuid_type && - name->product[i].family == cpuid_family && - name->product[i].model == cpuid_model) - break; - if (i == name->size) { - printf("%s processor (type = 0x%x, family = 0x%x, model = 0x%x)\n", - "Unrecognized", cpuid_type, cpuid_family, cpuid_model); - return; - } - - /* - * Look for frequency and adjust it to known values - */ - mhz = (1000 * delaycount) / name->product[i].delay; - for (freq = name->product[i].frequency; *freq != 0; freq++) - if (*freq >= mhz) - break; - if (*freq == 0) - mhz = *(freq - 1); - else if (freq == name->product[i].frequency) - mhz = *freq; - else if (*freq - mhz > mhz - *(freq - 1)) - mhz = *(freq - 1); - else if (*freq != mhz) - mhz = *freq; - - /* - * Display product and frequency - */ - printf("%s: %s at %d MHz (signature = %d/%d/%d/%d)\n", - header, name->product[i].name, mhz, cpuid_type, - cpuid_family, cpuid_model, cpuid_stepping); - - /* - * Display feature (if any) - */ - if (cpuid_feature) { - i = 0; - flag = cpuid_flag; - for (feature = cpuid_feature; feature != 0; feature >>= 1) { - if (feature & 1) - if (i == 0) { - printf("%s: %s", header, *flag); - i = 1; - } else - printf(", %s", *flag); - flag++; - 
} - printf("\n"); - } + printf("%s: %s\n", header, + (cpuid_cpu_info.cpuid_brand_string[0] != '\0') ? + cpuid_cpu_info.cpuid_brand_string : + cpuid_cpu_info.model_string); } -/* - * Display processor configuration information - */ -/*ARGSUSED*/ +unsigned int +cpuid_family(void) +{ + return cpuid_cpu_info.cpuid_family; +} + +unsigned int +cpuid_features(void) +{ + return cpuid_cpu_info.cpuid_features; +} + +i386_cpu_info_t * +cpuid_info(void) +{ + return &cpuid_cpu_info; +} + +/* XXX for temporary compatibility */ void -cpuid_cache_display( - char *header, - int my_cpu) +set_cpu_model(void) { - struct cpuid_cache_desc *desc; - unsigned int i; - - if (cpuid_cache[CPUID_CACHE_VALID] == 1) - for (i = 0; i < CPUID_CACHE_SIZE; i++) { - if (i != CPUID_CACHE_VALID || cpuid_cache[i] == CPUID_CACHE_NULL) - continue; - for (desc = cpuid_cache_desc; - desc->description != (char *)0; desc++) - if (desc->value == cpuid_cache[i]) - break; - if (desc->description != (char *)0) - printf("%s: %s\n", header, desc->description); - } + cpuid_get_info(&cpuid_cpu_info); + cpuid_feature = cpuid_cpu_info.cpuid_features; /* XXX compat */ } + diff --git a/osfmk/i386/cpuid.h b/osfmk/i386/cpuid.h index 9234050ae..c8fad3ffb 100644 --- a/osfmk/i386/cpuid.h +++ b/osfmk/i386/cpuid.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -29,12 +29,15 @@ /* * x86 CPU identification * - * TODO : Add TI/Thomson processors */ #ifndef _MACHINE_CPUID_H_ #define _MACHINE_CPUID_H_ +#include + +#ifdef __APPLE_API_PRIVATE + #define CPUID_VID_SIZE 12 #define CPUID_VID_INTEL "GenuineIntel" #define CPUID_VID_UMC "UMC UMC UMC " @@ -44,17 +47,32 @@ #define CPUID_FEATURE_FPU 0x00000001 /* Floating point unit on-chip */ #define CPUID_FEATURE_VME 0x00000002 /* Virtual Mode Extension */ -#define CPUID_FEATURE_IOB 0x00000004 /* I/O Breakpoints */ +#define CPUID_FEATURE_DE 0x00000004 /* Debugging Extension */ #define CPUID_FEATURE_PSE 0x00000008 /* Page Size Extension */ #define CPUID_FEATURE_TSC 0x00000010 /* Time Stamp Counter */ #define CPUID_FEATURE_MSR 0x00000020 /* Model Specific Registers */ +#define CPUID_FEATURE_PAE 0x00000040 /* Physical Address Extension */ #define CPUID_FEATURE_MCE 0x00000080 /* Machine Check Exception */ #define CPUID_FEATURE_CX8 0x00000100 /* CMPXCHG8B */ #define CPUID_FEATURE_APIC 0x00000200 /* On-chip APIC */ +#define CPUID_FEATURE_SEP 0x00000800 /* Fast System Call */ #define CPUID_FEATURE_MTRR 0x00001000 /* Memory Type Range Register */ #define CPUID_FEATURE_PGE 0x00002000 /* Page Global Enable */ #define CPUID_FEATURE_MCA 0x00004000 /* Machine Check Architecture */ #define CPUID_FEATURE_CMOV 0x00008000 /* Conditional Move Instruction */ +#define CPUID_FEATURE_PAT 0x00010000 /* Page Attribute Table */ +#define CPUID_FEATURE_PSE36 0x00020000 /* 36-bit Page Size Extension */ +#define CPUID_FEATURE_PSN 0x00040000 /* Processor Serial Number */ +#define CPUID_FEATURE_CLFSH 0x00080000 /* CLFLUSH Instruction supported */ +#define CPUID_FEATURE_DS 0x00200000 /* Debug Store */ +#define CPUID_FEATURE_ACPI 0x00400000 /* Thermal Monitor, SW-controlled clock */ +#define CPUID_FEATURE_MMX 0x00800000 /* MMX supported */ +#define CPUID_FEATURE_FXSR 0x01000000 /* Fast floating point save/restore */ +#define CPUID_FEATURE_SSE 0x02000000 /* Streaming SIMD extensions */ +#define CPUID_FEATURE_SSE2 0x04000000 /* Streaming SIMD extensions 2 */ +#define CPUID_FEATURE_SS 0x08000000 /* Self-Snoop 
*/ +#define CPUID_FEATURE_HTT 0x10000000 /* Hyper-Threading Technology */ +#define CPUID_FEATURE_TM 0x20000000 /* Thermal Monitor */ #define CPUID_TYPE_OEM 0x0 /* Original processor */ #define CPUID_TYPE_OVERDRIVE 0x1 /* Overdrive processor */ @@ -64,7 +82,8 @@ #define CPUID_FAMILY_386 0x3 /* Intel 386 (not part of CPUID) */ #define CPUID_FAMILY_486 0x4 /* Intel 486 */ #define CPUID_FAMILY_P5 0x5 /* Intel Pentium */ -#define CPUID_FAMILY_PPRO 0x6 /* Intel Pentium Pro */ +#define CPUID_FAMILY_PPRO 0x6 /* Intel Pentium Pro, II, III */ +#define CPUID_FAMILY_PENTIUM4 0xF /* Intel Pentium 4 */ #define CPUID_MODEL_I386_DX 0x0 /* Intel 386 (not part of CPUID) */ @@ -100,9 +119,14 @@ #define CPUID_MODEL_P6 0x1 /* Intel P6 */ #define CPUID_MODEL_PII 0x3 /* Intel PII */ +#define CPUID_MODEL_P65 0x5 /* Intel PII/Xeon/Celeron model 5 */ +#define CPUID_MODEL_P66 0x6 /* Intel Celeron model 6 */ +#define CPUID_MODEL_P67 0x7 /* Intel PIII/Xeon model 7 */ +#define CPUID_MODEL_P68 0x8 /* Intel PIII/Xeon/Celeron model 8 */ +#define CPUID_MODEL_P6A 0xA /* Intel PIII Xeon model A */ +#define CPUID_MODEL_P6B 0xB /* Intel PIII model B */ #define CPUID_CACHE_SIZE 16 /* Number of descriptor vales */ -#define CPUID_CACHE_VALID 4 /* Index of descriptor validity */ #define CPUID_CACHE_NULL 0x00 /* NULL */ #define CPUID_CACHE_ITLB_4K 0x01 /* Instruction TLB, 4K pages */ @@ -110,58 +134,119 @@ #define CPUID_CACHE_DTLB_4K 0x03 /* Data TLB, 4K pages */ #define CPUID_CACHE_DTLB_4M 0x04 /* Data TLB, 4M pages */ #define CPUID_CACHE_ICACHE_8K 0x06 /* Instruction cache, 8K */ +#define CPUID_CACHE_ICACHE_16K 0x08 /* Instruction cache, 16K */ #define CPUID_CACHE_DCACHE_8K 0x0A /* Data cache, 8K */ -#define CPUID_CACHE_UCACHE_128K 0x41 /* Unified cache, 128K */ -#define CPUID_CACHE_UCACHE_256K 0x42 /* Unified cache, 256K */ -#define CPUID_CACHE_UCACHE_512K 0x43 /* Unified cache, 512K */ +#define CPUID_CACHE_DCACHE_16K 0x0C /* Data cache, 16K */ +#define CPUID_CACHE_UCACHE_128K 0x41 /* 2nd-level cache, 128K */ +#define CPUID_CACHE_UCACHE_256K 0x42 /* 2nd-level cache, 256K */ +#define CPUID_CACHE_UCACHE_512K 0x43 /* 2nd-level cache, 512K */ +#define CPUID_CACHE_UCACHE_1M 0x44 /* 2nd-level cache, 1M */ +#define CPUID_CACHE_UCACHE_2M 0x45 /* 2nd-level cache, 2M */ +#define CPUID_CACHE_ITLB_64 0x50 /* Instruction TLB, 64 entries */ +#define CPUID_CACHE_ITLB_128 0x51 /* Instruction TLB, 128 entries */ +#define CPUID_CACHE_ITLB_256 0x52 /* Instruction TLB, 256 entries */ +#define CPUID_CACHE_DTLB_64 0x5B /* Data TLB, 64 entries */ +#define CPUID_CACHE_DTLB_128 0x5C /* Data TLB, 128 entries */ +#define CPUID_CACHE_DTLB_256 0x5D /* Data TLB, 256 entries */ +#define CPUID_CACHE_DCACHE_8K_64 0x66 /* Data cache, 8K, 64 byte line size */ +#define CPUID_CACHE_DCACHE_16K_64 0x67 /* Data cache, 16K, 64 byte line size */ +#define CPUID_CACHE_DCACHE_32K_64 0x68 /* Data cache, 32K, 64 byte line size */ +#define CPUID_CACHE_TRACE_12K 0x70 /* Trace cache 12K-uop, 8-way */ +#define CPUID_CACHE_TRACE_16K 0x71 /* Trace cache 16K-uop, 8-way */ +#define CPUID_CACHE_TRACE_32K 0x72 /* Trace cache 32K-uop, 8-way */ +#define CPUID_CACHE_UCACHE_128K_64 0x79 /* 2nd-level, 128K, 8-way, 64 bytes */ +#define CPUID_CACHE_UCACHE_256K_64 0x7A /* 2nd-level, 256K, 8-way, 64 bytes */ +#define CPUID_CACHE_UCACHE_512K_64 0x7B /* 2nd-level, 512K, 8-way, 64 bytes */ +#define CPUID_CACHE_UCACHE_1M_64 0x7C /* 2nd-level, 1M, 8-way, 64 bytes */ +#define CPUID_CACHE_UCACHE_256K_32 0x82 /* 2nd-level, 256K, 8-way, 32 bytes */ +#define CPUID_CACHE_UCACHE_512K_32 0x83 /* 2nd-level, 
512K, 8-way, 32 bytes */ +#define CPUID_CACHE_UCACHE_1M_32 0x84 /* 2nd-level, 1M, 8-way, 32 bytes */ +#define CPUID_CACHE_UCACHE_2M_32 0x85 /* 2nd-level, 2M, 8-way, 32 bytes */ #ifndef ASSEMBLER +#include +#include +#include #include -extern unsigned int cpuid_value; -extern unsigned char cpuid_type; -extern unsigned char cpuid_family; -extern unsigned char cpuid_model; -extern unsigned char cpuid_stepping; -extern unsigned int cpuid_feature; -extern char cpuid_vid[]; -extern unsigned char cpuid_cache[]; -/* - * Product ID arrays per vendor - */ -struct cpuid_product { - unsigned char type; /* CPU type */ - unsigned char family; /* CPU family */ - unsigned char model; /* CPU model */ - unsigned int delay; /* 1MHz Delay (scale 1000) */ - unsigned int *frequency; /* Frequency array */ - char *name; /* Model name */ -}; +static inline void +do_cpuid(uint32_t selector, uint32_t *data) +{ + asm("cpuid" + : "=a" (data[0]), + "=b" (data[1]), + "=c" (data[2]), + "=d" (data[3]) + : "a"(selector)); +} /* - * Vendor ID structure + * Cache ID descriptor structure. + * Note: description string absent in kernel. */ -struct cpuid_name { - char *name; /* Vendor ID name */ - struct cpuid_product *product; /* product array */ - unsigned int size; /* #elements in product array */ -}; +typedef enum { Lnone, L1I, L1D, L2U, LCACHE_MAX } cache_type_t ; +typedef struct { + unsigned char value; /* Descriptor value */ + cache_type_t type; /* Cache type */ + unsigned int size; /* Cache size */ + unsigned int linesize; /* Cache line size */ +#ifdef KERNEL + char *description; /* Cache description */ +#endif /* KERNEL */ +} cpuid_cache_desc_t; + +#ifdef KERNEL +#define CACHE_DESC(value,type,size,linesize,text) \ + { value, type, size, linesize, text } +#else +#define CACHE_DESC(value,type,size,linesize,text) \ + { value, type, size, linesize } +#endif /* KERNEL */ + +/* Physical CPU info */ +typedef struct { + char cpuid_vendor[16]; + char cpuid_brand_string[48]; + + uint32_t cpuid_value; + cpu_type_t cpuid_type; + uint8_t cpuid_family; + uint8_t cpuid_model; + uint8_t cpuid_extmodel; + uint8_t cpuid_extfamily; + uint8_t cpuid_stepping; + uint32_t cpuid_features; + uint32_t cpuid_signature; + uint8_t cpuid_brand; + + uint32_t cache_size[LCACHE_MAX]; + uint32_t cache_linesize; + + char model_string[64]; /* sanitized model string */ + uint8_t cache_info[64]; /* list of cache descriptors */ + +} i386_cpu_info_t; -/* - * Cache ID description structure - */ -struct cpuid_cache_desc { - unsigned char value; /* Descriptor value */ - char *description; /* Cache description */ -}; /* * External declarations */ extern cpu_type_t cpuid_cputype(int); extern void cpuid_cpu_display(char *, int); -extern void cpuid_cache_display(char *, int); +extern void cpuid_features_display(char *, int); +extern char * cpuid_get_feature_names(uint32_t, char *, unsigned); + +extern uint32_t cpuid_features(void); +extern uint32_t cpuid_family(void); + +extern char * cpuid_intel_get_model_name(uint8_t, uint8_t, + uint8_t, uint32_t); +extern i386_cpu_info_t *cpuid_info(void); + +extern uint32_t cpuid_feature; /* XXX obsolescent */ #endif /* ASSEMBLER */ + +#endif /* __APPLE_API_PRIVATE */ #endif /* _MACHINE_CPUID_H_ */ diff --git a/osfmk/i386/cswitch.s b/osfmk/i386/cswitch.s index 5f7b715a6..3f31e2b31 100644 --- a/osfmk/i386/cswitch.s +++ b/osfmk/i386/cswitch.s @@ -67,7 +67,7 @@ #endif #if AT386 -#include +#include #endif /* AT386 */ #define CX(addr, reg) addr(,reg,4) @@ -116,14 +116,11 @@ Entry(Switch_context) popl KSS_EIP(%ecx) /* save return PC 
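
The do_cpuid() wrapper above replaces the old table-driven assembly probe (the removal of set_cpu_model from locore.s appears later in this patch). A minimal user-space sketch of how such a wrapper fills in the vendor and feature fields of an i386_cpu_info_t; the leaf numbers and the HTT bit match the header above, while the demo program itself is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static inline void
do_cpuid(uint32_t selector, uint32_t *data)
{
	asm("cpuid"
	    : "=a" (data[0]), "=b" (data[1]), "=c" (data[2]), "=d" (data[3])
	    : "a" (selector));
}

int
main(void)
{
	uint32_t reg[4];
	char vendor[13];

	do_cpuid(0, reg);		/* leaf 0: max leaf + vendor id */
	memcpy(vendor + 0, &reg[1], 4);	/* EBX, EDX, ECX spell the vendor */
	memcpy(vendor + 4, &reg[3], 4);
	memcpy(vendor + 8, &reg[2], 4);
	vendor[12] = '\0';

	do_cpuid(1, reg);		/* leaf 1: signature + feature bits */
	printf("vendor %s, features 0x%08x\n", vendor, reg[3]);
	printf("HTT: %s\n", (reg[3] & 0x10000000) ? "yes" : "no");
	return 0;
}
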
*/ movl %esp,KSS_ESP(%ecx) /* save SP */ - movl 0(%esp),%eax /* get old thread */ - movl 4(%esp),%ebx /* get continuation */ - movl %ebx,TH_CONTINUATION(%eax) /* save continuation */ - movl %ecx,TH_KERNEL_STACK(%eax) /* save kernel stack */ - + movl 0(%esp),%eax /* return old thread */ movl 8(%esp),%esi /* get new thread */ - movl $ CPD_ACTIVE_THREAD,%ecx - movl %esi,%gs:(%ecx) /* new thread is active */ + movl TH_TOP_ACT(%esi),%ebx /* get new_thread->top_act */ + movl $ CPD_ACTIVE_THREAD,%ecx + movl %ebx,%gs:(%ecx) /* new thread is active */ movl TH_KERNEL_STACK(%esi),%ecx /* get its kernel stack */ lea KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%ecx),%ebx /* point to stack top */ @@ -131,14 +128,9 @@ Entry(Switch_context) movl %ecx,CX(EXT(active_stacks),%edx) /* set current stack */ movl %ebx,CX(EXT(kernel_stack),%edx) /* set stack top */ - movl TH_TOP_ACT(%esi),%esi /* get new_thread->top_act */ - cmpl $0,ACT_KLOADED(%esi) /* check kernel-loaded flag */ - je 0f - movl %esi,CX(EXT(active_kloaded),%edx) - jmp 1f -0: + movl $0,CX(EXT(active_kloaded),%edx) -1: + movl KSS_ESP(%ecx),%esp /* switch stacks */ movl KSS_ESI(%ecx),%esi /* restore registers */ movl KSS_EDI(%ecx),%edi @@ -176,7 +168,6 @@ Entry(switch_to_shutdown_context) movl %esp,KSS_ESP(%ecx) /* save SP */ movl 0(%esp),%eax /* get old thread */ - movl $0,TH_CONTINUATION(%eax) /* clear continuation */ movl %ecx,TH_KERNEL_STACK(%eax) /* save old stack */ movl 4(%esp),%ebx /* get routine to run next */ movl 8(%esp),%esi /* get its argument */ diff --git a/osfmk/i386/db_machdep.h b/osfmk/i386/db_machdep.h index 9a6292451..124a9e436 100644 --- a/osfmk/i386/db_machdep.h +++ b/osfmk/i386/db_machdep.h @@ -119,13 +119,9 @@ int db_inst_store(unsigned long); /* * Given pointer to i386_saved_state, determine if it represents - * a thread executing a) in user space, b) in the kernel, or c) - * in a kernel-loaded task. Return true for cases a) and c). + * a thread executing in user space. */ -#define IS_USER_TRAP(regs, etext) ((((regs)->cs & 3) != 0) || \ - (current_act() && \ - current_act()->kernel_loaded && \ - ((char *)(regs)->eip > (etext)))) +#define IS_USER_TRAP(regs, etext) (((regs)->cs & 3) != 0) extern boolean_t db_check_access( vm_offset_t addr, diff --git a/osfmk/i386/fpu.c b/osfmk/i386/fpu.c index 34bf4752a..3098f7a56 100644 --- a/osfmk/i386/fpu.c +++ b/osfmk/i386/fpu.c @@ -71,6 +71,7 @@ #include #include #include +#include #include #if 0 @@ -110,6 +111,8 @@ volatile thread_act_t fp_intr_act = THR_ACT_NULL; } #endif + +#define ALIGNED(addr,size) (((unsigned)(addr)&((size)-1))==0) /* Forward */ @@ -140,33 +143,22 @@ init_fpu(void) fnstcw(&control); if ((status & 0xff) == 0 && - (control & 0x103f) == 0x3f) - { -#if 0 - /* - * We have a FPU of some sort. - * Compare -infinity against +infinity - * to check whether we have a 287 or a 387. - */ - volatile double fp_infinity, fp_one, fp_zero; - fp_one = 1.0; - fp_zero = 0.0; - fp_infinity = fp_one / fp_zero; - if (fp_infinity == -fp_infinity) { - /* - * We have an 80287. - */ - fp_kind = FP_287; - __asm__ volatile(".byte 0xdb; .byte 0xe4"); /* fnsetpm */ - } - else -#endif - { - /* - * We have a 387. 
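
Switch_context now publishes the incoming thread's top activation in the per-CPU CPD_ACTIVE_THREAD slot addressed through %gs, instead of tracking kernel-loaded activations separately. A sketch of the C-level equivalent of that two-instruction %gs access (assumes 32-bit gcc; the offset value is a placeholder for the genassym-exported constant, not the real one):

/* %gs points at this CPU's cpu_data; CPD_ACTIVE_THREAD is the
 * genassym-exported offset of the active-thread field within it. */
#define CPD_ACTIVE_THREAD	0	/* offset: illustrative only */

static inline void *
get_active_thread(void)
{
	void *thr;
	unsigned int off = CPD_ACTIVE_THREAD;

	/* mirrors: movl $CPD_ACTIVE_THREAD,%ecx; movl %gs:(%ecx),%ebx */
	__asm__ volatile("movl %%gs:(%1),%0" : "=r" (thr) : "r" (off));
	return thr;
}
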
- */ - fp_kind = FP_387; + (control & 0x103f) == 0x3f) + { + fp_kind = FP_387; /* assume we have a 387 compatible instruction set */ + /* Use FPU save/restore instructions if available */ + if (cpuid_features() & CPUID_FEATURE_FXSR) { + fp_kind = FP_FXSR; + set_cr4(get_cr4() | CR4_FXS); + printf("Enabling XMM register save/restore"); + /* And allow SIMD instructions if present */ + if (cpuid_features() & CPUID_FEATURE_SSE) { + printf(" and SSE/SSE2"); + set_cr4(get_cr4() | CR4_XMM); + } + printf(" opcodes\n"); } + /* * Trap wait instructions. Turn off FPU for now. */ @@ -218,6 +210,153 @@ ASSERT_IPL(SPL0); zfree(ifps_zone, (vm_offset_t) fps); } +/* + * Set the floating-point state for a thread based + * on the FXSave formatted data. This is basically + * the same as fpu_set_state except it uses the + * expanded data structure. + * If the thread is not the current thread, it is + * not running (held). Locking needed against + * concurrent fpu_set_state or fpu_get_state. + */ +kern_return_t +fpu_set_fxstate( + thread_act_t thr_act, + struct i386_float_state *state) +{ + register pcb_t pcb; + register struct i386_fpsave_state *ifps; + register struct i386_fpsave_state *new_ifps; + +ASSERT_IPL(SPL0); + if (fp_kind == FP_NO) + return KERN_FAILURE; + + if (state->fpkind != FP_FXSR) { + /* strange if this happens, but in case someone builds one of these manually... */ + return fpu_set_state(thr_act, state); + } + + assert(thr_act != THR_ACT_NULL); + pcb = thr_act->mact.pcb; + +#if NCPUS == 1 + + /* + * If this thread`s state is in the FPU, + * discard it; we are replacing the entire + * FPU state. + */ + if (fp_act == thr_act) { + fwait(); /* wait for possible interrupt */ + clear_fpu(); /* no state in FPU */ + } +#endif + + if (state->initialized == 0) { + /* + * new FPU state is 'invalid'. + * Deallocate the fp state if it exists. + */ + simple_lock(&pcb->lock); + ifps = pcb->ims.ifps; + pcb->ims.ifps = 0; + simple_unlock(&pcb->lock); + + if (ifps != 0) { + zfree(ifps_zone, (vm_offset_t) ifps); + } + } + else { + /* + * Valid state. Allocate the fp state if there is none. + */ + + new_ifps = 0; + Retry: + simple_lock(&pcb->lock); + ifps = pcb->ims.ifps; + if (ifps == 0) { + if (new_ifps == 0) { + simple_unlock(&pcb->lock); + new_ifps = (struct i386_fpsave_state *) zalloc(ifps_zone); + assert(ALIGNED(new_ifps,16)); + goto Retry; + } + ifps = new_ifps; + new_ifps = 0; + bzero((char *)ifps, sizeof *ifps); + pcb->ims.ifps = ifps; + } + + /* + * now copy over the new data. + */ + bcopy((char *)&state->hw_state[0], (char *)&ifps->fx_save_state, sizeof(struct i386_fx_save)); + ifps->fp_save_flavor = FP_FXSR; + simple_unlock(&pcb->lock); + if (new_ifps != 0) + zfree(ifps_zone, (vm_offset_t) ifps); + } + + return KERN_SUCCESS; +} + +/* + * Get the floating-point state for a thread. + * If the thread is not the current thread, it is + * not running (held). Locking needed against + * concurrent fpu_set_state or fpu_get_state. + */ +kern_return_t +fpu_get_fxstate( + thread_act_t thr_act, + register struct i386_float_state *state) +{ + register pcb_t pcb; + register struct i386_fpsave_state *ifps; + +ASSERT_IPL(SPL0); + if (fp_kind == FP_NO) + return KERN_FAILURE; + + assert(thr_act != THR_ACT_NULL); + pcb = thr_act->mact.pcb; + + simple_lock(&pcb->lock); + ifps = pcb->ims.ifps; + if (ifps == 0) { + /* + * No valid floating-point state. 
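
fpu_set_fxstate's Retry loop is a common kernel pattern: zalloc may block, so the allocation has to happen with the interlock dropped, and the state must be re-checked after reacquiring it. The same shape in portable C, with pthread_mutex standing in for the pcb simple_lock (names hypothetical); note that the loser of the race frees its own spare allocation, never the installed one:

#include <pthread.h>
#include <stdlib.h>

struct pcb {
	pthread_mutex_t	lock;
	void		*ifps;	/* lazily allocated FP save area */
};

static void *
pcb_get_ifps(struct pcb *pcb, size_t size)
{
	void *new_ifps = NULL;
	void *ifps;

	for (;;) {
		pthread_mutex_lock(&pcb->lock);	/* "Retry:" */
		ifps = pcb->ifps;
		if (ifps != NULL)
			break;			/* installed meanwhile: use it */
		if (new_ifps != NULL) {
			pcb->ifps = ifps = new_ifps;	/* install ours */
			new_ifps = NULL;
			break;
		}
		pthread_mutex_unlock(&pcb->lock);
		new_ifps = calloc(1, size);	/* may block: lock dropped */
		if (new_ifps == NULL)
			return NULL;
	}
	pthread_mutex_unlock(&pcb->lock);
	free(new_ifps);		/* race loser discards its spare; free(NULL) is a no-op */
	return ifps;
}
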
+ */ + simple_unlock(&pcb->lock); + bzero((char *)state, sizeof(struct i386_float_state)); + return KERN_SUCCESS; + } + + /* Make sure we`ve got the latest fp state info */ + /* If the live fpu state belongs to our target */ +#if NCPUS == 1 + if (thr_act == fp_act) +#else + if (thr_act == current_act()) +#endif + { + clear_ts(); + fp_save(thr_act); + clear_fpu(); + } + + state->fpkind = fp_kind; + state->exc_status = 0; + state->initialized = ifps->fp_valid; + bcopy( (char *)&ifps->fx_save_state, (char *)&state->hw_state[0], sizeof(struct i386_fx_save)); + + simple_unlock(&pcb->lock); + + return KERN_SUCCESS; +} + /* * Set the floating-point state for a thread. * If the thread is not the current thread, it is @@ -286,10 +425,12 @@ ASSERT_IPL(SPL0); if (new_ifps == 0) { simple_unlock(&pcb->lock); new_ifps = (struct i386_fpsave_state *) zalloc(ifps_zone); + assert(ALIGNED(new_ifps,16)); goto Retry; } ifps = new_ifps; new_ifps = 0; + bzero((char *)ifps, sizeof *ifps); // zero ALL fields first pcb->ims.ifps = ifps; } @@ -307,7 +448,7 @@ ASSERT_IPL(SPL0); ifps->fp_save_state.fp_dp = user_fp_state->fp_dp; ifps->fp_save_state.fp_ds = user_fp_state->fp_ds; ifps->fp_regs = *user_fp_regs; - + ifps->fp_save_flavor = FP_387; simple_unlock(&pcb->lock); if (new_ifps != 0) zfree(ifps_zone, (vm_offset_t) ifps); @@ -587,18 +728,21 @@ ASSERT_IPL(SPL0); * . if called from fpnoextflt or fp_intr, we are single-cpu * . otherwise, thread is running. */ - void fp_save( thread_act_t thr_act) { register pcb_t pcb = thr_act->mact.pcb; register struct i386_fpsave_state *ifps = pcb->ims.ifps; - if (ifps != 0 && !ifps->fp_valid) { /* registers are in FPU */ ifps->fp_valid = TRUE; - fnsave(&ifps->fp_save_state); + ifps->fp_save_flavor = FP_387; + if (FXSAFE()) { + fxsave(&ifps->fx_save_state); // save the SSE2/FP state in addition, if enabled + ifps->fp_save_flavor = FP_FXSR; + } + fnsave(&ifps->fp_save_state); // also update the old save area for now... } } @@ -619,6 +763,7 @@ ASSERT_IPL(SPL0); ifps = pcb->ims.ifps; if (ifps == 0) { ifps = (struct i386_fpsave_state *) zalloc(ifps_zone); + assert(ALIGNED(ifps,16)); bzero((char *)ifps, sizeof *ifps); pcb->ims.ifps = ifps; fpinit(); @@ -644,11 +789,13 @@ ASSERT_IPL(SPL0); /*NOTREACHED*/ #endif } else { - frstor(ifps->fp_save_state); + if (ifps->fp_save_flavor == FP_FXSR) fxrstor(&ifps->fx_save_state); + else frstor(ifps->fp_save_state); } ifps->fp_valid = FALSE; /* in FPU */ } + /* * Allocate and initialize FP state for current thread. * Don't load state.
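
fp_save and fp_load above now prefer fxsave/fxrstor but keep the legacy 387-format area current so older consumers keep working, and FXSAVE faults unless its 512-byte operand is 16-byte aligned, which is why the zone allocations grew ALIGNED() asserts. A compilable sketch of both points (x86 gcc assumed; area sizes are the architectural ones):

#include <assert.h>
#include <stdint.h>

#define ALIGNED(addr, size) (((uintptr_t)(addr) & ((size) - 1)) == 0)

/* FXSAVE stores a 512-byte image that must be 16-byte aligned;
 * FNSAVE writes the 108-byte legacy 387 image with no such rule. */
static uint8_t fx_area[512] __attribute__((aligned(16)));
static uint8_t fp_area[108];

static void
save_fp_both(void)
{
	assert(ALIGNED(fx_area, 16));
	__asm__ __volatile__("fxsave %0" : "=m" (*fx_area));
	/* keep the legacy area current too, as fp_save() does */
	__asm__ __volatile__("fnsave %0" : "=m" (*fp_area));
}
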
@@ -662,6 +809,7 @@ fp_state_alloc(void) struct i386_fpsave_state *ifps; ifps = (struct i386_fpsave_state *)zalloc(ifps_zone); + assert(ALIGNED(ifps,16)); bzero((char *)ifps, sizeof *ifps); pcb->ims.ifps = ifps; @@ -671,6 +819,11 @@ fp_state_alloc(void) | (FPC_PC_53|FPC_IC_AFF); ifps->fp_save_state.fp_status = 0; ifps->fp_save_state.fp_tag = 0xffff; /* all empty */ + ifps->fx_save_state.fx_control = ifps->fp_save_state.fp_control; + ifps->fx_save_state.fx_status = ifps->fp_save_state.fp_status; + ifps->fx_save_state.fx_tag = 0x00; + ifps->fx_save_state.fx_MXCSR = 0x1f80; + } diff --git a/osfmk/i386/fpu.h b/osfmk/i386/fpu.h index 85ff1b94e..ba8762902 100644 --- a/osfmk/i386/fpu.h +++ b/osfmk/i386/fpu.h @@ -93,7 +93,7 @@ extern __inline__ unsigned short fnstsw(void) #define fnclex() \ __asm__ volatile("fnclex") -#define fnsave(state) \ +#define fnsave(state) \ __asm__ volatile("fnsave %0" : "=m" (*state)) #define frstor(state) \ @@ -102,6 +102,10 @@ extern __inline__ unsigned short fnstsw(void) #define fwait() \ __asm__("fwait"); +#define fxrstor(addr) __asm("fxrstor %0" : : "m" (*(addr))) +#define fxsave(addr) __asm __volatile("fxsave %0" : "=m" (*(addr))) + +#define FXSAFE() (fp_kind == FP_FXSR) #define fpu_load_context(pcb) @@ -110,6 +114,8 @@ extern __inline__ unsigned short fnstsw(void) * If only one CPU, we just set the task-switched bit, * to keep the new thread from using the coprocessor. * If multiple CPUs, we save the entire state. + * NOTE: in order to provide backwards-compatible support in the kernel, when saving SSE2 state we also save the + * FP state in its old location; otherwise fpu_get_state() and fpu_set_state() would stop working. */ #if NCPUS > 1 #define fpu_save_context(thread) \ @@ -119,7 +125,12 @@ extern __inline__ unsigned short fnstsw(void) if (ifps != 0 && !ifps->fp_valid) { \ /* registers are in FPU - save to memory */ \ ifps->fp_valid = TRUE; \ - fnsave(&ifps->fp_save_state); \ + ifps->fp_save_flavor = FP_387; \ + if (FXSAFE()) { \ + fxsave(&ifps->fx_save_state); \ + ifps->fp_save_flavor = FP_FXSR; \ + } \ + fnsave(&ifps->fp_save_state); \ } \ set_ts(); \ } @@ -145,6 +156,12 @@ extern kern_return_t fpu_set_state( extern kern_return_t fpu_get_state( thread_act_t thr_act, struct i386_float_state * st); +/* extern kern_return_t fpu_set_fxstate( + thread_act_t thr_act, + struct i386_float_state * st); +extern kern_return_t fpu_get_fxstate( + thread_act_t thr_act, + struct i386_float_state * st); */ extern void fpnoextflt(void); extern void fpextovrflt(void); extern void fpexterrflt(void); diff --git a/osfmk/i386/genassym.c b/osfmk/i386/genassym.c index f84ee345a..07a4d1c27 100644 --- a/osfmk/i386/genassym.c +++ b/osfmk/i386/genassym.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * * @@ -97,6 +97,7 @@ cpu_data_t cpu_data[NCPUS]; * the values, but we cannot run anything on the target machine.
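
fp_state_alloc above seeds the FXSAVE header to mirror the 387 defaults; 0x1f80 is the architectural MXCSR reset value, i.e. all six SSE exception classes masked, no status flags set, round-to-nearest. A quick decode, for reference:

#include <stdio.h>

/* MXCSR 0x1f80: bits 7-12 set = IM,DM,ZM,OM,UM,PM exception masks;
 * status flags (bits 0-5), rounding control (13-14) and FZ (15) clear. */
int
main(void)
{
	unsigned int mxcsr = 0x1f80;

	printf("exception masks 0x%x, status flags 0x%x, round-control %u\n",
	    (mxcsr >> 7) & 0x3f, mxcsr & 0x3f, (mxcsr >> 13) & 0x3);
	return 0;
}
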
*/ +#undef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE)0)->MEMBER) #if 0 @@ -119,15 +120,30 @@ main( DECLARE("AST_URGENT", AST_URGENT); + /* Simple Lock structure */ + DECLARE("SLOCK_ILK", offsetof(usimple_lock_t, interlock)); +#if MACH_LDEBUG + DECLARE("SLOCK_TYPE", offsetof(usimple_lock_t, lock_type)); + DECLARE("SLOCK_PC", offsetof(usimple_lock_t, debug.lock_pc)); + DECLARE("SLOCK_THREAD", offsetof(usimple_lock_t, debug.lock_thread)); + DECLARE("SLOCK_DURATIONH",offsetof(usimple_lock_t, debug.duration[0])); + DECLARE("SLOCK_DURATIONL",offsetof(usimple_lock_t, debug.duration[1])); + DECLARE("USLOCK_TAG", USLOCK_TAG); +#endif /* MACH_LDEBUG */ + + /* Mutex structure */ + DECLARE("MUTEX_LOCKED", offsetof(mutex_t *, locked)); + DECLARE("MUTEX_WAITERS",offsetof(mutex_t *, waiters)); + DECLARE("MUTEX_PROMOTED_PRI",offsetof(mutex_t *, promoted_pri)); +#if MACH_LDEBUG + DECLARE("MUTEX_TYPE", offsetof(mutex_t *, type)); + DECLARE("MUTEX_PC", offsetof(mutex_t *, pc)); + DECLARE("MUTEX_THREAD", offsetof(mutex_t *, thread)); + DECLARE("MUTEX_TAG", MUTEX_TAG); +#endif /* MACH_LDEBUG */ + #if MACH_LDEBUG - /* - * XXX - */ -#define SIMPLE_LOCK_TAG 0x5353 -#define MUTEX_TAG 0x4d4d DECLARE("TH_MUTEX_COUNT", offsetof(thread_t, mutex_count)); - DECLARE("SIMPLE_LOCK_TAG", SIMPLE_LOCK_TAG); - DECLARE("MUTEX_TAG", MUTEX_TAG); #endif /* MACH_LDEBUG */ DECLARE("TH_RECOVER", offsetof(thread_t, recover)); DECLARE("TH_CONTINUATION", offsetof(thread_t, continuation)); @@ -145,15 +161,11 @@ main( DECLARE("ACT_THREAD", offsetof(thread_act_t, thread)); DECLARE("ACT_TASK", offsetof(thread_act_t, task)); DECLARE("ACT_PCB", offsetof(thread_act_t, mact.pcb)); - DECLARE("ACT_KLOADED", offsetof(thread_act_t, kernel_loaded)); - DECLARE("ACT_KLOADING", offsetof(thread_act_t, kernel_loading)); DECLARE("ACT_LOWER", offsetof(thread_act_t, lower)); DECLARE("ACT_MAP", offsetof(thread_act_t, map)); DECLARE("MAP_PMAP", offsetof(vm_map_t, pmap)); - DECLARE("HOST_NAME", offsetof(host_t, host_self)); - DECLARE("DISP_MIN", offsetof(eml_dispatch_t, disp_min)); DECLARE("DISP_COUNT", offsetof(eml_dispatch_t, disp_count)); DECLARE("DISP_VECTOR", offsetof(eml_dispatch_t, disp_vector[0])); @@ -240,6 +252,14 @@ main( offsetof(cpu_data_t *, interrupt_level)); DECLARE("CPD_SIMPLE_LOCK_COUNT", offsetof(cpu_data_t *,simple_lock_count)); + DECLARE("CPD_CPU_NUMBER", + offsetof(cpu_data_t *,cpu_number)); + DECLARE("CPD_CPU_PHYS_NUMBER", + offsetof(cpu_data_t *,cpu_phys_number)); + DECLARE("CPD_CPU_STATUS", + offsetof(cpu_data_t *,cpu_status)); + DECLARE("CPD_MCOUNT_OFF", + offsetof(cpu_data_t *,mcount_off)); DECLARE("PTES_PER_PAGE", NPTES); DECLARE("INTEL_PTE_KERNEL", INTEL_PTE_VALID|INTEL_PTE_WRITE); diff --git a/osfmk/i386/hardclock.c b/osfmk/i386/hardclock.c index ebdd3c1ee..266afbfd1 100644 --- a/osfmk/i386/hardclock.c +++ b/osfmk/i386/hardclock.c @@ -63,7 +63,6 @@ #include #include #include -#include #include #include #include @@ -76,6 +75,7 @@ #include #include #include +#include #include @@ -160,42 +160,6 @@ hardclock(struct i386_interrupt_state *regs) /* saved registers */ } #endif /* PARANOID_KDB */ -#if 0 -#if MACH_MP_DEBUG - /* - * Increments counter of clock ticks handled under a masked state. - * Debugger() is called if masked state is kept during 1 sec. - * The counter is reset by splx() when ipl mask is set back to SPL0, - * and by spl0(). 
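
genassym.c is compiled for the target but never executed (hence the comment above about not being able to run anything on the target machine): each DECLARE must force its constant into the generated assembly, where a build script extracts it into a header consumed by locore.s and i386_lock.s. One common realization of DECLARE, shown with a stand-in struct — the actual xnu macro may differ:

#include <stddef.h>

/* Emit "DEFINITION NAME value" into the .s output; %c0 prints the
 * constant without the '$' immediate prefix. */
#define DECLARE(SYM, VAL) \
	__asm__ volatile("\nDEFINITION " SYM " %c0" : : "i" ((int)(VAL)))

struct example_lock {
	int	interlock;
	int	lock_type;
};

int
main(void)
{
	/* compile with -S, then grep DEFINITION out of the assembly */
	DECLARE("SLOCK_ILK", offsetof(struct example_lock, interlock));
	DECLARE("SLOCK_TYPE", offsetof(struct example_lock, lock_type));
	return 0;
}
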
- */ - if (SPL_CMP_GT((old_ipl & 0xFF), SPL0)) { - if (masked_state_cnt[mycpu]++ >= masked_state_max) { - int max_save = masked_state_max; - - masked_state_cnt[mycpu] = 0; - masked_state_max = 0x7fffffff; - - if (ret_addr == return_to_iret) { - usermode = (regs->efl & EFL_VM) || - ((regs->cs & 0x03) != 0); - pc = (unsigned)regs->eip; - } else { - usermode = FALSE; - pc = (unsigned) - ((struct i386_interrupt_state *)&old_ipl)->eip; - } - printf("looping at high IPL, usermode=%d pc=0x%x\n", - usermode, pc); - Debugger(""); - - masked_state_cnt[mycpu] = 0; - masked_state_max = max_save; - } - } else - masked_state_cnt[mycpu] = 0; -#endif /* MACH_MP_DEBUG */ -#endif - #if MACH_KPROF /* * If we were masked against the clock skip call @@ -203,7 +167,14 @@ hardclock(struct i386_interrupt_state *regs) /* saved registers */ * clock frequency of the master-cpu is confined * to the HZ rate. */ - if (SPL_CMP_LT(old_ipl & 0xFF, SPL7)) + if (SPL_CMP_GE((old_ipl & 0xFF), SPL7)) { + usermode = (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0); + pc = (unsigned)regs->eip; + assert(!usermode); + if (missed_clock[mycpu]++ && detect_lost_tick > 1) + Debugger("Mach_KPROF"); + masked_pc[mycpu] = pc; + } else #endif /* MACH_KPROF */ /* * The master processor executes the rtclock_intr() routine @@ -211,10 +182,14 @@ hardclock(struct i386_interrupt_state *regs) /* saved registers */ * a zero value on a HZ tick boundary. */ if (mycpu == master_cpu) { - if (rtclock_intr() != 0) { + if (rtclock_intr(regs) != 0) { mp_enable_preemption(); return; } + } else { + usermode = (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0); + pc = (unsigned)regs->eip; + hertz_tick(usermode, pc); } /* @@ -225,42 +200,6 @@ hardclock(struct i386_interrupt_state *regs) /* saved registers */ time_stamp_stat(); -#if 0 - if (ret_addr == return_to_iret) { - /* - * A kernel-loaded task executing within itself will look like - * "kernel mode", here. This is correct with syscalls - * implemented using migrating threads, because it means that - * the time spent in the server by a client thread will be - * treated as "system" time for the client thread (and nothing - * for the server). This conforms to the CPU reporting for an - * integrated kernel. - */ -#endif - usermode = (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0); - pc = (unsigned)regs->eip; -#if 0 - } else { - usermode = FALSE; - pc = (unsigned)((struct i386_interrupt_state *)&old_ipl)->eip; - } -#endif - -#if MACH_KPROF - /* - * If we were masked against the clock, just memorize pc - * and the fact that the clock interrupt is delayed - */ - if (SPL_CMP_GE((old_ipl & 0xFF), SPL7)) { - assert(!usermode); - if (missed_clock[mycpu]++ && detect_lost_tick > 1) - Debugger("Mach_KPROF"); - masked_pc[mycpu] = pc; - } else -#endif /* MACH_KPROF */ - - hertz_tick(usermode, pc); - #if NCPUS >1 /* * Instead of having the master processor interrupt diff --git a/osfmk/i386/hw_lock_types.h b/osfmk/i386/hw_lock_types.h index 1ec07fb80..cfd99dbaf 100644 --- a/osfmk/i386/hw_lock_types.h +++ b/osfmk/i386/hw_lock_types.h @@ -86,7 +86,7 @@ * dependent optimizations for the locking constructs defined * later in kern/lock.h.. */ -typedef volatile char hw_lock_data_t; +typedef volatile int hw_lock_data_t; typedef hw_lock_data_t *hw_lock_t; #define hw_lock_addr(hwl) (&(hwl)) diff --git a/osfmk/i386/i386_init.c b/osfmk/i386/i386_init.c new file mode 100644 index 000000000..f004a378a --- /dev/null +++ b/osfmk/i386/i386_init.c @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. 
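
The hardclock changes above classify the interrupted context straight from the saved state: bits 0-1 of CS hold the privilege level, and EFL_VM marks a virtual-8086 task, which counts as user mode regardless of what CS looks like. The same test in C (struct layout abbreviated):

#include <stdint.h>

#define EFL_VM	0x00020000	/* EFLAGS bit 17: virtual-8086 mode */

struct saved_state {
	uint32_t	cs;
	uint32_t	efl;
	uint32_t	eip;
};

/* CPL is CS bits 0-1: 0 = kernel, 3 = user */
static int
trap_from_user(const struct saved_state *regs)
{
	return (regs->efl & EFL_VM) || (regs->cs & 0x03) != 0;
}
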
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if MACH_KDB +#include +#endif /* MACH_KDB */ +#include +#ifdef __MACHO__ +#include +#include + +static KernelBootArgs_t *kernelBootArgs; +#endif + +vm_offset_t boot_args_start = 0; /* pointer to kernel arguments, set in start.s */ + +#ifdef __MACHO__ +#include +vm_offset_t edata, etext, end; + +/* + * Called first for a mach-o kernel before paging is set up. + * Returns the first available physical address in memory. + */ + +unsigned long +i386_preinit() +{ + struct segment_command *sgp; + struct section *sp; + + sgp = (struct segment_command *) getsegbyname("__DATA"); + if (sgp) { + sp = (struct section *) firstsect(sgp); + if (sp) { + do { + if (sp->flags & S_ZEROFILL) + bzero((char *) sp->addr, sp->size); + } while (sp = (struct section *)nextsect(sgp, sp)); + } + } + + kernelBootArgs = (KernelBootArgs_t *) boot_args_start; + end = round_page( kernelBootArgs->kaddr + kernelBootArgs->ksize ); + + return end; +} +#endif + +extern const char version[]; +extern const char version_variant[]; + +/* + * Cpu initialization. Running virtual, but without MACH VM + * set up. 
First C routine called, unless i386_preinit() was called first. + */ +void +i386_init(void) +{ + unsigned int maxmem; + + cpu_init(); + + /* + * Setup some processor related structures to satisfy funnels. + * Must be done before using unparallelized device drivers. + */ + processor_ptr[0] = &processor_array[0]; + master_cpu = 0; + master_processor = cpu_to_processor(master_cpu); + + PE_init_platform(FALSE, kernelBootArgs); + + /* + * Set up initial thread so current_thread() works early on + */ + thread_bootstrap(); + + printf_init(); /* Init this in case we need debugger */ + panic_init(); /* Init this in case we need debugger */ + + /* setup debugging output if one has been chosen */ + PE_init_kprintf(FALSE); + kprintf("kprintf initialized\n"); + + /* setup console output */ + PE_init_printf(FALSE); + + kprintf("version_variant = %s\n", version_variant); + kprintf("version = %s\n", version); + + + /* + * VM initialization, after this we're using page tables... + * The maximum number of cpus must be set beforehand. + */ + if (!PE_parse_boot_arg("maxmem", &maxmem)) + maxmem=0; + else + maxmem = maxmem * (1024 * 1024); + + if (PE_parse_boot_arg("cpus", &wncpu)) { + if (!((wncpu > 0) && (wncpu < NCPUS))) + wncpu = NCPUS; + } else + wncpu = NCPUS; + + i386_vm_init(maxmem, kernelBootArgs); + + PE_init_platform(TRUE, kernelBootArgs); + + /* create the console for verbose or pretty mode */ + PE_create_console(); + + machine_startup(); + +} diff --git a/osfmk/i386/i386_lock.s b/osfmk/i386/i386_lock.s index ed2710ccc..75430cd24 100644 --- a/osfmk/i386/i386_lock.s +++ b/osfmk/i386/i386_lock.s @@ -70,17 +70,17 @@ #endif /* BUILD_STACK_FRAMES */ -#define M_ILK (%edx) -#define M_LOCKED 1(%edx) -#define M_WAITERS 2(%edx) -#define M_PROMOTED_PRI 4(%edx) +#define M_ILK (%edx) +#define M_LOCKED MUTEX_LOCKED(%edx) +#define M_WAITERS MUTEX_WAITERS(%edx) +#define M_PROMOTED_PRI MUTEX_PROMOTED_PRI(%edx) #if MACH_LDEBUG -#define M_TYPE 6(%edx) -#define M_PC 10(%edx) -#define M_THREAD 14(%edx) +#define M_TYPE MUTEX_TYPE(%edx) +#define M_PC MUTEX_PC(%edx) +#define M_THREAD MUTEX_THREAD(%edx) #endif /* MACH_LDEBUG */ -#include +#include #if (NCPUS > 1) #define CX(addr,reg) addr(,reg,4) #else @@ -92,11 +92,11 @@ /* * Routines for general lock debugging. */ -#define S_TYPE 4(%edx) -#define S_PC 8(%edx) -#define S_THREAD 12(%edx) -#define S_DURATIONH 16(%edx) -#define S_DURATIONL 20(%edx) +#define S_TYPE SLOCK_TYPE(%edx) +#define S_PC SLOCK_PC(%edx) +#define S_THREAD SLOCK_THREAD(%edx) +#define S_DURATIONH SLOCK_DURATIONH(%edx) +#define S_DURATIONL SLOCK_DURATIONL(%edx) /* * Checks for expected lock types and calls "panic" on @@ -115,7 +115,7 @@ 1: #define CHECK_SIMPLE_LOCK_TYPE() \ - cmpl $ SIMPLE_LOCK_TAG,S_TYPE ; \ + cmpl $ USLOCK_TAG,S_TYPE ; \ je 1f ; \ pushl $2f ; \ call EXT(panic) ; \ @@ -223,26 +223,24 @@ ENTRY(hw_lock_init) FRAME movl L_ARG0,%edx /* fetch lock pointer */ xorl %eax,%eax - movb %al,0(%edx) /* clear the lock */ + movl %eax,0(%edx) /* clear the lock */ EMARF ret /* * void hw_lock_lock(hw_lock_t) - * unsigned int hw_lock_to(hw_lock_t, unsigned int) * * Acquire lock, spinning until it becomes available. - * XXX: For now, we don't actually implement the timeout. * MACH_RT: also return with preemption disabled. */ -ENTRY2(hw_lock_lock,hw_lock_to) +ENTRY(hw_lock_lock) FRAME movl L_ARG0,%edx /* fetch lock pointer */ 1: DISABLE_PREEMPTION(%eax) - movb $1,%cl - xchgb 0(%edx),%cl /* try to acquire the HW lock */ - testb %cl,%cl /* success? 
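
The i386_lock.s rework above widens the lock word from xchgb/testb to xchgl/testl, matching hw_lock_data_t's change from volatile char to volatile int in hw_lock_types.h earlier in this patch. xchg with a memory operand is implicitly locked, so it serves as the test-and-set; the spin path re-reads the line non-atomically and only retries the xchg once the lock looks free. A user-space rendering, with GCC builtins standing in for the raw instructions:

#include <stdbool.h>

typedef volatile int hw_lock_data_t;

/* __sync_lock_test_and_set compiles to xchgl on i386: an implicitly
 * locked read-modify-write, no "lock" prefix required. */
static bool
hw_lock_try_c(hw_lock_data_t *lock)
{
	return __sync_lock_test_and_set((int *)lock, 1) == 0;
}

static void
hw_lock_lock_c(hw_lock_data_t *lock)
{
	while (!hw_lock_try_c(lock)) {
		/* spin on a plain read so the cache line stays shared */
		while (*lock != 0)
			__asm__ volatile("rep; nop");	/* pause, as above */
	}
}

static void
hw_lock_unlock_c(hw_lock_data_t *lock)
{
	__sync_lock_release((int *)lock);	/* release-ordered store of 0 */
}
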
*/ + movl $1,%ecx + xchgl 0(%edx),%ecx /* try to acquire the HW lock */ + testl %ecx,%ecx /* success? */ jne 3f movl $1,%eax /* In case this was a timeout call */ EMARF /* if yes, then nothing left to do */ @@ -250,11 +248,98 @@ ENTRY2(hw_lock_lock,hw_lock_to) 3: ENABLE_PREEMPTION(%eax) /* no reason we can't be preemptable now */ - movb $1,%cl -2: testb %cl,0(%edx) /* spin checking lock value in cache */ + movl $1,%ecx +2: + rep; nop /* pause for hyper-threading */ + testl %ecx,0(%edx) /* spin checking lock value in cache */ jne 2b /* non-zero means locked, keep spinning */ jmp 1b /* zero means unlocked, try to grab it */ +/* + * unsigned int hw_lock_to(hw_lock_t, unsigned int) + * + * Acquire lock, spinning until it becomes available or timeout. + * MACH_RT: also return with preemption disabled. + */ +ENTRY(hw_lock_to) + FRAME + movl L_ARG0,%edx /* fetch lock pointer */ +1: + /* + * Attempt to grab the lock immediately + * - fastpath without timeout nonsense. + */ + DISABLE_PREEMPTION(%eax) + movl $1,%eax + xchgl 0(%edx),%eax /* try to acquire the HW lock */ + testl %eax,%eax /* success? */ + jne 2f /* no */ + movl $1,%eax /* yes, return true */ + EMARF + ret + +2: +#define INNER_LOOP_COUNT 1000 + /* + * Failed to get the lock so set the timeout + * and then spin re-checking the lock but pausing + * every so many (INNER_LOOP_COUNT) spins to check for timeout. + */ + movl L_ARG1,%ecx /* fetch timeout */ + push %edi + push %ebx + mov %edx,%edi + + rdtsc /* read cyclecount into %edx:%eax */ + addl %ecx,%eax /* fetch and timeout */ + adcl $0,%edx /* add carry */ + mov %edx,%ecx + mov %eax,%ebx /* %ecx:%ebx is the timeout expiry */ +3: + ENABLE_PREEMPTION(%eax) /* no reason not to be preempted now */ +4: + /* + * The inner-loop spin to look for the lock being freed. + */ + movl $1,%eax + mov $(INNER_LOOP_COUNT),%edx +5: + rep; nop /* pause for hyper-threading */ + testl %eax,0(%edi) /* spin checking lock value in cache */ + je 6f /* zero => unlocked, try to grab it */ + decl %edx /* decrement inner loop count */ + jnz 5b /* time to check for timeout? */ + + /* + * Here after spinning INNER_LOOP_COUNT times, check for timeout + */ + rdtsc /* cyclecount into %edx:%eax */ + cmpl %ecx,%edx /* compare high-order 32-bits */ + jb 4b /* continue spinning if less, or */ + cmpl %ebx,%eax /* compare low-order 32-bits */ + jb 5b /* continue is less, else bail */ + xor %eax,%eax /* with 0 return value */ + pop %ebx + pop %edi + EMARF + ret + +6: + /* + * Here to try to grab the lock that now appears to be free + * after contention. + */ + DISABLE_PREEMPTION(%eax) + movl $1,%eax + xchgl 0(%edi),%eax /* try to acquire the HW lock */ + testl %eax,%eax /* success? */ + jne 3b /* no - spin again */ + movl $1,%eax /* yes */ + pop %ebx + pop %edi + EMARF + ret + /* * void hw_lock_unlock(hw_lock_t) * @@ -265,7 +350,7 @@ ENTRY(hw_lock_unlock) FRAME movl L_ARG0,%edx /* fetch lock pointer */ xorl %eax,%eax - xchgb 0(%edx),%al /* clear the lock... a mov instruction */ + xchgl 0(%edx),%eax /* clear the lock... a mov instruction */ /* ...might be cheaper and less paranoid */ ENABLE_PREEMPTION(%eax) EMARF @@ -280,9 +365,9 @@ ENTRY(hw_lock_try) movl L_ARG0,%edx /* fetch lock pointer */ DISABLE_PREEMPTION(%eax) - movb $1,%cl - xchgb 0(%edx),%cl /* try to acquire the HW lock */ - testb %cl,%cl /* success? */ + movl $1,%ecx + xchgl 0(%edx),%ecx /* try to acquire the HW lock */ + testl %ecx,%ecx /* success? 
*/ jne 1f /* if yes, let the caller know */ movl $1,%eax /* success */ @@ -303,8 +388,8 @@ ENTRY(hw_lock_held) FRAME movl L_ARG0,%edx /* fetch lock pointer */ - movb $1,%cl - testb %cl,0(%edx) /* check lock value */ + movl $1,%ecx + testl %ecx,0(%edx) /* check lock value */ jne 1f /* non-zero means locked */ xorl %eax,%eax /* tell caller: lock wasn't locked */ EMARF @@ -323,7 +408,7 @@ ENTRY(_usimple_lock_init) FRAME movl L_ARG0,%edx /* fetch lock pointer */ xorl %eax,%eax - movb %al,USL_INTERLOCK(%edx) /* unlock the HW lock */ + movl %eax,USL_INTERLOCK(%edx) /* unlock the HW lock */ EMARF ret @@ -336,9 +421,9 @@ ENTRY(_simple_lock) DISABLE_PREEMPTION(%eax) sl_get_hw: - movb $1,%cl - xchgb USL_INTERLOCK(%edx),%cl /* try to acquire the HW lock */ - testb %cl,%cl /* did we succeed? */ + movl $1,%ecx + xchgl USL_INTERLOCK(%edx),%ecx/* try to acquire the HW lock */ + testl %ecx,%ecx /* did we succeed? */ #if MACH_LDEBUG je 5f @@ -380,9 +465,9 @@ ENTRY(_simple_lock_try) DISABLE_PREEMPTION(%eax) - movb $1,%cl - xchgb USL_INTERLOCK(%edx),%cl /* try to acquire the HW lock */ - testb %cl,%cl /* did we succeed? */ + movl $1,%ecx + xchgl USL_INTERLOCK(%edx),%ecx/* try to acquire the HW lock */ + testl %ecx,%ecx /* did we succeed? */ jne 1f /* no, return failure */ #if MACH_LDEBUG @@ -445,8 +530,8 @@ ENTRY(_simple_unlock) #endif /* NCPUS == 1 */ #endif /* MACH_LDEBUG */ - xorb %cl,%cl - xchgb USL_INTERLOCK(%edx),%cl /* unlock the HW lock */ + xorl %ecx,%ecx + xchgl USL_INTERLOCK(%edx),%ecx /* unlock the HW lock */ ENABLE_PREEMPTION(%eax) @@ -460,8 +545,8 @@ ENTRY(mutex_init) FRAME movl L_ARG0,%edx /* fetch lock pointer */ xorl %eax,%eax - movb %al,M_ILK /* clear interlock */ - movb %al,M_LOCKED /* clear locked flag */ + movl %eax,M_ILK /* clear interlock */ + movl %eax,M_LOCKED /* clear locked flag */ movw %ax,M_WAITERS /* init waiter count */ movw %ax,M_PROMOTED_PRI @@ -501,14 +586,14 @@ ml_retry: DISABLE_PREEMPTION(%eax) ml_get_hw: - movb $1,%cl - xchgb %cl,M_ILK - testb %cl,%cl /* did we succeed? */ + movl $1,%ecx + xchgl %ecx,M_ILK + testl %ecx,%ecx /* did we succeed? */ jne ml_get_hw /* no, try again */ - movb $1,%cl - xchgb %cl,M_LOCKED /* try to set locked flag */ - testb %cl,%cl /* is the mutex locked? */ + movl $1,%ecx + xchgl %ecx,M_LOCKED /* try to set locked flag */ + testl %ecx,%ecx /* is the mutex locked? */ jne ml_fail /* yes, we lose */ pushl %edx @@ -528,8 +613,8 @@ ml_get_hw: 3: #endif - xorb %cl,%cl - xchgb %cl,M_ILK + xorl %ecx,%ecx + xchgl %ecx,M_ILK ENABLE_PREEMPTION(%eax) @@ -585,14 +670,14 @@ ENTRY2(mutex_try,_mutex_try) DISABLE_PREEMPTION(%eax) mt_get_hw: - movb $1,%cl - xchgb %cl,M_ILK - testb %cl,%cl + movl $1,%ecx + xchgl %ecx,M_ILK + testl %ecx,%ecx jne mt_get_hw - movb $1,%cl - xchgb %cl,M_LOCKED - testb %cl,%cl + movl $1,%ecx + xchgl %ecx,M_LOCKED + testl %ecx,%ecx jne mt_fail pushl %edx @@ -612,8 +697,8 @@ mt_get_hw: 1: #endif - xorb %cl,%cl - xchgb %cl,M_ILK + xorl %ecx,%ecx + xchgl %ecx,M_ILK ENABLE_PREEMPTION(%eax) @@ -639,20 +724,8 @@ mt_get_hw: ret mt_fail: -#if MACH_LDEBUG - movl L_PC,%ecx - movl %ecx,M_PC - movl $ CPD_ACTIVE_THREAD,%ecx - movl %gs:(%ecx),%ecx - movl %ecx,M_THREAD - testl %ecx,%ecx - je 1f - incl TH_MUTEX_COUNT(%ecx) -1: -#endif - - xorb %cl,%cl - xchgb %cl,M_ILK + xorl %ecx,%ecx + xchgl %ecx,M_ILK ENABLE_PREEMPTION(%eax) @@ -693,9 +766,9 @@ ENTRY(mutex_unlock) DISABLE_PREEMPTION(%eax) mu_get_hw: - movb $1,%cl - xchgb %cl,M_ILK - testb %cl,%cl /* did we succeed? */ + movl $1,%ecx + xchgl %ecx,M_ILK + testl %ecx,%ecx /* did we succeed? 
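
The mutex paths keep their two-level structure: M_ILK is a short-lived spin interlock guarding the record, M_LOCKED is the actual ownership flag, and both are now full-word test-and-sets. mutex_try's shape in schematic C (user-space __sync builtins stand in for xchgl; the waiter hand-off in mutex_lock/mutex_unlock is omitted):

#include <stdbool.h>

typedef struct {
	volatile int		ilk;	/* M_ILK: held only briefly */
	volatile int		locked;	/* M_LOCKED: mutex ownership */
	volatile unsigned short	waiters;
} mutex_s;

static bool
mutex_try_c(mutex_s *m)
{
	bool got;

	while (__sync_lock_test_and_set((int *)&m->ilk, 1) != 0)
		;				/* mt_get_hw: spin for interlock */
	got = __sync_lock_test_and_set((int *)&m->locked, 1) == 0;
	__sync_lock_release((int *)&m->ilk);	/* drop interlock either way */
	return got;
}
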
*/ jne mu_get_hw /* no, try again */ cmpw $0,M_WAITERS /* are there any waiters? */ @@ -713,11 +786,11 @@ mu_doit: 0: #endif - xorb %cl,%cl - xchgb %cl,M_LOCKED /* unlock the mutex */ + xorl %ecx,%ecx + xchgl %ecx,M_LOCKED /* unlock the mutex */ - xorb %cl,%cl - xchgb %cl,M_ILK + xorl %ecx,%ecx + xchgl %ecx,M_ILK ENABLE_PREEMPTION(%eax) @@ -737,8 +810,8 @@ ENTRY(interlock_unlock) FRAME movl L_ARG0,%edx - xorb %cl,%cl - xchgb %cl,M_ILK + xorl %ecx,%ecx + xchgl %ecx,M_ILK ENABLE_PREEMPTION(%eax) diff --git a/osfmk/i386/i386_vm_init.c b/osfmk/i386/i386_vm_init.c new file mode 100644 index 000000000..0805b9e3d --- /dev/null +++ b/osfmk/i386/i386_vm_init.c @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef __MACHO__ +#include +#include +#endif + +vm_size_t mem_size = 0; +vm_offset_t first_addr = 0; /* set by start.s - keep out of bss */ +vm_offset_t first_avail = 0;/* first after page tables */ +vm_offset_t last_addr; + +uint64_t max_mem; +uint64_t sane_size; + +vm_offset_t avail_start, avail_end; +vm_offset_t virtual_avail, virtual_end; +vm_offset_t hole_start, hole_end; +vm_offset_t avail_next; +unsigned int avail_remaining; + +/* parameters passed from bootstrap loader */ +int cnvmem = 0; /* must be in .data section */ +int extmem = 0; + +#ifndef __MACHO__ +extern char edata, end; +#endif + +#ifdef __MACHO__ +#include +vm_offset_t edata, etext, end; + +extern struct mach_header _mh_execute_header; +void *sectTEXTB; int sectSizeTEXT; +void *sectDATAB; int sectSizeDATA; +void *sectOBJCB; int sectSizeOBJC; +void *sectLINKB; int sectSizeLINK; +void *sectPRELINKB; int sectSizePRELINK; + +#endif + +/* + * Basic VM initialization. + */ +void +i386_vm_init(unsigned int maxmem, KernelBootArgs_t *args) +{ + int i,j; /* Standard index vars. */ + vm_size_t bios_hole_size; + +#ifdef __MACHO__ + /* Now retrieve addresses for end, edata, and etext + * from MACH-O headers. + */ + + sectTEXTB = (void *) getsegdatafromheader( + &_mh_execute_header, "__TEXT", §SizeTEXT); + sectDATAB = (void *) getsegdatafromheader( + &_mh_execute_header, "__DATA", §SizeDATA); + sectOBJCB = (void *) getsegdatafromheader( + &_mh_execute_header, "__OBJC", §SizeOBJC); + sectLINKB = (void *) getsegdatafromheader( + &_mh_execute_header, "__LINKEDIT", §SizeLINK); + sectPRELINKB = (void *) getsegdatafromheader( + &_mh_execute_header, "__PRELINK", §SizePRELINK); + + etext = (vm_offset_t) sectTEXTB + sectSizeTEXT; + edata = (vm_offset_t) sectDATAB + sectSizeDATA; +#endif +#ifndef __MACHO__ + /* + * Zero the BSS. + */ + + bzero((char *)&edata,(unsigned)(&end - &edata)); +#endif + + /* Now copy over various boot args bits.. */ + cnvmem = args->convmem; + extmem = args->extmem; + + /* + * Initialize the pic prior to any possible call to an spl. + */ + + set_cpu_model(); + vm_set_page_size(); + + /* + * Initialize the Event Trace Analysis Package + * Static Phase: 1 of 2 + */ + etap_init_phase1(); + + /* + * Compute the memory size. + */ + +#if NCPUS > 1 + /* First two pages are used to boot the other cpus. */ + /* TODO - reclaim pages after all cpus have booted */ + + first_addr = MP_FIRST_ADDR; +#else + first_addr = 0x1000; +#endif + + /* BIOS leaves data in low memory */ + last_addr = 1024*1024 + extmem*1024; + + /* extended memory starts at 1MB */ + + bios_hole_size = 1024*1024 - trunc_page((vm_offset_t)(1024 * cnvmem)); + + /* + * Initialize for pmap_free_pages and pmap_next_page. + * These guys should be page-aligned. + */ + + hole_start = trunc_page((vm_offset_t)(1024 * cnvmem)); + hole_end = round_page((vm_offset_t)first_avail); + + /* + * compute mem_size + */ + + /* + * We're currently limited to 512 MB max physical memory. 
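
The sizing code that follows treats cnvmem/extmem as KB counts reported by the BIOS: physical memory is contiguous except for the hole between the end of conventional memory and 1MB, so usable memory is last_addr minus that hole. A worked example with typical values (the numbers are illustrative, not from the source):

#include <stdio.h>

#define MB (1024UL * 1024UL)

int
main(void)
{
	unsigned int cnvmem = 640;	/* KB below 1MB (conventional) */
	unsigned int extmem = 130048;	/* KB above 1MB (extended): 127MB */

	unsigned long last_addr = 1 * MB + (unsigned long)extmem * 1024;
	unsigned long hole_start = (unsigned long)cnvmem * 1024;   /* 0xA0000 */
	unsigned long bios_hole = 1 * MB - hole_start;		    /* 384K */
	unsigned long mem_size = last_addr - bios_hole;

	printf("usable: %lu MB, hole 0x%lx-0x100000\n",
	    mem_size / MB, hole_start);
	return 0;
}
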
+ */ +#define M (1024*1024) +#define MAXMEM (512*M) + if ((maxmem == 0) && (last_addr - bios_hole_size > MAXMEM)) { + printf("Physical memory %d MB, "\ + "maximum usable memory limited to %d MB\n", + (last_addr - bios_hole_size)/M, MAXMEM/M); + maxmem = MAXMEM; + } + + if (maxmem != 0) { + if (maxmem < (last_addr) - bios_hole_size) + last_addr = maxmem + bios_hole_size; + } + + first_addr = round_page(first_addr); + last_addr = trunc_page(last_addr); + mem_size = last_addr - bios_hole_size; + + max_mem = (uint64_t)mem_size; + sane_size = max_mem; + + avail_start = first_addr; + avail_end = last_addr; + avail_next = avail_start; + +#if NCPUS > 1 + interrupt_stack_alloc(); +#endif /* NCPUS > 1 */ + + /* + * Initialize kernel physical map. + * Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS. + */ + pmap_bootstrap(0); + + avail_remaining = atop((avail_end - avail_start) - + (hole_end - hole_start)); +} + +unsigned int +pmap_free_pages(void) +{ + return avail_remaining; +} + +boolean_t +pmap_next_page( + ppnum_t *pn) +{ + if (avail_next == avail_end) + return FALSE; + + /* skip the hole */ + + if (avail_next == hole_start) + avail_next = hole_end; + + *pn = (ppnum_t)i386_btop(avail_next); + avail_next += PAGE_SIZE; + avail_remaining--; + + return TRUE; +} + +boolean_t +pmap_valid_page( + vm_offset_t x) +{ + return ((avail_start <= x) && (x < avail_end)); +} diff --git a/osfmk/i386/io_map.c b/osfmk/i386/io_map.c index 9b7c83132..9df7b0016 100644 --- a/osfmk/i386/io_map.c +++ b/osfmk/i386/io_map.c @@ -86,3 +86,10 @@ io_map(phys_addr, size) VM_PROT_READ|VM_PROT_WRITE); return (start); } + +/* just wrap this since io_map handles it */ + +vm_offset_t io_map_spec(vm_offset_t phys_addr, vm_size_t size) +{ + return (io_map(phys_addr, size)); +} diff --git a/osfmk/i386/ldt.c b/osfmk/i386/ldt.c index 50429cb92..aa462b0c0 100644 --- a/osfmk/i386/ldt.c +++ b/osfmk/i386/ldt.c @@ -85,4 +85,9 @@ struct fake_descriptor ldt[LDTSZ] = { SZ_32|SZ_G, ACC_P|ACC_PL_U|ACC_DATA_W }, /* user data segment */ +/*027*/ { 0, + (VM_MAX_ADDRESS-VM_MIN_ADDRESS-1)>>12, + SZ_32|SZ_G, + ACC_P|ACC_PL_U|ACC_DATA_W + }, /* user cthread segment */ }; diff --git a/osfmk/i386/lock.h b/osfmk/i386/lock.h index 3abc1ea93..3090e9677 100644 --- a/osfmk/i386/lock.h +++ b/osfmk/i386/lock.h @@ -286,27 +286,6 @@ extern void bit_unlock( #endif /* !defined(__GNUC__) */ - -#if !(USLOCK_DEBUG || USLOCK_STATS) -/* - * Take responsibility for production-quality usimple_locks. - * Let the portable lock package build simple_locks in terms - * of usimple_locks, which is done efficiently with macros. - * Currently, these aren't inlined although they probably - * should be. The portable lock package is used for the - * usimple_lock prototypes and data declarations. - * - * For non-production configurations, punt entirely to the - * portable lock package. - * - * N.B. I've left in the hooks for ETAP, so we can - * compare the performance of stats-gathering on top - * of "production" locks v. stats-gathering on top - * of portable, C-based locks. - */ -#define USIMPLE_LOCK_CALLS -#endif /* !(USLOCK_DEBUG || USLOCK_STATS) */ - extern void kernel_preempt_check (void); #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/i386/locore.s b/osfmk/i386/locore.s index 85c25b5a1..c2635613b 100644 --- a/osfmk/i386/locore.s +++ b/osfmk/i386/locore.s @@ -71,7 +71,7 @@ #include #include -#include +#include #define PREEMPT_DEBUG_LOG 0 @@ -651,12 +651,17 @@ Entry(t_debug) testl $3,4(%esp) /* is trap from kernel mode? 
*/ jnz 0f /* if so: */ cmpl $syscall_entry,(%esp) /* system call entry? */ - jne 0f /* if so: */ + jne 1f /* if so: */ /* flags are sitting where syscall */ /* wants them */ addl $8,%esp /* remove eip/cs */ jmp syscall_entry_2 /* continue system call entry */ +1: cmpl $trap_unix_addr,(%esp) + jne 0f + addl $8,%esp + jmp trap_unix_2 + 0: pushl $0 /* otherwise: */ pushl $(T_DEBUG) /* handle as normal */ jmp EXT(alltraps) /* debug fault */ @@ -774,9 +779,6 @@ LEXT(return_to_user) jnz EXT(return_xfer_stack) movl $ CPD_ACTIVE_THREAD,%ebx movl %gs:(%ebx),%ebx /* get active thread */ - movl TH_TOP_ACT(%ebx),%ebx /* get thread->top_act */ - cmpl $0,ACT_KLOADING(%ebx) /* check if kernel-loading */ - jnz EXT(return_kernel_loading) #if MACH_RT #if MACH_ASSERT @@ -842,13 +844,10 @@ LEXT(return_kernel_loading) movl CX(EXT(kernel_stack),%eax),%esp movl $ CPD_ACTIVE_THREAD,%ebx movl %gs:(%ebx),%ebx /* get active thread */ - movl TH_TOP_ACT(%ebx),%ebx /* get thread->top_act */ movl %ebx,%edx /* save for later */ - movl $0,ACT_KLOADING(%edx) /* clear kernel-loading bit */ FRAME_PCB_TO_STACK(%ebx) movl %ebx,%esp /* start running on new stack */ - movl $1,ACT_KLOADED(%edx) /* set kernel-loaded bit */ - movl %edx,CX(EXT(active_kloaded),%eax) /* set cached indicator */ + movl $0,CX(EXT(active_kloaded),%eax) /* set cached indicator */ jmp EXT(return_from_kernel) /* @@ -1055,11 +1054,13 @@ Entry(call_continuation) #define CHECK_INTERRUPT_TIME(n) #endif +.data imsg_start: String "interrupt start" imsg_end: String "interrupt end" +.text /* * All interrupts enter here. * old %eax on stack; interrupt number in %eax. @@ -1074,6 +1075,8 @@ Entry(all_intrs) pushl %ds /* save segment registers */ pushl %es + pushl %fs + pushl %gs mov %ss,%dx /* switch to kernel segments */ mov %dx,%ds mov %dx,%es @@ -1083,7 +1086,7 @@ Entry(all_intrs) CPU_NUMBER(%edx) movl CX(EXT(int_stack_top),%edx),%ecx - movl 20(%esp),%edx /* get eip */ + movl %esp,%edx /* & i386_interrupt_state */ xchgl %ecx,%esp /* switch to interrupt stack */ #if STAT_TIME @@ -1095,7 +1098,7 @@ Entry(all_intrs) TIME_INT_ENTRY /* do timing */ #endif - pushl %edx /* pass eip to pe_incoming_interrupt */ + pushl %edx /* pass &i386_interrupt_state to pe_incoming_interrupt */ #if MACH_RT movl $ CPD_PREEMPTION_LEVEL,%edx @@ -1182,6 +1185,8 @@ LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */ #endif /* MACH_RT */ 1: + pop %gs + pop %fs pop %es /* restore segment regs */ pop %ds pop %edx @@ -1198,13 +1203,14 @@ int_from_intstack: movl $ CPD_INTERRUPT_LEVEL,%edx incl %gs:(%edx) - movl 12(%esp),%edx - pushl %edx /* push eip */ + subl $16, %esp /* dummy ds, es, fs, gs */ + movl %esp, %edx /* &i386_interrupt_state */ + pushl %edx /* pass &i386_interrupt_state to PE_incoming_interrupt */ pushl %eax /* Push trap number */ call EXT(PE_incoming_interrupt) - addl $4,%esp /* pop eip */ + addl $20,%esp /* pop i386_interrupt_state, dummy gs,fs,es,ds */ LEXT(return_to_iret_i) /* ( label for kdb_kintr) */ @@ -1238,6 +1244,8 @@ LEXT(return_to_iret_i) /* ( label for kdb_kintr) */ * ss */ ast_from_interrupt: + pop %gs + pop %fs pop %es /* restore all registers ...
*/ pop %ds popl %edx @@ -1561,9 +1569,22 @@ Entry(mach_rpc) * ebx contains user regs pointer */ 2: + + pushl %ebx /* arg ptr */ + pushl %eax /* call # - preserved across */ + call EXT(mach_call_start) + addl $ 8, %esp + movl %eax, %ebx /* need later */ + CAH(call_call) call *EXT(mach_trap_table)+4(%eax) /* call procedure */ + + pushl %eax /* retval */ + pushl %ebx /* call # */ + call EXT(mach_call_end) + addl $ 8, %esp + movl %esp,%ecx /* get kernel stack */ or $(KERNEL_STACK_SIZE-1),%ecx movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */ @@ -1687,8 +1708,6 @@ syscall_entry_3: 1: movl $ CPD_ACTIVE_THREAD,%edx movl %gs:(%edx),%edx /* get active thread */ - /* point to current thread */ - movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */ movl ACT_TASK(%edx),%edx /* point to task */ movl TASK_EMUL(%edx),%edx /* get emulation vector */ orl %edx,%edx /* if none, */ @@ -1725,8 +1744,6 @@ syscall_native: movl $ CPD_ACTIVE_THREAD,%edx movl %gs:(%edx),%edx /* get active thread */ - /* point to current thread */ - movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */ movl ACT_TASK(%edx),%edx /* point to task */ movl TASK_EMUL(%edx),%edx /* get emulation vector */ orl %edx,%edx /* if it exists, */ @@ -1794,7 +1811,20 @@ mach_call_call: #endif /* ETAP_EVENT_MONITOR */ make_syscall: + + pushl %ebx /* arg ptr */ + pushl %eax /* call # - preserved across */ + call EXT(mach_call_start) + addl $ 8, %esp + movl %eax, %ebx /* need later */ + call *EXT(mach_trap_table)+4(%eax) /* call procedure */ + + pushl %eax /* retval */ + pushl %ebx /* call # */ + call EXT(mach_call_end) + addl $ 8, %esp + skip_syscall: movl %esp,%ecx /* get kernel stack */ @@ -1829,8 +1859,6 @@ mach_call_addr: mach_call_range: movl $ CPD_ACTIVE_THREAD,%edx movl %gs:(%edx),%edx /* get active thread */ - - movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */ movl ACT_TASK(%edx),%edx /* point to task */ movl TASK_EMUL(%edx),%edx /* get emulation vector */ orl %edx,%edx /* if emulator, */ @@ -1946,7 +1974,6 @@ ENTRY(copyin) movl $ CPD_ACTIVE_THREAD,%ecx movl %gs:(%ecx),%ecx /* get active thread */ - movl TH_TOP_ACT(%ecx),%ecx /* get thread->top_act */ movl ACT_MAP(%ecx),%ecx /* get act->map */ movl MAP_PMAP(%ecx),%ecx /* get map->pmap */ cmpl EXT(kernel_pmap), %ecx @@ -2001,7 +2028,6 @@ Entry(copyinstr) movl $ CPD_ACTIVE_THREAD,%ecx movl %gs:(%ecx),%ecx /* get active thread */ - movl TH_TOP_ACT(%ecx),%ecx /* get thread->top_act */ movl ACT_MAP(%ecx),%ecx /* get act->map */ movl MAP_PMAP(%ecx),%ecx /* get map->pmap */ cmpl EXT(kernel_pmap), %ecx @@ -2029,11 +2055,10 @@ Entry(copyinstr) je 5f /* Zero count.. error out */ cmpl $0,%eax jne 2b /* .. a NUL found? */ - jmp 4f + jmp 4f /* return zero (%eax) */ 5: movl $ ENAMETOOLONG,%eax /* String is too long.. 
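
Both Mach trap entry paths above now bracket the dispatch through mach_trap_table with mach_call_start()/mach_call_end(), keeping the start hook's return value in %ebx so the end hook can see it alongside the result. The control flow in C — the hook signatures and the "token" name are inferred from the pushed arguments, not taken from a header:

typedef int (*mach_trap_fn_t)(unsigned int *args);

struct mach_trap {
	int		arg_count;
	mach_trap_fn_t	function;
};

extern struct mach_trap	mach_trap_table[];
extern unsigned int	mach_call_start(unsigned int call, unsigned int *args);
extern void		mach_call_end(unsigned int call, int retval);

/* schematic of the bracketing around "call *EXT(mach_trap_table)+4(%eax)" */
static int
mach_call_dispatch(unsigned int call, unsigned int *args)
{
	unsigned int token;
	int retval;

	token = mach_call_start(call, args);	/* pre-hook; kept in %ebx */
	retval = mach_trap_table[call].function(args);
	mach_call_end(token, retval);		/* post-hook sees the result */
	return retval;
}
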
*/ 4: - xorl %eax,%eax /* return zero for success */ movl 8+S_ARG3,%edi /* get OUT len ptr */ cmpl $0,%edi jz copystr_ret /* if null, just return */ @@ -2068,7 +2093,6 @@ ENTRY(copyout) movl $ CPD_ACTIVE_THREAD,%ecx movl %gs:(%ecx),%ecx /* get active thread */ - movl TH_TOP_ACT(%ecx),%ecx /* get thread->top_act */ movl ACT_MAP(%ecx),%ecx /* get act->map */ movl MAP_PMAP(%ecx),%ecx /* get map->pmap */ cmpl EXT(kernel_pmap), %ecx @@ -2102,41 +2126,6 @@ copyout_retry: subl %edi,%edx / movl %edi,%ebx /* ebx = edi; */ - mov %es,%cx - cmpl $ USER_DS,%cx /* If kernel data segment */ - jnz 0f /* skip check */ - - cmpb $(CPUID_FAMILY_386), EXT(cpuid_family) - ja 0f - - movl %cr3,%ecx /* point to page directory */ -#if NCPUS > 1 - andl $(~0x7), %ecx /* remove cpu number */ -#endif /* NCPUS > 1 && AT386 */ - movl %edi,%eax /* get page directory bits */ - shrl $(PDESHIFT),%eax /* from user address */ - movl KERNELBASE(%ecx,%eax,4),%ecx - /* get page directory pointer */ - testl $(PTE_V),%ecx /* present? */ - jz 0f /* if not, fault is OK */ - andl $(PTE_PFN),%ecx /* isolate page frame address */ - movl %edi,%eax /* get page table bits */ - shrl $(PTESHIFT),%eax - andl $(PTEMASK),%eax /* from user address */ - leal KERNELBASE(%ecx,%eax,4),%ecx - /* point to page table entry */ - movl (%ecx),%eax /* get it */ - testl $(PTE_V),%eax /* present? */ - jz 0f /* if not, fault is OK */ - testl $(PTE_W),%eax /* writable? */ - jnz 0f /* OK if so */ -/* - * Not writable - must fake a fault. Turn off access to the page. - */ - andl $(PTE_INVALID),(%ecx) /* turn off valid bit */ - movl %cr3,%eax /* invalidate TLB */ - movl %eax,%cr3 -0: /* * Copy only what fits on the current destination page. * Check for write-fault again on the next page. @@ -2776,79 +2765,6 @@ ENTRY(dr_addr) .long 0,0,0,0 .text -/* - * Determine cpu model and set global cpuid_xxx variables - * - * Relies on 386 eflags bit 18 (AC) always being zero & 486 preserving it. - * Relies on 486 eflags bit 21 (ID) always being zero & 586 preserving it. 
- * Relies on CPUID instruction for next x86 generations - * (assumes cpuid-family-homogenous MPs; else convert to per-cpu array) - */ - -ENTRY(set_cpu_model) - FRAME - pushl %ebx /* save ebx */ - andl $~0x3,%esp /* Align stack to avoid AC fault */ - pushfl /* push EFLAGS */ - popl %eax /* pop into eax */ - movl %eax,%ecx /* Save original EFLAGS */ - xorl $(EFL_AC+EFL_ID),%eax /* toggle ID,AC bits */ - pushl %eax /* push new value */ - popfl /* through the EFLAGS register */ - pushfl /* and back */ - popl %eax /* into eax */ - movb $(CPUID_FAMILY_386),EXT(cpuid_family) - pushl %ecx /* push original EFLAGS */ - popfl /* restore EFLAGS */ - xorl %ecx,%eax /* see what changed */ - testl $ EFL_AC,%eax /* test AC bit */ - jz 0f /* if AC toggled (486 or higher) */ - - movb $(CPUID_FAMILY_486),EXT(cpuid_family) - testl $ EFL_ID,%eax /* test ID bit */ - jz 0f /* if ID toggled use cpuid instruction */ - - xorl %eax,%eax /* get vendor identification string */ - .word 0xA20F /* cpuid instruction */ - movl %eax,EXT(cpuid_value) /* Store high value */ - movl %ebx,EXT(cpuid_vid) /* Store byte 0-3 of Vendor ID */ - movl %edx,EXT(cpuid_vid)+4 /* Store byte 4-7 of Vendor ID */ - movl %ecx,EXT(cpuid_vid)+8 /* Store byte 8-B of Vendor ID */ - movl $1,%eax /* get processor signature */ - .word 0xA20F /* cpuid instruction */ - movl %edx,EXT(cpuid_feature) /* Store feature flags */ - movl %eax,%ecx /* Save original signature */ - andb $0xF,%al /* Get Stepping ID */ - movb %al,EXT(cpuid_stepping) /* Save Stepping ID */ - movl %ecx,%eax /* Get original signature */ - shrl $4,%eax /* Shift Stepping ID */ - movl %eax,%ecx /* Save original signature */ - andb $0xF,%al /* Get Model */ - movb %al,EXT(cpuid_model) /* Save Model */ - movl %ecx,%eax /* Get original signature */ - shrl $4,%eax /* Shift Stepping ID */ - movl %eax,%ecx /* Save original signature */ - andb $0xF,%al /* Get Family */ - movb %al,EXT(cpuid_family) /* Save Family */ - movl %ecx,%eax /* Get original signature */ - shrl $4,%eax /* Shift Stepping ID */ - andb $0x3,%al /* Get Type */ - movb %al,EXT(cpuid_type) /* Save Type */ - - movl EXT(cpuid_value),%eax /* Get high value */ - cmpl $2,%eax /* Test if processor configuration */ - jle 0f /* is present */ - movl $2,%eax /* get processor configuration */ - .word 0xA20F /* cpuid instruction */ - movl %eax,EXT(cpuid_cache) /* Store byte 0-3 of configuration */ - movl %ebx,EXT(cpuid_cache)+4 /* Store byte 4-7 of configuration */ - movl %ecx,EXT(cpuid_cache)+8 /* Store byte 8-B of configuration */ - movl %edx,EXT(cpuid_cache)+12 /* Store byte C-F of configuration */ -0: - popl %ebx /* restore ebx */ - EMARF - ret /* return */ - ENTRY(get_cr0) movl %cr0, %eax ret @@ -2982,25 +2898,6 @@ ENTRY(jail) #endif /* NCPUS > 1 */ -/* - * delay(microseconds) - */ - -ENTRY(delay) - movl 4(%esp),%eax - testl %eax, %eax - jle 3f - movl EXT(delaycount), %ecx -1: - movl %ecx, %edx -2: - decl %edx - jne 2b - decl %eax - jne 1b -3: - ret - /* * unsigned int * div_scale(unsigned int dividend, @@ -3074,19 +2971,15 @@ ENTRY(mul_scale) POP_FRAME ret -#if NCPUS > 1 -ENTRY(_cpu_number) - CPU_NUMBER(%eax) - ret -#endif /* NCPUS > 1 */ - #ifdef MACH_BSD /* * BSD System call entry point.. 
*/ Entry(trap_unix_syscall) +trap_unix_addr: pushf /* save flags as soon as possible */ +trap_unix_2: pushl %eax /* save system call number */ pushl $0 /* clear trap number slot */ diff --git a/osfmk/i386/loose_ends.c b/osfmk/i386/loose_ends.c index 8fd9af175..e37bc2a7c 100644 --- a/osfmk/i386/loose_ends.c +++ b/osfmk/i386/loose_ends.c @@ -60,13 +60,24 @@ #include #include #include +#include +#include #include +#define value_64bit(value) ((value) & 0xFFFFFFFF00000000LL) +#define low32(x) ((unsigned int)((x) & 0x00000000FFFFFFFFLL)) + /* * Should be rewritten in asm anyway. */ +void +bzero_phys(addr64_t p, uint32_t len) +{ + bzero((char *)phystokv(low32(p)), len); +} + /* * copy 'size' bytes from physical to physical address * the caller must validate the physical ranges @@ -77,6 +88,8 @@ * if flush_action == 3, flush both source and dest */ +extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); + kern_return_t copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, unsigned int flush_action) { switch(flush_action) { @@ -92,7 +105,7 @@ kern_return_t copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, u break; } - bcopy_phys((char *)source, (char *)dest, size); /* Do a physical copy */ + bcopy_phys((addr64_t)source, (addr64_t)dest, (vm_size_t)size); /* Do a physical copy */ switch(flush_action) { case 1: @@ -107,6 +120,7 @@ kern_return_t copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, u break; } + return KERN_SUCCESS; } @@ -116,12 +130,26 @@ kern_return_t copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, u * move data from the kernel to user state. * */ - +#if 0 kern_return_t copyp2v(char *from, char *to, unsigned int size) { return(copyout(phystokv(from), to, size)); } +#endif + +/* + * Copies data from a virtual page to a physical page. This is used to + * move data from the user address space into the kernel. + * + */ +#if 0 +kern_return_t +copyv2p(char *from, char *to, unsigned int size) { + + return(copyin(from, phystokv(to), size)); +} +#endif /* * bcopy_phys - like bcopy but copies from/to physical addresses. 
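The loose_ends.c hunks establish the convention used by the rest of this patch's i386 physical accessors: physical addresses travel as 64-bit addr64_t, but this pmap only reaches the low 4GB, so callers truncate through low32() after checking value_64bit() (bcopy_phys in the next hunk panics on a high address rather than truncate silently). A self-contained sketch of the convention; phystokv_sketch() is a stand-in for the kernel's real phystokv():

#include <stdint.h>
#include <string.h>

typedef uint64_t addr64_t;

#define value_64bit(v) ((v) & 0xFFFFFFFF00000000ULL)
#define low32(x)       ((uint32_t)((x) & 0x00000000FFFFFFFFULL))

/* Stand-in for the kernel's phystokv(); identity only for illustration. */
static void *phystokv_sketch(uint32_t pa) { return (void *)(uintptr_t)pa; }

static void bzero_phys_sketch(addr64_t p, uint32_t len)
{
    /* A 32-bit pmap cannot reach above 4GB; the kernel code panics
     * on a high address rather than truncate silently. */
    if (value_64bit(p))
        return;   /* kernel: panic() */
    memset(phystokv_sketch(low32(p)), 0, len);
}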
@@ -130,9 +158,12 @@ copyp2v(char *from, char *to, unsigned int size) { */ void -bcopy_phys(const char *from, char *to, vm_size_t bytes) +bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes) { - bcopy((char *)phystokv(from), (char *)phystokv(to), bytes); + /* this will die horribly if we ever run off the end of a page */ + if ( value_64bit(from) || value_64bit(to)) panic("bcopy_phys: 64 bit value"); + bcopy((char *)phystokv(low32(from)), + (char *)phystokv(low32(to)), bytes); } @@ -306,3 +337,111 @@ void machine_callstack( } #endif /* MACH_ASSERT */ + + + + +void fillPage(ppnum_t pa, unsigned int fill) +{ + unsigned int *addr = (unsigned int *)phystokv(i386_ptob(pa)); + int i; + int cnt = NBPG/sizeof(unsigned int); + + for (i = 0; i < cnt ; i++ ) + *addr++ = fill; +} + +#define cppvPHYS (cppvPsnk|cppvPsrc) + +kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which) +{ + char *src32, *dst32; + + if (value_64bit(source) | value_64bit(sink)) panic("copypv: 64 bit value"); + + src32 = (char *)low32(source); + dst32 = (char *)low32(sink); + + if (which & cppvFsrc) flush_dcache(source, size, 1); /* If requested, flush source before move */ + if (which & cppvFsnk) flush_dcache(sink, size, 1); /* If requested, flush sink before move */ + + switch (which & cppvPHYS) { + + case cppvPHYS: + /* + * both destination and source are physical + */ + bcopy_phys(source, sink, (vm_size_t)size); + break; + + case cppvPsnk: + /* + * destination is physical, source is virtual + */ + if (which & cppvKmap) + /* + * source is kernel virtual + */ + bcopy(src32, (char *)phystokv(dst32), size); + else + /* + * source is user virtual + */ + copyin(src32, (char *)phystokv(dst32), size); + break; + + case cppvPsrc: + /* + * source is physical, destination is virtual + */ + if (which & cppvKmap) + /* + * destination is kernel virtual + */ + bcopy((char *)phystokv(src32), dst32, size); + else + /* + * destination is user virtual + */ + copyout((char *)phystokv(src32), dst32, size); + break; + + default: + panic("copypv: both virtual"); + } + + if (which & cppvFsrc) flush_dcache(source, size, 1); /* If requested, flush source before move */ + if (which & cppvFsnk) flush_dcache(sink, size, 1); /* If requested, flush sink before move */ + + return KERN_SUCCESS; +} + + +void flush_dcache64(addr64_t addr, unsigned count, int phys) +{ +} + +void invalidate_icache64(addr64_t addr, unsigned cnt, int phys) +{ +} + + +void switch_to_serial_console(void) +{ +} + +addr64_t vm_last_addr; + +void +mapping_set_mod(ppnum_t pn) +{ + pmap_set_modify(pn); +} + +boolean_t +mutex_preblock( + mutex_t *mutex, + thread_t thread) +{ + return (FALSE); +} diff --git a/osfmk/i386/machdep_call.c b/osfmk/i386/machdep_call.c index 2b141f2d1..0bfa52c96 100644 --- a/osfmk/i386/machdep_call.c +++ b/osfmk/i386/machdep_call.c @@ -40,6 +40,7 @@ extern kern_return_t kern_invalid(); extern kern_return_t thread_get_cthread_self(); extern kern_return_t thread_set_cthread_self(); +extern kern_return_t thread_fast_set_cthread_self(); extern kern_return_t PCcreate(), PCldt(), PCresume(); extern kern_return_t PCcopyBIOSData(), PCmapBIOSRom(); extern kern_return_t PCsizeBIOSExtData(), PCcopyBIOSExtData(); @@ -57,6 +58,10 @@ machdep_call_t machdep_call_table[] = { kern_invalid, /* old th_create() */ 0 }, + { + thread_fast_set_cthread_self, + 1 + }, #ifdef FIXME { PCcreate, diff --git a/osfmk/i386/machine_cpu.h b/osfmk/i386/machine_cpu.h new file mode 100644 index 000000000..1694cb38d --- /dev/null +++ b/osfmk/i386/machine_cpu.h @@ -0,0 
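copypv() above folds four copy shapes behind flag bits: each side is physical or virtual, a virtual side is kernel or user depending on cppvKmap, and the cppvF* bits bracket the move with cache flushes. A compact restatement of the dispatch; the flag values here are illustrative, only the names come from the patch:

enum {                       /* illustrative values, not the kernel's */
    cppvPsnk = 1 << 0,       /* sink (destination) is physical   */
    cppvPsrc = 1 << 1,       /* source is physical               */
    cppvKmap = 1 << 2        /* virtual side is kernel, not user */
};

/* Which primitive copypv() ends up using for a given flag word. */
static const char *copypv_shape(int which)
{
    switch (which & (cppvPsrc | cppvPsnk)) {
    case cppvPsrc | cppvPsnk:
        return "phys -> phys: bcopy_phys";
    case cppvPsnk:
        return (which & cppvKmap) ? "kernel virt -> phys: bcopy"
                                  : "user virt -> phys: copyin";
    case cppvPsrc:
        return (which & cppvKmap) ? "phys -> kernel virt: bcopy"
                                  : "phys -> user virt: copyout";
    default:
        return "virt -> virt: unsupported, panics";
    }
}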
+1,56 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _I386_MACHINE_CPU_H_ +#define _I386_MACHINE_CPU_H_ + +#include +#include +#include +#include + +void cpu_machine_init( + void); + +kern_return_t cpu_register( + int *); + +kern_return_t cpu_start( + int); + +void cpu_doshutdown( + void); + +void cpu_sleep( + void); + +struct i386_interrupt_state; +void cpu_signal_handler( + struct i386_interrupt_state *regs); + +static inline void cpu_pause(void) +{ + asm volatile( "rep; nop" ); +} +#endif /* _I386_MACHINE_CPU_H_ */ diff --git a/osfmk/i386/machine_routines.c b/osfmk/i386/machine_routines.c index 0e358d657..754f3b82f 100644 --- a/osfmk/i386/machine_routines.c +++ b/osfmk/i386/machine_routines.c @@ -24,8 +24,19 @@ */ #include #include +#include +#include +#include #include #include +#include +#include +#include + +static int max_cpus_initialized = 0; + +#define MAX_CPUS_SET 0x1 +#define MAX_CPUS_WAIT 0x2 /* IO memory map services */ @@ -68,6 +79,12 @@ vm_offset_t ml_vtophys( /* Interrupt handling */ +/* Initialize Interrupts */ +void ml_init_interrupt(void) +{ + (void) ml_set_interrupts_enabled(TRUE); +} + /* Get Interrupts Enabled */ boolean_t ml_get_interrupts_enabled(void) { @@ -109,7 +126,19 @@ void ml_thread_policy( unsigned policy_id, unsigned policy_info) { - return; + if (policy_id == MACHINE_GROUP) + thread_bind(thread, master_processor); + + if (policy_info & MACHINE_NETWORK_WORKLOOP) { + spl_t s = splsched(); + + thread_lock(thread); + + set_priority(thread, thread->priority + 1); + + thread_unlock(thread); + splx(s); + } } /* Initialize Interrupts */ @@ -128,34 +157,119 @@ void ml_install_interrupt_handler( (IOInterruptHandler) handler, refCon); (void) ml_set_interrupts_enabled(current_state); + + initialize_screen(0, kPEAcquireScreen); +} + +void +machine_idle(void) +{ + DBGLOG(cpu_handle, cpu_number(), MP_IDLE); + __asm__ volatile("sti; hlt": : :"memory"); + __asm__ volatile("cli"); + DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE); } void machine_signal_idle( processor_t processor) { + cpu_interrupt(processor->slot_num); +} + +kern_return_t +ml_processor_register( + cpu_id_t cpu_id, + uint32_t lapic_id, + processor_t *processor, + ipi_handler_t *ipi_handler, + boolean_t boot_cpu) +{ + kern_return_t ret; + int target_cpu; + + if (cpu_register(&target_cpu) != KERN_SUCCESS) + return KERN_FAILURE; + + assert((boot_cpu && (target_cpu == 0)) || + (!boot_cpu && (target_cpu != 0))); + + lapic_cpu_map(lapic_id, target_cpu); + cpu_data[target_cpu].cpu_id = cpu_id; + 
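cpu_pause() in the new machine_cpu.h is "rep; nop", the byte encoding of PAUSE: older CPUs execute it as a plain NOP, newer ones take it as a hint that the caller is spin-waiting, easing pipeline flushes and sibling-thread contention. mp.c below uses it while polling the ICR delivery-pending bit; the idiomatic shape is:

#include <stdbool.h>

static inline void cpu_pause_sketch(void)
{
    __asm__ volatile("rep; nop");   /* PAUSE; decodes as NOP on older parts */
}

/* Spin until another CPU raises *flag; volatile forces a re-read on
 * each iteration, PAUSE keeps the spin polite to the pipeline. */
static void spin_wait(volatile bool *flag)
{
    while (!*flag)
        cpu_pause_sketch();
}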
cpu_data[target_cpu].cpu_phys_number = lapic_id; + *processor = cpu_to_processor(target_cpu); + *ipi_handler = NULL; + + return KERN_SUCCESS; } void ml_cpu_get_info(ml_cpu_info_t *cpu_info) { + boolean_t os_supports_sse; + i386_cpu_info_t *cpuid_infop; + + if (cpu_info == NULL) + return; + + /* + * Are we supporting XMM/SSE/SSE2? + * As distinct from whether the cpu has these capabilities. + */ + os_supports_sse = get_cr4() & CR4_XMM; + if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse) + cpu_info->vector_unit = 4; + else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse) + cpu_info->vector_unit = 3; + else if (cpuid_features() & CPUID_FEATURE_MMX) + cpu_info->vector_unit = 2; + else + cpu_info->vector_unit = 0; + + cpuid_infop = cpuid_info(); + + cpu_info->cache_line_size = cpuid_infop->cache_linesize; + + cpu_info->l1_icache_size = cpuid_infop->cache_size[L1I]; + cpu_info->l1_dcache_size = cpuid_infop->cache_size[L1D]; + + cpu_info->l2_settings = 1; + cpu_info->l2_cache_size = cpuid_infop->cache_size[L2U]; + + /* XXX No L3 */ + cpu_info->l3_settings = 0; + cpu_info->l3_cache_size = 0xFFFFFFFF; } void ml_init_max_cpus(unsigned long max_cpus) { + boolean_t current_state; + + current_state = ml_set_interrupts_enabled(FALSE); + if (max_cpus_initialized != MAX_CPUS_SET) { + if (max_cpus > 0 && max_cpus < NCPUS) + machine_info.max_cpus = max_cpus; + if (max_cpus_initialized == MAX_CPUS_WAIT) + wakeup((event_t)&max_cpus_initialized); + max_cpus_initialized = MAX_CPUS_SET; + } + (void) ml_set_interrupts_enabled(current_state); } int ml_get_max_cpus(void) { - return(machine_info.max_cpus); -} + boolean_t current_state; -int -ml_get_current_cpus(void) -{ - return machine_info.avail_cpus; + current_state = ml_set_interrupts_enabled(FALSE); + if (max_cpus_initialized != MAX_CPUS_SET) { + max_cpus_initialized = MAX_CPUS_WAIT; + assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT); + (void)thread_block(THREAD_CONTINUE_NULL); + } + (void) ml_set_interrupts_enabled(current_state); + return(machine_info.max_cpus); } /* Stubs for pc tracing mechanism */ @@ -185,5 +299,12 @@ be_tracing() thread_act_t current_act(void) { - return(current_act_fast()); + return(current_act_fast()); } + +#undef current_thread +thread_t +current_thread(void) +{ + return(current_act_fast()); +} diff --git a/osfmk/i386/machine_routines.h b/osfmk/i386/machine_routines.h index 607db91d0..268262992 100644 --- a/osfmk/i386/machine_routines.h +++ b/osfmk/i386/machine_routines.h @@ -38,6 +38,9 @@ /* Interrupt handling */ +/* Initialize Interrupts */ +void ml_init_interrupt(void); + /* Get Interrupts Enabled */ boolean_t ml_get_interrupts_enabled(void); @@ -52,15 +55,30 @@ void ml_cause_interrupt(void); void ml_get_timebase(unsigned long long *timestamp); +/* Type for the Time Base Enable function */ +typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable); + /* Type for the IPI Hander */ typedef void (*ipi_handler_t)(void); +/* Struct for ml_processor_register */ +struct ml_processor_info { + cpu_id_t cpu_id; + boolean_t boot_cpu; + vm_offset_t start_paddr; + boolean_t supports_nap; + unsigned long l2cr_value; + time_base_enable_t time_base_enable; +}; + +typedef struct ml_processor_info ml_processor_info_t; + /* Register a processor */ kern_return_t ml_processor_register( - cpu_id_t cpu_id, - vm_offset_t start_paddr, - processor_t *processor, - ipi_handler_t *ipi_handler, + cpu_id_t cpu_id, + uint32_t lapic_id, + processor_t *processor, + ipi_handler_t *ipi_handler, boolean_t boot_cpu); /* 
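ml_init_max_cpus()/ml_get_max_cpus() above form a publish-once cell with blocking readers: a reader arriving early marks MAX_CPUS_WAIT and sleeps on the variable's address; the writer stores the value, flips the state to MAX_CPUS_SET, and wakes any waiter. The same protocol restated with a pthread mutex/condvar in place of assert_wait()/thread_block()/wakeup(); the names here are mine, not the kernel's:

#include <pthread.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
static int max_cpus;            /* the published value            */
static int max_cpus_set;        /* 0 until the writer has run     */

void publish_max_cpus(int n)    /* ml_init_max_cpus analogue */
{
    pthread_mutex_lock(&lk);
    if (!max_cpus_set) {
        max_cpus = n;
        max_cpus_set = 1;
        pthread_cond_broadcast(&cv);   /* wakeup((event_t)&...) */
    }
    pthread_mutex_unlock(&lk);
}

int read_max_cpus(void)         /* ml_get_max_cpus analogue */
{
    pthread_mutex_lock(&lk);
    while (!max_cpus_set)              /* assert_wait + thread_block */
        pthread_cond_wait(&cv, &lk);
    pthread_mutex_unlock(&lk);
    return max_cpus;
}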
Initialize Interrupts */ @@ -80,42 +98,65 @@ ml_static_ptovirt( boolean_t ml_probe_read( vm_offset_t paddr, unsigned int *val); +boolean_t ml_probe_read_64( + addr64_t paddr, + unsigned int *val); /* Read physical address byte */ unsigned int ml_phys_read_byte( vm_offset_t paddr); +unsigned int ml_phys_read_byte_64( + addr64_t paddr); /* Read physical address half word */ unsigned int ml_phys_read_half( vm_offset_t paddr); +unsigned int ml_phys_read_half_64( + addr64_t paddr); /* Read physical address word*/ unsigned int ml_phys_read( vm_offset_t paddr); +unsigned int ml_phys_read_64( + addr64_t paddr); unsigned int ml_phys_read_word( vm_offset_t paddr); +unsigned int ml_phys_read_word_64( + addr64_t paddr); /* Read physical address double word */ unsigned long long ml_phys_read_double( vm_offset_t paddr); +unsigned long long ml_phys_read_double_64( + addr64_t paddr); /* Write physical address byte */ void ml_phys_write_byte( vm_offset_t paddr, unsigned int data); +void ml_phys_write_byte_64( + addr64_t paddr, unsigned int data); /* Write physical address half word */ void ml_phys_write_half( vm_offset_t paddr, unsigned int data); +void ml_phys_write_half_64( + addr64_t paddr, unsigned int data); /* Write physical address word */ void ml_phys_write( vm_offset_t paddr, unsigned int data); +void ml_phys_write_64( + addr64_t paddr, unsigned int data); void ml_phys_write_word( vm_offset_t paddr, unsigned int data); +void ml_phys_write_word_64( + addr64_t paddr, unsigned int data); /* Write physical address double word */ void ml_phys_write_double( vm_offset_t paddr, unsigned long long data); +void ml_phys_write_double_64( + addr64_t paddr, unsigned long long data); void ml_static_mfree( vm_offset_t, @@ -159,9 +200,12 @@ vm_offset_t ml_static_malloc( #endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */ +/* Zero bytes starting at a physical address */ +void bzero_phys( + addr64_t phys_address, + uint32_t length); + #ifdef MACH_KERNEL_PRIVATE -/* check pending timers */ -#define machine_clock_assist() void machine_idle(void); @@ -187,10 +231,6 @@ void ml_init_max_cpus( int ml_get_max_cpus( void); -/* Return the current number of CPUs */ -int ml_get_current_cpus( - void); - #endif /* __APPLE_API_PRIVATE */ #endif /* _I386_MACHINE_ROUTINES_H_ */ diff --git a/osfmk/i386/machparam.h b/osfmk/i386/machparam.h index d61055f83..256a1b8f2 100644 --- a/osfmk/i386/machparam.h +++ b/osfmk/i386/machparam.h @@ -58,3 +58,12 @@ * * SPLs are true functions on i386, defined elsewhere. */ + +/* + * XXX Temporary workaround to null out the call to compute_my_priority() + * from thread_quantum_expire() -- which for x86 may occur on the wrong cpu + * and this can lead to run queue corruption. + * Making this slimey re-definition here avoids the need for ifdefs in + * machine-independent code. + */ +#define compute_my_priority(x) diff --git a/osfmk/i386/mcount.s b/osfmk/i386/mcount.s new file mode 100644 index 000000000..77c5a07bc --- /dev/null +++ b/osfmk/i386/mcount.s @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
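machine_routines.h doubles every physical accessor with a _64 variant taking addr64_t. The patch does not show their bodies; given the low32()/value_64bit() convention from loose_ends.c, a plausible shim would reject addresses above 4GB and defer to the existing 32-bit path. This is a guess consistent with bcopy_phys(), not the kernel's actual code:

#include <stdint.h>

typedef uint64_t addr64_t;
typedef uint32_t vm_offset_t;

extern unsigned int ml_phys_read(vm_offset_t paddr);   /* existing 32-bit path */
extern void panic(const char *msg);

#define value_64bit(v) ((v) & 0xFFFFFFFF00000000ULL)

unsigned int ml_phys_read_64_sketch(addr64_t paddr)
{
    /* The i386 pmap of this era maps only the low 4GB, so a high
     * physical address cannot be honored; fail loudly, as
     * bcopy_phys() does, rather than truncate silently. */
    if (value_64bit(paddr))
        panic("ml_phys_read_64: address above 4GB");
    return ml_phys_read((vm_offset_t)paddr);
}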
Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#define __NO_UNDERSCORES__ +#include +#include + +Entry(mcount) + pushl %ebp // setup mcount's frame + movl %esp,%ebp + pushf // save interrupt state + cli // disable interrupts + + // + // Check that %gs, with segment pointing at the per-cpu data area, + // has been set up. C routines (mp_desc_init() in particular) may + // be called very early before this happens. + // + mov %gs,%ax + test %ax,%ax + jz 1f + + // + // Check that this cpu is ready. + // This delays the start of mcounting until a cpu is really prepared. + // + movl %gs:CPD_CPU_STATUS,%eax + testl %eax,%eax + jz 1f + + // + // Test for recursion as indicated by a per-cpu flag. + // Skip if nested, otherwise set the flag and call the C mcount(). + // + movl %gs:CPD_MCOUNT_OFF,%eax + testl %eax,%eax // test for recursion + jnz 1f + incl %gs:CPD_MCOUNT_OFF // set recursion flag + + movl (%ebp),%eax // frame pointer of mcount's caller + movl 4(%eax),%eax // mcount's caller's return address + pushl 4(%ebp) // push selfpc parameter for mcount() + pushl %eax // push frompc parameter for mcount() + call _mcount // call the C mcount + addl $8,%esp // pop args + + decl %gs:CPD_MCOUNT_OFF // turn off recursion flag +1: + popf // restore interrupt state + movl %ebp,%esp // tear down mcount's frame + popl %ebp + ret diff --git a/osfmk/i386/misc_protos.h b/osfmk/i386/misc_protos.h index 7b9caa113..1728a3ab6 100644 --- a/osfmk/i386/misc_protos.h +++ b/osfmk/i386/misc_protos.h @@ -36,6 +36,7 @@ extern void interrupt_processor( extern void mp_probe_cpus(void); extern void remote_kdb(void); extern void clear_kdb_intr(void); +extern void draw_panic_dialog(void); extern void set_cpu_model(void); extern void cpu_shutdown(void); extern void fix_desc( @@ -70,7 +71,5 @@ extern unsigned int mul_scale( unsigned int *scale); /* Move arbitrarily-aligned data from one physical address to another */ -extern void bcopy_phys( - const char *from, - char *to, - vm_size_t nbytes); +extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t nbytes); + diff --git a/osfmk/i386/mp.c b/osfmk/i386/mp.c new file mode 100644 index 000000000..2c67c8e45 --- /dev/null +++ b/osfmk/i386/mp.c @@ -0,0 +1,964 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file.
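The load-bearing detail in mcount.s is CPD_MCOUNT_OFF, a per-CPU recursion latch: the C profiler that mcount calls can itself execute instrumented code, so nested entries must degrade to no-ops instead of recursing until the stack dies. The same latch in C, with a thread-local variable standing in for the %gs-relative per-CPU slot:

/* Reentrancy latch for a profiling hook; __thread plays the role of
 * the %gs-relative CPD_MCOUNT_OFF slot in mcount.s. */
static __thread int mcount_busy;

extern void record_arc(void *frompc, void *selfpc);  /* the C mcount() */

void mcount_hook(void *frompc, void *selfpc)
{
    if (mcount_busy)          /* already inside the profiler: bail */
        return;
    mcount_busy = 1;
    record_arc(frompc, selfpc);
    mcount_busy = 0;
}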
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if MP_DEBUG +#define PAUSE delay(1000000) +#define DBG(x...) kprintf(x) +#else +#define DBG(x...) +#define PAUSE +#endif /* MP_DEBUG */ + +/* Initialize lapic_id so cpu_number() works on non SMP systems */ +unsigned long lapic_id_initdata = 0; +unsigned long lapic_id = (unsigned long)&lapic_id_initdata; +vm_offset_t lapic_start; + +void lapic_init(void); +void slave_boot_init(void); + +static void mp_kdp_wait(void); +static void mp_rendezvous_action(void); + +boolean_t smp_initialized = FALSE; + +decl_simple_lock_data(,mp_kdp_lock); +decl_simple_lock_data(,mp_putc_lock); + +/* Variables needed for MP rendezvous. */ +static void (*mp_rv_setup_func)(void *arg); +static void (*mp_rv_action_func)(void *arg); +static void (*mp_rv_teardown_func)(void *arg); +static void *mp_rv_func_arg; +static int mp_rv_ncpus; +static volatile long mp_rv_waiters[2]; +decl_simple_lock_data(,mp_rv_lock); + +int lapic_to_cpu[LAPIC_ID_MAX+1]; +int cpu_to_lapic[NCPUS]; + +static void +lapic_cpu_map_init(void) +{ + int i; + + for (i = 0; i < NCPUS; i++) + cpu_to_lapic[i] = -1; + for (i = 0; i <= LAPIC_ID_MAX; i++) + lapic_to_cpu[i] = -1; +} + +void +lapic_cpu_map(int apic_id, int cpu_number) +{ + cpu_to_lapic[cpu_number] = apic_id; + lapic_to_cpu[apic_id] = cpu_number; +} + +#ifdef MP_DEBUG +static void +lapic_cpu_map_dump(void) +{ + int i; + + for (i = 0; i < NCPUS; i++) { + if (cpu_to_lapic[i] == -1) + continue; + kprintf("cpu_to_lapic[%d]: %d\n", + i, cpu_to_lapic[i]); + } + for (i = 0; i <= LAPIC_ID_MAX; i++) { + if (lapic_to_cpu[i] == -1) + continue; + kprintf("lapic_to_cpu[%d]: %d\n", + i, lapic_to_cpu[i]); + } +} +#endif /* MP_DEBUG */ + +#define LAPIC_REG(reg) \ + (*((volatile int *)(lapic_start + LAPIC_##reg))) +#define LAPIC_REG_OFFSET(reg,off) \ + (*((volatile int *)(lapic_start + LAPIC_##reg + (off)))) + + +void +smp_init(void) + +{ + int result; + vm_map_entry_t entry; + uint32_t lo; + uint32_t hi; + boolean_t is_boot_processor; + boolean_t is_lapic_enabled; + + /* Local APIC? */ + if ((cpuid_features() & CPUID_FEATURE_APIC) == 0) + return; + + simple_lock_init(&mp_kdp_lock, ETAP_MISC_PRINTF); + simple_lock_init(&mp_rv_lock, ETAP_MISC_PRINTF); + simple_lock_init(&mp_putc_lock, ETAP_MISC_PRINTF); + + /* Examine the local APIC state */ + rdmsr(MSR_IA32_APIC_BASE, lo, hi); + is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0; + is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0; + DBG("MSR_IA32_APIC_BASE 0x%x:0x%x %s %s\n", hi, lo, + is_lapic_enabled ? "enabled" : "disabled", + is_boot_processor ? 
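mp.c maintains the APIC-ID-to-CPU translation as two mirrored arrays seeded with -1, so "no mapping" stays distinguishable from CPU 0 and either direction resolves in one indexed load, which is exactly what the reworked CPU_NUMBER macro in mp.h (later in this patch) relies on. Self-contained, with sizes chosen for illustration and an added assert that is not in the original:

#include <assert.h>

#define NCPUS_SKETCH        4
#define LAPIC_ID_MAX_SKETCH 15

static int cpu_to_lapic[NCPUS_SKETCH];
static int lapic_to_cpu[LAPIC_ID_MAX_SKETCH + 1];

static void lapic_cpu_map_init_sketch(void)
{
    int i;
    for (i = 0; i < NCPUS_SKETCH; i++)
        cpu_to_lapic[i] = -1;              /* -1 == unmapped */
    for (i = 0; i <= LAPIC_ID_MAX_SKETCH; i++)
        lapic_to_cpu[i] = -1;
}

static void lapic_cpu_map_sketch(int apic_id, int cpu)
{
    assert(cpu_to_lapic[cpu] == -1 && lapic_to_cpu[apic_id] == -1);
    cpu_to_lapic[cpu] = apic_id;           /* both directions stay in sync */
    lapic_to_cpu[apic_id] = cpu;
}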
"BSP" : "AP"); + assert(is_boot_processor); + assert(is_lapic_enabled); + + /* Establish a map to the local apic */ + lapic_start = vm_map_min(kernel_map); + result = vm_map_find_space(kernel_map, &lapic_start, + round_page(LAPIC_SIZE), 0, &entry); + if (result != KERN_SUCCESS) { + printf("smp_init: vm_map_find_entry FAILED (err=%d). " + "Only supporting ONE cpu.\n", result); + return; + } + vm_map_unlock(kernel_map); + pmap_enter(pmap_kernel(), + lapic_start, + (ppnum_t) i386_btop(i386_trunc_page(LAPIC_START)), + VM_PROT_READ|VM_PROT_WRITE, + VM_WIMG_USE_DEFAULT, + TRUE); + lapic_id = (unsigned long)(lapic_start + LAPIC_ID); + + /* Set up the lapic_id <-> cpu_number map and add this boot processor */ + lapic_cpu_map_init(); + lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0); + + lapic_init(); + + slave_boot_init(); + master_up(); + + smp_initialized = TRUE; + + return; +} + + +int +lapic_esr_read(void) +{ + /* write-read register */ + LAPIC_REG(ERROR_STATUS) = 0; + return LAPIC_REG(ERROR_STATUS); +} + +void +lapic_esr_clear(void) +{ + LAPIC_REG(ERROR_STATUS) = 0; + LAPIC_REG(ERROR_STATUS) = 0; +} + +static char *DM[8] = { + "Fixed", + "Lowest Priority", + "Invalid", + "Invalid", + "NMI", + "Reset", + "Invalid", + "ExtINT"}; + +void +lapic_dump(void) +{ + int i; + char buf[128]; + +#define BOOL(a) ((a)?' ':'!') + + kprintf("LAPIC %d at 0x%x version 0x%x\n", + (LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, + lapic_start, + LAPIC_REG(VERSION)&LAPIC_VERSION_MASK); + kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n", + LAPIC_REG(TPR)&LAPIC_TPR_MASK, + LAPIC_REG(APR)&LAPIC_APR_MASK, + LAPIC_REG(PPR)&LAPIC_PPR_MASK); + kprintf("Destination Format 0x%x Logical Destination 0x%x\n", + LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT, + LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT); + kprintf("%cEnabled %cFocusChecking SV 0x%x\n", + BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE), + BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)), + LAPIC_REG(SVR) & LAPIC_SVR_MASK); + kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n", + LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK, + (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle", + BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED), + (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot"); + kprintf("LVT_PERFCNT: Vector 0x%02x [%s][%s][%s] %s %cmasked\n", + LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK, + DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK], + (LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ", + (LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High", + (LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle", + BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED)); + kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n", + LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK, + DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK], + (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ", + (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High", + (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle", + BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED)); + kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n", + LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK, + DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK], + (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ", + (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High", + (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle", + BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED)); + 
kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n", + LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK, + (LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle", + BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED)); + kprintf("ESR: %08x \n", lapic_esr_read()); + kprintf(" "); + for(i=0xf; i>=0; i--) + kprintf("%x%x%x%x",i,i,i,i); + kprintf("\n"); + kprintf("TMR: 0x"); + for(i=7; i>=0; i--) + kprintf("%08x",LAPIC_REG_OFFSET(TMR_BASE, i*0x10)); + kprintf("\n"); + kprintf("IRR: 0x"); + for(i=7; i>=0; i--) + kprintf("%08x",LAPIC_REG_OFFSET(IRR_BASE, i*0x10)); + kprintf("\n"); + kprintf("ISR: 0x"); + for(i=7; i >= 0; i--) + kprintf("%08x",LAPIC_REG_OFFSET(ISR_BASE, i*0x10)); + kprintf("\n"); +} + +void +lapic_init(void) +{ + int value; + + mp_disable_preemption(); + + /* Set flat delivery model, logical processor id */ + LAPIC_REG(DFR) = LAPIC_DFR_FLAT; + LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT; + + /* Accept all */ + LAPIC_REG(TPR) = 0; + + LAPIC_REG(SVR) = SPURIOUS_INTERRUPT | LAPIC_SVR_ENABLE; + + /* ExtINT */ + if (get_cpu_number() == master_cpu) { + value = LAPIC_REG(LVT_LINT0); + value |= LAPIC_LVT_DM_EXTINT; + LAPIC_REG(LVT_LINT0) = value; + } + + lapic_esr_clear(); + + LAPIC_REG(LVT_ERROR) = APIC_ERROR_INTERRUPT; + + mp_enable_preemption(); +} + + +void +lapic_end_of_interrupt(void) +{ + LAPIC_REG(EOI) = 0; +} + +void +lapic_interrupt(int interrupt, void *state) +{ + + switch(interrupt) { + case APIC_ERROR_INTERRUPT: + panic("Local APIC error\n"); + break; + case SPURIOUS_INTERRUPT: + kprintf("SPIV\n"); + break; + case INTERPROCESS_INTERRUPT: + cpu_signal_handler((struct i386_interrupt_state *) state); + break; + } + lapic_end_of_interrupt(); +} + +kern_return_t +intel_startCPU( + int slot_num) +{ + + int i = 1000; + int lapic_id = cpu_to_lapic[slot_num]; + + if (slot_num == get_cpu_number()) + return KERN_SUCCESS; + + assert(lapic_id != -1); + + DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic_id); + + mp_disable_preemption(); + + LAPIC_REG(ICRD) = lapic_id << LAPIC_ICRD_DEST_SHIFT; + LAPIC_REG(ICR) = LAPIC_ICR_DM_INIT; + delay(10000); + + LAPIC_REG(ICRD) = lapic_id << LAPIC_ICRD_DEST_SHIFT; + LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12); + delay(200); + + while(i-- > 0) { + delay(10000); + if (machine_slot[slot_num].running) + break; + } + + mp_enable_preemption(); + + if (!machine_slot[slot_num].running) { + DBG("Failed to start CPU %02d\n", slot_num); + printf("Failed to start CPU %02d\n", slot_num); + return KERN_SUCCESS; + } else { + DBG("Started CPU %02d\n", slot_num); + printf("Started CPU %02d\n", slot_num); + return KERN_SUCCESS; + } +} + +void +slave_boot_init(void) +{ + extern char slave_boot_base[]; + extern char slave_boot_end[]; + extern void pstart(void); + + DBG("slave_base=%p slave_end=%p MP_BOOT P=%p V=%p\n", + slave_boot_base, slave_boot_end, MP_BOOT, phystokv(MP_BOOT)); + + /* + * Copy the boot entry code to the real-mode vector area MP_BOOT. + * This is in page 1 which has been reserved for this purpose by + * machine_startup() from the boot processor. + * The slave boot code is responsible for switching to protected + * mode and then jumping to the common startup, pstart(). + */ + bcopy(slave_boot_base, + (char *)phystokv(MP_BOOT), + slave_boot_end-slave_boot_base); + + /* + * Zero a stack area above the boot code. + */ + bzero((char *)(phystokv(MP_BOOTSTACK+MP_BOOT)-0x400), 0x400); + + /* + * Set the location at the base of the stack to point to the + * common startup entry. 
+ */ + *((vm_offset_t *) phystokv(MP_MACH_START+MP_BOOT)) = + kvtophys((vm_offset_t)&pstart); + + /* Flush caches */ + __asm__("wbinvd"); +} + +#if MP_DEBUG +cpu_signal_event_log_t cpu_signal[NCPUS] = { 0, 0, 0 }; +cpu_signal_event_log_t cpu_handle[NCPUS] = { 0, 0, 0 }; + +MP_EVENT_NAME_DECL(); + +void +cpu_signal_dump_last(int cpu) +{ + cpu_signal_event_log_t *logp = &cpu_signal[cpu]; + int last; + cpu_signal_event_t *eventp; + + last = (logp->next_entry == 0) ? + LOG_NENTRIES - 1 : logp->next_entry - 1; + + eventp = &logp->entry[last]; + + kprintf("cpu%d: tsc=%lld cpu_signal(%d,%s)\n", + cpu, eventp->time, eventp->cpu, mp_event_name[eventp->event]); +} + +void +cpu_handle_dump_last(int cpu) +{ + cpu_signal_event_log_t *logp = &cpu_handle[cpu]; + int last; + cpu_signal_event_t *eventp; + + last = (logp->next_entry == 0) ? + LOG_NENTRIES - 1 : logp->next_entry - 1; + + eventp = &logp->entry[last]; + + kprintf("cpu%d: tsc=%lld cpu_signal_handle%s\n", + cpu, eventp->time, mp_event_name[eventp->event]); +} +#endif /* MP_DEBUG */ + +void +cpu_signal_handler(struct i386_interrupt_state *regs) +{ + register my_cpu; + volatile int *my_word; +#if MACH_KDB && MACH_ASSERT + int i=100; +#endif /* MACH_KDB && MACH_ASSERT */ + + mp_disable_preemption(); + + my_cpu = cpu_number(); + my_word = &cpu_data[my_cpu].cpu_signals; + + do { +#if MACH_KDB && MACH_ASSERT + if (i-- <= 0) + Debugger("cpu_signal_handler"); +#endif /* MACH_KDB && MACH_ASSERT */ +#if MACH_KDP + if (i_bit(MP_KDP, my_word)) { + DBGLOG(cpu_handle,my_cpu,MP_KDP); + i_bit_clear(MP_KDP, my_word); + mp_kdp_wait(); + } else +#endif /* MACH_KDP */ + if (i_bit(MP_CLOCK, my_word)) { + DBGLOG(cpu_handle,my_cpu,MP_CLOCK); + i_bit_clear(MP_CLOCK, my_word); + hardclock(regs); + } else if (i_bit(MP_TLB_FLUSH, my_word)) { + DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH); + i_bit_clear(MP_TLB_FLUSH, my_word); + pmap_update_interrupt(); + } else if (i_bit(MP_AST, my_word)) { + DBGLOG(cpu_handle,my_cpu,MP_AST); + i_bit_clear(MP_AST, my_word); + ast_check(cpu_to_processor(my_cpu)); +#if MACH_KDB + } else if (i_bit(MP_KDB, my_word)) { + extern kdb_is_slave[]; + + i_bit_clear(MP_KDB, my_word); + kdb_is_slave[my_cpu]++; + kdb_kintr(); +#endif /* MACH_KDB */ + } else if (i_bit(MP_RENDEZVOUS, my_word)) { + DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS); + i_bit_clear(MP_RENDEZVOUS, my_word); + mp_rendezvous_action(); + } + } while (*my_word); + + mp_enable_preemption(); + +} + +void +cpu_interrupt(int cpu) +{ + boolean_t state; + + if (smp_initialized) { + + /* Wait for previous interrupt to be delivered... */ + while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) + cpu_pause(); + + state = ml_set_interrupts_enabled(FALSE); + LAPIC_REG(ICRD) = + cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT; + LAPIC_REG(ICR) = + INTERPROCESS_INTERRUPT | LAPIC_ICR_DM_FIXED; + (void) ml_set_interrupts_enabled(state); + } + +} + +void +slave_clock(void) +{ + int cpu; + + /* + * Clock interrupts are chained from the boot processor + * to the next logical processor that is running and from + * there on to any further running processor etc. 
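cpu_signal_handler() drains the per-CPU signal word in a do/while that re-reads the word after every dispatch, so an event posted while the handler is running is picked up before returning from the interrupt. The drain loop, with C11 atomics standing in for i_bit()/i_bit_clear() and the event set abbreviated:

#include <stdatomic.h>

enum { EV_TLB_FLUSH, EV_CLOCK, EV_AST, EV_COUNT };

static _Atomic unsigned long pending;   /* the per-CPU signal word */

extern void dispatch(int event);        /* stands in for the if/else chain */

void drain_signals(void)
{
    unsigned long word;

    /* Keep draining until a final read observes no pending bits,
     * mirroring the do { ... } while (*my_word) loop above. */
    while ((word = atomic_load(&pending)) != 0) {
        for (int ev = 0; ev < EV_COUNT; ev++) {
            if (word & (1ul << ev)) {
                atomic_fetch_and(&pending, ~(1ul << ev)); /* i_bit_clear */
                dispatch(ev);
            }
        }
    }
}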
+ */ + mp_disable_preemption(); + for (cpu=cpu_number()+1; cpu 0) { + cpu_pause(); + } + DBG("mp_kdp_exit() done\n"); +} +#endif /* MACH_KDP */ + +void +lapic_test(void) +{ + int cpu = 1; + + lapic_dump(); + i_bit_set(0, &cpu_data[cpu].cpu_signals); + cpu_interrupt(1); +} + +/*ARGSUSED*/ +void +init_ast_check( + processor_t processor) +{ +} + +void +cause_ast_check( + processor_t processor) +{ + int cpu = processor->slot_num; + + if (cpu != cpu_number()) { + i386_signal_cpu(cpu, MP_AST, ASYNC); + } +} + +/* + * invoke kdb on slave processors + */ + +void +remote_kdb(void) +{ + int my_cpu = cpu_number(); + int cpu; + + mp_disable_preemption(); + for (cpu = 0; cpu < NCPUS; cpu++) { + if (cpu == my_cpu || !machine_slot[cpu].running) + continue; + i386_signal_cpu(cpu, MP_KDB, SYNC); + } + mp_enable_preemption(); +} + +/* + * Clear kdb interrupt + */ + +void +clear_kdb_intr(void) +{ + mp_disable_preemption(); + i_bit_clear(MP_KDB, &cpu_data[cpu_number()].cpu_signals); + mp_enable_preemption(); +} + +void +slave_machine_init(void) +{ + int my_cpu; + + /* Ensure that caching and write-through are enabled */ + set_cr0(get_cr0() & ~(CR0_NW|CR0_CD)); + + mp_disable_preemption(); + my_cpu = get_cpu_number(); + + DBG("slave_machine_init() CPU%d: phys (%d) active.\n", + my_cpu, get_cpu_phys_number()); + + lapic_init(); + + init_fpu(); + + cpu_machine_init(); + + mp_enable_preemption(); + +#ifdef MP_DEBUG + lapic_dump(); + lapic_cpu_map_dump(); +#endif /* MP_DEBUG */ + +} + +#undef cpu_number() +int cpu_number(void) +{ + return get_cpu_number(); +} + +#if MACH_KDB +#include + +#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */ + + +#if TRAP_DEBUG +#define MTRAPS 100 +struct mp_trap_hist_struct { + unsigned char type; + unsigned char data[5]; +} trap_hist[MTRAPS], *cur_trap_hist = trap_hist, + *max_trap_hist = &trap_hist[MTRAPS]; + +void db_trap_hist(void); + +/* + * SPL: + * 1: new spl + * 2: old spl + * 3: new tpr + * 4: old tpr + * INT: + * 1: int vec + * 2: old spl + * 3: new spl + * 4: post eoi tpr + * 5: exit tpr + */ + +void +db_trap_hist(void) +{ + int i,j; + for(i=0;i=cur_trap_hist)?"*":" ", + (trap_hist[i].type == 1)?"SPL":"INT"); + for(j=0;j<5;j++) + db_printf(" %02x", trap_hist[i].data[j]); + db_printf("\n"); + } + +} +#endif /* TRAP_DEBUG */ + +void db_lapic(int cpu); +unsigned int db_remote_read(int cpu, int reg); +void db_ioapic(unsigned int); +void kdb_console(void); + +void +kdb_console(void) +{ +} + +#define BOOLP(a) ((a)?' 
':'!') + +static char *DM[8] = { + "Fixed", + "Lowest Priority", + "Invalid", + "Invalid", + "NMI", + "Reset", + "Invalid", + "ExtINT"}; + +unsigned int +db_remote_read(int cpu, int reg) +{ + return -1; +} + +void +db_lapic(int cpu) +{ +} + +void +db_ioapic(unsigned int ind) +{ +} + +#endif /* MACH_KDB */ + diff --git a/osfmk/i386/AT386/mp/mp.h b/osfmk/i386/mp.h similarity index 74% rename from osfmk/i386/AT386/mp/mp.h rename to osfmk/i386/mp.h index a444c5aeb..091347ad3 100644 --- a/osfmk/i386/AT386/mp/mp.h +++ b/osfmk/i386/mp.h @@ -57,21 +57,42 @@ #ifndef _I386AT_MP_H_ #define _I386AT_MP_H_ +#if !defined(NCPUS) #include -#include -#include -#include -#include +#endif /* !defined(NCPUS) */ #if NCPUS > 1 + +#ifndef DEBUG +#include +#endif +#if DEBUG +#define MP_DEBUG 1 +#endif + #include -#include +#include + +#define SPURIOUS_INTERRUPT 0xDD +#define INTERPROCESS_INTERRUPT 0xDE +#define APIC_ERROR_INTERRUPT 0xDF + +#define LAPIC_ID_MAX (LAPIC_ID_MASK) -#define CPU_NUMBER(r) \ - movl EXT(lapic_id), r ; \ - movl 0(r),r ; \ - shrl $ LAPIC_ID_SHIFT, r; \ - andl $ LAPIC_ID_MASK, r +#ifndef ASSEMBLER +extern void lapic_dump(void); +extern void lapic_interrupt(int interrupt, void *state); +extern int lapic_to_cpu[]; +extern int cpu_to_lapic[]; +extern void lapic_cpu_map(int lapic, int cpu_num); +#endif /* ASSEMBLER */ + +#define CPU_NUMBER(r) \ + movl EXT(lapic_id),r; \ + movl 0(r),r; \ + shrl $(LAPIC_ID_SHIFT),r; \ + andl $(LAPIC_ID_MASK),r; \ + movl EXT(lapic_to_cpu)(,r,4),r #define MP_IPL SPL6 /* software interrupt level */ @@ -80,15 +101,67 @@ #ifndef ASSEMBLER #include -extern cpu_int_word[]; -extern real_ncpus; /* real number of cpus */ -extern wncpu; /* wanted number of cpus */ +extern int real_ncpus; /* real number of cpus */ +extern int wncpu; /* wanted number of cpus */ decl_simple_lock_data(extern,kdb_lock) /* kdb lock */ +decl_simple_lock_data(extern,mp_putc_lock) extern int kdb_cpu; /* current cpu running kdb */ extern int kdb_debug; extern int kdb_is_slave[]; extern int kdb_active[]; + +extern volatile boolean_t mp_kdp_trap; +extern void mp_trap_enter(); +extern void mp_trap_exit(); + +/* + * All cpu rendezvous: + */ +extern void mp_rendezvous(void (*setup_func)(void *), + void (*action_func)(void *), + void (*teardown_func)(void *), + void *arg); + +#if MP_DEBUG +typedef struct { + uint64_t time; + int cpu; + mp_event_t event; +} cpu_signal_event_t; + +#define LOG_NENTRIES 100 +typedef struct { + uint64_t count[MP_LAST]; + int next_entry; + cpu_signal_event_t entry[LOG_NENTRIES]; +} cpu_signal_event_log_t; + +extern cpu_signal_event_log_t cpu_signal[NCPUS]; +extern cpu_signal_event_log_t cpu_handle[NCPUS]; + +#define DBGLOG(log,_cpu,_event) { \ + cpu_signal_event_log_t *logp = &log[cpu_number()]; \ + int next = logp->next_entry; \ + cpu_signal_event_t *eventp = &logp->entry[next]; \ + boolean_t spl = ml_set_interrupts_enabled(FALSE); \ + \ + logp->count[_event]++; \ + \ + eventp->time = rdtsc64(); \ + eventp->cpu = _cpu; \ + eventp->event = _event; \ + if (next == (LOG_NENTRIES - 1)) \ + logp->next_entry = 0; \ + else \ + logp->next_entry++; \ + \ + (void) ml_set_interrupts_enabled(spl); \ +} +#else /* MP_DEBUG */ +#define DBGLOG(log,_cpu,_event) +#endif /* MP_DEBUG */ + #endif /* ASSEMBLER */ #define i_bit(bit, word) ((long)(*(word)) & ((long)1 << (bit))) @@ -121,9 +194,9 @@ extern int kdb_active[]; #define at386_io_lock_state() #define at386_io_lock(op) (TRUE) #define at386_io_unlock() -#if MP_V1_1 +#define mp_trap_enter() +#define mp_trap_exit() #include -#endif /* MP_V1_1 */ 
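The rewritten CPU_NUMBER macro in mp.h is where the lapic_to_cpu table pays off: rather than assuming logical CPU numbers equal hardware APIC IDs, it loads the mapped LAPIC ID register through the lapic_id pointer, isolates the ID field, and translates through the table. Rendered as C; the shift and mask values are the usual ones for this era's APICs, assumed rather than taken from the patch:

#include <stdint.h>

#define LAPIC_ID_SHIFT_SKETCH 24
#define LAPIC_ID_MASK_SKETCH  0x0F

extern volatile uint32_t *lapic_id;   /* points at the mapped ID register */
extern int lapic_to_cpu[];

static inline int cpu_number_sketch(void)
{
    uint32_t id = *lapic_id;                        /* movl 0(r),r */
    return lapic_to_cpu[(id >> LAPIC_ID_SHIFT_SKETCH)
                        & LAPIC_ID_MASK_SKETCH];    /* the table lookup
                                                       the old macro lacked */
}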
#endif /* NCPUS > 1 */ #if MACH_RT diff --git a/osfmk/i386/mp_desc.c b/osfmk/i386/mp_desc.c index 29b6aeb26..dc29d7bde 100644 --- a/osfmk/i386/mp_desc.c +++ b/osfmk/i386/mp_desc.c @@ -66,6 +66,7 @@ #include #include #include +#include #include @@ -131,6 +132,7 @@ struct i386_tss *mp_dbtss[NCPUS] = { 0 }; */ struct fake_descriptor *mp_gdt[NCPUS] = { 0 }; struct fake_descriptor *mp_idt[NCPUS] = { 0 }; +struct fake_descriptor *mp_ldt[NCPUS] = { 0 }; /* * Allocate and initialize the per-processor descriptor tables. @@ -173,6 +175,7 @@ mp_desc_init( #endif /* MACH_KDB */ mp_gdt[mycpu] = gdt; mp_idt[mycpu] = idt; + mp_ldt[mycpu] = ldt; return 0; } else { @@ -180,6 +183,7 @@ mp_desc_init( mp_ktss[mycpu] = &mpt->ktss; mp_gdt[mycpu] = mpt->gdt; mp_idt[mycpu] = mpt->idt; + mp_ldt[mycpu] = mpt->ldt; /* * Copy the tables @@ -195,8 +199,13 @@ mp_desc_init( sizeof(ldt)); bzero((char *)&mpt->ktss, sizeof(struct i386_tss)); +#if 0 bzero((char *)&cpu_data[mycpu], sizeof(cpu_data_t)); +#endif + /* I am myself */ + cpu_data[mycpu].cpu_number = mycpu; + #if MACH_KDB mp_dbtss[mycpu] = &mpt->dbtss; bcopy((char *)&dbtss, @@ -255,12 +264,9 @@ interrupt_stack_alloc(void) struct mp_desc_table *mpt; /* - * Count the number of CPUs. + * Number of CPUs possible. */ - cpu_count = 0; - for (i = 0; i < NCPUS; i++) - if (machine_slot[i].is_cpu) - cpu_count++; + cpu_count = wncpu; /* * Allocate an interrupt stack for each CPU except for @@ -273,12 +279,12 @@ interrupt_stack_alloc(void) /* * Set up pointers to the top of the interrupt stack. */ - for (i = 0; i < NCPUS; i++) { + for (i = 0; i < cpu_count; i++) { if (i == master_cpu) { interrupt_stack[i] = (vm_offset_t) intstack; int_stack_top[i] = (vm_offset_t) eintstack; } - else if (machine_slot[i].is_cpu) { + else { interrupt_stack[i] = stack_start; int_stack_top[i] = stack_start + INTSTACK_SIZE; @@ -294,7 +300,7 @@ interrupt_stack_alloc(void) mpt = (struct mp_desc_table *) phystokv(avail_start); avail_start = round_page((vm_offset_t)avail_start + sizeof(struct mp_desc_table)*(cpu_count-1)); - for (i = 0; i < NCPUS; i++) + for (i = 0; i < cpu_count; i++) if (i != master_cpu) mp_desc_table[i] = mpt++; diff --git a/osfmk/i386/mp_desc.h b/osfmk/i386/mp_desc.h index 29f2f26b9..7bcf98bd6 100644 --- a/osfmk/i386/mp_desc.h +++ b/osfmk/i386/mp_desc.h @@ -104,6 +104,7 @@ extern struct i386_tss *mp_dbtss[NCPUS]; */ extern struct fake_descriptor *mp_gdt[NCPUS]; extern struct fake_descriptor *mp_idt[NCPUS]; +extern struct fake_descriptor *mp_ldt[NCPUS]; /* diff --git a/osfmk/i386/mp_events.h b/osfmk/i386/mp_events.h new file mode 100644 index 000000000..ae42a5950 --- /dev/null +++ b/osfmk/i386/mp_events.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __AT386_MP_EVENTS__ +#define __AT386_MP_EVENTS__ + +/* Interrupt types */ + +#ifndef ASSEMBLER + +typedef enum { + MP_TLB_FLUSH = 0, + MP_CLOCK, + MP_KDP, + MP_KDB, + MP_AST, + MP_SOFTCLOCK, + MP_RENDEZVOUS, + MP_IDLE, + MP_UNIDLE, + MP_LAST +} mp_event_t; + +#define MP_EVENT_NAME_DECL() \ +char *mp_event_name[] = { \ + "MP_TLB_FLUSH", \ + "MP_CLOCK", \ + "MP_KDP", \ + "MP_KDB", \ + "MP_AST", \ + "MP_SOFTCLOCK", \ + "MP_RENDEZVOUS", \ + "MP_IDLE", \ + "MP_UNIDLE", \ + "MP_LAST" \ +} + +typedef enum { SYNC, ASYNC } mp_sync_t; + +extern void i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode); +extern void i386_signal_cpus(mp_event_t event, mp_sync_t mode); +extern int i386_active_cpus(void); +#endif + +#endif diff --git a/osfmk/i386/AT386/mp/boot.h b/osfmk/i386/mp_slave_boot.h similarity index 96% rename from osfmk/i386/AT386/mp/boot.h rename to osfmk/i386/mp_slave_boot.h index 04b61ecff..6bf6c351b 100644 --- a/osfmk/i386/AT386/mp/boot.h +++ b/osfmk/i386/mp_slave_boot.h @@ -99,9 +99,10 @@ * Define where to store boot code for slaves */ -#define MP_BOOT 0x1000 /* address where slave boots are loaded */ +#define MP_BOOT 0x1000 /* address where slave boots load */ #define MP_BOOTSEG 0x100 #define MP_GDT 0x1100 /* temporary gdt address for boot */ #define MP_BOOTSTACK 0x800 /* stack for boot */ #define MP_MACH_START MP_BOOTSTACK /* contains address where to jump after boot */ +#define MP_FIRST_ADDR 0x3000 /* 2 extra pages reserved */ diff --git a/osfmk/i386/AT386/mp/slave_boot.s b/osfmk/i386/mp_slave_boot.s similarity index 95% rename from osfmk/i386/AT386/mp/slave_boot.s rename to osfmk/i386/mp_slave_boot.s index a227728d0..63922d67c 100644 --- a/osfmk/i386/AT386/mp/slave_boot.s +++ b/osfmk/i386/mp_slave_boot.s @@ -53,8 +53,8 @@ */ -#include "i386/asm.h" -#include "i386/AT386/mp/boot.h" +#include +#include #define CR0_PE_ON 0x1 #define CR0_PE_OFF 0xfffffffe @@ -73,23 +73,23 @@ .byte 0x15 ;\ .long address-EXT(slave_boot_base) -ENTRY(slave_boot_base) +Entry(slave_boot_base) /* code is loaded at 0x0:0x1000 */ /* ljmp to the next instruction to set up %cs */ data16 LJMP(MP_BOOTSEG, EXT(slave_pstart)) -ENTRY(slave_pstart) +Entry(slave_pstart) /* set up %ds */ mov %cs, %ax mov %ax, %ds /* set up %ss and %esp */ data16 - mov $MP_BOOTSEG, %eax + mov $(MP_BOOTSEG), %eax mov %ax, %ss data16 - mov $MP_BOOTSTACK, %esp + mov $(MP_BOOTSTACK), %esp /*set up %es */ mov %ax, %es @@ -106,7 +106,7 @@ ENTRY(slave_pstart) transfer from real mode to protected mode. */ -ENTRY(real_to_prot) +Entry(real_to_prot) /* guarantee that interrupt is disabled when in prot mode */ cli @@ -119,7 +119,7 @@ ENTRY(real_to_prot) mov %cr0, %eax data16 - or $CR0_PE_ON, %eax + or $(CR0_PE_ON), %eax mov %eax, %cr0 /* make intrasegment jump to flush the processor pipeline and */ @@ -142,7 +142,7 @@ xprot: start the program on protected mode where phyaddr is the entry point */ -ENTRY(startprog) +Entry(startprog) push %ebp mov %esp, %ebp @@ -160,7 +160,7 @@ ENTRY(startprog) . 
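mp_events.h keeps the mp_event_t enum and the MP_EVENT_NAME_DECL() string table as two hand-maintained parallel lists that must be edited in lockstep; DBGLOG consumers index mp_event_name[] by event. A usage sketch plus a compile-time size check; the check is my addition, not part of the patch:

#include <stdio.h>

typedef enum {
    MP_TLB_FLUSH = 0, MP_CLOCK, MP_KDP, MP_KDB, MP_AST,
    MP_SOFTCLOCK, MP_RENDEZVOUS, MP_IDLE, MP_UNIDLE, MP_LAST
} mp_event_t;

static const char *mp_event_name[] = {
    "MP_TLB_FLUSH", "MP_CLOCK", "MP_KDP", "MP_KDB", "MP_AST",
    "MP_SOFTCLOCK", "MP_RENDEZVOUS", "MP_IDLE", "MP_UNIDLE", "MP_LAST"
};

/* Fails to compile if the enum and the name table drift apart. */
typedef char name_table_matches_enum[
    (sizeof(mp_event_name) / sizeof(mp_event_name[0]) == MP_LAST + 1) ? 1 : -1];

int main(void)
{
    printf("event %d is %s\n", MP_RENDEZVOUS, mp_event_name[MP_RENDEZVOUS]);
    return 0;
}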
= MP_GDT-MP_BOOT /* GDT location */ -ENTRY(Gdt) +Entry(Gdt) /* Segment Descriptor * @@ -193,12 +193,12 @@ ENTRY(Gdt) .word 0xffff,0 /* 0x28 : init code */ .byte 0,0x9e,0xcf,0 -ENTRY(gdtr) +Entry(gdtr) .short 48 /* limit (8*6 segs) */ .short MP_GDT /* base low */ .short 0 /* base high */ -ENTRY(slave_boot_end) +Entry(slave_boot_end) diff --git a/osfmk/i386/pcb.c b/osfmk/i386/pcb.c index 195018542..b2dd1d432 100644 --- a/osfmk/i386/pcb.c +++ b/osfmk/i386/pcb.c @@ -85,6 +85,10 @@ #include #include +vm_offset_t active_stacks[NCPUS]; +vm_offset_t kernel_stack[NCPUS]; +thread_act_t active_kloaded[NCPUS]; + /* * Maps state flavor to number of words in the state: */ @@ -146,15 +150,6 @@ machine_kernel_stack_init( stack = thread->kernel_stack; assert(stack); -#if MACH_ASSERT - if (watchacts & WA_PCB) { - printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", - thread,stack,start_pos); - printf("\tstack_iks=%x, stack_iel=%x\n", - STACK_IKS(stack), STACK_IEL(stack)); - } -#endif /* MACH_ASSERT */ - /* * We want to run at start_pos, giving it as an argument * the return value from Load_context/Switch_context. @@ -176,9 +171,11 @@ machine_kernel_stack_init( #if NCPUS > 1 #define curr_gdt(mycpu) (mp_gdt[mycpu]) +#define curr_ldt(mycpu) (mp_ldt[mycpu]) #define curr_ktss(mycpu) (mp_ktss[mycpu]) #else #define curr_gdt(mycpu) (gdt) +#define curr_ldt(mycpu) (ldt) #define curr_ktss(mycpu) (&ktss) #endif @@ -190,9 +187,9 @@ act_machine_switch_pcb( thread_act_t new_act ) { pcb_t pcb = new_act->mact.pcb; int mycpu; - { register iopb_tss_t tss = pcb->ims.io_tss; vm_offset_t pcb_stack_top; + register user_ldt_t ldt = pcb->ims.ldt; assert(new_act->thread != NULL); assert(new_act->thread->kernel_stack != 0); @@ -234,17 +231,17 @@ act_machine_switch_pcb( thread_act_t new_act ) set_tr(USER_TSS); gdt_desc_p(mycpu,KERNEL_TSS)->access &= ~ ACC_TSS_BUSY; } - } - { - register user_ldt_t ldt = pcb->ims.ldt; /* * Set the thread`s LDT. */ if (ldt == 0) { + struct real_descriptor *ldtp; /* * Use system LDT. */ + ldtp = (struct real_descriptor *)curr_ldt(mycpu); + ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc; set_ldt(KERNEL_LDT); } else { @@ -254,7 +251,7 @@ act_machine_switch_pcb( thread_act_t new_act ) *gdt_desc_p(mycpu,USER_LDT) = ldt->desc; set_ldt(USER_LDT); } - } + mp_enable_preemption(); /* * Load the floating-point context, if necessary. @@ -263,21 +260,11 @@ act_machine_switch_pcb( thread_act_t new_act ) } -/* - * flush out any lazily evaluated HW state in the - * owning thread's context, before termination. - */ -void -thread_machine_flush( thread_act_t cur_act ) -{ - fpflush(cur_act); -} - /* * Switch to the first thread on a CPU. */ void -load_context( +machine_load_context( thread_t new) { act_machine_switch_pcb(new->top_act); @@ -300,9 +287,10 @@ void machine_switch_act( thread_t thread, thread_act_t old, - thread_act_t new, - int cpu) + thread_act_t new) { + int cpu = cpu_number(); + /* * Switch the vm, ast and pcb context. * Save FP registers if in use and set TS (task switch) bit. @@ -322,7 +310,7 @@ machine_switch_act( * and return it. 
*/ thread_t -switch_context( +machine_switch_context( thread_t old, void (*continuation)(void), thread_t new) @@ -331,9 +319,7 @@ switch_context( new_act = new->top_act; #if MACH_RT - assert(old_act->kernel_loaded || - active_stacks[cpu_number()] == old_act->thread->kernel_stack); - assert (get_preemption_level() == 1); + assert(active_stacks[cpu_number()] == old_act->thread->kernel_stack); #endif check_simple_locks(); @@ -342,12 +328,6 @@ switch_context( */ fpu_save_context(old); -#if MACH_ASSERT - if (watchacts & WA_SWITCH) - printf("\tswitch_context(old=%x con=%x new=%x)\n", - old, continuation, new); -#endif /* MACH_ASSERT */ - /* * Switch address maps if need be, even if not switching tasks. * (A server activation may be "borrowing" a client map.) @@ -364,97 +344,10 @@ switch_context( act_machine_switch_pcb(new_act); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE, (int)old, (int)new, old->sched_pri, new->sched_pri, 0); + old->continuation = NULL; return(Switch_context(old, continuation, new)); } -void -pcb_module_init(void) -{ - fpu_module_init(); - iopb_init(); -} - -void -pcb_init( register thread_act_t thr_act ) -{ - register pcb_t pcb; - - assert(thr_act->mact.pcb == (pcb_t)0); - pcb = thr_act->mact.pcb = &thr_act->mact.xxx_pcb; - -#if MACH_ASSERT - if (watchacts & WA_PCB) - printf("pcb_init(%x) pcb=%x\n", thr_act, pcb); -#endif /* MACH_ASSERT */ - - /* - * We can't let random values leak out to the user. - * (however, act_create() zeroed the entire thr_act, mact, pcb) - * bzero((char *) pcb, sizeof *pcb); - */ - simple_lock_init(&pcb->lock, ETAP_MISC_PCB); - - /* - * Guarantee that the bootstrapped thread will be in user - * mode. - */ - pcb->iss.cs = USER_CS; - pcb->iss.ss = USER_DS; - pcb->iss.ds = USER_DS; - pcb->iss.es = USER_DS; - pcb->iss.fs = USER_DS; - pcb->iss.gs = USER_DS; - pcb->iss.efl = EFL_USER_SET; -} - -/* - * Adjust saved register state for thread belonging to task - * created with kernel_task_create(). - */ -void -pcb_user_to_kernel( - thread_act_t thr_act) -{ - register pcb_t pcb = thr_act->mact.pcb; - - pcb->iss.cs = KERNEL_CS; - pcb->iss.ss = KERNEL_DS; - pcb->iss.ds = KERNEL_DS; - pcb->iss.es = KERNEL_DS; - pcb->iss.fs = KERNEL_DS; - pcb->iss.gs = CPU_DATA; -} - -void -pcb_terminate( - register thread_act_t thr_act) -{ - register pcb_t pcb = thr_act->mact.pcb; - - assert(pcb); - - if (pcb->ims.io_tss != 0) - iopb_destroy(pcb->ims.io_tss); - if (pcb->ims.ifps != 0) - fp_free(pcb->ims.ifps); - if (pcb->ims.ldt != 0) - user_ldt_free(pcb->ims.ldt); - thr_act->mact.pcb = (pcb_t)0; -} - -/* - * pcb_collect: - * - * Attempt to free excess pcb memory. - */ - -void -pcb_collect( - register thread_act_t thr_act) -{ - /* accomplishes very little */ -} - /* * act_machine_sv_free * release saveareas associated with an act. 
if flag is true, release @@ -463,7 +356,6 @@ pcb_collect( void act_machine_sv_free(thread_act_t act, int flag) { - } /* @@ -475,20 +367,13 @@ act_machine_sv_free(thread_act_t act, int flag) */ kern_return_t -act_machine_set_state( +machine_thread_set_state( thread_act_t thr_act, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count) { - int kernel_act = thr_act->kernel_loading || - thr_act->kernel_loaded; - -#if MACH_ASSERT - if (watchacts & WA_STATE) - printf("act_%x act_m_set_state(thr_act=%x,flav=%x,st=%x,cnt=%x)\n", - current_act(), thr_act, flavor, tstate, count); -#endif /* MACH_ASSERT */ + int kernel_act = 0; switch (flavor) { case THREAD_SYSCALL_STATE: @@ -569,17 +454,17 @@ act_machine_set_state( state->efl & (EFL_TF | EFL_IF); } } - else if (!kernel_act) { + else if (kernel_act) { /* * 386 mode. Set segment registers for flat * 32-bit address space. */ - saved_state->cs = USER_CS; - saved_state->ss = USER_DS; - saved_state->ds = USER_DS; - saved_state->es = USER_DS; - saved_state->fs = USER_DS; - saved_state->gs = USER_DS; + saved_state->cs = KERNEL_CS; + saved_state->ss = KERNEL_DS; + saved_state->ds = KERNEL_DS; + saved_state->es = KERNEL_DS; + saved_state->fs = KERNEL_DS; + saved_state->gs = CPU_DATA; } else { /* @@ -679,17 +564,17 @@ act_machine_set_state( state->efl & (EFL_TF | EFL_IF); } } - else if (flavor == i386_NEW_THREAD_STATE && !kernel_act) { + else if (flavor == i386_NEW_THREAD_STATE && kernel_act) { /* * 386 mode. Set segment registers for flat * 32-bit address space. */ - saved_state->cs = USER_CS; - saved_state->ss = USER_DS; - saved_state->ds = USER_DS; - saved_state->es = USER_DS; - saved_state->fs = USER_DS; - saved_state->gs = USER_DS; + saved_state->cs = KERNEL_CS; + saved_state->ss = KERNEL_DS; + saved_state->ds = KERNEL_DS; + saved_state->es = KERNEL_DS; + saved_state->fs = KERNEL_DS; + saved_state->gs = CPU_DATA; } else { /* @@ -709,11 +594,12 @@ act_machine_set_state( } case i386_FLOAT_STATE: { - - if (count < i386_FLOAT_STATE_COUNT) + struct i386_float_state *state = (struct i386_float_state*)tstate; + if (count < i386_old_FLOAT_STATE_COUNT) return(KERN_INVALID_ARGUMENT); - - return fpu_set_state(thr_act,(struct i386_float_state*)tstate); + if (count < i386_FLOAT_STATE_COUNT) + return fpu_set_state(thr_act,(struct i386_float_state*)tstate); + else return fpu_set_fxstate(thr_act,(struct i386_float_state*)tstate); } /* @@ -778,8 +664,8 @@ act_machine_set_state( saved_state->ss = USER_DS; saved_state->ds = USER_DS; saved_state->es = USER_DS; - saved_state->fs = USER_DS; - saved_state->gs = USER_DS; + saved_state->fs = state25->fs; + saved_state->gs = state25->gs; } break; @@ -798,19 +684,12 @@ act_machine_set_state( kern_return_t -act_machine_get_state( +machine_thread_get_state( thread_act_t thr_act, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count) { -#if MACH_ASSERT - if (watchacts & WA_STATE) - printf("act_%x act_m_get_state(thr_act=%x,flav=%x,st=%x,cnt@%x=%x)\n", - current_act(), thr_act, flavor, tstate, - count, (count ? 
*count : 0)); -#endif /* MACH_ASSERT */ - switch (flavor) { case i386_SAVED_STATE: @@ -946,12 +825,17 @@ act_machine_get_state( break; case i386_FLOAT_STATE: { + struct i386_float_state *state = (struct i386_float_state*)tstate; - if (*count < i386_FLOAT_STATE_COUNT) + if (*count < i386_old_FLOAT_STATE_COUNT) return(KERN_INVALID_ARGUMENT); - - *count = i386_FLOAT_STATE_COUNT; - return fpu_get_state(thr_act,(struct i386_float_state *)tstate); + if (*count< i386_FLOAT_STATE_COUNT) { + *count = i386_old_FLOAT_STATE_COUNT; + return fpu_get_state(thr_act,(struct i386_float_state *)tstate); + } else { + *count = i386_FLOAT_STATE_COUNT; + return fpu_get_fxstate(thr_act,(struct i386_float_state *)tstate); + } } /* @@ -1038,46 +922,49 @@ act_machine_get_state( return(KERN_SUCCESS); } -/* - * Alter the thread`s state so that a following thread_exception_return - * will make the thread return 'retval' from a syscall. - */ -void -thread_set_syscall_return( - thread_t thread, - kern_return_t retval) -{ - thread->top_act->mact.pcb->iss.eax = retval; -} - /* * Initialize the machine-dependent state for a new thread. */ kern_return_t -thread_machine_create(thread_t thread, thread_act_t thr_act, void (*start_pos)(thread_t)) +machine_thread_create( + thread_t thread, + task_t task) { - MachineThrAct_t mact = &thr_act->mact; + pcb_t pcb = &thread->mact.xxx_pcb; -#if MACH_ASSERT - if (watchacts & WA_PCB) - printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", - thread, thr_act, start_pos); -#endif /* MACH_ASSERT */ + thread->mact.pcb = pcb; - assert(thread != NULL); - assert(thr_act != NULL); + simple_lock_init(&pcb->lock, ETAP_MISC_PCB); + + /* + * Guarantee that the bootstrapped thread will be in user + * mode. + */ + pcb->iss.cs = USER_CS; + pcb->iss.ss = USER_DS; + pcb->iss.ds = USER_DS; + pcb->iss.es = USER_DS; + pcb->iss.fs = USER_DS; + pcb->iss.gs = USER_DS; + pcb->iss.efl = EFL_USER_SET; + { + extern struct fake_descriptor ldt[]; + struct real_descriptor *ldtp; + ldtp = (struct real_descriptor *)ldt; + pcb->cthread_desc = ldtp[sel_idx(USER_DS)]; + } /* * Allocate a kernel stack per shuttle */ - thread->kernel_stack = (int)stack_alloc(thread,start_pos); + thread->kernel_stack = (int)stack_alloc(thread, thread_continue); thread->state &= ~TH_STACK_HANDOFF; assert(thread->kernel_stack != 0); /* * Point top of kernel stack to user`s registers. */ - STACK_IEL(thread->kernel_stack)->saved_state = &mact->pcb->iss; + STACK_IEL(thread->kernel_stack)->saved_state = &pcb->iss; return(KERN_SUCCESS); } @@ -1086,15 +973,20 @@ thread_machine_create(thread_t thread, thread_act_t thr_act, void (*start_pos)(t * Machine-dependent cleanup prior to destroying a thread */ void -thread_machine_destroy( thread_t thread ) +machine_thread_destroy( + thread_t thread) { - spl_t s; + register pcb_t pcb = thread->mact.pcb; - if (thread->kernel_stack != 0) { - s = splsched(); - stack_free(thread); - splx(s); - } + assert(pcb); + + if (pcb->ims.io_tss != 0) + iopb_destroy(pcb->ims.io_tss); + if (pcb->ims.ifps != 0) + fp_free(pcb->ims.ifps); + if (pcb->ims.ldt != 0) + user_ldt_free(pcb->ims.ldt); + thread->mact.pcb = (pcb_t)0; } /* @@ -1102,75 +994,22 @@ thread_machine_destroy( thread_t thread ) * when starting up a new processor */ void -thread_machine_set_current( thread_t thread ) +machine_thread_set_current( thread_t thread ) { register int my_cpu; mp_disable_preemption(); my_cpu = cpu_number(); - cpu_data[my_cpu].active_thread = thread; - active_kloaded[my_cpu] = - thread->top_act->kernel_loaded ? 
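machine_thread_set_state()/machine_thread_get_state() above now accept two sizes of i386_FLOAT_STATE, discriminated only by the caller's count: the legacy FNSAVE-era layout routes to fpu_set_state()/fpu_get_state(), while the larger FXSAVE layout routes to the new fpu_set_fxstate()/fpu_get_fxstate(). The dispatch shape, with illustrative counts (the real i386_old_FLOAT_STATE_COUNT and i386_FLOAT_STATE_COUNT live in the thread_status headers):

typedef int kern_return_t;
#define KERN_SUCCESS          0
#define KERN_INVALID_ARGUMENT 4

enum { OLD_COUNT = 57, NEW_COUNT = 131 };   /* illustrative sizes */

extern kern_return_t set_legacy_fpstate(const void *buf);  /* fpu_set_state   */
extern kern_return_t set_fxstate(const void *buf);         /* fpu_set_fxstate */

kern_return_t set_float_state(const void *buf, unsigned count)
{
    if (count < OLD_COUNT)
        return KERN_INVALID_ARGUMENT;   /* too small for either layout */
    if (count < NEW_COUNT)
        return set_legacy_fpstate(buf); /* old FNSAVE image  */
    return set_fxstate(buf);            /* full FXSAVE image */
}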
thread->top_act : THR_ACT_NULL; + cpu_data[my_cpu].active_thread = thread->top_act; + active_kloaded[my_cpu] = THR_ACT_NULL; mp_enable_preemption(); } - -/* - * Pool of kernel activations. - */ - -void act_machine_init() -{ - int i; - thread_act_t thr_act; - -#if MACH_ASSERT - if (watchacts & WA_PCB) - printf("act_machine_init()\n"); -#endif /* MACH_ASSERT */ - - /* Good to verify this once */ - assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX ); -} - -kern_return_t -act_machine_create(task_t task, thread_act_t thr_act) -{ - MachineThrAct_t mact = &thr_act->mact; - pcb_t pcb; - -#if MACH_ASSERT - if (watchacts & WA_PCB) - printf("act_machine_create(task=%x,thr_act=%x) pcb=%x\n", - task,thr_act, &mact->xxx_pcb); -#endif /* MACH_ASSERT */ - - /* - * Clear & Init the pcb (sets up user-mode s regs) - */ - pcb_init(thr_act); - - return KERN_SUCCESS; -} - void -act_virtual_machine_destroy(thread_act_t thr_act) +machine_thread_terminate_self(void) { - return; -} - -void -act_machine_destroy(thread_act_t thr_act) -{ - -#if MACH_ASSERT - if (watchacts & WA_PCB) - printf("act_machine_destroy(0x%x)\n", thr_act); -#endif /* MACH_ASSERT */ - - pcb_terminate(thr_act); } void @@ -1178,18 +1017,6 @@ act_machine_return(int code) { thread_act_t thr_act = current_act(); -#if MACH_ASSERT - /* - * We don't go through the locking dance here needed to - * acquire thr_act->thread safely. - */ - - if (watchacts & WA_EXIT) - printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n", - code, thr_act, thr_act->ref_count, - thr_act->thread, thr_act->thread->ref_count); -#endif /* MACH_ASSERT */ - /* * This code is called with nothing locked. * It also returns with nothing locked, if it returns. @@ -1218,9 +1045,10 @@ act_machine_return(int code) * Perform machine-dependent per-thread initializations */ void -thread_machine_init(void) +machine_thread_init(void) { - pcb_module_init(); + fpu_module_init(); + iopb_init(); } /* @@ -1278,8 +1106,7 @@ dump_act(thread_act_t thr_act) thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0, thr_act->task, thr_act->task ? 
thr_act->task->ref_count : 0); - printf("\talerts=%x mask=%x susp=%d user_stop=%d active=%x ast=%x\n", - thr_act->alerts, thr_act->alert_mask, + printf("\tsusp=%d user_stop=%d active=%x ast=%x\n", thr_act->suspend_count, thr_act->user_stop_count, thr_act->active, thr_act->ast); printf("\thi=%x lo=%x\n", thr_act->higher, thr_act->lower); @@ -1322,7 +1149,7 @@ thread_swapin_mach_alloc(thread_t thread) */ vm_offset_t -stack_detach(thread_t thread) +machine_stack_detach(thread_t thread) { vm_offset_t stack; @@ -1341,7 +1168,7 @@ stack_detach(thread_t thread) */ void -stack_attach(struct thread_shuttle *thread, +machine_stack_attach(thread_t thread, vm_offset_t stack, void (*start_pos)(thread_t)) { @@ -1359,8 +1186,8 @@ stack_attach(struct thread_shuttle *thread, statep->k_eip = (unsigned long) Thread_continue; statep->k_ebx = (unsigned long) start_pos; statep->k_esp = (unsigned long) STACK_IEL(stack); - assert(thread->top_act); - STACK_IEL(stack)->saved_state = &thread->top_act->mact.pcb->iss; + + STACK_IEL(stack)->saved_state = &thread->mact.pcb->iss; return; } @@ -1370,12 +1197,11 @@ stack_attach(struct thread_shuttle *thread, */ void -stack_handoff(thread_t old, +machine_stack_handoff(thread_t old, thread_t new) { vm_offset_t stack; - pmap_t new_pmap; KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF), thread, thread->priority, @@ -1385,17 +1211,15 @@ stack_handoff(thread_t old, assert(new->top_act); assert(old->top_act); - stack = stack_detach(old); - stack_attach(new, stack, 0); + stack = machine_stack_detach(old); + machine_stack_attach(new, stack, 0); - new_pmap = new->top_act->task->map->pmap; - if (old->top_act->task->map->pmap != new_pmap) - PMAP_ACTIVATE_MAP(new->top_act->task->map, cpu_number()); + PMAP_SWITCH_CONTEXT(old->top_act->task, new->top_act->task, cpu_number()); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE, (int)old, (int)new, old->sched_pri, new->sched_pri, 0); - thread_machine_set_current(new); + machine_thread_set_current(new); active_stacks[cpu_number()] = new->kernel_stack; @@ -1420,13 +1244,19 @@ int val; return((void *)0); val = i386_SAVED_STATE_COUNT; - kret = act_machine_get_state(current_act(), i386_SAVED_STATE, &ic->ss, &val); + kret = machine_thread_get_state(current_act(), + i386_SAVED_STATE, + (thread_state_t) &ic->ss, + &val); if (kret != KERN_SUCCESS) { kfree((vm_offset_t)ic,sizeof(struct i386_act_context)); return((void *)0); } val = i386_FLOAT_STATE_COUNT; - kret = act_machine_get_state(current_act(), i386_FLOAT_STATE, &ic->fs, &val); + kret = machine_thread_get_state(current_act(), + i386_FLOAT_STATE, + (thread_state_t) &ic->fs, + &val); if (kret != KERN_SUCCESS) { kfree((vm_offset_t)ic,sizeof(struct i386_act_context)); return((void *)0); @@ -1445,11 +1275,17 @@ int val; if (ic == (struct i386_act_context *)NULL) return; - kret = act_machine_set_state(current_act(), i386_SAVED_STATE, &ic->ss, i386_SAVED_STATE_COUNT); + kret = machine_thread_set_state(current_act(), + i386_SAVED_STATE, + (thread_state_t) &ic->ss, + i386_SAVED_STATE_COUNT); if (kret != KERN_SUCCESS) goto out; - kret = act_machine_set_state(current_act(), i386_FLOAT_STATE, &ic->fs, i386_FLOAT_STATE_COUNT); + kret = machine_thread_set_state(current_act(), + i386_FLOAT_STATE, + (thread_state_t) &ic->fs, + i386_FLOAT_STATE_COUNT); if (kret != KERN_SUCCESS) goto out; out: diff --git a/osfmk/i386/phys.c b/osfmk/i386/phys.c index 62ccfe5da..8ec71aa87 100644 --- a/osfmk/i386/phys.c +++ b/osfmk/i386/phys.c @@ -63,9 +63,11 @@ */ void 
pmap_zero_page( - vm_offset_t p) + ppnum_t pn) { - assert(p != vm_page_fictitious_addr); + vm_offset_t p; + assert(pn != vm_page_fictitious_addr); + p = (vm_offset_t)i386_ptob(pn); bzero((char *)phystokv(p), PAGE_SIZE); } @@ -75,14 +77,13 @@ pmap_zero_page( */ void pmap_zero_part_page( - vm_offset_t p, + ppnum_t pn, vm_offset_t offset, vm_size_t len) { - assert(p != vm_page_fictitious_addr); + assert(pn != vm_page_fictitious_addr); assert(offset + len <= PAGE_SIZE); - - bzero((char *)phystokv(p) + offset, len); + bzero((char *)phystokv(i386_ptob(pn)) + offset, len); } /* @@ -90,11 +91,16 @@ pmap_zero_part_page( */ void pmap_copy_page( - vm_offset_t src, - vm_offset_t dst) + ppnum_t psrc, + ppnum_t pdst) + { - assert(src != vm_page_fictitious_addr); - assert(dst != vm_page_fictitious_addr); + vm_offset_t src,dst; + + assert(psrc != vm_page_fictitious_addr); + assert(pdst != vm_page_fictitious_addr); + src = (vm_offset_t)i386_ptob(psrc); + dst = (vm_offset_t)i386_ptob(pdst); memcpy((void *)phystokv(dst), (void *)phystokv(src), PAGE_SIZE); } @@ -104,14 +110,18 @@ pmap_copy_page( */ void pmap_copy_part_page( - vm_offset_t src, + ppnum_t psrc, vm_offset_t src_offset, - vm_offset_t dst, + ppnum_t pdst, vm_offset_t dst_offset, vm_size_t len) { - assert(src != vm_page_fictitious_addr); - assert(dst != vm_page_fictitious_addr); + vm_offset_t src, dst; + + assert(psrc != vm_page_fictitious_addr); + assert(pdst != vm_page_fictitious_addr); + src = (vm_offset_t)i386_ptob(psrc); + dst = (vm_offset_t)i386_ptob(pdst); assert(((dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE); assert(((src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE); @@ -125,13 +135,16 @@ pmap_copy_part_page( */ void pmap_copy_part_lpage( - vm_offset_t src, - vm_offset_t dst, + vm_offset_t src, + ppnum_t pdst, vm_offset_t dst_offset, vm_size_t len) { + vm_offset_t dst; + assert(src != vm_page_fictitious_addr); - assert(dst != vm_page_fictitious_addr); + assert(pdst != vm_page_fictitious_addr); + dst = (vm_offset_t)i386_ptob(pdst); assert(((dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE); memcpy((void *)(phystokv(dst) + dst_offset), (void *)src, len); @@ -143,13 +156,16 @@ pmap_copy_part_lpage( */ void pmap_copy_part_rpage( - vm_offset_t src, + ppnum_t psrc, vm_offset_t src_offset, vm_offset_t dst, vm_size_t len) { - assert(src != vm_page_fictitious_addr); + vm_offset_t src; + + assert(psrc != vm_page_fictitious_addr); assert(dst != vm_page_fictitious_addr); + src = (vm_offset_t)i386_ptob(psrc); assert(((src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE); memcpy((void *)dst, (void *)(phystokv(src) + src_offset), len); diff --git a/osfmk/i386/pmap.c b/osfmk/i386/pmap.c index 14edd7326..243f0402d 100644 --- a/osfmk/i386/pmap.c +++ b/osfmk/i386/pmap.c @@ -116,6 +116,8 @@ #include #include +#include +#include #if MACH_KDB #include @@ -127,7 +129,7 @@ #include #if NCPUS > 1 -#include +#include #endif /* @@ -151,7 +153,7 @@ boolean_t phys_attribute_test( vm_offset_t phys, int bits); -void pmap_set_modify(vm_offset_t phys); +void pmap_set_modify(ppnum_t pn); void phys_attribute_set( vm_offset_t phys, @@ -170,6 +172,9 @@ pmap_t real_pmap[NCPUS]; #define WRITE_PTE(pte_p, pte_entry) *(pte_p) = (pte_entry); #define WRITE_PTE_FAST(pte_p, pte_entry) *(pte_p) = (pte_entry); +#define value_64bit(value) ((value) & 0xFFFFFFFF00000000LL) +#define low32(x) ((unsigned int)((x) & 0x00000000ffffffffLL)) + /* * Private data structures. 
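The hunks above convert the osfmk/i386/phys.c primitives from byte addresses to physical page numbers (ppnum_t), translating at the boundary with i386_ptob() before mapping through phystokv(). A minimal standalone sketch of that conversion pattern follows (editor's illustration: the 4 KB page size is real, but KV_BASE and the two helper definitions are stand-ins for the kernel's macros, not the actual xnu code).

#include <stdint.h>
#include <stdio.h>

typedef uint32_t ppnum_t;        /* physical page number, as in the patch */

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1u << PAGE_SHIFT)
#define KV_BASE     0xC0000000u  /* assumed kernel-virtual offset (hypothetical) */

static uint32_t i386_ptob(ppnum_t pn) { return pn << PAGE_SHIFT; }  /* pages -> bytes */
static uint32_t phystokv(uint32_t pa) { return pa + KV_BASE; }      /* phys -> kernel VA (modeled) */

int main(void)
{
    ppnum_t pn = 0x1234;
    /* pmap_zero_page(pn) now does, in effect: */
    uint32_t kva = phystokv(i386_ptob(pn));
    printf("page %#x -> phys %#x -> kva %#x\n",
           (unsigned)pn, (unsigned)i386_ptob(pn), (unsigned)kva);
    return 0;
}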
*/ @@ -255,6 +260,7 @@ char *pmap_phys_attributes; */ #define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */ #define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */ +#define PHYS_NCACHE INTEL_PTE_NCACHE /* * Amount of virtual memory mapped by one @@ -307,7 +313,7 @@ vm_object_t pmap_object = VM_OBJECT_NULL; #if NCPUS > 1 /* - * We raise the interrupt level to splhigh, to block interprocessor + * We raise the interrupt level to splvm, to block interprocessor * interrupts during pmap operations. We must take the CPU out of * the cpus_active set while interrupts are blocked. */ @@ -361,23 +367,53 @@ lock_t pmap_system_lock; #define UNLOCK_PVH(index) unlock_pvh_pai(index) -#define PMAP_FLUSH_TLBS() \ -{ \ - flush_tlb(); \ - i386_signal_cpus(MP_TLB_FLUSH); \ -} - -#define PMAP_RELOAD_TLBS() { \ - i386_signal_cpus(MP_TLB_RELOAD); \ - set_cr3(kernel_pmap->pdirbase); \ -} +#if USLOCK_DEBUG +extern int max_lock_loops; +#define LOOP_VAR int loop_count = 0 +#define LOOP_CHECK(msg, pmap) \ + if (loop_count++ > max_lock_loops) { \ + mp_disable_preemption(); \ + kprintf("%s: cpu %d pmap %x, cpus_active %d\n", \ + msg, cpu_number(), pmap, cpus_active); \ + Debugger("deadlock detection"); \ + mp_enable_preemption(); \ + loop_count = 0; \ + } +#else /* USLOCK_DEBUG */ +#define LOOP_VAR +#define LOOP_CHECK(msg, pmap) +#endif /* USLOCK_DEBUG */ -#define PMAP_INVALIDATE_PAGE(map, addr) { \ - if (map == kernel_pmap) \ - invlpg((vm_offset_t) addr); \ - else \ - flush_tlb(); \ - i386_signal_cpus(MP_TLB_FLUSH); \ +#define PMAP_UPDATE_TLBS(pmap, s, e) \ +{ \ + cpu_set cpu_mask; \ + cpu_set users; \ + \ + mp_disable_preemption(); \ + cpu_mask = 1 << cpu_number(); \ + \ + /* Since the pmap is locked, other updates are locked */ \ + /* out, and any pmap_activate has finished. */ \ + \ + /* find other cpus using the pmap */ \ + users = (pmap)->cpus_using & ~cpu_mask; \ + if (users) { \ + LOOP_VAR; \ + /* signal them, and wait for them to finish */ \ + /* using the pmap */ \ + signal_cpus(users, (pmap), (s), (e)); \ + while (((pmap)->cpus_using & cpus_active & ~cpu_mask)) { \ + LOOP_CHECK("PMAP_UPDATE_TLBS", pmap); \ + cpu_pause(); \ + } \ + } \ + /* invalidate our own TLB if pmap is in use */ \ + \ + if ((pmap)->cpus_using & cpu_mask) { \ + INVALIDATE_TLB((pmap), (s), (e)); \ + } \ + \ + mp_enable_preemption(); \ } #else /* NCPUS > 1 */ @@ -406,17 +442,21 @@ lock_t pmap_system_lock; #define PMAP_FLUSH_TLBS() flush_tlb() #define PMAP_RELOAD_TLBS() set_cr3(kernel_pmap->pdirbase) -#define PMAP_INVALIDATE_PAGE(map, addr) { \ - if (map == kernel_pmap) \ - invlpg((vm_offset_t) addr); \ - else \ - flush_tlb(); \ +#define PMAP_INVALIDATE_PAGE(map, saddr, eaddr) { \ + if (map == kernel_pmap) \ + invlpg((vm_offset_t) saddr); \ + else \ + flush_tlb(); \ } #endif /* NCPUS > 1 */ #define MAX_TBIS_SIZE 32 /* > this -> TBIA */ /* XXX */ +#define INVALIDATE_TLB(m, s, e) { \ + flush_tlb(); \ +} + #if NCPUS > 1 /* * Structures to keep track of pending TLB invalidations @@ -425,6 +465,34 @@ cpu_set cpus_active; cpu_set cpus_idle; volatile boolean_t cpu_update_needed[NCPUS]; +#define UPDATE_LIST_SIZE 4 + +struct pmap_update_item { + pmap_t pmap; /* pmap to invalidate */ + vm_offset_t start; /* start address to invalidate */ + vm_offset_t end; /* end address to invalidate */ +}; + +typedef struct pmap_update_item *pmap_update_item_t; + +/* + * List of pmap updates. If the list overflows, + * the last entry is changed to invalidate all. 
+ */ +struct pmap_update_list { + decl_simple_lock_data(,lock) + int count; + struct pmap_update_item item[UPDATE_LIST_SIZE]; +} ; +typedef struct pmap_update_list *pmap_update_list_t; + +struct pmap_update_list cpu_update_list[NCPUS]; + +extern void signal_cpus( + cpu_set use_list, + pmap_t pmap, + vm_offset_t start, + vm_offset_t end); #endif /* NCPUS > 1 */ @@ -555,7 +623,7 @@ pmap_map( ps = PAGE_SIZE; while (start < end) { - pmap_enter(kernel_pmap, virt, start, prot, 0, FALSE); + pmap_enter(kernel_pmap, virt, (ppnum_t)i386_btop(start), prot, 0, FALSE); virt += ps; start += ps; } @@ -598,8 +666,7 @@ pmap_map_bd( start += PAGE_SIZE; } - PMAP_FLUSH_TLBS(); - + flush_tlb(); return(virt); } @@ -632,10 +699,13 @@ pmap_bootstrap( vm_offset_t load_start) { vm_offset_t va, tva, paddr; + ppnum_t pn; pt_entry_t template; pt_entry_t *pde, *pte, *ptend; vm_size_t morevm; /* VM space for kernel map */ + vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Set the highest address known to VM */ + /* * Set ptes_per_vm_page for general use. */ @@ -748,11 +818,11 @@ pmap_bootstrap( if (virtual_end + morevm < VM_MAX_KERNEL_LOADED_ADDRESS + 1) morevm = VM_MAX_KERNEL_LOADED_ADDRESS + 1 - virtual_end; - virtual_end += morevm; for (tva = va; tva < virtual_end; tva += INTEL_PGBYTES) { if (pte >= ptend) { - pmap_next_page(&paddr); + pmap_next_page(&pn); + paddr = i386_ptob(pn); pte = (pt_entry_t *)phystokv(paddr); ptend = pte + NPTES; *pde = PA_TO_PTE((vm_offset_t) pte) @@ -794,6 +864,19 @@ pmap_bootstrap( kernel_pmap->pdirbase = kvtophys((vm_offset_t)kernel_pmap->dirbase); + if (cpuid_features() & CPUID_FEATURE_PAT) + { + uint64_t pat; + uint32_t msr; + + msr = 0x277; + asm volatile("rdmsr" : "=A" (pat) : "c" (msr)); + + pat &= ~(0xfULL << 48); + pat |= 0x01ULL << 48; + + asm volatile("wrmsr" :: "A" (pat), "c" (msr)); + } } void @@ -854,6 +937,18 @@ pmap_init(void) s = (vm_size_t) sizeof(struct pv_entry); pv_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */ +#if NCPUS > 1 + /* + * Set up the pmap request lists + */ + for (i = 0; i < NCPUS; i++) { + pmap_update_list_t up = &cpu_update_list[i]; + + simple_lock_init(&up->lock, ETAP_VM_PMAP_UPDATE); + up->count = 0; + } +#endif /* NCPUS > 1 */ + /* * Only now, when all of the data structures are allocated, * can we set vm_first_phys and vm_last_phys. If we set them @@ -881,14 +976,16 @@ pmap_init(void) boolean_t pmap_verify_free( - vm_offset_t phys) + ppnum_t pn) { + vm_offset_t phys; pv_entry_t pv_h; int pai; spl_t spl; boolean_t result; - assert(phys != vm_page_fictitious_addr); + assert(pn != vm_page_fictitious_addr); + phys = (vm_offset_t)i386_ptob(pn); if (!pmap_initialized) return(TRUE); @@ -1002,8 +1099,6 @@ pmap_create( simple_lock(&pmap_cache_lock); } - assert(p->stats.resident_count == 0); - assert(p->stats.wired_count == 0); p->stats.resident_count = 0; p->stats.wired_count = 0; @@ -1050,11 +1145,17 @@ pmap_destroy( * physically on the right pmap: */ +#if NCPUS > 1 + /* force pmap/cr3 update */ + PMAP_UPDATE_TLBS(p, + VM_MIN_ADDRESS, + VM_MAX_KERNEL_ADDRESS); +#endif /* NCPUS > 1 */ if (real_pmap[my_cpu] == p) { PMAP_CPU_CLR(p, my_cpu); real_pmap[my_cpu] = kernel_pmap; - PMAP_RELOAD_TLBS(); + set_cr3(kernel_pmap->pdirbase); } mp_enable_preemption(); } @@ -1097,8 +1198,14 @@ pmap_destroy( } } + + /* + * XXX These asserts fail on system shutdown. 
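The pmap_bootstrap() addition above reprograms the Page Attribute Table when CPUID advertises PAT support: MSR 0x277 holds eight one-byte entries, and the rdmsr/wrmsr pair rewrites entry PA6 (bits 48..55) to type 0x01, which is write-combining. A standalone model of that bit manipulation (editor's sketch, not kernel code; the starting value used here is the architected power-on default, an assumption rather than something the patch states):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t pat = 0x0007040600070406ULL;  /* assumed IA-32 reset default; PA6 = 0x07 (UC-) */

    pat &= ~(0xfULL << 48);   /* clear the type field of entry PA6 */
    pat |=   0x01ULL << 48;   /* set PA6 = 0x01, the write-combining type */

    printf("PA6 is now %#llx (WC)\n",
           (unsigned long long)((pat >> 48) & 0xff));
    return 0;
}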
+ * assert(p->stats.resident_count == 0); assert(p->stats.wired_count == 0); + * + */ /* * Add to cache if not already full @@ -1245,7 +1352,7 @@ pmap_remove_range( do { prev = cur; if ((cur = prev->next) == PV_ENTRY_NULL) { - panic("pmap-remove: mapping not in pv_list!"); + panic("pmap-remove: mapping not in pv_list!"); } } while (cur->va != va || cur->pmap != pmap); prev->next = cur->next; @@ -1271,7 +1378,7 @@ pmap_remove_range( void pmap_remove_some_phys( pmap_t map, - vm_offset_t phys_addr) + ppnum_t pn) { /* Implement to support working set code */ @@ -1287,22 +1394,36 @@ pmap_remove_some_phys( * rounded to the hardware page size. */ + void pmap_remove( pmap_t map, - vm_offset_t s, - vm_offset_t e) + addr64_t s64, + addr64_t e64) { spl_t spl; register pt_entry_t *pde; register pt_entry_t *spte, *epte; vm_offset_t l; + vm_offset_t s, e; if (map == PMAP_NULL) return; PMAP_READ_LOCK(map, spl); + if (value_64bit(s64) || value_64bit(e64)) { + panic("pmap_remove addr overflow"); + } + + s = (vm_offset_t)low32(s64); + e = (vm_offset_t)low32(e64); + + /* + * Invalidate the translation buffer first + */ + PMAP_UPDATE_TLBS(map, s, e); + pde = pmap_pde(map, s); while (s < e) { @@ -1319,8 +1440,6 @@ pmap_remove( pde++; } - PMAP_FLUSH_TLBS(); - PMAP_READ_UNLOCK(map, spl); } @@ -1333,7 +1452,7 @@ pmap_remove( */ void pmap_page_protect( - vm_offset_t phys, + ppnum_t pn, vm_prot_t prot) { pv_entry_t pv_h, prev; @@ -1343,8 +1462,10 @@ pmap_page_protect( register pmap_t pmap; spl_t spl; boolean_t remove; + vm_offset_t phys; - assert(phys != vm_page_fictitious_addr); + assert(pn != vm_page_fictitious_addr); + phys = (vm_offset_t)i386_ptob(pn); if (!valid_page(phys)) { /* * Not a managed page. @@ -1407,7 +1528,7 @@ pmap_page_protect( /* * Invalidate TLBs for all CPUs using this mapping. */ - PMAP_INVALIDATE_PAGE(pmap, va); + PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE); } /* @@ -1516,7 +1637,7 @@ pmap_protect( case VM_PROT_ALL: return; /* nothing to do */ default: - pmap_remove(map, s, e); + pmap_remove(map, (addr64_t)s, (addr64_t)e); return; } @@ -1528,15 +1649,19 @@ pmap_protect( * XXX should be #if'd for i386 */ - if (cpuid_family == CPUID_FAMILY_386) + if (cpuid_family() == CPUID_FAMILY_386) if (map == kernel_pmap) { - pmap_remove(map, s, e); + pmap_remove(map, (addr64_t)s, (addr64_t)e); return; } SPLVM(spl); simple_lock(&map->lock); + /* + * Invalidate the translation buffer first + */ + PMAP_UPDATE_TLBS(map, s, e); pde = pmap_pde(map, s); while (s < e) { @@ -1558,8 +1683,6 @@ pmap_protect( pde++; } - PMAP_FLUSH_TLBS(); - simple_unlock(&map->lock); SPLX(spl); } @@ -1582,7 +1705,7 @@ void pmap_enter( register pmap_t pmap, vm_offset_t v, - register vm_offset_t pa, + ppnum_t pn, vm_prot_t prot, unsigned int flags, boolean_t wired) @@ -1594,19 +1717,20 @@ pmap_enter( pt_entry_t template; spl_t spl; vm_offset_t old_pa; + vm_offset_t pa = (vm_offset_t)i386_ptob(pn); XPR(0x80000000, "%x/%x: pmap_enter %x/%x/%x\n", current_thread()->top_act, current_thread(), - pmap, v, pa); + pmap, v, pn); - assert(pa != vm_page_fictitious_addr); + assert(pn != vm_page_fictitious_addr); if (pmap_debug) - printf("pmap(%x, %x)\n", v, pa); + printf("pmap(%x, %x)\n", v, pn); if (pmap == PMAP_NULL) return; - if (cpuid_family == CPUID_FAMILY_386) + if (cpuid_family() == CPUID_FAMILY_386) if (pmap == kernel_pmap && (prot & VM_PROT_WRITE) == 0 && !wired /* hack for io_wire */ ) { /* @@ -1624,7 +1748,7 @@ pmap_enter( * Invalidate the translation buffer, * then remove the mapping. 
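pmap_remove() above now takes addr64_t bounds but panics rather than operate above 4 GB, using the value_64bit()/low32() macros defined earlier in this diff. A small self-contained illustration of that guard-and-truncate pattern (editor's sketch; the sample addresses are arbitrary):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t addr64_t;

#define value_64bit(v) ((v) & 0xFFFFFFFF00000000LL)
#define low32(x)       ((unsigned int)((x) & 0x00000000ffffffffLL))

int main(void)
{
    addr64_t ok  = 0x00000000c0000000ULL;   /* fits in 32 bits: truncated and used */
    addr64_t bad = 0x0000000100000000ULL;   /* would trip the overflow panic */

    if (!value_64bit(ok))
        printf("ok: truncated to %#x\n", low32(ok));
    if (value_64bit(bad))
        printf("bad: pmap_remove would panic (\"addr overflow\")\n");
    return 0;
}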
*/ - PMAP_INVALIDATE_PAGE(pmap, v); + PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE); pmap_remove_range(pmap, v, pte, pte + ptes_per_vm_page); } @@ -1668,8 +1792,15 @@ Retry: /* * May be changing its wired attribute or protection */ - + template = pa_to_pte(pa) | INTEL_PTE_VALID; + + if(flags & VM_MEM_NOT_CACHEABLE) { + if(!(flags & VM_MEM_GUARDED)) + template |= INTEL_PTE_PTA; + template |= INTEL_PTE_NCACHE; + } + if (pmap != kernel_pmap) template |= INTEL_PTE_USER; if (prot & VM_PROT_WRITE) @@ -1686,8 +1817,7 @@ Retry: } } - PMAP_INVALIDATE_PAGE(pmap, v); - + PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE); i = ptes_per_vm_page; do { if (*pte & INTEL_PTE_MOD) @@ -1720,7 +1850,7 @@ Retry: if (old_pa != (vm_offset_t) 0) { - PMAP_INVALIDATE_PAGE(pmap, v); + PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE); #if DEBUG_PTE_PAGE if (pmap != kernel_pmap) @@ -1884,7 +2014,7 @@ RetryPvList: * Invalidate the translation buffer, * then remove the mapping. */ - PMAP_INVALIDATE_PAGE(pmap, e->va); + PMAP_UPDATE_TLBS(pmap, e->va, e->va + PAGE_SIZE); pmap_remove_range(pmap, e->va, opte, opte + ptes_per_vm_page); /* @@ -1988,6 +2118,13 @@ RetryPvList: * only the pfn changes. */ template = pa_to_pte(pa) | INTEL_PTE_VALID; + + if(flags & VM_MEM_NOT_CACHEABLE) { + if(!(flags & VM_MEM_GUARDED)) + template |= INTEL_PTE_PTA; + template |= INTEL_PTE_NCACHE; + } + if (pmap != kernel_pmap) template |= INTEL_PTE_USER; if (prot & VM_PROT_WRITE) @@ -2027,7 +2164,7 @@ pmap_change_wiring( register int i; spl_t spl; -#if 0 +#if 1 /* * We must grab the pmap system lock because we may * change a pte_page queue. @@ -2067,6 +2204,22 @@ pmap_change_wiring( } +ppnum_t +pmap_find_phys(pmap_t pmap, addr64_t va) +{ + pt_entry_t *ptp; + vm_offset_t a32; + ppnum_t ppn; + + if (value_64bit(va)) panic("pmap_find_phys 64 bit value"); + a32 = (vm_offset_t)low32(va); + ptp = pmap_pte(pmap, a32); + if (PT_ENTRY_NULL == ptp) + return 0; + ppn = (ppnum_t)i386_btop(pte_to_pa(*ptp)); + return ppn; +} + /* * Routine: pmap_extract * Function: @@ -2121,6 +2274,7 @@ pmap_expand( register vm_offset_t pa; register int i; spl_t spl; + ppnum_t pn; if (map == kernel_pmap) panic("pmap_expand"); @@ -2143,9 +2297,10 @@ pmap_expand( * Map the page to its physical address so that it * can be found later. */ - pa = m->phys_addr; + pn = m->phys_page; + pa = i386_ptob(pn); vm_object_lock(pmap_object); - vm_page_insert(m, pmap_object, pa); + vm_page_insert(m, pmap_object, (vm_object_offset_t)pa); vm_page_lock_queues(); vm_page_wire(m); inuse_ptepages_count++; @@ -2215,6 +2370,22 @@ pmap_copy( } #endif/* 0 */ +/* + * pmap_sync_caches_phys(ppnum_t pa) + * + * Invalidates all of the instruction cache on a physical page and + * pushes any dirty data from the data cache for the same physical page + */ + +void pmap_sync_caches_phys(ppnum_t pa) +{ +// if (!(cpuid_features() & CPUID_FEATURE_SS)) + { + __asm__ volatile("wbinvd"); + } + return; +} + int collect_ref; int collect_unref; @@ -2249,7 +2420,7 @@ pmap_collect( * Garbage collect map. */ PMAP_READ_LOCK(p, spl); - PMAP_FLUSH_TLBS(); + PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS); for (pdp = p->dirbase; pdp < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)]; @@ -2477,7 +2648,7 @@ phys_attribute_clear( /* * Invalidate TLBs for all CPUs using this mapping. 
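The template logic above gives VM_MEM_NOT_CACHEABLE mappings the INTEL_PTE_NCACHE (PCD) bit and, when VM_MEM_GUARDED is not also set, INTEL_PTE_PTA, the PAT-select bit. Architecturally the PAT entry index is PAT<<2 | PCD<<1 | PWT, so PTA plus NCACHE selects entry 6, the one pmap_bootstrap() retyped to write-combining, while NCACHE alone selects entry 2 (UC- in the default PAT). A small check of that index arithmetic (editor's sketch; the bit positions are the architectural ones for 4K PTEs, the macro names are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define PTE_PWT 0x008u  /* write-through, architectural bit 3 */
#define PTE_PCD 0x010u  /* cache disable, bit 4: corresponds to INTEL_PTE_NCACHE */
#define PTE_PAT 0x080u  /* PAT select, bit 7 for 4K pages: corresponds to INTEL_PTE_PTA */

static unsigned pat_index(uint32_t pte)
{
    return ((pte & PTE_PAT) ? 4u : 0u) |
           ((pte & PTE_PCD) ? 2u : 0u) |
           ((pte & PTE_PWT) ? 1u : 0u);
}

int main(void)
{
    /* VM_MEM_NOT_CACHEABLE alone: PTA|NCACHE -> entry 6 -> write-combining */
    printf("PTA|PCD -> PAT entry %u\n", pat_index(PTE_PAT | PTE_PCD));
    /* VM_MEM_NOT_CACHEABLE|VM_MEM_GUARDED: NCACHE only -> entry 2 -> uncached */
    printf("PCD     -> PAT entry %u\n", pat_index(PTE_PCD));
    return 0;
}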
*/ - PMAP_INVALIDATE_PAGE(pmap, va); + PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE); } /* @@ -2623,8 +2794,9 @@ phys_attribute_set( */ void pmap_set_modify( - register vm_offset_t phys) + ppnum_t pn) { + vm_offset_t phys = (vm_offset_t)i386_ptob(pn); phys_attribute_set(phys, PHYS_MODIFIED); } @@ -2634,8 +2806,9 @@ void pmap_set_modify( void pmap_clear_modify( - register vm_offset_t phys) + ppnum_t pn) { + vm_offset_t phys = (vm_offset_t)i386_ptob(pn); phys_attribute_clear(phys, PHYS_MODIFIED); } @@ -2648,8 +2821,9 @@ pmap_clear_modify( boolean_t pmap_is_modified( - register vm_offset_t phys) + ppnum_t pn) { + vm_offset_t phys = (vm_offset_t)i386_ptob(pn); return (phys_attribute_test(phys, PHYS_MODIFIED)); } @@ -2661,8 +2835,9 @@ pmap_is_modified( void pmap_clear_reference( - vm_offset_t phys) + ppnum_t pn) { + vm_offset_t phys = (vm_offset_t)i386_ptob(pn); phys_attribute_clear(phys, PHYS_REFERENCED); } @@ -2675,8 +2850,9 @@ pmap_clear_reference( boolean_t pmap_is_referenced( - vm_offset_t phys) + ppnum_t pn) { + vm_offset_t phys = (vm_offset_t)i386_ptob(pn); return (phys_attribute_test(phys, PHYS_REFERENCED)); } @@ -2703,6 +2879,11 @@ pmap_modify_pages( PMAP_READ_LOCK(map, spl); + /* + * Invalidate the translation buffer first + */ + PMAP_UPDATE_TLBS(map, s, e); + pde = pmap_pde(map, s); while (s && s < e) { l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1); @@ -2727,7 +2908,6 @@ pmap_modify_pages( s = l; pde++; } - PMAP_FLUSH_TLBS(); PMAP_READ_UNLOCK(map, spl); } @@ -2744,58 +2924,205 @@ flush_dcache(vm_offset_t addr, unsigned count, int phys) } #if NCPUS > 1 +/* +* TLB Coherence Code (TLB "shootdown" code) +* +* Threads that belong to the same task share the same address space and +* hence share a pmap. However, they may run on distinct cpus and thus +* have distinct TLBs that cache page table entries. In order to guarantee +* the TLBs are consistent, whenever a pmap is changed, all threads that +* are active in that pmap must have their TLB updated. To keep track of +* this information, the set of cpus that are currently using a pmap is +* maintained within each pmap structure (cpus_using). Pmap_activate() and +* pmap_deactivate add and remove, respectively, a cpu from this set. +* Since the TLBs are not addressable over the bus, each processor must +* flush its own TLB; a processor that needs to invalidate another TLB +* needs to interrupt the processor that owns that TLB to signal the +* update. +* +* Whenever a pmap is updated, the lock on that pmap is locked, and all +* cpus using the pmap are signaled to invalidate. All threads that need +* to activate a pmap must wait for the lock to clear to await any updates +* in progress before using the pmap. They must ACQUIRE the lock to add +* their cpu to the cpus_using set. An implicit assumption made +* throughout the TLB code is that all kernel code that runs at or higher +* than splvm blocks out update interrupts, and that such code does not +* touch pageable pages. +* +* A shootdown interrupt serves another function besides signaling a +* processor to invalidate. The interrupt routine (pmap_update_interrupt) +* waits for the both the pmap lock (and the kernel pmap lock) to clear, +* preventing user code from making implicit pmap updates while the +* sending processor is performing its update. (This could happen via a +* user data write reference that turns on the modify bit in the page +* table). It must wait for any kernel updates that may have started +* concurrently with a user pmap update because the IPC code +* changes mappings. 
+* Spinning on the VALUES of the locks is sufficient (rather than +* having to acquire the locks) because any updates that occur subsequent +* to finding the lock unlocked will be signaled via another interrupt. +* (This assumes the interrupt is cleared before the low level interrupt code +* calls pmap_update_interrupt()). +* +* The signaling processor must wait for any implicit updates in progress +* to terminate before continuing with its update. Thus it must wait for an +* acknowledgement of the interrupt from each processor for which such +* references could be made. For maintaining this information, a set +* cpus_active is used. A cpu is in this set if and only if it can +* use a pmap. When pmap_update_interrupt() is entered, a cpu is removed from +* this set; when all such cpus are removed, it is safe to update. +* +* Before attempting to acquire the update lock on a pmap, a cpu (A) must +* be at least at the priority of the interprocessor interrupt +* (splip<=splvm). Otherwise, A could grab a lock and be interrupted by a +* kernel update; it would spin forever in pmap_update_interrupt() trying +* to acquire the user pmap lock it had already acquired. Furthermore A +* must remove itself from cpus_active. Otherwise, another cpu holding +* the lock (B) could be in the process of sending an update signal to A, +* and thus be waiting for A to remove itself from cpus_active. If A is +* spinning on the lock at priority this will never happen and a deadlock +* will result. +*/ + +/* + * Signal another CPU that it must flush its TLB + */ +void +signal_cpus( + cpu_set use_list, + pmap_t pmap, + vm_offset_t start, + vm_offset_t end) +{ + register int which_cpu, j; + register pmap_update_list_t update_list_p; + + while ((which_cpu = ffs((unsigned long)use_list)) != 0) { + which_cpu -= 1; /* convert to 0 origin */ + + update_list_p = &cpu_update_list[which_cpu]; + simple_lock(&update_list_p->lock); + + j = update_list_p->count; + if (j >= UPDATE_LIST_SIZE) { + /* + * list overflowed. Change last item to + * indicate overflow. 
+ */ + update_list_p->item[UPDATE_LIST_SIZE-1].pmap = kernel_pmap; + update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_ADDRESS; + update_list_p->item[UPDATE_LIST_SIZE-1].end = VM_MAX_KERNEL_ADDRESS; + } + else { + update_list_p->item[j].pmap = pmap; + update_list_p->item[j].start = start; + update_list_p->item[j].end = end; + update_list_p->count = j+1; + } + cpu_update_needed[which_cpu] = TRUE; + simple_unlock(&update_list_p->lock); + + /* if its the kernel pmap, ignore cpus_idle */ + if (((cpus_idle & (1 << which_cpu)) == 0) || + (pmap == kernel_pmap) || real_pmap[which_cpu] == pmap) + { + i386_signal_cpu(which_cpu, MP_TLB_FLUSH, ASYNC); + } + use_list &= ~(1 << which_cpu); + } +} -void inline -pmap_wait_for_clear() +void +process_pmap_updates( + register pmap_t my_pmap) { register int my_cpu; - spl_t s; - register pmap_t my_pmap; + register pmap_update_list_t update_list_p; + register int j; + register pmap_t pmap; mp_disable_preemption(); my_cpu = cpu_number(); - + update_list_p = &cpu_update_list[my_cpu]; + simple_lock(&update_list_p->lock); - my_pmap = real_pmap[my_cpu]; + for (j = 0; j < update_list_p->count; j++) { + pmap = update_list_p->item[j].pmap; + if (pmap == my_pmap || + pmap == kernel_pmap) { - if (!(my_pmap && pmap_in_use(my_pmap, my_cpu))) - my_pmap = kernel_pmap; + if (pmap->ref_count <= 0) { + PMAP_CPU_CLR(pmap, my_cpu); + real_pmap[my_cpu] = kernel_pmap; + set_cr3(kernel_pmap->pdirbase); + } else + INVALIDATE_TLB(pmap, + update_list_p->item[j].start, + update_list_p->item[j].end); + } + } + update_list_p->count = 0; + cpu_update_needed[my_cpu] = FALSE; + simple_unlock(&update_list_p->lock); + mp_enable_preemption(); +} + +/* + * Interrupt routine for TBIA requested from other processor. + * This routine can also be called at all interrupts time if + * the cpu was idle. Some driver interrupt routines might access + * newly allocated vm. (This is the case for hd) + */ +void +pmap_update_interrupt(void) +{ + register int my_cpu; + spl_t s; + register pmap_t my_pmap; + + mp_disable_preemption(); + my_cpu = cpu_number(); /* - * Raise spl to splhigh (above splip) to block out pmap_extract + * Raise spl to splvm (above splip) to block out pmap_extract * from IO code (which would put this cpu back in the active * set). */ s = splhigh(); + + my_pmap = real_pmap[my_cpu]; - /* - * Wait for any pmap updates in progress, on either user - * or kernel pmap. - */ - while (*(volatile hw_lock_t)&my_pmap->lock.interlock || - *(volatile hw_lock_t)&kernel_pmap->lock.interlock) { - continue; - } + if (!(my_pmap && pmap_in_use(my_pmap, my_cpu))) + my_pmap = kernel_pmap; - splx(s); - mp_enable_preemption(); -} + do { + LOOP_VAR; -void -pmap_flush_tlb_interrupt(void) { - pmap_wait_for_clear(); + /* + * Indicate that we're not using either user or kernel + * pmap. + */ + i_bit_clear(my_cpu, &cpus_active); - flush_tlb(); -} + /* + * Wait for any pmap updates in progress, on either user + * or kernel pmap. 
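signal_cpus() above posts (pmap, start, end) triples to a fixed four-entry per-CPU list and, on overflow, widens the final slot to a kernel-wide invalidate, so correctness never depends on list capacity, only precision does. A standalone model of just that bookkeeping (editor's sketch; locking, IPIs, and the pmap pointer are omitted):

#include <stdint.h>
#include <stdio.h>

#define UPDATE_LIST_SIZE 4

struct item        { uint32_t start, end; };
struct update_list { int count; struct item item[UPDATE_LIST_SIZE]; };

static void post_update(struct update_list *up, uint32_t s, uint32_t e)
{
    if (up->count >= UPDATE_LIST_SIZE) {
        /* overflowed: degrade the final entry to a full-range invalidate */
        up->item[UPDATE_LIST_SIZE - 1] = (struct item){ 0, ~0u };
    } else {
        up->item[up->count++] = (struct item){ s, e };
    }
}

int main(void)
{
    struct update_list up = { 0 };
    for (uint32_t i = 0; i < 6; i++)       /* two more requests than the list holds */
        post_update(&up, i * 0x1000, (i + 1) * 0x1000);
    for (int j = 0; j < up.count; j++)
        printf("invalidate [%#x, %#x)\n",
               (unsigned)up.item[j].start, (unsigned)up.item[j].end);
    return 0;
}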
+ */ + while (*(volatile hw_lock_t)&my_pmap->lock.interlock || + *(volatile hw_lock_t)&kernel_pmap->lock.interlock) { + LOOP_CHECK("pmap_update_interrupt", my_pmap); + cpu_pause(); + } -void -pmap_reload_tlb_interrupt(void) { - pmap_wait_for_clear(); + process_pmap_updates(my_pmap); - set_cr3(kernel_pmap->pdirbase); -} + i_bit_set(my_cpu, &cpus_active); + } while (cpu_update_needed[my_cpu]); + splx(s); + mp_enable_preemption(); +} #endif /* NCPUS > 1 */ #if MACH_KDB @@ -2919,21 +3246,22 @@ pmap_movepage(unsigned long from, unsigned long to, vm_size_t size) { spl_t spl; pt_entry_t *pte, saved_pte; + /* Lock the kernel map */ + PMAP_READ_LOCK(kernel_pmap, spl); while (size > 0) { - PMAP_READ_LOCK(kernel_pmap, spl); pte = pmap_pte(kernel_pmap, from); if (pte == NULL) panic("pmap_pagemove from pte NULL"); saved_pte = *pte; PMAP_READ_UNLOCK(kernel_pmap, spl); - pmap_enter(kernel_pmap, to, i386_trunc_page(*pte), + pmap_enter(kernel_pmap, to, (ppnum_t)i386_btop(i386_trunc_page(*pte)), VM_PROT_READ|VM_PROT_WRITE, 0, *pte & INTEL_PTE_WIRED); - pmap_remove(kernel_pmap, from, from+PAGE_SIZE); + pmap_remove(kernel_pmap, (addr64_t)from, (addr64_t)(from+PAGE_SIZE)); PMAP_READ_LOCK(kernel_pmap, spl); pte = pmap_pte(kernel_pmap, to); @@ -2941,7 +3269,6 @@ pmap_movepage(unsigned long from, unsigned long to, vm_size_t size) panic("pmap_pagemove 'to' pte NULL"); *pte = saved_pte; - PMAP_READ_UNLOCK(kernel_pmap, spl); from += PAGE_SIZE; to += PAGE_SIZE; @@ -2949,7 +3276,10 @@ pmap_movepage(unsigned long from, unsigned long to, vm_size_t size) } /* Get the processors to update the TLBs */ - PMAP_FLUSH_TLBS(); + PMAP_UPDATE_TLBS(kernel_pmap, from, from+size); + PMAP_UPDATE_TLBS(kernel_pmap, to, to+size); + + PMAP_READ_UNLOCK(kernel_pmap, spl); } diff --git a/osfmk/i386/pmap.h b/osfmk/i386/pmap.h index f5e1c8e25..9939be2a4 100644 --- a/osfmk/i386/pmap.h +++ b/osfmk/i386/pmap.h @@ -68,7 +68,6 @@ #ifndef ASSEMBLER #include -#include #include #include @@ -109,7 +108,14 @@ typedef unsigned int pt_entry_t; #define PTEMASK 0x3ff /* mask for page table index */ +#define VM_WIMG_COPYBACK VM_MEM_COHERENT #define VM_WIMG_DEFAULT VM_MEM_COHERENT +/* ?? intel ?? */ +#define VM_WIMG_IO (VM_MEM_COHERENT | \ + VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED) +#define VM_WIMG_WTHRU (VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED) +/* write combining mode, aka store gather */ +#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT) /* * Convert kernel virtual address to linear address @@ -151,6 +157,7 @@ typedef unsigned int pt_entry_t; #define INTEL_PTE_MOD 0x00000040 #define INTEL_PTE_WIRED 0x00000200 #define INTEL_PTE_PFN 0xfffff000 +#define INTEL_PTE_PTA 0x00000080 #define pa_to_pte(a) ((a) & INTEL_PTE_PFN) #define pte_to_pa(p) ((p) & INTEL_PTE_PFN) @@ -255,12 +262,19 @@ extern cpu_set cpus_active; extern cpu_set cpus_idle; +/* + * Quick test for pmap update requests. + */ +extern volatile +boolean_t cpu_update_needed[NCPUS]; + /* * External declarations for PMAP_ACTIVATE. */ extern void process_pmap_updates(struct pmap *pmap); extern void pmap_update_interrupt(void); +extern pmap_t kernel_pmap; #endif /* NCPUS > 1 */ @@ -303,7 +317,7 @@ extern int pmap_list_resident_pages( extern void flush_tlb(void); extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys); extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); - +extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va); /* * Macros for speed. 
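pmap_update_interrupt() above revolves around two global bit sets: a CPU steps out of cpus_active before waiting on the pmap locks, applies its queued invalidations, then rejoins, looping while more updates arrive. The i_bit_clear/i_bit_set discipline reduces to simple mask updates; a toy, single-threaded model (editor's sketch with assumed bit-op semantics, not the kernel's atomic primitives):

#include <stdio.h>

typedef unsigned cpu_set;

static void i_bit_clear(int cpu, cpu_set *s) { *s &= ~(1u << cpu); }
static void i_bit_set(int cpu, cpu_set *s)   { *s |=  (1u << cpu); }

int main(void)
{
    cpu_set cpus_active = 0xf;           /* four CPUs online */
    int     my_cpu      = 2;

    i_bit_clear(my_cpu, &cpus_active);   /* step out before touching the lists */
    printf("during update: cpus_active = %#x\n", cpus_active);
    /* ... process_pmap_updates(my_pmap) would run here ... */
    i_bit_set(my_cpu, &cpus_active);     /* rejoin the active set */
    printf("after:         cpus_active = %#x\n", cpus_active);
    return 0;
}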
@@ -313,6 +327,13 @@ extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); #include +#if defined(PMAP_ACTIVATE_KERNEL) +#undef PMAP_ACTIVATE_KERNEL +#undef PMAP_DEACTIVATE_KERNEL +#undef PMAP_ACTIVATE_USER +#undef PMAP_DEACTIVATE_USER +#endif + /* * For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage * fields to control TLB invalidation on other CPUS. @@ -331,6 +352,12 @@ extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); */ \ simple_lock(&kernel_pmap->lock); \ \ + /* \ + * Process invalidate requests for the kernel pmap. \ + */ \ + if (cpu_update_needed[(my_cpu)]) \ + process_pmap_updates(kernel_pmap); \ + \ /* \ * Mark that this cpu is using the pmap. \ */ \ @@ -354,7 +381,7 @@ extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); } #define PMAP_ACTIVATE_MAP(map, my_cpu) { \ - register struct pmap *tpmap; \ + register pmap_t tpmap; \ \ tpmap = vm_map_pmap(map); \ if (tpmap == kernel_pmap) { \ @@ -430,11 +457,6 @@ extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); splx(spl); \ } -#if MP_V1_1 -#define set_led(cpu) -#define clear_led(cpu) -#endif /* MP_V1_1 */ - #define MARK_CPU_IDLE(my_cpu) { \ /* \ * Mark this cpu idle, and remove it from the active set, \ @@ -465,6 +487,9 @@ extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); */ \ i_bit_clear((my_cpu), &cpus_idle); \ \ + if (cpu_update_needed[(my_cpu)]) \ + pmap_update_interrupt(); \ + \ /* \ * Mark that this cpu is now active. \ */ \ @@ -527,8 +552,6 @@ extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); (KERN_INVALID_ADDRESS) #define pmap_attribute_cache_sync(addr,size,attr,value) \ (KERN_INVALID_ADDRESS) -#define pmap_sync_caches_phys(pa) \ - (KERN_INVALID_ADDRESS) #endif /* ASSEMBLER */ diff --git a/osfmk/i386/proc_reg.h b/osfmk/i386/proc_reg.h index 74d9d50eb..a598cb8d5 100644 --- a/osfmk/i386/proc_reg.h +++ b/osfmk/i386/proc_reg.h @@ -142,6 +142,8 @@ /* * CR4 */ +#define CR4_FXS 0x00000200 /* SSE/SSE2 OS supports FXSave */ +#define CR4_XMM 0x00000400 /* SSE/SSE2 instructions supported in OS */ #define CR4_MCE 0x00000040 /* p5: Machine Check Exceptions */ #define CR4_PSE 0x00000010 /* p5: Page Size Extensions */ #define CR4_DE 0x00000008 /* p5: Debugging Extensions */ @@ -250,7 +252,82 @@ extern __inline__ void invlpg(unsigned long addr) { __asm__ volatile("invlpg (%0)" :: "r" (addr) : "memory"); } + +/* + * Access to machine-specific registers (available on 586 and better only) + * Note: the rd* operations modify the parameters directly (without using + * pointer indirection), this allows gcc to optimize better + */ + +#define rdmsr(msr,lo,hi) \ + __asm__ volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr)) + +#define wrmsr(msr,lo,hi) \ + __asm__ volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi)) + +#define rdtsc(lo,hi) \ + __asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi)) + +#define write_tsc(lo,hi) wrmsr(0x10, lo, hi) + +#define rdpmc(counter,lo,hi) \ + __asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter)) + +extern __inline__ uint64_t rdmsr64(uint32_t msr) +{ + uint64_t ret; + __asm__ volatile("rdmsr" : "=A" (ret) : "c" (msr)); + return ret; +} + +extern __inline__ void wrmsr64(uint32_t msr, uint64_t val) +{ + __asm__ volatile("wrmsr" : : "c" (msr), "A" (val)); +} + +extern __inline__ uint64_t rdtsc64(void) +{ + uint64_t ret; + __asm__ volatile("rdtsc" : "=A" (ret)); + return ret; +} #endif /* __GNUC__ */ #endif /* ASSEMBLER */ +#define MSR_IA32_P5_MC_ADDR 0 +#define MSR_IA32_P5_MC_TYPE 1 +#define 
MSR_IA32_PLATFORM_ID 0x17 +#define MSR_IA32_EBL_CR_POWERON 0x2a + +#define MSR_IA32_APIC_BASE 0x1b +#define MSR_IA32_APIC_BASE_BSP (1<<8) +#define MSR_IA32_APIC_BASE_ENABLE (1<<11) +#define MSR_IA32_APIC_BASE_BASE (0xfffff<<12) + +#define MSR_IA32_UCODE_WRITE 0x79 +#define MSR_IA32_UCODE_REV 0x8b + +#define MSR_IA32_PERFCTR0 0xc1 +#define MSR_IA32_PERFCTR1 0xc2 + +#define MSR_IA32_BBL_CR_CTL 0x119 + +#define MSR_IA32_MCG_CAP 0x179 +#define MSR_IA32_MCG_STATUS 0x17a +#define MSR_IA32_MCG_CTL 0x17b + +#define MSR_IA32_EVNTSEL0 0x186 +#define MSR_IA32_EVNTSEL1 0x187 + +#define MSR_IA32_DEBUGCTLMSR 0x1d9 +#define MSR_IA32_LASTBRANCHFROMIP 0x1db +#define MSR_IA32_LASTBRANCHTOIP 0x1dc +#define MSR_IA32_LASTINTFROMIP 0x1dd +#define MSR_IA32_LASTINTTOIP 0x1de + +#define MSR_IA32_MC0_CTL 0x400 +#define MSR_IA32_MC0_STATUS 0x401 +#define MSR_IA32_MC0_ADDR 0x402 +#define MSR_IA32_MC0_MISC 0x403 + #endif /* _I386_PROC_REG_H_ */ diff --git a/osfmk/i386/rtclock.c b/osfmk/i386/rtclock.c index 43ab1f3be..f592c7544 100644 --- a/osfmk/i386/rtclock.c +++ b/osfmk/i386/rtclock.c @@ -35,11 +35,14 @@ #include #include -#include #include + +#include + #include #include #include +#include #include #include #include @@ -53,6 +56,13 @@ #include #include #include +#include +#include +#include + +#define DISPLAYENTER(x) printf("[RTCLOCK] entering " #x "\n"); +#define DISPLAYEXIT(x) printf("[RTCLOCK] leaving " #x "\n"); +#define DISPLAYVALUE(x,y) printf("[RTCLOCK] " #x ":" #y " = 0x%08x \n",y); int sysclk_config(void); @@ -76,20 +86,6 @@ void sysclk_setalarm( extern void (*IOKitRegisterInterruptHook)(void *, int irq, int isclock); -/* - * Inlines to get timestamp counter value. - */ - -static inline void rdtsc_hilo(uint32_t *hi, uint32_t *lo) { - asm volatile("rdtsc": "=a" (*lo), "=d" (*hi)); -} - -static inline uint64_t rdtsc_64(void) { - uint64_t result; - asm volatile("rdtsc": "=A" (result)); - return result; -} - /* * Lists of clock routines. 
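The rdmsr64()/wrmsr64() inlines added to proc_reg.h above rely on the i386 "=A" constraint, which binds a 64-bit value to the edx:eax register pair that the rdmsr/wrmsr instructions use. A hedged usage sketch against the MSR_IA32_APIC_BASE definitions from the same hunk (illustration only: rdmsr is privileged, so this faults anywhere but ring 0, and the asm is i386-specific):

#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_APIC_BASE         0x1b
#define MSR_IA32_APIC_BASE_ENABLE  (1u << 11)
#define MSR_IA32_APIC_BASE_BASE    (0xfffffu << 12)

static inline uint64_t rdmsr64(uint32_t msr)
{
    uint64_t ret;
    __asm__ volatile("rdmsr" : "=A" (ret) : "c" (msr));   /* edx:eax pair */
    return ret;
}

int main(void)   /* must run in ring 0 on an i386 build; shown for shape only */
{
    uint64_t v = rdmsr64(MSR_IA32_APIC_BASE);
    printf("APIC %s, base %#x\n",
           (v & MSR_IA32_APIC_BASE_ENABLE) ? "enabled" : "disabled",
           (unsigned)(v & MSR_IA32_APIC_BASE_BASE));
    return 0;
}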
 */
@@ -107,9 +103,6 @@ int calend_init(void);
 kern_return_t  calend_gettime(
        mach_timespec_t *cur_time);
 
-kern_return_t  calend_settime(
-       mach_timespec_t *cur_time);
-
 kern_return_t  calend_getattr(
        clock_flavor_t  flavor,
        clock_attr_t    attr,
@@ -117,7 +110,7 @@ kern_return_t calend_getattr(
 
 struct clock_ops calend_ops = {
        calend_config,          calend_init,
-       calend_gettime,         calend_settime,
+       calend_gettime,         0,
        calend_getattr,         0,
        0,
 };
@@ -137,12 +130,16 @@ struct {
        mach_timespec_t         calend_offset;
        boolean_t               calend_is_set;
 
+       int64_t                 calend_adjtotal;
+       int32_t                 calend_adjdelta;
+
        uint64_t                timer_deadline;
        boolean_t               timer_is_set;
        clock_timer_func_t      timer_expire;
 
        clock_res_t             new_ires;       /* pending new resolution (nano ) */
        clock_res_t             intr_nsec;      /* interrupt resolution (nano) */
+       mach_timebase_info_data_t       timebase_const;
 
        decl_simple_lock_data(,lock)            /* real-time clock device lock */
 } rtclock;
@@ -152,14 +149,13 @@ unsigned int new_clknum; /* pending clknum */
 unsigned int   time_per_clk;   /* time per clk in ZHZ */
 unsigned int   clks_per_int;   /* clks per interrupt */
 unsigned int   clks_per_int_99;
-int    rtc_intr_count; /* interrupt counter */
-int    rtc_intr_hertz; /* interrupts per HZ */
-int    rtc_intr_freq;  /* interrupt frequency */
-int    rtc_print_lost_tick;    /* print lost tick */
+int            rtc_intr_count;         /* interrupt counter */
+int            rtc_intr_hertz;         /* interrupts per HZ */
+int            rtc_intr_freq;          /* interrupt frequency */
+int            rtc_print_lost_tick;    /* print lost tick */
 
 uint32_t       rtc_cyc_per_sec;        /* processor cycles per seconds */
-uint32_t       rtc_last_int_tsc_lo;    /* tsc values saved per interupt */
-uint32_t       rtc_last_int_tsc_hi;
+uint32_t       rtc_quant_scale;        /* used internally to convert clocks to nanos */
 
 /*
  * Macros to lock/unlock real-time clock device.
@@ -209,11 +205,19 @@ MACRO_END
 *
 * This sequence to do all this is in sysclk_gettime. For efficiency, this
 * sequence also needs the value that the counter will have if it has just
- * overflowed, so we precompute that also. ALSO, certain platforms
+ * overflowed, so we precompute that also.
+ *
+ * The fix for certain really old platforms has been removed; such platforms
 * (specifically the DEC XL5100) have been observed to have problem
 * with latching the counter, and they occasionally (say, one out of
 * 100,000 times) return a bogus value. Hence, the present code reads
 * the counter twice and checks for a consistent pair of values.
+ * The code was:
+ *     do {
+ *             READ_8254(val);
+ *             READ_8254(val2);
+ *     } while ( val2 > val || val2 < val - 10 );
+ *
 *
 * Some attributes of the rt clock can be changed, including the
 * interrupt resolution. We default to the minimum resolution (10 ms),
@@ -232,21 +236,288 @@ MACRO_END
        (val) = inb(PITCTR0_PORT);      \
        (val) |= inb(PITCTR0_PORT) << 8 ; }
 
-/*
- * Calibration delay counts.
- */
-unsigned int   delaycount = 100;
-unsigned int   microdata = 50;
+#define UI_CPUFREQ_ROUNDING_FACTOR     10000000
+
 /*
  * Forward decl.
  */
 
-extern int measure_delay(int us);
 void rtc_setvals( unsigned int, clock_res_t );
 
 static void rtc_set_cyc_per_sec();
 
+/* define assembly routines */
+
+
+/*
+ * Inlines to get timestamp counter value.
+ */
+
+inline static uint64_t
+rdtsc_64(void)
+{
+       uint64_t result;
+       asm volatile("rdtsc": "=A" (result));
+       return result;
+}
+
+// create_mul_quant_GHZ creates a constant that can be used to multiply
+// the TSC by to create nanoseconds. This is a 32 bit number
+// and the TSC *MUST* have a frequency higher than 1000 MHz for this routine to work
+//
+// The theory here is that we know how many TSCs-per-sec the processor runs at.
+// Normally to convert this
+// to nanoseconds you would multiply the current time stamp by 1000000000 (a billion) then divide
+// by TSCs-per-sec to get nanoseconds. Unfortunately the TSC is 64 bits which would leave us with
+// 96 bit intermediate results from the multiply that must then be divided.
+// Usually that's:
+// uint96 = tsc * numer
+// nanos = uint96 / denom
+// Instead, we create this quant constant and it becomes the numerator, the denominator
+// can then be 0x100000000 which makes our division as simple as forgetting the lower 32 bits
+// of the result. We can also pass this number to user space as the numer and pass 0xFFFFFFFF
+// as the denom to convert raw counts to nanos; the difference is so small as to be undetectable
+// by anything.
+// Unfortunately we cannot do this for sub-GHz processors. In that case, all we do is pass the CPU
+// speed in raw as the denom and we pass in 1000000000 as the numerator. No short cuts allowed
+
+inline static uint32_t
+create_mul_quant_GHZ(uint32_t quant)
+{
+       return (uint32_t)((50000000ULL << 32) / quant);
+}
+
+// this routine takes a value of raw TSC ticks and applies the passed mul_quant
+// generated by create_mul_quant_GHZ(). This is our internal routine for creating
+// nanoseconds.
+// since we don't really have uint96_t this routine basically does this....
+// uint96_t intermediate = (*value) * scale
+// return (intermediate >> 32)
+inline static uint64_t
+fast_get_nano_from_abs(uint64_t value, int scale)
+{
+       asm (" movl %%edx,%%esi \n\t"
+            " mull %%ecx       \n\t"
+            " movl %%edx,%%edi \n\t"
+            " movl %%esi,%%eax \n\t"
+            " mull %%ecx       \n\t"
+            " xorl %%ecx,%%ecx \n\t"
+            " addl %%edi,%%eax \n\t"
+            " adcl %%ecx,%%edx "
+               : "+A" (value)
+               : "c" (scale)
+               : "%esi", "%edi");
+       return value;
+}
+
+/*
+ * this routine basically does this...
+ * ts.tv_sec = nanos / 1000000000;     create seconds
+ * ts.tv_nsec = nanos % 1000000000;    create remainder nanos
+ */
+inline static mach_timespec_t
+nanos_to_timespec(uint64_t nanos)
+{
+       union {
+               mach_timespec_t ts;
+               uint64_t u64;
+       } ret;
+       ret.u64 = nanos;
+       asm volatile("divl %1" : "+A" (ret.u64) : "r" (NSEC_PER_SEC));
+       return ret.ts;
+}
+
+// the following two routines perform the 96 bit arithmetic we need to
+// convert generic absolute<->nanoseconds
+// the multiply routine takes a uint64_t and a uint32_t and returns the result in a
+// uint32_t[3] array. the divide routine takes this uint32_t[3] array and divides it by a uint32_t returning a uint64_t
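Before the longmul()/longdiv() assembly that follows, a portable reference model may make the arithmetic easier to verify. This is an editor's addition, not part of the patch: the 2 GHz figures are illustrative, and __uint128_t is a GCC/Clang extension standing in for the hand-written 96-bit code.

#include <stdint.h>
#include <stdio.h>

/* fast path: nanos = (tsc * scale) >> 32, where
 * scale = (50,000,000 << 32) / (TSC ticks per 1/20 s), as in create_mul_quant_GHZ() */
static uint64_t nanos_fast(uint64_t tsc, uint32_t scale)
{
    uint64_t lo = (tsc & 0xffffffffULL) * scale;  /* low half of the 96-bit product */
    uint64_t hi = (tsc >> 32) * scale;            /* high half, already shifted by 32 */
    return hi + (lo >> 32);                       /* keep bits 32..95 */
}

/* general (sub-GHz) path: 96-bit multiply then divide, which is what
 * longmul()/longdiv() implement by hand */
static uint64_t nanos_general(uint64_t tsc, uint32_t numer, uint32_t denom)
{
    return (uint64_t)(((__uint128_t)tsc * numer) / denom);
}

int main(void)
{
    /* hypothetical 2 GHz part: 100,000,000 ticks per 1/20 s */
    uint32_t scale = (uint32_t)((50000000ULL << 32) / 100000000ULL);
    printf("scale = %#x (expect 0x80000000, i.e. 0.5 ns/tick)\n", scale);
    printf("fast:    4000 ticks -> %llu ns (expect 2000)\n",
           (unsigned long long)nanos_fast(4000, scale));
    printf("general: 4000 ticks -> %llu ns (expect 2000)\n",
           (unsigned long long)nanos_general(4000, 1000000000u, 2000000000u));
    return 0;
}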
the dicide routine takes this uint32_t[3] array and +// divides it by a uint32_t returning a uint64_t +inline static void +longmul(uint64_t *abstime, uint32_t multiplicand, uint32_t *result) +{ + asm volatile( + " pushl %%ebx \n\t" + " movl %%eax,%%ebx \n\t" + " movl (%%eax),%%eax \n\t" + " mull %%ecx \n\t" + " xchg %%eax,%%ebx \n\t" + " pushl %%edx \n\t" + " movl 4(%%eax),%%eax \n\t" + " mull %%ecx \n\t" + " movl %2,%%ecx \n\t" + " movl %%ebx,(%%ecx) \n\t" + " popl %%ebx \n\t" + " addl %%ebx,%%eax \n\t" + " popl %%ebx \n\t" + " movl %%eax,4(%%ecx) \n\t" + " adcl $0,%%edx \n\t" + " movl %%edx,8(%%ecx) // and save it" + : : "a"(abstime), "c"(multiplicand), "m"(result)); + +} + +inline static uint64_t +longdiv(uint32_t *numer, uint32_t denom) +{ + uint64_t result; + asm volatile( + " pushl %%ebx \n\t" + " movl %%eax,%%ebx \n\t" + " movl 8(%%eax),%%edx \n\t" + " movl 4(%%eax),%%eax \n\t" + " divl %%ecx \n\t" + " xchg %%ebx,%%eax \n\t" + " movl (%%eax),%%eax \n\t" + " divl %%ecx \n\t" + " xchg %%ebx,%%edx \n\t" + " popl %%ebx \n\t" + : "=A"(result) : "a"(numer),"c"(denom)); + return result; +} + +#define PIT_Mode4 0x08 /* turn on mode 4 one shot software trigger */ + +// Enable or disable timer 2. +inline static void +enable_PIT2() +{ + asm volatile( + " inb $97,%%al \n\t" + " and $253,%%al \n\t" + " or $1,%%al \n\t" + " outb %%al,$97 \n\t" + : : : "%al" ); +} + +inline static void +disable_PIT2() +{ + asm volatile( + " inb $97,%%al \n\t" + " and $253,%%al \n\t" + " outb %%al,$97 \n\t" + : : : "%al" ); +} + +// ctimeRDTSC() routine sets up counter 2 to count down 1/20 of a second +// it pauses until the value is latched in the counter +// and then reads the time stamp counter to return to the caller +// utility routine +// Code to calculate how many processor cycles are in a second... +inline static void +set_PIT2(int value) +{ +// first, tell the clock we are going to write 16 bytes to the counter and enable one-shot mode +// then write the two bytes into the clock register. +// loop until the value is "realized" in the clock, this happens on the next tick +// + asm volatile( + " movb $184,%%al \n\t" + " outb %%al,$67 \n\t" + " movb %%dl,%%al \n\t" + " outb %%al,$66 \n\t" + " movb %%dh,%%al \n\t" + " outb %%al,$66 \n" +"1: inb $66,%%al \n\t" + " inb $66,%%al \n\t" + " cmp %%al,%%dh \n\t" + " jne 1b" + : : "d"(value) : "%al"); +} + +inline static uint64_t +get_PIT2(unsigned int *value) +{ +// this routine first latches the time, then gets the time stamp so we know +// how long the read will take later. Reads + register uint64_t result; + asm volatile( + " xorl %%ecx,%%ecx \n\t" + " movb $128,%%al \n\t" + " outb %%al,$67 \n\t" + " rdtsc \n\t" + " pushl %%eax \n\t" + " inb $66,%%al \n\t" + " movb %%al,%%cl \n\t" + " inb $66,%%al \n\t" + " movb %%al,%%ch \n\t" + " popl %%eax " + : "=A"(result), "=c"(*value)); + return result; +} + +static uint32_t +timeRDTSC(void) +{ + uint64_t latchTime; + uint64_t saveTime,intermediate; + unsigned int timerValue,x; + boolean_t int_enabled; + uint64_t fact[6] = { 2000011734ll, + 2000045259ll, + 2000078785ll, + 2000112312ll, + 2000145841ll, + 2000179371ll}; + + int_enabled = ml_set_interrupts_enabled(FALSE); + + enable_PIT2(); // turn on PIT2 + set_PIT2(0); // reset timer 2 to be zero + latchTime = rdtsc_64(); // get the time stamp to time + latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes + set_PIT2(59658); // set up the timer to count 1/20th a second + saveTime = rdtsc_64(); // now time how ling a 20th a second is... 
+ get_PIT2(&x); + do { get_PIT2(&timerValue); x = timerValue;} while (timerValue > x); + do { + intermediate = get_PIT2(&timerValue); + if (timerValue>x) printf("Hey we are going backwards! %d, %d\n",timerValue,x); + x = timerValue; + } while ((timerValue != 0) && (timerValue >5)); + printf("Timer value:%d\n",timerValue); + printf("intermediate 0x%08x:0x%08x\n",intermediate); + printf("saveTime 0x%08x:0x%08x\n",saveTime); + + intermediate = intermediate - saveTime; // raw # of tsc's it takes for about 1/20 second + intermediate = intermediate * fact[timerValue]; // actual time spent + intermediate = intermediate / 2000000000ll; // rescale so its exactly 1/20 a second + intermediate = intermediate + latchTime; // add on our save fudge + set_PIT2(0); // reset timer 2 to be zero + disable_PIT2(0); // turn off PIT 2 + ml_set_interrupts_enabled(int_enabled); + return intermediate; +} + +static uint64_t +rdtsctime_to_nanoseconds( void ) +{ + uint32_t numer; + uint32_t denom; + uint64_t abstime; + + uint32_t intermediate[3]; + + numer = rtclock.timebase_const.numer; + denom = rtclock.timebase_const.denom; + abstime = rdtsc_64(); + if (denom == 0xFFFFFFFF) { + abstime = fast_get_nano_from_abs(abstime, numer); + } else { + longmul(&abstime, numer, intermediate); + abstime = longdiv(intermediate, denom); + } + return abstime; +} + +inline static mach_timespec_t +rdtsc_to_timespec(void) +{ + uint64_t currNanos; + currNanos = rdtsctime_to_nanoseconds(); + return nanos_to_timespec(currNanos); +} + /* * Initialize non-zero clock structure values. */ @@ -314,17 +585,7 @@ sysclk_config(void) /* * Setup device. */ -#if MP_V1_1 - { - extern boolean_t mp_v1_1_initialized; - if (mp_v1_1_initialized) - pic = 2; - else - pic = 0; - } -#else pic = 0; /* FIXME .. interrupt registration moved to AppleIntelClock */ -#endif /* @@ -363,6 +624,7 @@ sysclk_init(void) RtcTime = &rtclock.time; rtc_setvals( CLKNUM, RTC_MINRES ); /* compute constants */ rtc_set_cyc_per_sec(); /* compute number of tsc beats per second */ + clock_timebase_init(); return (1); } @@ -377,10 +639,6 @@ kern_return_t sysclk_gettime( mach_timespec_t *cur_time) /* OUT */ { - mach_timespec_t itime = {0, 0}; - unsigned int val, val2; - int s; - if (!RtcTime) { /* Uninitialized */ cur_time->tv_nsec = 0; @@ -388,31 +646,7 @@ sysclk_gettime( return (KERN_SUCCESS); } - /* - * Inhibit interrupts. Determine the incremental - * time since the last interrupt. (This could be - * done in assembler for a bit more speed). - */ - LOCK_RTC(s); - do { - READ_8254(val); /* read clock */ - READ_8254(val2); /* read clock */ - } while ( val2 > val || val2 < val - 10 ); - if ( val > clks_per_int_99 ) { - outb( 0x0a, 0x20 ); /* see if interrupt pending */ - if ( inb( 0x20 ) & 1 ) - itime.tv_nsec = rtclock.intr_nsec; /* yes, add a tick */ - } - itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ; - if ( itime.tv_nsec < last_ival ) { - if (rtc_print_lost_tick) - printf( "rtclock: missed clock interrupt.\n" ); - } - last_ival = itime.tv_nsec; - cur_time->tv_sec = rtclock.time.tv_sec; - cur_time->tv_nsec = rtclock.time.tv_nsec; - UNLOCK_RTC(s); - ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime)); + *cur_time = rdtsc_to_timespec(); return (KERN_SUCCESS); } @@ -420,39 +654,13 @@ kern_return_t sysclk_gettime_internal( mach_timespec_t *cur_time) /* OUT */ { - mach_timespec_t itime = {0, 0}; - unsigned int val, val2; - if (!RtcTime) { /* Uninitialized */ cur_time->tv_nsec = 0; cur_time->tv_sec = 0; return (KERN_SUCCESS); } - - /* - * Inhibit interrupts. 
Determine the incremental - * time since the last interrupt. (This could be - * done in assembler for a bit more speed). - */ - do { - READ_8254(val); /* read clock */ - READ_8254(val2); /* read clock */ - } while ( val2 > val || val2 < val - 10 ); - if ( val > clks_per_int_99 ) { - outb( 0x0a, 0x20 ); /* see if interrupt pending */ - if ( inb( 0x20 ) & 1 ) - itime.tv_nsec = rtclock.intr_nsec; /* yes, add a tick */ - } - itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ; - if ( itime.tv_nsec < last_ival ) { - if (rtc_print_lost_tick) - printf( "rtclock: missed clock interrupt.\n" ); - } - last_ival = itime.tv_nsec; - cur_time->tv_sec = rtclock.time.tv_sec; - cur_time->tv_nsec = rtclock.time.tv_nsec; - ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime)); + *cur_time = rdtsc_to_timespec(); return (KERN_SUCCESS); } @@ -466,39 +674,13 @@ void sysclk_gettime_interrupts_disabled( mach_timespec_t *cur_time) /* OUT */ { - mach_timespec_t itime = {0, 0}; - unsigned int val; - if (!RtcTime) { /* Uninitialized */ cur_time->tv_nsec = 0; cur_time->tv_sec = 0; return; } - - simple_lock(&rtclock.lock); - - /* - * Copy the current time knowing that we cant be interrupted - * between the two longwords and so dont need to use MTS_TO_TS - */ - READ_8254(val); /* read clock */ - if ( val > clks_per_int_99 ) { - outb( 0x0a, 0x20 ); /* see if interrupt pending */ - if ( inb( 0x20 ) & 1 ) - itime.tv_nsec = rtclock.intr_nsec; /* yes, add a tick */ - } - itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ; - if ( itime.tv_nsec < last_ival ) { - if (rtc_print_lost_tick) - printf( "rtclock: missed clock interrupt.\n" ); - } - last_ival = itime.tv_nsec; - cur_time->tv_sec = rtclock.time.tv_sec; - cur_time->tv_nsec = rtclock.time.tv_nsec; - ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime)); - - simple_unlock(&rtclock.lock); + *cur_time = rdtsc_to_timespec(); } // utility routine @@ -508,59 +690,54 @@ static void rtc_set_cyc_per_sec() { - int x, y; - uint64_t cycles; - uint32_t c[15]; // array for holding sampled cycle counts - mach_timespec_t tst[15]; // array for holding time values. NOTE for some reason tv_sec not work + uint32_t twen_cycles; + uint32_t cycles; - for (x=0; x<15; x++) { // quick sample 15 times - tst[x].tv_sec = 0; - tst[x].tv_nsec = 0; - sysclk_gettime_internal(&tst[x]); - rdtsc_hilo(&y, &c[x]); - } - y = 0; - cycles = 0; - for (x=0; x<14; x++) { - // simple formula really. calculate the numerator as the number of elapsed processor - // cycles * 1000 to adjust for the resolution we want. The denominator is the - // elapsed "real" time in nano-seconds. The result will be the processor speed in - // Mhz. any overflows will be discarded before they are added - if ((c[x+1] > c[x]) && (tst[x+1].tv_nsec > tst[x].tv_nsec)) { - cycles += ((uint64_t)(c[x+1]-c[x]) * NSEC_PER_SEC ) / (uint64_t)(tst[x+1].tv_nsec - tst[x].tv_nsec); // elapsed nsecs - y +=1; - } - } - if (y>0) { // we got more than 1 valid sample. 
This also takes care of the case of if the clock isn't running - cycles = cycles / y; // calc our average + twen_cycles = timeRDTSC(); + if (twen_cycles> (1000000000/20)) { + // we create this value so that you can use just a "fast" multiply to get nanos + rtc_quant_scale = create_mul_quant_GHZ(twen_cycles); + rtclock.timebase_const.numer = rtc_quant_scale; // because ctimeRDTSC gives us 1/20 a seconds worth + rtclock.timebase_const.denom = 0xffffffff; // so that nanoseconds = (TSC * numer) / denom + + } else { + rtclock.timebase_const.numer = 1000000000/20; // because ctimeRDTSC gives us 1/20 a seconds worth + rtclock.timebase_const.denom = twen_cycles; // so that nanoseconds = (TSC * numer) / denom } - rtc_cyc_per_sec = cycles; - rdtsc_hilo(&rtc_last_int_tsc_hi, &rtc_last_int_tsc_lo); + cycles = twen_cycles; // number of cycles in 1/20th a second + rtc_cyc_per_sec = cycles*20; // multiply it by 20 and we are done.. BUT we also want to calculate... + + cycles = ((rtc_cyc_per_sec + UI_CPUFREQ_ROUNDING_FACTOR - 1) / UI_CPUFREQ_ROUNDING_FACTOR) * UI_CPUFREQ_ROUNDING_FACTOR; + gPEClockFrequencyInfo.cpu_clock_rate_hz = cycles; +DISPLAYVALUE(rtc_set_cyc_per_sec,rtc_cyc_per_sec); +DISPLAYEXIT(rtc_set_cyc_per_sec); } -static -natural_t -get_uptime_cycles(void) +void +clock_get_system_microtime( + uint32_t *secs, + uint32_t *microsecs) { - // get the time since the last interupt based on the processors TSC ignoring the - // RTC for speed - - uint32_t a,d,intermediate_lo,intermediate_hi,result; - uint64_t newTime; - - rdtsc_hilo(&d, &a); - if (d != rtc_last_int_tsc_hi) { - newTime = d-rtc_last_int_tsc_hi; - newTime = (newTime<<32) + (a-rtc_last_int_tsc_lo); - result = newTime; - } else { - result = a-rtc_last_int_tsc_lo; - } - __asm__ volatile ( " mul %3 ": "=eax" (intermediate_lo), "=edx" (intermediate_hi): "a"(result), "d"(NSEC_PER_SEC) ); - __asm__ volatile ( " div %3": "=eax" (result): "eax"(intermediate_lo), "edx" (intermediate_hi), "ecx" (rtc_cyc_per_sec) ); - return result; + mach_timespec_t now; + + sysclk_gettime(&now); + + *secs = now.tv_sec; + *microsecs = now.tv_nsec / NSEC_PER_USEC; } +void +clock_get_system_nanotime( + uint32_t *secs, + uint32_t *nanosecs) +{ + mach_timespec_t now; + + sysclk_gettime(&now); + + *secs = now.tv_sec; + *nanosecs = now.tv_nsec; +} /* * Get clock device attributes. @@ -578,12 +755,12 @@ sysclk_getattr( switch (flavor) { case CLOCK_GET_TIME_RES: /* >0 res */ -#if (NCPUS == 1 || (MP_V1_1 && 0)) +#if (NCPUS == 1) LOCK_RTC(s); *(clock_res_t *) attr = 1000; UNLOCK_RTC(s); break; -#endif /* (NCPUS == 1 || (MP_V1_1 && 0)) && AT386 */ +#endif /* (NCPUS == 1) */ case CLOCK_ALARM_CURRES: /* =0 no alarm */ LOCK_RTC(s); *(clock_res_t *) attr = rtclock.intr_nsec; @@ -715,26 +892,51 @@ calend_gettime( return (KERN_SUCCESS); } -/* - * Set the current clock time. 
- */ -kern_return_t -calend_settime( - mach_timespec_t *new_time) +void +clock_get_calendar_microtime( + uint32_t *secs, + uint32_t *microsecs) +{ + mach_timespec_t now; + + calend_gettime(&now); + + *secs = now.tv_sec; + *microsecs = now.tv_nsec / NSEC_PER_USEC; +} + +void +clock_get_calendar_nanotime( + uint32_t *secs, + uint32_t *nanosecs) { - mach_timespec_t curr_time; + mach_timespec_t now; + + calend_gettime(&now); + + *secs = now.tv_sec; + *nanosecs = now.tv_nsec; +} + +void +clock_set_calendar_microtime( + uint32_t secs, + uint32_t microsecs) +{ + mach_timespec_t new_time, curr_time; spl_t s; LOCK_RTC(s); (void) sysclk_gettime_internal(&curr_time); - rtclock.calend_offset = *new_time; + rtclock.calend_offset.tv_sec = new_time.tv_sec = secs; + rtclock.calend_offset.tv_nsec = new_time.tv_nsec = microsecs * NSEC_PER_USEC; SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time); rtclock.calend_is_set = TRUE; UNLOCK_RTC(s); - (void) bbc_settime(new_time); + (void) bbc_settime(&new_time); - return (KERN_SUCCESS); + host_notify_calendar_change(); } /* @@ -753,17 +955,17 @@ calend_getattr( switch (flavor) { case CLOCK_GET_TIME_RES: /* >0 res */ -#if (NCPUS == 1 || (MP_V1_1 && 0)) +#if (NCPUS == 1) LOCK_RTC(s); *(clock_res_t *) attr = 1000; UNLOCK_RTC(s); break; -#else /* (NCPUS == 1 || (MP_V1_1 && 0)) && AT386 */ +#else /* (NCPUS == 1) */ LOCK_RTC(s); *(clock_res_t *) attr = rtclock.intr_nsec; UNLOCK_RTC(s); break; -#endif /* (NCPUS == 1 || (MP_V1_1 && 0)) && AT386 */ +#endif /* (NCPUS == 1) */ case CLOCK_ALARM_CURRES: /* =0 no alarm */ case CLOCK_ALARM_MINRES: @@ -777,16 +979,89 @@ calend_getattr( return (KERN_SUCCESS); } -void -clock_adjust_calendar( - clock_res_t nsec) +#define tickadj (40*NSEC_PER_USEC) /* "standard" skew, ns / tick */ +#define bigadj (NSEC_PER_SEC) /* use 10x skew above bigadj ns */ + +uint32_t +clock_set_calendar_adjtime( + int32_t *secs, + int32_t *microsecs) { - spl_t s; + int64_t total, ototal; + uint32_t interval = 0; + spl_t s; + + total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC; LOCK_RTC(s); - if (rtclock.calend_is_set) - ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec); + ototal = rtclock.calend_adjtotal; + + if (total != 0) { + int32_t delta = tickadj; + + if (total > 0) { + if (total > bigadj) + delta *= 10; + if (delta > total) + delta = total; + } + else { + if (total < -bigadj) + delta *= 10; + delta = -delta; + if (delta < total) + delta = total; + } + + rtclock.calend_adjtotal = total; + rtclock.calend_adjdelta = delta; + + interval = (NSEC_PER_SEC / HZ); + } + else + rtclock.calend_adjdelta = rtclock.calend_adjtotal = 0; + + UNLOCK_RTC(s); + + if (ototal == 0) + *secs = *microsecs = 0; + else { + *secs = ototal / NSEC_PER_SEC; + *microsecs = ototal % NSEC_PER_SEC; + } + + return (interval); +} + +uint32_t +clock_adjust_calendar(void) +{ + uint32_t interval = 0; + int32_t delta; + spl_t s; + + LOCK_RTC(s); + delta = rtclock.calend_adjdelta; + ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, delta); + + rtclock.calend_adjtotal -= delta; + + if (delta > 0) { + if (delta > rtclock.calend_adjtotal) + rtclock.calend_adjdelta = rtclock.calend_adjtotal; + } + else + if (delta < 0) { + if (delta < rtclock.calend_adjtotal) + rtclock.calend_adjdelta = rtclock.calend_adjtotal; + } + + if (rtclock.calend_adjdelta != 0) + interval = (NSEC_PER_SEC / HZ); + UNLOCK_RTC(s); + + return (interval); } void @@ -806,20 +1081,8 @@ clock_initialize_calendar(void) rtclock.calend_is_set = TRUE; } UNLOCK_RTC(s); -} -mach_timespec_t 
-clock_get_calendar_offset(void) -{ - mach_timespec_t result = MACH_TIMESPEC_ZERO; - spl_t s; - - LOCK_RTC(s); - if (rtclock.calend_is_set) - result = rtclock.calend_offset; - UNLOCK_RTC(s); - - return (result); + host_notify_calendar_change(); } void @@ -829,7 +1092,11 @@ clock_timebase_info( spl_t s; LOCK_RTC(s); - info->numer = info->denom = 1; + if (rtclock.timebase_const.denom == 0xFFFFFFFF) { + info->numer = info->denom = rtc_quant_scale; + } else { + info->numer = info->denom = 1; + } UNLOCK_RTC(s); } @@ -880,14 +1147,14 @@ rtclock_reset(void) { int s; -#if NCPUS > 1 && !(MP_V1_1 && 0) +#if NCPUS > 1 mp_disable_preemption(); if (cpu_number() != master_cpu) { mp_enable_preemption(); return; } mp_enable_preemption(); -#endif /* NCPUS > 1 && AT386 && !MP_V1_1 */ +#endif /* NCPUS > 1 */ LOCK_RTC(s); RTCLOCK_RESET(); UNLOCK_RTC(s); @@ -899,12 +1166,13 @@ rtclock_reset(void) * into the higher level clock code to deliver alarms. */ int -rtclock_intr(void) +rtclock_intr(struct i386_interrupt_state *regs) { - uint64_t abstime; + uint64_t abstime; mach_timespec_t clock_time; - int i; - spl_t s; + int i; + spl_t s; + boolean_t usermode; /* * Update clock time. Do the update so that the macro @@ -912,19 +1180,33 @@ rtclock_intr(void) * update in order: mtv_csec, mtv_time.tv_nsec, mtv_time.tv_sec). */ LOCK_RTC(s); - rdtsc_hilo(&rtc_last_int_tsc_hi, &rtc_last_int_tsc_lo); - i = rtclock.time.tv_nsec + rtclock.intr_nsec; - if (i < NSEC_PER_SEC) - rtclock.time.tv_nsec = i; - else { - rtclock.time.tv_nsec = i - NSEC_PER_SEC; - rtclock.time.tv_sec++; - } + abstime = rdtsctime_to_nanoseconds(); // get the time as of the TSC + clock_time = nanos_to_timespec(abstime); // turn it into a timespec + rtclock.time.tv_nsec = clock_time.tv_nsec; + rtclock.time.tv_sec = clock_time.tv_sec; + rtclock.abstime = abstime; + /* note time now up to date */ last_ival = 0; - rtclock.abstime += rtclock.intr_nsec; - abstime = rtclock.abstime; + /* + * On a HZ-tick boundary: return 0 and adjust the clock + * alarm resolution (if requested). Otherwise return a + * non-zero value. + */ + if ((i = --rtc_intr_count) == 0) { + if (rtclock.new_ires) { + rtc_setvals(new_clknum, rtclock.new_ires); + RTCLOCK_RESET(); /* lock clock register */ + rtclock.new_ires = 0; + } + rtc_intr_count = rtc_intr_hertz; + UNLOCK_RTC(s); + usermode = (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0); + hertz_tick(usermode, regs->eip); + LOCK_RTC(s); + } + if ( rtclock.timer_is_set && rtclock.timer_deadline <= abstime ) { rtclock.timer_is_set = FALSE; @@ -960,19 +1242,6 @@ rtclock_intr(void) LOCK_RTC(s); } - /* - * On a HZ-tick boundary: return 0 and adjust the clock - * alarm resolution (if requested). Otherwise return a - * non-zero value. - */ - if ((i = --rtc_intr_count) == 0) { - if (rtclock.new_ires) { - rtc_setvals(new_clknum, rtclock.new_ires); - RTCLOCK_RESET(); /* lock clock register */ - rtclock.new_ires = 0; - } - rtc_intr_count = rtc_intr_hertz; - } UNLOCK_RTC(s); return (i); } @@ -981,15 +1250,13 @@ void clock_get_uptime( uint64_t *result) { - uint32_t ticks; - spl_t s; - - LOCK_RTC(s); - ticks = get_uptime_cycles(); - *result = rtclock.abstime; - UNLOCK_RTC(s); + *result = rdtsctime_to_nanoseconds(); +} - *result += ticks; +uint64_t +mach_absolute_time(void) +{ + return rdtsctime_to_nanoseconds(); } void @@ -1043,86 +1310,38 @@ nanoseconds_to_absolutetime( } /* - * measure_delay(microseconds) - * - * Measure elapsed time for delay calls - * Returns microseconds. 
- * - * Microseconds must not be too large since the counter (short) - * will roll over. Max is about 13 ms. Values smaller than 1 ms are ok. - * This uses the assumed frequency of the rt clock which is emperically - * accurate to only about 200 ppm. + * Spin-loop delay primitives. */ - -int -measure_delay( - int us) +void +delay_for_interval( + uint32_t interval, + uint32_t scale_factor) { - unsigned int lsb, val; - - outb(PITCTL_PORT, PIT_C0|PIT_NDIVMODE|PIT_READMODE); - outb(PITCTR0_PORT, 0xff); /* set counter to max value */ - outb(PITCTR0_PORT, 0xff); - delay(us); - outb(PITCTL_PORT, PIT_C0); - lsb = inb(PITCTR0_PORT); - val = (inb(PITCTR0_PORT) << 8) | lsb; - val = 0xffff - val; - val *= 1000000; - val /= CLKNUM; - return(val); -} + uint64_t now, end; -/* - * calibrate_delay(void) - * - * Adjust delaycount. Called from startup before clock is started - * for normal interrupt generation. - */ + clock_interval_to_deadline(interval, scale_factor, &end); -void -calibrate_delay(void) -{ - unsigned val; - int prev = 0; - register int i; - - printf("adjusting delay count: %d", delaycount); - for (i=0; i<10; i++) { - prev = delaycount; - /* - * microdata must not be too large since measure_timer - * will not return accurate values if the counter (short) - * rolls over - */ - val = measure_delay(microdata); - if (val == 0) { - delaycount *= 2; - } else { - delaycount *= microdata; - delaycount += val-1; /* round up to upper us */ - delaycount /= val; - } - if (delaycount <= 0) - delaycount = 1; - if (delaycount != prev) - printf(" %d", delaycount); - } - printf("\n"); + do { + cpu_pause(); + now = mach_absolute_time(); + } while (now < end); } -#if MACH_KDB void -test_delay(void); +clock_delay_until( + uint64_t deadline) +{ + uint64_t now; + + do { + cpu_pause(); + now = mach_absolute_time(); + } while (now < deadline); +} void -test_delay(void) +delay( + int usec) { - register i; - - for (i = 0; i < 10; i++) - printf("%d, %d\n", i, measure_delay(i)); - for (i = 10; i <= 100; i+=10) - printf("%d, %d\n", i, measure_delay(i)); + delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC); } -#endif /* MACH_KDB */ diff --git a/osfmk/i386/rtclock_entries.h b/osfmk/i386/rtclock_entries.h index 04a2cb5a3..aba04ea19 100644 --- a/osfmk/i386/rtclock_entries.h +++ b/osfmk/i386/rtclock_entries.h @@ -45,5 +45,6 @@ extern kern_return_t rtc_setattr( extern void rtc_setalrm( mach_timespec_t * alarmtime); extern void rtclock_reset(void); -extern int rtclock_intr(void); +extern int rtclock_intr( + struct i386_interrupt_state *regs); extern void calibrate_delay(void); diff --git a/osfmk/i386/seg.h b/osfmk/i386/seg.h index 3afe55d5d..d39c251ca 100644 --- a/osfmk/i386/seg.h +++ b/osfmk/i386/seg.h @@ -153,8 +153,9 @@ struct fake_descriptor { #define USER_RPC 0x0f /* mach rpc call gate */ #define USER_CS 0x17 /* user code segment */ #define USER_DS 0x1f /* user data segment */ +#define USER_CTHREAD 0x27 /* user cthread area */ -#define LDTSZ 4 +#define LDTSZ 5 /* * Kernel descriptors for MACH - 32-bit flat address space. diff --git a/osfmk/i386/start.s b/osfmk/i386/start.s index 7618127ab..38362a816 100644 --- a/osfmk/i386/start.s +++ b/osfmk/i386/start.s @@ -72,7 +72,7 @@ #endif /* NCPUS > 1 */ -#include +#include /* * GAS won't handle an intersegment jump with a relocatable offset. 
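/*
 * [Editor's note, not part of the patch: a minimal sketch of the TSC-to-
 * nanoseconds arithmetic the rtclock hunks above adopt.  The patch keeps a
 * numer/denom pair in rtclock.timebase_const so that nanoseconds =
 * (TSC * numer) / denom; when denom is 0xffffffff, numer is the 32.32
 * fixed-point multiplier rtc_quant_scale and the divide is, to within
 * rounding, a 32-bit shift.  tsc_delta_to_nanos() is a hypothetical helper,
 * and it assumes the delta is small enough that the 64-bit product does
 * not overflow.]
 */
#include <stdint.h>

static uint64_t
tsc_delta_to_nanos(uint64_t tsc_delta, uint32_t numer, uint32_t denom)
{
	if (denom == 0xffffffffU) {
		/* fast path: numer is a 32.32 scale factor; >>32
		 * approximates the divide by 0xffffffff */
		return ((tsc_delta * (uint64_t)numer) >> 32);
	}
	/* generic path, e.g. numer = NSEC_PER_SEC / 20 and denom = the
	 * cycle count timeRDTSC() measured over 1/20 of a second */
	return ((tsc_delta * (uint64_t)numer) / denom);
}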
@@ -197,6 +197,7 @@ EXT(mp_boot_pde): .globl EXT(_start) LEXT(_start) LEXT(pstart) + mov %eax, %ebx /* save pointer to kernbootstruct */ mov $0,%ax /* fs must be zeroed; */ mov %ax,%fs /* some bootstrappers don`t do this */ mov %ax,%gs @@ -377,7 +378,7 @@ LEXT(vstart) mov %ax,%gs lea EXT(eintstack),%esp /* switch to the bootup stack */ - call EXT(machine_startup) /* run C code */ + call EXT(i386_init) /* run C code */ /*NOTREACHED*/ hlt @@ -482,9 +483,9 @@ LEXT(svstart) movl %edx,2(%esp) /* point to local IDT (linear address) */ lidt 0(%esp) /* load new IDT */ - movw $(KERNEL_LDT),%ax - lldt %ax /* load new LDT */ - + movw $(KERNEL_LDT),%ax /* get LDT segment */ + lldt %ax /* load LDT */ + movw $(KERNEL_TSS),%ax ltr %ax /* load new KTSS */ diff --git a/osfmk/i386/thread.h b/osfmk/i386/thread.h index 99bcca4a0..df12f137f 100644 --- a/osfmk/i386/thread.h +++ b/osfmk/i386/thread.h @@ -140,18 +140,4 @@ extern void db_task_start(void); */ #define MACHINE_FAST_EXCEPTION 1 -/* - * MD Macro to fill up global stack state, - * keeping the MD structure sizes + games private - */ -#define MACHINE_STACK_STASH(stack) \ -MACRO_BEGIN \ - mp_disable_preemption(); \ - kernel_stack[cpu_number()] = (stack) + \ - (KERNEL_STACK_SIZE - sizeof (struct i386_exception_link) \ - - sizeof (struct i386_kernel_state)), \ - active_stacks[cpu_number()] = (stack); \ - mp_enable_preemption(); \ -MACRO_END - #endif /* _I386_THREAD_H_ */ diff --git a/osfmk/i386/thread_act.h b/osfmk/i386/thread_act.h index 20f8b7ed9..bd2b33645 100644 --- a/osfmk/i386/thread_act.h +++ b/osfmk/i386/thread_act.h @@ -38,6 +38,7 @@ #include #include +#include #include /* @@ -66,6 +67,8 @@ struct i386_fpsave_state { boolean_t fp_valid; struct i386_fp_save fp_save_state; struct i386_fp_regs fp_regs; + struct i386_fx_save fx_save_state __attribute__ ((aligned (16))); + int fp_save_flavor; }; /* @@ -91,6 +94,8 @@ struct v86_assist_state { */ struct i386_interrupt_state { + int gs; + int fs; int es; int ds; int edx; @@ -137,6 +142,7 @@ typedef struct pcb { struct i386_machine_state ims; #ifdef MACH_BSD unsigned long cthread_self; /* for use of cthread package */ + struct real_descriptor cthread_desc; #endif decl_simple_lock_data(,lock) } *pcb_t; @@ -175,11 +181,8 @@ extern void *act_thread_csave(void); extern void act_thread_catt(void *ctx); extern void act_thread_cfree(void *ctx); -#define current_act_fast() (current_thread()->top_act) -#define current_act_slow() ((current_thread()) ? \ - current_act_fast() : \ - THR_ACT_NULL) - -#define current_act() current_act_slow() /* JMM - til we find the culprit */ +extern vm_offset_t active_stacks[NCPUS]; +extern vm_offset_t kernel_stack[NCPUS]; +extern thread_act_t active_kloaded[NCPUS]; #endif /* _I386_THREAD_ACT_H_ */ diff --git a/osfmk/i386/trap.c b/osfmk/i386/trap.c index 29ead88d9..33f694966 100644 --- a/osfmk/i386/trap.c +++ b/osfmk/i386/trap.c @@ -485,7 +485,7 @@ user_trap( kern_return_t result; register thread_act_t thr_act = current_act(); thread_t thread = (thr_act ? 
thr_act->thread : THREAD_NULL); - boolean_t kernel_act = thr_act->kernel_loaded; + boolean_t kernel_act = FALSE; etap_data_t probe_data; if (regs->efl & EFL_VM) { @@ -1111,7 +1111,7 @@ i386_astintr(int preemption) splx(s); return; } - else mask = AST_PREEMPT; + else mask = AST_PREEMPTION; mp_enable_preemption(); /* diff --git a/osfmk/i386/xpr.h b/osfmk/i386/xpr.h index 0a8071dea..fe2cabe2c 100644 --- a/osfmk/i386/xpr.h +++ b/osfmk/i386/xpr.h @@ -61,16 +61,15 @@ #include #include -#include #include -#if NCPUS == 1 || MP_V1_1 +#if NCPUS == 1 extern int xpr_time(void); #define XPR_TIMESTAMP xpr_time() -#else /* NCPUS == 1 || MP_V1_1 */ +#else /* NCPUS == 1 */ #define XPR_TIMESTAMP (0) -#endif /* NCPUS == 1 || MP_V1_1 */ +#endif /* NCPUS == 1 */ diff --git a/osfmk/ipc/ipc_init.c b/osfmk/ipc/ipc_init.c index 7545baf93..3cd60dfa8 100644 --- a/osfmk/ipc/ipc_init.c +++ b/osfmk/ipc/ipc_init.c @@ -66,6 +66,8 @@ #include #include #include +#include +#include #include #include #include @@ -172,11 +174,12 @@ ipc_bootstrap(void) #endif mig_init(); ipc_table_init(); - ipc_notify_init(); ipc_hash_init(); ipc_kmsg_init(); semaphore_init(); lock_set_init(); + mk_timer_init(); + host_notify_init(); } /* diff --git a/osfmk/ipc/ipc_kmsg.c b/osfmk/ipc/ipc_kmsg.c index e01db1a02..2798dfa5d 100644 --- a/osfmk/ipc/ipc_kmsg.c +++ b/osfmk/ipc/ipc_kmsg.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -91,6 +91,12 @@ #include +#ifdef ppc +#include +#include +#endif + + extern vm_map_t ipc_kernel_copy_map; extern vm_size_t ipc_kmsg_max_vm_space; extern vm_size_t msg_ool_size_small; @@ -650,7 +656,7 @@ ipc_kmsg_get( { mach_msg_size_t msg_and_trailer_size; ipc_kmsg_t kmsg; - mach_msg_format_0_trailer_t *trailer; + mach_msg_max_trailer_t *trailer; mach_port_name_t dest_name; ipc_entry_t dest_entry; ipc_port_t dest_port; @@ -678,11 +684,17 @@ ipc_kmsg_get( * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize * the cases where no implicit data is requested. */ - trailer = (mach_msg_format_0_trailer_t *) ((vm_offset_t)&kmsg->ikm_header + size); - trailer->msgh_sender = current_thread()->top_act->task->sec_token; + trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)&kmsg->ikm_header + size); + trailer->msgh_sender = current_act()->task->sec_token; + trailer->msgh_audit = current_act()->task->audit_token; trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; - + +#ifdef ppc + if(trcWork.traceMask) dbgTrace((unsigned int)kmsg->ikm_header.msgh_id, + (unsigned int)kmsg->ikm_header.msgh_remote_port, + (unsigned int)kmsg->ikm_header.msgh_local_port, 0); +#endif *kmsgp = kmsg; return MACH_MSG_SUCCESS; } @@ -709,7 +721,7 @@ ipc_kmsg_get_from_kernel( { ipc_kmsg_t kmsg; mach_msg_size_t msg_and_trailer_size; - mach_msg_format_0_trailer_t *trailer; + mach_msg_max_trailer_t *trailer; ipc_port_t dest_port; assert(size >= sizeof(mach_msg_header_t)); @@ -759,9 +771,10 @@ ipc_kmsg_get_from_kernel( * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to * optimize the cases where no implicit data is requested. 
*/ - trailer = (mach_msg_format_0_trailer_t *) + trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)&kmsg->ikm_header + size); trailer->msgh_sender = KERNEL_SECURITY_TOKEN; + trailer->msgh_audit = KERNEL_AUDIT_TOKEN; trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; @@ -1394,7 +1407,7 @@ ipc_kmsg_copyin_body( * Out-of-line memory descriptor, accumulate kernel * memory requirements */ - space_needed += round_page(sstart->out_of_line.size); + space_needed += round_page_32(sstart->out_of_line.size); if (space_needed > ipc_kmsg_max_vm_space) { /* @@ -1501,7 +1514,7 @@ ipc_kmsg_copyin_body( */ if (!page_aligned(length)) { (void) memset((void *) (paddr + length), 0, - round_page(length) - length); + round_page_32(length) - length); } if (vm_map_copyin(ipc_kernel_copy_map, paddr, length, TRUE, ©) != KERN_SUCCESS) { @@ -1510,8 +1523,8 @@ ipc_kmsg_copyin_body( return MACH_MSG_VM_KERNEL; } dsc->address = (void *) copy; - paddr += round_page(length); - space_needed -= round_page(length); + paddr += round_page_32(length); + space_needed -= round_page_32(length); } else { /* @@ -1594,9 +1607,8 @@ ipc_kmsg_copyin_body( for(k = 0; k < j; k++) { object = objects[k]; - if (!MACH_PORT_VALID(port)) - continue; - ipc_object_destroy(object, dsc->disposition); + if (IPC_OBJECT_VALID(object)) + ipc_object_destroy(object, dsc->disposition); } kfree(data, length); ipc_kmsg_clean_partial(kmsg, i, paddr, space_needed); diff --git a/osfmk/ipc/ipc_mqueue.c b/osfmk/ipc/ipc_mqueue.c index d00ebe34f..4321242c6 100644 --- a/osfmk/ipc/ipc_mqueue.c +++ b/osfmk/ipc/ipc_mqueue.c @@ -349,6 +349,7 @@ ipc_mqueue_send( imq_unlock(mqueue); splx(s); } else { + thread_t cur_thread = current_thread(); /* * We have to wait for space to be granted to us. @@ -359,12 +360,14 @@ ipc_mqueue_send( return MACH_SEND_TIMED_OUT; } mqueue->imq_fullwaiters = TRUE; + thread_lock(cur_thread); wresult = wait_queue_assert_wait64_locked( &mqueue->imq_wait_queue, IPC_MQUEUE_FULL, THREAD_ABORTSAFE, - TRUE); /* unlock? */ - /* wait/mqueue is unlocked */ + cur_thread); + thread_unlock(cur_thread); + imq_unlock(mqueue); splx(s); if (wresult == THREAD_WAITING) { @@ -733,15 +736,17 @@ ipc_mqueue_receive( } } + thread_lock(self); self->ith_state = MACH_RCV_IN_PROGRESS; self->ith_option = option; self->ith_msize = max_size; - + wresult = wait_queue_assert_wait64_locked(&mqueue->imq_wait_queue, IPC_MQUEUE_RECEIVE, interruptible, - TRUE); /* unlock? */ - /* mqueue/waitq is unlocked */ + self); + thread_unlock(self); + imq_unlock(mqueue); splx(s); if (wresult == THREAD_WAITING) { diff --git a/osfmk/ipc/ipc_notify.c b/osfmk/ipc/ipc_notify.c index e00829214..d82595514 100644 --- a/osfmk/ipc/ipc_notify.c +++ b/osfmk/ipc/ipc_notify.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -60,188 +60,13 @@ * Notification-sending functions. 
*/ -#include - #include #include -#include -#include +#include #include -#include -#include #include #include -/* - * Forward declarations - */ -void ipc_notify_init_port_deleted( - mach_port_deleted_notification_t *n); - -void ipc_notify_init_port_destroyed( - mach_port_destroyed_notification_t *n); - -void ipc_notify_init_no_senders( - mach_no_senders_notification_t *n); - -void ipc_notify_init_send_once( - mach_send_once_notification_t *n); - -void ipc_notify_init_dead_name( - mach_dead_name_notification_t *n); - -mach_port_deleted_notification_t ipc_notify_port_deleted_template; -mach_port_destroyed_notification_t ipc_notify_port_destroyed_template; -mach_no_senders_notification_t ipc_notify_no_senders_template; -mach_send_once_notification_t ipc_notify_send_once_template; -mach_dead_name_notification_t ipc_notify_dead_name_template; - -/* - * Routine: ipc_notify_init_port_deleted - * Purpose: - * Initialize a template for port-deleted notifications. - */ - -void -ipc_notify_init_port_deleted( - mach_port_deleted_notification_t *n) -{ - mach_msg_header_t *m = &n->not_header; - - m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0); - m->msgh_local_port = MACH_PORT_NULL; - m->msgh_remote_port = MACH_PORT_NULL; - m->msgh_id = MACH_NOTIFY_PORT_DELETED; - m->msgh_size = ((int)sizeof *n) - sizeof(mach_msg_format_0_trailer_t); - - n->not_port = MACH_PORT_NULL; - n->NDR = NDR_record; - n->trailer.msgh_seqno = 0; - n->trailer.msgh_sender = KERNEL_SECURITY_TOKEN; - n->trailer.msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; - n->trailer.msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; -} - -/* - * Routine: ipc_notify_init_port_destroyed - * Purpose: - * Initialize a template for port-destroyed notifications. - */ - -void -ipc_notify_init_port_destroyed( - mach_port_destroyed_notification_t *n) -{ - mach_msg_header_t *m = &n->not_header; - - m->msgh_bits = MACH_MSGH_BITS_COMPLEX | - MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0); - m->msgh_local_port = MACH_PORT_NULL; - m->msgh_remote_port = MACH_PORT_NULL; - m->msgh_id = MACH_NOTIFY_PORT_DESTROYED; - m->msgh_size = ((int)sizeof *n) - sizeof(mach_msg_format_0_trailer_t); - - n->not_body.msgh_descriptor_count = 1; - n->not_port.disposition = MACH_MSG_TYPE_PORT_RECEIVE; - n->not_port.name = MACH_PORT_NULL; - n->not_port.type = MACH_MSG_PORT_DESCRIPTOR; - n->trailer.msgh_seqno = 0; - n->trailer.msgh_sender = KERNEL_SECURITY_TOKEN; - n->trailer.msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; - n->trailer.msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; -} - -/* - * Routine: ipc_notify_init_no_senders - * Purpose: - * Initialize a template for no-senders notifications. - */ - -void -ipc_notify_init_no_senders( - mach_no_senders_notification_t *n) -{ - mach_msg_header_t *m = &n->not_header; - - m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0); - m->msgh_local_port = MACH_PORT_NULL; - m->msgh_remote_port = MACH_PORT_NULL; - m->msgh_id = MACH_NOTIFY_NO_SENDERS; - m->msgh_size = ((int)sizeof *n) - sizeof(mach_msg_format_0_trailer_t); - - n->NDR = NDR_record; - n->trailer.msgh_seqno = 0; - n->trailer.msgh_sender = KERNEL_SECURITY_TOKEN; - n->trailer.msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; - n->trailer.msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; - n->not_count = 0; -} - -/* - * Routine: ipc_notify_init_send_once - * Purpose: - * Initialize a template for send-once notifications. 
- */ - -void -ipc_notify_init_send_once( - mach_send_once_notification_t *n) -{ - mach_msg_header_t *m = &n->not_header; - - m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0); - m->msgh_local_port = MACH_PORT_NULL; - m->msgh_remote_port = MACH_PORT_NULL; - m->msgh_id = MACH_NOTIFY_SEND_ONCE; - m->msgh_size = ((int)sizeof *n) - sizeof(mach_msg_format_0_trailer_t); - n->trailer.msgh_seqno = 0; - n->trailer.msgh_sender = KERNEL_SECURITY_TOKEN; - n->trailer.msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; - n->trailer.msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; -} - -/* - * Routine: ipc_notify_init_dead_name - * Purpose: - * Initialize a template for dead-name notifications. - */ - -void -ipc_notify_init_dead_name( - mach_dead_name_notification_t *n) -{ - mach_msg_header_t *m = &n->not_header; - - m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0); - m->msgh_local_port = MACH_PORT_NULL; - m->msgh_remote_port = MACH_PORT_NULL; - m->msgh_id = MACH_NOTIFY_DEAD_NAME; - m->msgh_size = ((int)sizeof *n) - sizeof(mach_msg_format_0_trailer_t); - - n->not_port = MACH_PORT_NULL; - n->NDR = NDR_record; - n->trailer.msgh_seqno = 0; - n->trailer.msgh_sender = KERNEL_SECURITY_TOKEN; - n->trailer.msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; - n->trailer.msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; -} - -/* - * Routine: ipc_notify_init - * Purpose: - * Initialize the notification subsystem. - */ - -void -ipc_notify_init(void) -{ - ipc_notify_init_port_deleted(&ipc_notify_port_deleted_template); - ipc_notify_init_port_destroyed(&ipc_notify_port_destroyed_template); - ipc_notify_init_no_senders(&ipc_notify_no_senders_template); - ipc_notify_init_send_once(&ipc_notify_send_once_template); - ipc_notify_init_dead_name(&ipc_notify_dead_name_template); -} - /* * Routine: ipc_notify_port_deleted * Purpose: @@ -256,23 +81,13 @@ ipc_notify_port_deleted( ipc_port_t port, mach_port_name_t name) { - ipc_kmsg_t kmsg; - mach_port_deleted_notification_t *n; + kern_return_t kr; - kmsg = ipc_kmsg_alloc(sizeof *n); - if (kmsg == IKM_NULL) { + kr = mach_notify_port_deleted(port, name); + if (kr != KERN_SUCCESS) { printf("dropped port-deleted (0x%08x, 0x%x)\n", port, name); ipc_port_release_sonce(port); - return; } - - n = (mach_port_deleted_notification_t *) &kmsg->ikm_header; - *n = ipc_notify_port_deleted_template; - - n->not_header.msgh_remote_port = port; - n->not_port = name; - - ipc_kmsg_send_always(kmsg); } /* @@ -292,25 +107,15 @@ ipc_notify_port_destroyed( ipc_port_t port, ipc_port_t right) { - ipc_kmsg_t kmsg; - mach_port_destroyed_notification_t *n; + kern_return_t kr; - kmsg = ipc_kmsg_alloc(sizeof *n); - if (kmsg == IKM_NULL) { + kr = mach_notify_port_destroyed(port, right); + if (kr != KERN_SUCCESS) { printf("dropped port-destroyed (0x%08x, 0x%08x)\n", port, right); ipc_port_release_sonce(port); ipc_port_release_receive(right); - return; } - - n = (mach_port_destroyed_notification_t *) &kmsg->ikm_header; - *n = ipc_notify_port_destroyed_template; - - n->not_header.msgh_remote_port = port; - n->not_port.name = right; - - ipc_kmsg_send_always(kmsg); } /* @@ -327,23 +132,13 @@ ipc_notify_no_senders( ipc_port_t port, mach_port_mscount_t mscount) { - ipc_kmsg_t kmsg; - mach_no_senders_notification_t *n; + kern_return_t kr; - kmsg = ipc_kmsg_alloc(sizeof *n); - if (kmsg == IKM_NULL) { + kr = mach_notify_no_senders(port, mscount); + if (kr != KERN_SUCCESS) { printf("dropped no-senders (0x%08x, %u)\n", port, mscount); ipc_port_release_sonce(port); - return; } - - n = 
(mach_no_senders_notification_t *) &kmsg->ikm_header; - *n = ipc_notify_no_senders_template; - - n->not_header.msgh_remote_port = port; - n->not_count = mscount; - - ipc_kmsg_send_always(kmsg); } /* @@ -359,22 +154,13 @@ void ipc_notify_send_once( ipc_port_t port) { - ipc_kmsg_t kmsg; - mach_send_once_notification_t *n; + kern_return_t kr; - kmsg = ipc_kmsg_alloc(sizeof *n); - if (kmsg == IKM_NULL) { + kr = mach_notify_send_once(port); + if (kr != KERN_SUCCESS) { printf("dropped send-once (0x%08x)\n", port); ipc_port_release_sonce(port); - return; } - - n = (mach_send_once_notification_t *) &kmsg->ikm_header; - *n = ipc_notify_send_once_template; - - n->not_header.msgh_remote_port = port; - - ipc_kmsg_send_always(kmsg); } /* @@ -391,21 +177,11 @@ ipc_notify_dead_name( ipc_port_t port, mach_port_name_t name) { - ipc_kmsg_t kmsg; - mach_dead_name_notification_t *n; + kern_return_t kr; - kmsg = ipc_kmsg_alloc(sizeof *n); - if (kmsg == IKM_NULL) { + kr = mach_notify_dead_name(port, name); + if (kr != KERN_SUCCESS) { printf("dropped dead-name (0x%08x, 0x%x)\n", port, name); ipc_port_release_sonce(port); - return; } - - n = (mach_dead_name_notification_t *) &kmsg->ikm_header; - *n = ipc_notify_dead_name_template; - - n->not_header.msgh_remote_port = port; - n->not_port = name; - - ipc_kmsg_send_always(kmsg); } diff --git a/osfmk/ipc/ipc_notify.h b/osfmk/ipc/ipc_notify.h index 0d4d0eea5..8ec1ea248 100644 --- a/osfmk/ipc/ipc_notify.h +++ b/osfmk/ipc/ipc_notify.h @@ -67,9 +67,6 @@ * Exported interfaces */ -/* Initialize the notification subsystem */ -extern void ipc_notify_init(void); - /* Send a port-deleted notification */ extern void ipc_notify_port_deleted( ipc_port_t port, diff --git a/osfmk/ipc/ipc_object.c b/osfmk/ipc/ipc_object.c index df9081bef..a899d9bbb 100644 --- a/osfmk/ipc/ipc_object.c +++ b/osfmk/ipc/ipc_object.c @@ -520,7 +520,8 @@ ipc_object_copyin( * Copyin a naked capability from the kernel. * * MACH_MSG_TYPE_MOVE_RECEIVE - * The receiver must be ipc_space_kernel. + * The receiver must be ipc_space_kernel + * or the receive right must already be in limbo. * Consumes the naked receive right. * MACH_MSG_TYPE_COPY_SEND * A naked send right must be supplied. 
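/*
 * [Editor's note, not part of the patch: the ipc_notify.c hunks above replace
 * the hand-built kmsg templates with direct calls to the MIG-generated
 * mach_notify_* routines, releasing the send-once right only when the send
 * fails.  For context, this is roughly how a user task arms the dead-name
 * notification that ipc_notify_dead_name() ultimately delivers; the name
 * watch_for_death is hypothetical and error checking is omitted.]
 */
#include <mach/mach.h>
#include <mach/notify.h>

static void
watch_for_death(mach_port_t watched_name)
{
	mach_port_t notify_port, previous;

	/* a receive right on which the notification will arrive */
	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
	    &notify_port);

	/* ask for MACH_NOTIFY_DEAD_NAME on watched_name, delivered via a
	 * send-once right fabricated from notify_port */
	mach_port_request_notification(mach_task_self(), watched_name,
	    MACH_NOTIFY_DEAD_NAME, 0, notify_port,
	    MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
}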
@@ -554,14 +555,15 @@ ipc_object_copyin_from_kernel( ip_lock(port); assert(ip_active(port)); - assert(port->ip_receiver_name != MACH_PORT_NULL); - assert(port->ip_receiver == ipc_space_kernel); + if (port->ip_destination != IP_NULL) { + assert(port->ip_receiver == ipc_space_kernel); - /* relevant part of ipc_port_clear_receiver */ - ipc_port_set_mscount(port, 0); + /* relevant part of ipc_port_clear_receiver */ + ipc_port_set_mscount(port, 0); - port->ip_receiver_name = MACH_PORT_NULL; - port->ip_destination = IP_NULL; + port->ip_receiver_name = MACH_PORT_NULL; + port->ip_destination = IP_NULL; + } ip_unlock(port); break; } @@ -594,9 +596,12 @@ ipc_object_copyin_from_kernel( break; } - case MACH_MSG_TYPE_MOVE_SEND: + case MACH_MSG_TYPE_MOVE_SEND: { /* move naked send right into the message */ + ipc_port_t port = (ipc_port_t) object; + assert(port->ip_srights); break; + } case MACH_MSG_TYPE_MAKE_SEND_ONCE: { ipc_port_t port = (ipc_port_t) object; @@ -611,9 +616,12 @@ ipc_object_copyin_from_kernel( break; } - case MACH_MSG_TYPE_MOVE_SEND_ONCE: + case MACH_MSG_TYPE_MOVE_SEND_ONCE: { /* move naked send-once right into the message */ + ipc_port_t port = (ipc_port_t) object; + assert(port->ip_sorights); break; + } default: panic("ipc_object_copyin_from_kernel: strange rights"); diff --git a/osfmk/ipc/ipc_port.c b/osfmk/ipc/ipc_port.c index a2e300188..bddaecf3d 100644 --- a/osfmk/ipc/ipc_port.c +++ b/osfmk/ipc/ipc_port.c @@ -501,6 +501,10 @@ ipc_port_alloc( ipc_port_init(port, space, name); + if (task_is_classic(current_task())) { + IP_SET_CLASSIC(port); + } + *namep = name; *portp = port; @@ -540,6 +544,10 @@ ipc_port_alloc_name( ipc_port_init(port, space, name); + if (task_is_classic(current_task())) { + IP_SET_CLASSIC(port); + } + *portp = port; return KERN_SUCCESS; @@ -618,24 +626,9 @@ ipc_port_destroy( port->ip_destination = IP_NULL; ip_unlock(port); - if (!ipc_port_check_circularity(port, pdrequest)) { - /* consumes our refs for port and pdrequest */ - ipc_notify_port_destroyed(pdrequest, port); - return; - } else { - /* consume pdrequest and destroy port */ - ipc_port_release_sonce(pdrequest); - } - - ip_lock(port); - assert(ip_active(port)); - assert(port->ip_pset_count == 0); - assert(port->ip_mscount == 0); - assert(port->ip_pdrequest == IP_NULL); - assert(port->ip_receiver_name == MACH_PORT_NULL); - assert(port->ip_destination == IP_NULL); - - /* fall through and destroy the port */ + /* consumes our refs for port and pdrequest */ + ipc_notify_port_destroyed(pdrequest, port); + return; } /* once port is dead, we don't need to keep it locked */ @@ -1159,7 +1152,7 @@ ipc_port_dealloc_special( { ip_lock(port); assert(ip_active(port)); - assert(port->ip_receiver_name != MACH_PORT_NULL); +// assert(port->ip_receiver_name != MACH_PORT_NULL); assert(port->ip_receiver == space); /* diff --git a/osfmk/ipc/ipc_port.h b/osfmk/ipc/ipc_port.h index 2cf4e699c..8c2e1b36f 100644 --- a/osfmk/ipc/ipc_port.h +++ b/osfmk/ipc/ipc_port.h @@ -200,6 +200,14 @@ MACRO_BEGIN \ (port)->ip_premsg = IKM_NULL; \ MACRO_END +#define IP_BIT_CLASSIC 0x00004000 +#define IP_CLASSIC(port) ((port)->ip_bits & IP_BIT_CLASSIC) + +#define IP_SET_CLASSIC(port) \ +MACRO_BEGIN \ + (port)->ip_bits |= IP_BIT_CLASSIC; \ +MACRO_END + typedef ipc_table_index_t ipc_port_request_index_t; typedef struct ipc_port_request { diff --git a/osfmk/ipc/ipc_table.h b/osfmk/ipc/ipc_table.h index 63348ef86..d099f63cb 100644 --- a/osfmk/ipc/ipc_table.h +++ b/osfmk/ipc/ipc_table.h @@ -208,21 +208,21 @@ extern void ipc_table_free( #define 
it_entries_alloc(its) \ ((ipc_entry_t) \ ipc_table_alloc(it_entries_reallocable(its) ? \ - round_page((its)->its_size * sizeof(struct ipc_entry)) : \ + round_page_32((its)->its_size * sizeof(struct ipc_entry)) : \ (its)->its_size * sizeof(struct ipc_entry) \ )) #define it_entries_realloc(its, table, nits) \ ((ipc_entry_t) \ ipc_table_realloc( \ - round_page((its)->its_size * sizeof(struct ipc_entry)), \ + round_page_32((its)->its_size * sizeof(struct ipc_entry)), \ (vm_offset_t)(table), \ - round_page((nits)->its_size * sizeof(struct ipc_entry)) \ + round_page_32((nits)->its_size * sizeof(struct ipc_entry)) \ )) #define it_entries_free(its, table) \ ipc_table_free(it_entries_reallocable(its) ? \ - round_page((its)->its_size * sizeof(struct ipc_entry)) : \ + round_page_32((its)->its_size * sizeof(struct ipc_entry)) : \ (its)->its_size * sizeof(struct ipc_entry), \ (vm_offset_t)(table) \ ) diff --git a/osfmk/ipc/mach_debug.c b/osfmk/ipc/mach_debug.c index be0c55076..ead49e1bc 100644 --- a/osfmk/ipc/mach_debug.c +++ b/osfmk/ipc/mach_debug.c @@ -170,7 +170,7 @@ host_ipc_hash_info( if (info != *infop) kmem_free(ipc_kernel_map, addr, size); - size = round_page(actual * sizeof *info); + size = round_page_32(actual * sizeof *info); kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size); if (kr != KERN_SUCCESS) return KERN_RESOURCE_SHORTAGE; @@ -191,7 +191,7 @@ host_ipc_hash_info( vm_map_copy_t copy; vm_size_t used; - used = round_page(actual * sizeof *info); + used = round_page_32(actual * sizeof *info); if (used != size) kmem_free(ipc_kernel_map, addr + used, size - used); @@ -285,7 +285,7 @@ mach_port_space_info( kmem_free(ipc_kernel_map, table_addr, table_size); - table_size = round_page(table_actual * + table_size = round_page_32(table_actual * sizeof *table_info); kr = kmem_alloc(ipc_kernel_map, &table_addr, table_size); @@ -306,7 +306,7 @@ mach_port_space_info( kmem_free(ipc_kernel_map, tree_addr, tree_size); - tree_size = round_page(tree_actual * + tree_size = round_page_32(tree_actual * sizeof *tree_info); kr = kmem_alloc(ipc_kernel_map, &tree_addr, tree_size); @@ -396,7 +396,7 @@ mach_port_space_info( /* kmem_alloc doesn't zero memory */ size_used = table_actual * sizeof *table_info; - rsize_used = round_page(size_used); + rsize_used = round_page_32(size_used); if (rsize_used != table_size) kmem_free(ipc_kernel_map, @@ -434,7 +434,7 @@ mach_port_space_info( /* kmem_alloc doesn't zero memory */ size_used = tree_actual * sizeof *tree_info; - rsize_used = round_page(size_used); + rsize_used = round_page_32(size_used); if (rsize_used != tree_size) kmem_free(ipc_kernel_map, diff --git a/osfmk/ipc/mach_msg.c b/osfmk/ipc/mach_msg.c index 9d43eece5..3e58c244a 100644 --- a/osfmk/ipc/mach_msg.c +++ b/osfmk/ipc/mach_msg.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -122,6 +122,7 @@ mach_msg_return_t msg_receive_error( ipc_space_t space); security_token_t KERNEL_SECURITY_TOKEN = KERNEL_SECURITY_TOKEN_VALUE; +audit_token_t KERNEL_AUDIT_TOKEN = KERNEL_AUDIT_TOKEN_VALUE; mach_msg_format_0_trailer_t trailer_template = { /* mach_msg_trailer_type_t */ MACH_MSG_TRAILER_FORMAT_0, @@ -1048,21 +1049,23 @@ mach_msg_overwrite_trap( assert(ip_active(dest_port)); assert(dest_port->ip_receiver != ipc_space_kernel); - assert(!imq_full(&dest_port->ip_messages) || - (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) == - MACH_MSG_TYPE_PORT_SEND_ONCE)); +// assert(!imq_full(&dest_port->ip_messages) || +// (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) == +// MACH_MSG_TYPE_PORT_SEND_ONCE)); assert((hdr->msgh_bits & MACH_MSGH_BITS_CIRCULAR) == 0); { register ipc_mqueue_t dest_mqueue; wait_queue_t waitq; thread_t receiver; -#if THREAD_SWAPPER - thread_act_t rcv_act; -#endif + processor_t processor; spl_t s; s = splsched(); + processor = current_processor(); + if (processor->current_pri >= BASEPRI_RTQUEUES) + goto abort_send_receive1; + dest_mqueue = &dest_port->ip_messages; waitq = &dest_mqueue->imq_wait_queue; imq_lock(dest_mqueue); @@ -1070,9 +1073,10 @@ mach_msg_overwrite_trap( wait_queue_peek64_locked(waitq, IPC_MQUEUE_RECEIVE, &receiver, &waitq); /* queue still locked, thread locked - but still on q */ - if (receiver == THREAD_NULL) { + if ( receiver == THREAD_NULL ) { abort_send_receive: imq_unlock(dest_mqueue); + abort_send_receive1: splx(s); ip_unlock(dest_port); ipc_object_release(rcv_object); @@ -1084,18 +1088,21 @@ mach_msg_overwrite_trap( assert(receiver->wait_event == IPC_MQUEUE_RECEIVE); /* - * See if it is still running on another processor (trying to - * block itself). If so, fall off. + * Make sure that the scheduling state of the receiver is such + * that we can handoff to it here. If not, fall off. * - * JMM - We have an opportunity here. Since the thread is locked - * and we find it runnable, it must still be trying to get into + * JMM - We have an opportunity here. If the thread is locked + * and we find it runnable, it may still be trying to get into * thread_block on itself. We could just "hand him the message" * and let him go (thread_go_locked()) and then fall down into a * slow receive for ourselves. Only his RECEIVE_TOO_LARGE handling * runs afoul of that. Clean this up! */ - if ((receiver->state & (TH_RUN|TH_WAIT)) != TH_WAIT) { - assert(NCPUS > 1); + if ((receiver->state & (TH_RUN|TH_WAIT)) != TH_WAIT || + receiver->sched_pri >= BASEPRI_RTQUEUES || + receiver->processor_set != processor->processor_set || + (receiver->bound_processor != PROCESSOR_NULL && + receiver->bound_processor != processor)) { HOT(c_mmot_cold_033++); fall_off: thread_unlock(receiver); @@ -1116,52 +1123,6 @@ mach_msg_overwrite_trap( goto fall_off; } -#if THREAD_SWAPPER - /* - * Receiver looks okay -- is it swapped in? - */ - rcv_act = receiver->top_act; - if (rcv_act->swap_state != TH_SW_IN && - rcv_act->swap_state != TH_SW_UNSWAPPABLE) { - HOT(c_mmot_rcvr_swapped++); - goto fall_off; - } - - /* - * Make sure receiver stays swapped in (if we can). - */ - if (!act_lock_try(rcv_act)) { /* out of order! */ - HOT(c_mmot_rcvr_locked++); - goto fall_off; - } - - /* - * Check for task swapping in progress affecting - * receiver. Since rcv_act is attached to a shuttle, - * its swap_state is covered by shuttle's thread_lock() - * (sigh). 
- */ - if ((rcv_act->swap_state != TH_SW_IN && - rcv_act->swap_state != TH_SW_UNSWAPPABLE) || - rcv_act->ast & AST_SWAPOUT) { - act_unlock(rcv_act); - HOT(c_mmot_rcvr_tswapped++); - goto fall_off; - } - - /* - * We don't need to make receiver unswappable here -- holding - * act_lock() of rcv_act is sufficient to prevent either thread - * or task swapping from changing its state (see swapout_scan(), - * task_swapout()). Don't release lock till receiver's state - * is consistent. Its task may then be marked for swapout, - * but that's life. - */ - /* - * NB: act_lock(rcv_act) still held - */ -#endif /* THREAD_SWAPPER */ - /* * Before committing to the handoff, make sure that we are * really going to block (i.e. there are no messages already @@ -1203,25 +1164,21 @@ mach_msg_overwrite_trap( receiver->ith_seqno = dest_mqueue->imq_seqno++; /* - * Inline thread_go_locked - * - * JMM - Including hacked in version of setrun scheduler op - * that doesn't try to put thread on a runq. + * Update the scheduling state for the handoff. */ - { - receiver->state &= ~(TH_WAIT|TH_UNINT); - hw_atomic_add(&receiver->processor_set->run_count, 1); - receiver->state |= TH_RUN; - receiver->wait_result = THREAD_AWAKENED; - - receiver->computation_metered = 0; - receiver->reason = AST_NONE; - } - + receiver->state &= ~(TH_WAIT|TH_UNINT); + receiver->state |= TH_RUN; + + pset_run_incr(receiver->processor_set); + if (receiver->sched_mode & TH_MODE_TIMESHARE) + pset_share_incr(receiver->processor_set); + + receiver->wait_result = THREAD_AWAKENED; + + receiver->computation_metered = 0; + receiver->reason = AST_NONE; + thread_unlock(receiver); -#if THREAD_SWAPPER - act_unlock(rcv_act); -#endif /* THREAD_SWAPPER */ imq_unlock(dest_mqueue); ip_unlock(dest_port); @@ -1234,6 +1191,7 @@ mach_msg_overwrite_trap( * our reply message needs to determine if it * can hand off directly back to us. */ + thread_lock(self); self->ith_msg = (rcv_msg) ? rcv_msg : msg; self->ith_object = rcv_object; /* still holds reference */ self->ith_msize = rcv_size; @@ -1245,8 +1203,9 @@ mach_msg_overwrite_trap( (void)wait_queue_assert_wait64_locked(waitq, IPC_MQUEUE_RECEIVE, THREAD_ABORTSAFE, - TRUE); /* unlock? 
*/ - /* rcv_mqueue is unlocked */ + self); + thread_unlock(self); + imq_unlock(rcv_mqueue); /* * Switch directly to receiving thread, and block diff --git a/osfmk/ipc/mach_port.c b/osfmk/ipc/mach_port.c index e46e596e1..bed9efd82 100644 --- a/osfmk/ipc/mach_port.c +++ b/osfmk/ipc/mach_port.c @@ -240,7 +240,7 @@ mach_port_names( /* upper bound on number of names in the space */ bound = space->is_table_size + space->is_tree_total; - size_needed = round_page(bound * sizeof(mach_port_name_t)); + size_needed = round_page_32(bound * sizeof(mach_port_name_t)); if (size_needed <= size) break; @@ -332,7 +332,7 @@ mach_port_names( vm_size_t vm_size_used; size_used = actual * sizeof(mach_port_name_t); - vm_size_used = round_page(size_used); + vm_size_used = round_page_32(size_used); /* * Make used memory pageable and get it into @@ -1128,7 +1128,7 @@ mach_port_get_set_status( /* didn't have enough memory; allocate more */ kmem_free(ipc_kernel_map, addr, size); - size = round_page(actual * sizeof(mach_port_name_t)) + PAGE_SIZE; + size = round_page_32(actual * sizeof(mach_port_name_t)) + PAGE_SIZE; } if (actual == 0) { @@ -1140,7 +1140,7 @@ mach_port_get_set_status( vm_size_t vm_size_used; size_used = actual * sizeof(mach_port_name_t); - vm_size_used = round_page(size_used); + vm_size_used = round_page_32(size_used); /* * Make used memory pageable and get it into diff --git a/osfmk/kdp/kdp.c b/osfmk/kdp/kdp.c index 35e5b6105..5cfe516f2 100644 --- a/osfmk/kdp/kdp.c +++ b/osfmk/kdp/kdp.c @@ -85,7 +85,9 @@ typedef struct{ static kdp_breakpoint_record_t breakpoint_list[MAX_BREAKPOINTS]; static unsigned int breakpoints_initialized = 0; + int reattach_wait = 0; +int noresume_on_disconnect = 0; boolean_t kdp_packet( @@ -216,6 +218,11 @@ kdp_disconnect( kdp.is_halted = kdp.is_conn = FALSE; kdp.exception_seq = kdp.conn_seq = 0; + if (noresume_on_disconnect == 1) { + reattach_wait = 1; + noresume_on_disconnect = 0; + } + rp->hdr.is_reply = 1; rp->hdr.len = sizeof (*rp); diff --git a/osfmk/kdp/kdp_core.h b/osfmk/kdp/kdp_core.h new file mode 100644 index 000000000..f56992958 --- /dev/null +++ b/osfmk/kdp/kdp_core.h @@ -0,0 +1,50 @@ +/* Various protocol definitions + * for the core transfer protocol, which is a variant of TFTP + */ + +/* + * Packet types. + */ +#define KDP_RRQ 1 /* read request */ +#define KDP_WRQ 2 /* write request */ +#define KDP_DATA 3 /* data packet */ +#define KDP_ACK 4 /* acknowledgement */ +#define KDP_ERROR 5 /* error code */ +#define KDP_SEEK 6 /* Seek to specified offset */ +#define KDP_EOF 7 /* signal end of file */ +struct corehdr { + short th_opcode; /* packet type */ + union { + unsigned int tu_block; /* block # */ + unsigned int tu_code; /* error code */ + char tu_rpl[1]; /* request packet payload */ + } th_u; + char th_data[1]; /* data or error string */ +}__attribute__((packed)); + +#define th_block th_u.tu_block +#define th_code th_u.tu_code +#define th_stuff th_u.tu_rpl +#define th_msg th_data + +/* + * Error codes. 
+ */ +#define EUNDEF 0 /* not defined */ +#define ENOTFOUND 1 /* file not found */ +#define EACCESS 2 /* access violation */ +#define ENOSPACE 3 /* disk full or allocation exceeded */ +#define EBADOP 4 /* illegal TFTP operation */ +#define EBADID 5 /* unknown transfer ID */ +#define EEXISTS 6 /* file already exists */ +#define ENOUSER 7 /* no such user */ + +#define CORE_REMOTE_PORT 1069 /* hardwired, we can't really query the services file */ + +void kdp_panic_dump (void); + +void abort_panic_transfer (void); + +struct corehdr *create_panic_header(unsigned int request, const char *corename, unsigned length, unsigned block); + +int kdp_send_panic_pkt (unsigned int request, char *corename, unsigned int length, void *panic_data); diff --git a/osfmk/kdp/kdp_internal.h b/osfmk/kdp/kdp_internal.h index b9fbc8ca9..517b2d7b6 100644 --- a/osfmk/kdp/kdp_internal.h +++ b/osfmk/kdp/kdp_internal.h @@ -47,8 +47,11 @@ extern int kdp_flag; #define KDP_READY 0x1 #define KDP_ARP 0x2 #define KDP_BP_DIS 0x4 - - +#define KDP_GETC_ENA 0x8 +#define KDP_PANIC_DUMP_ENABLED 0x10 +#define PANIC_CORE_ON_NMI 0x20 +#define DBG_POST_CORE 0x40 +#define PANIC_LOG_DUMP 0x80 typedef boolean_t (*kdp_dispatch_t) ( kdp_pkt_t *, diff --git a/osfmk/kdp/kdp_udp.c b/osfmk/kdp/kdp_udp.c index c62dc4ed2..a37f80873 100644 --- a/osfmk/kdp/kdp_udp.c +++ b/osfmk/kdp/kdp_udp.c @@ -33,8 +33,8 @@ #include #include -#include #include +#include #include #include @@ -42,6 +42,13 @@ #include #include +#include + +#include +#include + +#include + #define DO_ALIGN 1 /* align all packet data accesses */ extern int kdp_getc(void); @@ -56,7 +63,7 @@ static u_short ip_id; /* ip packet ctr, for ids */ * Per RFC 768, August, 1980. */ #define UDP_TTL 60 /* deflt time to live for UDP packets */ -int udp_ttl=UDP_TTL; +int udp_ttl = UDP_TTL; static unsigned char exception_seq; static struct { @@ -93,25 +100,73 @@ static kdp_send_t kdp_en_send_pkt = 0; static kdp_receive_t kdp_en_recv_pkt = 0; -static unsigned int kdp_current_ip_address = 0; +static u_long kdp_current_ip_address = 0; static struct ether_addr kdp_current_mac_address = {{0, 0, 0, 0, 0, 0}}; -static char kdp_arp_init = 0; -static void kdp_handler( void *); +static void kdp_handler( void *); + +static unsigned int panic_server_ip = 0; +static unsigned int parsed_router_ip = 0; +static unsigned int router_ip = 0; +static unsigned int panicd_specified = 0; +static unsigned int router_specified = 0; + +static struct ether_addr router_mac = {{0, 0, 0 , 0, 0, 0}}; + +static u_char flag_panic_dump_in_progress = 0; +static u_char flag_router_mac_initialized = 0; + +static unsigned int panic_timeout = 100000; +static unsigned int last_panic_port = CORE_REMOTE_PORT; + +unsigned int SEGSIZE = 512; + +static unsigned int PANIC_PKTSIZE = 518; +static char panicd_ip_str[20]; +static char router_ip_str[20]; + +static unsigned int panic_block = 0; +static volatile unsigned int kdp_trigger_core_dump = 0; + +extern unsigned int not_in_kdp; void kdp_register_send_receive( - kdp_send_t send, + kdp_send_t send, kdp_receive_t receive) { unsigned int debug; kdp_en_send_pkt = send; kdp_en_recv_pkt = receive; + debug_log_init(); + PE_parse_boot_arg("debug", &debug); + if (debug & DB_KDP_BP_DIS) kdp_flag |= KDP_BP_DIS; + if (debug & DB_KDP_GETC_ENA) + kdp_flag |= KDP_GETC_ENA; + if (debug & DB_ARP) + kdp_flag |= KDP_ARP; + + if (debug & DB_KERN_DUMP_ON_PANIC) + kdp_flag |= KDP_PANIC_DUMP_ENABLED; + if (debug & DB_KERN_DUMP_ON_NMI) + kdp_flag |= PANIC_CORE_ON_NMI; + + if (debug & DB_DBG_POST_CORE) + kdp_flag |= 
DBG_POST_CORE; + + if (debug & DB_PANICLOG_DUMP) + kdp_flag |= PANIC_LOG_DUMP; + + if (PE_parse_boot_arg ("_panicd_ip", panicd_ip_str)) + panicd_specified = 1; + /* For the future, currently non-functional */ + if (PE_parse_boot_arg ("_router_ip", router_ip_str)) + router_specified = 1; kdp_flag |= KDP_READY; if (current_debugger == NO_CUR_DB) @@ -199,7 +254,6 @@ kdp_reply( ui->ui_sum = 0; #if DO_ALIGN bcopy((char *)ui, (char *)&pkt.data[pkt.off], sizeof(*ui)); - bcopy((char *)&pkt.data[pkt.off], (char *)ip, sizeof(*ip)); #else ip = (struct ip *)&pkt.data[pkt.off]; @@ -305,16 +359,14 @@ kdp_set_ip_and_mac_addresses( kdp_current_ip_address = ipaddr->s_addr; kdp_current_mac_address = *macaddr; - - /* Get the debug boot-arg to decide if ARP replies are allowed */ - if (kdp_arp_init == 0) { - PE_parse_boot_arg("debug", &debug); - if (debug & DB_ARP) - kdp_flag |= KDP_ARP; - kdp_arp_init = 1; - } } +void +kdp_set_gateway_mac(void *gatewaymac) +{ + router_mac = *(struct ether_addr *)gatewaymac; +} + struct ether_addr kdp_get_mac_addr(void) { @@ -332,23 +384,22 @@ kdp_get_ip_address(void) kdpDEBUGFlag &= DB_ARP when connected (but that certainly isn't a published interface!) */ - static void kdp_arp_reply(void) { struct ether_header *eh; - struct ether_arp aligned_ea, *ea = &aligned_ea; + struct ether_arp aligned_ea, *ea = &aligned_ea; struct in_addr isaddr, itaddr, myaddr; - struct ether_addr my_enaddr; + struct ether_addr my_enaddr; eh = (struct ether_header *)&pkt.data[pkt.off]; pkt.off += sizeof(struct ether_header); - memcpy((void *)ea, (void *)&pkt.data[pkt.off],sizeof(*ea)); + memcpy((void *)ea, (void *)&pkt.data[pkt.off],sizeof(*ea)); - if(ntohs(ea->arp_op) != ARPOP_REQUEST) - return; + if(ntohs(ea->arp_op) != ARPOP_REQUEST) + return; myaddr.s_addr = kdp_get_ip_address(); my_enaddr = kdp_get_mac_addr(); @@ -382,9 +433,9 @@ static void kdp_poll(void) { struct ether_header *eh; - struct udpiphdr aligned_ui, *ui = &aligned_ui; - struct ip aligned_ip, *ip = &aligned_ip; - static int msg_printed; + struct udpiphdr aligned_ui, *ui = &aligned_ui; + struct ip aligned_ip, *ip = &aligned_ip; + static int msg_printed; if (pkt.input) @@ -444,10 +495,25 @@ kdp_poll(void) } if (ntohs(ui->ui_dport) != KDP_REMOTE_PORT) { + if (CORE_REMOTE_PORT == (ntohs(ui->ui_dport)) && + flag_panic_dump_in_progress) { + last_panic_port = ui->ui_sport; + } + else return; } - - if (!kdp.is_conn) { + /* If we receive a kernel debugging packet whilst a + * core dump is in progress, abort the transfer and + * enter the debugger. 
+ */ + else + if (flag_panic_dump_in_progress) + { + abort_panic_transfer(); + return; + } + + if (!kdp.is_conn && !flag_panic_dump_in_progress) { enaddr_copy(eh->ether_dhost, &adr.loc.ea); adr.loc.in = ui->ui_dst; @@ -460,7 +526,6 @@ kdp_poll(void) */ pkt.len = ntohs((u_short)ui->ui_ulen) - sizeof (struct udphdr); pkt.input = TRUE; - } static void @@ -518,10 +583,10 @@ again: static void kdp_connection_wait(void) { - unsigned short reply_port; - boolean_t kdp_call_kdb(); - struct ether_addr kdp_mac_addr = kdp_get_mac_addr(); - unsigned int ip_addr = kdp_get_ip_address(); + unsigned short reply_port; + boolean_t kdp_call_kdb(); + struct ether_addr kdp_mac_addr = kdp_get_mac_addr(); + unsigned int ip_addr = ntohl(kdp_get_ip_address()); printf( "ethernet MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n", kdp_mac_addr.ether_addr_octet[0] & 0xff, @@ -537,85 +602,80 @@ kdp_connection_wait(void) (ip_addr & 0xff00) >> 8, (ip_addr & 0xff)); - printf("\nWaiting for remote debugger connection.\n"); - - if (reattach_wait == 0) - { -#ifdef MACH_PE - if( 0 != kdp_getc()) -#endif - { - printf("Options..... Type\n"); - printf("------------ ----\n"); - printf("continue.... 'c'\n"); - printf("reboot...... 'r'\n"); + printf("\nWaiting for remote debugger connection.\n"); + + if (reattach_wait == 0) { + if((kdp_flag & KDP_GETC_ENA) && (0 != kdp_getc())) + { + printf("Options..... Type\n"); + printf("------------ ----\n"); + printf("continue.... 'c'\n"); + printf("reboot...... 'r'\n"); #if MACH_KDB - printf("enter kdb... 'k'\n"); + printf("enter kdb... 'k'\n"); #endif - } - } - else - reattach_wait = 0; + } + } else + reattach_wait = 0; - exception_seq = 0; - do { - kdp_hdr_t aligned_hdr, *hdr = &aligned_hdr; + exception_seq = 0; + + do { + kdp_hdr_t aligned_hdr, *hdr = &aligned_hdr; - while (!pkt.input) { - int c; - c = kdp_getc(); - switch(c) { - case 'c': - printf("Continuing...\n"); - return; - case 'r': - printf("Rebooting...\n"); - kdp_reboot(); - break; + while (!pkt.input) { + if (kdp_flag & KDP_GETC_ENA) { + switch(kdp_getc()) { + case 'c': + printf("Continuing...\n"); + return; + case 'r': + printf("Rebooting...\n"); + kdp_reboot(); + break; #if MACH_KDB - case 'k': - printf("calling kdb...\n"); - if (kdp_call_kdb()) - return; - else - printf("not implemented...\n"); + case 'k': + printf("calling kdb...\n"); + if (kdp_call_kdb()) + return; + else + printf("not implemented...\n"); #endif - default: - break; - } - kdp_poll(); - } + default: + break; + } + } + kdp_poll(); + } - // check for sequence number of 0 #if DO_ALIGN - bcopy((char *)&pkt.data[pkt.off], (char *)hdr, sizeof(*hdr)); + bcopy((char *)&pkt.data[pkt.off], (char *)hdr, sizeof(*hdr)); #else - hdr = (kdp_hdr_t *)&pkt.data[pkt.off]; + hdr = (kdp_hdr_t *)&pkt.data[pkt.off]; #endif - if (hdr->request == KDP_HOSTREBOOT) { - kdp_reboot(); - /* should not return! */ - } - if (((hdr->request == KDP_CONNECT) || (hdr->request == KDP_REATTACH)) && - !hdr->is_reply && (hdr->seq == exception_seq)) { - if (kdp_packet((unsigned char *)&pkt.data[pkt.off], - (int *)&pkt.len, - (unsigned short *)&reply_port)) - kdp_reply(reply_port); - if (hdr->request == KDP_REATTACH) - { - reattach_wait = 0; - hdr->request=KDP_DISCONNECT; - exception_seq = 0; - } - } - - pkt.input = FALSE; - } while (!kdp.is_conn); + if (hdr->request == KDP_HOSTREBOOT) { + kdp_reboot(); + /* should not return! 
*/ + } + if (((hdr->request == KDP_CONNECT) || (hdr->request == KDP_REATTACH)) && + !hdr->is_reply && (hdr->seq == exception_seq)) { + if (kdp_packet((unsigned char *)&pkt.data[pkt.off], + (int *)&pkt.len, + (unsigned short *)&reply_port)) + kdp_reply(reply_port); + if (hdr->request == KDP_REATTACH) { + reattach_wait = 0; + hdr->request=KDP_DISCONNECT; + exception_seq = 0; + } + } + + pkt.input = FALSE; + } while (!kdp.is_conn); - if (current_debugger == KDP_CUR_DB) - active_debugger=1; - printf("Connected to remote debugger.\n"); + if (current_debugger == KDP_CUR_DB) + active_debugger=1; + printf("Connected to remote debugger.\n"); } static void @@ -679,6 +739,9 @@ kdp_raise_exception( { int index; + extern unsigned int disableDebugOuput; + extern unsigned int disableConsoleOutput; + disable_preemption(); if (saved_state == 0) @@ -700,17 +763,33 @@ kdp_raise_exception( * do this. I think the client and the host can get out of sync. */ kdp.saved_state = saved_state; - + if (pkt.input) kdp_panic("kdp_raise_exception"); + + if (((kdp_flag & KDP_PANIC_DUMP_ENABLED) || (kdp_flag & PANIC_LOG_DUMP)) + && (panicstr != (char *) 0)) { + + kdp_panic_dump(); + + } + else + if ((kdp_flag & PANIC_CORE_ON_NMI) && (panicstr == (char *) 0) && + !kdp.is_conn) { + + disableDebugOuput = disableConsoleOutput = FALSE; + kdp_panic_dump(); + + if (!(kdp_flag & DBG_POST_CORE)) + goto exit_raise_exception; + } + again: if (!kdp.is_conn) kdp_connection_wait(); - else - { + else { kdp_send_exception(exception, code, subcode); - if (kdp.exception_ack_needed) - { + if (kdp.exception_ack_needed) { kdp.exception_ack_needed = FALSE; kdp_remove_all_breakpoints(); printf("Remote debugger disconnected.\n"); @@ -726,12 +805,21 @@ kdp_raise_exception( printf("Remote debugger disconnected.\n"); } } + /* Allow triggering a panic core dump when connected to the machine + * Continuing after setting kdp_trigger_core_dump should do the + * trick. + */ + if (1 == kdp_trigger_core_dump) { + kdp_flag &= ~PANIC_LOG_DUMP; + kdp_flag |= KDP_PANIC_DUMP_ENABLED; + kdp_panic_dump(); + } kdp_sync_cache(); if (reattach_wait == 1) goto again; - + exit_raise_exception: enable_preemption(); } @@ -743,3 +831,392 @@ kdp_reset(void) kdp.exception_seq = kdp.conn_seq = 0; } +struct corehdr * +create_panic_header(unsigned int request, const char *corename, + unsigned length, unsigned int block) +{ + struct udpiphdr aligned_ui, *ui = &aligned_ui; + struct ip aligned_ip, *ip = &aligned_ip; + struct ether_header *eh; + struct corehdr *coreh; + const char *mode = "octet"; + char modelen = strlen(mode); + + pkt.off = sizeof (struct ether_header); + pkt.len = length + ((request == KDP_WRQ) ? modelen : 0) + + (corename ? strlen(corename): 0) + sizeof(struct corehdr); + +#if DO_ALIGN + bcopy((char *)&pkt.data[pkt.off], (char *)ui, sizeof(*ui)); +#else + ui = (struct udpiphdr *)&pkt.data[pkt.off]; +#endif + ui->ui_next = ui->ui_prev = 0; + ui->ui_x1 = 0; + ui->ui_pr = IPPROTO_UDP; + ui->ui_len = htons((u_short)pkt.len + sizeof (struct udphdr)); + ui->ui_src.s_addr = htonl(kdp_current_ip_address); + ui->ui_dst.s_addr = panic_server_ip; + ui->ui_sport = htons(CORE_REMOTE_PORT); + ui->ui_dport = ((request == KDP_WRQ) ? 
htons(CORE_REMOTE_PORT) : last_panic_port); + ui->ui_ulen = ui->ui_len; + ui->ui_sum = 0; +#if DO_ALIGN + bcopy((char *)ui, (char *)&pkt.data[pkt.off], sizeof(*ui)); + bcopy((char *)&pkt.data[pkt.off], (char *)ip, sizeof(*ip)); +#else + ip = (struct ip *)&pkt.data[pkt.off]; +#endif + ip->ip_len = htons(sizeof (struct udpiphdr) + pkt.len); + ip->ip_v = IPVERSION; + ip->ip_id = htons(ip_id++); + ip->ip_hl = sizeof (struct ip) >> 2; + ip->ip_ttl = udp_ttl; + ip->ip_sum = 0; + ip->ip_sum = htons(~ip_sum((unsigned char *)ip, ip->ip_hl)); +#if DO_ALIGN + bcopy((char *)ip, (char *)&pkt.data[pkt.off], sizeof(*ip)); +#endif + + pkt.len += sizeof (struct udpiphdr); + + pkt.off += sizeof (struct udpiphdr); + + coreh = (struct corehdr *) &pkt.data[pkt.off]; + coreh->th_opcode = htons((u_short)request); + + if (request == KDP_WRQ) + { + register char *cp; + + cp = coreh->th_u.tu_rpl; + strcpy (cp, corename); + cp += strlen(corename); + *cp++ = '\0'; + strcpy (cp, mode); + cp += modelen; + *cp++ = '\0'; + } + else + { + coreh->th_block = htonl((unsigned int) block); + } + + pkt.off -= sizeof (struct udpiphdr); + pkt.off -= sizeof (struct ether_header); + + eh = (struct ether_header *)&pkt.data[pkt.off]; + enaddr_copy(&kdp_current_mac_address, eh->ether_shost); + enaddr_copy(&router_mac, eh->ether_dhost); + eh->ether_type = htons(ETHERTYPE_IP); + + pkt.len += sizeof (struct ether_header); + return coreh; +} + +int kdp_send_panic_packets (unsigned int request, char *corename, + unsigned int length, unsigned int txstart) +{ + unsigned int txend = txstart + length; + int panic_error = 0; + + if (length <= SEGSIZE) { + if ((panic_error = kdp_send_panic_pkt (request, corename, length, (caddr_t) txstart)) < 0) { + printf ("kdp_send_panic_pkt failed with error %d\n", panic_error); + return panic_error; + } + } + else + { + while (txstart <= (txend - SEGSIZE)) { + if ((panic_error = kdp_send_panic_pkt (KDP_DATA, NULL, SEGSIZE, (caddr_t) txstart)) < 0) { + printf ("kdp_send_panic_pkt failed with error %d\n", panic_error); + return panic_error; + } + txstart += SEGSIZE; + if (!(panic_block % 2000)) + printf("."); + } + if (txstart < txend) { + kdp_send_panic_pkt(request, corename, (txend - txstart), (caddr_t) txstart); + } + } + return 0; /* all data queued successfully */ +} + +int +kdp_send_panic_pkt (unsigned int request, char *corename, + unsigned int length, void *panic_data) +{ + struct corehdr *th = NULL; + int poll_count = 2500; + + char rretries = 0, tretries = 0; + /* + extern signed long gIODebuggerSemaphore; + */ + pkt.off = pkt.len = 0; + + if (request == KDP_WRQ) /* longer timeout for initial request */ + poll_count += 1000; + +TRANSMIT_RETRY: + tretries++; + + if (tretries > 2) + printf("TX retry #%d ", tretries); + + if (tretries >= 15) { + /* This iokit layer issue can potentially + * cause a hang, uncomment to check if it's happening. 
+ */ + /* + if (gIODebuggerSemaphore) + printf("The gIODebuggerSemaphore is raised, preventing packet transmission (2760413)\n"); + */ + + printf ("Cannot contact panic server, timing out.\n"); + return (-3); + } + + th = create_panic_header(request, corename, length, panic_block); + + if (request == KDP_DATA || request == KDP_SEEK) { + if (!kdp_vm_read ((caddr_t) panic_data, (caddr_t) th->th_data, length)) { + memset ((caddr_t) th->th_data, 'X', length); + } + } + + (*kdp_en_send_pkt)(&pkt.data[pkt.off], pkt.len); + + /* Now we have to listen for the ACK */ + RECEIVE_RETRY: + + while (!pkt.input && flag_panic_dump_in_progress && poll_count) { + kdp_poll(); + poll_count--; + } + + if (pkt.input) { + + pkt.input = FALSE; + + th = (struct corehdr *) &pkt.data[pkt.off]; + /* These will eventually have to be ntoh[ls]'ed as appropriate */ + + if (th->th_opcode == KDP_ACK && th->th_block == panic_block) { + } + else + if (th->th_opcode == KDP_ERROR) { + printf("Panic server returned error %d, retrying\n", th->th_code); + poll_count = 1000; + goto TRANSMIT_RETRY; + } + else + if (th->th_block == (panic_block -1)) { + printf("RX retry "); + if (++rretries > 1) + goto TRANSMIT_RETRY; + else + goto RECEIVE_RETRY; + } + } + else + if (!flag_panic_dump_in_progress) /* we received a debugging packet, bail*/ + { + printf("Received a debugger packet,transferring control to debugger\n"); + /* Configure that if not set ..*/ + kdp_flag |= DBG_POST_CORE; + return (-2); + } + else /* We timed out */ + if (0 == poll_count) { + poll_count = 1000; + kdp_us_spin ((tretries%4) * panic_timeout); /* capped linear backoff */ + goto TRANSMIT_RETRY; + } + + panic_block++; + + if (request == KDP_EOF) + printf ("\nTotal number of packets transmitted: %d\n", panic_block); + + return 1; +} + +/* Since we don't seem to have an isdigit() .. */ +static int +isdigit (char c) +{ + return ((c > 47) && (c < 58)); +} +/* From user mode Libc - this ought to be in a library */ +static char * +strnstr(s, find, slen) + const char *s; + const char *find; + size_t slen; +{ + char c, sc; + size_t len; + + if ((c = *find++) != '\0') { + len = strlen(find); + do { + do { + if ((sc = *s++) == '\0' || slen-- < 1) + return (NULL); + } while (sc != c); + if (len > slen) + return (NULL); + } while (strncmp(s, find, len) != 0); + s--; + } + return ((char *)s); +} + +/* Horrid hack to extract xnu version if possible - a much cleaner approach + * would be to have the integrator run a script which would copy the + * xnu version into a string or an int somewhere at project submission + * time - makes assumptions about sizeof(version), but will not fail if + * it changes, but may be incorrect. + */ + +static int +kdp_get_xnu_version(char *versionbuf) +{ + extern const char version[]; + char *versionpos; + char vstr[10]; + int retval = -1; + + strcpy(vstr, "custom"); + if (version) { + if (kdp_vm_read(version, versionbuf, 90)) { + + versionbuf[89] = '\0'; + + versionpos = strnstr(versionbuf, "xnu-", 80); + + if (versionpos) { + strncpy (vstr, versionpos, (isdigit (versionpos[7]) ? 8 : 7)); + vstr[(isdigit (versionpos[7]) ? 
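/*
 * (Editorial gloss on the split expression around this note.) The ternary
 * terminates vstr at index 8 when versionpos[7] is a digit, else at 7.
 * Worked through on a hypothetical banner "root:xnu/xnu-517.obj~1":
 * versionpos points at "xnu-517.obj...", versionpos[7] is '.', so seven
 * characters are kept and vstr becomes "xnu-517"; a four-digit release
 * string such as "xnu-1228" would keep eight. Either way the result fits
 * the ten-byte vstr with its terminator.
 */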
8 : 7)] = '\0';
+			retval = 0;
+		}
+	}
+	}
+	strcpy(versionbuf, vstr);
+	return retval;
+}
+
+/* Primary dispatch routine for the system dump */
+void
+kdp_panic_dump()
+{
+	char corename[50];
+	char coreprefix[10];
+	int panic_error;
+
+	extern vm_map_t kernel_map;
+	extern int inet_aton(const char *cp, struct in_addr *pin);
+	extern char *debug_buf;
+	extern char *debug_buf_ptr;
+	uint64_t abstime;
+
+	printf("Entering system dump routine\n");
+
+	if (!panicd_specified) {
+		printf("A panic server was not specified in the boot-args, terminating kernel core dump.\n");
+		goto panic_dump_exit;
+	}
+
+	flag_panic_dump_in_progress = 1;
+	not_in_kdp = 0;
+
+	if (pkt.input)
+		kdp_panic("kdp_panic_dump");
+
+	kdp_get_xnu_version((char *) &pkt.data[0]);
+
+	/* The panic log bit takes precedence over the core dump bit */
+	if ((panicstr != (char *) 0) && (kdp_flag & PANIC_LOG_DUMP))
+		strncpy(coreprefix, "paniclog", sizeof(coreprefix));
+	else
+		strncpy(coreprefix, "core", sizeof(coreprefix));
+
+	abstime = mach_absolute_time();
+	pkt.data[10] = '\0';
+	snprintf(corename, sizeof(corename), "%s-%s-%d.%d.%d.%d-%x",
+	    coreprefix, &pkt.data[0],
+	    (kdp_current_ip_address & 0xff000000) >> 24,
+	    (kdp_current_ip_address & 0xff0000) >> 16,
+	    (kdp_current_ip_address & 0xff00) >> 8,
+	    (kdp_current_ip_address & 0xff),
+	    (unsigned int) (abstime & 0xffffffff));
+
+	if (0 == inet_aton(panicd_ip_str, (struct in_addr *) &panic_server_ip)) {
+		printf("inet_aton() failed interpreting %s as a panic server IP\n",
+		    panicd_ip_str);
+	}
+	else
+		printf("Attempting connection to panic server configured at IP %s\n",
+		    panicd_ip_str);
+
+	if (router_specified) {
+		if (0 == inet_aton(router_ip_str, (struct in_addr *) &parsed_router_ip)) {
+			printf("inet_aton() failed interpreting %s as an IP\n", router_ip_str);
+		}
+		else {
+			router_ip = parsed_router_ip;
+			printf("Routing through specified router IP %s (%d)\n", router_ip_str, router_ip);
+			/* We will eventually need to resolve the router's MAC ourselves,
+			 * if one is specified, rather than having it set through the BSD
+			 * callback, but the _router_ip option does not function currently.
+			 */
+		}
+	}
+	/* These & 0xffs aren't necessary, but cut&paste is ever so convenient */
+	printf("Routing via router MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+	    router_mac.ether_addr_octet[0] & 0xff,
+	    router_mac.ether_addr_octet[1] & 0xff,
+	    router_mac.ether_addr_octet[2] & 0xff,
+	    router_mac.ether_addr_octet[3] & 0xff,
+	    router_mac.ether_addr_octet[4] & 0xff,
+	    router_mac.ether_addr_octet[5] & 0xff);
+
+	printf("Kernel map size is %d\n", get_vmmap_size(kernel_map));
+	printf("Sending write request for %s\n", corename);
+
+	if ((panic_error = kdp_send_panic_pkt(KDP_WRQ, corename, 0, NULL)) < 0) {
+		printf("kdp_send_panic_pkt failed with error %d\n", panic_error);
+		goto panic_dump_exit;
+	}
+
+	/* Just the panic log requested */
+	if ((panicstr != (char *) 0) && (kdp_flag & PANIC_LOG_DUMP)) {
+		printf("Transmitting panic log, please wait: ");
+		kdp_send_panic_packets(KDP_DATA, corename, (debug_buf_ptr - debug_buf), (unsigned int) debug_buf);
+		kdp_send_panic_pkt(KDP_EOF, NULL, 0, ((void *) 0));
+		printf("Please file a bug report on this panic, if possible.\n");
+		goto panic_dump_exit;
+	}
+
+	/* We want a core dump if we're here */
+	kern_dump();
+panic_dump_exit:
+	not_in_kdp = 1;
+	flag_panic_dump_in_progress = 0;
+	panic_block = 0;
+	pkt.input = FALSE;
+	pkt.len = 0;
+	kdp_reset();
+	return;
+}
+
+void
+abort_panic_transfer()
+{
+	flag_panic_dump_in_progress = 0;
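/*
 * (Editorial note on the corename format built above.) For a hypothetical
 * machine at 10.0.1.2 the snprintf yields names such as
 *
 *	core-xnu-517-10.0.1.2-3d6ef0a2
 *	paniclog-xnu-517-10.0.1.2-3d6ef0a2
 *
 * i.e. prefix, xnu version string, dotted-quad IP, then the low 32 bits of
 * mach_absolute_time() so successive dumps from one machine don't collide.
 */
+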
not_in_kdp = 1; + panic_block = 0; +} diff --git a/osfmk/kdp/kdp_udp.h b/osfmk/kdp/kdp_udp.h index 0dda441bd..7b8c42916 100644 --- a/osfmk/kdp/kdp_udp.h +++ b/osfmk/kdp/kdp_udp.h @@ -85,13 +85,13 @@ struct ip { u_long ip_w; struct { unsigned int -#if _BIG_ENDIAN == __LITTLE_ENDIAN__ +#ifdef __LITTLE_ENDIAN__ ip_xhl:4, /* header length */ ip_xv:4, /* version */ ip_xtos:8, /* type of service */ ip_xlen:16; /* total length */ #endif -#if _BIG_ENDIAN == __BIG_ENDIAN__ +#ifdef __BIG_ENDIAN__ ip_xv:4, /* version */ ip_xhl:4, /* header length */ ip_xtos:8, /* type of service */ @@ -128,8 +128,9 @@ typedef struct ether_header ether_header_t; #define ETHERTYPE_IP 0x0800 /* IP protocol */ #define ntohs(x) OSSwapBigToHostInt16(x) +#define ntohl(x) OSSwapBigToHostInt32(x) #define htons(x) OSSwapHostToBigInt16(x) - +#define htonl(x) OSSwapHostToBigInt32(x) /* * Ethernet Address Resolution Protocol. * diff --git a/osfmk/kdp/ml/i386/kdp_machdep.c b/osfmk/kdp/ml/i386/kdp_machdep.c index 408732427..6211fdae9 100644 --- a/osfmk/kdp/ml/i386/kdp_machdep.c +++ b/osfmk/kdp/ml/i386/kdp_machdep.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #define KDP_TEST_HARNESS 0 @@ -340,7 +341,7 @@ kdp_i386_backtrace(void *_frame, int nframes) } return; invalid: - printf("invalid frame pointer %x\n",frame->prev); + printf("invalid frame pointer %x\n",frame); } void @@ -353,6 +354,8 @@ kdp_i386_trap( { unsigned int exception, subcode = 0, code; + mp_kdp_enter(); + if (trapno != T_INT3 && trapno != T_DEBUG) printf("unexpected kernel trap %x eip %x\n", trapno, saved_state->eip); @@ -419,9 +422,11 @@ kdp_i386_trap( break; } - kdp_i386_backtrace((void *) saved_state->ebp, 10); +// kdp_i386_backtrace((void *) saved_state->ebp, 10); kdp_raise_exception(exception, code, subcode, saved_state); + + mp_kdp_exit(); } boolean_t diff --git a/osfmk/kdp/ml/i386/kdp_vm.c b/osfmk/kdp/ml/i386/kdp_vm.c index 53783b91f..a837bc512 100644 --- a/osfmk/kdp/ml/i386/kdp_vm.c +++ b/osfmk/kdp/ml/i386/kdp_vm.c @@ -31,6 +31,8 @@ unsigned kdp_vm_read( caddr_t, caddr_t, unsigned); unsigned kdp_vm_write( caddr_t, caddr_t, unsigned); unsigned kdp_copy_kmem( caddr_t, caddr_t, unsigned); +unsigned int not_in_kdp = 1; /* Cleared when we begin to access vm functions in kdp */ + /* * */ @@ -52,4 +54,8 @@ unsigned kdp_vm_write( { return kdp_copy_kmem(src, dst, len); } - +/* A stub until i386 support is added for remote kernel core dumps */ +int kern_dump() +{ + return 0; +} diff --git a/osfmk/kdp/ml/ppc/kdp_asm.s b/osfmk/kdp/ml/ppc/kdp_asm.s index ae3409cc4..3cb2a0a74 100644 --- a/osfmk/kdp/ml/ppc/kdp_asm.s +++ b/osfmk/kdp/ml/ppc/kdp_asm.s @@ -41,11 +41,11 @@ ENTRY(kdp_call_with_ctx, TAG_NO_FRAME_USED) - mfmsr r7 /* Get the MSR */ + lis r2,hi16(MASK(MSR_VEC)) ; Get the vector enable + mfmsr r7 ; Get the MSR + ori r2,r2,lo16(MASK(MSR_EE)|MASK(MSR_FP)) ; Get FP and EE mflr r0 - rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off interruptions enable bit */ - rlwinm r7,r7,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r7,r7,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off + andc r7,r7,r2 ; Clear FP, VEC, and EE mtmsr r7 isync ; Need this because we may have ditched fp/vec mfsprg r8,0 /* Get the per_proc block address */ @@ -72,13 +72,13 @@ ENTRY(kdp_call_with_ctx, TAG_NO_FRAME_USED) bl EXT(kdp_trap) + lis r2,hi16(MASK(MSR_VEC)) ; Get the vector enable mfmsr r0 /* Get the MSR */ + ori r2,r2,lo16(MASK(MSR_EE)|MASK(MSR_FP)) ; Get FP and EE addi r1, r1, FM_SIZE - rlwinm r0,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off interruptions 
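; (Editorial note on the MSR hunk here.) The three deleted rlwinm bit-clears
; are folded into one mask-and-clear: lis/ori build
; MASK(MSR_VEC) | MASK(MSR_FP) | MASK(MSR_EE) in r2 (the vector-enable bit
; sits in the high halfword, FP and EE in the low one), and andc computes
; rS & ~r2, so vectors, floating point, and external interrupts all go off
; in a single instruction before the mtmsr.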
enable bit */ - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off + andc r0,r0,r2 ; Clear FP, VEC, and EE mtmsr r0 - isync ; Need this because we may have ditched fp/vec + isync ; Need this because we may have ditched fp/vec mfsprg r8,0 /* Get the per_proc block address */ diff --git a/osfmk/kdp/ml/ppc/kdp_machdep.c b/osfmk/kdp/ml/ppc/kdp_machdep.c index 456c5a764..d1dce210a 100644 --- a/osfmk/kdp/ml/ppc/kdp_machdep.c +++ b/osfmk/kdp/ml/ppc/kdp_machdep.c @@ -120,6 +120,57 @@ kdp_getintegerstate( bzero((char *)state,sizeof (struct ppc_thread_state)) ; + state->srr0 = (unsigned int)saved_state->save_srr0; + state->srr1 = (unsigned int)saved_state->save_srr1; + state->r0 = (unsigned int)saved_state->save_r0; + state->r1 = (unsigned int)saved_state->save_r1; + state->r2 = (unsigned int)saved_state->save_r2; + state->r3 = (unsigned int)saved_state->save_r3; + state->r4 = (unsigned int)saved_state->save_r4; + state->r5 = (unsigned int)saved_state->save_r5; + state->r6 = (unsigned int)saved_state->save_r6; + state->r7 = (unsigned int)saved_state->save_r7; + state->r8 = (unsigned int)saved_state->save_r8; + state->r9 = (unsigned int)saved_state->save_r9; + state->r10 = (unsigned int)saved_state->save_r10; + state->r11 = (unsigned int)saved_state->save_r11; + state->r12 = (unsigned int)saved_state->save_r12; + state->r13 = (unsigned int)saved_state->save_r13; + state->r14 = (unsigned int)saved_state->save_r14; + state->r15 = (unsigned int)saved_state->save_r15; + state->r16 = (unsigned int)saved_state->save_r16; + state->r17 = (unsigned int)saved_state->save_r17; + state->r18 = (unsigned int)saved_state->save_r18; + state->r19 = (unsigned int)saved_state->save_r19; + state->r20 = (unsigned int)saved_state->save_r20; + state->r21 = (unsigned int)saved_state->save_r21; + state->r22 = (unsigned int)saved_state->save_r22; + state->r23 = (unsigned int)saved_state->save_r23; + state->r24 = (unsigned int)saved_state->save_r24; + state->r25 = (unsigned int)saved_state->save_r25; + state->r26 = (unsigned int)saved_state->save_r26; + state->r27 = (unsigned int)saved_state->save_r27; + state->r28 = (unsigned int)saved_state->save_r28; + state->r29 = (unsigned int)saved_state->save_r29; + state->r30 = (unsigned int)saved_state->save_r30; + state->r31 = (unsigned int)saved_state->save_r31; + state->cr = (unsigned int)saved_state->save_cr; + state->xer = (unsigned int)saved_state->save_xer; + state->lr = (unsigned int)saved_state->save_lr; + state->ctr = (unsigned int)saved_state->save_ctr; +} + +static void +kdp_getintegerstate64( + struct ppc_thread_state64 *state +) +{ + struct savearea *saved_state; + + saved_state = kdp.saved_state; + + bzero((char *)state,sizeof (struct ppc_thread_state64)) ; + state->srr0 = saved_state->save_srr0; state->srr1 = saved_state->save_srr1; state->r0 = saved_state->save_r0; @@ -175,13 +226,19 @@ kdp_machine_read_regs( kdp_getintegerstate((struct ppc_thread_state *)data); *size = PPC_THREAD_STATE_COUNT * sizeof(int); return KDPERR_NO_ERROR; - + + case PPC_THREAD_STATE64: + dprintf(("kdp_readregs THREAD_STATE\n")); + kdp_getintegerstate64((struct ppc_thread_state64 *)data); + *size = PPC_THREAD_STATE64_COUNT * sizeof(int); + return KDPERR_NO_ERROR; + case PPC_FLOAT_STATE: dprintf(("kdp_readregs THREAD_FPSTATE\n")); bzero((char *)data ,sizeof(struct ppc_float_state)); *size = PPC_FLOAT_STATE_COUNT * sizeof(int); return KDPERR_NO_ERROR; - + default: dprintf(("kdp_readregs bad flavor %d\n")); return 
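/*
 * (Editorial note on the 64-bit flavor added above.) By Mach convention,
 * thread-state counts are expressed in 32-bit natural_t units even for
 * 64-bit flavors, which is why the PPC_THREAD_STATE64 case still multiplies
 * PPC_THREAD_STATE64_COUNT by sizeof(int): each 64-bit register simply
 * contributes two units to the count.
 */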
KDPERR_BADFLAVOR; @@ -237,6 +294,55 @@ kdp_setintegerstate( saved_state->save_ctr = state->ctr; } +static void +kdp_setintegerstate64( + struct ppc_thread_state64 *state +) +{ + struct savearea *saved_state; + + saved_state = kdp.saved_state; + + saved_state->save_srr0 = state->srr0; + saved_state->save_srr1 = state->srr1; + saved_state->save_r0 = state->r0; + saved_state->save_r1 = state->r1; + saved_state->save_r2 = state->r2; + saved_state->save_r3 = state->r3; + saved_state->save_r4 = state->r4; + saved_state->save_r5 = state->r5; + saved_state->save_r6 = state->r6; + saved_state->save_r7 = state->r7; + saved_state->save_r8 = state->r8; + saved_state->save_r9 = state->r9; + saved_state->save_r10 = state->r10; + saved_state->save_r11 = state->r11; + saved_state->save_r12 = state->r12; + saved_state->save_r13 = state->r13; + saved_state->save_r14 = state->r14; + saved_state->save_r15 = state->r15; + saved_state->save_r16 = state->r16; + saved_state->save_r17 = state->r17; + saved_state->save_r18 = state->r18; + saved_state->save_r19 = state->r19; + saved_state->save_r20 = state->r20; + saved_state->save_r21 = state->r21; + saved_state->save_r22 = state->r22; + saved_state->save_r23 = state->r23; + saved_state->save_r24 = state->r24; + saved_state->save_r25 = state->r25; + saved_state->save_r26 = state->r26; + saved_state->save_r27 = state->r27; + saved_state->save_r28 = state->r28; + saved_state->save_r29 = state->r29; + saved_state->save_r30 = state->r30; + saved_state->save_r31 = state->r31; + saved_state->save_cr = state->cr; + saved_state->save_xer = state->xer; + saved_state->save_lr = state->lr; + saved_state->save_ctr = state->ctr; +} + kdp_error_t kdp_machine_write_regs( unsigned int cpu, @@ -255,11 +361,19 @@ kdp_machine_write_regs( DumpTheSave((struct savearea *)data); /* (TEST/DEBUG) */ #endif return KDPERR_NO_ERROR; - + + case PPC_THREAD_STATE64: + dprintf(("kdp_writeregs THREAD_STATE64\n")); + kdp_setintegerstate64((struct ppc_thread_state64 *)data); + +#if KDP_TEST_HARNESS + DumpTheSave((struct savearea *)data); /* (TEST/DEBUG) */ +#endif + return KDPERR_NO_ERROR; case PPC_FLOAT_STATE: dprintf(("kdp_writeregs THREAD_FPSTATE\n")); return KDPERR_NO_ERROR; - + default: dprintf(("kdp_writeregs bad flavor %d\n")); return KDPERR_BADFLAVOR; @@ -421,7 +535,7 @@ kdp_trap( ) { unsigned int *fp; - unsigned int register sp; + unsigned int sp; struct savearea *state; if (kdp_noisy) { @@ -449,7 +563,7 @@ kdp_trap( if (kdp_noisy) printf("kdp_trap: kdp_raise_exception() ret\n"); - if (*((int *)saved_state->save_srr0) == 0x7c800008) + if ((unsigned int)(saved_state->save_srr0) == 0x7c800008) saved_state->save_srr0 += 4; /* BKPT_SIZE */ if(saved_state->save_srr1 & (MASK(MSR_SE) | MASK(MSR_BE))) { /* Are we just stepping or continuing */ diff --git a/osfmk/kdp/ml/ppc/kdp_misc.s b/osfmk/kdp/ml/ppc/kdp_misc.s index be69e2ab0..9eb3f6c6f 100644 --- a/osfmk/kdp/ml/ppc/kdp_misc.s +++ b/osfmk/kdp/ml/ppc/kdp_misc.s @@ -28,59 +28,41 @@ #include #include -.set kLog2CacheLineSize, 5 -.set kCacheLineSize, 32 - -ENTRY(kdp_flush_cache, TAG_NO_FRAME_USED) - cmpi cr0,0,r4,0 /* is this zero length? 
*/ - add r4,r3,r4 /* calculate last byte + 1 */ - subi r4,r4,1 /* calculate last byte */ - - srwi r5,r3,kLog2CacheLineSize /* calc first cache line index */ - srwi r4,r4,kLog2CacheLineSize /* calc last cache line index */ - beq cr0, LdataToCodeDone /* done if zero length */ - - subf r4,r5,r4 /* calc diff (# lines minus 1) */ - addi r4,r4,1 /* # of cache lines to flush */ - slwi r5,r5,kLog2CacheLineSize /* calc addr of first cache line */ - - /* flush the data cache lines */ - mr r3,r5 /* starting address for loop */ - mtctr r4 /* loop count */ -LdataToCodeFlushLoop: - dcbf 0, r3 /* flush the data cache line */ - addi r3,r3,kCacheLineSize /* advance to next cache line */ - bdnz LdataToCodeFlushLoop /* loop until count is zero */ - sync /* wait until RAM is valid */ - - /* invalidate the code cache lines */ - mr r3,r5 /* starting address for loop */ - mtctr r4 /* loop count */ -LdataToCodeInvalidateLoop: - icbi 0, r3 /* invalidate code cache line */ - addi r3,r3,kCacheLineSize /* advance to next cache line */ - bdnz LdataToCodeInvalidateLoop /* loop until count is zero */ - sync /* wait until last icbi completes */ - isync /* discard prefetched instructions */ -LdataToCodeDone: - blr /* return nothing */ - ENTRY(kdp_sync_cache, TAG_NO_FRAME_USED) sync /* data sync */ isync /* inst sync */ blr /* return nothing */ -ENTRY(kdp_xlate_off, TAG_NO_FRAME_USED) - mfmsr r3 - rlwinm r3,r3,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r3,r3,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r4, r3, 0, MSR_DR_BIT+1, MSR_IR_BIT-1 - mtmsr r4 - isync - blr -ENTRY(kdp_xlate_restore, TAG_NO_FRAME_USED) - mtmsr r3 - isync - blr +; +; This is a really stupid physical copy. 1 whole byte at a time... +; Source and dest are long longs. We do this with 64-bit on if +; supported. +; + + .align 5 + .globl EXT(kdp_copy_phys) + +LEXT(kdp_copy_phys) + mflr r12 ; Save return + + bl EXT(ml_set_physical_disabled) ; No DR and get 64-bit + + rlwinm r3,r3,0,1,0 ; Dup low to high source + rlwinm r5,r5,0,1,0 ; Dup low to high dest + rlwimi r3,r4,0,0,31 ; Copy bottom on in source + rlwimi r5,r6,0,0,31 ; Copy bottom on in dest + +kcpagain: addic. r7,r7,-1 ; Drop count + blt-- kcpdone ; All done... + lbz r0,0(r3) ; Grab a whole one + stb r0,0(r5) ; Lay it gently down + addi r3,r3,1 ; Next source + addi r5,r5,1 ; Next destination + b kcpagain ; Once more with feeling... + +kcpdone: bl EXT(ml_restore) ; Put trans, etc back + mtlr r12 ; Restore return + blr ; Come again please... 
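(Editorial sketch.) A user-space C rendering of what kdp_copy_phys does, per
the register comments in the assembly above: each 64-bit address arrives as a
hi/lo pair of 32-bit GPRs, is spliced back together, and the copy proceeds
one byte per loop iteration with translation disabled. The phys_mem array and
accessors below are stand-ins so the sketch is self-contained and runnable;
the real routine brackets the loop with ml_set_physical_disabled() and
ml_restore() instead.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t addr64_t;

static uint8_t phys_mem[256];		/* fake physical memory for the sketch */

static uint8_t	read_phys_byte(addr64_t pa)		{ return phys_mem[pa]; }
static void	write_phys_byte(addr64_t pa, uint8_t v)	{ phys_mem[pa] = v; }

static void
example_kdp_copy_phys(uint32_t src_hi, uint32_t src_lo,
    uint32_t dst_hi, uint32_t dst_lo, uint32_t count)
{
	addr64_t src = ((addr64_t)src_hi << 32) | src_lo;
	addr64_t dst = ((addr64_t)dst_hi << 32) | dst_lo;

	/* ml_set_physical_disabled() here in the real routine */
	while (count--)
		write_phys_byte(dst++, read_phys_byte(src++));
	/* ml_restore() here */
}

int main(void)
{
	memcpy(phys_mem, "panic", 6);
	example_kdp_copy_phys(0, 0, 0, 100, 6);
	printf("%s\n", &phys_mem[100]);		/* prints: panic */
	return 0;
}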
+ diff --git a/osfmk/kdp/ml/ppc/kdp_vm.c b/osfmk/kdp/ml/ppc/kdp_vm.c index e7a5e1333..241ce0bf2 100644 --- a/osfmk/kdp/ml/ppc/kdp_vm.c +++ b/osfmk/kdp/ml/ppc/kdp_vm.c @@ -32,138 +32,151 @@ #include #include #include -#include #include +#include +#include +#include +#include + +#include +#include +#include +#include + + pmap_t kdp_pmap=0; boolean_t kdp_trans_off=0; +boolean_t kdp_read_io =0; -unsigned kdp_xlate_off(void); -void kdp_xlate_restore(unsigned); -void kdp_flush_cache(vm_offset_t, unsigned); -vm_offset_t kdp_vtophys(pmap_t pmap, vm_offset_t vaddr); -void kdp_bcopy( unsigned char *, unsigned char *, unsigned); -void kdp_pmemcpy( vm_offset_t , vm_offset_t, unsigned); unsigned kdp_vm_read( caddr_t, caddr_t, unsigned); unsigned kdp_vm_write( caddr_t, caddr_t, unsigned); -extern vm_offset_t kvtophys(vm_offset_t); -extern vm_offset_t mem_actual; -/* - * - */ -vm_offset_t kdp_vtophys( - pmap_t pmap, - vm_offset_t va) -{ - register mapping *mp; - register vm_offset_t pa; - - pa = (vm_offset_t)LRA(pmap->space,(void *)va); +typedef struct { + int flavor; /* the number for this flavor */ + int count; /* count of ints in this flavor */ +} mythread_state_flavor_t; - if (pa != 0) - return(pa); +/* These will need to be uncommented and completed + *if we support other architectures + */ - mp = hw_lock_phys_vir(pmap->space, va); - if((unsigned int)mp&1) { - return 0; - } +/* +#if defined (__ppc__) +*/ +static mythread_state_flavor_t thread_flavor_array[] = { + {PPC_THREAD_STATE , PPC_THREAD_STATE_COUNT}, +}; +/* +#elif defined (__i386__) +mythread_state_flavor_t thread_flavor_array [] = { + {i386_THREAD_STATE, i386_THREAD_STATE_COUNT}, +}; +#else +#error architecture not supported +#endif +*/ +static int kdp_mynum_flavors = 1; +static int MAX_TSTATE_FLAVORS = 1; - if(!mp) { /* If it was not a normal page */ - pa = hw_cvp_blk(pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */ - return pa; /* Return physical address */ - } +typedef struct { + vm_offset_t header; + int hoffset; + mythread_state_flavor_t *flavors; + int tstate_size; +} tir_t; - mp = hw_cpv(mp); +unsigned int not_in_kdp = 1; /* Cleared when we begin to access vm functions in kdp */ - if(!mp->physent) { - pa = (vm_offset_t)((mp->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); - } else { - pa = (vm_offset_t)((mp->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); - hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK); - } +char command_buffer[512]; - return(pa); -} +static struct vm_object test_object; /* * */ -void kdp_bcopy( - unsigned char *src, - unsigned char *dst, - unsigned cnt) +addr64_t kdp_vtophys( + pmap_t pmap, + addr64_t va) { - while (cnt--) - *dst++ = *src++; + addr64_t pa; + ppnum_t pp; + + pp = pmap_find_phys(pmap, va); /* Get the page number */ + if(!pp) return 0; /* Just return if no translation */ + + pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL); /* Shove in the page offset */ + return(pa); } /* - * + * Note that kdp_vm_read() does not translate the destination address.Therefore + * there's an implicit assumption that the destination will be a statically + * allocated structure, since those map to the same phys. and virt. 
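 *
 * (Editorial sketch of the translation this file now relies on:
 * pmap_find_phys() hands back a physical page number, and kdp_vtophys()
 * splices the 12-bit page offset back in. The copy loops below then clamp
 * every bcopy_phys() to 4096 - (va & 0xFFF) bytes so no single copy crosses
 * a page boundary, since adjacent virtual pages may map to unrelated
 * physical pages. In stand-alone form:
 *
 *	typedef uint64_t addr64_t;
 *	typedef uint32_t ppnum_t;
 *
 *	static addr64_t
 *	example_vtophys(ppnum_t pp, addr64_t va)   // pp from pmap_find_phys()
 *	{
 *		if (!pp)
 *			return 0;		// no translation
 *		return ((addr64_t)pp << 12) | (va & 0xFFFULL);
 *	}
 *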
addresses */ unsigned kdp_vm_read( caddr_t src, caddr_t dst, unsigned len) { - vm_offset_t cur_virt_src, cur_virt_dst; - vm_offset_t cur_phys_src; + addr64_t cur_virt_src, cur_virt_dst; + addr64_t cur_phys_src; unsigned resid, cnt; - unsigned msr; + unsigned int dummy; + pmap_t pmap; #ifdef KDP_VM_READ_DEBUG kprintf("kdp_vm_read1: src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *)src)[0], ((unsigned long *)src)[1]); #endif + + cur_virt_src = (addr64_t)((unsigned int)src); + cur_virt_dst = (addr64_t)((unsigned int)dst); + if (kdp_trans_off) { - cur_virt_src = (vm_offset_t)src; - if((vm_offset_t)src >= mem_actual) return 0; /* Can't read where there's not any memory */ - cur_virt_dst = (vm_offset_t)dst; - resid = (mem_actual - (vm_offset_t)src) > len ? len : (mem_actual - (vm_offset_t)src); + + + resid = len; /* Get the length to copy */ while (resid != 0) { - cur_phys_src = cur_virt_src; - cnt = ((cur_virt_src + NBPG) & (-NBPG)) - cur_virt_src; + + if(kdp_read_io == 0) + if(!mapping_phys_lookup((ppnum_t)(cur_virt_src >> 12), &dummy)) return 0; /* Can't read where there's not any memory */ + + cnt = 4096 - (cur_virt_src & 0xFFF); /* Get length left on page */ + if (cnt > resid) cnt = resid; - msr = kdp_xlate_off(); - kdp_bcopy((unsigned char *)cur_phys_src, - (unsigned char *)cur_virt_dst, cnt); - kdp_xlate_restore(msr); - cur_virt_src +=cnt; - cur_virt_dst +=cnt; + + bcopy_phys(cur_virt_src, cur_virt_dst, cnt); /* Copy stuff over */ + + cur_virt_src += cnt; + cur_virt_dst += cnt; resid -= cnt; } + } else { - cur_virt_src = (vm_offset_t)src; - cur_virt_dst = (vm_offset_t)dst; + resid = len; + if(kdp_pmap) pmap = kdp_pmap; /* If special pmap, use it */ + else pmap = kernel_pmap; /* otherwise, use kernel's */ + while (resid != 0) { - if (kdp_pmap) { - if ((cur_phys_src = - kdp_vtophys(kdp_pmap,trunc_page(cur_virt_src))) == 0) - goto exit; - cur_phys_src += (cur_virt_src & PAGE_MASK); - } else { - if ((cur_phys_src = kdp_vtophys(kernel_pmap,cur_virt_src)) == 0) - goto exit; - } - - cnt = ((cur_virt_src + NBPG) & (-NBPG)) - cur_virt_src; + + if((cur_phys_src = kdp_vtophys(pmap, cur_virt_src)) == 0) goto exit; + if(kdp_read_io == 0) + if(!mapping_phys_lookup((ppnum_t)(cur_phys_src >> 12), &dummy)) goto exit; /* Can't read where there's not any memory */ + + cnt = 4096 - (cur_virt_src & 0xFFF); /* Get length left on page */ if (cnt > resid) cnt = resid; - if (kdp_pmap) { + #ifdef KDP_VM_READ_DEBUG - kprintf("kdp_vm_read2: pmap %x, virt %x, phys %x\n", - kdp_pmap, cur_virt_src, cur_phys_src); + kprintf("kdp_vm_read2: pmap %08X, virt %016LLX, phys %016LLX\n", + pmap, cur_virt_src, cur_phys_src); #endif - msr = kdp_xlate_off(); - kdp_bcopy((unsigned char *)cur_phys_src, - (unsigned char *)cur_virt_dst, cnt); - kdp_xlate_restore(msr); - } else { - kdp_bcopy((unsigned char *)cur_virt_src, - (unsigned char *)cur_virt_dst, cnt); - } + + bcopy_phys(cur_phys_src, cur_virt_dst, cnt); /* Copy stuff over */ + cur_virt_src +=cnt; cur_virt_dst +=cnt; resid -= cnt; @@ -173,7 +186,7 @@ exit: #ifdef KDP_VM_READ_DEBUG kprintf("kdp_vm_read: ret %08X\n", len-resid); #endif - return (len-resid); + return (len - resid); } /* @@ -184,17 +197,17 @@ unsigned kdp_vm_write( caddr_t dst, unsigned len) { - vm_offset_t cur_virt_src, cur_virt_dst; - vm_offset_t cur_phys_src, cur_phys_dst; - unsigned resid, cnt, cnt_src, cnt_dst; - unsigned msr; + addr64_t cur_virt_src, cur_virt_dst; + addr64_t cur_phys_src, cur_phys_dst; + unsigned resid, cnt, cnt_src, cnt_dst; #ifdef KDP_VM_WRITE_DEBUG printf("kdp_vm_write: 
src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *)src)[0], ((unsigned long *)src)[1]); #endif - cur_virt_src = (vm_offset_t)src; - cur_virt_dst = (vm_offset_t)dst; + cur_virt_src = (addr64_t)((unsigned int)src); + cur_virt_dst = (addr64_t)((unsigned int)dst); + resid = len; while (resid != 0) { @@ -213,16 +226,341 @@ unsigned kdp_vm_write( if (cnt > resid) cnt = resid; - msr = kdp_xlate_off(); - kdp_bcopy((unsigned char *)cur_virt_src, (unsigned char *)cur_phys_dst, cnt); - kdp_flush_cache(cur_phys_dst, cnt); - kdp_xlate_restore(msr); + bcopy_phys(cur_phys_src, cur_phys_dst, cnt); /* Copy stuff over */ + sync_cache64(cur_phys_dst, cnt); /* Sync caches */ cur_virt_src +=cnt; cur_virt_dst +=cnt; resid -= cnt; } exit: - return (len-resid); + return (len - resid); +} + + +static void +kern_collectth_state(thread_act_t th_act, tir_t *t) +{ + vm_offset_t header; + int hoffset, i ; + mythread_state_flavor_t *flavors; + struct thread_command *tc; + /* + * Fill in thread command structure. + */ + header = t->header; + hoffset = t->hoffset; + flavors = t->flavors; + + tc = (struct thread_command *) (header + hoffset); + tc->cmd = LC_THREAD; + tc->cmdsize = sizeof(struct thread_command) + + t->tstate_size; + hoffset += sizeof(struct thread_command); + /* + * Follow with a struct thread_state_flavor and + * the appropriate thread state struct for each + * thread state flavor. + */ + for (i = 0; i < kdp_mynum_flavors; i++) { + *(mythread_state_flavor_t *)(header+hoffset) = + flavors[i]; + hoffset += sizeof(mythread_state_flavor_t); + + if (machine_thread_get_kern_state(th_act, flavors[i].flavor, + (thread_state_t) (header+hoffset), + &flavors[i].count) != KERN_SUCCESS) + printf ("Failure in machine_thread_get_kern_state()\n"); + hoffset += flavors[i].count*sizeof(int); + } + + t->hoffset = hoffset; } +int +kdp_dump_trap( + int type, + struct savearea *regs) +{ + extern int kdp_flag; + + printf ("An unexpected trap (type %d) occurred during the kernel dump, terminating.\n", type); + kdp_send_panic_pkt (KDP_EOF, NULL, 0, ((void *) 0)); + abort_panic_transfer(); + kdp_flag &= ~KDP_PANIC_DUMP_ENABLED; + kdp_flag &= ~PANIC_CORE_ON_NMI; + kdp_flag &= ~PANIC_LOG_DUMP; + + kdp_reset(); + + kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state); + return; +} + +int +kern_dump() +{ + int error = 0; + vm_map_t map; + unsigned int thread_count, segment_count; + unsigned int command_size = 0, header_size = 0, tstate_size = 0; + unsigned int hoffset = 0, foffset = 0, nfoffset = 0, vmoffset = 0; + unsigned int max_header_size = 0; + vm_offset_t header; + struct machine_slot *ms; + struct mach_header *mh; + struct segment_command *sc; + struct thread_command *tc; + vm_size_t size; + vm_prot_t prot = 0; + vm_prot_t maxprot = 0; + vm_inherit_t inherit = 0; + vm_offset_t offset; + int error1; + mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS]; + vm_size_t nflavors; + int i; + int nesting_depth = 0; + kern_return_t kret; + struct vm_region_submap_info_64 vbr; + int vbrcount = 0; + tir_t tir1; + + int panic_error = 0; + unsigned int txstart = 0; + unsigned int mach_section_count = 4; + unsigned int num_sects_txed = 0; + + + extern int SEGSIZE; + + extern vm_offset_t sectTEXTB, sectDATAB, sectLINKB, sectPRELINKB; + extern int sectSizeTEXT, sectSizeDATA, sectSizeLINK, sectSizePRELINK; + + map = kernel_map; + not_in_kdp = 0; /* Tell vm functions not to acquire locks */ + + thread_count = 1; + segment_count = get_vmmap_entries(map); + + printf("Kernel map has %d entries\n", segment_count); + + 
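/*
 * (Editorial note.) The file kern_dump() streams to the panic server is an
 * ordinary Mach-O MH_CORE image:
 *
 *	mach_header
 *	segment_command x (VM regions + the fixed kernel sections)
 *	LC_THREAD command (flavor/count pairs plus register state)
 *	...padding out to a page boundary...
 *	raw segment data, in command order
 *
 * Header and commands are staged in the static command_buffer[] and pushed
 * with KDP_SEEK/KDP_DATA pairs, so the server assembles the file with
 * lseek()-style holes wherever I/O-mapped regions are skipped.
 */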
nflavors = kdp_mynum_flavors; + bcopy((char *)thread_flavor_array,(char *) flavors,sizeof(thread_flavor_array)); + + for (i = 0; i < nflavors; i++) + tstate_size += sizeof(mythread_state_flavor_t) + + (flavors[i].count * sizeof(int)); + + command_size = (segment_count + mach_section_count) * + sizeof(struct segment_command) + + thread_count*sizeof(struct thread_command) + + tstate_size*thread_count; + + header_size = command_size + sizeof(struct mach_header); + header = (vm_offset_t) command_buffer; + + /* + * Set up Mach-O header. + */ + printf ("Generated Mach-O header size was %d\n", header_size); + + mh = (struct mach_header *) header; + ms = &machine_slot[cpu_number()]; + mh->magic = MH_MAGIC; + mh->cputype = ms->cpu_type; + mh->cpusubtype = ms->cpu_subtype; + mh->filetype = MH_CORE; + mh->ncmds = segment_count + thread_count + mach_section_count; + mh->sizeofcmds = command_size; + mh->flags = 0; + + hoffset = sizeof(struct mach_header); /* offset into header */ + foffset = round_page_32(header_size); /* offset into file */ + /* Padding.. */ + if ((foffset - header_size) < (4*sizeof(struct segment_command))) { + /* Hack */ + foffset += ((4*sizeof(struct segment_command)) - (foffset-header_size)); + } + + max_header_size = foffset; + + vmoffset = VM_MIN_ADDRESS; /* offset into VM */ + + /* Transmit the Mach-O MH_CORE header, and seek forward past the + * area reserved for the segment and thread commands + * to begin data transmission + */ + + if ((panic_error = kdp_send_panic_pkt (KDP_SEEK, NULL, sizeof(nfoffset) , &nfoffset)) < 0) { + printf ("kdp_send_panic_pkt failed with error %d\n", panic_error); + return -1; + } + + if ((panic_error = kdp_send_panic_packets (KDP_DATA, NULL, sizeof(struct mach_header), (caddr_t) mh) < 0)) { + printf ("kdp_send_panic_packets failed with error %d\n", panic_error); + return -1 ; + } + + if ((panic_error = kdp_send_panic_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset) < 0)) { + printf ("kdp_send_panic_pkt failed with error %d\n", panic_error); + return (-1); + } + printf ("Transmitting kernel state, please wait: "); + + while ((segment_count > 0) || (kret == KERN_SUCCESS)){ + /* Check if we've transmitted all the kernel sections */ + if (num_sects_txed == mach_section_count-1) { + + while (1) { + + /* + * Get region information for next region. + */ + + vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64; + if((kret = vm_region_recurse_64(map, + &vmoffset, &size, &nesting_depth, + &vbr, &vbrcount)) != KERN_SUCCESS) { + break; + } + + if(vbr.is_submap) { + nesting_depth++; + continue; + } else { + break; + } + } + + if(kret != KERN_SUCCESS) + break; + + prot = vbr.protection; + maxprot = vbr.max_protection; + inherit = vbr.inheritance; + } + else + { + switch (num_sects_txed) { + case 0: + { + /* Transmit the kernel text section */ + vmoffset = sectTEXTB; + size = sectSizeTEXT; + } + break; + case 1: + { + vmoffset = sectDATAB; + size = sectSizeDATA; + } + break; + case 2: + { + vmoffset = sectPRELINKB; + size = sectSizePRELINK; + } + break; + case 3: + { + vmoffset = sectLINKB; + size = sectSizeLINK; + } + break; + /* TODO the lowmem vector area may be useful, but its transmission is + * disabled for now. The traceback table area should be transmitted + * as well - that's indirected from 0x5080. + */ + } + num_sects_txed++; + } + /* + * Fill in segment command structure. 
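 *
 * (Editorial gloss.) One segment_command is emitted per loop iteration.
 * vmoffset/size first come from the sectTEXTB/sectSizeTEXT family of
 * globals for the fixed kernel sections, then from the
 * vm_region_recurse_64() walk, which descends into submaps by bumping
 * nesting_depth rather than emitting them. foffset advances by each
 * region's size, so segment data lands in the file in command order. (As
 * written, the walk takes over once num_sects_txed reaches 3, so the
 * case 3 / sectLINKB arm is never actually reached.)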
+ */ + + if (hoffset > max_header_size) + break; + sc = (struct segment_command *) (header); + sc->cmd = LC_SEGMENT; + sc->cmdsize = sizeof(struct segment_command); + sc->segname[0] = 0; + sc->vmaddr = vmoffset; + sc->vmsize = size; + sc->fileoff = foffset; + sc->filesize = size; + sc->maxprot = maxprot; + sc->initprot = prot; + sc->nsects = 0; + + if ((panic_error = kdp_send_panic_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) { + printf ("kdp_send_panic_pkt failed with error %d\n", panic_error); + return -1; + } + + if ((panic_error = kdp_send_panic_packets (KDP_DATA, NULL, sizeof(struct segment_command) , (caddr_t) sc)) < 0) { + printf ("kdp_send_panic_packets failed with error %d\n", panic_error); + return -1 ; + } + + /* Do not transmit memory tagged VM_MEMORY_IOKIT - instead, seek past that + * region on the server - this creates a hole in the file + */ + + if ((vbr.user_tag != VM_MEMORY_IOKIT)) { + + if ((panic_error = kdp_send_panic_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset)) < 0) { + printf ("kdp_send_panic_pkt failed with error %d\n", panic_error); + return (-1); + } + + txstart = vmoffset; + + if ((panic_error = kdp_send_panic_packets (KDP_DATA, NULL, size, (caddr_t) txstart)) < 0) { + printf ("kdp_send_panic_packets failed with error %d\n", panic_error); + return -1 ; + } + } + + hoffset += sizeof(struct segment_command); + foffset += size; + vmoffset += size; + segment_count--; + } + tir1.header = header; + tir1.hoffset = 0; + tir1.flavors = flavors; + tir1.tstate_size = tstate_size; + + /* Now send out the LC_THREAD load command, with the thread information + * for the current activation. + * Note that the corefile can contain LC_SEGMENT commands with file offsets + * that point past the edge of the corefile, in the event that the last N + * VM regions were all I/O mapped or otherwise non-transferable memory, + * not followed by a normal VM region; i.e. there will be no hole that + * reaches to the end of the core file. + */ + kern_collectth_state (current_act(), &tir1); + + if ((panic_error = kdp_send_panic_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) { + printf ("kdp_send_panic_pkt failed with error %d\n", panic_error); + return -1; + } + + if ((panic_error = kdp_send_panic_packets (KDP_DATA, NULL, tir1.hoffset , (caddr_t) header)) < 0) { + printf ("kdp_send_panic_packets failed with error %d\n", panic_error); + return -1 ; + } + + /* last packet */ + if ((panic_error = kdp_send_panic_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0) + { + printf ("kdp_send_panic_pkt failed with error %d\n", panic_error); + return (-1) ; + } + + out: + if (error == 0) + error = error1; + return (error); +} diff --git a/osfmk/kdp/pe/POWERMAC/kdp_mace.c b/osfmk/kdp/pe/POWERMAC/kdp_mace.c deleted file mode 100644 index 563f7cdd5..000000000 --- a/osfmk/kdp/pe/POWERMAC/kdp_mace.c +++ /dev/null @@ -1,675 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. 
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1997 Apple Computer, Inc. - * - * ethernet driver for mace on-board ethernet - * - * HISTORY - * - * Dieter Siegmund (dieter@next.com) Thu Feb 27 18:25:33 PST 1997 - * - ripped off code from MK/LINUX, turned it into a polled-mode - * driver for the PCI (8500) class machines - * - * Dieter Siegmund (dieter@next.com) Fri Mar 21 12:41:29 PST 1997 - * - reworked to support a BSD-style interface, and to support kdb polled - * interface and interrupt-driven interface concurrently - * - * Justin Walker (justin@apple.com) Tue May 20 10:29:29 PDT 1997 - * - Added multicast support - * - * Dieter Siegmund (dieter@next.com) Thu May 29 15:02:29 PDT 1997 - * - fixed problem with sending arp packets for ip address 0.0.0.0 - * - use kdp_register_send_receive() instead of defining - * en_send_pkt/en_recv_pkt routines to avoid name space - * collisions with IOEthernetDebugger and allow these routines to be - * overridden by a driverkit-style driver - * - * Dieter Siegmund (dieter@apple.com) Tue Jun 24 18:29:15 PDT 1997 - * - don't let the adapter auto-strip 802.3 receive frames, it messes - * up the frame size logic - * - * Dieter Siegmund (dieter@apple.com) Tue Aug 5 16:24:52 PDT 1997 - * - handle multicast address deletion correctly - */ -#ifdef MACE_DEBUG -/* - * Caveat: MACE_DEBUG delimits some code that is getting kind of - * stale. Before blindly turning on MACE_DEBUG for your - * testing, take a look at the code enabled by it to check - * that it is reasonably sane. 
- */ -#endif - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "kdp_mace.h" - -struct kdp_mace_copy_desc { - int * len; - char * data; -}; -static mace_t mace; - -#define MACE_DMA_AREA_SIZE \ - (ETHER_RX_NUM_DBDMA_BUFS * ETHERNET_BUF_SIZE + PG_SIZE) -static unsigned long - mace_rx_dma_area[(MACE_DMA_AREA_SIZE + - sizeof(long))/sizeof(long)]; -static unsigned long - mace_tx_dma_area[(ETHERNET_BUF_SIZE + PG_SIZE + - sizeof(long))/sizeof(long)]; - -#ifdef MACE_DEBUG -static unsigned char testBuffer[PG_SIZE * 4]; -static unsigned char testMsg[] = "mace ethernet interface test"; -#endif - -static void polled_send_pkt(char * data, int len); -static void polled_receive_pkt(char *data, int *len, int timeout_ms); - -void kdp_mace_reset(mace_t *); -void kdp_mace_geteh(unsigned char *); -void kdp_mace_setup_dbdma(void); -boolean_t kdp_mace_init(void * baseAddresses[3], unsigned char * netAddr); -#ifdef MACE_DEBUG -static void printContiguousEtherPacket(u_char *, int); -static void send_test_packet(void); -#endif - -typedef int (*funcptr)(char *, int, void *); -int kdp_mace_recv_pkt(funcptr , void *); - -#ifdef MACE_DEBUG -static int -macAddrsEqual(unsigned char * one, unsigned char * two) -{ - int i; - - for (i = 0; i < NUM_EN_ADDR_BYTES; i++) - if (*one++ != *two++) - return 0; - return 1; -} - -static __inline__ int -isprint(unsigned char c) -{ - return (c >= 0x20 && c <= 0x7e); -} - -static void -printEtherHeader(enet_addr_t * dh, enet_addr_t * sh, u_short etype) -{ - u_char * dhost = dh->ether_addr_octet; - u_char * shost = sh->ether_addr_octet; - - printf("Dst: %x:%x:%x:%x:%x:%x Src: %x:%x:%x:%x:%x:%x Type: 0x%x\n", - dhost[0], dhost[1], dhost[2], dhost[3], dhost[4], dhost[5], - shost[0], shost[1], shost[2], shost[3], shost[4], shost[5], - etype); -} - -static void -printData(u_char * data_p, int n_bytes) -{ -#define CHARS_PER_LINE 16 - char line_buf[CHARS_PER_LINE + 1]; - int line_pos; - int offset; - - for (line_pos = 0, offset = 0; offset < n_bytes; offset++, data_p++) { - if (line_pos == 0) { - printf("%04d ", offset); - } - - line_buf[line_pos] = isprint(*data_p) ? *data_p : '.'; - printf(" %02x", *data_p); - line_pos++; - if (line_pos == CHARS_PER_LINE) { - line_buf[CHARS_PER_LINE] = '\0'; - printf(" %s\n", line_buf); - line_pos = 0; - } - } - if (line_pos) { /* need to finish up the line */ - for (; line_pos < CHARS_PER_LINE; line_pos++) { - printf(" "); - line_buf[line_pos] = ' '; - } - line_buf[CHARS_PER_LINE] = '\0'; - printf(" %s\n", line_buf); - } -} - -static void -printEtherPacket(enet_addr_t * dhost, enet_addr_t * shost, u_short type, - u_char * data_p, int n_bytes) -{ - printEtherHeader(dhost, shost, type); - printData(data_p, n_bytes); -} - -static void -printContiguousEtherPacket(u_char * data_p, int n_bytes) -{ - printEtherPacket((enet_addr_t *)data_p, - (enet_addr_t *)(data_p + NUM_EN_ADDR_BYTES), - *((u_short *)(data_p + (NUM_EN_ADDR_BYTES * 2))), - data_p, n_bytes); -} -#endif - - -/* - * kdp_mace_reset - * - * Reset the board.. - */ -void -kdp_mace_reset(mace_t * m) -{ - dbdma_reset(m->rv_dbdma); - dbdma_reset(m->tx_dbdma); -} - - -/* - * kdp_mace_geteh: - * - * This function gets the ethernet address (array of 6 unsigned - * bytes) from the MACE board registers. 
- * - */ -void -kdp_mace_geteh(unsigned char *ep) -{ - int i; - unsigned char ep_temp; - - mace.ereg->iac = IAC_PHYADDR; eieio(); - - for (i = 0; i < ETHER_ADD_SIZE; i++) { - ep_temp = mace.ereg->padr; eieio(); - *ep++ = ep_temp; - } -} - -/* - * mace_seteh: - * - * This function sets the ethernet address (array of 6 unsigned - * bytes) on the MACE board. - */ -static void -mace_seteh(unsigned char *ep) -{ - int i; - unsigned char status; - - if (mace.chip_id != MACE_REVISION_A2) { - mace.ereg->iac = IAC_ADDRCHG|IAC_PHYADDR; eieio(); - - while ((status = mace.ereg->iac)) { - if ((status & IAC_ADDRCHG) == 0) { - eieio(); - break; - } - eieio(); - } - } - else { - /* start to load the address.. */ - mace.ereg->iac = IAC_PHYADDR; eieio(); - } - - for (i = 0; i < NUM_EN_ADDR_BYTES; i++) { - mace.ereg->padr = *(ep+i); eieio(); - } - return; -} - -/* - * kdp_mace_setup_dbdma - * - * Setup various dbdma pointers. - */ -void -kdp_mace_setup_dbdma() -{ - mace_t * m = &mace; - int i; - dbdma_command_t * d; - vm_offset_t address; - dbdma_regmap_t * regmap; - -#define ALIGN_MASK 0xfffffffcUL - if (m->rv_dma_area == 0) { - m->rv_dma_area = (unsigned char *) - ((((unsigned long)mace_rx_dma_area) + 3) & ALIGN_MASK); - m->rv_dma = dbdma_alloc(ETHER_RX_NUM_DBDMA_BUFS + 2); - m->tx_dma = dbdma_alloc(TX_NUM_DBDMA); - m->tx_dma_area = (unsigned char *) - ((((unsigned long)mace_tx_dma_area) + 3) & ALIGN_MASK); - } - - /* set up a ring of buffers */ - d = m->rv_dma; - for (i = 0; i < ETHER_RX_NUM_DBDMA_BUFS; i++, d++) { - address = (vm_offset_t) kvtophys((vm_offset_t)&m->rv_dma_area[i*ETHERNET_BUF_SIZE]); - DBDMA_BUILD(d, DBDMA_CMD_IN_LAST, 0, ETHERNET_BUF_SIZE, - address, DBDMA_INT_ALWAYS, - DBDMA_WAIT_NEVER, - DBDMA_BRANCH_NEVER); - } - - /* stop when we hit the end of the list */ - DBDMA_BUILD(d, DBDMA_CMD_STOP, 0, 0, 0, DBDMA_INT_ALWAYS, - DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); - d++; - - /* branch to command at "address" ie. element 0 of the "array" */ - DBDMA_BUILD(d, DBDMA_CMD_NOP, 0, 0, 0, DBDMA_INT_NEVER, - DBDMA_WAIT_NEVER, DBDMA_BRANCH_ALWAYS); - address = (vm_offset_t) kvtophys((vm_offset_t)m->rv_dma); - dbdma_st4_endian(&d->d_cmddep, address); - - m->rv_head = 0; - m->rv_tail = ETHER_RX_NUM_DBDMA_BUFS; /* always contains DBDMA_CMD_STOP */ - regmap = m->rv_dbdma; - - /* stop/init/restart dma channel */ - dbdma_reset(regmap); - dbdma_reset(m->tx_dbdma); - - /* Set the wait value.. */ - dbdma_st4_endian(®map->d_wait, DBDMA_SET_CNTRL(0x00)); - - /* Set the tx wait value */ - regmap = m->tx_dbdma; - dbdma_st4_endian(®map->d_wait, DBDMA_SET_CNTRL(0x20)); - - flush_dcache((vm_offset_t)m->rv_dma, - sizeof(dbdma_command_t) * (ETHER_RX_NUM_DBDMA_BUFS + 2), - FALSE); - /* start receiving */ - dbdma_start(m->rv_dbdma, m->rv_dma); -} - -#ifdef MACE_DEBUG -static void -send_test_packet() -{ - unsigned char * tp; - - bzero((char *)testBuffer, sizeof(testBuffer)); - - tp = testBuffer; - - /* send self-addressed packet */ - bcopy((char *)&mace.macaddr[0], (char *)tp, NUM_EN_ADDR_BYTES); - tp += NUM_EN_ADDR_BYTES; - bcopy((char *)&mace.macaddr[0], (char *)tp, NUM_EN_ADDR_BYTES); - tp += NUM_EN_ADDR_BYTES; - *tp++ = 0; - *tp++ = 0; - bcopy((char *)testMsg, (char *)tp, sizeof(testMsg)); - polled_send_pkt((char *)testBuffer, 80); - return; -} -#endif - -/* - * Function: kdp_mace_init - * - * Purpose: - * Called early on, initializes the adapter and readies it for - * kdb kernel debugging. 
- */ -boolean_t -kdp_mace_init(void * baseAddresses[3], unsigned char * netAddr) -{ - unsigned char status; - mace_t * m = &mace; - struct mace_board * ereg; - int mpc = 0; - int i; - - bzero((char *)&mace, sizeof(mace)); - - /* get the ethernet registers' mapped address */ - ereg = m->ereg - = (struct mace_board *) baseAddresses[0]; - m->tx_dbdma = (dbdma_regmap_t *) baseAddresses[1]; - m->rv_dbdma = (dbdma_regmap_t *) baseAddresses[2]; - - for (i = 0; i < NUM_EN_ADDR_BYTES; i++) - m->macaddr[i] = netAddr[i]; - - /* Reset the board & AMIC.. */ - kdp_mace_reset(m); - - /* grab the MACE chip rev */ - m->chip_id = (ereg->chipid2 << 8 | ereg->chipid1); - - /* don't auto-strip for 802.3 */ - m->ereg->rcvfc &= ~(RCVFC_ASTRPRCV); - - /* set the ethernet address */ - mace_seteh(mace.macaddr); - { - unsigned char macaddr[NUM_EN_ADDR_BYTES]; - kdp_mace_geteh(macaddr); - printf("mace ethernet [%02x:%02x:%02x:%02x:%02x:%02x]\n", - macaddr[0], macaddr[1], macaddr[2], - macaddr[3], macaddr[4], macaddr[5]); - } - - /* Now clear the Multicast filter */ - if (m->chip_id != MACE_REVISION_A2) { - ereg->iac = IAC_ADDRCHG|IAC_LOGADDR; eieio(); - - while ((status = ereg->iac)) { - if ((status & IAC_ADDRCHG) == 0) - break; - eieio(); - } - eieio(); - } - else { - ereg->iac = IAC_LOGADDR; eieio(); - } - { - int i; - - for (i=0; i < 8; i++) - { ereg->ladrf = 0; - eieio(); - } - } - - /* register interrupt routines */ - kdp_mace_setup_dbdma(); - - /* Start the chip... */ - m->ereg->maccc = MACCC_ENXMT|MACCC_ENRCV; eieio(); - { - volatile char ch = mace.ereg->ir; eieio(); - } - - delay(500); /* paranoia */ - mace.ereg->imr = 0xfe; eieio(); - - /* register our debugger routines */ - kdp_register_send_receive((kdp_send_t)polled_send_pkt, - (kdp_receive_t)polled_receive_pkt); - -#ifdef MACE_DEBUG - printf("Testing 1 2 3\n"); - send_test_packet(); - printf("Testing 1 2 3\n"); - send_test_packet(); - printf("Testing 1 2 3\n"); - send_test_packet(); - do { - static unsigned char buf[ETHERNET_BUF_SIZE]; - int len; - int nmpc = mace.ereg->mpc; eieio(); - - if (nmpc > mpc) { - mpc = nmpc; - printf("mpc %d\n", mpc); - } - polled_receive_pkt((char *)buf, &len, 100); - if (len > 0) { - printf("rx %d\n", len); - printContiguousEtherPacket(buf, len); - } - } while(1); -#endif - - return TRUE; -} - -#ifdef MACE_DEBUG -static void -kdp_mace_txstatus(char * msg) -{ - dbdma_regmap_t * dmap = mace.tx_dbdma; - volatile unsigned long status; - volatile unsigned long intr; - volatile unsigned long branch; - volatile unsigned long wait; - - status = dbdma_ld4_endian(&dmap->d_status); eieio(); - intr = dbdma_ld4_endian(&dmap->d_intselect); eieio(); - branch = dbdma_ld4_endian(&dmap->d_branch); eieio(); - wait = dbdma_ld4_endian(&dmap->d_wait); eieio(); - printf("(%s s=0x%x i=0x%x b=0x%x w=0x%x)", msg, status, intr, branch, - wait); - return; -} -#endif - -static void -kdp_mace_tx_dbdma(char * data, int len) -{ - unsigned long count; - dbdma_command_t * d; - unsigned long page; - - d = mace.tx_dma; - page = ((unsigned long) data) & PG_MASK; - if ((page + len) <= PG_SIZE) { /* one piece dma */ - DBDMA_BUILD(d, DBDMA_CMD_OUT_LAST, DBDMA_KEY_STREAM0, - len, - (vm_offset_t) kvtophys((vm_offset_t) data), - DBDMA_INT_NEVER, - DBDMA_WAIT_IF_FALSE, DBDMA_BRANCH_NEVER); - } - else { /* two piece dma */ - count = PG_SIZE - page; - DBDMA_BUILD(d, DBDMA_CMD_OUT_MORE, DBDMA_KEY_STREAM0, - count, - (vm_offset_t)kvtophys((vm_offset_t) data), - DBDMA_INT_NEVER, - DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); - d++; - DBDMA_BUILD(d, DBDMA_CMD_OUT_LAST, 
DBDMA_KEY_STREAM0, - len - count, (vm_offset_t) - kvtophys((vm_offset_t)((unsigned char *)data + count)), - DBDMA_INT_NEVER, - DBDMA_WAIT_IF_FALSE, DBDMA_BRANCH_NEVER); - } - d++; - DBDMA_BUILD(d, DBDMA_CMD_LOAD_QUAD, DBDMA_KEY_SYSTEM, - 1, kvtophys((vm_offset_t) &mace.ereg->xmtfs),DBDMA_INT_NEVER, -// 1, &mace.ereg->xmtfs,DBDMA_INT_NEVER, - DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); - d++; - DBDMA_BUILD(d, DBDMA_CMD_LOAD_QUAD, DBDMA_KEY_SYSTEM, - 1, kvtophys((vm_offset_t) &mace.ereg->ir), DBDMA_INT_ALWAYS, -// 1, &mace.ereg->ir, DBDMA_INT_ALWAYS, - DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); - d++; - DBDMA_BUILD(d, DBDMA_CMD_STOP, 0, 0, 0, 0, 0, 0); - - flush_dcache((vm_offset_t)mace.tx_dma, - sizeof(dbdma_command_t) * TX_NUM_DBDMA, - FALSE); - dbdma_start(mace.tx_dbdma, mace.tx_dma); - return; - -} - -static void -waitForDBDMADone(char * msg) -{ - { - /* wait for tx dma completion */ - dbdma_regmap_t * dmap = mace.tx_dbdma; - int i; - volatile unsigned long val; - - i = 0; - do { - val = dbdma_ld4_endian(&dmap->d_status); eieio(); - delay(50); - i++; - } while ((i < 100000) && (val & DBDMA_CNTRL_ACTIVE)); - if (i == 100000) - printf("mace(%s): kdp_mace_tx_dbdma poll timed out 0x%x", msg, val); - } -} - -int -kdp_mace_recv_pkt(funcptr pktfunc, void * p) -{ - vm_offset_t address; - struct mace_board * board; - long bytes; - int done = 0; - int doContinue = 0; - mace_t * m; - unsigned long resid; - unsigned short status; - int tail; - - m = &mace; - board = m->ereg; - - /* remember where the tail was */ - tail = m->rv_tail; - for (done = 0; (done == 0) && (m->rv_head != tail);) { - dbdma_command_t * dmaHead; - - dmaHead = &m->rv_dma[m->rv_head]; - resid = dbdma_ld4_endian(&dmaHead->d_status_resid); - status = (resid >> 16); - bytes = resid & 0xffff; - bytes = ETHERNET_BUF_SIZE - bytes - 8; /* strip off FCS/CRC */ - - if ((status & DBDMA_ETHERNET_EOP) == 0) { - /* no packets are ready yet */ - break; - } - doContinue = 1; - /* if the packet is good, pass it up */ - if (bytes >= (ETHER_MIN_PACKET - 4)) { - char * dmaPacket; - dmaPacket = (char *)&m->rv_dma_area[m->rv_head * ETHERNET_BUF_SIZE]; - done = (*pktfunc)(dmaPacket, bytes, p); - } - /* mark the head as the new tail in the dma channel command list */ - DBDMA_BUILD(dmaHead, DBDMA_CMD_STOP, 0, 0, 0, DBDMA_INT_ALWAYS, - DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); - flush_dcache((vm_offset_t)dmaHead, - sizeof(*dmaHead), - FALSE); - eieio(); - - /* make the tail an available dma'able entry */ - { - dbdma_command_t * dmaTail; - dmaTail = &m->rv_dma[m->rv_tail]; - address = kvtophys((vm_offset_t) - &m->rv_dma_area[m->rv_tail*ETHERNET_BUF_SIZE]); - // this command is live so write it carefully - DBDMA_ST4_ENDIAN(&dmaTail->d_address, address); - dmaTail->d_status_resid = 0; - dmaTail->d_cmddep = 0; - eieio(); - DBDMA_ST4_ENDIAN(&dmaTail->d_cmd_count, - ((DBDMA_CMD_IN_LAST) << 28) | ((0) << 24) | - ((DBDMA_INT_ALWAYS) << 20) | - ((DBDMA_BRANCH_NEVER) << 18) | ((DBDMA_WAIT_NEVER) << 16) | - (ETHERNET_BUF_SIZE)); - eieio(); - flush_dcache((vm_offset_t)dmaTail, - sizeof(*dmaTail), - FALSE); - } - /* head becomes the tail */ - m->rv_tail = m->rv_head; - - /* advance the head */ - m->rv_head++; - if (m->rv_head == (ETHER_RX_NUM_DBDMA_BUFS + 1)) - m->rv_head = 0; - } - if (doContinue) { - sync(); - dbdma_continue(m->rv_dbdma); - } - return (done); -} - -static int -kdp_mace_copy(char * pktBuf, int len, void * p) -{ - struct kdp_mace_copy_desc * cp = (struct kdp_mace_copy_desc *)p; - - bcopy((char *)pktBuf, (char *)cp->data, len); - *cp->len = len; - return (1); /* 
signal that we're done */ -} - -/* kdb debugger routines */ -static void -polled_send_pkt(char * data, int len) -{ - waitForDBDMADone("mace: polled_send_pkt start"); - kdp_mace_tx_dbdma(data, len); - waitForDBDMADone("mace: polled_send_pkt end"); - return; -} - -static void -polled_receive_pkt(char *data, int *len, int timeout_ms) -{ - struct kdp_mace_copy_desc cp; - - cp.len = len; - cp.data = data; - - timeout_ms *= 1000; - *len = 0; - while (kdp_mace_recv_pkt(kdp_mace_copy, (void *)&cp) == 0) { - if (timeout_ms <= 0) - break; - delay(50); - timeout_ms -= 50; - } - return; -} diff --git a/osfmk/kdp/pe/POWERMAC/kdp_mace.h b/osfmk/kdp/pe/POWERMAC/kdp_mace.h deleted file mode 100644 index 4382c2152..000000000 --- a/osfmk/kdp/pe/POWERMAC/kdp_mace.h +++ /dev/null @@ -1,392 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 - * All Rights Reserved - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose and without fee is hereby granted, - * provided that the above copyright notice appears in all copies and - * that both the copyright notice and this permission notice appear in - * supporting documentation. - * - * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, - * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION - * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - * - */ -/* - * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 - * All Rights Reserved - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose and without fee is hereby granted, - * provided that the above copyright notice appears in all copies and - * that both the copyright notice and this permission notice appear in - * supporting documentation. - * - * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE. 
- * - * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, - * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION - * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -/* - * MKLINUX-1.0DR2 - */ -/* - * PMach Operating System - * Copyright (c) 1995 Santa Clara University - * All Rights Reserved. - */ -/* - * Mach Operating System - * Copyright (c) 1991,1990,1989 Carnegie Mellon University - * All Rights Reserved. - * - * Permission to use, copy, modify and distribute this software and its - * documentation is hereby granted, provided that both the copyright - * notice and this permission notice appear in all copies of the - * software, derivative works or modified versions, and any portions - * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" - * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR - * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * - * Carnegie Mellon requests users of this software to return to - * - * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU - * School of Computer Science - * Carnegie Mellon University - * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon - * the rights to redistribute these changes. - */ -/* - * File: if_3c501.h - * Author: Philippe Bernadat - * Date: 1989 - * Copyright (c) 1989 OSF Research Institute - * - * 3COM Etherlink 3C501 Mach Ethernet drvier - */ -/* - Copyright 1990 by Open Software Foundation, -Cambridge, MA. - - All Rights Reserved - - Permission to use, copy, modify, and distribute this software and -its documentation for any purpose and without fee is hereby granted, -provided that the above copyright notice appears in all copies and -that both the copyright notice and this permission notice appear in -supporting documentation, and that the name of OSF or Open Software -Foundation not be used in advertising or publicity pertaining to -distribution of the software without specific, written prior -permission. - - OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE -INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, -IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, -NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION -WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ - - - -#define ENETPAD(n) char n[15] - -/* 0x50f0a000 */ -struct mace_board { - volatile unsigned char rcvfifo; /* 00 receive fifo */ - ENETPAD(epad0); - volatile unsigned char xmtfifo; /* 01 transmit fifo */ - ENETPAD(epad1); - volatile unsigned char xmtfc; /* 02 transmit frame control */ - ENETPAD(epad2); - volatile unsigned char xmtfs; /* 03 transmit frame status */ - ENETPAD(epad3); - volatile unsigned char xmtrc; /* 04 transmit retry count */ - ENETPAD(epad4); - volatile unsigned char rcvfc; /* 05 receive frame control -- 4 bytes */ - ENETPAD(epad5); - volatile unsigned char rcvfs; /* 06 receive frame status */ - ENETPAD(epad6); - volatile unsigned char fifofc; /* 07 fifo frame count */ - ENETPAD(epad7); - volatile unsigned char ir; /* 08 interrupt */ - ENETPAD(epad8); - volatile unsigned char imr; /* 09 interrupt mask */ - ENETPAD(epad9); - volatile unsigned char pr; /* 10 poll */ - ENETPAD(epad10); - volatile unsigned char biucc; /* 11 bus interface unit configuration control */ - ENETPAD(epad11); - volatile unsigned char fifocc; /* 12 fifo configuration control */ - ENETPAD(epad12); - volatile unsigned char maccc; /* 13 media access control configuration control */ - ENETPAD(epad13); - volatile unsigned char plscc; /* 14 physical layer signalling configuration control */ - ENETPAD(epad14); - volatile unsigned char phycc; /* 15 physical layer configuration control */ - ENETPAD(epad15); - volatile unsigned char chipid1; /* 16 chip identification LSB */ - ENETPAD(epad16); - volatile unsigned char chipid2; /* 17 chip identification MSB */ - ENETPAD(epad17); - volatile unsigned char iac; /* 18 internal address configuration */ - ENETPAD(epad18); - volatile unsigned char res1; /* 19 */ - ENETPAD(epad19); - volatile unsigned char ladrf; /* 20 logical address filter -- 8 bytes */ - ENETPAD(epad20); - volatile unsigned char padr; /* 21 physical address -- 6 bytes */ - ENETPAD(epad21); - volatile unsigned char res2; /* 22 */ - ENETPAD(epad22); - volatile unsigned char res3; /* 23 */ - ENETPAD(epad23); - volatile unsigned char mpc; /* 24 missed packet count */ - ENETPAD(epad24); - volatile unsigned char res4; /* 25 */ - ENETPAD(epad25); - volatile unsigned char rntpc; /* 26 runt packet count */ - ENETPAD(epad26); - volatile unsigned char rcvcc; /* 27 receive collision count */ - ENETPAD(epad27); - volatile unsigned char res5; /* 28 */ - ENETPAD(epad28); - volatile unsigned char utr; /* 29 user test */ - ENETPAD(epad29); - volatile unsigned char res6; /* 30 */ - ENETPAD(epad30); - volatile unsigned char res7; /* 31 */ - }; - -/* - * Chip Revisions.. 
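A note on the register map deleted above: the ENETPAD(n) idiom (a 15-byte pad after each one-byte register) models hardware that decodes one MACE register per 16 bytes of address space. The offset arithmetic it encodes, as a standalone sketch:

	/* MACE register n sits at base + n * 16; for example n == 8 is the
	   interrupt register ("ir" in the struct above). */
	volatile unsigned char *
	mace_reg(volatile unsigned char *base, int n)
	{
		return (base + (n * 16));
	}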
- */ - -#define MACE_REVISION_B0 0x0940 -#define MACE_REVISION_A2 0x0941 - -/* xmtfc */ -#define XMTFC_DRTRY 0X80 -#define XMTFC_DXMTFCS 0x08 -#define XMTFC_APADXNT 0x01 - -/* xmtfs */ -#define XMTFS_XNTSV 0x80 -#define XMTFS_XMTFS 0x40 -#define XMTFS_LCOL 0x20 -#define XMTFS_MORE 0x10 -#define XMTFS_ONE 0x08 -#define XMTFS_DEFER 0x04 -#define XMTFS_LCAR 0x02 -#define XMTFS_RTRY 0x01 - -/* xmtrc */ -#define XMTRC_EXDEF 0x80 - -/* rcvfc */ -#define RCVFC_LLRCV 0x08 -#define RCVFC_M_R 0x04 -#define RCVFC_ASTRPRCV 0x01 - -/* rcvfs */ -#define RCVFS_OFLO 0x80 -#define RCVFS_CLSN 0x40 -#define RCVFS_FRAM 0x20 -#define RCVFS_FCS 0x10 -#define RCVFS_REVCNT 0x0f - -/* fifofc */ -#define FIFOCC_XFW_8 0x00 -#define FIFOCC_XFW_16 0x40 -#define FIFOCC_XFW_32 0x80 -#define FIFOCC_XFW_XX 0xc0 -#define FIFOCC_RFW_16 0x00 -#define FIFOCC_RFW_32 0x10 -#define FIFOCC_RFW_64 0x20 -#define FIFOCC_RFW_XX 0x30 -#define FIFOCC_XFWU 0x08 -#define FIFOCC_RFWU 0x04 -#define FIFOCC_XBRST 0x02 -#define FIFOCC_RBRST 0x01 - - -/* ir */ -#define IR_JAB 0x80 -#define IR_BABL 0x40 -#define IR_CERR 0x20 -#define IR_RCVCCO 0x10 -#define IR_RNTPCO 0x08 -#define IR_MPCO 0x04 -#define IR_RCVINT 0x02 -#define IR_XMTINT 0x01 - -/* imr */ -#define IMR_MJAB 0x80 -#define IMR_MBABL 0x40 -#define IMR_MCERR 0x20 -#define IMR_MRCVCCO 0x10 -#define IMR_MRNTPCO 0x08 -#define IMR_MMPCO 0x04 -#define IMR_MRCVINT 0x02 -#define IMR_MXMTINT 0x01 - -/* pr */ -#define PR_XMTSV 0x80 -#define PR_TDTREQ 0x40 -#define PR_RDTREQ 0x20 - -/* biucc */ -#define BIUCC_BSWP 0x40 -#define BIUCC_XMTSP04 0x00 -#define BIUCC_XMTSP16 0x10 -#define BIUCC_XMTSP64 0x20 -#define BIUCC_XMTSP112 0x30 -#define BIUCC_SWRST 0x01 - -/* fifocc */ -#define FIFOCC_XMTFW08W 0x00 -#define FIFOCC_XMTFW16W 0x40 -#define FIFOCC_XMTFW32W 0x80 - -#define FIFOCC_RCVFW16 0x00 -#define FIFOCC_RCVFW32 0x10 -#define FIFOCC_RCVFW64 0x20 - -#define FIFOCC_XMTFWU 0x08 -#define FIFOCC_RCVFWU 0x04 -#define FIFOCC_XMTBRST 0x02 -#define FIFOCC_RCVBRST 0x01 - -/* maccc */ -#define MACCC_PROM 0x80 -#define MACCC_DXMT2PD 0x40 -#define MACCC_EMBA 0x20 -#define MACCC_DRCVPA 0x08 -#define MACCC_DRCVBC 0x04 -#define MACCC_ENXMT 0x02 -#define MACCC_ENRCV 0x01 - -/* plscc */ -#define PLSCC_XMTSEL 0x08 -#define PLSCC_AUI 0x00 -#define PLSCC_TENBASE 0x02 -#define PLSCC_DAI 0x04 -#define PLSCC_GPSI 0x06 -#define PLSCC_ENPLSIO 0x01 - -/* phycc */ -#define PHYCC_LNKFL 0x80 -#define PHYCC_DLNKTST 0x40 -#define PHYCC_REVPOL 0x20 -#define PHYCC_DAPC 0x10 -#define PHYCC_LRT 0x08 -#define PHYCC_ASEL 0x04 -#define PHYCC_RWAKE 0x02 -#define PHYCC_AWAKE 0x01 - -/* iac */ -#define IAC_ADDRCHG 0x80 -#define IAC_PHYADDR 0x04 -#define IAC_LOGADDR 0x02 - -/* utr */ -#define UTR_RTRE 0x80 -#define UTR_RTRD 0x40 -#define UTR_RPA 0x20 -#define UTR_FCOLL 0x10 -#define UTR_RCVFCSE 0x08 - -#define UTR_NOLOOP 0x00 -#define UTR_EXTLOOP 0x02 -#define UTR_INLOOP 0x04 -#define UTR_INLOOP_M 0x06 - -#define ENET_PHYADDR_LEN 6 -#define ENET_HEADER 14 - -#define BFRSIZ 2048 -#define ETHER_ADD_SIZE 6 /* size of a MAC address */ -#define DSF_LOCK 1 -#define DSF_RUNNING 2 -#define MOD_ENAL 1 -#define MOD_PROM 2 - -/* - * MACE Chip revision codes - */ -#define MACERevA2 0x0941 -#define MACERevB0 0x0940 - -/* - * Defines and device state - * Dieter Siegmund (dieter@next.com) Thu Feb 27 18:25:33 PST 1997 - */ - -#define PG_SIZE 0x1000UL -#define PG_MASK (PG_SIZE - 1UL) - -#define ETHERMTU 1500 -#define ETHER_RX_NUM_DBDMA_BUFS 32 -#define ETHERNET_BUF_SIZE (ETHERMTU + 36) -#define ETHER_MIN_PACKET 64 -#define TX_NUM_DBDMA 6 -#define 
NUM_EN_ADDR_BYTES 6 - -#define DBDMA_ETHERNET_EOP 0x40 - -typedef struct mace_s { - struct mace_board * ereg; /* ethernet register set address */ - dbdma_regmap_t * tx_dbdma; - dbdma_regmap_t * rv_dbdma; - unsigned char macaddr[NUM_EN_ADDR_BYTES]; /* mac address */ - int chip_id; - dbdma_command_t *rv_dma; - dbdma_command_t *tx_dma; - unsigned char *rv_dma_area; - unsigned char *tx_dma_area; - int rv_tail; - int rv_head; -} mace_t; - - diff --git a/osfmk/kern/ast.c b/osfmk/kern/ast.c index 0764da904..9ed7fd91f 100644 --- a/osfmk/kern/ast.c +++ b/osfmk/kern/ast.c @@ -93,92 +93,89 @@ ast_init(void) #endif /* MACHINE_AST */ } +/* + * Called at splsched. + */ void ast_taken( ast_t reasons, boolean_t enable ) { - register int mycpu; - register processor_t myprocessor; register thread_t self = current_thread(); - boolean_t preempt_trap = (reasons == AST_PREEMPT); + register int mycpu = cpu_number(); + boolean_t preempt_trap = (reasons == AST_PREEMPTION); - disable_preemption(); - mycpu = cpu_number(); reasons &= need_ast[mycpu]; need_ast[mycpu] &= ~reasons; - enable_preemption(); /* - * No ast for an idle thread + * Handle ASTs for all threads + * except idle processor threads. */ - if (self->state & TH_IDLE) - goto enable_and_return; - - /* - * Check for urgent preemption - */ - if ((reasons & AST_URGENT) && wait_queue_assert_possible(self)) { - if (reasons & AST_BLOCK) { - counter(c_ast_taken_block++); - thread_block_reason((void (*)(void))0, AST_BLOCK); + if (!(self->state & TH_IDLE)) { + /* + * Check for urgent preemption. + */ + if ( (reasons & AST_URGENT) && + wait_queue_assert_possible(self) ) { + if (reasons & AST_PREEMPT) { + counter(c_ast_taken_block++); + thread_block_reason(THREAD_CONTINUE_NULL, + AST_PREEMPT | AST_URGENT); + } + + reasons &= ~AST_PREEMPTION; } - reasons &= ~AST_PREEMPT; - if (reasons == 0) - goto enable_and_return; - } - - if (preempt_trap) - goto enable_and_return; - - ml_set_interrupts_enabled(enable); + /* + * The kernel preempt traps + * skip all other ASTs. + */ + if (!preempt_trap) { + ml_set_interrupts_enabled(enable); #ifdef MACH_BSD - /* - * Check for BSD hook - */ - if (reasons & AST_BSD) { - extern void bsd_ast(thread_act_t act); - thread_act_t act = self->top_act; - - thread_ast_clear(act, AST_BSD); - bsd_ast(act); - } + /* + * Handle BSD hook. + */ + if (reasons & AST_BSD) { + extern void bsd_ast(thread_act_t act); + thread_act_t act = self->top_act; + + thread_ast_clear(act, AST_BSD); + bsd_ast(act); + } #endif - /* - * migration APC hook - */ - if (reasons & AST_APC) { - act_execute_returnhandlers(); - } - - /* - * Check for normal preemption - */ - reasons &= AST_BLOCK; - if (reasons == 0) { - disable_preemption(); - myprocessor = current_processor(); - if (csw_needed(self, myprocessor)) - reasons = AST_BLOCK; - enable_preemption(); - } - if ( (reasons & AST_BLOCK) && - wait_queue_assert_possible(self) ) { - counter(c_ast_taken_block++); - thread_block_reason(thread_exception_return, AST_BLOCK); + /* + * Thread APC hook. + */ + if (reasons & AST_APC) + act_execute_returnhandlers(); + + ml_set_interrupts_enabled(FALSE); + + /* + * Check for preemption. 
+ */ + if (reasons & AST_PREEMPT) { + processor_t myprocessor = current_processor(); + + if (csw_needed(self, myprocessor)) + reasons = AST_PREEMPT; + else + reasons = AST_NONE; + } + if ( (reasons & AST_PREEMPT) && + wait_queue_assert_possible(self) ) { + counter(c_ast_taken_block++); + thread_block_reason(thread_exception_return, AST_PREEMPT); + } + } } - goto just_return; - -enable_and_return: - ml_set_interrupts_enabled(enable); - -just_return: - return; + ml_set_interrupts_enabled(enable); } /* @@ -188,7 +185,7 @@ void ast_check( processor_t processor) { - register thread_t self = processor->cpu_data->active_thread; + register thread_t self = processor->active_thread; processor->current_pri = self->sched_pri; if (processor->state == PROCESSOR_RUNNING) { @@ -214,7 +211,4 @@ processor_running: else if (processor->state == PROCESSOR_SHUTDOWN) goto processor_running; - else - if (processor->state == PROCESSOR_ASSIGN) - ast_on(AST_BLOCK); } diff --git a/osfmk/kern/ast.h b/osfmk/kern/ast.h index 3df0a0460..5b0d3ab2a 100644 --- a/osfmk/kern/ast.h +++ b/osfmk/kern/ast.h @@ -82,24 +82,24 @@ typedef uint32_t ast_t; /* * Bits for reasons */ -#define AST_BLOCK 0x01 -#define AST_QUANTUM 0x02 -#define AST_HANDOFF 0x04 -#define AST_YIELD 0x08 -#define AST_URGENT 0x10 +#define AST_PREEMPT 0x01 +#define AST_QUANTUM 0x02 +#define AST_URGENT 0x04 +#define AST_HANDOFF 0x08 +#define AST_YIELD 0x10 #define AST_APC 0x20 /* migration APC hook */ /* * JMM - This is here temporarily. AST_BSD is used to simulate a * general purpose mechanism for setting asynchronous procedure calls * from the outside. */ -#define AST_BSD 0x80 +#define AST_BSD 0x80 #define AST_NONE 0x00 -#define AST_ALL (~AST_NONE) +#define AST_ALL (~AST_NONE) -#define AST_SCHEDULING (AST_PREEMPT | AST_YIELD | AST_HANDOFF) -#define AST_PREEMPT (AST_BLOCK | AST_QUANTUM | AST_URGENT) +#define AST_SCHEDULING (AST_PREEMPTION | AST_YIELD | AST_HANDOFF) +#define AST_PREEMPTION (AST_PREEMPT | AST_QUANTUM | AST_URGENT) extern volatile ast_t need_ast[NCPUS]; @@ -181,10 +181,4 @@ MACRO_END * be followed by ast_propagate(). 
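For readers tracking the renumbered reason bits above: AST_PREEMPT is now a single bit and AST_PREEMPTION the composite mask, so preemption-class tests collapse to a single AND. A minimal sketch using the values from this hunk (the helper name is illustrative):

	#define AST_PREEMPT	0x01
	#define AST_QUANTUM	0x02
	#define AST_URGENT	0x04
	#define AST_PREEMPTION	(AST_PREEMPT | AST_QUANTUM | AST_URGENT)

	/* any preemption-class reason pending? */
	static int
	ast_preemption_pending(uint32_t reasons)
	{
		return ((reasons & AST_PREEMPTION) != 0);
	}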
*/ -#ifdef MACH_KERNEL_PRIVATE - -#define ast_urgency() (need_ast[cpu_number()] & AST_URGENT) - -#endif /* MACH_KERNEL_PRIVATE */ - #endif /* _KERN_AST_H_ */ diff --git a/osfmk/kern/bsd_kern.c b/osfmk/kern/bsd_kern.c index 4f6aa60af..5b01ba1ec 100644 --- a/osfmk/kern/bsd_kern.c +++ b/osfmk/kern/bsd_kern.c @@ -38,27 +38,22 @@ #undef thread_should_halt #undef ipc_port_release -decl_simple_lock_data(extern,reaper_lock) -extern queue_head_t reaper_queue; - /* BSD KERN COMPONENT INTERFACE */ task_t bsd_init_task = TASK_NULL; char init_task_failure_data[1024]; +extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */ thread_act_t get_firstthread(task_t); vm_map_t get_task_map(task_t); ipc_space_t get_task_ipcspace(task_t); boolean_t is_kerneltask(task_t); boolean_t is_thread_idle(thread_t); -boolean_t is_thread_running(thread_act_t); -thread_shuttle_t getshuttle_thread( thread_act_t); -thread_act_t getact_thread( thread_shuttle_t); vm_offset_t get_map_min( vm_map_t); vm_offset_t get_map_max( vm_map_t); int get_task_userstop(task_t); int get_thread_userstop(thread_act_t); -boolean_t thread_should_abort(thread_shuttle_t); +boolean_t thread_should_abort(thread_t); boolean_t current_thread_aborted(void); void task_act_iterate_wth_args(task_t, void(*)(thread_act_t, void *), void *); void ipc_port_release(ipc_port_t); @@ -68,7 +63,7 @@ vm_size_t get_vmmap_size(vm_map_t); int get_vmmap_entries(vm_map_t); int get_task_numacts(task_t); thread_act_t get_firstthread(task_t task); -kern_return_t get_signalact(task_t , thread_act_t *, thread_t *, int); +kern_return_t get_signalact(task_t , thread_act_t *, int); void astbsd_on(void); /* @@ -105,15 +100,15 @@ thread_act_t get_firstthread(task_t task) { thread_act_t thr_act; - thr_act = (thread_act_t)queue_first(&task->thr_acts); - if (thr_act == (thread_act_t)&task->thr_acts) + thr_act = (thread_act_t)queue_first(&task->threads); + if (queue_end(&task->threads, (queue_entry_t)thr_act)) thr_act = THR_ACT_NULL; if (!task->active) return(THR_ACT_NULL); return(thr_act); } -kern_return_t get_signalact(task_t task,thread_act_t * thact, thread_t * thshut, int setast) +kern_return_t get_signalact(task_t task,thread_act_t * thact, int setast) { thread_act_t inc; @@ -128,8 +123,8 @@ kern_return_t get_signalact(task_t task,thread_act_t * thact, thread_t * thshut, } thr_act = THR_ACT_NULL; - for (inc = (thread_act_t)queue_first(&task->thr_acts); - inc != (thread_act_t)&task->thr_acts; + for (inc = (thread_act_t)queue_first(&task->threads); + !queue_end(&task->threads, (queue_entry_t)inc); inc = ninc) { th = act_lock_thread(inc); if ((inc->active) && @@ -138,14 +133,11 @@ kern_return_t get_signalact(task_t task,thread_act_t * thact, thread_t * thshut, break; } act_unlock_thread(inc); - ninc = (thread_act_t)queue_next(&inc->thr_acts); + ninc = (thread_act_t)queue_next(&inc->task_threads); } out: if (thact) *thact = thr_act; - - if (thshut) - *thshut = thr_act? 
thr_act->thread: THREAD_NULL ; if (thr_act) { if (setast) act_set_astbsd(thr_act); @@ -161,7 +153,7 @@ out: } -kern_return_t check_actforsig(task_t task, thread_act_t thact, thread_t * thshut, int setast) +kern_return_t check_actforsig(task_t task, thread_act_t thact, int setast) { thread_act_t inc; @@ -177,12 +169,12 @@ kern_return_t check_actforsig(task_t task, thread_act_t thact, thread_t * thshut } thr_act = THR_ACT_NULL; - for (inc = (thread_act_t)queue_first(&task->thr_acts); - inc != (thread_act_t)&task->thr_acts; + for (inc = (thread_act_t)queue_first(&task->threads); + !queue_end(&task->threads, (queue_entry_t)inc); inc = ninc) { if (inc != thact) { - ninc = (thread_act_t)queue_next(&inc->thr_acts); + ninc = (thread_act_t)queue_next(&inc->task_threads); continue; } th = act_lock_thread(inc); @@ -198,8 +190,6 @@ kern_return_t check_actforsig(task_t task, thread_act_t thact, thread_t * thshut } out: if (found) { - if (thshut) - *thshut = thr_act? thr_act->thread: THREAD_NULL ; if (setast) act_set_astbsd(thr_act); @@ -231,41 +221,42 @@ ipc_space_t get_task_ipcspace(task_t t) int get_task_numacts(task_t t) { - return(t->thr_act_count); + return(t->thread_count); +} + +/* does this machine need 64bit register set for signal handler */ +int is_64signalregset(void) +{ + task_t t = current_task(); + if(t->taskFeatures[0] & tf64BitData) + return(1); + else + return(0); } /* - * Reset the current task's map by taking a reference - * on the new map. The old map reference is returned. + * The old map reference is returned. */ vm_map_t swap_task_map(task_t task,vm_map_t map) { + thread_act_t act = current_act(); vm_map_t old_map; - vm_map_reference(map); + if (task != act->task) + panic("swap_task_map"); + task_lock(task); old_map = task->map; - task->map = map; + act->map = task->map = map; task_unlock(task); return old_map; } -/* - * Reset the current act map. - * The caller donates us a reference to the new map - * and we donote our reference to the old map to him. 
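Note the contract change in swap_task_map() above: the internal vm_map_reference() call is gone, so the caller now donates its reference to the new map and inherits the reference on the old one. A usage sketch (new_map is assumed to be a map the caller already holds a reference on and wants to keep using):

	vm_map_t old_map;

	vm_map_reference(new_map);		/* this reference is donated to the swap */
	old_map = swap_task_map(current_task(), new_map);
	vm_map_deallocate(old_map);		/* drop the reference handed back */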
- */ vm_map_t swap_act_map(thread_act_t thr_act,vm_map_t map) { - vm_map_t old_map; - - act_lock(thr_act); - old_map = thr_act->map; - thr_act->map = map; - act_unlock(thr_act); - return old_map; + panic("swap_act_map"); } /* @@ -303,36 +294,29 @@ boolean_t is_thread_idle(thread_t th) /* * */ -boolean_t is_thread_running(thread_act_t thact) +boolean_t is_thread_running(thread_t th) { - thread_t th = thact->thread; return((th->state & TH_RUN) == TH_RUN); } /* * */ -thread_shuttle_t +thread_t getshuttle_thread( - thread_act_t th) + thread_t th) { -#ifdef DEBUG - assert(th->thread); -#endif - return(th->thread); + return(th); } /* * */ -thread_act_t +thread_t getact_thread( - thread_shuttle_t th) + thread_t th) { -#ifdef DEBUG - assert(th->top_act); -#endif - return(th->top_act); + return(th); } /* @@ -370,7 +354,8 @@ get_vmsubmap_entries( int total_entries = 0; vm_map_entry_t entry; - vm_map_lock(map); + if (not_in_kdp) + vm_map_lock(map); entry = vm_map_first_entry(map); while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) { entry = entry->vme_next; @@ -388,7 +373,8 @@ get_vmsubmap_entries( } entry = entry->vme_next; } - vm_map_unlock(map); + if (not_in_kdp) + vm_map_unlock(map); return(total_entries); } @@ -399,7 +385,8 @@ get_vmmap_entries( int total_entries = 0; vm_map_entry_t entry; - vm_map_lock(map); + if (not_in_kdp) + vm_map_lock(map); entry = vm_map_first_entry(map); while(entry != vm_map_to_entry(map)) { @@ -414,7 +401,8 @@ get_vmmap_entries( } entry = entry->vme_next; } - vm_map_unlock(map); + if (not_in_kdp) + vm_map_unlock(map); return(total_entries); } @@ -446,9 +434,9 @@ get_thread_userstop( */ boolean_t thread_should_abort( - thread_shuttle_t th) + thread_t th) { - return(!th->top_act || !th->top_act->active || + return(!th->top_act || (th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT); } @@ -494,10 +482,10 @@ task_act_iterate_wth_args( thread_act_t inc, ninc; task_lock(task); - for (inc = (thread_act_t)queue_first(&task->thr_acts); - inc != (thread_act_t)&task->thr_acts; + for (inc = (thread_act_t)queue_first(&task->threads); + !queue_end(&task->threads, (queue_entry_t)inc); inc = ninc) { - ninc = (thread_act_t)queue_next(&inc->thr_acts); + ninc = (thread_act_t)queue_next(&inc->task_threads); (void) (*func_callback)(inc, func_arg); } task_unlock(task); @@ -512,14 +500,14 @@ ipc_port_release( boolean_t is_thread_active( - thread_shuttle_t th) + thread_t th) { return(th->active); } kern_return_t get_thread_waitresult( - thread_shuttle_t th) + thread_t th) { return(th->wait_result); } diff --git a/osfmk/kern/clock.c b/osfmk/kern/clock.c index 8f7480979..79b348c77 100644 --- a/osfmk/kern/clock.c +++ b/osfmk/kern/clock.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -56,8 +57,6 @@ #include #include -#include - /* * Exported interface */ @@ -74,12 +73,12 @@ static long alrm_seqno; /* uniquely identifies alarms */ static thread_call_data_t alarm_deliver; decl_simple_lock_data(static,calend_adjlock) -static int64_t calend_adjtotal; -static uint32_t calend_adjdelta; static timer_call_data_t calend_adjcall; static uint64_t calend_adjinterval, calend_adjdeadline; +static thread_call_data_t calend_wakecall; + /* backwards compatibility */ int hz = HZ; /* GET RID OF THIS !!! */ int tick = (1000000 / HZ); /* GET RID OF THIS !!! 
*/ @@ -110,10 +109,15 @@ void clock_alarm_deliver( thread_call_param_t p1); static -void clock_calend_adjust( +void calend_adjust_call( timer_call_param_t p0, timer_call_param_t p1); +static +void calend_dowakeup( + thread_call_param_t p0, + thread_call_param_t p1); + /* * Macros to lock/unlock clock system. */ @@ -138,11 +142,17 @@ clock_config(void) if (cpu_number() != master_cpu) panic("clock_config"); + simple_lock_init(&ClockLock, ETAP_MISC_CLOCK); + thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL); + + simple_lock_init(&calend_adjlock, ETAP_MISC_CLOCK); + timer_call_setup(&calend_adjcall, calend_adjust_call, NULL); + + thread_call_setup(&calend_wakecall, calend_dowakeup, NULL); + /* * Configure clock devices. */ - simple_lock_init(&calend_adjlock, ETAP_MISC_CLOCK); - simple_lock_init(&ClockLock, ETAP_MISC_CLOCK); for (i = 0; i < clock_count; i++) { clock = &clock_list[i]; if (clock->cl_ops) { @@ -174,6 +184,18 @@ clock_init(void) } } +/* + * Called by machine dependent code + * to initialize areas dependent on the + * timebase value. May be called multiple + * times during start up. + */ +void +clock_timebase_init(void) +{ + sched_timebase_init(); +} + /* * Initialize the clock ipc service facility. */ @@ -183,8 +205,6 @@ clock_service_create(void) clock_t clock; register int i; - mk_timer_initialize(); - /* * Initialize ipc clock services. */ @@ -196,15 +216,12 @@ clock_service_create(void) } } - timer_call_setup(&calend_adjcall, clock_calend_adjust, NULL); - /* - * Initialize clock service alarms. + * Perform miscellaneous late + * initialization. */ i = sizeof(struct alarm); alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms"); - - thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL); } /* @@ -294,16 +311,11 @@ clock_set_time( mach_timespec_t *clock_time; kern_return_t (*settime)( mach_timespec_t *clock_time); - extern kern_return_t - calend_settime( - mach_timespec_t *clock_time); if (clock == CLOCK_NULL) return (KERN_INVALID_ARGUMENT); if ((settime = clock->cl_ops->c_settime) == 0) return (KERN_FAILURE); - if (settime == calend_settime) - return (KERN_FAILURE); clock_time = &new_time; if (BAD_MACH_TIMESPEC(clock_time)) return (KERN_INVALID_VALUE); @@ -805,15 +817,6 @@ clock_get_calendar_value(void) return value; } -void -clock_set_calendar_value( - mach_timespec_t value) -{ - clock_t clock = &clock_list[CALENDAR_CLOCK]; - - (void) (*clock->cl_ops->c_settime)(&value); -} - void clock_deadline_for_periodic_event( uint64_t interval, @@ -825,14 +828,11 @@ clock_deadline_for_periodic_event( *deadline += interval; if (*deadline <= abstime) { - *deadline = abstime; - clock_get_uptime(&abstime); - *deadline += interval; + *deadline = abstime + interval; + abstime = mach_absolute_time(); - if (*deadline <= abstime) { - *deadline = abstime; - *deadline += interval; - } + if (*deadline <= abstime) + *deadline = abstime + interval; } } @@ -888,90 +888,48 @@ mach_wait_until( return ((wait_result == THREAD_INTERRUPTED)? 
KERN_ABORTED: KERN_SUCCESS); } -int64_t -clock_set_calendar_adjtime( - int64_t total, - uint32_t delta) +void +clock_adjtime( + int32_t *secs, + int32_t *microsecs) { - int64_t ototal; - spl_t s; + uint32_t interval; + spl_t s; s = splclock(); simple_lock(&calend_adjlock); - if (calend_adjinterval == 0) - clock_interval_to_absolutetime_interval(10000, NSEC_PER_USEC, - &calend_adjinterval); - - ototal = calend_adjtotal; - - if (total != 0) { - uint64_t abstime; - - if (total > 0) { - if (delta > total) - delta = total; - } - else { - if (delta > -total) - delta = -total; - } - - calend_adjtotal = total; - calend_adjdelta = delta; - - if (calend_adjdeadline >= calend_adjinterval) - calend_adjdeadline -= calend_adjinterval; - clock_get_uptime(&abstime); - clock_deadline_for_periodic_event(calend_adjinterval, abstime, - &calend_adjdeadline); + interval = clock_set_calendar_adjtime(secs, microsecs); + if (interval != 0) { + if (calend_adjdeadline >= interval) + calend_adjdeadline -= interval; + clock_deadline_for_periodic_event(interval, mach_absolute_time(), + &calend_adjdeadline); timer_call_enter(&calend_adjcall, calend_adjdeadline); } - else { - calend_adjtotal = 0; - + else timer_call_cancel(&calend_adjcall); - } simple_unlock(&calend_adjlock); splx(s); - - return (ototal); } static void -clock_calend_adjust( +calend_adjust_call( timer_call_param_t p0, timer_call_param_t p1) { + uint32_t interval; spl_t s; s = splclock(); simple_lock(&calend_adjlock); - if (calend_adjtotal > 0) { - clock_adjust_calendar((clock_res_t)calend_adjdelta); - calend_adjtotal -= calend_adjdelta; - - if (calend_adjdelta > calend_adjtotal) - calend_adjdelta = calend_adjtotal; - } - else - if (calend_adjtotal < 0) { - clock_adjust_calendar(-(clock_res_t)calend_adjdelta); - calend_adjtotal += calend_adjdelta; - - if (calend_adjdelta > -calend_adjtotal) - calend_adjdelta = -calend_adjtotal; - } - - if (calend_adjtotal != 0) { - uint64_t abstime; - - clock_get_uptime(&abstime); - clock_deadline_for_periodic_event(calend_adjinterval, abstime, - &calend_adjdeadline); + interval = clock_adjust_calendar(); + if (interval != 0) { + clock_deadline_for_periodic_event(interval, mach_absolute_time(), + &calend_adjdeadline); timer_call_enter(&calend_adjcall, calend_adjdeadline); } @@ -979,3 +937,19 @@ clock_calend_adjust( simple_unlock(&calend_adjlock); splx(s); } + +void +clock_wakeup_calendar(void) +{ + thread_call_enter(&calend_wakecall); +} + +static void +calend_dowakeup( + thread_call_param_t p0, + thread_call_param_t p1) +{ + void IOKitResetTime(void); + + IOKitResetTime(); +} diff --git a/osfmk/kern/clock.h b/osfmk/kern/clock.h index b96848a08..dd32f211b 100644 --- a/osfmk/kern/clock.h +++ b/osfmk/kern/clock.h @@ -132,11 +132,14 @@ typedef struct clock clock_data_t; * Configure the clock system. */ extern void clock_config(void); + /* * Initialize the clock system. */ extern void clock_init(void); +extern void clock_timebase_init(void); + /* * Initialize the clock ipc service facility. 
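calend_dowakeup() above is the standard shape for deferring work that must not run in the wake path: queue a thread call, and do the heavy lifting (here IOKitResetTime()) later in thread context. The pattern in isolation, as a sketch:

	static thread_call_data_t	wakecall;

	static void
	do_wake_work(thread_call_param_t p0, thread_call_param_t p1)
	{
		/* runs later, in thread context, where blocking is allowed */
	}

	/* once, at initialization: */
	thread_call_setup(&wakecall, do_wake_work, NULL);

	/* from the wake path, cheap and interrupt-safe: */
	thread_call_enter(&wakecall);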
*/ @@ -172,23 +175,46 @@ extern void mk_timebase_info( uint32_t *proc_to_abs_numer, uint32_t *proc_to_abs_denom); -extern void clock_adjust_calendar( - clock_res_t nsec); +extern uint32_t clock_set_calendar_adjtime( + int32_t *secs, + int32_t *microsecs); -extern mach_timespec_t - clock_get_calendar_offset(void); +extern uint32_t clock_adjust_calendar(void); #endif /* MACH_KERNEL_PRIVATE */ -extern void clock_set_calendar_value( - mach_timespec_t value); +extern void clock_get_calendar_microtime( + uint32_t *secs, + uint32_t *microsecs); + +extern void clock_get_calendar_nanotime( + uint32_t *secs, + uint32_t *nanosecs); + +extern void clock_set_calendar_microtime( + uint32_t secs, + uint32_t microsecs); -extern int64_t clock_set_calendar_adjtime( - int64_t total, - uint32_t delta); +extern void clock_get_system_microtime( + uint32_t *secs, + uint32_t *microsecs); + +extern void clock_get_system_nanotime( + uint32_t *secs, + uint32_t *nanosecs); + +extern void clock_adjtime( + int32_t *secs, + int32_t *microsecs); extern void clock_initialize_calendar(void); +extern void clock_wakeup_calendar(void); + +extern void clock_gettimeofday( + uint32_t *secs, + uint32_t *microsecs); + #endif /* __APPLE_API_PRIVATE */ #ifdef __APPLE_API_UNSTABLE diff --git a/osfmk/kern/cpu_data.h b/osfmk/kern/cpu_data.h index 9a2ffafc6..3b32a875c 100644 --- a/osfmk/kern/cpu_data.h +++ b/osfmk/kern/cpu_data.h @@ -38,18 +38,6 @@ #include #include -typedef struct -{ - thread_t active_thread; - int preemption_level; - int simple_lock_count; - int interrupt_level; -#ifdef __I386__ - int cpu_number; /* Logical CPU number */ - int cpu_phys_number; /* Physical CPU Number */ -#endif -} cpu_data_t; - #include #else /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/kern/debug.c b/osfmk/kern/debug.c index f698c52cf..d5ddb8b1a 100644 --- a/osfmk/kern/debug.c +++ b/osfmk/kern/debug.c @@ -79,16 +79,16 @@ unsigned int active_debugger = 0; unsigned int debug_mode=0; unsigned int disableDebugOuput = TRUE; unsigned int systemLogDiags = FALSE; -unsigned int panicDebugging = FALSE; +unsigned int logPanicDataToScreen = FALSE; #ifdef __ppc__ - unsigned int logPanicDataToScreen = FALSE; + unsigned int panicDebugging = FALSE; #else - unsigned int logPanicDataToScreen = TRUE; + unsigned int panicDebugging = TRUE; #endif int mach_assert = 1; -const char *panicstr; +const char *panicstr = (char *) 0; decl_simple_lock_data(,panic_lock) int paniccpu; volatile int panicwait; @@ -148,6 +148,7 @@ panic(const char *str, ...) thread_t thread; s = splhigh(); + disable_preemption(); #ifdef __ppc__ lastTrace = LLTraceSet(0); /* Disable low-level tracing */ @@ -157,8 +158,6 @@ panic(const char *str, ...) 
 	save_waits[cpu_number()] = thread->wait_queue;	/* Save the old value */
 	thread->wait_queue = 0;	/* Clear the wait so we do not get double panics when we try locks */
 
-	mp_disable_preemption();
-
 	if( logPanicDataToScreen )
 		disableDebugOuput = FALSE;
 
@@ -179,8 +178,6 @@ restart:
 		nestedpanic +=1;
 		PANIC_UNLOCK();
 		Debugger("double panic");
-		mp_enable_preemption();
-		splx(s);
 		printf("double panic: We are hanging here...\n");
 		while(1);
 		/* NOTREACHED */
@@ -208,11 +205,12 @@ restart:
 	PANIC_LOCK();
 	panicstr = (char *)0;
 	PANIC_UNLOCK();
-	mp_enable_preemption();
-	splx(s);
 	thread->wait_queue = save_waits[cpu_number()];	/* Restore the wait queue */
-	if (return_on_panic)
+	if (return_on_panic) {
+		enable_preemption();
+		splx(s);
 		return;
+	}
 	kdb_printf("panic: We are hanging here...\n");
 	while(1);
 	/* NOTREACHED */
diff --git a/osfmk/kern/debug.h b/osfmk/kern/debug.h
index a2d96f047..81cf13be0 100644
--- a/osfmk/kern/debug.h
+++ b/osfmk/kern/debug.h
@@ -45,9 +45,9 @@ extern unsigned int current_debugger;
 extern unsigned int active_debugger;
 extern unsigned int debug_mode;
 
-extern unsigned int disableDebugOuput;
+extern unsigned int	disableDebugOuput;
 
-extern unsigned int panicDebugging;
+extern unsigned int	panicDebugging;
 extern unsigned int logPanicDataToScreen;
 
 extern int db_run_mode;
@@ -85,7 +85,12 @@ extern void debug_putc(char);
 #define DB_ARP			0x40
 #define DB_KDP_BP_DIS		0x80
 #define DB_LOG_PI_SCRN		0x100
+#define DB_KDP_GETC_ENA		0x200
+#define DB_KERN_DUMP_ON_PANIC	0x400	/* Trigger core dump on panic */
+#define DB_KERN_DUMP_ON_NMI	0x800	/* Trigger core dump on NMI */
+#define DB_DBG_POST_CORE	0x1000	/* Wait in debugger after NMI core */
+#define DB_PANICLOG_DUMP	0x2000	/* Send paniclog on panic, not core */
 
 #endif	/* __APPLE_API_PRIVATE */
 
 #endif	/* _KERN_DEBUG_H_ */
diff --git a/osfmk/kern/exception.c b/osfmk/kern/exception.c
index b6c703180..75a4df259 100644
--- a/osfmk/kern/exception.c
+++ b/osfmk/kern/exception.c
@@ -55,6 +55,7 @@
 
 #include
+#include
 #include
 #include
 #include
@@ -170,7 +171,7 @@ exception_deliver(
 	switch (behavior) {
 	case EXCEPTION_STATE: {
 		mach_msg_type_number_t state_cnt;
-		natural_t state[ THREAD_MACHINE_STATE_MAX ];
+		thread_state_data_t state;
 
 		c_thr_exc_raise_state++;
 		state_cnt = state_count[flavor];
@@ -210,7 +211,7 @@ exception_deliver(
 	case EXCEPTION_STATE_IDENTITY: {
 		mach_msg_type_number_t state_cnt;
-		natural_t state[ THREAD_MACHINE_STATE_MAX ];
+		thread_state_data_t state;
 
 		c_thr_exc_raise_state_id++;
 		state_cnt = state_count[flavor];
@@ -379,7 +380,7 @@ bsd_exception(
 	switch (behavior) {
 	case EXCEPTION_STATE: {
 		mach_msg_type_number_t state_cnt;
-		natural_t state[ THREAD_MACHINE_STATE_MAX ];
+		thread_state_data_t state;
 
 		c_thr_exc_raise_state++;
 		state_cnt = state_count[flavor];
@@ -418,7 +419,7 @@ bsd_exception(
 	case EXCEPTION_STATE_IDENTITY: {
 		mach_msg_type_number_t state_cnt;
-		natural_t state[ THREAD_MACHINE_STATE_MAX ];
+		thread_state_data_t state;
 
 		c_thr_exc_raise_state_id++;
 		state_cnt = state_count[flavor];
@@ -452,3 +453,67 @@ bsd_exception(
 
 	return(KERN_FAILURE);
 }
+
+
+
+/*
+ *	Handle interface for special performance monitoring
+ *	This is a special case of the host exception handler
+ */
+
+kern_return_t sys_perf_notify(struct task *task,
+	exception_data_t code,
+	mach_msg_type_number_t codeCnt)
+{
+	host_priv_t		hostp;
+	struct exception_action *excp;
+	thread_act_t	act = current_act();
+	thread_t		thr = current_thread();
+	ipc_port_t		xport;
+	kern_return_t	ret;
+	int				abrt;
+	spl_t			ints;
+	wait_interrupt_t	wsave;
+
+	hostp = host_priv_self();				/* Get the host
privileged ports */ + excp = &hostp->exc_actions[EXC_RPC_ALERT]; /* Point to the RPC_ALERT action */ + + mutex_lock(&hostp->lock); /* Lock the priv port */ + xport = excp->port; /* Get the port for this exception */ + if (!IP_VALID(xport)) { /* Is it valid? */ + mutex_unlock(&hostp->lock); /* Unlock */ + return(KERN_FAILURE); /* Go away... */ + } + + ip_lock(xport); /* Lock the exception port */ + if (!ip_active(xport)) { /* and is it active? */ + ip_unlock(xport); /* Nope, fail */ + mutex_unlock(&hostp->lock); /* Unlock */ + return(KERN_FAILURE); /* Go away... */ + } + + if (task->itk_space == xport->data.receiver) { /* Are we trying to send to ourselves? */ + ip_unlock(xport); /* Yes, fail */ + mutex_unlock(&hostp->lock); /* Unlock */ + return(KERN_FAILURE); /* Go away... */ + } + + ip_reference(xport); /* Bump reference so it doesn't go away */ + xport->ip_srights++; /* Bump send rights */ + ip_unlock(xport); /* We can unlock it now */ + + mutex_unlock(&hostp->lock); /* All done with the lock */ + + wsave = thread_interrupt_level(THREAD_UNINT); /* Make sure we aren't aborted here */ + + ret = exception_raise(xport, /* Send the exception to the perf handler */ + retrieve_act_self_fast(act), /* Not always the dying guy */ + retrieve_task_self_fast(act->task), /* Not always the dying guy */ + EXC_RPC_ALERT, /* Unused exception type until now */ + code, codeCnt); + + (void)thread_interrupt_level(wsave); /* Restore interrupt level */ + + return(ret); /* Tell caller how it went */ +} + diff --git a/osfmk/kern/exception.h b/osfmk/kern/exception.h index a75a36c76..e7a0225b5 100644 --- a/osfmk/kern/exception.h +++ b/osfmk/kern/exception.h @@ -50,4 +50,9 @@ extern void exception( exception_data_t code, mach_msg_type_number_t codeCnt); +/* Notify system performance monitor */ +extern kern_return_t sys_perf_notify(struct task *task, + exception_data_t code, + mach_msg_type_number_t codeCnt); + #endif /* _EXCEPTION_H_ */ diff --git a/osfmk/kern/host.c b/osfmk/kern/host.c index b442135eb..09612e730 100644 --- a/osfmk/kern/host.c +++ b/osfmk/kern/host.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -70,6 +70,7 @@ #include #include #include +#include #include #include #include @@ -514,7 +515,7 @@ host_processor_info( if (machine_slot[i].is_cpu) num++; - size = (vm_size_t)round_page(num * count * sizeof(natural_t)); + size = (vm_size_t)round_page_32(num * count * sizeof(natural_t)); kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE); if (kr != KERN_SUCCESS) @@ -559,44 +560,24 @@ host_processor_info( return(KERN_SUCCESS); } - -/* - * host_get_io_master - * - * Return the IO master access port for this host. - */ -kern_return_t -host_get_io_master( - host_t host, - io_master_t *io_master) -{ - if (host == HOST_NULL) - return KERN_INVALID_ARGUMENT; - *io_master = ipc_port_copy_send(realhost.io_master); - return KERN_SUCCESS; -} - -#define io_master_deallocate(x) - /* - * host_get_io_master - * - * Return the IO master access port for this host. + * Kernel interface for setting a special port. 
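Usage note for the accessor defined just below: kernel_set_special_port() consumes the send right it is given and releases whatever right previously occupied the slot, so callers typically mint the right inline (HOST_PRIV_PORT is used purely as an example id; port is assumed already allocated and typed):

	kernel_set_special_port(&realhost, HOST_PRIV_PORT,
				ipc_port_make_send(port));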
*/ kern_return_t -host_set_io_master( - host_priv_t host_priv, - io_master_t io_master) +kernel_set_special_port( + host_priv_t host_priv, + int id, + ipc_port_t port) { - io_master_t old_master; - - if (host_priv == HOST_PRIV_NULL) - return KERN_INVALID_ARGUMENT; - - old_master = realhost.io_master; - realhost.io_master = io_master; - io_master_deallocate(old_master); - return KERN_SUCCESS; + ipc_port_t old_port; + + host_lock(host_priv); + old_port = host_priv->special[id]; + host_priv->special[id] = port; + host_unlock(host_priv); + if (IP_VALID(old_port)) + ipc_port_release_send(old_port); + return KERN_SUCCESS; } /* @@ -614,27 +595,20 @@ host_set_special_port( int id, ipc_port_t port) { -#if DIPC - return norma_set_special_port(host_priv, id, port); -#else - return KERN_FAILURE; -#endif + if (host_priv == HOST_PRIV_NULL || + id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT ) { + if (IP_VALID(port)) + ipc_port_release_send(port); + return KERN_INVALID_ARGUMENT; + } + + return kernel_set_special_port(host_priv, id, port); } /* * User interface for retrieving a special port. * - * When all processing is local, this call does not block. - * If processing goes remote to discover a remote UID, - * this call blocks but not indefinitely. If the remote - * node does not exist, has panic'ed, or is booting but - * hasn't yet turned on DIPC, then we expect the transport - * to return an error. - * - * This routine always returns SUCCESS, even if there's - * no resulting port. - * * Note that there is nothing to prevent a user special * port from disappearing after it has been discovered by * the caller; thus, using a special port can always result @@ -648,11 +622,40 @@ host_get_special_port( int id, ipc_port_t *portp) { + ipc_port_t port; + + if (host_priv == HOST_PRIV_NULL || + id == HOST_SECURITY_PORT ) + return KERN_INVALID_ARGUMENT; + #if DIPC - return norma_get_special_port(host_priv, node, id, portp); -#else - return KERN_FAILURE; + if (node != HOST_LOCAL_NODE) + return norma_get_special_port(host_priv, node, id, portp); #endif + + host_lock(host_priv); + port = realhost.special[id]; + *portp = ipc_port_copy_send(port); + host_unlock(host_priv); + + return KERN_SUCCESS; +} + + +/* + * host_get_io_master + * + * Return the IO master access port for this host. + */ +kern_return_t +host_get_io_master( + host_t host, + io_master_t *io_masterp) +{ + if (host == HOST_NULL) + return KERN_INVALID_ARGUMENT; + + return (host_get_io_master_port(host_priv_self(), io_masterp)); } host_t diff --git a/osfmk/kern/host.h b/osfmk/kern/host.h index bf3800d50..16fc1208d 100644 --- a/osfmk/kern/host.h +++ b/osfmk/kern/host.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -71,15 +71,13 @@ #include #include #include +#include #include struct host { decl_mutex_data(,lock) /* lock to protect exceptions */ - ipc_port_t host_self; - ipc_port_t host_priv_self; - ipc_port_t host_security_self; - ipc_port_t io_master; + ipc_port_t special[HOST_MAX_SPECIAL_PORT + 1]; struct exception_action exc_actions[EXC_TYPES_COUNT]; }; diff --git a/osfmk/kern/host_notify.c b/osfmk/kern/host_notify.c new file mode 100644 index 000000000..c18bea534 --- /dev/null +++ b/osfmk/kern/host_notify.c @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. 
All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 16 January 2003 (debo) + * Created. + */ + +#include + +#include +#include + +#include + +#include "mach/host_notify_reply.h" + +static zone_t host_notify_zone; +decl_mutex_data(static,host_notify_lock) + +static queue_head_t host_notify_queue[HOST_NOTIFY_TYPE_MAX+1]; + +static mach_msg_id_t host_notify_replyid[HOST_NOTIFY_TYPE_MAX+1] = + { HOST_CALENDAR_CHANGED_REPLYID }; + +struct host_notify_entry { + queue_chain_t entries; + ipc_port_t port; +}; + +typedef struct host_notify_entry *host_notify_t; + +void +host_notify_init(void) +{ + int i; + + for (i = 0; i <= HOST_NOTIFY_TYPE_MAX; i++) + queue_init(&host_notify_queue[i]); + + mutex_init(&host_notify_lock, ETAP_MISC_EVENT); + + i = sizeof (struct host_notify_entry); + host_notify_zone = + zinit(i, (4096 * i), (16 * i), "host_notify"); +} + +kern_return_t +host_request_notification( + host_t host, + host_flavor_t notify_type, + ipc_port_t port) +{ + host_notify_t entry; + + if (host == HOST_NULL) + return (KERN_INVALID_ARGUMENT); + + if (!IP_VALID(port)) + return (KERN_INVALID_CAPABILITY); + + if (notify_type > HOST_NOTIFY_TYPE_MAX || notify_type < 0) + return (KERN_INVALID_ARGUMENT); + + entry = (host_notify_t)zalloc(host_notify_zone); + if (entry == NULL) + return (KERN_RESOURCE_SHORTAGE); + + mutex_lock(&host_notify_lock); + + ip_lock(port); + if (!ip_active(port) || ip_kotype(port) != IKOT_NONE) { + ip_unlock(port); + + mutex_unlock(&host_notify_lock); + zfree(host_notify_zone, (vm_offset_t)entry); + + return (KERN_FAILURE); + } + + entry->port = port; + ipc_kobject_set_atomically(port, (ipc_kobject_t)entry, IKOT_HOST_NOTIFY); + ip_unlock(port); + + enqueue_tail(&host_notify_queue[notify_type], (queue_entry_t)entry); + mutex_unlock(&host_notify_lock); + + return (KERN_SUCCESS); +} + +void +host_notify_port_destroy( + ipc_port_t port) +{ + host_notify_t entry; + + mutex_lock(&host_notify_lock); + + ip_lock(port); + if (ip_kotype(port) == IKOT_HOST_NOTIFY) { + entry = (host_notify_t)port->ip_kobject; + assert(entry != NULL); + ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE); + ip_unlock(port); + + assert(entry->port == port); + remqueue(NULL, (queue_entry_t)entry); + mutex_unlock(&host_notify_lock); + zfree(host_notify_zone, (vm_offset_t)entry); + + ipc_port_release_sonce(port); + return; + } + ip_unlock(port); + + mutex_unlock(&host_notify_lock); +} + +static void +host_notify_all( + host_flavor_t notify_type, + mach_msg_header_t *msg, + mach_msg_size_t msg_size) +{ + queue_t notify_queue = &host_notify_queue[notify_type]; + + 
mutex_lock(&host_notify_lock); + + if (!queue_empty(notify_queue)) { + queue_head_t send_queue; + host_notify_t entry; + + send_queue = *notify_queue; + queue_init(notify_queue); + + send_queue.next->prev = &send_queue; + send_queue.prev->next = &send_queue; + + msg->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0); + msg->msgh_local_port = MACH_PORT_NULL; + msg->msgh_id = host_notify_replyid[notify_type]; + msg->msgh_reserved = 0; + + while ((entry = (host_notify_t)dequeue(&send_queue)) != NULL) { + ipc_port_t port; + + port = entry->port; + assert(port != IP_NULL); + + ip_lock(port); + assert(ip_kotype(port) == IKOT_HOST_NOTIFY); + assert(port->ip_kobject == (ipc_kobject_t)entry); + ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE); + ip_unlock(port); + + mutex_unlock(&host_notify_lock); + zfree(host_notify_zone, (vm_offset_t)entry); + + msg->msgh_remote_port = port; + + (void) mach_msg_send_from_kernel(msg, msg_size); + + mutex_lock(&host_notify_lock); + } + } + + mutex_unlock(&host_notify_lock); +} + +void +host_notify_calendar_change(void) +{ + __Request__host_calendar_changed_t msg; + + host_notify_all(HOST_NOTIFY_CALENDAR_CHANGE, &msg.Head, sizeof (msg)); +} diff --git a/osfmk/kern/host_notify.h b/osfmk/kern/host_notify.h new file mode 100644 index 000000000..6b99ac916 --- /dev/null +++ b/osfmk/kern/host_notify.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 14 January 2003 (debo) + * Created. + */ + +#ifndef _KERN_HOST_NOTIFY_H_ +#define _KERN_HOST_NOTIFY_H_ + +#ifdef MACH_KERNEL_PRIVATE +#include + +void host_notify_port_destroy( + ipc_port_t port); + +void host_notify_calendar_change(void); + +void host_notify_init(void); + +#endif /* MACH_KERNEL_PRIVATE */ + +#endif /* _KERN_HOST_NOTIFY_H_ */ diff --git a/osfmk/kern/ipc_host.c b/osfmk/kern/ipc_host.c index 23d830ffd..58c2dc6bc 100644 --- a/osfmk/kern/ipc_host.c +++ b/osfmk/kern/ipc_host.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
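Taken together, host_notify.c and host_notify.h above give user space a one-shot subscription: register a port, receive exactly one message on the next calendar change (the entry is dequeued as it is delivered). A client-side sketch, assuming the MIG-generated host_request_notification() wrapper of this era (error handling elided):

	mach_port_t notify_port;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
			   &notify_port);
	host_request_notification(mach_host_self(),
				  HOST_NOTIFY_CALENDAR_CHANGE, notify_port);
	/* the next calendar change delivers one message with id
	   HOST_CALENDAR_CHANGED_REPLYID on notify_port */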
* * @APPLE_LICENSE_HEADER_START@ * @@ -108,24 +108,27 @@ void ipc_host_init(void) if (port == IP_NULL) panic("ipc_host_init"); - ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST); - realhost.host_self = port; + ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_SECURITY); + kernel_set_special_port(&realhost, HOST_SECURITY_PORT, + ipc_port_make_send(port)); port = ipc_port_alloc_kernel(); if (port == IP_NULL) panic("ipc_host_init"); - ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_PRIV); - realhost.host_priv_self = port; + ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST); + kernel_set_special_port(&realhost, HOST_PORT, + ipc_port_make_send(port)); port = ipc_port_alloc_kernel(); if (port == IP_NULL) panic("ipc_host_init"); - ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_SECURITY); - realhost.host_security_self = port; + ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_PRIV); + kernel_set_special_port(&realhost, HOST_PRIV_PORT, + ipc_port_make_send(port)); - realhost.io_master = IP_NULL; + /* the rest of the special ports will be set up later */ for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { realhost.exc_actions[i].port = IP_NULL; @@ -504,8 +507,7 @@ convert_host_to_port( { ipc_port_t port; - port = ipc_port_make_send(host->host_self); - + host_get_host_port(host, &port); return port; } diff --git a/osfmk/kern/ipc_kobject.c b/osfmk/kern/ipc_kobject.c index 06898cc5f..3c9304e5a 100644 --- a/osfmk/kern/ipc_kobject.c +++ b/osfmk/kern/ipc_kobject.c @@ -77,8 +77,9 @@ #include #include #include -#include +#include #include +#include #include #include #include @@ -492,7 +493,11 @@ ipc_kobject_destroy( mach_destroy_memory_entry(port); break; - default: /* XXX (bogon) */ + case IKOT_HOST_NOTIFY: + host_notify_port_destroy(port); + break; + + default: break; } } diff --git a/osfmk/kern/ipc_kobject.h b/osfmk/kern/ipc_kobject.h index a3595a6a2..fbd49c435 100644 --- a/osfmk/kern/ipc_kobject.h +++ b/osfmk/kern/ipc_kobject.h @@ -94,7 +94,7 @@ typedef natural_t ipc_kobject_type_t; #define IKOT_XMM_KERNEL 13 #define IKOT_XMM_REPLY 14 #define IKOT_UND_REPLY 15 -/* NOT DEFINED 16 */ +#define IKOT_HOST_NOTIFY 16 #define IKOT_HOST_SECURITY 17 #define IKOT_LEDGER 18 #define IKOT_MASTER_DEVICE 19 diff --git a/osfmk/kern/ipc_tt.c b/osfmk/kern/ipc_tt.c index 96ff6602b..b9ccb0cbd 100644 --- a/osfmk/kern/ipc_tt.c +++ b/osfmk/kern/ipc_tt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -59,6 +59,7 @@ * Task and thread related IPC functions. 
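convert_host_to_port() above now mints its send right through host_get_host_port() instead of reading a host_self field that no longer exists; kernel code that cached realhost.host_self needs the same one-line treatment:

	ipc_port_t port;

	host_get_host_port(host_priv_self(), &port);	/* replaces realhost.host_self */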
*/ +#include #include #include #include @@ -113,14 +114,21 @@ ipc_task_init( task->itk_self = kport; task->itk_sself = ipc_port_make_send(kport); task->itk_space = space; - space->is_fast = task->kernel_loaded; + space->is_fast = FALSE; if (parent == TASK_NULL) { + ipc_port_t port; + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { task->exc_actions[i].port = IP_NULL; }/* for */ - task->itk_host = ipc_port_make_send(realhost.host_self); + + kr = host_get_host_port(host_priv_self(), &port); + assert(kr == KERN_SUCCESS); + task->itk_host = port; + task->itk_bootstrap = IP_NULL; + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) task->itk_registered[i] = IP_NULL; } else { @@ -250,6 +258,74 @@ ipc_task_terminate( ipc_port_dealloc_kernel(kport); } +/* + * Routine: ipc_task_reset + * Purpose: + * Reset a task's IPC state to protect it when + * it enters an elevated security context. + * Conditions: + * Nothing locked. The task must be suspended. + * (Or the current thread must be in the task.) + */ + +void +ipc_task_reset( + task_t task) +{ + ipc_port_t old_kport, new_kport; + ipc_port_t old_sself; +#if 0 + ipc_port_t old_exc_actions[EXC_TYPES_COUNT]; + int i; +#endif + + new_kport = ipc_port_alloc_kernel(); + if (new_kport == IP_NULL) + panic("ipc_task_reset"); + + itk_lock(task); + + old_kport = task->itk_self; + + if (old_kport == IP_NULL) { + /* the task is already terminated (can this happen?) */ + itk_unlock(task); + ipc_port_dealloc_kernel(new_kport); + return; + } + + task->itk_self = new_kport; + old_sself = task->itk_sself; + task->itk_sself = ipc_port_make_send(new_kport); + ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE); + ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK); + +#if 0 + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + old_exc_actions[i] = task->exc_action[i].port; + task->exc_actions[i].port = IP_NULL; + }/* for */ +#endif + + itk_unlock(task); + + /* release the naked send rights */ + + if (IP_VALID(old_sself)) + ipc_port_release_send(old_sself); + +#if 0 + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (IP_VALID(old_exc_actions[i])) { + ipc_port_release_send(old_exc_actions[i]); + } + }/* for */ +#endif + + /* destroy the kernel port */ + ipc_port_dealloc_kernel(old_kport); +} + /* * Routine: ipc_thread_init * Purpose: @@ -1091,16 +1167,15 @@ ref_act_port_locked( ipc_port_t port, thread_act_t *pthr_act ) assert(thr_act != THR_ACT_NULL); /* - * Normal lock ordering is act_lock(), then ip_lock(). - * Allow out-of-order locking here, using - * act_reference_act_locked() to accomodate it. + * Out of order locking here, normal + * ordering is act_lock(), then ip_lock(). 
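The comment above names the rule; the matching idiom, for anyone adding a similar path, is try-lock with full back-off (a sketch of the general shape, not the exact kernel code, which instead returns FALSE and lets the caller re-validate the act before retrying):

	ip_lock(port);
	while (!act_lock_try(thr_act)) {
		ip_unlock(port);	/* retreat: drop the lock held out of order */
		mutex_pause();		/* brief pause so the lock holder can run */
		ip_lock(port);		/* N.B. thr_act must be re-validated here */
	}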
*/ if (!act_lock_try(thr_act)) { ip_unlock(port); mutex_pause(); return (FALSE); } - act_locked_act_reference(thr_act); + act_reference_locked(thr_act); act_unlock(thr_act); } *pthr_act = thr_act; diff --git a/osfmk/kern/kalloc.c b/osfmk/kern/kalloc.c index 4f21c6008..7a135a654 100644 --- a/osfmk/kern/kalloc.c +++ b/osfmk/kern/kalloc.c @@ -438,8 +438,8 @@ krealloc( /* if old block was kmem_alloc'd, then use kmem_realloc if necessary */ if (old_size >= kalloc_max_prerounded) { - old_size = round_page(old_size); - new_size = round_page(new_size); + old_size = round_page_32(old_size); + new_size = round_page_32(new_size); if (new_size > old_size) { if (kmem_realloc(kalloc_map, *addrp, old_size, &naddr, diff --git a/osfmk/kern/kern_types.h b/osfmk/kern/kern_types.h index 34d5ed68f..0160c1754 100644 --- a/osfmk/kern/kern_types.h +++ b/osfmk/kern/kern_types.h @@ -119,17 +119,16 @@ typedef struct mig_object *mig_object_t; typedef struct mig_notify *mig_notify_t; #define MIG_NOTIFY_NULL ((mig_notify_t) 0) -typedef boolean_t (*thread_roust_t)(wait_result_t); /* how to roust it */ +typedef boolean_t (*thread_roust_t)(thread_t, wait_result_t); #define THREAD_ROUST_NULL ((thread_roust_t) 0) #endif /* __APPLE_API_EVOLVING */ #ifdef __APPLE_API_UNSTABLE -typedef struct thread_shuttle *thread_shuttle_t; -#define THREAD_SHUTTLE_NULL ((thread_shuttle_t)0) - /* legacy definitions - going away */ +typedef struct thread *thread_shuttle_t; +#define THREAD_SHUTTLE_NULL ((thread_shuttle_t)0) struct wait_queue_sub ; typedef struct wait_queue_sub *wait_queue_sub_t; #define WAIT_QUEUE_SUB_NULL ((wait_queue_sub_t)0) diff --git a/osfmk/kern/kmod.c b/osfmk/kern/kmod.c index 568896096..b2c5baecc 100644 --- a/osfmk/kern/kmod.c +++ b/osfmk/kern/kmod.c @@ -226,10 +226,14 @@ kmod_send_generic(int type, void *generic_data, int size) return kmod_queue_cmd((vm_address_t)data, size + sizeof(int)); } +extern vm_offset_t sectPRELINKB; +extern int sectSizePRELINK; + kern_return_t kmod_create_internal(kmod_info_t *info, kmod_t *id) { kern_return_t rc; + boolean_t isPrelink; if (!info) return KERN_INVALID_ADDRESS; @@ -238,10 +242,13 @@ kmod_create_internal(kmod_info_t *info, kmod_t *id) return KERN_INVALID_ADDRESS; } - rc = vm_map_wire(kernel_map, info->address + info->hdr_size, - info->address + info->size, VM_PROT_DEFAULT, FALSE); - if (rc != KERN_SUCCESS) { - return rc; + isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK))); + if (!isPrelink) { + rc = vm_map_wire(kernel_map, info->address + info->hdr_size, + info->address + info->size, VM_PROT_DEFAULT, FALSE); + if (rc != KERN_SUCCESS) { + return rc; + } } #if WRITE_PROTECT_MODULE_TEXT { @@ -253,16 +260,18 @@ kmod_create_internal(kmod_info_t *info, kmod_t *id) VM_PROT_READ|VM_PROT_EXECUTE, TRUE); } } -#endif +#endif /* WRITE_PROTECT_MODULE_TEXT */ simple_lock(&kmod_lock); // check to see if already loaded if (kmod_lookupbyname(info->name)) { simple_unlock(&kmod_lock); - rc = vm_map_unwire(kernel_map, info->address + info->hdr_size, - info->address + info->size, FALSE); - assert(rc == KERN_SUCCESS); + if (!isPrelink) { + rc = vm_map_unwire(kernel_map, info->address + info->hdr_size, + info->address + info->size, FALSE); + assert(rc == KERN_SUCCESS); + } return KERN_INVALID_ARGUMENT; } @@ -279,7 +288,7 @@ kmod_create_internal(kmod_info_t *info, kmod_t *id) #if DEBUG printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n", info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size); -#endif DEBUG 
+#endif /* DEBUG */ return KERN_SUCCESS; } @@ -375,15 +384,25 @@ kmod_destroy_internal(kmod_t id) #if DEBUG printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n", k->name, k->id, k->size / PAGE_SIZE, k->address); -#endif DEBUG - - rc = vm_map_unwire(kernel_map, k->address + k->hdr_size, - k->address + k->size, FALSE); - assert(rc == KERN_SUCCESS); - - rc = vm_deallocate(kernel_map, k->address, k->size); - assert(rc == KERN_SUCCESS); - +#endif /* DEBUG */ + + if( (k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK))) + { + vm_offset_t + virt = ml_static_ptovirt(k->address); + if( virt) { + ml_static_mfree( virt, k->size); + } + } + else + { + rc = vm_map_unwire(kernel_map, k->address + k->hdr_size, + k->address + k->size, FALSE); + assert(rc == KERN_SUCCESS); + + rc = vm_deallocate(kernel_map, k->address, k->size); + assert(rc == KERN_SUCCESS); + } return KERN_SUCCESS; } p = k; @@ -791,7 +810,7 @@ kmod_dump(vm_offset_t *addr, unsigned int cnt) if (!k->address) { continue; // skip fake entries for built-in kernel components } - if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) { + if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) { kdb_printf(" kmod scan stopped due to missing " "kmod page: %08x\n", stop_kmod); break; @@ -811,7 +830,7 @@ kmod_dump(vm_offset_t *addr, unsigned int cnt) for (r = k->reference_list; r; r = r->next) { kmod_info_t * rinfo; - if (pmap_extract(kernel_pmap, (vm_offset_t)r) == 0) { + if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) { kdb_printf(" kmod dependency scan stopped " "due to missing dependency page: %08x\n", r); break; @@ -823,7 +842,7 @@ kmod_dump(vm_offset_t *addr, unsigned int cnt) continue; // skip fake entries for built-ins } - if (pmap_extract(kernel_pmap, (vm_offset_t)rinfo) == 0) { + if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) { kdb_printf(" kmod dependency scan stopped " "due to missing kmod page: %08x\n", rinfo); break; diff --git a/osfmk/kern/lock.c b/osfmk/kern/lock.c index 4883f2bef..ef600c40f 100644 --- a/osfmk/kern/lock.c +++ b/osfmk/kern/lock.c @@ -81,7 +81,6 @@ #ifdef __ppc__ #include -#include #endif #include @@ -216,9 +215,13 @@ usimple_lock_init( usimple_lock_t l, etap_event_t event) { +#ifndef MACHINE_SIMPLE_LOCK USLDBG(usld_lock_init(l, event)); ETAPCALL(etap_simplelock_init((l),(event))); hw_lock_init(&l->interlock); +#else + simple_lock_init((simple_lock_t)l,event); +#endif } @@ -233,6 +236,7 @@ void usimple_lock( usimple_lock_t l) { +#ifndef MACHINE_SIMPLE_LOCK int i; pc_t pc; #if ETAP_LOCK_TRACE @@ -254,6 +258,9 @@ usimple_lock( ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time)); USLDBG(usld_lock_post(l, pc)); +#else + simple_lock((simple_lock_t)l); +#endif } @@ -268,6 +275,7 @@ void usimple_unlock( usimple_lock_t l) { +#ifndef MACHINE_SIMPLE_LOCK pc_t pc; // checkNMI(); /* (TEST/DEBUG) */ @@ -275,7 +283,13 @@ usimple_unlock( OBTAIN_PC(pc, l); USLDBG(usld_unlock(l, pc)); ETAPCALL(etap_simplelock_unlock(l)); +#ifdef __ppc__ + sync(); +#endif hw_lock_unlock(&l->interlock); +#else + simple_unlock_rwmb((simple_lock_t)l); +#endif } @@ -295,6 +309,7 @@ unsigned int usimple_lock_try( usimple_lock_t l) { +#ifndef MACHINE_SIMPLE_LOCK pc_t pc; unsigned int success; etap_time_t zero_time; @@ -307,6 +322,9 @@ usimple_lock_try( ETAPCALL(etap_simplelock_hold(l, pc, zero_time)); } return success; +#else + return(simple_lock_try((simple_lock_t)l)); +#endif } #if ETAP_LOCK_TRACE @@ -1702,14 +1720,14 @@ mutex_free( void mutex_lock_wait ( mutex_t 
*mutex, - thread_act_t holder) + thread_t holder) { - thread_t thread, self = current_thread(); + thread_t self = current_thread(); #if !defined(i386) integer_t priority; spl_t s = splsched(); - priority = self->last_processor->current_pri; + priority = self->sched_pri; if (priority < self->priority) priority = self->priority; if (priority > MINPRI_KERNEL) @@ -1718,23 +1736,22 @@ mutex_lock_wait ( if (priority < BASEPRI_DEFAULT) priority = BASEPRI_DEFAULT; - thread = holder->thread; - assert(thread->top_act == holder); /* XXX */ - thread_lock(thread); + assert(holder->thread == holder); /* XXX */ + thread_lock(holder); if (mutex->promoted_pri == 0) - thread->promotions++; - if (thread->priority < MINPRI_KERNEL) { - thread->sched_mode |= TH_MODE_PROMOTED; + holder->promotions++; + if (holder->priority < MINPRI_KERNEL) { + holder->sched_mode |= TH_MODE_PROMOTED; if ( mutex->promoted_pri < priority && - thread->sched_pri < priority ) { + holder->sched_pri < priority ) { KERNEL_DEBUG_CONSTANT( MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE, - thread->sched_pri, priority, (int)thread, (int)mutex, 0); + holder->sched_pri, priority, (int)holder, (int)mutex, 0); - set_sched_pri(thread, priority); + set_sched_pri(holder, priority); } } - thread_unlock(thread); + thread_unlock(holder); splx(s); if (mutex->promoted_pri < priority) @@ -1817,7 +1834,7 @@ mutex_lock_acquire( void mutex_unlock_wakeup ( mutex_t *mutex, - thread_act_t holder) + thread_t holder) { #if !defined(i386) thread_t thread = current_thread(); @@ -1860,6 +1877,79 @@ mutex_unlock_wakeup ( thread_wakeup_one(mutex); } +boolean_t +mutex_preblock_wait( + mutex_t *mutex, + thread_t thread, + thread_t holder) +{ + wait_result_t wresult; + integer_t priority; + wait_queue_t wq; + + assert(holder == NULL || holder->thread == holder); + + wq = wait_event_wait_queue((event_t)mutex); + if (!wait_queue_lock_try(wq)) + return (FALSE); + + if (holder != NULL && !thread_lock_try(holder)) { + wait_queue_unlock(wq); + return (FALSE); + } + + wresult = wait_queue_assert_wait64_locked(wq, (uint32_t)mutex, + THREAD_UNINT, thread); + wait_queue_unlock(wq); + assert(wresult == THREAD_WAITING); + + priority = thread->sched_pri; + if (priority < thread->priority) + priority = thread->priority; + if (priority > MINPRI_KERNEL) + priority = MINPRI_KERNEL; + else + if (priority < BASEPRI_DEFAULT) + priority = BASEPRI_DEFAULT; + + if (holder != NULL) { + if (mutex->promoted_pri == 0) + holder->promotions++; + if (holder->priority < MINPRI_KERNEL) { + holder->sched_mode |= TH_MODE_PROMOTED; + if ( mutex->promoted_pri < priority && + holder->sched_pri < priority ) { + KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE, + holder->sched_pri, priority, + (int)holder, (int)mutex, 0); + + set_sched_pri(holder, priority); + } + } + thread_unlock(holder); + } + + if (mutex->promoted_pri < priority) + mutex->promoted_pri = priority; + + if (thread->pending_promoter[thread->pending_promoter_index] == NULL) { + thread->pending_promoter[thread->pending_promoter_index] = mutex; + mutex->waiters++; + } + else + if (thread->pending_promoter[thread->pending_promoter_index] != mutex) { + thread->pending_promoter[++thread->pending_promoter_index] = mutex; + mutex->waiters++; + } + + KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_SCHED,MACH_PREBLOCK_MUTEX) | DBG_FUNC_NONE, + (int)thread, thread->sched_pri, (int)mutex, 0, 0); + + return (TRUE); +} + /* * mutex_pause: Called by former callers of simple_lock_pause(). 
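/*
 * [Editor's note]  mutex_lock_wait() and the new mutex_preblock_wait()
 * compute the promotion priority identically: take the larger of the
 * waiter's scheduled and base priorities, then clamp into
 * [BASEPRI_DEFAULT, MINPRI_KERNEL].  A sketch of just that clamp;
 * MINPRI_KERNEL = 80 matches the sched.h hunk later in this patch, and
 * BASEPRI_DEFAULT = 31 is the usual xnu value but is an assumption here.
 */
#include <assert.h>

#define BASEPRI_DEFAULT 31
#define MINPRI_KERNEL   80

static int promotion_pri(int sched_pri, int base_pri)
{
    int priority = (sched_pri > base_pri) ? sched_pri : base_pri;

    if (priority > MINPRI_KERNEL)           /* never promote above the kernel band */
        priority = MINPRI_KERNEL;
    else if (priority < BASEPRI_DEFAULT)    /* but lift every waiter to at least default */
        priority = BASEPRI_DEFAULT;

    return priority;
}

int main(void)
{
    assert(promotion_pri(10, 20) == BASEPRI_DEFAULT);
    assert(promotion_pri(90, 95) == MINPRI_KERNEL);
    assert(promotion_pri(40, 50) == 50);
    return 0;
}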
*/ diff --git a/osfmk/kern/lock.h b/osfmk/kern/lock.h index 6ffdea90d..63e27bc15 100644 --- a/osfmk/kern/lock.h +++ b/osfmk/kern/lock.h @@ -156,15 +156,36 @@ typedef struct { #define decl_mutex_data(class,name) class mutex_t name; #define mutex_addr(m) (&(m)) -extern void mutex_init (mutex_t*, etap_event_t); -extern void mutex_lock_wait (mutex_t *, thread_act_t); -extern int mutex_lock_acquire (mutex_t *); -extern void mutex_unlock_wakeup (mutex_t*, thread_act_t); -extern void interlock_unlock (hw_lock_t); +extern void mutex_init( + mutex_t *mutex, + etap_event_t tag); + +extern void mutex_lock_wait( + mutex_t *mutex, + thread_t holder); + +extern int mutex_lock_acquire( + mutex_t *mutex); + +extern void mutex_unlock_wakeup( + mutex_t *mutex, + thread_t holder); + +extern boolean_t mutex_preblock( + mutex_t *mutex, + thread_t thread); + +extern boolean_t mutex_preblock_wait( + mutex_t *mutex, + thread_t thread, + thread_t holder); + +extern void interlock_unlock( + hw_lock_t lock); #endif /* MACH_KERNEL_PRIVATE */ -extern void mutex_pause (void); +extern void mutex_pause(void); #endif /* __APPLE_API_PRIVATE */ @@ -174,11 +195,20 @@ typedef struct __mutex__ mutex_t; #endif /* MACH_KERNEL_PRIVATE */ -extern mutex_t *mutex_alloc (etap_event_t); -extern void mutex_free (mutex_t*); -extern void mutex_lock (mutex_t*); -extern void mutex_unlock (mutex_t*); -extern boolean_t mutex_try (mutex_t*); +extern mutex_t *mutex_alloc( + etap_event_t tag); + +extern void mutex_free( + mutex_t *mutex); + +extern void mutex_lock( + mutex_t *mutex); + +extern void mutex_unlock( + mutex_t *mutex); + +extern boolean_t mutex_try( + mutex_t *mutex); #ifdef __APPLE_API_PRIVATE diff --git a/osfmk/kern/mach_clock.c b/osfmk/kern/mach_clock.c index c0b234c63..accc3fe3d 100644 --- a/osfmk/kern/mach_clock.c +++ b/osfmk/kern/mach_clock.c @@ -174,18 +174,14 @@ hertz_tick( #endif } else { - switch(processor_ptr[my_cpu]->state) { + TICKBUMP(&thread->system_timer); - case PROCESSOR_IDLE: - TICKBUMP(&thread->system_timer); + state = processor_ptr[my_cpu]->state; + if ( state == PROCESSOR_IDLE || + state == PROCESSOR_DISPATCHING ) state = CPU_STATE_IDLE; - break; - - default: - TICKBUMP(&thread->system_timer); + else state = CPU_STATE_SYSTEM; - break; - } #if GPROF if (pv->active) { if (state == CPU_STATE_SYSTEM) diff --git a/osfmk/kern/mach_factor.c b/osfmk/kern/mach_factor.c index 47a8b3e25..77671e043 100644 --- a/osfmk/kern/mach_factor.c +++ b/osfmk/kern/mach_factor.c @@ -94,46 +94,49 @@ static uint32_t fract[3] = { void compute_mach_factor(void) { - register processor_set_t pset; + register processor_set_t pset = &default_pset; register int ncpus; - register int nthreads; + register int nthreads, nshared; register uint32_t factor_now = 0; register uint32_t average_now = 0; register uint32_t load_now = 0; - pset = &default_pset; if ((ncpus = pset->processor_count) > 0) { /* - * Number of threads running in pset. + * Retrieve thread counts. */ nthreads = pset->run_count; + nshared = pset->share_count; /* - * The current thread (running this calculation) - * doesn't count; it's always in the default pset. + * Don't include the current thread. */ - if (pset == &default_pset) - nthreads -= 1; - - if (nthreads > ncpus) { - factor_now = (ncpus * LOAD_SCALE) / (nthreads + 1); - load_now = (nthreads << SCHED_SHIFT) / ncpus; - } - else - factor_now = (ncpus - nthreads) * LOAD_SCALE; + nthreads -= 1; /* * Load average and mach factor calculations for - * those that ask about these things. + * those which ask about these things. 
*/ average_now = nthreads * LOAD_SCALE; + if (nthreads > ncpus) + factor_now = (ncpus * LOAD_SCALE) / (nthreads + 1); + else + factor_now = (ncpus - nthreads) * LOAD_SCALE; + pset->mach_factor = ((pset->mach_factor << 2) + factor_now) / 5; pset->load_average = ((pset->load_average << 2) + average_now) / 5; /* - * sched_load is used by the timesharing algorithm. + * Compute the load factor used by the timesharing + * algorithm. */ + if (nshared > nthreads) + nshared = nthreads; + + if (nshared > ncpus) + load_now = (nshared << SCHED_SHIFT) / ncpus; + pset->sched_load = (pset->sched_load + load_now) >> 1; } else { diff --git a/osfmk/kern/mach_param.h b/osfmk/kern/mach_param.h index 6451b3769..ff7e09321 100644 --- a/osfmk/kern/mach_param.h +++ b/osfmk/kern/mach_param.h @@ -69,17 +69,11 @@ #ifdef __APPLE_API_PRIVATE #ifdef __APPLE_API_EVOLVING -#define THREAD_MAX 1024 /* Max number of threads */ +#define THREAD_MAX 2560 /* Max number of threads */ #define THREAD_CHUNK 64 /* Allocation chunk */ #define TASK_MAX 1024 /* Max number of tasks */ -#define TASK_CHUNK 64 /* Allocation chunk */ - -#define ACT_MAX 1024 /* Max number of acts */ -#define ACT_CHUNK 64 /* Allocation chunk */ - -#define THREAD_POOL_MAX 1024 /* Max number of thread_pools */ -#define THREAD_POOL_CHUNK 64 /* Allocation chunk */ +#define TASK_CHUNK 64 /* Allocation chunk */ #define PORT_MAX ((TASK_MAX * 3 + THREAD_MAX) /* kernel */ \ + (THREAD_MAX * 2) /* user */ \ diff --git a/osfmk/kern/machine.c b/osfmk/kern/machine.c index 20f13346e..33ff1338e 100644 --- a/osfmk/kern/machine.c +++ b/osfmk/kern/machine.c @@ -92,22 +92,9 @@ struct machine_info machine_info; struct machine_slot machine_slot[NCPUS]; -static queue_head_t processor_action_queue; -static boolean_t processor_action_active; -static thread_call_t processor_action_call; -static thread_call_data_t processor_action_call_data; -decl_simple_lock_data(static,processor_action_lock) - thread_t machine_wake_thread; /* Forwards */ -processor_set_t processor_request_action( - processor_t processor, - processor_set_t new_pset); - -void processor_doaction( - processor_t processor); - void processor_doshutdown( processor_t processor); @@ -125,13 +112,6 @@ cpu_up( processor_set_t pset = &default_pset; struct machine_slot *ms; spl_t s; - - /* - * Just twiddle our thumbs; we've got nothing better to do - * yet, anyway. - */ - while (!simple_lock_try(&pset->processors_lock)) - continue; s = splsched(); processor_lock(processor); @@ -139,15 +119,14 @@ cpu_up( ms = &machine_slot[cpu]; ms->running = TRUE; machine_info.avail_cpus++; - pset_add_processor(pset, processor); simple_lock(&pset->sched_lock); + pset_add_processor(pset, processor); enqueue_tail(&pset->active_queue, (queue_entry_t)processor); + processor->deadline = UINT64_MAX; processor->state = PROCESSOR_RUNNING; simple_unlock(&pset->sched_lock); processor_unlock(processor); splx(s); - - simple_unlock(&pset->processors_lock); } /* @@ -174,7 +153,6 @@ cpu_down( /* * processor has already been removed from pset. 
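/*
 * [Editor's note]  compute_mach_factor() smooths both statistics with
 * the same filter: new = (old * 4 + sample) / 5, an exponential moving
 * average in which each fresh sample carries 1/5 weight (old << 2 is
 * just old * 4).  sched_load uses an even simpler half-weight form,
 * (old + sample) >> 1.  A standalone sketch of the 1/5 filter; the
 * LOAD_SCALE value is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define LOAD_SCALE 1000     /* fixed-point scale; illustrative value */

static uint32_t ema5(uint32_t old, uint32_t sample)
{
    return ((old << 2) + sample) / 5;
}

int main(void)
{
    uint32_t load_average = 0;

    /* Feed a constant load of 3 runnable threads; the average converges. */
    for (int i = 0; i < 20; i++) {
        load_average = ema5(load_average, 3 * LOAD_SCALE);
        printf("tick %2d: load_average = %u\n", i, (unsigned)load_average);
    }
    return 0;
}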
*/ - processor->processor_set_next = PROCESSOR_SET_NULL; processor->state = PROCESSOR_OFF_LINE; processor_unlock(processor); splx(s); @@ -182,7 +160,7 @@ cpu_down( kern_return_t host_reboot( - host_priv_t host_priv, + host_priv_t host_priv, int options) { if (host_priv == HOST_PRIV_NULL) @@ -192,97 +170,12 @@ host_reboot( if (options & HOST_REBOOT_DEBUGGER) { Debugger("Debugger"); - } - else - halt_all_cpus(!(options & HOST_REBOOT_HALT)); - - return (KERN_SUCCESS); -} - -/* - * processor_request_action: - * - * Common internals of processor_assign and processor_shutdown. - * If new_pset is null, this is a shutdown, else it's an assign - * and caller must donate a reference. - * For assign operations, it returns an old pset that must be deallocated - * if it's not NULL. - * For shutdown operations, it always returns PROCESSOR_SET_NULL. - */ -processor_set_t -processor_request_action( - processor_t processor, - processor_set_t new_pset) -{ - processor_set_t pset, old_pset; - - /* - * Processor must be in a processor set. Must lock its idle lock to - * get at processor state. - */ - pset = processor->processor_set; - simple_lock(&pset->sched_lock); - - /* - * If the processor is dispatching, let it finish - it will set its - * state to running very soon. - */ - while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING) { - simple_unlock(&pset->sched_lock); - - simple_lock(&pset->sched_lock); - } - - assert( processor->state == PROCESSOR_IDLE || - processor->state == PROCESSOR_RUNNING || - processor->state == PROCESSOR_ASSIGN ); - - /* - * Now lock the action queue and do the dirty work. - */ - simple_lock(&processor_action_lock); - - if (processor->state == PROCESSOR_IDLE) { - remqueue(&pset->idle_queue, (queue_entry_t)processor); - pset->idle_count--; - } - else - if (processor->state == PROCESSOR_RUNNING) - remqueue(&pset->active_queue, (queue_entry_t)processor); - - if (processor->state != PROCESSOR_ASSIGN) - enqueue_tail(&processor_action_queue, (queue_entry_t)processor); - - /* - * And ask the action_thread to do the work. - */ - if (new_pset != PROCESSOR_SET_NULL) { - processor->state = PROCESSOR_ASSIGN; - old_pset = processor->processor_set_next; - processor->processor_set_next = new_pset; - } - else { - processor->state = PROCESSOR_SHUTDOWN; - old_pset = PROCESSOR_SET_NULL; - } - - simple_unlock(&pset->sched_lock); - - if (processor_action_active) { - simple_unlock(&processor_action_lock); - - return (old_pset); + return (KERN_SUCCESS); } - processor_action_active = TRUE; - simple_unlock(&processor_action_lock); + halt_all_cpus(!(options & HOST_REBOOT_HALT)); - processor_unlock(processor); - - thread_call_enter(processor_action_call); - processor_lock(processor); - - return (old_pset); + return (KERN_SUCCESS); } kern_return_t @@ -297,22 +190,19 @@ processor_assign( return (KERN_FAILURE); } -/* - * processor_shutdown() queues a processor up for shutdown. - * Any assignment in progress is overriden. - */ kern_return_t processor_shutdown( - processor_t processor) + processor_t processor) { - spl_t s; + processor_set_t pset; + spl_t s; s = splsched(); processor_lock(processor); if ( processor->state == PROCESSOR_OFF_LINE || processor->state == PROCESSOR_SHUTDOWN ) { /* - * Already shutdown or being shutdown -- nothing to do. + * Success if already shutdown or being shutdown. 
*/ processor_unlock(processor); splx(s); @@ -320,135 +210,113 @@ processor_shutdown( return (KERN_SUCCESS); } - processor_request_action(processor, PROCESSOR_SET_NULL); - - assert_wait((event_t)processor, THREAD_UNINT); - - processor_unlock(processor); - splx(s); + if (processor->state == PROCESSOR_START) { + /* + * Failure if currently being started. + */ + processor_unlock(processor); + splx(s); - thread_block(THREAD_CONTINUE_NULL); + return (KERN_FAILURE); + } - return (KERN_SUCCESS); -} + /* + * Processor must be in a processor set. Must lock the scheduling + * lock to get at the processor state. + */ + pset = processor->processor_set; + simple_lock(&pset->sched_lock); -/* - * processor_action() shuts down processors or changes their assignment. - */ -static void -_processor_action( - thread_call_param_t p0, - thread_call_param_t p1) -{ - register processor_t processor; - spl_t s; + /* + * If the processor is dispatching, let it finish - it will set its + * state to running very soon. + */ + while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING) { + simple_unlock(&pset->sched_lock); + delay(1); + simple_lock(&pset->sched_lock); + } - s = splsched(); - simple_lock(&processor_action_lock); + if (processor->state == PROCESSOR_IDLE) { + remqueue(&pset->idle_queue, (queue_entry_t)processor); + pset->idle_count--; + } + else + if (processor->state == PROCESSOR_RUNNING) + remqueue(&pset->active_queue, (queue_entry_t)processor); + else + panic("processor_request_action"); - while (!queue_empty(&processor_action_queue)) { - processor = (processor_t)dequeue_head(&processor_action_queue); - simple_unlock(&processor_action_lock); - splx(s); + processor->state = PROCESSOR_SHUTDOWN; - processor_doaction(processor); + simple_unlock(&pset->sched_lock); - s = splsched(); - simple_lock(&processor_action_lock); - } + processor_unlock(processor); - processor_action_active = FALSE; - simple_unlock(&processor_action_lock); + processor_doshutdown(processor); splx(s); -} - -void -processor_action(void) -{ - queue_init(&processor_action_queue); - simple_lock_init(&processor_action_lock, ETAP_THREAD_ACTION); - processor_action_active = FALSE; - thread_call_setup(&processor_action_call_data, _processor_action, NULL); - processor_action_call = &processor_action_call_data; + return (KERN_SUCCESS); } /* - * processor_doaction actually does the shutdown. The trick here - * is to schedule ourselves onto a cpu and then save our - * context back into the runqs before taking out the cpu. + * Called at splsched. */ void -processor_doaction( - processor_t processor) +processor_doshutdown( + processor_t processor) { - thread_t self = current_thread(); + thread_t old_thread, self = current_thread(); processor_set_t pset; - thread_t old_thread; - spl_t s; + processor_t prev; /* * Get onto the processor to shutdown */ - thread_bind(self, processor); + prev = thread_bind(self, processor); thread_block(THREAD_CONTINUE_NULL); + processor_lock(processor); pset = processor->processor_set; - simple_lock(&pset->processors_lock); + simple_lock(&pset->sched_lock); if (pset->processor_count == 1) { thread_t thread; extern void start_cpu_thread(void); - simple_unlock(&pset->processors_lock); + simple_unlock(&pset->sched_lock); + processor_unlock(processor); /* * Create the thread, and point it at the routine. 
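/*
 * [Editor's note]  processor_shutdown() now inlines the old
 * processor_request_action() logic, including the "let a dispatching
 * processor settle" loop: drop the scheduling lock, pause briefly, and
 * retake it until the state leaves PROCESSOR_DISPATCHING.  A user-space
 * sketch of that polling idiom with pthreads; delay(1) becomes a short
 * nanosleep, and all names here are illustrative.
 */
#include <pthread.h>
#include <time.h>

enum pstate { P_RUNNING, P_DISPATCHING };

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile enum pstate state = P_DISPATCHING;

static void wait_not_dispatching(void)
{
    struct timespec ts = { 0, 1000 };   /* ~1us, like delay(1) */

    pthread_mutex_lock(&sched_lock);
    while (state == P_DISPATCHING) {
        /* Cannot sleep holding the lock the dispatcher itself needs. */
        pthread_mutex_unlock(&sched_lock);
        nanosleep(&ts, NULL);
        pthread_mutex_lock(&sched_lock);
    }
    /* ... perform the state transition with the lock still held ... */
    pthread_mutex_unlock(&sched_lock);
}

static void *dispatcher(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&sched_lock);
    state = P_RUNNING;                  /* dispatch completes */
    pthread_mutex_unlock(&sched_lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, dispatcher, NULL);
    wait_not_dispatching();
    pthread_join(t, NULL);
    return 0;
}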
*/ - thread = kernel_thread_with_priority( - kernel_task, MAXPRI_KERNEL, - start_cpu_thread, TRUE, FALSE); - - disable_preemption(); + thread = kernel_thread_create(start_cpu_thread, MAXPRI_KERNEL); - s = splsched(); thread_lock(thread); machine_wake_thread = thread; - thread_go_locked(thread, THREAD_AWAKENED); - (void)rem_runq(thread); + thread->state = TH_RUN; + pset_run_incr(thread->processor_set); thread_unlock(thread); - splx(s); - simple_lock(&pset->processors_lock); - enable_preemption(); + processor_lock(processor); + simple_lock(&pset->sched_lock); } - s = splsched(); - processor_lock(processor); - - /* - * Do shutdown, make sure we live when processor dies. - */ - if (processor->state != PROCESSOR_SHUTDOWN) { - panic("action_thread -- bad processor state"); - } + assert(processor->state == PROCESSOR_SHUTDOWN); pset_remove_processor(pset, processor); + simple_unlock(&pset->sched_lock); processor_unlock(processor); - simple_unlock(&pset->processors_lock); /* * Clean up. */ - thread_bind(self, PROCESSOR_NULL); - self->continuation = 0; + thread_bind(self, prev); old_thread = switch_to_shutdown_context(self, - processor_doshutdown, processor); + processor_offline, processor); if (processor != current_processor()) timer_call_shutdown(processor); thread_dispatch(old_thread); - thread_wakeup((event_t)processor); - splx(s); } /* @@ -457,20 +325,22 @@ processor_doaction( */ void -processor_doshutdown( +processor_offline( processor_t processor) { - register int cpu = processor->slot_num; + register thread_t old_thread = processor->active_thread; + register int cpu = processor->slot_num; timer_call_cancel(&processor->quantum_timer); - thread_dispatch(current_thread()); timer_switch(&kernel_timer[cpu]); + processor->active_thread = processor->idle_thread; + machine_thread_set_current(processor->active_thread); + thread_dispatch(old_thread); /* * OK, now exit this cpu. */ PMAP_DEACTIVATE_KERNEL(cpu); - thread_machine_set_current(processor->idle_thread); cpu_down(cpu); cpu_sleep(); panic("zombie processor"); diff --git a/osfmk/kern/machine.h b/osfmk/kern/machine.h index 4ad3e6931..41dc71973 100644 --- a/osfmk/kern/machine.h +++ b/osfmk/kern/machine.h @@ -40,14 +40,15 @@ extern thread_t machine_wake_thread; -extern void processor_action(void); - extern void cpu_down( int cpu); extern void cpu_up( int cpu); +extern void processor_offline( + processor_t processor); + /* * Must be implemented in machine dependent code. 
*/ diff --git a/osfmk/kern/misc_protos.h b/osfmk/kern/misc_protos.h index 14770bdac..1a60cc781 100644 --- a/osfmk/kern/misc_protos.h +++ b/osfmk/kern/misc_protos.h @@ -120,6 +120,8 @@ extern integer_t sprintf(char *buf, const char *fmt, ...); extern void printf(const char *format, ...); +extern void dbugprintf(const char *format, ...); + extern void kdp_printf(const char *format, ...); extern void printf_init(void); @@ -201,4 +203,9 @@ extern boolean_t no_bootstrap_task(void); extern ipc_port_t get_root_master_device_port(void); #endif /* DIPC */ +extern kern_return_t kernel_set_special_port( + host_priv_t host_priv, + int which, + ipc_port_t port); + #endif /* _MISC_PROTOS_H_ */ diff --git a/osfmk/kern/mk_sp.c b/osfmk/kern/mk_sp.c index f976f5f51..13bdcbf3b 100644 --- a/osfmk/kern/mk_sp.c +++ b/osfmk/kern/mk_sp.c @@ -73,21 +73,22 @@ ***/ #include #include -#include void _mk_sp_thread_unblock( thread_t thread) { - thread_setrun(thread, TAIL_Q); + if (thread->state & TH_IDLE) + return; + + if (thread->sched_mode & TH_MODE_REALTIME) { + thread->realtime.deadline = mach_absolute_time(); + thread->realtime.deadline += thread->realtime.constraint; + } thread->current_quantum = 0; thread->computation_metered = 0; thread->reason = AST_NONE; - - KERNEL_DEBUG_CONSTANT( - MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE, - (int)thread, (int)thread->sched_pri, 0, 0, 0); } void @@ -99,49 +100,58 @@ _mk_sp_thread_done( /* * A running thread is being taken off a processor: */ - clock_get_uptime(&processor->last_dispatch); - if (!(old_thread->state & TH_IDLE)) { + processor->last_dispatch = mach_absolute_time(); + + if (old_thread->state & TH_IDLE) + return; + + /* + * Compute remainder of current quantum. + */ + if ( first_timeslice(processor) && + processor->quantum_end > processor->last_dispatch ) + old_thread->current_quantum = + (processor->quantum_end - processor->last_dispatch); + else + old_thread->current_quantum = 0; + + if (old_thread->sched_mode & TH_MODE_REALTIME) { /* - * Compute remainder of current quantum. + * Cancel the deadline if the thread has + * consumed the entire quantum. */ - if ( first_quantum(processor) && - processor->quantum_end > processor->last_dispatch ) - old_thread->current_quantum = - (processor->quantum_end - processor->last_dispatch); - else - old_thread->current_quantum = 0; - + if (old_thread->current_quantum == 0) { + old_thread->realtime.deadline = UINT64_MAX; + old_thread->reason |= AST_QUANTUM; + } + } + else { /* * For non-realtime threads treat a tiny * remaining quantum as an expired quantum * but include what's left next time. */ - if (!(old_thread->sched_mode & TH_MODE_REALTIME)) { - if (old_thread->current_quantum < min_std_quantum) { - old_thread->reason |= AST_QUANTUM; - old_thread->current_quantum += std_quantum; - } - } - else - if (old_thread->current_quantum == 0) - old_thread->reason |= AST_QUANTUM; - - /* - * If we are doing a direct handoff then - * give the remainder of our quantum to - * the next guy. 
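/*
 * [Editor's note]  _mk_sp_thread_unblock() below re-arms a realtime
 * thread's deadline the moment it becomes runnable: deadline = "now" +
 * the thread's declared constraint.  Together with the per-processor
 * 'deadline' field added elsewhere in this patch, this is the basis of
 * earliest-deadline-first dispatch for the realtime band.  A sketch of
 * the idea; the clock stand-in and constraint values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct rt_thread {
    uint64_t constraint;    /* maximum latency the thread asked for */
    uint64_t deadline;      /* absolute time by which it must run */
};

static uint64_t fake_now = 1000;    /* stand-in for mach_absolute_time() */

static void rt_unblock(struct rt_thread *t)
{
    t->deadline = fake_now + t->constraint;
}

int main(void)
{
    struct rt_thread a = { .constraint = 50 }, b = { .constraint = 20 };

    rt_unblock(&a);
    rt_unblock(&b);
    /* The runnable thread with the earlier deadline is preferred. */
    printf("run %s first\n", (a.deadline < b.deadline) ? "a" : "b");
    return 0;
}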
- */ - if ((old_thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) { - new_thread->current_quantum = old_thread->current_quantum; + if (old_thread->current_quantum < min_std_quantum) { old_thread->reason |= AST_QUANTUM; - old_thread->current_quantum = 0; + old_thread->current_quantum += std_quantum; } + } - old_thread->last_switch = processor->last_dispatch; - - old_thread->computation_metered += - (old_thread->last_switch - old_thread->computation_epoch); + /* + * If we are doing a direct handoff then + * give the remainder of our quantum to + * the next guy. + */ + if ((old_thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) { + new_thread->current_quantum = old_thread->current_quantum; + old_thread->reason |= AST_QUANTUM; + old_thread->current_quantum = 0; } + + old_thread->last_switch = processor->last_dispatch; + + old_thread->computation_metered += + (old_thread->last_switch - old_thread->computation_epoch); } void @@ -153,30 +163,26 @@ _mk_sp_thread_begin( /* * The designated thread is beginning execution: */ - if (!(thread->state & TH_IDLE)) { - if (thread->current_quantum == 0) - thread->current_quantum = - (thread->sched_mode & TH_MODE_REALTIME)? - thread->realtime.computation: std_quantum; + if (thread->state & TH_IDLE) { + timer_call_cancel(&processor->quantum_timer); + processor->timeslice = 1; - processor->quantum_end = - (processor->last_dispatch + thread->current_quantum); - timer_call_enter1(&processor->quantum_timer, - thread, processor->quantum_end); + return; + } - processor->slice_quanta = - (thread->sched_mode & TH_MODE_TIMESHARE)? - processor->processor_set->set_quanta: 1; + if (thread->current_quantum == 0) + thread_quantum_init(thread); - thread->last_switch = processor->last_dispatch; + processor->quantum_end = + (processor->last_dispatch + thread->current_quantum); + timer_call_enter1(&processor->quantum_timer, + thread, processor->quantum_end); - thread->computation_epoch = thread->last_switch; - } - else { - timer_call_cancel(&processor->quantum_timer); + processor_timeslice_setup(processor, thread); - processor->slice_quanta = 1; - } + thread->last_switch = processor->last_dispatch; + + thread->computation_epoch = thread->last_switch; } void @@ -184,9 +190,12 @@ _mk_sp_thread_dispatch( thread_t thread) { if (thread->reason & AST_QUANTUM) - thread_setrun(thread, TAIL_Q); + thread_setrun(thread, SCHED_TAILQ); else - thread_setrun(thread, HEAD_Q); + if (thread->reason & AST_PREEMPT) + thread_setrun(thread, SCHED_HEADQ); + else + thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); thread->reason = AST_NONE; } @@ -214,10 +223,21 @@ thread_policy_common( if ( !(thread->sched_mode & TH_MODE_REALTIME) && !(thread->safe_mode & TH_MODE_REALTIME) ) { if (!(thread->sched_mode & TH_MODE_FAILSAFE)) { - if (policy == POLICY_TIMESHARE) + integer_t oldmode = (thread->sched_mode & TH_MODE_TIMESHARE); + + if (policy == POLICY_TIMESHARE && !oldmode) { thread->sched_mode |= TH_MODE_TIMESHARE; + + if (thread->state & TH_RUN) + pset_share_incr(thread->processor_set); + } else + if (policy != POLICY_TIMESHARE && oldmode) { thread->sched_mode &= ~TH_MODE_TIMESHARE; + + if (thread->state & TH_RUN) + pset_share_decr(thread->processor_set); + } } else { if (policy == POLICY_TIMESHARE) @@ -755,13 +775,16 @@ update_priority( thread->sched_stamp >= thread->safe_release ) { if (!(thread->safe_mode & TH_MODE_TIMESHARE)) { if (thread->safe_mode & TH_MODE_REALTIME) { - thread->priority = BASEPRI_REALTIME; + thread->priority = BASEPRI_RTQUEUES; thread->sched_mode |= 
TH_MODE_REALTIME; } thread->sched_mode &= ~TH_MODE_TIMESHARE; + if (thread->state & TH_RUN) + pset_share_decr(thread->processor_set); + if (!(thread->sched_mode & TH_MODE_ISDEPRESSED)) set_sched_pri(thread, thread->priority); } @@ -782,10 +805,10 @@ update_priority( if (new_pri != thread->sched_pri) { run_queue_t runq; - runq = rem_runq(thread); + runq = run_queue_remove(thread); thread->sched_pri = new_pri; if (runq != RUN_QUEUE_NULL) - thread_setrun(thread, TAIL_Q); + thread_setrun(thread, SCHED_TAILQ); } } } @@ -831,7 +854,6 @@ _mk_sp_thread_switch( mach_msg_timeout_t option_time) { register thread_t self = current_thread(); - register processor_t myprocessor; int s; /* @@ -844,16 +866,26 @@ _mk_sp_thread_switch( if ( thread != THREAD_NULL && thread != self && thread->top_act == hint_act ) { + processor_t processor; + s = splsched(); thread_lock(thread); /* - * Check if the thread is in the right pset. Then - * pull it off its run queue. If it - * doesn't come, then it's not eligible. + * Check if the thread is in the right pset, + * is not bound to a different processor, + * and that realtime is not involved. + * + * Next, pull it off its run queue. If it + * doesn't come, it's not eligible. */ - if ( thread->processor_set == self->processor_set && - rem_runq(thread) != RUN_QUEUE_NULL ) { + processor = current_processor(); + if (processor->current_pri < BASEPRI_RTQUEUES && + thread->sched_pri < BASEPRI_RTQUEUES && + thread->processor_set == processor->processor_set && + (thread->bound_processor == PROCESSOR_NULL || + thread->bound_processor == processor) && + run_queue_remove(thread) != RUN_QUEUE_NULL ) { /* * Hah, got it!! */ @@ -890,29 +922,16 @@ _mk_sp_thread_switch( * highest priority thread (can easily happen with a collection * of timesharing threads). */ - mp_disable_preemption(); - myprocessor = current_processor(); - if ( option != SWITCH_OPTION_NONE || - myprocessor->processor_set->runq.count > 0 || - myprocessor->runq.count > 0 ) { - mp_enable_preemption(); - - if (option == SWITCH_OPTION_WAIT) - assert_wait_timeout(option_time, THREAD_ABORTSAFE); - else - if (option == SWITCH_OPTION_DEPRESS) - _mk_sp_thread_depress_ms(option_time); + if (option == SWITCH_OPTION_WAIT) + assert_wait_timeout(option_time, THREAD_ABORTSAFE); + else + if (option == SWITCH_OPTION_DEPRESS) + _mk_sp_thread_depress_ms(option_time); - self->saved.swtch.option = option; + self->saved.swtch.option = option; - thread_block_reason(_mk_sp_thread_switch_continue, - (option == SWITCH_OPTION_DEPRESS)? 
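/*
 * [Editor's note]  The handoff path in _mk_sp_thread_switch() above
 * grew three extra eligibility tests: neither the current processor nor
 * the hinted thread may be in the realtime band, and the thread must
 * not be bound to some other processor.  A sketch of the predicate with
 * types reduced for illustration; BASEPRI_RTQUEUES = 97 matches the
 * sched.h hunk later in this patch.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BASEPRI_RTQUEUES 97

struct processor { int id; const void *pset; };

struct xthread {
    int                    sched_pri;
    const void             *pset;             /* processor set identity */
    const struct processor *bound_processor;  /* NULL when unbound */
};

static bool handoff_eligible(const struct xthread *t,
                             const struct processor *p, int current_pri)
{
    return  current_pri  < BASEPRI_RTQUEUES &&   /* no RT thread on the cpu  */
            t->sched_pri < BASEPRI_RTQUEUES &&   /* hint is not RT either    */
            t->pset == p->pset              &&   /* same processor set       */
            (t->bound_processor == NULL ||       /* unbound, or bound here   */
             t->bound_processor == p);
}

int main(void)
{
    static const int set0;
    struct processor cpu = { 0, &set0 };
    struct xthread hint = { 31, &set0, NULL };

    printf("eligible: %d\n", handoff_eligible(&hint, &cpu, 31));    /* 1 */
    hint.sched_pri = 97;
    printf("eligible: %d\n", handoff_eligible(&hint, &cpu, 31));    /* 0 */
    return 0;
}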
- AST_YIELD: AST_NONE); - } - else - mp_enable_preemption(); + thread_block_reason(_mk_sp_thread_switch_continue, AST_YIELD); -out: if (option == SWITCH_OPTION_WAIT) thread_cancel_timer(); else @@ -935,7 +954,6 @@ _mk_sp_thread_depress_abstime( spl_t s; s = splsched(); - wake_lock(self); thread_lock(self); if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) { processor_t myprocessor = self->last_processor; @@ -944,7 +962,6 @@ _mk_sp_thread_depress_abstime( myprocessor->current_pri = self->sched_pri; self->sched_mode &= ~TH_MODE_PREEMPT; self->sched_mode |= TH_MODE_DEPRESS; - thread_unlock(self); if (interval != 0) { clock_absolutetime_interval_to_deadline(interval, &deadline); @@ -952,9 +969,7 @@ _mk_sp_thread_depress_abstime( self->depress_timer_active++; } } - else - thread_unlock(self); - wake_unlock(self); + thread_unlock(self); splx(s); } @@ -981,17 +996,12 @@ thread_depress_expire( spl_t s; s = splsched(); - wake_lock(thread); + thread_lock(thread); if (--thread->depress_timer_active == 1) { - thread_lock(thread); thread->sched_mode &= ~TH_MODE_ISDEPRESSED; compute_priority(thread, FALSE); - thread_unlock(thread); } - else - if (thread->depress_timer_active == 0) - thread_wakeup_one(&thread->depress_timer_active); - wake_unlock(thread); + thread_unlock(thread); splx(s); } @@ -1007,7 +1017,6 @@ _mk_sp_thread_depress_abort( spl_t s; s = splsched(); - wake_lock(thread); thread_lock(thread); if (abortall || !(thread->sched_mode & TH_MODE_POLLDEPRESS)) { if (thread->sched_mode & TH_MODE_ISDEPRESSED) { @@ -1016,14 +1025,10 @@ _mk_sp_thread_depress_abort( result = KERN_SUCCESS; } - thread_unlock(thread); - if (timer_call_cancel(&thread->depress_timer)) thread->depress_timer_active--; } - else - thread_unlock(thread); - wake_unlock(thread); + thread_unlock(thread); splx(s); return (result); @@ -1041,16 +1046,15 @@ _mk_sp_thread_perhaps_yield( if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) { extern uint64_t max_poll_computation; extern int sched_poll_yield_shift; - uint64_t abstime, total_computation; + uint64_t total_computation, abstime; - clock_get_uptime(&abstime); + abstime = mach_absolute_time(); total_computation = abstime - self->computation_epoch; total_computation += self->computation_metered; if (total_computation >= max_poll_computation) { processor_t myprocessor = current_processor(); ast_t preempt; - wake_lock(self); thread_lock(self); if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) { self->sched_pri = DEPRESSPRI; @@ -1060,12 +1064,11 @@ _mk_sp_thread_perhaps_yield( self->computation_epoch = abstime; self->computation_metered = 0; self->sched_mode |= TH_MODE_POLLDEPRESS; - thread_unlock(self); abstime += (total_computation >> sched_poll_yield_shift); if (!timer_call_enter(&self->depress_timer, abstime)) self->depress_timer_active++; - wake_unlock(self); + thread_unlock(self); if ((preempt = csw_check(self, myprocessor)) != AST_NONE) ast_on(preempt); diff --git a/osfmk/kern/mk_timer.c b/osfmk/kern/mk_timer.c index da3c92583..df1d3fa70 100644 --- a/osfmk/kern/mk_timer.c +++ b/osfmk/kern/mk_timer.c @@ -125,7 +125,7 @@ mk_timer_port_destroy( } void -mk_timer_initialize(void) +mk_timer_init(void) { int s = sizeof (mk_timer_data_t); @@ -139,12 +139,9 @@ mk_timer_expire( void *p0, void *p1) { - uint64_t time_of_posting; mk_timer_t timer = p0; ipc_port_t port; - clock_get_uptime(&time_of_posting); - simple_lock(&timer->lock); if (timer->active > 1) { @@ -155,17 +152,12 @@ mk_timer_expire( port = timer->port; assert(port != IP_NULL); + assert(timer->active == 1); - while ( 
timer->is_armed && - !thread_call_is_delayed(&timer->call_entry, NULL) ) { + while (timer->is_armed && timer->active == 1) { mk_timer_expire_msg_t msg; timer->is_armed = FALSE; - - msg.time_of_arming = timer->time_of_arming; - msg.armed_time = timer->call_entry.deadline; - msg.time_of_posting = time_of_posting; - simple_unlock(&timer->lock); msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0); @@ -173,6 +165,8 @@ mk_timer_expire( msg.header.msgh_local_port = MACH_PORT_NULL; msg.header.msgh_reserved = msg.header.msgh_id = 0; + msg.unused[0] = msg.unused[1] = msg.unused[2] = 0; + (void) mach_msg_send_from_kernel(&msg.header, sizeof (msg)); simple_lock(&timer->lock); @@ -237,11 +231,14 @@ mk_timer_arm( assert(timer->port == port); ip_unlock(port); - timer->time_of_arming = time_of_arming; - timer->is_armed = TRUE; + if (!timer->is_dead) { + timer->time_of_arming = time_of_arming; + timer->is_armed = TRUE; + + if (!thread_call_enter_delayed(&timer->call_entry, expire_time)) + timer->active++; + } - if (!thread_call_enter_delayed(&timer->call_entry, expire_time)) - timer->active++; simple_unlock(&timer->lock); } else { diff --git a/osfmk/kern/mk_timer.h b/osfmk/kern/mk_timer.h index e630fdbe9..20d008c85 100644 --- a/osfmk/kern/mk_timer.h +++ b/osfmk/kern/mk_timer.h @@ -43,7 +43,7 @@ struct mk_timer { decl_simple_lock_data(,lock) call_entry_data_t call_entry; uint64_t time_of_arming; - boolean_t is_dead:1, + uint32_t is_dead:1, is_armed:1; int active; ipc_port_t port; @@ -54,7 +54,7 @@ typedef struct mk_timer *mk_timer_t, mk_timer_data_t; void mk_timer_port_destroy( ipc_port_t port); -void mk_timer_initialize(void); +void mk_timer_init(void); #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/kern/printf.c b/osfmk/kern/printf.c index 6aeaa8056..ece7a7806 100644 --- a/osfmk/kern/printf.c +++ b/osfmk/kern/printf.c @@ -153,6 +153,9 @@ * output bases such as x, X, u, U, o, and O also work. */ +#include +#include +#include #include #include #include @@ -721,6 +724,22 @@ conslog_putc( #endif } +void +dbugprintf(const char *fmt, ...) +{ + +#if MACH_KDB + + extern void db_putchar(char c); + va_list listp; + + va_start(listp, fmt); + _doprnt(fmt, &listp, db_putchar, 16); + va_end(listp); +#endif + return; +} + void printf(const char *fmt, ...) { diff --git a/osfmk/kern/priority.c b/osfmk/kern/priority.c index 23f5e43f0..23470cc84 100644 --- a/osfmk/kern/priority.c +++ b/osfmk/kern/priority.c @@ -111,6 +111,8 @@ thread_quantum_expire( thread->sched_mode &= ~TH_MODE_REALTIME; } + pset_share_incr(thread->processor_set); + thread->safe_release = sched_tick + sched_safe_duration; thread->sched_mode |= (TH_MODE_FAILSAFE|TH_MODE_TIMESHARE); thread->sched_mode &= ~TH_MODE_PREEMPT; @@ -141,11 +143,10 @@ thread_quantum_expire( /* * This quantum is up, give this thread another. */ - if (first_quantum(myprocessor)) - myprocessor->slice_quanta--; + if (first_timeslice(myprocessor)) + myprocessor->timeslice--; - thread->current_quantum = (thread->sched_mode & TH_MODE_REALTIME)? 
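/*
 * [Editor's note]  The mk_timer.h hunk above changes the bit-fields
 * from boolean_t (a typedef for int in Mach) to uint32_t.  A signed
 * one-bit field can only represent 0 and -1 on typical two's-complement
 * ABIs, so a "flag == TRUE" comparison silently fails.  A demonstration
 * in plain C (using 'signed int' explicitly, since the signedness of a
 * plain 'int' bit-field is itself implementation-defined):
 */
#include <stdio.h>

struct flags_signed   { signed int   is_dead:1; };
struct flags_unsigned { unsigned int is_dead:1; };

int main(void)
{
    struct flags_signed   s = { 0 };
    struct flags_unsigned u = { 0 };

    s.is_dead = 1;  /* the stored bit reads back as -1 */
    u.is_dead = 1;  /* reads back as 1 */

    printf("signed:   is_dead = %d, (is_dead == 1) is %s\n",
           s.is_dead, s.is_dead == 1 ? "true" : "false");
    printf("unsigned: is_dead = %d, (is_dead == 1) is %s\n",
           u.is_dead, u.is_dead == 1 ? "true" : "false");
    return 0;
}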
- thread->realtime.computation: std_quantum; + thread_quantum_init(thread); myprocessor->quantum_end += thread->current_quantum; timer_call_enter1(&myprocessor->quantum_timer, thread, myprocessor->quantum_end); diff --git a/osfmk/kern/processor.c b/osfmk/kern/processor.c index c9251cc75..964a47324 100644 --- a/osfmk/kern/processor.c +++ b/osfmk/kern/processor.c @@ -100,7 +100,7 @@ void processor_init( register processor_t pr, int slot_num); -void pset_quanta_set( +void pset_quanta_setup( processor_set_t pset); kern_return_t processor_set_base( @@ -131,6 +131,7 @@ pset_sys_bootstrap(void) register int i; pset_init(&default_pset); + for (i = 0; i < NCPUS; i++) { /* * Initialize processor data structures. @@ -139,8 +140,9 @@ pset_sys_bootstrap(void) processor_ptr[i] = &processor_array[i]; processor_init(processor_ptr[i], i); } + master_processor = cpu_to_processor(master_cpu); - master_processor->cpu_data = get_cpu_data(); + default_pset.active = TRUE; } @@ -154,11 +156,10 @@ void pset_init( register int i; /* setup run queue */ - simple_lock_init(&pset->runq.lock, ETAP_THREAD_PSET_RUNQ); + pset->runq.highq = IDLEPRI; for (i = 0; i < NRQBM; i++) pset->runq.bitmap[i] = 0; setbit(MAXPRI - IDLEPRI, pset->runq.bitmap); - pset->runq.highq = IDLEPRI; pset->runq.urgency = pset->runq.count = 0; for (i = 0; i < NRQS; i++) queue_init(&pset->runq.queues[i]); @@ -167,12 +168,11 @@ void pset_init( pset->idle_count = 0; queue_init(&pset->active_queue); simple_lock_init(&pset->sched_lock, ETAP_THREAD_PSET_IDLE); - pset->run_count = 0; + pset->run_count = pset->share_count = 0; pset->mach_factor = pset->load_average = 0; pset->sched_load = 0; queue_init(&pset->processors); pset->processor_count = 0; - simple_lock_init(&pset->processors_lock, ETAP_THREAD_PSET); queue_init(&pset->tasks); pset->task_count = 0; queue_init(&pset->threads); @@ -182,10 +182,10 @@ void pset_init( mutex_init(&pset->lock, ETAP_THREAD_PSET); pset->pset_self = IP_NULL; pset->pset_name_self = IP_NULL; - pset->set_quanta = 1; + pset->timeshare_quanta = 1; for (i = 0; i <= NCPUS; i++) - pset->machine_quanta[i] = 1; + pset->quantum_factors[i] = 1; } /* @@ -200,23 +200,21 @@ processor_init( register int i; /* setup run queue */ - simple_lock_init(&p->runq.lock, ETAP_THREAD_PROC_RUNQ); + p->runq.highq = IDLEPRI; for (i = 0; i < NRQBM; i++) p->runq.bitmap[i] = 0; setbit(MAXPRI - IDLEPRI, p->runq.bitmap); - p->runq.highq = IDLEPRI; p->runq.urgency = p->runq.count = 0; for (i = 0; i < NRQS; i++) queue_init(&p->runq.queues[i]); p->state = PROCESSOR_OFF_LINE; + p->active_thread = p->next_thread = p->idle_thread = THREAD_NULL; + p->processor_set = PROCESSOR_SET_NULL; p->current_pri = MINPRI; - p->next_thread = THREAD_NULL; - p->idle_thread = THREAD_NULL; timer_call_setup(&p->quantum_timer, thread_quantum_expire, p); - p->slice_quanta = 0; - p->processor_set = PROCESSOR_SET_NULL; - p->processor_set_next = PROCESSOR_SET_NULL; + p->timeslice = 0; + p->deadline = UINT64_MAX; simple_lock_init(&p->lock, ETAP_THREAD_PROC); p->processor_self = IP_NULL; p->slot_num = slot_num; @@ -269,7 +267,7 @@ pset_remove_processor( queue_remove(&pset->processors, processor, processor_t, processors); processor->processor_set = PROCESSOR_SET_NULL; pset->processor_count--; - pset_quanta_set(pset); + pset_quanta_setup(pset); } /* @@ -286,7 +284,7 @@ pset_add_processor( queue_enter(&pset->processors, processor, processor_t, processors); processor->processor_set = pset; pset->processor_count++; - pset_quanta_set(pset); + pset_quanta_setup(pset); } /* @@ -469,31 +467,34 @@ 
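/*
 * [Editor's note]  Both pset_init() and processor_init() below seed the
 * run queue with highq = IDLEPRI and a single set bit at index
 * MAXPRI - IDLEPRI.  The bitmap stores priority MAXPRI at bit 0, so
 * "find the highest runnable priority" reduces to "find the first set
 * bit".  A sketch with 32 priorities in one word; the kernel spreads
 * NRQS = 128 bits across an int array, but the arithmetic is the same.
 */
#include <stdio.h>
#include <strings.h>    /* ffs() */

#define MAXPRI  31
#define IDLEPRI 0

static unsigned bitmap;

static void runq_setbit(int pri)   { bitmap |=  (1u << (MAXPRI - pri)); }
static void runq_clearbit(int pri) { bitmap &= ~(1u << (MAXPRI - pri)); }
static int  runq_highest(void)     { return MAXPRI - (ffs((int)bitmap) - 1); }

int main(void)
{
    runq_setbit(IDLEPRI);           /* the idle thread is always runnable */
    runq_setbit(12);
    runq_setbit(20);
    printf("highq = %d\n", runq_highest());     /* 20 */
    runq_clearbit(20);
    printf("highq = %d\n", runq_highest());     /* 12 */
    return 0;
}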
kern_return_t processor_start( processor_t processor) { - int state; - spl_t s; - kern_return_t kr; + kern_return_t result; + spl_t s; if (processor == PROCESSOR_NULL) return(KERN_INVALID_ARGUMENT); if (processor == master_processor) { - thread_bind(current_thread(), processor); + processor_t prev; + + prev = thread_bind(current_thread(), processor); thread_block(THREAD_CONTINUE_NULL); - kr = cpu_start(processor->slot_num); - thread_bind(current_thread(), PROCESSOR_NULL); - return(kr); + result = cpu_start(processor->slot_num); + + thread_bind(current_thread(), prev); + + return (result); } s = splsched(); processor_lock(processor); - - state = processor->state; - if (state != PROCESSOR_OFF_LINE) { + if (processor->state != PROCESSOR_OFF_LINE) { processor_unlock(processor); splx(s); - return(KERN_FAILURE); + + return (KERN_FAILURE); } + processor->state = PROCESSOR_START; processor_unlock(processor); splx(s); @@ -502,31 +503,35 @@ processor_start( thread_t thread; extern void start_cpu_thread(void); - thread = kernel_thread_with_priority( - kernel_task, MAXPRI_KERNEL, - start_cpu_thread, TRUE, FALSE); + thread = kernel_thread_create(start_cpu_thread, MAXPRI_KERNEL); s = splsched(); thread_lock(thread); - thread_bind_locked(thread, processor); - thread_go_locked(thread, THREAD_AWAKENED); - (void)rem_runq(thread); + thread->bound_processor = processor; processor->next_thread = thread; + thread->state = TH_RUN; + pset_run_incr(thread->processor_set); thread_unlock(thread); splx(s); } - kr = cpu_start(processor->slot_num); + if (processor->processor_self == IP_NULL) + ipc_processor_init(processor); - if (kr != KERN_SUCCESS) { + result = cpu_start(processor->slot_num); + if (result != KERN_SUCCESS) { s = splsched(); processor_lock(processor); processor->state = PROCESSOR_OFF_LINE; processor_unlock(processor); splx(s); + + return (result); } - return(kr); + ipc_processor_enable(processor); + + return (KERN_SUCCESS); } kern_return_t @@ -553,23 +558,23 @@ processor_control( /* * Precalculate the appropriate timesharing quanta based on load. The - * index into machine_quanta is the number of threads on the + * index into quantum_factors[] is the number of threads on the * processor set queue. It is limited to the number of processors in * the set. */ void -pset_quanta_set( +pset_quanta_setup( processor_set_t pset) { register int i, count = pset->processor_count; for (i = 1; i <= count; i++) - pset->machine_quanta[i] = (count + (i / 2)) / i; + pset->quantum_factors[i] = (count + (i / 2)) / i; - pset->machine_quanta[0] = pset->machine_quanta[1]; + pset->quantum_factors[0] = pset->quantum_factors[1]; - pset_quanta_update(pset); + timeshare_quanta_update(pset); } kern_return_t @@ -905,9 +910,9 @@ processor_set_things( thread = (thread_t) queue_next(&thread->pset_threads)) { thr_act = thread_lock_act(thread); - if (thr_act && thr_act->ref_count > 0) { + if (thr_act && thr_act->act_ref_count > 0) { /* take ref for convert_act_to_port */ - act_locked_act_reference(thr_act); + act_reference_locked(thr_act); thr_acts[i++] = thr_act; } thread_unlock_act(thread); diff --git a/osfmk/kern/processor.h b/osfmk/kern/processor.h index 5ecbbc539..6c238db71 100644 --- a/osfmk/kern/processor.h +++ b/osfmk/kern/processor.h @@ -80,7 +80,6 @@ #include #include #include -#include #include @@ -88,11 +87,10 @@ struct processor_set { queue_head_t idle_queue; /* idle processors */ int idle_count; /* how many ? 
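/*
 * [Editor's note]  pset_quanta_setup() below precomputes, for every
 * possible run-queue depth i, how many quanta a timeshare thread may
 * hold a processor: roughly processor_count / i, rounded to nearest via
 * the (count + i/2) / i idiom.  A sketch that prints the table for a
 * hypothetical 4-processor set:
 */
#include <stdio.h>

#define NCPUS 4

static int quantum_factors[NCPUS + 1];

static void quanta_setup(int count)
{
    for (int i = 1; i <= count; i++)
        quantum_factors[i] = (count + (i / 2)) / i;     /* rounded divide */

    quantum_factors[0] = quantum_factors[1];    /* empty queue uses depth 1 */
}

int main(void)
{
    quanta_setup(NCPUS);
    for (int i = 0; i <= NCPUS; i++)
        printf("runnable=%d -> %d quanta\n", i, quantum_factors[i]);
    return 0;
}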
*/ queue_head_t active_queue; /* active processors */ - decl_simple_lock_data(,sched_lock) /* lock for above */ queue_head_t processors; /* all processors here */ int processor_count;/* how many ? */ - decl_simple_lock_data(,processors_lock) /* lock for above */ + decl_simple_lock_data(,sched_lock) /* lock for above */ struct run_queue runq; /* runq for this set */ @@ -104,13 +102,14 @@ struct processor_set { boolean_t active; /* is pset in use */ decl_mutex_data(, lock) /* lock for above */ - int set_quanta; /* timeslice quanta for timesharing */ - int machine_quanta[NCPUS+1]; + int timeshare_quanta; /* timeshare quantum factor */ + int quantum_factors[NCPUS+1]; struct ipc_port * pset_self; /* port for operations */ struct ipc_port * pset_name_self; /* port for information */ - uint32_t run_count; /* number of threads running in set */ + uint32_t run_count; /* threads running in set */ + uint32_t share_count; /* timeshare threads running in set */ integer_t mach_factor; /* mach_factor */ integer_t load_average; /* load_average */ @@ -121,23 +120,27 @@ struct processor { queue_chain_t processor_queue;/* idle/active/action queue link, * MUST remain the first element */ int state; /* See below */ - int current_pri; /* priority of current thread */ - struct thread_shuttle + struct thread + *active_thread, /* thread running on processor */ *next_thread, /* next thread to run if dispatched */ *idle_thread; /* this processor's idle thread. */ + + processor_set_t processor_set; /* current membership */ + + int current_pri; /* priority of current thread */ + timer_call_data_t quantum_timer; /* timer for quantum expiration */ - int slice_quanta; /* quanta before timeslice ends */ uint64_t quantum_end; /* time when current quantum ends */ uint64_t last_dispatch; /* time of last dispatch */ + int timeslice; /* quanta before timeslice ends */ + uint64_t deadline; /* current deadline */ + struct run_queue runq; /* local runq for this processor */ - processor_set_t processor_set; /* current membership */ - processor_set_t processor_set_next; /* set to join in progress */ - queue_chain_t processors; /* all processors in set */ + queue_chain_t processors; /* all processors in set */ decl_simple_lock_data(,lock) struct ipc_port *processor_self;/* port for operations */ - cpu_data_t *cpu_data; /* machine-dep per-cpu data */ int slot_num; /* machine-indep slot number */ }; @@ -170,13 +173,12 @@ extern struct processor processor_array[NCPUS]; * will often lock both. */ -#define PROCESSOR_OFF_LINE 0 /* Not in system */ -#define PROCESSOR_RUNNING 1 /* Running a normal thread */ -#define PROCESSOR_IDLE 2 /* idle */ -#define PROCESSOR_DISPATCHING 3 /* dispatching (idle -> running) */ -#define PROCESSOR_ASSIGN 4 /* Assignment is changing */ -#define PROCESSOR_SHUTDOWN 5 /* Being shutdown */ -#define PROCESSOR_START 6 /* Being start */ +#define PROCESSOR_OFF_LINE 0 /* Not available */ +#define PROCESSOR_RUNNING 1 /* Normal execution */ +#define PROCESSOR_IDLE 2 /* Idle */ +#define PROCESSOR_DISPATCHING 3 /* Dispatching (idle -> running) */ +#define PROCESSOR_SHUTDOWN 4 /* Going off-line */ +#define PROCESSOR_START 5 /* Being started */ /* * Use processor ptr array to find current processor's data structure. 
@@ -190,7 +192,6 @@ extern processor_t processor_ptr[NCPUS]; #define cpu_to_processor(i) (processor_ptr[i]) #define current_processor() (processor_ptr[cpu_number()]) -#define current_processor_set() (current_processor()->processor_set) /* Compatibility -- will go away */ @@ -208,76 +209,86 @@ extern processor_t processor_ptr[NCPUS]; extern void pset_sys_bootstrap(void); -#define pset_quanta_update(pset) \ +#define timeshare_quanta_update(pset) \ MACRO_BEGIN \ int proc_count = (pset)->processor_count; \ int runq_count = (pset)->runq.count; \ \ - (pset)->set_quanta = (pset)->machine_quanta[ \ + (pset)->timeshare_quanta = (pset)->quantum_factors[ \ (runq_count > proc_count)? \ proc_count: runq_count]; \ MACRO_END -/* Implemented by MD layer */ +#define pset_run_incr(pset) \ + hw_atomic_add(&(pset)->run_count, 1) + +#define pset_run_decr(pset) \ + hw_atomic_sub(&(pset)->run_count, 1) + +#define pset_share_incr(pset) \ + hw_atomic_add(&(pset)->share_count, 1) + +#define pset_share_decr(pset) \ + hw_atomic_sub(&(pset)->share_count, 1) extern void cpu_up( - int cpu); + int cpu); extern kern_return_t processor_shutdown( - processor_t processor); + processor_t processor); extern void pset_remove_processor( - processor_set_t pset, - processor_t processor); + processor_set_t pset, + processor_t processor); extern void pset_add_processor( - processor_set_t pset, - processor_t processor); + processor_set_t pset, + processor_t processor); extern void pset_remove_task( - processor_set_t pset, - task_t task); + processor_set_t pset, + task_t task); extern void pset_add_task( - processor_set_t pset, - task_t task); + processor_set_t pset, + task_t task); extern void pset_remove_thread( - processor_set_t pset, - thread_t thread); + processor_set_t pset, + thread_t thread); extern void pset_add_thread( - processor_set_t pset, - thread_t thread); + processor_set_t pset, + thread_t thread); extern void thread_change_psets( - thread_t thread, - processor_set_t old_pset, - processor_set_t new_pset); + thread_t thread, + processor_set_t old_pset, + processor_set_t new_pset); extern kern_return_t processor_assign( - processor_t processor, - processor_set_t new_pset, - boolean_t wait); + processor_t processor, + processor_set_t new_pset, + boolean_t wait); extern kern_return_t processor_info_count( - processor_flavor_t flavor, - mach_msg_type_number_t *count); + processor_flavor_t flavor, + mach_msg_type_number_t *count); #endif /* MACH_KERNEL_PRIVATE */ extern kern_return_t processor_start( - processor_t processor); + processor_t processor); extern kern_return_t processor_exit( - processor_t processor); + processor_t processor); #endif /* __APPLE_API_PRIVATE */ extern void pset_deallocate( - processor_set_t pset); + processor_set_t pset); extern void pset_reference( - processor_set_t pset); + processor_set_t pset); #endif /* _KERN_PROCESSOR_H_ */ diff --git a/osfmk/kern/profile.c b/osfmk/kern/profile.c index 1b3bfa0c3..29f57a3eb 100644 --- a/osfmk/kern/profile.c +++ b/osfmk/kern/profile.c @@ -464,10 +464,10 @@ task_sample( profile_thread_id = /* then start profile thread. 
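/*
 * [Editor's note]  Two details of the macros below are worth noting.
 * timeshare_quanta_update() indexes quantum_factors[] by the smaller of
 * run-queue depth and processor count, so the index can never run off
 * the end of the NCPUS+1 table.  And the new pset_run/share counters
 * are maintained lock-free via hw_atomic_add/sub; the portable C11
 * equivalent of that pattern looks like this (names illustrative):
 */
#include <stdatomic.h>
#include <stdio.h>

struct pset_counts {
    atomic_uint run_count;      /* all runnable threads in the set */
    atomic_uint share_count;    /* the timeshare subset */
};

static void pset_run_incr(struct pset_counts *p)   { atomic_fetch_add(&p->run_count, 1); }
static void pset_run_decr(struct pset_counts *p)   { atomic_fetch_sub(&p->run_count, 1); }
static void pset_share_incr(struct pset_counts *p) { atomic_fetch_add(&p->share_count, 1); }
static void pset_share_decr(struct pset_counts *p) { atomic_fetch_sub(&p->share_count, 1); }

static int timeshare_index(int runq_count, int proc_count)
{
    return (runq_count > proc_count) ? proc_count : runq_count;
}

int main(void)
{
    struct pset_counts pc = { 0, 0 };

    pset_run_incr(&pc);
    pset_share_incr(&pc);
    printf("index for runq=5, cpus=2: %d\n", timeshare_index(5, 2));    /* 2 */
    pset_share_decr(&pc);
    pset_run_decr(&pc);
    return 0;
}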
*/ kernel_thread(kernel_task, profile_thread); task->task_profiled = turnon; - actual = task->thr_act_count; - for (i = 0, thr_act = (thread_act_t)queue_first(&task->thr_acts); + actual = task->thread_count; + for (i = 0, thr_act = (thread_act_t)queue_first(&task->threads); i < actual; - i++, thr_act = (thread_act_t)queue_next(&thr_act->thr_acts)) { + i++, thr_act = (thread_act_t)queue_next(&thr_act->task_threads)) { if (!thr_act->act_profiled_own) { thr_act->act_profiled = turnon; if (turnon) { diff --git a/osfmk/kern/sched.h b/osfmk/kern/sched.h index 6f5c9d398..187a62b2b 100644 --- a/osfmk/kern/sched.h +++ b/osfmk/kern/sched.h @@ -156,12 +156,14 @@ ************************************************************************* */ +#define BASEPRI_RTQUEUES (BASEPRI_REALTIME + 1) /* 97 */ #define BASEPRI_REALTIME (MAXPRI - (NRQS / 4) + 1) /* 96 */ #define MAXPRI_STANDARD (BASEPRI_REALTIME - 1) /* 95 */ #define MAXPRI_KERNEL MAXPRI_STANDARD /* 95 */ #define BASEPRI_PREEMPT (MAXPRI_KERNEL - 2) /* 93 */ +#define BASEPRI_KERNEL (MINPRI_KERNEL + 1) /* 81 */ #define MINPRI_KERNEL (MAXPRI_KERNEL - (NRQS / 8) + 1) /* 80 */ #define MAXPRI_SYSTEM (MINPRI_KERNEL - 1) /* 79 */ @@ -182,23 +184,36 @@ #define invalid_pri(pri) ((pri) < MINPRI || (pri) > MAXPRI) struct run_queue { - queue_head_t queues[NRQS]; /* one for each priority */ - decl_simple_lock_data(,lock) /* one lock for all queues */ - int bitmap[NRQBM]; /* run queue bitmap array */ int highq; /* highest runnable queue */ + int bitmap[NRQBM]; /* run queue bitmap array */ + int count; /* # of threads total */ int urgency; /* level of preemption urgency */ - int count; /* # of threads in queue */ + queue_head_t queues[NRQS]; /* one for each priority */ }; typedef struct run_queue *run_queue_t; #define RUN_QUEUE_NULL ((run_queue_t) 0) -#define first_quantum(processor) ((processor)->slice_quanta > 0) +#define first_timeslice(processor) ((processor)->timeslice > 0) + +#define processor_timeslice_setup(processor, thread) \ +MACRO_BEGIN \ + (processor)->timeslice = \ + ((thread)->sched_mode & TH_MODE_TIMESHARE)? \ + (processor)->processor_set->timeshare_quanta: 1; \ +MACRO_END + +#define thread_quantum_init(thread) \ +MACRO_BEGIN \ + (thread)->current_quantum = \ + ((thread)->sched_mode & TH_MODE_REALTIME)? \ + (thread)->realtime.computation: std_quantum; \ +MACRO_END /* Invoked at splsched by a thread on itself */ #define csw_needed(thread, processor) ( \ ((thread)->state & TH_SUSP) || \ - (first_quantum(processor)? \ + (first_timeslice(processor)? \ ((processor)->runq.highq > (thread)->sched_pri || \ (processor)->processor_set->runq.highq > (thread)->sched_pri) : \ ((processor)->runq.highq >= (thread)->sched_pri || \ @@ -209,7 +224,7 @@ typedef struct run_queue *run_queue_t; */ /* Remove thread from its run queue */ -extern run_queue_t rem_runq( +extern run_queue_t run_queue_remove( thread_t thread); /* Periodic computation of load factors */ diff --git a/osfmk/kern/sched_prim.c b/osfmk/kern/sched_prim.c index 2e7a7fc5d..da682c811 100644 --- a/osfmk/kern/sched_prim.c +++ b/osfmk/kern/sched_prim.c @@ -65,8 +65,6 @@ #include #include #include -#include -#include #include #include @@ -98,13 +96,6 @@ #include /*** ??? 
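/*
 * [Editor's note]  The sched.h constants below are all derived from
 * NRQS.  With the values this kernel uses (NRQS = 128, MAXPRI = NRQS-1)
 * the numbers quoted in the hunk's comments check out; a compile-and-
 * run sanity sketch:
 */
#include <assert.h>

#define NRQS             128
#define MAXPRI           (NRQS - 1)                     /* 127 */
#define BASEPRI_REALTIME (MAXPRI - (NRQS / 4) + 1)      /*  96 */
#define BASEPRI_RTQUEUES (BASEPRI_REALTIME + 1)         /*  97 */
#define MAXPRI_KERNEL    (BASEPRI_REALTIME - 1)         /*  95 */
#define MINPRI_KERNEL    (MAXPRI_KERNEL - (NRQS / 8) + 1)   /* 80 */
#define BASEPRI_KERNEL   (MINPRI_KERNEL + 1)            /*  81 */

int main(void)
{
    assert(BASEPRI_REALTIME == 96);
    assert(BASEPRI_RTQUEUES == 97);
    assert(MINPRI_KERNEL == 80);
    assert(BASEPRI_KERNEL == 81);
    return 0;
}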
fix so this can be removed ***/ #include -#if TASK_SWAPPER -#include -extern int task_swap_on; -#endif /* TASK_SWAPPER */ - -extern int hz; - #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */ int default_preemption_rate = DEFAULT_PREEMPTION_RATE; @@ -119,6 +110,18 @@ int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT; uint32_t std_quantum_us; +uint64_t max_unsafe_computation; +uint32_t sched_safe_duration; +uint64_t max_poll_computation; + +uint32_t std_quantum; +uint32_t min_std_quantum; + +uint32_t max_rt_quantum; +uint32_t min_rt_quantum; + +static uint32_t sched_tick_interval; + unsigned sched_tick; #if SIMPLE_CLOCK @@ -128,38 +131,13 @@ int sched_usec; /* Forwards */ void wait_queues_init(void); -thread_t choose_pset_thread( - processor_t myprocessor, - processor_set_t pset); - -thread_t choose_thread( - processor_t myprocessor); - -boolean_t run_queue_enqueue( - run_queue_t runq, - thread_t thread, - boolean_t tail); +static thread_t choose_thread( + processor_set_t pset, + processor_t processor); -void do_thread_scan(void); +static void do_thread_scan(void); #if DEBUG -void dump_run_queues( - run_queue_t rq); -void dump_run_queue_struct( - run_queue_t rq); -void dump_processor( - processor_t p); -void dump_processor_set( - processor_set_t ps); - -void checkrq( - run_queue_t rq, - char *msg); - -void thread_check( - thread_t thread, - run_queue_t runq); - static boolean_t thread_runnable( thread_t thread); @@ -239,9 +217,11 @@ sched_init(void) printf("standard timeslicing quantum is %d us\n", std_quantum_us); + sched_safe_duration = (2 * max_unsafe_quanta / default_preemption_rate) * + (1 << SCHED_TICK_SHIFT); + wait_queues_init(); pset_sys_bootstrap(); /* initialize processor mgmt. */ - processor_action(); sched_tick = 0; #if SIMPLE_CLOCK sched_usec = 0; @@ -249,6 +229,41 @@ sched_init(void) ast_init(); } +void +sched_timebase_init(void) +{ + uint64_t abstime; + + clock_interval_to_absolutetime_interval( + std_quantum_us, NSEC_PER_USEC, &abstime); + assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); + std_quantum = abstime; + + /* 250 us */ + clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime); + assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); + min_std_quantum = abstime; + + /* 50 us */ + clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime); + assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); + min_rt_quantum = abstime; + + /* 50 ms */ + clock_interval_to_absolutetime_interval( + 50, 1000*NSEC_PER_USEC, &abstime); + assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); + max_rt_quantum = abstime; + + clock_interval_to_absolutetime_interval(1000 >> SCHED_TICK_SHIFT, + USEC_PER_SEC, &abstime); + assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); + sched_tick_interval = abstime; + + max_unsafe_computation = max_unsafe_quanta * std_quantum; + max_poll_computation = max_poll_quanta * std_quantum; +} + void wait_queues_init(void) { @@ -271,20 +286,14 @@ thread_timer_expire( spl_t s; s = splsched(); - wake_lock(thread); + thread_lock(thread); if (--thread->wait_timer_active == 1) { if (thread->wait_timer_is_set) { thread->wait_timer_is_set = FALSE; - thread_lock(thread); - if (thread->active) - clear_wait_internal(thread, THREAD_TIMED_OUT); - thread_unlock(thread); + clear_wait_internal(thread, THREAD_TIMED_OUT); } } - else - if (thread->wait_timer_active == 0) - thread_wakeup_one(&thread->wait_timer_active); - wake_unlock(thread); + thread_unlock(thread); splx(s); } @@ -305,7 +314,6 @@ thread_set_timer( spl_t s; s = splsched(); - 
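/*
 * [Editor's note]  sched_timebase_init() below converts every scheduler
 * interval (standard quantum, 250us minimum, 50us/50ms realtime bounds,
 * tick period) into absolute-time units once at boot, asserting each
 * fits a 32-bit field.  A sketch of the conversion; the 24MHz timebase
 * ratio stands in for clock_interval_to_absolutetime_interval() and is
 * purely hypothetical.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define TB_NUM 3        /* 24MHz timebase: abs = ns * 3 / 125 */
#define TB_DEN 125

static uint32_t interval_to_abs(uint64_t interval, uint64_t scale_ns)
{
    uint64_t abstime = interval * scale_ns * TB_NUM / TB_DEN;

    /* The scheduler keeps these in 32-bit fields; insist they fit. */
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    return (uint32_t)abstime;
}

int main(void)
{
    uint32_t std_quantum     = interval_to_abs(10000, NSEC_PER_USEC);   /* 10ms */
    uint32_t min_std_quantum = interval_to_abs(250,   NSEC_PER_USEC);
    uint32_t min_rt_quantum  = interval_to_abs(50,    NSEC_PER_USEC);

    printf("std=%u min_std=%u min_rt=%u\n",
           std_quantum, min_std_quantum, min_rt_quantum);
    return 0;
}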
wake_lock(thread); thread_lock(thread); if ((thread->state & TH_WAIT) != 0) { clock_interval_to_deadline(interval, scale_factor, &deadline); @@ -315,7 +323,6 @@ thread_set_timer( thread->wait_timer_is_set = TRUE; } thread_unlock(thread); - wake_unlock(thread); splx(s); } @@ -327,7 +334,6 @@ thread_set_timer_deadline( spl_t s; s = splsched(); - wake_lock(thread); thread_lock(thread); if ((thread->state & TH_WAIT) != 0) { timer_call_enter(&thread->wait_timer, deadline); @@ -336,7 +342,6 @@ thread_set_timer_deadline( thread->wait_timer_is_set = TRUE; } thread_unlock(thread); - wake_unlock(thread); splx(s); } @@ -347,13 +352,13 @@ thread_cancel_timer(void) spl_t s; s = splsched(); - wake_lock(thread); + thread_lock(thread); if (thread->wait_timer_is_set) { if (timer_call_cancel(&thread->wait_timer)) thread->wait_timer_active--; thread->wait_timer_is_set = FALSE; } - wake_unlock(thread); + thread_unlock(thread); splx(s); } @@ -386,7 +391,7 @@ thread_timer_terminate(void) spl_t s; s = splsched(); - wake_lock(thread); + thread_lock(thread); if (thread->wait_timer_is_set) { if (timer_call_cancel(&thread->wait_timer)) thread->wait_timer_active--; @@ -396,34 +401,28 @@ thread_timer_terminate(void) thread->wait_timer_active--; while (thread->wait_timer_active > 0) { - res = assert_wait((event_t)&thread->wait_timer_active, THREAD_UNINT); - assert(res == THREAD_WAITING); - wake_unlock(thread); + thread_unlock(thread); splx(s); - res = thread_block(THREAD_CONTINUE_NULL); - assert(res == THREAD_AWAKENED); + delay(1); s = splsched(); - wake_lock(thread); + thread_lock(thread); } thread->depress_timer_active--; while (thread->depress_timer_active > 0) { - res = assert_wait((event_t)&thread->depress_timer_active, THREAD_UNINT); - assert(res == THREAD_WAITING); - wake_unlock(thread); + thread_unlock(thread); splx(s); - res = thread_block(THREAD_CONTINUE_NULL); - assert(res == THREAD_AWAKENED); + delay(1); s = splsched(); - wake_lock(thread); + thread_lock(thread); } - wake_unlock(thread); + thread_unlock(thread); splx(s); thread_deallocate(thread); @@ -443,30 +442,54 @@ thread_timer_terminate(void) kern_return_t thread_go_locked( thread_t thread, - wait_result_t result) + wait_result_t wresult) { assert(thread->at_safe_point == FALSE); assert(thread->wait_event == NO_EVENT64); assert(thread->wait_queue == WAIT_QUEUE_NULL); if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) { + thread_roust_t roust_hint; + thread->state &= ~(TH_WAIT|TH_UNINT); + _mk_sp_thread_unblock(thread); + + roust_hint = thread->roust; + thread->roust = NULL; + if ( roust_hint != NULL && + (*roust_hint)(thread, wresult) ) { + if (thread->wait_timer_is_set) { + if (timer_call_cancel(&thread->wait_timer)) + thread->wait_timer_active--; + thread->wait_timer_is_set = FALSE; + } + + return (KERN_SUCCESS); + } + + thread->wait_result = wresult; + if (!(thread->state & TH_RUN)) { thread->state |= TH_RUN; if (thread->active_callout) call_thread_unblock(); - if (!(thread->state & TH_IDLE)) { - _mk_sp_thread_unblock(thread); - hw_atomic_add(&thread->processor_set->run_count, 1); - } + pset_run_incr(thread->processor_set); + if (thread->sched_mode & TH_MODE_TIMESHARE) + pset_share_incr(thread->processor_set); + + thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); } - thread->wait_result = result; - return KERN_SUCCESS; + KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE, + (int)thread, (int)thread->sched_pri, 0, 0, 0); + + return (KERN_SUCCESS); } - return KERN_NOT_WAITING; + + return 
(KERN_NOT_WAITING); } /* @@ -484,10 +507,7 @@ thread_mark_wait_locked( thread_t thread, wait_interrupt_t interruptible) { - wait_result_t wait_result; - boolean_t at_safe_point; - - assert(thread == current_thread()); + boolean_t at_safe_point; /* * The thread may have certain types of interrupts/aborts masked @@ -500,16 +520,19 @@ thread_mark_wait_locked( at_safe_point = (interruptible == THREAD_ABORTSAFE); - if ((interruptible == THREAD_UNINT) || - !(thread->state & TH_ABORT) || - (!at_safe_point && (thread->state & TH_ABORT_SAFELY))) { + if ( interruptible == THREAD_UNINT || + !(thread->state & TH_ABORT) || + (!at_safe_point && + (thread->state & TH_ABORT_SAFELY)) ) { thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT); thread->at_safe_point = at_safe_point; thread->sleep_stamp = sched_tick; return (thread->wait_result = THREAD_WAITING); - } else if (thread->state & TH_ABORT_SAFELY) { - thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY); } + else + if (thread->state & TH_ABORT_SAFELY) + thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY); + return (thread->wait_result = THREAD_INTERRUPTED); } @@ -597,13 +620,62 @@ assert_wait( register int index; assert(event != NO_EVENT); - assert(assert_wait_possible()); index = wait_hash(event); wq = &wait_queues[index]; return wait_queue_assert_wait(wq, event, interruptible); } +__private_extern__ +wait_queue_t +wait_event_wait_queue( + event_t event) +{ + assert(event != NO_EVENT); + + return (&wait_queues[wait_hash(event)]); +} + +wait_result_t +assert_wait_prim( + event_t event, + thread_roust_t roust_hint, + uint64_t deadline, + wait_interrupt_t interruptible) +{ + thread_t thread = current_thread(); + wait_result_t wresult; + wait_queue_t wq; + spl_t s; + + assert(event != NO_EVENT); + + wq = &wait_queues[wait_hash(event)]; + + s = splsched(); + wait_queue_lock(wq); + thread_lock(thread); + + wresult = wait_queue_assert_wait64_locked(wq, (uint32_t)event, + interruptible, thread); + if (wresult == THREAD_WAITING) { + if (roust_hint != NULL) + thread->roust = roust_hint; + + if (deadline != 0) { + timer_call_enter(&thread->wait_timer, deadline); + assert(!thread->wait_timer_is_set); + thread->wait_timer_active++; + thread->wait_timer_is_set = TRUE; + } + } + + thread_unlock(thread); + wait_queue_unlock(wq); + splx(s); + + return (wresult); +} /* * thread_sleep_fast_usimple_lock: @@ -816,9 +888,9 @@ thread_stop( wait_result_t result; processor_t processor = thread->last_processor; - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->cpu_data->active_thread == thread ) + if ( processor != PROCESSOR_NULL && + processor->state == PROCESSOR_RUNNING && + processor->active_thread == thread ) cause_ast_check(processor); thread_unlock(thread); @@ -864,9 +936,17 @@ thread_unstop( thread->state &= ~TH_SUSP; thread->state |= TH_RUN; - assert(!(thread->state & TH_IDLE)); _mk_sp_thread_unblock(thread); - hw_atomic_add(&thread->processor_set->run_count, 1); + + pset_run_incr(thread->processor_set); + if (thread->sched_mode & TH_MODE_TIMESHARE) + pset_share_incr(thread->processor_set); + + thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); + + KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE, + (int)thread, (int)thread->sched_pri, 0, 0, 0); } else if (thread->state & TH_SUSP) { @@ -904,9 +984,9 @@ thread_wait( wait_result_t result; processor_t processor = thread->last_processor; - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - 
processor->cpu_data->active_thread == thread ) + if ( processor != PROCESSOR_NULL && + processor->state == PROCESSOR_RUNNING && + processor->active_thread == thread ) cause_ast_check(processor); thread_unlock(thread); @@ -952,38 +1032,39 @@ thread_wait( __private_extern__ kern_return_t clear_wait_internal( thread_t thread, - wait_result_t result) + wait_result_t wresult) { wait_queue_t wq = thread->wait_queue; - kern_return_t ret; - int loop_count; + int i = LockTimeOut; - loop_count = 0; do { - if ((result == THREAD_INTERRUPTED) && (thread->state & TH_UNINT)) - return KERN_FAILURE; + if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) + return (KERN_FAILURE); if (wq != WAIT_QUEUE_NULL) { if (wait_queue_lock_try(wq)) { wait_queue_pull_thread_locked(wq, thread, TRUE); /* wait queue unlocked, thread still locked */ - } else { + } + else { thread_unlock(thread); delay(1); + thread_lock(thread); + if (wq != thread->wait_queue) + return (KERN_NOT_WAITING); - if (wq != thread->wait_queue) { - return KERN_NOT_WAITING; /* we know it moved */ - } continue; } } - ret = thread_go_locked(thread, result); - return ret; - } while (++loop_count < LockTimeOut); + + return (thread_go_locked(thread, wresult)); + } while (--i > 0); + panic("clear_wait_internal: deadlock: thread=0x%x, wq=0x%x, cpu=%d\n", thread, wq, cpu_number()); - return KERN_FAILURE; + + return (KERN_FAILURE); } @@ -1042,113 +1123,196 @@ thread_wakeup_prim( * thread_bind: * * Force a thread to execute on the specified processor. - * If the thread is currently executing, it may wait until its - * time slice is up before switching onto the specified processor. * - * A processor of PROCESSOR_NULL causes the thread to be unbound. - * xxx - DO NOT export this to users. + * Returns the previous binding. PROCESSOR_NULL means + * not bound. + * + * XXX - DO NOT export this to users - XXX */ -void +processor_t thread_bind( register thread_t thread, processor_t processor) { - spl_t s; + processor_t prev; + run_queue_t runq = RUN_QUEUE_NULL; + spl_t s; s = splsched(); thread_lock(thread); - thread_bind_locked(thread, processor); + prev = thread->bound_processor; + if (prev != PROCESSOR_NULL) + runq = run_queue_remove(thread); + + thread->bound_processor = processor; + + if (runq != RUN_QUEUE_NULL) + thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); thread_unlock(thread); splx(s); + + return (prev); } +struct { + uint32_t idle_pset_last, + idle_pset_any, + idle_bound; + + uint32_t pset_self, + pset_last, + pset_other, + bound_self, + bound_other; + + uint32_t realtime_self, + realtime_last, + realtime_other; + + uint32_t missed_realtime, + missed_other; +} dispatch_counts; + /* - * Select a thread for this processor (the current processor) to run. - * May select the current thread, which must already be locked. + * Select a thread for the current processor to run. + * + * May select the current thread, which must be locked. */ thread_t thread_select( - register processor_t myprocessor) + register processor_t processor) { register thread_t thread; processor_set_t pset; - register run_queue_t runq = &myprocessor->runq; boolean_t other_runnable; /* * Check for other non-idle runnable threads. 
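The clear_wait_internal() rewrite above is a textbook try-lock pattern: the thread lock is held, the wait-queue lock may only be tried (lock order runs queue before thread), so on failure both locks are cycled, the wait-queue pointer is re-validated, and a bounded retry count ends in panic. A standalone model of that shape, with stub locks and a hypothetical LOCK_TIMEOUT standing in for the kernel's LockTimeOut:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define LOCK_TIMEOUT 1000000            /* hypothetical retry bound */

struct wq;                              /* opaque wait queue */
struct thr { struct wq *wait_queue; };

/* Stubs standing in for the kernel lock primitives. */
static bool wq_lock_try(struct wq *q) { (void)q; return true; }
static void thr_lock(struct thr *t)   { (void)t; }
static void thr_unlock(struct thr *t) { (void)t; }
static void pause_briefly(void)       { /* delay(1) in the kernel */ }

/* Returns 0 when the thread was made runnable, 1 when it is not waiting. */
static int
clear_wait_model(struct thr *t)
{
    struct wq *wq = t->wait_queue;
    int i = LOCK_TIMEOUT;

    do {
        if (wq != NULL) {
            if (!wq_lock_try(wq)) {
                thr_unlock(t);          /* honor queue-before-thread order */
                pause_briefly();
                thr_lock(t);
                if (wq != t->wait_queue)
                    return 1;           /* thread moved while unlocked */
                continue;
            }
            /* pull the thread off the queue here, queue lock held */
        }
        return 0;
    } while (--i > 0);

    fprintf(stderr, "deadlock\n");      /* the kernel panics instead */
    abort();
}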
*/ - pset = myprocessor->processor_set; - thread = myprocessor->cpu_data->active_thread; + pset = processor->processor_set; + thread = processor->active_thread; /* Update the thread's priority */ if (thread->sched_stamp != sched_tick) update_priority(thread); - myprocessor->current_pri = thread->sched_pri; + processor->current_pri = thread->sched_pri; - simple_lock(&runq->lock); - simple_lock(&pset->runq.lock); + simple_lock(&pset->sched_lock); - other_runnable = runq->count > 0 || pset->runq.count > 0; + other_runnable = processor->runq.count > 0 || pset->runq.count > 0; if ( thread->state == TH_RUN && - (!other_runnable || - (runq->highq < thread->sched_pri && - pset->runq.highq < thread->sched_pri)) && thread->processor_set == pset && (thread->bound_processor == PROCESSOR_NULL || - thread->bound_processor == myprocessor) ) { + thread->bound_processor == processor) ) { + if ( thread->sched_pri >= BASEPRI_RTQUEUES && + first_timeslice(processor) ) { + if (pset->runq.highq >= BASEPRI_RTQUEUES) { + register run_queue_t runq = &pset->runq; + register queue_t q; + + q = runq->queues + runq->highq; + if (((thread_t)q->next)->realtime.deadline < + processor->deadline) { + thread = (thread_t)q->next; + ((queue_entry_t)thread)->next->prev = q; + q->next = ((queue_entry_t)thread)->next; + thread->runq = RUN_QUEUE_NULL; + assert(thread->sched_mode & TH_MODE_PREEMPT); + runq->count--; runq->urgency--; + if (queue_empty(q)) { + if (runq->highq != IDLEPRI) + clrbit(MAXPRI - runq->highq, runq->bitmap); + runq->highq = MAXPRI - ffsbit(runq->bitmap); + } + } + } + + processor->deadline = thread->realtime.deadline; + + simple_unlock(&pset->sched_lock); + + return (thread); + } - /* I am the highest priority runnable (non-idle) thread */ - simple_unlock(&pset->runq.lock); - simple_unlock(&runq->lock); + if ( (!other_runnable || + (processor->runq.highq < thread->sched_pri && + pset->runq.highq < thread->sched_pri)) ) { - myprocessor->slice_quanta = - (thread->sched_mode & TH_MODE_TIMESHARE)? pset->set_quanta: 1; + /* I am the highest priority runnable (non-idle) thread */ + + processor->deadline = UINT64_MAX; + + simple_unlock(&pset->sched_lock); + + return (thread); + } } - else + if (other_runnable) - thread = choose_thread(myprocessor); + thread = choose_thread(pset, processor); else { - simple_unlock(&pset->runq.lock); - simple_unlock(&runq->lock); - /* * Nothing is runnable, so set this processor idle if it - * was running. If it was in an assignment or shutdown, - * leave it alone. Return its idle thread. + * was running. Return its idle thread. */ - simple_lock(&pset->sched_lock); - if (myprocessor->state == PROCESSOR_RUNNING) { - remqueue(&pset->active_queue, (queue_entry_t)myprocessor); - myprocessor->state = PROCESSOR_IDLE; - - if (myprocessor == master_processor) - enqueue_tail(&pset->idle_queue, (queue_entry_t)myprocessor); - else - enqueue_head(&pset->idle_queue, (queue_entry_t)myprocessor); + if (processor->state == PROCESSOR_RUNNING) { + remqueue(&pset->active_queue, (queue_entry_t)processor); + processor->state = PROCESSOR_IDLE; + enqueue_tail(&pset->idle_queue, (queue_entry_t)processor); pset->idle_count++; } - simple_unlock(&pset->sched_lock); - thread = myprocessor->idle_thread; + processor->deadline = UINT64_MAX; + + thread = processor->idle_thread; } + simple_unlock(&pset->sched_lock); + return (thread); } - /* - * Stop running the current thread and start running the new thread. 
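The realtime dequeue above, like choose_thread() later, leans on the run queue's priority bitmap: bit (MAXPRI - pri) is set while level pri is occupied, so a find-first-set over the map yields the highest runnable priority in constant time. A self-contained sketch of that bookkeeping, using ffs() as a stand-in for the kernel's ffsbit() and assumed NRQS/MAXPRI values:

#include <stdint.h>
#include <strings.h>                    /* ffs() */

#define MAXPRI  127
#define NRQS    128
#define IDLEPRI 0

struct rq_map { uint32_t bits[NRQS / 32]; int highq; };

static void set_pri(struct rq_map *m, int pri)
{
    int b = MAXPRI - pri;               /* inverted: low bit == high pri */
    m->bits[b / 32] |= 1u << (b % 32);
    if (pri > m->highq)
        m->highq = pri;
}

/* Called when the last thread at 'pri' is removed. */
static void clear_pri(struct rq_map *m, int pri)
{
    if (pri != IDLEPRI) {
        int b = MAXPRI - pri;
        m->bits[b / 32] &= ~(1u << (b % 32));
    }
    for (int i = 0; i < NRQS / 32; i++) /* ffsbit() equivalent */
        if (m->bits[i] != 0) {
            m->highq = MAXPRI - (i * 32 + ffs((int)m->bits[i]) - 1);
            return;
        }
    m->highq = IDLEPRI;
}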
- * If continuation is non-zero, and the current thread is blocked, - * then it will resume by executing continuation on a new stack. + * Perform a context switch and start executing the new thread. + * + * If continuation is non-zero, resume the old (current) thread + * next by executing at continuation on a new stack, in lieu + * of returning. + * * Returns TRUE if the hand-off succeeds. * - * Assumes splsched. + * Called at splsched. */ +#define funnel_release_check(thread, debug) \ +MACRO_BEGIN \ + if ((thread)->funnel_state & TH_FN_OWNED) { \ + (thread)->funnel_state = TH_FN_REFUNNEL; \ + KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, \ + (thread)->funnel_lock, (debug), 0, 0, 0); \ + funnel_unlock((thread)->funnel_lock); \ + } \ +MACRO_END + +#define funnel_refunnel_check(thread, debug) \ +MACRO_BEGIN \ + if ((thread)->funnel_state & TH_FN_REFUNNEL) { \ + kern_return_t result = (thread)->wait_result; \ + \ + (thread)->funnel_state = 0; \ + KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, \ + (thread)->funnel_lock, (debug), 0, 0, 0); \ + funnel_lock((thread)->funnel_lock); \ + KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, \ + (thread)->funnel_lock, (debug), 0, 0, 0); \ + (thread)->funnel_state = TH_FN_OWNED; \ + (thread)->wait_result = result; \ + } \ +MACRO_END + static thread_t __current_thread(void) { @@ -1184,8 +1348,8 @@ thread_invoke( * a stack. */ if ( (old_thread->sched_mode & TH_MODE_REALTIME) && - !old_thread->stack_privilege ) { - old_thread->stack_privilege = old_thread->kernel_stack; + !old_thread->reserved_stack ) { + old_thread->reserved_stack = old_thread->kernel_stack; } if (old_cont != NULL) { @@ -1195,8 +1359,8 @@ thread_invoke( * check to see whether we can exchange it with * that of the new thread. */ - if ( old_thread->kernel_stack == old_thread->stack_privilege && - !new_thread->stack_privilege) + if ( old_thread->kernel_stack == old_thread->reserved_stack && + !new_thread->reserved_stack) goto need_stack; new_thread->state &= ~TH_STACK_HANDOFF; @@ -1208,8 +1372,9 @@ thread_invoke( * to its timer. */ processor = current_processor(); - new_thread->last_processor = processor; + processor->active_thread = new_thread; processor->current_pri = new_thread->sched_pri; + new_thread->last_processor = processor; ast_context(new_thread->top_act, processor->slot_num); timer_switch(&new_thread->system_timer); thread_unlock(new_thread); @@ -1221,7 +1386,7 @@ thread_invoke( _mk_sp_thread_done(old_thread, new_thread, processor); - stack_handoff(old_thread, new_thread); + machine_stack_handoff(old_thread, new_thread); _mk_sp_thread_begin(new_thread, processor); @@ -1251,7 +1416,7 @@ thread_invoke( case TH_RUN | TH_WAIT | TH_UNINT: case TH_RUN | TH_WAIT: { - boolean_t reap, wake, callblock; + boolean_t term, wake, callout; /* * Waiting. @@ -1259,22 +1424,26 @@ thread_invoke( old_thread->sleep_stamp = sched_tick; old_thread->state |= TH_STACK_HANDOFF; old_thread->state &= ~TH_RUN; - hw_atomic_sub(&old_thread->processor_set->run_count, 1); - callblock = old_thread->active_callout; + + term = (old_thread->state & TH_TERMINATE)? TRUE: FALSE; + callout = old_thread->active_callout; wake = old_thread->wake_active; old_thread->wake_active = FALSE; - reap = (old_thread->state & TH_TERMINATE)? 
TRUE: FALSE; + + if (old_thread->sched_mode & TH_MODE_TIMESHARE) + pset_share_decr(old_thread->processor_set); + pset_run_decr(old_thread->processor_set); thread_unlock(old_thread); wake_unlock(old_thread); - if (callblock) + if (callout) call_thread_block(); if (wake) thread_wakeup((event_t)&old_thread->wake_active); - if (reap) + if (term) thread_reaper_enqueue(old_thread); break; } @@ -1295,18 +1464,7 @@ thread_invoke( counter_always(c_thread_invoke_hits++); - if (new_thread->funnel_state & TH_FN_REFUNNEL) { - kern_return_t wait_result = new_thread->wait_result; - - new_thread->funnel_state = 0; - KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, - new_thread->funnel_lock, 2, 0, 0, 0); - funnel_lock(new_thread->funnel_lock); - KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, - new_thread->funnel_lock, 2, 0, 0, 0); - new_thread->funnel_state = TH_FN_OWNED; - new_thread->wait_result = wait_result; - } + funnel_refunnel_check(new_thread, 2); (void) spllo(); assert(new_cont); @@ -1329,19 +1487,9 @@ thread_invoke( counter(++c_thread_invoke_same); thread_unlock(new_thread); - if (new_thread->funnel_state & TH_FN_REFUNNEL) { - kern_return_t wait_result = new_thread->wait_result; - - new_thread->funnel_state = 0; - KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, - new_thread->funnel_lock, 3, 0, 0, 0); - funnel_lock(new_thread->funnel_lock); - KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, - new_thread->funnel_lock, 3, 0, 0, 0); - new_thread->funnel_state = TH_FN_OWNED; - new_thread->wait_result = wait_result; - } + funnel_refunnel_check(new_thread, 3); (void) spllo(); + call_continuation(old_cont); /*NOTREACHED*/ } @@ -1381,8 +1529,9 @@ need_stack: * Set up ast context of new thread and switch to its timer. */ processor = current_processor(); - new_thread->last_processor = processor; + processor->active_thread = new_thread; processor->current_pri = new_thread->sched_pri; + new_thread->last_processor = processor; ast_context(new_thread->top_act, processor->slot_num); timer_switch(&new_thread->system_timer); assert(thread_runnable(new_thread)); @@ -1398,11 +1547,11 @@ need_stack: _mk_sp_thread_done(old_thread, new_thread, processor); /* - * switch_context is machine-dependent. It does the - * machine-dependent components of a context-switch, like - * changing address spaces. It updates active_threads. + * Here is where we actually change register context, + * and address space if required. Note that control + * will not return here immediately. */ - old_thread = switch_context(old_thread, old_cont, new_thread); + old_thread = machine_switch_context(old_thread, old_cont, new_thread); /* Now on new thread's stack. Set a local variable to refer to it. */ new_thread = __current_thread(); @@ -1418,19 +1567,9 @@ need_stack: thread_dispatch(old_thread); if (old_cont) { - if (new_thread->funnel_state & TH_FN_REFUNNEL) { - kern_return_t wait_result = new_thread->wait_result; - - new_thread->funnel_state = 0; - KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, - new_thread->funnel_lock, 3, 0, 0, 0); - funnel_lock(new_thread->funnel_lock); - KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, - new_thread->funnel_lock, 3, 0, 0, 0); - new_thread->funnel_state = TH_FN_OWNED; - new_thread->wait_result = wait_result; - } + funnel_refunnel_check(new_thread, 3); (void) spllo(); + call_continuation(old_cont); /*NOTREACHED*/ } @@ -1441,7 +1580,8 @@ need_stack: /* * thread_continue: * - * Called when a thread gets a new stack, at splsched(); + * Called at splsched when a thread first receives + * a new stack after a continuation. 
*/ void thread_continue( @@ -1464,123 +1604,20 @@ thread_continue( if (old_thread != THREAD_NULL) thread_dispatch(old_thread); - if (self->funnel_state & TH_FN_REFUNNEL) { - kern_return_t wait_result = self->wait_result; - - self->funnel_state = 0; - KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0); - funnel_lock(self->funnel_lock); - KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0); - self->funnel_state = TH_FN_OWNED; - self->wait_result = wait_result; - } + funnel_refunnel_check(self, 4); (void)spllo(); - assert(continuation); + call_continuation(continuation); /*NOTREACHED*/ } -#if MACH_LDEBUG || MACH_KDB - -#define THREAD_LOG_SIZE 300 - -struct t64 { - unsigned long h; - unsigned long l; -}; - -struct { - struct t64 stamp; - thread_t thread; - long info1; - long info2; - long info3; - char * action; -} thread_log[THREAD_LOG_SIZE]; - -int thread_log_index; - -void check_thread_time(long n); - - -int check_thread_time_crash; - -#if 0 -void -check_thread_time(long us) -{ - struct t64 temp; - - if (!check_thread_time_crash) - return; - - temp = thread_log[0].stamp; - cyctm05_diff (&thread_log[1].stamp, &thread_log[0].stamp, &temp); - - if (temp.l >= us && thread_log[1].info != 0x49) /* HACK!!! */ - panic ("check_thread_time"); -} -#endif - -void -log_thread_action(char * action, long info1, long info2, long info3) -{ - int i; - spl_t x; - static unsigned int tstamp; - - x = splhigh(); - - for (i = THREAD_LOG_SIZE-1; i > 0; i--) { - thread_log[i] = thread_log[i-1]; - } - - thread_log[0].stamp.h = 0; - thread_log[0].stamp.l = tstamp++; - thread_log[0].thread = current_thread(); - thread_log[0].info1 = info1; - thread_log[0].info2 = info2; - thread_log[0].info3 = info3; - thread_log[0].action = action; -/* strcpy (&thread_log[0].action[0], action);*/ - - splx(x); -} -#endif /* MACH_LDEBUG || MACH_KDB */ - -#if MACH_KDB -#include -void db_show_thread_log(void); - -void -db_show_thread_log(void) -{ - int i; - - db_printf ("%s %s %s %s %s %s\n", " Thread ", " Info1 ", " Info2 ", - " Info3 ", " Timestamp ", "Action"); - - for (i = 0; i < THREAD_LOG_SIZE; i++) { - db_printf ("%08x %08x %08x %08x %08x/%08x %s\n", - thread_log[i].thread, - thread_log[i].info1, - thread_log[i].info2, - thread_log[i].info3, - thread_log[i].stamp.h, - thread_log[i].stamp.l, - thread_log[i].action); - } -} -#endif /* MACH_KDB */ - /* * thread_block_reason: * - * Block the current thread if a wait has been asserted, - * otherwise unconditionally yield the remainder of the - * current quantum unless reason contains AST_BLOCK. - * - * If a continuation is specified, then thread_block will + * Forces a reschedule, blocking the caller if a wait + * has been asserted. + * + * If a continuation is specified, then thread_invoke will * attempt to discard the thread's kernel stack. When the * thread resumes, it will execute the continuation function * on a new kernel stack. 
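The continuation machinery referred to above deserves a concrete picture: a thread that blocks with a continuation never returns from thread_block(); it is resumed by having the continuation called on a fresh kernel stack, which is what lets thread_invoke() discard the old one. A toy user-space model of that control flow (the names are illustrative, nothing here is kernel API):

#include <stdio.h>

typedef void (*continuation_t)(void);

static continuation_t saved_cont;      /* stands in for the thread's continuation */

/* Model of thread_block(cont): stash the continuation; the stack that
 * called us can now be reclaimed, because resume never returns here. */
static void model_thread_block(continuation_t cont)
{
    saved_cont = cont;
}

static void wakeup_continue(void)       /* runs later, on a fresh stack */
{
    printf("resumed via continuation\n");
}

static void wait_path(void)
{
    /* assert_wait(event, ...) would precede this in the kernel */
    model_thread_block(wakeup_continue);
    /* NOTREACHED in the kernel: resumption begins in wakeup_continue() */
}

int main(void)
{
    wait_path();
    saved_cont();                       /* the "scheduler" resuming the thread */
    return 0;
}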
@@ -1593,7 +1630,7 @@ thread_block_reason( ast_t reason) { register thread_t thread = current_thread(); - register processor_t myprocessor; + register processor_t processor; register thread_t new_thread; spl_t s; @@ -1601,50 +1638,32 @@ thread_block_reason( check_simple_locks(); - machine_clock_assist(); - s = splsched(); - if ((thread->funnel_state & TH_FN_OWNED) && !(reason & AST_PREEMPT)) { - thread->funnel_state = TH_FN_REFUNNEL; - KERNEL_DEBUG( - 0x603242c | DBG_FUNC_NONE, thread->funnel_lock, 2, 0, 0, 0); - funnel_unlock(thread->funnel_lock); - } + if (!(reason & AST_PREEMPT)) + funnel_release_check(thread, 2); - myprocessor = current_processor(); + processor = current_processor(); /* If we're explicitly yielding, force a subsequent quantum */ if (reason & AST_YIELD) - myprocessor->slice_quanta = 0; + processor->timeslice = 0; /* We're handling all scheduling AST's */ ast_off(AST_SCHEDULING); thread_lock(thread); - new_thread = thread_select(myprocessor); + new_thread = thread_select(processor); assert(new_thread && thread_runnable(new_thread)); thread_unlock(thread); while (!thread_invoke(thread, new_thread, reason, continuation)) { thread_lock(thread); - new_thread = thread_select(myprocessor); + new_thread = thread_select(processor); assert(new_thread && thread_runnable(new_thread)); thread_unlock(thread); } - if (thread->funnel_state & TH_FN_REFUNNEL) { - kern_return_t wait_result = thread->wait_result; - - thread->funnel_state = 0; - KERNEL_DEBUG( - 0x6032428 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0); - funnel_lock(thread->funnel_lock); - KERNEL_DEBUG( - 0x6032430 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0); - thread->funnel_state = TH_FN_OWNED; - thread->wait_result = wait_result; - } - + funnel_refunnel_check(thread, 5); splx(s); return (thread->wait_result); @@ -1666,12 +1685,11 @@ thread_block( * thread_run: * * Switch directly from the current (old) thread to the - * specified thread, handing off our quantum if possible. + * new thread, handing off our quantum if appropriate. * * New thread must be runnable, and not on a run queue. * - * Assumption: - * at splsched. + * Called at splsched. */ int thread_run( @@ -1683,43 +1701,26 @@ thread_run( assert(old_thread == current_thread()); - machine_clock_assist(); - - if (old_thread->funnel_state & TH_FN_OWNED) { - old_thread->funnel_state = TH_FN_REFUNNEL; - KERNEL_DEBUG( - 0x603242c | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0); - funnel_unlock(old_thread->funnel_lock); - } + funnel_release_check(old_thread, 3); while (!thread_invoke(old_thread, new_thread, handoff, continuation)) { - register processor_t myprocessor = current_processor(); + register processor_t processor = current_processor(); thread_lock(old_thread); - new_thread = thread_select(myprocessor); + new_thread = thread_select(processor); thread_unlock(old_thread); handoff = AST_NONE; } - /* if we fell thru */ - if (old_thread->funnel_state & TH_FN_REFUNNEL) { - kern_return_t wait_result = old_thread->wait_result; - - old_thread->funnel_state = 0; - KERNEL_DEBUG( - 0x6032428 | DBG_FUNC_NONE, old_thread->funnel_lock, 6, 0, 0, 0); - funnel_lock(old_thread->funnel_lock); - KERNEL_DEBUG( - 0x6032430 | DBG_FUNC_NONE, old_thread->funnel_lock, 6, 0, 0, 0); - old_thread->funnel_state = TH_FN_OWNED; - old_thread->wait_result = wait_result; - } + funnel_refunnel_check(old_thread, 6); return (old_thread->wait_result); } /* - * Dispatches a running thread that is not on a runq. + * Dispatches a running thread that is not on a + * run queue. 
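thread_block_reason() and thread_run() above share one dispatch skeleton: select a candidate, attempt the hand-off, and if thread_invoke() fails (the candidate may have been grabbed by another processor or become unrunnable in the window) select again. Schematically, with stub types in place of the kernel's:

#include <stdbool.h>
#include <stddef.h>

struct cpu { int dummy; };
struct thr { int dummy; };

/* Stubs standing in for thread_select() and thread_invoke(). */
static struct thr *select_next(struct cpu *p) { (void)p; return NULL; }
static bool try_invoke(struct thr *self, struct thr *next)
{
    (void)self; (void)next;
    return true;                        /* a real hand-off can fail */
}

static void
dispatch_loop(struct cpu *processor, struct thr *self)
{
    struct thr *next = select_next(processor);

    while (!try_invoke(self, next))     /* candidate taken elsewhere: */
        next = select_next(processor);  /* choose again               */
}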
+ * * Called at splsched. */ void @@ -1754,29 +1755,33 @@ thread_dispatch( case TH_RUN | TH_WAIT | TH_UNINT: case TH_RUN | TH_WAIT: { - boolean_t reap, wake, callblock; + boolean_t term, wake, callout; /* * Waiting */ thread->sleep_stamp = sched_tick; thread->state &= ~TH_RUN; - hw_atomic_sub(&thread->processor_set->run_count, 1); - callblock = thread->active_callout; + + term = (thread->state & TH_TERMINATE)? TRUE: FALSE; + callout = thread->active_callout; wake = thread->wake_active; thread->wake_active = FALSE; - reap = (thread->state & TH_TERMINATE)? TRUE: FALSE; + + if (thread->sched_mode & TH_MODE_TIMESHARE) + pset_share_decr(thread->processor_set); + pset_run_decr(thread->processor_set); thread_unlock(thread); wake_unlock(thread); - if (callblock) + if (callout) call_thread_block(); if (wake) thread_wakeup((event_t)&thread->wake_active); - if (reap) + if (term) thread_reaper_enqueue(thread); return; @@ -1790,7 +1795,7 @@ thread_dispatch( break; default: - panic("thread_dispatch: bad thread state 0x%x\n", thread->state); + panic("thread_dispatch: state 0x%x\n", thread->state); } thread_unlock(thread); @@ -1798,15 +1803,19 @@ thread_dispatch( } /* - * Enqueue thread on run queue. Thread must be locked, - * and not already be on a run queue. Returns TRUE iff - * the particular queue level was empty beforehand. + * Enqueue thread on run queue. Thread must be locked, + * and not already be on a run queue. Returns TRUE + * if a preemption is indicated based on the state + * of the run queue. + * + * Run queue must be locked, see run_queue_remove() + * for more info. */ -boolean_t +static boolean_t run_queue_enqueue( register run_queue_t rq, register thread_t thread, - boolean_t tail) + integer_t options) { register int whichq = thread->sched_pri; register queue_t queue = &rq->queues[whichq]; @@ -1814,81 +1823,173 @@ run_queue_enqueue( assert(whichq >= MINPRI && whichq <= MAXPRI); - simple_lock(&rq->lock); assert(thread->runq == RUN_QUEUE_NULL); if (queue_empty(queue)) { enqueue_tail(queue, (queue_entry_t)thread); setbit(MAXPRI - whichq, rq->bitmap); - if (whichq > rq->highq) + if (whichq > rq->highq) { rq->highq = whichq; - result = TRUE; + result = TRUE; + } } else - if (tail) - enqueue_tail(queue, (queue_entry_t)thread); - else + if (options & SCHED_HEADQ) enqueue_head(queue, (queue_entry_t)thread); + else + enqueue_tail(queue, (queue_entry_t)thread); thread->runq = rq; if (thread->sched_mode & TH_MODE_PREEMPT) rq->urgency++; rq->count++; -#if DEBUG - thread_check(thread, rq); -#endif /* DEBUG */ - simple_unlock(&rq->lock); return (result); } -struct { - uint32_t pset_idle_last, - pset_idle_any, - pset_self, - pset_last, - pset_other, - bound_idle, - bound_self, - bound_other; -} dispatch_counts; - /* - * thread_setrun: - * - * Dispatch thread for execution, directly onto an idle - * processor if possible. Else put on appropriate run - * queue. (local if bound, else processor set) - * - * Thread must be locked. - * - * The tail parameter indicates the proper placement of - * the thread on a run queue. + * Enqueue a thread for realtime execution, similar + * to above. Handles preemption directly. 
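Note the changed contract of run_queue_enqueue() above: its boolean no longer means "this queue level was empty" but "a preemption may be indicated", that is, the incoming thread raised the queue's highest priority. A reduced model of just that return value (queue links elided, the SCHED_HEADQ bit value is an assumption):

#include <stdbool.h>

#define SCHED_HEADQ 0x2                 /* assumed bit in the options word */

struct mini_rq { int highq; int count; };

/* True when the enqueue raised highq: the caller should preempt-check. */
static bool
mini_enqueue(struct mini_rq *rq, int pri, int options)
{
    bool preempt_hint = false;

    (void)options;                      /* head vs. tail placement elided */
    if (pri > rq->highq) {
        rq->highq = pri;
        preempt_hint = true;
    }
    rq->count++;
    return preempt_hint;
}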
*/ -void -thread_setrun( - register thread_t new_thread, - boolean_t tail) +static void +realtime_schedule_insert( + register processor_set_t pset, + register thread_t thread) { - register processor_t processor; - register processor_set_t pset; - register thread_t thread; - boolean_t try_preempt = FALSE; - ast_t preempt = AST_BLOCK; + register run_queue_t rq = &pset->runq; + register int whichq = thread->sched_pri; + register queue_t queue = &rq->queues[whichq]; + uint64_t deadline = thread->realtime.deadline; + boolean_t try_preempt = FALSE; - assert(thread_runnable(new_thread)); - - /* - * Update priority if needed. - */ - if (new_thread->sched_stamp != sched_tick) - update_priority(new_thread); + assert(whichq >= BASEPRI_REALTIME && whichq <= MAXPRI); - /* + assert(thread->runq == RUN_QUEUE_NULL); + if (queue_empty(queue)) { + enqueue_tail(queue, (queue_entry_t)thread); + + setbit(MAXPRI - whichq, rq->bitmap); + if (whichq > rq->highq) + rq->highq = whichq; + try_preempt = TRUE; + } + else { + register thread_t entry = (thread_t)queue_first(queue); + + while (TRUE) { + if ( queue_end(queue, (queue_entry_t)entry) || + deadline < entry->realtime.deadline ) { + entry = (thread_t)queue_prev((queue_entry_t)entry); + break; + } + + entry = (thread_t)queue_next((queue_entry_t)entry); + } + + if ((queue_entry_t)entry == queue) + try_preempt = TRUE; + + insque((queue_entry_t)thread, (queue_entry_t)entry); + } + + thread->runq = rq; + assert(thread->sched_mode & TH_MODE_PREEMPT); + rq->count++; rq->urgency++; + + if (try_preempt) { + register processor_t processor; + + processor = current_processor(); + if ( pset == processor->processor_set && + (thread->sched_pri > processor->current_pri || + deadline < processor->deadline ) ) { + dispatch_counts.realtime_self++; + simple_unlock(&pset->sched_lock); + + ast_on(AST_PREEMPT | AST_URGENT); + return; + } + + if ( pset->processor_count > 1 || + pset != processor->processor_set ) { + processor_t myprocessor, lastprocessor; + queue_entry_t next; + + myprocessor = processor; + processor = thread->last_processor; + if ( processor != myprocessor && + processor != PROCESSOR_NULL && + processor->processor_set == pset && + processor->state == PROCESSOR_RUNNING && + (thread->sched_pri > processor->current_pri || + deadline < processor->deadline ) ) { + dispatch_counts.realtime_last++; + cause_ast_check(processor); + simple_unlock(&pset->sched_lock); + return; + } + + lastprocessor = processor; + queue = &pset->active_queue; + processor = (processor_t)queue_first(queue); + while (!queue_end(queue, (queue_entry_t)processor)) { + next = queue_next((queue_entry_t)processor); + + if ( processor != myprocessor && + processor != lastprocessor && + (thread->sched_pri > processor->current_pri || + deadline < processor->deadline ) ) { + if (!queue_end(queue, next)) { + remqueue(queue, (queue_entry_t)processor); + enqueue_tail(queue, (queue_entry_t)processor); + } + dispatch_counts.realtime_other++; + cause_ast_check(processor); + simple_unlock(&pset->sched_lock); + return; + } + + processor = (processor_t)next; + } + } + } + + simple_unlock(&pset->sched_lock); +} + +/* + * thread_setrun: + * + * Dispatch thread for execution, directly onto an idle + * processor if possible. Else put on appropriate run + * queue. (local if bound, else processor set) + * + * Thread must be locked. 
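realtime_schedule_insert() above keeps the realtime band of the pset run queue sorted by deadline, earliest first, and flags a preemption check when the new thread lands at the head. The ordered insert reduces to the classic list walk below (singly linked here for brevity; the kernel uses its doubly linked queue primitives):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct rt_thread {
    uint64_t deadline;                  /* thread->realtime.deadline */
    struct rt_thread *next;
};

/* Insert in non-decreasing deadline order (FIFO among equal deadlines).
 * Returns true when the thread becomes the queue head, the case where
 * the code above sets try_preempt. */
static bool
rt_insert(struct rt_thread **head, struct rt_thread *t)
{
    struct rt_thread **pp = head;

    while (*pp != NULL && (*pp)->deadline <= t->deadline)
        pp = &(*pp)->next;

    t->next = *pp;
    *pp = t;
    return (pp == head);
}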
+ */ +void +thread_setrun( + register thread_t new_thread, + integer_t options) +{ + register processor_t processor; + register processor_set_t pset; + register thread_t thread; + ast_t preempt = (options & SCHED_PREEMPT)? + AST_PREEMPT: AST_NONE; + + assert(thread_runnable(new_thread)); + + /* + * Update priority if needed. + */ + if (new_thread->sched_stamp != sched_tick) + update_priority(new_thread); + + /* * Check for urgent preemption. */ if (new_thread->sched_mode & TH_MODE_PREEMPT) - preempt |= AST_URGENT; + preempt = (AST_PREEMPT | AST_URGENT); assert(new_thread->runq == RUN_QUEUE_NULL); @@ -1902,22 +2003,26 @@ thread_setrun( if ( pset->processor_count > 1 && processor != PROCESSOR_NULL && processor->state == PROCESSOR_IDLE ) { - simple_lock(&processor->lock); + processor_lock(processor); simple_lock(&pset->sched_lock); if ( processor->processor_set == pset && processor->state == PROCESSOR_IDLE ) { remqueue(&pset->idle_queue, (queue_entry_t)processor); pset->idle_count--; processor->next_thread = new_thread; + if (new_thread->sched_pri >= BASEPRI_RTQUEUES) + processor->deadline = new_thread->realtime.deadline; + else + processor->deadline = UINT64_MAX; processor->state = PROCESSOR_DISPATCHING; + dispatch_counts.idle_pset_last++; simple_unlock(&pset->sched_lock); - simple_unlock(&processor->lock); + processor_unlock(processor); if (processor != current_processor()) machine_signal_idle(processor); - dispatch_counts.pset_idle_last++; return; } - simple_unlock(&processor->lock); + processor_unlock(processor); } else simple_lock(&pset->sched_lock); @@ -1930,158 +2035,162 @@ thread_setrun( processor = (processor_t)dequeue_head(&pset->idle_queue); pset->idle_count--; processor->next_thread = new_thread; + if (new_thread->sched_pri >= BASEPRI_RTQUEUES) + processor->deadline = new_thread->realtime.deadline; + else + processor->deadline = UINT64_MAX; processor->state = PROCESSOR_DISPATCHING; + dispatch_counts.idle_pset_any++; simple_unlock(&pset->sched_lock); if (processor != current_processor()) machine_signal_idle(processor); - dispatch_counts.pset_idle_any++; return; } - /* - * Place thread on run queue. - */ - if (run_queue_enqueue(&pset->runq, new_thread, tail)) - try_preempt = TRUE; + if (new_thread->sched_pri >= BASEPRI_RTQUEUES) + realtime_schedule_insert(pset, new_thread); + else { + if (!run_queue_enqueue(&pset->runq, new_thread, options)) + preempt = AST_NONE; - /* - * Update the timesharing quanta. - */ - pset_quanta_update(pset); - - /* - * Preempt check. - */ - processor = current_processor(); - thread = processor->cpu_data->active_thread; - if (try_preempt) { /* - * First try the current processor - * if it is a member of the correct - * processor set. + * Update the timesharing quanta. */ - if ( pset == processor->processor_set && - csw_needed(thread, processor) ) { - simple_unlock(&pset->sched_lock); - - ast_on(preempt); - dispatch_counts.pset_self++; - return; - } - + timeshare_quanta_update(pset); + /* - * If that failed and we have other - * processors available keep trying. + * Preempt check. */ - if ( pset->processor_count > 1 || - pset != processor->processor_set ) { - queue_t active = &pset->active_queue; - processor_t myprocessor, lastprocessor; - queue_entry_t next; - + if (preempt != AST_NONE) { /* - * Next try the last processor - * dispatched on. + * First try the current processor + * if it is a member of the correct + * processor set. 
*/ - myprocessor = processor; - processor = new_thread->last_processor; - if ( processor != myprocessor && - processor != PROCESSOR_NULL && - processor->processor_set == pset && - processor->state == PROCESSOR_RUNNING && - new_thread->sched_pri > processor->current_pri ) { - cause_ast_check(processor); + processor = current_processor(); + thread = processor->active_thread; + if ( pset == processor->processor_set && + csw_needed(thread, processor) ) { + dispatch_counts.pset_self++; simple_unlock(&pset->sched_lock); - dispatch_counts.pset_last++; + + ast_on(preempt); return; } /* - * Lastly, pick any other - * available processor. + * If that failed and we have other + * processors available keep trying. */ - lastprocessor = processor; - processor = (processor_t)queue_first(active); - while (!queue_end(active, (queue_entry_t)processor)) { - next = queue_next((queue_entry_t)processor); - + if ( pset->processor_count > 1 || + pset != processor->processor_set ) { + queue_t queue = &pset->active_queue; + processor_t myprocessor, lastprocessor; + queue_entry_t next; + + /* + * Next try the last processor + * dispatched on. + */ + myprocessor = processor; + processor = new_thread->last_processor; if ( processor != myprocessor && - processor != lastprocessor && + processor != PROCESSOR_NULL && + processor->processor_set == pset && + processor->state == PROCESSOR_RUNNING && new_thread->sched_pri > processor->current_pri ) { - if (!queue_end(active, next)) { - remqueue(active, (queue_entry_t)processor); - enqueue_tail(active, (queue_entry_t)processor); - } + dispatch_counts.pset_last++; cause_ast_check(processor); simple_unlock(&pset->sched_lock); - dispatch_counts.pset_other++; return; } - processor = (processor_t)next; + /* + * Lastly, pick any other + * available processor. + */ + lastprocessor = processor; + processor = (processor_t)queue_first(queue); + while (!queue_end(queue, (queue_entry_t)processor)) { + next = queue_next((queue_entry_t)processor); + + if ( processor != myprocessor && + processor != lastprocessor && + new_thread->sched_pri > + processor->current_pri ) { + if (!queue_end(queue, next)) { + remqueue(queue, (queue_entry_t)processor); + enqueue_tail(queue, (queue_entry_t)processor); + } + dispatch_counts.pset_other++; + cause_ast_check(processor); + simple_unlock(&pset->sched_lock); + return; + } + + processor = (processor_t)next; + } } } - } - simple_unlock(&pset->sched_lock); + simple_unlock(&pset->sched_lock); + } } else { /* * Bound, can only run on bound processor. Have to lock * processor here because it may not be the current one. 
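The preemption search above proceeds from cheapest to most disruptive target: the current processor (just set an AST on ourselves), then the thread's last processor (likely cache affinity), then any other running processor in the set. A simplified model of that cascade, treating csw_needed() as a plain priority comparison and omitting pset membership and the active-queue rotation:

#include <stdbool.h>
#include <stddef.h>

struct cpu { int current_pri; bool running; };

static struct cpu *
pick_target(struct cpu cpus[], int n, struct cpu *self, struct cpu *last,
    int new_pri)
{
    if (new_pri > self->current_pri)
        return self;                    /* cheapest: local AST */

    if (last != NULL && last != self && last->running &&
        new_pri > last->current_pri)
        return last;                    /* last processor dispatched on */

    for (int i = 0; i < n; i++) {       /* lastly, any other running cpu */
        struct cpu *p = &cpus[i];
        if (p != self && p != last && p->running &&
            new_pri > p->current_pri)
            return p;
    }
    return NULL;                        /* nobody to preempt: enqueue only */
}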
*/ - if (processor->state == PROCESSOR_IDLE) { - simple_lock(&processor->lock); - pset = processor->processor_set; + processor_lock(processor); + pset = processor->processor_set; + if (pset != PROCESSOR_SET_NULL) { simple_lock(&pset->sched_lock); if (processor->state == PROCESSOR_IDLE) { remqueue(&pset->idle_queue, (queue_entry_t)processor); pset->idle_count--; processor->next_thread = new_thread; + processor->deadline = UINT64_MAX; processor->state = PROCESSOR_DISPATCHING; + dispatch_counts.idle_bound++; simple_unlock(&pset->sched_lock); - simple_unlock(&processor->lock); + processor_unlock(processor); if (processor != current_processor()) machine_signal_idle(processor); - dispatch_counts.bound_idle++; return; } - simple_unlock(&pset->sched_lock); - simple_unlock(&processor->lock); } - if (run_queue_enqueue(&processor->runq, new_thread, tail)) - try_preempt = TRUE; + if (!run_queue_enqueue(&processor->runq, new_thread, options)) + preempt = AST_NONE; - if (processor == current_processor()) { - if (try_preempt) { - thread = processor->cpu_data->active_thread; + if (preempt != AST_NONE) { + if (processor == current_processor()) { + thread = processor->active_thread; if (csw_needed(thread, processor)) { - ast_on(preempt); dispatch_counts.bound_self++; + ast_on(preempt); } } - } - else { - if (try_preempt) { - if ( processor->state == PROCESSOR_RUNNING && - new_thread->sched_pri > processor->current_pri ) { - cause_ast_check(processor); - dispatch_counts.bound_other++; - return; - } - } - - if (processor->state == PROCESSOR_IDLE) { - machine_signal_idle(processor); - dispatch_counts.bound_idle++; + else + if ( processor->state == PROCESSOR_RUNNING && + new_thread->sched_pri > processor->current_pri ) { + dispatch_counts.bound_other++; + cause_ast_check(processor); } } + + if (pset != PROCESSOR_SET_NULL) + simple_unlock(&pset->sched_lock); + + processor_unlock(processor); } } /* - * Called at splsched by a thread on itself. + * Check for a possible preemption point in + * the (current) thread. + * + * Called at splsched. */ ast_t csw_check( @@ -2092,38 +2201,41 @@ csw_check( ast_t result = AST_NONE; run_queue_t runq; - if (first_quantum(processor)) { + if (first_timeslice(processor)) { runq = &processor->processor_set->runq; + if (runq->highq >= BASEPRI_RTQUEUES) + return (AST_PREEMPT | AST_URGENT); + if (runq->highq > current_pri) { if (runq->urgency > 0) - return (AST_BLOCK | AST_URGENT); + return (AST_PREEMPT | AST_URGENT); - result |= AST_BLOCK; + result |= AST_PREEMPT; } runq = &processor->runq; if (runq->highq > current_pri) { if (runq->urgency > 0) - return (AST_BLOCK | AST_URGENT); + return (AST_PREEMPT | AST_URGENT); - result |= AST_BLOCK; + result |= AST_PREEMPT; } } else { runq = &processor->processor_set->runq; if (runq->highq >= current_pri) { if (runq->urgency > 0) - return (AST_BLOCK | AST_URGENT); + return (AST_PREEMPT | AST_URGENT); - result |= AST_BLOCK; + result |= AST_PREEMPT; } runq = &processor->runq; if (runq->highq >= current_pri) { if (runq->urgency > 0) - return (AST_BLOCK | AST_URGENT); + return (AST_PREEMPT | AST_URGENT); - result |= AST_BLOCK; + result |= AST_PREEMPT; } } @@ -2131,7 +2243,7 @@ csw_check( return (result); if (thread->state & TH_SUSP) - result |= AST_BLOCK; + result |= AST_PREEMPT; return (result); } @@ -2139,17 +2251,18 @@ csw_check( /* * set_sched_pri: * - * Set the current scheduled priority of the specified thread. + * Set the scheduled priority of the specified thread. + * * This may cause the thread to change queues. 
* - * The thread *must* be locked by the caller. + * Thread must be locked. */ void set_sched_pri( thread_t thread, int priority) { - register struct run_queue *rq = rem_runq(thread); + register struct run_queue *rq = run_queue_remove(thread); if ( !(thread->sched_mode & TH_MODE_TIMESHARE) && (priority >= BASEPRI_PREEMPT || @@ -2163,9 +2276,9 @@ set_sched_pri( thread->sched_pri = priority; if (rq != RUN_QUEUE_NULL) - thread_setrun(thread, TAIL_Q); + thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); else - if ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN) { + if (thread->state & TH_RUN) { processor_t processor = thread->last_processor; if (thread == current_thread()) { @@ -2177,42 +2290,53 @@ set_sched_pri( } else if ( processor != PROCESSOR_NULL && - processor->cpu_data->active_thread == thread ) + processor->active_thread == thread ) cause_ast_check(processor); } } /* - * rem_runq: + * run_queue_remove: * - * Remove a thread from its run queue. - * The run queue that the process was on is returned - * (or RUN_QUEUE_NULL if not on a run queue). Thread *must* be locked - * before calling this routine. Unusual locking protocol on runq - * field in thread structure makes this code interesting; see thread.h. + * Remove a thread from its current run queue and + * return the run queue if successful. + * + * Thread must be locked. */ run_queue_t -rem_runq( +run_queue_remove( thread_t thread) { - register struct run_queue *rq; + register run_queue_t rq = thread->runq; - rq = thread->runq; /* * If rq is RUN_QUEUE_NULL, the thread will stay out of the - * run_queues because the caller locked the thread. Otherwise - * the thread is on a runq, but could leave. + * run queues because the caller locked the thread. Otherwise + * the thread is on a run queue, but could be chosen for dispatch + * and removed. */ if (rq != RUN_QUEUE_NULL) { - simple_lock(&rq->lock); + processor_set_t pset = thread->processor_set; + processor_t processor = thread->bound_processor; + + /* + * The run queues are locked by the pset scheduling + * lock, except when a processor is off-line the + * local run queue is locked by the processor lock. + */ + if (processor != PROCESSOR_NULL) { + processor_lock(processor); + pset = processor->processor_set; + } + + if (pset != PROCESSOR_SET_NULL) + simple_lock(&pset->sched_lock); + if (rq == thread->runq) { /* - * Thread is in a runq and we have a lock on - * that runq. + * Thread is on a run queue and we have a lock on + * that run queue. */ -#if DEBUG - thread_check(thread, rq); -#endif /* DEBUG */ remqueue(&rq->queues[0], (queue_entry_t)thread); rq->count--; if (thread->sched_mode & TH_MODE_PREEMPT) @@ -2225,20 +2349,23 @@ rem_runq( clrbit(MAXPRI - thread->sched_pri, rq->bitmap); rq->highq = MAXPRI - ffsbit(rq->bitmap); } + thread->runq = RUN_QUEUE_NULL; - simple_unlock(&rq->lock); } else { /* - * The thread left the runq before we could - * lock the runq. It is not on a runq now, and - * can't move again because this routine's - * caller locked the thread. + * The thread left the run queue before we could + * lock the run queue. */ assert(thread->runq == RUN_QUEUE_NULL); - simple_unlock(&rq->lock); rq = RUN_QUEUE_NULL; } + + if (pset != PROCESSOR_SET_NULL) + simple_unlock(&pset->sched_lock); + + if (processor != PROCESSOR_NULL) + processor_unlock(processor); } return (rq); @@ -2247,133 +2374,70 @@ rem_runq( /* * choose_thread: * - * Choose a thread to execute. The thread chosen is removed - * from its run queue. Note that this requires only that the runq - * lock be held. 
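csw_check() above encodes the quantum rule in its comparisons: while the processor is still in its first timeslice, only strictly higher priority work (or any realtime work) preempts; once the quantum has expired, equal priority is enough. Boiled down to the decision alone (the AST bit values and the realtime band floor are assumptions for illustration):

#include <stdbool.h>

#define AST_NONE    0x0
#define AST_PREEMPT 0x1                 /* illustrative bit values */
#define AST_URGENT  0x2
#define BASEPRI_RTQUEUES 96             /* assumed realtime band floor */

static int
csw_model(bool first_timeslice, int runq_highq, int runq_urgency,
    int current_pri)
{
    if (first_timeslice && runq_highq >= BASEPRI_RTQUEUES)
        return AST_PREEMPT | AST_URGENT;    /* realtime always wins */

    /* Strictly higher wins mid-quantum; a tie wins once it expired. */
    if (first_timeslice ? runq_highq > current_pri
                        : runq_highq >= current_pri)
        return runq_urgency > 0 ? (AST_PREEMPT | AST_URGENT) : AST_PREEMPT;

    return AST_NONE;
}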
+ * Remove a thread to execute from the run queues + * and return it. * - * Strategy: - * Check processor runq first; if anything found, run it. - * Else check pset runq; if nothing found, return idle thread. - * - * Second line of strategy is implemented by choose_pset_thread. - * - * Called with both the local & pset run queues locked, returned - * unlocked. + * Called with pset scheduling lock held. */ -thread_t +static thread_t choose_thread( - processor_t myprocessor) + processor_set_t pset, + processor_t processor) { - thread_t thread; - register queue_t q; register run_queue_t runq; - processor_set_t pset; + register thread_t thread; + register queue_t q; - runq = &myprocessor->runq; - pset = myprocessor->processor_set; + runq = &processor->runq; if (runq->count > 0 && runq->highq >= pset->runq.highq) { - simple_unlock(&pset->runq.lock); q = runq->queues + runq->highq; -#if MACH_ASSERT - if (!queue_empty(q)) { -#endif /*MACH_ASSERT*/ - thread = (thread_t)q->next; - ((queue_entry_t)thread)->next->prev = q; - q->next = ((queue_entry_t)thread)->next; - thread->runq = RUN_QUEUE_NULL; - runq->count--; - if (thread->sched_mode & TH_MODE_PREEMPT) - runq->urgency--; - assert(runq->urgency >= 0); - if (queue_empty(q)) { - if (runq->highq != IDLEPRI) - clrbit(MAXPRI - runq->highq, runq->bitmap); - runq->highq = MAXPRI - ffsbit(runq->bitmap); - } - simple_unlock(&runq->lock); - return (thread); -#if MACH_ASSERT + + thread = (thread_t)q->next; + ((queue_entry_t)thread)->next->prev = q; + q->next = ((queue_entry_t)thread)->next; + thread->runq = RUN_QUEUE_NULL; + runq->count--; + if (thread->sched_mode & TH_MODE_PREEMPT) + runq->urgency--; + assert(runq->urgency >= 0); + if (queue_empty(q)) { + if (runq->highq != IDLEPRI) + clrbit(MAXPRI - runq->highq, runq->bitmap); + runq->highq = MAXPRI - ffsbit(runq->bitmap); } - panic("choose_thread"); -#endif /*MACH_ASSERT*/ - /*NOTREACHED*/ - } - simple_unlock(&myprocessor->runq.lock); - return (choose_pset_thread(myprocessor, pset)); -} + processor->deadline = UINT64_MAX; -/* - * choose_pset_thread: choose a thread from processor_set runq or - * set processor idle and choose its idle thread. - * - * This routine chooses and removes a thread from the runq if there - * is one (and returns it), else it sets the processor idle and - * returns its idle thread. - * - * Called with both local & pset run queues locked, returned - * unlocked. - */ -thread_t -choose_pset_thread( - register processor_t myprocessor, - processor_set_t pset) -{ - register run_queue_t runq; - register thread_t thread; - register queue_t q; - - runq = &pset->runq; - if (runq->count > 0) { - q = runq->queues + runq->highq; -#if MACH_ASSERT - if (!queue_empty(q)) { -#endif /*MACH_ASSERT*/ - thread = (thread_t)q->next; - ((queue_entry_t)thread)->next->prev = q; - q->next = ((queue_entry_t)thread)->next; - thread->runq = RUN_QUEUE_NULL; - runq->count--; - if (thread->sched_mode & TH_MODE_PREEMPT) - runq->urgency--; - assert(runq->urgency >= 0); - if (queue_empty(q)) { - if (runq->highq != IDLEPRI) - clrbit(MAXPRI - runq->highq, runq->bitmap); - runq->highq = MAXPRI - ffsbit(runq->bitmap); - } - pset_quanta_update(pset); - simple_unlock(&runq->lock); - return (thread); -#if MACH_ASSERT - } - panic("choose_pset_thread"); -#endif /*MACH_ASSERT*/ - /*NOTREACHED*/ + return (thread); } - simple_unlock(&runq->lock); - /* - * Nothing is runnable, so set this processor idle if it - * was running. If it was in an assignment or shutdown, - * leave it alone. Return its idle thread. 
- */ - simple_lock(&pset->sched_lock); - if (myprocessor->state == PROCESSOR_RUNNING) { - remqueue(&pset->active_queue, (queue_entry_t)myprocessor); - myprocessor->state = PROCESSOR_IDLE; + runq = &pset->runq; - if (myprocessor == master_processor) - enqueue_tail(&pset->idle_queue, (queue_entry_t)myprocessor); - else - enqueue_head(&pset->idle_queue, (queue_entry_t)myprocessor); + assert(runq->count > 0); + q = runq->queues + runq->highq; - pset->idle_count++; + thread = (thread_t)q->next; + ((queue_entry_t)thread)->next->prev = q; + q->next = ((queue_entry_t)thread)->next; + thread->runq = RUN_QUEUE_NULL; + runq->count--; + if (runq->highq >= BASEPRI_RTQUEUES) + processor->deadline = thread->realtime.deadline; + else + processor->deadline = UINT64_MAX; + if (thread->sched_mode & TH_MODE_PREEMPT) + runq->urgency--; + assert(runq->urgency >= 0); + if (queue_empty(q)) { + if (runq->highq != IDLEPRI) + clrbit(MAXPRI - runq->highq, runq->bitmap); + runq->highq = MAXPRI - ffsbit(runq->bitmap); } - simple_unlock(&pset->sched_lock); - return (myprocessor->idle_thread); + timeshare_quanta_update(pset); + + return (thread); } /* @@ -2389,7 +2453,7 @@ int no_dispatch_count = 0; void idle_thread_continue(void) { - register processor_t myprocessor; + register processor_t processor; register volatile thread_t *threadp; register volatile int *gcount; register volatile int *lcount; @@ -2399,161 +2463,183 @@ idle_thread_continue(void) int mycpu; mycpu = cpu_number(); - myprocessor = cpu_to_processor(mycpu); - threadp = (volatile thread_t *) &myprocessor->next_thread; - lcount = (volatile int *) &myprocessor->runq.count; + processor = cpu_to_processor(mycpu); + threadp = (volatile thread_t *) &processor->next_thread; + lcount = (volatile int *) &processor->runq.count; - for (;;) { - gcount = (volatile int *)&myprocessor->processor_set->runq.count; + gcount = (volatile int *)&processor->processor_set->runq.count; - (void)splsched(); - while ( (*threadp == (volatile thread_t)THREAD_NULL) && - (*gcount == 0) && (*lcount == 0) ) { - - /* check for ASTs while we wait */ - if (need_ast[mycpu] &~ ( AST_SCHEDULING | AST_BSD )) { - /* don't allow scheduling ASTs */ - need_ast[mycpu] &= ~( AST_SCHEDULING | AST_BSD ); - ast_taken(AST_ALL, TRUE); /* back at spllo */ - } - else -#ifdef __ppc__ - machine_idle(); -#else - (void)spllo(); -#endif - machine_clock_assist(); + (void)splsched(); + while ( (*threadp == (volatile thread_t)THREAD_NULL) && + (*gcount == 0) && (*lcount == 0) ) { - (void)splsched(); + /* check for ASTs while we wait */ + if (need_ast[mycpu] &~ ( AST_SCHEDULING | AST_BSD )) { + /* no ASTs for us */ + need_ast[mycpu] &= AST_NONE; + (void)spllo(); } + else + machine_idle(); + + (void)splsched(); + } + + /* + * This is not a switch statement to avoid the + * bounds checking code in the common case. + */ + pset = processor->processor_set; + simple_lock(&pset->sched_lock); + state = processor->state; + if (state == PROCESSOR_DISPATCHING) { /* - * This is not a switch statement to avoid the - * bounds checking code in the common case. + * Commmon case -- cpu dispatched. */ - pset = myprocessor->processor_set; - simple_lock(&pset->sched_lock); -retry: - state = myprocessor->state; - if (state == PROCESSOR_DISPATCHING) { - /* - * Commmon case -- cpu dispatched. 
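The idle loop above spins on three values with no lock at all, the dispatch slot plus the local and global run-queue counts, relying on volatile loads, and drops into machine_idle() between checks. The wait condition in isolation (a sketch only; the AST servicing and splsched discipline are elided):

#include <stddef.h>

struct idle_view {
    void * volatile *threadp;           /* &processor->next_thread */
    volatile int *gcount;               /* &pset->runq.count       */
    volatile int *lcount;               /* &processor->runq.count  */
};

/* Spin until a dispatch or runnable work appears; each pass through
 * the loop would service pending ASTs or call machine_idle(). */
static void
idle_wait(struct idle_view *v)
{
    while (*v->threadp == NULL && *v->gcount == 0 && *v->lcount == 0)
        ;                               /* machine_idle() goes here */
}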
- */ - new_thread = *threadp; - *threadp = (volatile thread_t) THREAD_NULL; - myprocessor->state = PROCESSOR_RUNNING; - enqueue_tail(&pset->active_queue, (queue_entry_t)myprocessor); - simple_unlock(&pset->sched_lock); + new_thread = *threadp; + *threadp = (volatile thread_t) THREAD_NULL; + processor->state = PROCESSOR_RUNNING; + enqueue_tail(&pset->active_queue, (queue_entry_t)processor); + + if ( pset->runq.highq >= BASEPRI_RTQUEUES && + new_thread->sched_pri >= BASEPRI_RTQUEUES ) { + register run_queue_t runq = &pset->runq; + register queue_t q; + + q = runq->queues + runq->highq; + if (((thread_t)q->next)->realtime.deadline < + processor->deadline) { + thread_t thread = new_thread; + + new_thread = (thread_t)q->next; + ((queue_entry_t)new_thread)->next->prev = q; + q->next = ((queue_entry_t)new_thread)->next; + new_thread->runq = RUN_QUEUE_NULL; + processor->deadline = new_thread->realtime.deadline; + assert(new_thread->sched_mode & TH_MODE_PREEMPT); + runq->count--; runq->urgency--; + if (queue_empty(q)) { + if (runq->highq != IDLEPRI) + clrbit(MAXPRI - runq->highq, runq->bitmap); + runq->highq = MAXPRI - ffsbit(runq->bitmap); + } + dispatch_counts.missed_realtime++; + simple_unlock(&pset->sched_lock); - if ( myprocessor->runq.highq > new_thread->sched_pri || - pset->runq.highq > new_thread->sched_pri ) { - thread_lock(new_thread); - thread_setrun(new_thread, HEAD_Q); - thread_unlock(new_thread); + thread_lock(thread); + thread_setrun(thread, SCHED_HEADQ); + thread_unlock(thread); - counter(c_idle_thread_block++); - thread_block(idle_thread_continue); - /* NOTREACHED */ - } - else { counter(c_idle_thread_handoff++); - thread_run(myprocessor->idle_thread, + thread_run(processor->idle_thread, idle_thread_continue, new_thread); - /* NOTREACHED */ + /*NOTREACHED*/ } - } - else - if (state == PROCESSOR_IDLE) { - if (myprocessor->state != PROCESSOR_IDLE) { - /* - * Something happened, try again. - */ - goto retry; - } - /* - * Processor was not dispatched (Rare). - * Set it running again. - */ - no_dispatch_count++; - pset->idle_count--; - remqueue(&pset->idle_queue, (queue_entry_t)myprocessor); - myprocessor->state = PROCESSOR_RUNNING; - enqueue_tail(&pset->active_queue, (queue_entry_t)myprocessor); simple_unlock(&pset->sched_lock); - counter(c_idle_thread_block++); - thread_block(idle_thread_continue); - /* NOTREACHED */ + counter(c_idle_thread_handoff++); + thread_run(processor->idle_thread, + idle_thread_continue, new_thread); + /*NOTREACHED*/ } - else - if ( state == PROCESSOR_ASSIGN || - state == PROCESSOR_SHUTDOWN ) { - /* - * Changing processor sets, or going off-line. - * Release next_thread if there is one. Actual - * thread to run is on a runq. 
- */ - if ((new_thread = (thread_t)*threadp) != THREAD_NULL) { - *threadp = (volatile thread_t) THREAD_NULL; - simple_unlock(&pset->sched_lock); - thread_lock(new_thread); - thread_setrun(new_thread, TAIL_Q); - thread_unlock(new_thread); - } - else - simple_unlock(&pset->sched_lock); + if ( processor->runq.highq > new_thread->sched_pri || + pset->runq.highq > new_thread->sched_pri ) { + thread_t thread = new_thread; + + new_thread = choose_thread(pset, processor); + dispatch_counts.missed_other++; + simple_unlock(&pset->sched_lock); - counter(c_idle_thread_block++); - thread_block(idle_thread_continue); + thread_lock(thread); + thread_setrun(thread, SCHED_HEADQ); + thread_unlock(thread); + + counter(c_idle_thread_handoff++); + thread_run(processor->idle_thread, + idle_thread_continue, new_thread); /* NOTREACHED */ } else { simple_unlock(&pset->sched_lock); - panic("idle_thread: bad processor state %d\n", cpu_state(mycpu)); + counter(c_idle_thread_handoff++); + thread_run(processor->idle_thread, + idle_thread_continue, new_thread); + /* NOTREACHED */ } + } + else + if (state == PROCESSOR_IDLE) { + /* + * Processor was not dispatched (Rare). + * Set it running again and force a + * reschedule. + */ + no_dispatch_count++; + pset->idle_count--; + remqueue(&pset->idle_queue, (queue_entry_t)processor); + processor->state = PROCESSOR_RUNNING; + enqueue_tail(&pset->active_queue, (queue_entry_t)processor); + simple_unlock(&pset->sched_lock); - (void)spllo(); + counter(c_idle_thread_block++); + thread_block(idle_thread_continue); + /* NOTREACHED */ } + else + if (state == PROCESSOR_SHUTDOWN) { + /* + * Going off-line. Force a + * reschedule. + */ + if ((new_thread = (thread_t)*threadp) != THREAD_NULL) { + *threadp = (volatile thread_t) THREAD_NULL; + processor->deadline = UINT64_MAX; + simple_unlock(&pset->sched_lock); + + thread_lock(new_thread); + thread_setrun(new_thread, SCHED_HEADQ); + thread_unlock(new_thread); + } + else + simple_unlock(&pset->sched_lock); + + counter(c_idle_thread_block++); + thread_block(idle_thread_continue); + /* NOTREACHED */ + } + + simple_unlock(&pset->sched_lock); + + panic("idle_thread: state %d\n", cpu_state(mycpu)); + /*NOTREACHED*/ } void idle_thread(void) { - thread_t self = current_thread(); - spl_t s; - - stack_privilege(self); - - s = splsched(); - thread_lock(self); - self->priority = IDLEPRI; - set_sched_pri(self, self->priority); - thread_unlock(self); - splx(s); - counter(c_idle_thread_block++); thread_block(idle_thread_continue); /*NOTREACHED*/ } -static uint64_t sched_tick_interval, sched_tick_deadline; +static uint64_t sched_tick_deadline; void sched_tick_thread(void); void sched_tick_init(void) { - kernel_thread_with_priority( - kernel_task, MAXPRI_STANDARD, - sched_tick_thread, TRUE, TRUE); + kernel_thread_with_priority(sched_tick_thread, MAXPRI_STANDARD); } /* * sched_tick_thread * - * Update the priorities of all threads periodically. + * Perform periodic bookkeeping functions about ten + * times per second. */ void sched_tick_thread_continue(void) @@ -2563,7 +2649,7 @@ sched_tick_thread_continue(void) int new_usec; #endif /* SIMPLE_CLOCK */ - clock_get_uptime(&abstime); + abstime = mach_absolute_time(); sched_tick++; /* age usage one more time */ #if SIMPLE_CLOCK @@ -2582,8 +2668,8 @@ sched_tick_thread_continue(void) compute_mach_factor(); /* - * Scan the run queues for runnable threads that need to - * have their priorities recalculated. + * Scan the run queues for timesharing threads which + * may need to have their priorities recalculated. 
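The "about ten times per second" above falls out of the tick arithmetic set up earlier: clock_interval_to_absolutetime_interval(1000 >> SCHED_TICK_SHIFT, USEC_PER_SEC, ...) expresses the period in units of one millisecond. Assuming SCHED_TICK_SHIFT is 3 (its value lives in a header outside this patch), that gives a 125 ms period, roughly 8 Hz:

#include <stdio.h>

#define SCHED_TICK_SHIFT 3              /* assumed; defined elsewhere */

int
main(void)
{
    /* interval units of USEC_PER_SEC nanoseconds are 1 ms each */
    unsigned ms = 1000 >> SCHED_TICK_SHIFT;

    printf("sched_tick period: %u ms (%.1f Hz)\n", ms, 1000.0 / ms);
    return 0;
}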
*/ do_thread_scan(); @@ -2599,32 +2685,20 @@ void sched_tick_thread(void) { - thread_t self = current_thread(); - natural_t rate; - spl_t s; - - stack_privilege(self); - - rate = (1000 >> SCHED_TICK_SHIFT); - clock_interval_to_absolutetime_interval(rate, USEC_PER_SEC, - &sched_tick_interval); - clock_get_uptime(&sched_tick_deadline); + sched_tick_deadline = mach_absolute_time(); thread_block(sched_tick_thread_continue); /*NOTREACHED*/ } -#define MAX_STUCK_THREADS 128 - /* - * do_thread_scan: scan for stuck threads. A thread is stuck if - * it is runnable but its priority is so low that it has not - * run for several seconds. Its priority should be higher, but - * won't be until it runs and calls update_priority. The scanner - * finds these threads and does the updates. + * do_thread_scan: + * + * Scan the run queues for timesharing threads which need + * to be aged, possibly adjusting their priorities upwards. * * Scanner runs in two passes. Pass one squirrels likely - * thread ids away in an array (takes out references for them). + * threads away in an array (takes out references for them). * Pass two does the priority updates. This is necessary because * the run queue lock is required for the candidate scan, but * cannot be held during updates. @@ -2633,62 +2707,49 @@ sched_tick_thread(void) * but restart logic is included. * */ -thread_t stuck_threads[MAX_STUCK_THREADS]; -int stuck_count = 0; + +#define MAX_STUCK_THREADS 128 + +static thread_t stuck_threads[MAX_STUCK_THREADS]; +static int stuck_count = 0; /* * do_runq_scan is the guts of pass 1. It scans a runq for * stuck threads. A boolean is returned indicating whether * a retry is needed. */ -boolean_t +static boolean_t do_runq_scan( run_queue_t runq) { register queue_t q; register thread_t thread; register int count; - spl_t s; boolean_t result = FALSE; - s = splsched(); - simple_lock(&runq->lock); if ((count = runq->count) > 0) { q = runq->queues + runq->highq; while (count > 0) { queue_iterate(q, thread, thread_t, links) { - if ( !(thread->state & (TH_WAIT|TH_SUSP)) && + if ( thread->sched_stamp != sched_tick && (thread->sched_mode & TH_MODE_TIMESHARE) ) { - if (thread->sched_stamp != sched_tick) { + /* + * Stuck, save its id for later. + */ + if (stuck_count == MAX_STUCK_THREADS) { /* - * Stuck, save its id for later. + * !@#$% No more room. */ - if (stuck_count == MAX_STUCK_THREADS) { - /* - * !@#$% No more room. - */ - simple_unlock(&runq->lock); - splx(s); - - return (TRUE); - } + return (TRUE); + } - /* - * Inline version of thread_reference - * XXX - lock ordering problem here: - * thread locks should be taken before runq - * locks: just try and get the thread's locks - * and ignore this thread if we fail, we might - * have better luck next time.
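Pass one above must obey a strict rule: the run-queue lock is held, so it may only collect candidates, take references via try-lock, and report whether a restart is needed. That structure in a self-contained sketch (all names here are illustrative):

#include <stdbool.h>
#include <stddef.h>

#define MAX_CAND 128                    /* MAX_STUCK_THREADS above */

struct thr {
    struct thr *next;
    unsigned sched_stamp;
    bool timeshare;
    int ref_count;
};

static struct thr *cand[MAX_CAND];
static int cand_count;
static unsigned current_tick;

static bool thr_lock_try(struct thr *t) { (void)t; return true; }
static void thr_unlock(struct thr *t)   { (void)t; }

/* Pass one: runs with the run-queue lock held, so it only collects.
 * Returns true when the caller must restart (array full or lock missed). */
static bool
scan_pass(struct thr *queue_head)
{
    bool retry = false;

    for (struct thr *t = queue_head; t != NULL; t = t->next) {
        if (t->sched_stamp != current_tick && t->timeshare) {
            if (cand_count == MAX_CAND)
                return true;            /* no more room */
            if (thr_lock_try(t)) {      /* lock order forbids blocking */
                t->ref_count++;         /* keep it alive across pass two */
                cand[cand_count++] = t;
                thr_unlock(t);
            }
            else
                retry = true;           /* better luck next tick */
        }
    }
    return retry;
}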
- */ - if (thread_lock_try(thread)) { - thread->ref_count++; - thread_unlock(thread); - stuck_threads[stuck_count++] = thread; - } - else - result = TRUE; + if (thread_lock_try(thread)) { + thread->ref_count++; + thread_unlock(thread); + stuck_threads[stuck_count++] = thread; } + else + result = TRUE; } count--; @@ -2697,15 +2758,13 @@ do_runq_scan( q--; } } - simple_unlock(&runq->lock); - splx(s); return (result); } boolean_t thread_scan_enabled = TRUE; -void +static void do_thread_scan(void) { register boolean_t restart_needed = FALSE; @@ -2718,9 +2777,13 @@ do_thread_scan(void) return; do { + s = splsched(); + simple_lock(&pset->sched_lock); restart_needed = do_runq_scan(&pset->runq); + simple_unlock(&pset->sched_lock); + if (!restart_needed) { - simple_lock(&pset->processors_lock); + simple_lock(&pset->sched_lock); processor = (processor_t)queue_first(&pset->processors); while (!queue_end(&pset->processors, (queue_entry_t)processor)) { if (restart_needed = do_runq_scan(&processor->runq)) @@ -2738,26 +2801,29 @@ do_thread_scan(void) processor = (processor_t)queue_next(&processor->processors); } - simple_unlock(&pset->processors_lock); + simple_unlock(&pset->sched_lock); } + splx(s); /* * Ok, we now have a collection of candidates -- fix them. */ while (stuck_count > 0) { + boolean_t idle_thread; + thread = stuck_threads[--stuck_count]; stuck_threads[stuck_count] = THREAD_NULL; + s = splsched(); thread_lock(thread); - if ( (thread->sched_mode & TH_MODE_TIMESHARE) || - (thread->state & TH_IDLE) ) { - if ( !(thread->state & (TH_WAIT|TH_SUSP)) && - thread->sched_stamp != sched_tick ) - update_priority(thread); - } + idle_thread = (thread->state & TH_IDLE) != 0; + if ( !(thread->state & (TH_WAIT|TH_SUSP)) && + thread->sched_stamp != sched_tick ) + update_priority(thread); thread_unlock(thread); splx(s); - if (!(thread->state & TH_IDLE)) + + if (!idle_thread) thread_deallocate(thread); } @@ -2784,182 +2850,12 @@ thread_wakeup( #if DEBUG - static boolean_t thread_runnable( thread_t thread) { return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN); } - -void -dump_processor_set( - processor_set_t ps) -{ - printf("processor_set: %08x\n",ps); - printf("idle_queue: %08x %08x, idle_count: 0x%x\n", - ps->idle_queue.next,ps->idle_queue.prev,ps->idle_count); - printf("processors: %08x %08x, processor_count: 0x%x\n", - ps->processors.next,ps->processors.prev,ps->processor_count); - printf("tasks: %08x %08x, task_count: 0x%x\n", - ps->tasks.next,ps->tasks.prev,ps->task_count); - printf("threads: %08x %08x, thread_count: 0x%x\n", - ps->threads.next,ps->threads.prev,ps->thread_count); - printf("ref_count: 0x%x, active: %x\n", - ps->ref_count,ps->active); - printf("pset_self: %08x, pset_name_self: %08x\n",ps->pset_self, ps->pset_name_self); - printf("set_quanta: 0x%x\n", ps->set_quanta); -} - -#define processor_state(s) (((s)>PROCESSOR_SHUTDOWN)?"*unknown*":states[s]) - -void -dump_processor( - processor_t p) -{ - char *states[]={"OFF_LINE","RUNNING","IDLE","DISPATCHING", - "ASSIGN","SHUTDOWN"}; - - printf("processor: %08x\n",p); - printf("processor_queue: %08x %08x\n", - p->processor_queue.next,p->processor_queue.prev); - printf("state: %8s, next_thread: %08x, idle_thread: %08x\n", - processor_state(p->state), p->next_thread, p->idle_thread); - printf("slice_quanta: %x\n", p->slice_quanta); - printf("processor_set: %08x, processor_set_next: %08x\n", - p->processor_set, p->processor_set_next); - printf("processors: %08x %08x\n", p->processors.next,p->processors.prev); - printf("processor_self: %08x, 
slot_num: 0x%x\n", p->processor_self, p->slot_num); -} - -void -dump_run_queue_struct( - run_queue_t rq) -{ - char dump_buf[80]; - int i; - - for( i=0; i < NRQS; ) { - int j; - - printf("%6s",(i==0)?"runq:":""); - for( j=0; (j<8) && (i < NRQS); j++,i++ ) { - if( rq->queues[i].next == &rq->queues[i] ) - printf( " --------"); - else - printf(" %08x",rq->queues[i].next); - } - printf("\n"); - } - for( i=0; i < NRQBM; ) { - register unsigned int mask; - char *d=dump_buf; - - mask = ~0; - mask ^= (mask>>1); - - do { - *d++ = ((rq->bitmap[i]&mask)?'r':'e'); - mask >>=1; - } while( mask ); - *d = '\0'; - printf("%8s%s\n",((i==0)?"bitmap:":""),dump_buf); - i++; - } - printf("highq: 0x%x, count: %u\n", rq->highq, rq->count); -} - -void -dump_run_queues( - run_queue_t runq) -{ - register queue_t q1; - register int i; - register queue_entry_t e; - - q1 = runq->queues; - for (i = 0; i < NRQS; i++) { - if (q1->next != q1) { - int t_cnt; - - printf("[%u]",i); - for (t_cnt=0, e = q1->next; e != q1; e = e->next) { - printf("\t0x%08x",e); - if( (t_cnt = ++t_cnt%4) == 0 ) - printf("\n"); - } - if( t_cnt ) - printf("\n"); - } - /* else - printf("[%u]\t\n",i); - */ - q1++; - } -} - -void -checkrq( - run_queue_t rq, - char *msg) -{ - register queue_t q1; - register int i, j; - register queue_entry_t e; - register int highq; - - highq = NRQS; - j = 0; - q1 = rq->queues; - for (i = MAXPRI; i >= 0; i--) { - if (q1->next == q1) { - if (q1->prev != q1) { - panic("checkrq: empty at %s", msg); - } - } - else { - if (highq == -1) - highq = i; - - for (e = q1->next; e != q1; e = e->next) { - j++; - if (e->next->prev != e) - panic("checkrq-2 at %s", msg); - if (e->prev->next != e) - panic("checkrq-3 at %s", msg); - } - } - q1++; - } - if (j != rq->count) - panic("checkrq: count wrong at %s", msg); - if (rq->count != 0 && highq > rq->highq) - panic("checkrq: highq wrong at %s", msg); -} - -void -thread_check( - register thread_t thread, - register run_queue_t rq) -{ - register int whichq = thread->sched_pri; - register queue_entry_t queue, entry; - - if (whichq < MINPRI || whichq > MAXPRI) - panic("thread_check: bad pri"); - - queue = &rq->queues[whichq]; - entry = queue_first(queue); - while (!queue_end(queue, entry)) { - if (entry == (queue_entry_t)thread) - return; - - entry = queue_next(entry); - } - - panic("thread_check: not found"); -} - #endif /* DEBUG */ #if MACH_KDB @@ -2985,4 +2881,12 @@ db_sched(void) #endif /* MACH_COUNTERS */ db_indent -= 2; } + +#include +void db_show_thread_log(void); + +void +db_show_thread_log(void) +{ +} #endif /* MACH_KDB */ diff --git a/osfmk/kern/sched_prim.h b/osfmk/kern/sched_prim.h index 47ed03dbc..58b746195 100644 --- a/osfmk/kern/sched_prim.h +++ b/osfmk/kern/sched_prim.h @@ -89,6 +89,8 @@ /* Initialize scheduler module */ extern void sched_init(void); +extern void sched_timebase_init(void); + /* * Set up thread timeout element(s) when thread is created. */ @@ -97,9 +99,6 @@ extern void thread_timer_setup( extern void thread_timer_terminate(void); -#define thread_bind_locked(thread, processor) \ - (thread)->bound_processor = (processor) - /* * Stop a thread and wait for it to stop running. 
*/ @@ -190,42 +189,11 @@ extern void thread_exception_return(void); extern void thread_syscall_return( kern_return_t ret); -extern thread_t switch_context( - thread_t old_thread, - thread_continue_t continuation, - thread_t new_thread); - -/* Attach stack to thread */ -extern void machine_kernel_stack_init( - thread_t thread, - void (*start_pos)(thread_t)); - -extern void load_context( - thread_t thread); - -extern thread_act_t switch_act( - thread_act_t act); - -extern void machine_switch_act( - thread_t thread, - thread_act_t old, - thread_act_t new, - int cpu); - /* * These functions are either defined in kern/thread.c * or are defined directly by machine-dependent code. */ -/* Allocate an activation stack */ -extern vm_offset_t stack_alloc(thread_t thread, void (*start_pos)(thread_t)); - -/* Free an activation stack */ -extern void stack_free(thread_t thread); - -/* Collect excess kernel stacks */ -extern void stack_collect(void); - /* Block current thread, indicating reason */ extern wait_result_t thread_block_reason( thread_continue_t continuation, @@ -234,15 +202,16 @@ extern wait_result_t thread_block_reason( /* Dispatch a thread for execution */ extern void thread_setrun( thread_t thread, - boolean_t tail); + integer_t options); -#define HEAD_Q 0 /* FALSE */ -#define TAIL_Q 1 /* TRUE */ +#define SCHED_TAILQ 0 +#define SCHED_HEADQ 1 +#define SCHED_PREEMPT 2 /* Bind thread to a particular processor */ -extern void thread_bind( - thread_t thread, - processor_t processor); +extern processor_t thread_bind( + thread_t thread, + processor_t processor); /* Set the maximum interrupt level for the thread */ __private_extern__ wait_interrupt_t thread_interrupt_level( @@ -263,8 +232,18 @@ __private_extern__ kern_return_t clear_wait_internal( thread_t thread, wait_result_t result); +__private_extern__ + wait_queue_t wait_event_wait_queue( + event_t event); + #endif /* MACH_KERNEL_PRIVATE */ +extern wait_result_t assert_wait_prim( + event_t event, + thread_roust_t roust_hint, + uint64_t deadline, + wait_interrupt_t interruptible); + /* ****************** Only exported until BSD stops using ******************** */ diff --git a/osfmk/kern/simple_lock.h b/osfmk/kern/simple_lock.h index 44436c742..af748b256 100644 --- a/osfmk/kern/simple_lock.h +++ b/osfmk/kern/simple_lock.h @@ -337,7 +337,7 @@ extern void simple_unlock_no_trace(simple_lock_t l); #define __slock_held_func__(l) usimple_lock_held(l) #define thread_sleep_simple_lock(l, e, i) \ thread_sleep_usimple_lock((l), (e), (i)) -#endif / * !defined(simple_lock_init) */ +#endif /* !defined(simple_lock_init) */ #if USLOCK_DEBUG /* diff --git a/osfmk/kern/startup.c b/osfmk/kern/startup.c index 54596deed..f4256a9ad 100644 --- a/osfmk/kern/startup.c +++ b/osfmk/kern/startup.c @@ -86,6 +86,7 @@ #include #include #include +#include #include #include #include @@ -106,9 +107,8 @@ extern void rtclock_reset(void); /* Forwards */ void cpu_launch_first_thread( - thread_t thread); + thread_t thread); void start_kernel_threads(void); -void swapin_thread(); /* * Running in virtual memory, on the interrupt stack. @@ -155,7 +155,6 @@ setup_main(void) */ ledger_init(); task_init(); - act_init(); thread_init(); /* @@ -168,21 +167,14 @@ setup_main(void) * Create a kernel thread to start the other kernel * threads. */ - startup_thread = kernel_thread_with_priority( - kernel_task, MAXPRI_KERNEL, - start_kernel_threads, TRUE, FALSE); - /* - * Pretend it is already running. - * - * We can do this without locking, because nothing - * else is running yet. 
- */ - startup_thread->state = TH_RUN; - hw_atomic_add(&startup_thread->processor_set->run_count, 1); + startup_thread = kernel_thread_create(start_kernel_threads, MAXPRI_KERNEL); /* * Start the thread. */ + startup_thread->state = TH_RUN; + pset_run_incr(startup_thread->processor_set); + cpu_launch_first_thread(startup_thread); /*NOTREACHED*/ panic("cpu_launch_first_thread returns!"); @@ -208,16 +200,15 @@ start_kernel_threads(void) thread_t thread; spl_t s; - thread = kernel_thread_with_priority( - kernel_task, MAXPRI_KERNEL, - idle_thread, TRUE, FALSE); + thread = kernel_thread_create(idle_thread, MAXPRI_KERNEL); + s = splsched(); thread_lock(thread); - thread_bind_locked(thread, processor); + thread->bound_processor = processor; processor->idle_thread = thread; thread->ref_count++; - thread->state |= TH_IDLE; - thread_go_locked(thread, THREAD_AWAKENED); + thread->sched_pri = thread->priority = IDLEPRI; + thread->state = (TH_RUN | TH_IDLE); thread_unlock(thread); splx(s); } @@ -259,7 +250,7 @@ start_kernel_threads(void) */ device_service_create(); - shared_file_boot_time_init(); + shared_file_boot_time_init(ENV_DEFAULT_ROOT, machine_slot[cpu_number()].cpu_type); #ifdef IOKIT { @@ -285,6 +276,10 @@ start_kernel_threads(void) } #endif +#if __ppc__ + serial_keyboard_init(); /* Start serial keyboard if wanted */ +#endif + thread_bind(current_thread(), PROCESSOR_NULL); /* @@ -301,16 +296,12 @@ slave_main(void) processor_t myprocessor = current_processor(); thread_t thread; - myprocessor->cpu_data = get_cpu_data(); thread = myprocessor->next_thread; myprocessor->next_thread = THREAD_NULL; if (thread == THREAD_NULL) { thread = machine_wake_thread; machine_wake_thread = THREAD_NULL; } - thread_machine_set_current(thread); - if (thread == machine_wake_thread) - thread_bind(thread, myprocessor); cpu_launch_first_thread(thread); /*NOTREACHED*/ @@ -323,17 +314,8 @@ slave_main(void) void start_cpu_thread(void) { - processor_t processor; - - processor = cpu_to_processor(cpu_number()); - slave_machine_init(); - if (processor->processor_self == IP_NULL) { - ipc_processor_init(processor); - ipc_processor_enable(processor); - } - (void) thread_terminate(current_act()); } @@ -347,22 +329,18 @@ cpu_launch_first_thread( register int mycpu = cpu_number(); processor_t processor = cpu_to_processor(mycpu); - processor->cpu_data->preemption_level = 0; - - cpu_up(mycpu); - start_timer(&kernel_timer[mycpu]); clock_get_uptime(&processor->last_dispatch); - - if (thread == THREAD_NULL || thread == processor->idle_thread) - panic("cpu_launch_first_thread"); + start_timer(&kernel_timer[mycpu]); + machine_thread_set_current(thread); + cpu_up(mycpu); rtclock_reset(); /* start realtime clock ticking */ PMAP_ACTIVATE_KERNEL(mycpu); - thread_machine_set_current(thread); thread_lock(thread); thread->state &= ~TH_UNINT; thread->last_processor = processor; + processor->active_thread = thread; processor->current_pri = thread->sched_pri; _mk_sp_thread_begin(thread, processor); thread_unlock(thread); @@ -371,6 +349,6 @@ cpu_launch_first_thread( PMAP_ACTIVATE_USER(thread->top_act, mycpu); /* preemption enabled by load_context */ - load_context(thread); + machine_load_context(thread); /*NOTREACHED*/ } diff --git a/osfmk/kern/sync_sema.c b/osfmk/kern/sync_sema.c index 6ca013208..eeec2a881 100644 --- a/osfmk/kern/sync_sema.c +++ b/osfmk/kern/sync_sema.c @@ -560,7 +560,7 @@ semaphore_wait_internal( void (*caller_cont)(kern_return_t)) { void (*continuation)(void); - uint64_t abstime, nsinterval; + uint64_t abstime; boolean_t 
nonblocking; int wait_result; spl_t spl_level; @@ -583,13 +583,17 @@ semaphore_wait_internal( kr = KERN_SUCCESS; } else if (nonblocking) { kr = KERN_OPERATION_TIMED_OUT; - } else { + } else { + thread_t self = current_thread(); + wait_semaphore->count = -1; /* we don't keep an actual count */ + thread_lock(self); (void)wait_queue_assert_wait64_locked( &wait_semaphore->wait_queue, SEMAPHORE_EVENT, THREAD_ABORTSAFE, - FALSE); /* unlock? */ + self); + thread_unlock(self); } semaphore_unlock(wait_semaphore); splx(spl_level); @@ -646,13 +650,8 @@ semaphore_wait_internal( * If it is a timed wait, go ahead and set up the timer. */ if (wait_timep != (mach_timespec_t *)0) { - clock_interval_to_absolutetime_interval(wait_timep->tv_sec, - NSEC_PER_SEC, - &abstime); - clock_interval_to_absolutetime_interval(wait_timep->tv_nsec, - 1, - &nsinterval); - abstime += nsinterval; + nanoseconds_to_absolutetime((uint64_t)wait_timep->tv_sec * + NSEC_PER_SEC + wait_timep->tv_nsec, &abstime); clock_absolutetime_interval_to_deadline(abstime, &abstime); thread_set_timer_deadline(abstime); continuation = semaphore_timedwait_continue; diff --git a/osfmk/kern/syscall_emulation.c b/osfmk/kern/syscall_emulation.c index fc7c02b81..308e3ef7c 100644 --- a/osfmk/kern/syscall_emulation.c +++ b/osfmk/kern/syscall_emulation.c @@ -377,10 +377,10 @@ task_set_emulation_vector( * Can't fault while we hold locks. */ kr = vm_map_wire(ipc_kernel_map, - trunc_page(emul_vector_addr), - round_page(emul_vector_addr + - emulation_vector_count * - sizeof(eml_dispatch_t)), + trunc_page_32(emul_vector_addr), + round_page_32(emul_vector_addr + + emulation_vector_count * + sizeof(eml_dispatch_t)), VM_PROT_READ|VM_PROT_WRITE, FALSE); assert(kr == KERN_SUCCESS); @@ -447,7 +447,7 @@ task_get_emulation_vector( */ vector_size = eml->disp_count * sizeof(vm_offset_t); - size_needed = round_page(vector_size); + size_needed = round_page_32(vector_size); if (size_needed <= size) break; @@ -484,7 +484,7 @@ task_get_emulation_vector( /* * Free any unused memory beyond the end of the last page used */ - size_used = round_page(vector_size); + size_used = round_page_32(vector_size); if (size_used != size) (void) kmem_free(ipc_kernel_map, addr + size_used, diff --git a/osfmk/kern/syscall_subr.c b/osfmk/kern/syscall_subr.c index dd4716262..413fc22be 100644 --- a/osfmk/kern/syscall_subr.c +++ b/osfmk/kern/syscall_subr.c @@ -197,7 +197,6 @@ thread_switch( int option, mach_msg_timeout_t option_time) { - register thread_t self = current_thread(); register thread_act_t hint_act = THR_ACT_NULL; /* @@ -217,7 +216,7 @@ thread_switch( if (thread_name != MACH_PORT_NULL) { ipc_port_t port; - if (ipc_port_translate_send(self->top_act->task->itk_space, + if (ipc_port_translate_send(current_task()->itk_space, thread_name, &port) == KERN_SUCCESS) { ip_reference(port); ip_unlock(port); diff --git a/osfmk/kern/syscall_sw.c b/osfmk/kern/syscall_sw.c index f44d5cd26..010ff4be8 100644 --- a/osfmk/kern/syscall_sw.c +++ b/osfmk/kern/syscall_sw.c @@ -91,7 +91,7 @@ int kern_invalid_debug = 0; extern kern_return_t iokit_user_client_trap(); -mach_trap_t mach_trap_table[] = { +mach_trap_t mach_trap_table[MACH_TRAP_TABLE_COUNT] = { MACH_TRAP(kern_invalid, 0), /* 0 */ /* Unix */ MACH_TRAP(kern_invalid, 0), /* 1 */ /* Unix */ MACH_TRAP(kern_invalid, 0), /* 2 */ /* Unix */ @@ -144,8 +144,8 @@ mach_trap_t mach_trap_table[] = { MACH_TRAP(macx_swapoff, 2), /* 49 */ MACH_TRAP(kern_invalid, 0), /* 50 */ MACH_TRAP(macx_triggers, 4), /* 51 */ - MACH_TRAP(kern_invalid, 0), /* 52 */ - 
MACH_TRAP(kern_invalid, 0), /* 53 */ + MACH_TRAP(macx_backing_store_suspend, 1), /* 52 */ + MACH_TRAP(macx_backing_store_recovery, 1), /* 53 */ MACH_TRAP(kern_invalid, 0), /* 54 */ MACH_TRAP(kern_invalid, 0), /* 55 */ MACH_TRAP(kern_invalid, 0), /* 56 */ diff --git a/osfmk/kern/syscall_sw.h b/osfmk/kern/syscall_sw.h index 1272c1863..076d2b35f 100644 --- a/osfmk/kern/syscall_sw.h +++ b/osfmk/kern/syscall_sw.h @@ -75,6 +75,9 @@ typedef struct { #endif /* !MACH_ASSERT */ } mach_trap_t; +#define MACH_TRAP_TABLE_COUNT 128 + + extern mach_trap_t mach_trap_table[]; extern int mach_trap_count; extern kern_return_t kern_invalid(void); diff --git a/osfmk/kern/task.c b/osfmk/kern/task.c index ed0e8ffff..63dce983a 100644 --- a/osfmk/kern/task.c +++ b/osfmk/kern/task.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -115,6 +115,11 @@ #include #endif /* TASK_SWAPPER */ +#ifdef __ppc__ +#include +#include +#endif + /* * Exported interfaces */ @@ -146,6 +151,16 @@ kern_return_t task_set_ledger( ledger_t wired, ledger_t paged); +void +task_backing_store_privileged( + task_t task) +{ + task_lock(task); + task->priv_flags |= VM_BACKING_STORE_PRIV; + task_unlock(task); + return; +} + void task_init(void) { @@ -159,20 +174,12 @@ task_init(void) /* * Create the kernel task as the first task. - * Task_create_local must assign to kernel_task as a side effect, - * for other initialization. (:-() */ - if (task_create_local( - TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS) + if (task_create_internal(TASK_NULL, FALSE, &kernel_task) != KERN_SUCCESS) panic("task_init\n"); + vm_map_deallocate(kernel_task->map); kernel_task->map = kernel_map; - -#if MACH_ASSERT - if (watchacts & WA_TASK) - printf("task_init: kernel_task = %x map=%x\n", - kernel_task, kernel_map); -#endif /* MACH_ASSERT */ } #if MACH_HOST @@ -235,48 +242,7 @@ kernel_task_create( vm_size_t map_size, task_t *child_task) { - kern_return_t result; - task_t new_task; - vm_map_t old_map; - - /* - * Create the task. - */ - result = task_create_local(parent_task, FALSE, TRUE, &new_task); - if (result != KERN_SUCCESS) - return (result); - - /* - * Task_create_local creates the task with a user-space map. - * We attempt to replace the map and free it afterwards; else - * task_deallocate will free it (can NOT set map to null before - * task_deallocate, this impersonates a norma placeholder task). - * _Mark the memory as pageable_ -- this is what we - * want for images (like servers) loaded into the kernel. - */ - if (map_size == 0) { - vm_map_deallocate(new_task->map); - new_task->map = kernel_map; - *child_task = new_task; - } else { - old_map = new_task->map; - if ((result = kmem_suballoc(kernel_map, &map_base, - map_size, TRUE, FALSE, - &new_task->map)) != KERN_SUCCESS) { - /* - * New task created with ref count of 2 -- decrement by - * one to force task deletion. 
- */ - printf("kmem_suballoc(%x,%x,%x,1,0,&new) Fails\n", - kernel_map, map_base, map_size); - --new_task->ref_count; - task_deallocate(new_task); - return (result); - } - vm_map_deallocate(old_map); - *child_task = new_task; - } - return (KERN_SUCCESS); + return (KERN_INVALID_ARGUMENT); } kern_return_t @@ -290,8 +256,8 @@ task_create( if (parent_task == TASK_NULL) return(KERN_INVALID_ARGUMENT); - return task_create_local( - parent_task, inherit_memory, FALSE, child_task); + return task_create_internal( + parent_task, inherit_memory, child_task); } kern_return_t @@ -299,6 +265,7 @@ host_security_create_task_token( host_security_t host_security, task_t parent_task, security_token_t sec_token, + audit_token_t audit_token, host_priv_t host_priv, ledger_port_array_t ledger_ports, mach_msg_type_number_t num_ledger_ports, @@ -313,8 +280,8 @@ host_security_create_task_token( if (host_security == HOST_NULL) return(KERN_INVALID_SECURITY); - result = task_create_local( - parent_task, inherit_memory, FALSE, child_task); + result = task_create_internal( + parent_task, inherit_memory, child_task); if (result != KERN_SUCCESS) return(result); @@ -322,6 +289,7 @@ host_security_create_task_token( result = host_security_set_task_token(host_security, *child_task, sec_token, + audit_token, host_priv); if (result != KERN_SUCCESS) @@ -331,10 +299,9 @@ host_security_create_task_token( } kern_return_t -task_create_local( +task_create_internal( task_t parent_task, boolean_t inherit_memory, - boolean_t kernel_loaded, task_t *child_task) /* OUT */ { task_t new_task; @@ -352,19 +319,18 @@ task_create_local( new_task->map = vm_map_fork(parent_task->map); else new_task->map = vm_map_create(pmap_create(0), - round_page(VM_MIN_ADDRESS), - trunc_page(VM_MAX_ADDRESS), TRUE); + round_page_32(VM_MIN_ADDRESS), + trunc_page_32(VM_MAX_ADDRESS), TRUE); mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW); - queue_init(&new_task->thr_acts); + queue_init(&new_task->threads); new_task->suspend_count = 0; - new_task->thr_act_count = 0; - new_task->res_act_count = 0; - new_task->active_act_count = 0; + new_task->thread_count = 0; + new_task->res_thread_count = 0; + new_task->active_thread_count = 0; new_task->user_stop_count = 0; new_task->role = TASK_UNSPECIFIED; new_task->active = TRUE; - new_task->kernel_loaded = kernel_loaded; new_task->user_data = 0; new_task->faults = 0; new_task->cow_faults = 0; @@ -372,8 +338,11 @@ task_create_local( new_task->messages_sent = 0; new_task->messages_received = 0; new_task->syscalls_mach = 0; + new_task->priv_flags = 0; new_task->syscalls_unix=0; new_task->csw=0; + new_task->taskFeatures[0] = 0; /* Init task features */ + new_task->taskFeatures[1] = 0; /* Init task features */ new_task->dynamic_working_set = 0; task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT, @@ -383,6 +352,10 @@ task_create_local( new_task->bsd_info = 0; #endif /* MACH_BSD */ +#ifdef __ppc__ + if(per_proc_info[0].pf.Available & pf64Bit) new_task->taskFeatures[0] |= tf64BitData; /* If 64-bit machine, show we have 64-bit registers at least */ +#endif + #if TASK_SWAPPER new_task->swap_state = TASK_SW_IN; new_task->swap_flags = 0; @@ -425,6 +398,7 @@ task_create_local( pset = &default_pset; new_task->sec_token = parent_task->sec_token; + new_task->audit_token = parent_task->audit_token; shared_region_mapping_ref(parent_task->system_shared_region); new_task->system_shared_region = parent_task->system_shared_region; @@ -438,12 +412,13 @@ task_create_local( pset = &default_pset; new_task->sec_token = KERNEL_SECURITY_TOKEN; + 
new_task->audit_token = KERNEL_AUDIT_TOKEN; new_task->wired_ledger_port = ledger_copy(root_wired_ledger); new_task->paged_ledger_port = ledger_copy(root_paged_ledger); } if (kernel_task == TASK_NULL) { - new_task->priority = MINPRI_KERNEL; + new_task->priority = BASEPRI_KERNEL; new_task->max_priority = MAXPRI_KERNEL; } else { @@ -459,28 +434,11 @@ task_create_local( task_unfreeze(parent_task); #endif /* MACH_HOST */ -#if FAST_TAS - if (inherit_memory) { - new_task->fast_tas_base = parent_task->fast_tas_base; - new_task->fast_tas_end = parent_task->fast_tas_end; - } else { - new_task->fast_tas_base = (vm_offset_t)0; - new_task->fast_tas_end = (vm_offset_t)0; - } -#endif /* FAST_TAS */ + if (vm_backing_store_low && parent_task != NULL) + new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV); ipc_task_enable(new_task); -#if TASK_SWAPPER - task_swapout_eligible(new_task); -#endif /* TASK_SWAPPER */ - -#if MACH_ASSERT - if (watchacts & WA_TASK) - printf("*** task_create_local(par=%x inh=%x) == 0x%x\n", - parent_task, inherit_memory, new_task); -#endif /* MACH_ASSERT */ - *child_task = new_task; return(KERN_SUCCESS); } @@ -516,7 +474,6 @@ task_deallocate( if(task->dynamic_working_set) tws_hash_destroy((tws_hash_t)task->dynamic_working_set); - eml_task_deallocate(task); ipc_task_terminate(task); @@ -535,9 +492,6 @@ task_deallocate( task_unfreeze(task); #endif - if (task->kernel_loaded) - vm_map_remove(kernel_map, task->map->min_offset, - task->map->max_offset, VM_MAP_NO_FLAGS); vm_map_deallocate(task->map); is_release(task->itk_space); task_prof_deallocate(task); @@ -675,18 +629,17 @@ task_terminate_internal( * handed over to the reaper, who will finally remove the * thread from the task list and free the structures. */ - queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) { thread_terminate_internal(thr_act); } /* - * Clean up any virtual machine state/resources associated - * with the current activation because it may hold wiring - * and other references on resources we will be trying to - * release below. + * Give the machine dependent code a chance + * to perform cleanup before ripping apart + * the task. */ if (cur_thr_act->task == task) - act_virtual_machine_destroy(cur_thr_act); + machine_thread_terminate_self(); task_unlock(task); @@ -698,8 +651,7 @@ task_terminate_internal( /* * Destroy the IPC space, leaving just a reference for it. */ - if (!task->kernel_loaded) - ipc_space_destroy(task->itk_space); + ipc_space_destroy(task->itk_space); /* * If the current thread is a member of the task @@ -728,6 +680,10 @@ task_terminate_internal( */ thread_interrupt_level(interrupt_save); +#if __ppc__ + perfmon_release_facility(task); // notify the perfmon facility +#endif + /* * Get rid of the task active reference on itself. */ @@ -781,7 +737,7 @@ task_halt( return(KERN_FAILURE); } - if (task->thr_act_count > 1) { + if (task->thread_count > 1) { /* * Mark all the threads to keep them from starting any more * user-level execution. The thread_terminate_internal code @@ -799,7 +755,7 @@ task_halt( * handed over to the reaper, who will finally remove the * thread from the task list and free the structures. 
*/ - queue_iterate(&task->thr_acts, thr_act, thread_act_t,thr_acts) { + queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) { if (thr_act != cur_thr_act) thread_terminate_internal(thr_act); } @@ -807,12 +763,11 @@ task_halt( } /* - * If the current thread has any virtual machine state - * associated with it, we need to explicitly clean that - * up now (because we did not terminate the current act) - * before we try to clean up the task VM and port spaces. + * Give the machine dependent code a chance + * to perform cleanup before ripping apart + * the task. */ - act_virtual_machine_destroy(cur_thr_act); + machine_thread_terminate_self(); task_unlock(task); @@ -825,8 +780,7 @@ task_halt( * Destroy the contents of the IPC space, leaving just * a reference for it. */ - if (!task->kernel_loaded) - ipc_space_clean(task->itk_space); + ipc_space_clean(task->itk_space); /* * Clean out the address space, as we are going to be @@ -862,7 +816,7 @@ task_hold_locked( /* * Iterate through all the thread_act's and hold them. */ - queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) { act_lock_thread(thr_act); thread_hold(thr_act); act_unlock_thread(thr_act); @@ -920,12 +874,12 @@ task_wait_locked( * stop. Do not wait for the current thread if it is within * the task. */ - queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) { if (thr_act != cur_thr_act) { - thread_shuttle_t thr_shuttle; + thread_t thread; - thr_shuttle = act_lock_thread(thr_act); - thread_wait(thr_shuttle); + thread = act_lock_thread(thr_act); + thread_wait(thread); act_unlock_thread(thr_act); } } @@ -955,7 +909,7 @@ task_release_locked( * Do not hold the current thread_act if it is within the * task. */ - queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) { act_lock_thread(thr_act); thread_release(thr_act); act_unlock_thread(thr_act); @@ -1017,7 +971,7 @@ task_threads( return KERN_FAILURE; } - actual = task->thr_act_count; + actual = task->thread_count; /* do we have the memory we need? 
*/ size_needed = actual * sizeof(mach_port_t); @@ -1041,17 +995,17 @@ task_threads( /* OK, have memory and the task is locked & active */ thr_acts = (thread_act_t *) addr; - for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->thr_acts); + for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->threads); i < actual; - i++, thr_act = (thread_act_t) queue_next(&thr_act->thr_acts)) { + i++, thr_act = (thread_act_t) queue_next(&thr_act->task_threads)) { act_lock(thr_act); - if (thr_act->ref_count > 0) { - act_locked_act_reference(thr_act); + if (thr_act->act_ref_count > 0) { + act_reference_locked(thr_act); thr_acts[j++] = thr_act; } act_unlock(thr_act); } - assert(queue_end(&task->thr_acts, (queue_entry_t) thr_act)); + assert(queue_end(&task->threads, (queue_entry_t) thr_act)); actual = j; size_needed = actual * sizeof(mach_port_t); @@ -1184,8 +1138,10 @@ host_security_set_task_token( host_security_t host_security, task_t task, security_token_t sec_token, + audit_token_t audit_token, host_priv_t host_priv) { + ipc_port_t host_port; kern_return_t kr; if (task == TASK_NULL) @@ -1196,17 +1152,16 @@ host_security_set_task_token( task_lock(task); task->sec_token = sec_token; + task->audit_token = audit_token; task_unlock(task); if (host_priv != HOST_PRIV_NULL) { - kr = task_set_special_port(task, - TASK_HOST_PORT, - ipc_port_make_send(realhost.host_priv_self)); + kr = host_get_host_priv_port(host_priv, &host_port); } else { - kr = task_set_special_port(task, - TASK_HOST_PORT, - ipc_port_make_send(realhost.host_self)); + kr = host_get_host_port(host_priv_self(), &host_port); } + assert(kr == KERN_SUCCESS); + kr = task_set_special_port(task, TASK_HOST_PORT, host_port); return(kr); } @@ -1329,8 +1284,8 @@ task_info( times_info->system_time.microseconds = 0; task_lock(task); - queue_iterate(&task->thr_acts, thr_act, - thread_act_t, thr_acts) + queue_iterate(&task->threads, thr_act, + thread_act_t, task_threads) { time_value_t user_time, system_time; spl_t s; @@ -1433,6 +1388,24 @@ task_info( break; } + case TASK_AUDIT_TOKEN: + { + register audit_token_t *audit_token_p; + + if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) { + return(KERN_INVALID_ARGUMENT); + } + + audit_token_p = (audit_token_t *) task_info_out; + + task_lock(task); + *audit_token_p = task->audit_token; + task_unlock(task); + + *task_info_count = TASK_AUDIT_TOKEN_COUNT; + break; + } + case TASK_SCHED_INFO: return(KERN_INVALID_ARGUMENT); @@ -1719,6 +1692,25 @@ task_set_port_space( return kr; } +/* + * Routine: + * task_is_classic + * Purpose: + * Returns true if the task is a P_CLASSIC task. + */ +boolean_t +task_is_classic( + task_t task) +{ + boolean_t result = FALSE; + + if (task) { + struct proc *p = get_bsdtask_info(task); + result = proc_is_classic(p) ? 
TRUE : FALSE; + } + return result; +} + /* * We need to export some functions to other components that * are currently implemented in macros within the osfmk @@ -1727,9 +1719,9 @@ task_set_port_space( boolean_t is_kerneltask(task_t t) { if (t == kernel_task) - return(TRUE); - else - return((t->kernel_loaded)); + return (TRUE); + + return (FALSE); } #undef current_task diff --git a/osfmk/kern/task.h b/osfmk/kern/task.h index 8ac3686c4..07645a4a1 100644 --- a/osfmk/kern/task.h +++ b/osfmk/kern/task.h @@ -113,7 +113,6 @@ typedef struct task { decl_mutex_data(,lock) /* Task's lock */ int ref_count; /* Number of references to me */ boolean_t active; /* Task has not been terminated */ - boolean_t kernel_loaded; /* Created with kernel_task_create() */ /* Miscellaneous */ vm_map_t map; /* Address space description */ @@ -133,11 +132,11 @@ typedef struct task { queue_chain_t swapped_tasks; /* list of non-resident tasks */ #endif /* TASK_SWAPPER */ - /* Activations in this task */ - queue_head_t thr_acts; /* list of thread_activations */ - int thr_act_count; - int res_act_count; - int active_act_count; /* have not terminate_self yet */ + /* Threads in this task */ + queue_head_t threads; + int thread_count; + int res_thread_count; + int active_thread_count; processor_set_t processor_set; /* processor set for new threads */ #if MACH_HOST @@ -153,8 +152,9 @@ typedef struct task { integer_t priority; /* base priority for threads */ integer_t max_priority; /* maximum priority for threads */ - /* Task security token */ + /* Task security and audit tokens */ security_token_t sec_token; + audit_token_t audit_token; /* Statistics */ time_value_t total_user_time; /* user time for dead threads */ @@ -190,6 +190,7 @@ typedef struct task { /* Ledgers */ struct ipc_port *wired_ledger_port; struct ipc_port *paged_ledger_port; + unsigned long priv_flags; /* privilege resource flags */ #if NORMA_TASK long child_node; /* if != -1, node for new children */ @@ -212,6 +213,9 @@ typedef struct task { #endif vm_offset_t system_shared_region; vm_offset_t dynamic_working_set; + uint32_t taskFeatures[2]; /* Special feature for this task */ +#define tf64BitAddr 0x80000000 /* Task has 64-bit addressing */ +#define tf64BitData 0x40000000 /* Task has 64-bit data registers */ } Task; #define task_lock(task) mutex_lock(&(task)->lock) @@ -225,18 +229,25 @@ typedef struct task { #define task_reference_locked(task) ((task)->ref_count++) +/* + * priv_flags definitions + */ +#define VM_BACKING_STORE_PRIV 0x1 + /* * Internal only routines */ +extern void task_backing_store_privileged( + task_t task); + /* Initialize task module */ extern void task_init(void); /* task create */ -extern kern_return_t task_create_local( +extern kern_return_t task_create_internal( task_t parent_task, boolean_t inherit_memory, - boolean_t kernel_loaded, task_t *child_task); /* OUT */ extern void consider_task_collect(void); diff --git a/osfmk/kern/task_policy.c b/osfmk/kern/task_policy.c index 83aaa43e5..c382f56ac 100644 --- a/osfmk/kern/task_policy.c +++ b/osfmk/kern/task_policy.c @@ -140,7 +140,7 @@ task_priority( task->priority = priority; - queue_iterate(&task->thr_acts, act, thread_act_t, thr_acts) { + queue_iterate(&task->threads, act, thread_act_t, task_threads) { thread_t thread = act_lock_thread(act); if (act->active) diff --git a/osfmk/kern/task_swap.c b/osfmk/kern/task_swap.c index 2e9721870..53367bc4a 100644 --- a/osfmk/kern/task_swap.c +++ b/osfmk/kern/task_swap.c @@ -48,199 +48,11 @@ #include /* We use something from in here */ -/* - * Note:
if TASK_SWAPPER is disabled, then this file defines only - * a stub version of task_swappable(), so that the service can always - * be defined, even if swapping has been configured out of the kernel. - */ -#if TASK_SWAPPER - -/* temporary debug flags */ -#define TASK_SW_DEBUG 1 -#define TASK_SW_STATS 1 - -int task_swap_debug = 0; -int task_swap_stats = 0; -int task_swap_enable = 1; -int task_swap_on = 1; - -queue_head_t swapped_tasks; /* completely swapped out tasks */ -queue_head_t swapout_thread_q; /* threads to be swapped out */ -mutex_t task_swapper_lock; /* protects above queue */ - -#define task_swapper_lock() mutex_lock(&task_swapper_lock) -#define task_swapper_unlock() mutex_unlock(&task_swapper_lock) -#define task_swapper_wakeup() thread_wakeup((event_t)&swapout_thread_q) -#define task_swapper_sleep() thread_sleep_mutex((event_t)&swapout_thread_q, \ - &task_swapper_lock, \ - THREAD_UNINT) - - -queue_head_t eligible_tasks; /* tasks eligible for swapout */ -mutex_t task_swapout_list_lock; /* protects above queue */ -#define task_swapout_lock() mutex_lock(&task_swapout_list_lock) -#define task_swapout_unlock() mutex_unlock(&task_swapout_list_lock) - -/* - * The next section of constants and globals are tunable parameters - * used in making swapping decisions. They may be changed dynamically - * without adversely affecting the robustness of the system; however, - * the policy will change, one way or the other. - */ - -#define SHORT_AVG_INTERVAL 5 /* in seconds */ -#define LONG_AVG_INTERVAL 30 /* in seconds */ -#define AVE_SCALE 1024 - -unsigned int short_avg_interval = SHORT_AVG_INTERVAL; -unsigned int long_avg_interval = LONG_AVG_INTERVAL; - -#ifndef MIN_SWAP_PAGEOUT_RATE -#define MIN_SWAP_PAGEOUT_RATE 10 -#endif - -/* - * The following are all stored in fixed-point representation (the actual - * value times AVE_SCALE), to allow more accurate computing of decaying - * averages. So all variables that end with "avg" must be divided by - * AVE_SCALE to convert them or compare them to ints. - */ -unsigned int vm_grab_rate_avg; -unsigned int vm_pageout_rate_avg = MIN_SWAP_PAGEOUT_RATE * AVE_SCALE; -unsigned int vm_pageout_rate_longavg = MIN_SWAP_PAGEOUT_RATE * AVE_SCALE; -unsigned int vm_pageout_rate_peakavg = MIN_SWAP_PAGEOUT_RATE * AVE_SCALE; -unsigned int vm_page_free_avg; /* average free pages over short_avg_interval */ -unsigned int vm_page_free_longavg; /* avg free pages over long_avg_interval */ - -/* - * Trigger task swapping when paging activity reaches - * SWAP_HIGH_WATER_MARK per cent of the maximum paging activity ever observed. - * Turn off task swapping when paging activity goes back down to below - * SWAP_PAGEOUT_LOW_WATER_MARK per cent of the maximum. - * These numbers have been found empirically and might need some tuning... - */ -#ifndef SWAP_PAGEOUT_HIGH_WATER_MARK -#define SWAP_PAGEOUT_HIGH_WATER_MARK 30 -#endif -#ifndef SWAP_PAGEOUT_LOW_WATER_MARK -#define SWAP_PAGEOUT_LOW_WATER_MARK 10 -#endif - -#ifndef MAX_GRAB_RATE -#define MAX_GRAB_RATE ((unsigned int) -1) /* XXX no maximum */ -#endif - -/* - * swap_{start,stop}_pageout_rate start at the minimum value, then increase - * to adjust to the hardware's performance, following the paging rate peaks. 
- */ -unsigned int swap_pageout_high_water_mark = SWAP_PAGEOUT_HIGH_WATER_MARK; -unsigned int swap_pageout_low_water_mark = SWAP_PAGEOUT_LOW_WATER_MARK; -unsigned int swap_start_pageout_rate = MIN_SWAP_PAGEOUT_RATE * AVE_SCALE * - SWAP_PAGEOUT_HIGH_WATER_MARK / 100; -unsigned int swap_stop_pageout_rate = MIN_SWAP_PAGEOUT_RATE * AVE_SCALE * - SWAP_PAGEOUT_LOW_WATER_MARK / 100; -#if TASK_SW_DEBUG -unsigned int fixed_swap_start_pageout_rate = 0; /* only for testing purpose */ -unsigned int fixed_swap_stop_pageout_rate = 0; /* only for testing purpose */ -#endif /* TASK_SW_DEBUG */ -unsigned int max_grab_rate = MAX_GRAB_RATE; - -#ifndef MIN_SWAP_TIME -#define MIN_SWAP_TIME 1 -#endif - -int min_swap_time = MIN_SWAP_TIME; /* in seconds */ - -#ifndef MIN_RES_TIME -#define MIN_RES_TIME 6 -#endif - -int min_res_time = MIN_RES_TIME; /* in seconds */ - -#ifndef MIN_ACTIVE_TASKS -#define MIN_ACTIVE_TASKS 4 -#endif - -int min_active_tasks = MIN_ACTIVE_TASKS; - -#ifndef TASK_SWAP_CYCLE_TIME -#define TASK_SWAP_CYCLE_TIME 2 -#endif - -int task_swap_cycle_time = TASK_SWAP_CYCLE_TIME; /* in seconds */ - -int last_task_swap_cycle = 0; - -/* temporary statistics */ -int task_swapouts = 0; -int task_swapins = 0; -int task_swaprss_out = 0; /* total rss at swapout time */ -int task_swaprss_in = 0; /* total rss at swapin time */ -int task_swap_total_time = 0; /* total time spent swapped out */ -int tasks_swapped_out = 0; /* number of tasks swapped out now */ - -#ifdef TASK_SW_STATS -#define TASK_STATS_INCR(cnt) (cnt)++ -#else -#define TASK_STATS_INCR(cnt) -#endif /* TASK_SW_STATS */ - -#if TASK_SW_DEBUG -boolean_t on_swapped_list(task_t task); /* forward */ -/* - * Debug function to determine if a task is already on the - * swapped out tasks list. It also checks for tasks on the list - * that are in an illegal state (i.e. swapped in). - */ -boolean_t -on_swapped_list(task_t task) -{ - task_t ltask; - /* task_swapper_lock is locked. */ - - if (queue_empty(&swapped_tasks)) { - return(FALSE); - } - ltask = (task_t)queue_first(&swapped_tasks); - while (!queue_end(&swapped_tasks, (queue_entry_t)ltask)) { - /* check for illegal state */ - if (ltask->swap_state == TASK_SW_IN) { - printf("on_swapped_list and in: 0x%X\n",ltask); - Debugger(""); - } - if (ltask == task) - return(TRUE); - ltask = (task_t)queue_next(&ltask->swapped_tasks); - } - return(FALSE); -} -#endif /* TASK_SW_DEBUG */ - -/* - * task_swapper_init: [exported] - */ -void -task_swapper_init() -{ - queue_init(&swapped_tasks); - queue_init(&eligible_tasks); - queue_init(&swapout_thread_q); - mutex_init(&task_swapper_lock, ETAP_THREAD_TASK_SWAP); - mutex_init(&task_swapout_list_lock, ETAP_THREAD_TASK_SWAPOUT); - vm_page_free_avg = vm_page_free_count * AVE_SCALE; - vm_page_free_longavg = vm_page_free_count * AVE_SCALE; -} - -#endif /* TASK_SWAPPER */ - /* * task_swappable: [exported] * * Make a task swappable or non-swappable. If made non-swappable, * it will be swapped in. - * - * Locking: task_swapout_lock is taken before task lock. */ kern_return_t task_swappable( @@ -249,1229 +61,13 @@ task_swappable( boolean_t make_swappable) { if (host_priv == HOST_PRIV_NULL) - return(KERN_INVALID_ARGUMENT); + return (KERN_INVALID_ARGUMENT); if (task == TASK_NULL) - return(KERN_INVALID_ARGUMENT); - -#if !TASK_SWAPPER - - /* - * If we don't support swapping, this call is purely advisory.
- */ - return(KERN_SUCCESS); + return (KERN_INVALID_ARGUMENT); -#else /* TASK_SWAPPER */ - - task_lock(task); - if (make_swappable) { - /* make task swappable */ - if (task->swap_state == TASK_SW_UNSWAPPABLE) { - task->swap_state = TASK_SW_IN; - task_unlock(task); - task_swapout_eligible(task); - } - } else { - switch (task->swap_state) { - case TASK_SW_IN: - task->swap_state = TASK_SW_UNSWAPPABLE; - task_unlock(task); - task_swapout_ineligible(task); - break; - case TASK_SW_UNSWAPPABLE: - task_unlock(task); - break; - default: - /* - * swap_state could be TASK_SW_OUT, TASK_SW_GOING_OUT, - * or TASK_SW_COMING_IN. task_swapin handles all - * three, and its default case will catch any bad - * states. - */ - task_unlock(task); - task_swapin(task, TRUE); - break; - } - } - return(KERN_SUCCESS); - -#endif /* TASK_SWAPPER */ - -} - -#if TASK_SWAPPER - -/* - * task_swapout: - * A reference to the task must be held. - * - * Start swapping out a task by sending an AST_SWAPOUT to each thread. - * When the threads reach a clean point, they queue themselves up on the - * swapout_thread_q to be swapped out by the task_swap_swapout_thread. - * The task can be swapped in at any point in this process. - * - * A task will not be fully swapped out (i.e. its map residence count - * at zero) until all currently-swapped threads run and reach - * a clean point, at which time they will be swapped again, - * decrementing the swap_ast_waiting count on the task. - * - * Locking: no locks held upon entry and exit. - * Task_lock is held throughout this function. - */ -kern_return_t -task_swapout(task_t task) -{ - thread_act_t thr_act; - thread_t thread; - queue_head_t *list; - int s; - - task_swapout_lock(); - task_lock(task); /* - * NOTE: look into turning these into assertions if they - * are invariants. + * We don't support swapping, this call is purely advisory. */ - if ((task->swap_state != TASK_SW_IN) || (!task->active)) { - task_unlock(task); - task_swapout_unlock(); - return(KERN_FAILURE); - } - if (task->swap_flags & TASK_SW_ELIGIBLE) { - queue_remove(&eligible_tasks, task, task_t, swapped_tasks); - task->swap_flags &= ~TASK_SW_ELIGIBLE; - } - task_swapout_unlock(); - - /* set state to avoid races with task_swappable(FALSE) */ - task->swap_state = TASK_SW_GOING_OUT; - task->swap_rss = pmap_resident_count(task->map->pmap); - task_swaprss_out += task->swap_rss; - task->swap_ast_waiting = task->thr_act_count; - - /* - * halt all threads in this task: - * We don't need the thread list lock for traversal. 
- */ - list = &task->thr_acts; - thr_act = (thread_act_t) queue_first(list); - while (!queue_end(list, (queue_entry_t) thr_act)) { - boolean_t swappable; - thread_act_t ract; - - thread = act_lock_thread(thr_act); - s = splsched(); - if (!thread) - swappable = (thr_act->swap_state != TH_SW_UNSWAPPABLE); - else { - thread_lock(thread); - swappable = TRUE; - for (ract = thread->top_act; ract; ract = ract->lower) - if (ract->swap_state == TH_SW_UNSWAPPABLE) { - swappable = FALSE; - break; - } - } - if (swappable) - thread_ast_set(thr_act, AST_SWAPOUT); - if (thread) - thread_unlock(thread); - splx(s); - assert((thr_act->ast & AST_TERMINATE) == 0); - act_unlock_thread(thr_act); - thr_act = (thread_act_t) queue_next(&thr_act->thr_acts); - } - - task->swap_stamp = sched_tick; - task->swap_nswap++; - assert((task->swap_flags&TASK_SW_WANT_IN) == 0); - /* put task on the queue of swapped out tasks */ - task_swapper_lock(); -#if TASK_SW_DEBUG - if (task_swap_debug && on_swapped_list(task)) { - printf("task 0x%X already on list\n", task); - Debugger(""); - } -#endif /* TASK_SW_DEBUG */ - queue_enter(&swapped_tasks, task, task_t, swapped_tasks); - tasks_swapped_out++; - task_swapouts++; - task_swapper_unlock(); - task_unlock(task); - - return(KERN_SUCCESS); + return (KERN_SUCCESS); } - -#ifdef TASK_SW_STATS -int task_sw_race_in = 0; -int task_sw_race_coming_in = 0; -int task_sw_race_going_out = 0; -int task_sw_before_ast = 0; -int task_sw_before_swap = 0; -int task_sw_after_swap = 0; -int task_sw_race_in_won = 0; -int task_sw_unswappable = 0; -int task_sw_act_inactive = 0; -#endif /* TASK_SW_STATS */ - -/* - * thread_swapout_enqueue is called by thread_halt_self when it - * processes AST_SWAPOUT to enqueue threads to be swapped out. - * It must be called at normal interrupt priority for the - * sake of the task_swapper_lock. - * - * There can be races with task swapin here. - * First lock task and decrement swap_ast_waiting count, and if - * it's 0, we can decrement the residence count on the task's map - * and set the task's swap state to TASK_SW_OUT. - */ -void -thread_swapout_enqueue(thread_act_t thr_act) -{ - task_t task = thr_act->task; - task_lock(task); - /* - * If the swap_state is not TASK_SW_GOING_OUT, then - * task_swapin has beaten us to this operation, and - * we have nothing to do. - */ - if (task->swap_state != TASK_SW_GOING_OUT) { - task_unlock(task); - return; - } - if (--task->swap_ast_waiting == 0) { - vm_map_t map = task->map; - task->swap_state = TASK_SW_OUT; - task_unlock(task); - mutex_lock(&map->s_lock); - vm_map_res_deallocate(map); - mutex_unlock(&map->s_lock); - } else - task_unlock(task); - - task_swapper_lock(); - act_lock(thr_act); - if (! (thr_act->swap_state & TH_SW_TASK_SWAPPING)) { - /* - * We lost a race with task_swapin(): don't enqueue. - */ - } else { - queue_enter(&swapout_thread_q, thr_act, - thread_act_t, swap_queue); - task_swapper_wakeup(); - } - act_unlock(thr_act); - task_swapper_unlock(); -} - -/* - * task_swap_swapout_thread: [exported] - * - * Executes as a separate kernel thread. - * Its job is to swap out threads that have been halted by AST_SWAPOUT. - */ -void -task_swap_swapout_thread(void) -{ - thread_act_t thr_act; - thread_t thread, nthread; - task_t task; - int s; - - thread_swappable(current_act(), FALSE); - stack_privilege(current_thread()); - - spllo(); - - task_swapper_lock(); - while (TRUE) { - while (! 
queue_empty(&swapout_thread_q)) { - - queue_remove_first(&swapout_thread_q, thr_act, - thread_act_t, swap_queue); - /* - * If we're racing with task_swapin, we need - * to make it safe for it to do remque on the - * thread, so make its links point to itself. - * Allowing this ugliness is cheaper than - * making task_swapin search the entire queue. - */ - act_lock(thr_act); - queue_init((queue_t) &thr_act->swap_queue); - act_unlock(thr_act); - task_swapper_unlock(); - /* - * Wait for thread's RUN bit to be deasserted. - */ - thread = act_lock_thread(thr_act); - if (thread == THREAD_NULL) - act_unlock_thread(thr_act); - else { - boolean_t r; - - thread_reference(thread); - thread_hold(thr_act); - act_unlock_thread(thr_act); - r = thread_stop_wait(thread); - nthread = act_lock_thread(thr_act); - thread_release(thr_act); - thread_deallocate(thread); - act_unlock_thread(thr_act); - if (!r || nthread != thread) { - task_swapper_lock(); - continue; - } - } - task = thr_act->task; - task_lock(task); - /* - * we can race with swapin, which would set the - * state to TASK_SW_IN. - */ - if ((task->swap_state != TASK_SW_OUT) && - (task->swap_state != TASK_SW_GOING_OUT)) { - task_unlock(task); - task_swapper_lock(); - TASK_STATS_INCR(task_sw_race_in_won); - if (thread != THREAD_NULL) - thread_unstop(thread); - continue; - } - nthread = act_lock_thread(thr_act); - if (nthread != thread || thr_act->active == FALSE) { - act_unlock_thread(thr_act); - task_unlock(task); - task_swapper_lock(); - TASK_STATS_INCR(task_sw_act_inactive); - if (thread != THREAD_NULL) - thread_unstop(thread); - continue; - } - s = splsched(); - if (thread != THREAD_NULL) - thread_lock(thread); - /* - * Thread cannot have been swapped out yet because - * TH_SW_TASK_SWAPPING was set in AST. If task_swapin - * beat us here, we either wouldn't have found it on - * the queue, or the task->swap_state would have - * changed. The synchronization is on the - * task's swap_state and the task_lock. - * The thread can't be swapped in any other way - * because its task has been swapped. - */ - assert(thr_act->swap_state & TH_SW_TASK_SWAPPING); - assert(thread == THREAD_NULL || - !(thread->state & (TH_SWAPPED_OUT|TH_RUN))); - assert((thr_act->swap_state & TH_SW_STATE) == TH_SW_IN); - /* assert(thread->state & TH_HALTED); */ - /* this also clears TH_SW_TASK_SWAPPING flag */ - thr_act->swap_state = TH_SW_GOING_OUT; - if (thread != THREAD_NULL) { - if (thread->top_act == thr_act) { - thread->state |= TH_SWAPPED_OUT; - /* - * Once we unlock the task, things can happen - * to the thread, so make sure it's consistent - * for thread_swapout. - */ - } - thread->ref_count++; - thread_unlock(thread); - thread_unstop(thread); - } - splx(s); - act_locked_act_reference(thr_act); - act_unlock_thread(thr_act); - task_unlock(task); - - thread_swapout(thr_act); /* do the work */ - - if (thread != THREAD_NULL) - thread_deallocate(thread); - act_deallocate(thr_act); - task_swapper_lock(); - } - task_swapper_sleep(); - } -} - -/* - * task_swapin: - * - * Make a task resident. - * Performs all of the work to make a task resident and possibly - * non-swappable. If we race with a competing task_swapin call, - * we wait for its completion, then return. - * - * Locking: no locks held upon entry and exit. - * - * Note that TASK_SW_MAKE_UNSWAPPABLE can only be set when the - * state is TASK_SW_COMING_IN. 
- */ - -kern_return_t -task_swapin(task_t task, boolean_t make_unswappable) -{ - register queue_head_t *list; - register thread_act_t thr_act, next; - thread_t thread; - int s; - boolean_t swappable = TRUE; - - task_lock(task); - switch (task->swap_state) { - case TASK_SW_OUT: - { - vm_map_t map = task->map; - /* - * Task has made it all the way out, which means - * that vm_map_res_deallocate has been done; set - * state to TASK_SW_COMING_IN, then bring map - * back in. We could actually be racing with - * the thread_swapout_enqueue, which does the - * vm_map_res_deallocate, but that race is covered. - */ - task->swap_state = TASK_SW_COMING_IN; - assert(task->swap_ast_waiting == 0); - assert(map->res_count >= 0); - task_unlock(task); - mutex_lock(&map->s_lock); - vm_map_res_reference(map); - mutex_unlock(&map->s_lock); - task_lock(task); - assert(task->swap_state == TASK_SW_COMING_IN); - } - break; - - case TASK_SW_GOING_OUT: - /* - * Task isn't all the way out yet. There is - * still at least one thread not swapped, and - * vm_map_res_deallocate has not been done. - */ - task->swap_state = TASK_SW_COMING_IN; - assert(task->swap_ast_waiting > 0 || - (task->swap_ast_waiting == 0 && - task->thr_act_count == 0)); - assert(task->map->res_count > 0); - TASK_STATS_INCR(task_sw_race_going_out); - break; - case TASK_SW_IN: - assert(task->map->res_count > 0); -#if TASK_SW_DEBUG - task_swapper_lock(); - if (task_swap_debug && on_swapped_list(task)) { - printf("task 0x%X on list, state is SW_IN\n", - task); - Debugger(""); - } - task_swapper_unlock(); -#endif /* TASK_SW_DEBUG */ - TASK_STATS_INCR(task_sw_race_in); - if (make_unswappable) { - task->swap_state = TASK_SW_UNSWAPPABLE; - task_unlock(task); - task_swapout_ineligible(task); - } else - task_unlock(task); - return(KERN_SUCCESS); - case TASK_SW_COMING_IN: - /* - * Raced with another task_swapin and lost; - * wait for other one to complete first - */ - assert(task->map->res_count >= 0); - /* - * set MAKE_UNSWAPPABLE so that whoever is swapping - * the task in will make it unswappable, and return - */ - if (make_unswappable) - task->swap_flags |= TASK_SW_MAKE_UNSWAPPABLE; - task->swap_flags |= TASK_SW_WANT_IN; - assert_wait((event_t)&task->swap_state, THREAD_UNINT); - task_unlock(task); - thread_block(THREAD_CONTINUE_NULL); - TASK_STATS_INCR(task_sw_race_coming_in); - return(KERN_SUCCESS); - case TASK_SW_UNSWAPPABLE: - /* - * This can happen, since task_terminate - * unconditionally calls task_swapin. - */ - task_unlock(task); - return(KERN_SUCCESS); - default: - panic("task_swapin bad state"); - break; - } - if (make_unswappable) - task->swap_flags |= TASK_SW_MAKE_UNSWAPPABLE; - assert(task->swap_state == TASK_SW_COMING_IN); - task_swapper_lock(); -#if TASK_SW_DEBUG - if (task_swap_debug && !on_swapped_list(task)) { - printf("task 0x%X not on list\n", task); - Debugger(""); - } -#endif /* TASK_SW_DEBUG */ - queue_remove(&swapped_tasks, task, task_t, swapped_tasks); - tasks_swapped_out--; - task_swapins++; - task_swapper_unlock(); - - /* - * Iterate through all threads for this task and - * release them, as required. They may not have been swapped - * out yet. The task remains locked throughout. 
- */ - list = &task->thr_acts; - thr_act = (thread_act_t) queue_first(list); - while (!queue_end(list, (queue_entry_t) thr_act)) { - boolean_t need_to_release; - next = (thread_act_t) queue_next(&thr_act->thr_acts); - /* - * Keep task_swapper_lock across thread handling - * to synchronize with task_swap_swapout_thread - */ - task_swapper_lock(); - thread = act_lock_thread(thr_act); - s = splsched(); - if (thr_act->ast & AST_SWAPOUT) { - /* thread hasn't gotten the AST yet, just clear it */ - thread_ast_clear(thr_act, AST_SWAPOUT); - need_to_release = FALSE; - TASK_STATS_INCR(task_sw_before_ast); - splx(s); - act_unlock_thread(thr_act); - } else { - /* - * If AST_SWAPOUT was cleared, then thread_hold, - * or equivalent was done. - */ - need_to_release = TRUE; - /* - * Thread has hit AST, but it may not have - * been dequeued yet, so we need to check. - * NOTE: the thread may have been dequeued, but - * has not yet been swapped (the task_swapper_lock - * has been dropped, but the thread is not yet - * locked), and the TH_SW_TASK_SWAPPING flag may - * not have been cleared. In this case, we will do - * an extra remque, which the task_swap_swapout_thread - * has made safe, and clear the flag, which is also - * checked by the t_s_s_t before doing the swapout. - */ - if (thread) - thread_lock(thread); - if (thr_act->swap_state & TH_SW_TASK_SWAPPING) { - /* - * hasn't yet been dequeued for swapout, - * so clear flags and dequeue it first. - */ - thr_act->swap_state &= ~TH_SW_TASK_SWAPPING; - assert(thr_act->thread == THREAD_NULL || - !(thr_act->thread->state & - TH_SWAPPED_OUT)); - queue_remove(&swapout_thread_q, thr_act, - thread_act_t, swap_queue); - TASK_STATS_INCR(task_sw_before_swap); - } else { - TASK_STATS_INCR(task_sw_after_swap); - /* - * It's possible that the thread was - * made unswappable before hitting the - * AST, in which case it's still running. - */ - if (thr_act->swap_state == TH_SW_UNSWAPPABLE) { - need_to_release = FALSE; - TASK_STATS_INCR(task_sw_unswappable); - } - } - if (thread) - thread_unlock(thread); - splx(s); - act_unlock_thread(thr_act); - } - task_swapper_unlock(); - - /* - * thread_release will swap in the thread if it's been - * swapped out. - */ - if (need_to_release) { - act_lock_thread(thr_act); - thread_release(thr_act); - act_unlock_thread(thr_act); - } - thr_act = next; - } - - if (task->swap_flags & TASK_SW_MAKE_UNSWAPPABLE) { - task->swap_flags &= ~TASK_SW_MAKE_UNSWAPPABLE; - task->swap_state = TASK_SW_UNSWAPPABLE; - swappable = FALSE; - } else { - task->swap_state = TASK_SW_IN; - } - - task_swaprss_in += pmap_resident_count(task->map->pmap); - task_swap_total_time += sched_tick - task->swap_stamp; - /* note when task came back in */ - task->swap_stamp = sched_tick; - if (task->swap_flags & TASK_SW_WANT_IN) { - task->swap_flags &= ~TASK_SW_WANT_IN; - thread_wakeup((event_t)&task->swap_state); - } - assert((task->swap_flags & TASK_SW_ELIGIBLE) == 0); - task_unlock(task); -#if TASK_SW_DEBUG - task_swapper_lock(); - if (task_swap_debug && on_swapped_list(task)) { - printf("task 0x%X on list at end of swap in\n", task); - Debugger(""); - } - task_swapper_unlock(); -#endif /* TASK_SW_DEBUG */ - /* - * Make the task eligible to be swapped again - */ - if (swappable) - task_swapout_eligible(task); - return(KERN_SUCCESS); -} - -void wake_task_swapper(boolean_t now); /* forward */ - -/* - * wake_task_swapper: [exported] - * - * Wakes up task swapper if now == TRUE or if at least - * task_swap_cycle_time has elapsed since the last call. 
- * - * NOTE: this function is not multithreaded, so if there is - * more than one caller, it must be modified. - */ -void -wake_task_swapper(boolean_t now) -{ - /* last_task_swap_cycle may require locking */ - if (now || - (sched_tick > (last_task_swap_cycle + task_swap_cycle_time))) { - last_task_swap_cycle = sched_tick; - if (task_swap_debug) - printf("wake_task_swapper: waking swapper\n"); - thread_wakeup((event_t)&swapped_tasks); /* poke swapper */ - } -} - -task_t pick_intask(void); /* forward */ -/* - * pick_intask: - * returns a task to be swapped in, or TASK_NULL if nothing suitable is found. - * - * current algorithm: Return the task that has been swapped out the - * longest, as long as it is > min_swap_time. It will be dequeued - * if actually swapped in. - * - * NOTE:********************************************** - * task->swap_rss (the size when the task was swapped out) could be used to - * further refine the selection. Another possibility would be to look at - * the state of the thread(s) to see if the task/threads would run if they - * were swapped in. - * *************************************************** - * - * Locking: no locks held upon entry and exit. - */ -task_t -pick_intask(void) -{ - register task_t task = TASK_NULL; - - task_swapper_lock(); - /* the oldest task is the first one */ - if (!queue_empty(&swapped_tasks)) { - task = (task_t) queue_first(&swapped_tasks); - assert(task != TASK_NULL); - /* Make sure it's been out min_swap_time */ - if ((sched_tick - task->swap_stamp) < min_swap_time) - task = TASK_NULL; - } - task_swapper_unlock(); - return(task); -#if 0 - /* - * This code looks at the entire list of swapped tasks, but since - * it does not yet do anything but look at time swapped, we - * can simply use the fact that the queue is ordered, and take - * the first one off the queue. - */ - task = (task_t)queue_first(&swapped_tasks); - while (!queue_end(&swapped_tasks, (queue_entry_t)task)) { - task_lock(task); - tmp_time = sched_tick - task->swap_stamp; - if (tmp_time > min_swap_time && tmp_time > time_swapped) { - target_task = task; - time_swapped = tmp_time; - } - task_unlock(task); - task = (task_t)queue_next(&task->swapped_tasks); - } - task_swapper_unlock(); - return(target_task); -#endif -} - -task_t pick_outtask(void); /* forward */ -/* - * pick_outtask: - * returns a task to be swapped out, with a reference on the task, - * or NULL if no suitable task is found. - * - * current algorithm: - * - * Examine all eligible tasks. While looking, use the first thread in - * each task as an indication of the task's activity. Count up - * "active" threads (those either runnable or sleeping). If the task - * is active (by these criteria), swapped in, and resident - * for at least min_res_time, then select the task with the largest - * number of pages in memory. If there are less - * than min_active_tasks active tasks in the system, then don't - * swap anything out (this avoids swapping out the only running task - * in the system, for example). - * - * NOTE: the task selected will not be removed from the eligible list. - * This means that it will be selected again if it is not swapped - * out, where it is removed from the list. - * - * Locking: no locks held upon entry and exit. Task_swapout_lock must be - * taken before task locks. - * - * *************************************************** - * TBD: - * This algorithm only examines the first thread in the task. 
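Because swapped_tasks is kept in swap-out order, pick_intask() above only ever has to inspect the queue head, which is why the fuller scan is left under #if 0. A sketch of the same head-of-queue selection with a minimum time-out check, using an array in place of the kernel queue (names and numbers are illustrative):

#include <stddef.h>
#include <stdio.h>

struct swapped { unsigned long swap_stamp; const char *name; };

/* The queue is kept in swap-out order, so the head is always the oldest
 * entry; that ordering is what lets the picker inspect one element
 * instead of scanning the whole list. */
static struct swapped *pick_oldest(struct swapped *q, size_t n,
                                   unsigned long now, unsigned long min_out)
{
    if (n == 0)
        return NULL;
    if (now - q[0].swap_stamp < min_out)    /* not out long enough yet */
        return NULL;
    return &q[0];
}

int main(void)
{
    struct swapped q[] = { { 100, "oldest" }, { 140, "newer" } };
    struct swapped *t = pick_oldest(q, 2, 160, 50);

    printf("%s\n", t ? t->name : "(none)");  /* prints "oldest" */
    return 0;
}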
Currently, since - * most swappable tasks in the system are single-threaded, this generalization - * works reasonably well. However, the algorithm should be changed - * to consider all threads in the task if more multi-threaded tasks were used. - * *************************************************** - */ - -#ifdef TASK_SW_STATS -int inactive_task_count = 0; -int empty_task_count = 0; -#endif /* TASK_SW_STATS */ - -task_t -pick_outtask(void) -{ - register task_t task; - register task_t target_task = TASK_NULL; - unsigned long task_rss; - unsigned long target_rss = 0; - boolean_t wired; - boolean_t active; - int nactive = 0; - - task_swapout_lock(); - if (queue_empty(&eligible_tasks)) { - /* not likely to happen */ - task_swapout_unlock(); - return(TASK_NULL); - } - task = (task_t)queue_first(&eligible_tasks); - while (!queue_end(&eligible_tasks, (queue_entry_t)task)) { - int s; - register thread_act_t thr_act; - thread_t th; - - - task_lock(task); - /* - * Don't swap real-time tasks. - * XXX Should we enforce that or can we let really critical - * tasks use task_swappable() to make sure they never end up - * n the eligible list ? - */ - if (task->policy & POLICYCLASS_FIXEDPRI) { - goto tryagain; - } - if (!task->active) { - TASK_STATS_INCR(inactive_task_count); - goto tryagain; - } - if (task->res_act_count == 0) { - TASK_STATS_INCR(empty_task_count); - goto tryagain; - } - assert(!queue_empty(&task->thr_acts)); - thr_act = (thread_act_t)queue_first(&task->thr_acts); - active = FALSE; - th = act_lock_thread(thr_act); - s = splsched(); - if (th != THREAD_NULL) - thread_lock(th); - if ((th == THREAD_NULL) || - (th->state == TH_RUN) || - (th->state & TH_WAIT)) { - /* - * thread is "active": either runnable - * or sleeping. Count it and examine - * it further below. - */ - nactive++; - active = TRUE; - } - if (th != THREAD_NULL) - thread_unlock(th); - splx(s); - act_unlock_thread(thr_act); - if (active && - (task->swap_state == TASK_SW_IN) && - ((sched_tick - task->swap_stamp) > min_res_time)) { - long rescount = pmap_resident_count(task->map->pmap); - /* - * thread must be "active", task must be swapped - * in and resident for at least min_res_time - */ -#if 0 -/* DEBUG Test round-robin strategy. Picking biggest task could cause extreme - * unfairness to such large interactive programs as xterm. Instead, pick the - * first task that has any pages resident: - */ - if (rescount > 1) { - task->ref_count++; - target_task = task; - task_unlock(task); - task_swapout_unlock(); - return(target_task); - } -#else - if (rescount > target_rss) { - /* - * task is not swapped, and it has the - * largest rss seen so far. 
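The selection loop above reduces to: count "active" candidates, track the largest resident set among those that qualify, and give up entirely when too few tasks are active, so the last runnable task is never swapped out. A condensed sketch under those rules; the cand struct is hypothetical and collapses the swap-state, residency-time, and fixed-priority tests into one active flag, which is a simplification of the real checks.

#include <stddef.h>
#include <stdio.h>

struct cand { long resident_pages; int active; };

static struct cand *pick_victim(struct cand *c, size_t n, int min_active)
{
    struct cand *best = NULL;
    int nactive = 0;

    for (size_t i = 0; i < n; i++) {
        if (!c[i].active)
            continue;
        nactive++;
        if (best == NULL || c[i].resident_pages > best->resident_pages)
            best = &c[i];          /* largest resident set so far */
    }
    /* Refuse to pick when too few tasks are active at all. */
    return (nactive < min_active) ? NULL : best;
}

int main(void)
{
    struct cand c[] = { { 120, 1 }, { 300, 1 }, { 50, 0 } };
    struct cand *v = pick_victim(c, 3, 2);

    printf("%ld\n", v ? v->resident_pages : -1);   /* prints 300 */
    return 0;
}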
- */ - task->ref_count++; - target_rss = rescount; - assert(target_task != task); - if (target_task != TASK_NULL) - task_deallocate(target_task); - target_task = task; - } -#endif - } -tryagain: - task_unlock(task); - task = (task_t)queue_next(&task->swapped_tasks); - } - task_swapout_unlock(); - /* only swap out if there are at least min_active_tasks */ - if (nactive < min_active_tasks) { - if (target_task != TASK_NULL) { - task_deallocate(target_task); - target_task = TASK_NULL; - } - } - return(target_task); -} - -#if TASK_SW_DEBUG -void print_pid(task_t task, unsigned long n1, unsigned long n2, - const char *comp, const char *inout); /* forward */ -void -print_pid( - task_t task, - unsigned long n1, - unsigned long n2, - const char *comp, - const char *inout) -{ - long rescount; - task_lock(task); - rescount = pmap_resident_count(task->map->pmap); - task_unlock(task); - printf("task_swapper: swapped %s task %x; %d %s %d; res=%d\n", - inout, task, n1, comp, n2, rescount); -} -#endif - -/* - * task_swapper: [exported] - * - * Executes as a separate kernel thread. - */ -#define MAX_LOOP 3 -void -task_swapper(void) -{ - task_t outtask, intask; - int timeout; - int loopcnt = 0; - boolean_t start_swapping; - boolean_t stop_swapping; - int local_page_free_avg; - extern int hz; - - thread_swappable(current_act(), FALSE); - stack_privilege(current_thread()); - - spllo(); - - for (;;) { - local_page_free_avg = vm_page_free_avg; - while (TRUE) { -#if 0 - if (task_swap_debug) - printf("task_swapper: top of loop; cnt = %d\n",loopcnt); -#endif - intask = pick_intask(); - - start_swapping = ((vm_pageout_rate_avg > swap_start_pageout_rate) || - (vm_grab_rate_avg > max_grab_rate)); - stop_swapping = (vm_pageout_rate_avg < swap_stop_pageout_rate); - - /* - * If a lot of paging is going on, or another task should come - * in but memory is tight, find something to swap out and start - * it. Don't swap any task out if task swapping is disabled. - * vm_page_queue_free_lock protects the vm globals. - */ - outtask = TASK_NULL; - if (start_swapping || - (!stop_swapping && intask && - ((local_page_free_avg / AVE_SCALE) < vm_page_free_target)) - ) { - if (task_swap_enable && - (outtask = pick_outtask()) && - (task_swapout(outtask) == KERN_SUCCESS)) { - unsigned long rss; -#if TASK_SW_DEBUG - if (task_swap_debug) - print_pid(outtask, local_page_free_avg / AVE_SCALE, - vm_page_free_target, "<", - "out"); -#endif - rss = outtask->swap_rss; - if (outtask->swap_nswap == 1) - rss /= 2; /* divide by 2 if never out */ - local_page_free_avg += (rss/short_avg_interval) * AVE_SCALE; - } - if (outtask != TASK_NULL) - task_deallocate(outtask); - } - - /* - * If there is an eligible task to bring in and there are at - * least vm_page_free_target free pages, swap it in. If task - * swapping has been disabled, bring the task in anyway. - */ - if (intask && ((local_page_free_avg / AVE_SCALE) >= - vm_page_free_target || - stop_swapping || !task_swap_enable)) { - if (task_swapin(intask, FALSE) == KERN_SUCCESS) { - unsigned long rss; -#if TASK_SW_DEBUG - if (task_swap_debug) - print_pid(intask, local_page_free_avg / AVE_SCALE, - vm_page_free_target, ">=", - "in"); -#endif - rss = intask->swap_rss; - if (intask->swap_nswap == 1) - rss /= 2; /* divide by 2 if never out */ - local_page_free_avg -= (rss/short_avg_interval) * AVE_SCALE; - } - } - /* - * XXX - * Here we have to decide whether to continue swapping - * in and/or out before sleeping. 
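The start_swapping/stop_swapping pair above is deliberately asymmetric: swapping starts above one pageout-rate threshold and stops only below a lower one, which keeps the swapper from oscillating when the rate hovers near a single cutoff. A minimal sketch of that hysteresis test; the threshold numbers are made-up stand-ins for swap_start_pageout_rate, swap_stop_pageout_rate, and max_grab_rate.

#include <stdbool.h>
#include <stdio.h>

static const long start_rate = 80, stop_rate = 20, max_grab = 500;

struct vm_sample { long pageout_rate_avg, grab_rate_avg; };

/* Start when paging or page-grabbing is heavy ... */
static bool should_start(const struct vm_sample *s)
{
    return s->pageout_rate_avg > start_rate || s->grab_rate_avg > max_grab;
}

/* ... but only stop once the rate falls well below the start point. */
static bool should_stop(const struct vm_sample *s)
{
    return s->pageout_rate_avg < stop_rate;
}

int main(void)
{
    struct vm_sample s = { .pageout_rate_avg = 90, .grab_rate_avg = 10 };

    printf("start=%d stop=%d\n", should_start(&s), should_stop(&s));
    return 0;
}

The gap between the two thresholds is the hysteresis band; a sample of 50 in this sketch triggers neither a start nor a stop, leaving the swapper in whatever mode it was already in.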
The decision should - * be made based on the previous action (swapin/out) and - * current system parameters, such as paging rates and - * demand. - * The function, compute_vm_averages, which does these - * calculations, depends on being called every second, - * so we can't just do the same thing. - */ - if (++loopcnt < MAX_LOOP) - continue; - - /* - * Arrange to be awakened if paging is still heavy or there are - * any tasks partially or completely swapped out. (Otherwise, - * the wakeup will come from the external trigger(s).) - */ - timeout = 0; - if (start_swapping) - timeout = task_swap_cycle_time; - else { - task_swapper_lock(); - if (!queue_empty(&swapped_tasks)) - timeout = min_swap_time; - task_swapper_unlock(); - } - assert_wait((event_t)&swapped_tasks, THREAD_UNINT); - if (timeout) { - if (task_swap_debug) - printf("task_swapper: set timeout of %d\n", - timeout); - thread_set_timeout(timeout, NSEC_PER_SEC); - } - if (task_swap_debug) - printf("task_swapper: blocking\n"); - thread_block(THREAD_CONTINUE_NULL); - if (timeout) { - thread_cancel_timeout(current_thread()); - } - /* reset locals */ - loopcnt = 0; - local_page_free_avg = vm_page_free_avg; - } - } -} - -/* from BSD */ -#define ave(smooth, cnt, time) \ - smooth = ((time - 1) * (smooth) + ((cnt) * AVE_SCALE)) / (time) - -/* - * We estimate the system paging load in more than one metric: - * 1) the total number of calls into the function, vm_page_grab, - * which allocates all page frames for real pages. - * 2) the total number of pages paged in and out of paging files. - * This is a measure of page cleaning and faulting from backing - * store. - * - * When either metric passes a threshold, tasks are swapped out. - */ -long last_grab_count = 0; -long last_pageout_count = 0; - -/* - * compute_vm_averages: [exported] - * - * This function is to be called once a second to calculate average paging - * demand and average numbers of free pages for use by the task swapper. - * Can also be used to wake up task swapper at desired thresholds. - * - * NOTE: this function is single-threaded, and requires locking if - * ever there are multiple callers. - */ -void -compute_vm_averages(void) -{ - extern unsigned long vm_page_grab_count; - long grab_count, pageout_count; - int i; - - ave(vm_page_free_avg, vm_page_free_count, short_avg_interval); - ave(vm_page_free_longavg, vm_page_free_count, long_avg_interval); - - /* - * NOTE: the vm_page_grab_count and vm_stat structure are - * under control of vm_page_queue_free_lock. We're simply reading - * memory here, and the numbers don't depend on each other, so - * no lock is taken. - */ - - grab_count = vm_page_grab_count; - pageout_count = 0; - for (i = 0; i < NCPUS; i++) { - pageout_count += vm_stat[i].pageouts; - } - - ave(vm_pageout_rate_avg, pageout_count - last_pageout_count, - short_avg_interval); - ave(vm_pageout_rate_longavg, pageout_count - last_pageout_count, - long_avg_interval); - ave(vm_grab_rate_avg, grab_count - last_grab_count, - short_avg_interval); - last_grab_count = grab_count; - last_pageout_count = pageout_count; - - /* - * Adjust swap_{start,stop}_pageout_rate to the paging rate peak. - * This is an attempt to find the optimum paging rates at which - * to trigger task swapping on or off to regulate paging activity, - * depending on the hardware capacity. 
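The ave() macro defined above is a fixed-point exponential moving average: each update keeps (time-1)/time of the old value and folds in the new sample scaled by AVE_SCALE so everything stays in integer arithmetic. A runnable demonstration of one such average converging on a constant input; the AVE_SCALE value and interval here are illustrative, not the kernel's.

#include <stdio.h>

#define AVE_SCALE 1024    /* fixed-point scale; the kernel's value may differ */

/* One step of ave(): smooth = ((time-1)*smooth + cnt*AVE_SCALE) / time */
static long ave_step(long smooth, long cnt, long interval)
{
    return ((interval - 1) * smooth + cnt * AVE_SCALE) / interval;
}

int main(void)
{
    long smooth = 0;

    /* Feed a constant sample of 10; the average climbs toward 10*AVE_SCALE.
     * Integer truncation parks it a few counts below the fixed point, so
     * round when unscaling. */
    for (int i = 0; i < 200; i++)
        smooth = ave_step(smooth, 10, 5);

    printf("scaled=%ld unscaled~=%ld\n",
           smooth, (smooth + AVE_SCALE / 2) / AVE_SCALE);   /* ~10 */
    return 0;
}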
- */ - if (vm_pageout_rate_avg > vm_pageout_rate_peakavg) { - unsigned int desired_max; - - vm_pageout_rate_peakavg = vm_pageout_rate_avg; - swap_start_pageout_rate = - vm_pageout_rate_peakavg * swap_pageout_high_water_mark / 100; - swap_stop_pageout_rate = - vm_pageout_rate_peakavg * swap_pageout_low_water_mark / 100; - } - -#if TASK_SW_DEBUG - /* - * For measurements, allow fixed values. - */ - if (fixed_swap_start_pageout_rate) - swap_start_pageout_rate = fixed_swap_start_pageout_rate; - if (fixed_swap_stop_pageout_rate) - swap_stop_pageout_rate = fixed_swap_stop_pageout_rate; -#endif /* TASK_SW_DEBUG */ - -#if TASK_SW_DEBUG - if (task_swap_stats) - printf("vm_avgs: pageout_rate: %d %d (on/off: %d/%d); page_free: %d %d (tgt: %d)\n", - vm_pageout_rate_avg / AVE_SCALE, - vm_pageout_rate_longavg / AVE_SCALE, - swap_start_pageout_rate / AVE_SCALE, - swap_stop_pageout_rate / AVE_SCALE, - vm_page_free_avg / AVE_SCALE, - vm_page_free_longavg / AVE_SCALE, - vm_page_free_target); -#endif /* TASK_SW_DEBUG */ - - if (vm_page_free_avg / AVE_SCALE <= vm_page_free_target) { - if (task_swap_on) { - /* The following is a delicate attempt to balance the - * need for reasonably rapid response to system - * thrashing, with the equally important desire to - * prevent the onset of swapping simply because of a - * short burst of paging activity. - */ - if ((vm_pageout_rate_longavg > swap_stop_pageout_rate) && - (vm_pageout_rate_avg > swap_start_pageout_rate) || - (vm_pageout_rate_avg > vm_pageout_rate_peakavg) || - (vm_grab_rate_avg > max_grab_rate)) - wake_task_swapper(FALSE); - } - } else /* page demand is low; should consider swapin */ { - if (tasks_swapped_out != 0) - wake_task_swapper(TRUE); - } -} - -void -task_swapout_eligible(task_t task) -{ -#if TASK_SW_DEBUG - task_swapper_lock(); - if (task_swap_debug && on_swapped_list(task)) { - printf("swapout_eligible: task 0x%X on swapped list\n", task); - Debugger(""); - } - task_swapper_unlock(); -#endif - task_swapout_lock(); - task_lock(task); -#if TASK_SW_DEBUG - if (task->swap_flags & TASK_SW_ELIGIBLE) { - printf("swapout_eligible: task 0x%X already eligible\n", task); - } -#endif /* TASK_SW_DEBUG */ - if ((task->swap_state == TASK_SW_IN) && - ((task->swap_flags & TASK_SW_ELIGIBLE) == 0)) { - queue_enter(&eligible_tasks,task,task_t,swapped_tasks); - task->swap_flags |= TASK_SW_ELIGIBLE; - } - task_unlock(task); - task_swapout_unlock(); -} - -void -task_swapout_ineligible(task_t task) -{ -#if TASK_SW_DEBUG - task_swapper_lock(); - if (task_swap_debug && on_swapped_list(task)) { - printf("swapout_ineligible: task 0x%X on swapped list\n", task); - Debugger(""); - } - task_swapper_unlock(); -#endif - task_swapout_lock(); - task_lock(task); -#if TASK_SW_DEBUG - if (!(task->swap_flags & TASK_SW_ELIGIBLE)) - printf("swapout_ineligible: task 0x%X already inel.\n", task); -#endif /* TASK_SW_DEBUG */ - if ((task->swap_state != TASK_SW_IN) && - (task->swap_flags & TASK_SW_ELIGIBLE)) { - queue_remove(&eligible_tasks, task, task_t, swapped_tasks); - task->swap_flags &= ~TASK_SW_ELIGIBLE; - } - task_unlock(task); - task_swapout_unlock(); -} - -int task_swap_ast_aborted = 0; - -/* - * Process an AST_SWAPOUT. - */ -void -swapout_ast() -{ - spl_t s; - thread_act_t act; - thread_t thread; - - act = current_act(); - - /* - * Task is being swapped out. First mark it as suspended - * and halted, then call thread_swapout_enqueue to put - * the thread on the queue for task_swap_swapout_threads - * to swap out the thread. 
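swap_start_pageout_rate and swap_stop_pageout_rate above are re-derived whenever a new pageout-rate peak is observed, as fixed percentages of that peak, so the triggers scale with what the hardware has shown it can sustain. A sketch of the peak-tracking water marks; the 90/40 percentages are placeholders for swap_pageout_high_water_mark and swap_pageout_low_water_mark, whose actual defaults are not shown in this hunk.

#include <stdio.h>

struct swap_tuning { long peak, start_rate, stop_rate; };

/* Re-derive the on/off thresholds only when a new peak is seen. */
static void update_tuning(struct swap_tuning *t, long rate_avg)
{
    if (rate_avg > t->peak) {
        t->peak = rate_avg;
        t->start_rate = t->peak * 90 / 100;   /* high water mark */
        t->stop_rate  = t->peak * 40 / 100;   /* low water mark */
    }
}

int main(void)
{
    struct swap_tuning t = { 0, 0, 0 };
    long samples[] = { 10, 25, 60, 55, 80, 70 };

    for (int i = 0; i < 6; i++) {
        update_tuning(&t, samples[i]);
        printf("avg=%ld peak=%ld start=%ld stop=%ld\n",
               samples[i], t.peak, t.start_rate, t.stop_rate);
    }
    return 0;
}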
- */ - /* - * Don't swap unswappable threads - */ - thread = act_lock_thread(act); - s = splsched(); - if (thread) - thread_lock(thread); - if ((act->ast & AST_SWAPOUT) == 0) { - /* - * Race with task_swapin. Abort swapout. - */ - task_swap_ast_aborted++; /* not locked XXX */ - if (thread) - thread_unlock(thread); - splx(s); - act_unlock_thread(act); - } else if (act->swap_state == TH_SW_IN) { - /* - * Mark swap_state as TH_SW_TASK_SWAPPING to avoid - * race with thread swapper, which will only - * swap thread if swap_state is TH_SW_IN. - * This way, the thread can only be swapped by - * the task swapping mechanism. - */ - act->swap_state |= TH_SW_TASK_SWAPPING; - /* assert(act->suspend_count == 0); XXX ? */ - if (thread) - thread_unlock(thread); - if (act->suspend_count++ == 0) /* inline thread_hold */ - install_special_handler(act); - /* self->state |= TH_HALTED; */ - thread_ast_clear(act, AST_SWAPOUT); - /* - * Initialize the swap_queue fields to allow an extra - * queue_remove() in task_swapin if we lose the race - * (task_swapin can be called before we complete - * thread_swapout_enqueue). - */ - queue_init((queue_t) &act->swap_queue); - splx(s); - act_unlock_thread(act); - /* this must be called at normal interrupt level */ - thread_swapout_enqueue(act); - } else { - /* thread isn't swappable; continue running */ - assert(act->swap_state == TH_SW_UNSWAPPABLE); - if (thread) - thread_unlock(thread); - thread_ast_clear(act, AST_SWAPOUT); - splx(s); - act_unlock_thread(act); - } -} - -#endif /* TASK_SWAPPER */ diff --git a/osfmk/kern/thread.c b/osfmk/kern/thread.c index 1466b190b..9598886f3 100644 --- a/osfmk/kern/thread.c +++ b/osfmk/kern/thread.c @@ -126,27 +126,15 @@ #include #include -/* - * Per-Cpu stashed global state - */ -vm_offset_t active_stacks[NCPUS]; /* per-cpu active stacks */ -vm_offset_t kernel_stack[NCPUS]; /* top of active stacks */ -thread_act_t active_kloaded[NCPUS]; /* + act if kernel loaded */ -boolean_t first_thread; +static struct zone *thread_zone; -struct zone *thread_shuttle_zone; - -queue_head_t reaper_queue; -decl_simple_lock_data(,reaper_lock) +static queue_head_t reaper_queue; +decl_simple_lock_data(static,reaper_lock) extern int tick; -extern void pcb_module_init(void); - -struct thread_shuttle pageout_thread; - /* private */ -static struct thread_shuttle thr_sh_template; +static struct thread thread_template, init_thread; #if MACH_DEBUG @@ -157,28 +145,6 @@ extern void stack_statistics( #endif /* MACHINE_STACK */ #endif /* MACH_DEBUG */ -/* Forwards */ -void thread_collect_scan(void); - -kern_return_t thread_create_shuttle( - thread_act_t thr_act, - integer_t priority, - void (*start)(void), - thread_t *new_thread); - -extern void Load_context( - thread_t thread); - - -/* - * Machine-dependent code must define: - * thread_machine_init - * thread_machine_terminate - * thread_machine_collect - * - * The thread->pcb field is reserved for machine-dependent code. - */ - #ifdef MACHINE_STACK /* * Machine-dependent code must define: @@ -200,18 +166,22 @@ extern void Load_context( * because stack_alloc_try/thread_invoke operate at splsched. 
*/ -decl_simple_lock_data(,stack_lock_data) /* splsched only */ -#define stack_lock() simple_lock(&stack_lock_data) -#define stack_unlock() simple_unlock(&stack_lock_data) +decl_simple_lock_data(static,stack_lock_data) +#define stack_lock() simple_lock(&stack_lock_data) +#define stack_unlock() simple_unlock(&stack_lock_data) + +static vm_map_t stack_map; +static vm_offset_t stack_free_list; + +static vm_offset_t stack_free_cache[NCPUS]; -mutex_t stack_map_lock; /* Lock when allocating stacks maps */ -vm_map_t stack_map; /* Map for allocating stacks */ -vm_offset_t stack_free_list; /* splsched only */ unsigned int stack_free_max = 0; -unsigned int stack_free_count = 0; /* splsched only */ -unsigned int stack_free_limit = 1; /* Arbitrary */ +unsigned int stack_free_count = 0; /* splsched only */ +unsigned int stack_free_limit = 1; /* Arbitrary */ -unsigned int stack_alloc_hits = 0; /* debugging */ +unsigned int stack_cache_hits = 0; /* debugging */ + +unsigned int stack_alloc_hits = 0; /* debugging */ unsigned int stack_alloc_misses = 0; /* debugging */ unsigned int stack_alloc_total = 0; @@ -229,7 +199,7 @@ unsigned int stack_alloc_bndry = 0; /* * stack_alloc: * - * Allocate a kernel stack for an activation. + * Allocate a kernel stack for a thread. * May block. */ vm_offset_t @@ -243,26 +213,6 @@ stack_alloc( if (stack) return (stack); -/* - * We first try the free list. It is probably empty, or - * stack_alloc_try would have succeeded, but possibly a stack was - * freed before the swapin thread got to us. - * - * We allocate stacks from their own map which is submaps of the - * kernel map. Because we want to have a guard page (at least) in - * front of each stack to catch evil code that overruns its stack, we - * allocate the stack on aligned boundaries. The boundary is - * calculated as the next power of 2 above the stack size. For - * example, a stack of 4 pages would have a boundry of 8, likewise 5 - * would also be 8. - * - * We limit the number of stacks to be one allocation chunk - * (THREAD_CHUNK) more than the maximum number of threads - * (THREAD_MAX). The extra is to allow for priviliged threads that - * can sometimes have 2 stacks. - * - */ - s = splsched(); stack_lock(); stack = stack_free_list; @@ -273,9 +223,9 @@ stack_alloc( stack_unlock(); splx(s); - if (stack != 0) { /* Did we find a free one? */ - stack_attach(thread, stack, start_pos); /* Initialize it */ - return (stack); /* Send it on home */ + if (stack != 0) { + machine_stack_attach(thread, stack, start_pos); + return (stack); } if (kernel_memory_allocate( @@ -288,7 +238,7 @@ stack_alloc( if (stack_alloc_total > stack_alloc_hiwater) stack_alloc_hiwater = stack_alloc_total; - stack_attach(thread, stack, start_pos); + machine_stack_attach(thread, stack, start_pos); return (stack); } @@ -296,33 +246,52 @@ stack_alloc( * stack_free: * * Free a kernel stack. - * Called at splsched. 
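The removed comment above describes the old stack-placement rule: each stack is aligned to the next power of two above its size (4 pages -> 8, likewise 5 -> 8), leaving room for a guard page so an overrun faults instead of corrupting a neighbour. A tiny sketch of that boundary calculation, under the strictly-above reading the comment's examples imply:

#include <stdio.h>

/* Next power of two strictly above `pages`, per the examples (4 -> 8, 5 -> 8). */
static unsigned long stack_boundary(unsigned long pages)
{
    unsigned long b = 1;

    while (b <= pages)
        b <<= 1;
    return b;
}

int main(void)
{
    printf("%lu %lu %lu\n",
           stack_boundary(4), stack_boundary(5), stack_boundary(8));
    /* prints: 8 8 16 */
    return 0;
}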
*/ void stack_free( thread_t thread) { - vm_offset_t stack = stack_detach(thread); + vm_offset_t stack = machine_stack_detach(thread); assert(stack); - if (stack != thread->stack_privilege) { + if (stack != thread->reserved_stack) { + spl_t s = splsched(); + vm_offset_t *cache; + + cache = &stack_free_cache[cpu_number()]; + if (*cache == 0) { + *cache = stack; + splx(s); + + return; + } + stack_lock(); stack_next(stack) = stack_free_list; stack_free_list = stack; if (++stack_free_count > stack_free_max) stack_free_max = stack_free_count; stack_unlock(); + splx(s); } } -static void +void stack_free_stack( vm_offset_t stack) { - spl_t s; + spl_t s = splsched(); + vm_offset_t *cache; + + cache = &stack_free_cache[cpu_number()]; + if (*cache == 0) { + *cache = stack; + splx(s); + + return; + } - s = splsched(); stack_lock(); stack_next(stack) = stack_free_list; stack_free_list = stack; @@ -342,14 +311,12 @@ stack_free_stack( void stack_collect(void) { - vm_offset_t stack; - int i; - spl_t s; + spl_t s = splsched(); - s = splsched(); stack_lock(); while (stack_free_count > stack_free_limit) { - stack = stack_free_list; + vm_offset_t stack = stack_free_list; + stack_free_list = stack_next(stack); stack_free_count--; stack_unlock(); @@ -368,6 +335,51 @@ stack_collect(void) splx(s); } +/* + * stack_alloc_try: + * + * Non-blocking attempt to allocate a kernel stack. + * Called at splsched with the thread locked. + */ + +boolean_t stack_alloc_try( + thread_t thread, + void (*start)(thread_t)) +{ + register vm_offset_t stack, *cache; + + cache = &stack_free_cache[cpu_number()]; + if (stack = *cache) { + *cache = 0; + machine_stack_attach(thread, stack, start); + stack_cache_hits++; + + return (TRUE); + } + + stack_lock(); + stack = stack_free_list; + if (stack != (vm_offset_t)0) { + stack_free_list = stack_next(stack); + stack_free_count--; + } + stack_unlock(); + + if (stack == 0) + stack = thread->reserved_stack; + + if (stack != 0) { + machine_stack_attach(thread, stack, start); + stack_alloc_hits++; + + return (TRUE); + } + else { + stack_alloc_misses++; + + return (FALSE); + } +} #if MACH_DEBUG /* @@ -410,79 +422,100 @@ stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_si *exhaustable = 0; } - -/* - * stack_privilege: - * - * stack_alloc_try on this thread must always succeed. - */ - void stack_privilege( - register thread_t thread) + register thread_t thread) +{ + /* OBSOLETE */ +} + +void +thread_bootstrap(void) { /* - * This implementation only works for the current thread. + * Fill in a template thread for fast initialization. */ - if (thread != current_thread()) - panic("stack_privilege"); + thread_template.runq = RUN_QUEUE_NULL; - if (thread->stack_privilege == 0) - thread->stack_privilege = current_stack(); -} + thread_template.ref_count = 1; -/* - * stack_alloc_try: - * - * Non-blocking attempt to allocate a kernel stack. - * Called at splsched with the thread locked. 
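The new stack_free()/stack_free_stack() paths above put a one-entry per-CPU cache in front of the locked global free list, so the common free-then-alloc pair on one CPU never touches stack_lock(). A user-space sketch of the same two-level scheme: a pthread mutex stands in for the simple lock, an explicit cpu argument stands in for cpu_number(), and the kernel's real fast path additionally relies on running at splsched so it cannot migrate mid-operation.

#include <pthread.h>
#include <stddef.h>

#define NCPUS 4

struct stack_node { struct stack_node *next; };

static struct stack_node *percpu_cache[NCPUS];   /* one slot per CPU */
static struct stack_node *free_list;             /* global, locked */
static pthread_mutex_t free_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void stack_put(unsigned cpu, struct stack_node *s)
{
    if (percpu_cache[cpu] == NULL) {   /* fast path: no lock taken */
        percpu_cache[cpu] = s;
        return;
    }
    pthread_mutex_lock(&free_list_lock);
    s->next = free_list;
    free_list = s;
    pthread_mutex_unlock(&free_list_lock);
}

static struct stack_node *stack_get(unsigned cpu)
{
    struct stack_node *s = percpu_cache[cpu];

    if (s != NULL) {                   /* fast path: no lock taken */
        percpu_cache[cpu] = NULL;
        return s;
    }
    pthread_mutex_lock(&free_list_lock);
    s = free_list;
    if (s != NULL)
        free_list = s->next;
    pthread_mutex_unlock(&free_list_lock);
    return s;
}

int main(void)
{
    struct stack_node a, b;

    stack_put(0, &a);
    stack_put(0, &b);   /* cache already full; spills to the global list */
    return (stack_get(0) == &a && stack_get(0) == &b) ? 0 : 1;
}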
- */ + thread_template.reason = AST_NONE; + thread_template.at_safe_point = FALSE; + thread_template.wait_event = NO_EVENT64; + thread_template.wait_queue = WAIT_QUEUE_NULL; + thread_template.wait_result = THREAD_WAITING; + thread_template.interrupt_level = THREAD_ABORTSAFE; + thread_template.state = TH_STACK_HANDOFF | TH_WAIT | TH_UNINT; + thread_template.wake_active = FALSE; + thread_template.active_callout = FALSE; + thread_template.continuation = (void (*)(void))0; + thread_template.top_act = THR_ACT_NULL; -boolean_t stack_alloc_try( - thread_t thread, - void (*start_pos)(thread_t)) -{ - register vm_offset_t stack = thread->stack_privilege; + thread_template.importance = 0; + thread_template.sched_mode = 0; + thread_template.safe_mode = 0; - if (stack == 0) { - stack_lock(); + thread_template.priority = 0; + thread_template.sched_pri = 0; + thread_template.max_priority = 0; + thread_template.task_priority = 0; + thread_template.promotions = 0; + thread_template.pending_promoter_index = 0; + thread_template.pending_promoter[0] = + thread_template.pending_promoter[1] = NULL; - stack = stack_free_list; - if (stack != (vm_offset_t)0) { - stack_free_list = stack_next(stack); - stack_free_count--; - } + thread_template.realtime.deadline = UINT64_MAX; - stack_unlock(); - } + thread_template.current_quantum = 0; - if (stack != 0) { - stack_attach(thread, stack, start_pos); - stack_alloc_hits++; + thread_template.computation_metered = 0; + thread_template.computation_epoch = 0; - return (TRUE); - } - else { - stack_alloc_misses++; + thread_template.cpu_usage = 0; + thread_template.cpu_delta = 0; + thread_template.sched_usage = 0; + thread_template.sched_delta = 0; + thread_template.sched_stamp = 0; + thread_template.sleep_stamp = 0; + thread_template.safe_release = 0; - return (FALSE); - } -} + thread_template.bound_processor = PROCESSOR_NULL; + thread_template.last_processor = PROCESSOR_NULL; + thread_template.last_switch = 0; -uint64_t max_unsafe_computation; -extern int max_unsafe_quanta; + thread_template.vm_privilege = FALSE; -uint32_t sched_safe_duration; + timer_init(&(thread_template.user_timer)); + timer_init(&(thread_template.system_timer)); + thread_template.user_timer_save.low = 0; + thread_template.user_timer_save.high = 0; + thread_template.system_timer_save.low = 0; + thread_template.system_timer_save.high = 0; -uint64_t max_poll_computation; -extern int max_poll_quanta; + thread_template.processor_set = PROCESSOR_SET_NULL; -uint32_t std_quantum; -uint32_t min_std_quantum; + thread_template.act_ref_count = 2; -uint32_t max_rt_quantum; -uint32_t min_rt_quantum; + thread_template.special_handler.handler = special_handler; + thread_template.special_handler.next = 0; + +#if MACH_HOST + thread_template.may_assign = TRUE; + thread_template.assign_active = FALSE; +#endif /* MACH_HOST */ + thread_template.funnel_lock = THR_FUNNEL_NULL; + thread_template.funnel_state = 0; +#if MACH_LDEBUG + thread_template.mutex_count = 0; +#endif /* MACH_LDEBUG */ + + init_thread = thread_template; + + init_thread.top_act = &init_thread; + init_thread.thread = &init_thread; + machine_thread_set_current(&init_thread); +} void thread_init(void) @@ -490,89 +523,12 @@ thread_init(void) kern_return_t ret; unsigned int stack; - thread_shuttle_zone = zinit( - sizeof(struct thread_shuttle), - THREAD_MAX * sizeof(struct thread_shuttle), - THREAD_CHUNK * sizeof(struct thread_shuttle), + thread_zone = zinit( + sizeof(struct thread), + THREAD_MAX * sizeof(struct thread), + THREAD_CHUNK * sizeof(struct thread), 
"threads"); - /* - * Fill in a template thread_shuttle for fast initialization. - * [Fields that must be (or are typically) reset at - * time of creation are so noted.] - */ - - /* thr_sh_template.links (none) */ - thr_sh_template.runq = RUN_QUEUE_NULL; - - - /* thr_sh_template.task (later) */ - /* thr_sh_template.thread_list (later) */ - /* thr_sh_template.pset_threads (later) */ - - /* reference for activation */ - thr_sh_template.ref_count = 1; - - thr_sh_template.reason = AST_NONE; - thr_sh_template.at_safe_point = FALSE; - thr_sh_template.wait_event = NO_EVENT64; - thr_sh_template.wait_queue = WAIT_QUEUE_NULL; - thr_sh_template.wait_result = THREAD_WAITING; - thr_sh_template.interrupt_level = THREAD_ABORTSAFE; - thr_sh_template.state = TH_STACK_HANDOFF | TH_WAIT | TH_UNINT; - thr_sh_template.wake_active = FALSE; - thr_sh_template.active_callout = FALSE; - thr_sh_template.continuation = (void (*)(void))0; - thr_sh_template.top_act = THR_ACT_NULL; - - thr_sh_template.importance = 0; - thr_sh_template.sched_mode = 0; - thr_sh_template.safe_mode = 0; - - thr_sh_template.priority = 0; - thr_sh_template.sched_pri = 0; - thr_sh_template.max_priority = 0; - thr_sh_template.task_priority = 0; - thr_sh_template.promotions = 0; - thr_sh_template.pending_promoter_index = 0; - thr_sh_template.pending_promoter[0] = - thr_sh_template.pending_promoter[1] = NULL; - - thr_sh_template.current_quantum = 0; - - thr_sh_template.computation_metered = 0; - thr_sh_template.computation_epoch = 0; - - thr_sh_template.cpu_usage = 0; - thr_sh_template.cpu_delta = 0; - thr_sh_template.sched_usage = 0; - thr_sh_template.sched_delta = 0; - thr_sh_template.sched_stamp = 0; - thr_sh_template.sleep_stamp = 0; - thr_sh_template.safe_release = 0; - - thr_sh_template.bound_processor = PROCESSOR_NULL; - thr_sh_template.last_processor = PROCESSOR_NULL; - thr_sh_template.last_switch = 0; - - thr_sh_template.vm_privilege = FALSE; - - timer_init(&(thr_sh_template.user_timer)); - timer_init(&(thr_sh_template.system_timer)); - thr_sh_template.user_timer_save.low = 0; - thr_sh_template.user_timer_save.high = 0; - thr_sh_template.system_timer_save.low = 0; - thr_sh_template.system_timer_save.high = 0; - - thr_sh_template.active = FALSE; /* reset */ - - thr_sh_template.processor_set = PROCESSOR_SET_NULL; -#if MACH_HOST - thr_sh_template.may_assign = TRUE; - thr_sh_template.assign_active = FALSE; -#endif /* MACH_HOST */ - thr_sh_template.funnel_state = 0; - /* * Initialize other data structures used in * this module. 
@@ -580,12 +536,11 @@ thread_init(void) queue_init(&reaper_queue); simple_lock_init(&reaper_lock, ETAP_THREAD_REAPER); - thr_sh_template.funnel_lock = THR_FUNNEL_NULL; #ifndef MACHINE_STACK simple_lock_init(&stack_lock_data, ETAP_THREAD_STACK); /* Initialize the stack lock */ - if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE)) { /* Kernel stacks must be multiples of pages */ + if (KERNEL_STACK_SIZE < round_page_32(KERNEL_STACK_SIZE)) { /* Kernel stacks must be multiples of pages */ panic("thread_init: kernel stack size (%08X) must be a multiple of page size (%08X)\n", KERNEL_STACK_SIZE, PAGE_SIZE); } @@ -618,48 +573,11 @@ thread_init(void) #endif /* MACHINE_STACK */ -#if MACH_LDEBUG - thr_sh_template.mutex_count = 0; -#endif /* MACH_LDEBUG */ - - { - uint64_t abstime; - - clock_interval_to_absolutetime_interval( - std_quantum_us, NSEC_PER_USEC, &abstime); - assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - std_quantum = abstime; - - /* 250 us */ - clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime); - assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - min_std_quantum = abstime; - - /* 50 us */ - clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime); - assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - min_rt_quantum = abstime; - - /* 50 ms */ - clock_interval_to_absolutetime_interval( - 50, 1000*NSEC_PER_USEC, &abstime); - assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - max_rt_quantum = abstime; - - max_unsafe_computation = max_unsafe_quanta * std_quantum; - max_poll_computation = max_poll_quanta * std_quantum; - - sched_safe_duration = 2 * max_unsafe_quanta * - (std_quantum_us / (1000 * 1000)) * - (1 << SCHED_TICK_SHIFT); - } - - first_thread = TRUE; /* * Initialize any machine-dependent * per-thread structures necessary. */ - thread_machine_init(); + machine_thread_init(); } /* @@ -733,7 +651,7 @@ thread_terminate_self(void) * If so, and the task is associated with a BSD process, we * need to call BSD and let them clean up. */ - active_acts = hw_atomic_sub(&task->active_act_count, 1); + active_acts = hw_atomic_sub(&task->active_thread_count, 1); if (active_acts == 0 && task->bsd_info) proc_exit(task->bsd_info); @@ -741,17 +659,8 @@ thread_terminate_self(void) /* JMM - for now, no migration */ assert(!thr_act->lower); - s = splsched(); - thread_lock(thread); - thread->active = FALSE; - thread_unlock(thread); - splx(s); - thread_timer_terminate(); - /* flush any lazy HW state while in own context */ - thread_machine_flush(thr_act); - ipc_thread_terminate(thread); s = splsched(); @@ -770,53 +679,73 @@ thread_terminate_self(void) /* * Create a new thread. - * Doesn't start the thread running; It first must be attached to - * an activation - then use thread_go to start it. + * Doesn't start the thread running. 
*/ -kern_return_t -thread_create_shuttle( - thread_act_t thr_act, +static kern_return_t +thread_create_internal( + task_t parent_task, integer_t priority, void (*start)(void), - thread_t *new_thread) + thread_t *out_thread) { - kern_return_t result; - thread_t new_shuttle; - task_t parent_task = thr_act->task; + thread_t new_thread; processor_set_t pset; + static thread_t first_thread; /* * Allocate a thread and initialize static fields */ - if (first_thread) { - new_shuttle = &pageout_thread; - first_thread = FALSE; - } else - new_shuttle = (thread_t)zalloc(thread_shuttle_zone); - if (new_shuttle == THREAD_NULL) + if (first_thread == NULL) + new_thread = first_thread = current_act(); + else + new_thread = (thread_t)zalloc(thread_zone); + if (new_thread == NULL) return (KERN_RESOURCE_SHORTAGE); -#ifdef DEBUG - if (new_shuttle != &pageout_thread) - assert(!thr_act->thread); -#endif + if (new_thread != first_thread) + *new_thread = thread_template; + +#ifdef MACH_BSD + { + extern void *uthread_alloc(task_t, thread_act_t); - *new_shuttle = thr_sh_template; + new_thread->uthread = uthread_alloc(parent_task, new_thread); + if (new_thread->uthread == NULL) { + zfree(thread_zone, (vm_offset_t)new_thread); + return (KERN_RESOURCE_SHORTAGE); + } + } +#endif /* MACH_BSD */ - thread_lock_init(new_shuttle); - wake_lock_init(new_shuttle); - new_shuttle->sleep_stamp = sched_tick; + if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) { +#ifdef MACH_BSD + { + extern void uthread_free(task_t, void *, void *); + void *ut = new_thread->uthread; - /* - * Thread still isn't runnable yet (our caller will do - * that). Initialize runtime-dependent fields here. - */ - result = thread_machine_create(new_shuttle, thr_act, thread_continue); - assert (result == KERN_SUCCESS); + new_thread->uthread = NULL; + uthread_free(parent_task, ut, parent_task->bsd_info); + } +#endif /* MACH_BSD */ + zfree(thread_zone, (vm_offset_t)new_thread); + return (KERN_FAILURE); + } + + new_thread->task = parent_task; + + thread_lock_init(new_thread); + wake_lock_init(new_thread); + + mutex_init(&new_thread->lock, ETAP_THREAD_ACT); + + ipc_thr_act_init(parent_task, new_thread); - thread_start(new_shuttle, start); - thread_timer_setup(new_shuttle); - ipc_thread_init(new_shuttle); + ipc_thread_init(new_thread); + queue_init(&new_thread->held_ulocks); + act_prof_init(new_thread, parent_task); + + new_thread->continuation = start; + new_thread->sleep_stamp = sched_tick; pset = parent_task->processor_set; assert(pset == &default_pset); @@ -825,60 +754,78 @@ thread_create_shuttle( task_lock(parent_task); assert(parent_task->processor_set == pset); - /* - * Don't need to initialize because the context switch - * code will set it before it can be used. 
- */ - if (!parent_task->active) { + if ( !parent_task->active || + (parent_task->thread_count >= THREAD_MAX && + parent_task != kernel_task)) { task_unlock(parent_task); pset_unlock(pset); - thread_machine_destroy(new_shuttle); - zfree(thread_shuttle_zone, (vm_offset_t) new_shuttle); + +#ifdef MACH_BSD + { + extern void uthread_free(task_t, void *, void *); + void *ut = new_thread->uthread; + + new_thread->uthread = NULL; + uthread_free(parent_task, ut, parent_task->bsd_info); + } +#endif /* MACH_BSD */ + act_prof_deallocate(new_thread); + ipc_thr_act_terminate(new_thread); + machine_thread_destroy(new_thread); + zfree(thread_zone, (vm_offset_t) new_thread); return (KERN_FAILURE); } - act_attach(thr_act, new_shuttle, 0); + act_attach(new_thread, new_thread); + + task_reference_locked(parent_task); + + /* Cache the task's map */ + new_thread->map = parent_task->map; - /* Chain the thr_act onto the task's list */ - queue_enter(&parent_task->thr_acts, thr_act, thread_act_t, thr_acts); - parent_task->thr_act_count++; - parent_task->res_act_count++; + /* Chain the thread onto the task's list */ + queue_enter(&parent_task->threads, new_thread, thread_act_t, task_threads); + parent_task->thread_count++; + parent_task->res_thread_count++; /* So terminating threads don't need to take the task lock to decrement */ - hw_atomic_add(&parent_task->active_act_count, 1); + hw_atomic_add(&parent_task->active_thread_count, 1); /* Associate the thread with the processor set */ - pset_add_thread(pset, new_shuttle); + pset_add_thread(pset, new_thread); + + thread_timer_setup(new_thread); /* Set the thread's scheduling parameters */ if (parent_task != kernel_task) - new_shuttle->sched_mode |= TH_MODE_TIMESHARE; - new_shuttle->max_priority = parent_task->max_priority; - new_shuttle->task_priority = parent_task->priority; - new_shuttle->priority = (priority < 0)? parent_task->priority: priority; - if (new_shuttle->priority > new_shuttle->max_priority) - new_shuttle->priority = new_shuttle->max_priority; - new_shuttle->importance = - new_shuttle->priority - new_shuttle->task_priority; - new_shuttle->sched_stamp = sched_tick; - compute_priority(new_shuttle, FALSE); + new_thread->sched_mode |= TH_MODE_TIMESHARE; + new_thread->max_priority = parent_task->max_priority; + new_thread->task_priority = parent_task->priority; + new_thread->priority = (priority < 0)? 
parent_task->priority: priority; + if (new_thread->priority > new_thread->max_priority) + new_thread->priority = new_thread->max_priority; + new_thread->importance = + new_thread->priority - new_thread->task_priority; + new_thread->sched_stamp = sched_tick; + compute_priority(new_thread, FALSE); #if ETAP_EVENT_MONITOR new_thread->etap_reason = 0; new_thread->etap_trace = FALSE; #endif /* ETAP_EVENT_MONITOR */ - new_shuttle->active = TRUE; - thr_act->active = TRUE; + new_thread->active = TRUE; - *new_thread = new_shuttle; + *out_thread = new_thread; { long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4; + kdbg_trace_data(parent_task->bsd_info, &dbg_arg2); + KERNEL_DEBUG_CONSTANT( TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE, - (vm_address_t)new_shuttle, 0, 0, 0, 0); + (vm_address_t)new_thread, dbg_arg2, 0, 0, 0); kdbg_trace_string(parent_task->bsd_info, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4); @@ -896,34 +843,27 @@ extern void thread_bootstrap_return(void); kern_return_t thread_create( task_t task, - thread_act_t *new_act) + thread_act_t *new_thread) { kern_return_t result; thread_t thread; - thread_act_t act; - if (task == TASK_NULL) - return KERN_INVALID_ARGUMENT; + if (task == TASK_NULL || task == kernel_task) + return (KERN_INVALID_ARGUMENT); - result = act_create(task, &act); + result = thread_create_internal(task, -1, thread_bootstrap_return, &thread); if (result != KERN_SUCCESS) return (result); - result = thread_create_shuttle(act, -1, thread_bootstrap_return, &thread); - if (result != KERN_SUCCESS) { - act_deallocate(act); - return (result); - } - - act->user_stop_count = 1; - thread_hold(act); + thread->user_stop_count = 1; + thread_hold(thread); if (task->suspend_count > 0) - thread_hold(act); + thread_hold(thread); pset_unlock(task->processor_set); task_unlock(task); - *new_act = act; + *new_thread = thread; return (KERN_SUCCESS); } @@ -934,43 +874,36 @@ thread_create_running( int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count, - thread_act_t *new_act) /* OUT */ + thread_act_t *new_thread) { register kern_return_t result; thread_t thread; - thread_act_t act; - if (task == TASK_NULL) - return KERN_INVALID_ARGUMENT; + if (task == TASK_NULL || task == kernel_task) + return (KERN_INVALID_ARGUMENT); - result = act_create(task, &act); + result = thread_create_internal(task, -1, thread_bootstrap_return, &thread); if (result != KERN_SUCCESS) return (result); - result = thread_create_shuttle(act, -1, thread_bootstrap_return, &thread); - if (result != KERN_SUCCESS) { - act_deallocate(act); - return (result); - } - - act_lock(act); - result = act_machine_set_state(act, flavor, new_state, new_state_count); + result = machine_thread_set_state(thread, flavor, new_state, new_state_count); if (result != KERN_SUCCESS) { - act_unlock(act); pset_unlock(task->processor_set); task_unlock(task); - (void)thread_terminate(act); + thread_terminate(thread); + act_deallocate(thread); return (result); } + act_lock(thread); clear_wait(thread, THREAD_AWAKENED); - act->inited = TRUE; - act_unlock(act); + thread->started = TRUE; + act_unlock(thread); pset_unlock(task->processor_set); task_unlock(task); - *new_act = act; + *new_thread = thread; return (result); } @@ -978,45 +911,53 @@ thread_create_running( /* * kernel_thread: * - * Create and kernel thread in the specified task, and - * optionally start it running. + * Create a thread in the kernel task + * to execute in kernel context. 
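The scheduling-parameter block in thread_create_internal() above follows a small fixed recipe: a negative request inherits the task priority, the result is clamped to the task's ceiling, and importance is stored relative to the task priority. The same arithmetic, runnable; the numbers are illustrative, not real Mach priority bands.

#include <stdio.h>

struct sched_params { int priority, importance; };

/* Negative request => inherit the task priority; clamp to the ceiling;
 * importance is kept relative to the task's own priority. */
static struct sched_params
compute_initial_sched(int requested, int task_priority, int max_priority)
{
    struct sched_params p;

    p.priority = (requested < 0) ? task_priority : requested;
    if (p.priority > max_priority)
        p.priority = max_priority;
    p.importance = p.priority - task_priority;
    return p;
}

int main(void)
{
    struct sched_params p;

    p = compute_initial_sched(40, 31, 63);   /* explicit request */
    printf("priority=%d importance=%d\n", p.priority, p.importance);

    p = compute_initial_sched(-1, 31, 63);   /* inherit from the task */
    printf("priority=%d importance=%d\n", p.priority, p.importance);

    p = compute_initial_sched(80, 31, 63);   /* clamped to the ceiling */
    printf("priority=%d importance=%d\n", p.priority, p.importance);
    return 0;
}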
*/ thread_t -kernel_thread_with_priority( - task_t task, - integer_t priority, +kernel_thread_create( void (*start)(void), - boolean_t alloc_stack, - boolean_t start_running) + integer_t priority) { kern_return_t result; + task_t task = kernel_task; thread_t thread; - thread_act_t act; - result = act_create(task, &act); + result = thread_create_internal(task, priority, start, &thread); if (result != KERN_SUCCESS) return (THREAD_NULL); - result = thread_create_shuttle(act, priority, start, &thread); - if (result != KERN_SUCCESS) { - act_deallocate(act); - return (THREAD_NULL); - } - pset_unlock(task->processor_set); task_unlock(task); - if (alloc_stack) - thread_doswapin(thread); + thread_doswapin(thread); + assert(thread->kernel_stack != 0); + thread->reserved_stack = thread->kernel_stack; + + act_deallocate(thread); + + return (thread); +} + +thread_t +kernel_thread_with_priority( + void (*start)(void), + integer_t priority) +{ + thread_t thread; - act_lock(act); - if (start_running) - clear_wait(thread, THREAD_AWAKENED); - act->inited = TRUE; - act_unlock(act); + thread = kernel_thread_create(start, priority); + if (thread == THREAD_NULL) + return (THREAD_NULL); - act_deallocate(act); + act_lock(thread); + clear_wait(thread, THREAD_AWAKENED); + thread->started = TRUE; + act_unlock(thread); +#ifdef i386 + thread_bind(thread, master_processor); +#endif /* i386 */ return (thread); } @@ -1025,7 +966,10 @@ kernel_thread( task_t task, void (*start)(void)) { - return kernel_thread_with_priority(task, -1, start, FALSE, TRUE); + if (task != kernel_task) + panic("kernel_thread"); + + return kernel_thread_with_priority(start, -1); } unsigned int c_weird_pset_ref_exit = 0; /* pset code raced us */ @@ -1065,7 +1009,7 @@ thread_deallocate( return; if (thread == current_thread()) - panic("thread deallocating itself"); + panic("thread_deallocate"); /* * There is a dangling pointer to the thread from the @@ -1090,15 +1034,18 @@ thread_deallocate( pset_deallocate(pset); - if (thread->stack_privilege != 0) { - if (thread->stack_privilege != thread->kernel_stack) - stack_free_stack(thread->stack_privilege); - thread->stack_privilege = 0; + if (thread->reserved_stack != 0) { + if (thread->reserved_stack != thread->kernel_stack) + stack_free_stack(thread->reserved_stack); + thread->reserved_stack = 0; } - /* frees kernel stack & other MD resources */ - thread_machine_destroy(thread); - zfree(thread_shuttle_zone, (vm_offset_t) thread); + if (thread->kernel_stack != 0) + stack_free(thread); + + machine_thread_destroy(thread); + + zfree(thread_zone, (vm_offset_t) thread); } void @@ -1312,7 +1259,7 @@ thread_doreap( thr_act = thread_lock_act(thread); assert(thr_act && thr_act->thread == thread); - act_locked_act_reference(thr_act); + act_reference_locked(thr_act); /* * Replace `act_unlock_thread()' with individual @@ -1364,10 +1311,6 @@ reaper_thread_continue(void) static void reaper_thread(void) { - thread_t self = current_thread(); - - stack_privilege(self); - reaper_thread_continue(); /*NOTREACHED*/ } @@ -1375,7 +1318,7 @@ reaper_thread(void) void thread_reaper_init(void) { - kernel_thread(kernel_task, reaper_thread); + kernel_thread_with_priority(reaper_thread, MINPRI_KERNEL); } kern_return_t @@ -1425,16 +1368,17 @@ thread_get_assignment( } /* - * thread_wire: + * thread_wire_internal: * * Specify that the target thread must always be able * to run and to allocate memory. 
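thread_wire() is reduced above to a thin exported wrapper over thread_wire_internal(), which gains an optional prev_state out-parameter that the wrapper simply passes as NULL. A sketch of that wrapper-with-optional-out-parameter shape, with hypothetical names; the pattern lets in-kernel callers recover the old value while the user-facing signature stays unchanged.

#include <stdbool.h>
#include <stdio.h>

static bool wired_state;   /* stands in for thread->vm_privilege */

static int set_wired_internal(bool wired, bool *prev_state)
{
    if (prev_state)
        *prev_state = wired_state;   /* report the old value only when asked */
    wired_state = wired;
    return 0;
}

/* User-facing wrapper: same behaviour, no out-parameter. */
static int set_wired(bool wired)
{
    return set_wired_internal(wired, NULL);
}

int main(void)
{
    bool prev;

    set_wired(true);
    set_wired_internal(false, &prev);
    printf("previous=%d\n", prev);   /* prints 1 */
    return 0;
}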
*/ kern_return_t -thread_wire( +thread_wire_internal( host_priv_t host_priv, thread_act_t thr_act, - boolean_t wired) + boolean_t wired, + boolean_t *prev_state) { spl_t s; thread_t thread; @@ -1453,7 +1397,6 @@ thread_wire( /* * This implementation only works for the current thread. - * See stack_privilege. */ if (thr_act != current_act()) return KERN_INVALID_ARGUMENT; @@ -1461,6 +1404,10 @@ thread_wire( s = splsched(); thread_lock(thread); + if (prev_state) { + *prev_state = thread->vm_privilege; + } + if (wired) { if (thread->vm_privilege == FALSE) vm_page_free_reserve(1); /* XXX */ @@ -1478,46 +1425,20 @@ thread_wire( return KERN_SUCCESS; } -/* - * thread_collect_scan: - * - * Attempt to free resources owned by threads. - */ - -void -thread_collect_scan(void) -{ - /* This code runs very quickly! */ -} - -/* Also disabled in vm/vm_pageout.c */ -boolean_t thread_collect_allowed = FALSE; -unsigned thread_collect_last_tick = 0; -unsigned thread_collect_max_rate = 0; /* in ticks */ /* - * consider_thread_collect: + * thread_wire: * - * Called by the pageout daemon when the system needs more free pages. + * User-api wrapper for thread_wire_internal() */ +kern_return_t +thread_wire( + host_priv_t host_priv, + thread_act_t thr_act, + boolean_t wired) -void -consider_thread_collect(void) { - /* - * By default, don't attempt thread collection more frequently - * than once a second. - */ - - if (thread_collect_max_rate == 0) - thread_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1; - - if (thread_collect_allowed && - (sched_tick > - (thread_collect_last_tick + thread_collect_max_rate))) { - thread_collect_last_tick = sched_tick; - thread_collect_scan(); - } + return thread_wire_internal(host_priv, thr_act, wired, NULL); } kern_return_t @@ -1545,7 +1466,7 @@ host_stack_usage( *reservedp = 0; *totalp = total; - *spacep = *residentp = total * round_page(KERNEL_STACK_SIZE); + *spacep = *residentp = total * round_page_32(KERNEL_STACK_SIZE); *maxusagep = maxusage; *maxstackp = 0; return KERN_SUCCESS; @@ -1642,29 +1563,10 @@ processor_set_stack_usage( maxusage = 0; maxstack = 0; while (i > 0) { - int cpu; thread_t thread = threads[--i]; - vm_offset_t stack = 0; - - /* - * thread->kernel_stack is only accurate if the - * thread isn't swapped and is not executing. - * - * Of course, we don't have the appropriate locks - * for these shenanigans. 
- */ - stack = thread->kernel_stack; - - for (cpu = 0; cpu < NCPUS; cpu++) - if (cpu_to_processor(cpu)->cpu_data->active_thread == thread) { - stack = active_stacks[cpu]; - break; - } - - if (stack != 0) { + if (thread->kernel_stack != 0) total++; - } thread_deallocate(thread); } @@ -1673,7 +1575,7 @@ processor_set_stack_usage( kfree(addr, size); *totalp = total; - *residentp = *spacep = total * round_page(KERNEL_STACK_SIZE); + *residentp = *spacep = total * round_page_32(KERNEL_STACK_SIZE); *maxusagep = maxusage; *maxstackp = maxstack; return KERN_SUCCESS; @@ -1735,6 +1637,23 @@ funnel_unlock( fnl->fnl_mtxrelease = current_thread(); } +int refunnel_hint_enabled = 0; + +boolean_t +refunnel_hint( + thread_t thread, + wait_result_t wresult) +{ + if ( !(thread->funnel_state & TH_FN_REFUNNEL) || + wresult != THREAD_AWAKENED ) + return (FALSE); + + if (!refunnel_hint_enabled) + return (FALSE); + + return (mutex_preblock(thread->funnel_lock->fnl_mutex, thread)); +} + funnel_t * thread_funnel_get( void) @@ -1855,7 +1774,17 @@ thread_get_cont_arg(void) #undef thread_should_halt boolean_t thread_should_halt( - thread_shuttle_t th) + thread_t th) { return(thread_should_halt_fast(th)); } + +vm_offset_t min_valid_stack_address(void) +{ + return vm_map_min(stack_map); +} + +vm_offset_t max_valid_stack_address(void) +{ + return vm_map_max(stack_map); +} diff --git a/osfmk/kern/thread.h b/osfmk/kern/thread.h index 13c981605..dfc7057aa 100644 --- a/osfmk/kern/thread.h +++ b/osfmk/kern/thread.h @@ -85,10 +85,10 @@ #include #include #include -#include -#include +#include #include #include +#include #include /* for current_thread */ #include @@ -98,23 +98,21 @@ /* * Logically, a thread of control consists of two parts: * - * a thread_shuttle, which may migrate due to resource contention - * and - * a thread_activation, which remains attached to a task. + * + A thread_shuttle, which may migrate due to resource contention + * + * + A thread_activation, which remains attached to a task. * * The thread_shuttle contains scheduling info, accounting info, * and links to the thread_activation within which the shuttle is * currently operating. * - * It might make sense to have the thread_shuttle be a proper sub-structure - * of the thread, with the thread containing links to both the shuttle and - * activation. In order to reduce the scope and complexity of source - * changes and the overhead of maintaining these linkages, we have subsumed - * the shuttle into the thread, calling it a thread_shuttle. + * An activation always has a valid task pointer, and it is always constant. + * The activation is only linked onto the task's activation list until + * the activation is terminated. * - * User accesses to threads always come in via the user's thread port, - * which gets translated to a pointer to the target thread_activation. + * The thread holds a reference on the activation while using it. */ + #include #ifdef __APPLE_API_PRIVATE @@ -122,7 +120,9 @@ #ifdef MACH_KERNEL_PRIVATE #include -#include +#include + +#include #include #include #include @@ -140,22 +140,15 @@ #include #include #include +#include +#include #include +#include + #include +#include -/* - * Kernel accesses intended to effect the entire thread, typically use - * a pointer to the thread_shuttle (current_thread()) as the target of - * their operations. This makes sense given that we have subsumed the - * shuttle into the thread_shuttle, eliminating one set of linkages. 
- * Operations effecting only the shuttle may use a thread_shuttle_t - * to indicate this. - * - * The current_act() macro returns a pointer to the current thread_act, while - * the current_thread() macro returns a pointer to the currently active - * thread_shuttle (representing the thread in its entirety). - */ -struct thread_shuttle { +struct thread { /* * NOTE: The runq field in the thread structure has an unusual * locking protocol. If its value is RUN_QUEUE_NULL, then it is @@ -181,7 +174,7 @@ struct thread_shuttle { /* Data updated during assert_wait/thread_wakeup */ - decl_simple_lock_data(,lock) /* scheduling lock (thread_lock()) */ + decl_simple_lock_data(,sched_lock) /* scheduling lock (thread_lock()) */ decl_simple_lock_data(,wake_lock) /* covers wake_active (wake_lock())*/ boolean_t wake_active; /* Someone is waiting for this */ int at_safe_point; /* thread_abort_safely allowed */ @@ -199,23 +192,23 @@ struct thread_shuttle { #define TH_FN_REFUNNEL 0x2 /* re-acquire funnel on dispatch */ vm_offset_t kernel_stack; /* current kernel stack */ - vm_offset_t stack_privilege; /* reserved kernel stack */ + vm_offset_t reserved_stack; /* reserved kernel stack */ /* Thread state: */ int state; /* * Thread states [bits or'ed] */ -#define TH_WAIT 0x01 /* thread is queued for waiting */ -#define TH_SUSP 0x02 /* thread has been asked to stop */ -#define TH_RUN 0x04 /* thread is running or on runq */ -#define TH_UNINT 0x08 /* thread is waiting uninteruptibly */ -#define TH_TERMINATE 0x10 /* thread is halting at termination */ +#define TH_WAIT 0x01 /* queued for waiting */ +#define TH_SUSP 0x02 /* stopped or requested to stop */ +#define TH_RUN 0x04 /* running or on runq */ +#define TH_UNINT 0x08 /* waiting uninteruptibly */ +#define TH_TERMINATE 0x10 /* halted at termination */ -#define TH_ABORT 0x20 /* abort interruptible waits */ -#define TH_ABORT_SAFELY 0x40 /* ... but only those at safe point */ +#define TH_ABORT 0x20 /* abort interruptible waits */ +#define TH_ABORT_SAFELY 0x40 /* ... but only those at safe point */ -#define TH_IDLE 0x80 /* thread is an idle thread */ +#define TH_IDLE 0x80 /* processor idle thread */ #define TH_SCHED_STATE (TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT) @@ -246,12 +239,14 @@ struct thread_shuttle { integer_t importance; /* task-relative importance */ - /* time constraint parameters */ + /* real-time parameters */ struct { /* see mach/thread_policy.h */ uint32_t period; uint32_t computation; uint32_t constraint; boolean_t preemptible; + + uint64_t deadline; } realtime; uint32_t current_quantum; /* duration of current quantum */ @@ -269,7 +264,7 @@ struct thread_shuttle { integer_t safe_mode; /* saved mode during fail-safe */ natural_t safe_release; /* when to release fail-safe */ - /* Used in priority computations */ + /* Statistics and timesharing calculations */ natural_t sched_stamp; /* when priority was updated */ natural_t cpu_usage; /* exp. decaying cpu usage [%cpu] */ natural_t cpu_delta; /* cpu usage since last update */ @@ -323,17 +318,83 @@ struct thread_shuttle { mach_port_t ith_rpc_reply; /* reply port for kernel RPCs */ /* Ast/Halt data structures */ - boolean_t active; /* thread is active */ vm_offset_t recover; /* page fault recover(copyin/out) */ int ref_count; /* number of references to me */ /* Processor set info */ - queue_chain_t pset_threads; /* list of all shuttles in pset */ + queue_chain_t pset_threads; /* list of all threads in pset */ #if MACH_HOST boolean_t may_assign; /* may assignment change? 
*/ boolean_t assign_active; /* waiting for may_assign */ #endif /* MACH_HOST */ + /* Activation */ + queue_chain_t task_threads; + + /*** Machine-dependent state ***/ + struct MachineThrAct mact; + + /* Task membership */ + struct task *task; + vm_map_t map; + + decl_mutex_data(,lock) + int act_ref_count; + + /* Associated shuttle */ + struct thread *thread; + + /* + * Next higher and next lower activation on + * the thread's activation stack. + */ + struct thread *higher, *lower; + + /* Kernel holds on this thread */ + int suspend_count; + + /* User level suspensions */ + int user_stop_count; + + /* Pending thread ast(s) */ + ast_t ast; + + /* Miscellaneous bits guarded by lock mutex */ + uint32_t + /* Indicates that the thread has not been terminated */ + active:1, + + /* Indicates that the thread has been started after creation */ + started:1, + :0; + + /* Return Handers */ + struct ReturnHandler { + struct ReturnHandler *next; + void (*handler)( + struct ReturnHandler *rh, + struct thread *act); + } *handlers, special_handler; + + /* Ports associated with this thread */ + struct ipc_port *ith_self; /* not a right, doesn't hold ref */ + struct ipc_port *ith_sself; /* a send right */ + struct exception_action exc_actions[EXC_TYPES_COUNT]; + + /* Owned ulocks (a lock set element) */ + queue_head_t held_ulocks; + +#if MACH_PROF + /* Profiling */ + boolean_t profiled; + boolean_t profiled_own; + struct prof_data *profil_buffer; +#endif /* MACH_PROF */ + +#ifdef MACH_BSD + void *uthread; +#endif + /* BEGIN TRACING/DEBUG */ #if MACH_LOCK_MON @@ -380,197 +441,240 @@ struct thread_shuttle { #define sth_result saved.sema.result #define sth_continuation saved.sema.continuation -struct funnel_lock { - int fnl_type; /* funnel type */ - mutex_t *fnl_mutex; /* underlying mutex for the funnel */ - void * fnl_mtxholder; /* thread (last)holdng mutex */ - void * fnl_mtxrelease; /* thread (last)releasing mutex */ - mutex_t *fnl_oldmutex; /* Mutex before collapsing split funnel */ -}; - -typedef struct funnel_lock funnel_t; - -extern thread_act_t active_kloaded[NCPUS]; /* "" kernel-loaded acts */ -extern vm_offset_t active_stacks[NCPUS]; /* active kernel stacks */ -extern vm_offset_t kernel_stack[NCPUS]; +extern void thread_bootstrap(void); -extern struct thread_shuttle pageout_thread; +extern void thread_init(void); -#ifndef MACHINE_STACK_STASH -/* - * MD Macro to fill up global stack state, - * keeping the MD structure sizes + games private - */ -#define MACHINE_STACK_STASH(stack) \ -MACRO_BEGIN \ - mp_disable_preemption(); \ - active_stacks[cpu_number()] = (stack); \ - kernel_stack[cpu_number()] = (stack) + KERNEL_STACK_SIZE; \ - mp_enable_preemption(); \ -MACRO_END -#endif /* MACHINE_STACK_STASH */ - -/* - * Kernel-only routines - */ - -/* Initialize thread module */ -extern void thread_init(void); - -/* Take reference on thread (make sure it doesn't go away) */ -extern void thread_reference( - thread_t thread); +extern void thread_reaper_init(void); -/* Release reference on thread */ -extern void thread_deallocate( - thread_t thread); +extern void thread_reference( + thread_t thread); -/* Set task priority of member thread */ -extern void thread_task_priority( - thread_t thread, - integer_t priority, - integer_t max_priority); +extern void thread_deallocate( + thread_t thread); -/* Start a thread at specified routine */ -#define thread_start(thread, start) \ - (thread)->continuation = (start) - -/* Reaps threads waiting to be destroyed */ -extern void thread_reaper_init(void); - - -/* Insure thread 
always has a kernel stack */ -extern void stack_privilege( - thread_t thread); +extern void thread_terminate_self(void); -extern void consider_thread_collect(void); +extern void thread_hold( + thread_act_t thread); -/* - * Arguments to specify aggressiveness to thread halt. - * Can't have MUST_HALT and SAFELY at the same time. - */ -#define THREAD_HALT_NORMAL 0 -#define THREAD_HALT_MUST_HALT 1 /* no deadlock checks */ -#define THREAD_HALT_SAFELY 2 /* result must be restartable */ +extern void thread_release( + thread_act_t thread); -/* - * Macro-defined routines - */ - -#define thread_pcb(th) ((th)->pcb) - -#define thread_lock_init(th) simple_lock_init(&(th)->lock, ETAP_THREAD_LOCK) -#define thread_lock(th) simple_lock(&(th)->lock) -#define thread_unlock(th) simple_unlock(&(th)->lock) -#define thread_lock_try(th) simple_lock_try(&(th)->lock) +#define thread_lock_init(th) simple_lock_init(&(th)->sched_lock, ETAP_THREAD_LOCK) +#define thread_lock(th) simple_lock(&(th)->sched_lock) +#define thread_unlock(th) simple_unlock(&(th)->sched_lock) +#define thread_lock_try(th) simple_lock_try(&(th)->sched_lock) #define thread_should_halt_fast(thread) \ (!(thread)->top_act || !(thread)->top_act->active) -#define thread_should_halt(thread) thread_should_halt_fast(thread) - #define thread_reference_locked(thread) ((thread)->ref_count++) -/* - * Lock to cover wake_active only; like thread_lock(), is taken - * at splsched(). Used to avoid calling into scheduler with a - * thread_lock() held. Precedes thread_lock() (and other scheduling- - * related locks) in the system lock ordering. - */ #define wake_lock_init(th) \ simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE) #define wake_lock(th) simple_lock(&(th)->wake_lock) #define wake_unlock(th) simple_unlock(&(th)->wake_lock) #define wake_lock_try(th) simple_lock_try(&(th)->wake_lock) -static __inline__ vm_offset_t current_stack(void); -static __inline__ vm_offset_t -current_stack(void) -{ - vm_offset_t ret; - - mp_disable_preemption(); - ret = active_stacks[cpu_number()]; - mp_enable_preemption(); - return ret; -} +extern vm_offset_t stack_alloc( + thread_t thread, + void (*start)(thread_t)); -extern void pcb_module_init(void); - -extern void pcb_init( - thread_act_t thr_act); +extern boolean_t stack_alloc_try( + thread_t thread, + void (*start)(thread_t)); -extern void pcb_terminate( - thread_act_t thr_act); +extern void stack_free( + thread_t thread); -extern void pcb_collect( - thread_act_t thr_act); +extern void stack_free_stack( + vm_offset_t stack); -extern void pcb_user_to_kernel( - thread_act_t thr_act); +extern void stack_collect(void); extern kern_return_t thread_setstatus( - thread_act_t thr_act, + thread_act_t thread, int flavor, thread_state_t tstate, mach_msg_type_number_t count); extern kern_return_t thread_getstatus( - thread_act_t thr_act, + thread_act_t thread, int flavor, thread_state_t tstate, mach_msg_type_number_t *count); -extern boolean_t stack_alloc_try( - thread_t thread, - void (*start_pos)(thread_t)); - -/* This routine now used only internally */ extern kern_return_t thread_info_shuttle( - thread_act_t thr_act, + thread_act_t thread, thread_flavor_t flavor, thread_info_t thread_info_out, mach_msg_type_number_t *thread_info_count); -/* Machine-dependent routines */ -extern void thread_machine_init(void); +extern void thread_task_priority( + thread_t thread, + integer_t priority, + integer_t max_priority); -extern void thread_machine_set_current( - thread_t thread ); +extern kern_return_t thread_get_special_port( + thread_act_t 
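The comment removed above documents the ordering rule that survives the rename: wake_lock precedes thread_lock (now sched_lock) in the system lock ordering, so the scheduler is never entered with the thread lock held. A sketch of the convention with POSIX mutexes standing in for the simple locks (wakeup_path is an illustrative name; the kernel additionally takes both at splsched()):

#include <pthread.h>
#include <stdio.h>

/* Userspace stand-ins for the two per-thread locks. */
static pthread_mutex_t wake_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

static void wakeup_path(void)
{
	/* wake_lock precedes sched_lock in the ordering, so any path that
	 * needs both must acquire them in this order to avoid deadlock. */
	pthread_mutex_lock(&wake_lock);
	pthread_mutex_lock(&sched_lock);

	puts("both locks held in the documented order");

	pthread_mutex_unlock(&sched_lock);
	pthread_mutex_unlock(&wake_lock);
}

int main(void)
{
	wakeup_path();
	return 0;
}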
thread, + int which, + ipc_port_t *port); -extern kern_return_t thread_machine_create( - thread_t thread, - thread_act_t thr_act, - void (*start_pos)(thread_t)); +extern kern_return_t thread_set_special_port( + thread_act_t thread, + int which, + ipc_port_t port); -extern void thread_set_syscall_return( - thread_t thread, - kern_return_t retval); +extern thread_act_t switch_act( + thread_act_t act); -extern void thread_machine_destroy( - thread_t thread ); +extern thread_t kernel_thread_create( + void (*start)(void), + integer_t priority); -extern void thread_machine_flush( - thread_act_t thr_act); +extern thread_t kernel_thread_with_priority( + void (*start)(void), + integer_t priority); -extern thread_t kernel_thread_with_priority( - task_t task, - integer_t priority, - void (*start)(void), - boolean_t alloc_stack, - boolean_t start_running); +extern void machine_stack_attach( + thread_t thread, + vm_offset_t stack, + void (*start)(thread_t)); -extern void thread_terminate_self(void); +extern vm_offset_t machine_stack_detach( + thread_t thread); + +extern void machine_stack_handoff( + thread_t old, + thread_t new); + +extern thread_t machine_switch_context( + thread_t old_thread, + thread_continue_t continuation, + thread_t new_thread); + +extern void machine_load_context( + thread_t thread); + +extern void machine_switch_act( + thread_t thread, + thread_act_t old, + thread_act_t new); -extern void funnel_lock(funnel_t *); +extern kern_return_t machine_thread_set_state( + thread_act_t act, + thread_flavor_t flavor, + thread_state_t state, + mach_msg_type_number_t count); + +extern kern_return_t machine_thread_get_state( + thread_act_t act, + thread_flavor_t flavor, + thread_state_t state, + mach_msg_type_number_t *count); + +extern kern_return_t machine_thread_dup( + thread_act_t self, + thread_act_t target); + +extern void machine_thread_init(void); + +extern kern_return_t machine_thread_create( + thread_t thread, + task_t task); + +extern void machine_thread_destroy( + thread_t thread); + +extern void machine_thread_set_current( + thread_t thread); + +extern void machine_thread_terminate_self(void); + +/* + * XXX Funnel locks XXX + */ + +struct funnel_lock { + int fnl_type; /* funnel type */ + mutex_t *fnl_mutex; /* underlying mutex for the funnel */ + void * fnl_mtxholder; /* thread (last)holding mutex */ + void * fnl_mtxrelease; /* thread (last)releasing mutex */ + mutex_t *fnl_oldmutex; /* Mutex before collapsing split funnel */ +}; + +typedef struct funnel_lock funnel_t; + +extern void funnel_lock( + funnel_t *lock); + +extern void funnel_unlock( + funnel_t *lock); + +typedef struct ReturnHandler ReturnHandler; + +#define act_lock(act) mutex_lock(&(act)->lock) +#define act_lock_try(act) mutex_try(&(act)->lock) +#define act_unlock(act) mutex_unlock(&(act)->lock) + +#define act_reference_locked(act) \ +MACRO_BEGIN \ + (act)->act_ref_count++; \ +MACRO_END + +#define act_deallocate_locked(act) \ +MACRO_BEGIN \ + if (--(act)->act_ref_count == 0) \ + panic("act_deallocate_locked"); \ +MACRO_END + +extern void act_reference( + thread_act_t act); + +extern void act_deallocate( + thread_act_t act); + +extern void act_attach( + thread_act_t act, + thread_t thread); + +extern void act_detach( + thread_act_t act); + +extern thread_t act_lock_thread( + thread_act_t act); + +extern void act_unlock_thread( + thread_act_t act); + +extern thread_act_t thread_lock_act( + thread_t thread); -extern void funnel_unlock(funnel_t *); +extern void thread_unlock_act( + thread_t thread); + +extern void 
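act_reference()/act_deallocate() and the *_locked macros above pair a mutex-guarded reference count with teardown on the final release. A userspace model of that discipline (the *_model names are hypothetical; the kernel version also unlinks the act from its task, as the thread_act.c changes below show):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct act {
	pthread_mutex_t lock;
	int ref_count;
};

static struct act *act_create_model(void)
{
	struct act *a = malloc(sizeof(*a));
	if (a == NULL)
		abort();
	pthread_mutex_init(&a->lock, NULL);
	a->ref_count = 1;               /* creator's reference */
	return a;
}

static void act_reference_model(struct act *a)
{
	if (a == NULL)
		return;
	pthread_mutex_lock(&a->lock);
	a->ref_count++;
	pthread_mutex_unlock(&a->lock);
}

static void act_deallocate_model(struct act *a)
{
	int last;

	if (a == NULL)
		return;
	pthread_mutex_lock(&a->lock);
	last = (--a->ref_count == 0);
	pthread_mutex_unlock(&a->lock);
	if (last) {                     /* nobody left holding a reference */
		pthread_mutex_destroy(&a->lock);
		free(a);
		puts("freed");
	}
}

int main(void)
{
	struct act *a = act_create_model();

	act_reference_model(a);
	act_deallocate_model(a);
	act_deallocate_model(a);        /* final release frees the act */
	return 0;
}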
act_execute_returnhandlers(void); + +extern void install_special_handler( + thread_act_t thread); + +extern void special_handler( + ReturnHandler *rh, + thread_act_t act); #else /* MACH_KERNEL_PRIVATE */ typedef struct funnel_lock funnel_t; -extern boolean_t thread_should_halt(thread_t); +extern boolean_t thread_should_halt( + thread_t thread); + +extern void act_reference( + thread_act_t act); + +extern void act_deallocate( + thread_act_t act); #endif /* MACH_KERNEL_PRIVATE */ @@ -578,7 +682,8 @@ extern thread_t kernel_thread( task_t task, void (*start)(void)); -extern void thread_set_cont_arg(int); +extern void thread_set_cont_arg( + int arg); extern int thread_get_cont_arg(void); @@ -587,20 +692,62 @@ extern boolean_t is_thread_running(thread_act_t); /* True is TH_RUN */ extern boolean_t is_thread_idle(thread_t); /* True is TH_IDLE */ extern kern_return_t get_thread_waitresult(thread_t); +typedef void (thread_apc_handler_t)(thread_act_t); + +extern kern_return_t thread_apc_set(thread_act_t, thread_apc_handler_t); +extern kern_return_t thread_apc_clear(thread_act_t, thread_apc_handler_t); + +extern vm_map_t swap_act_map(thread_act_t, vm_map_t); + +extern void *get_bsdthread_info(thread_act_t); +extern void set_bsdthread_info(thread_act_t, void *); +extern task_t get_threadtask(thread_act_t); + #endif /* __APPLE_API_PRIVATE */ +#ifdef __APPLE_API_UNSTABLE + +#if !defined(MACH_KERNEL_PRIVATE) + +extern thread_act_t current_act(void); + +#endif /* MACH_KERNEL_PRIVATE */ + +#endif /* __APPLE_API_UNSTABLE */ + #ifdef __APPLE_API_EVOLVING +/* + * XXX Funnel locks XXX + */ + #define THR_FUNNEL_NULL (funnel_t *)0 -extern funnel_t * funnel_alloc(int); +extern funnel_t *funnel_alloc( + int type); -extern funnel_t * thread_funnel_get(void); +extern funnel_t *thread_funnel_get(void); -extern boolean_t thread_funnel_set(funnel_t * fnl, boolean_t funneled); +extern boolean_t thread_funnel_set( + funnel_t *lock, + boolean_t funneled); -extern boolean_t thread_funnel_merge(funnel_t * fnl, funnel_t * otherfnl); +extern boolean_t thread_funnel_merge( + funnel_t *lock, + funnel_t *other); #endif /* __APPLE_API_EVOLVING */ +#ifdef __APPLE_API_PRIVATE + +extern boolean_t refunnel_hint( + thread_t thread, + wait_result_t wresult); + +/* For use by CHUD */ +vm_offset_t min_valid_stack_address(void); +vm_offset_t max_valid_stack_address(void); + +#endif /* __APPLE_API_PRIVATE */ + #endif /* _KERN_THREAD_H_ */ diff --git a/osfmk/kern/thread_act.c b/osfmk/kern/thread_act.c index 129f39d9a..31d24933b 100644 --- a/osfmk/kern/thread_act.c +++ b/osfmk/kern/thread_act.c @@ -75,19 +75,10 @@ #include #include -/* - * Debugging printf control - */ -#if MACH_ASSERT -unsigned int watchacts = 0 /* WA_ALL */ - ; /* Do-it-yourself & patchable */ -#endif - /* * Track the number of times we need to swapin a thread to deallocate it. */ int act_free_swapin = 0; -boolean_t first_act; /* * Forward declarations for functions local to this file. @@ -102,17 +93,12 @@ kern_return_t act_get_state_locked(thread_act_t, int, mach_msg_type_number_t *); void act_set_astbsd(thread_act_t); void act_set_apc(thread_act_t); -void act_user_to_kernel(thread_act_t); void act_ulock_release_all(thread_act_t thr_act); void install_special_handler_locked(thread_act_t); static void act_disable(thread_act_t); -struct thread_activation pageout_act; - -static zone_t thr_act_zone; - /* * Thread interfaces accessed via a thread_activation: */ @@ -155,7 +141,7 @@ thread_terminate_internal( act_lock(act); } - clear_wait(thread, act->inited? 
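thread_funnel_set() in the __APPLE_API_EVOLVING section above acquires or releases a funnel and reports whether it was already held, so callers can save the funnel state on entry and restore it on exit. A rough userspace model under that reading (thread_funnel_set_model and its handling of an already-held funnel are assumptions, not the kernel's exact semantics):

#include <pthread.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t mutex;   /* stands in for fnl_mutex */
	pthread_t holder;
	int held;
} funnel_t;

/* Acquire (funneled != 0) or release (funneled == 0); return whether the
 * calling thread already held the funnel before the call. */
static int thread_funnel_set_model(funnel_t *fnl, int funneled)
{
	int was_held = fnl->held && pthread_equal(fnl->holder, pthread_self());

	if (funneled && !was_held) {
		pthread_mutex_lock(&fnl->mutex);
		fnl->holder = pthread_self();
		fnl->held = 1;
	} else if (!funneled && was_held) {
		fnl->held = 0;
		pthread_mutex_unlock(&fnl->mutex);
	}
	return was_held;
}

static funnel_t kernel_flock = { PTHREAD_MUTEX_INITIALIZER };

int main(void)
{
	int funnel_state = thread_funnel_set_model(&kernel_flock, 1);

	puts("inside funneled (serialized) code");

	thread_funnel_set_model(&kernel_flock, funnel_state);  /* restore */
	return 0;
}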
THREAD_INTERRUPTED: THREAD_AWAKENED); + clear_wait(thread, act->started? THREAD_INTERRUPTED: THREAD_AWAKENED); act_unlock_thread(act); return (result); @@ -173,8 +159,7 @@ thread_terminate( if (act == THR_ACT_NULL) return (KERN_INVALID_ARGUMENT); - if ( (act->task == kernel_task || - act->kernel_loaded ) && + if ( act->task == kernel_task && act != current_act() ) return (KERN_FAILURE); @@ -186,10 +171,10 @@ thread_terminate( * code - and all threads finish their own termination in the * special handler APC. */ - if ( act->task == kernel_task || - act->kernel_loaded ) { + if (act->task == kernel_task) { + ml_set_interrupts_enabled(FALSE); assert(act == current_act()); - ast_taken(AST_APC, FALSE); + ast_taken(AST_APC, TRUE); panic("thread_terminate"); } @@ -211,7 +196,7 @@ thread_hold( if (act->suspend_count++ == 0) { install_special_handler(act); - if ( act->inited && + if ( act->started && thread != THREAD_NULL && thread->top_act == act ) thread_wakeup_one(&act->suspend_count); @@ -234,9 +219,9 @@ thread_release( --act->suspend_count == 0 && thread != THREAD_NULL && thread->top_act == act ) { - if (!act->inited) { + if (!act->started) { clear_wait(thread, THREAD_AWAKENED); - act->inited = TRUE; + act->started = TRUE; } else thread_wakeup_one(&act->suspend_count); @@ -249,7 +234,7 @@ thread_suspend( { thread_t thread; - if (act == THR_ACT_NULL) + if (act == THR_ACT_NULL || act->task == kernel_task) return (KERN_INVALID_ARGUMENT); thread = act_lock_thread(act); @@ -265,7 +250,7 @@ thread_suspend( if ( thread != current_thread() && thread != THREAD_NULL && thread->top_act == act ) { - assert(act->inited); + assert(act->started); thread_wakeup_one(&act->suspend_count); act_unlock_thread(act); @@ -287,7 +272,7 @@ thread_resume( kern_return_t result = KERN_SUCCESS; thread_t thread; - if (act == THR_ACT_NULL) + if (act == THR_ACT_NULL || act->task == kernel_task) return (KERN_INVALID_ARGUMENT); thread = act_lock_thread(act); @@ -298,9 +283,9 @@ thread_resume( --act->suspend_count == 0 && thread != THREAD_NULL && thread->top_act == act ) { - if (!act->inited) { + if (!act->started) { clear_wait(thread, THREAD_AWAKENED); - act->inited = TRUE; + act->started = TRUE; } else thread_wakeup_one(&act->suspend_count); @@ -317,21 +302,6 @@ thread_resume( return (result); } -/* - * This routine walks toward the head of an RPC chain starting at - * a specified thread activation. An alert bit is set and a special - * handler is installed for each thread it encounters. - * - * The target thread act and thread shuttle are already locked. - */ -kern_return_t -post_alert( - register thread_act_t act, - unsigned alert_bits) -{ - panic("post_alert"); -} - /* * thread_depress_abort: * @@ -509,12 +479,6 @@ thread_get_special_port( ipc_port_t port; thread_t thread; -#if MACH_ASSERT - if (watchacts & WA_PORT) - printf("thread_get_special_port(thr_act=%x, which=%x port@%x=%x\n", - thr_act, which, portp, (portp ? 
*portp : 0)); -#endif /* MACH_ASSERT */ - if (!thr_act) return KERN_INVALID_ARGUMENT; thread = act_lock_thread(thr_act); @@ -565,12 +529,6 @@ thread_set_special_port( ipc_port_t old; thread_t thread; -#if MACH_ASSERT - if (watchacts & WA_PORT) - printf("thread_set_special_port(thr_act=%x,which=%x,port=%x\n", - thr_act, which, port); -#endif /* MACH_ASSERT */ - if (thr_act == 0) return KERN_INVALID_ARGUMENT; @@ -651,7 +609,7 @@ thread_get_state( } if (result == KERN_SUCCESS) - result = act_machine_get_state(act, flavor, state, state_count); + result = machine_thread_get_state(act, flavor, state, state_count); if ( thread != THREAD_NULL && thread->top_act == act ) @@ -713,7 +671,7 @@ thread_set_state( } if (result == KERN_SUCCESS) - result = act_machine_set_state(act, flavor, state, state_count); + result = machine_thread_set_state(act, flavor, state, state_count); if ( thread != THREAD_NULL && thread->top_act == act ) @@ -773,7 +731,7 @@ thread_dup( } if (result == KERN_SUCCESS) - result = act_thread_dup(self, target); + result = machine_thread_dup(self, target); if ( thread != THREAD_NULL && thread->top_act == target ) @@ -812,7 +770,7 @@ thread_setstatus( result = KERN_FAILURE; if (result == KERN_SUCCESS) - result = act_machine_set_state(act, flavor, tstate, count); + result = machine_thread_set_state(act, flavor, tstate, count); act_unlock_thread(act); @@ -844,7 +802,7 @@ thread_getstatus( result = KERN_FAILURE; if (result == KERN_SUCCESS) - result = act_machine_get_state(act, flavor, tstate, count); + result = machine_thread_get_state(act, flavor, tstate, count); act_unlock_thread(act); @@ -855,225 +813,81 @@ thread_getstatus( * Kernel-internal thread_activation interfaces used outside this file: */ -/* - * act_init() - Initialize activation handling code - */ void -act_init() +act_reference( + thread_act_t act) { - thr_act_zone = zinit( - sizeof(struct thread_activation), - ACT_MAX * sizeof(struct thread_activation), /* XXX */ - ACT_CHUNK * sizeof(struct thread_activation), - "activations"); - first_act = TRUE; - act_machine_init(); -} + if (act == NULL) + return; + act_lock(act); + act_reference_locked(act); + act_unlock(act); +} -/* - * act_create - Create a new activation in a specific task. - */ -kern_return_t -act_create(task_t task, - thread_act_t *new_act) +void +act_deallocate( + thread_act_t act) { - thread_act_t thr_act; - int rc; - vm_map_t map; - - if (first_act) { - thr_act = &pageout_act; - first_act = FALSE; - } else - thr_act = (thread_act_t)zalloc(thr_act_zone); - if (thr_act == 0) - return(KERN_RESOURCE_SHORTAGE); - -#if MACH_ASSERT - if (watchacts & WA_ACT_LNK) - printf("act_create(task=%x,thr_act@%x=%x)\n", - task, new_act, thr_act); -#endif /* MACH_ASSERT */ - - /* Start by zeroing everything; then init non-zero items only */ - bzero((char *)thr_act, sizeof(*thr_act)); - - if (thr_act == &pageout_act) - thr_act->thread = &pageout_thread; - -#ifdef MACH_BSD - { - /* - * Take care of the uthread allocation - * do it early in order to make KERN_RESOURCE_SHORTAGE - * handling trivial - * uthread_alloc() will bzero the storage allocated. - */ - extern void *uthread_alloc(task_t, thread_act_t); - - thr_act->uthread = uthread_alloc(task, thr_act); - if(thr_act->uthread == 0) { - /* Put the thr_act back on the thr_act zone */ - zfree(thr_act_zone, (vm_offset_t)thr_act); - return(KERN_RESOURCE_SHORTAGE); - } - } -#endif /* MACH_BSD */ - - /* - * Start with one reference for the caller and one for the - * act being alive. 
- */ - act_lock_init(thr_act); - thr_act->ref_count = 2; - - /* Latch onto the task. */ - thr_act->task = task; - task_reference(task); - - /* special_handler will always be last on the returnhandlers list. */ - thr_act->special_handler.next = 0; - thr_act->special_handler.handler = special_handler; - -#if MACH_PROF - thr_act->act_profiled = FALSE; - thr_act->act_profiled_own = FALSE; - thr_act->profil_buffer = NULLPROFDATA; -#endif - - /* Initialize the held_ulocks queue as empty */ - queue_init(&thr_act->held_ulocks); + task_t task; + thread_t thread; + void *task_proc; - /* Inherit the profiling status of the parent task */ - act_prof_init(thr_act, task); + if (act == NULL) + return; - ipc_thr_act_init(task, thr_act); - act_machine_create(task, thr_act); + act_lock(act); - /* - * If thr_act created in kernel-loaded task, alter its saved - * state to so indicate - */ - if (task->kernel_loaded) { - act_user_to_kernel(thr_act); + if (--act->act_ref_count > 0) { + act_unlock(act); + return; } - /* Cache the task's map and take a reference to it */ - map = task->map; - thr_act->map = map; + assert(!act->active); - /* Inline vm_map_reference cause we don't want to increment res_count */ - mutex_lock(&map->s_lock); - map->ref_count++; - mutex_unlock(&map->s_lock); + thread = act->thread; + assert(thread != NULL); - *new_act = thr_act; - return KERN_SUCCESS; -} + thread->top_act = NULL; -/* - * act_free - called when an thr_act's ref_count drops to zero. - * - * This can only happen after the activation has been reaped, and - * all other references to it have gone away. We can now release - * the last critical resources, unlink the activation from the - * task, and release the reference on the thread shuttle itself. - * - * Called with activation locked. - */ -#if MACH_ASSERT -int dangerous_bzero = 1; /* paranoia & safety */ -#endif + act_unlock(act); -void -act_free(thread_act_t thr_act) -{ - task_t task; - thread_t thr; - vm_map_t map; - unsigned int ref; - void * task_proc; - -#if MACH_ASSERT - if (watchacts & WA_EXIT) - printf("act_free(%x(%d)) thr=%x tsk=%x(%d) %sactive\n", - thr_act, thr_act->ref_count, thr_act->thread, - thr_act->task, - thr_act->task ? thr_act->task->ref_count : 0, - thr_act->active ? " " : " !"); -#endif /* MACH_ASSERT */ - - assert(!thr_act->active); - - task = thr_act->task; + task = act->task; task_lock(task); task_proc = task->bsd_info; - if (thr = thr_act->thread) { + + { time_value_t user_time, system_time; - thread_read_times(thr, &user_time, &system_time); + thread_read_times(thread, &user_time, &system_time); time_value_add(&task->total_user_time, &user_time); time_value_add(&task->total_system_time, &system_time); - /* Unlink the thr_act from the task's thr_act list, - * so it doesn't appear in calls to task_threads and such. - * The thr_act still keeps its ref on the task, however. - */ - queue_remove(&task->thr_acts, thr_act, thread_act_t, thr_acts); - thr_act->thr_acts.next = NULL; - task->thr_act_count--; - task->res_act_count--; - task_unlock(task); - task_deallocate(task); - thread_deallocate(thr); - act_machine_destroy(thr_act); - } else { - /* - * Must have never really gotten started - * no unlinking from the task and no need - * to free the shuttle. 
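The act_deallocate() body above folds the dying thread's user and system time into the task's running totals with time_value_add(). A sketch of that accumulation, assuming the usual Mach seconds/microseconds layout for time_value_t and operands already normalized below one second of microseconds:

#include <stdio.h>

#define TIME_MICROS_MAX 1000000

typedef struct {
	int seconds;
	int microseconds;
} time_value_t;

/* Component-wise add with a single carry from microseconds into seconds. */
static void time_value_add(time_value_t *result, const time_value_t *addend)
{
	result->microseconds += addend->microseconds;
	result->seconds += addend->seconds;
	if (result->microseconds >= TIME_MICROS_MAX) {
		result->microseconds -= TIME_MICROS_MAX;
		result->seconds++;
	}
}

int main(void)
{
	time_value_t total_user_time = { 41, 900000 };  /* accumulated so far */
	time_value_t user_time = { 0, 250000 };         /* dying thread's share */

	time_value_add(&total_user_time, &user_time);
	printf("%d.%06d\n", total_user_time.seconds, total_user_time.microseconds);
	return 0;
}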
- */ - task_unlock(task); - task_deallocate(task); + queue_remove(&task->threads, act, thread_act_t, task_threads); + act->task_threads.next = NULL; + task->thread_count--; + task->res_thread_count--; } - act_prof_deallocate(thr_act); - ipc_thr_act_terminate(thr_act); + task_unlock(task); - /* - * Drop the cached map reference. - * Inline version of vm_map_deallocate() because we - * don't want to decrement the map's residence count here. - */ - map = thr_act->map; - mutex_lock(&map->s_lock); - ref = --map->ref_count; - mutex_unlock(&map->s_lock); - if (ref == 0) - vm_map_destroy(map); + act_prof_deallocate(act); + ipc_thr_act_terminate(act); #ifdef MACH_BSD { - /* - * Free uthread BEFORE the bzero. - * Not doing so will result in a leak. - */ extern void uthread_free(task_t, void *, void *); + void *ut = act->uthread; - void *ut = thr_act->uthread; - thr_act->uthread = 0; + act->uthread = NULL; uthread_free(task, ut, task_proc); } #endif /* MACH_BSD */ -#if MACH_ASSERT - if (dangerous_bzero) /* dangerous if we're still using it! */ - bzero((char *)thr_act, sizeof(*thr_act)); -#endif /* MACH_ASSERT */ - /* Put the thr_act back on the thr_act zone */ - zfree(thr_act_zone, (vm_offset_t)thr_act); + task_deallocate(task); + + thread_deallocate(thread); } @@ -1088,37 +902,22 @@ act_free(thread_act_t thr_act) */ void act_attach( - thread_act_t thr_act, - thread_t thread, - unsigned init_alert_mask) + thread_act_t act, + thread_t thread) { - thread_act_t lower; - -#if MACH_ASSERT - assert(thread == current_thread() || thread->top_act == THR_ACT_NULL); - if (watchacts & WA_ACT_LNK) - printf("act_attach(thr_act %x(%d) thread %x(%d) mask %d)\n", - thr_act, thr_act->ref_count, thread, thread->ref_count, - init_alert_mask); -#endif /* MACH_ASSERT */ + thread_act_t lower; /* - * Chain the thr_act onto the thread's thr_act stack. - * Set mask and auto-propagate alerts from below. + * Chain the act onto the thread's act stack. */ - thr_act->ref_count++; - thr_act->thread = thread; - thr_act->higher = THR_ACT_NULL; /*safety*/ - thr_act->alerts = 0; - thr_act->alert_mask = init_alert_mask; - lower = thr_act->lower = thread->top_act; - - if (lower != THR_ACT_NULL) { - lower->higher = thr_act; - thr_act->alerts = (lower->alerts & init_alert_mask); - } - - thread->top_act = thr_act; + act->act_ref_count++; + act->thread = thread; + act->higher = THR_ACT_NULL; + lower = act->lower = thread->top_act; + if (lower != THR_ACT_NULL) + lower->higher = act; + + thread->top_act = act; } /* @@ -1134,20 +933,11 @@ act_detach( { thread_t cur_thread = cur_act->thread; -#if MACH_ASSERT - if (watchacts & (WA_EXIT|WA_ACT_LNK)) - printf("act_detach: thr_act %x(%d), thrd %x(%d) task=%x(%d)\n", - cur_act, cur_act->ref_count, - cur_thread, cur_thread->ref_count, - cur_act->task, - cur_act->task ? 
cur_act->task->ref_count : 0); -#endif /* MACH_ASSERT */ - /* Unlink the thr_act from the thread's thr_act stack */ cur_thread->top_act = cur_act->lower; cur_act->thread = 0; - cur_act->ref_count--; - assert(cur_act->ref_count > 0); + cur_act->act_ref_count--; + assert(cur_act->act_ref_count > 0); #if MACH_ASSERT cur_act->lower = cur_act->higher = THR_ACT_NULL; @@ -1241,15 +1031,11 @@ thread_act_t switch_act( thread_act_t act) { - thread_t thread; thread_act_t old, new; - unsigned cpu; - spl_t spl; - + thread_t thread; disable_preemption(); - cpu = cpu_number(); thread = current_thread(); /* @@ -1266,17 +1052,16 @@ switch_act( } assert(new != THR_ACT_NULL); - assert(cpu_to_processor(cpu)->cpu_data->active_thread == thread); - active_kloaded[cpu] = (new->kernel_loaded) ? new : 0; + assert(current_processor()->active_thread == thread); /* This is where all the work happens */ - machine_switch_act(thread, old, new, cpu); + machine_switch_act(thread, old, new); /* * Push or pop an activation on the chain. */ if (act) { - act_attach(new, thread, 0); + act_attach(new, thread); } else { act_detach(old); @@ -1302,11 +1087,6 @@ install_special_handler( spl_t spl; thread_t thread = thr_act->thread; -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("act_%x: install_special_hdlr(%x)\n",current_act(),thr_act); -#endif /* MACH_ASSERT */ - spl = splsched(); thread_lock(thread); install_special_handler_locked(thr_act); @@ -1351,9 +1131,9 @@ install_special_handler_locked( else { processor_t processor = thread->last_processor; - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->cpu_data->active_thread == thread ) + if ( processor != PROCESSOR_NULL && + processor->state == PROCESSOR_RUNNING && + processor->active_thread == thread ) cause_ast_check(processor); } } @@ -1395,11 +1175,6 @@ act_execute_returnhandlers(void) { thread_act_t act = current_act(); -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("execute_rtn_hdlrs: act=%x\n", act); -#endif /* MACH_ASSERT */ - thread_ast_clear(act, AST_APC); spllo(); @@ -1421,12 +1196,6 @@ act_execute_returnhandlers(void) spllo(); act_unlock_thread(act); -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf( (rh == &act->special_handler) ? - "\tspecial_handler\n" : "\thandler=%x\n", rh->handler); -#endif /* MACH_ASSERT */ - /* Execute it */ (*rh->handler)(rh, act); } @@ -1488,13 +1257,10 @@ special_handler( thread_unlock(thread); splx(s); - /* - * If someone has killed this invocation, - * invoke the return path with a terminated exception. - */ if (!self->active) { act_unlock_thread(self); - act_machine_return(KERN_TERMINATED); + thread_terminate_self(); + /*NOTREACHED*/ } /* @@ -1505,7 +1271,7 @@ special_handler( assert_wait(&self->suspend_count, THREAD_ABORTSAFE); act_unlock_thread(self); thread_block(special_handler_continue); - /* NOTREACHED */ + /*NOTREACHED*/ } act_unlock_thread(self); @@ -1517,17 +1283,6 @@ special_handler( act_unlock_thread(self); } -/* - * Update activation that belongs to a task created via kernel_task_create(). - */ -void -act_user_to_kernel( - thread_act_t thr_act) -{ - pcb_user_to_kernel(thr_act); - thr_act->kernel_loading = TRUE; -} - /* * Already locked: activation (shuttle frozen within) * @@ -1538,17 +1293,6 @@ static void act_disable( thread_act_t thr_act) { - -#if MACH_ASSERT - if (watchacts & WA_EXIT) { - printf("act_%x: act_disable_tl(thr_act=%x(%d))%sactive", - current_act(), thr_act, thr_act->ref_count, - (thr_act->active ? 
" " : " !")); - printf("\n"); - (void) dump_act(thr_act); - } -#endif /* MACH_ASSERT */ - thr_act->active = 0; /* Drop the thr_act reference taken for being active. @@ -1556,46 +1300,7 @@ act_disable( * the one we were passed.) * Inline the deallocate because thr_act is locked. */ - act_locked_act_deallocate(thr_act); -} - -/* - * act_alert - Register an alert from this activation. - * - * Each set bit is propagated upward from (but not including) this activation, - * until the top of the chain is reached or the bit is masked. - */ -kern_return_t -act_alert(thread_act_t thr_act, unsigned alerts) -{ - thread_t thread = act_lock_thread(thr_act); - -#if MACH_ASSERT - if (watchacts & WA_ACT_LNK) - printf("act_alert %x: %x\n", thr_act, alerts); -#endif /* MACH_ASSERT */ - - if (thread) { - thread_act_t act_up = thr_act; - while ((alerts) && (act_up != thread->top_act)) { - act_up = act_up->higher; - alerts &= act_up->alert_mask; - act_up->alerts |= alerts; - } - /* - * XXXX If we reach the top, and it is blocked in glue - * code, do something to kick it. XXXX - */ - } - act_unlock_thread(thr_act); - - return KERN_SUCCESS; -} - -kern_return_t act_alert_mask(thread_act_t thr_act, unsigned alert_mask) -{ - panic("act_alert_mask NOT YET IMPLEMENTED\n"); - return KERN_SUCCESS; + act_deallocate_locked(thr_act); } typedef struct GetSetState { @@ -1646,24 +1351,13 @@ get_set_state( act_set_apc(act); -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) { - printf("act_%x: get_set_state(act=%x flv=%x state=%x ptr@%x=%x)", - current_act(), act, flavor, state, - pcount, (pcount ? *pcount : 0)); - printf((handler == get_state_handler ? "get_state_hdlr\n" : - (handler == set_state_handler ? "set_state_hdlr\n" : - "hndler=%x\n")), handler); - } -#endif /* MACH_ASSERT */ - assert(act->thread); assert(act != current_act()); for (;;) { wait_result_t result; - if ( act->inited && + if ( act->started && act->thread->top_act == act ) thread_wakeup_one(&act->suspend_count); @@ -1692,12 +1386,6 @@ get_set_state( act_lock_thread(act); } -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("act_%x: get_set_state returns %x\n", - current_act(), gss.result); -#endif /* MACH_ASSERT */ - return (gss.result); } @@ -1706,13 +1394,7 @@ set_state_handler(ReturnHandler *rh, thread_act_t thr_act) { GetSetState *gss = (GetSetState*)rh; -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("act_%x: set_state_handler(rh=%x,thr_act=%x)\n", - current_act(), rh, thr_act); -#endif /* MACH_ASSERT */ - - gss->result = act_machine_set_state(thr_act, gss->flavor, + gss->result = machine_thread_set_state(thr_act, gss->flavor, gss->state, *gss->pcount); thread_wakeup((event_t)gss); } @@ -1722,13 +1404,7 @@ get_state_handler(ReturnHandler *rh, thread_act_t thr_act) { GetSetState *gss = (GetSetState*)rh; -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("act_%x: get_state_handler(rh=%x,thr_act=%x)\n", - current_act(), rh, thr_act); -#endif /* MACH_ASSERT */ - - gss->result = act_machine_get_state(thr_act, gss->flavor, + gss->result = machine_thread_get_state(thr_act, gss->flavor, gss->state, (mach_msg_type_number_t *) gss->pcount); thread_wakeup((event_t)gss); @@ -1738,13 +1414,6 @@ kern_return_t act_get_state_locked(thread_act_t thr_act, int flavor, thread_state_t state, mach_msg_type_number_t *pcount) { -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("act_%x: act_get_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n", - current_act(), thr_act, flavor, state, pcount, - (pcount? 
*pcount : 0)); -#endif /* MACH_ASSERT */ - return(get_set_state(thr_act, flavor, state, (int*)pcount, get_state_handler)); } @@ -1752,12 +1421,6 @@ kern_return_t act_set_state_locked(thread_act_t thr_act, int flavor, thread_state_t state, mach_msg_type_number_t count) { -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("act_%x: act_set_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n", - current_act(), thr_act, flavor, state, count, count); -#endif /* MACH_ASSERT */ - return(get_set_state(thr_act, flavor, state, (int*)&count, set_state_handler)); } @@ -1801,9 +1464,9 @@ act_set_astbsd( thread_lock(thread); thread_ast_set(act, AST_BSD); processor = thread->last_processor; - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->cpu_data->active_thread == thread ) + if ( processor != PROCESSOR_NULL && + processor->state == PROCESSOR_RUNNING && + processor->active_thread == thread ) cause_ast_check(processor); thread_unlock(thread); } @@ -1828,9 +1491,9 @@ act_set_apc( thread_lock(thread); thread_ast_set(act, AST_APC); processor = thread->last_processor; - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->cpu_data->active_thread == thread ) + if ( processor != PROCESSOR_NULL && + processor->state == PROCESSOR_RUNNING && + processor->active_thread == thread ) cause_ast_check(processor); thread_unlock(thread); } @@ -1871,19 +1534,3 @@ mach_thread_self(void) act_reference(self); return self; } - -#undef act_reference -void -act_reference( - thread_act_t thr_act) -{ - act_reference_fast(thr_act); -} - -#undef act_deallocate -void -act_deallocate( - thread_act_t thr_act) -{ - act_deallocate_fast(thr_act); -} diff --git a/osfmk/kern/thread_act.h b/osfmk/kern/thread_act.h index 4e7e1d58c..acbb110f2 100644 --- a/osfmk/kern/thread_act.h +++ b/osfmk/kern/thread_act.h @@ -51,338 +51,6 @@ #ifndef _KERN_THREAD_ACT_H_ #define _KERN_THREAD_ACT_H_ -#include -#include -#include -#include - -#include - -#ifdef __APPLE_API_PRIVATE - -#ifdef MACH_KERNEL_PRIVATE - -#include -#include -#include - -#include -#include -#include -#include #include -#include -#include - -/* - * Here is a description of the states an thread_activation may be in. - * - * An activation always has a valid task pointer, and it is always constant. - * The activation is only linked onto the task's activation list until - * the activation is terminated. - * - * The thread holds a reference on the activation while using it. - * - * An activation is active until thread_terminate is called on it; - * then it is inactive, waiting for all references to be dropped. - * Future control operations on the terminated activation will fail, - * with the exception that act_yank still works if the activation is - * still on an RPC chain. A terminated activation always has a null - * thread pointer. - * - * An activation is suspended when suspend_count > 0. - * - * Locking note: access to data relevant to scheduling state (user_stop_count, - * suspend_count, handlers, special_handler) is controlled by the combination - * of locks acquired by act_lock_thread(). That is, not only must act_lock() - * be held, but migration through the activation must be frozen (so that the - * thread pointer doesn't change). If a shuttle is associated with the - * activation, then its thread_lock() must also be acquired to change these - * data. Regardless of whether a shuttle is present, the data must be - * altered at splsched(). 
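act_get_state_locked()/act_set_state_locked() above both funnel through get_set_state(), which queues a handler on the target activation, kicks it with an APC, and blocks until the handler posts a result. A condition-variable model of that rendezvous (all names illustrative; the kernel uses assert_wait()/thread_block() and thread_wakeup() rather than a condvar):

#include <pthread.h>
#include <stdio.h>

struct gss {
	pthread_mutex_t lock;
	pthread_cond_t done;
	int have_result;
	int result;
};

/* Runs in the target thread, standing in for the queued state handler. */
static void *target_thread(void *arg)
{
	struct gss *g = arg;

	pthread_mutex_lock(&g->lock);
	g->result = 0;                  /* stands in for machine_thread_get_state() */
	g->have_result = 1;
	pthread_cond_signal(&g->done);  /* thread_wakeup() on the waiting requester */
	pthread_mutex_unlock(&g->lock);
	return NULL;
}

int main(void)
{
	struct gss g = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0 };
	pthread_t t;

	pthread_create(&t, NULL, target_thread, &g);

	pthread_mutex_lock(&g.lock);
	while (!g.have_result)          /* the assert_wait()/thread_block() loop */
		pthread_cond_wait(&g.done, &g.lock);
	pthread_mutex_unlock(&g.lock);

	pthread_join(t, NULL);
	printf("result %d\n", g.result);
	return 0;
}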
- */ - -typedef struct ReturnHandler { - struct ReturnHandler *next; - void (*handler)(struct ReturnHandler *rh, - struct thread_activation *thr_act); -} ReturnHandler; - -typedef struct thread_activation { - - /*** task linkage ***/ - - /* Links for task's circular list of activations. The activation - * is only on the task's activation list while active. Must be - * first. - */ - queue_chain_t thr_acts; - - /* Indicators for whether this activation is in the midst of - * resuming or has already been resumed in a kernel-loaded - * task -- these flags are basically for quick access to - * this information. - */ - boolean_t kernel_loaded; /* running in kernel-loaded task */ - boolean_t kernel_loading; /* about to run kernel-loaded */ - - boolean_t inited; - - /*** Machine-dependent state ***/ - struct MachineThrAct mact; - - /*** Consistency ***/ - decl_mutex_data(,lock) - decl_simple_lock_data(,sched_lock) - int ref_count; - - /* Reference to the task this activation is in. - * Constant for the life of the activation - */ - struct task *task; - vm_map_t map; /* cached current map */ - - /*** Thread linkage ***/ - /* Shuttle using this activation, zero if not in use. The shuttle - * holds a reference on the activation while this is nonzero. - */ - struct thread_shuttle *thread; - - /* The rest in this section is only valid when thread is nonzero. */ - - /* Next higher and next lower activation on the thread's activation - * stack. For a topmost activation or the null_act, higher is - * undefined. The bottommost activation is always the null_act. - */ - struct thread_activation *higher, *lower; - - /* Alert bits pending at this activation; some of them may have - * propagated from lower activations. - */ - unsigned alerts; - - /* Mask of alert bits to be allowed to pass through from lower levels. - */ - unsigned alert_mask; - - /*** Control information ***/ - - /* Number of outstanding suspensions on this activation. */ - int suspend_count; - - /* User-visible scheduling state */ - int user_stop_count; /* outstanding stops */ - - /* ast is needed - see ast.h */ - ast_t ast; - - /* This is normally true, but is set to false when the - * activation is terminated. - */ - int active; - - /* Chain of return handlers to be called before the thread is - * allowed to return to this invocation - */ - ReturnHandler *handlers; - - /* A special ReturnHandler attached to the above chain to - * handle suspension and such - */ - ReturnHandler special_handler; - - /* Special ports attached to this activation */ - struct ipc_port *ith_self; /* not a right, doesn't hold ref */ - struct ipc_port *ith_sself; /* a send right */ - struct exception_action exc_actions[EXC_TYPES_COUNT]; - - /* A list of ulocks (a lock set element) currently held by the thread - */ - queue_head_t held_ulocks; - -#if MACH_PROF - /* Profiling data structures */ - boolean_t act_profiled; /* is activation being profiled? */ - boolean_t act_profiled_own; - /* is activation being profiled - * on its own ? 
*/ - struct prof_data *profil_buffer;/* prof struct if either is so */ -#endif /* MACH_PROF */ - -#ifdef MACH_BSD - void *uthread; -#endif - -} Thread_Activation; - -/* Alert bits */ -#define SERVER_TERMINATED 0x01 -#define ORPHANED 0x02 -#define CLIENT_TERMINATED 0x04 -#define TIME_CONSTRAINT_UNSATISFIED 0x08 - -#define act_lock_init(thr_act) mutex_init(&(thr_act)->lock, ETAP_THREAD_ACT) -#define act_lock(thr_act) mutex_lock(&(thr_act)->lock) -#define act_lock_try(thr_act) mutex_try(&(thr_act)->lock) -#define act_unlock(thr_act) mutex_unlock(&(thr_act)->lock) - -/* Sanity check the ref count. If it is 0, we may be doubly zfreeing. - * If it is larger than max int, it has been corrupted, probably by being - * modified into an address (this is architecture dependent, but it's - * safe to assume there cannot really be max int references). - */ -#define ACT_MAX_REFERENCES \ - (unsigned)(~0 ^ (1 << (sizeof(int)*BYTE_SIZE - 1))) - -#define act_reference_fast(thr_act) \ - MACRO_BEGIN \ - if (thr_act) { \ - act_lock(thr_act); \ - assert((thr_act)->ref_count < ACT_MAX_REFERENCES); \ - (thr_act)->ref_count++; \ - act_unlock(thr_act); \ - } \ - MACRO_END - -#define act_reference(thr_act) act_reference_fast(thr_act) - -#define act_locked_act_reference(thr_act) \ - MACRO_BEGIN \ - if (thr_act) { \ - assert((thr_act)->ref_count < ACT_MAX_REFERENCES); \ - (thr_act)->ref_count++; \ - } \ - MACRO_END - -#define act_deallocate_fast(thr_act) \ - MACRO_BEGIN \ - if (thr_act) { \ - int new_value; \ - act_lock(thr_act); \ - assert((thr_act)->ref_count > 0 && \ - (thr_act)->ref_count <= ACT_MAX_REFERENCES); \ - new_value = --(thr_act)->ref_count; \ - act_unlock(thr_act); \ - if (new_value == 0) \ - act_free(thr_act); \ - } \ - MACRO_END - -#define act_deallocate(thr_act) act_deallocate_fast(thr_act) - -#define act_locked_act_deallocate(thr_act) \ - MACRO_BEGIN \ - if (thr_act) { \ - int new_value; \ - assert((thr_act)->ref_count > 0 && \ - (thr_act)->ref_count <= ACT_MAX_REFERENCES); \ - new_value = --(thr_act)->ref_count; \ - if (new_value == 0) { \ - panic("a_l_act_deallocate: would free act"); \ - } \ - } \ - MACRO_END - -extern struct thread_activation pageout_act; - -extern void act_init(void); -extern void thread_release(thread_act_t); -extern kern_return_t thread_dowait(thread_act_t, boolean_t); -extern void thread_hold(thread_act_t); - -extern kern_return_t thread_get_special_port(thread_act_t, int, - ipc_port_t *); -extern kern_return_t thread_set_special_port(thread_act_t, int, - ipc_port_t); -extern thread_t act_lock_thread(thread_act_t); -extern void act_unlock_thread(thread_act_t); -extern void install_special_handler(thread_act_t); -extern thread_act_t thread_lock_act(thread_t); -extern void thread_unlock_act(thread_t); -extern void act_attach(thread_act_t, thread_t, unsigned); -extern void act_execute_returnhandlers(void); -extern void act_detach(thread_act_t); -extern void act_free(thread_act_t); - -/* machine-dependent functions */ -extern void act_machine_return(kern_return_t); -extern void act_machine_init(void); -extern kern_return_t act_machine_create(struct task *, thread_act_t); -extern void act_machine_destroy(thread_act_t); -extern kern_return_t act_machine_set_state(thread_act_t, - thread_flavor_t, thread_state_t, - mach_msg_type_number_t ); -extern kern_return_t act_machine_get_state(thread_act_t, - thread_flavor_t, thread_state_t, - mach_msg_type_number_t *); -extern void act_machine_switch_pcb(thread_act_t); -extern void act_virtual_machine_destroy(thread_act_t); - -extern 
kern_return_t act_create(task_t, thread_act_t *); -extern kern_return_t act_get_state(thread_act_t, int, thread_state_t, - mach_msg_type_number_t *); -extern kern_return_t act_set_state(thread_act_t, int, thread_state_t, - mach_msg_type_number_t); - -extern int dump_act(thread_act_t); /* debugging */ - -#if MACH_ASSERT -/* - * Debugging support - "watchacts", a patchable selective trigger - */ -extern unsigned int watchacts; /* debug printf trigger */ -#define WA_SCHED 0x001 /* kern/sched_prim.c */ -#define WA_THR 0x002 /* kern/thread.c */ -#define WA_ACT_LNK 0x004 /* kern/thread_act.c act mgmt */ -#define WA_ACT_HDLR 0x008 /* kern/thread_act.c act hldrs */ -#define WA_TASK 0x010 /* kern/task.c */ -#define WA_BOOT 0x020 /* bootstrap,startup.c */ -#define WA_PCB 0x040 /* machine/pcb.c */ -#define WA_PORT 0x080 /* ports + port sets */ -#define WA_EXIT 0x100 /* exit path */ -#define WA_SWITCH 0x200 /* context switch (!!) */ -#define WA_STATE 0x400 /* get/set state (!!) */ -#define WA_ALL (~0) -#endif /* MACH_ASSERT */ - -#else /* MACH_KERNEL_PRIVATE */ - -extern void act_reference(thread_act_t); -extern void act_deallocate(thread_act_t); - -#endif /* MACH_KERNEL_PRIVATE */ - -extern kern_return_t act_alert(thread_act_t, unsigned); -extern kern_return_t act_alert_mask(thread_act_t, unsigned ); -extern kern_return_t post_alert(thread_act_t, unsigned); - -typedef void (thread_apc_handler_t)(thread_act_t); - -extern kern_return_t thread_apc_set(thread_act_t, thread_apc_handler_t); -extern kern_return_t thread_apc_clear(thread_act_t, thread_apc_handler_t); - -extern vm_map_t swap_act_map(thread_act_t, vm_map_t); - -extern void *get_bsdthread_info(thread_act_t); -extern void set_bsdthread_info(thread_act_t, void *); -extern task_t get_threadtask(thread_act_t); - -#endif /* __APPLE_API_PRIVATE */ - -#ifdef __APPLE_API_UNSTABLE - -#if !defined(MACH_KERNEL_PRIVATE) - -extern thread_act_t current_act(void); - -#endif /* MACH_KERNEL_PRIVATE */ - -#endif /* __APPLE_API_UNSTABLE */ - -extern kern_return_t thread_abort(thread_act_t); -extern kern_return_t thread_abort_safely(thread_act_t); -extern kern_return_t thread_resume(thread_act_t); -extern kern_return_t thread_suspend(thread_act_t); -extern kern_return_t thread_terminate(thread_act_t); #endif /* _KERN_THREAD_ACT_H_ */ diff --git a/osfmk/kern/thread_call.c b/osfmk/kern/thread_call.c index 15cd7e8a4..a34caf6ec 100644 --- a/osfmk/kern/thread_call.c +++ b/osfmk/kern/thread_call.c @@ -47,6 +47,8 @@ #include +#include + #define internal_call_num 768 #define thread_call_thread_min 4 @@ -59,20 +61,16 @@ decl_simple_lock_data(static,thread_call_lock) static timer_call_data_t - thread_call_delayed_timer; + thread_call_delaytimer; static queue_head_t - internal_call_free_queue, - pending_call_queue, delayed_call_queue; + thread_call_xxx_queue, + thread_call_pending_queue, thread_call_delayed_queue; static struct wait_queue - call_thread_idle_queue; - -static -thread_t - activate_thread; + call_thread_waitqueue; static boolean_t @@ -90,10 +88,7 @@ static struct { int thread_num, thread_hiwat, thread_lowat; -} thread_calls; - -static boolean_t - thread_call_initialized = FALSE; +} thread_call_vars; static __inline__ thread_call_t _internal_call_allocate(void); @@ -167,40 +162,34 @@ thread_call_initialize(void) thread_call_t call; spl_t s; - if (thread_call_initialized) - panic("thread_call_initialize"); - simple_lock_init(&thread_call_lock, ETAP_MISC_TIMER); s = splsched(); simple_lock(&thread_call_lock); - queue_init(&pending_call_queue); - 
queue_init(&delayed_call_queue); + queue_init(&thread_call_pending_queue); + queue_init(&thread_call_delayed_queue); - queue_init(&internal_call_free_queue); + queue_init(&thread_call_xxx_queue); for ( call = internal_call_storage; call < &internal_call_storage[internal_call_num]; call++) { - enqueue_tail(&internal_call_free_queue, qe(call)); + enqueue_tail(&thread_call_xxx_queue, qe(call)); } - timer_call_setup(&thread_call_delayed_timer, _delayed_call_timer, NULL); + timer_call_setup(&thread_call_delaytimer, _delayed_call_timer, NULL); - wait_queue_init(&call_thread_idle_queue, SYNC_POLICY_FIFO); - thread_calls.thread_lowat = thread_call_thread_min; + wait_queue_init(&call_thread_waitqueue, SYNC_POLICY_FIFO); + thread_call_vars.thread_lowat = thread_call_thread_min; activate_thread_awake = TRUE; - thread_call_initialized = TRUE; simple_unlock(&thread_call_lock); splx(s); - activate_thread = kernel_thread_with_priority( - kernel_task, MAXPRI_KERNEL - 2, - _activate_thread, TRUE, TRUE); + kernel_thread_with_priority(_activate_thread, MAXPRI_KERNEL - 2); } void @@ -228,10 +217,10 @@ _internal_call_allocate(void) { thread_call_t call; - if (queue_empty(&internal_call_free_queue)) + if (queue_empty(&thread_call_xxx_queue)) panic("_internal_call_allocate"); - call = TC(dequeue_head(&internal_call_free_queue)); + call = TC(dequeue_head(&thread_call_xxx_queue)); return (call); } @@ -255,7 +244,7 @@ _internal_call_release( { if ( call >= internal_call_storage && call < &internal_call_storage[internal_call_num] ) - enqueue_tail(&internal_call_free_queue, qe(call)); + enqueue_head(&thread_call_xxx_queue, qe(call)); } /* @@ -275,9 +264,9 @@ _pending_call_enqueue( thread_call_t call ) { - enqueue_tail(&pending_call_queue, qe(call)); - if (++thread_calls.pending_num > thread_calls.pending_hiwat) - thread_calls.pending_hiwat = thread_calls.pending_num; + enqueue_tail(&thread_call_pending_queue, qe(call)); + if (++thread_call_vars.pending_num > thread_call_vars.pending_hiwat) + thread_call_vars.pending_hiwat = thread_call_vars.pending_num; call->state = PENDING; } @@ -300,7 +289,7 @@ _pending_call_dequeue( ) { (void)remque(qe(call)); - thread_calls.pending_num--; + thread_call_vars.pending_num--; call->state = IDLE; } @@ -325,10 +314,10 @@ _delayed_call_enqueue( { thread_call_t current; - current = TC(queue_first(&delayed_call_queue)); + current = TC(queue_first(&thread_call_delayed_queue)); while (TRUE) { - if ( queue_end(&delayed_call_queue, qe(current)) || + if ( queue_end(&thread_call_delayed_queue, qe(current)) || call->deadline < current->deadline ) { current = TC(queue_prev(qe(current))); break; @@ -338,8 +327,8 @@ _delayed_call_enqueue( } insque(qe(call), qe(current)); - if (++thread_calls.delayed_num > thread_calls.delayed_hiwat) - thread_calls.delayed_hiwat = thread_calls.delayed_num; + if (++thread_call_vars.delayed_num > thread_call_vars.delayed_hiwat) + thread_call_vars.delayed_hiwat = thread_call_vars.delayed_num; call->state = DELAYED; } @@ -362,7 +351,7 @@ _delayed_call_dequeue( ) { (void)remque(qe(call)); - thread_calls.delayed_num--; + thread_call_vars.delayed_num--; call->state = IDLE; } @@ -383,7 +372,7 @@ _set_delayed_call_timer( thread_call_t call ) { - timer_call_enter(&thread_call_delayed_timer, call->deadline); + timer_call_enter(&thread_call_delaytimer, call->deadline); } /* @@ -411,9 +400,9 @@ _remove_from_pending_queue( boolean_t call_removed = FALSE; thread_call_t call; - call = TC(queue_first(&pending_call_queue)); + call = TC(queue_first(&thread_call_pending_queue)); - 
while (!queue_end(&pending_call_queue, qe(call))) { + while (!queue_end(&thread_call_pending_queue, qe(call))) { if ( call->func == func && call->param0 == param0 ) { thread_call_t next = TC(queue_next(qe(call))); @@ -460,9 +449,9 @@ _remove_from_delayed_queue( boolean_t call_removed = FALSE; thread_call_t call; - call = TC(queue_first(&delayed_call_queue)); + call = TC(queue_first(&thread_call_delayed_queue)); - while (!queue_end(&delayed_call_queue, qe(call))) { + while (!queue_end(&thread_call_delayed_queue, qe(call))) { if ( call->func == func && call->param0 == param0 ) { thread_call_t next = TC(queue_next(qe(call))); @@ -505,17 +494,14 @@ thread_call_func( ) { thread_call_t call; - int s; + spl_t s; - if (!thread_call_initialized) - panic("thread_call_func"); - s = splsched(); simple_lock(&thread_call_lock); - call = TC(queue_first(&pending_call_queue)); + call = TC(queue_first(&thread_call_pending_queue)); - while (unique_call && !queue_end(&pending_call_queue, qe(call))) { + while (unique_call && !queue_end(&thread_call_pending_queue, qe(call))) { if ( call->func == func && call->param0 == param ) { break; @@ -524,7 +510,7 @@ thread_call_func( call = TC(queue_next(qe(call))); } - if (!unique_call || queue_end(&pending_call_queue, qe(call))) { + if (!unique_call || queue_end(&thread_call_pending_queue, qe(call))) { call = _internal_call_allocate(); call->func = func; call->param0 = param; @@ -532,7 +518,7 @@ thread_call_func( _pending_call_enqueue(call); - if (thread_calls.active_num <= 0) + if (thread_call_vars.active_num <= 0) _call_thread_wake(); } @@ -560,11 +546,8 @@ thread_call_func_delayed( ) { thread_call_t call; - int s; + spl_t s; - if (!thread_call_initialized) - panic("thread_call_func_delayed"); - s = splsched(); simple_lock(&thread_call_lock); @@ -576,7 +559,7 @@ thread_call_func_delayed( _delayed_call_enqueue(call); - if (queue_first(&delayed_call_queue) == qe(call)) + if (queue_first(&thread_call_delayed_queue) == qe(call)) _set_delayed_call_timer(call); simple_unlock(&thread_call_lock); @@ -609,7 +592,7 @@ thread_call_func_cancel( ) { boolean_t result; - int s; + spl_t s; s = splsched(); simple_lock(&thread_call_lock); @@ -669,7 +652,7 @@ thread_call_free( thread_call_t call ) { - int s; + spl_t s; s = splsched(); simple_lock(&thread_call_lock); @@ -709,7 +692,7 @@ thread_call_enter( ) { boolean_t result = TRUE; - int s; + spl_t s; s = splsched(); simple_lock(&thread_call_lock); @@ -722,7 +705,7 @@ thread_call_enter( _pending_call_enqueue(call); - if (thread_calls.active_num <= 0) + if (thread_call_vars.active_num <= 0) _call_thread_wake(); } @@ -741,7 +724,7 @@ thread_call_enter1( ) { boolean_t result = TRUE; - int s; + spl_t s; s = splsched(); simple_lock(&thread_call_lock); @@ -754,7 +737,7 @@ thread_call_enter1( _pending_call_enqueue(call); - if (thread_calls.active_num <= 0) + if (thread_call_vars.active_num <= 0) _call_thread_wake(); } @@ -787,7 +770,7 @@ thread_call_enter_delayed( ) { boolean_t result = TRUE; - int s; + spl_t s; s = splsched(); simple_lock(&thread_call_lock); @@ -804,7 +787,7 @@ thread_call_enter_delayed( _delayed_call_enqueue(call); - if (queue_first(&delayed_call_queue) == qe(call)) + if (queue_first(&thread_call_delayed_queue) == qe(call)) _set_delayed_call_timer(call); simple_unlock(&thread_call_lock); @@ -821,7 +804,7 @@ thread_call_enter1_delayed( ) { boolean_t result = TRUE; - int s; + spl_t s; s = splsched(); simple_lock(&thread_call_lock); @@ -838,7 +821,7 @@ thread_call_enter1_delayed( _delayed_call_enqueue(call); - if 
(queue_first(&delayed_call_queue) == qe(call)) + if (queue_first(&thread_call_delayed_queue) == qe(call)) _set_delayed_call_timer(call); simple_unlock(&thread_call_lock); @@ -867,7 +850,7 @@ thread_call_cancel( ) { boolean_t result = TRUE; - int s; + spl_t s; s = splsched(); simple_lock(&thread_call_lock); @@ -905,7 +888,7 @@ thread_call_is_delayed( uint64_t *deadline) { boolean_t result = FALSE; - int s; + spl_t s; s = splsched(); simple_lock(&thread_call_lock); @@ -940,16 +923,16 @@ void _call_thread_wake(void) { if (wait_queue_wakeup_one( - &call_thread_idle_queue, &call_thread_idle_queue, + &call_thread_waitqueue, &call_thread_waitqueue, THREAD_AWAKENED) == KERN_SUCCESS) { - thread_calls.idle_thread_num--; + thread_call_vars.idle_thread_num--; - if (++thread_calls.active_num > thread_calls.active_hiwat) - thread_calls.active_hiwat = thread_calls.active_num; + if (++thread_call_vars.active_num > thread_call_vars.active_hiwat) + thread_call_vars.active_hiwat = thread_call_vars.active_num; } else if (!activate_thread_awake) { - clear_wait(activate_thread, THREAD_AWAKENED); + thread_wakeup_one(&activate_thread_awake); activate_thread_awake = TRUE; } } @@ -970,11 +953,11 @@ call_thread_block(void) { simple_lock(&thread_call_lock); - if (--thread_calls.active_num < thread_calls.active_lowat) - thread_calls.active_lowat = thread_calls.active_num; + if (--thread_call_vars.active_num < thread_call_vars.active_lowat) + thread_call_vars.active_lowat = thread_call_vars.active_num; - if ( thread_calls.active_num <= 0 && - thread_calls.pending_num > 0 ) + if ( thread_call_vars.active_num <= 0 && + thread_call_vars.pending_num > 0 ) _call_thread_wake(); simple_unlock(&thread_call_lock); @@ -996,8 +979,8 @@ call_thread_unblock(void) { simple_lock(&thread_call_lock); - if (++thread_calls.active_num > thread_calls.active_hiwat) - thread_calls.active_hiwat = thread_calls.active_num; + if (++thread_call_vars.active_num > thread_call_vars.active_hiwat) + thread_call_vars.active_hiwat = thread_call_vars.active_num; simple_unlock(&thread_call_lock); } @@ -1023,13 +1006,13 @@ _call_thread_continue(void) self->active_callout = TRUE; - while (thread_calls.pending_num > 0) { + while (thread_call_vars.pending_num > 0) { thread_call_t call; thread_call_func_t func; thread_call_param_t param0, param1; - call = TC(dequeue_head(&pending_call_queue)); - thread_calls.pending_num--; + call = TC(dequeue_head(&thread_call_pending_queue)); + thread_call_vars.pending_num--; func = call->func; param0 = call->param0; @@ -1042,6 +1025,10 @@ _call_thread_continue(void) simple_unlock(&thread_call_lock); (void) spllo(); + KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE, + (int)func, (int)param0, (int)param1, 0, 0); + (*func)(param0, param1); (void)thread_funnel_set(self->funnel_lock, FALSE); @@ -1052,14 +1039,14 @@ _call_thread_continue(void) self->active_callout = FALSE; - if (--thread_calls.active_num < thread_calls.active_lowat) - thread_calls.active_lowat = thread_calls.active_num; + if (--thread_call_vars.active_num < thread_call_vars.active_lowat) + thread_call_vars.active_lowat = thread_call_vars.active_num; - if (thread_calls.idle_thread_num < thread_calls.thread_lowat) { - thread_calls.idle_thread_num++; + if (thread_call_vars.idle_thread_num < thread_call_vars.thread_lowat) { + thread_call_vars.idle_thread_num++; wait_queue_assert_wait( - &call_thread_idle_queue, &call_thread_idle_queue, + &call_thread_waitqueue, &call_thread_waitqueue, THREAD_INTERRUPTIBLE); 
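_call_thread_continue() above services thread_call_pending_queue by dequeuing a call, dropping the lock, and invoking func(param0, param1). A single-threaded sketch of that drain loop with the locking and wakeups elided (the structure layout is modeled on the code above, not taken from it):

#include <stdio.h>

typedef void (*thread_call_func_t)(void *param0, void *param1);

struct thread_call {
	struct thread_call *next;
	thread_call_func_t func;
	void *param0, *param1;
};

static struct thread_call *pending_head, *pending_tail;

static void pending_call_enqueue(struct thread_call *call)
{
	call->next = NULL;
	if (pending_tail)
		pending_tail->next = call;
	else
		pending_head = call;
	pending_tail = call;
}

static void call_thread_drain(void)
{
	while (pending_head) {
		struct thread_call *call = pending_head;

		pending_head = call->next;
		if (!pending_head)
			pending_tail = NULL;
		(*call->func)(call->param0, call->param1);  /* runs unlocked */
	}
}

static void hello(void *p0, void *p1)
{
	printf("%s %s\n", (char *)p0, (char *)p1);
}

int main(void)
{
	struct thread_call a = { NULL, hello, "deferred", "work" };

	pending_call_enqueue(&a);
	call_thread_drain();
	return 0;
}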
simple_unlock(&thread_call_lock); @@ -1069,7 +1056,7 @@ _call_thread_continue(void) /* NOTREACHED */ } - thread_calls.thread_num--; + thread_call_vars.thread_num--; simple_unlock(&thread_call_lock); (void) spllo(); @@ -1082,10 +1069,6 @@ static void _call_thread(void) { - thread_t self = current_thread(); - - stack_privilege(self); - _call_thread_continue(); /* NOTREACHED */ } @@ -1107,21 +1090,20 @@ _activate_thread_continue(void) (void) splsched(); simple_lock(&thread_call_lock); - while ( thread_calls.active_num <= 0 && - thread_calls.pending_num > 0 ) { + while ( thread_call_vars.active_num <= 0 && + thread_call_vars.pending_num > 0 ) { - if (++thread_calls.active_num > thread_calls.active_hiwat) - thread_calls.active_hiwat = thread_calls.active_num; + if (++thread_call_vars.active_num > thread_call_vars.active_hiwat) + thread_call_vars.active_hiwat = thread_call_vars.active_num; - if (++thread_calls.thread_num > thread_calls.thread_hiwat) - thread_calls.thread_hiwat = thread_calls.thread_num; + if (++thread_call_vars.thread_num > thread_call_vars.thread_hiwat) + thread_call_vars.thread_hiwat = thread_call_vars.thread_num; simple_unlock(&thread_call_lock); (void) spllo(); - (void) kernel_thread_with_priority( - kernel_task, MAXPRI_KERNEL - 1, - _call_thread, TRUE, TRUE); + kernel_thread_with_priority(_call_thread, MAXPRI_KERNEL - 1); + (void) splsched(); simple_lock(&thread_call_lock); } @@ -1140,11 +1122,10 @@ static void _activate_thread(void) { - thread_t self = current_thread(); + thread_t self = current_thread(); self->vm_privilege = TRUE; vm_page_free_reserve(2); /* XXX */ - stack_privilege(self); _activate_thread_continue(); /* NOTREACHED */ @@ -1160,16 +1141,16 @@ _delayed_call_timer( uint64_t timestamp; thread_call_t call; boolean_t new_pending = FALSE; - int s; + spl_t s; s = splsched(); simple_lock(&thread_call_lock); clock_get_uptime(×tamp); - call = TC(queue_first(&delayed_call_queue)); + call = TC(queue_first(&thread_call_delayed_queue)); - while (!queue_end(&delayed_call_queue, qe(call))) { + while (!queue_end(&thread_call_delayed_queue, qe(call))) { if (call->deadline <= timestamp) { _delayed_call_dequeue(call); @@ -1179,13 +1160,13 @@ _delayed_call_timer( else break; - call = TC(queue_first(&delayed_call_queue)); + call = TC(queue_first(&thread_call_delayed_queue)); } - if (!queue_end(&delayed_call_queue, qe(call))) + if (!queue_end(&thread_call_delayed_queue, qe(call))) _set_delayed_call_timer(call); - if (new_pending && thread_calls.active_num <= 0) + if (new_pending && thread_call_vars.active_num <= 0) _call_thread_wake(); simple_unlock(&thread_call_lock); diff --git a/osfmk/kern/thread_policy.c b/osfmk/kern/thread_policy.c index cf5cb1f2e..874c0fea4 100644 --- a/osfmk/kern/thread_policy.c +++ b/osfmk/kern/thread_policy.c @@ -31,6 +31,7 @@ * Created. 
*/ +#include #include static void @@ -77,13 +78,24 @@ thread_policy_set( thread_lock(thread); if (!(thread->sched_mode & TH_MODE_FAILSAFE)) { + integer_t oldmode = (thread->sched_mode & TH_MODE_TIMESHARE); + thread->sched_mode &= ~TH_MODE_REALTIME; - if (timeshare) + if (timeshare && !oldmode) { thread->sched_mode |= TH_MODE_TIMESHARE; + + if (thread->state & TH_RUN) + pset_share_incr(thread->processor_set); + } else + if (!timeshare && oldmode) { thread->sched_mode &= ~TH_MODE_TIMESHARE; + if (thread->state & TH_RUN) + pset_share_decr(thread->processor_set); + } + thread_recompute_priority(thread); } else { @@ -111,7 +123,8 @@ thread_policy_set( } info = (thread_time_constraint_policy_t)policy_info; - if ( info->computation > max_rt_quantum || + if ( info->constraint < info->computation || + info->computation > max_rt_quantum || info->computation < min_rt_quantum ) { result = KERN_INVALID_ARGUMENT; break; @@ -126,7 +139,12 @@ thread_policy_set( thread->realtime.preemptible = info->preemptible; if (!(thread->sched_mode & TH_MODE_FAILSAFE)) { - thread->sched_mode &= ~TH_MODE_TIMESHARE; + if (thread->sched_mode & TH_MODE_TIMESHARE) { + thread->sched_mode &= ~TH_MODE_TIMESHARE; + + if (thread->state & TH_RUN) + pset_share_decr(thread->processor_set); + } thread->sched_mode |= TH_MODE_REALTIME; thread_recompute_priority(thread); } @@ -182,7 +200,7 @@ thread_recompute_priority( integer_t priority; if (thread->sched_mode & TH_MODE_REALTIME) - priority = BASEPRI_REALTIME; + priority = BASEPRI_RTQUEUES; else { if (thread->importance > MAXPRI) priority = MAXPRI; diff --git a/osfmk/kern/thread_swap.c b/osfmk/kern/thread_swap.c index 60655208a..8d1983316 100644 --- a/osfmk/kern/thread_swap.c +++ b/osfmk/kern/thread_swap.c @@ -83,9 +83,7 @@ swapin_init(void) { queue_init(&swapin_queue); simple_lock_init(&swapin_lock, ETAP_THREAD_SWAPPER); - kernel_thread_with_priority( - kernel_task, BASEPRI_PREEMPT - 2, - swapin_thread, TRUE, TRUE); + kernel_thread_with_priority(swapin_thread, MINPRI_KERNEL); } /* @@ -154,7 +152,7 @@ thread_doswapin( thread_lock(thread); thread->state &= ~(TH_STACK_HANDOFF | TH_STACK_ALLOC); if (thread->state & TH_RUN) - thread_setrun(thread, HEAD_Q); + thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); thread_unlock(thread); (void) splx(s); } @@ -195,10 +193,6 @@ swapin_thread_continue(void) void swapin_thread(void) { - thread_t self = current_thread(); - - stack_privilege(self); - swapin_thread_continue(); /*NOTREACHED*/ } diff --git a/osfmk/kern/timer_call.c b/osfmk/kern/timer_call.c index fe421d3e0..ce63a9e58 100644 --- a/osfmk/kern/timer_call.c +++ b/osfmk/kern/timer_call.c @@ -40,19 +40,25 @@ #include #include +#ifdef i386 +/* + * Until we arrange for per-cpu timers, use the master cpus queues only. + * Fortunately, the timer_call_lock synchronizes access to all queues. 
+ */ +#undef cpu_number() +#define cpu_number() 0 +#endif /* i386 */ + decl_simple_lock_data(static,timer_call_lock) static queue_head_t - delayed_call_queues[NCPUS]; + timer_call_queues[NCPUS]; static struct { int delayed_num, delayed_hiwat; -} timer_calls; - -static boolean_t - timer_call_initialized = FALSE; +} timer_call_vars; static void timer_call_interrupt( @@ -67,21 +73,16 @@ timer_call_initialize(void) spl_t s; int i; - if (timer_call_initialized) - panic("timer_call_initialize"); - simple_lock_init(&timer_call_lock, ETAP_MISC_TIMER); s = splclock(); simple_lock(&timer_call_lock); for (i = 0; i < NCPUS; i++) - queue_init(&delayed_call_queues[i]); + queue_init(&timer_call_queues[i]); clock_set_timer_func((clock_timer_func_t)timer_call_interrupt); - timer_call_initialized = TRUE; - simple_unlock(&timer_call_lock); splx(s); } @@ -116,8 +117,8 @@ _delayed_call_enqueue( } insque(qe(call), qe(current)); - if (++timer_calls.delayed_num > timer_calls.delayed_hiwat) - timer_calls.delayed_hiwat = timer_calls.delayed_num; + if (++timer_call_vars.delayed_num > timer_call_vars.delayed_hiwat) + timer_call_vars.delayed_hiwat = timer_call_vars.delayed_num; call->state = DELAYED; } @@ -128,7 +129,7 @@ _delayed_call_dequeue( timer_call_t call) { (void)remque(qe(call)); - timer_calls.delayed_num--; + timer_call_vars.delayed_num--; call->state = IDLE; } @@ -147,7 +148,7 @@ timer_call_enter( uint64_t deadline) { boolean_t result = TRUE; - queue_t delayed; + queue_t queue; spl_t s; s = splclock(); @@ -161,11 +162,11 @@ timer_call_enter( call->param1 = 0; call->deadline = deadline; - delayed = &delayed_call_queues[cpu_number()]; + queue = &timer_call_queues[cpu_number()]; - _delayed_call_enqueue(delayed, call); + _delayed_call_enqueue(queue, call); - if (queue_first(delayed) == qe(call)) + if (queue_first(queue) == qe(call)) _set_delayed_call_timer(call); simple_unlock(&timer_call_lock); @@ -181,7 +182,7 @@ timer_call_enter1( uint64_t deadline) { boolean_t result = TRUE; - queue_t delayed; + queue_t queue; spl_t s; s = splclock(); @@ -195,11 +196,11 @@ timer_call_enter1( call->param1 = param1; call->deadline = deadline; - delayed = &delayed_call_queues[cpu_number()]; + queue = &timer_call_queues[cpu_number()]; - _delayed_call_enqueue(delayed, call); + _delayed_call_enqueue(queue, call); - if (queue_first(delayed) == qe(call)) + if (queue_first(queue) == qe(call)) _set_delayed_call_timer(call); simple_unlock(&timer_call_lock); @@ -261,28 +262,28 @@ timer_call_shutdown( processor_t processor) { timer_call_t call; - queue_t delayed, delayed1; + queue_t queue, myqueue; assert(processor != current_processor()); - delayed = &delayed_call_queues[processor->slot_num]; - delayed1 = &delayed_call_queues[cpu_number()]; + queue = &timer_call_queues[processor->slot_num]; + myqueue = &timer_call_queues[cpu_number()]; simple_lock(&timer_call_lock); - call = TC(queue_first(delayed)); + call = TC(queue_first(queue)); - while (!queue_end(delayed, qe(call))) { + while (!queue_end(queue, qe(call))) { _delayed_call_dequeue(call); - _delayed_call_enqueue(delayed1, call); + _delayed_call_enqueue(myqueue, call); - call = TC(queue_first(delayed)); + call = TC(queue_first(queue)); } - call = TC(queue_first(delayed1)); + call = TC(queue_first(myqueue)); - if (!queue_end(delayed1, qe(call))) + if (!queue_end(myqueue, qe(call))) _set_delayed_call_timer(call); simple_unlock(&timer_call_lock); @@ -294,13 +295,13 @@ timer_call_interrupt( uint64_t timestamp) { timer_call_t call; - queue_t delayed = &delayed_call_queues[cpu_number()]; + 
queue_t queue = &timer_call_queues[cpu_number()]; simple_lock(&timer_call_lock); - call = TC(queue_first(delayed)); + call = TC(queue_first(queue)); - while (!queue_end(delayed, qe(call))) { + while (!queue_end(queue, qe(call))) { if (call->deadline <= timestamp) { timer_call_func_t func; timer_call_param_t param0, param1; @@ -320,10 +321,10 @@ timer_call_interrupt( else break; - call = TC(queue_first(delayed)); + call = TC(queue_first(queue)); } - if (!queue_end(delayed, qe(call))) + if (!queue_end(queue, qe(call))) _set_delayed_call_timer(call); simple_unlock(&timer_call_lock); diff --git a/osfmk/kern/wait_queue.c b/osfmk/kern/wait_queue.c index d27ca92a1..48203732a 100644 --- a/osfmk/kern/wait_queue.c +++ b/osfmk/kern/wait_queue.c @@ -411,14 +411,12 @@ wait_queue_link_noalloc( */ s = splsched(); wait_queue_lock(wq); - wqs_lock(wq_set); q = &wq->wq_queue; wq_element = (wait_queue_element_t) queue_first(q); while (!queue_end(q, (queue_entry_t)wq_element)) { WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element); if (wq_element->wqe_type == WAIT_QUEUE_LINK && ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) { - wqs_unlock(wq_set); wait_queue_unlock(wq); splx(s); return KERN_ALREADY_IN_SET; @@ -430,6 +428,7 @@ wait_queue_link_noalloc( /* * Not already a member, so we can add it. */ + wqs_lock(wq_set); WAIT_QUEUE_SET_CHECK(wq_set); @@ -836,6 +835,7 @@ wait_queue_unlink_one( * * Conditions: * The wait queue is assumed locked. + * The waiting thread is assumed locked. * */ __private_extern__ wait_result_t @@ -843,18 +843,18 @@ wait_queue_assert_wait64_locked( wait_queue_t wq, event64_t event, wait_interrupt_t interruptible, - boolean_t unlock) + thread_t thread) { - thread_t thread; wait_result_t wait_result; + if (!wait_queue_assert_possible(thread)) + panic("wait_queue_assert_wait64_locked"); + if (wq->wq_type == _WAIT_QUEUE_SET_inited) { wait_queue_set_t wqs = (wait_queue_set_t)wq; - if (wqs->wqs_isprepost && wqs->wqs_refcount > 0) { - if (unlock) - wait_queue_unlock(wq); + + if (wqs->wqs_isprepost && wqs->wqs_refcount > 0) return(THREAD_AWAKENED); - } } /* @@ -863,8 +863,6 @@ wait_queue_assert_wait64_locked( * the front of the queue. Later, these queues will honor the policy * value set at wait_queue_init time. 
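The wait_queue_assert_wait64_locked() rework above inverts the old lock discipline: rather than the primitive locking the current thread itself and optionally unlocking the queue, the caller now passes in a thread it has already locked and releases both locks afterwards. A hedged sketch of that calling convention, mirroring the rewritten wrappers in the next hunks (the wrapper name here is illustrative, not from the patch):

    /* Caller-side protocol for the reworked primitive; assumes the
     * usual spl/lock helpers from this file. */
    static wait_result_t
    assert_wait_sketch(wait_queue_t wq, event64_t event,
                       wait_interrupt_t interruptible, thread_t cur_thread)
    {
        wait_result_t ret;
        spl_t s;

        s = splsched();
        wait_queue_lock(wq);
        thread_lock(cur_thread);        /* new: caller locks the thread */
        ret = wait_queue_assert_wait64_locked(wq, event,
                                              interruptible, cur_thread);
        thread_unlock(cur_thread);      /* new: caller unlocks both */
        wait_queue_unlock(wq);          /* primitive no longer unlocks */
        splx(s);
        return (ret);
    }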
*/ - thread = current_thread(); - thread_lock(thread); wait_result = thread_mark_wait_locked(thread, interruptible); if (wait_result == THREAD_WAITING) { if (thread->vm_privilege) @@ -874,9 +872,6 @@ wait_queue_assert_wait64_locked( thread->wait_event = event; thread->wait_queue = wq; } - thread_unlock(thread); - if (unlock) - wait_queue_unlock(wq); return(wait_result); } @@ -897,6 +892,7 @@ wait_queue_assert_wait( { spl_t s; wait_result_t ret; + thread_t cur_thread = current_thread(); /* If it is an invalid wait queue, you can't wait on it */ if (!wait_queue_is_valid(wq)) { @@ -906,10 +902,12 @@ wait_queue_assert_wait( s = splsched(); wait_queue_lock(wq); + thread_lock(cur_thread); ret = wait_queue_assert_wait64_locked( wq, (event64_t)((uint32_t)event), - interruptible, TRUE); - /* wait queue unlocked */ + interruptible, cur_thread); + thread_unlock(cur_thread); + wait_queue_unlock(wq); splx(s); return(ret); } @@ -930,6 +928,7 @@ wait_queue_assert_wait64( { spl_t s; wait_result_t ret; + thread_t cur_thread = current_thread(); /* If it is an invalid wait queue, you cant wait on it */ if (!wait_queue_is_valid(wq)) { @@ -939,8 +938,10 @@ wait_queue_assert_wait64( s = splsched(); wait_queue_lock(wq); - ret = wait_queue_assert_wait64_locked(wq, event, interruptible, TRUE); - /* wait queue unlocked */ + thread_lock(cur_thread); + ret = wait_queue_assert_wait64_locked(wq, event, interruptible, cur_thread); + thread_unlock(cur_thread); + wait_queue_unlock(wq); splx(s); return(ret); } diff --git a/osfmk/kern/wait_queue.h b/osfmk/kern/wait_queue.h index 40955cb27..a19c94e24 100644 --- a/osfmk/kern/wait_queue.h +++ b/osfmk/kern/wait_queue.h @@ -172,7 +172,7 @@ __private_extern__ wait_result_t wait_queue_assert_wait64_locked( wait_queue_t wait_queue, event64_t wait_event, wait_interrupt_t interruptible, - boolean_t unlock); + thread_t thread); /* peek to see which thread would be chosen for a wakeup - but keep on queue */ __private_extern__ void wait_queue_peek64_locked( diff --git a/osfmk/kern/zalloc.c b/osfmk/kern/zalloc.c index 81d4df620..d3e27f32e 100644 --- a/osfmk/kern/zalloc.c +++ b/osfmk/kern/zalloc.c @@ -148,6 +148,8 @@ MACRO_END #if ZONE_DEBUG #define zone_debug_enabled(z) z->active_zones.next +#define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y)) +#define ZONE_DEBUG_OFFSET ROUNDUP(sizeof(queue_chain_t),16) #endif /* ZONE_DEBUG */ /* @@ -155,19 +157,11 @@ MACRO_END */ struct zone_page_table_entry { - struct zone_page_table_entry *next; - short in_free_list; + struct zone_page_table_entry *link; short alloc_count; + short collect_count; }; -extern struct zone_page_table_entry * zone_page_table; - -#define lock_zone_page_table() simple_lock(&zone_page_table_lock) -#define unlock_zone_page_table() simple_unlock(&zone_page_table_lock) - -#define zone_page(addr) \ - (&(zone_page_table[(atop(((vm_offset_t)addr) - zone_map_min_address))])) - /* Forwards */ void zone_page_init( vm_offset_t addr, @@ -178,19 +172,12 @@ void zone_page_alloc( vm_offset_t addr, vm_size_t size); -void zone_add_free_page_list( - struct zone_page_table_entry **free_list, - vm_offset_t addr, - vm_size_t size); -void zone_page_dealloc( +void zone_page_free_element( + struct zone_page_table_entry **free_pages, vm_offset_t addr, vm_size_t size); -void zone_page_in_use( - vm_offset_t addr, - vm_size_t size); - -void zone_page_free( +void zone_page_collect( vm_offset_t addr, vm_size_t size); @@ -260,7 +247,6 @@ vm_size_t zalloc_wasted_space; /* * Garbage collection map information */ -decl_simple_lock_data(, zone_page_table_lock) struct 
zone_page_table_entry * zone_page_table; vm_offset_t zone_map_min_address; vm_offset_t zone_map_max_address; @@ -271,9 +257,9 @@ integer_t zone_pages; */ decl_mutex_data(, zone_gc_lock) -#define from_zone_map(addr) \ +#define from_zone_map(addr, size) \ ((vm_offset_t)(addr) >= zone_map_min_address && \ - (vm_offset_t)(addr) < zone_map_max_address) + ((vm_offset_t)(addr) + size -1) < zone_map_max_address) #define ZONE_PAGE_USED 0 #define ZONE_PAGE_UNUSED -1 @@ -326,8 +312,8 @@ zinit( ((size-1) % sizeof(z->free_elements)); if (alloc == 0) alloc = PAGE_SIZE; - alloc = round_page(alloc); - max = round_page(max); + alloc = round_page_32(alloc); + max = round_page_32(max); /* * We look for an allocation size with least fragmentation * in the range of 1 - 5 pages. This size will be used unless @@ -398,14 +384,14 @@ zcram( /* Basic sanity checks */ assert(zone != ZONE_NULL && newmem != (vm_offset_t)0); assert(!zone->collectable || zone->allows_foreign - || (from_zone_map(newmem) && from_zone_map(newmem+size-1))); + || (from_zone_map(newmem, size))); elem_size = zone->elem_size; lock_zone(zone); while (size >= elem_size) { ADD_TO_ZONE(zone, newmem); - if (from_zone_map(newmem)) + if (from_zone_map(newmem, elem_size)) zone_page_alloc(newmem, elem_size); zone->count++; /* compensate for ADD_TO_ZONE */ size -= elem_size; @@ -434,7 +420,7 @@ zget_space( * Add at least one page to allocation area. */ - space_to_add = round_page(size); + space_to_add = round_page_32(size); if (new_space == 0) { kern_return_t retval; @@ -503,7 +489,7 @@ zget_space( void zone_steal_memory(void) { - zdata_size = round_page(128*sizeof(struct zone)); + zdata_size = round_page_32(128*sizeof(struct zone)); zdata = pmap_steal_memory(zdata_size); } @@ -529,7 +515,7 @@ zfill( if (nelem <= 0) return 0; size = nelem * zone->elem_size; - size = round_page(size); + size = round_page_32(size); kr = kmem_alloc_wired(kernel_map, &memory, size); if (kr != KERN_SUCCESS) return 0; @@ -587,20 +573,19 @@ zone_init( FALSE, TRUE, &zone_map); if (retval != KERN_SUCCESS) panic("zone_init: kmem_suballoc failed"); - zone_max = zone_min + round_page(max_zonemap_size); + zone_max = zone_min + round_page_32(max_zonemap_size); /* * Setup garbage collection information: */ - zone_table_size = atop(zone_max - zone_min) * + zone_table_size = atop_32(zone_max - zone_min) * sizeof(struct zone_page_table_entry); if (kmem_alloc_wired(zone_map, (vm_offset_t *) &zone_page_table, zone_table_size) != KERN_SUCCESS) panic("zone_init"); - zone_min = (vm_offset_t)zone_page_table + round_page(zone_table_size); - zone_pages = atop(zone_max - zone_min); + zone_min = (vm_offset_t)zone_page_table + round_page_32(zone_table_size); + zone_pages = atop_32(zone_max - zone_min); zone_map_min_address = zone_min; zone_map_max_address = zone_max; - simple_lock_init(&zone_page_table_lock, ETAP_MISC_ZONE_PTABLE); mutex_init(&zone_gc_lock, ETAP_NO_TRACE); zone_page_init(zone_min, zone_max - zone_min, ZONE_PAGE_UNUSED); } @@ -665,24 +650,31 @@ zalloc_canblock( if (zone->collectable) { vm_offset_t space; vm_size_t alloc_size; - - if (vm_pool_low()) - alloc_size = - round_page(zone->elem_size); - else - alloc_size = zone->alloc_size; - - retval = kernel_memory_allocate(zone_map, - &space, alloc_size, 0, - KMA_KOBJECT|KMA_NOPAGEWAIT); - if (retval == KERN_SUCCESS) { - zone_page_init(space, alloc_size, - ZONE_PAGE_USED); - zcram(zone, space, alloc_size); - } else if (retval != KERN_RESOURCE_SHORTAGE) { - /* would like to cause a zone_gc() */ - - panic("zalloc"); + boolean_t retry = 
FALSE; + + for (;;) { + + if (vm_pool_low() || retry == TRUE) + alloc_size = + round_page_32(zone->elem_size); + else + alloc_size = zone->alloc_size; + + retval = kernel_memory_allocate(zone_map, + &space, alloc_size, 0, + KMA_KOBJECT|KMA_NOPAGEWAIT); + if (retval == KERN_SUCCESS) { + zone_page_init(space, alloc_size, + ZONE_PAGE_USED); + zcram(zone, space, alloc_size); + + break; + } else if (retval != KERN_RESOURCE_SHORTAGE) { + /* would like to cause a zone_gc() */ + if (retry == TRUE) + panic("zalloc"); + retry = TRUE; + } } lock_zone(zone); zone->doing_alloc = FALSE; @@ -720,7 +712,7 @@ zalloc_canblock( zone_page_alloc(space, zone->elem_size); #if ZONE_DEBUG if (zone_debug_enabled(zone)) - space += sizeof(queue_chain_t); + space += ZONE_DEBUG_OFFSET; #endif return(space); } @@ -749,7 +741,7 @@ zalloc_canblock( #if ZONE_DEBUG if (addr && zone_debug_enabled(zone)) { enqueue_tail(&zone->active_zones, (queue_entry_t)addr); - addr += sizeof(queue_chain_t); + addr += ZONE_DEBUG_OFFSET; } #endif @@ -810,7 +802,7 @@ zget( #if ZONE_DEBUG if (addr && zone_debug_enabled(zone)) { enqueue_tail(&zone->active_zones, (queue_entry_t)addr); - addr += sizeof(queue_chain_t); + addr += ZONE_DEBUG_OFFSET; } #endif /* ZONE_DEBUG */ unlock_zone(zone); @@ -820,7 +812,10 @@ zget( /* Keep this FALSE by default. Large memory machine run orders of magnitude slower in debug mode when true. Use debugger to enable if needed */ -boolean_t zone_check = FALSE; +/* static */ boolean_t zone_check = FALSE; + +static zone_t zone_last_bogus_zone = ZONE_NULL; +static vm_offset_t zone_last_bogus_elem = 0; void zfree( @@ -835,17 +830,25 @@ zfree( /* zone_gc assumes zones are never freed */ if (zone == zone_zone) panic("zfree: freeing to zone_zone breaks zone_gc!"); +#endif + if (zone->collectable && !zone->allows_foreign && - (!from_zone_map(elem) || !from_zone_map(elem+zone->elem_size-1))) + !from_zone_map(elem, zone->elem_size)) { +#if MACH_ASSERT panic("zfree: non-allocated memory in collectable zone!"); +#else + zone_last_bogus_zone = zone; + zone_last_bogus_elem = elem; + return; #endif + } lock_zone(zone); #if ZONE_DEBUG if (zone_debug_enabled(zone)) { queue_t tmp_elem; - elem -= sizeof(queue_chain_t); + elem -= ZONE_DEBUG_OFFSET; if (zone_check) { /* check the zone's consistency */ @@ -962,62 +965,28 @@ zprealloc( /* * Zone garbage collection subroutines - * - * These routines have in common the modification of entries in the - * zone_page_table. The latter contains one entry for every page - * in the zone_map. - * - * For each page table entry in the given range: - * - * zone_page_collectable - test if one (in_free_list == alloc_count) - * zone_page_keep - reset in_free_list - * zone_page_in_use - decrements in_free_list - * zone_page_free - increments in_free_list - * zone_page_init - initializes in_free_list and alloc_count - * zone_page_alloc - increments alloc_count - * zone_page_dealloc - decrements alloc_count - * zone_add_free_page_list - adds the page to the free list - * - * Two counts are maintained for each page, the in_free_list count and - * alloc_count. The alloc_count is how many zone elements have been - * allocated from a page. (Note that the page could contain elements - * that span page boundaries. The count includes these elements so - * one element may be counted in two pages.) In_free_list is a count - * of how many zone elements are currently free. If in_free_list is - * equal to alloc_count then the page is eligible for garbage - * collection. 
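The comment being deleted here described the old in_free_list accounting; the replacement scheme, introduced by the struct change earlier in this patch, tracks a per-pass collect_count instead. A compilable restatement, for orientation only (the helper function is illustrative and not part of the patch):

    /* Illustrative restatement: alloc_count counts elements carved from
     * a page, collect_count counts free elements found on it during the
     * current zone_gc() pass.  The struct mirrors the one added above. */
    struct zone_page_table_entry {
        struct zone_page_table_entry *link; /* free-page list linkage */
        short alloc_count;                  /* elements allocated from page */
        short collect_count;                /* free elements seen this pass */
    };

    static int
    page_is_reclaimable(const struct zone_page_table_entry *zp)
    {
        /* every element carved from the page is free, so the whole
         * page can be handed back to the VM system */
        return (zp->collect_count == zp->alloc_count);
    }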
- * - * Alloc_count and in_free_list are initialized to the correct values - * for a particular zone when a page is zcram'ed into a zone. Subsequent - * gets and frees of zone elements will call zone_page_in_use and - * zone_page_free which modify the in_free_list count. When the zones - * garbage collector runs it will walk through a zones free element list, - * remove the elements that reside on collectable pages, and use - * zone_add_free_page_list to create a list of pages to be collected. */ + boolean_t zone_page_collectable( vm_offset_t addr, vm_size_t size) { + struct zone_page_table_entry *zp; natural_t i, j; #if MACH_ASSERT - if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) + if (!from_zone_map(addr, size)) panic("zone_page_collectable"); #endif - i = atop(addr-zone_map_min_address); - j = atop((addr+size-1) - zone_map_min_address); - lock_zone_page_table(); - for (; i <= j; i++) { - if (zone_page_table[i].in_free_list == - zone_page_table[i].alloc_count) { - unlock_zone_page_table(); + i = atop_32(addr-zone_map_min_address); + j = atop_32((addr+size-1) - zone_map_min_address); + + for (zp = zone_page_table + i; i <= j; zp++, i++) + if (zp->collect_count == zp->alloc_count) return (TRUE); - } - } - unlock_zone_page_table(); + return (FALSE); } @@ -1026,64 +995,39 @@ zone_page_keep( vm_offset_t addr, vm_size_t size) { + struct zone_page_table_entry *zp; natural_t i, j; #if MACH_ASSERT - if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) + if (!from_zone_map(addr, size)) panic("zone_page_keep"); #endif - i = atop(addr-zone_map_min_address); - j = atop((addr+size-1) - zone_map_min_address); - lock_zone_page_table(); - for (; i <= j; i++) { - zone_page_table[i].in_free_list = 0; - } - unlock_zone_page_table(); -} - -void -zone_page_in_use( - vm_offset_t addr, - vm_size_t size) -{ - natural_t i, j; - -#if MACH_ASSERT - if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) - panic("zone_page_in_use"); -#endif + i = atop_32(addr-zone_map_min_address); + j = atop_32((addr+size-1) - zone_map_min_address); - i = atop(addr-zone_map_min_address); - j = atop((addr+size-1) - zone_map_min_address); - lock_zone_page_table(); - for (; i <= j; i++) { - if (zone_page_table[i].in_free_list > 0) - zone_page_table[i].in_free_list--; - } - unlock_zone_page_table(); + for (zp = zone_page_table + i; i <= j; zp++, i++) + zp->collect_count = 0; } void -zone_page_free( +zone_page_collect( vm_offset_t addr, vm_size_t size) { + struct zone_page_table_entry *zp; natural_t i, j; #if MACH_ASSERT - if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) - panic("zone_page_free"); + if (!from_zone_map(addr, size)) + panic("zone_page_collect"); #endif - i = atop(addr-zone_map_min_address); - j = atop((addr+size-1) - zone_map_min_address); - lock_zone_page_table(); - for (; i <= j; i++) { - assert(zone_page_table[i].in_free_list >= 0); - zone_page_table[i].in_free_list++; - } - unlock_zone_page_table(); + i = atop_32(addr-zone_map_min_address); + j = atop_32((addr+size-1) - zone_map_min_address); + + for (zp = zone_page_table + i; i <= j; zp++, i++) + ++zp->collect_count; } void @@ -1092,21 +1036,21 @@ zone_page_init( vm_size_t size, int value) { + struct zone_page_table_entry *zp; natural_t i, j; #if MACH_ASSERT - if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) + if (!from_zone_map(addr, size)) panic("zone_page_init"); #endif - i = atop(addr-zone_map_min_address); - j = atop((addr+size-1) - zone_map_min_address); - lock_zone_page_table(); - for (; i <= j; i++) { - 
zone_page_table[i].alloc_count = value; - zone_page_table[i].in_free_list = 0; + i = atop_32(addr-zone_map_min_address); + j = atop_32((addr+size-1) - zone_map_min_address); + + for (zp = zone_page_table + i; i <= j; zp++, i++) { + zp->alloc_count = value; + zp->collect_count = 0; } - unlock_zone_page_table(); } void @@ -1114,85 +1058,73 @@ zone_page_alloc( vm_offset_t addr, vm_size_t size) { + struct zone_page_table_entry *zp; natural_t i, j; #if MACH_ASSERT - if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) + if (!from_zone_map(addr, size)) panic("zone_page_alloc"); #endif - i = atop(addr-zone_map_min_address); - j = atop((addr+size-1) - zone_map_min_address); - lock_zone_page_table(); - for (; i <= j; i++) { - /* Set alloc_count to (ZONE_PAGE_USED + 1) if + i = atop_32(addr-zone_map_min_address); + j = atop_32((addr+size-1) - zone_map_min_address); + + for (zp = zone_page_table + i; i <= j; zp++, i++) { + /* + * Set alloc_count to (ZONE_PAGE_USED + 1) if * it was previously set to ZONE_PAGE_UNUSED. */ - if (zone_page_table[i].alloc_count == ZONE_PAGE_UNUSED) { - zone_page_table[i].alloc_count = 1; - } else { - zone_page_table[i].alloc_count++; - } + if (zp->alloc_count == ZONE_PAGE_UNUSED) + zp->alloc_count = 1; + else + ++zp->alloc_count; } - unlock_zone_page_table(); } void -zone_page_dealloc( +zone_page_free_element( + struct zone_page_table_entry **free_pages, vm_offset_t addr, vm_size_t size) { + struct zone_page_table_entry *zp; natural_t i, j; #if MACH_ASSERT - if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) - panic("zone_page_dealloc"); + if (!from_zone_map(addr, size)) + panic("zone_page_free_element"); #endif - i = atop(addr-zone_map_min_address); - j = atop((addr+size-1) - zone_map_min_address); - lock_zone_page_table(); - for (; i <= j; i++) { - zone_page_table[i].alloc_count--; - } - unlock_zone_page_table(); -} - -void -zone_add_free_page_list( - struct zone_page_table_entry **free_list, - vm_offset_t addr, - vm_size_t size) -{ - natural_t i, j; + i = atop_32(addr-zone_map_min_address); + j = atop_32((addr+size-1) - zone_map_min_address); -#if MACH_ASSERT - if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) - panic("zone_add_free_page_list"); -#endif + for (zp = zone_page_table + i; i <= j; zp++, i++) { + if (zp->collect_count > 0) + --zp->collect_count; + if (--zp->alloc_count == 0) { + zp->alloc_count = ZONE_PAGE_UNUSED; + zp->collect_count = 0; - i = atop(addr-zone_map_min_address); - j = atop((addr+size-1) - zone_map_min_address); - lock_zone_page_table(); - for (; i <= j; i++) { - if (zone_page_table[i].alloc_count == 0) { - zone_page_table[i].next = *free_list; - *free_list = &zone_page_table[i]; - zone_page_table[i].alloc_count = ZONE_PAGE_UNUSED; - zone_page_table[i].in_free_list = 0; + zp->link = *free_pages; + *free_pages = zp; } } - unlock_zone_page_table(); } /* This is used for walking through a zone's free element list. 
*/ -struct zone_free_entry { - struct zone_free_entry * next; +struct zone_free_element { + struct zone_free_element * next; }; -int reclaim_page_count = 0; +struct { + uint32_t pgs_freed; + + uint32_t elems_collected, + elems_freed, + elems_kept; +} zgc_stats; /* Zone garbage collection * @@ -1205,35 +1137,28 @@ void zone_gc(void) { unsigned int max_zones; - zone_t z; + zone_t z; unsigned int i; - struct zone_page_table_entry *freep; - struct zone_page_table_entry *zone_free_page_list; + struct zone_page_table_entry *zp, *zone_free_pages; mutex_lock(&zone_gc_lock); - /* - * Note that this scheme of locking only to walk the zone list - * assumes that zones are never freed (checked by zfree) - */ simple_lock(&all_zones_lock); max_zones = num_zones; z = first_zone; simple_unlock(&all_zones_lock); #if MACH_ASSERT - lock_zone_page_table(); for (i = 0; i < zone_pages; i++) - assert(zone_page_table[i].in_free_list == 0); - unlock_zone_page_table(); + assert(zone_page_table[i].collect_count == 0); #endif /* MACH_ASSERT */ - zone_free_page_list = (struct zone_page_table_entry *) 0; + zone_free_pages = NULL; for (i = 0; i < max_zones; i++, z = z->next_zone) { - struct zone_free_entry * prev; - struct zone_free_entry * elt; - struct zone_free_entry * end; + unsigned int n; + vm_size_t elt_size, size_freed; + struct zone_free_element *elt, *prev, *scan, *keep, *tail; assert(z != ZONE_NULL); @@ -1242,82 +1167,170 @@ zone_gc(void) lock_zone(z); + elt_size = z->elem_size; + /* * Do a quick feasability check before we scan the zone: * skip unless there is likelihood of getting 1+ pages back. */ - if ((z->cur_size - z->count * z->elem_size) <= (2*PAGE_SIZE)){ + if (z->cur_size - z->count * elt_size <= 2 * PAGE_SIZE){ unlock_zone(z); continue; } - /* Count the free elements in each page. This loop - * requires that all in_free_list entries are zero. - * - * Exit the loop early if we need to hurry up and drop - * the lock to allow preemption - but we must fully process - * all elements we looked at so far. + /* + * Snatch all of the free elements away from the zone. */ - elt = (struct zone_free_entry *)(z->free_elements); - while (!ast_urgency() && (elt != (struct zone_free_entry *)0)) { - if (from_zone_map(elt)) - zone_page_free((vm_offset_t)elt, z->elem_size); - elt = elt->next; - } - end = elt; - /* Now determine which elements should be removed - * from the free list and, after all the elements - * on a page have been removed, add the element's - * page to a list of pages to be freed. + scan = (void *)z->free_elements; + (void *)z->free_elements = NULL; + + unlock_zone(z); + + /* + * Pass 1: + * + * Determine which elements we can attempt to collect + * and count them up in the page table. Foreign elements + * are returned to the zone. 
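Before the pass-by-pass code below, a condensed sketch of the rewritten collector may help. This is a hedged summary of the logic in the surrounding hunks; the batching that dribbles kept elements back to the zone every 50 items, the zgc_stats accounting, and the foreign-element handling are deliberately omitted:

    /* Sketch only: the two-pass shape of the new zone_gc(), using the
     * helpers introduced in this patch. */
    static void
    zone_gc_sketch(zone_t z, struct zone_page_table_entry **free_pages)
    {
        vm_size_t elt_size = z->elem_size;
        struct zone_free_element *scan, *elt, *next;

        lock_zone(z);
        scan = (void *)z->free_elements;   /* snatch the whole free list */
        z->free_elements = 0;
        unlock_zone(z);

        /* Pass 1: count each in-map free element against its page(s) */
        for (elt = scan; elt != NULL; elt = elt->next)
            if (from_zone_map(elt, elt_size))
                zone_page_collect((vm_offset_t)elt, elt_size);

        /* Pass 2: a page whose collect_count has reached alloc_count
         * holds nothing but free elements -- release them and queue
         * the page; otherwise reset it for the next collection. */
        for (elt = scan; elt != NULL; elt = next) {
            next = elt->next;
            if (zone_page_collectable((vm_offset_t)elt, elt_size))
                zone_page_free_element(free_pages,
                                       (vm_offset_t)elt, elt_size);
            else
                zone_page_keep((vm_offset_t)elt, elt_size);
        }
    }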
*/ - prev = elt = (struct zone_free_entry *)(z->free_elements); - while (elt != end) { - if (!from_zone_map(elt)) { + + prev = (void *)&scan; + elt = scan; + n = 0; tail = keep = NULL; + while (elt != NULL) { + if (from_zone_map(elt, elt_size)) { + zone_page_collect((vm_offset_t)elt, elt_size); + prev = elt; elt = elt->next; - continue; + + ++zgc_stats.elems_collected; } - if (zone_page_collectable((vm_offset_t)elt, - z->elem_size)) { - z->cur_size -= z->elem_size; - zone_page_in_use((vm_offset_t)elt, - z->elem_size); - zone_page_dealloc((vm_offset_t)elt, - z->elem_size); - zone_add_free_page_list(&zone_free_page_list, - (vm_offset_t)elt, - z->elem_size); - if (elt == prev) { - elt = elt->next; - z->free_elements =(vm_offset_t)elt; - prev = elt; - } else { - prev->next = elt->next; - elt = elt->next; - } - } else { - /* This element is not eligible for collection - * so clear in_free_list in preparation for a - * subsequent garbage collection pass. - */ - zone_page_keep((vm_offset_t)elt, z->elem_size); - prev = elt; - elt = elt->next; + else { + if (keep == NULL) + keep = tail = elt; + else + tail = tail->next = elt; + + elt = prev->next = elt->next; + tail->next = NULL; } - } /* end while(elt != end) */ - unlock_zone(z); + /* + * Dribble back the elements we are keeping. + */ + + if (++n >= 50 && keep != NULL) { + lock_zone(z); + + tail->next = (void *)z->free_elements; + (void *)z->free_elements = keep; + + unlock_zone(z); + + n = 0; tail = keep = NULL; + } + } + + /* + * Return any remaining elements. + */ + + if (keep != NULL) { + lock_zone(z); + + tail->next = (void *)z->free_elements; + (void *)z->free_elements = keep; + + unlock_zone(z); + } + + /* + * Pass 2: + * + * Determine which pages we can reclaim and + * free those elements. + */ + + size_freed = 0; + prev = (void *)&scan; + elt = scan; + n = 0; tail = keep = NULL; + while (elt != NULL) { + if (zone_page_collectable((vm_offset_t)elt, elt_size)) { + size_freed += elt_size; + zone_page_free_element(&zone_free_pages, + (vm_offset_t)elt, elt_size); + + elt = prev->next = elt->next; + + ++zgc_stats.elems_freed; + } + else { + zone_page_keep((vm_offset_t)elt, elt_size); + + if (keep == NULL) + keep = tail = elt; + else + tail = tail->next = elt; + + elt = prev->next = elt->next; + tail->next = NULL; + + ++zgc_stats.elems_kept; + } + + /* + * Dribble back the elements we are keeping, + * and update the zone size info. + */ + + if (++n >= 50 && keep != NULL) { + lock_zone(z); + + z->cur_size -= size_freed; + size_freed = 0; + + tail->next = (void *)z->free_elements; + (void *)z->free_elements = keep; + + unlock_zone(z); + + n = 0; tail = keep = NULL; + } + } + + /* + * Return any remaining elements, and update + * the zone size info. + */ + + if (size_freed > 0 || keep != NULL) { + lock_zone(z); + + z->cur_size -= size_freed; + + if (keep != NULL) { + tail->next = (void *)z->free_elements; + (void *)z->free_elements = keep; + } + + unlock_zone(z); + } } - for (freep = zone_free_page_list; freep != 0; freep = freep->next) { - vm_offset_t free_addr; + /* + * Reclaim the pages we are freeing. 
+ */ - free_addr = zone_map_min_address + - PAGE_SIZE * (freep - zone_page_table); - kmem_free(zone_map, free_addr, PAGE_SIZE); - reclaim_page_count++; + while ((zp = zone_free_pages) != NULL) { + zone_free_pages = zp->link; + kmem_free(zone_map, zone_map_min_address + PAGE_SIZE * + (zp - zone_page_table), PAGE_SIZE); + ++zgc_stats.pgs_freed; } + mutex_unlock(&zone_gc_lock); } @@ -1332,11 +1345,11 @@ consider_zone_gc(void) { /* * By default, don't attempt zone GC more frequently - * than once a second. + * than once / 2 seconds. */ if (zone_gc_max_rate == 0) - zone_gc_max_rate = (1 << SCHED_TICK_SHIFT) + 1; + zone_gc_max_rate = (2 << SCHED_TICK_SHIFT) + 1; if (zone_gc_allowed && ((sched_tick > (zone_gc_last_tick + zone_gc_max_rate)) || @@ -1398,7 +1411,7 @@ host_zone_info( names = *namesp; } else { - names_size = round_page(max_zones * sizeof *names); + names_size = round_page_32(max_zones * sizeof *names); kr = kmem_alloc_pageable(ipc_kernel_map, &names_addr, names_size); if (kr != KERN_SUCCESS) @@ -1411,7 +1424,7 @@ host_zone_info( info = *infop; } else { - info_size = round_page(max_zones * sizeof *info); + info_size = round_page_32(max_zones * sizeof *info); kr = kmem_alloc_pageable(ipc_kernel_map, &info_addr, info_size); if (kr != KERN_SUCCESS) { @@ -1611,8 +1624,7 @@ db_show_all_zones( } } db_printf("\nTotal %8x", total); - db_printf("\n\nzone_gc() has reclaimed %d pages\n", - reclaim_page_count); + db_printf("\n\nzone_gc() has reclaimed %d pages\n", zgc_stats.pgs_freed); } #if ZONE_DEBUG @@ -1734,11 +1746,11 @@ next_element( { if (!zone_debug_enabled(z)) return(0); - elt -= sizeof(queue_chain_t); + elt -= ZONE_DEBUG_OFFSET; elt = (vm_offset_t) queue_next((queue_t) elt); if ((queue_t) elt == &z->active_zones) return(0); - elt += sizeof(queue_chain_t); + elt += ZONE_DEBUG_OFFSET; return(elt); } @@ -1753,7 +1765,7 @@ first_element( if (queue_empty(&z->active_zones)) return(0); elt = (vm_offset_t) queue_first(&z->active_zones); - elt += sizeof(queue_chain_t); + elt += ZONE_DEBUG_OFFSET; return(elt); } @@ -1794,10 +1806,10 @@ zone_debug_enable( zone_t z) { if (zone_debug_enabled(z) || zone_in_use(z) || - z->alloc_size < (z->elem_size + sizeof(queue_chain_t))) + z->alloc_size < (z->elem_size + ZONE_DEBUG_OFFSET)) return; queue_init(&z->active_zones); - z->elem_size += sizeof(queue_chain_t); + z->elem_size += ZONE_DEBUG_OFFSET; } void @@ -1806,7 +1818,7 @@ zone_debug_disable( { if (!zone_debug_enabled(z) || zone_in_use(z)) return; - z->elem_size -= sizeof(queue_chain_t); + z->elem_size -= ZONE_DEBUG_OFFSET; z->active_zones.next = z->active_zones.prev = 0; } #endif /* ZONE_DEBUG */ diff --git a/osfmk/kern/zalloc.h b/osfmk/kern/zalloc.h index 659efb92e..045c1523a 100644 --- a/osfmk/kern/zalloc.h +++ b/osfmk/kern/zalloc.h @@ -213,7 +213,7 @@ extern void zone_debug_disable( #endif /* ZONE_DEBUG */ -#endif MACH_KERNEL_PRIVATE +#endif /* MACH_KERNEL_PRIVATE */ #endif /* __APPLE_API_PRIVATE */ diff --git a/osfmk/mach/Makefile b/osfmk/mach/Makefile index 147a0c962..0cda8846f 100644 --- a/osfmk/mach/Makefile +++ b/osfmk/mach/Makefile @@ -35,6 +35,7 @@ MIG_DEFS = \ clock_priv.defs \ clock_reply.defs \ exc.defs \ + host_notify_reply.defs \ host_priv.defs \ host_security.defs \ ledger.defs \ @@ -50,6 +51,7 @@ MIG_DEFS = \ upl.defs MACH_PRIVATE_DEFS = \ + mach_notify.defs \ memory_object.defs \ memory_object_control.defs \ memory_object_default.defs \ @@ -94,8 +96,10 @@ DATAFILES = \ error.h \ exception.h \ exception_types.h \ + host_notify.h \ host_info.h \ host_reboot.h \ + host_special_ports.h \ 
kern_return.h \ kmod.h \ mach_param.h \ @@ -200,6 +204,8 @@ MIGKUFLAGS = -DMACH_KERNEL_PRIVATE -DKERNEL_USER=1 -maxonstack 1024 MIG_KUHDRS = \ clock_reply.h \ exc.h \ + host_notify_reply.h \ + mach_notify.h \ memory_object.h \ memory_object_control.h \ memory_object_default.h \ @@ -209,6 +215,8 @@ MIG_KUHDRS = \ MIG_KUSRC = \ clock_reply_user.c \ exc_user.c \ + host_notify_reply_user.c \ + mach_notify_user.c \ memory_object_user.c \ memory_object_control_user.c \ memory_object_default_user.c \ @@ -224,12 +232,12 @@ MIG_KSHDRS = \ ledger_server.h \ lock_set_server.h \ mach_host_server.h \ + mach_notify_server.h \ mach_port_server.h \ memory_object_server.h \ memory_object_control_server.h \ memory_object_default_server.h \ memory_object_name_server.h \ - notify_server.h \ processor_server.h \ processor_set_server.h \ semaphore_server.h \ @@ -247,12 +255,12 @@ MIG_KSSRC = \ ledger_server.c \ lock_set_server.c \ mach_host_server.c \ + mach_notify_server.c \ mach_port_server.c \ memory_object_server.c \ memory_object_control_server.c \ memory_object_default_server.c \ memory_object_name_server.c \ - notify_server.c \ processor_server.c \ processor_set_server.c \ semaphore_server.c \ @@ -302,7 +310,6 @@ ${MIG_KSSRC}: \ -server $*_server.c \ -sheader $*_server.h \ $< - include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/osfmk/mach/host_notify.h b/osfmk/mach/host_notify.h new file mode 100644 index 000000000..3b2a8e77d --- /dev/null +++ b/osfmk/mach/host_notify.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 14 January 2003 (debo) + * Created. + */ + +#ifndef _MACH_HOST_NOTIFY_H_ +#define _MACH_HOST_NOTIFY_H_ + +#define HOST_NOTIFY_CALENDAR_CHANGE 0 +#define HOST_NOTIFY_TYPE_MAX 0 + +#define HOST_CALENDAR_CHANGED_REPLYID 950 + +#endif /* _MACH_HOST_NOTIFY_H_ */ diff --git a/osfmk/mach/host_notify_reply.defs b/osfmk/mach/host_notify_reply.defs new file mode 100644 index 000000000..1f12f55a4 --- /dev/null +++ b/osfmk/mach/host_notify_reply.defs @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 23 January 2003 (debo) + * Created. + */ + +subsystem +#if KERNEL_USER + KernelUser +#endif /* KERNEL_USER */ + host_notify_reply 950; + +#include + +simpleroutine host_calendar_changed( + notify_port : mach_port_move_send_once_t); diff --git a/osfmk/mach/host_priv.defs b/osfmk/mach/host_priv.defs index 48f5be860..dece96202 100644 --- a/osfmk/mach/host_priv.defs +++ b/osfmk/mach/host_priv.defs @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -170,7 +170,7 @@ routine host_processors( /* - * Get control port for a processor set. + * Get control port for a system-wide clock. * Privileged. */ routine host_get_clock_control( @@ -201,7 +201,7 @@ routine kmod_control( /* * Get a given special port for a given node. - * Norma special ports are defined in norma_special_ports.h; + * Special ports are defined in host_special_ports.h; * examples include the master device port. * There are a limited number of slots available for system servers. */ routine host_get_special_port( @@ -212,8 +212,8 @@ routine host_get_special_port( out port : mach_port_t); /* - * Set a given special port for a given node. - * See norma_get_special_port. + * Set a given special port for the local node. + * See host_get_special_port. */ routine host_set_special_port( host_priv : host_priv_t; @@ -301,6 +301,12 @@ routine host_processor_set_priv( host_priv : host_priv_t; set_name : processor_set_name_t; out set : processor_set_t); + +/************************** Warning *************************************/ +/* The following routines are going away in a future release */ +/* use the appropriate variant of host_set_special_port instead */ +/************************************************************************/ + /* * Set the dynamic_pager control port.
Other entities * can request a send right to this port to talk with @@ -343,4 +349,3 @@ routine host_set_UNDServer( routine host_get_UNDServer( host : host_priv_t; out server : UNDServerRef); - diff --git a/osfmk/mach/host_security.defs b/osfmk/mach/host_security.defs index c15f134c6..f884ab02b 100644 --- a/osfmk/mach/host_security.defs +++ b/osfmk/mach/host_security.defs @@ -79,6 +79,7 @@ routine host_security_create_task_token( host_security : host_security_t; parent_task : task_t; sec_token : security_token_t; + audit_token : audit_token_t; host : host_t; ledgers : ledger_array_t; inherit_memory : boolean_t; @@ -91,5 +92,6 @@ routine host_security_set_task_token( host_security : host_security_t; target_task : task_t; sec_token : security_token_t; + audit_token : audit_token_t; host : host_t); diff --git a/osfmk/mach/host_special_ports.h b/osfmk/mach/host_special_ports.h new file mode 100644 index 000000000..2d4493ea2 --- /dev/null +++ b/osfmk/mach/host_special_ports.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/host_special_ports.h + * + * Defines codes for access to host-wide special ports. + */ + +#ifndef _MACH_HOST_SPECIAL_PORTS_H_ +#define _MACH_HOST_SPECIAL_PORTS_H_ + +/* + * Cannot be set or gotten from user space + */ +#define HOST_SECURITY_PORT 0 + +/* + * Always provided by kernel (cannot be set from user-space). 
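The slot layout that follows reserves a low block for kernel-provided ports and numbers user-settable ports after it. Worked out explicitly, as an editorial example that assumes the values stay as defined below:

    /* Not part of the patch -- just the arithmetic spelled out:
     *   kernel slots:  HOST_PORT == 1, HOST_PRIV_PORT == 2,
     *                  HOST_IO_MASTER_PORT == 3, slots 4..7 held in
     *                  reserve (HOST_MAX_SPECIAL_KERNEL_PORT == 7)
     *   user slots:    HOST_DYNAMIC_PAGER_PORT     == 1 + 7 == 8
     *                  HOST_AUDIT_CONTROL_PORT     == 2 + 7 == 9
     *                  HOST_USER_NOTIFICATION_PORT == 3 + 7 == 10
     *   ceiling:       HOST_MAX_SPECIAL_PORT       == 8 + 7 == 15,
     *                  leaving slots 11..15 for future user ports. */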
+ */ +#define HOST_PORT 1 +#define HOST_PRIV_PORT 2 +#define HOST_IO_MASTER_PORT 3 +#define HOST_MAX_SPECIAL_KERNEL_PORT 7 /* room to grow */ + +/* + * Not provided by kernel + */ +#define HOST_DYNAMIC_PAGER_PORT (1 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_AUDIT_CONTROL_PORT (2 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_USER_NOTIFICATION_PORT (3 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_MAX_SPECIAL_PORT (8 + HOST_MAX_SPECIAL_KERNEL_PORT) + /* room to grow here as well */ + +/* + * Special node identifier to always represent the local node. + */ +#define HOST_LOCAL_NODE -1 + +/* + * Definitions for ease of use. + * + * In the get call, the host parameter can be any host, but will generally + * be the local node host port. In the set call, the host must be the per-node + * host port for the node being affected. + */ +#define host_get_host_port(host, port) \ + (host_get_special_port((host), \ + HOST_LOCAL_NODE, HOST_PORT, (port))) +#define host_set_host_port(host, port) (KERN_INVALID_ARGUMENT) + +#define host_get_host_priv_port(host, port) \ + (host_get_special_port((host), \ + HOST_LOCAL_NODE, HOST_PRIV_PORT, (port))) +#define host_set_host_priv_port(host, port) (KERN_INVALID_ARGUMENT) + +#define host_get_io_master_port(host, port) \ + (host_get_special_port((host), \ + HOST_LOCAL_NODE, HOST_IO_MASTER_PORT, (port))) +#define host_set_io_master_port(host, port) (KERN_INVALID_ARGUMENT) + +/* + * User-settable special ports. + */ +#define host_get_dynamic_pager_port(host, port) \ + (host_get_special_port((host), \ + HOST_LOCAL_NODE, HOST_DYNAMIC_PAGER_PORT, (port))) +#define host_set_dynamic_pager_port(host, port) \ + (host_set_special_port((host), HOST_DYNAMIC_PAGER_PORT, (port))) + +#define host_get_audit_control_port(host, port) \ + (host_get_special_port((host), \ + HOST_LOCAL_NODE, HOST_AUDIT_CONTROL_PORT, (port))) +#define host_set_audit_control_port(host, port) \ + (host_set_special_port((host), HOST_AUDIT_CONTROL_PORT, (port))) + +#define host_get_user_notification_port(host, port) \ + (host_get_special_port((host), \ + HOST_LOCAL_NODE, HOST_USER_NOTIFICATION_PORT, (port))) +#define host_set_user_notification_port(host, port) \ + (host_set_special_port((host), HOST_USER_NOTIFICATION_PORT, (port))) + + +#endif /* _MACH_HOST_SPECIAL_PORTS_H_ */ diff --git a/osfmk/mach/i386/fp_reg.h b/osfmk/mach/i386/fp_reg.h index 4357deb73..169f069f0 100644 --- a/osfmk/mach/i386/fp_reg.h +++ b/osfmk/mach/i386/fp_reg.h @@ -129,6 +129,27 @@ struct i386_fp_regs { /* space for 8 80-bit FP registers */ }; +/* note when allocating this data structure, it must be 16 byte aligned.
*/ +struct i386_fx_save { + unsigned short fx_control; /* control */ + unsigned short fx_status; /* status */ + unsigned char fx_tag; /* register tags */ + unsigned char fx_bbz1; /* better be zero when calling fxrtstor */ + unsigned short fx_opcode; + unsigned int fx_eip; /* eip instruction */ + unsigned short fx_cs; /* cs instruction */ + unsigned short fx_bbz2; /* better be zero when calling fxrtstor */ + unsigned int fx_dp; /* data address */ + unsigned short fx_ds; /* data segment */ + unsigned short fx_bbz3; /* better be zero when calling fxrtstor */ + unsigned int fx_MXCSR; + unsigned int fx_MXCSR_MASK; + unsigned short fx_reg_word[8][8]; /* STx/MMx registers */ + unsigned short fx_XMM_reg[8][8]; /* XMM0-XMM7 */ + unsigned char fx_reserved[16*14]; /* reserved by intel for future expansion */ +}; + + /* * Control register */ @@ -183,5 +204,6 @@ struct i386_fp_regs { #define FP_SOFT 1 /* software FP emulator */ #define FP_287 2 /* 80287 */ #define FP_387 3 /* 80387 or 80486 */ +#define FP_FXSR 4 /* Fast save/restore SIMD Extension */ #endif /* _I386_FP_SAVE_H_ */ diff --git a/osfmk/mach/i386/machine_types.defs b/osfmk/mach/i386/machine_types.defs index 9ec567bd5..f0955f70b 100644 --- a/osfmk/mach/i386/machine_types.defs +++ b/osfmk/mach/i386/machine_types.defs @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -54,8 +54,8 @@ type double = MACH_MSG_TYPE_REAL_64; * a port in user space as an integer and * in kernel space as a pointer. */ -type uintptr_t = MACH_MSG_TYPE_INTEGER_32; -type intptr_t = MACH_MSG_TYPE_INTEGER_32; +type uintptr_t = uint32_t; +type intptr_t = int32_t; /* * These are the legacy Mach types that are @@ -71,7 +71,7 @@ type natural_t = uint32_t; /* * For the old IPC interface */ -#define MSG_TYPE_PORT_NAME MACH_MSG_TYPE_INTEGER_32 +#define MSG_TYPE_PORT_NAME natural_t #endif /* MACH_IPC_COMPAT */ diff --git a/osfmk/mach/i386/thread_state.h b/osfmk/mach/i386/thread_state.h index 955f69212..a59da2ce1 100644 --- a/osfmk/mach/i386/thread_state.h +++ b/osfmk/mach/i386/thread_state.h @@ -51,7 +51,7 @@ #ifndef _MACH_I386_THREAD_STATE_H_ #define _MACH_I386_THREAD_STATE_H_ -#define I386_THREAD_STATE_MAX 32 +#define I386_THREAD_STATE_MAX 144 #if defined (__i386__) #define THREAD_STATE_MAX I386_THREAD_STATE_MAX diff --git a/osfmk/mach/i386/thread_status.h b/osfmk/mach/i386/thread_status.h index eb740972b..a279efdf8 100644 --- a/osfmk/mach/i386/thread_status.h +++ b/osfmk/mach/i386/thread_status.h @@ -213,13 +213,13 @@ struct i386_saved_state { * choose the most efficient state flavor for exception RPC's: */ #define MACHINE_THREAD_STATE i386_SAVED_STATE -#define MACHINE_THREAD_STATE_COUNT i386_SAVED_STATE_COUNT +#define MACHINE_THREAD_STATE_COUNT 144 /* * Largest state on this machine: * (be sure mach/machine/thread_state.h matches!) */ -#define THREAD_MACHINE_STATE_MAX i386_SAVED_STATE_COUNT +#define THREAD_MACHINE_STATE_MAX THREAD_STATE_MAX /* * Floating point state. @@ -246,8 +246,7 @@ struct i386_saved_state { * according to physical register number. 
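The bump of FP_STATE_BYTES to 512 in the next hunk matches the fixed-size save area transferred by the fxsave/fxrstor instructions, which struct i386_fx_save (added in the fp_reg.h hunk above) is laid out to fill exactly. A hypothetical compile-time audit, not in the patch, that sums the fields:

    /* 8 bytes of control/status/tag/opcode, 8 of eip/cs, 8 of dp/ds,
     * 8 of MXCSR words, 128 of ST/MM registers, 128 of XMM registers,
     * and 224 reserved: 512 bytes in all. */
    typedef char fx_save_area_is_512_bytes
            [(sizeof (struct i386_fx_save) == 512) ? 1 : -1];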
*/ -#define FP_STATE_BYTES \ - (sizeof (struct i386_fp_save) + sizeof (struct i386_fp_regs)) +#define FP_STATE_BYTES 512 struct i386_float_state { int fpkind; /* FP_NO..FP_387 (readonly) */ @@ -259,6 +258,19 @@ struct i386_float_state { (sizeof(struct i386_float_state)/sizeof(unsigned int)) +#define FP_old_STATE_BYTES \ + (sizeof (struct i386_fp_save) + sizeof (struct i386_fp_regs)) + +struct i386_old_float_state { + int fpkind; /* FP_NO..FP_387 (readonly) */ + int initialized; + unsigned char hw_state[FP_old_STATE_BYTES]; /* actual "hardware" state */ + int exc_status; /* exception status (readonly) */ +}; +#define i386_old_FLOAT_STATE_COUNT \ + (sizeof(struct i386_old_float_state)/sizeof(unsigned int)) + + #define PORT_MAP_BITS 0x400 struct i386_isa_port_map_state { unsigned char pm[PORT_MAP_BITS>>3]; diff --git a/osfmk/mach/i386/vm_param.h b/osfmk/mach/i386/vm_param.h index 10350ab9f..84f4a3063 100644 --- a/osfmk/mach/i386/vm_param.h +++ b/osfmk/mach/i386/vm_param.h @@ -111,8 +111,10 @@ ~(I386_PGBYTES-1)) #define i386_trunc_page(x) (((unsigned)(x)) & ~(I386_PGBYTES-1)) +#define VM_MAX_PAGE_ADDRESS 0x00000000C0000000ULL + #define VM_MIN_ADDRESS ((vm_offset_t) 0) -#define VM_MAX_ADDRESS ((vm_offset_t) 0xc0000000U) +#define VM_MAX_ADDRESS ((vm_offset_t) (VM_MAX_PAGE_ADDRESS & 0xFFFFFFFF)) #define LINEAR_KERNEL_ADDRESS ((vm_offset_t) 0xc0000000) @@ -126,30 +128,6 @@ #define INTSTACK_SIZE (I386_PGBYTES*4) #define KERNEL_STACK_SIZE (I386_PGBYTES*4) -#if 0 /* FIXME */ - -#include -#include -#include - -#if defined(AT386) -#include -#endif - -#if !NORMA_VM -#if !TASK_SWAPPER && !THREAD_SWAPPER -#define KERNEL_STACK_SIZE (I386_PGBYTES/2) -#else -/* stack needs to be a multiple of page size to get unwired when swapped */ -#define KERNEL_STACK_SIZE (I386_PGBYTES) -#endif /* TASK || THREAD SWAPPER */ -#define INTSTACK_SIZE (I386_PGBYTES) /* interrupt stack size */ -#else /* NORMA_VM */ -#define KERNEL_STACK_SIZE (I386_PGBYTES*2) -#define INTSTACK_SIZE (I386_PGBYTES*2) /* interrupt stack size */ -#endif /* NORMA_VM */ -#endif /* MACH_KERNEL */ - /* * Conversion between 80386 pages and VM pages */ @@ -202,7 +180,7 @@ pmap_enter( \ (pmap), \ (virtual_address), \ - (page)->phys_addr, \ + (page)->phys_page, \ __prot__, \ flags, \ (wired) \ diff --git a/osfmk/mach/mach_host.defs b/osfmk/mach/mach_host.defs index 943ba258b..1329f0b65 100644 --- a/osfmk/mach/mach_host.defs +++ b/osfmk/mach/mach_host.defs @@ -236,3 +236,8 @@ routine host_statistics( host_priv : host_t; flavor : host_flavor_t; out host_info_out : host_info_t, CountInOut); + +routine host_request_notification( + host : host_t; + notify_type : host_flavor_t; + notify_port : mach_port_make_send_once_t); diff --git a/iokit/Families/IOADBBus/IOADBBus.cpp b/osfmk/mach/mach_notify.defs similarity index 67% rename from iokit/Families/IOADBBus/IOADBBus.cpp rename to osfmk/mach/mach_notify.defs index 0d506b9f4..cf616a2dd 100644 --- a/iokit/Families/IOADBBus/IOADBBus.cpp +++ b/osfmk/mach/mach_notify.defs @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -22,19 +22,12 @@ * * @APPLE_LICENSE_HEADER_END@ */ -#include - -#define super IOService - -OSDefineMetaClass(IOADBBus,IOService) -OSDefineAbstractStructors(IOADBBus,IOService) - -// ********************************************************************************** -// init -// -// ********************************************************************************** -bool IOADBBus::init( OSDictionary * properties = 0 ) -{ -return super::init(properties); -} - +/* + * The mach/notify.h file is hand-crafted. + * It contains additional data that cannot be generated by MIG at this time. + * But its existence keeps us from having MIG generate the client-side + * routines to send Mach notifications. This file exists simply to give + * those routines a new home (mach/mach_notify.h and mach_notify_user.c) + * until the real notify.h becomes "MIG-safe." + */ +#include \ No newline at end of file diff --git a/osfmk/mach/mach_port.defs b/osfmk/mach/mach_port.defs index bac4fefaa..af83c518a 100644 --- a/osfmk/mach/mach_port.defs +++ b/osfmk/mach/mach_port.defs @@ -62,7 +62,7 @@ subsystem #if KERNEL_SERVER KernelServer -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ mach_port 3200; #include diff --git a/osfmk/mach/mach_traps.h b/osfmk/mach/mach_traps.h index 4f845279a..632903dd9 100644 --- a/osfmk/mach/mach_traps.h +++ b/osfmk/mach/mach_traps.h @@ -131,10 +131,16 @@ kern_return_t macx_swapoff( char *name, int flags); -extern kern_return_t macx_triggers( +kern_return_t macx_triggers( int hi_water, int low_water, int flags, mach_port_t alert_port); +kern_return_t macx_backing_store_suspend( + boolean_t suspend); + +kern_return_t macx_backing_store_recovery( + int pid); + #endif /* _MACH_MACH_TRAPS_H_ */ diff --git a/osfmk/mach/mach_types.defs b/osfmk/mach/mach_types.defs index a13f14eac..384366afd 100644 --- a/osfmk/mach/mach_types.defs +++ b/osfmk/mach/mach_types.defs @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -103,7 +103,7 @@ type thread_act_t = mach_port_t intran: thread_act_t convert_port_to_act(mach_port_t) outtran: mach_port_t convert_act_to_port(thread_act_t) destructor: act_deallocate(thread_act_t) -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ ; type thread_act_consume_ref_t = mach_port_move_send_t @@ -111,7 +111,7 @@ type thread_act_consume_ref_t = mach_port_move_send_t #if KERNEL_SERVER intran: thread_act_t convert_port_to_act(mach_port_t) destructor: act_deallocate(thread_act_t) -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ ; /* thread_state_t: This inline array can hold @@ -183,6 +183,8 @@ type thread_policy_t = array[*:16] of integer_t; * policy_timeshare_info_t (5 ints) * policy_fifo_info_t (4 ints) * policy_rr_info_t (5 ints) + * task security token (2 ints) + * task audit token (8 ints) * If other task_info flavors are added, this * definition may need to be changed. 
(See * mach/task_info.h and mach/policy.h) */ @@ -356,10 +358,11 @@ type ledger_t = mach_port_t #endif /* KERNEL_SERVER */ ; -type ledger_array_t = ^array[] of ledger_t; +type ledger_array_t = ^array[] of ledger_t; type ledger_item_t = integer_t; -type security_token_t = MACH_MSG_TYPE_INTEGER_64; +type security_token_t = struct[2] of uint32_t; +type audit_token_t = struct[8] of uint32_t; /* memory_object_info_t: variable-size inline array: * memory_object_attr_info_t (5 ints) diff --git a/osfmk/mach/mach_types.h b/osfmk/mach/mach_types.h index 7bf77076f..6c9d07daa 100644 --- a/osfmk/mach/mach_types.h +++ b/osfmk/mach/mach_types.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -67,6 +67,8 @@ #include #include +#include +#include #include #include #include @@ -101,8 +103,7 @@ * the basic mach types. */ typedef struct task *task_t; -typedef struct thread_shuttle *thread_t; -typedef struct thread_activation *thread_act_t; +typedef struct thread *thread_t, *thread_act_t; typedef struct ipc_space *ipc_space_t; typedef struct host *host_t; typedef struct host *host_priv_t; @@ -126,8 +127,7 @@ typedef struct clock *clock_ctrl_t; * structures. */ struct task ; -struct thread_shuttle ; -struct thread_activation ; +struct thread ; struct host ; struct processor ; struct processor_set ; diff --git a/osfmk/mach/machine.h b/osfmk/mach/machine.h index 9538ed934..17694ea70 100644 --- a/osfmk/mach/machine.h +++ b/osfmk/mach/machine.h @@ -283,5 +283,6 @@ extern struct machine_slot machine_slot[]; #define CPU_SUBTYPE_POWERPC_750 ((cpu_subtype_t) 9) #define CPU_SUBTYPE_POWERPC_7400 ((cpu_subtype_t) 10) #define CPU_SUBTYPE_POWERPC_7450 ((cpu_subtype_t) 11) +#define CPU_SUBTYPE_POWERPC_970 ((cpu_subtype_t) 100) #endif /* _MACH_MACHINE_H_ */ diff --git a/osfmk/mach/memory_object.defs b/osfmk/mach/memory_object.defs index 23b942e79..742e708ca 100644 --- a/osfmk/mach/memory_object.defs +++ b/osfmk/mach/memory_object.defs @@ -62,10 +62,10 @@ subsystem #if KERNEL_USER KernelUser -#endif KERNEL_USER +#endif /* KERNEL_USER */ #if KERNEL_SERVER KernelServer -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ memory_object 2200; #ifdef MACH_KERNEL diff --git a/osfmk/mach/memory_object_control.defs b/osfmk/mach/memory_object_control.defs index b174f03fa..0b30a4dab 100644 --- a/osfmk/mach/memory_object_control.defs +++ b/osfmk/mach/memory_object_control.defs @@ -62,10 +62,10 @@ subsystem #if KERNEL_USER KernelUser -#endif KERNEL_USER +#endif /* KERNEL_USER */ #if KERNEL_SERVER KernelServer -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ memory_object_control 2000; #ifdef MACH_KERNEL @@ -165,7 +165,7 @@ routine memory_object_page_op( memory_control : memory_object_control_t; in offset : memory_object_offset_t; in ops : integer_t; - out phys_entry : vm_offset_t; + out phys_entry : uint32_t; out flags : integer_t); routine memory_object_recover_named( @@ -176,5 +176,11 @@ routine memory_object_release_name( memory_control : memory_object_control_t; flags : integer_t); +routine memory_object_range_op( + memory_control : memory_object_control_t; + in offset_beg : memory_object_offset_t; + in offset_end : memory_object_offset_t; + in ops : integer_t; + out range : integer_t); diff --git a/osfmk/mach/memory_object_default.defs b/osfmk/mach/memory_object_default.defs index a2db5c1bb..2c531e9aa 100644 --- a/osfmk/mach/memory_object_default.defs +++ 
b/osfmk/mach/memory_object_default.defs @@ -63,10 +63,10 @@ subsystem #if KERNEL_USER KernelUser -#endif KERNEL_USER +#endif /* KERNEL_USER */ #if KERNEL_SERVER KernelServer -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ memory_object_default 2250; #include diff --git a/osfmk/mach/memory_object_name.defs b/osfmk/mach/memory_object_name.defs index 57eb3aa19..3df140784 100644 --- a/osfmk/mach/memory_object_name.defs +++ b/osfmk/mach/memory_object_name.defs @@ -62,7 +62,7 @@ subsystem #if KERNEL_SERVER KernelServer -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ memory_object_name 2600; #include diff --git a/osfmk/mach/memory_object_types.h b/osfmk/mach/memory_object_types.h index d4278204b..6499b2a84 100644 --- a/osfmk/mach/memory_object_types.h +++ b/osfmk/mach/memory_object_types.h @@ -290,8 +290,28 @@ typedef struct upl_page_info upl_page_info_t; typedef upl_page_info_t *upl_page_info_array_t; typedef upl_page_info_array_t upl_page_list_ptr_t; +/* named entry processor mapping options */ +/* enumerated */ +#define MAP_MEM_NOOP 0 +#define MAP_MEM_COPYBACK 1 +#define MAP_MEM_IO 2 +#define MAP_MEM_WTHRU 3 +#define MAP_MEM_WCOMB 4 /* Write combining mode */ + /* aka store gather */ + +#define GET_MAP_MEM(flags) \ + ((((unsigned int)(flags)) >> 24) & 0xFF) + +#define SET_MAP_MEM(caching, flags) \ + ((flags) = ((((unsigned int)(caching)) << 24) \ + & 0xFF000000) | ((flags) & 0xFFFFFF)); + +/* leave room for vm_prot bits */ +#define MAP_MEM_ONLY 0x10000 /* change processor caching */ +#define MAP_MEM_NAMED_CREATE 0x20000 /* create extant object */ /* upl invocation flags */ +/* top nibble is used by super upl */ #define UPL_FLAGS_NONE 0x0 #define UPL_COPYOUT_FROM 0x1 @@ -304,6 +324,12 @@ typedef upl_page_info_array_t upl_page_list_ptr_t; #define UPL_QUERY_OBJECT_TYPE 0x80 #define UPL_RET_ONLY_ABSENT 0x100 /* used only for COPY_FROM = FALSE */ #define UPL_FILE_IO 0x200 +#define UPL_SET_LITE 0x400 +#define UPL_SET_INTERRUPTIBLE 0x800 +#define UPL_SET_IO_WIRE 0x1000 +#define UPL_FOR_PAGEOUT 0x2000 +#define UPL_WILL_BE_DUMPED 0x4000 + /* upl abort error flags */ #define UPL_ABORT_RESTART 0x1 @@ -382,6 +408,27 @@ typedef upl_page_info_array_t upl_page_list_ptr_t; #define UPL_POP_SET 0x40000000 #define UPL_POP_CLR 0x80000000 +/* + * Flags for the UPL range op routine. This routine is not exported + * out of the kernel at the moemet and so the defs live here. + */ +/* + * UPL_ROP_ABSENT: Returns the extent of the range presented which + * is absent, starting with the start address presented + */ +#define UPL_ROP_ABSENT 0x01 +/* + * UPL_ROP_PRESENT: Returns the extent of the range presented which + * is present (i.e. resident), starting with the start address presented + */ +#define UPL_ROP_PRESENT 0x02 +/* + * UPL_ROP_DUMP: Dump the pages which are found in the target object + * for the target range. + */ +#define UPL_ROP_DUMP 0x04 + + #ifdef KERNEL_PRIVATE @@ -420,6 +467,60 @@ extern void upl_set_dirty(upl_t upl); extern void upl_clear_dirty(upl_t upl); + +/* + * The following interface definitions should be generated automatically + * through Mig definitions or whatever follows the MIG tool as part of the + * component API. Until this is up and running however this explicit + * description will do. + */ + +#include + +/* supply a map and a range, a upl will be returned. 
*/ +extern int kernel_vm_map_get_upl( + vm_map_t map, + vm_address_t offset, + vm_size_t *upl_size, + upl_t *upl, + upl_page_info_array_t page_list, + unsigned int *count, + int *flags, + int force_data_sync); + +extern int kernel_upl_map( + vm_map_t map, + upl_t upl, + vm_offset_t *dst_addr); + +extern int kernel_upl_unmap( + vm_map_t map, + upl_t upl); + +extern int kernel_upl_commit( + upl_t upl, + upl_page_info_t *pl, + mach_msg_type_number_t count); + +extern int kernel_upl_commit_range( + upl_t upl, + vm_offset_t offset, + vm_size_t size, + int flags, + upl_page_info_array_t pl, + mach_msg_type_number_t count); + +extern int kernel_upl_abort( + upl_t upl, + int abort_type); + +extern int kernel_upl_abort_range( + upl_t upl, + vm_offset_t offset, + vm_size_t size, + int abort_flags); + + #endif /* KERNEL_PRIVATE */ #endif /* __APPLE_API_EVOLVING */ diff --git a/osfmk/mach/message.h b/osfmk/mach/message.h index 75d3531de..8310905e6 100644 --- a/osfmk/mach/message.h +++ b/osfmk/mach/message.h @@ -306,15 +306,50 @@ typedef struct security_token_t msgh_sender; } mach_msg_security_trailer_t; -typedef mach_msg_security_trailer_t mach_msg_format_0_trailer_t; +typedef struct +{ + unsigned int val[8]; +} audit_token_t; + +typedef struct +{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; + mach_port_seqno_t msgh_seqno; + security_token_t msgh_sender; + audit_token_t msgh_audit; +} mach_msg_audit_trailer_t; -#define MACH_MSG_TRAILER_FORMAT_0_SIZE sizeof(mach_msg_format_0_trailer_t) #define MACH_MSG_TRAILER_MINIMUM_SIZE sizeof(mach_msg_trailer_t) -#define MAX_TRAILER_SIZE MACH_MSG_TRAILER_FORMAT_0_SIZE + +/* + * These values can change from release to release - but clearly + * code cannot request additional trailer elements one was not + * compiled to understand. Therefore, it is safe to use this + * constant when the same module specified the receive options. + * Otherwise, you run the risk that the options requested by + * another module may exceed the local modules notion of + * MAX_TRAILER_SIZE. + */ +typedef mach_msg_audit_trailer_t mach_msg_max_trailer_t; +#define MAX_TRAILER_SIZE sizeof(mach_msg_max_trailer_t) + +/* + * Legacy requirements keep us from ever updating these defines (even + * when the format_0 trailers gain new option data fields in the future). + * Therefore, they shouldn't be used going forward. Instead, the sizes + * should be compared against the specific element size requested using + * REQUESTED_TRAILER_SIZE. + */ +typedef mach_msg_security_trailer_t mach_msg_format_0_trailer_t; +#define MACH_MSG_TRAILER_FORMAT_0_SIZE sizeof(mach_msg_format_0_trailer_t) #define KERNEL_SECURITY_TOKEN_VALUE { {0, 1} } extern security_token_t KERNEL_SECURITY_TOKEN; +#define KERNEL_AUDIT_TOKEN_VALUE { {0, 0, 0, 0, 0, 0, 0, 0} } +extern audit_token_t KERNEL_AUDIT_TOKEN; + typedef integer_t mach_msg_options_t; typedef struct @@ -451,6 +486,7 @@ typedef integer_t mach_msg_option_t; #define MACH_RCV_TRAILER_NULL 0 #define MACH_RCV_TRAILER_SEQNO 1 #define MACH_RCV_TRAILER_SENDER 2 +#define MACH_RCV_TRAILER_AUDIT 3 #define MACH_RCV_TRAILER_TYPE(x) (((x) & 0xf) << 28) #define MACH_RCV_TRAILER_ELEMENTS(x) (((x) & 0xf) << 24) @@ -463,7 +499,9 @@ typedef integer_t mach_msg_option_t; sizeof(mach_msg_trailer_t) : \ ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_SEQNO) ? \ sizeof(mach_msg_seqno_trailer_t) : \ - sizeof(mach_msg_security_trailer_t)))) + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_SENDER) ? 
\ + sizeof(mach_msg_security_trailer_t) : \ + sizeof(mach_msg_audit_trailer_t))))) /* * Much code assumes that mach_msg_return_t == kern_return_t. * This definition is useful for descriptive purposes. diff --git a/osfmk/mach/mig_errors.h b/osfmk/mach/mig_errors.h index e57e77f10..4c1c9eb7d 100644 --- a/osfmk/mach/mig_errors.h +++ b/osfmk/mach/mig_errors.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -96,4 +96,20 @@ typedef struct { kern_return_t RetCode; } mig_reply_error_t; +#define __NDR_convert__mig_reply_error_t__defined +#if mig_internal +mig_internal +#else +static +#endif +__inline__ void +__NDR_convert__mig_reply_error_t(mig_reply_error_t *x) +{ +#if defined(__NDR_convert__int_rep__kern_return_t__defined) + if (x->NDR.int_rep != NDR_record.int_rep) + __NDR_convert__int_rep__kern_return_t(&x->RetCode, x->NDR.int_rep); +#endif /* __NDR_convert__int_rep__kern_return_t__defined */ +} + #endif /* _MACH_MIG_ERRORS_H_ */ + diff --git a/osfmk/mach/mk_timer.h b/osfmk/mach/mk_timer.h index a1744efb1..eff7357af 100644 --- a/osfmk/mach/mk_timer.h +++ b/osfmk/mach/mk_timer.h @@ -51,9 +51,7 @@ kern_return_t mk_timer_cancel( struct mk_timer_expire_msg { mach_msg_header_t header; - uint64_t time_of_arming; - uint64_t armed_time; - uint64_t time_of_posting; + uint64_t unused[3]; }; typedef struct mk_timer_expire_msg mk_timer_expire_msg_t; diff --git a/osfmk/mach/ndr.h b/osfmk/mach/ndr.h index e034eae05..64ce2a60a 100644 --- a/osfmk/mach/ndr.h +++ b/osfmk/mach/ndr.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -25,40 +25,11 @@ /* * @OSF_COPYRIGHT@ */ -/* - * HISTORY - * - * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez - * Import of Mac OS X kernel (~semeria) - * - * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez - * Import of OSF Mach kernel (~mburg) - * - * Revision 1.2.6.1 1994/09/23 02:40:51 ezf - * change marker to not FREE - * [1994/09/22 21:42:00 ezf] - * - * Revision 1.2.2.2 1993/06/09 02:42:37 gm - * Added to OSF/1 R1.3 from NMK15.0. - * [1993/06/02 21:17:34 jeffc] - * - * Revision 1.2 1993/04/19 16:38:03 devrcs - * Merge untyped ipc: - * New names for the fields - the structure isn't changed - * [1993/03/12 23:01:38 travos] - * Extended NDR record to include version number(s) - * [1993/03/05 23:10:21 travos] - * a new NDR structure - * 1993/02/13 00:47:46 travos] - * Created. 
[travos@osf.org] - * [1993/01/27 11:21:44 rod] - * [1993/03/16 13:23:15 rod] - * - * $EndLog$ - */ -#ifndef _NDR_H_ -#define _NDR_H_ +#ifndef _MACH_NDR_H_ +#define _MACH_NDR_H_ + +#include typedef struct { unsigned char mig_vers; @@ -90,4 +61,133 @@ typedef struct { extern NDR_record_t NDR_record; -#endif /* _NDR_H_ */ +#ifndef __NDR_convert__ +#define __NDR_convert__ 1 +#endif /* __NDR_convert__ */ + +#ifndef __NDR_convert__int_rep__ +#define __NDR_convert__int_rep__ 1 +#endif /* __NDR_convert__int_rep__ */ + +#ifndef __NDR_convert__char_rep__ +#define __NDR_convert__char_rep__ 0 +#endif /* __NDR_convert__char_rep__ */ + +#ifndef __NDR_convert__float_rep__ +#define __NDR_convert__float_rep__ 0 +#endif /* __NDR_convert__float_rep__ */ + +#if __NDR_convert__ + +#define __NDR_convert__NOOP do ; while (0) +#define __NDR_convert__UNKNOWN(s) __NDR_convert__NOOP +#define __NDR_convert__SINGLE(a, f, r) do { r((a), (f)); } while (0) +#define __NDR_convert__ARRAY(a, f, c, r) \ + do { int __i__, __C__ = (c); \ + for (__i__ = 0; __i__ < __C__; __i__++) \ + r(&(a)[__i__], f); } while (0) +#define __NDR_convert__2DARRAY(a, f, s, c, r) \ + do { int __i__, __C__ = (c), __S__ = (s); \ + for (__i__ = 0; __i__ < __C__; __i__++) \ + r(&(a)[__i__ * __S__], f, __S__); } while (0) + +#if __NDR_convert__int_rep__ + +#include + +#define __NDR_READSWAP_assign(a, rs) do { *(a) = rs(a); } while (0) + +#define __NDR_READSWAP__uint16_t(a) OSReadSwapInt16((void *)a, 0) +#define __NDR_READSWAP__int16_t(a) (int16_t)OSReadSwapInt16((void *)a, 0) +#define __NDR_READSWAP__uint32_t(a) OSReadSwapInt32((void *)a, 0) +#define __NDR_READSWAP__int32_t(a) (int32_t)OSReadSwapInt32((void *)a, 0) +#define __NDR_READSWAP__uint64_t(a) OSReadSwapInt64((void *)a, 0) +#define __NDR_READSWAP__int64_t(a) (int64_t)OSReadSwapInt64((void *)a, 0) + +static __inline__ float __NDR_READSWAP__float(float *argp) { + union { + float sv; + uint32_t ull; + } result; + result.ull = __NDR_READSWAP__uint32_t((uint32_t *)argp); + return result.sv; +} + +static __inline__ double __NDR_READSWAP__double(double *argp) { + union { + double sv; + uint64_t ull; + } result; + result.ull = __NDR_READSWAP__uint64_t((uint64_t *)argp); + return result.sv; +} + +#define __NDR_convert__int_rep__int16_t__defined +#define __NDR_convert__int_rep__int16_t(v,f) \ + __NDR_READSWAP_assign(v, __NDR_READSWAP__int16_t) + +#define __NDR_convert__int_rep__uint16_t__defined +#define __NDR_convert__int_rep__uint16_t(v,f) \ + __NDR_READSWAP_assign(v, __NDR_READSWAP__uint16_t) + +#define __NDR_convert__int_rep__int32_t__defined +#define __NDR_convert__int_rep__int32_t(v,f) \ + __NDR_READSWAP_assign(v, __NDR_READSWAP__int32_t) + +#define __NDR_convert__int_rep__uint32_t__defined +#define __NDR_convert__int_rep__uint32_t(v,f) \ + __NDR_READSWAP_assign(v, __NDR_READSWAP__uint32_t) + +#define __NDR_convert__int_rep__int64_t__defined +#define __NDR_convert__int_rep__int64_t(v,f) \ + __NDR_READSWAP_assign(v, __NDR_READSWAP__int64_t) + +#define __NDR_convert__int_rep__uint64_t__defined +#define __NDR_convert__int_rep__uint64_t(v,f) \ + __NDR_READSWAP_assign(v, __NDR_READSWAP__uint64_t) + +#define __NDR_convert__int_rep__float__defined +#define __NDR_convert__int_rep__float(v,f) \ + __NDR_READSWAP_assign(v, __NDR_READSWAP__float) + +#define __NDR_convert__int_rep__double__defined +#define __NDR_convert__int_rep__double(v,f) \ + __NDR_READSWAP_assign(v, __NDR_READSWAP__double) + +#define __NDR_convert__int_rep__boolean_t__defined +#define __NDR_convert__int_rep__boolean_t(v, f) \ + 
__NDR_convert__int_rep__int32_t(v,f) + +#define __NDR_convert__int_rep__kern_return_t__defined +#define __NDR_convert__int_rep__kern_return_t(v,f) \ + __NDR_convert__int_rep__int32_t(v,f) + +#define __NDR_convert__int_rep__mach_port_name_t__defined +#define __NDR_convert__int_rep__mach_port_name_t(v,f) \ + __NDR_convert__int_rep__uint32_t(v,f) + +#define __NDR_convert__int_rep__mach_msg_type_number_t__defined +#define __NDR_convert__int_rep__mach_msg_type_number_t(v,f) \ + __NDR_convert__int_rep__uint32_t(v,f) + +#endif /* __NDR_convert__int_rep__ */ + +#if __NDR_convert__char_rep__ + +#warning NDR character representation conversions not implemented yet! +#define __NDR_convert__char_rep__char(v,f) __NDR_convert__NOOP +#define __NDR_convert__char_rep__string(v,f,l) __NDR_convert__NOOP + +#endif /* __NDR_convert__char_rep__ */ + +#if __NDR_convert__float_rep__ + +#warning NDR floating point representation conversions not implemented yet! +#define __NDR_convert__float_rep__float(v,f) __NDR_convert__NOOP +#define __NDR_convert__float_rep__double(v,f) __NDR_convert__NOOP + +#endif /* __NDR_convert__float_rep__ */ + +#endif /* __NDR_convert__ */ + +#endif /* _MACH_NDR_H_ */ diff --git a/osfmk/mach/norma_special_ports.h b/osfmk/mach/norma_special_ports.h index 708511a18..f1a5e4520 100644 --- a/osfmk/mach/norma_special_ports.h +++ b/osfmk/mach/norma_special_ports.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -55,59 +55,36 @@ /* * File: mach/norma_special_ports.h * - * Defines codes for remote access to special ports. These are NOT - * port identifiers - they are only used for the norma_get_special_port - * and norma_set_special_port routines. + * Defines codes for remote access to special ports. */ #ifndef _MACH_NORMA_SPECIAL_PORTS_H_ #define _MACH_NORMA_SPECIAL_PORTS_H_ -#define MAX_SPECIAL_KERNEL_ID 10 -#define MAX_SPECIAL_ID 40 - -/* - * Provided by kernel - */ -#define NORMA_DEVICE_PORT 1 -#define NORMA_HOST_PORT 2 -#define NORMA_HOST_PRIV_PORT 3 - -/* - * Not provided by kernel - */ -#define NORMA_NAMESERVER_PORT (1 + MAX_SPECIAL_KERNEL_ID) - -/* - * Definitions for ease of use. - * - * In the get call, the host parameter can be any host, but will generally - * be the local node host port. In the set call, the host must the per-node - * host port for the node being affected. 
- */ - -#define norma_get_device_port(host, node, port) \ - (norma_get_special_port((host), (node), NORMA_DEVICE_PORT, (port))) - -#define norma_set_device_port(host, port) \ - (norma_set_special_port((host), NORMA_DEVICE_PORT, (port))) +#include #define norma_get_host_port(host, node, port) \ - (norma_get_special_port((host), (node), NORMA_HOST_PORT, (port))) - -#define norma_set_host_port(host, port) \ - (norma_set_special_port((host), NORMA_HOST_PORT, (port))) + (host_get_special_port((host), (node), \ + HOST_PORT, (port))) #define norma_get_host_priv_port(host, node, port) \ - (norma_get_special_port((host), (node), NORMA_HOST_PRIV_PORT, (port))) + (host_get_special_port((host), (node), \ + HOST_PRIV_PORT, (port))) + +#define norma_get_io_master_port(host, node, port) \ + (host_get_special_port((host), (node), \ + HOST_IO_MASTER_PORT, (port))) -#define norma_set_host_priv_port(host, port) \ - (norma_set_special_port((host), NORMA_HOST_PRIV_PORT, (port))) +#define norma_get_dynamic_pager_port(host, port) \ + (host_get_special_port((host), 0, \ + HOST_DYNAMIC_PAGER_PORT, (port))) -#define norma_get_nameserver_port(host, node, port) \ - (norma_get_special_port((host), (node), NORMA_NAMESERVER_PORT, (port))) +#define norma_get_audit_control_port(host, node, port) \ + (host_get_special_port((host), (node), \ + HOST_AUDIT_CONTROL_PORT, (port))) -#define norma_set_nameserver_port(host, port) \ - (norma_set_special_port((host), NORMA_NAMESERVER_PORT, (port))) +#define norma_get_user_notification_port(host, node, port) \ + (host_get_special_port((host), (node), \ + HOST_USER_NOTIFICATION_PORT, (port))) #endif /* _MACH_NORMA_SPECIAL_PORTS_H_ */ diff --git a/osfmk/mach/notify.defs b/osfmk/mach/notify.defs index 9549738aa..ec7829be1 100644 --- a/osfmk/mach/notify.defs +++ b/osfmk/mach/notify.defs @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -53,30 +53,31 @@ /* */ -subsystem notify 64; +subsystem +#if KERNEL_USER + KernelUser +#endif + notify 64; #include #if SEQNOS serverprefix do_seqnos_; serverdemux seqnos_notify_server; -#else SEQNOS +#else /* !SEQNOS */ serverprefix do_; serverdemux notify_server; -#endif SEQNOS - -type notify_port_t = MACH_MSG_TYPE_MOVE_SEND_ONCE - ctype: mach_port_t; +#endif /* SEQNOS */ /* MACH_NOTIFY_FIRST: 0100 */ skip; /* MACH_NOTIFY_PORT_DELETED: 0101 */ simpleroutine mach_notify_port_deleted( - notify : notify_port_t; + notify : mach_port_move_send_once_t; #if SEQNOS msgseqno seqno : mach_port_seqno_t; -#endif SEQNOS +#endif /* SEQNOS */ name : mach_port_name_t); skip; /* was MACH_NOTIFY_MSG_ACCEPTED: 0102 */ @@ -87,32 +88,32 @@ skip; /* was NOTIFY_RECEIVE_RIGHTS: 0104 */ /* MACH_NOTIFY_PORT_DESTROYED: 0105 */ simpleroutine mach_notify_port_destroyed( - notify : notify_port_t; + notify : mach_port_move_send_once_t; #if SEQNOS msgseqno seqno : mach_port_seqno_t; -#endif SEQNOS - rights : mach_port_receive_t); +#endif /* SEQNOS */ + rights : mach_port_move_receive_t); /* MACH_NOTIFY_NO_SENDERS: 0106 */ simpleroutine mach_notify_no_senders( - notify : notify_port_t; + notify : mach_port_move_send_once_t; #if SEQNOS msgseqno seqno : mach_port_seqno_t; -#endif SEQNOS +#endif /* SEQNOS */ mscount : mach_port_mscount_t); /* MACH_NOTIFY_SEND_ONCE: 0107 */ simpleroutine mach_notify_send_once( - notify : notify_port_t + notify : mach_port_move_send_once_t #if SEQNOS ; msgseqno seqno : mach_port_seqno_t -#endif SEQNOS +#endif /* SEQNOS */ ); /* MACH_NOTIFY_DEAD_NAME: 0110 */ simpleroutine mach_notify_dead_name( - notify : notify_port_t; + notify : mach_port_move_send_once_t; #if SEQNOS msgseqno seqno : mach_port_seqno_t; -#endif SEQNOS +#endif /* SEQNOS */ name : mach_port_name_t); diff --git a/osfmk/mach/notify.h b/osfmk/mach/notify.h index 0c9dd1246..b6a583aec 100644 --- a/osfmk/mach/notify.h +++ b/osfmk/mach/notify.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -83,6 +83,18 @@ /* Send or send-once right died, leaving a dead-name */ #define MACH_NOTIFY_LAST (MACH_NOTIFY_FIRST + 015) +typedef mach_port_t notify_port_t; + +#include +#ifdef __APPLE_API_OBSOLETE +/* + * Hard-coded message structures for receiving Mach port notification + * messages. However, they are not actual large enough to receive + * the largest trailers current exported by Mach IPC (so they cannot + * be used for space allocations in situations using these new larger + * trailers). Instead, the MIG-generated server routines (and + * related prototypes should be used). 
+ */ typedef struct { mach_msg_header_t not_header; NDR_record_t NDR; @@ -116,4 +128,6 @@ typedef struct { mach_msg_format_0_trailer_t trailer; } mach_dead_name_notification_t; +#endif /* __APPLE_API_OBSOLETE */ + #endif /* _MACH_NOTIFY_H_ */ diff --git a/osfmk/mach/ppc/exception.h b/osfmk/mach/ppc/exception.h index 3354231e7..67757395b 100644 --- a/osfmk/mach/ppc/exception.h +++ b/osfmk/mach/ppc/exception.h @@ -66,6 +66,7 @@ #define EXC_PPC_PRIVINST 3 /* priviledged instruction */ #define EXC_PPC_PRIVREG 4 /* priviledged register */ #define EXC_PPC_TRACE 5 /* trace/single-step */ +#define EXC_PPC_PERFMON 6 /* performance monitor */ /* * EXC_BAD_ACCESS diff --git a/osfmk/mach/ppc/machine_types.defs b/osfmk/mach/ppc/machine_types.defs index 007b9e455..0743ba150 100644 --- a/osfmk/mach/ppc/machine_types.defs +++ b/osfmk/mach/ppc/machine_types.defs @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -54,8 +54,8 @@ type double = MACH_MSG_TYPE_REAL_64; * a port in user space as an integer and * in kernel space as a pointer. */ -type uintptr_t = MACH_MSG_TYPE_INTEGER_32; -type intptr_t = MACH_MSG_TYPE_INTEGER_32; +type uintptr_t = uint32_t; +type intptr_t = int32_t; /* * These are the legacy Mach types that are @@ -72,7 +72,7 @@ type register_t = int32_t; /* * For the old IPC interface */ -#define MSG_TYPE_PORT_NAME MACH_MSG_TYPE_INTEGER_32 +#define MSG_TYPE_PORT_NAME natural_t #endif /* MACH_IPC_COMPAT */ diff --git a/osfmk/mach/ppc/processor_info.h b/osfmk/mach/ppc/processor_info.h index 5d9257552..2a7fa52d0 100644 --- a/osfmk/mach/ppc/processor_info.h +++ b/osfmk/mach/ppc/processor_info.h @@ -107,12 +107,6 @@ typedef struct processor_pm_regs *processor_pm_regs_t; #define PROCESSOR_PM_REGS_COUNT \ (sizeof(processor_pm_regs_data_t) / sizeof (unsigned int)) -#define PROCESSOR_PM_REGS_COUNT_POWERPC_604 \ - (PROCESSOR_PM_REGS_COUNT * 1 ) - -#define PROCESSOR_PM_REGS_COUNT_POWERPC_604e \ - (PROCESSOR_PM_REGS_COUNT * 2 ) - #define PROCESSOR_PM_REGS_COUNT_POWERPC_750 \ (PROCESSOR_PM_REGS_COUNT * 2 ) diff --git a/osfmk/mach/ppc/syscall_sw.h b/osfmk/mach/ppc/syscall_sw.h index ac2a3d8ab..a2d4fd045 100644 --- a/osfmk/mach/ppc/syscall_sw.h +++ b/osfmk/mach/ppc/syscall_sw.h @@ -74,6 +74,9 @@ ppc_trap(bb_disable_bluebox,0x6006) ppc_trap(bb_settaskenv,0x6007) ppc_trap(vmm_stop_vm,0x6008) ppc_trap(CHUDCall,0x6009) +ppc_trap(ppcNull,0x600A) +ppc_trap(perfmon_control,0x600B) +ppc_trap(ppcNullinst,0x600C) #endif /* _MACH_SYSCALL_SW_H_ */ #endif /* _MACH_PPC_SYSCALL_SW_H_ */ diff --git a/osfmk/mach/ppc/thread_status.h b/osfmk/mach/ppc/thread_status.h index 9f39d4732..1decee68f 100644 --- a/osfmk/mach/ppc/thread_status.h +++ b/osfmk/mach/ppc/thread_status.h @@ -44,6 +44,8 @@ #define PPC_FLOAT_STATE 2 #define PPC_EXCEPTION_STATE 3 #define PPC_VECTOR_STATE 4 +#define PPC_THREAD_STATE64 5 +#define PPC_EXCEPTION_STATE64 6 #define THREAD_STATE_NONE 7 /* @@ -56,8 +58,10 @@ #define VALID_THREAD_STATE_FLAVOR(x) \ ((x == PPC_THREAD_STATE) || \ (x == PPC_FLOAT_STATE) || \ - (x == PPC_EXCEPTION_STATE) || \ + (x == PPC_EXCEPTION_STATE) || \ (x == PPC_VECTOR_STATE) || \ + (x == PPC_THREAD_STATE64) || \ + (x == PPC_EXCEPTION_STATE64) || \ (x == THREAD_STATE_NONE)) typedef struct ppc_thread_state { @@ -105,6 +109,52 @@ typedef struct ppc_thread_state { unsigned int vrsave; /* Vector Save Register */ } ppc_thread_state_t; +#pragma pack(4) /* Make sure the structure stays as we defined 
it */ +typedef struct ppc_thread_state64 { + unsigned long long srr0; /* Instruction address register (PC) */ + unsigned long long srr1; /* Machine state register (supervisor) */ + unsigned long long r0; + unsigned long long r1; + unsigned long long r2; + unsigned long long r3; + unsigned long long r4; + unsigned long long r5; + unsigned long long r6; + unsigned long long r7; + unsigned long long r8; + unsigned long long r9; + unsigned long long r10; + unsigned long long r11; + unsigned long long r12; + unsigned long long r13; + unsigned long long r14; + unsigned long long r15; + unsigned long long r16; + unsigned long long r17; + unsigned long long r18; + unsigned long long r19; + unsigned long long r20; + unsigned long long r21; + unsigned long long r22; + unsigned long long r23; + unsigned long long r24; + unsigned long long r25; + unsigned long long r26; + unsigned long long r27; + unsigned long long r28; + unsigned long long r29; + unsigned long long r30; + unsigned long long r31; + + unsigned int cr; /* Condition register */ + unsigned long long xer; /* User's integer exception register */ + unsigned long long lr; /* Link register */ + unsigned long long ctr; /* Count register */ + + unsigned int vrsave; /* Vector Save Register */ +} ppc_thread_state64_t; +#pragma pack() + /* This structure should be double-word aligned for performance */ typedef struct ppc_float_state { @@ -149,14 +199,24 @@ typedef struct ppc_thread_state ppc_saved_state_t; */ typedef struct ppc_exception_state { - unsigned long dar; /* Fault registers for coredump */ + unsigned long dar; /* Fault registers for coredump */ unsigned long dsisr; - unsigned long exception;/* number of powerpc exception taken */ - unsigned long pad0; /* align to 16 bytes */ + unsigned long exception; /* number of powerpc exception taken */ + unsigned long pad0; /* align to 16 bytes */ - unsigned long pad1[4]; /* space in PCB "just in case" */ + unsigned long pad1[4]; /* space in PCB "just in case" */ } ppc_exception_state_t; +#pragma pack(4) /* Make sure the structure stays as we defined it */ +typedef struct ppc_exception_state64 { + unsigned long long dar; /* Fault registers for coredump */ + unsigned long dsisr; + unsigned long exception; /* number of powerpc exception taken */ + + unsigned long pad1[4]; /* space in PCB "just in case" */ +} ppc_exception_state64_t; +#pragma pack() + /* * Save State Flags */ @@ -164,9 +224,15 @@ typedef struct ppc_exception_state { #define PPC_THREAD_STATE_COUNT \ (sizeof(struct ppc_thread_state) / sizeof(int)) +#define PPC_THREAD_STATE64_COUNT \ + (sizeof(struct ppc_thread_state64) / sizeof(int)) + #define PPC_EXCEPTION_STATE_COUNT \ (sizeof(struct ppc_exception_state) / sizeof(int)) +#define PPC_EXCEPTION_STATE64_COUNT \ + (sizeof(struct ppc_exception_state64) / sizeof(int)) + #define PPC_FLOAT_STATE_COUNT \ (sizeof(struct ppc_float_state) / sizeof(int)) @@ -183,6 +249,6 @@ typedef struct ppc_exception_state { /* * Largest state on this machine: */ -#define THREAD_MACHINE_STATE_MAX PPC_VECTOR_STATE_COUNT +#define THREAD_MACHINE_STATE_MAX THREAD_STATE_MAX #endif /* _MACH_PPC_THREAD_STATUS_H_ */ diff --git a/osfmk/mach/ppc/vm_param.h b/osfmk/mach/ppc/vm_param.h index e01151582..bc2697608 100644 --- a/osfmk/mach/ppc/vm_param.h +++ b/osfmk/mach/ppc/vm_param.h @@ -1,3 +1,4 @@ + /* * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
* @@ -33,21 +34,17 @@ #define PPC_PGBYTES 4096 /* bytes per ppc page */ #define PPC_PGSHIFT 12 /* number of bits to shift for pages */ +#define VM_MAX_PAGE_ADDRESS 0xFFFFFFFFFFFFF000ULL + #define VM_MIN_ADDRESS ((vm_offset_t) 0) -#define VM_MAX_ADDRESS ((vm_offset_t) 0xfffff000U) +#define VM_MAX_ADDRESS ((vm_offset_t) (VM_MAX_PAGE_ADDRESS & 0xFFFFFFFF)) #define VM_MIN_KERNEL_ADDRESS ((vm_offset_t) 0x00001000) -/* We map the kernel using only SR0,SR1,SR2,SR3 leaving segments alone */ -#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) 0x3fffffff) +#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) 0xDFFFFFFF) #define USER_STACK_END ((vm_offset_t) 0xffff0000U) -#define ppc_round_page(x) ((((unsigned)(x)) + PPC_PGBYTES - 1) & \ - ~(PPC_PGBYTES-1)) -#define ppc_trunc_page(x) (((unsigned)(x)) & ~(PPC_PGBYTES-1)) - - #define KERNEL_STACK_SIZE (4 * PPC_PGBYTES) #define INTSTACK_SIZE (5 * PPC_PGBYTES) diff --git a/osfmk/mach/ppc/vm_types.h b/osfmk/mach/ppc/vm_types.h index f4aa6101f..524003489 100644 --- a/osfmk/mach/ppc/vm_types.h +++ b/osfmk/mach/ppc/vm_types.h @@ -58,7 +58,7 @@ * Author: Avadis Tevanian, Jr. * Date: 1985 * - * Header file for VM data types. I386 version. + * Header file for VM data types. PPC version. */ #ifndef _MACH_PPC_VM_TYPES_H_ @@ -101,11 +101,6 @@ typedef natural_t vm_offset_t; */ typedef natural_t vm_size_t; -/* - * space_t is used in the pmap system - */ -typedef unsigned int space_t; - #endif /* ndef ASSEMBLER */ /* diff --git a/osfmk/mach/processor.defs b/osfmk/mach/processor.defs index 50c7c4698..8237f627a 100644 --- a/osfmk/mach/processor.defs +++ b/osfmk/mach/processor.defs @@ -62,7 +62,7 @@ subsystem #if KERNEL_SERVER KernelServer -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ processor 3000; #include diff --git a/osfmk/mach/processor_set.defs b/osfmk/mach/processor_set.defs index 327755703..e3ad35314 100644 --- a/osfmk/mach/processor_set.defs +++ b/osfmk/mach/processor_set.defs @@ -62,7 +62,7 @@ subsystem #if KERNEL_SERVER KernelServer -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ processor_set 4000; #include diff --git a/osfmk/mach/std_types.defs b/osfmk/mach/std_types.defs index 7b1cf5301..06a62dd43 100644 --- a/osfmk/mach/std_types.defs +++ b/osfmk/mach/std_types.defs @@ -75,10 +75,10 @@ type uint64_t = MACH_MSG_TYPE_INTEGER_64; * Legacy fixed-length Mach types which should * be replaced with the Standard types from above. */ -type int32 = MACH_MSG_TYPE_INTEGER_32; -type unsigned32 = MACH_MSG_TYPE_INTEGER_32; -type int64 = MACH_MSG_TYPE_INTEGER_64; -type unsigned64 = MACH_MSG_TYPE_INTEGER_64; +type int32 = int32_t; +type unsigned32 = uint32_t; +type int64 = int64_t; +type unsigned64 = uint64_t; /* * Other fixed length Mach types. 
@@ -142,4 +142,4 @@ type mach_port_poly_t = polymorphic import ; import ; -#endif _MACH_STD_TYPES_DEFS_ +#endif /* _MACH_STD_TYPES_DEFS_ */ diff --git a/osfmk/mach/syscall_sw.h b/osfmk/mach/syscall_sw.h index 1bbf2c75d..00784d2b9 100644 --- a/osfmk/mach/syscall_sw.h +++ b/osfmk/mach/syscall_sw.h @@ -92,6 +92,8 @@ kernel_trap(pid_for_task,-46,2) kernel_trap(macx_swapon,-48, 4) kernel_trap(macx_swapoff,-49, 2) kernel_trap(macx_triggers,-51, 4) +kernel_trap(macx_backing_store_suspend,-52, 1) +kernel_trap(macx_backing_store_recovery,-53, 1) kernel_trap(swtch_pri,-59,1) kernel_trap(swtch,-60,0) diff --git a/osfmk/mach/task.defs b/osfmk/mach/task.defs index b3d410599..efa28ba13 100644 --- a/osfmk/mach/task.defs +++ b/osfmk/mach/task.defs @@ -62,7 +62,7 @@ subsystem #if KERNEL_SERVER KernelServer -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ task 3400; #include diff --git a/osfmk/mach/task_info.h b/osfmk/mach/task_info.h index f6dc3b1a1..df7da10a3 100644 --- a/osfmk/mach/task_info.h +++ b/osfmk/mach/task_info.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -148,4 +148,8 @@ typedef struct task_thread_times_info *task_thread_times_info_t; #define TASK_SECURITY_TOKEN_COUNT \ (sizeof(security_token_t) / sizeof(natural_t)) +#define TASK_AUDIT_TOKEN 15 +#define TASK_AUDIT_TOKEN_COUNT \ + (sizeof(audit_token_t) / sizeof(natural_t)) + #endif /* TASK_INFO_H_ */ diff --git a/osfmk/mach/thread_act.defs b/osfmk/mach/thread_act.defs index 530dd2263..d3aa0ab54 100644 --- a/osfmk/mach/thread_act.defs +++ b/osfmk/mach/thread_act.defs @@ -62,7 +62,7 @@ subsystem #if KERNEL_SERVER KernelServer -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ thread_act 3600; #include diff --git a/osfmk/mach/upl.defs b/osfmk/mach/upl.defs index b10e5a2a1..513e07de3 100644 --- a/osfmk/mach/upl.defs +++ b/osfmk/mach/upl.defs @@ -62,10 +62,10 @@ subsystem #if KERNEL_USER KernelUser -#endif KERNEL_USER +#endif /* KERNEL_USER */ #if KERNEL_SERVER KernelServer -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ upl 2050; #ifdef MACH_KERNEL diff --git a/osfmk/mach/vm_map.defs b/osfmk/mach/vm_map.defs index aeb2fe50a..29c9f4ba0 100644 --- a/osfmk/mach/vm_map.defs +++ b/osfmk/mach/vm_map.defs @@ -62,7 +62,7 @@ subsystem #if KERNEL_SERVER KernelServer -#endif KERNEL_SERVER +#endif /* KERNEL_SERVER */ vm_map 3800; #include diff --git a/osfmk/mach/vm_param.h b/osfmk/mach/vm_param.h index e4511b2a9..82d476449 100644 --- a/osfmk/mach/vm_param.h +++ b/osfmk/mach/vm_param.h @@ -83,7 +83,9 @@ #include +#ifndef ASSEMBLER #include +#endif /* ASSEMBLER */ /* * The machine independent pages are refered to as PAGES. A page @@ -102,11 +104,19 @@ * virtual memory system implementation. 
*/ -#ifndef PAGE_SIZE_FIXED +#ifdef PAGE_SIZE_FIXED +#define PAGE_SIZE 4096 +#define PAGE_SHIFT 12 +#define PAGE_MASK (PAGE_SIZE-1) +#endif /* PAGE_SIZE_FIXED */ + +#ifndef ASSEMBLER + extern vm_size_t page_size; extern vm_size_t page_mask; extern int page_shift; +#ifndef PAGE_SIZE_FIXED #define PAGE_SIZE page_size /* pagesize in addr units */ #define PAGE_SHIFT page_shift /* number of bits to shift for pages */ #define PAGE_MASK page_mask /* mask for off in page */ @@ -114,15 +124,11 @@ extern int page_shift; #define PAGE_SIZE_64 (unsigned long long)page_size /* pagesize in addr units */ #define PAGE_MASK_64 (unsigned long long)page_mask /* mask for off in page */ #else /* PAGE_SIZE_FIXED */ -#define PAGE_SIZE 4096 -#define PAGE_SHIFT 12 -#define PAGE_MASK (PAGE_SIZE-1) + #define PAGE_SIZE_64 (unsigned long long)4096 #define PAGE_MASK_64 (PAGE_SIZE_64-1) #endif /* PAGE_SIZE_FIXED */ -#ifndef ASSEMBLER - /* * Convert addresses to pages and vice versa. No rounding is used. * The atop_32 and ptoa_32 macros should not be use on 64 bit types. @@ -243,8 +249,24 @@ extern int page_shift; #define page_aligned(x) ((((vm_object_offset_t) (x)) & PAGE_MASK) == 0) -extern vm_size_t mem_size; /* size of physical memory (bytes) */ +extern vm_size_t mem_size; /* 32-bit size of memory - limited by maxmem - deprecated */ extern uint64_t max_mem; /* 64-bit size of memory - limited by maxmem */ +extern uint64_t mem_actual; /* 64-bit size of memory - not limited by maxmem */ +extern uint64_t sane_size; /* Memory size to use for defaults calculations */ +extern addr64_t vm_last_addr; /* Highest kernel virtual address known to the VM system */ + +/* We need a way to get rid of compiler warnings when we cast from */ +/* a 64 bit value to an address that is 32 bits. */ +/* We know at this point the cast is harmless but sometime in */ +/* the future it may not be. */ +/* When size of an int is no longer equal to size of uintptr_t then */ +/* the compile will fail and we know we need to fix our cast. */ +#include +#ifndef __CAST_DOWN_CHECK +#define __CAST_DOWN_CHECK +typedef char __NEED_TO_CHANGE_CAST_DOWN[ sizeof(uintptr_t) == sizeof(int) ? 0 : -1 ]; +#define CAST_DOWN( type, addr ) ( ((type)((uintptr_t) (addr))) ) +#endif /* __CAST_DOWN_CHECK */ #endif /* ASSEMBLER */ #endif /* _MACH_VM_PARAM_H_ */ diff --git a/osfmk/mach/vm_statistics.h b/osfmk/mach/vm_statistics.h index 754aec336..6f8d83de0 100644 --- a/osfmk/mach/vm_statistics.h +++ b/osfmk/mach/vm_statistics.h @@ -120,6 +120,8 @@ typedef struct pmap_statistics *pmap_statistics_t; #define VM_MEMORY_MALLOC_LARGE 3 #define VM_MEMORY_MALLOC_HUGE 4 #define VM_MEMORY_SBRK 5// uninteresting -- no one should call +#define VM_MEMORY_REALLOC 6 +#define VM_MEMORY_MALLOC_TINY 7 #define VM_MEMORY_ANALYSIS_TOOL 10 diff --git a/osfmk/mach/vm_types.h b/osfmk/mach/vm_types.h index 7bf53e479..cdfa02cd3 100644 --- a/osfmk/mach/vm_types.h +++ b/osfmk/mach/vm_types.h @@ -39,6 +39,34 @@ typedef vm_offset_t pointer_t; typedef vm_offset_t vm_address_t; typedef uint64_t vm_object_offset_t; +/* + * We use addr64_t for 64-bit addresses that are used on both + * 32 and 64-bit machines. On PPC, they are passed and returned as + * two adjacent 32-bit GPRs. We use addr64_t in places where + * common code must be useable both on 32 and 64-bit machines. + */ +typedef uint64_t addr64_t; /* Basic effective address */ + +/* + * We use reg64_t for addresses that are 32 bits on a 32-bit + * machine, and 64 bits on a 64-bit machine, but are always + * passed and returned in a single GPR on PPC. 
This type + * cannot be used in generic 32-bit c, since on a 64-bit + * machine the upper half of the register will be ignored + * by the c compiler in 32-bit mode. In c, we can only use the + * type in prototypes of functions that are written in and called + * from assembly language. This type is basically a comment. + */ +typedef uint32_t reg64_t; + +/* + * To minimize the use of 64-bit fields, we keep some physical + * addresses (that are page aligned) as 32-bit page numbers. + * This limits the physical address space to 16TB of RAM. + */ +typedef uint32_t ppnum_t; /* Physical page number */ + + #ifdef KERNEL_PRIVATE #if !defined(__APPLE_API_PRIVATE) || !defined(MACH_KERNEL_PRIVATE) diff --git a/osfmk/man/host_security_create_task_token.html b/osfmk/man/host_security_create_task_token.html new file mode 100755 index 000000000..9fdd8a2b2 --- /dev/null +++ b/osfmk/man/host_security_create_task_token.html @@ -0,0 +1 @@ +

host_security_create_task_token


Function - Create a new task with an explicit security token.

SYNOPSIS

kern_return_t   host_security_create_task_token
                (host_security_t                  host_security,
                 task_t                             parent_task,
                 security_token_t                security_token,
                 audit_token_t                      audit_token,
                 ledger_port_array_t                    ledgers,
                 boolean_t                       inherit_memory,
                 task_t                             child_task);

PARAMETERS

host_security
[in security send right] The host's security port.

parent_task
[in task send right] The port for the task from which to draw the child task's port rights and address space.

security_token
[in scalar] The task's security token.

audit_token
[in scalar] The task's audit token.

ledgers
[pointer to in array of ledger send rights] The set of ledgers from which the task will draw its resources.

inherit_memory
[in scalar] Address space inheritance indicator. If true, the child task inherits the address space of the parent task. If false, the kernel assigns the child task an empty address space.

child_task
[out task send right] The kernel-assigned port name for the new task.

DESCRIPTION

The host_security_create_task_token function creates a new task from parent_task with explicit security and audit token values, returning the name of the new task in the parameter specified by child_task. Other than the security and audit token values, the child task is as if created by task_create.
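
EXAMPLE

The following is a minimal sketch, not taken from the kernel sources: it shows how a bootstrap-style task holding the host security port might create a child task with explicit tokens. The call is written against the SYNOPSIS as shown above (the MIG-generated C stub may differ, for example by taking an explicit count argument for the ledgers array). The helper name spawn_with_tokens and the token values are hypothetical.

#include <mach/mach.h>

kern_return_t
spawn_with_tokens(host_security_t host_security, task_t *child_task)
{
        /* Token layouts follow osfmk/mach/message.h:
         * security_token_t holds two 32-bit words, audit_token_t eight. */
        security_token_t sec_token   = { {0, 1} };  /* illustrative values */
        audit_token_t    audit_token = { {0, 0, 0, 0, 0, 0, 0, 0} };

        /* No ledgers; give the child an empty address space. */
        return host_security_create_task_token(host_security,
                                               mach_task_self(),       /* parent_task */
                                               sec_token,
                                               audit_token,
                                               (ledger_port_array_t)0, /* ledgers */
                                               FALSE,                  /* inherit_memory */
                                               child_task);
}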

NOTES

The host security port is a privileged port given to the system bootstrap task for the use of this call.

RETURN VALUES

KERN_INVALID_SECURITY
The value of host_security does not specify the security port for the host on which the task resides.

RELATED INFORMATION

Functions: task_create, host_security_set_task_token, mach_msg. \ No newline at end of file diff --git a/osfmk/man/host_security_set_task_token.html b/osfmk/man/host_security_set_task_token.html new file mode 100755 index 000000000..052694e55 --- /dev/null +++ b/osfmk/man/host_security_set_task_token.html @@ -0,0 +1 @@ +

host_security_set_task_token


Function - Change the target task's security token.

SYNOPSIS

kern_return_t   host_security_set_task_token
                (host_security_t                  host_security,
                 task_t                                    task,
                 security_token_t                security_token,
                 audit_token_t                      audit_token,
                 host_t                                    host);

PARAMETERS

host_security
[in security send right] The host's security port.

task
[in task send right] The port for the task for which the token is to be set.

security_token
[in scalar] The new security token.

audit_token
[in scalar] The new audit token.

host
[in host send right] The task's new host-self port.

DESCRIPTION

The host_security_set_task_token function changes the specified task's security and audit tokens; the new tokens will be included in all subsequent messages sent from the task. The initial value of a task's security and audit tokens is that of its parent.
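
EXAMPLE

A minimal sketch, again written against the SYNOPSIS as shown above rather than the generated MIG stub; the helper name stamp_task_tokens and the token values are hypothetical.

#include <mach/mach.h>

kern_return_t
stamp_task_tokens(host_security_t host_security, task_t target_task, host_t host)
{
        security_token_t sec_token   = { {0, 1} };  /* cf. KERNEL_SECURITY_TOKEN_VALUE */
        audit_token_t    audit_token = { {0, 0, 0, 0, 0, 0, 0, 0} };

        /* All messages target_task sends after this call will carry
         * the new tokens in their message trailers. */
        return host_security_set_task_token(host_security, target_task,
                                            sec_token, audit_token, host);
}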

NOTES

The host security port is a privileged port given to the system bootstrap task for the use of this call.

RETURN VALUES

KERN_INVALID_SECURITY
The value of host_security does not specify the security port for the host on which the task resides.

RELATED INFORMATION

Functions: task_create, task_info, mach_msg. \ No newline at end of file diff --git a/osfmk/man/index.html b/osfmk/man/index.html index 86568bf81..1df1cd2cd 100755 --- a/osfmk/man/index.html +++ b/osfmk/man/index.html @@ -1 +1 @@ - Mach Kernel Interface Reference Manual

Mach IPC Interface

Mach IPC presents itself in a few forms: message queues, lock-sets, and semaphores (more may be added in the future).  All share one common characteristic: the capabilities presented by each are represented through a handle known as a Mach port.  Specific rights represented in these Mach port capability handles allow the underlying IPC object to be used and manipulated in consistent ways.

Mach Message Queue Interface

mach_msg - Send and/or receive a message from the target port.
mach_msg_overwrite - Send and/or receive messages with possible overwrite.

Mach Message Queue Data Structures

mach_msg_descriptor - Specifies an element of a complex IPC message.
mach_msg_header - Specifies the content of an IPC message header.

Mach Lock-Set Interface

lock_acquire - Acquire ownership of a lock.
lock_handoff - Hand-off ownership of a lock.
lock_handoff_accept - Accept lock ownership from a handoff.
lock_make_stable - Stabilize the state of the specified lock.
lock_release - Release ownership of a lock.
lock_set_create - Create a new lock set.
lock_set_destroy - Destroy a lock set and its associated locks.
lock_try - Attempt to acquire access rights to a lock.

Mach Semaphore Interface

semaphore_create - Create a new semaphore.
semaphore_destroy - Destroy a semaphore.
semaphore_signal - Increments the semaphore count.
semaphore_signal_all - Wake up all threads blocked on a semaphore.
semaphore_wait - Wait on the specified semaphore.

Mach Port Management Interface

mach_port_allocate - Create caller-specified type of port right.
mach_port_allocate_full - Create a port right with full Mach port semantics.
mach_port_allocate_name - Create a port right with the caller-specified name.
mach_port_allocate_qos - Allocate a port with specified "quality of service".
mach_port_allocate_subsystem - Create a port right associated with the caller-specified subsystem.
mach_port_deallocate - Decrement the target port right's user reference count.
mach_port_destroy - Deallocate all port rights associated with specified name.
mach_port_extract_right - Remove the specified right from the target task and return it to the caller.
mach_port_get_attributes - Return information about target port as specified by the caller.
mach_port_get_refs - Return the current count of user references on the target port right.
mach_port_get_set_status - Return the port right names contained in the target port set.
mach_port_insert_right - Insert the specified port right into the target task.
mach_port_mod_refs - Modify the specified port right's count of user references.
mach_port_move_member - Move the specified receive right into or out of the specified port set.
mach_port_names - Return information about a task's port name space.
mach_port_request_notification - Request notification of the specified port event type.
mach_port_set_attributes - Set the target port's attributes.
mach_port_set_mscount - Change the target port's make-send count.
mach_port_set_seqno - Change the current value of the target port's sequence number.
mach_port_type - Return the characteristics of the target port name.
mach_reply_port - Allocate a new port and insert corresponding receive right in the calling task.
mach_subsystem_create - Used by a server to register information about an RPC subsystem with the kernel.

Mach Port Data Structures

mach_port_limits - Specifies a port's resource and message queue limits.
mach_port_qos - Specifies a port's attributes with respect to "Quality Of Service."
mach_port_status - Used to present a port's current status with respect to various important attributes.

Mach Port Notification Callbacks

do_mach_notify_dead_name - Handle the current instance of a dead-name notification.
do_mach_notify_no_senders - Handle the current instance of a no-more-senders notification.
do_mach_notify_port_deleted - Handle the current instance of a port-deleted notification.
do_mach_notify_port_destroyed - Handle the current instance of a port-destroyed notification.
do_mach_notify_send_once - Handle the current instance of a send-once notification.

Mach Port Notification Callback Server Helpers

notify_server - Detect and handle a kernel-generated IPC notification.

Mach Virtual Memory Interface

Mach Virtual Memory Address Space Manipulation Interface

host_page_size - Provide the system's virtual page size.
vm_allocate - Allocate a region of virtual memory.
vm_behavior_set - Specify expected access patterns for the target VM region.
vm_copy - Copy a region of virtual memory.
vm_deallocate - Deallocate a region of virtual memory.
vm_inherit - Set a VM region's inheritance attribute.
vm_machine_attribute - Get/set the target memory region's special attributes.
vm_map - Map the specified memory object to a region of virtual memory.
vm_msync - Synchronize the specified region of virtual memory.
vm_protect - Set access privilege attribute for a region of virtual memory.
vm_read - Read the specified range of target task's address space.
vm_region - Return description of a virtual memory region.
vm_remap - Map memory objects in one address space to that of another.
vm_wire - Modify the target region's paging characteristics.
vm_write - Write data to the specified address in the target address space.

Data Structures

vm_region_basic_info - Defines the attributes of a task's memory region.
vm_statistics - Defines statistics for the kernel's use of virtual memory.

External Memory Management Interface

The External Memory Management Interface (EMMI) is undergoing significant change in the Darwin system. For this reason, the interface is not currently available to user-level programs. Even for kernel extensions, use of these interfaces is not supported. Instead, the BSD filesystem's Universal Buffer Cache (UBC) mechanism should be used.

memory_object_change_attributes - Modify subset of memory object attributes.
memory_object_destroy - Shut down a memory object.
memory_object_get_attributes - Return current attributes for a memory object.
memory_object_lock_request - Restrict access to memory object data.
memory_object_synchronize_completed - Synchronized data has been processed.

Data Structures

memory_object_attr_info - Defines memory object attributes.
memory_object_perf_info - Specifies performance-related memory object attributes.

External Memory Manager Interface Callbacks

memory_object_create - Assign a new memory object to the default memory manager.
memory_object_data_initialize - Provide initial data for a new memory object.
memory_object_data_request - Request that memory manager page-in specified data.
memory_object_data_return - Return memory object data to the appropriate memory manager.
memory_object_data_unlock - Request a memory manager release the lock on specific data.
memory_object_init - Inform a memory manager on first use of a memory object.
memory_object_synchronize - Request synchronization of data with backing store.
memory_object_terminate - Relinquish access to a memory object.

EMMI Callback Server Helpers

memory_object_default_server - Handle kernel operation request targeted for the default pager.
memory_object_server - Handle kernel operation request aimed at a given memory manager.

Default Memory Management Interface

default_pager_add_segment - Add additional backing storage for a default pager.
default_pager_backing_store_create - Create a backing storage object.
default_pager_backing_store_delete - Delete a backing storage object.
default_pager_backing_store_info - Return information about a backing storage object.
default_pager_info - Furnish caller with information about the default pager.
default_pager_object_create - Initialize a non-persistent memory object.
host_default_memory_manager - Register/Lookup the host's default pager.

Process Management Interface

Task Interface

mach_ports_lookup - Provide caller with an array of the target task's well-known ports.
mach_ports_register - Register an array of well-known ports on behalf of the target task.
mach_task_self - Return a send right to the caller's task_self port.
task_create - Create a new task.
task_get_emulation_vector - Return an array identifying the target task's user-level system call handlers.
task_get_exception_ports - Return send rights to the target task's exception ports.
task_get_special_port - Return a send right to the indicated special port.
task_info - Return per-task information according to specified flavor.
task_resume - Decrement the target task's suspend count.
task_sample - Sample the target task's thread program counters periodically.
task_set_emulation - Establish a user-level handler for a system call.
task_set_emulation_vector - Establish the target task's user-level system call handlers.
task_set_exception_ports - Set target task's exception ports.
task_set_info - Set task-specific information state.
task_set_port_space - Set the size of the target task's port name space table.
task_set_special_port - Set the indicated special port.
task_suspend - Suspend the target task.
task_swap_exception_ports - Set target task's exception ports, returning the previous exception ports.
task_terminate - Terminate the target task and deallocate its resources.
task_threads - Return the target task's list of threads.

Task Data Structures

task_basic_info - Defines basic information for a task.
task_thread_times_info - Defines thread execution times information for tasks.

Thread Interface

mach_thread_self - Returns the thread self port.
thread_abort - Abort a thread.
thread_abort_safely - Abort a thread, restartably.
thread_create - Create a thread within a task.
thread_create_running - Optimized creation of a running thread.
thread_depress_abort - Cancel thread scheduling depression.
thread_get_exception_ports - Return a send right to an exception port.
thread_get_special_port - Return a send right to the caller-specified special port.
thread_get_state - Return the execution state for a thread.
thread_info - Return information about a thread.
thread_resume - Resume a thread.
thread_sample - Perform periodic PC sampling for a thread.
thread_set_exception_ports - Set exception ports for a thread.
thread_set_special_port - Set caller-specified special port belonging to the target thread.
thread_set_state - Set the target thread's user-mode execution state.
thread_suspend - Suspend a thread.
thread_swap_exception_ports - Swap exception ports for a thread.
thread_terminate - Destroy a thread.
thread_wire - Mark the thread as privileged with respect to kernel resources.

Thread Data Structures

thread_basic_info - Defines basic information for a thread.

Thread Exception Callbacks

catch_exception_raise - Handles the occurrence of an exception within a thread.

Thread Exception Callback Server Helpers

exc_server - Handle kernel-reported thread exception.

Scheduling Interface

task_policy - Set target task's default scheduling policy state.
task_set_policy - Set target task's default scheduling policy state.
thread_policy - Set target thread's scheduling policy state.
thread_set_policy - Set target thread's scheduling policy state.
thread_switch - Cause context switch with options.

Scheduling Data Structures

policy_fifo_info - Specifies information associated with the system's First-In-First-Out scheduling policy.
policy_rr_info - Specifies information associated with the system's Round Robin scheduling policy.
policy_timeshare_info - Specifies information associated with the system's Timeshare scheduling policy.

System Management Interface

Host Interface

host_get_clock_service - Return a send right to a kernel clock's service port.
host_get_time - Returns the current time as seen by that host.
host_info - Return information about a host.
host_kernel_version - Return kernel version information for a host.
host_statistics - Return statistics for a host.
mach_host_self - Returns send rights to the task's host self port.

Data Structures

host_basic_info - Used to present basic information about a host.
host_load_info - Used to present a host's processor load information.
host_sched_info - Used to present the set of scheduler limits associated with the host.
kernel_resource_sizes - Used to present the sizes of kernel's major structures.

Host Control Interface

host_adjust_time - Arranges for the time on a specified host to be gradually changed by an adjustment value.
host_default_memory_manager - Set the default memory manager.
host_get_boot_info - Return operator boot information.
host_get_clock_control - Return a send right to a kernel clock's control port.
host_processor_slots - Return a list of numbers that map processor slots to active processors.
host_processors - Return a list of send rights representing all processor ports.
host_reboot - Reboot this host.
host_set_time - Establishes the time on the specified host.

Host Security Interface

task_create_security_token - Create a new task with an explicit security token.
task_set_security_token - Change the target task's security token.

Resource Accounting Interface

The Mach resource accounting mechanism is not functional in the current Mac OS X/Darwin system. It will become functional in a future release.

ledger_create - Create a subordinate ledger.
ledger_read - Return the ledger limit and balance.
ledger_terminate - Destroy a ledger.
ledger_transfer - Transfer resources from a parent ledger to a child.

Processor Management Interface

processor_control - Perform caller-specified operation on target processor.
processor_exit - Exit a processor.
processor_info - Return information about a processor.
processor_start - Start a processor.

Processor Data Structures

processor_basic_info - Defines the basic information about a processor.

Processor Set Interface

The processor set interface allows for the grouping of tasks and processors for the purpose of exclusive scheduling. These interfaces are deprecated and should not be used in code that isn't tied to a particular release of Mac OS X/Darwin. These will likely change or disappear in a future release.

host_processor_sets - Return a list of send rights representing all processor set name ports.
host_processor_set_priv - Translate a processor set name port into a processor set control port.
processor_assign - Assign a processor to a processor set.
processor_get_assignment - Get current assignment for a processor.
processor_set_create - Create a new processor set.
processor_set_default - Return the default processor set.
processor_set_destroy - Destroy the target processor set.
processor_set_info - Return processor set state according to caller-specified flavor.
processor_set_max_priority - Sets the maximum scheduling priority for a processor set.
processor_set_policy_control - Set target processor set's scheduling policy state.
processor_set_policy_disable - Disables a scheduling policy for a processor set.
processor_set_policy_enable - Enables a scheduling policy for a processor set.
processor_set_statistics - Return scheduling statistics for a processor set.
processor_set_tasks - Return all tasks currently assigned to the target processor set.
processor_set_threads - Return all threads currently assigned to the target processor set.
task_assign - Assign a task to a processor set.
task_assign_default - Assign a task to the default processor set.
task_get_assignment - Return the processor set to which a task is assigned.
thread_assign - Assign a thread to a processor set.
thread_assign_default - Assign a thread to the default processor set.
thread_get_assignment - Return the processor set to which a thread is assigned.

Processor Set Data Structures

processor_set_basic_info - Defines the basic information about a processor set.
processor_set_load_info - Defines the scheduling statistics for a processor set.

Clock Interface

clock_alarm - Set up an alarm.
clock_get_attributes - Return attributes of a clock.
clock_get_time - Return the current time.
clock_map_time - Return a memory object that maps a clock.
clock_set_attributes - Set a particular clock's attributes.
clock_set_time - Set the current time.
clock_sleep - Delay the invoking thread until a specified time.

Clock Data Structures

mapped_tvalspec - Specifies the format the kernel uses to maintain a mapped clock's time.
tvalspec - Defines format of system time values.

Clock Interface Callbacks

clock_alarm_reply - Ring a preset alarm.

Clock Callback Server Helpers

clock_reply_server - Handle kernel-generated alarm.

Multi-Computer Support Interface

These multi-computer support interfaces are no longer supported by the Mac OS X/Darwin kernel. If multi-computer support returns in a future release, similar interfaces will likely be reintroduced.

host_page_size - Returns the page size for the given host.
ledger_get_remote - Return send right to specified host's remote ledger port.
ledger_set_remote - Set this host's remote ledger port.
norma_get_special_port - Returns a send right for a specified node-specific special port.
norma_node_self - Return the node index of the current host.
norma_port_location_hint - Guess a port's current location.
norma_set_special_port - Set node-specific special port.
norma_task_clone - Create a remote task that shares access to parent task's memory.
norma_task_create - Create a remote task using task_create semantics.
norma_task_teleport - "Clone" a task on a specified node.

Machine Specific Interface

Intel 386 Support

i386_get_ldt - Returns per-thread segment descriptors from the local descriptor table (LDT).
i386_io_port_add - Adds a device to the I/O permission bitmap for a thread.
i386_io_port_list - Returns a list of the devices named in the thread's I/O permission bitmap.
i386_io_port_remove - Removes the specified device from the thread's I/O permission bitmap.
i386_set_ldt - Allows a thread to have a private local descriptor table (LDT).

PowerPC Support

\ No newline at end of file
+
Mach Kernel Interface Reference Manual

Mach IPC Interface

Mach IPC presents itself in a few forms: message queues, lock-sets, and semaphores (more may be added in the future).  All share one common characteristic: the capabilities presented by each are represented through a handle known as a Mach port.  Specific rights represented in these Mach port capability handles allow the underlying IPC object to be used and manipulated in consistent ways.
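
To make the capability model concrete, here is a hedged user-space sketch: it allocates a receive right, adds a send right under the same name, and passes one message through the queue with mach_msg. The simple_msg_t layout, its msg_data field, and the message ID 100 are invented for illustration, not canonical usage.

    #include <mach/mach.h>
    #include <stdio.h>

    /* A trivial inline message: a header plus one integer of payload. */
    typedef struct {
        mach_msg_header_t header;
        int               msg_data;       /* hypothetical payload field */
    } simple_msg_t;

    int main(void)
    {
        mach_port_t   port;
        kern_return_t kr;

        /* Create a receive right; its name denotes the new port. */
        kr = mach_port_allocate(mach_task_self(),
                                MACH_PORT_RIGHT_RECEIVE, &port);
        if (kr != KERN_SUCCESS) return 1;

        /* Add a send right under the same name so a message can be queued. */
        kr = mach_port_insert_right(mach_task_self(), port, port,
                                    MACH_MSG_TYPE_MAKE_SEND);
        if (kr != KERN_SUCCESS) return 1;

        simple_msg_t msg;
        msg.header.msgh_bits        = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
        msg.header.msgh_size        = sizeof(msg);
        msg.header.msgh_remote_port = port;           /* destination */
        msg.header.msgh_local_port  = MACH_PORT_NULL; /* no reply wanted */
        msg.header.msgh_id          = 100;            /* arbitrary ID */
        msg.msg_data                = 42;

        kr = mach_msg(&msg.header, MACH_SEND_MSG, sizeof(msg), 0,
                      MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (kr != MACH_MSG_SUCCESS) return 1;

        /* Receive it back; the buffer must leave room for the trailer. */
        struct {
            simple_msg_t       body;
            mach_msg_trailer_t trailer;
        } rcv;

        kr = mach_msg(&rcv.body.header, MACH_RCV_MSG, 0, sizeof(rcv),
                      port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (kr != MACH_MSG_SUCCESS) return 1;

        printf("received id %d, data %d\n",
               (int)rcv.body.header.msgh_id, rcv.body.msg_data);
        return 0;
    }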

Mach Message Queue Interface

mach_msg - Send and/or receive a message from the target port.
mach_msg_overwrite - Send and/or receive messages with possible overwrite.

Mach Message Queue Data Structures

mach_msg_descriptor - Specifies an element of a complex IPC message.
mach_msg_header - Specifies the content of an IPC message header.

Mach Lock-Set Interface

lock_acquire - Acquire ownership of a lock.
lock_handoff - Hand-off ownership of a lock.
lock_handoff_accept - Accept lock ownership from a handoff.
lock_make_stable - Stabilize the state of the specified lock.
lock_release - Release ownership of a lock.
lock_set_create - Create a new lock set.
lock_set_destroy - Destroy a lock set and its associated locks.
lock_try - Attempt to acquire access rights to a lock.
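
A minimal sketch of these lock-set calls, assuming the routines are exposed to user space via <mach/mach.h> on the system at hand (the wrapper name lock_set_example is illustrative): create a set of two locks, take and release lock 0, then destroy the set.

    #include <mach/mach.h>

    void lock_set_example(void)
    {
        lock_set_t    locks;
        kern_return_t kr;

        /* Two locks in the set, FIFO wakeup order. */
        kr = lock_set_create(mach_task_self(), &locks, 2, SYNC_POLICY_FIFO);
        if (kr != KERN_SUCCESS) return;

        if (lock_acquire(locks, 0) == KERN_SUCCESS) {
            /* ... critical section guarded by lock 0 ... */
            lock_release(locks, 0);
        }

        lock_set_destroy(mach_task_self(), locks);
    }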

Mach Semaphore Interface

semaphore_create - Create a new semaphore.
semaphore_destroy - Destroy a semaphore.
semaphore_signal - Increments the semaphore count.
semaphore_signal_all - Wake up all threads blocked on a semaphore.
semaphore_wait - Wait on the specified semaphore.
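
These calls compose in the obvious way; a minimal sketch, assuming the standard user-space headers (the wrapper name is illustrative):

    #include <mach/mach.h>
    #include <mach/semaphore.h>
    #include <mach/task.h>

    void semaphore_example(void)
    {
        semaphore_t   sem;
        kern_return_t kr;

        /* FIFO wakeup policy, initial count of zero. */
        kr = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
        if (kr != KERN_SUCCESS) return;

        semaphore_signal(sem);  /* count 0 -> 1 */
        semaphore_wait(sem);    /* count 1 -> 0; would block at zero */

        semaphore_destroy(mach_task_self(), sem);
    }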

Mach Port Management Interface

mach_port_allocate - Create caller-specified type of port right.
mach_port_allocate_full - Create a port right with full Mach port semantics.
mach_port_allocate_name - Create a port right with the caller-specified name.
mach_port_allocate_qos - Allocate a port with specified "quality of service".
mach_port_allocate_subsystem - Create a port right associated with the caller-specified subsystem.
mach_port_deallocate - Decrement the target port right's user reference count.
mach_port_destroy - Deallocate all port rights associated with specified name.
mach_port_extract_right - Remove the specified right from the target task and return it to the caller.
mach_port_get_attributes - Return information about target port as specified by the caller.
mach_port_get_refs - Return the current count of user references on the target port right.
mach_port_get_set_status - Return the port right names contained in the target port set.
mach_port_insert_right - Insert the specified port right into the target task.
mach_port_mod_refs - Modify the specified port right's count of user references.
mach_port_move_member - Move the specified receive right into or out of the specified port set.
mach_port_names - Return information about a task's port name space.
mach_port_request_notification - Request notification of the specified port event type.
mach_port_set_attributes - Set the target port's attributes.
mach_port_set_mscount - Change the target port's make-send count.
mach_port_set_seqno - Change the current value of the target port's sequence number.
mach_port_type - Return the characteristics of the target port name.
mach_reply_port - Allocate a new port and insert corresponding receive right in the calling task.
mach_subsystem_create - Used by a server to register information about an RPC subsystem with the kernel.
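
Of these, mach_port_request_notification benefits most from an example. The following hedged sketch asks for a dead-name notification on a send right named target, delivered to notify_port; both port arguments and the wrapper name are assumptions of the example.

    #include <mach/mach.h>
    #include <mach/notify.h>

    /* 'target' is a send right we hold; 'notify_port' is a receive right
       we own on which the notification message should arrive. */
    kern_return_t watch_for_death(mach_port_t target, mach_port_t notify_port)
    {
        mach_port_t previous = MACH_PORT_NULL;

        /* Deliver a dead-name notification for 'target' to 'notify_port'
           as a send-once message. */
        return mach_port_request_notification(mach_task_self(), target,
                                              MACH_NOTIFY_DEAD_NAME, 0,
                                              notify_port,
                                              MACH_MSG_TYPE_MAKE_SEND_ONCE,
                                              &previous);
    }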

Mach Port Data Structures

mach_port_limits - Specifies a port's resource and message queue limits.
mach_port_qos - Specifies a port's attributes with respect to "Quality Of Service."
mach_port_status - Used to present a port's current status with respect to various important attributes.

Mach Port Notification Callbacks

do_mach_notify_dead_name - Handle the current instance of a dead-name notification.
do_mach_notify_no_senders - Handle the current instance of a no-more-senders notification.
do_mach_notify_port_deleted - Handle the current instance of a port-deleted notification.
do_mach_notify_port_destroyed - Handle the current instance of a port-destroyed notification.
do_mach_notify_send_once - Handle the current instance of a send-once notification.

Mach Port Notification Callback Server Helpers

notify_server - Detect and handle a kernel-generated IPC notification.

Mach Virtual Memory Interface

Mach Virtual Memory Address Space Manipulation Interface

host_page_size - Provide the system's virtual page size.
vm_allocate - Allocate a region of virtual memory.
vm_behavior_set - Specify expected access patterns for the target VM region.
vm_copy - Copy a region of virtual memory.
vm_deallocate - Deallocate a region of virtual memory.
vm_inherit - Set a VM region's inheritance attribute.
vm_machine_attribute - Get/set the target memory region's special attributes.
vm_map - Map the specified memory object to a region of virtual memory.
vm_msync - Synchronize the specified region of virtual memory.
vm_protect - Set access privilege attribute for a region of virtual memory.
vm_read - Read the specified range of target task's address space.
vm_region - Return description of a virtual memory region.
vm_remap - Map memory objects from one task's address space into another's.
vm_wire - Modify the target region's paging characteristics.
vm_write - Write data to the specified address in the target address space.
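
A minimal sketch of the basic allocate/use/deallocate cycle, using the older boolean "anywhere" form of vm_allocate (the wrapper name is illustrative):

    #include <mach/mach.h>
    #include <string.h>

    void vm_example(void)
    {
        vm_address_t  addr = 0;
        vm_size_t     size = vm_page_size;  /* one page */
        kern_return_t kr;

        /* TRUE: let the kernel choose where to place the region. */
        kr = vm_allocate(mach_task_self(), &addr, size, TRUE);
        if (kr != KERN_SUCCESS) return;

        memset((void *)addr, 0, size);      /* region arrives zero-filled;
                                               shown here only as a use */
        vm_deallocate(mach_task_self(), addr, size);
    }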

Data Structures

vm_region_basic_info - Defines the attributes of a task's memory region.
vm_statistics - Defines statistics for the kernel's use of virtual memory.

External Memory Management Interface

The External Memory Management Interface (EMMI) is undergoing significant change in the Darwin system. For this reason, the interface is not currently available to user-level programs. Even for kernel extensions, use of these interfaces is not supported. Instead, the BSD filesystem's Universal Buffer Cache (UBC) mechanism should be used.

memory_object_change_attributes - Modify subset of memory object attributes.
memory_object_destroy - Shut down a memory object.
memory_object_get_attributes - Return current attributes for a memory object.
memory_object_lock_request - Restrict access to memory object data.
memory_object_synchronize_completed - Synchronized data has been processed.

Data Structures

memory_object_attr_info - Defines memory object attributes.
memory_object_perf_info - Specifies performance-related memory object attributes.

External Memory Manager Interface Callbacks

memory_object_create - Assign a new memory object to the default memory manager.
memory_object_data_initialize - Provide initial data for a new memory object.
memory_object_data_request - Request that memory manager page-in specified data.
memory_object_data_return - Return memory object data to the appropriate memory manager.
memory_object_data_unlock - Request a memory manager release the lock on specific data.
memory_object_init - Inform a memory manager on first use of a memory object.
memory_object_synchronize - Request synchronization of data with backing store.
memory_object_terminate - Relinquish access to a memory object.

EMMI Callback Server Helpers

memory_object_default_server - Handle kernel operation request targeted for the default pager.
memory_object_server - Handle kernel operation request aimed at a given memory manager.

Default Memory Management Interface

default_pager_add_segment - Add additional backing storage for a default pager.
default_pager_backing_store_create - Create a backing storage object.
default_pager_backing_store_delete - Delete a backing storage object.
default_pager_backing_store_info - Return information about a backing storage object.
default_pager_info - Furnish caller with information about the default pager.
default_pager_object_create - Initialize a non-persistent memory object.
host_default_memory_manager - Register/Lookup the host's default pager.

Process Management Interface

Task Interface

mach_ports_lookup - Provide caller with an array of the target task's well-known ports.
mach_ports_register - Register an array of well-known ports on behalf of the target task.
mach_task_self - Return a send right to the caller's task_self port.
task_create - Create a new task.
task_get_emulation_vector - Return an array identifying the target task's user-level system call handlers.
task_get_exception_ports - Return send rights to the target task's exception ports.
task_get_special_port - Return a send right to the indicated special port.
task_info - Return per-task information according to specified flavor.
task_resume - Decrement the target task's suspend count.
task_sample - Sample the target task's thread program counters periodically.
task_set_emulation - Establish a user-level handler for a system call.
task_set_emulation_vector - Establish the target task's user-level system call handlers.
task_set_exception_ports - Set target task's exception ports.
task_set_info - Set task-specific information state.
task_set_port_space - Set the size of the target task's port name space table.
task_set_special_port - Set the indicated special port.
task_suspend - Suspend the target task.
task_swap_exception_ports - Set target task's exception ports, returning the previous exception ports.
task_terminate - Terminate the target task and deallocate its resources.
task_threads - Return the target task's list of threads.

Task Data Structures

task_basic_info - Defines basic information for a task.
task_thread_times_info - Defines thread execution times information for tasks.

Thread Interface

mach_thread_self - Returns the thread self port.
thread_abort - Abort a thread.
thread_abort_safely - Abort a thread, restartably.
thread_create - Create a thread within a task.
thread_create_running - Optimized creation of a running thread.
thread_depress_abort - Cancel thread scheduling depression.
thread_get_exception_ports - Return a send right to an exception port.
thread_get_special_port - Return a send right to the caller-specified special port.
thread_get_state - Return the execution state for a thread.
thread_info - Return information about a thread.
thread_resume - Resume a thread.
thread_sample - Perform periodic PC sampling for a thread.
thread_set_exception_ports - Set exception ports for a thread.
thread_set_special_port - Set caller-specified special port belonging to the target thread.
thread_set_state - Set the target thread's user-mode execution state.
thread_suspend - Suspend a thread.
thread_swap_exception_ports - Swap exception ports for a thread.
thread_terminate - Destroy a thread.
thread_wire - Mark the thread as privileged with respect to kernel resources.
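
As a hedged sketch of how task_threads and thread_info combine (the thread array is returned as out-of-line memory that the caller must release; the wrapper name is illustrative):

    #include <mach/mach.h>
    #include <stdio.h>

    void thread_example(void)
    {
        thread_act_array_t       threads;
        mach_msg_type_number_t   tcount, icount;
        thread_basic_info_data_t tinfo;
        kern_return_t            kr;
        unsigned                 i;

        kr = task_threads(mach_task_self(), &threads, &tcount);
        if (kr != KERN_SUCCESS) return;

        for (i = 0; i < tcount; i++) {
            icount = THREAD_BASIC_INFO_COUNT;
            if (thread_info(threads[i], THREAD_BASIC_INFO,
                            (thread_info_t)&tinfo, &icount) == KERN_SUCCESS)
                printf("thread %u: suspend count %d\n",
                       i, tinfo.suspend_count);
            mach_port_deallocate(mach_task_self(), threads[i]);
        }

        /* The kernel allocated the array in our address space; free it. */
        vm_deallocate(mach_task_self(), (vm_address_t)threads,
                      tcount * sizeof(threads[0]));
    }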

Thread Data Structures

thread_basic_info - Defines basic information for a thread.

Thread Exception Callbacks

catch_exception_raise - Handles the occurrence of an exception within a thread.

Thread Exception Callback Server Helpers

exc_server - Handle kernel-reported thread exception.

Scheduling Interface

task_policy - Set target task's default scheduling policy state.
task_set_policy - Set target task's default scheduling policy state.
thread_policy - Set target thread's scheduling policy state.
thread_set_policy - Set target thread's scheduling policy state.
thread_switch - Cause context switch with options.
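
Of these, thread_switch can be called directly by ordinary applications; a hedged sketch of a yield with priority depression follows (SWITCH_OPTION_DEPRESS and the 10 ms period are illustrative choices, as is the wrapper name):

    #include <mach/mach.h>
    #include <mach/thread_switch.h>

    void yield_briefly(void)
    {
        /* No specific hint thread; depress this thread's priority for
           about 10 ms or until another thread has run. */
        (void)thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10);
    }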

Scheduling Data Structures

policy_fifo_info - Specifies information associated with the system's First-In-First-Out scheduling policy.
policy_rr_info - Specifies information associated with the system's Round Robin scheduling policy.
policy_timeshare_info - Specifies information associated with the system's Timeshare scheduling policy.

System Management Interface

Host Interface

host_get_clock_service - Return a send right to a kernel clock's service port.
host_get_time - Returns the current time as seen by that host.
host_info - Return information about a host.
host_kernel_version - Return kernel version information for a host.
host_statistics - Return statistics for a host.
mach_host_self - Returns send rights to the task's host self port.
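
A minimal sketch combining mach_host_self and host_info to retrieve the host_basic_info structure listed below (the wrapper name is illustrative):

    #include <mach/mach.h>
    #include <stdio.h>

    void host_example(void)
    {
        host_basic_info_data_t info;
        mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
        kern_return_t          kr;

        kr = host_info(mach_host_self(), HOST_BASIC_INFO,
                       (host_info_t)&info, &count);
        if (kr == KERN_SUCCESS)
            printf("cpus: %d of %d, memory: %lu MB\n",
                   info.avail_cpus, info.max_cpus,
                   (unsigned long)(info.memory_size >> 20));
    }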

Data Structures

host_basic_info - Used to present basic information about a host.
host_load_info - Used to present a host's processor load information.
host_sched_info - Used to present the set of scheduler limits associated with the host.
kernel_resource_sizes - Used to present the sizes of kernel's major structures.

Host Control Interface

host_adjust_time - Arranges for the time on a specified host to be gradually changed by an adjustment value.
host_default_memory_manager - Set the default memory manager.
host_get_boot_info - Return operator boot information.
host_get_clock_control - Return a send right to a kernel clock's control port.
host_processor_slots - Return a list of numbers that map processor slots to active processors.
host_processors - Return a list of send rights representing all processor ports.
host_reboot - Reboot this host.
host_set_time - Establishes the time on the specified host.

Host Security Interface

host_security_create_task_token - Create a new task with an explicit security token.
host_security_set_task_token - Change the target task's security token.

Resource Accounting Interface

The Mach resource accounting mechanism is not functional in the current Mac OS X/Darwin system. It will become functional in a future release.

ledger_create - Create a subordinate ledger.
ledger_read - Return the ledger limit and balance.
ledger_terminate - Destroy a ledger.
ledger_transfer - Transfer resources from a parent ledger to a child.

Processor Management Interface

processor_control - Perform caller-specified operation on target processor.
processor_exit - Exit a processor.
processor_info - Return information about a processor.
processor_start - Start a processor.

Processor Data Structures

processor_basic_info - Defines the basic information about a processor.

Processor Set Interface

The processor set interface allows for the grouping of tasks and processors for the purpose of exclusive scheduling. These interfaces are deprecated and should not be used in code that isn't tied to a particular release of Mac OS X/Darwin. These will likely change or disappear in a future release.

host_processor_sets - Return a list of send rights representing all processor set name ports.
host_processor_set_priv - Translate a processor set name port into a processor set control port.
processor_assign - Assign a processor to a processor set.
processor_get_assignment - Get current assignment for a processor.
processor_set_create - Create a new processor set.
processor_set_default - Return the default processor set.
processor_set_destroy - Destroy the target processor set.
processor_set_info - Return processor set state according to caller-specified flavor.
processor_set_max_priority - Sets the maximum scheduling priority for a processor set.
processor_set_policy_control - Set target processor set's scheduling policy state.
processor_set_policy_disable - Disables a scheduling policy for a processor set.
processor_set_policy_enable - Enables a scheduling policy for a processor set.
processor_set_statistics - Return scheduling statistics for a processor set.
processor_set_tasks - Return all tasks currently assigned to the target processor set.
processor_set_threads - Return all threads currently assigned to the target processor set.
task_assign - Assign a task to a processor set.
task_assign_default - Assign a task to the default processor set.
task_get_assignment - Return the processor set to which a task is assigned.
thread_assign - Assign a thread to a processor set.
thread_assign_default - Assign a thread to the default processor set.
thread_get_assignment - Return the processor set to which a thread is assigned.

Processor Set Data Structures

processor_set_basic_info - Defines the basic information about a processor set.
processor_set_load_info - Defines the scheduling statistics for a processor set.

Clock Interface

clock_alarm - Set up an alarm.
clock_get_attributes - Return attributes of a clock.
clock_get_time - Return the current time.
clock_map_time - Return a memory object that maps a clock.
clock_set_attributes - Set a particular clock's attributes.
clock_set_time - Set the current time.
clock_sleep - Delay the invoking thread until a specified time.
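
A hedged sketch of the common read path: obtain the system clock's service port, sample it, and release the port (in the user-level headers the tvalspec type appears as mach_timespec_t; the wrapper name is illustrative):

    #include <mach/mach.h>
    #include <mach/clock.h>
    #include <stdio.h>

    void clock_example(void)
    {
        clock_serv_t    clk;
        mach_timespec_t now;
        kern_return_t   kr;

        kr = host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clk);
        if (kr != KERN_SUCCESS) return;

        if (clock_get_time(clk, &now) == KERN_SUCCESS)
            printf("%u.%09d\n", now.tv_sec, now.tv_nsec);

        mach_port_deallocate(mach_task_self(), clk);
    }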

Clock Data Structures

mapped_tvalspec - Specifies the format the kernel uses to maintain a mapped clock's time.
tvalspec - Defines format of system time values.

Clock Interface Callbacks

clock_alarm_reply - Ring a preset alarm.

Clock Callback Server Helpers

clock_reply_server - Handle kernel-generated alarm.

Multi-Computer Support Interface

These multi-computer support interfaces are no longer supported by the Mac OS X/Darwin kernel. If multi-computer support returns in a future release, similar interfaces will likely be reintroduced.

host_page_size - Returns the page size for the given host.
ledger_get_remote - Return send right to specified host's remote ledger port.
ledger_set_remote - Set this host's remote ledger port.
norma_get_special_port - Returns a send right for a specified node-specific special port.
norma_node_self - Return the node index of the current host.
norma_port_location_hint - Guess a port's current location.
norma_set_special_port - Set node-specific special port.
norma_task_clone - Create a remote task that shares access to parent task's memory.
norma_task_create - Create a remote task using task_create semantics.
norma_task_teleport - "Clone" a task on a specified node.

Machine Specific Interface

Intel 386 Support

i386_get_ldt - Returns per-thread segment descriptors from the local descriptor table (LDT).
i386_io_port_add - Adds a device to the I/O permission bitmap for a thread.
i386_io_port_list - Returns a list of the devices named in the thread's I/O permission bitmap.
i386_io_port_remove - Removes the specified device from the thread's I/O permission bitmap.
i386_set_ldt - Allows a thread to have a private local descriptor table (LDT).

PowerPC Support

\ No newline at end of file
diff --git a/osfmk/man/task_create.html b/osfmk/man/task_create.html
index c2e5e7c78..0cdc648a4 100755
--- a/osfmk/man/task_create.html
+++ b/osfmk/man/task_create.html
@@ -1 +1 @@
-

task_create


Function - Create a new task.

SYNOPSIS

kern_return_t   task_create
                (task_t                             parent_task,
                 ledger_port_array_t                    ledgers,
                 int                               ledger_count,
                 boolean_t                       inherit_memory,
                 task_t                              child_task);

PARAMETERS

parent_task
[in task send right] The port for the task from which to draw the child task's port rights and address space.

ledgers
[pointer to in array of ledger send rights] Resource ledgers (on the destination host) from which the task will draw its resources. The first element of this array is the wired kernel ledger, the second the paged space ledger. If the number of ledgers supplied does not match the required number or one or more is null, the parent task's ledger is used.

ledger_count
[in scalar] The number of ledger ports in the ledgers array.

inherit_memory
[in scalar] Address space inheritance indicator. If true, the child task inherits the (inheritable) address space of the parent task. If false, the kernel assigns the child task an empty address space.

child_task
[out task send right] The kernel-assigned port for the new task.

DESCRIPTION

The task_create function creates a new task from parent_task and returns the name of the new task in child_task. The child task acquires shared or copied parts of the parent's address space (see vm_inherit). The child task initially contains no threads. The child task inherits the parent's security ID.

The child task receives the following "special" ports, which are created or copied for it at task creation:

[task-self send right]
The port by which the kernel knows the new child task and allows it to be manipulated. The child task holds a send right for this port. The port name is also returned to the calling task.

[bootstrap send right]
The port to which the child task can send a message requesting return of any system service ports that it needs (for example, a port to the Network Name Server or the Environment Manager). The child task inherits a send right for this port from the parent task. The task can use task_set_special_port to change this port.

[host-self send right]
The port by which the child task requests information about its host. The child task inherits a send right for this port from the parent task.

[ledger send rights]
The ports naming the ledgers from which the task draws its resources.

The child task also inherits the following ports:

[sample send right]
The port to which PC sampling messages are to be sent.

[exception send rights]
Ports to which exception messages are sent.

[registered send rights]
Ports to system services.

NOTES

The ledgers functionality mentioned above is not currently implemented.

RETURN VALUES

Only generic errors apply.

RELATED INFORMATION

Functions: task_create_security_token, task_resume, task_set_special_port, task_suspend, task_terminate, task_threads, thread_create, thread_resume, vm_inherit, task_sample, task_set_exception_ports, mach_ports_register, norma_task_create, task_set_security_token.
\ No newline at end of file
+

task_create


Function - Create a new task.

SYNOPSIS

kern_return_t   task_create
                (task_t                             parent_task,
                 ledger_port_array_t                    ledgers,
                 int                               ledger_count,
                 boolean_t                       inherit_memory,
                 task_t                              child_task);

PARAMETERS

parent_task
[in task send right] The port for the task from which to draw the child task's port rights and address space.

ledgers
[pointer to in array of ledger send rights] Resource ledgers (on the destination host) from which the task will draw its resources. The first element of this array is the wired kernel ledger, the second the paged space ledger. If the number of ledgers supplied does not match the required number or one or more is null, the parent task's ledger is used.

ledger_count
[in scalar] The number of ledger ports in the ledgers array.

inherit_memory
[in scalar] Address space inheritance indicator. If true, the child task inherits the (inheritable) address space of the parent task. If false, the kernel assigns the child task an empty address space.

child_task
[out task send right] The kernel-assigned port for the new task.

DESCRIPTION

The task_create function creates a new task from parent_task and returns the name of the new task in child_task. The child task acquires shared or copied parts of the parent's address space (see vm_inherit). The child task initially contains no threads. The child task inherits the parent's security ID.

The child task receives the following "special" ports, which are created or copied for it at task creation:

[task-self send right]
The port by which the kernel knows the new child task and allows it to be manipulated. The child task holds a send right for this port. The port name is also returned to the calling task.

[bootstrap send right]
The port to which the child task can send a message requesting return of any system service ports that it needs (for example, a port to the Network Name Server or the Environment Manager). The child task inherits a send right for this port from the parent task. The task can use task_set_special_port to change this port.

[host-self send right]
The port by which the child task requests information about its host. The child task inherits a send right for this port from the parent task.

[ledger send rights]
The ports naming the ledgers from which the task draws its resources.

The child task also inherits the following ports:

[sample send right]
The port to which PC sampling messages are to be sent.

[exception send rights]
Ports to which exception messages are sent.

[registered send rights]
Ports to system services.

NOTES

The ledgers functionality mentioned above is not currently implemented.
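
For concreteness, a hedged sketch of the calling convention follows. Because ledgers are unimplemented, it passes an empty ledger array; whether an unprivileged caller may create raw Mach tasks can vary by configuration, so treat this purely as an illustration (the wrapper name is invented).

    #include <mach/mach.h>
    #include <stddef.h>

    void task_create_example(void)
    {
        task_t        child = TASK_NULL;
        kern_return_t kr;

        /* No ledgers (unimplemented); give the child an empty space. */
        kr = task_create(mach_task_self(), NULL, 0, FALSE, &child);
        if (kr != KERN_SUCCESS) return;

        /* The child has no threads yet; simply tear it down again. */
        (void)task_terminate(child);
        (void)mach_port_deallocate(mach_task_self(), child);
    }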

RETURN VALUES

Only generic errors apply.

RELATED INFORMATION

Functions: task_create_security_token, task_resume, task_set_special_port, task_suspend, task_terminate, task_threads, thread_create, thread_resume, vm_inherit, task_sample, task_set_exception_ports, mach_ports_register, norma_task_create, host_security_set_task_token.
\ No newline at end of file
diff --git a/osfmk/man/task_create_security_token.html b/osfmk/man/task_create_security_token.html
deleted file mode 100755
index 00d489a88..000000000
--- a/osfmk/man/task_create_security_token.html
+++ /dev/null
@@ -1 +0,0 @@
-

task_create_security_token


Function - Create a new task with an explicit security token.

SYNOPSIS

kern_return_t   task_create_security_token
                (task_t                             parent_task,
                 security_port_t                  host_security,
                 security_token_t                security_token,
                 ledger_port_array_t                    ledgers,
                 boolean_t                       inherit_memory,
                 task_t                              child_task);

PARAMETERS

parent_task
[in task send right] The port for the task from which to draw the child task's port rights and address space.

host_security
[in security send right] The host's security port.

security_token
[in scalar] The task's security token.

ledgers
[pointer to in array of ledger send rights] The set of ledgers from which the task will draw its resources.

inherit_memory
[in scalar] Address space inheritance indicator. If true, the child task inherits the address space of the parent task. If false, the kernel assigns the child task an empty address space.

child_task
[out task send right] The kernel-assigned port name for the new task.

DESCRIPTION

The task_create_security_token function creates a new task from parent_task with an explicit security token, returning the name of the new task in the parameter specified by child_task. Other than the security token, the child task is as if created by task_create.

NOTES

The host security port is a privileged port given to the system bootstrap task for the use of this call.

RETURN VALUES

KERN_INVALID_SECURITY
The value of host_security does not specify the security port for the host on which task lies.

RELATED INFORMATION

Functions: task_create, task_set_security_token, mach_msg.
\ No newline at end of file
diff --git a/osfmk/man/task_info.html b/osfmk/man/task_info.html
index dbf418fed..5fa772bc3 100755
--- a/osfmk/man/task_info.html
+++ b/osfmk/man/task_info.html
@@ -1 +1 @@
-

task_info


Function - Return per-task information according to specified flavor.

SYNOPSIS

kern_return_t   task_info
                (task_t                                    task,
                 task_flavor_t                           flavor,
                 task_info_t                          task_info,
                 mach_msg_type_number_t         task_info_count);

PARAMETERS

task
[in task send right] The port for the task for which the information is to be returned.

flavor
[in scalar] The type of information to be returned. Valid values are:

TASK_BASIC_INFO
Returns basic information about the task, such as the task's suspend count and number of resident pages. The structure returned is task_basic_info.

TASK_THREAD_TIMES_INFO
Returns system and user space run-times for live threads. The structure returned is task_thread_times_info.

TASK_SCHED_FIFO_INFO
Returns default FIFO scheduling policy attributes to be assigned to new threads. The structure returned is policy_fifo_base.

TASK_SCHED_RR_INFO
Returns default round-robin scheduling policy attributes to be assigned to new threads. The structure returned is policy_rr_base.

TASK_SCHED_TIMESHARE_INFO
Returns default timeshare scheduling policy attributes to be assigned to new threads. The structure returned is policy_timeshare_base.

TASK_SECURITY_TOKEN
Returns the security token for the task. The value returned is of type security_token_t.

TASK_USER_DATA
Returns user-specified information previously established via the task_set_info interface. The structure returned is task_user_data.

task_info
[out structure] Information about the specified task.

task_info_count
[in/out scalar] On input, the maximum size of the buffer; on output, the size returned (in natural-sized units).

DESCRIPTION

The task_info function returns an information structure of type flavor.

NOTES

At any given time, a task has one default scheduling policy assigned to it (as returned by TASK_BASIC_INFO). As such, only one of the scheduling flavors will return valid information.

RETURN VALUES

KERN_INVALID_POLICY
A request was made for the default scheduling policy attributes for the task but the requested policy is not the task's default policy.

RELATED INFORMATION

Functions: task_get_special_port, task_set_special_port, task_set_info, task_threads, thread_info, thread_get_state, thread_set_state.

Data Structures: task_basic_info, policy_timeshare_info, policy_fifo_info, policy_rr_info, task_thread_times_info.
\ No newline at end of file
+

task_info


Function - Return per-task information according to specified flavor.

SYNOPSIS

kern_return_t   task_info
                (task_t                                    task,
                 task_flavor_t                           flavor,
                 task_info_t                          task_info,
                 mach_msg_type_number_t         task_info_count);

PARAMETERS

task
[in task send right] The port for the task for which the information is to be returned.

flavor
[in scalar] The type of information to be returned. Valid values are:

TASK_BASIC_INFO
Returns basic information about the task, such as the task's suspend count and number of resident pages. The structure returned is task_basic_info.

TASK_THREAD_TIMES_INFO
Returns system and user space run-times for live threads. The structure returned is task_thread_times_info.

TASK_SCHED_FIFO_INFO
Returns default FIFO scheduling policy attributes to be assigned to new threads. The structure returned is policy_fifo_base.

TASK_SCHED_RR_INFO
Returns default round-robin scheduling policy attributes to be assigned to new threads. The structure returned is policy_rr_base.

TASK_SCHED_TIMESHARE_INFO
Returns default timeshare scheduling policy attributes to be assigned to new threads. The structure returned is policy_timeshare_base.

TASK_SECURITY_TOKEN
Returns the security token for the task. The value returned is of type security_token_t.

TASK_AUDIT_TOKEN
Returns the audit token for the task. The value returned is of type audit_token_t.

TASK_USER_DATA
Returns user-specified information previously established via the task_set_info interface. The structure returned is task_user_data.

task_info
[out structure] Information about the specified task.

task_info_count
[in/out scalar] On input, the maximum size of the buffer; on output, the size returned (in natural-sized units).

DESCRIPTION

The task_info function returns an information structure of type flavor.
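
A minimal usage sketch for the TASK_BASIC_INFO flavor; note that task_info_count must be initialized to the buffer's capacity before the call (the wrapper name is illustrative):

    #include <mach/mach.h>
    #include <stdio.h>

    void task_info_example(void)
    {
        task_basic_info_data_t info;
        mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
        kern_return_t          kr;

        kr = task_info(mach_task_self(), TASK_BASIC_INFO,
                       (task_info_t)&info, &count);
        if (kr == KERN_SUCCESS)
            printf("suspend count %d, resident size %lu bytes\n",
                   info.suspend_count, (unsigned long)info.resident_size);
    }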

NOTES

At any given time, a task has one default scheduling policy assigned to it (as returned by TASK_BASIC_INFO). As such, only one of the scheduling flavors will return valid information.

RETURN VALUES

KERN_INVALID_POLICY
A request was made for the default scheduling policy attributes for the task but the requested policy is not the task's default policy.

RELATED INFORMATION

Functions: task_get_special_port, task_set_special_port, task_set_info, task_threads, thread_info, thread_get_state, thread_set_state.

Data Structures: task_basic_info, policy_timeshare_info, policy_fifo_info, policy_rr_info, task_thread_times_info.
\ No newline at end of file
diff --git a/osfmk/man/task_set_security_token.html b/osfmk/man/task_set_security_token.html
deleted file mode 100755
index 1c9ace457..000000000
--- a/osfmk/man/task_set_security_token.html
+++ /dev/null
@@ -1 +0,0 @@
-

task_set_security_token


Function - Change the target task's security token.

SYNOPSIS

kern_return_t   task_set_security_token
                (task_t                                    task,
                 security_port_t                  host_security,
                 security_token_t                security_token);

PARAMETERS

task
[in task send right] The port for the task for which the token is to be set.

host_security
[in security send right] The host's security port.

security_token
[in scalar] The new security token.

DESCRIPTION

The task_set_security_token function changes the specified task's security token; the new token will be included in all subsequent messages sent from the task. The initial value of a task's security token is that of its parent.

NOTES

The host security port is a privileged port given to the system bootstrap task for the use of this call.

RETURN VALUES

KERN_INVALID_SECURITY
The value of host_security does not specify the security port for the host on which task lies.

RELATED INFORMATION

Functions: task_create, task_info, mach_msg. \ No newline at end of file diff --git a/osfmk/ppc/AltiAssist.s b/osfmk/ppc/AltiAssist.s index 5925372ae..540a232b9 100644 --- a/osfmk/ppc/AltiAssist.s +++ b/osfmk/ppc/AltiAssist.s @@ -39,8 +39,6 @@ #include #include -#define kernAccess 31 - ; ; ; General stuff what happens here: @@ -71,11 +69,7 @@ LEXT(AltivecAssist) - mfmsr r20 ; Get the current MSR li r10,emvr0 ; Point to the vector savearea - oris r20,r20,hi16(MASK(MSR_VEC)) ; Turn on vector - mtmsr r20 ; Turn on vector - isync li r11,emvr1 ; Another savearea stvxl v0,r10,r2 ; Save V0 diff --git a/osfmk/ppc/Diagnostics.c b/osfmk/ppc/Diagnostics.c index 43f57a908..cffb9067a 100644 --- a/osfmk/ppc/Diagnostics.c +++ b/osfmk/ppc/Diagnostics.c @@ -57,18 +57,17 @@ #include #include #include -#include #include #include #include #include -#include +#include #include extern struct vc_info vinfo; kern_return_t testPerfTrap(int trapno, struct savearea *ss, - unsigned int dsisr, unsigned int dar); + unsigned int dsisr, addr64_t dar); int diagCall(struct savearea *save) { @@ -78,8 +77,11 @@ int diagCall(struct savearea *save) { } ttt, adj; natural_t tbu, tbu2, tbl; struct per_proc_info *per_proc; /* Area for my per_proc address */ - int cpu; + int cpu, ret; unsigned int tstrt, tend, temp, temp2; + addr64_t src, snk; + uint64_t scom, hid1, hid4, srrwrk, stat; + scomcomm sarea; if(!(dgWork.dgFlags & enaDiagSCs)) return 0; /* If not enabled, cause an exception */ @@ -119,7 +121,7 @@ int diagCall(struct savearea *save) { */ case dgLRA: - save->save_r3 = pmap_extract(current_act()->map->pmap, save->save_r4); /* Get read address */ + save->save_r3 = pmap_find_phys(current_act()->map->pmap, save->save_r4); /* Get read address */ return -1; /* Return no AST checking... */ @@ -128,11 +130,58 @@ int diagCall(struct savearea *save) { */ case dgpcpy: -#if 0 - save->save_r3 = copyp2v(save->save_r4, save->save_r5, save->save_r6); /* Copy the physical page */ + +#if 1 + src = (save->save_r4 << 32) | (0x00000000FFFFFFFFULL & save->save_r5); /* Merge into 64-bit */ + snk = (save->save_r6 << 32) | (0x00000000FFFFFFFFULL & save->save_r7); /* Merge into 64-bit */ + save->save_r3 = copypv(src, snk, save->save_r8, save->save_r9); /* Copy the physical page */ #endif return 1; /* Return and check for ASTs... */ +/* + * Read/Write physical memory + */ + case dgprw: + + src = (save->save_r5 << 32) | (0x00000000FFFFFFFFULL & save->save_r6); /* Merge into 64-bit */ + + switch(save->save_r4) { /* Select the actual function */ + + case 0: + save->save_r3 = (uint64_t)ml_phys_read_byte((unsigned int)src); + break; + + case 1: + save->save_r3 = (uint64_t)ml_phys_read_byte_64(src); + break; + + case 2: + save->save_r3 = (uint64_t)ml_phys_read((unsigned int)src); + break; + + case 3: + save->save_r3 = (uint64_t)ml_phys_read_64(src); + break; + + case 4: + ml_phys_write_byte((unsigned int)src, (unsigned int)save->save_r7); + break; + + case 5: + ml_phys_write_byte_64(src, (unsigned int)save->save_r7); + break; + + case 6: + ml_phys_write((unsigned int)src, (unsigned int)save->save_r7); + break; + + case 7: + ml_phys_write_64(src, (unsigned int)save->save_r7); + break; + } + + return 1; /* Return and check for ASTs... 
*/ + /* * Soft reset processor @@ -162,16 +211,7 @@ int diagCall(struct savearea *save) { */ case dgFlush: -#if 1 cacheInit(); /* Blow cache */ -#else - asm volatile(" mftb %0" : "=r" (tstrt)); - tend = tstrt; - while((tend - tstrt) < 0x000A2837) { - asm volatile(" mftb %0" : "=r" (tend)); - } - -#endif return 1; /* Return and check for ASTs... */ /* @@ -192,9 +232,9 @@ int diagCall(struct savearea *save) { * parms - vaddr, paddr, size, prot, attributes */ case dgBMphys: - - pmap_map_block(current_act()->map->pmap, save->save_r4, save->save_r5, save->save_r6, /* Map in the block */ - save->save_r7, save->save_r8, 0); + + pmap_map_block(current_act()->map->pmap, (addr64_t)save->save_r4, /* Map in the block */ + save->save_r5, save->save_r6, save->save_r7, save->save_r8, 0); return 1; /* Return and check for ASTs... */ @@ -213,28 +253,16 @@ int diagCall(struct savearea *save) { /* * Allows direct control of alignment handling. * - * The bottom two bits of the parameter are used to set the two control bits: - * 0b00 - !trapUnalignbit - !notifyUnalignbit - default - instruction is emulated - * 0b01 - !trapUnalignbit - notifyUnalignbit - emulation is done, but traps afterwards - * 0b10 - trapUnalignbit - !notifyUnalignbit - no emulation - causes exception - * 0b11 - trapUnalignbit - notifyUnalignbit - no emulation - causes exception + * The bottom bit of the parameter is used to set the control bit, enaNotifyEM. */ case dgAlign: - temp = current_act()->mact.specFlags; /* Save the old values */ - - temp = ((current_act()->mact.specFlags >> (31 - trapUnalignbit - 1)) /* Reformat them to pass back */ - | (current_act()->mact.specFlags >> (31 - notifyUnalignbit))) & 3; - - temp2 = ((save->save_r4 << (31 - trapUnalignbit - 1)) & trapUnalign) /* Move parms into flag format */ - | ((save->save_r4 << (31 - notifyUnalignbit)) & notifyUnalign); - - current_act()->mact.specFlags &= ~(trapUnalign | notifyUnalign); /* Clean the old ones */ - current_act()->mact.specFlags |= temp2; /* Set the new ones */ - - per_proc_info[cpu_number()].spcFlags = current_act()->mact.specFlags; + temp = dgWork.dgFlags; /* Save the old values */ - save->save_r3 = temp; + temp2 = (save->save_r4 & 1) << (31 - enaNotifyEMb); /* Move parms into flag format */ + dgWork.dgFlags = (temp & ~enaNotifyEM) | temp2; /* Set the flag */ + + save->save_r3 = (temp >> (31 - enaNotifyEMb)) & 1; /* Return the original */ return 1; /* Return and check for ASTs... */ @@ -243,14 +271,102 @@ int diagCall(struct savearea *save) { */ case dgBootScreen: -#if 0 ml_set_interrupts_enabled(1); - (void)copyout((char *)&vinfo, (char *)save->save_r4, sizeof(struct vc_info)); /* Copy out the video info */ + (void)copyout((char *)&vinfo, CAST_DOWN(char *, save->save_r4), sizeof(struct vc_info)); /* Copy out the video info */ + ml_set_interrupts_enabled(0); + return 1; /* Return and check for ASTs... */ + +/* + * Don't return info for boot screen + */ + case dgCPNull: + + ml_set_interrupts_enabled(1); + (void)copyout((char *)&vinfo, CAST_DOWN(char *, save->save_r4), 0); /* Copy out nothing */ ml_set_interrupts_enabled(0); -#endif return 1; /* Return and check for ASTs... */ +/* + * Test machine check handler - only on 64-bit machines + */ + case dgmck: + if(!(per_proc_info[0].pf.Available & pf64Bit)) return 0; /* Leave if not correct machine */ + + fwEmMck(save->save_r4, save->save_r5, save->save_r6, save->save_r7, save->save_r8, save->save_r9); /* Start injecting */ + + return -1; /* Return and don't check for ASTs... 
*/ + +/* + * Set 64-bit on or off - only on 64-bit machines + */ + case dg64: + if(!(per_proc_info[0].pf.Available & pf64Bit)) return 0; /* Leave if not correct machine */ + + srrwrk = save->save_srr1 >> 63; /* Save the old 64-bit bit */ + + save->save_srr1 = (save->save_srr1 & 0x7FFFFFFFFFFFFFFFULL) | (save->save_r4 << 63); /* Set the requested mode */ + save->save_r3 = srrwrk; /* Return the old value */ + + return -1; /* Return and don't check for ASTs... */ + +/* + * Test the probe read function + */ + + case dgProbeRead: + + src = (save->save_r4 << 32) | (0x00000000FFFFFFFFULL & save->save_r5); /* Merge into 64-bit */ + save->save_r3 = ml_probe_read_64(src, &temp); /* Try the address */ + save->save_r4 = temp; /* Return the data */ + return -1; /* Regurn and don't check for ASTs */ + +/* + * Do perf monitor stuff + */ + + case dgPerfMon: + setPmon(save->save_r4, save->save_r5); /* Go load up MMCR0 and MMCR1 */ + return -1; /* Regurn and don't check for ASTs */ + +/* + * Map a page + * Don't bother to check for any errors. + * parms - vaddr, paddr, prot, attributes + */ + case dgMapPage: + + (void)mapping_map(current_act()->map->pmap, /* Map in the page */ + (addr64_t)(((save->save_r5 & 0xFFFFFFFF) << 32) | (save->save_r5 & 0xFFFFFFFF)), save->save_r6, 0, 1, VM_PROT_READ|VM_PROT_WRITE); + + return -1; /* Return and check for ASTs... */ + +/* + * SCOM interface + * parms - pointer to scomcomm + */ + case dgScom: + + ret = copyin((unsigned int)(save->save_r4), &sarea, sizeof(scomcomm)); /* Get the data */ + if(ret) return 0; /* Copyin failed - return an exception */ + + sarea.scomstat = 0xFFFFFFFFFFFFFFFFULL; /* Clear status */ + cpu = cpu_number(); /* Get us */ + + if((sarea.scomcpu < NCPUS) && machine_slot[sarea.scomcpu].running) { + if(sarea.scomcpu == cpu) fwSCOM(&sarea); /* Do it if it is us */ + else { /* Otherwise, tell the other processor */ + (void)cpu_signal(sarea.scomcpu, SIGPcpureq, CPRQscom ,(unsigned int)&sarea); /* Ask him to do this */ + (void)hw_cpu_sync((unsigned long)&sarea.scomstat, LockTimeOut); /* Wait for the other processor to get its temperature */ + } + } + + ret = copyout(&sarea, (unsigned int)(save->save_r4), sizeof(scomcomm)); /* Get the data */ + if(ret) return 0; /* Copyin failed - return an exception */ + + return -1; /* Return and check for ASTs... */ + + default: /* Handle invalid ones */ return 0; /* Return an exception */ @@ -259,7 +375,7 @@ int diagCall(struct savearea *save) { }; kern_return_t testPerfTrap(int trapno, struct savearea *ss, - unsigned int dsisr, unsigned int dar) { + unsigned int dsisr, addr64_t dar) { if(trapno != T_ALIGNMENT) return KERN_FAILURE; diff --git a/osfmk/ppc/Diagnostics.h b/osfmk/ppc/Diagnostics.h index df43630fe..c8f6853c0 100644 --- a/osfmk/ppc/Diagnostics.h +++ b/osfmk/ppc/Diagnostics.h @@ -40,6 +40,7 @@ #ifndef __ppc__ #error This file is only useful on PowerPC. 
#endif +#include int diagCall(struct savearea *save); @@ -55,6 +56,14 @@ int diagCall(struct savearea *save); #define dgBootScreen 7 #define dgFlush 8 #define dgAlign 9 +#define dgprw 10 +#define dgmck 11 +#define dg64 12 +#define dgProbeRead 13 +#define dgCPNull 14 +#define dgPerfMon 15 +#define dgMapPage 16 +#define dgScom 17 typedef struct diagWork { /* Diagnostic work area */ @@ -73,6 +82,10 @@ typedef struct diagWork { /* Diagnostic work area */ #define enaDiagSDMb 27 #define enaDiagEM 0x00000020 #define enaDiagEMb 26 +#define enaDiagTrap 0x00000040 +#define enaDiagTrapb 25 +#define enaNotifyEM 0x00000080 +#define enaNotifyEMb 24 /* Suppress lock checks */ #define disLkType 0x80000000 #define disLktypeb 0 @@ -92,7 +105,16 @@ typedef struct diagWork { /* Diagnostic work area */ } diagWork; +typedef struct scomcomm { + uint16_t scomcpu; /* CPU number */ + uint16_t scomfunc; /* 0 = read; 1 = write */ + uint32_t scomreg; /* SCOM register */ + uint64_t scomstat; /* returned status */ + uint64_t scomdata; /* input for write, output for read */ +} scomcomm; + extern diagWork dgWork; +extern int diagTrap(struct savearea *, unsigned int); #endif /* _DIAGNOSTICS_H_ */ diff --git a/osfmk/ppc/Emulate.s b/osfmk/ppc/Emulate.s index 7ce668c5f..f7037e6af 100644 --- a/osfmk/ppc/Emulate.s +++ b/osfmk/ppc/Emulate.s @@ -39,7 +39,6 @@ #include #include -#define kernAccess 31 #define traceInst 30 #define dssAllDone 29 @@ -67,13 +66,13 @@ LEXT(Emulate) + bf-- pf64Bitb,emn64 ; Skip if not 64-bit + b EXT(Emulate64) ; Jump to the 64-bit code... - mfsprg r31,0 ; Get the per_proc - lis r30,hi16(EXT(dgWork)) ; Get the high half of diagnostic work area - lwz r12,savesrr1(r13) ; Get the exception info - ori r30,r30,lo16(EXT(dgWork)) ; And the low half +emn64: mfsprg r31,0 ; Get the per_proc + lwz r12,savesrr1+4(r13) ; Get the exception info rlwinm. r0,r12,0,SRR1_PRG_ILL_INS_BIT,SRR1_PRG_ILL_INS_BIT ; Emulation candidate? - lwz r30,dgFlags(r30) ; Get the flags + lwz r30,dgFlags(0) ; Get the flags beq+ eExit ; Nope, do not try to emulate... rlwinm. r0,r30,0,enaDiagEMb,enaDiagEMb ; Do we want to try to emulate something? @@ -114,9 +113,9 @@ eNoVect: bl eIFetch ; Get the instruction image cror cr1_eq,cr1_eq,cr0_eq ; Remember bne cr1_eq,eNotIndex ; Go check non-index forms... - rlwinm. r21,r10,18,25,29 ; Extract index to rA to build EA - rlwinm r22,r10,23,25,29 ; Extract index to rB - addi r24,r13,saver0 ; Point to the start of registers + rlwinm. r21,r10,19,24,28 ; Extract index to rA to build EA + rlwinm r22,r10,24,24,28 ; Extract index to rB + addi r24,r13,saver0+4 ; Point to the start of registers li r19,0 ; Assume 0 base beq eZeroBase ; Yes... lwzx r19,r24,r21 ; Get the base register value @@ -131,15 +130,15 @@ eNotIndex: cmplwi r0,725 ; stswi? cror cr1_eq,cr1_eq,cr0_eq ; Remember bne cr1,eExit ; Not one we handle... - rlwinm. r21,r10,18,25,29 ; Extract index to rA to build EA - addi r24,r13,saver0 ; Point to the start of registers + rlwinm. r21,r10,19,24,28 ; Extract index to rA to build EA + addi r24,r13,saver0+4 ; Point to the start of registers li r22,0 ; Assume 0 base beq eFinishUp ; Yes, it is... lwzx r22,r24,r21 ; Get the base register value eFinishUp: stw r20,savedsisr(r13) ; Set the DSISR li r11,T_ALIGNMENT ; Get the exception code - stw r22,savedar(r13) ; Save the DAR + stw r22,savedar+4(r13) ; Save the DAR stw r11,saveexception(r13) ; Set the exception code b EXT(AlignAssist) ; Go emulate the handler... @@ -156,20 +155,15 @@ eExit: b EXT(EmulExit) ; Just return for now... 
.align 5 -eIFetch: lwz r23,savesrr1(r13) ; Get old MSR +eIFetch: lwz r23,savesrr1+4(r13) ; Get old MSR mflr r28 ; Save return rlwinm. r22,r23,0,MSR_PR_BIT,MSR_PR_BIT ; Within kernel? mfmsr r30 ; Save the MSR for now - lwz r23,savesrr0(r13) ; Get instruction address - crmove kernAccess,cr0_eq ; Remember if fault was in kernel - li r25,4 ; Set access length - or r22,r22,r30 ; Add PR to access MSR + lwz r23,savesrr0+4(r13) ; Get instruction address - bfl+ kernAccess,aaSetSegs ; Go set SRs if we are in user and need to - - ori r22,r22,lo16(MASK(MSR_DR)|MASK(MSR_RI)) ; Set RI onto access MSR + ori r22,r30,lo16(MASK(MSR_DR)|MASK(MSR_RI)) ; Set RI and DR onto access MSR crset cr0_eq ; Set this to see if we failed mtmsr r22 ; Flip DR, RI, and maybe PR on @@ -177,15 +171,10 @@ eIFetch: lwz r23,savesrr1(r13) ; Get old MSR lwz r10,0(r23) ; Fetch the instruction - crmove 28,cr0_eq ; Remember if we failed - li r0,loadMSR ; Set the magic "get back to supervisor" SC - mr r3,r30 ; Get MSR to load - sc ; Get back to supervisor state - - bfl+ kernAccess,aaUnSetSegs ; Go set SRs if we are in user and need to + mtmsr r30 ; Trans and RI off + isync mtlr r28 ; Restore the LR - crmove cr0_eq,28 ; Set CR0_EQ if the fetch succeeded blr ; Return with instruction image in R10 @@ -194,14 +183,14 @@ eIFetch: lwz r23,savesrr1(r13) ; Get old MSR ; eRedriveAsISI: - lwz r6,savesrr1(r13) ; Get the srr1 value + lwz r6,savesrr1+4(r13) ; Get the srr1 value lwz r4,SAVflags(r13) ; Pick up the flags li r11,T_INSTRUCTION_ACCESS ; Set failing instruction fetch code rlwimi r6,r1,0,0,4 ; Move the DSISR bits to the SRR1 oris r4,r4,hi16(SAVredrive) ; Set the redrive bit stw r11,saveexception(r13) ; Set the replacement code stw r4,SAVflags(r13) ; Set redrive request - stw r6,savesrr1(r13) ; Set the srr1 value + stw r6,savesrr1+4(r13) ; Set the srr1 value b EXT(EmulExit) ; Bail out to handle ISI... @@ -228,55 +217,40 @@ eRedriveAsISI: .globl EXT(AlignAssist) LEXT(AlignAssist) - -#if 0 - b EXT(EmulExit) ; Just return for now... -#endif - - + bf-- pf64Bitb,aan64 ; Skip if not 64-bit + b EXT(AlignAssist64) ; Jump to the 64-bit code... + +aan64: lwz r20,savedsisr(r13) ; Get the DSISR mfsprg r31,0 ; Get the per_proc - lwz r20,savedsisr(r13) ; Get the DSISR + mtcrf 0x10,r20 ; Put instruction ID in CR for later lwz r21,spcFlags(r31) ; Grab the special flags - mtcrf 0x1C,r20 ; Put instruction ID in CR for later + mtcrf 0x08,r20 ; Put instruction ID in CR for later rlwinm. r0,r21,0,runningVMbit,runningVMbit ; Are we running a VM? - lwz r22,savesrr1(r13) ; Get the SRR1 + mtcrf 0x04,r20 ; Put instruction ID in CR for later + lwz r22,savesrr1+4(r13) ; Get the SRR1 bne- aaPassAlong ; We are in a VM, no emulation for alignment exceptions... - rlwinm. r0,r21,0,trapUnalignbit,trapUnalignbit ; Should we trap alignment exceptions? + lwz r19,dgFlags(0) ; Get the diagnostics flags crxor iFloat,iOptype1,iOptype2 ; Set this to 0 if both bits are either 0 or 1 mr r26,r20 ; Save the DSISR - bne- aaPassAlong ; No alignment exceptions allowed... rlwinm. r0,r22,0,MSR_SE_BIT,MSR_SE_BIT ; Were we single stepping? - lwz r23,savedar(r13) ; Pick up the address that we want to access + lwz r23,savedar+4(r13) ; Pick up the address that we want to access crnot traceInst,cr0_eq ; Remember if trace is on - rlwinm. r0,r21,0,notifyUnalignbit,notifyUnalignbit ; Should we notify that an alignment exception happened? - mfsprg r28,2 ; Get the processor features - crnot iNotify,cr0_eq ; Remember to tell someone we did this - rlwinm. 
r22,r22,0,MSR_PR_BIT,MSR_PR_BIT ; Did we take the exception in the kernel and isolate PR? + + rlwinm. r0,r19,0,enaNotifyEMb,enaNotifyEMb ; Should we notify that an alignment exception happened? mfmsr r30 ; Save the MSR for now + crnot iNotify,cr0_eq ; Remember to tell someone we did this li r29,emfp0 ; Point to work area crxor iFloat,iFloat,iOptype3 ; Set true if we have a floating point instruction - or r22,r22,r30 ; Add PR to access MSR dcbz r29,r31 ; Clear and allocate a cache line for us to work in - rlwinm r24,r20,2,25,29 ; Get displacement to register to update if update form + rlwinm r24,r20,3,24,28 ; Get displacement to register to update if update form rlwimi r20,r20,24,28,28 ; Move load/store indication to the bottom of index - ori r22,r22,lo16(MASK(MSR_DR)|MASK(MSR_RI)) ; Set RI onto access MSR - crmove kernAccess,cr0_eq ; Remember if fault was in kernel - rlwinm. r28,r28,0,pfAltivecb,pfAltivecb ; Do we have Altivec on this machine? + ori r22,r30,lo16(MASK(MSR_DR)|MASK(MSR_RI)) ; Set RI onto access MSR rlwimi r20,r20,26,27,27 ; Move single/double indication to just above the bottom - beq aaNoVect ; Nope, no Altivec... - - dssall ; We need to kill streams because we are going to flip to problem state - sync - -aaNoVect: lis r29,hi16(aaFPopTable) ; High part of FP branch table + lis r29,hi16(EXT(aaFPopTable)) ; High part of FP branch table bf- iFloat,aaNotFloat ; This is not a floating point instruction... - li r25,8 ; Assume 8-byte access for now - ori r29,r29,lo16(aaFPopTable) ; Low part of FP branch table - bt iDouble,aaFPis8 ; So far, we think we are a double... - li r25,4 ; Set word access + ori r29,r29,lo16(EXT(aaFPopTable)) ; Low part of FP branch table -aaFPis8: rlwimi r29,r20,0,22,28 ; Index into table based upon register||iDouble||iStore - ori r0,r30,lo16(MASK(MSR_FP)) ; Turn on floating point + rlwimi r29,r20,0,22,28 ; Index into table based upon register||iDouble||iStore mtctr r29 ; Get set to call the function bt iStore,aaFPstore ; This is an FP store... @@ -284,11 +258,8 @@ aaFPis8: rlwimi r29,r20,0,22,28 ; Index into table based upon register||iDou ; Here we handle floating point loads ; -aaFPload: bfl+ kernAccess,aaSetSegs ; Go set SRs if we are in user and need to - - crset cr0_eq ; Set this to see if we failed - ori r3,r30,lo16(MASK(MSR_FP)) ; We will need FP on in a bit, so turn on when we ditch problem state - mtmsr r22 ; Flip DR, RI, and maybe PR on +aaFPload: crset cr0_eq ; Set this to see if we failed + mtmsr r22 ; Flip DR, RI isync lwz r10,0(r23) ; Get the first word @@ -297,8 +268,9 @@ aaFPload: bfl+ kernAccess,aaSetSegs ; Go set SRs if we are in user and need to lwz r11,4(r23) ; Get the second half aaLdNotDbl: mr r4,r0 ; Save the DAR if we failed the access - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state and turn on FP + + mtmsr r30 ; Turn off translation again + isync bf- cr0_eq,aaRedriveAsDSI ; Go redrive this as a DSI... @@ -315,24 +287,17 @@ aaLdNotDbl: mr r4,r0 ; Save the DAR if we failed the access .align 5 -aaFPstore: mtmsr r0 ; We need floating point on for the first phase - isync - - bctrl ; Go save the source FP register +aaFPstore: bctrl ; Go save the source FP register lwz r10,emfp0(r31) ; Get first word crandc iDouble,iDouble,iOptype4 ; Change to 4-byte access if stfiwx lwz r11,emfp0+4(r31) ; and the second bf+ iOptype4,aaNotstfiwx ; This is not a stfiwx... 
- li r25,4 ; Set this is a word mr r10,r11 ; The stfiwx wants to store the second half aaNotstfiwx: - bfl+ kernAccess,aaSetSegs ; Go set SRs if we are in user and need to - crset cr0_eq ; Set this to see if we failed - mr r3,r30 ; Set the normal MSR - mtmsr r22 ; Flip DR, RI, and maybe PR on + mtmsr r22 ; Flip DR, RI isync stw r10,0(r23) ; Save the first word @@ -341,31 +306,26 @@ aaNotstfiwx: stw r11,4(r23) ; Save the second half aaStNotDbl: mr r4,r0 ; Save the DAR if we failed the access - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state - + mtmsr r30 ; Turn off + isync bf- cr0_eq,aaRedriveAsDSI ; Go redrive this as a DSI... - - ; ; Common exit routines ; -aaComExit: lwz r10,savesrr0(r13) ; Get the failing instruction address +aaComExit: lwz r10,savesrr0+4(r13) ; Get the failing instruction address add r24,r24,r13 ; Offset to update register li r11,T_IN_VAIN ; Assume we are all done addi r10,r10,4 ; Step to the next instruction bf iUpdate,aaComExNU ; Skip if not an update form... - stw r23,saver0(r24) ; Update the target + stw r23,saver0+4(r24) ; Update the target aaComExNU: lwz r9,SAVflags(r13) ; Get the flags - stw r10,savesrr0(r13) ; Set new PC + stw r10,savesrr0+4(r13) ; Set new PC bt- traceInst,aaComExitrd ; We are tracing, go emulate trace... bf+ iNotify,aaComExGo ; Nothing special here, go... - - bfl+ kernAccess,aaUnSetSegs ; Go set SRs if we are in user and need to li r11,T_ALIGNMENT ; Set the we just did an alignment exception.... @@ -396,7 +356,7 @@ aaNotFloat: ori r19,r19,lo16(aaEmTable) ; Low part of table address blt- aaPassAlong ; We do not handle any of these (lwarx, stwcx., eciwx, ecowx)... add r19,r19,r3 ; Point to emulation routine - rlwinm r18,r26,29,25,29 ; Get the target/source register displacement + rlwinm r18,r26,30,24,28 ; Get the target/source register displacement mtctr r19 ; Set the routine address @@ -438,14 +398,12 @@ aaEmTable: .align 5 aaLmwStmw: - subfic r25,r18,32*4 ; Calculate the length of the transfer + rlwinm r17,r18,31,1,29 ; Convert doublword based index to words li r28,0 ; Set no extra bytes to move (used for string instructions) - mr r17,r25 ; Save the word transfer length here + subfic r17,r17,32*4 ; Calculate the length of the transfer -aaLSComm: addi r19,r13,saver0 ; Offset to registers in savearea +aaLSComm: addi r19,r13,saver0+4 ; Offset to registers in savearea mr r16,r23 ; Make a hunk pointer - - bfl+ kernAccess,aaSetSegs ; Go set SRs if we are in user and need to bt iUpdate,aaStmw ; This is the stmw... @@ -458,7 +416,7 @@ aaLmwNxt: cmplwi cr1,r17,8*4 ; Is there enough to move 8? subi r17,r17,8*4 ; Back off for another hunk crset cr0_eq ; Set this to see if we failed - mtmsr r22 ; Flip DR, RI, and maybe PR on + mtmsr r22 ; Flip DR, RI isync lwz r2,0(r16) ; Load word 0 @@ -478,38 +436,37 @@ aaLmwNxt: cmplwi cr1,r17,8*4 ; Is there enough to move 8? lwz r9,28(r16) ; Load word 7 aaLmwB1: mr r4,r0 ; Remember DAR, jus in case we failed the access - mr r3,r30 ; Set the normal MSR - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state + mtmsr r30 ; Turn off DR, RI + isync bf- cr0_eq,aaRedriveAsDSI ; We failed, go redrive this as a DSI... 
addi r16,r16,8*4 ; Point up to next input aread stwx r2,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed stwx r15,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed stwx r14,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed stwx r5,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed stwx r6,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed stwx r7,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed stwx r8,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed stwx r9,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed b aaLmwNxt ; Do the next hunk... @@ -533,26 +490,25 @@ aaLmwNxtH: cmplwi cr1,r17,4*4 ; Do we have 4 left? lwz r5,12(r16) ; Load word 3 aaLmwB2: mr r4,r0 ; Remember DAR, jus in case we failed the access - mr r3,r30 ; Set the normal MSR - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state + mtmsr r30 ; Turn off DR, RI + isync bf- cr0_eq,aaRedriveAsDSI ; We failed, go redrive this as a DSI... addi r16,r16,4*4 ; Point up to next input aread stwx r2,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed stwx r15,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed stwx r14,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed stwx r5,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed aaLmwL4: or. r5,r17,r28 ; Do we have anything left? cmplwi cr1,r17,(2*4) ; Do we have one, two, or three full words left? @@ -602,26 +558,27 @@ aaLmwDn: rlwinm r5,r5,24,0,7 ; Move first byte to top cmplwi cr1,r17,(2*4) ; Do we have one, two, or three full words left? mr r3,r30 ; Set the normal MSR rlwimi r5,r9,8,8,23 ; Move bytes 1 and 2 after 0 - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state + + mtmsr r30 ; Turn off DR, RI + isync bf- cr0_eq,aaRedriveAsDSI ; We failed, go redrive this as a DSI... 
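The recurring access pattern in these hunks is: crset cr0_eq, mtmsr r22 to flip DR and RI on, touch user memory, then test cr0 after every access. With RI (recoverable interrupt) set, the low-level DSI handler can clear cr0_eq and resume here instead of panicking, and the emulator then bails to aaRedriveAsDSI so the fault is redriven as an ordinary DSI. The change in this patch is that supervisor state is now restored with a plain mtmsr r30/isync rather than the old "magic" loadMSR system call. C has no MSR bits, but a setjmp/longjmp sketch, purely illustrative and not any kernel API, captures the shape of the protocol:

#include <setjmp.h>
#include <stdint.h>
#include <stdio.h>

static jmp_buf recovery;                 /* analog of the RI recovery point */

static void fake_dsi(void) {             /* stand-in for the DSI handler */
    longjmp(recovery, 1);                /* "clear cr0_eq and resume" */
}

static int guarded_read(const uint32_t *uaddr, uint32_t *out, int simulate_fault) {
    if (setjmp(recovery) != 0)
        return -1;                       /* caller redrives as a real DSI */
    if (simulate_fault)
        fake_dsi();
    *out = *uaddr;                       /* the probed access */
    return 0;
}

int main(void) {
    uint32_t word = 0x12345678, got = 0;
    printf("ok=%d\n", guarded_read(&word, &got, 0));   /* ok=0  */
    printf("ok=%d\n", guarded_read(&word, &got, 1));   /* ok=-1 */
    return 0;
}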
beq- cr2,aaLmwCb ; No full words, copy bytes... stwx r2,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed blt cr1,aaLmwCb ; We only had one, we are done... stwx r15,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed beq cr1,aaLmwCb ; We had two, we are done... stwx r14,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed aaLmwCb: mr. r28,r28 ; Any trailing bytes to do? beq+ aaComExit ; Nope, leave... @@ -644,29 +601,29 @@ aaStmwNxt: cmplwi cr1,r17,8*4 ; Is there enough to move 8? subi r17,r17,8*4 ; Back off for another hunk lwzx r2,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed lwzx r15,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed lwzx r14,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed lwzx r5,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed lwzx r6,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed lwzx r7,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed lwzx r8,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed lwzx r9,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed crset cr0_eq ; Set this to see if we failed mtmsr r22 ; Flip DR, RI, and maybe PR on @@ -692,9 +649,8 @@ aaStmwNxt: cmplwi cr1,r17,8*4 ; Is there enough to move 8? aaStmwB1: mr r4,r0 ; Remember DAR, jus in case we failed the access - mr r3,r30 ; Set the normal MSR - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state + mtmsr r30 ; Normal MSR + isync bt- cr0_eq,aaStmwNxt ; We have more to do and no failed access... b aaRedriveAsDSI ; We failed, go redrive this as a DSI... @@ -706,20 +662,20 @@ aaStmwNxtH: cmplwi cr1,r17,(4*4) ; Do we have at least 4 left? 
subi r17,r17,4*4 ; Set count properly lwzx r2,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed lwzx r15,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed lwzx r14,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed lwzx r5,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed crset cr0_eq ; Set this to see if we failed - mtmsr r22 ; Flip DR, RI, and maybe PR on + mtmsr r22 ; Flip DR, RI isync stw r2,0(r16) ; Store word 0 @@ -733,9 +689,8 @@ aaStmwNxtH: cmplwi cr1,r17,(4*4) ; Do we have at least 4 left? addi r16,r16,4*4 ; Point up to next input aread aaStmwB2: mr r4,r0 ; Remember DAR, jus in case we failed the access - mr r3,r30 ; Set the normal MSR - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state + mtmsr r30 ; Normal MSR + isync bf- cr0_eq,aaRedriveAsDSI ; We failed, go redrive this as a DSI... @@ -747,18 +702,18 @@ aaStmwL4: or. r5,r17,r28 ; Do we have anything left to do? beq- cr2,aaStmwBy1 ; No full words, check out bytes lwzx r2,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed blt cr1,aaStmwBy1 ; We only had one, go save it... lwzx r15,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed beq cr1,aaStmwBy1 ; We had two, go save it... lwzx r14,r19,r18 ; Store register - addi r18,r18,4 ; Next register - rlwinm r18,r18,0,25,29 ; Wrap back to 0 if needed + addi r18,r18,8 ; Next register + rlwinm r18,r18,0,24,28 ; Wrap back to 0 if needed aaStmwBy1: mr. r28,r28 ; Do we have any trailing bytes? beq+ aaStmwSt ; Nope... @@ -766,22 +721,23 @@ aaStmwBy1: mr. r28,r28 ; Do we have any trailing bytes? lwzx r5,r19,r18 ; Yes, pick up one extra register aaStmwSt: crset cr0_eq ; Set this to see if we failed - mtmsr r22 ; Flip DR, RI, and maybe PR on + mtmsr r22 ; Flip DR, RI isync beq- cr2,aaStmwBy2 ; No words, check trailing bytes... stw r2,0(r16) ; Save first word - bf- cr0_eq,aaStmwDn ; Read failed, escape... + bf- cr0_eq,aaStmwDn ; Store failed, escape... addi r16,r16,4 ; Bump sink blt cr1,aaStmwBy2 ; We only had one, we are done... stw r15,0(r16) ; Save second word - bf- cr0_eq,aaStmwDn ; Read failed, escape... + bf- cr0_eq,aaStmwDn ; Store failed, escape... addi r16,r16,4 ; Bump sink beq cr1,aaStmwBy2 ; We had two, we are done... stw r14,0(r16) ; Save third word + bf- cr0_eq,aaStmwDn ; Store failed, escape... 
addi r16,r16,4 ; Bump sink aaStmwBy2: rlwinm r2,r5,8,24,31 ; Get byte 0 @@ -804,9 +760,8 @@ aaStmwBy2: rlwinm r2,r5,8,24,31 ; Get byte 0 stb r14,2(r16) ; Save third byte aaStmwDn: mr r4,r0 ; Remember DAR, jus in case we failed the access - mr r3,r30 ; Set the normal MSR - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state + mtmsr r30 ; Normal MSR + isync bf- cr0_eq,aaRedriveAsDSI ; We failed, go redrive this as a DSI... @@ -819,7 +774,7 @@ aaStmwDn: mr r4,r0 ; Remember DAR, jus in case we failed the access .align 5 -aaLswx: lwz r17,savexer(r13) ; Pick up the XER +aaLswx: lwz r17,savexer+4(r13) ; Pick up the XER crclr iUpdate ; Make sure we think this the load form rlwinm. r25,r17,0,25,31 ; Get the number of bytes to load rlwinm r28,r17,0,30,31 ; Get the number of bytes past an even word @@ -853,7 +808,7 @@ aaLswi: mr r9,r23 ; Save the DAR .align 5 -aaStswx: lwz r17,savexer(r13) ; Pick up the XER +aaStswx: lwz r17,savexer+4(r13) ; Pick up the XER crclr iUpdate ; Make sure this is clear in case we have 0 length rlwinm. r25,r17,0,25,31 ; Get the number of bytes to load rlwinm r28,r17,0,30,31 ; Get the number of bytes past an even word @@ -891,28 +846,24 @@ aaStswi: mr r9,r23 ; Save the DAR aaLwbrx: add r18,r18,r13 ; Index to source register - li r25,4 ; Set the length - - bfl+ kernAccess,aaSetSegs ; Go set SRs if we are in user and need to crset cr0_eq ; Set this to see if we failed - mr r3,r30 ; Set the normal MSR mtmsr r22 ; Flip DR, RI, and maybe PR on isync lwz r11,0(r23) ; Load the word mr r4,r0 ; Save the DAR if we failed the access - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state - + mtmsr r30 ; Restore normal MSR + isync + bf- cr0_eq,aaRedriveAsDSI ; We failed, go redrive this as a DSI... rlwinm r10,r11,8,0,31 ; Get byte 0 to 3 and byte 2 to 1 rlwimi r10,r11,24,16,23 ; Move byte 1 to byte 2 rlwimi r10,r11,24,0,7 ; Move byte 3 to byte 0 - stw r10,saver0(r18) ; Set the register + stw r10,saver0+4(r18) ; Set the register b aaComExit ; All done, go exit... @@ -926,26 +877,22 @@ aaLwbrx: aaStwbrx: add r18,r18,r13 ; Index to source register - li r25,4 ; Set the length - lwz r11,saver0(r18) ; Get the register to store + lwz r11,saver0+4(r18) ; Get the register to store rlwinm r10,r11,8,0,31 ; Get byte 0 to 3 and byte 2 to 1 rlwimi r10,r11,24,16,23 ; Move byte 1 to byte 2 rlwimi r10,r11,24,0,7 ; Move byte 3 to byte 0 - bfl+ kernAccess,aaSetSegs ; Go set SRs if we are in user and need to - crset cr0_eq ; Set this to see if we failed - mr r3,r30 ; Set the normal MSR mtmsr r22 ; Flip DR, RI, and maybe PR on isync stw r10,0(r23) ; Store the reversed halfword mr r4,r0 ; Save the DAR if we failed the access - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state - + mtmsr r30 ; Restore normal MSR + isync + bt+ cr0_eq,aaComExit ; All done, go exit... b aaRedriveAsDSI ; We failed, go redrive this as a DSI... 
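aaLwbrx and aaStwbrx (just above, and continued below) emulate the byte-reversed word accesses with one rotate plus two rotate-and-insert operations. A C rendering of that exact three-step sequence, where the masks 0x0000FF00 and 0xFF000000 correspond to IBM bit ranges 16-23 and 0-7:

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, int s) {
    return (x << s) | (x >> (32 - s));
}

/* rlwinm r10,r11,8,0,31   -- rotate left 8: bytes 0->3 and 2->1 land right
   rlwimi r10,r11,24,16,23 -- patch byte 2 with source byte 1
   rlwimi r10,r11,24,0,7   -- patch byte 0 with source byte 3 */
static uint32_t bswap32_ppc(uint32_t r11) {
    uint32_t r10 = rotl32(r11, 8);
    r10 = (r10 & ~0x0000FF00u) | (rotl32(r11, 24) & 0x0000FF00u);
    r10 = (r10 & ~0xFF000000u) | (rotl32(r11, 24) & 0xFF000000u);
    return r10;
}

int main(void) {
    printf("%08X\n", (unsigned)bswap32_ppc(0x11223344u));   /* 44332211 */
    return 0;
}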
@@ -959,27 +906,23 @@ aaStwbrx: aaLhbrx: add r18,r18,r13 ; Index to source register - li r25,2 ; Set the length - - bfl+ kernAccess,aaSetSegs ; Go set SRs if we are in user and need to crset cr0_eq ; Set this to see if we failed - mr r3,r30 ; Set the normal MSR mtmsr r22 ; Flip DR, RI, and maybe PR on isync lhz r11,0(r23) ; Load the halfword mr r4,r0 ; Save the DAR if we failed the access - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state + mtmsr r30 ; Restore normal MSR + isync bf- cr0_eq,aaRedriveAsDSI ; We failed, go redrive this as a DSI... rlwinm r10,r11,8,16,23 ; Rotate bottom byte up one and clear everything else rlwimi r10,r11,24,24,31 ; Put old second from bottom into bottom - stw r10,saver0(r18) ; Set the register + stw r10,saver0+4(r18) ; Set the register b aaComExit ; All done, go exit... @@ -992,23 +935,19 @@ aaLhbrx: aaSthbrx: add r18,r18,r13 ; Index to source register - li r25,2 ; Set the length - lwz r10,saver0(r18) ; Get the register to store + lwz r10,saver0+4(r18) ; Get the register to store rlwinm r10,r10,8,0,31 ; Rotate bottom byte up one rlwimi r10,r10,16,24,31 ; Put old second from bottom into bottom - bfl+ kernAccess,aaSetSegs ; Go set SRs if we are in user and need to - crset cr0_eq ; Set this to see if we failed - mr r3,r30 ; Set the normal MSR mtmsr r22 ; Flip DR, RI, and maybe PR on isync sth r10,0(r23) ; Store the reversed halfword mr r4,r0 ; Save the DAR if we failed the access - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state + mtmsr r30 ; Restore normal MSR + isync bt+ cr0_eq,aaComExit ; All done, go exit... b aaRedriveAsDSI ; We failed, go redrive this as a DSI... @@ -1020,13 +959,9 @@ aaSthbrx: .align 5 aaDcbz: - li r25,32 ; Set the length rlwinm r23,r23,0,0,26 ; Round back to a 32-byte boundary - bfl+ kernAccess,aaSetSegs ; Go set SRs if we are in user and need to - crset cr0_eq ; Set this to see if we failed - mr r3,r30 ; Set the normal MSR li r0,0 ; Clear this out mtmsr r22 ; Flip DR, RI, and maybe PR on isync @@ -1048,8 +983,8 @@ aaDcbz: stw r0,28(r23) ; Clear word aaDcbzXit: mr r4,r0 ; Save the DAR if we failed the access - li r0,loadMSR ; Set the magic "get back to supervisor" SC - sc ; Get back to supervisor state + mtmsr r30 ; Restore normal MSR + isync crclr iUpdate ; Make sure we do not think this is an update form @@ -1061,10 +996,6 @@ aaDcbzXit: mr r4,r0 ; Save the DAR if we failed the access ; Unhandled alignment exception, pass it along ; -aaPassAlongUnMap: - bfl+ kernAccess,aaUnSetSegs ; Go set SRs if we are in user and need to - - aaPassAlong: b EXT(EmulExit) @@ -1078,7 +1009,6 @@ aaPassAlong: .align 5 aaComExitrd: - bfl+ kernAccess,aaUnSetSegs ; Go set SRs back if we need to because we are not going back to user yet oris r9,r9,hi16(SAVredrive) ; Set the redrive bit li r11,T_TRACE ; Set trace interrupt rlwinm r12,r12,0,16,31 ; Clear top half of SRR1 @@ -1094,92 +1024,15 @@ aaComExitrd: aaRedriveAsDSI: mr r20,r1 ; Save the DSISR mr r21,r4 - bfl+ kernAccess,aaUnSetSegs ; Go set SRs back if we need to because we are not going back to user yet lwz r4,SAVflags(r13) ; Pick up the flags li r11,T_DATA_ACCESS ; Set failing data access code oris r4,r4,hi16(SAVredrive) ; Set the redrive bit stw r20,savedsisr(r13) ; Set the DSISR of failed access - stw r21,savedar(r13) ; Set the address of the failed access + stw r21,savedar+4(r13) ; Set the address of the failed access stw r11,saveexception(r13) ; Set the replacement code stw r4,SAVflags(r13) ; Set redrive 
request b EXT(EmulExit) ; Bail out to handle ISI... -; -; Set segment registers for user access. Do not call this if we are trying to get -; supervisor state memory. We do not need this. -; -; Performance-wise, we will usually be setting one SR here. Most memory will be -; allocated before the 1GB mark. Since the kernel maps the first GB, the exception -; handler always sets the SRs before we get here. Therefore, we will usually -; have to remap it. -; -; Also, we need to un-do these mapping ONLY if we take a non-standard -; exit, e.g., emulate DSI, emulate trace exception, etc. This is because -; translation will never be turned on until we return and at that point, -; normal exception exit code will restore the first 4 SRs if needed. -; - - .align 5 - - .globl EXT(aaSetSegsX) - -LEXT(aaSetSegsX) - -aaSetSegs: addi r3,r25,-1 ; Point at last accessed offset in range - lwz r7,PP_USERPMAP(r31) ; Get the current user pmap - lis r0,0x4000 ; This is the address of the first segment outside of the kernel - rlwinm r5,r23,6,26,29 ; Get index into pmap table - add r4,r23,r3 ; Point to the last byte accessed - addi r7,r7,PMAP_SEGS ; Point to the segment slot - cmplw r23,r0 ; See if first segment register needs to be reloaded - cmplw cr2,r4,r0 ; Do we need to set the second (if any) SR? - xor r0,r4,r23 ; See if we are in the same segment as first - bge aaSetS1ok ; Nope, we are in a pure user range - - lwzx r6,r5,r7 ; Get the user address space SR value - mtsrin r6,r23 ; Load the corresponding SR register - -aaSetS1ok: rlwinm. r0,r0,0,0,3 ; Any change in segment? - bgelr- cr2 ; We are in user only space, we do not need to mess with SR - rlwinm r5,r4,6,26,29 ; Get index into pmap table - beqlr+ ; No change in segment, we are done... - - lwzx r6,r5,r7 ; Get the user address space SR value - mtsrin r6,r4 ; Load the corresponding SR register - blr ; Leave... - -; -; Unset segment registers for user access. Do not call unless we had a user access. -; - - .align 5 - - .globl EXT(aaUnSetSegsX) - -LEXT(aaUnSetSegsX) - -aaUnSetSegs: - addi r3,r25,-1 ; Point at last accessed offset in range - lis r0,0x4000 ; This is the address of the first segment outside of the kernel - lis r5,hi16(KERNEL_SEG_REG0_VALUE) ; Get the high half of the kernel SR0 value - add r4,r23,r3 ; Point to the last byte accessed - cmplw r23,r0 ; See if first segment register needs to be reloaded - rlwimi r5,r23,24,8,11 ; Make the correct kernel segment - cmplw cr2,r4,r0 ; Do we need to set the second (if any) SR? - xor r0,r4,r23 ; See if we are in the same segment as first - bge aaUnSetS1ok ; Nope, we are in a pure user range - - mtsrin r5,r23 ; Load the corresponding SR register - -aaUnSetS1ok: - rlwinm. r0,r0,0,0,3 ; Any change in segment? - bgelr cr2 ; We are in user only space, we do not need to mess with SR - rlwimi r5,r4,24,8,11 ; Make the correct kernel segment - beqlr+ ; No change in segment, we are done... - - mtsrin r5,r4 ; Load the corresponding SR register - blr ; Leave... - ; @@ -1192,8 +1045,9 @@ aaUnSetS1ok: ; .align 10 ; Make sure we are on a 1k boundary + .globl EXT(aaFPopTable) -aaFPopTable: +LEXT(aaFPopTable) lfs f0,emfp0(r31) ; Load single variant blr diff --git a/osfmk/ppc/Emulate64.s b/osfmk/ppc/Emulate64.s new file mode 100644 index 000000000..fff72fefb --- /dev/null +++ b/osfmk/ppc/Emulate64.s @@ -0,0 +1,945 @@ +/* + * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. 
+ * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* Emulate64.s + * + * Software emulation of instructions not handled in hw, on 64-bit machines. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +// CR bit set if the instruction is an "update" form (LFDU, STWU, etc): +#define kUpdate 25 + +// CR bit set if interrupt occured in trace mode (ie, MSR_SE_BIT): +#define kTrace 8 + +// CR bit set if notification on alignment interrupts is requested (notifyUnalignbit in spcFlags): +#define kNotify 9 + +// CR bit distinguishes between alignment and program exceptions: +#define kAlignment 10 + + + +// ************************************* +// * P R O G R A M I N T E R R U P T * +// ************************************* +// +// These are floating pt exceptions, illegal instructions, privileged mode violations, +// and traps. All we're interested in at this low level is illegal instructions. +// The ones we "emulate" are: +// DCBA, which is not implemented in the IBM 970. The emulation is to ignore it, +// as it is just a hint. +// MCRXR, which is not implemented on the IBM 970, but is in the PPC ISA. +// +// Additionally, to facilitate debugging the alignment handler, we recognize a special +// diagnostic mode that is used to simulate alignment exceptions. When in this mode, +// if the instruction has opcode==0 and the extended opcode is one of the X-form +// instructions that can take an alignment interrupt, then we change the opcode to +// 31 and pretend it got an alignment interrupt. This exercises paths that +// are hard to drive or perhaps never driven on this particular CPU. + + .text + .globl EXT(Emulate64) + .align 5 +LEXT(Emulate64) + crclr kAlignment // not an alignment exception + b a64AlignAssistJoin // join alignment handler + + +// Return from alignment handler with all the regs loaded for opcode emulation. + +a64HandleProgramInt: + rlwinm. r0,r29,0,SRR1_PRG_ILL_INS_BIT,SRR1_PRG_ILL_INS_BIT // illegal opcode? + beq a64PassAlong // No, must have been trap or priv violation etc + rlwinm r3,r20,6,26,31 // right justify opcode field (bits 0-5) + rlwinm r4,r20,31,22,31 // right justify extended opcode field (bits 21-30) + cmpwi cr0,r3,31 // X-form? + cmpwi cr1,r4,758 // DCBA? + cmpwi cr4,r4,512 // MCRXR? + crand cr1_eq,cr0_eq,cr1_eq // merge the two tests for DCBA + crand cr4_eq,cr0_eq,cr4_eq // and for MCRXR + beq++ cr1_eq,a64ExitEm // was DCBA, so ignore + bne-- cr4_eq,a64NotEmulated // skip if not MCRXR + +// Was MCRXR, so emulate. 
+ + ld r3,savexer(r13) // get the XER + lwz r4,savecr(r13) // and the CR + rlwinm r5,r20,11,27,29 // get (CR# * 4) from instruction + rlwinm r6,r3,0,4,31 // zero XER[32-35] (also XER[0-31]) + sld r4,r4,r5 // move target CR field to bits 32-35 + rlwimi r4,r3,0,0,3 // move XER[32-35] into CR field + stw r6,savexer+4(r13) // update XER + srd r4,r4,r5 // re-position CR + stw r4,savecr(r13) // update CR + b a64ExitEm // done + +// Not an opcode we normally emulate. If in special diagnostic mode and opcode=0, +// emulate as an alignment exception. This special case is for test software. + +a64NotEmulated: + lwz r30,dgFlags(0) // Get the flags + rlwinm. r0,r30,0,enaDiagEMb,enaDiagEMb // Do we want to try to emulate something? + beq++ a64PassAlong // No emulation allowed + cmpwi r3,0 // opcode==0 ? + bne a64PassAlong // not the special case + oris r20,r20,0x7C00 // change opcode to 31 + crset kAlignment // say we took alignment exception + rlwinm r5,r4,0,26+1,26-1 // mask Update bit (32) out of extended opcode + rlwinm r5,r5,0,0,31 // Clean out leftover junk from rlwinm + + cmpwi r4,1014 // dcbz/dcbz128 ? + crmove cr1_eq,cr0_eq + cmpwi r5,21 // ldx/ldux ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r5,599 // lfdx/lfdux ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r5,535 // lfsx/lfsux ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r5,343 // lhax/lhaux ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r4,790 // lhbrx ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r5,279 // lhzx/lhzux ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r4,597 // lswi ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r4,533 // lswx ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r5,341 // lwax/lwaux ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r4,534 // lwbrx ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r5,23 // lwz/lwzx ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r5,149 // stdx/stdux ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r5,727 // stfdx/stfdux ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r4,983 // stfiwx ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r5,663 // stfsx/stfsux ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r4,918 // sthbrx ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r5,407 // sthx/sthux ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r4,725 // stswi ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r4,661 // stswx ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r4,662 // stwbrx ? + cror cr1_eq,cr0_eq,cr1_eq + cmpwi r5,151 // stwx/stwux ? + cror cr1_eq,cr0_eq,cr1_eq + + beq++ cr1,a64GotInstruction // it was one of the X-forms we handle + crclr kAlignment // revert to program interrupt + b a64PassAlong // not recognized extended opcode + + +// ***************************************** +// * A L I G N M E N T I N T E R R U P T * +// ***************************************** +// +// We get here in exception context, ie with interrupts disabled, translation off, and +// in 64-bit mode, with: +// r13 = save-area pointer, with general context already saved in it +// cr6 = feature flags +// We preserve r13 and cr6. Other GPRs and CRs, the LR and CTR are used. +// +// Current 64-bit processors (GPUL) handle almost all misaligned operations in hardware, +// so this routine usually isn't called very often. Only floating pt ops that cross a page +// boundary and are not word aligned, and LMW/STMW can take exceptions to cacheable memory. +// However, in contrast to G3 and G4, any misaligned load/store will get an alignment +// interrupt on uncached memory. +// +// We always emulate scalar ops with a series of byte load/stores. Doing so is no slower +// than LWZ/STW in cases where a scalar op gets an alignment exception. 
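Before moving on to the alignment path, a note on the MCRXR emulation earlier in this hunk. As I read it, the handler copies XER[32-35] (SO, OV, CA plus a reserved bit, i.e. the top nibble of the 32-bit view of the XER) into the CR field named by the instruction, then zeroes those XER bits; it does the shuffle with sld/srd on the 64-bit savexer image. The sketch below does the equivalent on plain 32-bit views:

#include <stdint.h>
#include <stdio.h>

/* mcrxr crfD: CR[crfD] <- XER[SO,OV,CA,reserved]; those XER bits <- 0.
   CR field 0 is the most significant nibble, so field crf sits
   (28 - 4*crf) bits up from the least significant bit. */
static void emulate_mcrxr(uint32_t *cr, uint32_t *xer, int crf) {
    int sh = 28 - 4 * crf;
    uint32_t flags = (*xer >> 28) & 0xFu;        /* XER[32-35] */
    *cr = (*cr & ~(0xFu << sh)) | (flags << sh);
    *xer &= 0x0FFFFFFFu;                         /* clear XER[32-35] */
}

int main(void) {
    uint32_t cr = 0, xer = 0xE0000000u;          /* SO, OV, CA set */
    emulate_mcrxr(&cr, &xer, 1);                 /* mcrxr cr1 */
    printf("cr=%08X xer=%08X\n", (unsigned)cr, (unsigned)xer);
    return 0;                                    /* cr=0E000000 xer=00000000 */
}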
+// +// This routine supports all legal permutations of alignment interrupts occuring in user or +// supervisor mode, 32 or 64-bit addressing, and translation on or off. We do not emulate +// instructions that go past the end of an address space, such as "LHZ -1(0)"; we just pass +// along the alignment exception rather than wrap around to byte 0. (Treatment of address +// space wrap is a moot point in Mac OS X, since we do not map either the last page or +// page 0.) +// +// First, check for a few special cases such as virtual machines, etc. + + .globl EXT(AlignAssist64) + .align 5 +LEXT(AlignAssist64) + crset kAlignment // mark as alignment interrupt + +a64AlignAssistJoin: // join here from program interrupt handler + mfsprg r31,0 // get the per_proc data ptr + mcrf cr3,cr6 // save feature flags here... + lwz r21,spcFlags(r31) // grab the special flags + ld r29,savesrr1(r13) // get the MSR etc at the fault + ld r28,savesrr0(r13) // get the EA of faulting instruction + mfmsr r26 // save MSR at entry + rlwinm. r0,r21,0,runningVMbit,runningVMbit // Are we running a VM? + lwz r19,dgFlags(0) // Get the diagnostics flags + bne-- a64PassAlong // yes, let the virtual machine monitor handle + + +// Set up the MSR shadow regs. We turn on FP in this routine, and usually set DR and RI +// when accessing user space (the SLB is still set up with all the user space translations.) +// However, if the interrupt occured in the kernel with DR off, we keep it off while +// accessing the "target" address space. If we set DR to access the target space, we also +// set RI. The RI bit tells the exception handlers to clear cr0 beq and return if we get an +// exception accessing the user address space. We are careful to test cr0 beq after every such +// access. We keep the following "shadows" of the MSR in global regs across this code: +// r25 = MSR at entry, plus FP and probably DR and RI (used to access target space) +// r26 = MSR at entry +// r27 = free +// r29 = SRR1 (ie, MSR at interrupt) +// Note that EE and IR are always off, and SF is always on in this code. + + rlwinm r3,r29,0,MSR_DR_BIT,MSR_DR_BIT // was translation on at fault? + rlwimi r3,r3,32-MSR_RI_BIT+MSR_DR_BIT,MSR_RI_BIT,MSR_RI_BIT // if DR was set, set RI too + or r25,r26,r3 // assemble MSR to use accessing target space + + +// Because the DSISR and DAR are either not set or are not to be trusted on some 64-bit +// processors on an alignment interrupt, we must fetch the faulting instruction ourselves, +// then decode/hash the opcode and reconstruct the EA manually. + + mtmsr r25 // turn on FP and (if it was on at fault) DR and RI + isync // wait for it to happen + cmpw r0,r0 // turn on beq so we can check for DSIs + lwz r20,0(r28) // fetch faulting instruction, probably with DR on + bne-- a64RedriveAsISI // got a DSI trying to fetch it, pretend it was an ISI + mtmsr r26 // turn DR back off + isync // wait for it to happen + + +// Set a few flags while we wait for the faulting instruction to arrive from cache. + + rlwinm. r0,r29,0,MSR_SE_BIT,MSR_SE_BIT // Were we single stepping? + stw r20,savemisc2(r13) // Save the instruction image in case we notify + crnot kTrace,cr0_eq + rlwinm. r0,r19,0,enaNotifyEMb,enaNotifyEMb // Should we notify? 
+ crnot kNotify,cr0_eq
+
+
+// Hash the instruction into a 5-bit value "AAAAB" used to index the branch table, and a
+// 1-bit kUpdate flag, as follows:
+// • for X-form instructions (with primary opcode 31):
+// the "AAAA" bits are bits 21-24 of the instruction
+// the "B" bit is the XOR of bits 29 and 30
+// the update bit is instruction bit 25
+// • for D and DS-form instructions (actually, any primary opcode except 31):
+// the "AAAA" bits are bits 1-4 of the instruction
+// the "B" bit is 0
+// the update bit is instruction bit 5
+//
+// Just for fun (and perhaps a little speed on deep-pipe machines), we compute the hash,
+// update flag, and EA without branches and with ipc >= 2.
+//
+// When we "bctr" to the opcode-specific routine, the following are all set up:
+// MSR = EE and IR off, SF and FP on
+// r13 = save-area pointer (physical)
+// r14 = ptr to saver0 in save-area (ie, to base of GPRs)
+// r15 = 0x00000000FFFFFFFF if 32-bit mode fault, 0xFFFFFFFFFFFFFFFF if 64
+// r16 = RA * 8 (ie, reg# not reg value)
+// r17 = EA
+// r18 = (RA|0) (reg value)
+// r19 = -1 if X-form, 0 if D-form
+// r20 = faulting instruction
+// r21 = RT * 8 (ie, reg# not reg value)
+// r22 = addr(aaFPopTable)+(RT*32), ie ptr to floating pt table for target register
+// r25 = MSR at entrance, probably with DR and RI set (for access to target space)
+// r26 = MSR at entrance
+// r27 = free
+// r28 = SRR0 (ie, EA of faulting instruction)
+// r29 = SRR1 (ie, MSR at fault)
+// r30 = scratch, usually user data
+// r31 = per-proc pointer
+// cr2 = kTrace, kNotify, and kAlignment flags
+// cr3 = saved copy of feature flags used in lowmem vector code
+// cr6 = bits 24-27 of CR are bits 24-27 of opcode if X-form, or bits 4-5 and 00 if D-form
+// bit 25 is the kUpdate flag, set for update form instructions
+// cr7 = bits 28-31 of CR are bits 28-31 of opcode if X-form, or 0 if D-form
+
+a64GotInstruction: // here from program interrupt with instruction in r20
+ rlwinm r21,r20,6+6,20,25 // move the primary opcode (bits 0-5) to bits 20-25
+ la r14,saver0(r13) // r14 <- base address of GPR registers
+ xori r19,r21,0x07C0 // iff primary opcode is 31, set r19 to 0
+ rlwinm r16,r20,16+3,24,28 // r16 <- RA*8
+ subi r19,r19,1 // set bit 0 iff X-form (ie, if primary opcode is 31)
+ rlwinm r17,r20,21+3,24,28 // r17 <- RB*8 (if X-form)
+ sradi r19,r19,63 // r19 <- -1 if X-form, 0 if D-form
+ extsh r22,r20 // r22 <- displacement (if D-form)
+
+ ldx r23,r14,r17 // get (RB), if any
+ and r15,r20,r19 // instruction if X, 0 if D
+ andc r17,r21,r19 // primary opcode in bits 20-25 if D, 0 if X
+ ldx r18,r14,r16 // get (RA)
+ subi r24,r16,1 // set bit 0 iff RA==0
+ or r21,r15,r17 // r21 <- instruction if X, or bits 0-5 in bits 20-25 if D
+ sradi r24,r24,63 // r24 <- -1 if RA==0, 0 otherwise
+ rlwinm r17,r21,32-4,25,28 // shift opcode bits 21-24 to 25-28 (hash "AAAA" bits)
+ lis r10,ha16(a64BranchTable) // start to build up branch table address
+ rlwimi r17,r21,0,29,29 // move opcode bit 29 into hash as start of "B" bit
+ rlwinm r30,r21,1,29,29 // position opcode bit 30 in position 29
+ and r12,r23,r19 // RB if X-form, 0 if D-form
+ andc r11,r22,r19 // 0 if X-form, sign extended displacement if D-form
+ xor r17,r17,r30 // bit 29 ("B") of hash is xor(bit29,bit30)
+ addi r10,r10,lo16(a64BranchTable)
+ or r12,r12,r11 // r12 <- (RB) or displacement, as appropriate
+ lwzx r30,r10,r17 // get address from branch table
+ mtcrf 0x01,r21 // move opcode bits 28-31 to CR7
+ sradi r15,r29,32 // propagate SF bit from SRR1 (MSR_SF, which is bit 0)
+
andc r18,r18,r24 // r18 <- (RA|0) + mtcrf 0x02,r21 // move opcode bits 24-27 to CR6 (kUpdate is bit 25) + add r17,r18,r12 // r17 <- EA, which might need to be clamped to 32 bits + mtctr r30 // set up branch address + + oris r15,r15,0xFFFF // start to fill low word of r15 with 1s + rlwinm r21,r20,11+3,24,28 // r21 <- RT * 8 + lis r22,ha16(EXT(aaFPopTable)) // start to compute address of floating pt table + ori r15,r15,0xFFFF // now bits 32-63 of r15 are 1s + addi r22,r22,lo16(EXT(aaFPopTable)) + and r17,r17,r15 // clamp EA to 32 bits if necessary + rlwimi r22,r21,2,22,26 // move RT into aaFPopTable address (which is 1KB aligned) + + bf-- kAlignment,a64HandleProgramInt // return to Program Interrupt handler + bctr // if alignment interrupt, jump to opcode-specific routine + + +// Floating-pt load single (lfs[u], lfsx[u]) + +a64LfsLfsx: + bl a64Load4Bytes // get data in r30 + mtctr r22 // set up address of "lfs fRT,emfp0(r31)" + stw r30,emfp0(r31) // put word here for aaFPopTable routine + bctrl // do the lfs + b a64UpdateCheck // update RA if necessary and exit + + +// Floating-pt store single (stfs[u], stfsx[u]) + +a64StfsStfsx: + ori r22,r22,8 // set dir==1 (ie, single store) in aaFPopTable + mtctr r22 // set up address of "stfs fRT,emfp0(r31)" + bctrl // execute the store into emfp0 + lwz r30,emfp0(r31) // get the word + bl a64Store4Bytes // store r30 into user space + b a64UpdateCheck // update RA if necessary and exit + + +// Floating-pt store as integer word (stfiwx) + +a64Stfiwx: + ori r22,r22,16+8 // set size=1, dir==1 (ie, double store) in aaFPopTable + mtctr r22 // set up FP register table address + bctrl // double precision store into emfp0 + lwz r30,emfp0+4(r31) // get the low-order word + bl a64Store4Bytes // store r30 into user space + b a64Exit // successfully emulated + + +// Floating-pt load double (lfd[u], lfdx[u]) + +a64LfdLfdx: + ori r22,r22,16 // set Double bit in aaFPopTable address + bl a64Load8Bytes // get data in r30 + mtctr r22 // set up address of "lfd fRT,emfp0(r31)" + std r30,emfp0(r31) // put doubleword here for aaFPopTable routine + bctrl // execute the load + b a64UpdateCheck // update RA if necessary and exit + + +// Floating-pt store double (stfd[u], stfdx[u]) + +a64StfdStfdx: + ori r22,r22,16+8 // set size=1, dir==1 (ie, double store) in aaFPopTable address + mtctr r22 // address of routine to stfd RT + bctrl // store into emfp0 + ld r30,emfp0(r31) // get the doubleword + bl a64Store8Bytes // store r30 into user space + b a64UpdateCheck // update RA if necessary and exit + + +// Load halfword w 0-fill (lhz[u], lhzx[u]) + +a64LhzLhzx: + bl a64Load2Bytes // load into r30 from user space (w 0-fill) + stdx r30,r14,r21 // store into RT slot in register file + b a64UpdateCheck // update RA if necessary and exit + + +// Load halfword w sign fill (lha[u], lhax[u]) + +a64LhaLhax: + bl a64Load2Bytes // load into r30 from user space (w 0-fill) + extsh r30,r30 // sign-extend + stdx r30,r14,r21 // store into RT slot in register file + b a64UpdateCheck // update RA if necessary and exit + + +// Load halfword byte reversed (lhbrx) + +a64Lhbrx: + bl a64Load2Bytes // load into r30 from user space (w 0-fill) + rlwinm r3,r30,8,16,23 // reverse bytes into r3 + rlwimi r3,r30,24,24,31 + stdx r3,r14,r21 // store into RT slot in register file + b a64Exit // successfully emulated + + +// Store halfword (sth[u], sthx[u]) + +a64SthSthx: + ldx r30,r14,r21 // get RT + bl a64Store2Bytes // store r30 into user space + b a64UpdateCheck // update RA if necessary and exit + + +// Store 
halfword byte reversed (sthbrx) + +a64Sthbrx: + addi r21,r21,6 // point to low two bytes of RT + lhbrx r30,r14,r21 // load and reverse + bl a64Store2Bytes // store r30 into user space + b a64Exit // successfully emulated + + +// Load word w 0-fill (lwz[u], lwzx[u]), also lwarx. + +a64LwzLwzxLwarx: + andc r3,r19,r20 // light bit 30 of r3 iff lwarx + andi. r0,r3,2 // is it lwarx? + bne-- a64PassAlong // yes, never try to emulate a lwarx + bl a64Load4Bytes // load 4 bytes from user space into r30 (0-filled) + stdx r30,r14,r21 // update register file + b a64UpdateCheck // update RA if necessary and exit + + +// Load word w sign fill (lwa, lwax[u]) + +a64Lwa: + crclr kUpdate // no update form of lwa (its a reserved encoding) +a64Lwax: + bl a64Load4Bytes // load 4 bytes from user space into r30 (0-filled) + extsw r30,r30 // sign extend + stdx r30,r14,r21 // update register file + b a64UpdateCheck // update RA if necessary and exit + + +// Load word byte reversed (lwbrx) + +a64Lwbrx: + bl a64Load4Bytes // load 4 bytes from user space into r30 (0-filled) + rlwinm r3,r30,24,0,31 // flip bytes 1234 to 4123 + rlwimi r3,r30,8,8,15 // r3 is now 4323 + rlwimi r3,r30,8,24,31 // r3 is now 4321 + stdx r3,r14,r21 // update register file + b a64Exit // successfully emulated + + +// Store word (stw[u], stwx[u]) + +a64StwStwx: + ldx r30,r14,r21 // get RT + bl a64Store4Bytes // store r30 into user space + b a64UpdateCheck // update RA if necessary and exit + + +// Store word byte reversed (stwbrx) + +a64Stwbrx: + addi r21,r21,4 // point to low word of RT + lwbrx r30,r14,r21 // load and reverse + bl a64Store4Bytes // store r30 into user space + b a64Exit // successfully emulated + + +// Load doubleword (ld[u], ldx[u]), also lwa. + +a64LdLwa: // these are DS form: ld=0, ldu=1, and lwa=2 + andi. r0,r20,2 // ld[u] or lwa? (test bit 30 of DS field) + rlwinm r3,r20,0,30,31 // must adjust EA by subtracting DS field + sub r17,r17,r3 + and r17,r17,r15 // re-clamp to 32 bits if necessary + bne a64Lwa // handle lwa +a64Ldx: + bl a64Load8Bytes // load 8 bytes from user space into r30 + stdx r30,r14,r21 // update register file + b a64UpdateCheck // update RA if necessary and exit + + +// Store doubleword (stdx[u], std[u]) + +a64StdxStwcx: + bf-- 30,a64PassAlong // stwcx, so pass along alignment exception + b a64Stdx // was stdx +a64StdStfiwx: + bt 30,a64Stfiwx // handle stfiwx + rlwinm. r3,r20,0,30,31 // must adjust EA by subtracting DS field + sub r17,r17,r3 + and r17,r17,r15 // re-clamp to 32 bits if necessary +a64Stdx: + ldx r30,r14,r21 // get RT + bl a64Store8Bytes // store RT into user space + b a64UpdateCheck // update RA if necessary and exit + + +// Dcbz and Dcbz128 (bit 10 distinguishes the two forms) + +a64DcbzDcbz128: + andis. r0,r20,0x0020 // bit 10 set? + li r3,0 // get a 0 to store + li r0,4 // assume 32-bit version, store 8 bytes 4x + li r4,_COMM_PAGE_BASE_ADDRESS + rldicr r17,r17,0,63-5 // 32-byte align EA + beq a64DcbzSetup // it was the 32-byte version + rldicr r17,r17,0,63-7 // zero low 7 bits of EA + li r0,16 // store 8 bytes 16x +a64DcbzSetup: + xor r4,r4,r28 // was dcbz in the commpage(s)? + and r4,r4,r15 // mask off high-order bits if 32-bit mode + srdi. r4,r4,12 // check SRR0 + bne a64NotCommpage // not in commpage + rlwinm. r4,r29,0,MSR_PR_BIT,MSR_PR_BIT // did fault occur in user mode? + beq-- a64NotCommpage // do not zero cr7 if kernel got alignment exception + lwz r4,savecr(r13) // if we take a dcbz{128} in the commpage... + rlwinm r4,r4,0,0,27 // ...clear user's cr7... 
+ stw r4,savecr(r13) // ...as a flag for _COMM_PAGE_BIGCOPY +a64NotCommpage: + mtctr r0 + cmpw r0,r0 // turn cr0 beq on so we can check for DSIs + mtmsr r25 // turn on DR and RI so we can address user space + isync // wait for it to happen +a64DcbzLoop: + std r3,0(r17) // store into user space + bne-- a64RedriveAsDSI + addi r17,r17,8 + bdnz a64DcbzLoop + + mtmsr r26 // restore MSR + isync // wait for it to happen + b a64Exit + + +// Load and store multiple (lmw, stmw), distinguished by bit 25 + +a64LmwStmw: + subfic r22,r21,32*8 // how many regs to load or store? + srwi r22,r22,1 // get bytes to load/store + bf 25,a64LoadMultiple // handle lmw + b a64StoreMultiple // it was stmw + + +// Load string word immediate (lswi) + +a64Lswi: + rlwinm r22,r20,21,27,31 // get #bytes in r22 + and r17,r18,r15 // recompute EA as (RA|0), and clamp + subi r3,r22,1 // r22==0? + rlwimi r22,r3,6,26,26 // map count of 0 to 32 + b a64LoadMultiple + + +// Store string word immediate (stswi) + +a64Stswi: + rlwinm r22,r20,21,27,31 // get #bytes in r22 + and r17,r18,r15 // recompute EA as (RA|0), and clamp + subi r3,r22,1 // r22==0? + rlwimi r22,r3,6,26,26 // map count of 0 to 32 + b a64StoreMultiple + + +// Load string word indexed (lswx), also lwbrx + +a64LswxLwbrx: + bf 30,a64Lwbrx // was lwbrx + ld r22,savexer(r13) // get the xer + rlwinm r22,r22,0,25,31 // isolate the byte count + b a64LoadMultiple // join common code + + +// Store string word indexed (stswx), also stwbrx + +a64StswxStwbrx: + bf 30,a64Stwbrx // was stwbrx + ld r22,savexer(r13) // get the xer + rlwinm r22,r22,0,25,31 // isolate the byte count + b a64StoreMultiple // join common code + + +// Load multiple words. This handles lmw, lswi, and lswx. + +a64LoadMultiple: // r22 = byte count, may be 0 + subic. r3,r22,1 // get (#bytes-1) + blt a64Exit // done if 0 + add r4,r17,r3 // get EA of last operand byte + and r4,r4,r15 // clamp + cmpld r4,r17 // address space wrap? + blt-- a64PassAlong // pass along exception if so + srwi. r4,r22,2 // get # full words to load + rlwinm r22,r22,0,30,31 // r22 <- leftover byte count + cmpwi cr1,r22,0 // leftover bytes? 
+ beq a64Lm3 // no words + mtctr r4 // set up word count + cmpw r0,r0 // set beq for DSI test +a64Lm2: + mtmsr r25 // turn on DR and RI + isync // wait for it to happen + lbz r3,0(r17) + bne-- a64RedriveAsDSI // got a DSI + lbz r4,1(r17) + bne-- a64RedriveAsDSI // got a DSI + lbz r5,2(r17) + bne-- a64RedriveAsDSI // got a DSI + lbz r6,3(r17) + bne-- a64RedriveAsDSI // got a DSI + rlwinm r30,r3,24,0,7 // pack bytes into r30 + rldimi r30,r4,16,40 + rldimi r30,r5,8,48 + rldimi r30,r6,0,56 + mtmsr r26 // turn DR back off so we can store into register file + isync + addi r17,r17,4 // bump EA + stdx r30,r14,r21 // pack into register file + addi r21,r21,8 // bump register file offset + rlwinm r21,r21,0,24,28 // wrap around to 0 + bdnz a64Lm2 +a64Lm3: // cr1/r22 = leftover bytes (0-3), cr0 beq set + beq cr1,a64Exit // no leftover bytes + mtctr r22 + mtmsr r25 // turn on DR so we can access user space + isync + lbz r3,0(r17) // get 1st leftover byte + bne-- a64RedriveAsDSI // got a DSI + rlwinm r30,r3,24,0,7 // position in byte 4 of r30 (and clear rest of r30) + bdz a64Lm4 // only 1 byte leftover + lbz r3,1(r17) // get 2nd byte + bne-- a64RedriveAsDSI // got a DSI + rldimi r30,r3,16,40 // insert into byte 5 of r30 + bdz a64Lm4 // only 2 bytes leftover + lbz r3,2(r17) // get 3rd byte + bne-- a64RedriveAsDSI // got a DSI + rldimi r30,r3,8,48 // insert into byte 6 +a64Lm4: + mtmsr r26 // turn DR back off so we can store into register file + isync + stdx r30,r14,r21 // pack partially-filled word into register file + b a64Exit + + +// Store multiple words. This handles stmw, stswi, and stswx. + +a64StoreMultiple: // r22 = byte count, may be 0 + subic. r3,r22,1 // get (#bytes-1) + blt a64Exit // done if 0 + add r4,r17,r3 // get EA of last operand byte + and r4,r4,r15 // clamp + cmpld r4,r17 // address space wrap? + blt-- a64PassAlong // pass along exception if so + srwi. r4,r22,2 // get # full words to load + rlwinm r22,r22,0,30,31 // r22 <- leftover byte count + cmpwi cr1,r22,0 // leftover bytes? + beq a64Sm3 // no words + mtctr r4 // set up word count + cmpw r0,r0 // turn on beq so we can check for DSIs +a64Sm2: + ldx r30,r14,r21 // get next register + addi r21,r21,8 // bump register file offset + rlwinm r21,r21,0,24,28 // wrap around to 0 + srwi r3,r30,24 // shift the four bytes into position + srwi r4,r30,16 + srwi r5,r30,8 + mtmsr r25 // turn on DR so we can access user space + isync // wait for it to happen + stb r3,0(r17) + bne-- a64RedriveAsDSI // got a DSI + stb r4,1(r17) + bne-- a64RedriveAsDSI // got a DSI + stb r5,2(r17) + bne-- a64RedriveAsDSI // got a DSI + stb r30,3(r17) + bne-- a64RedriveAsDSI // got a DSI + mtmsr r26 // turn DR back off + isync + addi r17,r17,4 // bump EA + bdnz a64Sm2 +a64Sm3: // r22 = 0-3, cr1 set on r22, cr0 beq set + beq cr1,a64Exit // no leftover bytes + ldx r30,r14,r21 // get last register + mtctr r22 + mtmsr r25 // turn on DR so we can access user space + isync // wait for it to happen +a64Sm4: + rlwinm r30,r30,8,0,31 // position next byte + stb r30,0(r17) // pack into user space + addi r17,r17,1 // bump user space ptr + bne-- a64RedriveAsDSI // got a DSI + bdnz a64Sm4 + mtmsr r26 // turn DR back off + isync + b a64Exit + + +// Subroutines to load bytes from user space. + +a64Load2Bytes: // load 2 bytes right-justified into r30 + addi r7,r17,1 // get EA of last byte + and r7,r7,r15 // clamp + cmpld r7,r17 // address wrap? + blt-- a64PassAlong // yes + mtmsr r25 // turn on DR so we can access user space + isync // wait for it to happen + sub. 
r30,r30,r30 // 0-fill dest and set beq + b a64Load2 // jump into routine +a64Load4Bytes: // load 4 bytes right-justified into r30 (ie, low order word) + addi r7,r17,3 // get EA of last byte + and r7,r7,r15 // clamp + cmpld r7,r17 // address wrap? + blt-- a64PassAlong // yes + mtmsr r25 // turn on DR so we can access user space + isync // wait for it to happen + sub. r30,r30,r30 // 0-fill dest and set beq + b a64Load4 // jump into routine +a64Load8Bytes: // load 8 bytes into r30 + addi r7,r17,7 // get EA of last byte + and r7,r7,r15 // clamp + cmpld r7,r17 // address wrap? + blt-- a64PassAlong // yes + mtmsr r25 // turn on DR so we can access user space + isync // wait for it to happen + sub. r30,r30,r30 // 0-fill dest and set beq + lbz r3,-7(r7) // get byte 0 + bne-- a64RedriveAsDSI // got a DSI + lbz r4,-6(r7) // and byte 1, etc + bne-- a64RedriveAsDSI // got a DSI + lbz r5,-5(r7) + bne-- a64RedriveAsDSI // got a DSI + lbz r6,-4(r7) + bne-- a64RedriveAsDSI // got a DSI + rldimi r30,r3,56,0 // position bytes in upper word + rldimi r30,r4,48,8 + rldimi r30,r5,40,16 + rldimi r30,r6,32,24 +a64Load4: + lbz r3,-3(r7) + bne-- a64RedriveAsDSI // got a DSI + lbz r4,-2(r7) + bne-- a64RedriveAsDSI // got a DSI + rldimi r30,r3,24,32 // insert bytes 4 and 5 into r30 + rldimi r30,r4,16,40 +a64Load2: + lbz r3,-1(r7) + bne-- a64RedriveAsDSI // got a DSI + lbz r4,0(r7) + bne-- a64RedriveAsDSI // got a DSI + mtmsr r26 // turn DR back off + isync + rldimi r30,r3,8,48 // insert bytes 6 and 7 into r30 + rldimi r30,r4,0,56 + blr + + +// Subroutines to store bytes into user space. + +a64Store2Bytes: // store bytes 6 and 7 of r30 + addi r7,r17,1 // get EA of last byte + and r7,r7,r15 // clamp + cmpld r7,r17 // address wrap? + blt-- a64PassAlong // yes + mtmsr r25 // turn on DR so we can access user space + isync // wait for it to happen + cmpw r0,r0 // set beq so we can check for DSI + b a64Store2 // jump into routine +a64Store4Bytes: // store bytes 4-7 of r30 (ie, low order word) + addi r7,r17,3 // get EA of last byte + and r7,r7,r15 // clamp + cmpld r7,r17 // address wrap? + blt-- a64PassAlong // yes + mtmsr r25 // turn on DR so we can access user space + isync // wait for it to happen + cmpw r0,r0 // set beq so we can check for DSI + b a64Store4 // jump into routine +a64Store8Bytes: // r30 = bytes + addi r7,r17,7 // get EA of last byte + and r7,r7,r15 // clamp + cmpld r7,r17 // address wrap? + blt-- a64PassAlong // yes + mtmsr r25 // turn on DR so we can access user space + isync // wait for it to happen + cmpw r0,r0 // set beq so we can check for DSI + rotldi r3,r30,8 // shift byte 0 into position + rotldi r4,r30,16 // and byte 1 + rotldi r5,r30,24 // and byte 2 + rotldi r6,r30,32 // and byte 3 + stb r3,-7(r7) // store byte 0 + bne-- a64RedriveAsDSI // got a DSI + stb r4,-6(r7) // and byte 1 etc... + bne-- a64RedriveAsDSI // got a DSI + stb r5,-5(r7) + bne-- a64RedriveAsDSI // got a DSI + stb r6,-4(r7) + bne-- a64RedriveAsDSI // got a DSI +a64Store4: + rotldi r3,r30,40 // shift byte 4 into position + rotldi r4,r30,48 // and byte 5 + stb r3,-3(r7) + bne-- a64RedriveAsDSI // got a DSI + stb r4,-2(r7) + bne-- a64RedriveAsDSI // got a DSI +a64Store2: + rotldi r3,r30,56 // shift byte 6 into position + stb r3,-1(r7) // store byte 6 + bne-- a64RedriveAsDSI // got a DSI + stb r30,0(r7) // store byte 7, which is already positioned + bne-- a64RedriveAsDSI // got a DSI + mtmsr r26 // turn off DR + isync + blr + + +// Exit routines. 
+ +a64ExitEm: + li r30,T_EMULATE // Change exception code to emulate + stw r30,saveexception(r13) // Save it + b a64Exit // Join standard exit routine... + +a64PassAlong: // unhandled exception, just pass it along + crset kNotify // return T_ALIGNMENT or T_PROGRAM + crclr kTrace // not a trace interrupt + b a64Exit1 +a64UpdateCheck: // successfully emulated, may be update form + bf kUpdate,a64Exit // update? + stdx r17,r14,r16 // yes, store EA into RA +a64Exit: // instruction successfully emulated + addi r28,r28,4 // bump SRR0 past the emulated instruction + li r30,T_IN_VAIN // eat the interrupt since we emulated it + and r28,r28,r15 // clamp to address space size (32 vs 64) + std r28,savesrr0(r13) // save, so we return to next instruction +a64Exit1: + bt-- kTrace,a64Trace // were we in single-step at fault? + bt-- kNotify,a64Notify // should we say T_ALIGNMENT anyway? +a64Exit2: + mcrf cr6,cr3 // restore feature flags + mr r11,r30 // pass back exception code (T_IN_VAIN etc) in r11 + b EXT(EmulExit) // return to exception processing + + +// Notification requested: pass exception upstairs even though it might have been emulated. + +a64Notify: + li r30,T_ALIGNMENT // somebody wants to know about it (but don't redrive) + bt kAlignment,a64Exit2 // was an alignment exception + li r30,T_PROGRAM // was an emulated instruction + b a64Exit2 + + +// Emulate a trace interrupt after handling alignment interrupt. + +a64Trace: + lwz r9,SAVflags(r13) // get the save-area flags + li r30,T_TRACE + oris r9,r9,hi16(SAVredrive) // Set the redrive bit + stw r30,saveexception(r13) // Set the exception code + stw r9,SAVflags(r13) // Set the flags + b a64Exit2 // Exit and do trace interrupt... + + +// Got a DSI accessing user space. Redrive. One way this can happen is if another +// processor removes a mapping while we are emulating. + +a64RedriveAsISI: // this DSI happened fetching the opcode (r1==DSISR r4==DAR) + mtmsr r26 // turn DR back off + isync // wait for it to happen + li r30,T_INSTRUCTION_ACCESS + rlwimi r29,r1,0,0,4 // insert the fault type from DSI's DSISR + std r29,savesrr1(r13) // update SRR1 to look like an ISI + b a64Redrive + +a64RedriveAsDSI: // r0==DAR r1==DSISR + mtmsr r26 // turn DR back off + isync // wait for it to happen + stw r1,savedsisr(r13) // Set the DSISR of failed access + std r0,savedar(r13) // Set the address of the failed access + li r30,T_DATA_ACCESS // Set failing data access code +a64Redrive: + lwz r9,SAVflags(r13) // Pick up the flags + stw r30,saveexception(r13) // Set the replacement code + oris r9,r9,hi16(SAVredrive) // Set the redrive bit + stw r9,SAVflags(r13) // Set redrive request + crclr kTrace // don't take a trace interrupt + crclr kNotify // don't pass alignment exception + b a64Exit2 // done + + +// This is the branch table, indexed by the "AAAAB" opcode hash. 
+ +a64BranchTable: + .long a64LwzLwzxLwarx // 00000 lwz[u], lwzx[u], lwarx + .long a64Ldx // 00001 ldx[u] + .long a64PassAlong // 00010 ldarx (never emulate these) + .long a64PassAlong // 00011 + .long a64StwStwx // 00100 stw[u], stwx[u] + .long a64StdxStwcx // 00101 stdx[u], stwcx + .long a64PassAlong // 00110 + .long a64PassAlong // 00111 stdcx (never emulate these) + .long a64LhzLhzx // 01000 lhz[u], lhzx[u] + .long a64PassAlong // 01001 + .long a64LhaLhax // 01010 lha[u], lhax[u] + .long a64Lwax // 01011 lwax[u] + .long a64SthSthx // 01100 sth[u], sthx[u] + .long a64PassAlong // 01101 + .long a64LmwStmw // 01110 lmw, stmw + .long a64PassAlong // 01111 + .long a64LfsLfsx // 10000 lfs[u], lfsx[u] + .long a64LswxLwbrx // 10001 lswx, lwbrx + .long a64LfdLfdx // 10010 lfd[u], lfdx[u] + .long a64Lswi // 10011 lswi + .long a64StfsStfsx // 10100 stfs[u], stfsx[u] + .long a64StswxStwbrx // 10101 stswx, stwbrx + .long a64StfdStfdx // 10110 stfd[u], stfdx[u] + .long a64Stswi // 10111 stswi + .long a64PassAlong // 11000 + .long a64Lhbrx // 11001 lhbrx + .long a64LdLwa // 11010 ld[u], lwa + .long a64PassAlong // 11011 + .long a64PassAlong // 11100 + .long a64Sthbrx // 11101 sthbrx + .long a64StdStfiwx // 11110 std[u], stfiwx + .long a64DcbzDcbz128 // 11111 dcbz, dcbz128 + + diff --git a/osfmk/ppc/Firmware.h b/osfmk/ppc/Firmware.h index bd9a00569..d22e18e1e 100644 --- a/osfmk/ppc/Firmware.h +++ b/osfmk/ppc/Firmware.h @@ -41,6 +41,9 @@ #error This file is only useful on PowerPC. #endif +#include +#include + /* * This routine is used to write debug output to either the modem or printer port. * parm 1 is printer (0) or modem (1); parm 2 is ID (printed directly); parm 3 converted to hex @@ -51,14 +54,18 @@ void dbgLog(unsigned int d0, unsigned int d1, unsigned int d2, unsigned int d3); void dbgLog2(unsigned int type, unsigned int p1, unsigned int p2); void dbgDispLL(unsigned int port, unsigned int id, unsigned int data); void fwSCCinit(unsigned int port); +void fwEmMck(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int); /* Start injecting */ +void fwSCOM(scomcomm *); /* Read/Write SCOM */ +void setPmon(unsigned int, unsigned int); /* Set perf mon stuff */ -extern void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3); +extern void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3, unsigned int item4); #if 0 /* (TEST/DEBUG) - eliminate inline */ -extern __inline__ void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3) { +extern __inline__ void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3, unsigned int item4) { __asm__ volatile("mr r3,%0" : : "r" (item1) : "r3"); __asm__ volatile("mr r4,%0" : : "r" (item2) : "r4"); __asm__ volatile("mr r5,%0" : : "r" (item3) : "r5"); + __asm__ volatile("mr r6,%0" : : "r" (item3) : "r6"); __asm__ volatile("lis r0,hi16(CutTrace)" : : : "r0"); __asm__ volatile("ori r0,r0,lo16(CutTrace)" : : : "r0"); __asm__ volatile("sc"); @@ -110,7 +117,7 @@ extern __inline__ void ChokeSys(unsigned int ercd) { typedef struct Boot_Video bootBumbleC; extern void StoreReal(unsigned int val, unsigned int addr); -extern void ReadReal(unsigned int raddr, unsigned int *vaddr); +extern void ReadReal(addr64_t raddr, unsigned int *vaddr); extern void ClearReal(unsigned int addr, unsigned int lgn); extern void LoadDBATs(unsigned int *bat); extern void LoadIBATs(unsigned int *bat); @@ -122,6 +129,7 @@ extern void GratefulDebInit(bootBumbleC *boot_video_info); extern void 
GratefulDebDisp(unsigned int coord, unsigned int data); extern void checkNMI(void); +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct GDWorkArea { /* Grateful Deb work area one per processor */ /* Note that a lot of info is duplicated for each processor */ @@ -147,6 +155,7 @@ typedef struct GDWorkArea { /* Grateful Deb work area one per processor */ unsigned int GDrowbuf2[128]; /* Buffer to an 8 character row */ } GDWorkArea; +#pragma pack() #define GDfontsize 16 #define GDdispcols 2 diff --git a/osfmk/ppc/Firmware.s b/osfmk/ppc/Firmware.s index fbec7dfea..aa87a1239 100644 --- a/osfmk/ppc/Firmware.s +++ b/osfmk/ppc/Firmware.s @@ -43,7 +43,6 @@ #include #include #include -#include #include #include #include @@ -83,54 +82,57 @@ EXT(FWtable): * R3 is as passed in by the user. All others must be gotten from the save area */ -ENTRY(FirmwareCall, TAG_NO_FRAME_USED) + + .align 5 + .globl EXT(FirmwareCall) + +LEXT(FirmwareCall) rlwinm r1,r0,2,1,29 /* Clear out bit 0 and multiply by 4 */ lis r12,HIGH_ADDR(EXT(FWtable)) /* Get the high part of the firmware call table */ cmplwi r1,EXT(FirmwareCnt)*4 /* Is it a valid firmware call number */ - mflr r11 /* Save the return */ ori r12,r12,LOW_ADDR(EXT(FWtable)) /* Now the low part */ ble+ goodCall /* Yeah, it is... */ li r3,T_SYSTEM_CALL /* Tell the vector handler that we know nothing */ - blr /* Return for errors... */ + b EXT(FCReturn) ; Bye dudes... goodCall: mfsprg r10,0 /* Make sure about the per_proc block */ lwzx r1,r1,r12 /* Pick up the address of the routine */ - lwz r4,saver4(r13) /* Pass in caller's R4 */ - lwz r5,saver5(r13) /* Pass in caller's R5 */ + lwz r4,saver4+4(r13) /* Pass in caller's R4 */ + lwz r5,saver5+4(r13) /* Pass in caller's R5 */ rlwinm. r1,r1,0,0,29 /* Make sure the flag bits are clear */ - stw r11,PP_TEMPWORK1(r10) /* Save our return point */ mtlr r1 /* Put it in the LR */ beq- callUnimp /* This one was unimplimented... */ blrl /* Call the routine... */ - mfsprg r10,0 /* Make sure about the per_proc again */ - stw r3,saver3(r13) /* Pass back the return code to caller */ - lwz r11,PP_TEMPWORK1(r10) /* Get our return point */ + stw r3,saver3+4(r13) /* Pass back the return code to caller */ li r3,T_IN_VAIN /* Tell the vector handler that we took care of it */ - mtlr r11 /* Set the return */ - blr /* Bye, dudes... */ + b EXT(FCReturn) ; Bye dudes... -callUnimp: lwz r11,PP_TEMPWORK1(r10) /* Restore the return address */ - li r3,T_SYSTEM_CALL /* Tell the vector handler that we know nothing */ - mtlr r11 /* Restore the LR */ - blr /* Return for errors... */ +callUnimp: li r3,T_SYSTEM_CALL /* Tell the vector handler that we know nothing */ + b EXT(FCReturn) ; Bye dudes... /* * This routine is used to store using a real address. It stores parmeter1 at parameter2. */ -ENTRY(StoreReal, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(StoreReal) + +LEXT(StoreReal) lis r0,HIGH_ADDR(StoreRealCall) /* Get the top part of the SC number */ ori r0,r0,LOW_ADDR(StoreRealCall) /* and the bottom part */ sc /* Do it to it */ blr /* Bye bye, Birdie... */ -ENTRY(StoreRealLL, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(StoreRealLL) + +LEXT(StoreRealLL) stw r3,0(r4) /* Store the word */ blr /* Leave... */ @@ -138,15 +140,22 @@ ENTRY(StoreRealLL, TAG_NO_FRAME_USED) /* * This routine is used to clear a range of physical pages. 
*/ + + .align 5 + .globl EXT(ClearReal) -ENTRY(ClearReal, TAG_NO_FRAME_USED) +LEXT(ClearReal) lis r0,HIGH_ADDR(ClearRealCall) /* Get the top part of the SC number */ ori r0,r0,LOW_ADDR(ClearRealCall) /* and the bottom part */ sc /* Do it to it */ blr /* Bye bye, Birdie... */ -ENTRY(ClearRealLL, TAG_NO_FRAME_USED) + + .align 5 + .globl EXT(ClearRealLL) + +LEXT(ClearRealLL) /* * We take the first parameter as a physical address. The second is the length in bytes. @@ -175,30 +184,56 @@ clrloop: subi r4,r4,32 /* Back off a cache line */ /* * This routine will read in 32 bytes of real storage. */ - -ENTRY(ReadReal, TAG_NO_FRAME_USED) - - mfmsr r0 /* Get the MSR */ - rlwinm r5,r0,0,28,26 /* Clear DR bit */ - rlwinm r5,r5,0,17,15 /* Clear EE bit */ - mtmsr r5 /* Disable EE and DR */ + + .align 5 + .globl EXT(ReadReal) + +LEXT(ReadReal) + + mfsprg r9,2 ; Get the features + mfmsr r0 ; Get the MSR + li r8,lo16(MASK(MSR_DR)) ; Get the DR bit + rlwinm. r9,r9,0,pf64Bitb,pf64Bitb ; Are we 64-bit? + ori r8,r8,lo16(MASK(MSR_EE)) ; Add in the EE bit + li r7,1 ; Get set for it + andc r8,r0,r8 ; Turn off EE and DR + bt-- cr0_eq,rr32a ; No, we are not... + + rldimi r8,r7,63,MSR_SF_BIT ; Set SF bit (bit 0) + sldi r3,r3,32 ; Slide on over for true 64-bit address + mtmsrd r8 + isync + or r3,r3,r4 ; Join top and bottom of address + mr r4,r5 ; Set destination address + b rrJoina ; Join on up... + +rr32a: mr r3,r4 ; Position bottom of long long + mr r4,r5 ; Set destination address + mtmsr r8 /* Disable EE and DR */ isync /* Just make sure about it */ - lwz r5,0(r3) /* Get word 0 */ +rrJoina: lwz r5,0(r3) /* Get word 0 */ lwz r6,4(r3) /* Get word 1 */ lwz r7,8(r3) /* Get word 2 */ lwz r8,12(r3) /* Get word 3 */ - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off + lis r2,hi16(MASK(MSR_VEC)) ; Get the vector enable lwz r9,16(r3) /* Get word 4 */ + ori r2,r2,lo16(MASK(MSR_FP)) ; Get the FP enable lwz r10,20(r3) /* Get word 5 */ - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off + andc r0,r0,r2 ; Clear VEC and FP enables lwz r11,24(r3) /* Get word 6 */ lwz r12,28(r3) /* Get word 7 */ - mtmsr r0 /* Restore original machine state */ + bt-- cr0_eq,rr32b ; We are not 64-bit... + + mtmsrd r0 + isync + b rrJoinb ; Join on up... + +rr32b: mtmsr r0 /* Restore original machine state */ isync /* Insure goodness */ - stw r5,0(r4) /* Set word 0 */ +rrJoinb: stw r5,0(r4) /* Set word 0 */ stw r6,4(r4) /* Set word 1 */ stw r7,8(r4) /* Set word 2 */ stw r8,12(r4) /* Set word 3 */ @@ -213,8 +248,12 @@ ENTRY(ReadReal, TAG_NO_FRAME_USED) /* * This routine is used to load all 4 DBATs. */ + + .align 5 + .globl EXT(LoadDBATs) + +LEXT(LoadDBATs) -ENTRY(LoadDBATs, TAG_NO_FRAME_USED) lis r0,HIGH_ADDR(LoadDBATsCall) /* Top half of LoadDBATsCall firmware call number */ ori r0,r0,LOW_ADDR(LoadDBATsCall) /* Bottom half */ @@ -222,7 +261,11 @@ ENTRY(LoadDBATs, TAG_NO_FRAME_USED) blr /* Bye bye, Birdie... */ -ENTRY(xLoadDBATsLL, TAG_NO_FRAME_USED) + + .align 5 + .globl EXT(xLoadDBATsLL) + +LEXT(xLoadDBATsLL) lwz r4,0(r3) /* Get DBAT 0 high */ lwz r5,4(r3) /* Get DBAT 0 low */ @@ -251,14 +294,21 @@ ENTRY(xLoadDBATsLL, TAG_NO_FRAME_USED) * This routine is used to load all 4 IBATs. */ -ENTRY(LoadIBATs, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(LoadIBATs) + +LEXT(LoadIBATs) + lis r0,HIGH_ADDR(LoadIBATsCall) /* Top half of LoadIBATsCall firmware call number */ ori r0,r0,LOW_ADDR(LoadIBATsCall) /* Bottom half */ sc /* Do it to it */ blr /* Bye bye, Birdie... 
*/ -ENTRY(xLoadIBATsLL, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(xLoadIBATsLL) + +LEXT(xLoadIBATsLL) lwz r4,0(r3) /* Get IBAT 0 high */ lwz r5,4(r3) /* Get IBAT 0 low */ @@ -287,8 +337,11 @@ ENTRY(xLoadIBATsLL, TAG_NO_FRAME_USED) /* * This is the glue to call the CutTrace firmware call */ - -ENTRY(dbgTrace, TAG_NO_FRAME_USED) + + .align 5 + .globl EXT(dbgTrace) + +LEXT(dbgTrace) lis r0,HIGH_ADDR(CutTrace) /* Top half of CutTrace firmware call number */ ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half */ @@ -298,8 +351,11 @@ ENTRY(dbgTrace, TAG_NO_FRAME_USED) /* * This is the glue to create a fake I/O interruption */ - -ENTRY(CreateFakeIO, TAG_NO_FRAME_USED) + + .align 5 + .globl EXT(CreateFakeIO) + +LEXT(CreateFakeIO) lis r0,HIGH_ADDR(CreateFakeIOCall) /* Top half of CreateFakeIO firmware call number */ ori r0,r0,LOW_ADDR(CreateFakeIOCall) /* Bottom half */ @@ -309,14 +365,18 @@ ENTRY(CreateFakeIO, TAG_NO_FRAME_USED) /* * This is the glue to create a fake Dec interruption */ - -ENTRY(CreateFakeDEC, TAG_NO_FRAME_USED) + + .align 5 + .globl EXT(CreateFakeDEC) + +LEXT(CreateFakeDEC) #if 0 mflr r4 ; (TEST/DEBUG) bl EXT(ml_sense_nmi) ; (TEST/DEBUG) mtlr r4 ; (TEST/DEBUG) -#endif +#endif + lis r0,HIGH_ADDR(CreateFakeDECCall) /* Top half of CreateFakeDEC firmware call number */ ori r0,r0,LOW_ADDR(CreateFakeDECCall) /* Bottom half */ sc /* Do it to it */ @@ -327,7 +387,10 @@ ENTRY(CreateFakeDEC, TAG_NO_FRAME_USED) * This is the glue to create a shutdown context */ -ENTRY(CreateShutdownCTX, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(CreateShutdownCTX) + +LEXT(CreateShutdownCTX) lis r0,HIGH_ADDR(CreateShutdownCTXCall) /* Top half of CreateShutdownCTX firmware call number */ ori r0,r0,LOW_ADDR(CreateShutdownCTXCall) /* Bottom half */ @@ -337,8 +400,11 @@ ENTRY(CreateShutdownCTX, TAG_NO_FRAME_USED) /* * This is the glue to choke the system */ - -ENTRY(ChokeSys, TAG_NO_FRAME_USED) + + .align 5 + .globl EXT(ChokeSys) + +LEXT(ChokeSys) lis r0,HIGH_ADDR(Choke) /* Top half of Choke firmware call number */ ori r0,r0,LOW_ADDR(Choke) /* Bottom half */ @@ -349,8 +415,11 @@ ENTRY(ChokeSys, TAG_NO_FRAME_USED) * Used to initialize the SCC for debugging output */ + + .align 5 + .globl EXT(fwSCCinit) -ENTRY(fwSCCinit, TAG_NO_FRAME_USED) +LEXT(fwSCCinit) mfmsr r8 /* Save the MSR */ mr. r3,r3 /* See if printer or modem */ @@ -635,8 +704,11 @@ wSCCrdy: eieio /* Barricade it */ * This routine is used to write debug output to either the modem or printer port. * parm 1 is printer (0) or modem (1); parm 2 is ID (printed directly); parm 3 converted to hex */ + + .align 5 + .globl EXT(dbgDisp) -ENTRY(dbgDisp, TAG_NO_FRAME_USED) +LEXT(dbgDisp) mr r12,r0 /* Keep R0 pristine */ lis r0,HIGH_ADDR(dbgDispCall) /* Top half of dbgDispCall firmware call number */ @@ -649,7 +721,10 @@ ENTRY(dbgDisp, TAG_NO_FRAME_USED) /* Here's the low-level part of dbgDisp */ -ENTRY(dbgDispLL, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(dbgDispLL) + +LEXT(dbgDispLL) dbgDispInt: mfmsr r8 /* Save the MSR */ @@ -1032,21 +1107,22 @@ hexTab: STRINGD "0123456789ABCDEF" /* Convert hex numbers to printable hex */ -ENTRY(dbgRegsLL, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(dbgRegsLL) + +LEXT(dbgRegsLL) + b EXT(FCReturn) ; Bye dudes... +#if 0 li r3,0 /* ? */ bl dbgRegsCm /* Join on up... 
*/ - -/* - * Note that we bypass the normal return 'cause we don't wanna mess up R3 - */ - mfsprg r11,0 /* Get the per_proc */ - lwz r11,PP_TEMPWORK1(r11) /* Get our return point */ - li r3,T_IN_VAIN /* Tell the vector handler that we took care of it */ - mtlr r11 /* Set the return */ - blr /* Bye, dudes... */ + b EXT(FCReturn) ; Bye dudes... -ENTRY(dbgRegs, TAG_NO_FRAME_USED) + + .align 5 + .globl EXT(dbgRegs) + +LEXT(dbgRegs) dbgRegsCm: mfmsr r8 /* Save the MSR */ mr. r3,r3 /* ? */ @@ -1431,14 +1507,17 @@ ddwait1: lwarx r5,0,r3 /* Get the lock */ mtmsr r8 /* Restore the MSR */ isync /* Wait for it */ blr /* Leave... */ - +#endif /* * Used for debugging to leave stuff in 0x380-0x3FF (128 bytes). * Mapping is V=R. Stores and loads are real. */ + + .align 5 + .globl EXT(dbgCkpt) -ENTRY(dbgCkpt, TAG_NO_FRAME_USED) +LEXT(dbgCkpt) mr r12,r0 /* Keep R0 pristene */ lis r0,HIGH_ADDR(dbgCkptCall) /* Top half of dbgCkptCall firmware call number */ @@ -1451,7 +1530,11 @@ ENTRY(dbgCkpt, TAG_NO_FRAME_USED) /* Here's the low-level part of dbgCkpt */ -ENTRY(dbgCkptLL, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(dbgCkptLL) + +LEXT(dbgCkptLL) + li r12,0x380 /* Point to output area */ li r1,32 /* Get line size */ @@ -1556,14 +1639,14 @@ ENTRY(dbgCkptLL, TAG_NO_FRAME_USED) * Do Preemption. Forces a T_PREEMPT trap to allow a preemption to occur. */ -ENTRY(DoPreemptLL, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(DoPreemptLL) + +LEXT(DoPreemptLL) - mfsprg r11,0 /* Get the per_proc address */ - lwz r11,PP_TEMPWORK1(r11) /* Restore the return address */ li r3,T_PREEMPT /* Set preemption interrupt value */ - mtlr r11 /* Restore the LR */ stw r3,saveexception(r13) /* Modify the exception type to preemption */ - blr /* Return to interrupt handler */ + b EXT(FCReturn) ; Bye dudes... /* @@ -1573,14 +1656,14 @@ ENTRY(DoPreemptLL, TAG_NO_FRAME_USED) * Forces a T_CSWITCH */ -ENTRY(SwitchContextLL, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(SwitchContextLL) + +LEXT(SwitchContextLL) - mfsprg r11,0 /* Get the per_proc address */ - lwz r11,PP_TEMPWORK1(r11) /* Restore the return address */ li r3,T_CSWITCH /* Set context switch value */ - mtlr r11 /* Restore the LR */ stw r3,saveexception(r13) /* Modify the exception type to switch context */ - blr /* Return to interrupt handler */ + b EXT(FCReturn) ; Bye dudes... /* @@ -1588,92 +1671,106 @@ ENTRY(SwitchContextLL, TAG_NO_FRAME_USED) * Forces a T_INTERRUPT trap to pretend that an actual I/O interrupt occurred. */ -ENTRY(CreateFakeIOLL, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(CreateFakeIOLL) + +LEXT(CreateFakeIOLL) - mfsprg r11,0 /* Get the per_proc address */ - lwz r11,PP_TEMPWORK1(r11) /* Restore the return address */ li r3,T_INTERRUPT /* Set external interrupt value */ - mtlr r11 /* Restore the LR */ stw r3,saveexception(r13) /* Modify the exception type to external */ - blr /* Return to interrupt handler */ + b EXT(FCReturn) ; Bye dudes... /* * Create a shutdown context * Forces a T_SHUTDOWN trap. */ -ENTRY(CreateShutdownCTXLL, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(CreateShutdownCTXLL) + +LEXT(CreateShutdownCTXLL) - mfsprg r11,0 /* Get the per_proc address */ - lwz r11,PP_TEMPWORK1(r11) /* Restore the return address */ li r3,T_SHUTDOWN /* Set external interrupt value */ - mtlr r11 /* Restore the LR */ stw r3,saveexception(r13) /* Modify the exception type to external */ - blr /* Return to interrupt handler */ + b EXT(FCReturn) ; Bye dudes... /* * Create a fake decrementer 'rupt. * Forces a T_DECREMENTER trap to pretend that an actual decrementer interrupt occurred. 
*/ -ENTRY(CreateFakeDECLL, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(CreateFakeDECLL) + +LEXT(CreateFakeDECLL) - mfsprg r11,0 /* Get the per_proc address */ - lwz r11,PP_TEMPWORK1(r11) /* Restore the return address */ li r3,T_DECREMENTER /* Set decrementer interrupt value */ - mtlr r11 /* Restore the LR */ stw r3,saveexception(r13) /* Modify the exception type to external */ - blr /* Return to interrupt handler */ + b EXT(FCReturn) ; Bye dudes... /* * Choke the system. */ -ENTRY(DoChokeLL, TAG_NO_FRAME_USED) + .align 5 + .globl EXT(DoChokeLL) + +LEXT(DoChokeLL) - mfsprg r11,0 ; Get the per_proc address - lwz r11,PP_TEMPWORK1(r11) ; Restore the return address li r3,T_CHOKE ; Set external interrupt value - mtlr r11 ; Restore the LR stw r3,saveexception(r13) ; Modify the exception type to external - blr ; Return to interrupt handler - + b EXT(FCReturn) ; Bye dudes... + /* - * Set the low level trace flags + * Null firmware call */ - -ENTRY(LLTraceSet, TAG_NO_FRAME_USED) - mfsprg r6,2 ; Get feature flags - mfmsr r12 /* Get the MSR */ - mr r4,r3 /* Save the new value */ - andi. r3,r12,0x01C0 /* Clear interrupts and translation */ - mtcrf 0x04,r6 ; Set the features - bt pfNoMSRirb,ltsNoMSR ; Use MSR... + .align 5 + .globl EXT(NullLL) - mtmsr r3 ; Translation and all off - isync ; Toss prefetch - b ltsNoMSRx - -ltsNoMSR: li r0,loadMSR ; Get the MSR setter SC - sc ; Set it +LEXT(NullLL) -ltsNoMSRx: - - lis r5,hi16(EXT(trcWork)) ; Get trace area - rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - ori r5,r5,lo16(EXT(trcWork)) ; again - - lwz r3,traceMask(r5) /* Get the old trace flags to pass back */ - rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - stw r4,traceMask(r5) /* Replace with the new ones */ + li r3,T_IN_VAIN ; Set to just ignore this one + b EXT(FCReturn) ; Bye dudes... + +; +; Null firmware call +; + + .align 5 + .globl EXT(iNullLL) + +LEXT(iNullLL) + + mfspr r4,pmc1 ; Get stamp + stw r4,0x6100+(9*16)+0x0(0) ; Save it +#if 1 + mfspr r4,pmc2 ; Get stamp + stw r4,0x6100+(9*16)+0x4(0) ; Save it + mfspr r4,pmc3 ; Get stamp + stw r4,0x6100+(9*16)+0x8(0) ; Save it + mfspr r4,pmc4 ; Get stamp + stw r4,0x6100+(9*16)+0xC(0) ; Save it +#endif + li r3,T_IN_VAIN ; Set to just ignore this one + b EXT(FCReturn) ; Bye dudes... - mtmsr r12 /* Restore the MSR */ - isync +; +; Set the low level trace flags +; + + .align 5 + .globl EXT(LLTraceSet) + +LEXT(LLTraceSet) + + mr r4,r3 ; Save the new value - blr /* Leave... */ + lwz r3,traceMask(0) ; Get the old trace flags to pass back + stw r4,traceMask(0) ; Replace with the new ones + blr ; Leave... -#if 1 +#if 0 /* ; *************************************************************************** @@ -1698,7 +1795,11 @@ ltsNoMSRx: #define GDfromright 20 #define GDfontsize 16 -ENTRY(GratefulDeb,TAG_NO_FRAME_USED) + .align 5 + .globl EXT(GratefulDeb) + +LEXT(GratefulDeb) + mfspr r6,pir /* Get the PIR */ lis r5,HIGH_ADDR(EXT(GratefulDebWork)) /* Point to our work area */ rlwinm r6,r6,8,23,23 /* Get part of the offset to our processors area */ @@ -1828,7 +1929,10 @@ GDbailout: mr r1,r31 /* Move the workarea base */ */ -ENTRY(GratefulDebDisp,TAG_NO_FRAME_USED) + .align 5 + .globl EXT(GratefulDebDisp) + +LEXT(GratefulDebDisp) mfmsr r9 /* Save the current MSR */ mflr r7 /* Save the return */ @@ -1849,22 +1953,26 @@ ENTRY(GratefulDebDisp,TAG_NO_FRAME_USED) */ -ENTRY(checkNMI,TAG_NO_FRAME_USED) + .align 5 + .globl EXT(checkNMI) + +LEXT(checkNMI) mfmsr r9 /* Save it */ andi. 
r8,r9,0x7FCF /* Clear it */ mtmsr r8 /* Disable it */ isync /* Fence it */ lis r7,0xF300 /* Find it */ + lis r2,hi16(MASK(MSR_VEC)) ; Get the vector enable ori r7,r7,0x0020 /* Find it */ + ori r2,r2,lo16(MASK(MSR_FP)) ; Get the FP enable dcbi 0,r7 /* Toss it */ sync /* Sync it */ - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off + andc r9,r9,r2 ; Clear VEC and FP enables eieio /* Get it */ lwz r6,0x000C(r7) /* Check it */ eieio /* Fence it */ dcbi 0,r7 /* Toss it */ - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off rlwinm. r4,r6,0,19,19 /* Check it */ rlwinm r6,r6,0,20,18 /* Clear it */ sync /* Sync it */ @@ -1887,193 +1995,6 @@ xnonmi: /* Label it */ isync /* Hold it */ blr /* Return from it */ - -/* - * Early debug code - */ - -dumpr7: lis r9,HIGH_ADDR(hexTab) /* (TEST/DEBUG) */ - li r5,8 /* (TEST/DEBUG) */ - ori r9,r9,LOW_ADDR(hexTab) /* (TEST/DEBUG) */ - -dumpr7n: rlwinm r7,r7,4,0,31 /* (TEST/DEBUG) */ - mr r6,r7 /* (TEST/DEBUG) */ - andi. r6,r6,15 /* (TEST/DEBUG) */ - lbzx r6,r9,r6 /* (TEST/DEBUG) */ - lis r10,0xF301 /* (TEST/DEBUG) */ - ori r10,r10,0x2000 /* (TEST/DEBUG) */ - -#if 0 -xqrw2: eieio /* (TEST/DEBUG) */ - lbz r7,0(r10) /* (TEST/DEBUG) */ - dcbi 0,r10 /* (TEST/DEBUG) */ - sync /* (TEST/DEBUG) */ - andi. r7,r7,0x04 /* (TEST/DEBUG) */ - beq xqrw2 /* (TEST/DEBUG) */ -#endif - - dcbf 0,r10 /* (TEST/DEBUG) */ - sync /* (TEST/DEBUG) */ - dcbi 0,r10 /* (TEST/DEBUG) */ - eieio /* (TEST/DEBUG) */ - stb r6,4(r10) /* (TEST/DEBUG) */ - - lis r6,10 /* (TEST/DEBUG) */ -dumpr7d: addi r6,r6,-1 /* (TEST/DEBUG) */ - mr. r6,r6 /* (TEST/DEBUG) */ - bne- dumpr7d /* (TEST/DEBUG) */ - dcbf 0,r10 /* (TEST/DEBUG) */ - sync /* (TEST/DEBUG) */ - dcbi 0,r10 /* (TEST/DEBUG) */ - eieio /* (TEST/DEBUG) */ - - addic. r5,r5,-1 /* (TEST/DEBUG) */ - bne+ dumpr7n /* (TEST/DEBUG) */ - - blr /* (TEST/DEBUG) */ - -; -; Log a special entry in physical memory. -; This assumes that memory size has been significantly lowered using -; the maxmem boot option. The buffer starts just after the end of mem_size. -; -; This is absolutely for special tracing cases. Do not ever leave in... -; - -ENTRY(dbgLog,TAG_NO_FRAME_USED) - - li r11,0 ; Clear callers callers callers return - li r10,0 ; Clear callers callers callers callers return - li r9,0 ; Clear callers callers callers callers callers return - lwz r2,0(r1) ; Get callers callers stack frame - lis r0,0x4000 ; First invalid address - lwz r12,8(r2) ; Get our callers return - lwz r2,0(r2) ; Back chain - - mr. r2,r2 ; End of chain? - cmplw cr1,r2,r0 ; Valid kernel address? - beq- nosavehere ; Yes, end of chain... - bge- cr1,nosavehere ; No... - lwz r11,8(r2) ; Get our callers return - lwz r2,0(r2) ; Back chain - - mr. r2,r2 ; End of chain? - cmplw cr1,r2,r0 ; Valid kernel address? - beq- nosavehere ; Yes, end of chain... - bge- cr1,nosavehere ; No... - lwz r10,8(r2) ; Get our callers return - lwz r2,0(r2) ; Back chain - - mr. r2,r2 ; End of chain? - cmplw cr1,r2,r0 ; Valid kernel address? - beq- nosavehere ; Yes, end of chain... - bge- cr1,nosavehere ; No... - lwz r9,8(r2) ; Get our callers return - -nosavehere: mfmsr r8 ; Get the MSR - lis r2,hi16(EXT(DebugWork)) ; High part of area - lis r7,hi16(EXT(mem_actual)) ; High part of actual - andi. r0,r8,0x7FCF ; Interrupts and translation off - ori r2,r2,lo16(EXT(DebugWork)) ; Get the entry - mtmsr r0 ; Turn stuff off - ori r7,r7,lo16(EXT(mem_actual)) ; Get the actual - isync - - lwz r0,4(r2) ; Get the flag - mr. r0,r0 ; Should we log? 
- lwz r0,0(r7) ; Get the end of memory - lwz r7,0(r2) ; Get the position - bne- waytoofar ; No logging... - mr. r7,r7 ; Is this the first? - bne+ gotspot ; Nope... - - lis r7,hi16(EXT(mem_size)) ; High part of defined memory - ori r7,r7,lo16(EXT(mem_size)) ; Low part of defined memory - lwz r7,0(r7) ; Make it end of defined - -gotspot: cmplw r7,r0 ; Do we fit in memory - addi r0,r7,0x0020 ; Next slot - bge- waytoofar ; No fit... - - stw r0,0(r2) ; Set next time slot - dcbz 0,r7 ; Zap it - - stw r3,0(r7) ; First data - li r3,32 ; Disp to next line - stw r4,4(r7) ; Second data - dcbz r3,r7 ; Zap it - stw r5,8(r7) ; Third data - stw r6,12(r7) ; Fourth data - - stw r12,16(r7) ; Callers callers - stw r11,20(r7) ; Callers callers caller - stw r10,24(r7) ; Callers callers callers caller - stw r9,28(r7) ; Callers callers callers callers caller - -waytoofar: mtmsr r8 ; Back to normal - isync - blr - -; -; Same as the other, but no traceback and 16 byte entry -; Trashes R0, R2, R10, R12 -; - - .align 5 - .globl EXT(dbgLog2) - -LEXT(dbgLog2) - - - mfmsr r10 ; Get the MSR - lis r2,hi16(EXT(DebugWork)) ; High part of area - lis r12,hi16(EXT(mem_actual)) ; High part of actual - andi. r0,r10,0x7FCF ; Interrupts and translation off - ori r2,r2,lo16(EXT(DebugWork)) ; Get the entry - mtmsr r0 ; Turn stuff off - ori r12,r12,lo16(EXT(mem_actual)) ; Get the actual - isync - - lwz r0,4(r2) ; Get the flag - mr. r0,r0 ; Should we log? - lwz r0,0(r12) ; Get the end of memory - lwz r12,0(r2) ; Get the position - bne- waytoofar2 ; No logging... - mr. r12,r12 ; Is this the first? - bne+ gotspot2 ; Nope... - - lis r12,hi16(EXT(mem_size)) ; High part of defined memory - ori r12,r12,lo16(EXT(mem_size)) ; Low part of defined memory - lwz r12,0(r12) ; Make it end of defined - -gotspot2: cmplw cr1,r12,r0 ; Do we fit in memory - rlwinm. r0,r12,0,27,27 ; Are we on a new line? - bge- cr1,waytoofar2 ; No fit... - addi r0,r12,0x0010 ; Next slot - - bne+ nonewline ; Not on a new line... - dcbz br0,r12 ; Clear it so we do not fetch it - -nonewline: cmplwi r3,68 ; Special place for time stamp? - - stw r0,0(r2) ; Set next time slot - bne+ nospcts ; Nope... - - lwz r0,0x17C(br0) ; Get special saved time stamp - b nospctt ; Skip... - -nospcts: mftb r0 ; Get the current time - -nospctt: stw r3,4(r12) ; First data - stw r4,8(r12) ; Second data - stw r5,12(r12) ; Third data - stw r0,0(r12) ; Time stamp - -waytoofar2: mtmsr r10 ; Back to normal - isync - blr - - ; ; Saves floating point registers ; @@ -2083,13 +2004,19 @@ waytoofar2: mtmsr r10 ; Back to normal LEXT(stFloat) - mfmsr r0 ; Save the MSR - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r4,r0,0,MSR_EE_BIT,MSR_EE_BIT ; Turn off interruptions - ori r4,r4,lo16(MASK(MSR_FP)) ; Enable floating point + lis r2,hi16(MASK(MSR_VEC)) ; Get the vector enable + li r4,0 + ori r2,r2,lo16(MASK(MSR_FP)) ; Get the FP enable + ori r4,r4,lo16(MASK(MSR_EE)) ; Get the EE bit + + mfmsr r0 ; Save the MSR + + andc r4,r0,r4 ; Clear EE + ori r4,r4,lo16(MASK(MSR_FP)) ; Enable floating point mtmsr r4 isync + + andc r0,r0,r2 ; Clear VEC and FP enables stfd f0,0x00(r3) stfd f1,0x08(r3) @@ -2140,6 +2067,10 @@ LEXT(stFloat) LEXT(stVectors) + lis r2,hi16(MASK(MSR_VEC)) ; Get the vector enable + li r4,0 + ori r2,r2,lo16(MASK(MSR_FP)) ; Get the FP enable + ori r4,r4,lo16(MASK(MSR_EE)) ; Get the EE bit mfsprg r6,2 ; Get features mr r5,r3 ; Save area address @@ -2148,13 +2079,15 @@ LEXT(stVectors) beqlr- ; No... 
mfmsr r0 ; Save the MSR - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r4,r0,0,MSR_EE_BIT,MSR_EE_BIT ; Turn off interruptions + + andc r4,r0,r4 ; Clear EE + oris r4,r4,hi16(MASK(MSR_VEC)) ; Enable vectors mtmsr r4 isync + andc r0,r0,r2 ; Clear FP and VEC + stvxl v0,0,r5 addi r5,r5,16 stvxl v1,0,r5 @@ -2238,10 +2171,16 @@ LEXT(stVectors) LEXT(stSpecrs) + + lis r2,hi16(MASK(MSR_VEC)) ; Get the vector enable + li r4,0 + ori r2,r2,lo16(MASK(MSR_FP)) ; Get the FP enable + ori r4,r4,lo16(MASK(MSR_EE)) ; Get the EE bit + + mfmsr r0 ; Save the MSR - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r4,r0,0,MSR_EE_BIT,MSR_EE_BIT ; Turn off interruptions + andc r0,r0,r2 ; Turn off VEC and FP + andc r4,r0,r4 ; And EE mtmsr r4 isync @@ -2305,9 +2244,6 @@ stSnsr: mfsrin r6,r5 addi r4,r4,4 bne+ stSnsr - cmplwi cr1,r12,PROCESSOR_VERSION_604e - cmplwi cr5,r12,PROCESSOR_VERSION_604ev - cror cr1_eq,cr1_eq,cr5_eq ; Set if 604 type cmplwi r12,PROCESSOR_VERSION_750 mfspr r4,hid0 stw r4,(39*4)(r3) @@ -2316,15 +2252,13 @@ stSnsr: mfsrin r6,r5 li r5,0 li r6,0 li r7,0 - beq- cr1,before750 - blt- before750 mfspr r4,hid1 mfspr r5,l2cr mfspr r6,msscr0 mfspr r7,msscr1 -before750: stw r4,(40*4)(r3) + stw r4,(40*4)(r3) stw r6,(42*4)(r3) stw r5,(41*4)(r3) stw r7,(43*4)(r3) @@ -2339,7 +2273,6 @@ isis750: stw r4,0(r3) li r5,0 li r6,0 li r7,0 - beq- cr1,b4750 blt- b4750 mfspr r4,thrm1 @@ -2353,9 +2286,11 @@ b4750: stw r4,(44*4)(r3) stw r7,(47*4)(r3) li r4,0 + li r6,0 cmplwi r12,PROCESSOR_VERSION_7400 bne nnmax + mfspr r6,dabr mfpvr r5 rlwinm r5,r5,0,16,31 cmplwi r5,0x1101 @@ -2366,8 +2301,167 @@ b4750: stw r4,(44*4)(r3) gnmax: mfspr r4,1016 nnmax: stw r4,(48*4)(r3) + stw r6,(49*4)(r3) mtmsr r0 isync blr + + +; +; fwEmMck - this forces the hardware to emulate machine checks +; Only valid on 64-bit machines +; Note: we want interruptions disabled here +; + + .globl EXT(fwEmMck) + + .align 5 + +LEXT(fwEmMck) + + + rlwinm r3,r3,0,1,0 ; Copy low of high high - scomd + rlwinm r5,r5,0,1,0 ; Copy low of high high - hid1 + rlwinm r7,r7,0,1,0 ; Copy low of high high - hid4 + rlwimi r3,r4,0,0,31 ; Copy low of low low + rlwimi r5,r6,0,0,31 ; Copy low of low low + rlwimi r7,r8,0,0,31 ; Copy low of low low + + lis r9,3 ; Start forming hid1 error inject mask + lis r10,hi16(0x01084083) ; Start forming hid4 error inject mask + ori r9,r9,0xC000 ; Next bit + ori r10,r10,lo16(0x01084083) ; Next part + sldi r9,r9,32 ; Shift up high + sldi r10,r10,8 ; Shift into position + + mfspr r0,hid1 ; Get hid1 + mfspr r2,hid4 ; and hid4 + + and r5,r5,r9 ; Keep only error inject controls - hid1 + and r7,r7,r10 ; Keep only error inject controls - hid4 + + andc r0,r0,r9 ; Clear error inject controls hid1 + andc r2,r2,r10 ; Clear error inject controls hid4 + + or r0,r0,r5 ; Add in the new controls hid1 + or r2,r2,r7 ; Add in the new controls hid4 + +/* ? */ +#if 0 + lis r12,CoreErrI ; Get the error inject controls + sync + + mtspr scomd,r3 ; Set the error inject controls + mtspr scomc,r12 ; Request error inject + mfspr r11,scomc ; Get back the status (we just ignore it) +#endif + sync + isync + + mtspr hid1,r0 ; Move in hid1 controls + mtspr hid1,r0 ; We need to do it twice + isync + + sync + mtspr hid4,r2 ; Move in hid4 controls + isync + + blr ; Leave... 
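The fwSCOM routine that follows takes a single argument in r3: the address of a scomcomm parameter block (the C prototype added to Firmware.h above is void fwSCOM(scomcomm *)). The block's declaration is not part of this diff; the assembly reads a halfword function code at scomfunc, a word register number at scomreg, and moves 64-bit values at scomdata and scomstat. A minimal C sketch consistent with those access widths - where the field order and any padding are assumptions, not the authoritative header - would be:

    /* Hypothetical layout of the fwSCOM() parameter block. The field names
     * are taken from the assembly below; the sizes follow from the
     * lhz/lwz/ld/std access widths, but the ordering and padding are
     * inferred, since the real declaration is in a header not shown here. */
    typedef struct scomcomm {
        unsigned short     scomfunc;  /* 0 = read SCOM, nonzero = write */
        unsigned int       scomreg;   /* SCOM register number */
        unsigned long long scomstat;  /* status handed back from scomc */
        unsigned long long scomdata;  /* data to write, or data read back */
    } scomcomm;

A caller would fill in scomfunc and scomreg (plus scomdata for a write), issue fwSCOM(&parm), and then check scomstat before trusting scomdata.
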
+ +; +; fwSCOMrd - read/write SCOM +; + .align 5 + .globl EXT(fwSCOM) + +LEXT(fwSCOM) + + lhz r12,scomfunc(r3) ; Get the function + lwz r4,scomreg(r3) ; Get the register + rldicr r4,r4,8,47 ; Position for SCOM + + mr. r12,r12 ; See if read or write + bne fwSCwrite ; Go do a write + + mfsprg r0,2 ; Get the feature flags + ori r4,r4,0x8000 ; Set to read data + rlwinm. r0,r0,pfSCOMFixUpb+1,31,31 ; Set shift if we need a fix me up + sync + + mtspr scomc,r4 ; Request the register + mfspr r11,scomd ; Get the register contents + mfspr r10,scomc ; Get back the status + sync + isync + + sld r11,r11,r0 ; Fix up if needed + + std r11,scomdata(r3) ; Save result + eieio + std r10,scomstat(r3) ; Save status + + blr + +fwSCwrite: ld r5,scomdata(r3) ; Get the data + + sync + + mtspr scomd,r5 ; Set the data + mtspr scomc,r4 ; Set it + mfspr r10,scomc ; Get back the status + sync + isync + + std r10,scomstat(r3) ; Save status + + blr + +; +; diagTrap - this is used to trigger checks from user space +; any "twi 31,r31,0xFFFx" will come here (x = 0 to F). +; On entry R3 points to savearea. +; R4 is the "x" from instruction; +; Pass back 1 to no-op twi and return to user +; Pass back 0 to treat as normal twi. +; + + .globl EXT(diagTrap) + + .align 5 + +LEXT(diagTrap) + + li r3,1 ; Ignore TWI + blr ; Leave... + + + + +; +; setPmon - this is used to manipulate MMCR0 and MMCR1 + + .globl EXT(setPmon) + + .align 5 + +LEXT(setPmon) + + li r0,0 + isync + mtspr mmcr0,r0 ; Clear MMCR0 + mtspr mmcr1,r0 ; Clear MMCR1 + mtspr pmc1,r0 + mtspr pmc2,r0 + mtspr pmc3,r0 + mtspr pmc4,r0 + + isync + + mtspr mmcr0,r3 ; Set MMCR0 + mtspr mmcr1,r4 ; Set MMCR1 + isync + blr ; Leave... + + diff --git a/osfmk/ppc/FirmwareC.c b/osfmk/ppc/FirmwareC.c index 030dce73b..a2e9bd5f3 100644 --- a/osfmk/ppc/FirmwareC.c +++ b/osfmk/ppc/FirmwareC.c @@ -42,11 +42,9 @@ #include #include #include -#include #include #include #include -//#include #include #include @@ -281,4 +279,7 @@ void GratefulDebInit(bootBumbleC *boot_video_info) { /* Initialize the video deb } - +void debugNoop(void); +void debugNoop(void) { /* This does absolutely nothing */ + return; +} diff --git a/osfmk/ppc/FirmwareCalls.h b/osfmk/ppc/FirmwareCalls.h index 51542dbdb..e404ade79 100644 --- a/osfmk/ppc/FirmwareCalls.h +++ b/osfmk/ppc/FirmwareCalls.h @@ -63,9 +63,8 @@ fwCallEnt(dbgRegsCall, dbgRegsLL) /* Dumps all registers */ fwCallEnt(CreateFakeDECCall, CreateFakeDECLL) /* Make a fake decrementer interruption */ fwCallEnt(CreateShutdownCTXCall, CreateShutdownCTXLL) /* create a shutdown context */ -#if PERF_HIST - fwCallEnt(PerfCtlCall, PerfCtlLL) /* Control performance monitor */ -#endif + fwCallEnt(NullCall, NullLL) /* Null Firmware call */ + fwCallEnt(iNullCall, iNullLL) /* Instrumented null Firmware call */ #endif /* _FIRMWARECALLS_H_ */ diff --git a/osfmk/ppc/MPinterfaces.s b/osfmk/ppc/MPinterfaces.s deleted file mode 100644 index b428d1129..000000000 --- a/osfmk/ppc/MPinterfaces.s +++ /dev/null @@ -1,458 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. 
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_FREE_COPYRIGHT@ - */ -/* - * @APPLE_FREE_COPYRIGHT@ - */ - -/* - MPinterfaces.s - - General interface to the MP hardware handlers anonymous - - Lovingly crafted by Bill Angell using traditional methods and only natural or recycled materials. - No animal products are used other than rendered otter bile. - -*/ - -#include -#include -#include -#include -#include -#include - -/* - * This first section is the glue for the high level C code. - * Anything that needs any kind of system services (e.g., VM) has to be done here. The firmware - * code that implements the SC runs in real mode. - */ - - - -/* #define MPI_DEBUGGING 0 */ -#define MPI_DEBUGGING 0 - -/* - * The routine that implements cpu_number. - */ - -ENTRY(cpu_number, TAG_NO_FRAME_USED) - - mfmsr r9 /* Save the old MSR */ - rlwinm r8,r9,0,17,15 /* Clear interruptions */ - mtmsr r8 /* Interrupts off */ - mfsprg r7,0 /* Get per-proc block */ - lhz r3,PP_CPU_NUMBER(r7) /* Get CPU number */ - mtmsr r9 /* Restore interruptions to entry */ - blr /* Return... */ - - -/* - * The routine glues to the count CPU firmware call - */ - -ENTRY(MPgetProcCount, TAG_NO_FRAME_USED) - - mr r12,r0 /* Keep R0 pristene */ - lis r0,HIGH_ADDR(MPgetProcCountCall) /* Top half of MPgetProcCount firmware call number */ - ori r0,r0,LOW_ADDR(MPgetProcCountCall) /* Bottom half */ - sc /* Go see how many processors we have */ - -#if MPI_DEBUGGING - lis r0,HIGH_ADDR(CutTrace) /* Top half of trace entry maker call */ - ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half of trace entry maker call */ - sc /* Cut a backend trace entry */ -#endif - - mr r0,r12 /* Restore R0 */ - - blr /* Return, pass back R3... */ - -/* - * The routine glues to the start CPU firmware call - actually it's really a boot - * The first parameter is the CPU number to start - * The second parameter is the real address of the code used to boot the processor - * The third parameter is the real addess of the CSA for the subject processor - */ - -ENTRY(MPstart, TAG_NO_FRAME_USED) - - mr r12,r0 /* Keep R0 pristene */ - lis r0,HIGH_ADDR(MPstartCall) /* Top half of MPstartCall firmware call number */ - ori r0,r0,LOW_ADDR(MPstartCall) /* Bottom half */ - sc /* Go see how many processors we have */ - -#if MPI_DEBUGGING - lis r0,HIGH_ADDR(CutTrace) /* Top half of trace entry maker call */ - ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half of trace entry maker call */ - sc /* Cut a backend trace entry */ -#endif - - mr r0,r12 /* Restore R0 */ - blr /* Return... 
*/ - -/* - * This routine glues to the get external interrupt handler physical address - */ - -ENTRY(MPexternalHook, TAG_NO_FRAME_USED) - - mr r12,r0 /* Keep R0 pristene */ - lis r0,HIGH_ADDR(MPexternalHookCall) /* Top half of MPexternalHookCall firmware call number */ - ori r0,r0,LOW_ADDR(MPexternalHookCall) /* Bottom half */ - sc /* Go see how many processors we have */ - -#if MPI_DEBUGGING - lis r0,HIGH_ADDR(CutTrace) /* Top half of trace entry maker call */ - ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half of trace entry maker call */ - sc /* Cut a backend trace entry */ -#endif - - mr r0,r12 /* Restore R0 */ - blr /* Return... */ - - -/* - * This routine glues to the signal processor routine - */ - -ENTRY(MPsignal, TAG_NO_FRAME_USED) - - mr r12,r0 /* Keep R0 pristene */ - lis r0,HIGH_ADDR(MPsignalCall) /* Top half of MPsignalCall firmware call number */ - ori r0,r0,LOW_ADDR(MPsignalCall) /* Bottom half */ - sc /* Go kick the other guy */ - -#if MPI_DEBUGGING - lis r0,HIGH_ADDR(CutTrace) /* Top half of trace entry maker call */ - ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half of trace entry maker call */ - sc /* Cut a backend trace entry */ -#endif - - mr r0,r12 /* Restore R0 */ - blr /* Return... */ - - -/* - * This routine glues to the stop processor routine - */ - -ENTRY(MPstop, TAG_NO_FRAME_USED) - - mr r12,r0 /* Keep R0 pristene */ - lis r0,HIGH_ADDR(MPstopCall) /* Top half of MPsignalCall firmware call number */ - ori r0,r0,LOW_ADDR(MPstopCall) /* Bottom half */ - sc /* Stop the other guy cold */ - -#if MPI_DEBUGGING - lis r0,HIGH_ADDR(CutTrace) /* Top half of trace entry maker call */ - ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half of trace entry maker call */ - sc /* Cut a backend trace entry */ -#endif - - mr r0,r12 /* Restore R0 */ - blr /* Return... */ - - -/* ************************************************************************************************************* - * - * This second section is the glue for the low level stuff directly into the MP plugin. - * At this point every register in existence should be saved. Well, they're saved, - * but R13 points to the savearea, and R20 to the trace entry. Please be careful - * with these. You won't like what happens if they're different when you exit. - * - ***************************************************************************************************************/ - - -/* - * See how many physical processors we have - */ - -ENTRY(MPgetProcCountLL, TAG_NO_FRAME_USED) - - lis r11,HIGH_ADDR(EXT(MPEntries)) /* Get the address of the MP entry block (in the V=R area) */ - ori r11,r11,LOW_ADDR(EXT(MPEntries)) /* Get the bottom of the MP spec area */ - lwz r10,kCountProcessors*4(r11) /* Get the routine entry point */ - mflr r14 /* Save the return in an unused register */ - mtlr r10 /* Set it */ - blrl /* Call the routine */ - mtlr r14 /* Restore firmware caller address */ - blr /* Leave... */ - -/* - * Start up a processor - */ - -ENTRY(MPstartLL, TAG_NO_FRAME_USED) - - lis r11,HIGH_ADDR(EXT(MPEntries)) /* Get the address of the MP entry block (in the V=R area) */ - ori r11,r11,LOW_ADDR(EXT(MPEntries)) /* Get the bottom of the MP spec area */ - lwz r10,kStartProcessor*4(r11) /* Get the routine entry point */ - mflr r14 /* Save the return in an unused register */ - mtlr r10 /* Set it */ - blrl /* Call the routine */ - mtlr r14 /* Restore firmware caller address */ - blr /* Leave... 
*/ - -/* - * Get physical address of SIGP external handler - */ - -ENTRY(MPexternalHookLL, TAG_NO_FRAME_USED) - - lis r11,HIGH_ADDR(EXT(MPEntries)) /* Get the address of the MP entry block (in the V=R area) */ - ori r11,r11,LOW_ADDR(EXT(MPEntries)) /* Get the bottom of the MP spec area */ - lwz r10,kExternalHook*4(r11) /* Get the routine entry point */ - mflr r14 /* Save the return in an unused register */ - mtlr r10 /* Set it */ - blrl /* Call the routine */ - mtlr r14 /* Restore firmware caller address */ - blr /* Leave... */ - - - -/* - * Send a signal to another processor - */ - -ENTRY(MPsignalLL, TAG_NO_FRAME_USED) - - lis r11,HIGH_ADDR(EXT(MPEntries)) /* Get the address of the MP entry block (in the V=R area) */ - ori r11,r11,LOW_ADDR(EXT(MPEntries)) /* Get the bottom of the MP spec area */ - lwz r10,kSignalProcessor*4(r11) /* Get the routine entry point */ - mflr r14 /* Save the return in an unused register */ - mtlr r10 /* Set it */ - blrl /* Call the routine */ - mtlr r14 /* Restore firmware caller address */ - blr /* Leave... */ - - - -/* - * Stop another processor - */ - -ENTRY(MPstopLL, TAG_NO_FRAME_USED) - - lis r11,HIGH_ADDR(EXT(MPEntries)) /* Get the address of the MP entry block (in the V=R area) */ - ori r11,r11,LOW_ADDR(EXT(MPEntries)) /* Get the bottom of the MP spec area */ - lwz r10,kStopProcessor*4(r11) /* Get the routine entry point */ - mflr r14 /* Save the return in an unused register */ - mtlr r10 /* Set it */ - blrl /* Call the routine */ - mtlr r14 /* Restore firmware caller address */ - blr /* Leave... */ - - -/* - * Third section: Miscellaneous MP related routines - */ - - - -/* - * All non-primary CPUs start here. - * We are dispatched by the SMP driver. Addressing is real (no DR or IR), - * interruptions disabled, etc. R3 points to the CPUStatusArea (CSA) which contains - * most of the state for the processor. This is set up by the primary. Note that we - * do not use everything in the CSA. Caches should be clear and coherent with - * no paradoxies (well, maybe one doxie, a pair would be pushing it). - */ - -ENTRY(start_secondary,TAG_NO_FRAME_USED) - - mr r31,r3 /* Get the pointer to the CSA */ - - lis r21,HIGH_ADDR(SpinTimeOut) /* Get the top part of the spin timeout */ - ori r21,r21,LOW_ADDR(SpinTimeOut) /* Slam in the bottom part */ - -GetValid: lbz r10,CSAregsAreValid(r31) /* Get the CSA validity value */ - - - mr. r10,r10 /* Is the area valid yet? */ - bne GotValid /* Yeah... */ - addic. r21,r21,-1 /* Count the try */ - isync /* Make sure we don't prefetch the valid flag */ - bge+ GetValid /* Still more tries left... */ - blr /* Return and cancel startup request... 
*/ - -GotValid: li r21,0 /* Set the valid flag off (the won't be after the RFI) */ - lwz r10,CSAdec(r31) /* Get the decrimenter */ - stb r21,CSAregsAreValid(r31) /* Clear that validity flag */ - - lwz r11,CSAdbat+(0*8)+0(r31) /* Get the first DBAT */ - lwz r12,CSAdbat+(0*8)+4(r31) /* Get the first DBAT */ - lwz r13,CSAdbat+(1*8)+0(r31) /* Get the second DBAT */ - mtdec r10 /* Set the decrimenter */ - lwz r14,CSAdbat+(1*8)+4(r31) /* Get the second DBAT */ - mtdbatu 0,r11 /* Set top part of DBAT 0 */ - lwz r15,CSAdbat+(2*8)+0(r31) /* Get the third DBAT */ - mtdbatl 0,r12 /* Set lower part of DBAT 0 */ - lwz r16,CSAdbat+(2*8)+4(r31) /* Get the third DBAT */ - mtdbatu 1,r13 /* Set top part of DBAT 1 */ - lwz r17,CSAdbat+(3*8)+0(r31) /* Get the fourth DBAT */ - mtdbatl 1,r14 /* Set lower part of DBAT 1 */ - lwz r18,CSAdbat+(3*8)+4(r31) /* Get the fourth DBAT */ - mtdbatu 2,r15 /* Set top part of DBAT 2 */ - lwz r11,CSAibat+(0*8)+0(r31) /* Get the first IBAT */ - mtdbatl 2,r16 /* Set lower part of DBAT 2 */ - lwz r12,CSAibat+(0*8)+4(r31) /* Get the first IBAT */ - mtdbatu 3,r17 /* Set top part of DBAT 3 */ - lwz r13,CSAibat+(1*8)+0(r31) /* Get the second IBAT */ - mtdbatl 3,r18 /* Set lower part of DBAT 3 */ - lwz r14,CSAibat+(1*8)+4(r31) /* Get the second IBAT */ - mtibatu 0,r11 /* Set top part of IBAT 0 */ - lwz r15,CSAibat+(2*8)+0(r31) /* Get the third IBAT */ - mtibatl 0,r12 /* Set lower part of IBAT 0 */ - lwz r16,CSAibat+(2*8)+4(r31) /* Get the third IBAT */ - mtibatu 1,r13 /* Set top part of IBAT 1 */ - lwz r17,CSAibat+(3*8)+0(r31) /* Get the fourth IBAT */ - mtibatl 1,r14 /* Set lower part of IBAT 1 */ - lwz r18,CSAibat+(3*8)+4(r31) /* Get the fourth IBAT */ - mtibatu 2,r15 /* Set top part of IBAT 2 */ - lwz r11,CSAsdr1(r31) /* Get the SDR1 value */ - mtibatl 2,r16 /* Set lower part of IBAT 2 */ - lwz r12,CSAsprg(r31) /* Get SPRG0 (the per_proc_info address) */ - mtibatu 3,r17 /* Set top part of IBAT 3 */ - lwz r13,CSAmsr(r31) /* Get the MSR */ - mtibatl 3,r18 /* Set lower part of IBAT 3 */ - lwz r14,CSApc(r31) /* Get the PC */ - sync /* Sync up */ - mtsdr1 r11 /* Set the SDR1 value */ - sync /* Sync up */ - - la r10,CSAsr-4(r31) /* Point to SR 0 - 4 */ - li r9,0 /* Start at SR 0 */ - -LoadSRs: lwz r8,4(r10) /* Get the next SR in line */ - addi r10,r10,4 - mtsrin r8,r9 /* Load up the SR */ - addis r9,r9,0x1000 /* Bump to the next SR */ - mr. r9,r9 /* See if we wrapped back to 0 */ - bne+ LoadSRs /* Not yet... 
*/ - - lwz r0,CSAgpr+(0*4)(r31) /* Get a GPR */ - lwz r9,CSAsprg+(1*4)(r31) /* Get SPRG1 (the initial active savearea) */ - mtsrr1 r13 /* Set the MSR to dispatch */ - lwz r1,CSAgpr+(1*4)(r31) /* Get a GPR */ - mtsprg 0,r12 /* Set the SPRG0 (per_proc_into) value */ - lwz r2,CSAgpr+(2*4)(r31) /* Get a GPR */ - mtsrr0 r14 /* Set the PC to dispatch */ - lwz r3,CSAgpr+(3*4)(r31) /* Get a GPR */ - mtsprg 1,r9 /* Set the SPRG1 (the initial active savearea) value */ - lwz r4,CSAgpr+(4*4)(r31) /* Get a GPR */ - lwz r5,CSAgpr+(5*4)(r31) /* Get a GPR */ - lwz r6,CSAgpr+(6*4)(r31) /* Get a GPR */ - lwz r7,CSAgpr+(7*4)(r31) /* Get a GPR */ - lwz r8,CSAgpr+(8*4)(r31) /* Get a GPR */ - lwz r9,CSAgpr+(9*4)(r31) /* Get a GPR */ - lwz r10,CSAgpr+(10*4)(r31) /* Get a GPR */ - lwz r11,CSAgpr+(11*4)(r31) /* Get a GPR */ - lwz r12,CSAgpr+(12*4)(r31) /* Get a GPR */ - lwz r13,CSAgpr+(13*4)(r31) /* Get a GPR */ - lwz r14,CSAgpr+(14*4)(r31) /* Get a GPR */ - lwz r15,CSAgpr+(15*4)(r31) /* Get a GPR */ - lwz r16,CSAgpr+(16*4)(r31) /* Get a GPR */ - lwz r17,CSAgpr+(17*4)(r31) /* Get a GPR */ - lwz r18,CSAgpr+(18*4)(r31) /* Get a GPR */ - lwz r19,CSAgpr+(19*4)(r31) /* Get a GPR */ - lwz r20,CSAgpr+(20*4)(r31) /* Get a GPR */ - lwz r21,CSAgpr+(21*4)(r31) /* Get a GPR */ - lwz r22,CSAgpr+(22*4)(r31) /* Get a GPR */ - lwz r23,CSAgpr+(23*4)(r31) /* Get a GPR */ - lwz r24,CSAgpr+(24*4)(r31) /* Get a GPR */ - lwz r25,CSAgpr+(25*4)(r31) /* Get a GPR */ - lwz r26,CSAgpr+(26*4)(r31) /* Get a GPR */ - lwz r27,CSAgpr+(27*4)(r31) /* Get a GPR */ - lwz r28,CSAgpr+(28*4)(r31) /* Get a GPR */ - lwz r29,CSAgpr+(29*4)(r31) /* Get a GPR */ - lwz r30,CSAgpr+(30*4)(r31) /* Get a GPR */ - lwz r31,CSAgpr+(31*4)(r31) /* Get a GPR */ - - sync /* Make sure we're sunk */ - - rfi /* Get the whole shebang going... */ - - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - - - - -/* - * This routine handles requests to firmware from another processor. It is actually the second level - * of a three level signaling protocol. The first level is handled in the physical MP driver. It is the - * basic physical control for the processor, e.g., physical stop, reset, start. The second level (this - * one) handles cross-processor firmware requests, e.g., complete TLB purges. The last are AST requests - * which are handled directly by mach. - * - * If this code handles the request (based upon MPPICParm0BU which is valid until the next SIGP happens - - * actually, don't count on it once you enable) it will RFI back to the - * interrupted code. If not, it will return and let the higher level interrupt handler be called. - * - * We need to worry about registers we use here, check in lowmem_vectors to see what is boten and verboten. - * - * Note that there are no functions implemented yet. - */ - - -ENTRY(MPsignalFW, TAG_NO_FRAME_USED) - - - mfspr r7,pir /* Get the processor address */ - lis r6,HIGH_ADDR(EXT(MPPICPUs)) /* Get high part of CPU control block array */ - rlwinm r7,r7,5,23,26 /* Get index into CPU array */ - ori r6,r6,HIGH_ADDR(EXT(MPPICPUs)) /* Get low part of CPU control block array */ - add r7,r7,r6 /* Point to the control block for this processor */ - lwz r6,MPPICParm0BU(r7) /* Just pick this up for now */ - blr /* Leave... 
*/ - - -/* - * Make space for the maximum supported CPUs in the data section - */ - -#ifdef __ELF__ - .section ".data" -#else - .data -#endif - .align 5 -EXT(CSA): - .set ., .+(CSAsize*NCPUS) -#ifndef __MACHO__ - .type EXT(CSA), @object - .size EXT(CSA), CSAsize*NCPUS -#endif - .globl EXT(CSA) diff --git a/osfmk/ppc/Makefile b/osfmk/ppc/Makefile index eecd880e5..fbef39c9c 100644 --- a/osfmk/ppc/Makefile +++ b/osfmk/ppc/Makefile @@ -8,9 +8,6 @@ include $(MakeInc_cmd) include $(MakeInc_def) -DATAFILES = \ - asm.h machlimits.h - EXPORT_ONLY_FILES = \ asm.h \ cpu_capabilities.h \ @@ -19,13 +16,11 @@ EXPORT_ONLY_FILES = \ hw_lock_types.h \ io_map_entries.h \ proc_reg.h \ - machine_routines.h \ + machine_routines.h \ Diagnostics.h \ savearea.h \ mappings.h -INSTALL_MD_LIST = ${DATAFILES} - INSTALL_MD_DIR = ppc INSTALL_MD_LCL_LIST = cpu_capabilities.h diff --git a/osfmk/ppc/POWERMAC/dbdma.c b/osfmk/ppc/POWERMAC/dbdma.c deleted file mode 100644 index 8d7101b2c..000000000 --- a/osfmk/ppc/POWERMAC/dbdma.c +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_COPYRIGHT@ - * - */ - -#include - -#include /* For isync */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -static int dbdma_alloc_index = 0; -dbdma_command_t *dbdma_alloc_commands = NULL; - -void -dbdma_start(dbdma_regmap_t *dmap, dbdma_command_t *commands) -{ - unsigned long addr = kvtophys((vm_offset_t) commands); - - if (addr & 0xf) - panic("dbdma_start command structure not 16-byte aligned"); - - dmap->d_intselect = 0xff; /* Endian magic - clear out interrupts */ - DBDMA_ST4_ENDIAN(&dmap->d_control, - DBDMA_CLEAR_CNTRL( (DBDMA_CNTRL_ACTIVE | - DBDMA_CNTRL_DEAD | - DBDMA_CNTRL_WAKE | - DBDMA_CNTRL_FLUSH | - DBDMA_CNTRL_PAUSE | - DBDMA_CNTRL_RUN ))); - eieio(); - - while (DBDMA_LD4_ENDIAN(&dmap->d_status) & DBDMA_CNTRL_ACTIVE) - eieio(); - - dmap->d_cmdptrhi = 0; eieio();/* 64-bit not yet */ - DBDMA_ST4_ENDIAN(&dmap->d_cmdptrlo, addr); eieio(); - - DBDMA_ST4_ENDIAN(&dmap->d_control, DBDMA_SET_CNTRL(DBDMA_CNTRL_RUN)); - eieio(); - -} - -void -dbdma_stop(dbdma_regmap_t *dmap) -{ - DBDMA_ST4_ENDIAN(&dmap->d_control, DBDMA_CLEAR_CNTRL(DBDMA_CNTRL_RUN) | - DBDMA_SET_CNTRL(DBDMA_CNTRL_FLUSH)); eieio(); - - while (DBDMA_LD4_ENDIAN(&dmap->d_status) & (DBDMA_CNTRL_ACTIVE|DBDMA_CNTRL_FLUSH)) - eieio(); -} - -void -dbdma_flush(dbdma_regmap_t *dmap) -{ - DBDMA_ST4_ENDIAN(&dmap->d_control,DBDMA_SET_CNTRL(DBDMA_CNTRL_FLUSH)); - eieio(); - - while (DBDMA_LD4_ENDIAN(&dmap->d_status) & (DBDMA_CNTRL_FLUSH)) - eieio(); -} - -void -dbdma_reset(dbdma_regmap_t *dmap) -{ - DBDMA_ST4_ENDIAN(&dmap->d_control, - DBDMA_CLEAR_CNTRL( (DBDMA_CNTRL_ACTIVE | - DBDMA_CNTRL_DEAD | - DBDMA_CNTRL_WAKE | - DBDMA_CNTRL_FLUSH | - DBDMA_CNTRL_PAUSE | - DBDMA_CNTRL_RUN ))); - eieio(); - - while (DBDMA_LD4_ENDIAN(&dmap->d_status) & DBDMA_CNTRL_RUN) - eieio(); -} - -void -dbdma_continue(dbdma_regmap_t *dmap) -{ - DBDMA_ST4_ENDIAN(&dmap->d_control, DBDMA_SET_CNTRL(DBDMA_CNTRL_RUN|DBDMA_CNTRL_WAKE) | DBDMA_CLEAR_CNTRL(DBDMA_CNTRL_PAUSE|DBDMA_CNTRL_DEAD)); - eieio(); -} - -void -dbdma_pause(dbdma_regmap_t *dmap) -{ - DBDMA_ST4_ENDIAN(&dmap->d_control,DBDMA_SET_CNTRL(DBDMA_CNTRL_PAUSE)); - eieio(); - - while (DBDMA_LD4_ENDIAN(&dmap->d_status) & DBDMA_CNTRL_ACTIVE) - eieio(); -} - -dbdma_command_t * -dbdma_alloc(int count) -{ - dbdma_command_t *dbdmap; - - /* - * For now, we assume that dbdma_alloc() is called only when - * the system is bootstrapping, i.e. before the other CPUs - * are activated... - * If that's not the case, we need to protect the global - * variables here. - */ - assert(cpu_number() == master_cpu); - - if (dbdma_alloc_index == 0) - dbdma_alloc_commands = (dbdma_command_t *) io_map(0, PAGE_SIZE); - if ((dbdma_alloc_index+count) >= PAGE_SIZE / sizeof(dbdma_command_t)) - panic("Too many dbdma command structures!"); - - dbdmap = &dbdma_alloc_commands[dbdma_alloc_index]; - dbdma_alloc_index += count; - return dbdmap; -} diff --git a/osfmk/ppc/POWERMAC/mp/MPPlugIn.h b/osfmk/ppc/POWERMAC/mp/MPPlugIn.h deleted file mode 100644 index 1a593fd7b..000000000 --- a/osfmk/ppc/POWERMAC/mp/MPPlugIn.h +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). 
You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_FREE_COPYRIGHT@ - */ -/* - * @APPLE_FREE_COPYRIGHT@ - */ - -/* - MPPlugIn.h - - Herein we find all the global MP plugin stuff - - Lovingly crafted by Bill Angell using traditional methods - -*/ - - -/* - * External hook completion codes - * - * The MP plugin's external interrupt hook returns one of these codes - */ - -#define kMPVainInterrupt 0 /* Interruption in vain -- ignore it */ -#define kMPIOInterruptPending 1 /* This is an I/O interruption -- handle it */ -#define kMPSignalPending 2 /* This is a pending signal -- handle it */ - - -/* *********************************************************************** - * Entry point jump table entry numbers - * *********************************************************************** */ - -#define kCountProcessors 0 -#define kStartProcessor 1 /* ->cpu address, ->start address, ->pass-thru parm */ -#define kResumeProcessor 2 /* ->cpu address */ -#define kStopProcessor 3 /* ->cpu address */ -#define kResetProcessor 4 /* ->cpu address */ -#define kSignalProcessor 5 /* ->cpu address */ -#define kStoreProcessorStatus 6 /* ->cpu address, ->status area address */ -#define kSynchClock 7 /* ->cpu address */ -#define kExternalHook 8 /* no parms */ -#define kProcessorState 9 /* ->cpu address */ -#define kRunSIGPRun 10 /* no parms */ -#define kPhoneyFirmware 11 /* Dummy kernel for alternate processors */ - -#define kMPPlugInMaxCall 11 /* set MPPlugInMaxCall to the highest-numbered call */ - - -/* *********************************************************************** - * MP Plug-In specification - * - * The address of this area is passed to the MP plugin by the initialization code. If the - * version ID and the installed hardware match the MP plugin, it returns its memory - * requirements and a table of offsets to its entry points. 
- * *********************************************************************** */ - -#define kMPPlugInVersionID 1 - -#define kSIGPUninitializedState 0 -#define kSIGPResetState 1 -#define kSIGPStoppedState 2 -#define kSIGPOperatingState 3 -#define kSIGPErrorState 4 - -#define kSIGPnoErr 0 -#define kSIGPInvalidStateErr -3999 -#define kSIGPInterfaceBusyErr -3998 -#define kSIGPPrivilegeErr -3997 -#define kSIGPNoPlugInErr -3996 -#define kTimeBaseSynchronizationErr -3995 -#define kSIGPTargetAddrErr -3994 -#define kSIGPInvalidStatusErr -3993 - -#define kMPPlugInInstallFailed -4999 -#define kMPPlugInInternalError -4998 - -/* - * *********************************************************************** - * Signal processor request codes - * *********************************************************************** - */ - -#define SIGPast 0 /* Requests an ast on target processor */ -#define SIGPptlb 1 /* Requests a total purge of the TLB */ -#define SIGPkdb 2 /* Requests a KDB entry */ - -/* - * *********************************************************************** - * Temporary debugging error codes (well, at least as temporary as the income tax) - * *********************************************************************** - */ -#define kMPPHairyPalms -10002 -#define kMPPOffline -10003 -#define kMPPBadState -10004 -#define kMPPInvalCPU -10005 -#define kMPPCantLock -10006 -#define kMPPNotReady -10007 -#define kMPPNotStopped -10008 -#define kMPPBadCPU -10009 -#define kMPPOnly1CPU -10010 -#define kMPPBadVers -10011 -#define kMPPNotRunning -10012 -#define kMPPTimeOut -10013 -#define kMPPInitTO1 -10014 -#define kMPPInitTO2 -10015 -#define kMPPInitTO3 -10016 - - -/* - * *********************************************************************** - * Let's define some hardware stuff - * *********************************************************************** - */ - -#define Bandit1 0xF2000000 -#define PCI1AdrReg 0xF2800000 -#define GrandCentral 0xF3000000 -#define EtherNetROM 0xF3019000 -#define HammerHead 0xF8000000 -#define ArbConfig 0x0090 -#define TwoCPU 0x02 -#define WhoAmI 0x00B0 -#define PriCPU 0x10 -#define SecCPU 0x08 -#define IntReg 0x00C0 -#define SecInt 0x80 - - -/* - * *********************************************************************** - * Let's define the flags for MPPInterface - * *********************************************************************** - */ - -#define SpinTimeOut 30000000 - -#define MPPICmsgp 0xc0000000 /* Message pending (busy + pass) */ -#define MPPICBusy 0x80000000 /* Processor area busy, i.e., locked */ -#define MPPICPass 0x40000000 /* Busy lock passed to receiving processor */ -#define MPPICOnline 0x20000000 /* Processor is online */ -#define MPPICReady 0x10000000 /* Processor is ready, i.e., started, not reset */ -#define MPPICStop 0x08000000 /* Processor is stopped */ -#define MPPICBset 0x000000FF /* Processor that owns busy, i.e., the ID of */ - /* whomever set busy. When a busy is passed, */ - /* this is the requestor of the function. 
*/ -#define MPPICfunc 0x0000FF00 /* Current function */ -#define MPPICfIdle 0x00 /* No function pending */ -#define MPPICfStrt 0x01 /* Start the processor, physical address in */ - /* MPPIParm0 */ -#define MPPICfResm 0x02 /* Resume a stopped processor */ -#define MPPICfStop 0x03 /* Stop a processor */ -#define MPPICfSigp 0x04 /* Signal a processor */ -#define MPPICfStat 0x05 /* Store the processor machine state - */ - /* physical address of response in MPPIParm0 */ -#define MPPICfTBsy 0x06 /* Synchronize timebase - */ - /* TB image in MPPIParm0 and MPPIParm1 */ -#define MPPICfReset 0x07 /* Reset the processor */ -#define MPPICfTBsy1 0x81 /* TB sync, phase 1 */ -#define MPPICfTBsy2 0x82 /* TB sync, phase 2 */ -#define MPPICSigp 0x80000000 /* Processor has signal pending (keep signal status when stopped) */ -#define MPPICXRun 0x40000000 /* Explicit SIGP run call */ - - - -#ifndef __ASSEMBLER__ - -typedef unsigned char CPUState; -typedef unsigned int CPUNotification; - -struct MPPlugInSpec { /* This is MPSxxxx for assembler */ - unsigned int versionID; /* Version ID, must match */ - unsigned int *areaAddr; /* Virtual address of area to be */ - /* relocated to physical memory */ - unsigned int areaSize; /* Size of area to be relocated */ - unsigned int *offsetTableAddr; /* Virtual address of table of entry offsets */ - unsigned int *baseAddr; /* Common base area - used for debugging */ - unsigned int *dataArea; /* Pointer to the MP workarea - used for debugging */ - unsigned int *CPUArea; /* Pointer to the CPU workarea - used for debugging */ - unsigned int *SIGPhandler; /* Physical address of signal interrupt filter */ -}; - -typedef struct MPPlugInSpec MPPlugInSpec; -typedef MPPlugInSpec *MPPlugInSpecPtr; - -struct MPEntryPts { - unsigned int EntAddr[kMPPlugInMaxCall+1]; /* Real addresses of all plugin entry points */ -}; - -typedef struct MPEntryPts MPEntryPts; - -struct SystemRegister { - unsigned int regno; - unsigned int contents; -}; - -typedef struct SystemRegister SystemRegister; - -typedef struct FPRegs { - unsigned int lo; - unsigned int hi; -} FPRegs; - -struct BATregs { - unsigned int upper; - unsigned int lower; -}; - -typedef struct BATregs BATregs; - - -#define kSysRegCount 16 - -struct CPUStatusArea { /* 0000 This is CSAxxxxx for assembler */ - -/* - * Note that this guy always has to be in one-to-one mapped area contiguously - */ - - CPUState state; /* 0000 */ - unsigned char regsAreValid; /* 0001 */ - unsigned char filler[2]; /* 0002 */ - unsigned int gpr[32]; /* 0004 */ - FPRegs fpr[32]; /* 0084 */ - unsigned int cr; /* 0184 */ - unsigned int fpscr; /* 0188 */ - unsigned int xer; /* 018C */ - unsigned int lr; /* 0190 */ - unsigned int ctr; /* 0194 */ - unsigned int tbu; /* 0198 This is rtcu on 601. */ - unsigned int tbl; /* 019C This is rtcl on 601. 
*/ - unsigned int pvr; /* 01A0 */ - BATregs ibat[4]; /* 01A4 */ - BATregs dbat[4]; /* 01E4 */ - unsigned int sdr1; /* 0224 */ - unsigned int sr[16]; /* 0228 */ - unsigned int dar; /* 0268 */ - unsigned int dsisr; /* 026C */ - unsigned int sprg[4]; /* 0270 */ - unsigned int srr0; /* 0280 */ - unsigned int srr1; /* 0284 */ - unsigned int dec; /* 0288 */ - unsigned int dabr; /* 028C */ - unsigned int iabr; /* 0290 */ - unsigned int ear; /* 0294 */ - unsigned int hid[16]; /* 0298 */ - unsigned int mmcr[2]; /* 02D8 */ - unsigned int pmc[4]; /* 02E0 */ - unsigned int pir; /* 02F0 */ - unsigned int sda; /* 02F4 */ - unsigned int sia; /* 02F8 */ - unsigned int mq; /* 02FC */ - - unsigned int msr; /* 0300 */ - unsigned int pc; /* 0304 */ - - SystemRegister sysregs[kSysRegCount]; /* 0308 */ - - unsigned int filler2[6]; /* 0388 Always pad up to 32-byte boundary */ - /* 03A0 */ -}; - -typedef struct CPUStatusArea CPUStatusArea; -typedef CPUStatusArea *CPUStatusAreaPtr; - -extern CPUStatusArea CSA[NCPUS]; - -struct SenseInfo { - CPUNotification notification; - CPUState state; -}; - -typedef struct SenseInfo SenseInfo; -typedef SenseInfo *SenseInfoPtr; - - -struct MPPInterface { - - unsigned int MPPICStat; /* Processor status (interlocked update for this one) */ - unsigned int MPPICParm0; /* SIGP parm 0 */ - unsigned int MPPICParm1; /* SIGP parm 1 */ - unsigned int MPPICParm2; /* SIGP parm 2 */ - unsigned int MPPICspare0; /* unused */ - unsigned int MPPICspare1; /* unused */ - unsigned int MPPICParm0BU; /* Parm 0 backed up here at 'rupt time for safe keeping */ - unsigned int MPPICPriv; /* Processor status (interlocked update for this one) */ -}; - -typedef struct MPPInterface MPPInterface; -typedef MPPInterface *MPPInterfacePtr; - -extern MPPInterface MPPICPUs[]; - - -/* *********************************************************************** - * Function prototypes and data areas - * *********************************************************************** */ - -extern unsigned int MPgetProcCount (void); -extern unsigned int MPstart (unsigned int cpu, unsigned int sadr, unsigned int parm); -extern unsigned int MPexternalHook (void); -extern unsigned int MPsignal (unsigned int cpu, unsigned int SIGPparm); -extern unsigned int MPstop (unsigned int cpu); -#if 0 -extern unsigned int MPCPUAddress (void); -extern unsigned int MPresume (unsigned int cpu); -extern unsigned int MPreset (unsigned int cpu); -extern unsigned int MPSense (unsigned int cpu, unsigned int *info); -extern unsigned int MPstoreStatus (unsigned int cpu, unsigned int *statusArea); -extern unsigned int MPSetStatus (unsigned int cpu, unsigned int *statusArea); -extern unsigned int MPgetSignal (void); -extern unsigned int MPsyncTB (void); -extern unsigned int MPcheckPending (void); -#endif -extern int MPinstall (unsigned int physAddr, unsigned int band1, unsigned int hammerh, unsigned int grandc, - unsigned int pci1ar, unsigned int enetr); -extern unsigned int MPprobe (MPPlugInSpecPtr spec, unsigned int hammerh); - -extern void start_secondary (void); -extern void mp_intr (void); - - -extern MPPlugInSpec MPspec; /* An area for the MP interfaces */ -extern MPEntryPts MPEntries; /* Real addresses of plugin routines */ - -#endif /* ndef __ASSEMBLER */ diff --git a/osfmk/ppc/POWERMAC/mp/MP_2p.s b/osfmk/ppc/POWERMAC/mp/MP_2p.s deleted file mode 100644 index e4951d28a..000000000 --- a/osfmk/ppc/POWERMAC/mp/MP_2p.s +++ /dev/null @@ -1,2412 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
- * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_COPYRIGHT_INTERNAL_USE_ONLY@ - */ - -/* - MP_2p.s - - MP low-level signaling, configuration, et al. This is for an Apple/Daystar 2P board - - Lovingly crafted by Bill Angell using traditional methods - -*/ - -#include -#include -#include -#include -#include - - - - .set MPPlugInVersion,0 /* Current version code */ - -/* */ -/* Interfaces to hardware */ -/* */ - - .set PCI1ARdisp, 0x00800000 /* Displacement from Bandit to PCI1 address configuration register */ - .set GrandCdisp, 0x01000000 /* Displacement from Bandit to Grand Central */ - .set EventsReg, 0x20 /* Interruption events register (latched) */ - .set LevelsReg, 0x2C /* Interruption levels register (unlatched) */ - .set MaskReg, 0x24 /* Interruption mask register */ - .set ClearReg, 0x28 /* Interruption clear register */ - .set TicksPerMic, 11 /* We'll use 11 ticks per µS - 120MHz is really 10, 180MHz is 11.24 */ - .set EtherNRdisp, 0x01019000 /* Displacement into bandit of EtherNet ROM */ - -#ifdef __ELF__ - .section ".data" -#else - .data -#endif - - .align 5 /* Get us out to the end */ - - .globl MPPIwork -#ifdef __ELF__ - .type MPPIwork,@function -#endif - -MPPIwork: -MPPIstatus: .byte 0 /* Global MP board status */ - .set MPPIinit, 0x80 /* Global initialization complete */ - .set MPPI2Pv2, 0x40 /* Second rev of 2P board (no watchdog and different state machine) */ - .byte 0 /* Reserved */ -MPPIinst: .byte 0 /* Mask of CPUs installed */ -MPPIonline: .byte 0 /* Mask of CPUs online (i.e., initialized) */ -MPPIlogCPU: .long 0 /* Used to configure CPU addresses */ -MPPITBsync: .long 0 /* Used to sync time bases */ - .long 0 -MPPIHammer: .long 0 /* Address of HammerHead */ -MPPIGrandC: .long 0 /* Address of GrandCentral */ -MPPIPCI1Adr: .long 0 /* Address of PCI1's config reg addr */ -MPPIEther: .long 0 /* Address of EtherNet ROM */ - - .align 5 -MPPISncFght: .fill 4,4,0 /* Space for 9 passes of a TB sync fight + 1 guard pass */ - .fill 4,4,0 - .fill 4,4,0 - .fill 4,4,0 - .fill 4,4,0 - .fill 4,4,0 - .fill 4,4,0 - .fill 4,4,0 - .fill 4,4,0 - .fill 4,4,0 - .align 7 /* Point to the start of the CPU status */ - - .globl EXT(MPPICPUs) -#ifdef __ELF__ - .type EXT(MPPICPUs),@function -#endif -EXT(MPPICPUs): /* Start of Processor specific areas */ -/* There are 8 of these indexed by processor number */ - - -MPPICPU0: .fill 8,4,0 /* First processor */ -MPPICPU1: .fill 8,4,0 /* Second processor */ -MPPICPU2: .fill 8,4,0 /* Third processor */ -MPPICPU3: .fill 8,4,0 /* Fourth processor */ - .set MPPIMaxCPU, (.-EXT(MPPICPUs)-32)/32 /* Get the 
maximum CPU address */ - - - .text - -/******************************************************************************************************** */ -/******************************************************************************************************** */ -/* */ -/* Here starteth ye stuff */ -/* */ -/******************************************************************************************************** */ -/******************************************************************************************************** */ - -/******************************************************************************************************** */ -/* */ -/* Validate that the hardware matches with our code. At this point, we cannot check */ -/* for anything other than the possibility of this working. There's no version code */ -/* or nothin'. So, if we have a second processor and are a 604 or 604e, we'll say */ -/* we're capable. Also we'll check version codes for our code. */ -/* */ -/* When we get here, DDAT and IDAT are both on, 'rupts are disabled. */ -/* */ -/* We're called like this: */ -/* OSStatus MP_probe(MPPlugInSpecPtr spec, UInt32 HammerheadAddr); */ -/* */ -/******************************************************************************************************** */ - -ENTRY(MPprobe, TAG_NO_FRAME_USED) - - -MPPIbase: mfpvr r7 /* Get the processor version */ - rlwinm r7,r7,16,16,31 /* Isolate the processor type */ - - lbz r5,ArbConfig(r4) /* See if there is another processor */ - - andi. r5,r5,TwoCPU /* Are we a real live two processor? */ - beq OneWay /* Nope, we be gone... */ - - cmplwi cr0,r7,4 /* Are we a 604? */ - beq SeemsOK /* Yeah, we're cool... */ - cmplwi cr0,r7,9 /* Are we a 604E? */ - beq SeemsOK /* Yeah, go finish up... */ - -OneWay: li r3,0 /* Say we can't find the proper CPU */ - blr /* Leave... */ - -SeemsOK: mr r10,r3 /* Save the parameter list */ - - lwz r4,MPSversionID(r10) /* Get the version ID */ - cmplwi cr0,r4,kMPPlugInVersionID /* Correct version? */ - beq IsOK /* Yeah, we think we're ok... */ - - li r3,0 /* Set bad version' */ - blr /* Leave... */ - -IsOK: mflr r11 /* Save the LR */ - lis r9,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - bl SetBase1 /* Jump to the next instruction */ -SetBase1: mflr r12 /* Get the base register */ - ori r9,r9,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - addi r12,r12,LOW_ADDR(MPPIbase-SetBase1) /* Adjust to the start of all our code */ - - stw r12,MPSbaseAddr(r10) /* Save off the common base for all functions */ - - la r5,LOW_ADDR(MPPIFunctions-MPPIbase)(r12) /* Point to the base of all functions */ - stw r5,MPSareaAddr(r10) /* Pass back the code address */ - - la r5,LOW_ADDR(MPPIFuncOffs-MPPIbase)(r12) /* Point to the function offset table */ - stw r5,MPSoffsetTableAddr(r10) /* Pass back the pointer to the offset table */ - - li r5,LOW_ADDR(MPPISize-MPPIFunctions) /* Get our size without data area */ - stw r5,MPSareaSize(r10) /* Save it */ - - stw r9,MPSdataArea(r10) /* Save it */ - - la r5,LOW_ADDR(EXT(MPPICPUs)-MPPIwork)(r9) /* Point to the CPU area base */ - stw r5,MPSCPUArea(r10) /* Save it */ - - mtlr r11 /* Restore that return address */ - li r3,1 /* Set no error */ - blr /* Leave, we're all done... */ - -/******************************************************************************************************** */ -/******************************************************************************************************** */ -/* */ -/* Here starteth ye code that starteth up ye second prothether. 
*/ -/* Yea, though ye prothether executeth asynchronously, it appears unto men */ -/* in ye shape of a synchronous process. By ye instruction of He who gave it */ -/* form and being, it stopeth to worship and praise its Lord, to joyously */ -/* receive His blessings and teachings, to guide its way along the path to */ -/* righteous execution. */ -/* */ -/******************************************************************************************************** */ -/******************************************************************************************************** */ - - -/******************************************************************************************************** */ -/* */ -/* Initialize the MP hardware. This will bring the other processor online. */ -/* */ -/* First we will tick the board to its 5th state, the "TBEN off" state. */ -/* */ -/* Just for giggles, here's the states: */ -/* */ -/* 1) 1st ROM - This state exists after motherboard reset */ -/* 2) Open Firmware - Transitions here when the SecInt line is first asserted */ -/* Open Firmware attempts to execute some code on the secondary */ -/* processor to obtain the PVR register. It's got some problems */ -/* and hangs the secondary disabled. */ -/* 3) Reset (my name) - Entered when the SecInt line is deasserted. A timer starts and */ -/* 468µS later the reset line is pulled. I may have this wrong here, */ -/* it may be that the reset line is held for 468µS. Either way, */ -/* this state is invisible to us. */ -/* 4) 2nd ROM - This state exists when the secondary processor begins executing */ -/* after the reset. */ -/* 5) TBEN off - We transition here when SecInt is asserted in the 2nd ROM state. */ -/* In this state, the TBEN pin is set to disable the timebase from */ -/* running on all processors, thus freezing time. (Performance analysis */ -/* note: here would be the best time to run stats, all tests would */ -/* run in 0 time giving us infinite speed.) Also the "primary arbitration" */ -/* mode is set. This mode causes the CPU board to arbitrate both processors */ -/* using a single bus master. This gets us around the L2 cache dumbness. */ -/* We should also note that because of this, there is now no way to */ -/* tell if we are on the secondary processor, the WhoAmI register will */ -/* always indicate the primary processor. We need to have sewn */ -/* name tags into our underwear before now. */ -/* Finally, this state is the only way we can tell if we are executing */ -/* on the older version of the 2-way board. When it is in this state */ -/* "primary arbitration" has not been enabled yet. The WhoAmI register */ -/* will indicate if we are on the secondary processor or not. We should */ -/* check this because we need to do signals differently. */ -/* 6) TBEN on - The next assertion of SecInt brings us to our final destination. For */ -/* those of you who will be deplaning, please remember that timebases */ -/* are running and primary arbitration is enabled. Always remember: */ -/* buckle up for safety and if you're tired pull over for a rest. 
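The six states just described, collected into a C enum for quick reference; the identifiers are editorial shorthand for the narrative above (the hardware itself only sees SecInt transitions, not these names):

    /* Bring-up sequence of the 2P board, one SecInt tick per transition. */
    enum mp2p_board_state {
        MP2P_FIRST_ROM = 1,   /* after motherboard reset */
        MP2P_OPEN_FIRMWARE,   /* SecInt first asserted */
        MP2P_RESET,           /* SecInt deasserted; ~468µS reset sequence */
        MP2P_SECOND_ROM,      /* secondary executing after reset */
        MP2P_TBEN_OFF,        /* timebases frozen, primary arbitration on */
        MP2P_TBEN_ON          /* timebases running; final state */
    };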
*/ -/* */ -/******************************************************************************************************** */ - -ENTRY(MPinstall, TAG_NO_FRAME_USED) - -/* int MP_install(unsigned int *physAddr, unsigned int band1, unsigned int hammerh, unsigned int grandc, - * unsigned int pci1ar, unsigned int enetr); - */ - - lis r11,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - mflr r0 /* Save the LR */ - ori r11,r11,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - - stw r5,MPPIHammer-MPPIwork(r11) /* Save the HammerHead address for later */ - stw r6,MPPIGrandC-MPPIwork(r11) /* Save address of Grand Central */ - stw r7,MPPIPCI1Adr-MPPIwork(r11) /* Save the PCI1 address register address */ - stw r8,MPPIEther-MPPIwork(r11) /* Save Ethernet ROM address */ - - li r4,LOW_ADDR(0xC080) /* Set CPU 0&1 installed, CPU 0 online */ - lis r10,(MPPICOnline+MPPICReady)>>16 /* Set CPU 0 online and ready */ - - mfspr r6,pir /* Get the PIR contents */ - - sth r4,MPPIinst-MPPIwork(r11) /* Set 'em for later */ - rlwinm r6,r6,0,0,27 /* Clear to use processor 0 */ - stw r10,EXT(MPPICPUs)-MPPIwork(r11) /* Preset CPU 0 online and ready */ - - mtspr pir,r6 /* Set our PIR */ - -/* */ -/* Ok, ok, enough of this. Let's really start 'em up. */ -/* */ - - lis r9,HIGH_ADDR(CPUInit) /* Top of init code */ - li r6,1 /* Get the other guy's CPU address */ - ori r9,r9,LOW_ADDR(CPUInit) /* Get physical address of init code */ - - mfmsr r8 /* Get the MSR */ - - stw r6,MPPIlogCPU-MPPIwork(r11) /* Set the logical CPU address to assign */ - - rlwinm r6,r8,0,17,15 /* Turn off interruptions */ - sync /* Make sure the work area is updated */ - mtmsr r6 /* Flip the EE bit off */ - isync /* Chill a bit */ - - stw r9,0(r7) /* Pass the initialization code address to our friend */ - sync /* Fence off the pig */ - - li r6,0 /* Clear this out */ - stb r6,IntReg(r5) /* Kick the other processor */ - eieio /* Pig in the sty */ - -/* At this point we should be in the "TBEN off" state. The second processor should be starting */ -/* to come up. */ - -/* Note that we are assuming that the secondary processor will reset the interrupt request. */ -/* If we are on one of the old boards, we will die in about 256µS if it is not reset, 'cause */ -/* of that silly watchchihuahua timer. We can't use the TB or decrementer here to set a */ -/* timeout because when we are in "TBEN off" state these guys don't run. */ - - lis r4,HIGH_ADDR(SpinTimeOut) /* Get about 1 second at 200MHz */ - /* At 120 MHz this is 1.66 seconds, at 400MHz it is .5 */ - /* All these are more than enough time for this handshake */ - ori r4,r4,LOW_ADDR(SpinTimeOut) /* Get the bottom part */ - -WaitReady: lwz r9,0(r7) /* Get this back */ - mr. r9,r9 /* The other processor will set to 0 */ - /* when it is ready for the work area address */ - beq CodeUp /* The code is up on the other side */ - subi r4,r4,1 /* Count the try */ - mr. r4,r4 /* Did we timeout? */ - bne+ WaitReady /* Nope... */ - - li r3,kMPPInitTO1 /* Set that we timed out with initial code bringup */ - mtmsr r8 /* Restore the interrupt state */ - mtlr r0 /* Restore the return address */ - blr /* Return a failure... 
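The WaitReady loop just above is a bounded-spin mailbox handshake: publish the init-code address through the PCI1 address register, kick SecInt, and count down SpinTimeOut iterations waiting for the secondary to zero the mailbox. The same shape in C, assuming a mailbox pointer and a timeout constant as stand-ins for the real registers:

    #include <stdint.h>

    #define SPIN_TIMEOUT 0x00800000u    /* stand-in for SpinTimeOut */

    /* 'mailbox' plays the PCI1 address register; returns 0 once the
       secondary zeroes it, or -1 on timeout (the kMPPInitTO1 case). */
    static int
    wait_for_secondary(volatile uint32_t *mailbox, uint32_t init_code_phys)
    {
        *mailbox = init_code_phys;      /* hand over the init code address */
        /* ... assert SecInt on the Hammerhead IntReg here ... */
        for (uint32_t tries = SPIN_TIMEOUT; tries != 0; tries--)
            if (*mailbox == 0)          /* secondary is ready to proceed */
                return 0;
        return -1;                      /* timed out; caller unwinds */
    }

Note the counted loop rather than a timebase deadline: with TBEN off the timebase is frozen, so a TB- or decrementer-based timeout could never expire here.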
*/ -CodeUp: isync /* Make sure we don't prefetch past here */ - -/* Timebase is stopped here, no need for the funky "get time base right" loop */ - - mftbu r4 /* Get upper timebase half */ - mftb r9 /* Get bottom */ - stw r4,MPPITBsync-MPPIwork(r11) /* Save the top */ - stw r9,MPPITBsync+4-MPPIwork(r11) /* Save the second half */ - sync /* Be very sure it's there */ - - stw r11,0(r7) /* Set the PCI1 adr reg non-zero - this releases the spin */ - /* loop and allows the timebase to be set. */ - eieio - - lis r9,HIGH_ADDR(SpinTimeOut) /* Get the spin time */ - ori r9,r9,LOW_ADDR(SpinTimeOut) /* Get the bottom part */ - -WaitTBset: lwz r4,0(r7) /* Get this back */ - mr. r4,r4 /* When zero, the other guy's TB is set up */ - beq- TBSetUp /* She's'a all done... */ - subi r9,r9,1 /* Count the try */ - mr. r9,r9 /* Did we timeout? */ - bne+ WaitTBset /* Nope... */ - - li r3,kMPPInitTO3 /* Set that we timed out setting clock */ - mtmsr r8 /* Restore the interrupt state */ - isync - mtlr r0 /* Restore the return address */ - blr /* Return a failure... */ - -TBSetUp: stb r6,IntReg(r5) /* Kick the other processor again */ - /* This will tick us to the next state */ - eieio - -SpinDelay: addi r6,r6,1 /* Bump spin count (we finally are trashing R6) */ - cmplwi cr0,r6,4096 /* Spun enough? */ - ble+ SpinDelay /* Nope... */ - - li r6,SecInt /* Set the interrupt bit */ - stb r6,IntReg(r5) /* Deassert the external signal */ -/* */ -/* Ok, the other processor should be online in a spin waiting for a start signal from */ -/* us. It should be in the reset state with no external interruptions pending. There may */ -/* be a decrementer pop waiting in the wings though. */ -/* */ - - lwz r7,MPPIGrandC-MPPIwork(r11) /* Point to GrandCentral */ - lwz r4,MaskReg(r7) /* Get the grand central mask register (note that this */ - /* is a little-endian area, but I'm too lazy to access it that way, */ - /* so I'll document what it really should be, but, probably, it would */ - /* have been much, much easier just to code up the lwbrx and be done */ - /* with it rather than producing this monograph describing my alternate */ - /* access method that I really don't explain anyway.) */ - ori r4,r4,0x0040 /* Flip on bit 30 (hah, figure that one out). This enables the */ - /* Ext10 interrupt which is connected to the MACE ethernet chip's */ - /* chip-select pin. */ - stw r4,MaskReg(r7) /* Stick it on back */ - eieio - - mtlr r0 /* Get back the original LR */ - sync /* Make sure all storage ops are done */ - mtmsr r8 /* Restore the MSR */ - isync - li r3,kSIGPnoErr /* Set that we worked jest fine and dandy */ - blr /* Bye now... */ - - .align 5 -/******************************************************************************************************** */ -/******************************************************************************************************** */ -/* */ -/* This is where the individual SIGP function calls reside. */ -/* Also, it is where we cram the second processor's initialization code so we */ -/* can use physical addressing. */ -/* */ -/******************************************************************************************************** */ -/******************************************************************************************************** */ - -MPPIFunctions: /* Start of all externally called functions and interrupt handling code */ - - -/******************************************************************************************************** */ -/* */ -/* Count the number of processors. 
This hardwires to 2 (or 1 if no secondary) */ -/* */ -/******************************************************************************************************** */ - -CountProcessors: - lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - mfmsr r9 /* Get the MSR */ - ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - - ori r10,r9,0x0010 /* Turn on DDAT */ - - lwz r8,MPPIHammer-MPPIwork(r12) /* Point to the HammerHead controller */ - - mtmsr r10 /* Turn on DDAT */ - isync /* Kill speculation */ - - li r3,2 /* Assume we have them all */ - lbz r5,ArbConfig(r8) /* Check if we've seen a second processor */ - andi. r5,r5,TwoCPU /* Are we a real live two processor? */ - mtmsr r9 /* Put back the DDAT */ - isync - - bnelr+ /* Yeah... */ - li r3,1 /* Nope, set a count of 1 */ - blr /* Leave, we're inadequate... */ - -/******************************************************************************************************** */ -/* */ -/* Start up the selected processor (R3=processor; R4=physical start address; R5=pass-thru parm) */ -/* */ -/******************************************************************************************************** */ - -StartProcessor: - - mr r7,r5 /* Copy pass-thru parameter */ - mfspr r10,pir /* Get our processor number */ - rlwinm r9,r3,5,23,26 /* Get index into CPU array */ - cmplw cr0,r3,r10 /* Trying to start ourselves? */ - lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - cmplwi cr1,r3,MPPIMaxCPU /* See if we are bigger than max */ - li r3,kMPPHairyPalms /* Set trying to do it to ourselves */ - beqlr- /* Self abuse... */ - li r3,kSIGPTargetAddrErr /* CPU number is too big */ - bgtlr- cr1 /* Sure are... (Get our address also) */ - ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Point into the proccessor control area */ - mflr r11 /* Save the return address */ - add r9,r9,r12 /* Point right at the entry */ - -SPretry: lwarx r5,0,r9 /* Pick up the status flags (MPPICStat) and reserve it */ - li r3,kSIGPInterfaceBusyErr /* Fill dead space and get busy return code */ - rlwinm. r0,r5,0,0,0 /* Are we marked as busy? */ - lis r6,MPPICOnline>>16 /* Get the online flag */ - bne- ErrorReturn /* Yeah, go leave, don't bother me now... */ - and. r0,r5,r6 /* Are we online */ - li r3,kMPPOffline /* Set offline */ - beq- ErrorReturn /* Ain't online, ain't ready, buzz off... */ - li r3,kMPPBadState /* Set bad state */ - oris r5,r5,(MPPICBusy>>16)&0x0000FFFF /* Turn on the busy bit */ - - stwcx. r5,0,r9 /* Try to set busy */ - bne- SPretry - - ori r6,r10,MPPICfStrt<<8 /* Put the Start function in front of the processor ID */ - rlwimi r5,r6,0,16,31 /* Put these behind the status flags */ - stw r4,MPPICParm0(r9) /* Set the starting physical address parameter */ - stw r7,MPPICParm2(r9) /* Set pass-thru parameter */ - - sync /* Make sure it's all out there */ - b KickAndGo /* We're done now... */ - -/******************************************************************************************************** */ -/* */ -/* Reset the selected processor (R3=processor). You can't reset yourself or the primary. */ -/* We're gonna try, try real hard... This is not for the faint-of-heart. */ -/* If there's ever any way to yank a reset line, we'll do it here. 
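StartProcessor above (and the resume/stop/signal/status/sync variants that follow) all open with the same lwarx/stwcx. sequence: load-reserve MPPICStat, bail if the interface is busy or the target offline, merge in the busy bit plus the function code and requestor, and retry if the reservation is lost. In C11 atomics the pattern looks roughly like this, reusing the illustrative constants from the layout sketch after the header above:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Claim the target's interface or report why not (return codes mirror
       kSIGPInterfaceBusyErr / kMPPOffline; all names are illustrative). */
    static int
    claim_interface(_Atomic uint32_t *stat, unsigned func, unsigned cpu)
    {
        uint32_t old = atomic_load(stat);
        do {
            if (old & 0x80000000u)           /* MPPIC_BUSY */
                return -1;                   /* busy: come back later */
            if (!(old & 0x20000000u))        /* MPPIC_ONLINE */
                return -2;                   /* offline: nothing to talk to */
        } while (!atomic_compare_exchange_weak(stat, &old,
                     (old & 0xFFFF0000u) | 0x80000000u |
                     ((func & 0xFFu) << 8) | (cpu & 0xFFu)));
        return 0;                            /* claimed; proceed to KickAndGo */
    }

A failed store-conditional simply re-runs the checks with the freshly observed word, which is exactly what the bne- SPretry style retry does.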
*/ -/* */ -/******************************************************************************************************** */ - -ResetProcessor: - mfspr r10,pir /* Get our processor number */ - rlwinm r9,r3,5,23,26 /* Get index into CPU array */ - rlwinm r10,r10,0,28,31 /* Clean up the PIR */ - cmplw cr0,r3,r10 /* Trying to start ourselves? */ - cmplwi cr1,r3,MPPIMaxCPU /* See if we are bigger than max */ - li r3,kMPPHairyPalms /* Set trying to do it to ourselves */ - beqlr- /* Self abuse... */ - mr. r9,r9 /* Trying to reset the primary?!? Dude, that's insubordination!!!! */ - lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - li r3,kMPPInvalCPU /* Say that that's a major offense */ - beqlr- /* Bye now... */ - li r3,kSIGPTargetAddrErr /* CPU number is too big */ - bgtlr- cr1 /* Sure are... (Get our address also) */ - ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - - la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Point into the proccessor control area */ - mflr r11 /* Save the return address */ - add r9,r9,r12 /* Point right at the entry */ - - li r4,16 /* Try for 16 times to get the busy lock */ - -RSlockS: mftb r6 /* Time stamp start */ - -RSlock: lwarx r5,0,r9 /* Pick up the status flags (MPPICStat) and reserve it */ - rlwinm. r0,r5,0,2,2 /* Are we online */ - li r3,kMPPOffline /* Set offline */ - cmplwi cr1,r5,0 /* Check for busy */ - beq- ErrorReturn /* Ain't online, ain't ready, buzz off... */ - bge+ cr1,RSnotBusy /* Not busy, make it so... */ - - mftb r7 /* Stamp the time */ - sub r7,r7,r6 /* Get elapsed time */ - rlwinm. r7,r7,16,16,31 /* Divide ticks by microseconds (this is pretty darn "kinda-in-the-ballpark") */ - cmplwi cr0,r7,TicksPerMic /* See if we hit 65536µS yet */ - blt+ RSlock /* Not yet... */ - -RSatmtCnt: subi r4,r4,1 /* Count the retries */ - mr. r4,r4 /* Are we done yet? */ - bgt+ RSlockS /* Start the lock attempt again... */ - - li r3,kMPPCantLock /* Say we can't get the lock */ - b ErrorReturn /* Bye, dude... */ - -RSnotBusy: rlwinm r5,r5,0,0,15 /* Clear out the function and requestor */ - oris r5,r5,(MPPICBusy>>16)&0x0000FFFF /* Set busy */ - or r5,r10,r5 /* Add in our processor */ - ori r5,r5,MPPICfReset<<8 /* Set the reset function */ - stwcx. r5,0,r9 /* Cram it back */ - bne- RSatmtCnt /* We lost the reservation... */ - b KickAndGo /* Try to send it across... */ - - -/******************************************************************************************************** */ -/* */ -/* Here we will try to resume execution of a stopped processor (R3=processor). */ -/* */ -/******************************************************************************************************** */ - -ResumeProcessor: - mfspr r10,pir /* Get our processor number */ - rlwinm r9,r3,5,23,26 /* Get index into CPU array */ - cmplw cr0,r3,r10 /* Trying to resume ourselves? */ - cmplwi cr1,r3,MPPIMaxCPU /* See if we are bigger than max */ - li r3,kMPPHairyPalms /* Set trying to do it to ourselves */ - beqlr- /* Self abuse... */ - li r3,kSIGPTargetAddrErr /* CPU number is too big */ - bgtlr- cr1 /* Sure are... 
(Get our address also) */ - lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Point into the proccessor control area */ - ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - mflr r11 /* Save the link register */ - add r9,r9,r12 /* Point right at the entry */ - -RPretry: lwarx r5,0,r9 /* Pick up the status flags (MPPICStat) and reserve it */ - li r3,kSIGPInterfaceBusyErr /* Fill dead space and get busy return code */ - rlwinm. r0,r5,0,0,0 /* Are we marked as busy? */ - lis r6,MPPICOnline>>16 /* Get the online flag */ - bne- ErrorReturn /* Yeah, go leave, don't bother me now... */ - and. r0,r5,r6 /* Are we online */ - li r3,kMPPOffline /* Set offline */ - lis r6,MPPICReady>>16 /* Get the ready bit */ - beq- ErrorReturn /* Ain't online, ain't ready, buzz off... */ - and. r0,r5,r6 /* Are we ready? */ - li r3,kMPPNotReady /* Set not ready */ - lis r6,MPPICStop>>16 /* Get the stopped bit */ - beq- ErrorReturn /* Ain't ready, buzz off... */ - and. r0,r5,r6 /* Are we stopped? */ - li r3,kMPPNotStopped /* Set not stopped */ - oris r5,r5,(MPPICBusy>>16)&0x0000FFFF /* Turn on the busy bit */ - beq- ErrorReturn /* Nope, not stopped, so how do we resume? */ - - stwcx. r5,0,r9 /* Try to set busy */ - bne- RPretry - - ori r6,r10,MPPICfResm<<8 /* Put the resume function in front of the processor ID */ - rlwimi r5,r6,0,16,31 /* Put these behind the status flags */ - b KickAndGo /* We're done now... */ - - - -/******************************************************************************************************** */ -/* */ -/* Here we will try to stop execution of a running processor (R3=processor). */ -/* */ -/******************************************************************************************************** */ - -StopProcessor: - mfspr r10,pir /* Get our processor number */ - rlwinm r9,r3,5,23,26 /* Get index into CPU array */ - cmplw cr0,r3,r10 /* Are we doing ourselves? */ - cmplwi cr1,r3,MPPIMaxCPU /* See if we are bigger than max */ - li r3,kMPPHairyPalms /* Set trying to do it to ourselves */ - beqlr- /* Self abuse... */ - li r3,kSIGPTargetAddrErr /* CPU number is too big */ - bgtlr- cr1 /* Sure are... (Get our address also) */ - lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Point into the proccessor control area */ - ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - mflr r11 /* Save the link register */ - add r9,r9,r12 /* Point right at the entry */ - -PPretry: lwarx r5,0,r9 /* Pick up the status flags (MPPICStat) and reserve it */ - li r3,kSIGPInterfaceBusyErr /* Fill dead space and get busy return code */ - rlwinm. r0,r5,0,0,0 /* Are we marked as busy? */ - lis r6,MPPICOnline>>16 /* Get the online flag */ - bne- ErrorReturn /* Yeah, go leave, don't bother me now... */ - and. r0,r5,r6 /* Are we online */ - li r3,kMPPOffline /* Set offline */ - lis r6,MPPICReady>>16 /* Get the ready bit */ - beq- ErrorReturn /* Ain't online, ain't ready, buzz off... */ - and. r0,r5,r6 /* Are we ready? */ - li r3,kMPPNotReady /* Set not ready */ - lis r6,MPPICStop>>16 /* Get the stopped bit */ - beq- ErrorReturn /* Ain't ready, buzz off... */ - and. r0,r5,r6 /* Are we stopped? */ - li r3,kMPPNotRunning /* Set not running */ - oris r5,r5,(MPPICBusy>>16)&0x0000FFFF /* Turn on the busy bit */ - bne- ErrorReturn /* Nope, already stopped, so how do we stop? */ - - stwcx. 
r5,0,r9 /* Try to set busy */ - ori r10,r10,MPPICfStop<<8 /* Put the stop function in front of the processor ID */ - bne- PPretry - - rlwimi r5,r10,0,16,31 /* Put these behind the status flags */ - b KickAndGo /* We're done now... */ - - -/******************************************************************************************************** */ -/* */ -/* Here we will try to signal a running processor (R3=processor). */ -/* Note that this should have good performance. Well, actually, seeing as how slow we really are, it */ -/* probably is moot anyhow. */ -/* Another note: this function (and almost all others as well) will return a timeout when the */ -/* second processor tries to do itself on the old version of the board. This happens because, */ -/* in order to keep the watchchihuahua from popping (just imagine the scene: that little runt-dog just so */ -/* excited that its veins and eyes bulge and then explode) signaling to the secondary */ -/* is done synchronously and disabled. If the secondary signals the secondary, it will never enable so */ -/* it will never see the 'rupt, so it will never clear it, so it will time out, so there... */ -/* */ -/******************************************************************************************************** */ - -SignalProcessor: - mfspr r10,pir /* Get our processor number */ - rlwinm r9,r3,5,23,26 /* Get index into CPU array */ - lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - cmplwi cr1,r3,MPPIMaxCPU /* See if we are bigger than max */ - li r3,kSIGPTargetAddrErr /* CPU number is too big */ - bgtlr- cr1 /* Sure are... (Get our address also) */ - la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Point into the processor control area */ - ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - mflr r11 /* Save the link register */ - add r9,r9,r12 /* Point right at the entry */ - -SiPretry: lwarx r5,0,r9 /* Pick up the status flags (MPPICStat) and reserve it */ - li r3,kSIGPInterfaceBusyErr /* Fill dead space and get busy return code */ - rlwinm. r0,r5,0,0,0 /* Are we marked as busy? */ - lis r6,MPPICOnline>>16 /* Get the online flag */ - bne- ErrorReturn /* Yeah, go leave, don't bother me now... */ - and. r0,r5,r6 /* Are we online */ - li r3,kMPPOffline /* Set offline */ - lis r6,MPPICReady>>16 /* Get the ready bit */ - beq- ErrorReturn /* Ain't online, ain't ready, buzz off... */ - and. r0,r5,r6 /* Are we ready? */ - li r3,kMPPNotReady /* Set not ready */ - oris r5,r5,(MPPICBusy>>16)&0x0000FFFF /* Turn on the busy bit */ - beq- ErrorReturn /* Ain't ready, buzz off... */ - - stwcx. r5,0,r9 /* Try to set busy */ - ori r10,r10,MPPICfSigp<<8 /* Put the SIGP function in front of the processor ID */ - bne- SiPretry - - stw r4,MPPICParm0(r9) /* Pass along the SIGP parameter */ - - rlwimi r5,r10,0,16,31 /* Put these behind the status flags */ - b KickAndGo /* We're done now... */ - - -/******************************************************************************************************** */ -/* */ -/* Here we will store the state of a processor (R3=processor; R4=status area). */ -/* Self abuse will store the state as is, is not asynchronous, and grows hair on your palms. */ -/* */ -/******************************************************************************************************** */ - -StoreProcessorStatus: - mfspr r10,pir /* Get our processor number */ - rlwinm r9,r3,5,23,26 /* Get index into CPU array */ - cmplw cr0,r3,r10 /* Saving our own state??? Abusing oneself??? 
*/ - cmplwi cr1,r3,MPPIMaxCPU /* See if we are bigger than max */ - li r3,kSIGPTargetAddrErr /* CPU number is too big */ - mflr r11 /* Save the link register */ - beq Flagellant /* Oh baby, oh baby... */ - bgtlr- cr1 /* Sure are... (Get our address also) */ - lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Point into the proccessor control area */ - ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - add r9,r9,r12 /* Point right at the entry */ - -SSretry: lwarx r5,0,r9 /* Pick up the status flags (MPPICStat) and reserve it */ - li r3,kSIGPInterfaceBusyErr /* Fill dead space and get busy return code */ - rlwinm. r0,r5,0,0,0 /* Are we marked as busy? */ - lis r6,MPPICOnline>>16 /* Get the online flag */ - bne- ErrorReturn /* Yeah, go leave, don't bother me now... */ - and. r0,r5,r6 /* Are we online */ - li r3,kMPPOffline /* Set offline */ - beq- ErrorReturn /* Ain't online, buzz off... */ - oris r5,r5,(MPPICBusy>>16)&0x0000FFFF /* Turn on the busy bit */ - - stwcx. r5,0,r9 /* Try to set busy */ - ori r10,r10,MPPICfStat<<8 /* Put the store status function in front of the processor ID */ - bne- SSretry /* Lost reservation, return busy... */ - - li r0,0 /* Get false */ - stb r0,CSAregsAreValid(r4) /* Set that the registers ain't valid */ - stw r4,MPPICParm0(r9) /* Set the status area physical address parameter */ - - rlwimi r5,r10,0,16,31 /* Put these behind the status flags */ - b KickAndGo /* We're done now... */ - -/* Spill one's seed upon the soil */ - -Flagellant: bl StoreStatus /* Go store off all the registers 'n' stuff */ - mtlr r11 /* Restore the return address */ - li r3,kSIGPnoErr /* Return no error */ - blr /* Leave... */ - - -/******************************************************************************************************** */ -/* */ -/* Here we will attempt to syncronize clocks (R3=processor). */ -/* Self abuse will just return with an all-ok code. */ -/* */ -/******************************************************************************************************** */ - -SynchClock: - mfspr r10,pir /* Get our processor number */ - rlwinm r9,r3,5,23,26 /* Get index into CPU array */ - cmplw cr0,r3,r10 /* Cleaning our own clock?? */ - cmplwi cr1,r3,MPPIMaxCPU /* See if we are bigger than max */ - lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - li r3,kSIGPnoErr /* Assume self-cleaning clock */ - beqlr /* Oh baby, oh baby... */ - li r3,kSIGPTargetAddrErr /* CPU number is too big */ - bgtlr- cr1 /* Sure are... (Get our address also) */ - ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Point into the proccessor control area */ - mflr r11 /* Save the link register */ - add r9,r9,r12 /* Point right at the entry */ - -SyCretry: lwarx r5,0,r9 /* Pick up the status flags (MPPICStat) and reserve it */ - li r3,kSIGPInterfaceBusyErr /* Fill dead space and get busy return code */ - rlwinm. r0,r5,0,0,0 /* Are we marked as busy? */ - lis r6,MPPICOnline>>16 /* Get the online flag */ - bne- ErrorReturn /* Yeah, go leave, don't bother me now... */ - and. r0,r5,r6 /* Are we online */ - li r3,kMPPOffline /* Set offline */ - beq- ErrorReturn /* Ain't online, ain't ready, buzz off... */ - oris r5,r5,(MPPICBusy>>16)&0x0000FFFF /* Turn on the busy bit */ - li r0,0 /* Clear this */ - - stwcx. 
r5,0,r9 /* Try to set busy */ - ori r10,r10,MPPICfTBsy<<8 /* Put the timebase sync function in front of the processor ID */ - bne- SyCretry /* Lost reservation, return busy... */ - - stw r0,MPPITBsync+4-MPPIwork(r12) /* Make sure the parm area is 0 */ - mr r0,r11 /* Save the LR */ - bl SyCbase /* Get a base register */ -SyCbase: rlwimi r5,r10,0,16,31 /* Put these behind the status flags */ - mflr r11 /* Get the base */ - la r11,(4*4)(r11) /* DON'T MESS WITH THESE INSTRUCTIONS Make up the return point */ - b KickAndGo /* Go signal the other side */ - -SyCKrtrn: mr r11,r0 /* Restore the return */ - -/* */ -/* Start sync'ing 'er up */ -/* */ - - mftb r4 /* Take a timeout stamp (don't need top half, we have at least 13 hours) */ - -SyCInP0: lwz r5,0(r9) /* Get the CPU status word */ - rlwinm r5,r5,24,24,31 /* Isolate the command byte */ - cmplwi cr0,r5,MPPICfTBsy1 /* Have we reached time base sync phase 1 yet? */ - beq SyCInP1 /* Yeah, we're in phase 1... */ - mftb r5 /* Get the bottom half of the timer again */ - sub r5,r5,r4 /* How long we been messin' around? */ - cmplwi cr0,r5,1000*TicksPerMic /* Don't try more'n' a 1000µS */ - blt+ SyCInP0 /* We haven't, so wait some more... */ - li r3,kMPPTimeOut /* Signal timeout */ - b ErrorReturn /* By dude... */ - -/* */ -/* Here we make sure there is enough time to sync the clocks before the lower part of the TB ticks */ -/* up into the high part. This eliminates the need for any funky */ -/* "get-the-top-then-get-the-bottom-then-get-the-top-again-to-see-if-it-changed" stuff. That would */ -/* only make the sync harder to do. */ -/* */ -/* Also, because we use the lower TB value for the signal, we also need to make sure we do not have */ -/* a value of 0, we would be ever-so-sorry if it was. */ -/* */ - -SyCInP1: li r4,lo16(0xC000) /* Get the minimum time left on clock before tick ('bout 1 1/4 ms) */ - li r8,0 /* Get a 0 constant */ - -SyCdelay: mftb r5 /* Get the time left */ - cmplw cr0,r5,r4 /* See if there is sufficient time before carry into high clock */ - bgt- SyCdelay /* Nope, hang until it is... */ - mr. r5,r5 /* Did we just tick, however? */ - beq- SyCdelay /* Yeah, wait until it is at least 1... */ - - mftbu r4 /* Get the upper */ - stw r4,MPPITBsync-MPPIwork(r12) /* Make sure the top half is set */ - sync /* Wait until it is done */ - - mftb r5 /* Get the lower timebase now */ - stw r5,MPPITBsync+4-MPPIwork(r12) /* Shove it out for the other processor */ - - la r6,MPPISncFght-MPPIwork(r12) /* Point to the courtroom area */ - li r5,0 /* Point to the first line */ - -SyCclear: dcbz r5,r6 /* Clear the court */ - addi r5,r5,32 /* Point to the next line */ - cmplwi cr0,r5,10*2*32 /* Enough for 9 iterations, 2 chunks at a time */ - blt+ SyCclear /* Clear the whole smear... */ - sync /* Make sure everyone's out */ - - mftb r5 /* Get the lower timebase now */ - -SyCWait: lwz r7,MPPITBsync+4-MPPIwork(r12) /* Get it back */ - mftb r6 /* Get the bottom half again */ - mr. r7,r7 /* Have they set their clock yet? */ - sub r0,r6,r5 /* See if we're hung up */ - beq- SyCdonesync /* Clock is set */ - cmplwi cr0,r0,1000*TicksPerMic /* Timeout if we spend more than 1000µS doing this */ - blt+ SyCWait /* No timeout, wait some more... */ - li r3,kMPPTimeOut /* Set timeout */ - b ErrorReturn /* Leave... */ - -/* */ -/* Ok, so now we have set a preliminary TB value on the second processor. It's close, but only */ -/* within handgranade range. 
*/ -/* */ -/* What we will do now is to let the processors (starting with the other guy) argue about the time for */ -/* a while (10 passes-we use the middle 8). We'll look at the results and try to adjust the other processor's */ -/* time such that the timing windows are overlapping evenly. This should put the TBs close enough together */ -/* (0-2 ticks) that the difference is undetectable. */ -/* */ - - - -SyCdonesync: - li r4,0 /* Clear this */ - la r5,MPPISncFght-MPPIwork(r12) /* Point to the squared circle */ - -SyCWtArg: - dcbf 0,r5 /* Make sure of it */ - sync /* Doubly shure */ - lwz r6,0(r5) /* Listen for the defence argument */ - - mr. r6,r6 /* See if they are done */ - beq+ SyCWtArg /* Nope, still going... */ - - mftb r7 /* They're done, time for rebuttal */ - stw r7,32(r5) /* Make rebuttle */ - - addi r4,r4,1 /* Count rounds */ - - cmplwi cr0,r4,10 /* See if we've gone 8 rounds plus an extra one */ - addi r5,r5,64 /* Point to the next round areas */ - - blt+ SyCWtArg /* Not yet, come out of your corners fighting... */ - - mftb r5 /* Stamp the wait */ - -SyCWadj: lwz r7,MPPITBsync+4-MPPIwork(r12) /* Get adjustment flag */ - mftb r6 /* Get timebase again */ - - mr. r7,r7 /* Have they set their timebase with adjusted time yet? */ - sub r6,r6,r5 /* Get elapsed time */ - bne+ SyCdone /* They say it, sync done... */ - cmplwi cr0,r6,1000*TicksPerMic /* Timeout if we spend more than 1000µS doing this */ - blt+ SyCWadj /* Still time, wait until adjustment is done... */ - - li r3,kMPPTimeOut /* Set timeout */ - b ErrorReturn /* Pass it back... */ - -SyCdone: li r3,kSIGPnoErr /* No errors */ - mtlr r11 /* Restore LR */ - blr /* Leave... */ - - -/******************************************************************************************************** */ -/* */ -/* Here we will get the physical address of the interrupt handler. */ -/* */ -/******************************************************************************************************** */ - -GetExtHandlerAddress: - mflr r11 /* Save our return */ - bl GEXbase /* Make a base address */ -GEXbase: mflr r3 /* Get address into our base */ - addi r3,r3,LOW_ADDR(GotSignal-GEXbase) /* Get the logical address of the 'rupt handler */ - - mtlr r11 /* Restore LR */ - blr - - -/******************************************************************************************************** */ -/* */ -/* Here we will get a snapshot of the processor's current signaling state (R3=processor). */ -/* */ -/******************************************************************************************************** */ - -ProcessorState: - lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - rlwinm r9,r3,5,23,26 /* Get index into CPU array */ - cmplwi cr1,r3,MPPIMaxCPU /* See if we are bigger than max */ - li r3,kSIGPTargetAddrErr /* CPU number is too big */ - bgtlr- cr1 /* Sure are... (Get our address also) */ - la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Point into the proccessor control area */ - ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - add r9,r9,r12 /* Point right at the entry */ - lwz r4,MPPICStat(r9) /* Get the status word */ - li r3,kSIGPnoErr /* Set no errors */ - rlwinm. 
r4,r4,0,0,0 /* Test for busy status */ - beqlr /* Return kSIGPnoErr if not busy */ - li r3,kSIGPInterfaceBusyErr /* Otherwise, return busy */ - blr /* Return it */ - -/******************************************************************************************************** */ -/* */ -/* Here we will try to handle any pending messages (just as if an interruption occurred). */ -/* The purpose of this function is to assure the message passing system runs even */ -/* though external interrupts are disabled. Lacking a separate physical signalling */ -/* class, we have to share the external interrupt signal. Unfortunately, there are */ -/* times when disabled loops occur (in spin locks, in the debugger, etc.), and when they */ -/* happen, a low level message sent to a processor will not get processed, hence this */ -/* function exists to be called from those disabled loops. Since the calls are often */ -/* from disabled code, all that can be done is to process any pending *message*. Any */ -/* pending notification interruption (referred to throughout this code as a SIGP */ -/* interruption) must remain pending. */ -/* */ -/******************************************************************************************************** */ - -RunSIGPRun: - lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - mfspr r3,pir /* Get our CPU address */ - rlwinm r9,r3,5,23,26 /* Get index into CPU array */ - ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Point into the processor control area */ - mflr r11 /* Save the link register */ - add r9,r9,r12 /* Point right at our entry */ - lwz r3,MPPICPriv(r9) /* Get our privates */ - cmplw cr1,r11,r11 /* Make sure IdleWait doesn't try to clear 'rupt request */ - oris r3,r3,MPPICXRun>>16 /* Diddle with them and show we entered here */ - stw r3,MPPICPriv(r9) /* Put away our privates */ - b IdleWait /* Go pretend there was an interrupt... */ - -/******************************************************************************************************** */ -/* */ -/* Error return. We only need this when we leave with a reservation. We really SHOULD clear it... */ -/* */ -/******************************************************************************************************** */ - -ErrorReturn: - mtlr r11 /* Restore LR */ - blr - -/******************************************************************************************************** */ -/* */ -/* Kick the target processor. Note that we won't set the passing bit until we are ready to exit. */ -/* The reason for this is that we have the silly, old watchchihuahua board to deal with. Because */ -/* we can't just set the interrupt and leave, we gotta wait for it to be seen on the other side. */ -/* This means that there could be a timeout and if so, we need to back off the function request else */ -/* we'd see busy when they tried to redrive it. We'll have to deal with a tad of spin on the secondary side. */ -/* Note that this just applies to a primary to secondary function on the old board. */ -/* */ -/******************************************************************************************************** */ - -KickAndGo: - la r8,MPPICPU0-MPPIwork(r12) /* Get the primary work area address */ - mtlr r11 /* Restore the link register */ - cmplw cr0,r8,r9 /* Which is target? primary or secondary? 
*/ - mfmsr r11 /* Save off the MSR */ - oris r5,r5,MPPICPass>>16 /* Set the passing bit on */ - stw r5,MPPICStat(r9) /* Store the pass and let the other processor go on */ - - beq KickPrimary /* The target is the primary... */ - - ori r3,r11,0x0010 /* Turn on DDAT bit */ - lbz r4,MPPIstatus-MPPIwork(r12) /* Load up the global status byte */ - lwz r8,MPPIHammer-MPPIwork(r12) /* Point to the Hammerhead area */ - - mtmsr r3 /* Turn on DDAT */ - isync - - andi. r4,r4,MPPI2Pv2 /* Are we on the new or old board? */ - li r3,0 /* Set the bit for an interrupt request */ - beq KickOld /* Ok, it's the old board... */ - - sync /* Make sure this is out there */ - stb r3,IntReg(r8) /* Set the interruption signal */ - eieio - - mtmsr r11 /* Set DDAT back to what it was */ - isync - li r3,kSIGPnoErr /* Set no errors */ - blr /* Leave... */ - -KickOld: li r4,8 /* Set the number of tries */ - -KickAgain: mftb r6 /* Stamp the bottom half of time base */ - stb r3,IntReg(r8) /* Stick the interrupt */ - eieio /* Fence me in */ - -CheckKick: lbz r10,IntReg(r8) /* Get the interrupt request back again */ - mr. r10,r10 /* Yes? Got it? */ - bne FinalDelay /* Yeah, do the final delay and then go away... */ - - mftb r7 /* Get the time again */ - sub r7,r7,r6 /* Get time-so-far */ - cmplwi cr0,r7,75*TicksPerMic /* Hold it for 75µS (average disable is supposed to be 100µS or so) */ - blt+ CheckKick /* Keep waiting the whole time... */ - - li r10,SecInt /* Set the deassert bit */ - mftb r6 /* Stamp start of deassert time */ - stb r10,IntReg(r8) /* Deassert the interrupt request */ - eieio - -DeassertWT: mftb r7 /* Stamp out the time */ - sub r7,r7,r6 /* Get elapsed */ - cmplwi cr0,r7,16*TicksPerMic /* Hold off 16µS (minimum is 12µS) */ - blt+ DeassertWT /* Keep spinning... */ - - subi r4,r4,1 /* See if we have another retry we can do */ - mr. r4,r4 /* Are we there yet? */ - blt+ KickAgain /* Retry one more time... */ - - rlwinm r5,r5,0,2,31 /* Clear busy and passing bits */ - rlwinm r5,r5,0,24,15 /* Clear the function request to idle */ - - mtmsr r11 /* Restore DDAT stuff */ - isync - - stw r5,MPPICStat(r9) /* Rescind the request */ - li r3,kMPPTimeOut /* Set timeout */ - blr /* Leave... */ - -FinalDelay: mftb r6 /* Stamp the start of the final delay */ - -FinalDelayW: - mftb r7 /* Stamp out the time */ - sub r7,r7,r6 /* Get elapsed */ - cmplwi cr0,r7,16*TicksPerMic /* Hold off 16µS (minimum is 12µS) */ - blt+ FinalDelayW /* Keep spinning... */ - - mtmsr r11 /* Restore DDAT stuff */ - isync - li r3,kSIGPnoErr /* Set no errors */ - blr /* Leave... */ - -KickPrimary: - ori r3,r11,0x0010 /* Turn on the DDAT bit */ - lwz r8,MPPIEther-MPPIwork(r12) /* Get the address of the ethernet ROM */ - - mtmsr r3 /* Turn on DDAT */ - isync - - li r4,4 /* Get flip count */ - - sync /* Make sure the status word is out there */ - -FlipOff: lbz r3,0(r8) /* Reference ethernet ROM to get chip select twiddled */ - eieio /* Make sure of this (Hmm, this is chip select, not memory-mapped */ - /* storage. Do we even need the eieio?) */ - - addic. r4,r4,-1 /* Have we flipped them off enough? */ - bgt+ FlipOff /* Not yet, they deserve more... */ - - mtmsr r11 /* Restore DDAT stuff */ - isync - li r3,kSIGPnoErr /* Set no errors */ - blr /* Return... 
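KickOld above is the delicate half of KickAndGo: on the old board the interrupt must be withdrawn inside the watchdog window, so the code asserts SecInt, polls up to 75µS for the secondary to deassert it, then holds its own deassert for at least the 12µS minimum before retrying (eight attempts in all). A timing skeleton in C; read_timebase(), delay_ticks() and SEC_INT stand in for mftb, the spin delays, and the Hammerhead IntReg bit:

    #include <stdint.h>

    #define TICKS_PER_US 11                 /* mirrors TicksPerMic */
    #define SEC_INT      0x01               /* stand-in for the SecInt bit */

    extern uint32_t read_timebase(void);    /* stand-in for mftb */

    static void delay_ticks(uint32_t n)
    {
        uint32_t t0 = read_timebase();
        while (read_timebase() - t0 < n)
            ;                               /* spin */
    }

    /* Returns 0 when the secondary acknowledged, -1 when the caller must
       rescind the request (the kMPPTimeOut path above). */
    static int kick_old_board(volatile uint8_t *int_reg)
    {
        for (int attempt = 0; attempt < 8; attempt++) {
            *int_reg = 0;                   /* assert SecInt (active low) */
            uint32_t t0 = read_timebase();
            while (read_timebase() - t0 < 75 * TICKS_PER_US) {
                if (*int_reg != 0) {        /* secondary deasserted it */
                    delay_ticks(16 * TICKS_PER_US); /* final settle time */
                    return 0;
                }
            }
            *int_reg = SEC_INT;             /* withdraw it ourselves... */
            delay_ticks(16 * TICKS_PER_US); /* ...for >= 12µS, or the
                                               watchdog never stops */
        }
        return -1;
    }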
*/ - -/******************************************************************************************************** */ -/* */ -/* This is the code for the secondary processor */ -/* */ -/******************************************************************************************************** */ - -/* Note that none of this code needs locks because there's kind of a synchronization */ -/* shuffle going on. */ - -/* */ -/* First, we need to do a bit of initialization of the processor. */ -/* */ - - -CPUInit: - li r27,0x3040 /* Set floating point and machine checks on, IP to 0xFFF0xxxx */ - mtmsr r27 /* Load 'em on in */ - isync - - lis r28,-32768 /* Turn on machine checks */ - /* should be 0x8000 */ - ori r28,r28,0xCC84 /* Enable caches, clear them, */ - /* disable serial execution and turn BHT on */ - sync - mtspr HID0,r28 /* Start the cache clear */ - sync - -/* */ -/* Clear out the TLB. They be garbage after hard reset. */ -/* */ - - li r0,512 /* Get number of TLB entries (FIX THIS) */ - li r3,0 /* Start at 0 */ - mtctr r0 /* Set the CTR */ - -purgeTLB: tlbie r3 /* Purge this entry */ - addi r3,r3,4096 /* Next page */ - bdnz purgeTLB /* Do 'em all... */ - - sync /* Make sure all TLB purges are done */ - tlbsync /* Make sure on other processors also */ - sync /* Make sure the TLBSYNC is done */ - -/* */ -/* Clear out the BATs. They are garbage after hard reset. */ -/* */ - - li r3,0 /* Clear a register */ - - mtspr DBAT0L,r3 /* Clear BAT */ - mtspr DBAT0U,r3 /* Clear BAT */ - mtspr DBAT1L,r3 /* Clear BAT */ - mtspr DBAT1U,r3 /* Clear BAT */ - mtspr DBAT2L,r3 /* Clear BAT */ - mtspr DBAT2U,r3 /* Clear BAT */ - mtspr DBAT3L,r3 /* Clear BAT */ - mtspr DBAT3U,r3 /* Clear BAT */ - - mtspr IBAT0L,r3 /* Clear BAT */ - mtspr IBAT0U,r3 /* Clear BAT */ - mtspr IBAT1L,r3 /* Clear BAT */ - mtspr IBAT1U,r3 /* Clear BAT */ - mtspr IBAT2L,r3 /* Clear BAT */ - mtspr IBAT2U,r3 /* Clear BAT */ - mtspr IBAT3L,r3 /* Clear BAT */ - mtspr IBAT3U,r3 /* Clear BAT */ - -/* */ -/* Map 0xF0000000 to 0xFFFFFFFF for I/O; make it R/W non-cacheable */ -/* Map 0x00000000 to 0x0FFFFFFF for mainstore; make it R/W cachable */ -/* */ - - lis r6,0xF000 /* Set RPN to last segment */ - ori r6,r6,0x1FFF /* Set up upper BAT for 256M, access both */ - - lis r7,0xF000 /* Set RPN to last segment */ - ori r7,r7,0x0032 /* Set up lower BAT for 256M, access both, non-cachable */ - - mtspr DBAT0L,r7 /* Setup ROM and I/O mapped areas */ - mtspr DBAT0U,r6 /* Now do the upper DBAT */ - sync - - li r6,0x1FFF /* Set up upper BAT for 256M, access both */ - li r7,0x0012 /* Set up lower BAT for r/w access */ - - mtspr DBAT1L,r7 /* Set up an initial view of mainstore */ - mtspr DBAT1U,r6 /* Now do the upper DBAT */ - sync - -/* */ -/* Clean up SDR and segment registers */ -/* */ - - li r3,0 /* Clear a register */ - mtspr SDR1,r3 /* Clear SDR1 */ - - li r4,0 /* Clear index for segment registers */ - lis r5,0x1000 /* Set the segment indexer */ - -clearSR: mtsrin r3,r4 /* Zero out the SR */ - add. r4,r4,r5 /* Point to the next segment */ - bne- clearSR /* Keep going until we wrap back to 0 */ - - lis r5,HIGH_ADDR(EXT(FloatInit)) /* Get top of floating point init value */ - ori r5,r5,LOW_ADDR(EXT(FloatInit)) /* Slam bottom */ - lfd f0,0(r5) /* Initialize FP0 */ - fmr f1,f0 /* Ours in not */ - fmr f2,f0 /* to wonder why, */ - fmr f3,f0 /* ours is but to */ - fmr f4,f0 /* do or die! 
*/ - fmr f5,f0 - fmr f6,f0 - fmr f7,f0 - fmr f8,f0 - fmr f9,f0 - fmr f10,f0 - fmr f11,f0 - fmr f12,f0 - fmr f13,f0 - fmr f14,f0 - fmr f15,f0 - fmr f16,f0 - fmr f17,f0 - fmr f18,f0 - fmr f19,f0 - fmr f20,f0 - fmr f21,f0 - fmr f22,f0 - fmr f23,f0 - fmr f24,f0 - fmr f25,f0 - fmr f26,f0 - fmr f27,f0 - fmr f28,f0 - fmr f29,f0 - fmr f30,f0 - fmr f31,f0 - -/* */ -/* Whew, that was like, work, man! What a cleaning job, I should be neater */ -/* when I reset. */ -/* */ -/* Finally we can get some data DAT turned on and we can reset the interrupt */ -/* (which may have been done before we get here) and get into the bring up */ -/* handshakes. */ -/* */ -/* Note that here we need to use the actual V=R addresses for HammerHead */ -/* and PCI1 adr. There are no virtual mappings set up on this processor. */ -/* We need to switch once the firmware is initialized. Also, we don't know */ -/* where our control block is yet. */ -/* */ - - lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - - mfmsr r3 /* Get the MSR */ - ori r3,r3,0x0010 /* Turn data DAT on */ - mtmsr r3 /* DAT is on (well, almost) */ - isync /* Now it is for sure */ - - lis r8,HammerHead>>16 /* Point to the HammerHead controller */ - li r7,SecInt /* Get value to reset */ - stb r7,IntReg(r8) /* Reset the interrupt */ - eieio /* Fence it off */ - -/* */ -/* Now we can plant and harvest some bits. */ -/* */ - - lwz r6,MPPIlogCPU-MPPIwork(r12) /* Get the logical CPU address to assign */ - mfspr r7,pir /* Get the old PIR */ - rlwimi r7,r6,0,27,31 /* Copy all of the reserved parts */ - mtspr pir,r7 /* Set it */ - -/* */ -/* This little piece of code here determines if we are on the first or second version */ -/* of the two processor board. The old one shouldn't ever be shipped (well, maybe by */ -/* DayStar) but there are some around here. */ -/* */ -/* The newer version of the 2P board has a different state machine than the older one. */ -/* When we are in the board state we're in now, primary arbitration is turned on while */ -/* it is not until the next state in the old board. By checking the our bus address */ -/* (WhoAmI) we can tell. */ -/* */ - - lbz r7,WhoAmI(r8) /* Get the current bus master ID */ - andi. r7,r7,PriCPU /* Do we think we're the primary? 
*/ - beq On2Pv1 /* No, that means we're on the old 2P board */ - - lbz r7,MPPIstatus-MPPIwork(r12) /* Get the status byte */ - ori r7,r7,MPPI2Pv2 /* Show we're on the new board */ - stb r7,MPPIstatus-MPPIwork(r12) /* Set the board version */ - -On2Pv1: rlwinm r9,r6,5,23,26 /* Get index into the CPU specific area */ - - la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Index to processor */ - add r9,r9,r12 /* Get a base for our CPU specific area */ - - oris r6,r6,((MPPICBusy+MPPICOnline+MPPICStop)>>16)&0x0000FFFF /* Set CPU busy, online, stopped, */ - /* and busy set by himself */ - stw r6,MPPICStat(r9) /* Save the whole status word */ - - li r4,0x80 /* Get beginnings of a CPU address mask */ - lhz r11,MPPIinst-MPPIwork(r12) /* Get the installed and online status flags */ - srw r4,r4,r6 /* Make a mask */ - rlwimi r4,r4,8,16,23 /* Double up the mask for both flags */ - or r11,r11,r4 /* Set that we are installed and online */ - sync /* Make sure the main processor sees the rest of the stuff */ - - sth r11,MPPIinst-MPPIwork(r12) /* We're almost done, just need to set the TB */ - - lis r5,PCI1AdrReg>>16 /* Point to the PCI1 address register */ - li r4,0 /* Clear this out */ - stw r4,0(r5) /* Set PCI register to 0 to show we're ready for TB sync */ - eieio /* Fence it off */ - -Wait4TB: lwz r7,0(r5) /* Get the PCI1 reg to see if time to set time */ - mr. r7,r7 /* Is it ready yet? */ - beq Wait4TB /* Nope, wait for it... */ - isync /* No peeking... */ - - lwz r3,MPPITBsync-MPPIwork(r12) /* Get the high word of TB */ - lwz r4,MPPITBsync+4-MPPIwork(r12) /* Get the low word */ - -/* Note that we need no TB magic here 'cause they ain't running */ - - mttbu r3 /* Set the high part */ - mttbl r4 /* Set the low part */ - - rlwinm r6,r6,0,2,31 /* Clear the busy bit and passed */ - stw r6,MPPICStat(r9) /* Store the status word */ - - sync /* Make sure all is right with the world */ - - li r3,0 /* Set the init done signal */ - stw r3,0(r5) /* Feed the dog and let him out */ - sync /* Make sure this is pushed on out */ - - li r27,0x3040 /* Make MSR the way we likes it */ - mtmsr r27 /* Load 'em on in */ - isync - -/* */ -/* Jump on to the idle wait loop. We're online and ready, but we're */ -/* still in the reset state. We need to wait until we see a start signal. */ -/* */ -/* Note that the idle loop expects R9 to be our CPU-specific work area; */ -/* R12 is the base of the code and global work area */ -/* */ - - cmplw cr1,r11,r12 /* Make sure IdleWait knows to clear 'rupt request */ - b IdleWait - - -/******************************************************************************************************** */ -/******************************************************************************************************** */ -/* */ -/* Here is the interruption handler. */ -/* */ -/* What we'll do here is to get our registers into a standard state and figure out which */ -/* which processor we are on. The processors have pretty much the same code. The primary */ -/* will reset the the secondary to primary interruption bit and the secondary will reset the SecInt */ -/* flags. */ -/* */ -/* The primary to secondary interrupt is an exception interruption contolled by a bit in the */ -/* Hammerhead IntReg. The only bit in here is SecInt which is active low. Writing a 0 into the */ -/* bit (bit 0) yanks on the external pin on the secondary. Note that it is the only external */ -/* connected on the secondary. SecInt must be set to 1 to clear the interruption. 
On the old */ -/* 2P board, asserting the external interrupt causes a watchdog timer to start which expires unless */ -/* the interrupt request is withdrawn. On a 180MHz system the time to expire is about 256µS, */ -/* not very long. So, what we need to do is to time the assertion and if it has not been */ -/* reset, do it ourselves. Unfortunately we need to keep it deasserted for at least 12µS or the */ -/* watchdog will not stop. This leads to another problem: even if the secondary processor sees */ -/* the interrupt and deasserts the request itself, we cannot reassert before the 12µS limit, */ -/* else havoc will be wrought. We just gotta make sure. */ -/* */ -/* So, the secondary to primary interrupt is megafunky. The motherboard is wired with the */ -/* MACE ethernet chip's chip-select pin wired to Grand Central's external interrupt #10 pin. */ -/* This causes a transient interrupt whenever MACE is diddled. GC latches the interrupt into the */ -/* events register where we can see it and clear it. */ -/* */ -/******************************************************************************************************** */ -/******************************************************************************************************** */ - -GotSignal: mfspr r9,pir /* Get our processor ID */ - lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ - rlwinm r9,r9,5,23,26 /* Clean this up */ - ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ - la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Point into the processor control area */ - mflr r11 /* Save our return */ - add r9,r9,r12 /* Point right at the entry */ - -/* We'll come in here if we're stopped and found the 'rupt via polling */ -/* or we were kicked off by the PollSIGP call. We need */ -/* to wipe out the interrupt request no matter how we got here. */ - -SimRupt: mfmsr r4 /* Get the MSR */ - - la r8,MPPICPU0-MPPIwork(r12) /* Get address of main processor's work area */ - ori r5,r4,0x0010 /* Turn on the DDAT bit */ - cmplw cr0,r8,r9 /* Are we on the main? */ - cmplw cr1,r4,r4 /* Set CR1 to indicate we've cleared any 'rupts */ - bne SecondarySig /* Go if we are not on main processor... */ - -/* */ -/* Handle the secondary to primary signal */ -/* */ - -PrimarySig: - - lwz r8,MPPIGrandC-MPPIwork(r12) /* Get the address of the Grand Central area base */ - mtmsr r5 /* Turn on DDAT */ - isync /* Now don't be usin' dem speculative executions */ - li r7,EventsReg /* Get address of the interrupt events register */ - lwbrx r6,r7,r8 /* Grab the interruption events */ - - lis r5,0x4000 /* Get the mask for the Ext10 pin */ - and. r0,r6,r5 /* See if our bit is on */ - li r7,ClearReg /* Point to the interruption clear register */ - - beq+ SkpClr /* Skip the clear 'cause it's supposed to be soooo slow... */ - - stwbrx r5,r7,r8 /* Reset the interrupt latch */ - eieio /* Fence off the last 'rupt */ - -SkpClr: mtmsr r4 /* Set MSR to entry state */ - isync /* Make sure we ain't gunked up no future storage references */ - - bne+ IdleWait /* Go join up and decode the function... */ - - mtlr r11 /* Restore return address */ - andc. r0,r6,r5 /* Any other bits on? */ - li r3,kMPVainInterrupt /* Assume we got nothing */ - beqlr /* We got nothing, tell 'em to eat 'rupt... */ - li r3,kMPIOInterruptPending /* Tell them to process an I/O 'rupt */ - blr /* Ignore the interrupt... 
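PrimarySig above is the receiving half of the MACE trick: fetch Grand Central's little-endian events register with lwbrx, test the Ext10 bit (0x40000000 after the byte reversal), clear the latch through the clear register, and tell the caller whether any other interrupt source is still pending. Roughly, in C; the byte-swap builtin and word indexing stand in for lwbrx/stwbrx against the EventsReg/ClearReg offsets:

    #include <stdint.h>

    #define EVENTS_REG (0x20 / 4)        /* interruption events (latched) */
    #define CLEAR_REG  (0x28 / 4)        /* interruption clear */
    #define EXT10      0x40000000u       /* MACE chip-select pin, post-swap */

    /* Returns 1 when the kernel still has an I/O interrupt to process,
       0 when the event was only our signal (the kMPVainInterrupt case). */
    static int
    primary_signal(volatile uint32_t *gc)
    {
        uint32_t events = __builtin_bswap32(gc[EVENTS_REG]); /* lwbrx */
        if (events & EXT10)
            gc[CLEAR_REG] = __builtin_bswap32(EXT10);        /* stwbrx */
        return (events & ~EXT10) != 0;
    }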
-
-/* */
-/* Handle the primary to secondary signal */
-/* */
-
-SecondarySig:
-			lwz	r3,MPPICStat(r9)			/* Pick up our status word */
-			lis	r8,HammerHead>>16			/* Get the address of the hammerhead (used during INIT on non-main processor) */
-			rlwinm.	r3,r3,0,3,3				/* Check if we are already "in-the-know" (all started up) */
-			beq-	UseAltAddr				/* Nope, use hardcoded Hammerhead address */
-			lwz	r8,MPPIHammer-MPPIwork(r12)		/* Get the kernel's HammerHead area */
-
-UseAltAddr:	mtmsr	r5					/* Turn on DDAT */
-			isync						/* Now don't be usin' dem speculative executions */
-			li	r0,SecInt				/* Get the Secondary interrupt bit */
-			stb	r0,IntReg(r8)				/* Reset the interrupt request */
-			mtmsr	r4					/* Set MSR to entry state */
-			eieio						/* Fence me in */
-			isync						/* Make sure we ain't gunked up no future storage references */
-
-			b	IdleWait				/* Go decode this request... */
-
-/******************************************************************************************************** */
-/******************************************************************************************************** */
-/* */
-/* This is the idle wait. */
-/* */
-/* We're stuck in here so long as we are stopped or reset. */
-/* All functions except for "start" pass back through here. Start is weird because */
-/* it is an initial thing, i.e., we can't have gotten here via any kind of exception, */
-/* so there is no state to restore. The "started" code is expected to require no known */
-/* state and will take care of all initialization/fixup required. */
-/* */
-/******************************************************************************************************** */
-/******************************************************************************************************** */
-
-BadRuptState:							/* We don't do anything special yet for a bad state, just eat request */
-KillBusy:	rlwinm	r3, r3, 0, 2, 31			/* Remove the message pending flags. */
-			rlwinm	r3, r3, 0, 24, 16		/* Set the function to idle. */
-			stw	r3,MPPICStat(r9)			/* Update/unlock the status word. */
-
-ReenterWait:	cmplwi	cr1,r9,0				/* Turn off the 'rupt cleared flag */
-
-IdleWait:	lis	r4,MPPICBusy>>16			/* Get busy status */
-
-SpinIdle:
-			lwz	r3,MPPICStat(r9)			/* Pick up our status word */
-
-			and.	r5,r3,r4				/* Isolate the busy bit */
-			lis	r6,MPPICPass>>16			/* Get the passed busy flag */
-			bne	TooBusy					/* Work, work, work, that's all we do is work... */
-
-			rlwinm.	r5,r3,0,4,4				/* See if we are stopped */
-			lwz	r8,MPPICPriv(r9)			/* Pick up our private flags */
-			bne-	SpinIdle				/* Yeah, keep spinning... */
-
-
-/* */
-/* Restore the state and get outta here. Now, we shouldn't be in a reset state and not be stopped, */
-/* so we can go ahead and safely return up a level because the state exists. If we are reset, no state exists */
-/* and we should always be stopped. */
-/* */
-
-			rlwinm	r4, r8, 1, 0, 0				/* Get the explicit run bit, shifted left one. */
-			rlwinm.	r5, r8, 0, 0, 0				/* See if there is a SIGP signal pending */
-			and	r4, r8, r4				/* Turn off the SIGP pending bit if this was not an explicit run */
-								/* Also the explicit run bit is cleared */
-			mtlr	r11					/* Restore the return point */
-			li	r3,kMPVainInterrupt			/* Tell the interrupt handler to ignore the interrupt */
-			stw	r4,MPPICPriv(r9)			/* Set that flag back for later */
-			beqlr						/* Time to leave if we ate the 'rupt... */
-
-			li	r3,kMPSignalPending			/* Set that there is a SIGP interruption pending */
-
-			blr						/* Go away, let our caller handle this thing... QED!!!!!!!!! */
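For orientation, the shape of that idle loop in C. The mask values follow from the rlwinm bit numbers used above (busy = bit 0, passed = bit 1, stopped = bit 4 of the status word; SIGP pending = bit 0 of the private flags); everything else, including the helper names and simplified exit path, is a hypothetical sketch rather than the firmware's real interface.

#include <stdint.h>

#define MPPIC_BUSY  0x80000000u     /* bit 0: a request is being posted   */
#define MPPIC_PASS  0x40000000u     /* bit 1: the request is ready to run */
#define MPPIC_STOP  0x08000000u     /* bit 4: we are held stopped         */
#define PRIV_SIGP   0x80000000u     /* private flags bit 0: SIGP pending  */

enum { kVainInterrupt, kSignalPending };    /* stand-ins for the kMP* codes */

static uint32_t decode_request(uint32_t stat)
{
    /* The IFuncTable dispatch (decoded just below) lives here; KillBusy
       drops the busy/passed bits before control returns to the spin. */
    return stat & ~(MPPIC_BUSY | MPPIC_PASS);
}

/* Spin while stopped, servicing any request whose busy+passed bits are both
   up; once released, consume and report a pending SIGP to the caller. */
static int idle_wait(volatile uint32_t *stat, volatile uint32_t *priv)
{
    uint32_t s;
    do {
        s = *stat;
        if ((s & MPPIC_BUSY) && (s & MPPIC_PASS))
            *stat = s = decode_request(s);      /* the TooBusy path */
    } while ((s & MPPIC_BUSY) || (s & MPPIC_STOP));

    uint32_t p = *priv;
    *priv = p & ~PRIV_SIGP;                     /* eat the pending flag */
    return (p & PRIV_SIGP) ? kSignalPending : kVainInterrupt;
}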
-
-/* */
-/*	 QQQQQ       EEEEEEEEEE  DDDDDDDDD */
-/*	QQQQQQQQQ    EEEEEEEEEE  DDDDDDDDDDD */
-/*	QQQQ   QQQQ  EEEE        DDD     DDD */
-/*	QQQQ   QQQQ  EEEEEEEEEE  DDD     DDD */
-/*	QQQQ Q QQQQ  EEEEEEEEEE  DDD     DDD */
-/*	QQQQ  QQQQQ  EEEE        DDD     DDD */
-/*	QQQQQQQQQQQ  EEEEEEEEEE  DDDDDDDDDDD */
-/*	 QQQQQ  QQQ  EEEEEEEEEE  DDDDDDDDD */
-/* */
-/* (I finished here) */
-/* */
-
-
-/* */
-/* This is where we decode the function and do what's right. */
-/* First we need to check if it's really time to do something. */
-/* */
-
-TooBusy:	and.	r5,r3,r6				/* See if the passed flag is on */
-			beq	SpinIdle				/* No, not yet, try the whole smear again... */
-
-			beq+	cr1,KeepRupt				/* Don't clear 'rupt if we already did (or entered via RunSIGPRun) */
-
-			lwz	r5,MPPICPriv(r9)			/* Get the private flags */
-			rlwinm.	r5, r5, 0, 1, 1				/* Did we enter via RunSIGPRun? */
-			beq	SimRupt					/* Nope, 's'ok, go clear physical 'rupt... */
-
-KeepRupt:
-			bl	GetOurBase				/* Get our address */
-GetOurBase:	rlwinm	r4,r3,26,22,29				/* Get the opcode index * 4 */
-			mflr	r12					/* Get the base address */
-			la	r7,LOW_ADDR(IFuncTable-GetOurBase)(r12)	/* Point to the function table */
-
-			cmplwi	cr0,r4,7*4				/* See if they sent us some bogus junk */
-								/* Change 7 if we add more functions */
-			add	r7,r7,r4				/* Point right at the entry */
-			bgt-	KillBusy				/* Bad request code, reset busy and eat it... */
-
-			mtlr	r7					/* Set up the LR */
-
-			blr						/* Go execute the function... */
-
-IFuncTable:
-			b	KillBusy				/* This handles the signal in vain... */
-			b	IStart					/* This handles the start function */
-			b	IResume					/* This handles the resume function */
-			b	IStop					/* This handles the stop function */
-			b	ISIGP					/* This handles the SIGP function */
-			b	IStatus					/* This handles the store status function */
-			b	ITBsync					/* This handles the synchronize timer base function */
-			b	IReset					/* This handles the reset function */
-
-/******************************************************************************************************** */
-/******************************************************************************************************** */
-/* */
-/* Here are the functions handled at interrupt time */
-/* */
-/******************************************************************************************************** */
-/******************************************************************************************************** */
-
-/******************************************************************************************************** */
-/* */
-/* The Start function. This guy requires that the processor be in the reset and online state. */
-/* */
-/******************************************************************************************************** */
-
-IStart:		lis	r4,MPPICOnline>>16			/* Get bits required to be on */
-			isync						/* Make sure we haven't gone past here */
-			and	r6,r3,r4				/* See if they are on */
-			cmplw	cr1,r6,r4				/* Are they all on? */
-			lwz	r4,MPPICParm0(r9)			/* Get the physical address of the code to go to */
-			bne-	cr1,BadRuptState			/* Some required state bits are off */
-			rlwinm	r3,r3,0,2,31				/* Kill the busy bits */
-			rlwinm	r3,r3,0,24,15				/* Set the function to idle */
-			oris	r3,r3,MPPICReady>>16			/* Set ready state */
-			rlwinm	r3,r3,0,5,3				/* Clear out the stop bit */
-			mtlr	r4					/* Set the LR */
-			stw	r3,MPPICStat(r9)			/* Clear out the status flags */
-			lwz	r3,MPPICParm2(r9)			/* Get pass-thru parameter */
-			blrl						/* Start up the code... */
-/* */
-/* The rules for coming back here via BLR are just opposite the normal way: you can trash R0-R3 and */
-/* R13-R31, all the CRs; don't touch SPRG1 or SPRG3, the MSR, the SRs or BATs 0 and 1. */
-/* Follow these simple rules and you're allowed back; don't follow them and die. */
-/* We only come back here if there is some kind of startup failure so's we can try again later */
-/* */
-
-			lwz	r3,MPPICStat(r9)			/* Get back the status word */
-			cmplw	cr1,r4,r4				/* Show that we have already taken care of the 'rupt */
-			rlwinm	r3,r3,0,4,2				/* Reset the ready bit */
-			b	KillBusy				/* Back into the fold... */
-
-/******************************************************************************************************** */
-/* */
-/* The Resume function. This guy requires that the processor be online and ready. */
-/* */
-/******************************************************************************************************** */
-
-IResume:	lis	r4,(MPPICOnline+MPPICReady)>>16		/* Get states required to be set */
-			and	r6,r3,r4				/* See if they are on */
-			cmplw	cr0,r6,r4				/* Are they all on? */
-			bne-	BadRuptState				/* Some required state bits are off */
-			rlwinm	r3,r3,0,5,3				/* Clear out the stop bit */
-			b	KillBusy				/* Get going... */
-
-/******************************************************************************************************** */
-/* */
-/* The Stop function. All we care about here is that the guy is online. */
-/* */
-/******************************************************************************************************** */
-
-IStop:		lis	r4,MPPICOnline>>16			/* All we care about is if we are online or not */
-			and.	r6,r3,r4				/* See if we are online */
-			beq-	BadRuptState				/* Some required state bits are off */
-			oris	r3,r3,MPPICStop>>16			/* Set the stop bit */
-			b	KillBusy				/* Get stopped... */
-
-
-/******************************************************************************************************** */
-/* */
-/* The SIGP function. All we care about here is that the guy is online. */
-/* */
-/******************************************************************************************************** */
-
-ISIGP:		lis	r4,(MPPICOnline+MPPICReady)>>16		/* Get states required to be set */
-			and	r6,r3,r4				/* See if they are on */
-			lwz	r7,MPPICPriv(r9)			/* Get the private flags */
-			cmplw	cr0,r6,r4				/* Are they all on? */
-			oris	r6,r7,(MPPICSigp>>16)&0x0000FFFF	/* Set the SIGP pending bit */
-			bne-	BadRuptState				/* Some required state bits are off */
-			lwz	r4,MPPICParm0(r9)			/* Get the SIGP parameter */
-			stw	r6,MPPICPriv(r9)			/* Stick the pending bit back */
-			stw	r4,MPPICParm0BU(r9)			/* Back up parm 0 so it is safe once we unlock */
-			b	KillBusy				/* Get going... */
-
-/******************************************************************************************************** */
-/* */
-/* The store status function. This guy requires that the processor be in the stopped state. */
-/* */
-/******************************************************************************************************** */
-
-IStatus:	lis	r4,MPPICOnline>>16			/* All we care about is if we are online or not */
-			and.	r6,r3,r4				/* See if we are online */
-			isync						/* Make sure we haven't gone past here */
-			beq-	BadRuptState				/* Some required state bits are off */
-			lwz	r4,MPPICParm0(r9)			/* Get the status area physical address */
-			rlwinm.	r6,r3,0,3,3				/* Test processor ready */
-
-			beq	INotReady				/* Not ready, don't assume valid exception save area */
-			bl	StoreStatus				/* Go store off all the registers 'n' stuff */
-			b	KillBusy				/* All done... */
-
-INotReady:
-			lis	r7,0xDEAD				/* Get the upper half of 0xDEADF1D0 */
-			ori	r7,r7,0xF1D0				/* Get 0xDEADF1D0 */
-			stw	r7,CSAgpr+(0*4)(r4)			/* Store invalid R0 */
-			stw	r7,CSAgpr+(1*4)(r4)			/* Store invalid R1 */
-			stw	r7,CSAgpr+(2*4)(r4)			/* Store invalid R2 */
-			stw	r7,CSAgpr+(3*4)(r4)			/* Store invalid R3 */
-			stw	r7,CSAgpr+(4*4)(r4)			/* Store invalid R4 */
-			stw	r7,CSAgpr+(5*4)(r4)			/* Store invalid R5 */
-			stw	r7,CSAgpr+(6*4)(r4)			/* Store invalid R6 */
-			stw	r7,CSAgpr+(7*4)(r4)			/* Store invalid R7 */
-			stw	r7,CSAgpr+(8*4)(r4)			/* Store invalid R8 */
-			stw	r7,CSAgpr+(9*4)(r4)			/* Store invalid R9 */
-			stw	r7,CSAgpr+(10*4)(r4)			/* Store invalid R10 */
-			stw	r7,CSAgpr+(11*4)(r4)			/* Store invalid R11 */
-			stw	r7,CSAgpr+(12*4)(r4)			/* Store invalid R12 */
-			stw	r13,CSAgpr+(13*4)(r4)			/* Save general registers */
-			stw	r14,CSAgpr+(14*4)(r4)			/* Save general registers */
-			stw	r15,CSAgpr+(15*4)(r4)			/* Save general registers */
-			stw	r16,CSAgpr+(16*4)(r4)			/* Save general registers */
-			stw	r17,CSAgpr+(17*4)(r4)			/* Save general registers */
-			stw	r18,CSAgpr+(18*4)(r4)			/* Save general registers */
-			stw	r19,CSAgpr+(19*4)(r4)			/* Save general registers */
-			stw	r20,CSAgpr+(20*4)(r4)			/* Save general registers */
-			stw	r21,CSAgpr+(21*4)(r4)			/* Save general registers */
-			stw	r22,CSAgpr+(22*4)(r4)			/* Save general registers */
-			stw	r23,CSAgpr+(23*4)(r4)			/* Save general registers */
-			stw	r24,CSAgpr+(24*4)(r4)			/* Save general registers */
-			stw	r25,CSAgpr+(25*4)(r4)			/* Save general registers */
-			stw	r26,CSAgpr+(26*4)(r4)			/* Save general registers */
-			stw	r27,CSAgpr+(27*4)(r4)			/* Save general registers */
-			stw	r28,CSAgpr+(28*4)(r4)			/* Save general registers */
-			stw	r29,CSAgpr+(29*4)(r4)			/* Save general registers */
-			stw	r30,CSAgpr+(30*4)(r4)			/* Save general registers */
-			stw	r31,CSAgpr+(31*4)(r4)			/* Save general registers */
-			bl	StoreLiveStatus
-			b	KillBusy
-
-/* */
-/* Save the whole status. Lots of busy work. */
-/* Anything marked unclean is of the devil and should be shunned. Actually, it depends upon */
-/* knowledge of firmware control areas and is no good for a plug in. But, we've sacrificed the */
-/* white ram and are standing within a circle made of his skin, so we can dance with the devil safely. 
*/ -/* */ - -StoreStatus: - mfspr r10,sprg0 /* Get the pointer to the exception save area (unclean) */ - - lwz r5,saver0(r13) /* Get R0 (unclean) */ - lwz r6,saver1(r13) /* Get R1 (unclean) */ - lwz r7,saver2(r13) /* Get R2 (unclean) */ - stw r5,CSAgpr+(0*4)(r4) /* Save R0 */ - stw r6,CSAgpr+(1*4)(r4) /* Save R1 */ - stw r7,CSAgpr+(2*4)(r4) /* Save R2 */ - lwz r5,saver3(r13) /* Get R3 (unclean) */ - lwz r6,saver4(r13) /* Get R4 (unclean) */ - lwz r7,saver5(r13) /* Get R5 (unclean) */ - stw r5,CSAgpr+(3*4)(r4) /* Save R3 */ - stw r6,CSAgpr+(4*4)(r4) /* Save R4 */ - stw r7,CSAgpr+(5*4)(r4) /* Save R5 */ - lwz r5,saver6(r13) /* Get R6 (unclean) */ - lwz r6,saver7(r13) /* Get R7 (unclean) */ - lwz r7,saver8(r13) /* Get R8 (unclean) */ - stw r5,CSAgpr+(6*4)(r4) /* Save R6 */ - stw r6,CSAgpr+(7*4)(r4) /* Save R7 */ - stw r7,CSAgpr+(8*4)(r4) /* Save R8 */ - lwz r5,saver9(r13) /* Get R9 (unclean) */ - lwz r6,saver10(r13) /* Get R10 (unclean) */ - lwz r7,saver11(r13) /* Get R11 (unclean) */ - stw r5,CSAgpr+(9*4)(r4) /* Save R9 */ - stw r6,CSAgpr+(10*4)(r4) /* Save R10 */ - lwz r5,saver12(r13) /* Get R12 (unclean) */ - stw r7,CSAgpr+(11*4)(r4) /* Save R11 */ - stw r5,CSAgpr+(12*4)(r4) /* Save R12 */ - - lwz r5,saver13(r13) /* Get R13 (unclean) */ - lwz r6,saver14(r13) /* Get R14 (unclean) */ - lwz r7,saver15(r13) /* Get R15 (unclean) */ - stw r5,CSAgpr+(13*4)(r4) /* Save R13 */ - stw r6,CSAgpr+(14*4)(r4) /* Save R14 */ - stw r7,CSAgpr+(15*4)(r4) /* Save R15 */ - lwz r5,saver16(r13) /* Get R16 (unclean) */ - lwz r6,saver17(r13) /* Get R17 (unclean) */ - lwz r7,saver18(r13) /* Get R18 (unclean) */ - stw r5,CSAgpr+(16*4)(r4) /* Save R16 */ - stw r6,CSAgpr+(17*4)(r4) /* Save R17 */ - stw r7,CSAgpr+(18*4)(r4) /* Save R18 */ - lwz r5,saver19(r13) /* Get R19 (unclean) */ - lwz r6,saver20(r13) /* Get R20 (unclean) */ - lwz r7,saver21(r13) /* Get R21 (unclean) */ - stw r5,CSAgpr+(19*4)(r4) /* Save R19 */ - stw r6,CSAgpr+(20*4)(r4) /* Save R20 */ - stw r7,CSAgpr+(21*4)(r4) /* Save R21 */ - lwz r5,saver22(r13) /* Get R22 (unclean) */ - lwz r6,saver23(r13) /* Get R23 (unclean) */ - lwz r7,saver24(r13) /* Get R24 (unclean) */ - stw r5,CSAgpr+(22*4)(r4) /* Save R22 */ - stw r6,CSAgpr+(23*4)(r4) /* Save R23*/ - stw r7,CSAgpr+(24*4)(r4) /* Save R24 */ - lwz r5,saver25(r13) /* Get R25 (unclean) */ - lwz r6,saver26(r13) /* Get R26 (unclean) */ - lwz r7,saver27(r13) /* Get R27 (unclean) */ - stw r5,CSAgpr+(25*4)(r4) /* Save R25 */ - stw r6,CSAgpr+(26*4)(r4) /* Save R26 */ - stw r7,CSAgpr+(27*4)(r4) /* Save R27 */ - - lwz r5,saver28(r13) /* Get R28 (unclean) */ - lwz r6,saver29(r13) /* Get R29 (unclean) */ - lwz r7,saver30(r13) /* Get R30 (unclean) */ - stw r5,CSAgpr+(28*4)(r4) /* Save R28 */ - lwz r5,saver31(r13) /* Get R31(unclean) */ - stw r6,CSAgpr+(29*4)(r4) /* Save R29 */ - stw r7,CSAgpr+(30*4)(r4) /* Save R30 */ - stw r5,CSAgpr+(31*4)(r4) /* Save R31 */ - -StoreLiveStatus: - mfmsr r5 /* Get the current MSR */ - ori r6,r5,0x2000 /* Turn on floating point instructions */ - mtmsr r6 /* Turn them on */ - isync /* Make sure they're on */ - - stfd f0,CSAfpr+(0*8)(r4) /* Save floating point registers */ - stfd f1,CSAfpr+(1*8)(r4) /* Save floating point registers */ - stfd f2,CSAfpr+(2*8)(r4) /* Save floating point registers */ - stfd f3,CSAfpr+(3*8)(r4) /* Save floating point registers */ - stfd f4,CSAfpr+(4*8)(r4) /* Save floating point registers */ - stfd f5,CSAfpr+(5*8)(r4) /* Save floating point registers */ - stfd f6,CSAfpr+(6*8)(r4) /* Save floating point registers */ - stfd f7,CSAfpr+(7*8)(r4) /* Save floating 
point registers */
-			stfd	f8,CSAfpr+(8*8)(r4)			/* Save floating point registers */
-			stfd	f9,CSAfpr+(9*8)(r4)			/* Save floating point registers */
-			stfd	f10,CSAfpr+(10*8)(r4)			/* Save floating point registers */
-			stfd	f11,CSAfpr+(11*8)(r4)			/* Save floating point registers */
-			stfd	f12,CSAfpr+(12*8)(r4)			/* Save floating point registers */
-			stfd	f13,CSAfpr+(13*8)(r4)			/* Save floating point registers */
-			stfd	f14,CSAfpr+(14*8)(r4)			/* Save floating point registers */
-			stfd	f15,CSAfpr+(15*8)(r4)			/* Save floating point registers */
-			stfd	f16,CSAfpr+(16*8)(r4)			/* Save floating point registers */
-			stfd	f17,CSAfpr+(17*8)(r4)			/* Save floating point registers */
-			stfd	f18,CSAfpr+(18*8)(r4)			/* Save floating point registers */
-			stfd	f19,CSAfpr+(19*8)(r4)			/* Save floating point registers */
-			stfd	f20,CSAfpr+(20*8)(r4)			/* Save floating point registers */
-			stfd	f21,CSAfpr+(21*8)(r4)			/* Save floating point registers */
-			stfd	f22,CSAfpr+(22*8)(r4)			/* Save floating point registers */
-			stfd	f23,CSAfpr+(23*8)(r4)			/* Save floating point registers */
-			stfd	f24,CSAfpr+(24*8)(r4)			/* Save floating point registers */
-			stfd	f25,CSAfpr+(25*8)(r4)			/* Save floating point registers */
-			stfd	f26,CSAfpr+(26*8)(r4)			/* Save floating point registers */
-			stfd	f27,CSAfpr+(27*8)(r4)			/* Save floating point registers */
-			stfd	f28,CSAfpr+(28*8)(r4)			/* Save floating point registers */
-			stfd	f29,CSAfpr+(29*8)(r4)			/* Save floating point registers */
-			stfd	f30,CSAfpr+(30*8)(r4)			/* Save floating point registers */
-			stfd	f31,CSAfpr+(31*8)(r4)			/* Save floating point registers */
-
-			mffs	f1					/* Get the FPSCR */
-			stfd	f1,CSAfpscr-4(r4)			/* Save the whole thing (we'll overlay the first half with CR later) */
-
-			lfd	f1,CSAfpr+(1*8)(r4)			/* Restore F1 */
-
-			mtmsr	r5					/* Put the floating point back to what it was before */
-			isync						/* Wait for it */
-
-			lwz	r6,savecr(r13)				/* Get the old CR (unclean) */
-			stw	r6,CSAcr(r4)				/* Save the CR */
-
-			mfxer	r6					/* Get the XER */
-			stw	r6,CSAxer(r4)				/* Save the XER */
-
-			lwz	r6,savelr(r13)				/* Get the old LR (unclean) */
-			stw	r6,CSAlr(r4)				/* Save the LR */
-
-			mfctr	r6					/* Get the CTR */
-			stw	r6,CSActr(r4)				/* Save the CTR */
-
-STtbase:	mftbu	r5					/* Get the upper timebase */
-			mftb	r6					/* Get the lower */
-			mftbu	r7					/* Get the top again */
-			cmplw	cr0,r5,r7				/* Did it tick? */
-			bne-	STtbase					/* Yeah, do it again... */
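That read-high / read-low / re-read-high dance is the standard way to pull a coherent 64-bit value out of the two 32-bit timebase halves: if the upper half ticked between the two reads, throw the sample away and try again. The same loop in C, with PowerPC inline asm shown purely for illustration:

#include <stdint.h>

/* Illustrative wrappers for the timebase SPR reads (PowerPC only). */
static inline uint32_t read_tbu(void) { uint32_t v; __asm__ volatile("mftbu %0" : "=r"(v)); return v; }
static inline uint32_t read_tbl(void) { uint32_t v; __asm__ volatile("mftb %0"  : "=r"(v)); return v; }

/* Retry until the upper half is stable across the lower-half read, so both
   halves are guaranteed to belong to the same 64-bit instant. */
static uint64_t read_timebase(void)
{
    uint32_t hi, lo, hi2;
    do {
        hi  = read_tbu();
        lo  = read_tbl();
        hi2 = read_tbu();
    } while (hi != hi2);
    return ((uint64_t)hi << 32) | lo;
}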
-
-			mfdec	r7					/* Get the decrementer (make it at about the same time as the TB) */
-			stw	r7,CSAdec(r4)				/* Save the decrementer */
-
-
-			stw	r5,CSAtbu(r4)				/* Stash the top part */
-			stw	r6,CSAtbl(r4)				/* Stash the lower part */
-
-			lwz	r5,savesrr1(r13)			/* SRR1 at exception is as close as we get to the MSR (unclean) */
-			lwz	r6,savesrr0(r13)			/* Get SRR0 also */
-			stw	r5,CSAmsr(r4)				/* Save the MSR */
-			stw	r6,CSApc(r4)				/* Save the PC */
-			stw	r5,CSAsrr1(r4)				/* Set SRR1 also */
-			stw	r6,CSAsrr0(r4)				/* Save SRR0 */
-
-			mfpvr	r5					/* Get the PVR */
-			stw	r5,CSApvr(r4)				/* Save the PVR */
-
-			mfspr	r5,pir					/* Get the PIR */
-			stw	r5,CSApir(r4)				/* Save the PIR */
-
-			mfspr	r5,ibat0u				/* Get the upper IBAT0 */
-			mfspr	r6,ibat0l				/* Get the lower IBAT0 */
-			stw	r5,CSAibat+(0*8+0)(r4)			/* Save the upper IBAT0 */
-			stw	r6,CSAibat+(0*8+4)(r4)			/* Save the lower IBAT0 */
-
-			mfspr	r5,ibat1u				/* Get the upper IBAT1 */
-			mfspr	r6,ibat1l				/* Get the lower IBAT1 */
-			stw	r5,CSAibat+(1*8+0)(r4)			/* Save the upper IBAT1 */
-			stw	r6,CSAibat+(1*8+4)(r4)			/* Save the lower IBAT1 */
-
-			mfspr	r5,ibat2u				/* Get the upper IBAT2 */
-			mfspr	r6,ibat2l				/* Get the lower IBAT2 */
-			stw	r5,CSAibat+(2*8+0)(r4)			/* Save the upper IBAT2 */
-			stw	r6,CSAibat+(2*8+4)(r4)			/* Save the lower IBAT2 */
-
-			mfspr	r5,ibat3u				/* Get the upper IBAT3 */
-			mfspr	r6,ibat3l				/* Get the lower IBAT3 */
-			stw	r5,CSAibat+(3*8+0)(r4)			/* Save the upper IBAT3 */
-			stw	r6,CSAibat+(3*8+4)(r4)			/* Save the lower IBAT3 */
-
-			mfspr	r5,dbat0u				/* Get the upper DBAT0 */
-			mfspr	r6,dbat0l				/* Get the lower DBAT0 */
-			stw	r5,CSAdbat+(0*8+0)(r4)			/* Save the upper DBAT0 */
-			stw	r6,CSAdbat+(0*8+4)(r4)			/* Save the lower DBAT0 */
-
-			mfspr	r5,dbat1u				/* Get the upper DBAT1 */
-			mfspr	r6,dbat1l				/* Get the lower DBAT1 */
-			stw	r5,CSAdbat+(1*8+0)(r4)			/* Save the upper DBAT1 */
-			stw	r6,CSAdbat+(1*8+4)(r4)			/* Save the lower DBAT1 */
-
-			mfspr	r5,dbat2u				/* Get the upper DBAT2 */
-			mfspr	r6,dbat2l				/* Get the lower DBAT2 */
-			stw	r5,CSAdbat+(2*8+0)(r4)			/* Save the upper DBAT2 */
-			stw	r6,CSAdbat+(2*8+4)(r4)			/* Save the lower DBAT2 */
-
-			mfspr	r5,dbat3u				/* Get the upper DBAT3 */
-			mfspr	r6,dbat3l				/* Get the lower DBAT3 */
-			stw	r5,CSAdbat+(3*8+0)(r4)			/* Save the upper DBAT3 */
-			stw	r6,CSAdbat+(3*8+4)(r4)			/* Save the lower DBAT3 */
-
-			mfsdr1	r5					/* Get the SDR1 */
-			stw	r5,CSAsdr1(r4)				/* Save the SDR1 */
-
-			mfsr	r5,sr0					/* Get SR 0 */
-			mfsr	r6,sr1					/* Get SR 1 */
-			mfsr	r7,sr2					/* Get SR 2 */
-			stw	r5,CSAsr+(0*4)(r4)			/* Save SR 0 */
-			stw	r6,CSAsr+(1*4)(r4)			/* Save SR 1 */
-			mfsr	r5,sr3					/* Get SR 3 */
-			mfsr	r6,sr4					/* Get SR 4 */
-			stw	r7,CSAsr+(2*4)(r4)			/* Save SR 2 */
-			mfsr	r7,sr5					/* Get SR 5 */
-			stw	r5,CSAsr+(3*4)(r4)			/* Save SR 3 */
-			stw	r6,CSAsr+(4*4)(r4)			/* Save SR 4 */
-			mfsr	r5,sr6					/* Get SR 6 */
-			mfsr	r6,sr7					/* Get SR 7 */
-			stw	r7,CSAsr+(5*4)(r4)			/* Save SR 5 */
-			mfsr	r7,sr8					/* Get SR 8 */
-			stw	r5,CSAsr+(6*4)(r4)			/* Save SR 6 */
-			stw	r6,CSAsr+(7*4)(r4)			/* Save SR 7 */
-			mfsr	r5,sr9					/* Get SR 9 */
-			mfsr	r6,sr10					/* Get SR 10 */
-			stw	r7,CSAsr+(8*4)(r4)			/* Save SR 8 */
-			mfsr	r7,sr11					/* Get SR 11 */
-			stw	r5,CSAsr+(9*4)(r4)			/* Save SR 9 */
-			stw	r6,CSAsr+(10*4)(r4)			/* Save SR 10 */
-			mfsr	r5,sr12					/* Get SR 12 */
-			mfsr	r6,sr13					/* Get SR 13 */
-			stw	r7,CSAsr+(11*4)(r4)			/* Save SR 11 */
-			mfsr	r7,sr14					/* Get SR 14 */
-			stw	r5,CSAsr+(12*4)(r4)			/* Save SR 12 */
-			stw	r6,CSAsr+(13*4)(r4)			/* Save SR 13 */
-			mfsr	r5,sr15					/* Get SR 15 */
-			stw	r7,CSAsr+(14*4)(r4)			/* Save SR 14 */
-			stw	r5,CSAsr+(15*4)(r4)			/* Save SR 15 */
-
-			mfdar	r6					/* Get the DAR */
-			stw	r6,CSAdar(r4)				/* Save it */
-
-			mfdsisr	r5					/* Get the DSISR */
-			stw	r5,CSAdsisr(r4)				/* Save it */
-
-			stw	r10,CSAsprg+(1*4)(r4)			/* Save SPRG1 */
-			mfspr	r7,sprg0				/* Get SPRG0 */
-			mfspr	r6,sprg2				/* Get SPRG2 */
-			stw	r7,CSAsprg+(0*4)(r4)			/* Save SPRG0 */
-			mfspr	r5,sprg3				/* Get SPRG3 */
-			stw	r6,CSAsprg+(2*4)(r4)			/* Save SPRG2 */
-			stw	r5,CSAsprg+(3*4)(r4)			/* Save SPRG3 */
-
-			mfspr	r6,1013					/* Get the DABR */
-			mfspr	r7,1010					/* Get the IABR */
-			stw	r6,CSAdabr(r4)				/* Save the DABR */
-			stw	r7,CSAiabr(r4)				/* Save the IABR */
-
-			mfspr	r5,282					/* Get the EAR */
-			stw	r5,CSAear(r4)				/* Save the EAR */
-
-			lis	r7,0xDEAD				/* Get 0xDEAD */
-			ori	r7,r7,0xF1D0				/* Get 0xDEADF1D0 */
-
-			mfpvr	r5					/* Get the processor type */
-			rlwinm	r5,r5,16,16,31				/* Isolate the processor */
-			cmplwi	cr1,r5,4				/* Set CR1_EQ if this is a plain 604, something else if it's a 604E */
-
-			mfspr	r6,hid0					/* Get HID0 */
-			mr	r5,r7					/* Assume 604 */
-			beq	cr1,NoHID1				/* It is... */
-			mfspr	r5,hid1					/* Get the HID1 */
-
-NoHID1:		stw	r6,CSAhid+(0*4)(r4)			/* Save HID0 */
-			stw	r5,CSAhid+(1*4)(r4)			/* Save HID1 */
-			stw	r7,CSAhid+(2*4)(r4)			/* Save HID2 */
-			stw	r7,CSAhid+(3*4)(r4)			/* Save HID3 */
-			stw	r7,CSAhid+(4*4)(r4)			/* Save HID4 */
-			stw	r7,CSAhid+(5*4)(r4)			/* Save HID5 */
-			stw	r7,CSAhid+(6*4)(r4)			/* Save HID6 */
-			stw	r7,CSAhid+(7*4)(r4)			/* Save HID7 */
-			stw	r7,CSAhid+(8*4)(r4)			/* Save HID8 */
-			stw	r7,CSAhid+(9*4)(r4)			/* Save HID9 */
-			stw	r7,CSAhid+(10*4)(r4)			/* Save HID10 */
-			stw	r7,CSAhid+(11*4)(r4)			/* Save HID11 */
-			stw	r7,CSAhid+(12*4)(r4)			/* Save HID12 */
-			stw	r7,CSAhid+(13*4)(r4)			/* Save HID13 */
-			stw	r7,CSAhid+(14*4)(r4)			/* Save HID14 */
-			stw	r7,CSAhid+(15*4)(r4)			/* Save HID15 */
-
-			mfspr	r6,952					/* Get MMCR0 */
-			mr	r5,r7					/* Assume 604 */
-			beq	NoMMCR1					/* It is... */
-			mfspr	r5,956					/* Get the MMCR1 */
-
-NoMMCR1:	stw	r6,CSAmmcr+(0*4)(r4)			/* Save MMCR0 */
-			stw	r5,CSAmmcr+(1*4)(r4)			/* Save MMCR1 */
-
-			mfspr	r6,953					/* Get PMC1 */
-			mfspr	r5,954					/* Get PMC2 */
-			stw	r6,CSApmc+(0*4)(r4)			/* Save PMC1 */
-			stw	r5,CSApmc+(1*4)(r4)			/* Save PMC2 */
-
-			mr	r6,r7					/* Assume 604 */
-			mr	r5,r7					/* Assume 604 */
-			beq	NoPMC3					/* Yeah... */
-			mfspr	r6,957					/* Get the PMC3 for a 604E */
-			mfspr	r5,958					/* Get the PMC4 for a 604E */
-
-NoPMC3:		stw	r6,CSApmc+(2*4)(r4)			/* Save PMC3 */
-			stw	r5,CSApmc+(3*4)(r4)			/* Save PMC4 */
-
-			mfspr	r6,955					/* Get SIA */
-			mfspr	r5,959					/* Get SDA */
-			stw	r6,CSAsia(r4)				/* Save the SIA */
-			stw	r5,CSAsda(r4)				/* Save the SDA */
-
-			stw	r7,CSAmq(r4)				/* There is no MQ on either the 604 or 604E */
-
-
-			lwz	r6,MPPICStat(r9)			/* Get the status of this processor */
-			lis	r10,MPPICReady>>16			/* Get the flag for reset or not */
-			li	r5,kSIGPResetState			/* Assume we're reset */
-			and.	r0,r6,r10				/* See if the ready bit is set */
-			lis	r10,MPPICStop>>16			/* Get the flag for stopped or not */
-			beq	SetStateInf				/* Go set that we are reset... */
-			and.	r0,r6,r10				/* Are we stopped? */
-			li	r5,kSIGPStoppedState			/* Assume we are */
-			bne	SetStateInf				/* We are, go set it... */
-			li	r5,kSIGPOperatingState			/* Not stopped, so we're going */
-
-SetStateInf:	stb	r5,CSAstate(r4)				/* Set the state byte */
-
-			li	r0,1					/* Set the truth */
-			sync						/* Make sure it's stored */
-
-			stb	r0,CSAregsAreValid(r4)			/* Set that the status is valid */
-
-			blr						/* We're done here... */
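The CSA offsets used throughout StoreStatus imply a save-area layout roughly like the struct below. This is reverse-read from the offsets in this listing only; field order, widths, and names are inferred, not the firmware's actual declaration.

#include <stdint.h>

/* Inferred per-processor status save area ("CSA"); layout is a guess. */
struct csa {
    uint32_t gpr[32];       /* CSAgpr:   R0-R31, words                      */
    double   fpr[32];       /* CSAfpr:   F0-F31, doublewords                */
    uint32_t cr;            /* CSAcr:    overlays the top half of...        */
    uint32_t fpscr;         /* CSAfpscr: ...the stfd'ed FPSCR double        */
    uint32_t xer, lr, ctr;
    uint32_t dec, tbu, tbl;
    uint32_t msr, pc;       /* both taken from SRR1/SRR0 at the exception   */
    uint32_t srr0, srr1;
    uint32_t pvr, pir;
    uint32_t ibat[4][2];    /* CSAibat:  upper/lower pairs                  */
    uint32_t dbat[4][2];    /* CSAdbat:  upper/lower pairs                  */
    uint32_t sdr1;
    uint32_t sr[16];
    uint32_t dar, dsisr;
    uint32_t sprg[4];
    uint32_t dabr, iabr, ear;
    uint32_t hid[16];       /* slots absent on the 604/604E get 0xDEADF1D0  */
    uint32_t mmcr[2], pmc[4];
    uint32_t sia, sda, mq;
    uint8_t  state;         /* CSAstate: one of the kSIGP*State values      */
    uint8_t  regsAreValid;  /* set last, after a sync                       */
};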
-
-/******************************************************************************************************** */
-/* */
-/* The synchronize time base function. No state requirements for this one. */
-/* */
-/******************************************************************************************************** */
-
-ITBsync:							/* This handles the synchronize time base function */
-			lis	r12,HIGH_ADDR(MPPIwork)			/* Get the top of work area */
-			li	r0,MPPICfTBsy1				/* Get the flag for TB sync state 1 */
-			li	r7,0					/* Get a 0 */
-			ori	r12,r12,LOW_ADDR(MPPIwork)		/* Get low part of work area */
-			mttbl	r7					/* Clear the bottom of the TB so's there's no upper ticks */
-			mttbu	r7					/* Clear the top part, just 'cause I wanna */
-
-			sync						/* Make sure all is saved */
-			stb	r0,MPPICStat+2(r9)			/* Tell the main dude to tell us the time */
-			isync						/* Make sure we don't go nowhere's */
-
-/* */
-/* Remember that the sync'ing processor ensures that the TB won't tick the high part for at least */
-/* 16k ticks. That should be way longer than we need for the whole process here */
-/* */
-
-WaitTBLower:	lwz	r5,MPPITBsync+4-MPPIwork(r12)		/* Get the lower part of the TB */
-			mttbl	r5					/* Put it in just in case it's set now */
-			mr.	r5,r5					/* Was it actually? */
-			beq+	WaitTBLower				/* Nope, go check again... */
-			lwz	r4,MPPITBsync-MPPIwork(r12)		/* Get the high order part */
-			mttbu	r4					/* Set the top half also */
-
-			stw	r7,MPPITBsync+4-MPPIwork(r12)		/* Tell 'em we've got it */
-
-			sync
-
-			li	r4,0					/* Clear this */
-			la	r5,MPPISncFght-32-MPPIwork(r12)		/* Point to the squared circle (our corner) */
-
-			b	TB1stPnch				/* Go take the first punch... */
-
-TBSargue:
-			dcbf	0,r5					/* *** Fix cache coherency (data integrity) HW bug *** */
-			sync						/* *** Fix cache coherency (data integrity) HW bug *** */
-			lwz	r6,0(r5)				/* Listen for the prosecution's argument */
-			mr.	r6,r6					/* See if they are done */
-			beq+	TBSargue				/* Nope, still going... */
-
-TB1stPnch:	mftb	r7					/* They're done, time for rebuttal */
-			stw	r7,32(r5)				/* Make our rebuttal */
-
-			addi	r4,r4,1					/* Count rounds */
-
-			cmplwi	cr0,r4,10				/* See if we've gone 9 more rounds */
-			addi	r5,r5,64				/* Point to the next round areas */
-
-			blt+	TBSargue				/* Not yet, come out of your corners fighting... */
-
-/* */
-/* We'll set the latest-up-to-datest from the other processor now */
-/* */
-TBSetTB:
-			dcbf	0,r5					/* *** Fix cache coherency (data integrity) HW bug *** */
-			sync						/* *** Fix cache coherency (data integrity) HW bug *** */
-			lwz	r6,0(r5)				/* Listen for the prosecution's argument */
-			mttbl	r6					/* Set it just in case it's ok */
-			mr.	r6,r6					/* See if they are done */
-			beq+	TBSetTB					/* Nope, still going... */
-
-/* */
-/* Get average duration for each processor. We skip the first pass on the assumption */
-/* that the caches were not warmed up and it would take longer. In practice this */
-/* is what was seen. */
-/* */
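The arithmetic the next stretch of code performs is easier to see in C: accumulate both sides' measured exchange durations over the warm rounds, average them, and halve the result to approximate the one-way flight time by which our timebase should slide forward. A sketch under those assumptions, with invented names; only the +0x10 round-up and the divide-by-32 mirror the assembly.

#include <stdint.h>

/* One exchange as recorded in the MPPISncFght rings (names invented). */
struct tb_round {
    uint32_t my_start, other_start;
    uint32_t my_end,   other_end;
};

/* How far to slide our timebase so the two CPUs agree. */
static uint32_t tb_sync_offset(const struct tb_round r[10])
{
    uint32_t mine = 0, theirs = 0;

    for (int i = 1; i <= 8; i++) {          /* skip round 0: caches were cold */
        mine   += r[i].my_end    - r[i].my_start;
        theirs += r[i].other_end - r[i].other_start;
    }
    /* +0x10 mirrors the assembly's round-up; >>5 is the rlwinm's divide by
       32 (16 accumulated samples, then half the average duration). */
    return (mine + theirs + 0x10) >> 5;
}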
-
-			mr	r0,r11					/* Move return address to a safe register */
-
-			li	r4,0					/* Clear a counter */
-			li	r3,0					/* Clear accumulator for duration */
-			li	r10,0					/* Clear start time accumulator top half */
-			li	r11,0					/* Clear start time accumulator bottom half */
-			li	r1,0					/* Clear start time accumulator top half */
-			li	r2,0					/* Clear start time accumulator bottom half */
-			li	r10,0					/* Clear accumulator for durations */
-			la	r5,MPPISncFght+64-MPPIwork(r12)		/* Get second round start time address */
-
-TBSaccumU:	lwz	r6,0(r5)				/* Get start time */
-			lwz	r11,32(r5)				/* Get the other processor's start time */
-			lwz	r7,64(r5)				/* Get end time */
-			lwz	r8,96(r5)				/* Other proc's end time */
-			sub	r7,r7,r6				/* Get duration */
-			sub	r8,r8,r11				/* Get other side's duration */
-			addi	r4,r4,1					/* Count arguments */
-			add	r3,r3,r7				/* Accumulate durations */
-			add	r2,r2,r8				/* Accumulate other side's durations */
-			cmplwi	cr0,r4,8				/* Have we gotten them all yet? */
-			addi	r5,r5,64				/* Step to the next argument */
-			blt+	TBSaccumU				/* We're not done yet... */
-
-			add	r7,r2,r3				/* Sum the two differences */
-			addi	r7,r7,0x10				/* Round up */
-			rlwinm	r7,r7,27,5,31				/* Get the average difference divided in half */
-
-			mftb	r8					/* Get the time now */
-			add	r8,r8,r7				/* Slide the window */
-			mttbl	r8					/* Set the time */
-
-			stw	r12,MPPITBsync+4-MPPIwork(r12)		/* Show that we are done */
-
-			lwz	r3,MPPICStat(r9)			/* Get back our status */
-			mr	r11,r0					/* Restore the return register */
-			b	KillBusy				/* We're all done now, done for it, c'est la vie... */
-
-
-/******************************************************************************************************** */
-/* */
-/* The reset function. No state requirements for this one. */
-/* This suicides the processor. Our caller is never returned to (good English). The only way out of */
-/* this is a start function subsequently. So, we don't give a flying f**k about the registers 'n' stuff. */
-/* */
-/******************************************************************************************************** */
-
-IReset:		lis	r28,0x8000				/* Turn on machine checks */
-
-			ori	r28,r28,0xCC84				/* Enable caches, clear them, */
-								/* disable serial execution and turn BHT on */
-			sync
-			mtspr	HID0,r28				/* Start the cache clear */
-			sync
-
-/* */
-/* Clear out the TLB. They be garbage after hard reset. */
-/* */
-
-			li	r0,512					/* Get number of TLB entries (FIX THIS) */
-			li	r3,0					/* Start at 0 */
-			mtctr	r0					/* Set the CTR */
-
-IRpurgeTLB:	tlbie	r3					/* Purge this entry */
-			addi	r3,r3,4096				/* Next page */
-			bdnz	IRpurgeTLB				/* Do 'em all... */
-
-			sync						/* Make sure all TLB purges are done */
-			tlbsync						/* Make sure on other processors also */
-			sync						/* Make sure the TLBSYNC is done */
-
-/* */
-/* Clear out the BATs. 
*/ -/* */ - - li r3,0 /* Clear a register */ - - mtspr DBAT0L,r3 /* Clear BAT */ - mtspr DBAT0U,r3 /* Clear BAT */ - mtspr DBAT1L,r3 /* Clear BAT */ - mtspr DBAT1U,r3 /* Clear BAT */ - mtspr DBAT2L,r3 /* Clear BAT */ - mtspr DBAT2U,r3 /* Clear BAT */ - mtspr DBAT3L,r3 /* Clear BAT */ - mtspr DBAT3U,r3 /* Clear BAT */ - - mtspr IBAT0L,r3 /* Clear BAT */ - mtspr IBAT0U,r3 /* Clear BAT */ - mtspr IBAT1L,r3 /* Clear BAT */ - mtspr IBAT1U,r3 /* Clear BAT */ - mtspr IBAT2L,r3 /* Clear BAT */ - mtspr IBAT2U,r3 /* Clear BAT */ - mtspr IBAT3L,r3 /* Clear BAT */ - mtspr IBAT3U,r3 /* Clear BAT */ - -/* */ -/* Map 0xF0000000 to 0xFFFFFFFF for I/O; make it R/W non-cacheable */ -/* Map 0x00000000 to 0x0FFFFFFF for mainstore; make it R/W cachable */ -/* */ - - lis r6,0xF000 /* Set RPN to last segment */ - ori r6,r6,0x1FFF /* Set up upper BAT for 256M, access both */ - - lis r7,0xF000 /* Set RPN to last segment */ - ori r7,r7,0x0032 /* Set up lower BAT for 256M, access both, non-cachable */ - - mtspr DBAT0L,r7 /* Setup ROM and I/O mapped areas */ - mtspr DBAT0U,r6 /* Now do the upper DBAT */ - sync - - li r6,0x1FFF /* Set up upper BAT for 256M, access both */ - li r7,0x0012 /* Set up lower BAT for r/w access */ - - mtspr DBAT1L,r7 /* Set up an initial view of mainstore */ - mtspr DBAT1U,r6 /* Now do the upper DBAT */ - sync - -/* */ -/* Clean up SDR and segment registers */ -/* */ - - li r3,0 /* Clear a register */ - mtspr SDR1,r3 /* Clear SDR1 */ - - li r4,0 /* Clear index for segment registers */ - lis r5,0x1000 /* Set the segment indexer */ - -IRclearSR: mtsrin r3,r4 /* Zero out the SR */ - add. r4,r4,r5 /* Point to the next segment */ - bne- IRclearSR /* Keep going until we wrap back to 0 */ - - lis r3,(MPPICOnline+MPPICStop)>>16 /* Set the reset/online state flags */ - b KillBusy /* Go wipe out the busy flags... */ - -/* (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) */ -/* */ -/* Here lies the Phoney Firmware used to test SIGPs. Take this out later. */ -/* */ -/* (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) */ - -mp_PhoneyFirmware: - - li r27,0x3040 /* Set floating point and machine checks on, IP to 0xFFF0xxxx */ - mtmsr r27 /* Load 'em on in */ - isync - - bl PhoneyBase /* Make a base register */ -PhoneyBase: mflr r26 /* Get it */ - addi r26,r26,LOW_ADDR(MPPIbase-PhoneyBase) /* Adjust it back */ - - la r20,LOW_ADDR(rupttab-MPPIbase)(r26) /* Get the address of the interrupt table */ - la r21,LOW_ADDR(rupttabend-MPPIbase)(r26) /* Get the end of the table */ - -relocate: lwz r22,0(r20) /* Get the displacement to routine */ - add r22,r22,r12 /* Relocate to the physical address */ - stw r22,0(r20) /* Stick it back */ - addi r20,r20,4 /* Point to the next one */ - cmplw cr0,r20,r21 /* Still in table? */ - ble+ cr0,relocate /* Yeah... 
*/ - - la r20,LOW_ADDR(rupttab-MPPIbase)(r26) /* Get the interrupt table back again */ - mtsprg 3,r20 /* Activate the phoney Rupt table */ - - lis r24,hi16(HammerHead) /* Get the actual hammerhead address */ - ori r24,r24,0x0032 /* Make R/W non-cachable */ - lwz r23,MPPIHammer-MPPIwork(r12) /* Get the address mapped on the main processor */ - ori r23,r23,0x0003 /* Set both super and user valid for 128KB */ - - mtspr DBAT0L,r24 /* Setup hammerhead's real address */ - mtspr DBAT0U,r23 /* Map hammerhead to the same virtual address as on the main processor */ - sync /* Make sure it is done */ - - la r25,MPPICPU2-MPPIwork(r12) /* Point to a phoney register save area */ - mtsprg 1,r25 /* Phoney up initialized processor state */ - - lis r24,0xFEED /* Get 0xFEED */ - ori r24,r24,0xF1D0 /* Get 0xFEEDF1D0 */ - - stw r24,CSAgpr+(0*4)(r25) /* Store invalid R0 */ - stw r24,CSAgpr+(1*4)(r25) /* Store invalid R1 */ - stw r24,CSAgpr+(2*4)(r25) /* Store invalid R2 */ - stw r24,CSAgpr+(3*4)(r25) /* Store invalid R3 */ - stw r24,CSAgpr+(4*4)(r25) /* Store invalid r4 */ - stw r24,CSAgpr+(5*4)(r25) /* Store invalid R5 */ - stw r24,CSAgpr+(6*4)(r25) /* Store invalid R6 */ - stw r24,CSAgpr+(7*4)(r25) /* Store invalid r7 */ - stw r24,CSAgpr+(8*4)(r25) /* Store invalid R8 */ - stw r24,CSAgpr+(9*4)(r25) /* Store invalid R9 */ - stw r24,CSAgpr+(10*4)(r25) /* Store invalid R10 */ - stw r24,CSAgpr+(11*4)(r25) /* Store invalid R11 */ - stw r24,CSAgpr+(12*4)(r25) /* Store invalid R12 */ - -waititout: lwz r25,0x30(br0) /* Get wait count */ - mfmsr r24 /* Get the MSR */ - addi r25,r25,1 /* Bounce it up */ - ori r24,r24,0x8000 /* Turn on external interruptions */ - stw r25,0x30(br0) /* Save back the count */ - mtmsr r24 /* Set it */ - isync /* Stop until we're here */ - b waititout /* Loop forever... */ - -/* */ -/* Phoney interrupt handlers */ -/* */ - -pexternal: mflr r29 /* Get the LR value */ - lwz r29,0(r29) /* Get the rupt code */ - stw r29,0x0B0(br0) /* Save the code */ - bl GotSignal /* Call the signal handler */ - oris r3,r3,0x8000 /* Turn on high bit so we see a code 0 */ - stw r3,0xA8(br0) /* Save return code in debug area */ - -ignorerupt: mflr r29 /* Get the LR value */ - lwz r29,0(r29) /* Get the rupt code */ - stw r29,0x0B0(br0) /* Save the code */ - rfi /* Bail to from whence we commest... */ - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - -rupttab: .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long pexternal /* Phoney external handler */ - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt - .long ignorerupt -rupttabend: .long ignorerupt - -/* (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) */ -/* */ -/* Here lies the end of the Phoney Firmware used to test SIGPs. 
Take this out later. */ -/* */ -/* (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) */ - - -/* */ -/* Table of function offsets */ -/* */ - -MPPIFuncOffs: - - .long CountProcessors-MPPIFunctions /* Offset to routine */ - .long StartProcessor-MPPIFunctions /* Offset to routine */ - .long ResumeProcessor-MPPIFunctions /* Offset to routine */ - .long StopProcessor-MPPIFunctions /* Offset to routine */ - .long ResetProcessor-MPPIFunctions /* Offset to routine */ - .long SignalProcessor-MPPIFunctions /* Offset to routine */ - .long StoreProcessorStatus-MPPIFunctions /* Offset to routine */ - .long SynchClock-MPPIFunctions /* Offset to routine */ - .long GetExtHandlerAddress-MPPIFunctions /* Offset to routine */ - .long GotSignal-MPPIFunctions /* Offset to routine */ - .long ProcessorState-MPPIFunctions /* Offset to routine */ - .long RunSIGPRun-MPPIFunctions /* Offset to routine */ - .long mp_PhoneyFirmware-MPPIFunctions /* (TEST/DEBUG) */ - -MPPISize: - diff --git a/osfmk/ppc/POWERMAC/panic_image.c b/osfmk/ppc/POWERMAC/panic_image.c deleted file mode 100644 index c130da354..000000000 --- a/osfmk/ppc/POWERMAC/panic_image.c +++ /dev/null @@ -1,269 +0,0 @@ - -//image_pixel_data -static const struct { - unsigned int pd_width; - unsigned int pd_height; - unsigned int bytes_per_pixel; /* 3:RGB, 4:RGBA */ - unsigned char image_pixel_data[0xbf62]; -} panic_dialog = { - 471, 258, 3, -0xff, 0xf3, 0xff, 0xf3, 0xff, 0xf3, 0xff, 0xf3, 0xff, 0xf3, 0x8a, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 
0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4b, 0xe0, 0x03, 0x8c, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x8c, 0x90, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0xff, 0xe0, 0xfc, 0xe0, 0x03, 0xbd, 0x03, 0x60, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x7c, 0x1e, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x12, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x21, 0xe0, 0x03, 00, 0x03, 0x9c, 0x6c, 0xe0, 0x03, 0x9c, 0x03, 00, 0x36, 0xe0, 0x0c, 0x60, 0x03, 0xad, 0x6f, 0xe0, 0x03, 00, 0x03, 0x9c, 0xe1, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4b, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xad, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0xad, 0x90, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0xad, 0x03, 0xbd, 0x66, 0xe0, 0x03, 0xbd, 0x03, 0xad, 0x30, 0xe0, 0x03, 0x9c, 0x03, 0xcf, 0xf6, 0xe0, 0x03, 0xad, 0x03, 0xbd, 0x45, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x1e, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x12, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x21, 0xe0, 0x03, 00, 0x03, 0x9c, 0x5d, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x36, 0xe0, 0x03, 00, 0x03, 0x45, 0x03, 0x60, 0x03, 0x38, 0x03, 00, 0x03, 0x8c, 0x6c, 0xe0, 0x03, 00, 0x03, 0x9c, 0x2a, 0xe0, 0x03, 0xad, 0x03, 0xbd, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0xcf, 0xa2, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x45, 0x03, 0x2c, 0x06, 0xe0, 0x06, 0x52, 0x0c, 0xe0, 0x06, 0x9c, 0x39, 0xe0, 0x06, 0xad, 0x0f, 0xe0, 0x06, 0xad, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x0f, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x03, 0x60, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0f, 0xe0, 0x06, 0x9c, 0x21, 0xe0, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x0f, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 0xcf, 0x06, 0x9c, 0x12, 0xe0, 0x03, 0xad, 0x03, 0xcf, 0x03, 0xe0, 0x03, 00, 0x03, 0x9c, 0x2d, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xbd, 0x2a, 0xe0, 0x06, 0xbd, 0x15, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xbd, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xbd, 0x12, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x0f, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xbd, 0x21, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0c, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x12, 0xe0, 0x03, 0x9c, 0x1b, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x0c, 0xe0, 0x06, 0x9c, 0x0c, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x03, 0x60, 0x03, 0x2c, 0x18, 0xe0, 0x06, 0xad, 0x03, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xcf, 0x30, 0xe0, 0x06, 0xad, 0x15, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x18, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x45, 0x03, 0x2c, 0x09, 0xe0, 0x06, 0x9c, 0x30, 0xe0, 0x03, 0x9c, 0x03, 0xbd, 0x12, 0xe0, 0x03, 0xad, 0x03, 0xcf, 0x0f, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xbd, 0x21, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xad, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x72, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0x9c, 0x03, 0xbd, 0x03, 0x0c, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x06, 0x20, 0x03, 0x16, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x45, 
0x03, 0x20, 0x03, 0x0c, 0x03, 0x52, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0x20, 0x03, 0x16, 0x03, 0x45, 0x09, 0xe0, 0x03, 0x45, 0x03, 0x16, 0x06, 0x20, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x7c, 0x03, 00, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x20, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0x60, 0x09, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x06, 0x20, 0x03, 0x16, 0x03, 0xad, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x03, 0x20, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x45, 0x03, 0x16, 0x06, 0x20, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x20, 0x03, 0x16, 0x03, 0x2c, 0x03, 0x38, 0x03, 0xe0, 0x03, 0x9c, 0x09, 00, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x20, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x38, 0x03, 0x2c, 0x03, 0xad, 0x03, 0x2c, 0x09, 00, 0x0c, 0xe0, 0x03, 0x8c, 0x03, 0x0c, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x52, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x6e, 0x03, 0x20, 0x03, 0x7c, 0x0f, 0xe0, 0x03, 0xad, 0x06, 0x16, 0x03, 0x2c, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x52, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x7c, 0x06, 0x16, 0x03, 0x20, 0x03, 0xad, 0x03, 0x16, 0x03, 0x20, 0x03, 0x0c, 0x03, 0xbd, 0x06, 0xe0, 0x03, 00, 0x03, 0x6e, 0x03, 0x16, 0x03, 0x2c, 0x03, 00, 0x03, 0x7c, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x60, 0x09, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x03, 0x20, 0x03, 0x2c, 0x1b, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x06, 0x20, 0x03, 0x16, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x7c, 0x03, 00, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x20, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x16, 0x03, 0x20, 0x03, 0x2c, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x7c, 0x03, 0xe0, 0x06, 0x45, 0x06, 0xe0, 0x03, 0x52, 0x03, 00, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0x0c, 0x03, 0xcf, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x20, 0x03, 0x0c, 0x03, 0x52, 0x12, 0xe0, 0x09, 00, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x7c, 0x06, 0x16, 0x03, 0x20, 0x09, 0xe0, 0x03, 0x45, 0x03, 0x16, 0x06, 0x20, 0x03, 0xcf, 0x12, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x06, 0x20, 0x03, 0x16, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x16, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x20, 0x03, 0x16, 0x06, 0xe0, 0x03, 0xad, 0x03, 0x20, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x38, 0x03, 0x2c, 0x03, 0xad, 0x0f, 0xe0, 0x03, 00, 0x03, 0x6e, 0x03, 0x16, 0x03, 0x2c, 0x03, 00, 0x03, 0x7c, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x60, 0x09, 00, 0x03, 0x9c, 0x03, 0x2c, 0x09, 00, 0x06, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x2c, 0x03, 0xcf, 0x06, 0xe0, 0x03, 00, 0x03, 0x6e, 0x03, 0x16, 0x03, 0x20, 0x03, 0x0c, 0x03, 0xbd, 0x6f, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x51, 0xe0, 0x03, 0x52, 0x03, 0x20, 0x03, 0x2c, 0x03, 0x7c, 0x09, 0xe0, 0x03, 0x16, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x60, 0x03, 00, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x8c, 0x03, 00, 0x06, 0xe0, 0x06, 0x45, 0x03, 
0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x38, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x16, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x16, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x60, 0x03, 00, 0x03, 0x8c, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x12, 0xe0, 0x03, 0x16, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x45, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x52, 0x15, 0xe0, 0x03, 0x16, 0x03, 0x45, 0x0c, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x45, 0x03, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0x20, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x20, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x2c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x0c, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x38, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xbd, 0x1e, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0x60, 0x06, 0x2c, 0x06, 0xe0, 0x03, 0x16, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x38, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x52, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x45, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xbd, 0x03, 0x7c, 0x03, 0x16, 0x06, 0xe0, 0x06, 0x16, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x8c, 0x03, 0x38, 0x03, 0xe0, 0x03, 0x60, 0x03, 00, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x8c, 0x03, 00, 0x12, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x45, 0x03, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x6e, 0x12, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x16, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x16, 0x03, 0xe0, 0x06, 0x45, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0x16, 0x03, 00, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x52, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0x16, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x06, 0xe0, 0x03, 0x60, 0x03, 00, 0x03, 0x8c, 0x15, 0xe0, 0x03, 00, 0x03, 0x2c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x0c, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x45, 0x06, 0xe0, 0x03, 00, 0x03, 0x20, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x20, 0x03, 0x60, 0x6f, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x51, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x60, 0x03, 0x20, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x0c, 0x03, 0x6e, 0x06, 0x9c, 0x03, 0x20, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x38, 0x03, 0x45, 0x06, 0x9c, 
0x03, 0x45, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0xad, 0x0f, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x38, 0x03, 0x45, 0x06, 0x9c, 0x03, 0x45, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0x45, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 00, 0x03, 0x9c, 0x12, 0xe0, 0x03, 0x52, 0x03, 0x20, 0x06, 0xe0, 0x03, 0x16, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x38, 0x03, 0x52, 0x09, 0xe0, 0x03, 0x52, 0x03, 0x38, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x15, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0xbd, 0x0c, 0xe0, 0x03, 0x38, 0x03, 0x52, 0x09, 0xe0, 0x03, 0x52, 0x03, 0x38, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x06, 0x9c, 0x03, 0x6e, 0x03, 00, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x21, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0x60, 0x06, 0x2c, 0x03, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x16, 0x03, 0x7c, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x0c, 0x03, 0x8c, 0x09, 0xe0, 0x03, 0x20, 0x03, 0x6e, 0x03, 0xbd, 0x03, 00, 0x03, 0xbd, 0x03, 0xad, 0x03, 0x20, 0x06, 0x45, 0x03, 0xe0, 0x03, 0x45, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x20, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x12, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xcf, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x38, 0x03, 0x45, 0x06, 0x9c, 0x03, 0x45, 0x03, 0x2c, 0x12, 0xe0, 0x03, 00, 0x03, 0x20, 0x03, 0x2c, 0x03, 00, 0x03, 0x52, 0x03, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0xad, 0x03, 0x7c, 0x03, 0x16, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x52, 0x03, 0x16, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x16, 0x03, 0xbd, 0x03, 0xad, 0x03, 00, 0x09, 0x9c, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x18, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x09, 0xe0, 0x03, 0x8c, 0x03, 0x0c, 0x06, 0xe0, 0x03, 00, 0x03, 0x8c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x6f, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x54, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 00, 0x03, 0x20, 0x09, 0x2c, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x16, 0x09, 0x2c, 0x03, 0x52, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 00, 0x03, 0x9c, 0x0f, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x16, 0x09, 0x2c, 0x03, 0x52, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0x16, 0x03, 00, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 
[Several hundred patch lines of raw hexadecimal byte data elided. The hunk consists of removed ("-") lines from a large initialized byte array, apparently run-length-encoded image pixel data (count/value byte pairs); the raw bytes carry no human-reviewable content.]
0xe0, 0x06, 0xad, 0x36, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x42, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x15, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x30, 0xe0, 0x03, 0x9c, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0xad, 0x2a, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x24, 0xe0, 0x03, 0xdf, 0x03, 0xd7, 0x06, 0xcf, 0x03, 0xd1, 0x0c, 0xd0, 0x03, 0xa0, 0x03, 0x92, 0x03, 0xbf, 0x0c, 0xd0, 0x03, 0xbf, 0x03, 0x92, 0x03, 0xbf, 0x18, 0xd0, 0x03, 0xbf, 0x03, 0x92, 0x03, 0xb0, 0x18, 0xd0, 0x03, 0xbf, 0x03, 0x91, 0x03, 0xa0, 0x03, 0xd5, 0x03, 0xdc, 0x03, 0xe2, 0x03, 0xe5, 0x03, 0xe6, 0x0f, 0xe5, 0x03, 0xc2, 0x03, 0x9f, 0x03, 0xd3, 0x0f, 0xe5, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0xe0, 0x03, 0xd7, 0x03, 0xd0, 0x03, 0xcf, 0x12, 0xd0, 0x03, 0x92, 0x03, 0xb0, 0x03, 0xcd, 0x03, 0xb2, 0x03, 0xac, 0x03, 0xe6, 0x03, 0xe7, 0x03, 0xd3, 0x03, 0x9f, 0x03, 0xd3, 0x09, 0xe5, 0x03, 0xb3, 0x03, 0xa0, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x03, 0xd0, 0x03, 0xbf, 0x03, 0x92, 0x03, 0xbf, 0x0c, 0xd0, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0xd7, 0x03, 0xe0, 0x03, 0xe5, 0x03, 0xe6, 0x27, 0xe5, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0xe2, 0x03, 0xdc, 0x03, 0xd5, 0x03, 0xd0, 0x03, 0xcf, 0x06, 0xa0, 0x03, 0xd0, 0x03, 0xbf, 0x03, 0x92, 0x2a, 0xd0, 0x03, 0xbf, 0x03, 0x92, 0x03, 0xbf, 0x09, 0xd0, 0x03, 0xa0, 0x03, 0x92, 0x15, 0xd0, 0x03, 0xd1, 0x06, 0xcf, 0x03, 0xd7, 0x03, 0xdf, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xcf, 0x39, 0xe0, 0x03, 0x9c, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0xad, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x09, 0xe0, 0x06, 0x9c, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x15, 0xe0, 0x06, 0x9c, 0x27, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x18, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xbd, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xbd, 0xb1, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xcf, 0xe0, 0x03, 0xdc, 0x03, 0xd4, 0x06, 0xcf, 0x03, 0xd1, 0x63, 0xd0, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0xd5, 0x03, 0xdb, 0x03, 0xe1, 0x06, 0xe6, 0x24, 0xe5, 0x03, 0xe6, 0x06, 0xe7, 0x03, 0xe1, 0x03, 0xda, 0x03, 0xd3, 0x06, 0xcf, 0x18, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x18, 0xd0, 0x06, 0xcf, 0x03, 0xd3, 0x03, 0xda, 0x03, 0xe1, 0x06, 0xe7, 0x03, 0xe6, 0x24, 0xe5, 0x06, 0xe6, 0x03, 0xe1, 0x03, 0xdb, 0x03, 0xd5, 0x03, 0xd0, 0x03, 0xcf, 0x63, 0xd0, 0x03, 0xd1, 0x06, 0xcf, 0x03, 0xd4, 0x03, 0xdc, 0xff, 0xe0, 0xc9, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xcc, 0xe0, 0x03, 0xdf, 0x03, 0xd9, 0x03, 0xd0, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0xd1, 0x60, 0xd0, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0xd4, 0x03, 0xdc, 0x03, 0xe2, 0x09, 0xe6, 0x21, 0xe5, 0x09, 0xe6, 0x03, 0xe2, 0x03, 0xda, 0x03, 0xd3, 0x06, 0xcf, 0x1b, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x1b, 0xd0, 0x06, 0xcf, 0x03, 0xd3, 0x03, 0xda, 0x03, 0xe2, 0x09, 0xe6, 0x21, 0xe5, 0x09, 0xe6, 0x03, 0xe2, 0x03, 0xdc, 0x03, 0xd4, 0x03, 0xd0, 0x03, 0xcf, 0x60, 0xd0, 0x03, 0xd1, 0x03, 0xd0, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0xd9, 0x03, 0xdf, 0xff, 0xe0, 0xc6, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xcc, 0xe0, 0x03, 0xdc, 0x03, 0xd3, 0x03, 0xcf, 0x66, 0xd0, 0x06, 0xcf, 0x03, 0xd4, 0x03, 0xdc, 0x03, 0xe1, 0x03, 0xe5, 0x06, 0xe6, 0x21, 0xe5, 0x06, 0xe6, 0x03, 0xe5, 0x03, 0xe1, 0x03, 0xda, 0x03, 0xd1, 0x03, 0xcf, 0x03, 0xce, 0x1e, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x1e, 0xd0, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd1, 0x03, 
0xda, 0x03, 0xe1, 0x03, 0xe5, 0x06, 0xe6, 0x21, 0xe5, 0x06, 0xe6, 0x03, 0xe5, 0x03, 0xe1, 0x03, 0xdc, 0x03, 0xd4, 0x06, 0xcf, 0x66, 0xd0, 0x03, 0xcf, 0x03, 0xd3, 0x03, 0xdc, 0xff, 0xe0, 0xc6, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xc9, 0xe0, 0x03, 0xdf, 0x03, 0xd9, 0x06, 0xcf, 0x03, 0xd0, 0x03, 0xd1, 0x5d, 0xd0, 0x06, 0xcf, 0x03, 0xd3, 0x03, 0xda, 0x03, 0xe2, 0x03, 0xe5, 0x06, 0xe6, 0x1b, 0xe5, 0x0c, 0xe6, 0x03, 0xe4, 0x03, 0xe0, 0x03, 0xda, 0x03, 0xd3, 0x03, 0xce, 0x03, 0xcf, 0x21, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x21, 0xd0, 0x03, 0xcf, 0x03, 0xce, 0x03, 0xd3, 0x03, 0xda, 0x03, 0xe0, 0x03, 0xe4, 0x0c, 0xe6, 0x1b, 0xe5, 0x06, 0xe6, 0x03, 0xe5, 0x03, 0xe2, 0x03, 0xda, 0x03, 0xd3, 0x06, 0xcf, 0x5d, 0xd0, 0x03, 0xd1, 0x03, 0xd0, 0x06, 0xcf, 0x03, 0xd9, 0x03, 0xdf, 0xff, 0xe0, 0xc3, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x6f, 0xe0, 0x06, 0xad, 0xe4, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x4e, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x12, 0xe0, 0x03, 0xdc, 0x03, 0xd5, 0x03, 0xcf, 0x06, 0xd0, 0x03, 0xd1, 0x5a, 0xd0, 0x03, 0xcf, 0x03, 0xce, 0x03, 0xd1, 0x03, 0xda, 0x03, 0xe1, 0x09, 0xe6, 0x1e, 0xe5, 0x09, 0xe6, 0x03, 0xe4, 0x03, 0xdf, 0x03, 0xd9, 0x03, 0xd3, 0x06, 0xce, 0x24, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x24, 0xd0, 0x06, 0xce, 0x03, 0xd3, 0x03, 0xd9, 0x03, 0xdf, 0x03, 0xe4, 0x09, 0xe6, 0x1e, 0xe5, 0x09, 0xe6, 0x03, 0xe1, 0x03, 0xda, 0x03, 0xd1, 0x03, 0xce, 0x03, 0xcf, 0x5a, 0xd0, 0x03, 0xd1, 0x06, 0xd0, 0x03, 0xcf, 0x03, 0xd5, 0x03, 0xdc, 0xff, 0xe0, 0xc3, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x5a, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 0x8c, 0x03, 0x0c, 0x03, 0xad, 0xe1, 0xe0, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x2c, 0x03, 0x60, 0x4b, 0xe0, 0x03, 0x20, 0x03, 0x45, 0x12, 0xe0, 0x03, 0xdf, 0x03, 0xd9, 0x03, 0xd1, 0x63, 0xd0, 0x03, 0x91, 0x03, 00, 0x03, 0xd0, 0x03, 0xd7, 0x03, 0xe0, 0x06, 0xe6, 0x21, 0xe5, 0x06, 0xe6, 0x03, 0xe5, 0x03, 0xe1, 0x03, 0xdc, 0x03, 0xd6, 0x03, 0xd1, 0x03, 0xcf, 0x03, 0xce, 0x1b, 0xd0, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0x72, 0x03, 0x92, 0x24, 0xd0, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd1, 0x03, 0xd6, 0x03, 0xdc, 0x03, 0xe1, 0x03, 0xe5, 0x06, 0xe6, 0x21, 0xe5, 0x06, 0xe6, 0x03, 0xe0, 0x03, 0xd7, 0x03, 0xd0, 0x03, 0xce, 0x03, 0xcf, 0x63, 0xd0, 0x03, 0xd1, 0x03, 0xd9, 0x03, 0x60, 0x03, 0x2c, 0x78, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x12, 0xe0, 0x03, 0x9c, 0x03, 0x7c, 0xff, 0xe0, 0x27, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x5a, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x38, 0x03, 0xad, 0xe1, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0xcf, 0x4e, 0xe0, 0x03, 0x7c, 0x03, 0x52, 0x15, 0xe0, 0x03, 0xdd, 0x03, 0xd4, 0x03, 0xcf, 0x63, 0xd0, 0x03, 0x90, 0x03, 00, 0x03, 0xd6, 0x03, 0xe0, 0x03, 0xe5, 0x03, 0xe6, 0x24, 0xe5, 0x03, 0xe6, 0x03, 0xc3, 0x03, 0xae, 0x03, 0xda, 0x03, 0xd4, 0x03, 0xd0, 0x03, 0xcf, 0x21, 0xd0, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0x4b, 0x03, 0x74, 0x2a, 0xd0, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0xd4, 0x03, 0xda, 0x03, 0xe1, 0x06, 0xe6, 0x24, 0xe5, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0xe0, 0x03, 0xd6, 0x03, 0xd0, 0x03, 0xce, 0x63, 0xd0, 0x03, 0xcf, 0x03, 0xd4, 0x03, 0x5f, 0x03, 0x2c, 0x78, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x12, 0xe0, 0x03, 
0x7c, 0x03, 0x52, 0xff, 0xe0, 0x27, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x54, 0xe0, 0x03, 0x9c, 0x03, 0xbd, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x06, 0xad, 0x12, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x0c, 0xe0, 0x06, 0x9c, 0x03, 0xcf, 0x0f, 0xe0, 0x03, 0xcf, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0x9c, 0x09, 0xe0, 0x06, 0x9c, 0x03, 0xcf, 0x0c, 0xe0, 0x06, 0xad, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x1b, 0xe0, 0x03, 0x9c, 0x03, 0xbd, 0x12, 0xe0, 0x06, 0xad, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0f, 0xe0, 0x06, 0x9c, 0x15, 0xe0, 0x03, 0x9c, 0x03, 0xbd, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xad, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 0xbb, 0x03, 0xd3, 0x03, 0xce, 0x18, 0xd0, 0x03, 0xb0, 0x03, 0x92, 0x03, 0xbf, 0x0c, 0xd0, 0x03, 0xb0, 0x03, 0x92, 0x12, 0xd0, 0x03, 0xbf, 0x03, 0x92, 0x03, 0xbf, 0x0f, 0xd0, 0x03, 0x92, 0x03, 0xaf, 0x03, 0x91, 0x03, 00, 0x03, 0xdc, 0x03, 0xe2, 0x03, 0xe6, 0x03, 0xc3, 0x03, 0x9f, 0x03, 0xb0, 0x12, 0xe5, 0x06, 0xb0, 0x09, 0xe6, 0x03, 0x61, 0x03, 0x2a, 0x03, 0xd3, 0x03, 0xd0, 0x06, 0xcf, 0x12, 0xd0, 0x03, 0xa0, 0x03, 0x92, 0x09, 0xd0, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x0c, 0xe5, 0x03, 0xd3, 0x03, 0x9f, 0x03, 0xb3, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x0c, 0xd0, 0x03, 0xbf, 0x03, 0x92, 0x03, 0xbf, 0x18, 0xd0, 0x06, 0xcf, 0x03, 0xd0, 0x03, 0xd3, 0x03, 0xd9, 0x03, 0xe1, 0x03, 0xa0, 0x06, 0xe6, 0x03, 0xd3, 0x03, 0x9f, 0x03, 0xb0, 0x15, 0xe5, 0x03, 0xe6, 0x03, 0xc3, 0x03, 0x9e, 0x03, 0xbb, 0x03, 0xd4, 0x06, 0xcf, 0x03, 0xd0, 0x03, 0xbf, 0x03, 0x92, 0x03, 0xbf, 0x0f, 0xd0, 0x03, 0xa0, 0x03, 0x92, 0x03, 0xb0, 0x09, 0xd0, 0x03, 0xb0, 0x03, 0x92, 0x03, 0xb0, 0x12, 0xd0, 0x03, 0xb0, 0x03, 0x92, 0x0f, 0xd0, 0x03, 0xbf, 0x03, 0x90, 0x03, 0xc2, 0x03, 0x5f, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xbd, 0x18, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xad, 0x30, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x1b, 0xe0, 0x03, 0x9c, 0x03, 0xbd, 0x12, 0xe0, 0x06, 0xad, 0x18, 0xe0, 0x03, 0xcf, 0x06, 0x9c, 0x12, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xcf, 0x0f, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xbd, 0x39, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x66, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x16, 0x03, 0x2c, 0x03, 00, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0x20, 0x03, 0x16, 0x03, 0x45, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x7c, 0x06, 0x16, 0x03, 0x20, 0x03, 0xad, 0x03, 0x16, 0x03, 0x20, 0x03, 0x0c, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x0c, 0x03, 0xbd, 0x06, 0xe0, 0x03, 00, 0x03, 0x60, 0x03, 0x20, 0x03, 0x52, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x03, 0x20, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x0c, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0x45, 0x03, 0x0c, 0x06, 0x2c, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0x12, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x20, 0x03, 0x0c, 0x03, 0x52, 0x03, 0xe0, 0x03, 0x9c, 0x09, 00, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x06, 0x20, 0x03, 0x16, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x03, 0x20, 0x03, 0x2c, 0x03, 00, 0x03, 0x8c, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 
0xad, 0x06, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x2b, 0x03, 0x0c, 0x03, 0x75, 0x03, 0xcf, 0x0f, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0x74, 0x03, 0x15, 0x03, 0x1f, 0x03, 0x0c, 0x03, 0xa0, 0x06, 0xd0, 0x03, 0x41, 0x03, 0x15, 0x06, 0x1f, 0x03, 0xbf, 0x06, 0xd0, 0x03, 00, 0x03, 0x67, 0x03, 0x15, 0x03, 0x1f, 0x03, 0x0c, 0x03, 0xb0, 0x06, 0xd0, 0x03, 0xa0, 0x03, 0x0c, 0x03, 0x1f, 0x03, 0x15, 0x03, 0x2a, 0x03, 00, 0x03, 0xe1, 0x03, 0xe5, 0x03, 0x39, 0x03, 0x16, 0x03, 0x2d, 0x03, 0x0d, 0x03, 0x54, 0x06, 0xe5, 0x03, 0x62, 0x03, 0x2d, 0x03, 0x46, 0x03, 0x21, 0x03, 0x0d, 0x03, 0x54, 0x03, 0xe5, 0x03, 0x9c, 0x09, 00, 0x03, 0x59, 0x0f, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x03, 0x41, 0x03, 0x1f, 0x03, 0x15, 0x03, 0x1f, 0x03, 0xbf, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 0x03, 0x58, 0x03, 0x2a, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xa1, 0x03, 00, 0x06, 0xe5, 0x03, 0x90, 0x03, 00, 0x03, 0x2d, 0x03, 0x21, 0x03, 0xa0, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x74, 0x03, 0x0c, 0x03, 0x29, 0x03, 0x0c, 0x03, 0xa0, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x03, 0xd0, 0x03, 0xcf, 0x03, 0x29, 0x03, 0x4c, 0x03, 0x1f, 0x03, 0x2c, 0x03, 0xe5, 0x03, 0x90, 0x03, 00, 0x03, 0x2d, 0x03, 0x21, 0x03, 0x9f, 0x12, 0xe5, 0x03, 0x54, 0x03, 0x0d, 0x03, 0x2d, 0x03, 0x16, 0x03, 0xda, 0x03, 0xd1, 0x03, 0xce, 0x03, 0x74, 0x03, 0x0c, 0x03, 0x29, 0x03, 0x0c, 0x03, 0xa0, 0x06, 0xd0, 0x03, 0xa0, 0x06, 0x15, 0x03, 0x29, 0x03, 0x15, 0x06, 0xd0, 0x03, 0x4c, 0x03, 0x0c, 0x03, 0x29, 0x03, 0x0c, 0x03, 0x4c, 0x06, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0x74, 0x06, 0x15, 0x03, 0x1f, 0x09, 0xd0, 0x03, 0x74, 0x03, 00, 0x03, 0x29, 0x03, 0x15, 0x03, 0x1f, 0x03, 0x2b, 0x06, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x16, 0x15, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x2c, 0x03, 0xcf, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x03, 0x20, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x45, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x20, 0x03, 0x0c, 0x03, 0x52, 0x12, 0xe0, 0x03, 0x6e, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x20, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x7c, 0x03, 0x16, 0x03, 0x20, 0x03, 0x0c, 0x03, 0xad, 0x06, 0xe0, 0x03, 00, 0x03, 0x6e, 0x03, 0x16, 0x03, 0x2c, 0x03, 00, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x20, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x52, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0x06, 0xe0, 0x0f, 00, 0x03, 0x60, 0x4b, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x20, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 00, 0x06, 0xe0, 0x06, 0x45, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x45, 0x03, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0x20, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x20, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x20, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x2c, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x20, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x38, 0x12, 0xe0, 0x03, 0x16, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x06, 0xe0, 0x03, 0x60, 0x03, 00, 
0x03, 0x7c, 0x03, 0xe0, 0x03, 0x8c, 0x03, 00, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x16, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x52, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x45, 0x03, 0x20, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x38, 0x06, 0xe0, 0x03, 0x16, 0x03, 0x7c, 0x03, 0xdd, 0x03, 0x93, 0x03, 0x0c, 0x12, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0x4c, 0x06, 0xd0, 0x06, 0x34, 0x03, 0xd0, 0x03, 0x74, 0x03, 0x15, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x67, 0x06, 0xd0, 0x03, 00, 0x03, 0x1f, 0x03, 0xbf, 0x03, 0xd0, 0x03, 0x1f, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x1f, 0x03, 0x5a, 0x03, 0xce, 0x03, 0xcf, 0x03, 0x5c, 0x03, 00, 0x03, 0xe5, 0x03, 0xe6, 0x03, 0xd4, 0x06, 0xe5, 0x03, 0x90, 0x03, 00, 0x06, 0xe5, 0x03, 0x62, 0x03, 00, 0x03, 0x80, 0x03, 0xe5, 0x03, 0x90, 0x03, 00, 0x03, 0xdf, 0x03, 0xd7, 0x03, 0x5a, 0x03, 0x29, 0x03, 0xce, 0x12, 0xd0, 0x03, 0x5a, 0x03, 00, 0x03, 0x83, 0x03, 0xd0, 0x03, 0xbf, 0x03, 0x15, 0x03, 0x67, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 0x03, 0x58, 0x03, 0x2a, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xa1, 0x03, 00, 0x06, 0xe5, 0x03, 0x2d, 0x03, 0x54, 0x03, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0xb0, 0x03, 00, 0x03, 0xb0, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x34, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x0c, 0x03, 0xb0, 0x03, 0xd7, 0x03, 0xdf, 0x03, 0x2d, 0x03, 0x54, 0x1b, 0xe5, 0x03, 00, 0x03, 0x90, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0xdf, 0x03, 0xd6, 0x03, 0xaf, 0x03, 00, 0x03, 0xb0, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x34, 0x06, 0xd0, 0x03, 0x15, 0x03, 0x41, 0x0c, 0xd0, 0x03, 0x74, 0x03, 0x0c, 0x03, 0xbf, 0x03, 0xd0, 0x03, 0xbf, 0x03, 0x0c, 0x03, 0x74, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0x41, 0x03, 0xd0, 0x03, 0xbf, 0x03, 00, 0x03, 0x92, 0x03, 0xd0, 0x03, 0xbf, 0x03, 00, 0x03, 0x92, 0x03, 0xd0, 0x03, 0xbf, 0x03, 0x35, 0x03, 0x2b, 0x03, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x38, 0x06, 0xe0, 0x03, 00, 0x03, 0x8c, 0x18, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x45, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x16, 0x03, 0xad, 0x03, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x16, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x06, 0xe0, 0x03, 0x60, 0x03, 00, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x8c, 0x03, 00, 0x12, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x52, 0x06, 0xe0, 0x06, 0x38, 0x06, 0xe0, 0x03, 00, 0x03, 0x2c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x0c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x06, 0xbd, 0x03, 00, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x38, 0x0f, 0xe0, 0x03, 0x6e, 0x03, 0x0c, 0x03, 0xad, 0x4b, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4b, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x0c, 0x03, 0x6e, 0x06, 0x9c, 0x03, 0x20, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0x20, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0x20, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x38, 0x03, 
0x52, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x06, 0x9c, 0x03, 0x6e, 0x03, 00, 0x0f, 0xe0, 0x03, 0xad, 0x03, 00, 0x09, 0x9c, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x20, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x0c, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x06, 0x9c, 0x03, 0x6e, 0x03, 00, 0x03, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0x9c, 0x03, 0x99, 0x03, 0x91, 0x03, 00, 0x03, 0x92, 0x0f, 0xd0, 0x03, 0x92, 0x03, 00, 0x09, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0xd0, 0x03, 0x34, 0x03, 0x41, 0x06, 0x92, 0x03, 0x41, 0x03, 0x29, 0x06, 0xd0, 0x03, 00, 0x03, 0x83, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0xb0, 0x03, 00, 0x03, 0xaf, 0x03, 0xcf, 0x03, 0xd3, 0x03, 0x99, 0x03, 00, 0x06, 0xe6, 0x06, 0xe5, 0x03, 0x9f, 0x03, 0x70, 0x03, 00, 0x06, 0xe5, 0x03, 0x62, 0x03, 0x21, 0x06, 0xe6, 0x03, 0xa0, 0x03, 00, 0x03, 0xd7, 0x03, 0xd1, 0x03, 0x59, 0x03, 0x29, 0x15, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x09, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 0x03, 0x58, 0x03, 0x2a, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xa1, 0x03, 00, 0x06, 0xe5, 0x03, 0x46, 0x03, 0x0d, 0x03, 0x9f, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0x67, 0x03, 0x1f, 0x06, 0x92, 0x03, 0x67, 0x03, 00, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x59, 0x03, 0xce, 0x03, 0xd1, 0x03, 0xd7, 0x03, 0x45, 0x03, 0x0d, 0x03, 0xa0, 0x03, 0xe6, 0x15, 0xe5, 0x03, 0x16, 0x03, 0x21, 0x03, 0xc3, 0x03, 0xe6, 0x03, 0xe2, 0x03, 0xdb, 0x03, 0x68, 0x03, 0x1f, 0x03, 0x91, 0x03, 0x92, 0x03, 0x67, 0x03, 00, 0x03, 0xd0, 0x03, 0xa0, 0x03, 00, 0x03, 0xb0, 0x0c, 0xd0, 0x03, 0x34, 0x03, 0x4c, 0x09, 0xd0, 0x03, 0x4c, 0x03, 0x34, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0xbf, 0x06, 0xd0, 0x03, 00, 0x03, 0x92, 0x03, 0xd0, 0x03, 0x74, 0x03, 0x15, 0x09, 0xd0, 0x03, 0x59, 0x03, 0x2b, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x06, 0x9c, 0x03, 0x6e, 0x03, 00, 0x06, 0xe0, 0x03, 0x16, 0x03, 0x20, 0x03, 0xbd, 0x15, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x09, 0xe0, 0x03, 0x8c, 0x03, 0x0c, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xad, 0x03, 00, 0x09, 0x9c, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x20, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x18, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0x8c, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x45, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x06, 0x9c, 0x03, 0x6e, 0x03, 00, 0x0c, 0xe0, 0x03, 0x7c, 0x03, 00, 0x03, 0x9c, 0x4e, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4b, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 00, 0x03, 0x20, 0x09, 0x2c, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x0c, 
0x03, 0x2c, 0x03, 0x0c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x0c, 0x0c, 0x2c, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0x2c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 0x60, 0x03, 0x0c, 0x0c, 0x2c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x2b, 0x03, 0x2a, 0x06, 0x29, 0x03, 0xa0, 0x0f, 0xd0, 0x03, 0x92, 0x03, 00, 0x09, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0xd0, 0x03, 0x29, 0x03, 0x15, 0x09, 0x29, 0x03, 0x4c, 0x06, 0xd0, 0x03, 00, 0x03, 0x92, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0xd9, 0x03, 0x9d, 0x03, 00, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0x80, 0x03, 0x0d, 0x06, 0x21, 0x03, 00, 0x06, 0xe5, 0x03, 0x63, 0x03, 0x2d, 0x06, 0xe6, 0x03, 0x9c, 0x03, 00, 0x03, 0xd0, 0x03, 0xce, 0x03, 0x59, 0x03, 0x29, 0x15, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x09, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 0x03, 0x58, 0x03, 0x2a, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xa1, 0x03, 00, 0x06, 0xe5, 0x03, 0xd3, 0x03, 0x54, 0x03, 00, 0x03, 0x21, 0x03, 0xc3, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x0c, 0x0c, 0x29, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xce, 0x03, 0xd0, 0x03, 0xc7, 0x03, 0x52, 0x03, 00, 0x03, 0x21, 0x03, 0xc3, 0x03, 0xe6, 0x0f, 0xe5, 0x03, 0xc2, 0x03, 0x2d, 0x03, 00, 0x03, 0x39, 0x03, 0xe6, 0x03, 0xe1, 0x03, 0x5d, 0x03, 0x0c, 0x0c, 0x29, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x0f, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x09, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x09, 0xd0, 0x03, 00, 0x03, 0x92, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x09, 0xd0, 0x03, 0x58, 0x03, 0x2a, 0x03, 0xdf, 0x03, 0x60, 0x03, 0x0c, 0x0c, 0x2c, 0x06, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 00, 0x03, 0x38, 0x12, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0x2c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x12, 0xe0, 0x03, 0xad, 0x06, 0x16, 0x03, 0x2c, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x45, 0x03, 0x38, 0x03, 0xe0, 0x03, 0x16, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x0c, 0x0c, 0x2c, 0x09, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x7c, 0x51, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4b, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x16, 0x03, 0x7c, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x45, 0x03, 0x2c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x45, 0x03, 0x2c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x06, 0x45, 0x06, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x1b, 0xe0, 0x03, 
0xbd, 0x03, 00, 0x03, 0xbd, 0x0f, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x16, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xbb, 0x03, 0xd5, 0x03, 0xce, 0x15, 0xd0, 0x03, 0x92, 0x03, 00, 0x09, 0xd0, 0x03, 0x74, 0x03, 0x15, 0x03, 0xd0, 0x06, 0x41, 0x12, 0xd0, 0x03, 00, 0x03, 0x92, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0xcf, 0x03, 0xd5, 0x03, 0xdd, 0x03, 0x9f, 0x03, 00, 0x03, 0xe6, 0x03, 0xc2, 0x03, 00, 0x03, 0x90, 0x03, 0xe5, 0x03, 0x9f, 0x03, 00, 0x06, 0xe5, 0x03, 0x63, 0x03, 0x2d, 0x03, 0xe4, 0x03, 0xe1, 0x03, 0x97, 0x03, 00, 0x03, 0xcd, 0x03, 0xce, 0x03, 0x5a, 0x03, 0x2a, 0x15, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x09, 0xd0, 0x06, 0x41, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 0x03, 0x58, 0x03, 0x2a, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0x91, 0x03, 00, 0x0c, 0xe5, 0x03, 0xd3, 0x03, 0x21, 0x03, 0x39, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0x74, 0x03, 0x15, 0x12, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x4c, 0x03, 0x29, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xd0, 0x03, 0xd9, 0x03, 0xd0, 0x03, 0x21, 0x03, 0x39, 0x03, 0xe6, 0x15, 0xe5, 0x03, 0xb0, 0x03, 0x0d, 0x03, 0x71, 0x03, 0xe4, 0x03, 0x7b, 0x03, 0x15, 0x03, 0xcf, 0x0c, 0xd0, 0x03, 0xb0, 0x03, 00, 0x03, 0xb0, 0x0c, 0xd0, 0x03, 0x34, 0x03, 0x4c, 0x09, 0xd0, 0x03, 0x4c, 0x03, 0x34, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x09, 0xd0, 0x03, 00, 0x03, 0x92, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x09, 0xd0, 0x03, 0x59, 0x03, 0x2a, 0x03, 0xdc, 0x03, 0x7c, 0x03, 0x16, 0x18, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x6e, 0x0f, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x09, 0xe0, 0x03, 0x8c, 0x03, 0x0c, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x20, 0x03, 0x60, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x16, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xbd, 0x0f, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x12, 0xe0, 0x03, 0x16, 0x03, 0x52, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x7c, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0x45, 0x54, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x0c, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 00, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x16, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x7c, 0x03, 0x8c, 0x03, 0x2c, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0xcf, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x38, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x16, 
0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x20, 0x03, 0xcf, 0x06, 0xe0, 0x06, 0xcf, 0x03, 0x0c, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x2b, 0x03, 0x37, 0x03, 0xd3, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0xbf, 0x0f, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0x92, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x41, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0xa0, 0x06, 0xd0, 0x03, 0xbf, 0x06, 0xd0, 0x03, 00, 0x03, 0x92, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x0c, 0x03, 0x67, 0x03, 0xda, 0x03, 0xd0, 0x03, 0x2d, 0x03, 00, 0x03, 0xe6, 0x03, 0x9f, 0x03, 00, 0x03, 0xd3, 0x03, 0xe5, 0x03, 0x80, 0x03, 00, 0x06, 0xe5, 0x03, 0x63, 0x03, 0x2d, 0x03, 0xe1, 0x03, 0xda, 0x03, 0x93, 0x03, 00, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x15, 0x15, 0xd0, 0x03, 0x5a, 0x03, 0x15, 0x03, 0xb0, 0x03, 0xd0, 0x03, 0xbf, 0x03, 0x0c, 0x03, 0x74, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 0x03, 0x58, 0x03, 0x1f, 0x03, 0xdf, 0x03, 0xd4, 0x03, 0x21, 0x03, 00, 0x06, 0xe5, 0x03, 0xd3, 0x06, 0xe5, 0x03, 0x55, 0x03, 0x39, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0xbf, 0x03, 0x0c, 0x03, 0x67, 0x06, 0xd0, 0x03, 0xbf, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x4c, 0x03, 0xd0, 0x03, 0xa0, 0x03, 0x0c, 0x03, 0x29, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0xcf, 0x03, 0xbe, 0x03, 0xd3, 0x03, 0xda, 0x03, 0x52, 0x03, 0x39, 0x03, 0xe6, 0x0f, 0xe5, 0x03, 0xd3, 0x06, 0xe5, 0x03, 0x21, 0x03, 0x71, 0x03, 0xe5, 0x03, 0xd0, 0x03, 0x0c, 0x03, 0x67, 0x06, 0xd0, 0x03, 0xbf, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x41, 0x06, 0xd0, 0x03, 0xbf, 0x03, 0xd0, 0x03, 0x74, 0x03, 0x0c, 0x03, 0xbf, 0x03, 0xd0, 0x03, 0xbf, 0x03, 0x0c, 0x03, 0x74, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x09, 0xd0, 0x03, 00, 0x03, 0x92, 0x03, 0xd0, 0x03, 0xa0, 0x03, 00, 0x03, 0xa0, 0x03, 0xd0, 0x03, 0xa0, 0x03, 0x15, 0x03, 0x2a, 0x03, 0xda, 0x03, 0xce, 0x03, 0x0c, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x20, 0x03, 0x6e, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x45, 0x06, 0xe0, 0x03, 00, 0x03, 0x8c, 0x03, 0xe0, 0x03, 0x7c, 0x03, 00, 0x03, 0x60, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x38, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x38, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x12, 0xe0, 0x03, 00, 0x03, 0x8c, 0x03, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x45, 0x06, 0xe0, 0x03, 00, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x16, 0x03, 0x0c, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x38, 0x57, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x7c, 0x03, 00, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x6e, 0x03, 00, 0x09, 0xe0, 0x03, 0x38, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x8c, 0x03, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x45, 0x03, 0x0c, 0x03, 0x7c, 0x03, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x8c, 0x03, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x45, 0x03, 0x0c, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x8c, 0x03, 0x2c, 0x03, 0x45, 0x03, 0xad, 0x03, 00, 
0x03, 0x9c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x2c, 0x03, 0x0c, 0x12, 0xe0, 0x03, 0xbd, 0x03, 0x20, 0x03, 0x16, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x06, 0x20, 0x03, 0x16, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x2c, 0x03, 0x0c, 0x06, 0xe0, 0x03, 0xbc, 0x03, 0x1f, 0x03, 0x15, 0x03, 0x29, 0x03, 0x0c, 0x03, 0xa0, 0x0f, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0x29, 0x03, 0x1f, 0x03, 0x15, 0x03, 0x1f, 0x03, 0xbf, 0x06, 0xd0, 0x03, 0x67, 0x03, 00, 0x03, 0x29, 0x03, 0x1f, 0x03, 0x34, 0x06, 0xd0, 0x03, 00, 0x03, 0x92, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0xcf, 0x03, 0x74, 0x03, 00, 0x03, 0x2b, 0x03, 0x16, 0x03, 0x71, 0x03, 00, 0x06, 0xe5, 0x03, 0x21, 0x03, 0x16, 0x03, 0x21, 0x03, 0x54, 0x03, 0x0d, 0x03, 0x21, 0x03, 0xe6, 0x03, 0x63, 0x03, 0x2c, 0x03, 0xdc, 0x03, 0xd4, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 0x03, 0xb0, 0x03, 0x0c, 0x03, 0x1f, 0x03, 0x74, 0x0f, 0xd0, 0x03, 0x5a, 0x06, 0x15, 0x03, 0x29, 0x03, 0x0c, 0x03, 0x41, 0x06, 0xd0, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x16, 0x03, 0x71, 0x03, 00, 0x06, 0xe5, 0x03, 0x39, 0x06, 0x21, 0x03, 0x0d, 0x03, 0xb2, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x92, 0x03, 0x0c, 0x03, 0x1f, 0x03, 0x29, 0x03, 0x0c, 0x06, 0xd0, 0x03, 0x83, 0x03, 00, 0x03, 0x29, 0x03, 0x1f, 0x03, 0x4c, 0x03, 0x29, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x09, 0xd0, 0x03, 0x34, 0x06, 0x1f, 0x03, 0x0c, 0x03, 0xae, 0x09, 0xe6, 0x09, 0xe5, 0x03, 0x0d, 0x03, 0x2d, 0x03, 0x16, 0x03, 0x21, 0x03, 0xd3, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0x9b, 0x03, 0x0c, 0x03, 0x1f, 0x03, 0x29, 0x03, 0x0c, 0x06, 0xd0, 0x03, 0xa0, 0x06, 0x15, 0x03, 0x29, 0x03, 0x0c, 0x06, 0xd0, 0x03, 0x4c, 0x03, 0x0c, 0x03, 0x29, 0x03, 0x0c, 0x03, 0x4c, 0x06, 0xd0, 0x03, 0x92, 0x03, 00, 0x09, 0xd0, 0x03, 00, 0x03, 0x92, 0x06, 0xd0, 0x03, 0x41, 0x03, 0x0c, 0x03, 0x29, 0x03, 0x1f, 0x03, 0x4c, 0x03, 0x29, 0x03, 0xd7, 0x03, 0xdf, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x2c, 0x03, 0x0c, 0x06, 0xe0, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x20, 0x03, 0xcf, 0x12, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x2c, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x45, 0x03, 0x2c, 0x03, 0x60, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x16, 0x03, 0x2c, 0x03, 00, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0xbd, 0x03, 0x20, 0x03, 0x16, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x12, 0xe0, 0x03, 0x52, 0x03, 0x0c, 0x09, 0x2c, 0x03, 0x16, 0x03, 0xad, 0x03, 0x9c, 0x03, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x16, 0x03, 0x20, 0x03, 0xcf, 0x06, 0xe0, 0x03, 00, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x20, 0x03, 0x0c, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x8c, 0x03, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x52, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x6e, 0x03, 0x16, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xe0, 0x03, 0x9c, 0x0f, 00, 0x03, 0x60, 0x4b, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x51, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xcf, 0x12, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xcf, 0x2d, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0xcf, 0x24, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0xcf, 
0x0c, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x1b, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x36, 0xe0, 0x06, 0x9c, 0x2a, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x09, 0xe0, 0x03, 0xdd, 0x03, 0xd4, 0x03, 0x9f, 0x03, 0x92, 0x15, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0xd0, 0x06, 0xa0, 0x0f, 0xd0, 0x03, 0xbf, 0x03, 0x92, 0x03, 0xb0, 0x1e, 0xd0, 0x03, 0xcf, 0x03, 0xd1, 0x03, 0xb8, 0x03, 0x9d, 0x03, 0xd4, 0x03, 0xe6, 0x0c, 0xe5, 0x03, 0x9f, 0x03, 0xc2, 0x03, 0xe5, 0x03, 0xc2, 0x03, 0xb2, 0x03, 0xe6, 0x03, 0xe4, 0x03, 0xdf, 0x03, 0xd6, 0x03, 0xd0, 0x03, 0xcf, 0x0c, 0xd0, 0x03, 0xbf, 0x03, 0x92, 0x03, 0xbf, 0x0f, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x03, 0xd0, 0x03, 0x92, 0x03, 0xb0, 0x15, 0xd0, 0x03, 0xcd, 0x03, 0xc1, 0x03, 0x9c, 0x03, 0xd4, 0x03, 0xe7, 0x0c, 0xe5, 0x03, 0xb0, 0x03, 0x9f, 0x03, 0xd5, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x0f, 0xd0, 0x03, 0x92, 0x03, 0xa0, 0x0c, 0xd0, 0x03, 0xb0, 0x03, 0x92, 0x21, 0xd0, 0x03, 0x9f, 0x03, 0x92, 0x03, 0xc5, 0x03, 0xdf, 0x03, 0xe4, 0x06, 0xe6, 0x0c, 0xe5, 0x06, 0x9f, 0x06, 0xe5, 0x06, 0xe6, 0x03, 0xe1, 0x03, 0xda, 0x03, 0x92, 0x03, 0x9f, 0x0f, 0xd0, 0x06, 0x92, 0x0c, 0xd0, 0x03, 0xb0, 0x03, 0x92, 0x03, 0xb0, 0x27, 0xd0, 0x03, 0xa0, 0x03, 0x92, 0x06, 0xd0, 0x03, 0xce, 0x03, 0xd4, 0x03, 0xdd, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x0c, 0xe0, 0x06, 0x9c, 0x1b, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xad, 0x0f, 0xe0, 0x06, 0xad, 0x1b, 0xe0, 0x03, 0xad, 0x06, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x1b, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x30, 0xe0, 0x06, 0xad, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xe0, 0x06, 0xad, 0x0c, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xbd, 0x03, 0x9c, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x12, 0xe0, 0x03, 0x45, 0x03, 0x6e, 0x12, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x66, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xea, 0xe0, 0x03, 0xad, 0x03, 0x52, 0x03, 0x9c, 0x03, 0x8c, 0x03, 0x20, 0x03, 0x2c, 0xc0, 0xe0, 0x03, 0xdb, 0x03, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x15, 0xd0, 0x03, 0x92, 0x03, 00, 0x3c, 0xd0, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0xd4, 0x03, 0xdf, 0x03, 0xe4, 0x1e, 0xe5, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0xe1, 0x03, 0xd9, 0x03, 0xd1, 0x06, 0xcf, 0x24, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x1e, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x48, 0xd0, 0x06, 0xcf, 0x03, 0xd1, 0x03, 0xd9, 0x03, 0xe1, 0x03, 0xe5, 0x03, 0xe6, 0x1e, 0xe5, 0x03, 0xe4, 0x03, 0xdf, 0x03, 0xd4, 0x03, 0xd0, 0x03, 0xcf, 0x57, 0xd0, 0x03, 0xd1, 0x03, 0xcd, 0x03, 0xd0, 0x03, 0xdb, 0xe4, 0xe0, 0x03, 0x9c, 0x03, 00, 0x15, 0xe0, 0x03, 00, 0x03, 0x9c, 0x2a, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0xcf, 0x7e, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xea, 0xe0, 0x03, 0xcf, 0x03, 0x52, 0x06, 0x2c, 0x03, 0x60, 0x03, 0xcf, 0xbd, 0xe0, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd0, 0x03, 0xce, 0x03, 0xd1, 0x15, 0xd0, 0x03, 0xb0, 0x03, 0x5a, 0x3c, 0xd0, 0x03, 0xcf, 0x03, 0xd1, 0x03, 0xd7, 0x03, 0xe1, 0x03, 0xe6, 0x1b, 0xe5, 0x06, 0xe6, 0x03, 0xe4, 0x03, 0xdc, 0x03, 0xd4, 0x03, 0xce, 0x2a, 0xd0, 0x03, 0x92, 0x03, 0x74, 0x1e, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x4e, 0xd0, 0x03, 0xce, 0x03, 0xd4, 0x03, 0xdc, 0x03, 0xe4, 0x06, 0xe6, 0x1b, 0xe5, 0x03, 0xe6, 0x03, 0xe1, 0x03, 0xd7, 0x03, 0xd1, 0x03, 0xcf, 0x57, 0xd0, 0x03, 0xd1, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xda, 0x03, 0xdf, 0xe1, 0xe0, 0x03, 0xbd, 0x03, 0x60, 0x15, 0xe0, 0x03, 0x60, 0x03, 0xbd, 0x2a, 0xe0, 
0x03, 0xad, 0x03, 0x8c, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xba, 0xe0, 0x03, 0xdd, 0x03, 0xd7, 0x03, 0xd0, 0x03, 0xce, 0x57, 0xd0, 0x06, 0xcf, 0x03, 0xd4, 0x03, 0xdc, 0x03, 0xe4, 0x03, 0xe6, 0x1b, 0xe5, 0x06, 0xe6, 0x03, 0xe0, 0x03, 0xd7, 0x03, 0xcf, 0x03, 0xce, 0x4e, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x4e, 0xd0, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd7, 0x03, 0xe0, 0x06, 0xe6, 0x1b, 0xe5, 0x03, 0xe6, 0x03, 0xe4, 0x03, 0xdc, 0x03, 0xd4, 0x06, 0xcf, 0x57, 0xd0, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xd7, 0x03, 0xdd, 0xff, 0xe0, 0xb4, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xb7, 0xe0, 0x03, 0xdf, 0x03, 0xdb, 0x03, 0xd5, 0x03, 0xd0, 0x03, 0xcf, 0x57, 0xd0, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd7, 0x03, 0xe1, 0x03, 0xe5, 0x03, 0xe6, 0x18, 0xe5, 0x06, 0xe6, 0x03, 0xe2, 0x03, 0xda, 0x03, 0xd3, 0x03, 0xce, 0x03, 0xcf, 0x4e, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x4e, 0xd0, 0x03, 0xcf, 0x03, 0xce, 0x03, 0xd3, 0x03, 0xda, 0x03, 0xe2, 0x06, 0xe6, 0x18, 0xe5, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0xe1, 0x03, 0xd7, 0x03, 0xcf, 0x03, 0xce, 0x57, 0xd0, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0xd5, 0x03, 0xdb, 0x03, 0xdf, 0xff, 0xe0, 0xb1, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xab, 0xe0, 0x06, 0xad, 0x06, 0xe0, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd3, 0x03, 0xcf, 0x5a, 0xd0, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xda, 0x03, 0xe4, 0x1b, 0xe5, 0x06, 0xe6, 0x03, 0xe5, 0x03, 0xdf, 0x03, 0xd5, 0x03, 0xcf, 0x03, 0xce, 0x51, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x51, 0xd0, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd5, 0x03, 0xdf, 0x03, 0xe5, 0x06, 0xe6, 0x1b, 0xe5, 0x03, 0xe4, 0x03, 0xda, 0x03, 0xd0, 0x03, 0xce, 0x5a, 0xd0, 0x03, 0xcf, 0x03, 0xd3, 0x03, 0xda, 0x03, 0xdf, 0xff, 0xe0, 0xb1, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x96, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x27, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x9c, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x3c, 0xe0, 0x03, 0x8c, 0x03, 0x0c, 0x03, 0xad, 0x06, 0xe0, 0x03, 0xbc, 0x03, 0x5d, 0x03, 0xd0, 0x03, 0xcf, 0x18, 0xd0, 0x03, 0xb0, 0x03, 0x5a, 0x15, 0xd0, 0x03, 0x74, 0x03, 0x92, 0x1b, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0xd5, 0x03, 0x60, 0x03, 0xc3, 0x03, 0xe6, 0x18, 0xe5, 0x06, 0xe6, 0x03, 0xe2, 0x03, 0xdb, 0x03, 0xd1, 0x06, 0xce, 0x0c, 0xd0, 0x03, 0x92, 0x03, 0x74, 0x3f, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x51, 0xd0, 0x06, 0xce, 0x03, 0xd1, 0x03, 0xdb, 0x03, 0xe2, 0x06, 0xe6, 0x18, 0xe5, 0x06, 0xe6, 0x03, 0xdf, 0x03, 0xd5, 0x5d, 0xd0, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0xda, 0x03, 0xdf, 0xff, 0xe0, 0xb1, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x96, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x27, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x42, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x54, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x39, 0xe0, 0x03, 0xcf, 0x03, 0x38, 0x03, 0xad, 0x09, 0xe0, 0x03, 0xac, 0x03, 0x2a, 0x03, 0xcd, 0x03, 0xcf, 0x18, 0xd0, 0x03, 0xa0, 0x03, 0x29, 0x06, 0xd0, 0x03, 0xb0, 0x03, 0xa0, 0x09, 0xd0, 0x03, 0x4c, 0x03, 0x74, 0x1b, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd1, 0x03, 0xd9, 0x03, 0x2c, 0x03, 0xb3, 0x03, 0xe6, 0x18, 0xe5, 0x06, 0xe6, 0x03, 0xe0, 0x03, 0xd6, 0x03, 0xcf, 0x03, 0xce, 0x03, 0xaf, 0x03, 0xa0, 0x09, 0xd0, 0x03, 0x74, 0x03, 0x4c, 0x3f, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 
0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x51, 0xd0, 0x03, 0xcf, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd6, 0x03, 0xe0, 0x06, 0xe6, 0x18, 0xe5, 0x03, 0xe6, 0x03, 0xe7, 0x03, 0xe2, 0x03, 0xd9, 0x03, 0xd1, 0x5a, 0xd0, 0x03, 0xcf, 0x03, 0xcd, 0x03, 0xd7, 0x03, 0xdf, 0xff, 0xe0, 0xb1, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x51, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xbd, 0x27, 0xe0, 0x03, 0xcf, 0x03, 0xad, 0x0f, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0xbd, 0x15, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xbd, 0x21, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0f, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xcf, 0x12, 0xe0, 0x06, 0xad, 0x1b, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x1e, 0xe0, 0x03, 0x9c, 0x09, 0xe0, 0x06, 0xad, 0x06, 0xe0, 0x03, 0xdf, 0x03, 0xdc, 0x03, 0xd4, 0x03, 0xcd, 0x03, 0xcf, 0x03, 0xd1, 0x06, 0xd0, 0x06, 0xa0, 0x15, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x18, 0xd0, 0x03, 0xa0, 0x03, 0x92, 0x03, 0xb0, 0x09, 0xd0, 0x03, 0x29, 0x03, 0x59, 0x03, 0xd4, 0x03, 0xdb, 0x03, 0xe4, 0x06, 0xe6, 0x06, 0xe5, 0x06, 0x9f, 0x0c, 0xe5, 0x03, 0xd3, 0x03, 0x9f, 0x03, 0x9a, 0x03, 0xd3, 0x03, 0xce, 0x03, 0xcf, 0x03, 0x5a, 0x03, 0x29, 0x1b, 0xd0, 0x06, 0x92, 0x15, 0xd0, 0x03, 0x92, 0x03, 0xb0, 0x12, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x54, 0xd0, 0x03, 0xcf, 0x03, 0xce, 0x03, 0xd3, 0x03, 0xdc, 0x03, 0xe4, 0x1b, 0xe5, 0x06, 0xe6, 0x03, 0xe4, 0x03, 0xdb, 0x03, 0xd4, 0x06, 0xcf, 0x51, 0xd0, 0x03, 0xd1, 0x03, 0xcf, 0x03, 0xcd, 0x03, 0xd4, 0x03, 0xdd, 0xff, 0xe0, 0xb1, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x16, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x60, 0x03, 0x20, 0x03, 0x52, 0x0f, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x7c, 0x12, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x20, 0x03, 0x16, 0x03, 0x20, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x52, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x09, 00, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x20, 0x03, 0x0c, 0x03, 0x52, 0x15, 0xe0, 0x03, 0x45, 0x03, 0x0c, 0x03, 0x2c, 0x06, 0x16, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x03, 0x20, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xcf, 0x06, 0x20, 0x03, 0x16, 0x03, 0x45, 0x03, 0xe0, 0x03, 0xdf, 0x03, 0x9a, 0x03, 00, 0x03, 0xcd, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x03, 0x41, 0x03, 0x1f, 0x03, 0x0c, 0x03, 0x4c, 0x06, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0xd0, 0x03, 0x92, 0x09, 00, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x0c, 0x03, 0x1f, 0x03, 0x29, 0x03, 00, 0x03, 0x83, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x59, 0x03, 0xd4, 0x03, 0xdd, 0x03, 00, 0x03, 0xa0, 0x03, 0xe5, 0x03, 0xc2, 0x03, 0x0d, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x62, 0x06, 0xe5, 0x03, 0x71, 0x03, 0x0d, 0x03, 0x2c, 0x03, 0x15, 0x03, 0x1f, 0x03, 0xce, 0x03, 0x92, 0x09, 00, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x06, 0xd0, 0x03, 0xa0, 0x03, 0x15, 0x06, 0x1f, 0x03, 0x15, 0x03, 0xa0, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x4c, 0x03, 0x1f, 0x03, 0x29, 0x03, 00, 0x03, 0x83, 0x0f, 0xd0, 0x03, 0xcd, 0x03, 
0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x57, 0xd0, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xd7, 0x03, 0xe1, 0x03, 0xe5, 0x03, 0xe6, 0x18, 0xe5, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0xdd, 0x03, 0xd4, 0x06, 0xce, 0x51, 0xd0, 0x03, 0xd1, 0x03, 0xd0, 0x03, 0xcd, 0x03, 0xd4, 0x03, 0xdc, 0xff, 0xe0, 0xb1, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 00, 0x03, 0x8c, 0x0f, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x2c, 0x03, 0xcf, 0x12, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x16, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x12, 0xe0, 0x03, 0x60, 0x03, 00, 0x03, 0x8c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x16, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x45, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x60, 0x03, 00, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x8c, 0x03, 00, 0x12, 0xe0, 0x03, 0x8c, 0x03, 0x0c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0x16, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x38, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xbd, 0x06, 0xe0, 0x06, 0x45, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0xad, 0x03, 0xdd, 0x03, 0x99, 0x03, 00, 0x03, 0xce, 0x03, 0xd0, 0x03, 0x5a, 0x03, 00, 0x03, 0x74, 0x03, 0xd0, 0x03, 0x83, 0x03, 00, 0x06, 0xd0, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x09, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0xbf, 0x06, 0xd0, 0x03, 0x4c, 0x03, 0x29, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x59, 0x03, 0xd6, 0x03, 0xe0, 0x03, 00, 0x03, 0x9f, 0x03, 0xe5, 0x03, 0x62, 0x03, 0x21, 0x0f, 0xe5, 0x03, 0xd4, 0x03, 0xe4, 0x03, 0xdd, 0x03, 0xc4, 0x03, 00, 0x03, 0x90, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x09, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x06, 0xd0, 0x03, 0x15, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x5a, 0x03, 0x15, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x0c, 0x03, 0xa0, 0x03, 0xd0, 0x03, 0x4c, 0x03, 0x29, 0x0f, 0xd0, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xdf, 0x03, 0xe6, 0x03, 0xe7, 0x12, 0xe5, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xdf, 0x03, 0xd1, 0x03, 0xcd, 0x57, 0xd0, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd5, 0x03, 0xdd, 0x03, 0xe4, 0x03, 0xe6, 0x1b, 0xe5, 0x03, 0xe6, 0x03, 0xe0, 0x03, 0xd6, 0x06, 0xce, 0x51, 0xd0, 0x03, 0xd1, 0x03, 0xd0, 0x03, 0xce, 0x03, 0xd3, 0x03, 0xdc, 0x03, 0xdf, 0xff, 0xe0, 0xae, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x16, 0x03, 0x20, 0x03, 0xbd, 0x0c, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x15, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xad, 0x03, 00, 0x09, 0x9c, 0x03, 00, 0x03, 0x9c, 0x0f, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x38, 0x03, 0x52, 0x09, 0xe0, 0x03, 0x52, 0x03, 0x38, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x0c, 0x03, 0x8c, 0x09, 0xe0, 0x03, 0x20, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x20, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x12, 0xe0, 0x06, 0x45, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x06, 0x9c, 0x03, 0x6e, 0x03, 00, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x0c, 0x03, 0x6e, 0x06, 0x9c, 0x03, 0x20, 0x03, 0x60, 0x03, 0xdc, 0x03, 0x99, 0x03, 00, 0x03, 0xce, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x1f, 0x06, 0xd0, 0x03, 0x92, 0x03, 00, 0x06, 0xd0, 
[Raw data hunk elided: roughly twenty extraction-flattened lines of hex byte
pairs, apparently run-length-encoded 8-bit image pixel data (each pair reads
as a run length followed by a color value, with long runs split at a 0xff
count, e.g. "0xff, 0xe0, 0xae, 0xe0"). The diff's own "-" line markers have
been fused into the byte stream (e.g. "-0x03, 0xf3"), so the added/removed
line structure of the original hunk is not recoverable; only the
(run-length, color) pair encoding of the payload survives.]
0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x06, 0x20, 0x03, 00, 0x06, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 00, 0x03, 0x38, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x16, 0x09, 0x2c, 0x03, 0x52, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0x2c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x0c, 0x06, 0x2c, 0x03, 0x2b, 0x03, 0x2a, 0x03, 0xcd, 0x03, 0xce, 0x15, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0x92, 0x03, 0xd0, 0x03, 00, 0x03, 0x1f, 0x09, 0x29, 0x03, 0x74, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0x15, 0x03, 0x1f, 0x0c, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x03, 0xcd, 0x03, 0xcf, 0x03, 0x2a, 0x03, 0x60, 0x03, 0xe5, 0x03, 0xe6, 0x03, 0x62, 0x03, 0x2d, 0x06, 0xe5, 0x03, 00, 0x03, 0x9f, 0x09, 0xe5, 0x03, 00, 0x03, 0x9f, 0x03, 0xe5, 0x03, 00, 0x03, 0x21, 0x03, 0x2c, 0x06, 0x2a, 0x03, 0x73, 0x03, 0xce, 0x03, 0x91, 0x03, 00, 0x09, 0xd0, 0x03, 00, 0x03, 0x92, 0x0f, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x09, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x0c, 0x29, 0x03, 0xa0, 0x03, 0xd0, 0x03, 00, 0x03, 0x92, 0x09, 0xd0, 0x03, 00, 0x03, 0x92, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x0c, 0xd0, 0x03, 00, 0x03, 0x92, 0x06, 0xcf, 0x03, 0x29, 0x03, 0x59, 0x03, 0xd3, 0x03, 0x5e, 0x03, 0x2c, 0x03, 0xe5, 0x03, 0xe6, 0x09, 0xe5, 0x03, 0x62, 0x03, 0x16, 0x03, 00, 0x03, 0x54, 0x0c, 0xe5, 0x03, 0x2d, 0x03, 0x63, 0x03, 0xe5, 0x03, 0xdf, 0x03, 0xd6, 0x03, 0xcf, 0x03, 0xcd, 0x03, 0xcf, 0x03, 0x92, 0x03, 00, 0x0c, 0xd0, 0x03, 00, 0x06, 0x92, 0x03, 00, 0x09, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x0c, 0x29, 0x03, 0xa0, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x0f, 0xd0, 0x03, 0xce, 0x03, 0xcd, 0x03, 0x5a, 0x03, 0x2b, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x0f, 0xe0, 0x03, 0x2c, 0x03, 0x16, 0x03, 00, 0x03, 0x8c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x16, 0x09, 0x2c, 0x03, 0x52, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x1b, 0xe0, 0x03, 0x60, 0x03, 0x0c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x0c, 0x0c, 0x2c, 0x12, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0x2c, 0x03, 0xad, 0xa5, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0f, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0c, 0xe0, 0x03, 0x7c, 0x03, 00, 0x03, 0xad, 0x03, 0xe0, 0x06, 0x45, 0x0f, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x16, 0x03, 0x52, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0x8c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x6e, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x06, 0x45, 0x1b, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xbd, 0x0f, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x09, 0xe0, 0x03, 0x20, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x7c, 
0x03, 0x16, 0x06, 0xe0, 0x03, 0xdd, 0x03, 0xd6, 0x03, 0xd0, 0x03, 0xce, 0x18, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0x15, 0x03, 0x74, 0x0f, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0xa0, 0x03, 0x0c, 0x03, 0x5a, 0x09, 0xd0, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x4c, 0x03, 0x29, 0x03, 0xce, 0x03, 0xcd, 0x03, 0x29, 0x03, 0x5c, 0x03, 0xe0, 0x03, 0xe6, 0x03, 0x63, 0x03, 0x2d, 0x06, 0xe5, 0x03, 00, 0x03, 0x9f, 0x09, 0xe5, 0x03, 00, 0x03, 0x9f, 0x03, 0xe5, 0x03, 0x16, 0x03, 0x81, 0x03, 0xe4, 0x03, 0xe0, 0x03, 0xdb, 0x03, 0xd5, 0x03, 0xd0, 0x03, 0x8f, 0x03, 00, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd0, 0x03, 00, 0x03, 0x92, 0x0f, 0xd0, 0x03, 0x74, 0x03, 0x15, 0x09, 0xd0, 0x03, 0x1f, 0x03, 0x5a, 0x03, 0xd0, 0x03, 0xb0, 0x03, 00, 0x03, 0xb0, 0x0f, 0xd0, 0x03, 00, 0x03, 0x92, 0x09, 0xd0, 0x03, 00, 0x03, 0x92, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x0c, 0xd0, 0x03, 00, 0x03, 0x90, 0x06, 0xcd, 0x03, 0x1f, 0x03, 0x5c, 0x03, 0xdb, 0x03, 0x7c, 0x03, 0x16, 0x06, 0xe6, 0x09, 0xe5, 0x03, 0x62, 0x03, 0x2d, 0x03, 0x80, 0x03, 00, 0x03, 0x9f, 0x09, 0xe5, 0x03, 0x2d, 0x03, 0x63, 0x03, 0xe0, 0x03, 0xd6, 0x03, 0xd0, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xa0, 0x03, 00, 0x03, 0xbf, 0x06, 0xd0, 0x03, 0xbf, 0x03, 00, 0x03, 0xa0, 0x03, 0x92, 0x03, 00, 0x09, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0xd0, 0x03, 0xb0, 0x03, 00, 0x03, 0xb0, 0x0f, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x0f, 0xd0, 0x03, 0xce, 0x03, 0xd0, 0x03, 0x5c, 0x03, 0x2b, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x8c, 0x03, 00, 0x06, 0xe0, 0x03, 0x16, 0x03, 0x7c, 0x0f, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0x45, 0x03, 0x0c, 0x03, 0xcf, 0x06, 0xe0, 0x06, 0x45, 0x12, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x1e, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x1e, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xbd, 0xb1, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0f, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0xad, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 00, 0x03, 0x8c, 0x03, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x16, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x45, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x7c, 0x03, 00, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x20, 0x03, 0x6e, 0x06, 0xe0, 0x03, 00, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xad, 0x06, 0xe0, 0x03, 0xcf, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x38, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0x60, 0x03, 0x9c, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0xc9, 0x03, 0xd3, 0x03, 0xce, 0x03, 0xcf, 0x09, 0xd0, 0x03, 0x92, 0x06, 0xd0, 0x03, 0xbf, 0x03, 0x0c, 0x03, 0x74, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x15, 0x03, 0xbf, 0x06, 0xd0, 0x03, 0xbf, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 0x03, 0xd0, 0x03, 0xa0, 0x03, 0x0c, 0x03, 0x74, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x4c, 0x03, 0xd0, 0x03, 0xa0, 0x03, 0x0c, 0x03, 
0x29, 0x03, 0xcf, 0x03, 0xcd, 0x03, 0x29, 0x03, 0x5a, 0x03, 0xd9, 0x03, 0xe1, 0x03, 0x63, 0x03, 0x2d, 0x03, 0xe6, 0x03, 0xe5, 0x06, 0x39, 0x06, 0xe5, 0x03, 0x54, 0x03, 00, 0x03, 0x9f, 0x03, 0xe5, 0x03, 0x62, 0x03, 0x16, 0x03, 0xd4, 0x03, 0xe5, 0x03, 0xe4, 0x03, 0xcd, 0x03, 0xd9, 0x03, 0x93, 0x03, 00, 0x06, 0xcd, 0x03, 0xce, 0x03, 00, 0x03, 0x91, 0x0f, 0xd0, 0x03, 0xbf, 0x03, 0x0c, 0x03, 0x5a, 0x03, 0x92, 0x03, 0x29, 0x03, 0x15, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x34, 0x09, 0xd0, 0x03, 0xbf, 0x03, 0xd0, 0x06, 0x34, 0x06, 0xd0, 0x03, 0x4c, 0x03, 00, 0x03, 0x92, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x03, 0xd0, 0x06, 0xcf, 0x03, 0xce, 0x03, 00, 0x03, 0x81, 0x03, 0xcf, 0x03, 0x76, 0x03, 00, 0x03, 0x5f, 0x03, 0xe4, 0x03, 0xd3, 0x03, 0x0d, 0x03, 0x81, 0x06, 0xe5, 0x03, 0xd3, 0x03, 0xe5, 0x03, 0x62, 0x03, 0x2d, 0x03, 0xe5, 0x03, 0x80, 0x03, 0x0d, 0x03, 0xb0, 0x06, 0xe6, 0x03, 0x2d, 0x03, 0x45, 0x03, 0xd9, 0x03, 0xd0, 0x06, 0xcd, 0x03, 0xcf, 0x06, 0xd0, 0x03, 0x15, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0x5a, 0x03, 0x15, 0x06, 0xd0, 0x03, 0x0c, 0x03, 0x67, 0x03, 0xd0, 0x03, 0xbf, 0x03, 0x29, 0x03, 00, 0x06, 0xd0, 0x03, 0x29, 0x03, 0x34, 0x09, 0xd0, 0x03, 0xbf, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x0c, 0xd0, 0x03, 0xcf, 0x03, 0xce, 0x03, 0xd3, 0x03, 0xa8, 0x03, 00, 0x03, 0xad, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x20, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 00, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x20, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x45, 0x03, 0x20, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xad, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0f, 0xe0, 0x06, 0xbd, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0xcf, 0x12, 0xe0, 0x06, 0x38, 0x06, 0xe0, 0x03, 0x52, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x38, 0x09, 0xe0, 0x03, 0xcf, 0xa5, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x0c, 00, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x45, 0x09, 0xe0, 0x03, 0x52, 0x03, 00, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x0c, 0x09, 0x2c, 0x03, 0x16, 0x03, 0xad, 0x03, 0x9c, 0x03, 00, 0x06, 0xe0, 0x03, 0xbd, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x8c, 0x03, 00, 0x03, 0x2c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x20, 0x03, 0x16, 0x03, 0x20, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x20, 0x03, 0xe0, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x20, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x6e, 0x03, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x38, 0x12, 0xe0, 0x03, 0xbd, 0x03, 0x20, 0x03, 0x16, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0xad, 0x06, 0x38, 0x03, 0x8c, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xd6, 0x06, 0xce, 0x09, 0xd0, 0x03, 0x29, 0x03, 0x0c, 0x03, 0x29, 0x03, 0x0c, 0x03, 0x4c, 0x09, 0xd0, 0x03, 0x34, 0x03, 0x0c, 0x03, 0x29, 0x03, 0x15, 0x03, 0x67, 0x03, 0xd0, 0x03, 0x92, 0x03, 00, 
0x06, 0xd0, 0x03, 0x74, 0x03, 00, 0x03, 0x74, 0x03, 0xd0, 0x03, 0x83, 0x03, 00, 0x03, 0x29, 0x03, 0x1f, 0x03, 0x4c, 0x03, 0x29, 0x03, 0xd0, 0x03, 0xcf, 0x03, 0x29, 0x03, 0x59, 0x03, 0xd1, 0x03, 0xda, 0x03, 0x61, 0x03, 0x2d, 0x06, 0xe6, 0x03, 0xb0, 0x03, 0x0d, 0x03, 0x21, 0x03, 0x16, 0x03, 0x80, 0x03, 00, 0x03, 0x9f, 0x06, 0xe5, 0x03, 0x39, 0x03, 0x0d, 0x03, 0x2d, 0x03, 0x16, 0x03, 0x70, 0x03, 0xe1, 0x03, 0x9a, 0x03, 00, 0x03, 0xd0, 0x03, 0xce, 0x03, 0xcd, 0x03, 00, 0x03, 0x90, 0x06, 0xcf, 0x0c, 0xd0, 0x03, 0xa0, 0x06, 0x34, 0x03, 0x83, 0x03, 0x29, 0x03, 0x5a, 0x06, 0xd0, 0x03, 0xb0, 0x03, 0x1f, 0x03, 0x15, 0x03, 0x29, 0x03, 0x0c, 0x03, 0xa0, 0x03, 0xd0, 0x03, 0xa0, 0x03, 0x0c, 0x03, 0x1f, 0x03, 0x15, 0x03, 0x74, 0x03, 00, 0x03, 0x92, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x03, 0xcf, 0x03, 0xce, 0x06, 0xcd, 0x03, 0x4b, 0x03, 0x0c, 0x03, 0x1f, 0x03, 0x43, 0x03, 0x2c, 0x03, 0x62, 0x06, 0xe6, 0x03, 0x81, 0x03, 0x0d, 0x06, 0x21, 0x03, 0x39, 0x03, 0xe5, 0x03, 0x62, 0x03, 0x2d, 0x06, 0xe5, 0x03, 0x46, 0x03, 0x0d, 0x03, 0xb2, 0x03, 0xe5, 0x03, 0x8e, 0x03, 00, 0x03, 0x2a, 0x03, 0x9f, 0x03, 0xcd, 0x03, 0xcf, 0x09, 0xd0, 0x03, 0xa0, 0x03, 0x15, 0x06, 0x1f, 0x03, 0x15, 0x03, 0xa0, 0x06, 0xd0, 0x03, 0x74, 0x03, 00, 0x03, 0x29, 0x03, 0x15, 0x03, 0x67, 0x03, 00, 0x06, 0xd0, 0x03, 0xb0, 0x03, 0x1f, 0x03, 0x15, 0x03, 0x29, 0x03, 0x0c, 0x03, 0xa0, 0x03, 0xd0, 0x03, 0x5a, 0x03, 0x29, 0x0c, 0xd0, 0x06, 0xce, 0x03, 0xd6, 0x03, 0xdd, 0x03, 0x45, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x52, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x16, 0x03, 0x6e, 0x03, 00, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xcf, 0x06, 0x20, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x6e, 0x03, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x38, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0f, 0xe0, 0x03, 0xbd, 0x06, 0x16, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x2c, 0x03, 0x0c, 0x12, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x16, 0x03, 0x7c, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0xbd, 0x03, 0x20, 0x03, 0x16, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0xa5, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x87, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xad, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xbd, 0x21, 0xe0, 0x06, 0xad, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0xad, 0x06, 0xe0, 0x06, 0x9c, 0x0f, 0xe0, 0x06, 0xad, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xbd, 0x1b, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x45, 0xe0, 0x03, 0x16, 0x03, 0x8c, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0xda, 0x03, 0xd0, 0x03, 0xce, 0x03, 0xcf, 0x09, 0xd0, 0x03, 0xb0, 0x03, 0x92, 0x03, 0xb0, 0x0f, 0xd0, 0x03, 0xb0, 0x03, 0x92, 0x03, 0xbf, 0x21, 0xd0, 0x03, 0xb0, 0x03, 0x92, 0x0f, 0xd0, 0x03, 0xcf, 0x06, 0xce, 0x03, 0xd4, 0x03, 0xdb, 0x03, 0xe1, 0x03, 0xe5, 0x06, 0xe6, 0x03, 0xd3, 0x03, 0x9f, 0x03, 0xc2, 0x12, 0xe5, 0x03, 0xc2, 0x03, 0x9f, 0x03, 0xd4, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0xe4, 0x03, 0xdf, 0x03, 0xd9, 0x03, 0xd4, 0x03, 0xd0, 0x03, 0xce, 0x09, 0xcd, 0x03, 0xce, 0x06, 0xcf, 0x0f, 0xd0, 0x03, 0x15, 0x03, 0x83, 0x0c, 0xd0, 0x03, 0xa0, 0x03, 0x92, 0x0c, 0xd0, 0x03, 0xbf, 0x03, 0x92, 0x03, 0xb0, 0x06, 0xd0, 0x06, 0xcf, 0x03, 0xce, 0x09, 0xcd, 
0x03, 0xce, 0x03, 0xd0, 0x03, 0xd4, 0x03, 0xa7, 0x03, 0xac, 0x03, 0xe4, 0x03, 0xe5, 0x06, 0xe6, 0x06, 0xe5, 0x03, 0xd3, 0x03, 0x9f, 0x03, 0xb0, 0x12, 0xe5, 0x06, 0xe6, 0x03, 0xe5, 0x03, 0xe1, 0x03, 0xdb, 0x03, 0xb3, 0x03, 0x90, 0x03, 0xce, 0x03, 0xcf, 0x12, 0xd0, 0x06, 0x92, 0x0f, 0xd0, 0x03, 0xb0, 0x03, 0x92, 0x03, 0xbf, 0x12, 0xd0, 0x03, 0xa0, 0x03, 0x92, 0x18, 0xd0, 0x03, 0xcf, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xda, 0x06, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x21, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x12, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xcf, 0x21, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xbd, 0x30, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xcf, 0x1b, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x18, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xbd, 0x1e, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0xab, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xa5, 0xe0, 0x03, 0x45, 0x03, 0x8c, 0x03, 0x9c, 0x03, 0x38, 0x03, 0x0c, 0x03, 0xcf, 0x15, 0xe0, 0x03, 0xdd, 0x03, 0xd5, 0x06, 0xce, 0x03, 0xcf, 0x5d, 0xd0, 0x03, 0xcf, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xd4, 0x03, 0xdb, 0x03, 0xe1, 0x03, 0xe5, 0x09, 0xe6, 0x1e, 0xe5, 0x09, 0xe6, 0x03, 0xe5, 0x03, 0xe2, 0x03, 0xdd, 0x03, 0xda, 0x03, 0xd5, 0x03, 0xd1, 0x03, 0xcf, 0x0f, 0xce, 0x03, 0x40, 0x03, 0x82, 0x03, 0x91, 0x03, 0x34, 0x03, 0x0c, 0x03, 0xbf, 0x1b, 0xd0, 0x0c, 0xcf, 0x12, 0xce, 0x03, 0xcf, 0x03, 0xd1, 0x03, 0xd5, 0x03, 0xda, 0x03, 0xdd, 0x03, 0xe2, 0x03, 0xe5, 0x09, 0xe6, 0x1e, 0xe5, 0x09, 0xe6, 0x03, 0xe5, 0x03, 0xe1, 0x03, 0xdb, 0x03, 0xd4, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xcf, 0x5d, 0xd0, 0x03, 0xcf, 0x06, 0xce, 0x03, 0xd5, 0x03, 0xdd, 0xff, 0xe0, 0xc6, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xa5, 0xe0, 0x03, 0x6e, 0x06, 0x2c, 0x03, 0x45, 0x03, 0xbd, 0x18, 0xe0, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd1, 0x03, 0xcd, 0x03, 0xce, 0x5d, 0xd0, 0x06, 0xcf, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xd4, 0x03, 0xda, 0x03, 0xe1, 0x03, 0xe5, 0x06, 0xe6, 0x24, 0xe5, 0x09, 0xe6, 0x03, 0xe5, 0x03, 0xe4, 0x03, 0xe0, 0x03, 0xdb, 0x03, 0xd7, 0x03, 0xd5, 0x03, 0xd1, 0x03, 0xd0, 0x03, 0xce, 0x03, 0xcd, 0x03, 0x65, 0x06, 0x29, 0x03, 0x40, 0x03, 0xaf, 0x21, 0xcf, 0x09, 0xce, 0x06, 0xcd, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xd1, 0x03, 0xd5, 0x03, 0xd7, 0x03, 0xdb, 0x03, 0xe0, 0x03, 0xe4, 0x03, 0xe5, 0x09, 0xe6, 0x24, 0xe5, 0x06, 0xe6, 0x03, 0xe5, 0x03, 0xe1, 0x03, 0xda, 0x03, 0xd4, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x06, 0xcf, 0x5d, 0xd0, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xda, 0x03, 0xdf, 0xff, 0xe0, 0xc6, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xcf, 0xe0, 0x03, 0xdc, 0x03, 0xd5, 0x06, 0xce, 0x03, 0xd0, 0x63, 0xcf, 0x06, 0xce, 0x03, 0xcf, 0x03, 0xd4, 0x03, 0xdb, 0x03, 0xe1, 0x03, 0xe5, 0x03, 0xe6, 0x2d, 0xe5, 0x09, 0xe6, 0x03, 0xe4, 0x03, 0xe1, 0x03, 0xdd, 0x03, 0xda, 0x03, 0xd5, 0x03, 0xd3, 0x09, 0xd0, 0x0c, 0xcf, 0x06, 0xce, 0x0c, 0xcf, 0x06, 0xce, 0x0c, 0xcf, 0x09, 0xd0, 0x03, 0xd3, 0x03, 0xd5, 0x03, 0xda, 0x03, 0xdd, 0x03, 0xe1, 0x03, 0xe4, 0x09, 0xe6, 0x2d, 0xe5, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0xe1, 0x03, 0xdb, 0x03, 0xd4, 0x03, 0xcf, 0x06, 0xce, 0x63, 0xcf, 0x03, 0xd0, 0x06, 0xce, 0x03, 0xd5, 0x03, 0xdc, 0xff, 0xe0, 0xc9, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xcf, 0xe0, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd1, 0x03, 0xcd, 0x69, 0xcf, 0x09, 0xce, 0x03, 0xd4, 0x03, 0xdb, 0x03, 0xe2, 0x06, 0xe6, 0x2d, 0xe5, 0x06, 0xe6, 0x06, 0xe5, 0x06, 0xe4, 0x03, 0xe0, 0x03, 0xdd, 0x03, 0xdb, 0x03, 0xd9, 0x03, 0xd6, 0x03, 0xd5, 0x03, 0xd3, 0x03, 0xd1, 0x03, 0xcf, 0x03, 0xce, 0x12, 0xcd, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd1, 0x03, 0xd3, 0x03, 0xd5, 0x03, 0xd6, 0x03, 0xd9, 0x03, 0xdb, 0x03, 
0xdd, 0x03, 0xe0, 0x06, 0xe4, 0x06, 0xe5, 0x06, 0xe6, 0x2d, 0xe5, 0x06, 0xe6, 0x03, 0xe2, 0x03, 0xdb, 0x03, 0xd4, 0x09, 0xce, 0x69, 0xcf, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xda, 0x03, 0xdf, 0xff, 0xe0, 0xc9, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xd2, 0xe0, 0x03, 0xdd, 0x03, 0xd5, 0x03, 0xce, 0x03, 0xcd, 0x69, 0xcf, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xcf, 0x03, 0xd4, 0x03, 0xda, 0x03, 0xe1, 0x09, 0xe6, 0x30, 0xe5, 0x06, 0xe6, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0xe4, 0x03, 0xe1, 0x03, 0xe0, 0x03, 0xdd, 0x03, 0xdc, 0x03, 0xdb, 0x03, 0xda, 0x03, 0xd9, 0x03, 0xd7, 0x0c, 0xd6, 0x03, 0xd7, 0x03, 0xd9, 0x03, 0xda, 0x03, 0xdb, 0x03, 0xdc, 0x03, 0xdd, 0x03, 0xe0, 0x03, 0xe1, 0x03, 0xe4, 0x03, 0xe5, 0x03, 0xe6, 0x03, 0xe7, 0x06, 0xe6, 0x30, 0xe5, 0x09, 0xe6, 0x03, 0xe1, 0x03, 0xda, 0x03, 0xd4, 0x03, 0xcf, 0x03, 0xcd, 0x03, 0xce, 0x69, 0xcf, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xd5, 0x03, 0xdd, 0xff, 0xe0, 0xcc, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x7c, 0x03, 0x6e, 0x0c, 0xe0, 0x03, 0x60, 0x03, 0xbd, 0x99, 0xe0, 0x03, 0xbd, 0x18, 0x60, 0xb7, 0xe0, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd0, 0x03, 0xcd, 0x6f, 0xcf, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xd1, 0x03, 0xd9, 0x03, 0xe0, 0x03, 0xe5, 0x03, 0xe6, 0x33, 0xe5, 0x06, 0xe6, 0x4e, 0xe5, 0x06, 0xe6, 0x33, 0xe5, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0xe0, 0x03, 0xd9, 0x03, 0xd1, 0x03, 0xce, 0x03, 0xcd, 0x6f, 0xcf, 0x03, 0xcd, 0x03, 0xd0, 0x03, 0xda, 0x03, 0xdf, 0xff, 0xe0, 0xcc, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x45, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x30, 0xe0, 0x03, 0xad, 0x03, 0xbd, 0x18, 0xe0, 0x03, 0xbd, 0x09, 0x60, 0x03, 0x16, 0x03, 0x2c, 0x09, 0x60, 0x2d, 0xe0, 0x03, 0x9c, 0x03, 0xcf, 0x87, 0xe0, 0x03, 0xdd, 0x03, 0xd5, 0x06, 0xce, 0x6c, 0xcf, 0x09, 0xce, 0x03, 0xd0, 0x03, 0xd7, 0x03, 0xdf, 0x03, 0xe2, 0x09, 0xe6, 0x33, 0xe5, 0x48, 0xe6, 0x33, 0xe5, 0x09, 0xe6, 0x03, 0xe2, 0x03, 0xdf, 0x03, 0xd7, 0x03, 0xd0, 0x09, 0xce, 0x6c, 0xcf, 0x06, 0xce, 0x03, 0xd5, 0x03, 0xdd, 0xff, 0xe0, 0xcf, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x20, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x27, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0f, 0xe0, 0x06, 0x9c, 0x03, 0xcf, 0x0f, 0xe0, 0x06, 0xbd, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x24, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xad, 0x09, 0xe0, 0x03, 00, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xcf, 0x72, 0xe0, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd3, 0x06, 0xce, 0x6f, 0xcf, 0x06, 0xcd, 0x03, 0xd0, 0x03, 0xd6, 0x03, 0xdb, 0x03, 0xe1, 0x03, 0xe5, 0x0c, 0xe6, 0x2a, 0xe5, 0x48, 0xe6, 0x2a, 0xe5, 0x0c, 0xe6, 0x03, 0xe5, 0x03, 0xe1, 0x03, 0xdb, 0x03, 0xd6, 0x03, 0xd0, 0x06, 0xcd, 0x6f, 0xcf, 0x06, 0xce, 0x03, 0xd3, 0x03, 0xda, 0x03, 0xdf, 0xff, 0xe0, 0xcf, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x6e, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x45, 0x03, 0x16, 0x06, 0x20, 0x03, 0xcf, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x16, 0x06, 0xe0, 0x09, 00, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x0c, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x6e, 0x03, 0x20, 0x03, 0x7c, 0x03, 0x60, 0x09, 00, 0x03, 0x9c, 0x1e, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0c, 0xe0, 0x03, 0x38, 0x03, 0x16, 0x03, 0x2c, 0x03, 0x0c, 
0x03, 0x52, 0x06, 0xe0, 0x03, 0x8c, 0x03, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x2c, 0x09, 00, 0x06, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0xad, 0x72, 0xe0, 0x03, 0xdd, 0x03, 0xd7, 0x03, 0xcf, 0x03, 0xce, 0x6f, 0xcf, 0x03, 0xce, 0x06, 0xcd, 0x03, 0xcf, 0x03, 0xd3, 0x03, 0xd9, 0x03, 0xdf, 0x03, 0xe2, 0x0c, 0xe6, 0x96, 0xe5, 0x0c, 0xe6, 0x03, 0xe2, 0x03, 0xdf, 0x03, 0xd9, 0x03, 0xd3, 0x03, 0xcf, 0x06, 0xcd, 0x03, 0xce, 0x6f, 0xcf, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd7, 0x03, 0xdd, 0xff, 0xe0, 0xd2, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x6e, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 00, 0x03, 0x8c, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x20, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0x52, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x24, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0c, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x8c, 0x03, 00, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x0f, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x38, 0x75, 0xe0, 0x03, 0xdc, 0x03, 0xd4, 0x03, 0xcf, 0x03, 0xce, 0x72, 0xcf, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xd5, 0x03, 0xda, 0x03, 0xe0, 0x03, 0xe4, 0x03, 0xe5, 0x09, 0xe6, 0x8a, 0xe5, 0x09, 0xe6, 0x03, 0xe5, 0x03, 0xe4, 0x03, 0xe0, 0x03, 0xda, 0x03, 0xd5, 0x03, 0xd0, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x72, 0xcf, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd4, 0x03, 0xdc, 0xff, 0xe0, 0xd5, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x20, 0x03, 0x45, 0x03, 0xe0, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x38, 0x03, 0x45, 0x06, 0x9c, 0x03, 0x45, 0x03, 0x2c, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x16, 0x03, 0x20, 0x03, 0xbd, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0x20, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0xcf, 0x0f, 0x9c, 0x0c, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x12, 0xe0, 0x03, 0x9c, 0x03, 0x6e, 0x03, 00, 0x06, 0xe0, 0x03, 0x45, 0x03, 0x0c, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x06, 0x9c, 0x03, 0x6e, 0x03, 00, 0x75, 0xe0, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd1, 0x03, 0xcd, 0x03, 0xce, 0x72, 0xcf, 0x06, 0xce, 0x06, 0xcd, 0x03, 0xd0, 0x03, 0xd7, 0x03, 0xdf, 0x03, 0xe2, 0x03, 0xe5, 0x03, 0xe6, 0x90, 0xe5, 0x03, 0xe6, 0x03, 0xe5, 0x03, 0xe2, 0x03, 0xdf, 0x03, 0xd7, 0x03, 0xd0, 0x06, 0xcd, 0x06, 0xce, 0x72, 0xcf, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xd1, 0x03, 0xda, 0x03, 0xdf, 0xff, 0xe0, 0xd5, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xad, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x16, 0x09, 0x2c, 0x03, 0x52, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 00, 0x03, 0x38, 0x06, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0xad, 0x0f, 0x2c, 0x0c, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0c, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x06, 0x20, 0x03, 00, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x52, 0x03, 00, 0x03, 0x20, 0x03, 0xbd, 0x06, 0xe0, 0x03, 00, 
0x03, 0x9c, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x0c, 0x0c, 0x2c, 0x78, 0xe0, 0x03, 0xdf, 0x03, 0xd7, 0x03, 0xcf, 0x03, 0xce, 0x78, 0xcf, 0x09, 0xcd, 0x03, 0xd0, 0x03, 0xd4, 0x03, 0xd9, 0x03, 0xdd, 0x03, 0xe1, 0x03, 0xe4, 0x09, 0xe6, 0x78, 0xe5, 0x09, 0xe6, 0x03, 0xe4, 0x03, 0xe1, 0x03, 0xdd, 0x03, 0xd9, 0x03, 0xd4, 0x03, 0xd0, 0x09, 0xcd, 0x78, 0xcf, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd7, 0x03, 0xdf, 0xff, 0xe0, 0xd8, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x45, 0x03, 0x2c, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x06, 0x45, 0x12, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x20, 0x03, 0x60, 0x0c, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x09, 0xe0, 0x03, 0x45, 0x03, 0x2c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x24, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0xbd, 0x03, 00, 0x03, 0x8c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x38, 0x06, 0xe0, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x87, 0xe0, 0x03, 0xdc, 0x03, 0xd5, 0x03, 0xcf, 0x03, 0xce, 0x75, 0xcf, 0x03, 0xd0, 0x03, 0xcf, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd4, 0x03, 0xd9, 0x03, 0xdd, 0x03, 0xe1, 0x03, 0xe4, 0x06, 0xe5, 0x09, 0xe6, 0x60, 0xe5, 0x09, 0xe6, 0x06, 0xe5, 0x03, 0xe4, 0x03, 0xe1, 0x03, 0xdd, 0x03, 0xd9, 0x03, 0xd4, 0x03, 0xcf, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd0, 0x75, 0xcf, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd5, 0x03, 0xdc, 0xff, 0xe0, 0xdb, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xad, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 00, 0x03, 0x8c, 0x03, 0xe0, 0x03, 0x7c, 0x03, 00, 0x03, 0x60, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x20, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x52, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 0x2c, 0x03, 0x45, 0x24, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x9c, 0x03, 00, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x7c, 0x03, 00, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x38, 0x06, 0xe0, 0x03, 00, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x06, 0x7c, 0x6f, 0xe0, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd1, 0x06, 0xce, 0x7b, 0xcf, 0x0c, 0xce, 0x03, 0xd0, 0x03, 0xd4, 0x03, 0xd7, 0x03, 0xdc, 0x03, 0xe1, 0x03, 0xe4, 0x03, 0xe6, 0x03, 0xe7, 0x0c, 0xe6, 0x4e, 0xe5, 0x0c, 0xe6, 0x03, 0xe7, 0x03, 0xe6, 0x03, 0xe4, 0x03, 0xe1, 0x03, 0xdc, 0x03, 0xd7, 0x03, 0xd4, 0x03, 0xd0, 0x0c, 0xce, 0x7b, 0xcf, 0x06, 0xce, 0x03, 0xd1, 0x03, 0xda, 0x03, 0xdf, 0xff, 0xe0, 0xdb, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x60, 0x03, 00, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x6e, 0x03, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x38, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x45, 0x03, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x20, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x20, 0x03, 0x16, 0x03, 0x52, 0x03, 0xe0, 0x03, 0x8c, 0x03, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x45, 0x03, 0x0c, 0x03, 0x7c, 0x03, 0x9c, 0x03, 00, 0x0c, 0xe0, 0x03, 0x8c, 0x03, 00, 0x03, 0x2c, 0x03, 0xad, 0x1e, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0c, 0xe0, 0x03, 0x20, 0x03, 0x16, 0x03, 0x20, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x20, 0x03, 0xe0, 0x03, 0x38, 0x06, 0x20, 0x03, 0x0c, 0x03, 
0xad, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x0c, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x2c, 0x03, 0x0c, 0x06, 0xe0, 0x06, 0x2c, 0x72, 0xe0, 0x03, 0xdd, 0x03, 0xd7, 0x03, 0xd0, 0x06, 0xce, 0x7e, 0xcf, 0x03, 0xce, 0x06, 0xcd, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xd3, 0x03, 0xd6, 0x03, 0xda, 0x03, 0xdf, 0x03, 0xe1, 0x06, 0xe4, 0x03, 0xe6, 0x0c, 0xe7, 0x15, 0xe6, 0x12, 0xe5, 0x15, 0xe6, 0x0c, 0xe7, 0x03, 0xe6, 0x06, 0xe4, 0x03, 0xe1, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd6, 0x03, 0xd3, 0x03, 0xd0, 0x03, 0xce, 0x06, 0xcd, 0x03, 0xce, 0x7e, 0xcf, 0x06, 0xce, 0x03, 0xd0, 0x03, 0xd7, 0x03, 0xdd, 0xff, 0xe0, 0xde, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x6f, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0xbd, 0x0c, 0xe0, 0x06, 0xad, 0x12, 0xe0, 0x06, 0x9c, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0xcf, 0x15, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x36, 0xe0, 0x03, 0x9c, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0xad, 0x06, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xcf, 0x0c, 0xe0, 0x06, 0xad, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x84, 0xe0, 0x03, 0xdc, 0x03, 0xd6, 0x03, 0xcf, 0x03, 0xcd, 0x84, 0xcf, 0x03, 0xce, 0x09, 0xcd, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xd4, 0x03, 0xd6, 0x03, 0xda, 0x03, 0xdc, 0x03, 0xe0, 0x06, 0xe4, 0x06, 0xe5, 0x3c, 0xe6, 0x06, 0xe5, 0x06, 0xe4, 0x03, 0xe0, 0x03, 0xdc, 0x03, 0xda, 0x03, 0xd6, 0x03, 0xd4, 0x03, 0xd0, 0x03, 0xce, 0x09, 0xcd, 0x03, 0xce, 0x84, 0xcf, 0x03, 0xcd, 0x03, 0xcf, 0x03, 0xd6, 0x03, 0xdc, 0xff, 0xe0, 0xe1, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xe7, 0xe0, 0x03, 0xdf, 0x03, 0xdb, 0x03, 0xd4, 0x06, 0xce, 0x87, 0xcf, 0x03, 0xce, 0x09, 0xcd, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xd1, 0x03, 0xd3, 0x03, 0xd5, 0x03, 0xd9, 0x03, 0xdb, 0x03, 0xdc, 0x03, 0xdd, 0x03, 0xe0, 0x03, 0xe1, 0x03, 0xe2, 0x06, 0xe4, 0x03, 0xe5, 0x18, 0xe6, 0x03, 0xe5, 0x06, 0xe4, 0x03, 0xe2, 0x03, 0xe1, 0x03, 0xe0, 0x03, 0xdd, 0x03, 0xdc, 0x03, 0xdb, 0x03, 0xd9, 0x03, 0xd5, 0x03, 0xd3, 0x03, 0xd1, 0x03, 0xd0, 0x03, 0xce, 0x09, 0xcd, 0x03, 0xce, 0x87, 0xcf, 0x06, 0xce, 0x03, 0xd4, 0x03, 0xdb, 0x03, 0xdf, 0xff, 0xe0, 0xe1, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xea, 0xe0, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd1, 0x06, 0xce, 0x8d, 0xcf, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x09, 0xcd, 0x03, 0xcf, 0x03, 0xd1, 0x03, 0xd4, 0x03, 0xd6, 0x03, 0xd7, 0x03, 0xd9, 0x03, 0xda, 0x06, 0xdc, 0x03, 0xdd, 0x03, 0xdf, 0x12, 0xe0, 0x03, 0xdf, 0x03, 0xdd, 0x06, 0xdc, 0x03, 0xda, 0x03, 0xd9, 0x03, 0xd7, 0x03, 0xd6, 0x03, 0xd4, 0x03, 0xd1, 0x03, 0xcf, 0x09, 0xcd, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x8d, 0xcf, 0x06, 0xce, 0x03, 0xd1, 0x03, 0xda, 0x03, 0xdf, 0xff, 0xe0, 0xe4, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xed, 0xe0, 0x03, 0xdd, 0x03, 0xd9, 0x03, 0xd1, 0x03, 0xce, 0x87, 0xcf, 0x06, 0xd0, 0x03, 0xcf, 0x06, 0xce, 0x06, 0xcd, 0x03, 0xcb, 0x06, 0xcd, 0x03, 0xce, 0x06, 0xcf, 0x0c, 0xd0, 0x06, 0xd1, 0x12, 0xd3, 0x06, 0xd1, 0x0c, 0xd0, 0x06, 0xcf, 0x03, 0xce, 0x06, 0xcd, 0x03, 0xcb, 0x06, 0xcd, 0x06, 0xce, 0x03, 0xcf, 0x06, 0xd0, 0x87, 0xcf, 0x03, 0xce, 0x03, 0xd1, 0x03, 0xd9, 0x03, 0xdd, 0xff, 0xe0, 0xe7, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xf0, 0xe0, 0x03, 0xdd, 0x03, 0xd7, 0x03, 0xd0, 0x03, 0xce, 0x93, 0xcf, 0x03, 0xd0, 0x09, 0xcf, 0x06, 0xce, 0x06, 0xcd, 0x09, 0xcb, 0x24, 0xca, 0x09, 0xcb, 0x06, 0xcd, 0x06, 0xce, 0x09, 0xcf, 0x03, 0xd0, 0x93, 0xcf, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xd7, 0x03, 0xdd, 0xff, 0xe0, 0xea, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xf0, 0xe0, 0x03, 0xdf, 0x03, 0xdc, 0x03, 0xd5, 0x03, 0xcf, 0x03, 0xce, 0xff, 0xcf, 0x87, 0xcf, 0x03, 0xce, 
0x03, 0xcf, 0x03, 0xd5, 0x03, 0xdc, 0x03, 0xdf, 0xff, 0xe0, 0xea, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xf3, 0xe0, 0x03, 0xdf, 0x03, 0xdb, 0x03, 0xd5, 0x06, 0xce, 0xff, 0xcf, 0x81, 0xcf, 0x06, 0xce, 0x03, 0xd5, 0x03, 0xdb, 0x03, 0xdf, 0xff, 0xe0, 0xed, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xf6, 0xe0, 0x03, 0xdf, 0x03, 0xdb, 0x03, 0xd4, 0x03, 0xcf, 0x06, 0xce, 0xff, 0xcf, 0x75, 0xcf, 0x06, 0xce, 0x03, 0xcf, 0x03, 0xd4, 0x03, 0xdb, 0x03, 0xdf, 0xff, 0xe0, 0xf0, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xf9, 0xe0, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd4, 0x03, 0xcf, 0x03, 0xce, 0xff, 0xcf, 0x75, 0xcf, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd4, 0x03, 0xda, 0x03, 0xdf, 0xff, 0xe0, 0xf3, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xfc, 0xe0, 0x03, 0xdd, 0x03, 0xda, 0x03, 0xd4, 0x06, 0xce, 0xff, 0xcf, 0x6f, 0xcf, 0x06, 0xce, 0x03, 0xd4, 0x03, 0xda, 0x03, 0xdd, 0xff, 0xe0, 0xf6, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd3, 0x03, 0xcf, 0x03, 0xce, 0xff, 0xcf, 0x69, 0xcf, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd3, 0x03, 0xda, 0x03, 0xdf, 0xff, 0xe0, 0xf9, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x03, 0xe0, 0x03, 0xdf, 0x03, 0xda, 0x03, 0xd4, 0x03, 0xcf, 0x06, 0xce, 0xff, 0xcf, 0x5d, 0xcf, 0x06, 0xce, 0x03, 0xcf, 0x03, 0xd4, 0x03, 0xda, 0x03, 0xdf, 0xff, 0xe0, 0xfc, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x06, 0xe0, 0x03, 0xdd, 0x03, 0xda, 0x03, 0xd4, 0x09, 0xce, 0xff, 0xcf, 0x57, 0xcf, 0x09, 0xce, 0x03, 0xd4, 0x03, 0xda, 0x03, 0xdd, 0xff, 0xe0, 0xff, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x09, 0xe0, 0x03, 0xdf, 0x03, 0xdb, 0x03, 0xd4, 0x03, 0xcf, 0x03, 0xcd, 0x03, 0xce, 0xff, 0xcf, 0x51, 0xcf, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xcf, 0x03, 0xd4, 0x03, 0xdb, 0x03, 0xdf, 0xff, 0xe0, 0xff, 0xe0, 0x03, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x0c, 0xe0, 0x03, 0xdf, 0x03, 0xdb, 0x03, 0xd5, 0x03, 0xd0, 0x09, 0xce, 0xff, 0xcf, 0x45, 0xcf, 0x09, 0xce, 0x03, 0xd0, 0x03, 0xd5, 0x03, 0xdb, 0x03, 0xdf, 0xff, 0xe0, 0xff, 0xe0, 0x06, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x0f, 0xe0, 0x03, 0xdf, 0x03, 0xdc, 0x03, 0xd7, 0x03, 0xd0, 0x03, 0xcd, 0x06, 0xce, 0xff, 0xcf, 0x3f, 0xcf, 0x06, 0xce, 0x03, 0xcd, 0x03, 0xd0, 0x03, 0xd7, 0x03, 0xdc, 0x03, 0xdf, 0xff, 0xe0, 0xff, 0xe0, 0x09, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x12, 0xe0, 0x03, 0xdf, 0x03, 0xdd, 0x03, 0xd9, 0x03, 0xd1, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0xff, 0xcf, 0x39, 0xcf, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xd1, 0x03, 0xd9, 0x03, 0xdd, 0x03, 0xdf, 0xff, 0xe0, 0xff, 0xe0, 0x0c, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x18, 0xe0, 0x03, 0xdd, 0x03, 0xd9, 0x03, 0xd3, 0x03, 0xcf, 0x06, 0xcd, 0x03, 0xce, 0xff, 0xcf, 0x2d, 0xcf, 0x03, 0xce, 0x06, 0xcd, 0x03, 0xcf, 0x03, 0xd3, 0x03, 0xd9, 0x03, 0xdd, 0xff, 0xe0, 0xff, 0xe0, 0x12, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x1b, 0xe0, 0x03, 0xdf, 0x03, 0xdb, 0x03, 0xd6, 0x03, 0xd0, 0x06, 0xcd, 0x03, 0xce, 0xff, 0xcf, 0x27, 0xcf, 0x03, 0xce, 0x06, 0xcd, 0x03, 0xd0, 0x03, 0xd6, 0x03, 0xdb, 0x03, 0xdf, 0xff, 0xe0, 0xff, 0xe0, 0x15, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x21, 0xe0, 0x03, 0xdc, 0x03, 0xd7, 0x03, 0xd1, 0x03, 0xcf, 0x09, 0xce, 0xff, 0xcf, 0x1b, 0xcf, 0x09, 0xce, 0x03, 0xcf, 0x03, 0xd1, 0x03, 0xd7, 0x03, 0xdc, 0xff, 0xe0, 0xff, 0xe0, 0x1b, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xb7, 0xe0, 0x06, 0x52, 0x03, 0xbd, 0x5d, 0xe0, 0x03, 0x6e, 0x03, 0xad, 0x21, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x12, 0xe0, 0x03, 0xcf, 0x21, 0x9c, 0x03, 0xcf, 
0x09, 0xe0, 0x03, 0x7c, 0x03, 0xad, 0x1b, 0xe0, 0x03, 0xcf, 0x06, 0x9c, 0x03, 0x60, 0x03, 0x38, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x6e, 0x1e, 0xe0, 0x03, 0x60, 0x39, 0xe0, 0x03, 0xdd, 0x03, 0xda, 0x03, 0xd5, 0x03, 0x40, 0x03, 0x72, 0x06, 0xce, 0x0f, 0xcf, 0x03, 0xbe, 0x21, 0x91, 0x0f, 0xcf, 0x03, 0x91, 0x03, 0xbe, 0x0c, 0xcf, 0x03, 0xbe, 0x03, 0x66, 0x0f, 0xcf, 0x06, 0x91, 0x1e, 0xcf, 0x06, 0xaf, 0x2a, 0xcf, 0x03, 0x74, 0x03, 0xaf, 0x24, 0xcf, 0x03, 0x59, 0x2a, 0xcf, 0x06, 0xce, 0x03, 0xcd, 0x03, 0xcf, 0x03, 0xd5, 0x03, 0xda, 0x03, 0xdd, 0x7e, 0xe0, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x8c, 0x03, 0x9c, 0x03, 0x38, 0x12, 0xe0, 0x03, 0x6e, 0x03, 0xad, 0x48, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x15, 0xe0, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x7c, 0x06, 0xad, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x52, 0x03, 0xcf, 0x0f, 0xe0, 0x03, 0xcf, 0x03, 0x7c, 0x03, 0x38, 0x09, 0xe0, 0x03, 0x52, 0x0f, 0xe0, 0x03, 0xad, 0x09, 0x9c, 0x03, 0xad, 0x03, 0xe0, 0x0c, 0x9c, 0x03, 0xad, 0x09, 0xe0, 0x03, 0x7c, 0x03, 0xad, 0x03, 0xe0, 0x12, 0x9c, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0xcf, 0x69, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x78, 0xe0, 0x03, 0xcf, 0x24, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 0x7c, 0x03, 0xbd, 0x03, 0x60, 0x5a, 0xe0, 0x03, 0xcf, 0x03, 0x16, 0x03, 0xcf, 0x21, 0xe0, 0x03, 0x20, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0xbd, 0x0f, 0x60, 0x03, 0x16, 0x0f, 0x60, 0x03, 0xbd, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xad, 0x09, 0x2c, 0x03, 0x20, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x6e, 0x03, 0x60, 0x06, 0x45, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x1e, 0xe0, 0x03, 00, 0x18, 0xe0, 0x03, 0x52, 0x03, 0x38, 0x09, 0x2c, 0x03, 0x20, 0x03, 0xad, 0x0f, 0xe0, 0x03, 0xdf, 0x03, 0xdc, 0x03, 0xb6, 0x03, 0x41, 0x03, 0x15, 0x03, 0x8f, 0x03, 0xce, 0x03, 0x66, 0x03, 0x9f, 0x06, 0xcf, 0x03, 0xaf, 0x09, 0x59, 0x03, 0x40, 0x03, 0x29, 0x03, 0x59, 0x03, 0x29, 0x03, 0x40, 0x09, 0x59, 0x0f, 0xcf, 0x03, 00, 0x0c, 0xcf, 0x03, 0x9f, 0x03, 0x40, 0x03, 0x4c, 0x03, 0x82, 0x0c, 0xcf, 0x06, 0x59, 0x06, 0xcf, 0x03, 0xbe, 0x03, 0x91, 0x03, 0xaf, 0x0f, 0xcf, 0x03, 0x4c, 0x03, 0x82, 0x03, 0xcf, 0x03, 0x74, 0x03, 0x59, 0x03, 0x91, 0x1e, 0xcf, 0x03, 0x29, 0x03, 0x91, 0x24, 0xcf, 0x03, 00, 0x24, 0xcf, 0x06, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xd1, 0x03, 0xd7, 0x03, 0xdc, 0x03, 0xdf, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0xbd, 0x03, 0xad, 0x03, 0x9c, 0x03, 0x45, 0x03, 0x38, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xad, 0x18, 0x9c, 0x03, 0xcf, 0x3c, 0xe0, 0x03, 00, 0x06, 0xe0, 0x03, 0xad, 0x03, 0x2c, 0x03, 0x60, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 0xcf, 0x03, 0x16, 0x03, 0xcf, 0x18, 0xe0, 0x03, 0xcf, 0x2d, 0xe0, 0x03, 0x20, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0x7c, 0x06, 0x60, 0x03, 0x9c, 0x03, 0x38, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x20, 0x0c, 0xe0, 0x03, 0xcf, 0x03, 0x38, 0x03, 0x20, 0x03, 0x45, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0x2c, 0x09, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x16, 0x09, 0x60, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x16, 0x06, 0x60, 0x03, 0x16, 0x06, 0x60, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 0x20, 0x03, 0x9c, 0x69, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x51, 0xe0, 0x03, 0x2c, 0x15, 00, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 0x38, 0x03, 0x45, 0x03, 0xcf, 0x1e, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0xbd, 0x06, 0x52, 0x03, 0xbd, 0x5a, 0xe0, 0x03, 0x52, 0x09, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x16, 0x09, 0xe0, 0x03, 0xad, 0x09, 0x2c, 0x03, 0x0c, 0x06, 0x2c, 0x03, 0x20, 0x03, 0x2c, 0x03, 0xad, 0x18, 0xe0, 0x03, 0x2c, 0x15, 0xe0, 0x03, 0x52, 0x03, 0x2c, 
0x03, 0x16, 0x03, 0x20, 0x03, 0x2c, 0x03, 0xad, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xad, 0x06, 0x2c, 0x03, 0x20, 0x03, 0x16, 0x06, 0x2c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x0c, 0xe0, 0x03, 0x7c, 0x09, 0x60, 0x03, 0x38, 0x03, 0x2c, 0x03, 00, 0x09, 0x2c, 0x03, 0x52, 0x0c, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xad, 0x03, 0xe0, 0x06, 0x45, 0x18, 0xe0, 0x03, 0xdd, 0x03, 0xda, 0x03, 0xb3, 0x03, 0x29, 0x03, 0xbd, 0x03, 0x1e, 0x03, 0xbe, 0x09, 0xcf, 0x03, 0xbe, 0x06, 0x91, 0x03, 0x66, 0x03, 0x40, 0x03, 0x91, 0x03, 0x40, 0x03, 0x66, 0x06, 0x91, 0x0f, 0xcf, 0x03, 0xbe, 0x03, 0x1f, 0x0c, 0xcf, 0x03, 0xbe, 0x03, 0x59, 0x03, 0x74, 0x03, 0x91, 0x03, 0xcf, 0x03, 0x91, 0x06, 0x29, 0x03, 0x15, 0x03, 0x1f, 0x03, 0x29, 0x03, 0x1f, 0x03, 0x29, 0x03, 0x59, 0x03, 0xaf, 0x0f, 0xcf, 0x03, 0x29, 0x03, 0x9f, 0x03, 0x29, 0x03, 0x59, 0x03, 0x91, 0x03, 0x1f, 0x03, 0x74, 0x0c, 0xcf, 0x03, 0x9f, 0x03, 0x34, 0x09, 0x59, 0x03, 0x15, 0x03, 0x40, 0x03, 0x34, 0x03, 0x29, 0x03, 0x1f, 0x09, 0xcf, 0x03, 0x74, 0x09, 0x59, 0x03, 0x34, 0x03, 0x29, 0x03, 00, 0x09, 0x29, 0x03, 0x4c, 0x18, 0xcf, 0x06, 0xcd, 0x03, 0xcf, 0x03, 0xd4, 0x03, 0xda, 0x03, 0xdd, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x20, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0x38, 0x03, 0x6e, 0x03, 0xad, 0x03, 0x6e, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x45, 0x12, 0x60, 0x03, 0x16, 0x03, 0x8c, 0x3c, 0xe0, 0x03, 00, 0x09, 0xe0, 0x03, 0x8c, 0x12, 0xe0, 0x03, 0x52, 0x09, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x16, 0x0c, 0xe0, 0x03, 0x38, 0x03, 0x45, 0x03, 0xcf, 0x1b, 0xe0, 0x03, 0xad, 0x09, 0x2c, 0x03, 0x0c, 0x06, 0x2c, 0x03, 0x20, 0x03, 0x2c, 0x03, 0xad, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0x60, 0x03, 0x45, 0x06, 0x60, 0x03, 0xad, 0x03, 0x8c, 0x03, 0x38, 0x09, 0x9c, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x45, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x0f, 0x2c, 0x03, 0xe0, 0x03, 0x0c, 0x0c, 0x2c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0x45, 0x03, 0x6e, 0x03, 0xbd, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 0x0c, 0x03, 0xcf, 0x69, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x66, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0x45, 0x03, 0x20, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0x8c, 0x03, 0x7c, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x60, 0x03, 0x0c, 0x03, 0x9c, 0x5d, 0xe0, 0x03, 0x9c, 0x03, 0x20, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x6e, 0x03, 0x45, 0x12, 0xe0, 0x03, 0x20, 0x03, 0xbd, 0x1b, 0xe0, 0x03, 0x0c, 0x09, 0x2c, 0x03, 0x0c, 0x09, 0x2c, 0x03, 0x0c, 0x0f, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x06, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0x6e, 0x03, 0x45, 0x03, 0x9c, 0x03, 0xad, 0x03, 0x52, 0x03, 0x20, 0x03, 0x16, 0x03, 0x2c, 0x03, 00, 0x06, 0xe0, 0x0f, 0x9c, 0x03, 0xbd, 0x03, 00, 0x09, 0xe0, 0x03, 0xcf, 0x15, 0xe0, 0x03, 0x45, 0x03, 0x52, 0x1b, 0xe0, 0x03, 0x2c, 0x03, 0xdd, 0x03, 0xdb, 0x03, 0xd7, 0x03, 0x68, 0x03, 0x59, 0x03, 0xcb, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xcf, 0x03, 0x91, 0x03, 0x29, 0x03, 0x59, 0x03, 0x40, 0x03, 0x29, 0x03, 0x59, 0x03, 0x29, 0x03, 0x40, 0x03, 0x59, 0x03, 0x15, 0x09, 0xcf, 0x03, 0xbe, 0x03, 0x91, 0x03, 0x59, 0x03, 0x15, 0x03, 0x59, 0x03, 0x74, 0x03, 0xcf, 0x03, 0xbe, 0x03, 0x15, 0x03, 0x91, 0x12, 0xcf, 0x03, 0x59, 0x03, 0x91, 0x06, 0xcf, 0x03, 0x4c, 0x15, 0xcf, 0x03, 0x29, 0x03, 0x59, 0x03, 0x74, 0x06, 0xcf, 0x03, 0xaf, 0x03, 0x15, 0x0f, 0xcf, 0x03, 0x9f, 0x09, 0x91, 0x03, 0x1f, 0x03, 0x66, 0x03, 0xaf, 0x0f, 0xcf, 0x0f, 0x91, 0x03, 0xaf, 0x03, 00, 0x09, 0xcf, 0x03, 0xbe, 0x0f, 0xcf, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xcb, 
0x03, 0xce, 0x03, 0xd3, 0x03, 0xd7, 0x03, 0xdb, 0x03, 0xdd, 0x0c, 0xe0, 0x03, 0x8c, 0x03, 0x45, 0x09, 0xe0, 0x03, 0x16, 0x03, 0xcf, 0x03, 0x6e, 0x03, 0x7c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x9c, 0x12, 0xe0, 0x03, 00, 0x03, 0xcf, 0x2d, 0xe0, 0x03, 0x52, 0x0c, 0x2c, 0x03, 00, 0x0c, 0x2c, 0x03, 0x52, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x20, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x6e, 0x03, 0x45, 0x0f, 0xe0, 0x03, 0x45, 0x03, 0x20, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0x8c, 0x03, 0x7c, 0x0f, 0xe0, 0x03, 0x20, 0x03, 0xbd, 0x15, 0xe0, 0x03, 0xbd, 0x03, 0x60, 0x03, 0x52, 0x03, 00, 0x03, 0x0c, 0x03, 0x60, 0x03, 0x7c, 0x03, 0x45, 0x03, 0x52, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x9c, 0x03, 0xbd, 0x03, 0x60, 0x03, 0x45, 0x03, 0x2c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x20, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x45, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x9c, 0x03, 0x20, 0x06, 0x9c, 0x03, 0x20, 0x06, 0x9c, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 00, 0x6c, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x66, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x7c, 0x03, 0x52, 0x0c, 0xe0, 0x03, 0x2c, 0x03, 0x7c, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x03, 0xcf, 0x03, 0x8c, 0x03, 0x45, 0x03, 0x16, 0x03, 0x60, 0x03, 0xcf, 0x5d, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x06, 0xad, 0x09, 0xe0, 0x03, 0x16, 0x03, 0xad, 0x0f, 0xe0, 0x03, 0x60, 0x03, 0x52, 0x03, 0x6e, 0x03, 0x60, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0xbd, 0x06, 0x60, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x60, 0x03, 0x9c, 0x09, 0x2c, 0x03, 0x20, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0x45, 0x03, 0x2c, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x03, 0xe0, 0x03, 00, 0x0f, 0xe0, 0x03, 0x7c, 0x03, 0x2c, 0x03, 0x60, 0x03, 00, 0x1b, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x7c, 0x18, 0xe0, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0xbb, 0x03, 0x1f, 0x03, 0xb4, 0x03, 0x92, 0x03, 0xce, 0x06, 0xcd, 0x03, 0x8f, 0x03, 0x59, 0x03, 0xcf, 0x03, 0x91, 0x03, 0x59, 0x03, 0xcf, 0x03, 0x59, 0x03, 0x91, 0x03, 0xcf, 0x03, 0x29, 0x06, 0xcf, 0x03, 0xbe, 0x03, 0x29, 0x03, 0x59, 0x03, 0x29, 0x03, 0x4c, 0x03, 0x91, 0x03, 0x4c, 0x03, 0x40, 0x03, 0xcf, 0x03, 0x9f, 0x03, 0x1f, 0x12, 0xcf, 0x03, 0x59, 0x03, 0x66, 0x06, 0x59, 0x03, 0x1f, 0x15, 0xcf, 0x03, 0x0c, 0x03, 0x40, 0x0c, 0xcf, 0x03, 0x1f, 0x03, 0x9f, 0x0c, 0xcf, 0x03, 0xaf, 0x09, 0xcf, 0x03, 0x29, 0x09, 0x91, 0x03, 0x74, 0x12, 0xcf, 0x03, 0x74, 0x03, 0x29, 0x03, 0x59, 0x03, 00, 0x12, 0xcf, 0x03, 0xce, 0x09, 0xcd, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xd5, 0x03, 0xda, 0x03, 0xdd, 0x12, 0xe0, 0x03, 0x52, 0x03, 0x6e, 0x09, 0xe0, 0x03, 0x20, 0x03, 0xbd, 0x0f, 0xe0, 0x03, 0x2c, 0x03, 0x9c, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x0c, 0x3f, 0xe0, 0x03, 00, 0x18, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x06, 0xad, 0x09, 0xe0, 0x03, 0x16, 0x03, 0xad, 0x12, 0xe0, 0x03, 0x7c, 0x03, 0x52, 0x0c, 0xe0, 0x03, 0x2c, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0x60, 0x03, 0x52, 0x03, 0x6e, 0x03, 0x60, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xad, 0x03, 00, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0x45, 0x03, 0x2c, 0x03, 0x9c, 0x03, 0xad, 0x03, 0x45, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x45, 0x06, 0xe0, 0x0c, 0x2c, 0x03, 0x52, 0x03, 0xe0, 0x0f, 0x2c, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x16, 0x06, 0x60, 0x03, 0x16, 0x06, 
0x60, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 00, 0x6c, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x66, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x21, 0xe0, 0x03, 0x8c, 0x03, 0x20, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x45, 0x03, 0x8c, 0x1b, 0xe0, 0x03, 0x52, 0x0c, 0x2c, 0x03, 0x20, 0x0f, 0xe0, 0x03, 0x7c, 0x1b, 0x60, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0x16, 0x06, 0xad, 0x03, 0x16, 0x03, 0x52, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0x38, 0x03, 0x8c, 0x03, 0x7c, 0x03, 0x38, 0x03, 0x8c, 0x03, 0x45, 0x03, 0x16, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0x0c, 0x09, 0x2c, 0x03, 0x0c, 0x09, 0x2c, 0x03, 0x0c, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0x9c, 0x03, 0x45, 0x03, 0x6e, 0x03, 0x9c, 0x03, 0xbd, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x03, 0x45, 0x03, 0x2c, 0x03, 0x60, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0x16, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x45, 0x03, 0xe0, 0x03, 0x9c, 0x03, 00, 0x18, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x38, 0x03, 0x45, 0x03, 0x2c, 0x03, 0x38, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0xad, 0x03, 0x38, 0x03, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x9c, 0x03, 0xcb, 0x03, 0x1f, 0x03, 0xc4, 0x03, 0xd0, 0x03, 0xcd, 0x03, 0x8f, 0x03, 0x15, 0x03, 0x29, 0x03, 0x1f, 0x03, 0x15, 0x03, 0x29, 0x03, 0x15, 0x03, 0x1f, 0x03, 0x29, 0x03, 0x0c, 0x0f, 0xcf, 0x03, 0x29, 0x03, 0xaf, 0x06, 0xcf, 0x03, 00, 0x06, 0xcf, 0x03, 0x40, 0x03, 0x74, 0x0c, 0xcf, 0x03, 0x59, 0x03, 0x0c, 0x03, 0x40, 0x03, 0x91, 0x03, 0x34, 0x03, 0x40, 0x03, 0x1f, 0x03, 0x74, 0x0f, 0xcf, 0x03, 0x0c, 0x03, 0x82, 0x0c, 0xcf, 0x03, 0x29, 0x03, 0x91, 0x0c, 0xcf, 0x03, 0x34, 0x09, 0x29, 0x03, 0x15, 0x03, 0x1f, 0x06, 0x59, 0x03, 0x91, 0x0f, 0xcf, 0x03, 0x91, 0x03, 0x40, 0x03, 0xcf, 0x03, 0x91, 0x03, 00, 0x0f, 0xcf, 0x03, 0xce, 0x09, 0xcd, 0x03, 0xd0, 0x03, 0xd5, 0x03, 0xd9, 0x03, 0xdc, 0x03, 0xdf, 0x15, 0xe0, 0x03, 0x20, 0x03, 0xad, 0x09, 0xe0, 0x03, 0x52, 0x03, 0x6e, 0x0f, 0xe0, 0x03, 0x52, 0x03, 0xad, 0x0f, 0xe0, 0x03, 0x7c, 0x03, 0x38, 0x09, 0xe0, 0x03, 0x7c, 0x1b, 0x60, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0x16, 0x03, 0xcf, 0x03, 0xe0, 0x03, 00, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x16, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x16, 0x06, 0xad, 0x03, 0x16, 0x03, 0x52, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x2c, 0x24, 0xe0, 0x03, 0x8c, 0x03, 0x20, 0x0c, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0x38, 0x03, 0x8c, 0x03, 0x7c, 0x03, 0x38, 0x03, 0x8c, 0x03, 0x45, 0x03, 0x16, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x03, 0xad, 0x03, 0x7c, 0x06, 0xad, 0x03, 0x2c, 0x03, 0x60, 0x03, 0x6e, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x20, 0x03, 0x0c, 0x03, 0xbd, 0x03, 0x52, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x0f, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x8c, 0x03, 0xad, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 00, 0x6c, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x66, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x1e, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x30, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0x7c, 0x1b, 0x60, 0x03, 0x7c, 0x09, 0xe0, 0x03, 0xbd, 0x09, 0xe0, 0x03, 0x45, 0x03, 0x20, 0x03, 0x16, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x16, 0x03, 0x8c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x6e, 0x03, 00, 0x03, 0x52, 0x03, 0xad, 0x0f, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x8c, 0x03, 0x60, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x9c, 
0x03, 0x60, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 00, 0x03, 0xad, 0x12, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x7c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0x45, 0x03, 0x2c, 0x09, 0xe0, 0x06, 0x6e, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x52, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x7c, 0x03, 0x5f, 0x03, 0xd9, 0x03, 0xd4, 0x03, 0xd0, 0x03, 0xce, 0x06, 0xcd, 0x03, 0x29, 0x03, 0xae, 0x18, 0xcf, 0x03, 0xbe, 0x03, 0x15, 0x09, 0xcf, 0x03, 00, 0x06, 0xcf, 0x03, 0xaf, 0x03, 0x15, 0x09, 0xcf, 0x03, 0x40, 0x03, 0x66, 0x03, 0x59, 0x03, 0x91, 0x03, 0xbe, 0x03, 0x1f, 0x03, 0xcf, 0x03, 0xbe, 0x03, 0x15, 0x03, 0x9f, 0x0c, 0xcf, 0x03, 00, 0x03, 0xbe, 0x0c, 0xcf, 0x03, 0x1f, 0x03, 0x91, 0x18, 0xcf, 0x03, 0x59, 0x03, 0x91, 0x18, 0xcf, 0x03, 0x59, 0x03, 0x91, 0x03, 0xcf, 0x03, 0xbe, 0x03, 00, 0x03, 0x9f, 0x03, 0xcf, 0x03, 0xce, 0x09, 0xcd, 0x03, 0xce, 0x03, 0xd0, 0x03, 0xd4, 0x03, 0xd9, 0x03, 0xdc, 0x03, 0xdf, 0x18, 0xe0, 0x03, 0xbd, 0x03, 0x16, 0x0c, 0xe0, 0x03, 0xad, 0x03, 0x20, 0x24, 0xe0, 0x03, 0x2c, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0x7c, 0x1b, 0x60, 0x03, 0x7c, 0x09, 0xe0, 0x03, 0xad, 0x03, 0x20, 0x06, 0xe0, 0x03, 00, 0x06, 0xe0, 0x03, 0x38, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 0xbd, 0x09, 0xe0, 0x03, 0x45, 0x03, 0x20, 0x03, 0x16, 0x03, 0xad, 0x21, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x03, 0xad, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x16, 0x03, 0x8c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x6e, 0x03, 00, 0x03, 0x52, 0x03, 0xad, 0x0c, 0xe0, 0x06, 0xcf, 0x03, 0x20, 0x06, 0xe0, 0x03, 0xad, 0x03, 0xcf, 0x03, 0x38, 0x03, 0xcf, 0x03, 0x16, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x45, 0x03, 0x52, 0x03, 0x2c, 0x03, 0x38, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x38, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x16, 0x09, 0x60, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x9c, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 00, 0x12, 0xe0, 0x03, 0x45, 0x03, 0xbd, 0x54, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x66, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x1b, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x7c, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x30, 0xe0, 0x03, 0x8c, 0x03, 0x60, 0x48, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x0f, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x20, 0x03, 0x6e, 0x03, 0x20, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0x20, 0x09, 0x9c, 0x03, 0x20, 0x09, 0x9c, 0x03, 0x20, 0x03, 0x9c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0x60, 0x03, 0x20, 0x03, 0x2c, 0x03, 0x7c, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x52, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x16, 0x03, 0x2c, 0x03, 0x52, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x03, 0x9c, 0x03, 0x45, 0x03, 0x16, 0x03, 0x9c, 0x0f, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x9c, 0x12, 0xe0, 0x03, 0x20, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x20, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0x7c, 0x03, 0x45, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x16, 0x03, 0xdf, 0x03, 0xa9, 0x06, 0x2a, 0x03, 0x29, 0x03, 00, 0x09, 0x29, 0x03, 0x0c, 0x09, 0x29, 0x09, 0xcf, 0x06, 0x59, 0x09, 0xcf, 0x03, 0x0c, 0x09, 0xcf, 0x03, 0x66, 0x03, 0xbe, 0x03, 0xcf, 0x03, 0x9f, 0x03, 0x34, 0x03, 0xcf, 0x03, 0x59, 0x03, 0x91, 0x03, 0x40, 0x03, 0x82, 0x06, 0xcf, 0x06, 0x59, 0x0c, 0xcf, 0x03, 0x29, 0x0c, 0xcf, 0x03, 0xbe, 0x03, 0x0c, 0x12, 0xcf, 0x03, 0x9f, 0x06, 0x59, 0x03, 0x29, 0x03, 0x74, 0x18, 0xcf, 0x03, 0x91, 0x03, 0x29, 0x03, 0x91, 0x03, 0x40, 0x03, 0x15, 0x03, 
0x90, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd4, 0x03, 0xd9, 0x03, 0xdc, 0x03, 0xdf, 0x1e, 0xe0, 0x03, 0x45, 0x03, 0x60, 0x0f, 0xe0, 0x03, 0x2c, 0x03, 0x9c, 0x1e, 0xe0, 0x03, 0x6e, 0x03, 0x2c, 0x36, 0xe0, 0x03, 0x45, 0x03, 0x6e, 0x06, 0xe0, 0x03, 00, 0x06, 0xe0, 0x03, 0x8c, 0x03, 0x45, 0x1b, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x21, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x20, 0x03, 0x6e, 0x03, 0x20, 0x0f, 0xe0, 0x03, 0xad, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x20, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x38, 0x06, 0x60, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x8c, 0x03, 0x60, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x9c, 0x06, 0x2c, 0x03, 0x9c, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x16, 0x09, 0x60, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x9c, 0x06, 0x38, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x0c, 0x06, 0x2c, 0x03, 0x0c, 0x09, 0x2c, 0x0c, 0xe0, 0x03, 00, 0x0f, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x57, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x66, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x15, 0xe0, 0x03, 0xbd, 0x03, 0x45, 0x03, 0x0c, 0x03, 0xad, 0x12, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x30, 0xe0, 0x03, 0x60, 0x03, 0x6e, 0x45, 0xe0, 0x06, 0x45, 0x18, 0xe0, 0x03, 0xcf, 0x03, 0x16, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x03, 0x16, 0x15, 0x60, 0x03, 0x16, 0x03, 0x60, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x45, 0x03, 0x52, 0x03, 0x60, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x20, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0xbd, 0x03, 0xe0, 0x06, 0x9c, 0x03, 0x6e, 0x03, 0x45, 0x03, 0x9c, 0x03, 0xad, 0x03, 0xcf, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x03, 0x9c, 0x03, 0x16, 0x03, 0xcf, 0x12, 0xe0, 0x03, 0xbd, 0x03, 0xe0, 0x06, 0x2c, 0x03, 0x38, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x20, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x0c, 0x03, 0x45, 0x09, 0xe0, 0x03, 0x38, 0x03, 0xe0, 0x03, 0x45, 0x03, 0x8c, 0x03, 0xe0, 0x03, 0xdf, 0x03, 0xdc, 0x03, 0x42, 0x03, 0x69, 0x03, 0xd3, 0x03, 0xcf, 0x03, 0x9e, 0x03, 0x29, 0x03, 0xcd, 0x03, 0xce, 0x0c, 0xcf, 0x03, 0x15, 0x03, 0xaf, 0x06, 0xcf, 0x03, 0x91, 0x03, 0x29, 0x12, 0xcf, 0x03, 0x59, 0x03, 0x91, 0x03, 0xcf, 0x03, 0x59, 0x06, 0x40, 0x09, 0xcf, 0x03, 0x34, 0x03, 0x74, 0x1b, 0xcf, 0x03, 0x4c, 0x03, 0x40, 0x0f, 0xcf, 0x03, 0x40, 0x03, 0x34, 0x03, 0x82, 0x03, 0x91, 0x03, 0x34, 0x03, 0x29, 0x03, 0x1f, 0x03, 0x91, 0x15, 0xcf, 0x03, 0x91, 0x03, 0x59, 0x03, 0x90, 0x03, 0x15, 0x03, 0xbd, 0x03, 0xcd, 0x03, 0xcf, 0x03, 0xd3, 0x03, 0xd5, 0x03, 0xd9, 0x03, 0x51, 0x03, 0x20, 0x03, 0x2c, 0x03, 0xcf, 0x1b, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x03, 0xcf, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 0x16, 0x03, 0xcf, 0x18, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0xbd, 0x33, 0xe0, 0x03, 0xbd, 0x03, 0x0c, 0x03, 0xcf, 0x06, 0xe0, 0x03, 00, 0x09, 0xe0, 0x03, 0x16, 0x03, 0xad, 0x15, 0xe0, 0x06, 0x45, 0x1e, 0xe0, 0x03, 0xbd, 0x03, 0x45, 0x03, 0x0c, 0x03, 0xad, 0x15, 0xe0, 0x03, 0xcf, 0x03, 0x16, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0x8c, 0x03, 00, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x6e, 0x03, 0x8c, 0x03, 0x9c, 0x03, 0x60, 0x0c, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0xad, 0x09, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x20, 0x09, 0x9c, 0x03, 0x20, 0x06, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 00, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0x52, 0x03, 0x60, 0x57, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 
0x51, 0xe0, 0x03, 0x2c, 0x15, 00, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0x7c, 0x03, 0x38, 0x03, 0x0c, 0x03, 0x60, 0x03, 0xcf, 0x15, 0xe0, 0x03, 0xbd, 0x03, 0x0c, 0x03, 0xad, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0x8c, 0x1b, 0xe0, 0x03, 0x38, 0x03, 0x9c, 0x3f, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 0x45, 0x1b, 0xe0, 0x03, 0x9c, 0x03, 0x20, 0x06, 0xe0, 0x03, 0xad, 0x15, 0xe0, 0x03, 0x2c, 0x15, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x20, 0x03, 0x60, 0x03, 0x20, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x52, 0x09, 0x2c, 0x03, 0x60, 0x06, 0xe0, 0x06, 0x60, 0x03, 0x45, 0x03, 0x2c, 0x03, 0x60, 0x06, 0x7c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x38, 0x15, 0xe0, 0x03, 0x45, 0x03, 0x60, 0x18, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x20, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0xad, 0x03, 0xbd, 0x03, 0x2c, 0x03, 0x20, 0x0c, 0xe0, 0x03, 0x0c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x06, 0xe0, 0x03, 0xbd, 0x03, 0x7c, 0x03, 0x44, 0x03, 0x2a, 0x03, 0x4e, 0x03, 0x15, 0x03, 0xbe, 0x0f, 0xce, 0x03, 0x73, 0x03, 0x40, 0x06, 0xbe, 0x03, 0xcf, 0x06, 0x4c, 0x12, 0xcf, 0x06, 0x66, 0x03, 0xbe, 0x03, 0x34, 0x03, 0x40, 0x09, 0xcf, 0x03, 0x66, 0x03, 0x1f, 0x1b, 0xcf, 0x03, 0x66, 0x03, 0x1f, 0x03, 0xbe, 0x0f, 0xcf, 0x03, 00, 0x09, 0xcf, 0x03, 0x4c, 0x03, 0x91, 0x03, 0x9f, 0x03, 0x29, 0x03, 0x34, 0x03, 0xbe, 0x09, 0xcf, 0x0c, 0xce, 0x03, 0x40, 0x03, 0x59, 0x03, 0xcf, 0x03, 0xd0, 0x03, 0xd5, 0x03, 0xda, 0x03, 0xdd, 0x03, 0xad, 0x03, 0x45, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x6e, 0x18, 0xe0, 0x03, 0xbd, 0x03, 0x16, 0x03, 0x8c, 0x15, 0xe0, 0x03, 0x45, 0x03, 0x38, 0x03, 0xcf, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x45, 0x03, 0x16, 0x03, 0xad, 0x33, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x8c, 0x09, 0xe0, 0x03, 00, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x20, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 0x45, 0x15, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0x7c, 0x03, 0x38, 0x03, 0x0c, 0x03, 0x60, 0x03, 0xcf, 0x18, 0xe0, 0x03, 0x9c, 0x03, 0x20, 0x06, 0xe0, 0x03, 0xad, 0x12, 0xe0, 0x03, 0xbd, 0x03, 0x52, 0x03, 0x2c, 0x03, 0x16, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x38, 0x03, 0x2c, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x20, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x16, 0x09, 0x60, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 0x2c, 0x15, 0xe0, 0x03, 0x38, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x60, 0x03, 0x20, 0x03, 0xcf, 0x57, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x66, 0xe0, 0x03, 0xad, 0x03, 0x52, 0x0c, 0xe0, 0x03, 0x16, 0x03, 0x45, 0x03, 0x8c, 0x21, 0xe0, 0x03, 0x8c, 0x03, 0x38, 0x0c, 0x2c, 0x03, 0x52, 0x03, 0x7c, 0x0c, 0xe0, 0x0f, 0x2c, 0x03, 0x0c, 0x06, 0x2c, 0x03, 0x52, 0x33, 0xe0, 0x03, 0xbd, 0x03, 0x45, 0x03, 0x0c, 0x03, 0x8c, 0x21, 0xe0, 0x03, 0x52, 0x03, 0x20, 0x09, 0x60, 0x03, 0x38, 0x03, 0x20, 0x0c, 0xe0, 0x03, 0x2c, 0x15, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0x45, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x6e, 0x12, 0x9c, 0x03, 0x8c, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0x60, 0x03, 0x45, 0x03, 0x20, 0x03, 0x38, 0x03, 0x52, 0x03, 0x16, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x60, 0x0f, 0xe0, 0x03, 0x8c, 0x03, 0x20, 0x03, 0x52, 0x1e, 0xe0, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x60, 0x06, 0x16, 0x03, 0x45, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xbd, 0x03, 0x60, 0x03, 0x20, 0x03, 0x9c, 0x03, 0x20, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x38, 0x12, 0xe0, 0x03, 0x9c, 0x03, 0x6e, 0x03, 0x2b, 0x03, 0x51, 0x03, 0x5e, 0x03, 0x1f, 0x03, 0x4e, 0x03, 0xb0, 0x03, 0xcd, 0x03, 0xca, 0x03, 0xbc, 0x03, 
0x0c, 0x03, 0xad, 0x03, 0x9f, 0x03, 0x0c, 0x03, 0x29, 0x03, 0x0c, 0x03, 0xaf, 0x12, 0xcf, 0x03, 0xbe, 0x03, 0x40, 0x03, 0x34, 0x03, 0x74, 0x03, 0xcf, 0x03, 0x82, 0x03, 0x40, 0x03, 0x1f, 0x03, 0x4c, 0x03, 0xbe, 0x12, 0xcf, 0x03, 0x91, 0x03, 0x4c, 0x03, 0x15, 0x03, 0x40, 0x03, 0xbe, 0x12, 0xcf, 0x03, 0x59, 0x03, 0x1f, 0x03, 0x59, 0x03, 0x4c, 0x03, 0x1f, 0x03, 0xbe, 0x06, 0xcf, 0x03, 0x4c, 0x03, 0xbe, 0x06, 0xcf, 0x06, 0xcd, 0x03, 0xcb, 0x03, 0x7e, 0x03, 0x1e, 0x03, 0x4c, 0x03, 0xd5, 0x03, 0xd9, 0x03, 0xdb, 0x03, 0xdd, 0x03, 0xdf, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 0xcf, 0x03, 0x52, 0x03, 0x7c, 0x18, 0xe0, 0x03, 0xcf, 0x03, 0x6e, 0x18, 0xe0, 0x03, 0xcf, 0x03, 0x60, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x60, 0x03, 0xcf, 0x39, 0xe0, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 00, 0x0c, 0xe0, 0x03, 0x8c, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x45, 0x03, 0x0c, 0x03, 0x8c, 0x1b, 0xe0, 0x03, 0x16, 0x03, 0x45, 0x03, 0x8c, 0x24, 0xe0, 0x03, 0x52, 0x03, 0x20, 0x09, 0x60, 0x03, 0x38, 0x03, 0x20, 0x0c, 0xe0, 0x03, 0x7c, 0x03, 0x2c, 0x03, 0x52, 0x03, 0x38, 0x03, 0xbd, 0x03, 0x20, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x45, 0x03, 0x52, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0x60, 0x03, 0x20, 0x03, 0x9c, 0x0f, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x20, 0x0c, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 0x2c, 0x15, 0xe0, 0x03, 0xcf, 0x06, 0x20, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x45, 0x03, 0xcf, 0x5a, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0x15, 0xe0, 0x03, 0xcf, 0x03, 0x60, 0x03, 0xcf, 0x27, 0xe0, 0x03, 0xcf, 0x09, 0x9c, 0x03, 0xbd, 0x0f, 0xe0, 0x03, 0x52, 0x0c, 0xe0, 0x03, 0xad, 0x06, 0x2c, 0x03, 0x52, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x12, 0x60, 0x03, 0x9c, 0x03, 0xbd, 0x03, 0x60, 0x06, 0x9c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x60, 0x03, 0x8c, 0x03, 0x9c, 0x03, 0x2c, 0x03, 0x38, 0x03, 0xbd, 0x0f, 0xe0, 0x03, 0x6e, 0x03, 0x9c, 0x24, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xad, 0x0f, 0xe0, 0x03, 0xcf, 0x03, 0x6e, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x60, 0x09, 0x2c, 0x03, 0x45, 0x03, 0xad, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x2c, 0x03, 0x52, 0x03, 0x7c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0xdf, 0x03, 0xdd, 0x03, 0x9a, 0x03, 0x2a, 0x03, 0x6a, 0x03, 0xd4, 0x03, 0xd1, 0x03, 0x9f, 0x03, 0xce, 0x03, 0xcd, 0x03, 0xcb, 0x03, 0x8f, 0x03, 0xad, 0x06, 0xce, 0x1e, 0xcf, 0x03, 0xbe, 0x03, 0x90, 0x1b, 0xce, 0x03, 0xae, 0x03, 0x66, 0x03, 0xae, 0x09, 0xce, 0x12, 0xcf, 0x03, 0xbe, 0x06, 0x91, 0x06, 0xcf, 0x06, 0xce, 0x06, 0xcd, 0x03, 0xcb, 0x03, 0xcd, 0x03, 0xce, 0x03, 0xcf, 0x03, 0xd1, 0x03, 0x68, 0x03, 0x96, 0x03, 0xda, 0x03, 0xdc, 0x03, 0xdd, 0x03, 0xdf, 0x0c, 0xe0, 0x03, 0x8c, 0x03, 0x2c, 0x03, 0x60, 0x51, 0xe0, 0x03, 0xcf, 0x4e, 0xe0, 0x03, 0x60, 0x18, 0xe0, 0x03, 0xcf, 0x03, 0x60, 0x03, 0xcf, 0x4e, 0xe0, 0x03, 0xcf, 0x09, 0x9c, 0x03, 0xbd, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x20, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0x16, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0x52, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x03, 0x7c, 0x03, 0x16, 0x03, 0x7c, 0x03, 0xcf, 0x12, 0xe0, 0x03, 0x2c, 0x15, 0xe0, 0x03, 0x45, 0x03, 0x2c, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x38, 0x03, 0xbd, 0x0c, 0xe0, 0x03, 0x2c, 0x1b, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xcf, 0x60, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x69, 0xe0, 0x03, 0xdf, 0x03, 0xdd, 0x03, 0xda, 0x03, 0xd7, 0x03, 0xd4, 0x03, 0xcf, 0x90, 0xce, 0x03, 0xcf, 0x03, 0xd4, 0x03, 0xd7, 0x03, 0xda, 0x03, 0xdd, 0x03, 0xdf, 0xff, 0xe0, 0x60, 0xe0, 0x03, 0xcf, 0xff, 0xe0, 0x06, 0xf3, -0x03, 
0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x6c, 0xe0, 0x06, 0xdf, 0x03, 0xdd, 0x03, 0xdc, 0x03, 0xdb, 0x03, 0xd9, 0x03, 0xd6, 0x03, 0xd4, 0x03, 0xd1, 0x03, 0xd0, 0x03, 0xcd, 0x03, 0xcb, 0x03, 0xca, 0x09, 0xcb, 0x0c, 0xcd, 0x09, 0xce, 0x24, 0xcf, 0x09, 0xce, 0x0c, 0xcd, 0x09, 0xcb, 0x03, 0xca, 0x03, 0xcb, 0x03, 0xcd, 0x03, 0xd0, 0x03, 0xd1, 0x03, 0xd4, 0x03, 0xd6, 0x03, 0xd9, 0x03, 0xdb, 0x03, 0xdc, 0x03, 0xdd, 0x06, 0xdf, 0xff, 0xe0, 0xff, 0xe0, 0x66, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x78, 0xe0, 0x06, 0xdf, 0x03, 0xdd, 0x03, 0xdb, 0x03, 0xda, 0x03, 0xd7, 0x03, 0xd5, 0x03, 0xd3, 0x03, 0xd1, 0x06, 0xd0, 0x06, 0xcf, 0x09, 0xce, 0x09, 0xcd, 0x03, 0xce, 0x1e, 0xcd, 0x03, 0xce, 0x09, 0xcd, 0x09, 0xce, 0x06, 0xcf, 0x06, 0xd0, 0x03, 0xd1, 0x03, 0xd3, 0x03, 0xd5, 0x03, 0xd7, 0x03, 0xda, 0x03, 0xdb, 0x03, 0xdd, 0x06, 0xdf, 0xff, 0xe0, 0xff, 0xe0, 0x72, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0x84, 0xe0, 0x03, 0xdf, 0x06, 0xdd, 0x03, 0xdc, 0x03, 0xdb, 0x03, 0xda, 0x03, 0xd9, 0x03, 0xd7, 0x03, 0xd6, 0x03, 0xd5, 0x03, 0xd4, 0x03, 0xd3, 0x03, 0xd1, 0x03, 0xd0, 0x06, 0xcf, 0x03, 0xce, 0x09, 0xcd, 0x06, 0xcb, 0x09, 0xcd, 0x03, 0xce, 0x06, 0xcf, 0x03, 0xd0, 0x03, 0xd1, 0x03, 0xd3, 0x03, 0xd4, 0x03, 0xd5, 0x03, 0xd6, 0x03, 0xd7, 0x03, 0xd9, 0x03, 0xda, 0x03, 0xdb, 0x03, 0xdc, 0x06, 0xdd, 0x03, 0xdf, 0xff, 0xe0, 0xff, 0xe0, 0x7e, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x51, 0xe0, 0x03, 0x9c, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0x52, 0x12, 0xe0, 0x03, 0xbd, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0xad, 0x3c, 0xe0, 0x03, 0x7c, 0x03, 0xad, 0xbd, 0xe0, 0x03, 0x9c, 0x2a, 0xe0, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x8c, 0x03, 0x9c, 0x03, 0x38, 0x12, 0xe0, 0x03, 0x6e, 0x03, 0xad, 0x48, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0x18, 0xe0, 0x03, 0x7c, 0x03, 0xad, 0x03, 0xe0, 0x12, 0x9c, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0xcf, 0x15, 0xe0, 0x06, 0xdf, 0x03, 0xdd, 0x03, 0xdc, 0x03, 0xdb, 0x03, 0xda, 0x03, 0xd9, 0x03, 0xd7, 0x03, 0xd6, 0x03, 0xd5, 0x03, 0xd4, 0x03, 0xd3, 0x06, 0xd1, 0x0c, 0xd0, 0x06, 0xd1, 0x03, 0xd3, 0x03, 0xc3, 0x03, 0xa4, 0x03, 0xd6, 0x03, 0xd7, 0x03, 0xd9, 0x03, 0xda, 0x03, 0xdb, 0x03, 0xdc, 0x03, 0xdd, 0x03, 0xdf, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0xbd, 0x03, 0x52, 0x18, 0xe0, 0x03, 0x6e, 0xff, 0xe0, 0xff, 0xe0, 0x60, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x51, 0xe0, 0x03, 0x45, 0x06, 0xbd, 0x09, 0x60, 0x03, 0x16, 0x09, 0x60, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0x7c, 0x03, 0x52, 0x12, 0xe0, 0x03, 0x52, 0x03, 0x38, 0x09, 0x2c, 0x03, 0x20, 0x03, 0xad, 0x15, 0xe0, 0x03, 0x2c, 0x03, 0x9c, 0x45, 0xe0, 0x03, 00, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x15, 0xe0, 0x03, 00, 0x4b, 0xe0, 0x03, 00, 0x2a, 0xe0, 0x03, 00, 0x06, 0xe0, 0x03, 0xad, 0x03, 0x2c, 0x03, 0x60, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 0xcf, 0x03, 0x16, 0x03, 0xcf, 0x18, 0xe0, 0x03, 0xcf, 0x2d, 0xe0, 0x03, 0x20, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0xcf, 0x0f, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x16, 0x06, 0x60, 0x03, 0x16, 0x06, 0x60, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 0x20, 0x03, 0x9c, 0x39, 0xe0, 0x03, 0xbd, 0x15, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x45, 0x15, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x09, 0xe0, 0x03, 0xad, 0x03, 0x38, 0x03, 0x60, 0x03, 0x6e, 0x15, 0xe0, 0x03, 0x38, 0x03, 0x8c, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0xad, 0xff, 0xe0, 0xff, 0xe0, 0x45, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4b, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 0x52, 0x03, 0x9c, 0x03, 0x52, 0x09, 0x9c, 0x03, 0x20, 0x09, 0x9c, 0x09, 0xe0, 0x03, 0x38, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xad, 0x03, 0xe0, 0x06, 0x45, 0x18, 0xe0, 0x03, 0x16, 
0x0c, 0xe0, 0x03, 0x8c, 0x39, 0xe0, 0x03, 00, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x15, 0xe0, 0x03, 00, 0x4b, 0xe0, 0x03, 00, 0x2a, 0xe0, 0x03, 00, 0x09, 0xe0, 0x03, 0x8c, 0x12, 0xe0, 0x03, 0x52, 0x09, 00, 0x03, 0x2c, 0x03, 0x20, 0x03, 0x16, 0x0c, 0xe0, 0x03, 0x38, 0x03, 0x45, 0x03, 0xcf, 0x1b, 0xe0, 0x03, 0xad, 0x09, 0x2c, 0x03, 0x0c, 0x06, 0x2c, 0x03, 0x20, 0x03, 0x2c, 0x03, 0xad, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0x45, 0x03, 0x6e, 0x03, 0xbd, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 0x0c, 0x03, 0xcf, 0x1b, 0xe0, 0x03, 0x7c, 0x03, 0x45, 0x09, 0x2c, 0x03, 0x20, 0x03, 0x0c, 0x03, 0x16, 0x03, 00, 0x03, 0x16, 0x03, 0x7c, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x45, 0x18, 0xe0, 0x03, 0x9c, 0x03, 0x38, 0x03, 0x9c, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0xbd, 0x15, 0xe0, 0x03, 0x9c, 0x03, 0x38, 0x03, 0x9c, 0x03, 0x60, 0x03, 0x20, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 0xad, 0xff, 0xe0, 0xff, 0xe0, 0x30, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x7c, 0x03, 0x45, 0x03, 0x38, 0x03, 0xcf, 0x03, 0x7c, 0x06, 0x60, 0x03, 0x16, 0x06, 0x60, 0x03, 0x7c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xbd, 0x09, 0x9c, 0x03, 0x6e, 0x03, 0x16, 0x03, 0x52, 0x03, 0x45, 0x15, 0xe0, 0x03, 0x45, 0x03, 0x52, 0x12, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x03, 0x52, 0x03, 00, 0x03, 0x2c, 0x03, 0x38, 0x03, 0x9c, 0x03, 0xe0, 0x06, 0x52, 0x36, 0xe0, 0x03, 00, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x15, 0xe0, 0x03, 00, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0x6e, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x60, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0xad, 0x03, 0xbd, 0x12, 0xe0, 0x03, 00, 0x1b, 0xe0, 0x03, 0x52, 0x0c, 0x2c, 0x03, 00, 0x0c, 0x2c, 0x03, 0x52, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x20, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x6e, 0x03, 0x45, 0x0f, 0xe0, 0x03, 0x45, 0x03, 0x20, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0x8c, 0x03, 0x7c, 0x0f, 0xe0, 0x03, 0x20, 0x03, 0xbd, 0x15, 0xe0, 0x03, 0xbd, 0x03, 0x60, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x9c, 0x03, 0x20, 0x06, 0x9c, 0x03, 0x20, 0x06, 0x9c, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 00, 0x1e, 0xe0, 0x03, 0x8c, 0x06, 0x9c, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x38, 0x03, 0xbd, 0x15, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x03, 0x7c, 0x15, 0xe0, 0x03, 0x52, 0x03, 0x2c, 0x06, 0x16, 0x03, 0x52, 0x03, 0x60, 0x18, 0xe0, 0x09, 0x2c, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x45, 0x03, 0x8c, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x03, 0xcf, 0xff, 0xe0, 0xff, 0xe0, 0x2d, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x51, 0xe0, 0x03, 0x38, 0x03, 0x6e, 0x03, 0x8c, 0x03, 0xad, 0x0f, 0x9c, 0x03, 0xad, 0x09, 0xe0, 0x03, 0x20, 0x03, 0xe0, 0x03, 0xbd, 0x09, 0x60, 0x03, 0x45, 0x03, 0x20, 0x03, 0x9c, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x7c, 0x15, 0xe0, 0x03, 0x6e, 0x03, 0x7c, 0x03, 0x60, 0x03, 0x45, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x20, 0x03, 0x9c, 0x33, 0xe0, 0x03, 00, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 0x60, 0x03, 0x45, 0x03, 0x2c, 0x03, 00, 0x06, 0x2c, 0x03, 0x60, 0x03, 0x6e, 0x03, 0x9c, 0x03, 0x16, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0x7c, 0x03, 0x45, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x7c, 0x12, 0xe0, 0x03, 00, 0x03, 0x7c, 0x03, 0xcf, 0x24, 0xe0, 0x03, 00, 0x18, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x06, 0xad, 0x09, 0xe0, 0x03, 0x16, 0x03, 0xad, 0x12, 0xe0, 0x03, 0x7c, 0x03, 0x52, 0x0c, 0xe0, 0x03, 0x2c, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0x60, 0x03, 0x52, 0x03, 0x6e, 0x03, 0x60, 0x03, 0xcf, 0x06, 
0xe0, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x16, 0x06, 0x60, 0x03, 0x16, 0x06, 0x60, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 00, 0x2d, 0xe0, 0x03, 0x45, 0x03, 0x7c, 0x18, 0xe0, 0x03, 0x7c, 0x03, 0x16, 0x03, 0x9c, 0x1e, 0xe0, 0x03, 0x45, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x09, 0x60, 0x03, 0xbd, 0x18, 0xe0, 0x03, 0x7c, 0x03, 0x45, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x60, 0x03, 0x52, 0xff, 0xe0, 0xff, 0xe0, 0x2d, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 0x9c, 0x03, 0x38, 0x03, 0x52, 0x12, 0x2c, 0x09, 0xe0, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x2c, 0x03, 0x38, 0x03, 0x45, 0x03, 0x2c, 0x03, 0x38, 0x03, 0x7c, 0x12, 0xe0, 0x03, 0x52, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x33, 0xe0, 0x03, 00, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 0x8c, 0x03, 0x9c, 0x03, 0xcf, 0x03, 00, 0x0c, 0xe0, 0x03, 0x45, 0x03, 0x52, 0x0f, 0xe0, 0x03, 0x16, 0x06, 0xcf, 0x03, 0x0c, 0x06, 0xe0, 0x03, 0x20, 0x03, 0xbd, 0x12, 0xe0, 0x03, 00, 0x03, 0x45, 0x03, 0x0c, 0x03, 0x52, 0x03, 0xbd, 0x15, 0xe0, 0x03, 0x16, 0x03, 0xcf, 0x03, 0xe0, 0x03, 00, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x16, 0x0c, 0xe0, 0x03, 0x9c, 0x03, 0x16, 0x06, 0xad, 0x03, 0x16, 0x03, 0x52, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x2c, 0x24, 0xe0, 0x03, 0x8c, 0x03, 0x20, 0x0c, 0xe0, 0x03, 0xad, 0x03, 00, 0x03, 0x38, 0x03, 0x8c, 0x03, 0x7c, 0x03, 0x38, 0x03, 0x8c, 0x03, 0x45, 0x03, 0x16, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0x60, 0x03, 0x8c, 0x03, 0xad, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 00, 0x2a, 0xe0, 0x06, 0x60, 0x18, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x03, 0xcf, 0x21, 0xe0, 0x03, 0x16, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x60, 0x09, 0x9c, 0x03, 0xcf, 0x1b, 0xe0, 0x03, 0x20, 0x03, 0xad, 0x0f, 0xe0, 0x03, 0x16, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0xcf, 0xff, 0xe0, 0xff, 0xe0, 0x2a, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4b, 0xe0, 0x03, 0xad, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xad, 0x03, 0xcf, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x2c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0xbd, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x2c, 0x03, 0x7c, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0x45, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0x16, 0x03, 0xcf, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x20, 0x03, 0xad, 0x30, 0xe0, 0x03, 0x60, 0x0c, 0xe0, 0x03, 0x7c, 0x03, 0x45, 0x15, 0xe0, 0x03, 00, 0x06, 0xe0, 0x03, 0xcf, 0x06, 0x45, 0x12, 0xe0, 0x03, 0x52, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x9c, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0x2c, 0x15, 0xe0, 0x03, 00, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x60, 0x03, 0x0c, 0x03, 0x60, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0xad, 0x03, 0x20, 0x06, 0xe0, 0x03, 00, 0x06, 0xe0, 0x03, 0x38, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 0xbd, 0x09, 0xe0, 0x03, 0x45, 0x03, 0x20, 0x03, 0x16, 0x03, 0xad, 0x21, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x03, 0xad, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x16, 0x03, 0x8c, 0x03, 0xe0, 0x03, 0xcf, 0x03, 0x6e, 0x03, 00, 0x03, 0x52, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 0x16, 0x03, 0x9c, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x2c, 0x0c, 0xe0, 0x03, 00, 0x12, 0xe0, 0x03, 0x45, 0x03, 0xbd, 0x12, 0xe0, 0x03, 0x16, 0x03, 0xbd, 0x18, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x03, 0x7c, 0x1e, 0xe0, 0x03, 0xbd, 0x03, 0x20, 0x24, 0xe0, 0x03, 0xbd, 0x03, 0x60, 0x09, 0x2c, 0x03, 0x45, 0x03, 0x20, 0x0f, 0xe0, 0x03, 00, 0x06, 0xe0, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0x52, 0x03, 
0x6e, 0xff, 0xe0, 0xff, 0xe0, 0x2a, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x6e, 0x03, 0x9c, 0x03, 0x60, 0x03, 0x6e, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x7c, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x7c, 0x03, 0x60, 0x09, 0xe0, 0x03, 0x7c, 0x03, 0x45, 0x0c, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x9c, 0x12, 0xe0, 0x03, 0x20, 0x03, 0xad, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x38, 0x09, 0xe0, 0x03, 0x20, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x8c, 0x03, 0xbd, 0x3f, 0xe0, 0x03, 0x45, 0x03, 0x6e, 0x15, 0xe0, 0x03, 00, 0x06, 0xe0, 0x03, 0xad, 0x03, 0x52, 0x24, 0xe0, 0x03, 0x38, 0x03, 0x9c, 0x15, 0xe0, 0x03, 00, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x38, 0x03, 0x52, 0x0c, 0xe0, 0x03, 0x45, 0x03, 0x6e, 0x06, 0xe0, 0x03, 00, 0x06, 0xe0, 0x03, 0x8c, 0x03, 0x45, 0x1b, 0xe0, 0x03, 0x2c, 0x03, 0x60, 0x21, 0xe0, 0x03, 0x9c, 0x03, 0x0c, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x20, 0x03, 0x6e, 0x03, 0x20, 0x0f, 0xe0, 0x03, 0x9c, 0x06, 0x38, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x0c, 0x06, 0x2c, 0x03, 0x0c, 0x09, 0x2c, 0x0c, 0xe0, 0x03, 00, 0x0f, 0xe0, 0x03, 0xcf, 0x03, 0x0c, 0x15, 0xe0, 0x03, 00, 0x1e, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x52, 0x1b, 0xe0, 0x03, 0x6e, 0x03, 0x60, 0x06, 0xe0, 0x03, 0x60, 0x03, 0xcf, 0x15, 0xe0, 0x03, 0xad, 0x03, 0x16, 0x03, 0x8c, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x8c, 0x03, 0x2c, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0x2c, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x7c, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x2c, 0xff, 0xe0, 0xff, 0xe0, 0x2a, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x2c, 0x03, 0x9c, 0x03, 0x60, 0x03, 0x52, 0x06, 0xe0, 0x03, 0x16, 0x03, 0xe0, 0x03, 0x2c, 0x0f, 0xe0, 0x06, 0x2c, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x45, 0x03, 0x7c, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0xe0, 0x06, 0x2c, 0x03, 0x38, 0x03, 0xad, 0x06, 0xe0, 0x03, 0x20, 0x03, 0xad, 0x09, 0xe0, 0x03, 0x45, 0x03, 0x8c, 0x09, 0xe0, 0x03, 00, 0x0f, 0xe0, 0x03, 0xcf, 0x03, 0x8c, 0x36, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0xcf, 0x15, 0xe0, 0x03, 00, 0x2d, 0xe0, 0x03, 0x6e, 0x03, 0x45, 0x18, 0xe0, 0x03, 00, 0x0f, 0xe0, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x0c, 0x03, 0xcf, 0x06, 0xe0, 0x03, 00, 0x09, 0xe0, 0x03, 0x16, 0x03, 0xad, 0x15, 0xe0, 0x06, 0x45, 0x1e, 0xe0, 0x03, 0xbd, 0x03, 0x45, 0x03, 0x0c, 0x03, 0xad, 0x15, 0xe0, 0x03, 0xcf, 0x03, 0x16, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x2c, 0x15, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x03, 0xe0, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0x2c, 0x06, 0xe0, 0x03, 0x7c, 0x0c, 0xe0, 0x03, 00, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0x52, 0x03, 0x60, 0x15, 0xe0, 0x03, 0x2c, 0x03, 0x6e, 0x21, 0xe0, 0x03, 0x45, 0x03, 0x20, 0x03, 0xcf, 0x15, 0xe0, 0x03, 0x20, 0x03, 0xad, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x45, 0x18, 0xe0, 0x03, 0x38, 0x03, 0x8c, 0x21, 0xe0, 0x03, 0x6e, 0x03, 0x60, 0x03, 0xe0, 0x03, 0x20, 0x03, 0xbd, 0x09, 0xe0, 0x03, 0xcf, 0x03, 0x16, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x20, 0x03, 0x2c, 0x03, 0xcf, 0xff, 0xe0, 0xff, 0xe0, 0x18, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0x38, 0x03, 0x9c, 0x03, 0x60, 0x03, 0x52, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x45, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x2c, 0x03, 0x45, 0x09, 0xe0, 0x03, 0x7c, 0x03, 0x20, 0x15, 0xe0, 0x03, 0x9c, 0x03, 0x60, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x20, 0x03, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x16, 0x03, 0xe0, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x8c, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0xbd, 0x03, 0x20, 0x03, 0x7c, 0x30, 0xe0, 0x03, 0x6e, 0x03, 0x16, 0x03, 0xad, 0x18, 0xe0, 0x03, 0x0c, 0x03, 0x9c, 0x09, 0xe0, 0x03, 0xad, 0x03, 0x8c, 0x18, 0xe0, 
0x03, 0x60, 0x03, 0x20, 0x1b, 0xe0, 0x03, 00, 0x18, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x8c, 0x09, 0xe0, 0x03, 00, 0x09, 0xe0, 0x03, 0x9c, 0x03, 0x20, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 0x45, 0x15, 0xe0, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0x7c, 0x03, 0x38, 0x03, 0x0c, 0x03, 0x60, 0x03, 0xcf, 0x18, 0xe0, 0x03, 0x9c, 0x03, 0x20, 0x06, 0xe0, 0x03, 0xad, 0x15, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 0x2c, 0x15, 0xe0, 0x03, 0x38, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x60, 0x03, 0x20, 0x03, 0xcf, 0x15, 0xe0, 0x03, 0xad, 0x03, 0x0c, 0x03, 0x60, 0x03, 0xbd, 0x1e, 0xe0, 0x03, 0x6e, 0x03, 0x20, 0x03, 0xbd, 0x0f, 0xe0, 0x03, 0xad, 0x03, 0x20, 0x06, 0xe0, 0x03, 0x6e, 0x03, 0x38, 0x0c, 0xe0, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x45, 0x03, 0x6e, 0x0f, 0xe0, 0x03, 0xbd, 0x0f, 0xe0, 0x03, 0xcf, 0x03, 0x16, 0x03, 0x52, 0x03, 0x2c, 0x0f, 0xe0, 0x03, 0xcf, 0x03, 0xe0, 0x03, 0xad, 0x03, 0x45, 0x03, 0xe0, 0x03, 0x7c, 0x03, 0x6e, 0xff, 0xe0, 0xff, 0xe0, 0x18, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4b, 0xe0, 0x03, 0xad, 0x03, 0x52, 0x03, 0x9c, 0x03, 0x60, 0x03, 0xcf, 0x03, 0x9c, 0x03, 0x16, 0x03, 0xbd, 0x03, 0xe0, 0x03, 0x2c, 0x03, 0xe0, 0x03, 0x52, 0x03, 0x9c, 0x06, 0xe0, 0x03, 0x52, 0x03, 0x6e, 0x06, 0xe0, 0x03, 0x60, 0x03, 0x16, 0x03, 0xad, 0x18, 0xe0, 0x03, 0x2c, 0x03, 0x45, 0x03, 0x60, 0x06, 0x16, 0x03, 0x45, 0x03, 0xcf, 0x09, 0xe0, 0x03, 0x2c, 0x03, 0x7c, 0x03, 0xe0, 0x03, 0x16, 0x03, 0x20, 0x03, 0x0c, 0x03, 0x8c, 0x12, 0xe0, 0x03, 0xcf, 0x03, 0x20, 0x03, 0x7c, 0x27, 0xe0, 0x03, 0x60, 0x03, 0x16, 0x03, 0x38, 0x03, 0xbd, 0x1b, 0xe0, 0x03, 0x8c, 0x03, 0x20, 0x09, 00, 0x03, 0x20, 0x03, 0x2c, 0x12, 0xe0, 0x03, 0x8c, 0x03, 0x16, 0x03, 0x52, 0x1e, 0xe0, 0x03, 00, 0x1b, 0xe0, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 00, 0x0c, 0xe0, 0x03, 0x8c, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x45, 0x03, 0x0c, 0x03, 0x8c, 0x1b, 0xe0, 0x03, 0x16, 0x03, 0x45, 0x03, 0x8c, 0x24, 0xe0, 0x03, 0x52, 0x03, 0x20, 0x09, 0x60, 0x03, 0x38, 0x03, 0x20, 0x0c, 0xe0, 0x03, 0x60, 0x03, 0x9c, 0x0c, 0xe0, 0x03, 0x2c, 0x15, 0xe0, 0x03, 0xcf, 0x06, 0x20, 0x03, 0x2c, 0x03, 0x0c, 0x03, 0x45, 0x03, 0xcf, 0x1b, 0xe0, 0x03, 0xbd, 0x03, 0x45, 0x03, 0x16, 0x03, 0x20, 0x1e, 0xe0, 0x03, 0x7c, 0x03, 0x0c, 0x03, 0xad, 0x0c, 0xe0, 0x03, 0x38, 0x03, 0x7c, 0x06, 0xe0, 0x03, 0xcf, 0x03, 0x52, 0x03, 0x16, 0x06, 0x2c, 0x03, 0x20, 0x03, 0x52, 0x09, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 0x20, 0x06, 0x60, 0x03, 0x52, 0x06, 0x20, 0x12, 0xe0, 0x03, 0xad, 0x03, 0x60, 0x03, 0xcf, 0x15, 0xe0, 0x03, 0xbd, 0x03, 0x2c, 0x03, 0xcf, 0x03, 0x52, 0x03, 0x7c, 0xff, 0xe0, 0xff, 0xe0, 0x18, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x4e, 0xe0, 0x03, 0xbd, 0x03, 0x9c, 0x03, 0x60, 0x03, 0x9c, 0x03, 0x2c, 0x03, 0xbd, 0x06, 0xe0, 0x03, 0x45, 0x03, 0x2c, 0x03, 0x45, 0x15, 0xe0, 0x03, 0x9c, 0x03, 0xcf, 0x1e, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xad, 0x12, 0xe0, 0x03, 0xad, 0x09, 0xe0, 0x06, 0xad, 0x18, 0xe0, 0x03, 0xad, 0x03, 0x52, 0x27, 0xe0, 0x03, 0xad, 0x03, 0xbd, 0x48, 0xe0, 0x03, 0xad, 0x03, 0xbd, 0x21, 0xe0, 0x03, 0x60, 0x2a, 0xe0, 0x03, 0x60, 0x18, 0xe0, 0x03, 0xcf, 0x03, 0x60, 0x03, 0xcf, 0x4e, 0xe0, 0x03, 0xcf, 0x09, 0x9c, 0x03, 0xbd, 0x0c, 0xe0, 0x03, 0x2c, 0x03, 0x38, 0x03, 0xbd, 0x0c, 0xe0, 0x03, 0x2c, 0x1b, 0xe0, 0x03, 0xad, 0x03, 0x9c, 0x03, 0xcf, 0x2a, 0xe0, 0x03, 0xcf, 0x21, 0xe0, 0x03, 0x7c, 0x03, 0xcf, 0x0c, 0xe0, 0x03, 0xbd, 0x03, 0xcf, 0x2a, 0xe0, 0x03, 0xcf, 0x09, 0x9c, 0x39, 0xe0, 0x03, 0x8c, 0x03, 0x2c, 0x03, 0x60, 0xff, 0xe0, 0xff, 0xe0, 0x1b, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0x51, 0xe0, 0x03, 0xcf, 0x03, 0xbd, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x2a, 
0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0x03, 0xf3, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0xff, 0xe0, 0x81, 0xe0, 0x06, 0xf3, -0xff, 0xf3, 0xff, 0xf3, 0xff, 0xf3, 0xff, 0xf3, 0xff, 0xf3, 0x8a, 0xf3, - -}; diff --git a/osfmk/ppc/POWERMAC/rendered_numbers.c b/osfmk/ppc/POWERMAC/rendered_numbers.c deleted file mode 100644 index a30a90a0c..000000000 --- a/osfmk/ppc/POWERMAC/rendered_numbers.c +++ /dev/null @@ -1,374 +0,0 @@ -static const struct { - unsigned int 
num_w; - unsigned int num_h; - unsigned char num_pixel_data[0xac]; -} num_0 = { -/* w */ 9, -/* h */ 12, -/* pixel_data */ -0x1b, 0xe6, -0x06, 0xe6, 0x03, 0xcb, 0x03, 0x78, 0x03, 0x6c, 0x03, 0xb8, 0x09, 0xe6, -0x03, 0xe6, 0x03, 0xc9, 0x03, 0x1a, 0x03, 0x6e, 0x03, 0x84, 0x03, 0x16, 0x03, 0xa6, 0x06, 0xe6, -0x03, 0xe6, 0x03, 0x5a, 0x03, 0x58, 0x06, 0xe6, 0x03, 0x8f, 0x03, 0x26, 0x06, 0xe6, -0x03, 0xe6, 0x03, 0x09, 0x03, 0x9c, 0x06, 0xe6, 0x03, 0xd3, 0x03, 00, 0x03, 0xbb, 0x03, 0xe6, -0x03, 0xd3, 0x03, 00, 0x03, 0xb5, 0x09, 0xe6, 0x03, 0x04, 0x03, 0x9d, 0x03, 0xe6, -0x03, 0xc5, 0x03, 00, 0x03, 0xbd, 0x09, 0xe6, 0x03, 0x0b, 0x03, 0x91, 0x03, 0xe6, -0x03, 0xdc, 0x03, 00, 0x03, 0xae, 0x06, 0xe6, 0x03, 0xe2, 0x03, 00, 0x03, 0xa9, 0x03, 0xe6, -0x03, 0xe6, 0x03, 0x23, 0x03, 0x87, 0x06, 0xe6, 0x03, 0xba, 0x03, 0x03, 0x03, 0xd0, 0x03, 0xe6, -0x03, 0xe6, 0x03, 0x83, 0x03, 0x2e, 0x03, 0xe0, 0x03, 0xe6, 0x03, 0x57, 0x03, 0x4e, 0x06, 0xe6, -0x03, 0xe6, 0x03, 0xe4, 0x03, 0x58, 0x03, 0x1b, 0x03, 0x27, 0x03, 0x34, 0x03, 0xd6, 0x06, 0xe6, -0x09, 0xe6, 0x03, 0xd2, 0x03, 0xc9, 0x0c, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x72]; -} num_1 = { -/* w */ 7, -/* h */ 12, -/* pixel_data */ -0x15, 0xe6, -0x03, 0xe6, 0x03, 0xe5, 0x03, 0xbd, 0x03, 0x83, 0x03, 0xbc, 0x06, 0xe6, -0x03, 0xe6, 0x03, 0x36, 0x03, 0x31, 0x03, 0x03, 0x03, 0x9e, 0x06, 0xe6, -0x03, 0xe6, 0x03, 0xe0, 0x03, 0xe6, 0x03, 0x0b, 0x03, 0x9e, 0x06, 0xe6, -0x09, 0xe6, 0x03, 0x0b, 0x03, 0x9e, 0x06, 0xe6, -0x09, 0xe6, 0x03, 0x0b, 0x03, 0x9e, 0x06, 0xe6, -0x09, 0xe6, 0x03, 0x0b, 0x03, 0x9e, 0x06, 0xe6, -0x09, 0xe6, 0x03, 0x0b, 0x03, 0x9e, 0x06, 0xe6, -0x09, 0xe6, 0x03, 0x0b, 0x03, 0x9e, 0x06, 0xe6, -0x09, 0xe6, 0x03, 0x0b, 0x03, 0x9e, 0x06, 0xe6, -0x03, 0xe6, 0x03, 0x33, 0x03, 0x0d, 0x03, 00, 0x03, 0x09, 0x03, 0x0d, 0x03, 0xbd, -0x15, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x82]; -} num_2 = { -/* w */ 8, -/* h */ 12, -/* pixel_data */ -0x18, 0xe6, -0x03, 0xe6, 0x03, 0xe2, 0x03, 0xb4, 0x03, 0x7d, 0x03, 0x6d, 0x03, 0xb7, 0x06, 0xe6, -0x03, 0xe6, 0x03, 0xae, 0x03, 0x28, 0x03, 0x74, 0x03, 0x7b, 0x03, 0x1b, 0x03, 0x6d, 0x03, 0xe6, -0x03, 0xe6, 0x03, 0xe5, 0x09, 0xe6, 0x03, 0x93, 0x03, 0x11, 0x03, 0xde, -0x0f, 0xe6, 0x03, 0x9e, 0x03, 0x06, 0x03, 0xdc, -0x0c, 0xe6, 0x03, 0xe3, 0x03, 0x3b, 0x03, 0x51, 0x03, 0xe6, -0x09, 0xe6, 0x03, 0xe4, 0x03, 0x5c, 0x03, 0x3b, 0x03, 0xde, 0x03, 0xe6, -0x06, 0xe6, 0x03, 0xe5, 0x03, 0x55, 0x03, 0x40, 0x03, 0xdc, 0x06, 0xe6, -0x06, 0xe6, 0x03, 0x65, 0x03, 0x35, 0x03, 0xdd, 0x09, 0xe6, -0x03, 0xe6, 0x03, 0xb0, 0x03, 00, 0x03, 0x9d, 0x09, 0xcf, 0x03, 0xe4, -0x03, 0xe6, 0x03, 0x77, 0x0f, 00, 0x03, 0xcc, -0x18, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x8e]; -} num_3 = { -/* w */ 8, -/* h */ 12, -/* pixel_data */ -0x18, 0xe6, -0x06, 0xe6, 0x03, 0xa2, 0x03, 0x73, 0x03, 0x66, 0x03, 0x9f, 0x03, 0xdb, 0x03, 0xe6, -0x06, 0xe6, 0x03, 0x51, 0x03, 0x77, 0x03, 0x88, 0x03, 0x2e, 0x03, 0x4d, 0x03, 0xe6, -0x0f, 0xe6, 0x03, 0xaf, 0x03, 0x02, 0x03, 0xda, -0x0f, 0xe6, 0x03, 0x7b, 0x03, 0x29, 0x03, 0xe6, -0x06, 0xe6, 0x03, 0xd8, 0x03, 0x78, 0x03, 0x45, 0x03, 0x49, 0x03, 0xca, 0x03, 0xe6, -0x06, 0xe6, 0x03, 0xd7, 0x03, 0x76, 0x03, 0x42, 0x03, 0x24, 0x03, 0x8a, 0x03, 0xe6, -0x0f, 0xe6, 0x03, 0x9f, 0x03, 00, 0x03, 0xc5, -0x0f, 0xe6, 0x03, 0xe0, 0x03, 0x06, 0x03, 0x8a, -0x03, 0xe6, 0x03, 0xe5, 0x03, 0xcd, 0x06, 0xe6, 0x03, 0x8c, 0x03, 00, 0x03, 0xb6, 
-0x03, 0xe6, 0x03, 0xe2, 0x03, 0x03, 0x03, 0x20, 0x03, 0x22, 0x03, 0x29, 0x03, 0x86, 0x03, 0xe6, -0x06, 0xe6, 0x03, 0xe2, 0x03, 0xc6, 0x03, 0xc9, 0x09, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x80]; -} num_4 = { -/* w */ 8, -/* h */ 12, -/* pixel_data */ -0x18, 0xe6, -0x0f, 0xe6, 0x03, 0xc4, 0x03, 0x90, 0x03, 0xe6, -0x0c, 0xe6, 0x03, 0xdc, 0x03, 0x24, 0x03, 0x0a, 0x03, 0xe6, -0x0c, 0xe6, 0x03, 0x5a, 0x03, 0x3e, 0x03, 0x0a, 0x03, 0xe6, -0x09, 0xe6, 0x03, 0x98, 0x03, 0x29, 0x03, 0xbd, 0x03, 0x0b, 0x03, 0xe6, -0x06, 0xe6, 0x03, 0xc8, 0x03, 0x17, 0x03, 0xc1, 0x03, 0xc5, 0x03, 0x0b, 0x03, 0xe6, -0x03, 0xe6, 0x03, 0xe2, 0x03, 0x32, 0x03, 0x8f, 0x03, 0xe6, 0x03, 0xc5, 0x03, 0x0b, 0x03, 0xe6, -0x03, 0xe6, 0x03, 0x8a, 0x03, 0x07, 0x06, 0x49, 0x03, 0x3e, 0x03, 0x03, 0x03, 0x49, -0x03, 0xe6, 0x03, 0xc2, 0x09, 0x97, 0x03, 0x75, 0x03, 0x07, 0x03, 0x97, -0x0f, 0xe6, 0x03, 0xb2, 0x03, 0x0b, 0x03, 0xe6, -0x0f, 0xe6, 0x03, 0xb2, 0x03, 0x0b, 0x03, 0xe6, -0x18, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x82]; -} num_5 = { -/* w */ 8, -/* h */ 12, -/* pixel_data */ -0x18, 0xe6, -0x06, 0xe6, 0x03, 0xd7, 0x0c, 0x8a, 0x03, 0xaf, -0x06, 0xe6, 0x03, 0xc1, 0x03, 0x03, 0x09, 0x45, 0x03, 0x86, -0x06, 0xe6, 0x03, 0xc1, 0x03, 0x0d, 0x0c, 0xe6, -0x06, 0xe6, 0x03, 0xc1, 0x03, 0x0d, 0x0c, 0xe6, -0x06, 0xe6, 0x03, 0xc1, 0x03, 0x01, 0x03, 0x37, 0x03, 0x5e, 0x03, 0xc4, 0x03, 0xe6, -0x06, 0xe6, 0x03, 0xe0, 0x03, 0xc0, 0x03, 0x92, 0x03, 0x4c, 0x03, 0x08, 0x03, 0xbc, -0x03, 0xc6, 0x0c, 0xe6, 0x03, 0xe2, 0x03, 0x1b, 0x03, 0x56, -0x03, 0xd6, 0x0f, 0xe6, 0x03, 0x4b, 0x03, 0x3f, -0x06, 0xe6, 0x03, 0xe3, 0x06, 0xe6, 0x03, 0xcb, 0x03, 0x09, 0x03, 0x74, -0x06, 0xe6, 0x03, 0x9c, 0x03, 0x15, 0x03, 0x31, 0x03, 0x20, 0x03, 0x66, 0x03, 0xe0, -0x06, 0xe6, 0x03, 0xe2, 0x03, 0xc6, 0x03, 0xc2, 0x09, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x9c]; -} num_6 = { -/* w */ 9, -/* h */ 12, -/* pixel_data */ -0x1b, 0xe6, -0x0c, 0xe6, 0x03, 0xa9, 0x03, 0x6d, 0x03, 0x65, 0x03, 0x8d, 0x03, 0xdd, -0x09, 0xe6, 0x03, 0x6e, 0x03, 0x23, 0x03, 0x88, 0x03, 0x82, 0x03, 0x4c, 0x03, 0xcf, -0x06, 0xe6, 0x03, 0xbf, 0x03, 0x05, 0x03, 0xcd, 0x0c, 0xe6, -0x06, 0xe6, 0x03, 0x65, 0x03, 0x3d, 0x03, 0xe6, 0x03, 0xe5, 0x09, 0xe6, -0x06, 0xe6, 0x03, 0x46, 0x03, 0x50, 0x03, 0x56, 0x06, 0x15, 0x03, 0x86, 0x03, 0xe6, -0x06, 0xe6, 0x03, 0x32, 0x03, 0x03, 0x03, 0x9f, 0x03, 0xe2, 0x03, 0x99, 0x03, 0x02, 0x03, 0xce, -0x06, 0xe6, 0x03, 0x44, 0x03, 0x3f, 0x09, 0xe6, 0x03, 0x24, 0x03, 0x7b, -0x06, 0xe6, 0x03, 0x68, 0x03, 0x24, 0x09, 0xe6, 0x03, 0x37, 0x03, 0x71, -0x06, 0xe6, 0x03, 0xc2, 0x03, 00, 0x03, 0xa6, 0x03, 0xe6, 0x03, 0xc2, 0x03, 0x04, 0x03, 0xb6, -0x09, 0xe6, 0x03, 0x8f, 0x03, 0x06, 0x03, 0x26, 0x03, 0x09, 0x03, 0x84, 0x03, 0xe6, -0x0c, 0xe6, 0x03, 0xde, 0x03, 0xbc, 0x03, 0xdb, 0x06, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x68]; -} num_7 = { -/* w */ 8, -/* h */ 12, -/* pixel_data */ -0x18, 0xe6, -0x03, 0xe6, 0x03, 0xe2, 0x0f, 0x8a, 0x03, 0x8c, -0x03, 0xe6, 0x03, 0xdd, 0x0c, 0x32, 0x03, 0x24, 0x03, 0x07, -0x12, 0xe6, 0x03, 0x4d, 0x03, 0x67, -0x0f, 0xe6, 0x03, 0xa5, 0x03, 0x16, 0x03, 0xda, -0x0c, 0xe6, 0x03, 0xdd, 0x03, 0x1e, 0x03, 0x93, 0x03, 0xe6, -0x0c, 0xe6, 0x03, 0x6d, 0x03, 0x37, 0x06, 0xe6, -0x09, 0xe6, 0x03, 0xc8, 0x03, 0x05, 0x03, 0xb9, 0x06, 0xe6, -0x09, 0xe6, 0x03, 0x50, 0x03, 0x2a, 0x09, 
0xe6, -0x06, 0xe6, 0x03, 0xd0, 0x03, 00, 0x03, 0x7b, 0x09, 0xe6, -0x06, 0xe6, 0x03, 0x92, 0x03, 00, 0x03, 0xca, 0x09, 0xe6, -0x18, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0xa0]; -} num_8 = { -/* w */ 8, -/* h */ 12, -/* pixel_data */ -0x18, 0xe6, -0x09, 0xe6, 0x03, 0xd1, 0x03, 0x88, 0x03, 0x6b, 0x03, 0xae, 0x03, 0xe5, -0x06, 0xe6, 0x03, 0xd2, 0x03, 0x12, 0x03, 0x77, 0x03, 0x80, 0x03, 0x19, 0x03, 0x94, -0x06, 0xe6, 0x03, 0x76, 0x03, 0x56, 0x06, 0xe6, 0x03, 0x7f, 0x03, 0x46, -0x06, 0xe6, 0x03, 0x85, 0x03, 0x15, 0x03, 0xc3, 0x03, 0xe5, 0x03, 0x43, 0x03, 0x8c, -0x06, 0xe6, 0x03, 0xe2, 0x03, 0x4e, 0x03, 0x09, 0x03, 0x3a, 0x03, 0x76, 0x03, 0xe6, -0x06, 0xe6, 0x03, 0xcd, 0x03, 0x2b, 0x03, 0x60, 0x03, 0x0d, 0x03, 0x37, 0x03, 0xcd, -0x06, 0xe6, 0x03, 0x39, 0x03, 0x7f, 0x03, 0xe6, 0x03, 0xca, 0x03, 0x2f, 0x03, 0x30, -0x03, 0xe6, 0x03, 0xdb, 0x03, 0x02, 0x03, 0xb4, 0x06, 0xe6, 0x03, 0xb0, 0x03, 00, -0x03, 0xe6, 0x03, 0xe5, 0x03, 0x12, 0x03, 0x61, 0x03, 0xe4, 0x03, 0xe6, 0x03, 0x76, 0x03, 0x11, -0x06, 0xe6, 0x03, 0xa9, 0x03, 0x34, 0x03, 0x1b, 0x03, 0x24, 0x03, 0x3a, 0x03, 0xae, -0x0c, 0xe6, 0x03, 0xd1, 0x03, 0xd3, 0x06, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x96]; -} num_9 = { -/* w */ 8, -/* h */ 12, -/* pixel_data */ -0x18, 0xe6, -0x09, 0xe6, 0x03, 0xbf, 0x03, 0x6e, 0x03, 0x6c, 0x03, 0xb3, 0x03, 0xe6, -0x06, 0xe6, 0x03, 0xac, 0x03, 0x14, 0x03, 0x6e, 0x03, 0x63, 0x03, 0x0a, 0x03, 0x9b, -0x06, 0xe6, 0x03, 0x2f, 0x03, 0x70, 0x06, 0xe6, 0x03, 0x7c, 0x03, 0x1c, -0x03, 0xe6, 0x03, 0xe5, 0x03, 0x05, 0x03, 0xa2, 0x06, 0xe6, 0x03, 0xbd, 0x03, 00, -0x06, 0xe6, 0x03, 0x1a, 0x03, 0x6e, 0x06, 0xe6, 0x03, 0xa6, 0x03, 00, -0x06, 0xe6, 0x03, 0x88, 0x03, 0x12, 0x03, 0x80, 0x03, 0x90, 0x03, 0x39, 0x03, 00, -0x09, 0xe6, 0x03, 0x9c, 0x03, 0x52, 0x03, 0x6b, 0x03, 0x98, 0x03, 00, -0x03, 0xd1, 0x0f, 0xe6, 0x03, 0x74, 0x03, 0x19, -0x03, 0xe2, 0x03, 0xe6, 0x03, 0xde, 0x06, 0xe6, 0x03, 0xd3, 0x03, 0x23, 0x03, 0x85, -0x06, 0xe6, 0x03, 0x6c, 0x03, 0x15, 0x03, 0x37, 0x03, 0x14, 0x03, 0x79, 0x03, 0xe6, -0x09, 0xe6, 0x03, 0xcf, 0x03, 0xbc, 0x03, 0xe1, 0x06, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0xa4]; -} num_a = { -/* w */ 10, -/* h */ 12, -/* pixel_data */ -0x1e, 0xe6, -0x0c, 0xe6, 0x03, 0xe2, 0x03, 0x8a, 0x03, 0xbf, 0x09, 0xe6, -0x0c, 0xe6, 0x03, 0x9c, 0x03, 00, 0x03, 0x43, 0x09, 0xe6, -0x03, 0xe3, 0x09, 0xe6, 0x03, 0x40, 0x03, 0x24, 0x03, 0x02, 0x03, 0xcd, 0x06, 0xe6, -0x03, 0xba, 0x06, 0xe6, 0x03, 0xca, 0x03, 0x02, 0x03, 0xc0, 0x03, 0x0e, 0x03, 0x76, 0x06, 0xe6, -0x03, 0xa0, 0x06, 0xe6, 0x03, 0x70, 0x03, 0x3f, 0x03, 0xe6, 0x03, 0x60, 0x03, 0x1d, 0x06, 0xe6, -0x03, 0xa3, 0x03, 0xe6, 0x03, 0xe3, 0x03, 0x18, 0x03, 0x9a, 0x03, 0xe6, 0x03, 0xbb, 0x03, 00, 0x03, 0xa8, 0x03, 0xe6, -0x03, 0xc0, 0x03, 0xe6, 0x03, 0xa0, 0x03, 00, 0x03, 0x58, 0x06, 0x5d, 0x03, 0x08, 0x03, 0x4e, 0x03, 0xe6, -0x03, 0xe1, 0x03, 0xe6, 0x03, 0x45, 0x03, 0x49, 0x09, 0x8a, 0x03, 0x5e, 0x03, 0x06, 0x03, 0xd5, -0x03, 0xe6, 0x03, 0xcd, 0x03, 0x02, 0x03, 0xc4, 0x09, 0xe6, 0x03, 0xda, 0x03, 0x09, 0x03, 0x80, -0x03, 0xe6, 0x03, 0x75, 0x03, 0x39, 0x0f, 0xe6, 0x03, 0x59, 0x03, 0x27, -0x1e, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x98]; -} num_b = { -/* w */ 7, -/* h */ 12, -/* pixel_data */ -0x15, 0xe6, -0x03, 0xe6, 0x03, 0xae, 0x03, 0x8a, 0x03, 0x8c, 0x03, 0xb1, 0x03, 0xe1, 0x03, 0xe6, 
-0x03, 0xe6, 0x03, 0x58, 0x03, 0x1b, 0x03, 0x4c, 0x03, 0x1b, 0x03, 0x09, 0x03, 0xc0, -0x03, 0xe6, 0x03, 0x58, 0x03, 0x43, 0x03, 0xe6, 0x03, 0xe2, 0x03, 0x1f, 0x03, 0x5a, -0x03, 0xe6, 0x03, 0x58, 0x03, 0x43, 0x03, 0xe6, 0x03, 0xe4, 0x03, 0x21, 0x03, 0x7b, -0x03, 0xe6, 0x03, 0x58, 0x03, 0x31, 0x03, 0x90, 0x03, 0x49, 0x03, 0x52, 0x03, 0xd8, -0x03, 0xe6, 0x03, 0x58, 0x03, 0x1c, 0x03, 0x44, 0x03, 0x0d, 0x03, 0x73, 0x03, 0xdd, -0x03, 0xe6, 0x03, 0x58, 0x03, 0x43, 0x03, 0xe6, 0x03, 0xdd, 0x03, 0x24, 0x03, 0x49, -0x03, 0xe6, 0x03, 0x58, 0x03, 0x43, 0x06, 0xe6, 0x03, 0x89, 0x03, 0x05, -0x03, 0xe6, 0x03, 0x58, 0x03, 0x43, 0x03, 0xe2, 0x03, 0xcc, 0x03, 0x3e, 0x03, 0x2a, -0x03, 0xe6, 0x03, 0x58, 0x06, 00, 0x03, 0x09, 0x03, 0x2f, 0x03, 0xb7, -0x15, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x8a]; -} num_c = { -/* w */ 9, -/* h */ 12, -/* pixel_data */ -0x1b, 0xe6, -0x09, 0xe6, 0x03, 0xd5, 0x03, 0x8c, 0x03, 0x65, 0x03, 0x66, 0x03, 0x87, 0x03, 0xab, -0x06, 0xe6, 0x03, 0xa8, 0x03, 0x14, 0x03, 0x3d, 0x03, 0x7e, 0x03, 0x80, 0x03, 0x56, 0x03, 0x2a, -0x03, 0xe6, 0x03, 0xd9, 0x03, 0x14, 0x03, 0x61, 0x0f, 0xe6, -0x03, 0xe6, 0x03, 0x86, 0x03, 0x08, 0x03, 0xd6, 0x0f, 0xe6, -0x03, 0xe6, 0x03, 0x62, 0x03, 0x27, 0x12, 0xe6, -0x03, 0xe6, 0x03, 0x50, 0x03, 0x36, 0x12, 0xe6, -0x03, 0xe6, 0x03, 0x6e, 0x03, 0x19, 0x12, 0xe6, -0x03, 0xde, 0x03, 0xa1, 0x03, 00, 0x03, 0xaa, 0x0f, 0xe6, -0x06, 0xe6, 0x03, 0x37, 0x03, 0x1b, 0x03, 0xb2, 0x06, 0xe6, 0x03, 0xd5, 0x03, 0x8a, -0x06, 0xe6, 0x03, 0xd8, 0x03, 0x58, 0x03, 0x03, 0x03, 0x13, 0x03, 0x1d, 0x03, 0x24, 0x03, 0x61, -0x0c, 0xe6, 0x03, 0xe1, 0x03, 0xc0, 0x03, 0xd1, 0x06, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0xb2]; -} num_d = { -/* w */ 10, -/* h */ 12, -/* pixel_data */ -0x1e, 0xe6, -0x03, 0xe6, 0x03, 0xd9, 0x06, 0x8a, 0x03, 0x8b, 0x03, 0x9c, 0x03, 0xbb, 0x03, 0xdf, 0x06, 0xe6, -0x03, 0xe6, 0x03, 0xc5, 0x03, 00, 0x03, 0x49, 0x03, 0x55, 0x03, 0x3e, 0x03, 0x14, 0x03, 0x21, 0x03, 0xb6, 0x03, 0xe6, -0x03, 0xe6, 0x03, 0xc5, 0x03, 00, 0x03, 0xba, 0x06, 0xe6, 0x03, 0xda, 0x03, 0x49, 0x03, 0x0d, 0x03, 0xd4, -0x03, 0xe6, 0x03, 0xc5, 0x03, 00, 0x03, 0xba, 0x09, 0xe6, 0x03, 0xd4, 0x03, 0x09, 0x03, 0x79, -0x03, 0xe6, 0x03, 0xc5, 0x03, 00, 0x03, 0xba, 0x0c, 0xe6, 0x03, 0x2f, 0x03, 0x52, -0x03, 0xe6, 0x03, 0xc5, 0x03, 00, 0x03, 0xba, 0x0c, 0xe6, 0x03, 0x3c, 0x03, 0x4e, -0x03, 0xe6, 0x03, 0xc5, 0x03, 00, 0x03, 0xba, 0x0c, 0xe6, 0x03, 0x1d, 0x03, 0x70, -0x03, 0xe6, 0x03, 0xc5, 0x03, 00, 0x03, 0xba, 0x09, 0xe6, 0x03, 0xac, 0x03, 00, 0x03, 0xb6, -0x03, 0xe6, 0x03, 0xc5, 0x03, 00, 0x03, 0xba, 0x03, 0xe6, 0x03, 0xd1, 0x03, 0x9a, 0x03, 0x15, 0x03, 0x63, 0x03, 0xe6, -0x03, 0xe6, 0x03, 0xc5, 0x09, 00, 0x03, 0x12, 0x03, 0x37, 0x03, 0x95, 0x06, 0xe6, -0x1e, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x64]; -} num_e = { -/* w */ 7, -/* h */ 12, -/* pixel_data */ -0x15, 0xe6, -0x03, 0xe6, 0x03, 0xc1, 0x0c, 0x8a, 0x03, 0x99, -0x03, 0xe6, 0x03, 0x88, 0x03, 0x06, 0x09, 0x5b, 0x03, 0x71, -0x03, 0xe6, 0x03, 0x88, 0x03, 0x10, 0x0c, 0xe6, -0x03, 0xe6, 0x03, 0x88, 0x03, 0x10, 0x0c, 0xe6, -0x03, 0xe6, 0x03, 0x88, 0x03, 0x09, 0x09, 0x8d, 0x03, 0xd3, -0x03, 0xe6, 0x03, 0x88, 0x03, 0x06, 0x09, 0x5c, 0x03, 0xca, -0x03, 0xe6, 0x03, 0x88, 0x03, 0x10, 0x0c, 0xe6, -0x03, 0xe6, 0x03, 0x88, 0x03, 0x10, 0x0c, 0xe6, -0x03, 0xe6, 0x03, 0x88, 0x03, 0x10, 0x0c, 0xe6, -0x03, 0xe6, 0x03, 0x88, 0x0f, 00, -0x15, 0xe6, - -}; 
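The digit and letter glyphs in this hunk (num_0 through num_colon) and the panic-image data earlier in the file share one run-length scheme: the pixel data is a stream of (count, value) byte pairs, where value is an 8-bit level and count is three times the number of consecutive pixels to fill. For example, num_0 is 9 pixels wide and its solid background rows are the single pair 0x1b, 0xe6 (27 / 3 = 9 pixels of 0xe6); runs longer than 255 units appear to be split into repeated 0xff pairs. A minimal decoder sketch under those assumptions follows — the function name and the caller-supplied output buffer are illustrative and are not part of the deleted sources:

/*
 * Hypothetical helper: expand one RLE-encoded glyph into an 8-bit
 * buffer of w * h bytes, assuming the (count, value) pair encoding
 * described above, with counts carrying 3 units per pixel.
 */
static void
decode_rle_glyph(const unsigned char *rle, unsigned int w, unsigned int h,
		 unsigned char *out)
{
	unsigned int remaining = w * h;

	while (remaining > 0) {
		unsigned int count = *rle++ / 3;	/* 3 units per pixel */
		unsigned char value = *rle++;		/* 8-bit gray level */

		if (count > remaining)			/* clamp a malformed final run */
			count = remaining;
		remaining -= count;
		while (count--)
			*out++ = value;
	}
}

Decoding num_0 this way yields a 9 x 12 grayscale bitmap; drawing the result at the framebuffer's depth is handled by the blit_digit and panic_blit_rect_* routines declared later in this hunk, in the likewise-deleted video_console.c.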
-static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x66]; -} num_f = { -/* w */ 7, -/* h */ 12, -/* pixel_data */ -0x15, 0xe6, -0x03, 0xe6, 0x03, 0xc5, 0x0c, 0x8a, 0x03, 0x95, -0x03, 0xe6, 0x03, 0x93, 0x03, 0x01, 0x09, 0x5b, 0x03, 0x6b, -0x03, 0xe6, 0x03, 0x93, 0x03, 0x06, 0x0c, 0xe6, -0x03, 0xe6, 0x03, 0x93, 0x03, 0x06, 0x0c, 0xe6, -0x03, 0xe6, 0x03, 0x93, 0x03, 0x04, 0x09, 0xad, 0x03, 0xd7, -0x03, 0xe6, 0x03, 0x93, 0x03, 00, 0x09, 0x3d, 0x03, 0xba, -0x03, 0xe6, 0x03, 0x93, 0x03, 0x06, 0x0c, 0xe6, -0x03, 0xe6, 0x03, 0x93, 0x03, 0x06, 0x0c, 0xe6, -0x03, 0xe6, 0x03, 0x93, 0x03, 0x06, 0x0c, 0xe6, -0x03, 0xcb, 0x03, 0x93, 0x03, 0x06, 0x0c, 0xe6, -0x15, 0xe6, - -}; -static const struct { - unsigned int num_w; - unsigned int num_h; - unsigned char num_pixel_data[0x3c]; -} num_colon = { -/* w */ 4, -/* h */ 12, -/* pixel_data */ -0x0c, 0xe6, -0x0c, 0xe6, -0x0c, 0xe6, -0x0c, 0xe6, -0x03, 0xe6, 0x03, 0xc1, 0x03, 0x18, 0x03, 0xd6, -0x03, 0xe6, 0x03, 0xd7, 0x03, 0x93, 0x03, 0xe0, -0x0c, 0xe6, -0x0c, 0xe6, -0x0c, 0xe6, -0x03, 0xe6, 0x03, 0xdc, 0x03, 0xac, 0x03, 0xe2, -0x03, 0xe6, 0x03, 0xbe, 0x03, 00, 0x03, 0xd4, -0x0c, 0xe6, - -}; diff --git a/osfmk/ppc/POWERMAC/video_console.c b/osfmk/ppc/POWERMAC/video_console.c deleted file mode 100644 index 61eed76c7..000000000 --- a/osfmk/ppc/POWERMAC/video_console.c +++ /dev/null @@ -1,2923 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_FREE_COPYRIGHT@ - * - */ -/* - * @APPLE_FREE_COPYRIGHT@ - */ -/* MACH PPC - video_console.c - * - * Original based on NetBSD's mac68k/dev/ite.c driver - * - * This driver differs in - * - MACH driver"ized" - * - Uses phys_copy and flush_cache to in several places - * for performance optimizations - * - 7x15 font - * - Black background and white (character) foreground - * - Assumes 6100/7100/8100 class of machine - * - * The original header follows... - * - * - * NetBSD: ite.c,v 1.16 1995/07/17 01:24:34 briggs Exp - * - * Copyright (c) 1988 University of Utah. - * Copyright (c) 1990, 1993 - * The Regents of the University of California. All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * the Systems Programming Group of the University of Utah Computer - * Science Department. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: Utah $Hdr: ite.c 1.28 92/12/20$ - * - * @(#)ite.c 8.2 (Berkeley) 1/12/94 - */ - -/* - * ite.c - * - * The ite module handles the system console; that is, stuff printed - * by the kernel and by user programs while "desktop" and X aren't - * running. Some (very small) parts are based on hp300's 4.4 ite.c, - * hence the above copyright. - * - * -- Brad and Lawrence, June 26th, 1994 - * - */ - -#include - -#include -#include -#include /* spl definitions */ -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include - -#include "panic_image.c" -#include "rendered_numbers.c" - - -#define FAST_JUMP_SCROLL - -#define CHARWIDTH 8 -#define CHARHEIGHT 16 - -#define ATTR_NONE 0 -#define ATTR_BOLD 1 -#define ATTR_UNDER 2 -#define ATTR_REVERSE 4 - -enum vt100state_e { - ESnormal, /* Nothing yet */ - ESesc, /* Got ESC */ - ESsquare, /* Got ESC [ */ - ESgetpars, /* About to get or getting the parameters */ - ESgotpars, /* Finished getting the parameters */ - ESfunckey, /* Function key */ - EShash, /* DEC-specific stuff (screen align, etc.) 
*/ - ESsetG0, /* Specify the G0 character set */ - ESsetG1, /* Specify the G1 character set */ - ESask, - EScharsize, - ESignore /* Ignore this sequence */ -} vt100state = ESnormal; - -struct vc_info vinfo; - -/* Calculated in vccninit(): */ -static int vc_wrap_mode = 1, vc_relative_origin = 0; -static int vc_charset_select = 0, vc_save_charset_s = 0; -static int vc_charset[2] = { 0, 0 }; -static int vc_charset_save[2] = { 0, 0 }; - -/* VT100 state: */ -#define MAXPARS 16 -static int x = 0, y = 0, savex, savey; -static int par[MAXPARS], numpars, hanging_cursor, attr, saveattr; - -/* VT100 tab stops & scroll region */ -static char tab_stops[255]; -static int scrreg_top, scrreg_bottom; - -/* Misc */ -static void vc_initialize(void); -static void vc_flush_forward_buffer(void); -static void vc_store_char(unsigned char); -static void vc_putchar(char ch); - -void vcattach(void); - -/* panic dialog and info saving */ -int mac_addr_digit_x; -int mac_addr_digit_y; -static void blit_digit( int digit ); -boolean_t panicDialogDrawn = FALSE; - -static void -panic_blit_rect( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - int transparent, unsigned char * dataPtr ); - -static void -panic_blit_rect_8( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - int transparent, unsigned char * dataPtr ); - -static void -panic_blit_rect_16( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - int transparent, unsigned char * dataPtr ); - -static void -panic_blit_rect_32( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - int transparent, unsigned char * dataPtr ); - -static void -blit_rect_of_size_and_color( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - unsigned int dataPtr ); - -static void -dim_screen(void); - -/*static void -dim_screen8(void); -*/ - -static void -dim_screen16(void); - -static void -dim_screen32(void); - - -/* - * For the color support (Michel Pollet) - */ -static unsigned char vc_color_index_table[33] = - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 }; - -static unsigned long vc_color_depth_masks[4] = - { 0x000000FF, 0x00007FFF, 0x00FFFFFF }; - -static unsigned long vc_colors[8][3] = { - { 0xFFFFFFFF, 0x00000000, 0x00000000 }, /* black */ - { 0x23232323, 0x7C007C00, 0x00FF0000 }, /* red */ - { 0xb9b9b9b9, 0x03e003e0, 0x0000FF00 }, /* green */ - { 0x05050505, 0x7FE07FE0, 0x00FFFF00 }, /* yellow */ - { 0xd2d2d2d2, 0x001f001f, 0x000000FF}, /* blue */ -// { 0x80808080, 0x31933193, 0x00666699 }, /* blue */ - { 0x18181818, 0x7C1F7C1F, 0x00FF00FF }, /* magenta */ - { 0xb4b4b4b4, 0x03FF03FF, 0x0000FFFF }, /* cyan */ - { 0x00000000, 0x7FFF7FFF, 0x00FFFFFF } /* white */ -}; - -static unsigned long vc_color_mask = 0; -static unsigned long vc_color_fore = 0; -static unsigned long vc_color_back = 0; -static int vc_normal_background = 1; - - -/* - * For the jump scroll and buffering (Michel Pollet) - * 80*22 means on a 80*24 screen, the screen will - * scroll jump almost a full screen - * keeping only what's necessary for you to be able to read ;-) - */ -#define VC_MAX_FORWARD_SIZE (100*36) - -/* - * Delay between console updates in clock hz units, the larger the - * delay the fuller the jump-scroll buffer will be and so the faster the - * (scrolling) output. The smaller the delay, the less jerky the - * display. Heuristics show that at 10 touch-typists (Mike!) 
complain - */ -#define VC_CONSOLE_UPDATE_TIMEOUT 5 - -static unsigned char vc_forward_buffer[VC_MAX_FORWARD_SIZE]; -static long vc_forward_buffer_size = 0; -static int vc_forward_buffer_enabled = 0; -static int vc_forward_buffer_busy = 0; -decl_simple_lock_data(,vc_forward_lock) - -#ifdef FAST_JUMP_SCROLL -static void (*vc_forward_paintchar) (unsigned char c, int x, int y, int attrs); -static enum { - PFoff, - PFwind, - PFscroll, - PFunwind -} vc_forward_preflight_mode = PFoff; -static struct { - enum vt100state_e vt100state; - - int vc_wrap_mode, vc_relative_origin; - int vc_charset_select, vc_save_charset_s; - int vc_charset[2]; - int vc_charset_save[2]; - - int x, y, savex, savey; - int par[MAXPARS], numpars, hanging_cursor, attr, saveattr; - - char tab_stops[255]; - int scrreg_top, scrreg_bottom; - - unsigned long vc_color_fore; - unsigned long vc_color_back; -} vc_forward_preflight_save; -static int vc_forward_scroll = 0; -#endif FAST_JUMP_SCROLL - -/* - * New Rendering code from Michel Pollet - */ - -/* That function will be called for drawing */ -static void (*vc_paintchar) (unsigned char c, int x, int y, int attrs); - -#ifdef RENDERALLOCATE -static unsigned char *renderedFont = NULL; /* rendered font buffer */ -#else -#define REN_MAX_DEPTH 32 -/* that's the size for a 32 bits buffer... */ -#define REN_MAX_SIZE (128L*1024) -static unsigned char renderedFont[REN_MAX_SIZE]; -#endif - -/* Rendered Font Size */ -static unsigned long vc_rendered_font_size = REN_MAX_SIZE; -static long vc_rendered_error = 0; - -/* If the one bit table was reversed */ -static short vc_one_bit_reversed = 0; - -/* Size of a character in the table (bytes) */ -static int vc_rendered_char_size = 0; - -/* -# Attribute codes: -# 00=none 01=bold 04=underscore 05=blink 07=reverse 08=concealed -# Text color codes: -# 30=black 31=red 32=green 33=yellow 34=blue 35=magenta 36=cyan 37=white -# Background color codes: -# 40=black 41=red 42=green 43=yellow 44=blue 45=magenta 46=cyan 47=white -*/ - -#define VC_RESET_BACKGROUND 40 -#define VC_RESET_FOREGROUND 37 - -static void vc_color_set(int color) -{ - if (vinfo.v_depth < 8) - return; - if (color >= 30 && color <= 37) - vc_color_fore = vc_colors[color-30][vc_color_index_table[vinfo.v_depth]]; - if (color >= 40 && color <= 47) { - vc_color_back = vc_colors[color-40][vc_color_index_table[vinfo.v_depth]]; - vc_normal_background = color == 40; - } - -} - -static void vc_render_font(short olddepth, short newdepth) -{ - int charIndex; /* index in ISO font */ - union { - unsigned char *charptr; - unsigned short *shortptr; - unsigned long *longptr; - } current; /* current place in rendered font, multiple types. 
*/ - - unsigned char *theChar; /* current char in iso_font */ - - if (olddepth == newdepth && renderedFont) { - return; /* nothing to do */ - } - - if (olddepth != 1 && renderedFont) { -#ifdef RENDERALLOCATE - (void) kmem_free(kernel_map, (vm_offset_t*)renderedFont, vc_rendered_font_size); -#endif - } - vc_rendered_font_size = REN_MAX_SIZE; - if (newdepth == 1) { -#ifdef RENDERALLOCATE - renderedFont = iso_font; -#endif - vc_rendered_char_size = 16; - if (!vc_one_bit_reversed) { /* reverse the font for the blitter */ - int i; - for (i = 0; i < ((ISO_CHAR_MAX-ISO_CHAR_MIN+1) * vc_rendered_char_size); i++) { - if (iso_font[i]) { - unsigned char mask1 = 0x80; - unsigned char mask2 = 0x01; - unsigned char val = 0; - while (mask1) { - if (iso_font[i] & mask1) - val |= mask2; - mask1 >>= 1; - mask2 <<= 1; - } - renderedFont[i] = ~val; - } else renderedFont[i] = 0xff; - } - vc_one_bit_reversed = 1; - } - return; - } - { - long csize = newdepth / 8; /* bytes per pixel */ - vc_rendered_char_size = csize ? CHARHEIGHT * (csize * CHARWIDTH) : - /* for 2 & 4 */ CHARHEIGHT * (CHARWIDTH/(6-newdepth)); - csize = (ISO_CHAR_MAX-ISO_CHAR_MIN+1) * vc_rendered_char_size; -#ifndef RENDERALLOCATE - if (csize > vc_rendered_font_size) { - vc_rendered_error = csize; - return; - } else - vc_rendered_font_size = csize; -#else - vc_rendered_font_size = csize; -#endif - } - -#ifdef RENDERALLOCATE - if (kmem_alloc(kernel_map, - (vm_offset_t *)&renderedFont, - vc_rendered_font_size) != KERN_SUCCESS) { - renderedFont = NULL; - vc_rendered_error = vc_rendered_font_size; - return; - } -#endif - current.charptr = renderedFont; - theChar = iso_font; - for (charIndex = ISO_CHAR_MIN; charIndex <= ISO_CHAR_MAX; charIndex++) { - int line; - for (line = 0; line < CHARHEIGHT; line++) { - unsigned char mask = 1; - do { - switch (newdepth) { - case 2: { - unsigned char value = 0; - if (*theChar & mask) value |= 0xC0; mask <<= 1; - if (*theChar & mask) value |= 0x30; mask <<= 1; - if (*theChar & mask) value |= 0x0C; mask <<= 1; - if (*theChar & mask) value |= 0x03; - value = ~value; - *current.charptr++ = value; - } - break; - case 4: - { - unsigned char value = 0; - if (*theChar & mask) value |= 0xF0; mask <<= 1; - if (*theChar & mask) value |= 0x0F; - value = ~value; - *current.charptr++ = value; - } - break; - case 8: - *current.charptr++ = (*theChar & mask) ? 0xff : 0; - break; - case 16: - *current.shortptr++ = (*theChar & mask) ? 0xFFFF : 0; - break; - - case 32: - *current.longptr++ = (*theChar & mask) ? 0xFFFFFFFF : 0; - break; - } - mask <<= 1; - } while (mask); /* while the single bit drops to the right */ - theChar++; - } - } -} - -#ifdef FAST_JUMP_SCROLL -static void vc_paint_char(unsigned char ch, int xx, int yy, int attrs) -{ - switch (vc_forward_preflight_mode) { - case PFoff: - vc_forward_paintchar(ch, xx, yy, attrs); - break; - case PFwind: - break; - case PFscroll: - break; - case PFunwind: - if (yy >= scrreg_top && yy < scrreg_bottom) { - yy -= vc_forward_scroll; - if (yy < scrreg_top || yy >= scrreg_bottom) - break; - } - vc_forward_paintchar(ch, xx, yy, attrs); - break; - } -} -#endif FAST_JUMP_SCROLL - -static void vc_paint_char1(unsigned char ch, int xx, int yy, int attrs) -{ - unsigned char *theChar; - unsigned char *where; - int i; - - theChar = (unsigned char*)(renderedFont + (ch * vc_rendered_char_size)); - where = (unsigned char*)(vinfo.v_baseaddr + - (yy * CHARHEIGHT * vinfo.v_rowbytes) + - (xx)); - - if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attributes ? FLY !!!! 
*/ - *where = *theChar++; - - where = (unsigned char*)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ - unsigned char val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - unsigned char mask1 = 0xC0, mask2 = 0x40; - int bit = 0; - for (bit = 0; bit < 7; bit++) { - if ((save & mask1) == mask2) - val &= ~mask2; - mask1 >>= 1; - mask2 >>= 1; - } - } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; - *where = val; - - where = (unsigned char*)(((unsigned char*)where)+vinfo.v_rowbytes); - } - -} - -static void vc_paint_char2(unsigned char ch, int xx, int yy, int attrs) -{ - unsigned short *theChar; - unsigned short *where; - int i; - - theChar = (unsigned short*)(renderedFont + (ch * vc_rendered_char_size)); - where = (unsigned short*)(vinfo.v_baseaddr + - (yy * CHARHEIGHT * vinfo.v_rowbytes) + - (xx * 2)); - if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attributes ? FLY !!!! */ - *where = *theChar++; - - where = (unsigned short*)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ - unsigned short val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - unsigned short mask1 = 0xF000, mask2 = 0x3000; - int bit = 0; - for (bit = 0; bit < 7; bit++) { - if ((save & mask1) == mask2) - val &= ~mask2; - mask1 >>= 2; - mask2 >>= 2; - } - } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; - *where = val; - - where = (unsigned short*)(((unsigned char*)where)+vinfo.v_rowbytes); - } - -} - -static void vc_paint_char4(unsigned char ch, int xx, int yy, int attrs) -{ - unsigned long *theChar; - unsigned long *where; - int i; - - theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); - where = (unsigned long*)(vinfo.v_baseaddr + - (yy * CHARHEIGHT * vinfo.v_rowbytes) + - (xx * 4)); - - if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attributes ? FLY !!!! */ - *where = *theChar++; - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ - unsigned long val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - unsigned long mask1 = 0xff000000, mask2 = 0x0F000000; - int bit = 0; - for (bit = 0; bit < 7; bit++) { - if ((save & mask1) == mask2) - val &= ~mask2; - mask1 >>= 4; - mask2 >>= 4; - } - } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; - *where = val; - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } - -} - -static void vc_paint_char8c(unsigned char ch, int xx, int yy, int attrs) -{ - unsigned long *theChar; - unsigned long *where; - int i; - - theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); - where = (unsigned long*)(vinfo.v_baseaddr + - (yy * CHARHEIGHT * vinfo.v_rowbytes) + - (xx * CHARWIDTH)); - - if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attr? 
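
Annotation (not part of the patch): the attribute loops above synthesize bold by smearing each ink pixel one position to the right, scanning a two-bit window per pixel pair; reverse inverts every scanline and underline inverts only the glyph's last one. With the conventional 1-equals-ink encoding the smear collapses to a single expression; the driver's depth-1 font is stored inverted (0 equals ink), so it clears bits instead, with the same visual result:

    static unsigned char demo_embolden(unsigned char row)
    {
        return row | (row >> 1);        /* smear ink one pixel rightward */
    }
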
FLY !*/ - unsigned long *store = where; - int x; - for (x = 0; x < 2; x++) { - unsigned long val = *theChar++; - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - } - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < CHARHEIGHT; i++) { /* a little slower */ - unsigned long *store = where, lastpixel = 0; - int x; - for (x = 0 ; x < 2; x++) { - unsigned long val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - if (lastpixel && !(save & 0xFF000000)) - val |= 0xff000000; - if ((save & 0xFFFF0000) == 0xFF000000) - val |= 0x00FF0000; - if ((save & 0x00FFFF00) == 0x00FF0000) - val |= 0x0000FF00; - if ((save & 0x0000FFFF) == 0x0000FF00) - val |= 0x000000FF; - } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; - - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - lastpixel = save & 0xff; - } - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } - -} -static void vc_paint_char16c(unsigned char ch, int xx, int yy, int attrs) -{ - unsigned long *theChar; - unsigned long *where; - int i; - - theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); - where = (unsigned long*)(vinfo.v_baseaddr + - (yy * CHARHEIGHT * vinfo.v_rowbytes) + - (xx * CHARWIDTH * 2)); - - if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attrs ? FLY ! */ - unsigned long *store = where; - int x; - for (x = 0; x < 4; x++) { - unsigned long val = *theChar++; - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - } - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ - unsigned long *store = where, lastpixel = 0; - int x; - for (x = 0 ; x < 4; x++) { - unsigned long val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - if (save == 0xFFFF0000) val |= 0xFFFF; - else if (lastpixel && !(save & 0xFFFF0000)) - val |= 0xFFFF0000; - } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; - - val = (vc_color_back & ~val) | (vc_color_fore & val); - - *store++ = val; - lastpixel = save & 0x7fff; - } - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } - -} -static void vc_paint_char32c(unsigned char ch, int xx, int yy, int attrs) -{ - unsigned long *theChar; - unsigned long *where; - int i; - - theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); - where = (unsigned long*)(vinfo.v_baseaddr + - (yy * CHARHEIGHT * vinfo.v_rowbytes) + - (xx * CHARWIDTH * 4)); - - if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attrs ? FLY ! 
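
Annotation (not part of the patch): the colour painters above exploit the pre-rendered glyph as a per-pixel selection mask, foreground where the glyph bits are set and background elsewhere, handling four 8bpp pixels per 32-bit operation with no branch in the inner loop. The selection step isolated (demo_ name hypothetical):

    static unsigned long demo_colorize(unsigned long mask,
                                       unsigned long fore,
                                       unsigned long back)
    {
        /* the driver's (vc_color_back & ~val) | (vc_color_fore & val) */
        return (back & ~mask) | (fore & mask);
    }
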
*/ - unsigned long *store = where; - int x; - for (x = 0; x < 8; x++) { - unsigned long val = *theChar++; - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - } - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < CHARHEIGHT; i++) { /* a little slower */ - unsigned long *store = where, lastpixel = 0; - int x; - for (x = 0 ; x < 8; x++) { - unsigned long val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - if (lastpixel && !save) - val = 0xFFFFFFFF; - } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; - - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - lastpixel = save; - } - - where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); - } - -} - -/* - * That's a plain dumb reverse of the cursor position - * It do a binary reverse, so it will not looks good when we have - * color support. we'll see that later - */ -static void reversecursor(void) -{ - union { - unsigned char *charptr; - unsigned short *shortptr; - unsigned long *longptr; - } where; - int line, col; - - where.longptr = (unsigned long*)(vinfo.v_baseaddr + - (y * CHARHEIGHT * vinfo.v_rowbytes) + - (x /** CHARWIDTH*/ * vinfo.v_depth)); - for (line = 0; line < CHARHEIGHT; line++) { - switch (vinfo.v_depth) { - case 1: - *where.charptr = ~*where.charptr; - break; - case 2: - *where.shortptr = ~*where.shortptr; - break; - case 4: - *where.longptr = ~*where.longptr; - break; -/* that code still exists because since characters on the screen are - * of different colors that reverse function may not work if the - * cursor is on a character that is in a different color that the - * current one. When we have buffering, things will work better. MP - */ -#ifdef VC_BINARY_REVERSE - case 8: - where.longptr[0] = ~where.longptr[0]; - where.longptr[1] = ~where.longptr[1]; - break; - case 16: - for (col = 0; col < 4; col++) - where.longptr[col] = ~where.longptr[col]; - break; - case 32: - for (col = 0; col < 8; col++) - where.longptr[col] = ~where.longptr[col]; - break; -#else - case 8: - for (col = 0; col < 8; col++) - where.charptr[col] = where.charptr[col] != (vc_color_fore & vc_color_mask) ? - vc_color_fore & vc_color_mask : vc_color_back & vc_color_mask; - break; - case 16: - for (col = 0; col < 8; col++) - where.shortptr[col] = where.shortptr[col] != (vc_color_fore & vc_color_mask) ? - vc_color_fore & vc_color_mask : vc_color_back & vc_color_mask; - break; - case 32: - for (col = 0; col < 8; col++) - where.longptr[col] = where.longptr[col] != (vc_color_fore & vc_color_mask) ? 
- vc_color_fore & vc_color_mask : vc_color_back & vc_color_mask; - break; -#endif - } - where.charptr += vinfo.v_rowbytes; - } -} - - -static void -scrollup(int num) -{ - unsigned long *from, *to, linelongs, i, line, rowline, rowscanline; - - linelongs = (vinfo.v_rowbytes * CHARHEIGHT) >> 2; - rowline = (vinfo.v_rowbytes) >> 2; - rowscanline = (vinfo.v_rowscanbytes) >> 2; - -#ifdef FAST_JUMP_SCROLL - if (vc_forward_preflight_mode == PFwind) { - vc_forward_scroll += num; - return; - } - if (vc_forward_preflight_mode == PFscroll || vc_forward_preflight_mode == PFoff) { -#endif FAST_JUMP_SCROLL - - to = (unsigned long *) vinfo.v_baseaddr + (scrreg_top * linelongs); - from = to + (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ - - i = (scrreg_bottom - scrreg_top) - num; - - while (i-- > 0) { - for (line = 0; line < CHARHEIGHT; line++) { - /* - * Only copy what is displayed - */ - video_scroll_up((unsigned int) from, - (unsigned int) (from+(vinfo.v_rowscanbytes/4)), - (unsigned int) to); - - from += rowline; - to += rowline; - } - } - - /* Now set the freed up lines to the background colour */ - - - to = ((unsigned long *) vinfo.v_baseaddr + (scrreg_top * linelongs)) - + ((scrreg_bottom - scrreg_top - num) * linelongs); - -#ifdef FAST_JUMP_SCROLL - if (vc_forward_preflight_mode == PFscroll) - return; - } else if (vc_forward_preflight_mode == PFunwind) { - long linestart, linelast; - vc_forward_scroll -= num; - - linestart = scrreg_bottom - num - vc_forward_scroll; - linelast = linestart + num - 1; - - if (linestart >= scrreg_bottom || linelast < scrreg_top) - return; - - if (linelast >= scrreg_bottom) - linelast = scrreg_bottom - 1; - if (linestart < scrreg_top) - linestart = scrreg_top; - - to = ((unsigned long *) vinfo.v_baseaddr) + (linelongs * linestart); - num = linelast - linestart + 1; - } -#endif FAST_JUMP_SCROLL - - for (linelongs = CHARHEIGHT * num; linelongs-- > 0;) { - from = to; - for (i = 0; i < rowscanline; i++) - *to++ = vc_color_back; - - to = from + rowline; - } - -} - -static void -scrolldown(int num) -{ - unsigned long *from, *to, linelongs, i, line, rowline, rowscanline; - - linelongs = (vinfo.v_rowbytes * CHARHEIGHT) >> 2; - rowline = (vinfo.v_rowbytes) >> 2; - rowscanline = (vinfo.v_rowscanbytes) >> 2; - -#ifdef FAST_JUMP_SCROLL - if (vc_forward_preflight_mode == PFwind) { - vc_forward_scroll -= num; - return; - } - if (vc_forward_preflight_mode == PFscroll || vc_forward_preflight_mode == PFoff) { -#endif FAST_JUMP_SCROLL - - to = (unsigned long *) vinfo.v_baseaddr + (linelongs * scrreg_bottom) - - (rowline - rowscanline); - from = to - (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ - - i = (scrreg_bottom - scrreg_top) - num; - - while (i-- > 0) { - for (line = 0; line < CHARHEIGHT; line++) { - /* - * Only copy what is displayed - */ - video_scroll_down((unsigned int) from, - (unsigned int) (from-(vinfo.v_rowscanbytes/4)), - (unsigned int) to); - - from -= rowline; - to -= rowline; - } - } - - /* Now set the freed up lines to the background colour */ - - to = (unsigned long *) vinfo.v_baseaddr + (linelongs * scrreg_top); - -#ifdef FAST_JUMP_SCROLL - if (vc_forward_preflight_mode == PFscroll) - return; - } else if (vc_forward_preflight_mode == PFunwind) { - long linestart, linelast; - vc_forward_scroll += num; - - linestart = scrreg_top - vc_forward_scroll; - linelast = linestart + num - 1; - - if (linestart >= scrreg_bottom || linelast < scrreg_top) - return; - - if (linelast >= scrreg_bottom) - linelast = scrreg_bottom - 1; - if 
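
Annotation (not part of the patch): scrollup/scrolldown move the scroll region with video_scroll_up/down and repaint only the freed band; note they copy just v_rowscanbytes per scanline, skipping the tail of each row that PCI (VCI) systems reserve for the hardware cursor. A user-space sketch of the same shape (demo_ names hypothetical; 0 stands in for vc_color_back):

    #include <string.h>

    static void demo_scrollup(unsigned char *base, int rowbytes, int rowscan,
                              int charheight, int top, int bottom, int lines)
    {
        unsigned char *to = base + top * charheight * rowbytes;
        unsigned char *from = to + lines * charheight * rowbytes;
        int n = (bottom - top - lines) * charheight;
        int i;

        for (i = 0; i < n; i++) {       /* copy only what is displayed */
            memmove(to, from, rowscan);
            to += rowbytes;
            from += rowbytes;
        }
        for (i = 0; i < lines * charheight; i++) {
            memset(to, 0, rowscan);     /* paint the freed lines */
            to += rowbytes;
        }
    }
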
(linestart < scrreg_top) - linestart = scrreg_top; - - to = ((unsigned long *) vinfo.v_baseaddr) + (linelongs * linestart); - num = linelast - linestart + 1; - } -#endif FAST_JUMP_SCROLL - - for (line = CHARHEIGHT * num; line > 0; line--) { - from = to; - - for (i = 0; i < rowscanline; i++) - *(to++) = vc_color_back; - - to = from + rowline; - } - -} - - -static void -clear_line(int which) -{ - int start, end, i; - - /* - * This routine runs extremely slowly. I don't think it's - * used all that often, except for To end of line. I'll go - * back and speed this up when I speed up the whole vc - * module. --LK - */ - - switch (which) { - case 0: /* To end of line */ - start = x; - end = vinfo.v_columns-1; - break; - case 1: /* To start of line */ - start = 0; - end = x; - break; - case 2: /* Whole line */ - start = 0; - end = vinfo.v_columns-1; - break; - } - - for (i = start; i <= end; i++) { - vc_paintchar(' ', i, y, ATTR_NONE); - } - -} - -static void -clear_screen(int which) -{ - unsigned long *p, *endp, *row; - int linelongs, col; - int rowline, rowlongs; - - rowline = vinfo.v_rowscanbytes / 4; - rowlongs = vinfo.v_rowbytes / 4; - - p = (unsigned long*) vinfo.v_baseaddr;; - endp = (unsigned long*) vinfo.v_baseaddr; - - linelongs = vinfo.v_rowbytes * CHARHEIGHT / 4; - - switch (which) { - case 0: /* To end of screen */ - clear_line(0); - if (y < vinfo.v_rows - 1) { - p += (y + 1) * linelongs; - endp += rowlongs * vinfo.v_height; - } - break; - case 1: /* To start of screen */ - clear_line(1); - if (y > 1) { - endp += (y + 1) * linelongs; - } - break; - case 2: /* Whole screen */ - endp += rowlongs * vinfo.v_height; - break; - } - - for (row = p ; row < endp ; row += rowlongs) { - for (col = 0; col < rowline; col++) - *(row+col) = vc_color_back; - } - -} - -static void -reset_tabs(void) -{ - int i; - - for (i = 0; i<= vinfo.v_columns; i++) { - tab_stops[i] = ((i % 8) == 0); - } - -} - -static void -vt100_reset(void) -{ - reset_tabs(); - scrreg_top = 0; - scrreg_bottom = vinfo.v_rows; - attr = ATTR_NONE; - vc_charset[0] = vc_charset[1] = 0; - vc_charset_select = 0; - vc_wrap_mode = 1; - vc_relative_origin = 0; - vc_color_set(VC_RESET_BACKGROUND); - vc_color_set(VC_RESET_FOREGROUND); - -} - -static void -putc_normal(unsigned char ch) -{ - switch (ch) { - case '\a': /* Beep */ - { - spl_t s; - - if(FALSE) { - /* - * No sound hardware, invert the screen twice instead - */ - unsigned long *ptr; - int i, j; - /* XOR the screen twice */ - for (i = 0; i < 2 ; i++) { - /* For each row, xor the scanbytes */ - for (ptr = (unsigned long*)vinfo.v_baseaddr; - ptr < (unsigned long*)(vinfo.v_baseaddr + - (vinfo.v_height * vinfo.v_rowbytes)); - ptr += (vinfo.v_rowbytes / - sizeof (unsigned long*))) - for (j = 0; - j < vinfo.v_rowscanbytes / - sizeof (unsigned long*); - j++) - *(ptr+j) =~*(ptr+j); - } - } - } - break; - - case 127: /* Delete */ - case '\b': /* Backspace */ - if (hanging_cursor) { - hanging_cursor = 0; - } else - if (x > 0) { - x--; - } - break; - case '\t': /* Tab */ - while (x < vinfo.v_columns && !tab_stops[++x]); - if (x >= vinfo.v_columns) - x = vinfo.v_columns-1; - break; - case 0x0b: - case 0x0c: - case '\n': /* Line feed */ - if (y >= scrreg_bottom -1 ) { - scrollup(1); - y = scrreg_bottom - 1; - } else { - y++; - } - break; - case '\r': /* Carriage return */ - x = 0; - hanging_cursor = 0; - break; - case 0x0e: /* Select G1 charset (Control-N) */ - vc_charset_select = 1; - break; - case 0x0f: /* Select G0 charset (Control-O) */ - vc_charset_select = 0; - break; - case 0x18 : /* CAN 
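
Annotation (not part of the patch): reset_tabs above places a stop every eighth column (the table holds v_columns + 1 entries), and putc_normal's '\t' case advances to the next set entry, clamping at the last column. A sketch with hypothetical demo_ names:

    static void demo_reset_tabs(char stops[], int columns)
    {
        int i;
        for (i = 0; i <= columns; i++)      /* columns + 1 entries */
            stops[i] = (i % 8) == 0;
    }

    static int demo_next_tab(const char stops[], int x, int columns)
    {
        while (x < columns && !stops[++x])
            ;                               /* scan for the next stop */
        return (x >= columns) ? columns - 1 : x;
    }
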
: cancel */ - case 0x1A : /* like cancel */ - /* well, i do nothing here, may be later */ - break; - case '\033': /* Escape */ - vt100state = ESesc; - hanging_cursor = 0; - break; - default: - if (ch >= ' ') { - if (hanging_cursor) { - x = 0; - if (y >= scrreg_bottom -1 ) { - scrollup(1); - y = scrreg_bottom - 1; - } else { - y++; - } - hanging_cursor = 0; - } - vc_paintchar((ch >= 0x60 && ch <= 0x7f) ? ch + vc_charset[vc_charset_select] - : ch, x, y, attr); - if (x == vinfo.v_columns - 1) { - hanging_cursor = vc_wrap_mode; - } else { - x++; - } - } - break; - } - -} - -static void -putc_esc(unsigned char ch) -{ - vt100state = ESnormal; - - switch (ch) { - case '[': - vt100state = ESsquare; - break; - case 'c': /* Reset terminal */ - vt100_reset(); - clear_screen(2); - x = y = 0; - break; - case 'D': /* Line feed */ - case 'E': - if (y >= scrreg_bottom -1) { - scrollup(1); - y = scrreg_bottom - 1; - } else { - y++; - } - if (ch == 'E') x = 0; - break; - case 'H': /* Set tab stop */ - tab_stops[x] = 1; - break; - case 'M': /* Cursor up */ - if (y <= scrreg_top) { - scrolldown(1); - y = scrreg_top; - } else { - y--; - } - break; - case '>': - vt100_reset(); - break; - case '7': /* Save cursor */ - savex = x; - savey = y; - saveattr = attr; - vc_save_charset_s = vc_charset_select; - vc_charset_save[0] = vc_charset[0]; - vc_charset_save[1] = vc_charset[1]; - break; - case '8': /* Restore cursor */ - x = savex; - y = savey; - attr = saveattr; - vc_charset_select = vc_save_charset_s; - vc_charset[0] = vc_charset_save[0]; - vc_charset[1] = vc_charset_save[1]; - break; - case 'Z': /* return terminal ID */ - break; - case '#': /* change characters height */ - vt100state = EScharsize; - break; - case '(': - vt100state = ESsetG0; - break; - case ')': /* character set sequence */ - vt100state = ESsetG1; - break; - case '=': - break; - default: - /* Rest not supported */ - break; - } - -} - -static void -putc_askcmd(unsigned char ch) -{ - if (ch >= '0' && ch <= '9') { - par[numpars] = (10*par[numpars]) + (ch-'0'); - return; - } - vt100state = ESnormal; - - switch (par[0]) { - case 6: - vc_relative_origin = ch == 'h'; - break; - case 7: /* wrap around mode h=1, l=0*/ - vc_wrap_mode = ch == 'h'; - break; - default: - break; - } - -} - -static void -putc_charsizecmd(unsigned char ch) -{ - vt100state = ESnormal; - - switch (ch) { - case '3' : - case '4' : - case '5' : - case '6' : - break; - case '8' : /* fill 'E's */ - { - int xx, yy; - for (yy = 0; yy < vinfo.v_rows; yy++) - for (xx = 0; xx < vinfo.v_columns; xx++) - vc_paintchar('E', xx, yy, ATTR_NONE); - } - break; - } - -} - -static void -putc_charsetcmd(int charset, unsigned char ch) -{ - vt100state = ESnormal; - - switch (ch) { - case 'A' : - case 'B' : - default: - vc_charset[charset] = 0; - break; - case '0' : /* Graphic characters */ - case '2' : - vc_charset[charset] = 0x21; - break; - } - -} - -static void -putc_gotpars(unsigned char ch) -{ - int i; - - if (ch < ' ') { - /* special case for vttest for handling cursor - movement in escape sequences */ - putc_normal(ch); - vt100state = ESgotpars; - return; - } - vt100state = ESnormal; - switch (ch) { - case 'A': /* Up */ - y -= par[0] ? par[0] : 1; - if (y < scrreg_top) - y = scrreg_top; - break; - case 'B': /* Down */ - y += par[0] ? par[0] : 1; - if (y >= scrreg_bottom) - y = scrreg_bottom - 1; - break; - case 'C': /* Right */ - x += par[0] ? par[0] : 1; - if (x >= vinfo.v_columns) - x = vinfo.v_columns-1; - break; - case 'D': /* Left */ - x -= par[0] ? 
par[0] : 1; - if (x < 0) - x = 0; - break; - case 'H': /* Set cursor position */ - case 'f': - x = par[1] ? par[1] - 1 : 0; - y = par[0] ? par[0] - 1 : 0; - if (vc_relative_origin) - y += scrreg_top; - hanging_cursor = 0; - break; - case 'X': /* clear p1 characters */ - if (numpars) { - int i; - for (i = x; i < x + par[0]; i++) - vc_paintchar(' ', i, y, ATTR_NONE); - } - break; - case 'J': /* Clear part of screen */ - clear_screen(par[0]); - break; - case 'K': /* Clear part of line */ - clear_line(par[0]); - break; - case 'g': /* tab stops */ - switch (par[0]) { - case 1: - case 2: /* reset tab stops */ - /* reset_tabs(); */ - break; - case 3: /* Clear every tabs */ - { - int i; - - for (i = 0; i <= vinfo.v_columns; i++) - tab_stops[i] = 0; - } - break; - case 0: - tab_stops[x] = 0; - break; - } - break; - case 'm': /* Set attribute */ - for (i = 0; i < numpars; i++) { - switch (par[i]) { - case 0: - attr = ATTR_NONE; - vc_color_set(VC_RESET_BACKGROUND); - vc_color_set(VC_RESET_FOREGROUND); - break; - case 1: - attr |= ATTR_BOLD; - break; - case 4: - attr |= ATTR_UNDER; - break; - case 7: - attr |= ATTR_REVERSE; - break; - case 22: - attr &= ~ATTR_BOLD; - break; - case 24: - attr &= ~ATTR_UNDER; - break; - case 27: - attr &= ~ATTR_REVERSE; - break; - case 5: - case 25: /* blink/no blink */ - break; - default: - vc_color_set(par[i]); - break; - } - } - break; - case 'r': /* Set scroll region */ - x = y = 0; - /* ensure top < bottom, and both within limits */ - if ((numpars > 0) && (par[0] < vinfo.v_rows)) { - scrreg_top = par[0] ? par[0] - 1 : 0; - if (scrreg_top < 0) - scrreg_top = 0; - } else { - scrreg_top = 0; - } - if ((numpars > 1) && (par[1] <= vinfo.v_rows) && (par[1] > par[0])) { - scrreg_bottom = par[1]; - if (scrreg_bottom > vinfo.v_rows) - scrreg_bottom = vinfo.v_rows; - } else { - scrreg_bottom = vinfo.v_rows; - } - if (vc_relative_origin) - y = scrreg_top; - break; - } - -} - -static void -putc_getpars(unsigned char ch) -{ - if (ch == '?') { - vt100state = ESask; - return; - } - if (ch == '[') { - vt100state = ESnormal; - /* Not supported */ - return; - } - if (ch == ';' && numpars < MAXPARS - 1) { - numpars++; - } else - if (ch >= '0' && ch <= '9') { - par[numpars] *= 10; - par[numpars] += ch - '0'; - } else { - numpars++; - vt100state = ESgotpars; - putc_gotpars(ch); - } -} - -static void -putc_square(unsigned char ch) -{ - int i; - - for (i = 0; i < MAXPARS; i++) { - par[i] = 0; - } - - numpars = 0; - vt100state = ESgetpars; - - putc_getpars(ch); - -} - -static void -vc_putchar(char ch) -{ - if (!ch) { - return; /* ignore null characters */ - } - switch (vt100state) { - default:vt100state = ESnormal; /* FALLTHROUGH */ - case ESnormal: - putc_normal(ch); - break; - case ESesc: - putc_esc(ch); - break; - case ESsquare: - putc_square(ch); - break; - case ESgetpars: - putc_getpars(ch); - break; - case ESgotpars: - putc_gotpars(ch); - break; - case ESask: - putc_askcmd(ch); - break; - case EScharsize: - putc_charsizecmd(ch); - break; - case ESsetG0: - putc_charsetcmd(0, ch); - break; - case ESsetG1: - putc_charsetcmd(1, ch); - break; - } - - if (x >= vinfo.v_columns) { - x = vinfo.v_columns - 1; - } - if (x < 0) { - x = 0; - } - if (y >= vinfo.v_rows) { - y = vinfo.v_rows - 1; - } - if (y < 0) { - y = 0; - } - -} - -/* - * Actually draws the buffer, handle the jump scroll - */ -static void vc_flush_forward_buffer(void) -{ - int start = 0; - int todo = 0; - spl_t s; - - assert(vc_forward_buffer_enabled); - - s = splhigh(); - simple_lock(&vc_forward_lock); - - if 
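
Annotation (not part of the patch): vc_putchar above is the hub of a classic VT100 interpreter, one enum state per partially parsed escape sequence and one putc_* handler per state. A stripped-down sketch of the same shape, three states instead of the driver's nine (demo_ names hypothetical):

    enum demo_state { DSnormal, DSesc, DScsi };

    static void demo_put(unsigned char ch, enum demo_state *st)
    {
        switch (*st) {
        case DSnormal:
            if (ch == '\033')
                *st = DSesc;
            /* else paint ch at the cursor and advance x */
            break;
        case DSesc:
            *st = (ch == '[') ? DScsi : DSnormal;
            break;
        case DScsi:
            if ((ch >= '0' && ch <= '9') || ch == ';')
                break;                  /* keep collecting parameters */
            /* final byte: dispatch 'A', 'H', 'J', 'm', ... */
            *st = DSnormal;
            break;
        }
    }
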
(vc_forward_buffer_busy) { - /* Bail out if we're already in the middle of a flush. */ - simple_unlock(&vc_forward_lock); - splx(s); - return; - } - - vc_forward_buffer_busy = 1; - - while (todo < vc_forward_buffer_size) { - todo = vc_forward_buffer_size; - - /* Drop the lock while we update the screen. */ - simple_unlock(&vc_forward_lock); - splx(s); - - reversecursor(); - - do { - int i; -#ifdef FAST_JUMP_SCROLL - if ((todo - start) < 2) { - vc_putchar(vc_forward_buffer[start++]); - } else { - assert(vc_forward_scroll == 0); - - vc_forward_preflight_save.vt100state = vt100state; - vc_forward_preflight_save.vc_wrap_mode = vc_wrap_mode; - vc_forward_preflight_save.vc_relative_origin = vc_relative_origin; - vc_forward_preflight_save.vc_charset_select = vc_charset_select; - vc_forward_preflight_save.vc_save_charset_s = vc_save_charset_s; - vc_forward_preflight_save.vc_charset[0] = vc_charset[0]; - vc_forward_preflight_save.vc_charset[1] = vc_charset[1]; - vc_forward_preflight_save.vc_charset_save[0] = vc_charset_save[0]; - vc_forward_preflight_save.vc_charset_save[1] = vc_charset_save[1]; - vc_forward_preflight_save.x = x; - vc_forward_preflight_save.y = y; - vc_forward_preflight_save.savex = savex; - vc_forward_preflight_save.savey = savey; - vc_forward_preflight_save.numpars = numpars; - vc_forward_preflight_save.hanging_cursor = hanging_cursor; - vc_forward_preflight_save.attr = attr; - vc_forward_preflight_save.saveattr = saveattr; - vc_forward_preflight_save.scrreg_top = scrreg_top; - vc_forward_preflight_save.scrreg_bottom = scrreg_bottom; - vc_forward_preflight_save.vc_color_fore = vc_color_fore; - vc_forward_preflight_save.vc_color_back = vc_color_back; - bcopy( (const char *) par, - (char *) vc_forward_preflight_save.par, - (vm_size_t) sizeof(par) ); - bcopy( (const char *) tab_stops, - (char *) vc_forward_preflight_save.tab_stops, - (vm_size_t) sizeof(tab_stops) ); - - vc_forward_preflight_mode = PFwind; - - for (i = start; - i < todo && - vc_forward_preflight_save.scrreg_top == scrreg_top && - vc_forward_preflight_save.scrreg_bottom == scrreg_bottom; - i++) - vc_putchar(vc_forward_buffer[i]); - - vt100state = vc_forward_preflight_save.vt100state; - vc_wrap_mode = vc_forward_preflight_save.vc_wrap_mode; - vc_relative_origin = vc_forward_preflight_save.vc_relative_origin; - vc_charset_select = vc_forward_preflight_save.vc_charset_select; - vc_save_charset_s = vc_forward_preflight_save.vc_save_charset_s; - vc_charset[0] = vc_forward_preflight_save.vc_charset[0]; - vc_charset[1] = vc_forward_preflight_save.vc_charset[1]; - vc_charset_save[0] = vc_forward_preflight_save.vc_charset_save[0]; - vc_charset_save[1] = vc_forward_preflight_save.vc_charset_save[1]; - x = vc_forward_preflight_save.x; - y = vc_forward_preflight_save.y; - savex = vc_forward_preflight_save.savex; - savey = vc_forward_preflight_save.savey; - numpars = vc_forward_preflight_save.numpars; - hanging_cursor = vc_forward_preflight_save.hanging_cursor; - attr = vc_forward_preflight_save.attr; - saveattr = vc_forward_preflight_save.saveattr; - scrreg_top = vc_forward_preflight_save.scrreg_top; - scrreg_bottom = vc_forward_preflight_save.scrreg_bottom; - vc_color_fore = vc_forward_preflight_save.vc_color_fore; - vc_color_back = vc_forward_preflight_save.vc_color_back; - bcopy( (const char *) vc_forward_preflight_save.par, - (char *) par, - (vm_size_t) sizeof(par) ); - bcopy( (const char *) vc_forward_preflight_save.tab_stops, - (char *) tab_stops, - (vm_size_t) sizeof(tab_stops) ); - - vc_forward_preflight_mode = PFscroll; 
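
Annotation (not part of the patch): the preflight machinery above implements jump scrolling in three passes. "Wind" runs the parser over the pending bytes with painting suppressed, only to learn the net scroll distance; the saved parser state is then restored, the framebuffer is scrolled once, and "unwind" replays the bytes, painting only rows that remain visible. A sketch of the snapshot idea, the struct holding a small subset of what the driver actually saves:

    struct demo_parser {
        int x, y, attr;
        int scrreg_top, scrreg_bottom;
    };

    static void demo_preflight(struct demo_parser *live,
                               const unsigned char *buf, int len,
                               void (*interpret)(unsigned char))
    {
        struct demo_parser saved = *live;   /* wind: snapshot the parser */
        int i;

        for (i = 0; i < len; i++)
            interpret(buf[i]);              /* dry run: no painting, the
                                               scroll distance accumulates */
        *live = saved;                      /* restore the parser... */
        /* ...scroll the framebuffer once, then replay buf[] for real */
    }
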
- - if (vc_forward_scroll > 0) - scrollup(vc_forward_scroll > scrreg_bottom - scrreg_top ? - scrreg_bottom - scrreg_top : vc_forward_scroll); - else if (vc_forward_scroll < 0) - scrolldown(-vc_forward_scroll > scrreg_bottom - scrreg_top ? - scrreg_bottom - scrreg_top : -vc_forward_scroll); - - vc_forward_preflight_mode = PFunwind; - - for (; start < i; start++) - vc_putchar(vc_forward_buffer[start]); - - assert(vc_forward_scroll == 0); - - vc_forward_preflight_mode = PFoff; - } -#else !FAST_JUMP_SCROLL - int plaintext = 1; - int drawlen = start; - int jump = 0; - int param = 0, changebackground = 0; - enum vt100state_e vtState = vt100state; - /* - * In simple words, here we're pre-parsing the text to look for - * + Newlines, for computing jump scroll - * + /\033\[[0-9;]*]m/ to continue on - * any other sequence will stop. We don't want to have cursor - * movement escape sequences while we're trying to pre-scroll - * the screen. - * We have to be extra carefull about the sequences that changes - * the background color to prevent scrolling in those - * particular cases. - * That parsing was added to speed up 'man' and 'color-ls' a - * zillion time (at least). It's worth it, trust me. - * (mail Nick Stephen for a True Performance Graph) - * Michel Pollet - */ - for (i = start; i < todo && plaintext; i++) { - drawlen++; - switch (vtState) { - case ESnormal: - switch (vc_forward_buffer[i]) { - case '\033': - vtState = ESesc; - break; - case '\n': - jump++; - break; - } - break; - case ESesc: - switch (vc_forward_buffer[i]) { - case '[': - vtState = ESgetpars; - param = 0; - changebackground = 0; - break; - default: - plaintext = 0; - break; - } - break; - case ESgetpars: - if ((vc_forward_buffer[i] >= '0' && - vc_forward_buffer[i] <= '9') || - vc_forward_buffer[i] == ';') { - if (vc_forward_buffer[i] >= '0' && - vc_forward_buffer[i] <= '9') - param = (param*10)+(vc_forward_buffer[i]-'0'); - else { - if (param >= 40 && param <= 47) - changebackground = 1; - if (!vc_normal_background && - !param) - changebackground = 1; - param = 0; - } - break; /* continue on */ - } - vtState = ESgotpars; - /* fall */ - case ESgotpars: - switch (vc_forward_buffer[i]) { - case 'm': - vtState = ESnormal; - if (param >= 40 && param <= 47) - changebackground = 1; - if (!vc_normal_background && - !param) - changebackground = 1; - if (changebackground) { - plaintext = 0; - jump = 0; - /* REALLY don't jump */ - } - /* Yup ! we've got it */ - break; - default: - plaintext = 0; - break; - } - break; - default: - plaintext = 0; - break; - } - - } - - /* - * Then we look if it would be appropriate to forward jump - * the screen before drawing - */ - if (jump && (scrreg_bottom - scrreg_top) > 2) { - jump -= scrreg_bottom - y - 1; - if (jump > 0 ) { - if (jump >= scrreg_bottom - scrreg_top) - jump = scrreg_bottom - scrreg_top -1; - y -= jump; - scrollup(jump); - } - } - /* - * and we draw what we've found to the parser - */ - for (i = start; i < drawlen; i++) - vc_putchar(vc_forward_buffer[start++]); - /* - * Continue sending characters to the parser until we're sure we're - * back on normal characters. - */ - for (i = start; i < todo && - vt100state != ESnormal ; i++) - vc_putchar(vc_forward_buffer[start++]); -#endif !FAST_JUMP_SCROLL - /* Then loop again if there still things to draw */ - } while (start < todo); - - reversecursor(); - - /* Re-acquire the lock while we check our state. 
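
Annotation (not part of the patch): the #else branch above pre-parses the pending bytes instead: it counts newlines while tolerating only colour-change sequences (ESC [ params m), so man and color-ls output can be scrolled in one jump rather than a line at a time. A self-contained sketch of that scan (demo_ name hypothetical):

    static int demo_count_jump(const unsigned char *buf, int len)
    {
        int i = 0, jump = 0;

        while (i < len) {
            if (buf[i] == '\n') {
                jump++;
            } else if (buf[i] == '\033') {
                if (++i >= len || buf[i] != '[')
                    break;              /* not CSI: stop pre-parsing */
                while (++i < len &&
                       ((buf[i] >= '0' && buf[i] <= '9') || buf[i] == ';'))
                    ;                   /* skip the parameter bytes */
                if (i >= len || buf[i] != 'm')
                    break;              /* cursor motion etc.: stop */
            }
            i++;
        }
        return jump;                    /* rows the caller may pre-scroll */
    }

As the code above does, a real scanner must also cancel the jump when an SGR parameter changes the background colour, since pre-scrolled blank lines would otherwise be filled with the wrong colour.
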
*/ - s = splhigh(); - simple_lock(&vc_forward_lock); - } - - vc_forward_buffer_busy = 0; - vc_forward_buffer_size = 0; - - simple_unlock(&vc_forward_lock); - splx(s); -} - -int -vcputc(int l, int u, int c) -{ - if(!vinfo.v_baseaddr) - return; - - /* - * Either we're really buffering stuff or we're not yet because - * the probe hasn't been done. - */ - if (vc_forward_buffer_enabled) - vc_store_char(c); - else - vc_putchar(c); - - return 0; -} - -/* - * Store characters to be drawn 'later', handle overflows - */ - -static void -vc_store_char(unsigned char c) -{ - int flush = 0; - spl_t s; - - assert(vc_forward_buffer_enabled); - - s = splhigh(); - simple_lock(&vc_forward_lock); - - /* Spin until the buffer has space for another character. */ - while (vc_forward_buffer_size == VC_MAX_FORWARD_SIZE) { - simple_unlock(&vc_forward_lock); - splx(s); - /* wait */ - s = splhigh(); - simple_lock(&vc_forward_lock); - } - - assert(vc_forward_buffer_size < VC_MAX_FORWARD_SIZE); - - vc_forward_buffer[vc_forward_buffer_size++] = (unsigned char)c; - - if (vc_forward_buffer_size == 1) { - /* If we're adding the first character to the buffer, - * start the timer, otherwise it is already running. - */ - if (debug_mode) { - flush = 1; - } else { - timeout((timeout_fcn_t)vc_flush_forward_buffer, - (void *)0, - VC_CONSOLE_UPDATE_TIMEOUT); - } - } else if (vc_forward_buffer_size == VC_MAX_FORWARD_SIZE || debug_mode) { - /* - * If there is an overflow or this is an immediate character display - * (eg. pre-clock printfs, panics), then we force a draw (take into - * account that a flush might already be in progress). - */ - if (!vc_forward_buffer_busy) { - flush = 1; - untimeout((timeout_fcn_t)vc_flush_forward_buffer, (void *)0); - } - } - - simple_unlock(&vc_forward_lock); - splx(s); - - if (flush) { - /* - * Immediate character display.. kernel printf uses this. Make sure - * get flushed and that panics get fully displayed. 
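
Annotation (not part of the patch): vc_store_char above is the write side of the scheme: the first byte into an empty buffer arms a flush timeout, an overflow or debugger output forces a synchronous flush, and everything else just accumulates. A sketch of the policy with the locking and spl dance elided (demo_ names hypothetical):

    #define DEMO_MAX 1024               /* stands in for VC_MAX_FORWARD_SIZE */
    static unsigned char demo_buf[DEMO_MAX];
    static int demo_size;

    static void demo_store(unsigned char c, void (*flush)(void),
                           void (*arm_timer)(void (*)(void)))
    {
        demo_buf[demo_size++] = c;
        if (demo_size == 1)
            arm_timer(flush);           /* first byte: draw a bit later */
        else if (demo_size == DEMO_MAX)
            flush();                    /* overflow: draw immediately */
    }
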
- */ - vc_flush_forward_buffer(); - } -} - -static void -vc_initialize(void) -{ -#if 0 - GratefulDebInit(); /* (TEST/DEBUG) */ -#endif - -#if DEBUG && SERIAL_CONSOLE_DEFAULT && !defined(MACH_PE) - printf(" Video info: %d; video_board=%08X\n", i, vboard); - printf(" Video name: %s\n", vinfo.v_name); - printf(" height=%d; width=%d, depth=%d; rowbytes=%d; type=%08X\n", - vinfo.v_height, vinfo.v_width, vinfo.v_depth, vinfo.v_rowbytes, vinfo.v_type); - printf(" physical address=%08X\n", vinfo.v_physaddr); -#endif - - vinfo.v_rows = vinfo.v_height / CHARHEIGHT; - vinfo.v_columns = vinfo.v_width / CHARWIDTH; - - if (vinfo.v_depth >= 8) { - vinfo.v_rowscanbytes = (vinfo.v_depth / 8) * vinfo.v_width; - } else { - vinfo.v_rowscanbytes = vinfo.v_width / (8 / vinfo.v_depth); - } - -#if DEBUG && SERIAL_CONSOLE_DEFAULT && !defined(MACH_PE) - printf(" inited=%d\n", vc_initted); -#endif - - - vc_render_font(1, vinfo.v_depth); - vc_color_mask = vc_color_depth_masks[vc_color_index_table[vinfo.v_depth]]; - vt100_reset(); - switch (vinfo.v_depth) { - default: - case 1: - vc_paintchar = vc_paint_char1; - break; - case 2: - vc_paintchar = vc_paint_char2; - break; - case 4: - vc_paintchar = vc_paint_char4; - break; - case 8: - vc_paintchar = vc_paint_char8c; - break; - case 16: - vc_paintchar = vc_paint_char16c; - break; - case 32: - vc_paintchar = vc_paint_char32c; - break; - } - -#ifdef FAST_JUMP_SCROLL - vc_forward_paintchar = vc_paintchar; - vc_paintchar = vc_paint_char; -#endif FAST_JUMP_SCROLL -} - -void -vcattach(void) -{ - if (vinfo.v_depth >= 8) - printf("\033[31mC\033[32mO\033[33mL\033[34mO\033[35mR\033[0m "); - printf("video console at 0x%x (%dx%dx%d)\n", vinfo.v_baseaddr, - vinfo.v_width, vinfo.v_height, vinfo.v_depth); - - /* - * Added for the buffering and jump scrolling - */ - /* Init our lock */ - simple_lock_init(&vc_forward_lock, ETAP_IO_TTY); - - vc_forward_buffer_enabled = 1; - -} - - -struct vc_progress_element { - unsigned int version; - unsigned int flags; - unsigned int time; - unsigned char count; - unsigned char res[3]; - int width; - int height; - int dx; - int dy; - int transparent; - unsigned int res2[3]; - unsigned char data[0]; -}; -typedef struct vc_progress_element vc_progress_element; - -static vc_progress_element * vc_progress; -static const unsigned char * vc_progress_data; -static const unsigned char * vc_progress_alpha; -static boolean_t vc_progress_enable; -static const unsigned char * vc_clut; -static const unsigned char * vc_clut8; -static unsigned char vc_revclut8[256]; -static unsigned int vc_progress_tick; -static boolean_t vc_graphics_mode; -static boolean_t vc_acquired; -static boolean_t vc_need_clear; -static boolean_t vc_needsave; -static vm_address_t vc_saveunder; -static vm_size_t vc_saveunder_len; - -static void vc_blit_rect_8c( int x, int y, - int width, int height, - const unsigned char * dataPtr, - const unsigned char * alphaPtr, - unsigned char * backPtr, - boolean_t save, boolean_t static_alpha ) -{ - volatile unsigned char * dst; - int line, col; - unsigned int data; - unsigned char alpha; - - dst = (unsigned char *)(vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x)); - - for( line = 0; line < height; line++) { - for( col = 0; col < width; col++) { - data = 0; - if( dataPtr != 0) data = *dataPtr++; - else if( alphaPtr != 0) data = vc_revclut8[*alphaPtr++]; - *(dst + col) = data; - } - dst = (volatile unsigned char *) (((int)dst) + vinfo.v_rowbytes); - } - -} - -static void vc_blit_rect_16( int x, int y, - int width, int height, - const unsigned char * 
dataPtr, - const unsigned char * alphaPtr, - unsigned short * backPtr, - boolean_t save, boolean_t static_alpha ) -{ - volatile unsigned short * dst; - int line, col; - unsigned int data, index, alpha, back; - - dst = (volatile unsigned short *)(vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x * 2)); - - for( line = 0; line < height; line++) { - for( col = 0; col < width; col++) { - if( dataPtr != 0) { - index = *dataPtr++; - index *= 3; - } - - if( alphaPtr && backPtr) { - - alpha = *alphaPtr++; - data = 0; - if( dataPtr != 0) { - if( vc_clut[index + 0] > alpha) - data |= (((vc_clut[index + 0] - alpha) & 0xf8) << 7); - if( vc_clut[index + 1] > alpha) - data |= (((vc_clut[index + 1] - alpha) & 0xf8) << 2); - if( vc_clut[index + 2] > alpha) - data |= (((vc_clut[index + 2] - alpha) & 0xf8) >> 3); - } - - if( save) { - back = *(dst + col); - if ( !static_alpha) - *backPtr++ = back; - back = (((((back & 0x7c00) * alpha) + 0x3fc00) >> 8) & 0x7c00) - | (((((back & 0x03e0) * alpha) + 0x01fe0) >> 8) & 0x03e0) - | (((((back & 0x001f) * alpha) + 0x000ff) >> 8) & 0x001f); - if ( static_alpha) - *backPtr++ = back; - } else { - back = *backPtr++; - if ( !static_alpha) { - back = (((((back & 0x7c00) * alpha) + 0x3fc00) >> 8) & 0x7c00) - | (((((back & 0x03e0) * alpha) + 0x01fe0) >> 8) & 0x03e0) - | (((((back & 0x001f) * alpha) + 0x000ff) >> 8) & 0x001f); - } - } - - data += back; - - } else - if( dataPtr != 0) { - data = ( (0xf8 & (vc_clut[index + 0])) << 7) - | ( (0xf8 & (vc_clut[index + 1])) << 2) - | ( (0xf8 & (vc_clut[index + 2])) >> 3); - } - - *(dst + col) = data; - } - dst = (volatile unsigned short *) (((int)dst) + vinfo.v_rowbytes); - } -} - -static void vc_blit_rect_32( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - const unsigned char * dataPtr, - const unsigned char * alphaPtr, - unsigned int * backPtr, - boolean_t save, boolean_t static_alpha ) -{ - volatile unsigned int * dst; - int line, col; - unsigned int data, index, alpha, back; - - dst = (volatile unsigned int *) (vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x * 4)); - - for( line = 0; line < height; line++) { - for( col = 0; col < width; col++) { - if( dataPtr != 0) { - index = *dataPtr++; - index *= 3; - } - - if( alphaPtr && backPtr) { - - alpha = *alphaPtr++; - data = 0; - if( dataPtr != 0) { - if( vc_clut[index + 0] > alpha) - data |= ((vc_clut[index + 0] - alpha) << 16); - if( vc_clut[index + 1] > alpha) - data |= ((vc_clut[index + 1] - alpha) << 8); - if( vc_clut[index + 2] > alpha) - data |= ((vc_clut[index + 2] - alpha)); - } - - if( save) { - back = *(dst + col); - if ( !static_alpha) - *backPtr++ = back; - back = (((((back & 0x00ff00ff) * alpha) + 0x00ff00ff) >> 8) & 0x00ff00ff) - | (((((back & 0x0000ff00) * alpha) + 0x0000ff00) >> 8) & 0x0000ff00); - if ( static_alpha) - *backPtr++ = back; - } else { - back = *backPtr++; - if ( !static_alpha) { - back = (((((back & 0x00ff00ff) * alpha) + 0x00ff00ff) >> 8) & 0x00ff00ff) - | (((((back & 0x0000ff00) * alpha) + 0x0000ff00) >> 8) & 0x0000ff00); - } - } - - data += back; - - } else - if( dataPtr != 0) { - data = (vc_clut[index + 0] << 16) - | (vc_clut[index + 1] << 8) - | (vc_clut[index + 2]); - } - - *(dst + col) = data; - } - dst = (volatile unsigned int *) (((int)dst) + vinfo.v_rowbytes); - } -} - -void -draw_panic_dialog( void ) -{ - int pd_x,pd_y, iconx, icony, tx_line, tx_col; - int line_width = 1; - int f1, f2, d1, d2, d3, rem; - char *pair = "ff"; - int count = 0; - char digit; - int nibble; - char colon = ':'; - char dot = '.'; - 
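
Annotation (not part of the patch): vc_blit_rect_16 above composites the progress image over the framebuffer one 5-bit channel at a time without unpacking the 1555 pixel; the added constants round each scaled channel before the shift. The same expression, isolated (demo_ name hypothetical):

    static unsigned int demo_blend1555(unsigned int back, unsigned int alpha)
    {
        /* scale each field of a 1555 pixel by an 8-bit alpha in place */
        return ((((back & 0x7c00) * alpha + 0x3fc00) >> 8) & 0x7c00)
             | ((((back & 0x03e0) * alpha + 0x01fe0) >> 8) & 0x03e0)
             | ((((back & 0x001f) * alpha + 0x000ff) >> 8) & 0x001f);
    }

The 32bpp path plays the same trick with a 0x00ff00ff/0x0000ff00 mask pair, blending the red and blue channels in a single multiply.
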
struct ether_addr kdp_mac_addr = kdp_get_mac_addr(); - unsigned int ip_addr = kdp_get_ip_address(); - - - if (!panicDialogDrawn) - { - if ( !logPanicDataToScreen ) - { - - /* dim the screen 50% before putting up panic dialog */ - dim_screen(); - - /* set up to draw background box */ - pd_x = (vinfo.v_width/2) - panic_dialog.pd_width/2; - pd_y = (vinfo.v_height/2) - panic_dialog.pd_height/2; - - /* draw image */ - panic_blit_rect( pd_x, pd_y, panic_dialog.pd_width, panic_dialog.pd_height, 0, (unsigned char*) panic_dialog.image_pixel_data); - - /* offset for mac address text */ - mac_addr_digit_x = (vinfo.v_width/2) - 130; /* use 62 if no ip */ - mac_addr_digit_y = (vinfo.v_height/2) + panic_dialog.pd_height/2 - 20; - - if(kdp_mac_addr.ether_addr_octet[0] || kdp_mac_addr.ether_addr_octet[1]|| kdp_mac_addr.ether_addr_octet[2] - || kdp_mac_addr.ether_addr_octet[3] || kdp_mac_addr.ether_addr_octet[4] || kdp_mac_addr.ether_addr_octet[5]) - { - /* blit the digits for mac address */ - for (count = 0; count < 6; count++ ) - { - nibble = (kdp_mac_addr.ether_addr_octet[count] & 0xf0) >> 4; - digit = nibble < 10 ? nibble + '0':nibble - 10 + 'a'; - blit_digit(digit); - - nibble = kdp_mac_addr.ether_addr_octet[count] & 0xf; - digit = nibble < 10 ? nibble + '0':nibble - 10 + 'a'; - blit_digit(digit); - if( count < 5 ) - blit_digit( colon ); - } - } - else /* blit the ff's */ - { - for( count = 0; count < 6; count++ ) - { - digit = pair[0]; - blit_digit(digit); - digit = pair[1]; - blit_digit(digit); - if( count < 5 ) - blit_digit( colon ); - } - } - /* now print the ip address */ - mac_addr_digit_x = (vinfo.v_width/2) + 10; - if(ip_addr != 0) - { - /* blit the digits for ip address */ - for (count = 0; count < 4; count++ ) - { - nibble = (ip_addr & 0xff000000 ) >> 24; - - d3 = (nibble % 0xa) + '0'; - nibble = nibble/0xa; - d2 = (nibble % 0xa) + '0'; - nibble = nibble /0xa; - d1 = (nibble % 0xa) + '0'; - - if( d1 ) blit_digit(d1); - blit_digit(d2); - blit_digit(d3); - if( count < 3 ) - blit_digit(dot); - - d1= d2 = d3 = 0; - ip_addr = ip_addr << 8; - } - } - } - } - panicDialogDrawn = TRUE; - -} - - -static void -blit_digit( int digit ) -{ - switch( digit ) - { - case '0': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_0.num_w, num_0.num_h, 255, (unsigned char*) num_0.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_0.num_w - 1; - break; - } - case '1': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_1.num_w, num_1.num_h, 255, (unsigned char*) num_1.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_1.num_w ; - break; - } - case '2': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_2.num_w, num_2.num_h, 255, (unsigned char*) num_2.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_2.num_w ; - break; - } - case '3': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_3.num_w, num_3.num_h, 255, (unsigned char*) num_3.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_3.num_w ; - break; - } - case '4': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_4.num_w, num_4.num_h, 255, (unsigned char*) num_4.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_4.num_w ; - break; - } - case '5': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_5.num_w, num_5.num_h, 255, (unsigned char*) num_5.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_5.num_w ; - break; - } - case '6': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_6.num_w, num_6.num_h, 255, (unsigned char*) 
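
Annotation (not part of the patch): draw_panic_dialog below renders the machine's MAC address as pairs of hex glyphs, high nibble first. The nibble-to-glyph step, isolated into a sketch (demo_ name hypothetical):

    static void demo_mac_octet(unsigned char octet, char out[2])
    {
        static const char hex[] = "0123456789abcdef";
        out[0] = hex[octet >> 4];   /* same as: n < 10 ? n+'0' : n-10+'a' */
        out[1] = hex[octet & 0x0f];
    }
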
num_6.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_6.num_w ; - break; - } - case '7': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_7.num_w, num_7.num_h, 255, (unsigned char*) num_7.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_7.num_w ; - break; - } - case '8': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_8.num_w, num_8.num_h, 255, (unsigned char*) num_8.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_8.num_w ; - break; - } - case '9': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_9.num_w, num_9.num_h, 255, (unsigned char*) num_9.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_9.num_w ; - break; - } - case 'a': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_a.num_w, num_a.num_h, 255, (unsigned char*) num_a.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_a.num_w ; - break; - } - case 'b': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_b.num_w, num_b.num_h, 255, (unsigned char*) num_b.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_b.num_w ; - break; - } - case 'c': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_c.num_w, num_c.num_h, 255, (unsigned char*) num_c.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_c.num_w ; - break; - } - case 'd': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_d.num_w, num_d.num_h, 255, (unsigned char*) num_d.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_d.num_w ; - break; - } - case 'e': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_e.num_w, num_e.num_h, 255, (unsigned char*) num_e.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_e.num_w ; - break; - } - case 'f': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_f.num_w, num_f.num_h, 255, (unsigned char*) num_f.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_f.num_w ; - break; - } - case ':': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y, num_colon.num_w, num_colon.num_h, 255, (unsigned char*) num_colon.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_colon.num_w; - break; - } - case '.': { - panic_blit_rect( mac_addr_digit_x, mac_addr_digit_y + (num_colon.num_h/2), num_colon.num_w, num_colon.num_h/2, 255, (unsigned char*) num_colon.num_pixel_data); - mac_addr_digit_x = mac_addr_digit_x + num_colon.num_w; - break; - } - default: - break; - - } -} - -static void -panic_blit_rect( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - int transparent, unsigned char * dataPtr ) -{ - if(!vinfo.v_depth) - return; - - switch( vinfo.v_depth) { - case 8: - panic_blit_rect_8( x, y, width, height, transparent, dataPtr); - break; - case 16: - panic_blit_rect_16( x, y, width, height, transparent, dataPtr); - break; - case 32: - panic_blit_rect_32( x, y, width, height, transparent, dataPtr); - break; - } -} - -/* panic_blit_rect_8 is not tested and probably doesn't draw correctly. 
- it really needs a clut to use -*/ -static void -panic_blit_rect_8( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - int transparent, unsigned char * dataPtr ) -{ - volatile unsigned int * dst; - int line, col; - unsigned int pixelR, pixelG, pixelB; - - dst = (volatile unsigned int *) (vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - x); - - for( line = 0; line < height; line++) { - for( col = 0; col < width; col++) { - pixelR = *dataPtr++; - pixelG = *dataPtr++; - pixelB = *dataPtr++; - if(( pixelR != transparent) || (pixelG != transparent) || (pixelB != transparent)) - { - *(dst + col) = ((19595 * pixelR + - 38470 * pixelG + - 7471 * pixelB ) / 65536); - } - - } - dst = (volatile unsigned int *) (((int)dst) + vinfo.v_rowbytes); - } -} - -/* panic_blit_rect_16 draws adequately. It would be better if it had a clut - to use instead of scaling the 32bpp color values. - - panic_blit_rect_16 decodes the RLE encoded image data on the fly, scales it down - to 16bpp, and fills in each of the three pixel values (RGB) for each pixel - and writes it to the screen. - -*/ -static void -panic_blit_rect_16( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - int transparent, unsigned char * dataPtr ) -{ - volatile unsigned int * dst; - int line, value, total = 0; - unsigned int quantity, tmp, pixel; - int pix_pos = 2; - int w = width / 2; - boolean_t secondTime = 0; - int pix_incr = 0; - - - dst = (volatile unsigned int *) (vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x * 2)); - -/* - *(dst + col) = ( (0xf8 & (vc_clut[data + 0])) << 7) - | ( (0xf8 & (vc_clut[data + 1])) << 2) - | ( (0xf8 & (vc_clut[data + 2])) >> 3); - -*/ - for( line = 0; line < height; line++) - { - while ( total < width ) - { - quantity = *dataPtr++; - value = *dataPtr++; - value = (0x1f * value)/255; - while( quantity > 0 ) - { - switch( pix_pos ) - { - case 2: /* red */ - { - tmp |= (value << 10) & 0x7c00; - // tmp |= (value & 0xf8) << 7; - quantity--; - pix_pos--; - break; - } - case 1: /* green */ - { - tmp |= (value << 5) & 0x3e0; - // tmp |= (value & 0xf8) << 2; - quantity--; - pix_pos--; - break; - } - default: /* blue */ - { - tmp |= value & 0x1f; - // tmp |= (value & 0xf8) >> 3; - total++; - quantity--; - pix_pos = 2; - if( secondTime ) - { - pixel |= tmp; - secondTime = 0; - *(dst + pix_incr++) = pixel; - tmp = 0; - pixel = 0; - } - else - { - pixel = tmp << 16; - secondTime = 1; - } - break; - } - } - } - } - dst = (volatile unsigned int *) (((int)dst) + vinfo.v_rowbytes); - total = 0; - pix_incr = 0; - } -} - -/* - panic_blit_rect_32 decodes the RLE encoded image data on the fly, and fills - in each of the three pixel values (RGB) for each pixel and writes it to the - screen. 
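
Annotation (not part of the patch): the panic images are stored run-length encoded as (count, value) byte pairs, and every three decoded bytes supply the red, green and blue of one pixel; panic_blit_rect_16/32 decode on the fly while blitting. Incidentally, the 19595/38470/7471 weights in panic_blit_rect_8 above are the Rec. 601 luma coefficients 0.299/0.587/0.114 scaled by 65536. A plain expander showing just the decode (demo_ name hypothetical):

    static int demo_rle_expand(const unsigned char *src, int srclen,
                               unsigned char *dst, int dstlen)
    {
        int s = 0, d = 0;

        while (s + 1 < srclen) {
            unsigned int count = src[s++];
            unsigned char value = src[s++];
            while (count-- && d < dstlen)
                dst[d++] = value;   /* group output by threes: R, G, B */
        }
        return d;                   /* bytes produced */
    }
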
-*/ -static void -panic_blit_rect_32( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - int transparent, unsigned char * dataPtr ) -{ - volatile unsigned int * dst; - int line, total = 0; - unsigned int value, quantity, tmp; - int pix_pos = 2; - - dst = (volatile unsigned int *) (vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x * 4)); - - for( line = 0; line < height; line++) - { - while ( total < width ) - { - quantity = *dataPtr++; - value = *dataPtr++; - while( quantity > 0 ) - { - switch( pix_pos ) - { - case 2: - { - tmp = value << 16; - quantity--; - pix_pos--; - break; - } - case 1: - { - tmp |= value << 8; - quantity--; - pix_pos--; - break; - } - default: - { - tmp |= value; - *(dst + total) = tmp; - total++; - quantity--; - pix_pos = 2; - break; - } - - } - } - - } - dst = (volatile unsigned int *) (((int)dst) + vinfo.v_rowbytes); - total = 0; - } -} - -static void -dim_screen(void) -{ - if(!vinfo.v_depth) - return; - - switch( vinfo.v_depth) { - /*case 8: - dim_screen8(); - break; - */ - case 16: - dim_screen16(); - break; - case 32: - dim_screen32(); - break; - } -} - -static void -dim_screen16(void) -{ - unsigned long *p, *endp, *row; - int linelongs, col; - int rowline, rowlongs; - unsigned long value, tmp; - - rowline = vinfo.v_rowscanbytes / 4; - rowlongs = vinfo.v_rowbytes / 4; - - p = (unsigned long*) vinfo.v_baseaddr;; - endp = (unsigned long*) vinfo.v_baseaddr; - - linelongs = vinfo.v_rowbytes * CHARHEIGHT / 4; - endp += rowlongs * vinfo.v_height; - - for (row = p ; row < endp ; row += rowlongs) { - for (col = 0; col < rowline; col++) { - value = *(row+col); - tmp = ((value & 0x7C007C00) >> 1) & 0x3C003C00; - tmp |= ((value & 0x03E003E0) >> 1) & 0x01E001E0; - tmp |= ((value & 0x001F001F) >> 1) & 0x000F000F; - *(row+col) = tmp; //half (dimmed)? - } - - } - -} - -static void -dim_screen32(void) -{ - unsigned long *p, *endp, *row; - int linelongs, col; - int rowline, rowlongs; - unsigned long value, tmp; - - rowline = vinfo.v_rowscanbytes / 4; - rowlongs = vinfo.v_rowbytes / 4; - - p = (unsigned long*) vinfo.v_baseaddr;; - endp = (unsigned long*) vinfo.v_baseaddr; - - linelongs = vinfo.v_rowbytes * CHARHEIGHT / 4; - endp += rowlongs * vinfo.v_height; - - for (row = p ; row < endp ; row += rowlongs) { - for (col = 0; col < rowline; col++) { - value = *(row+col); - tmp = ((value & 0x00FF0000) >> 1) & 0x007F0000; - tmp |= ((value & 0x0000FF00) >> 1) & 0x00007F00; - tmp |= (value & 0x000000FF) >> 1; - *(row+col) = tmp; //half (dimmed)? 
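
Annotation (not part of the patch): dim_screen16/dim_screen32 above halve every colour channel to dim the screen behind the panic dialog. Halving packed fields is a single shift plus a mask that clears the bits which crossed a channel boundary; a sketch of both layouts (demo_ names hypothetical, pixel formats as used above):

    static unsigned long demo_dim8888(unsigned long px)
    {
        return (px >> 1) & 0x007f7f7f;  /* one 0x00RRGGBB pixel */
    }

    static unsigned long demo_dim1555x2(unsigned long px2)
    {
        return (px2 >> 1) & 0x3def3def; /* two packed 1555 pixels */
    }
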
- } - - } - -} - -static void vc_blit_rect( unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - const unsigned char * dataPtr, - const unsigned char * alphaPtr, - vm_address_t backBuffer, - boolean_t save, boolean_t static_alpha ) -{ - if(!vinfo.v_baseaddr) - return; - - switch( vinfo.v_depth) { - case 8: - if( vc_clut8 == vc_clut) - vc_blit_rect_8c( x, y, width, height, dataPtr, alphaPtr, (unsigned char *) backBuffer, save, static_alpha ); - break; - case 16: - vc_blit_rect_16( x, y, width, height, dataPtr, alphaPtr, (unsigned short *) backBuffer, save, static_alpha ); - break; - case 32: - vc_blit_rect_32( x, y, width, height, dataPtr, alphaPtr, (unsigned int *) backBuffer, save, static_alpha ); - break; - } -} - -static void vc_progress_task( void * arg ) -{ - spl_t s; - int count = (int) arg; - int x, y, width, height; - const unsigned char * data; - - s = splhigh(); - simple_lock(&vc_forward_lock); - - if( vc_progress_enable) { - - count++; - if( count >= vc_progress->count) - count = 0; - - width = vc_progress->width; - height = vc_progress->height; - x = vc_progress->dx; - y = vc_progress->dy; - data = vc_progress_data; - data += count * width * height; - if( 1 & vc_progress->flags) { - x += ((vinfo.v_width - width) / 2); - y += ((vinfo.v_height - height) / 2); - } - vc_blit_rect( x, y, width, height, - NULL, data, vc_saveunder, - vc_needsave, (0 == (4 & vc_progress->flags)) ); - vc_needsave = FALSE; - - timeout( vc_progress_task, (void *) count, - vc_progress_tick ); - } - simple_unlock(&vc_forward_lock); - splx(s); -} - -void vc_display_icon( vc_progress_element * desc, - const unsigned char * data ) -{ - int x, y, width, height; - - if( vc_acquired && vc_graphics_mode && vc_clut) { - - width = desc->width; - height = desc->height; - x = desc->dx; - y = desc->dy; - if( 1 & desc->flags) { - x += ((vinfo.v_width - width) / 2); - y += ((vinfo.v_height - height) / 2); - } - vc_blit_rect( x, y, width, height, data, NULL, (vm_address_t) NULL, FALSE, TRUE ); - } -} - -static boolean_t ignore_first_enable = TRUE; - -static boolean_t -vc_progress_set( boolean_t enable, unsigned int initial_tick ) -{ - spl_t s; - vm_address_t saveBuf = 0; - vm_size_t saveLen = 0; - unsigned int count; - unsigned int index; - unsigned char data8; - unsigned short data16; - unsigned short * buf16; - unsigned int data32; - unsigned int * buf32; - - if( !vc_progress) - return( FALSE ); - - if( enable & ignore_first_enable) { - enable = FALSE; - ignore_first_enable = FALSE; - } - - if( enable) { - saveLen = vc_progress->width * vc_progress->height * vinfo.v_depth / 8; - saveBuf = kalloc( saveLen ); - - if( !vc_need_clear) switch( vinfo.v_depth) { - case 8 : - for( count = 0; count < 256; count++) { - vc_revclut8[count] = vc_clut[0x01 * 3]; - data8 = (vc_clut[0x01 * 3] * count + 0x0ff) >> 8; - for( index = 0; index < 256; index++) { - if( (data8 == vc_clut[index * 3 + 0]) && - (data8 == vc_clut[index * 3 + 1]) && - (data8 == vc_clut[index * 3 + 2])) { - vc_revclut8[count] = index; - break; - } - } - } - memset( (void *) saveBuf, 0x01, saveLen ); - break; - - case 16 : - buf16 = (unsigned short *) saveBuf; - data16 = ((vc_clut[0x01 * 3 + 0] & 0xf8) << 7) - | ((vc_clut[0x01 * 3 + 0] & 0xf8) << 2) - | ((vc_clut[0x01 * 3 + 0] & 0xf8) >> 3); - for( count = 0; count < saveLen / 2; count++) - buf16[count] = data16; - break; - - case 32 : - buf32 = (unsigned int *) saveBuf; - data32 = ((vc_clut[0x01 * 3 + 0] & 0xff) << 16) - | ((vc_clut[0x01 * 3 + 1] & 0xff) << 8) - | ((vc_clut[0x01 * 3 + 2] & 
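
Annotation (not part of the patch): vc_progress_task above animates the boot spinner by re-arming its own timeout, carrying the frame index through the callback argument. The self-rescheduling pattern reduced to a sketch (all demo_ names and constants are hypothetical; the kernel uses timeout() and vc_progress_tick):

    #define DEMO_FRAMES 10              /* stands in for vc_progress->count */
    #define DEMO_PERIOD 6               /* ticks between frames, made up */
    extern void demo_timeout(void (*fn)(void *), void *arg, int ticks);

    static void demo_tick(void *arg)
    {
        long frame = (long)arg;

        frame = (frame + 1) % DEMO_FRAMES;  /* wrap around the frame strip */
        /* blit this frame's cell here; save what it overwrites on the
           first pass so the spinner can be undone later */
        demo_timeout(demo_tick, (void *)frame, DEMO_PERIOD);
    }
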
0xff) << 0); - for( count = 0; count < saveLen / 4; count++) - buf32[count] = data32; - break; - } - } - - s = splhigh(); - simple_lock(&vc_forward_lock); - - if( vc_progress_enable != enable) { - vc_progress_enable = enable; - if( enable) { - vc_needsave = vc_need_clear; - vc_saveunder = saveBuf; - vc_saveunder_len = saveLen; - saveBuf = 0; - saveLen = 0; - timeout(vc_progress_task, (void *) 0, - initial_tick ); - } else { - if( vc_saveunder) { - saveBuf = vc_saveunder; - saveLen = vc_saveunder_len; - vc_saveunder = 0; - vc_saveunder_len = 0; - } - untimeout( vc_progress_task, (void *) 0 ); - } - } - - if( !enable) { - vc_forward_buffer_size = 0; - untimeout((timeout_fcn_t)vc_flush_forward_buffer, (void *)0); - - /* Spin if the flush is in progress */ - while (vc_forward_buffer_busy) { - simple_unlock(&vc_forward_lock); - splx(s); - /* wait */ - s = splhigh(); - simple_lock(&vc_forward_lock); - vc_forward_buffer_size = 0; - } - } - - simple_unlock(&vc_forward_lock); - splx(s); - - if( saveBuf) - kfree( saveBuf, saveLen ); - - return( TRUE ); -} - - -boolean_t -vc_progress_initialize( vc_progress_element * desc, - const unsigned char * data, - const unsigned char * clut ) -{ - if( (!clut) || (!desc) || (!data)) - return( FALSE ); - vc_clut = clut; - vc_clut8 = clut; - - vc_progress = desc; - vc_progress_data = data; - if( 2 & vc_progress->flags) - vc_progress_alpha = vc_progress_data - + vc_progress->count * vc_progress->width * vc_progress->height; - else - vc_progress_alpha = NULL; - vc_progress_tick = vc_progress->time * hz / 1000; - - return( TRUE ); -} - -// FirmwareC.c needs: -Boot_Video boot_video_info; - -extern int disableConsoleOutput; - -static void vc_clear_screen( void ) -{ - reversecursor(); - vt100_reset(); - x = y = 0; - clear_screen(2); - reversecursor(); -}; - -void -initialize_screen(Boot_Video * boot_vinfo, unsigned int op) -{ - if( boot_vinfo) { - bcopy( (const void *) boot_vinfo, - (void *) &boot_video_info, - sizeof( boot_video_info)); - - vinfo.v_name[0] = 0; - vinfo.v_width = boot_vinfo->v_width; - vinfo.v_height = boot_vinfo->v_height; - vinfo.v_depth = boot_vinfo->v_depth; - vinfo.v_rowbytes = boot_vinfo->v_rowBytes; - vinfo.v_physaddr = boot_vinfo->v_baseAddr; - vinfo.v_baseaddr = vinfo.v_physaddr; - vinfo.v_type = 0; - - vc_initialize(); -#if 0 - GratefulDebInit((bootBumbleC *)boot_vinfo); /* Re-initialize GratefulDeb */ -#endif - } - - switch( op ) { - - case kPEGraphicsMode: - vc_graphics_mode = TRUE; - disableConsoleOutput = TRUE; - vc_acquired = TRUE; - break; - - case kPETextMode: - vc_graphics_mode = FALSE; - disableConsoleOutput = FALSE; - vc_acquired = TRUE; - vc_clear_screen(); - break; - - case kPETextScreen: - vc_progress_set( FALSE, 0 ); - disableConsoleOutput = FALSE; - if( vc_need_clear) { - vc_need_clear = FALSE; - vc_clear_screen(); - } - break; - - case kPEEnableScreen: - if( vc_acquired) { - if( vc_graphics_mode) - vc_progress_set( TRUE, vc_progress_tick ); - else - vc_clear_screen(); - } - break; - - case kPEDisableScreen: - vc_progress_set( FALSE, 0 ); - break; - - case kPEAcquireScreen: - vc_need_clear = (FALSE == vc_acquired); - vc_acquired = TRUE; - vc_progress_set( vc_graphics_mode, vc_need_clear ? 
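
Annotation (not part of the patch): vc_progress_initialize below converts the element's frame period from milliseconds to scheduler ticks with time * hz / 1000; multiplying before dividing keeps the integer result exact whenever ms * hz is a multiple of 1000. Isolated (demo_ name hypothetical):

    static unsigned int demo_ms_to_ticks(unsigned int ms, unsigned int hz)
    {
        return ms * hz / 1000;      /* e.g. 100 ms at hz == 100 -> 10 */
    }
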
2 * hz : 0 ); - disableConsoleOutput = vc_graphics_mode; - if( vc_need_clear && !vc_graphics_mode) { - vc_need_clear = FALSE; - vc_clear_screen(); - } - break; - - case kPEReleaseScreen: - vc_acquired = FALSE; - vc_progress_set( FALSE, 0 ); - vc_clut8 = NULL; - disableConsoleOutput = TRUE; -#if 0 - GratefulDebInit(0); /* Stop grateful debugger */ -#endif - break; - } -#if 0 - if( boot_vinfo) GratefulDebInit((bootBumbleC *)boot_vinfo); /* Re initialize GratefulDeb */ -#endif -} diff --git a/osfmk/ppc/POWERMAC/video_console.h b/osfmk/ppc/POWERMAC/video_console.h deleted file mode 100644 index 54b3591f2..000000000 --- a/osfmk/ppc/POWERMAC/video_console.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_COPYRIGHT@ - */ -/* - * @APPLE_FREE_COPYRIGHT@ - */ -/* - * HISTORY - * - * Revision 1.1.1.1 1998/09/22 21:05:43 wsanchez - * Import of Mac OS X kernel (~semeria) - * - * Revision 1.1.1.1 1998/03/07 02:26:05 wsanchez - * Import of OSF Mach kernel (~mburg) - * - * Revision 1.1.9.4 1997/05/09 15:36:59 barbou - * Moved "video" funnel declaration to video_board.h. - * [97/05/09 barbou] - * - * Revision 1.1.9.3 1997/05/08 19:33:07 barbou - * SMP support: - * Funnelized the "video" driver. - * [1997/05/08 18:20:34 barbou] - * - * Revision 1.1.9.2 1997/01/27 15:27:31 stephen - * Export new set/get_status - * VC_GETKEYBOARDLEDS/VC_SETKEYBOARDLEDS - * [1997/01/27 15:27:01 stephen] - * - * Revision 1.1.9.1 1996/12/09 16:52:52 stephen - * nmklinux_1.0b3_shared into pmk1.1 - * [1996/12/09 10:57:12 stephen] - * - * Revision 1.1.7.4 1996/10/18 08:25:16 stephen - * Added v_rowscanbytes field - * [1996/10/18 08:24:11 stephen] - * - * Revision 1.1.7.3 1996/10/14 18:36:33 stephen - * Added v_rows, v_volumns - * Removed sys/ioctl.h inclusion - * File is now exported from microkernel - * [1996/10/14 18:24:17 stephen] - * - * Revision 1.1.7.2 1996/08/23 09:24:10 stephen - * Added guards around file - * [1996/08/23 09:23:05 stephen] - * - * Revision 1.1.7.1 1996/06/20 12:53:46 stephen - * added VM_TYPE_AV - * [1996/06/20 12:51:04 stephen] - * - * Revision 1.1.4.3 1996/05/28 10:47:39 stephen - * Added HPV video capability - * [1996/05/28 10:45:10 stephen] - * - * Revision 1.1.4.2 1996/05/03 17:26:06 stephen - * Added APPLE_FREE_COPYRIGHT - * [1996/05/03 17:20:05 stephen] - * - * Revision 1.1.4.1 1996/04/11 09:06:47 emcmanus - * Copied from mainline.ppc. 
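
The 16-bit case of the save-under fill in vc_progress_set above replicates one 8-bit CLUT component into all three channels of an X1R5G5B5 pixel. A minimal C sketch of that packing; gray8_to_555 is an illustrative name, not an xnu function:

#include <stdint.h>

/* Replicate an 8-bit gray level into the R, G and B fields of a 16-bit
 * X1R5G5B5 pixel, as the 16-bit case above does with vc_clut[0x01 * 3 + 0].
 * Only the top five bits of the input survive. */
static uint16_t gray8_to_555(uint8_t gray)
{
    uint16_t g5 = gray & 0xf8;                  /* keep bits 7..3 */
    return (g5 << 7) | (g5 << 2) | (g5 >> 3);   /* R:14-10  G:9-5  B:4-0 */
}
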
- * [1996/04/10 17:01:34 emcmanus] - * - * Revision 1.1.2.2 1996/03/14 12:58:25 stephen - * Various new definitions from Mike - * [1996/03/14 12:21:30 stephen] - * - * Revision 1.1.2.1 1996/02/08 17:37:58 stephen - * created - * [1996/02/08 17:32:46 stephen] - * - * $EndLog$ - */ - -#ifndef _POWERMAC_VIDEO_CONSOLE_H_ -#define _POWERMAC_VIDEO_CONSOLE_H_ - - -struct vc_info { - unsigned long v_height; /* pixels */ - unsigned long v_width; /* pixels */ - unsigned long v_depth; - unsigned long v_rowbytes; - unsigned long v_baseaddr; - unsigned long v_type; - char v_name[32]; - unsigned long v_physaddr; - unsigned long v_rows; /* characters */ - unsigned long v_columns; /* characters */ - unsigned long v_rowscanbytes; /* Actualy number of bytes used for display per row*/ - /* Note for PCI (VCI) systems, part of the row byte line - is used for the hardware cursor which is not to be touched */ - unsigned long v_reserved[5]; -}; - -#endif /* _POWERMAC_VIDEO_CONSOLE_H_ */ diff --git a/osfmk/ppc/POWERMAC/video_console_entries.h b/osfmk/ppc/POWERMAC/video_console_entries.h deleted file mode 100644 index afdf214cc..000000000 --- a/osfmk/ppc/POWERMAC/video_console_entries.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_FREE_COPYRIGHT@ - * - */ -/* - * @APPLE_FREE_COPYRIGHT@ - */ -/* - * HISTORY - * - * Revision 1.1.1.1 1998/09/22 21:05:43 wsanchez - * Import of Mac OS X kernel (~semeria) - * - * Revision 1.1.1.1 1998/03/07 02:26:05 wsanchez - * Import of OSF Mach kernel (~mburg) - * - * Revision 1.1.10.1 1996/12/09 16:52:54 stephen - * nmklinux_1.0b3_shared into pmk1.1 - * [1996/12/09 10:57:17 stephen] - * - * Revision 1.1.8.2 1996/06/14 08:40:48 emcmanus - * Added prototype for vc_putchar(). - * [1996/05/07 09:35:43 emcmanus] - * - * Revision 1.1.8.1 1996/06/07 16:04:24 stephen - * Added video_scroll_up and video_scroll_down prototypes - * [1996/06/07 15:43:59 stephen] - * - * Revision 1.1.4.3 1996/05/03 17:26:10 stephen - * Added APPLE_FREE_COPYRIGHT - * [1996/05/03 17:20:12 stephen] - * - * Revision 1.1.4.2 1996/04/27 15:23:46 emcmanus - * Added vcputc() and vcgetc() prototypes so these functions can be - * used in the console switch. - * [1996/04/27 15:03:38 emcmanus] - * - * Revision 1.1.4.1 1996/04/11 09:06:51 emcmanus - * Copied from mainline.ppc. 
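
The struct vc_info being removed above is a plain linear-framebuffer description, so a pixel's byte address falls out of it directly. A sketch under that reading; vc_pixel_addr and the trimmed struct are illustrative only:

/* Trimmed mirror of the vc_info fields used in this sketch. */
struct vc_info_min {
    unsigned long v_depth;        /* bits per pixel */
    unsigned long v_rowbytes;     /* bytes per scan line */
    unsigned long v_baseaddr;     /* mapped framebuffer base */
    unsigned long v_rowscanbytes; /* displayable bytes per scan line */
};

/* Byte address of pixel (x, y), assuming v_depth is a multiple of 8.
 * Per the comment above, on PCI (VCI) systems only v_rowscanbytes of
 * each row are displayable; the tail belongs to the hardware cursor. */
static unsigned long vc_pixel_addr(const struct vc_info_min *vi,
                                   unsigned long x, unsigned long y)
{
    return vi->v_baseaddr + y * vi->v_rowbytes + x * (vi->v_depth / 8);
}
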
- * [1996/04/10 17:01:38 emcmanus] - * - * Revision 1.1.2.3 1996/03/14 12:58:27 stephen - * no change - * [1996/03/14 12:56:24 stephen] - * - * Revision 1.1.2.2 1996/01/30 13:29:09 stephen - * Added vcmmap - * [1996/01/30 13:27:11 stephen] - * - * Revision 1.1.2.1 1996/01/12 16:15:06 stephen - * First revision - * [1996/01/12 14:41:47 stephen] - * - * $EndLog$ - */ -#include - -extern int vcputc( - int l, - int u, - int c); -extern int vcgetc( - int l, - int u, - boolean_t wait, - boolean_t raw); - -extern void video_scroll_up(unsigned long start, - unsigned long end, - unsigned long dest); - -extern void video_scroll_down(unsigned long start, /* HIGH addr */ - unsigned long end, /* LOW addr */ - unsigned long dest); /* HIGH addr */ diff --git a/osfmk/ppc/PPCcalls.c b/osfmk/ppc/PPCcalls.c index 36f77706e..27ed40e7e 100644 --- a/osfmk/ppc/PPCcalls.c +++ b/osfmk/ppc/PPCcalls.c @@ -32,6 +32,8 @@ #include #include #include +#include +#include /* * To add a new entry: diff --git a/osfmk/ppc/PPCcalls.h b/osfmk/ppc/PPCcalls.h index 2f69c9952..bc03373e3 100644 --- a/osfmk/ppc/PPCcalls.h +++ b/osfmk/ppc/PPCcalls.h @@ -54,12 +54,28 @@ PPCcallEnt PPCcalls[] = { PPCcall(dis), /* 0x6009 CHUD Interface hook */ - PPCcall(dis), /* 0x600A disabled */ - PPCcall(dis), /* 0x600B disabled */ - PPCcall(dis), /* 0x600C disabled */ + PPCcall(ppcNull), /* 0x600A Null PPC syscall */ + PPCcall(perfmon_control), /* 0x600B performance monitor */ + PPCcall(ppcNullinst), /* 0x600C Instrumented Null PPC syscall */ PPCcall(dis), /* 0x600D disabled */ PPCcall(dis), /* 0x600E disabled */ PPCcall(dis), /* 0x600F disabled */ + PPCcall(dis), /* 0x6010 disabled */ + PPCcall(dis), /* 0x6011 disabled */ + PPCcall(dis), /* 0x6012 disabled */ + PPCcall(dis), /* 0x6013 disabled */ + PPCcall(dis), /* 0x6014 disabled */ + PPCcall(dis), /* 0x6015 disabled */ + PPCcall(dis), /* 0x6016 disabled */ + PPCcall(dis), /* 0x6017 disabled */ + PPCcall(dis), /* 0x6018 disabled */ + PPCcall(dis), /* 0x6019 disabled */ + PPCcall(dis), /* 0x601A disabled */ + PPCcall(dis), /* 0x601B disabled */ + PPCcall(dis), /* 0x601C disabled */ + PPCcall(dis), /* 0x601D disabled */ + PPCcall(dis), /* 0x601E disabled */ + PPCcall(dis), /* 0x601F disabled */ }; #undef dis diff --git a/osfmk/ppc/Performance.s b/osfmk/ppc/Performance.s index fdad6de1a..39cdbf1cc 100644 --- a/osfmk/ppc/Performance.s +++ b/osfmk/ppc/Performance.s @@ -38,7 +38,6 @@ #include #include -#include #include #include #include diff --git a/osfmk/ppc/PseudoKernel.c b/osfmk/ppc/PseudoKernel.c index 9fc5bb276..2c1379f66 100644 --- a/osfmk/ppc/PseudoKernel.c +++ b/osfmk/ppc/PseudoKernel.c @@ -72,10 +72,10 @@ kern_return_t syscall_notify_interrupt ( void ) { task_lock(task); /* Lock our task */ - fact = (thread_act_t)task->thr_acts.next; /* Get the first activation on task */ + fact = (thread_act_t)task->threads.next; /* Get the first activation on task */ act = 0; /* Pretend we didn't find it yet */ - for(i = 0; i < task->thr_act_count; i++) { /* Scan the whole list */ + for(i = 0; i < task->thread_count; i++) { /* Scan the whole list */ if(fact->mact.bbDescAddr) { /* Is this a Blue thread? */ bttd = (BTTD_t *)(fact->mact.bbDescAddr & -PAGE_SIZE); if(bttd->InterruptVector) { /* Is this the Blue interrupt thread? */ @@ -83,7 +83,7 @@ kern_return_t syscall_notify_interrupt ( void ) { break; /* Found it, Bail the loop... 
*/ } } - fact = (thread_act_t)fact->thr_acts.next; /* Go to the next one */ + fact = (thread_act_t)fact->task_threads.next; /* Go to the next one */ } if(!act) { /* Couldn't find a bluebox */ @@ -170,11 +170,11 @@ void bbSetRupt(ReturnHandler *rh, thread_act_t act) { bttd->InterruptControlWord = (bttd->InterruptControlWord & ~kInterruptStateMask) | (kInPseudoKernel << kInterruptStateShift); - bttd->exceptionInfo.srr0 = sv->save_srr0; /* Save the current PC */ - sv->save_srr0 = bttd->InterruptVector; /* Set the new PC */ - bttd->exceptionInfo.sprg1 = sv->save_r1; /* Save the original R1 */ - sv->save_r1 = bttd->exceptionInfo.sprg0; /* Set the new R1 */ - bttd->exceptionInfo.srr1 = sv->save_srr1; /* Save the original MSR */ + bttd->exceptionInfo.srr0 = (unsigned int)sv->save_srr0; /* Save the current PC */ + sv->save_srr0 = (uint64_t)act->mact.bbInterrupt; /* Set the new PC */ + bttd->exceptionInfo.sprg1 = (unsigned int)sv->save_r1; /* Save the original R1 */ + sv->save_r1 = (uint64_t)bttd->exceptionInfo.sprg0; /* Set the new R1 */ + bttd->exceptionInfo.srr1 = (unsigned int)sv->save_srr1; /* Save the original MSR */ sv->save_srr1 &= ~(MASK(MSR_BE)|MASK(MSR_SE)); /* Clear SE|BE bits in MSR */ act->mact.specFlags &= ~bbNoMachSC; /* reactivate Mach SCs */ disable_preemption(); /* Don't move us around */ @@ -215,8 +215,10 @@ kern_return_t enable_bluebox( ) { thread_t th; - vm_offset_t kerndescaddr, physdescaddr, origdescoffset; + vm_offset_t kerndescaddr, origdescoffset; kern_return_t ret; + ppnum_t physdescpage; + BTTD_t *bttd; th = current_thread(); /* Get our thread */ @@ -242,8 +244,8 @@ kern_return_t enable_bluebox( return KERN_FAILURE; } - physdescaddr = /* Get the physical address of the page */ - pmap_extract(th->top_act->map->pmap, (vm_offset_t) Desc_TableStart); + physdescpage = /* Get the physical page number of the page */ + pmap_find_phys(th->top_act->map->pmap, (addr64_t)Desc_TableStart); ret = kmem_alloc_pageable(kernel_map, &kerndescaddr, PAGE_SIZE); /* Find a virtual address to use */ if(ret != KERN_SUCCESS) { /* Could we get an address? 
*/ @@ -255,19 +257,25 @@ kern_return_t enable_bluebox( } (void) pmap_enter(kernel_pmap, /* Map this into the kernel */ - kerndescaddr, physdescaddr, VM_PROT_READ|VM_PROT_WRITE, + kerndescaddr, physdescpage, VM_PROT_READ|VM_PROT_WRITE, VM_WIMG_USE_DEFAULT, TRUE); + bttd = (BTTD_t *)kerndescaddr; /* Get the address in a convienient spot */ + th->top_act->mact.bbDescAddr = (unsigned int)kerndescaddr+origdescoffset; /* Set kernel address of the table */ th->top_act->mact.bbUserDA = (unsigned int)Desc_TableStart; /* Set user address of the table */ th->top_act->mact.bbTableStart = (unsigned int)TWI_TableStart; /* Set address of the trap table */ th->top_act->mact.bbTaskID = (unsigned int)taskID; /* Assign opaque task ID */ th->top_act->mact.bbTaskEnv = 0; /* Clean task environment data */ th->top_act->mact.emPendRupts = 0; /* Clean pending 'rupt count */ + th->top_act->mact.bbTrap = bttd->TrapVector; /* Remember trap vector */ + th->top_act->mact.bbSysCall = bttd->SysCallVector; /* Remember syscall vector */ + th->top_act->mact.bbInterrupt = bttd->InterruptVector; /* Remember interrupt vector */ + th->top_act->mact.bbPending = bttd->PendingIntVector; /* Remember pending vector */ th->top_act->mact.specFlags &= ~(bbNoMachSC | bbPreemptive); /* Make sure mach SCs are enabled and we are not marked preemptive */ th->top_act->mact.specFlags |= bbThread; /* Set that we are Classic thread */ - if(!(((BTTD_t *)kerndescaddr)->InterruptVector)) { /* See if this is a preemptive (MP) BlueBox thread */ + if(!(bttd->InterruptVector)) { /* See if this is a preemptive (MP) BlueBox thread */ th->top_act->mact.specFlags |= bbPreemptive; /* Yes, remember it */ } @@ -371,17 +379,17 @@ int bb_settaskenv( struct savearea *save ) task = current_task(); /* Figure out who our task is */ task_lock(task); /* Lock our task */ - fact = (thread_act_t)task->thr_acts.next; /* Get the first activation on task */ + fact = (thread_act_t)task->threads.next; /* Get the first activation on task */ act = 0; /* Pretend we didn't find it yet */ - for(i = 0; i < task->thr_act_count; i++) { /* Scan the whole list */ + for(i = 0; i < task->thread_count; i++) { /* Scan the whole list */ if(fact->mact.bbDescAddr) { /* Is this a Blue thread? */ if ( fact->mact.bbTaskID == save->save_r3 ) { /* Is this the task we are looking for? */ act = fact; /* Yeah... */ break; /* Found it, Bail the loop... */ } } - fact = (thread_act_t)fact->thr_acts.next; /* Go to the next one */ + fact = (thread_act_t)fact->task_threads.next; /* Go to the next one */ } if ( !act || !act->active) { @@ -395,7 +403,7 @@ int bb_settaskenv( struct savearea *save ) act->mact.bbTaskEnv = save->save_r4; if(act == current_act()) { /* Are we setting our own? 
*/ disable_preemption(); /* Don't move us around */ - per_proc_info[cpu_number()].spcFlags = act->mact.specFlags; /* Copy the flags */ + per_proc_info[cpu_number()].ppbbTaskEnv = act->mact.bbTaskEnv; /* Remember the environment */ enable_preemption(); /* Ok to move us around */ } diff --git a/osfmk/ppc/aligned_data.s b/osfmk/ppc/aligned_data.s index a1d11367d..a1c091cd9 100644 --- a/osfmk/ppc/aligned_data.s +++ b/osfmk/ppc/aligned_data.s @@ -49,6 +49,8 @@ #include #include #include +#include +#include #include ; @@ -59,19 +61,19 @@ .data -/* 1024-byte aligned areas */ +/* 4096-byte aligned areas */ .globl EXT(per_proc_info) - .align 10 + .align 12 EXT(per_proc_info): ; Per processor data area - .fill (ppSize*NCPUS)/4,4,0 ; (filled with 0s) + .space (ppSize*NCPUS),0 ; (filled with 0s) /* 512-byte aligned areas */ .globl EXT(kernel_pmap_store) ; This is the kernel_pmap .align 8 EXT(kernel_pmap_store): - .set .,.+PMAP_SIZE + .set .,.+pmapSize /* 256-byte aligned areas */ @@ -86,11 +88,6 @@ EXT(GratefulDebWork): ; Enough for 2 rows of 8 chars of 16-pixel wide 32- debstash: .set .,.+256 - .globl EXT(hw_counts) ; Counter banks per processor - .align 8 -EXT(hw_counts): - .set .,.+(NCPUS*256) - #if PREEMPTSTACK ; @@ -106,37 +103,11 @@ EXT(DBGpreempt): /* 128-byte aligned areas */ - .globl EXT(saveanchor) - .align 7 -EXT(saveanchor): - .set .,.+SVsize - .globl EXT(mapCtl) .align 7 EXT(mapCtl): .set .,.+mapcsize - .globl EXT(trcWork) - .align 7 -EXT(trcWork): - .long EXT(traceTableBeg) ; The next trace entry to use -#if DEBUG -/* .long 0x02000000 */ /* Only alignment exceptions enabled */ - .long 0xFFFFFFFF /* All enabled */ -/* .long 0xFBBFFFFF */ /* EXT and DEC disabled */ -/* .long 0xFFBFFFFF */ /* DEC disabled */ -#else - .long 0x00000000 ; All disabled on non-debug systems -#endif - .long EXT(traceTableBeg) ; Start of the trace table - .long EXT(traceTableEnd) ; End (wrap point) of the trace - .long 0 ; Saved mask while in debugger - - .long 0 - .long 0 - .long 0 - - .globl fwdisplock .align 7 fwdisplock: @@ -197,19 +168,6 @@ EXT(QNaNbarbarian): .long 0x7FFFDEAD /* This is a quiet not-a-number which is a "known" debug value */ .long 0x7FFFDEAD /* This is a quiet not-a-number which is a "known" debug value */ - .globl EXT(dgWork) - .align 5 -EXT(dgWork): - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - - /* 8-byte aligned areas */ .globl EXT(FloatInit) @@ -239,3 +197,41 @@ EXT(dbfloats): .align 3 EXT(dbspecrs): .set .,.+(80*4) + +/* + * Interrupt and debug stacks go here + */ + + .align PPC_PGSHIFT + .globl EXT(FixedStackStart) +EXT(FixedStackStart): + + .globl EXT(intstack) +EXT(intstack): + .set .,.+INTSTACK_SIZE*NCPUS + +/* Debugger stack - used by the debugger if present */ +/* NOTE!!! 
Keep the debugger stack right after the interrupt stack */ + + .globl EXT(debstack) +EXT(debstack): + .set ., .+KERNEL_STACK_SIZE*NCPUS + + .globl EXT(FixedStackEnd) +EXT(FixedStackEnd): + + .align ALIGN + .globl EXT(intstack_top_ss) +EXT(intstack_top_ss): + .long EXT(intstack)+INTSTACK_SIZE-FM_SIZE /* intstack_top_ss points to the top of interrupt stack */ + + .align ALIGN + .globl EXT(debstack_top_ss) +EXT(debstack_top_ss): + + .long EXT(debstack)+KERNEL_STACK_SIZE-FM_SIZE /* debstack_top_ss points to the top of debug stack */ + + .globl EXT(debstackptr) +EXT(debstackptr): + .long EXT(debstack)+KERNEL_STACK_SIZE-FM_SIZE + diff --git a/osfmk/ppc/asm.h b/osfmk/ppc/asm.h index 8b543b716..188eaa44a 100644 --- a/osfmk/ppc/asm.h +++ b/osfmk/ppc/asm.h @@ -68,6 +68,8 @@ #define sprg1 273 #define sprg2 274 #define sprg3 275 +#define scomc 276 +#define scomd 277 #define pvr 287 #define IBAT0U 528 @@ -105,6 +107,8 @@ #define dbat3l 543 #define ummcr2 928 /* Performance monitor control */ +#define upmc5 929 /* Performance monitor counter */ +#define upmc6 930 /* Performance monitor counter */ #define ubamr 935 /* Performance monitor mask */ #define ummcr0 936 /* Performance monitor control */ #define upmc1 937 /* Performance monitor counter */ @@ -115,6 +119,8 @@ #define upmc4 942 /* Performance monitor counter */ #define usda 943 /* User sampled data address */ #define mmcr2 944 /* Performance monitor control */ +#define pmc5 945 /* Performance monitor counter */ +#define pmc6 946 /* Performance monitor counter */ #define bamr 951 /* Performance monitor mask */ #define mmcr0 952 #define pmc1 953 @@ -125,8 +131,11 @@ #define pmc4 958 #define sda 959 /* Sampled data address */ #define dmiss 976 /* ea that missed */ +#define trig0 976 #define dcmp 977 /* compare value for the va that missed */ +#define trig1 977 #define hash1 978 /* pointer to first hash pteg */ +#define trig2 978 #define hash2 979 /* pointer to second hash pteg */ #define imiss 980 /* ea that missed */ #define tlbmiss 980 /* ea that missed */ @@ -145,8 +154,10 @@ #define iabr 1010 /* Instruction address breakpoint register */ #define ictrl 1011 /* Instruction Cache Control */ #define ldstdb 1012 /* Load/Store Debug */ +#define hid4 1012 /* Misc stuff */ #define dabr 1013 /* Data address breakpoint register */ #define msscr0 1014 /* Memory subsystem control */ +#define hid5 1014 /* Misc stuff */ #define msscr1 1015 /* Memory subsystem debug */ #define msssr0 1015 /* Memory Subsystem Status */ #define ldstcr 1016 /* Load/Store Status/Control */ @@ -159,6 +170,59 @@ #define thrm3 1022 /* Thermal management 3 */ #define pir 1023 /* Processor ID Register */ + +/* SPR registers (64-bit, PPC970 specific) */ + +#define scomc_gp 276 +#define scomd_gp 277 + +#define hsprg0 304 +#define hsprg1 305 +#define hdec 310 +#define hior 311 +#define rmor 312 +#define hrmor 313 +#define hsrr0 314 +#define hsrr1 315 +#define lpcr 318 +#define lpidr 319 + +#define ummcra_gp 770 +#define upmc1_gp 771 +#define upmc2_gp 772 +#define upmc3_gp 773 +#define upmc4_gp 774 +#define upmc5_gp 775 +#define upmc6_gp 776 +#define upmc7_gp 777 +#define upmc8_gp 778 +#define ummcr0_gp 779 +#define usiar_gp 780 +#define usdar_gp 781 +#define ummcr1_gp 782 +#define uimc_gp 783 + +#define mmcra_gp 786 +#define pmc1_gp 787 +#define pmc2_gp 788 +#define pmc3_gp 789 +#define pmc4_gp 790 +#define pmc5_gp 791 +#define pmc6_gp 792 +#define pmc7_gp 793 +#define pmc8_gp 794 +#define mmcr0_gp 795 +#define siar_gp 796 +#define sdar_gp 797 +#define mmcr1_gp 798 +#define imc_gp 799 + 
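
The SPR numbers added above (upmc1_gp through imc_gp, with trig0_gp and friends below) are only usable as immediates in mfspr/mtspr encodings, so C-level wrappers are normally macros. A minimal sketch assuming GCC-style inline assembly; mfspr_i is an illustrative name, not an xnu macro:

/* Read an SPR whose number is a compile-time constant, e.g.
 * mfspr_i(787) for pmc1_gp on a PPC970.  The "i" constraint is needed
 * because the SPR number is encoded in the instruction itself. */
#define mfspr_i(spr)                                                \
({                                                                  \
    unsigned long __v;                                              \
    __asm__ volatile ("mfspr %0, %1" : "=r" (__v) : "i" (spr));     \
    __v;                                                            \
})
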
+#define trig0_gp 976 +#define trig1_gp 977 +#define trig2_gp 978 + +#define dabrx 1015 + ; hid0 bits #define emcp 0 #define emcpm 0x80000000 @@ -200,6 +264,7 @@ #define ilockm 0x00002000 #define dlock 19 #define dlockm 0x00001000 +#define exttben 19 #define icfi 20 #define icfim 0x00000800 #define dcfi 21 @@ -230,14 +295,17 @@ #define hid1prem 0x06000000 #define hid1pi0 14 #define hid1pi0m 0x00020000 +#define hid1FCPErr 14 #define hid1ps 15 +#define hid1FCD0PErr 15 #define hid1psm 0x00010000 #define hid1pc0 0x0000F800 #define hid1pr0 0x00000600 #define hid1pc1 0x000000F8 #define hid1pc0 0x0000F800 #define hid1pr1 0x00000006 - +#define hid1FCD1PErr 16 +#define hid1FIERATErr 17 ; hid2 bits #define hid2vmin 18 @@ -322,6 +390,80 @@ #define apmck 15 #define apmckm 0x00010000 +#define mckIFUE 42 +#define mckLDST 43 +#define mckXCs 44 +#define mckXCe 45 +#define mckNoErr 0 +#define mckIFSLBPE 1 +#define mckIFTLBPE 2 +#define mckIFTLBUE 3 + +; dsisr bits +#define mckUEdfr 16 +#define mckUETwDfr 17 +#define mckL1DCPE 18 +#define mckL1DTPE 19 +#define mckDEPE 20 +#define mckTLBPE 21 +#define mckSLBPE 23 + +; Async MCK source +#define AsyMCKSrc 0x0226 +#define AsyMCKRSrc 0x0227 +#define AsyMCKext 0 +#define AsyMCKfir 1 +#define AsyMCKhri 2 +#define AsyMCKdbg 3 +#define AsyMCKncstp 4 + +; Core FIR +#define cFIR 0x0300 +#define cFIRrst 0x0310 +#define cFIRICachePE 0 +#define cFIRITagPE0 1 +#define cFIRITagPE1 2 +#define cFIRIEratPE 3 +#define cFIRIFUL2UE 4 +#define cFIRIFUCS 5 +#define cFIRDCachePE 6 +#define cFIRDTagPE 7 +#define cFIRDEratPE 8 +#define cFIRTLBPE 9 +#define cFIRSLBPE 10 +#define cFIRSL2UE 11 + +; Core Error Inject +#define CoreErrI 0x0350 +#define CoreIFU 0 +#define CoreLSU 1 +#define CoreRate0 2 +#define CoreRate1 3 +#define CoreOnce 0 +#define CoreSolid 2 +#define CorePulse 3 + +; GUS Mode Register +#define GUSModeReg 0x0430 +#define GUSMdmapen 0x00008000 +#define GUSMstgtdis 0x00000080 +#define GUSMstgttim 0x00000038 +#define GUSMstgttoff 0x00000004 + +; HID4 +#define hid4RMCI 23 +#define hid4FAlgn 24 +#define hid4DisPF 25 +#define hid4ResPF 26 +#define hid4EnSPTW 27 +#define hid4L1DCFI 28 +#define hid4DisDERpg 31 +#define hid4DisDCTpg 36 +#define hid4DisDCpg 41 +#define hid4DisTLBpg 48 +#define hid4DisSLBpg 54 +#define hid4MckEIEna 55 + ; L2 cache control #define l2e 0 #define l2em 0x80000000 @@ -486,6 +628,13 @@ #define cr7_so 31 #define cr7_un 31 +#define slbESID 36 +#define slbKey 52 +#define slbIndex 52 +#define slbV 36 +#define slbVm 0x08000000 +#define slbCnt 64 + /* * Macros to access high and low word values of an address */ @@ -565,9 +714,7 @@ #define data16 .byte 0x66 #define addr16 .byte 0x67 -#if !GPROF #define MCOUNT -#endif /* GPROF */ #define ELF_FUNC(x) #define ELF_DATA(x) diff --git a/osfmk/ppc/ast.h b/osfmk/ppc/ast.h index 3310e203c..ef16b94bc 100644 --- a/osfmk/ppc/ast.h +++ b/osfmk/ppc/ast.h @@ -33,8 +33,6 @@ #ifndef _PPC_AST_H_ #define _PPC_AST_H_ -/* - * Empty file - use the machine-independent versions. - */ +#define AST_PPC_CHUD 0x80000000 #endif /* _PPC_AST_H_ */ diff --git a/osfmk/ppc/atomic_switch.s b/osfmk/ppc/atomic_switch.s index 7d9579efa..75d89bc89 100644 --- a/osfmk/ppc/atomic_switch.s +++ b/osfmk/ppc/atomic_switch.s @@ -63,7 +63,7 @@ ENTRY(atomic_switch_syscall, TAG_NO_FRAME_USED) * Note: the BlueBox fast path system calls (-1 and -2) we handled as * an ultra-fast trap in lowmem_vectors. 
*/ - li r5, BTTD_SYSCALL_VECTOR + lwz r5,bbSysCall(r13) ; Pick up the syscall vector b .L_CallPseudoKernel ENTRY(atomic_switch_trap, TAG_NO_FRAME_USED) @@ -76,7 +76,7 @@ ENTRY(atomic_switch_trap, TAG_NO_FRAME_USED) cmplwi cr7,r24,BB_RFI_TRAP ; Is this an RFI? beq cr7,.L_ExitPseudoKernel ; Yes... - li r5, BTTD_TRAP_VECTOR + lwz r5,bbTrap(r13) ; Pick up the trap vector /****************************************************************************** * void CallPseudoKernel ( int vector, thread_act_t * act, BEDA_t * beda, savearea *sv ) @@ -137,20 +137,19 @@ ENTRY(atomic_switch_trap, TAG_NO_FRAME_USED) .L_CallFromPreemptiveThread: - lwz r1,savesrr0(r4) ; Get current PC - lwz r2,saver1(r4) ; Get current R1 - lwz r3,savesrr1(r4) ; Get current MSR + lwz r1,savesrr0+4(r4) ; Get current PC + lwz r2,saver1+4(r4) ; Get current R1 + lwz r3,savesrr1+4(r4) ; Get current MSR stw r1,BEDA_SRR0(r26) ; Save current PC rlwinm r3,r3,0,MSR_BE_BIT+1,MSR_SE_BIT-1 ; Clear SE|BE bits in MSR stw r2,BEDA_SPRG1(r26) ; Save current R1 - stw r3,savesrr1(r4) ; Load new MSR + stw r3,savesrr1+4(r4) ; Load new MSR lwz r1,BEDA_SPRG0(r26) ; Get replacement R1 - lwzx r2,r5,r6 ; Load vector address + stw r5,savesrr0+4(r4) ; Save vector as PC stw r3,BEDA_SRR1(r26) ; Update saved MSR - stw r1,saver1(r4) ; Load up new R1 - stw r2,savesrr0(r4) ; Save vector as PC + stw r1,saver1+4(r4) ; Load up new R1 b EXT(fastexit) ; Go back and take the fast path exit... @@ -186,7 +185,7 @@ ENTRY(atomic_switch_trap, TAG_NO_FRAME_USED) ori r7,r7,(0x8000 >> (bbNoMachSCbit - 16)) ; Disable Mach SCs for Blue Box cmpwi r2,0 ; Is this a preemptive thread - stw r1,savectr(r4) ; Update CTR + stw r1,savectr+4(r4) ; Update CTR beq .L_ExitFromPreemptiveThread lwz r8,BTTD_INTCONTROLWORD(r6) ; Get ICW @@ -202,11 +201,11 @@ ENTRY(atomic_switch_trap, TAG_NO_FRAME_USED) beq cr1,.L_ExitToSystemContext ; We are in system context beq .L_ExitUpdateRuptControlWord ; We do not have a pending interrupt - lwz r2,saver1(r4) ; Get current R1 + lwz r2,saver1+4(r4) ; Get current R1 lwz r1,BEDA_SPRG0(r26) ; Get replacement R1 stw r2,BEDA_SPRG1(r26) ; Save current R1 - stw r1,saver1(r4) ; Load up new R1 - lwz r3,BTTD_PENDINGINT_VECTOR(r6) ; Get pending interrupt PC + stw r1,saver1+4(r4) ; Load up new R1 + lwz r3,bbPending(r13) ; Get pending interrupt PC b .L_ExitAbortExit ; Abort and Exit .L_ExitToSystemContext: @@ -218,17 +217,17 @@ ENTRY(atomic_switch_trap, TAG_NO_FRAME_USED) .L_ExitFromPreemptiveThread: mfsprg r3,0 ; Get the per_proc - lwz r2,savesrr1(r4) ; Get current MSR + lwz r2,savesrr1+4(r4) ; Get current MSR lwz r1,BEDA_SRR1(r26) ; Get new MSR stw r7,ACT_MACT_SPF(r13) ; Update special flags stw r7,spcFlags(r3) ; Update per_proc version rlwimi r2,r1,0,MSR_FE0_BIT,MSR_FE1_BIT ; Insert FE0,FE1,SE,BE bits lwz r3,BEDA_SRR0(r26) ; Get new PC - stw r2,savesrr1(r4) ; Update MSR + stw r2,savesrr1+4(r4) ; Update MSR .L_ExitAbortExit: - stw r3,savesrr0(r4) ; Update PC + stw r3,savesrr0+4(r4) ; Update PC b EXT(fastexit) ; Go back and take the fast path exit... diff --git a/osfmk/ppc/bcopy.s b/osfmk/ppc/bcopy.s index 176b95715..88bc71fb1 100644 --- a/osfmk/ppc/bcopy.s +++ b/osfmk/ppc/bcopy.s @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. 
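
The savesrr0(r4) to savesrr0+4(r4) changes in the atomic_switch.s hunks above reflect savearea slots widening to 64 bits: on big-endian PPC the low 32-bit word of a 64-bit field sits at byte offset +4. A one-line C model of the access; low_word_be is an illustrative name:

#include <stdint.h>

/* On big-endian PPC, truncating a 64-bit savearea slot to 32 bits is
 * the same as loading from byte offset +4, which is what
 * "lwz r1,savesrr0+4(r4)" now does. */
static uint32_t low_word_be(const uint64_t *slot)
{
    return (uint32_t)*slot;
}
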
* * @APPLE_LICENSE_HEADER_START@ * @@ -30,13 +30,22 @@ ; #include #include +#include ; Use CR5_lt to indicate non-cached #define noncache 20 + ; Use CR5_gt to indicate that we need to turn data translation back on #define fixxlate 21 -; Use CR5_eq to indicate that we need to invalidate bats -#define killbats 22 + +; Use CR5_eq to indicate that we need to invalidate bats (if 32-bit) or turn off +; 64-bit mode (if 64-bit) before returning to our caller. We overload the +; bit to reduce the number of conditional branches at bcopy exit. +#define restorex 22 + +; Use CR5_so to indicate that we need to restore real-mode cachability +; Only needed on 64-bit machines +#define flipcache 23 ; ; bcopy_nc(from, to, nbytes) @@ -56,19 +65,24 @@ LEXT(bcopy_nc) ; ; void bcopy_physvir(from, to, nbytes) ; Attempt to copy physically addressed memory with translation on if conditions are met. -; Otherwise do a normal bcopy_phys. +; Otherwise do a normal bcopy_phys. This routine is used because some 32-bit processors +; are very slow doing real-mode (translation off) copies, so we set up temporary BATs +; for the passed phys addrs and do the copy with translation on. ; ; Rules are: neither source nor destination can cross a page. -; No accesses above the 2GB line (I/O or ROM). ; -; Interrupts must be disabled throughout the copy when this is called - +; Interrupts must be disabled throughout the copy when this is called. ; To do this, we build a ; 128 DBAT for both the source and sink. If both are the same, only one is ; loaded. We do not touch the IBATs, so there is no issue if either physical page ; address is the same as the virtual address of the instructions we are executing. ; -; At the end, we invalidate the used DBATs and reenable interrupts. +; At the end, we invalidate the used DBATs. +; +; Note that the address parameters are long longs. We will transform these to 64-bit +; values. Note that on 32-bit architectures that this will ignore the high half of the +; passed in value. This should be ok since we can not have any bigger than 32 bit addresses +; there anyhow. ; ; Note, this one will not work in user state ; @@ -78,22 +92,32 @@ LEXT(bcopy_nc) LEXT(bcopy_physvir) - addic. r0,r5,-1 ; Get length - 1 + crclr flipcache ; (HACK) No cache flip needed + mfsprg r8,2 ; get processor feature flags + rlwinm r3,r3,0,1,0 ; Duplicate high half of long long paddr into top of reg + addic. r0,r7,-1 ; Get length - 1 + rlwimi r3,r4,0,0,31 ; Combine bottom of long long to full 64-bits add r11,r3,r0 ; Point to last byte of sink - cmplw cr1,r3,r4 ; Does source == sink? + rlwinm r4,r5,0,1,0 ; Duplicate high half of long long paddr into top of reg + mtcrf 0x02,r8 ; move pf64Bit to cr6 so we can test + rlwimi r4,r6,0,0,31 ; Combine bottom of long long to full 64-bits + mr r5,r7 ; Get the length into the right register + cmplw cr1,r3,r4 ; Does source == sink? + bt++ pf64Bitb,bcopy_phys1 ; if 64-bit processor, use standard routine (no BATs) add r12,r4,r0 ; Point to last byte of source bltlr- ; Bail if length is 0 or way too big xor r7,r11,r3 ; See if we went to next page xor r8,r12,r4 ; See if we went to next page or r0,r7,r8 ; Combine wrap - li r9,((PTE_WIMG_CB_CACHED_COHERENT<<3)|2) ; Set default attributes +// li r9,((PTE_WIMG_CB_CACHED_COHERENT<<3)|2) ; Set default attributes + li r9,((2<<3)|2) ; Set default attributes rlwinm. r0,r0,0,0,19 ; Did we overflow a page? li r7,2 ; Set validity flags li r8,2 ; Set validity flags - bne- EXT(bcopy_phys) ; Overflowed page, do normal physical copy... 
+ bne- bcopy_phys1 ; Overflowed page, do normal physical copy... - crset killbats ; Remember to trash BATs on the way out + crset restorex ; Remember to trash BATs on the way out rlwimi r11,r9,0,15,31 ; Set sink lower DBAT value rlwimi r12,r9,0,15,31 ; Set source lower DBAT value rlwimi r7,r11,0,0,14 ; Set sink upper DBAT value @@ -112,41 +136,123 @@ LEXT(bcopy_physvir) bcpvsame: mr r6,r3 ; Set source crclr noncache ; Set cached + crclr fixxlate ; Set translation already ok - b copyit ; Go copy it... - + b copyit32 ; Go copy it... ; ; void bcopy_phys(from, to, nbytes) ; Turns off data translation before the copy. Note, this one will -; not work in user state +; not work in user state. This routine is used on 32 and 64-bit +; machines. +; +; Note that the address parameters are long longs. We will transform these to 64-bit +; values. Note that on 32-bit architectures that this will ignore the high half of the +; passed in value. This should be ok since we can not have any bigger than 32 bit addresses +; there anyhow. +; +; Also note that you probably will not be happy if either the sink or source spans across the +; boundary between RAM and I/O space. Good chance of hanging the machine and this code +; will not check, so be careful. ; .align 5 .globl EXT(bcopy_phys) LEXT(bcopy_phys) - + crclr flipcache ; (HACK) No cache flip needed + rlwinm r3,r3,0,1,0 ; Duplicate high half of long long paddr into top of reg + mfsprg r8,2 ; get processor feature flags + rlwimi r3,r4,0,0,31 ; Combine bottom of long long to full 64-bits + rlwinm r4,r5,0,1,0 ; Duplicate high half of long long paddr into top of reg + mtcrf 0x02,r8 ; move pf64Bit to cr6 so we can test + rlwimi r4,r6,0,0,31 ; Combine bottom of long long to full 64-bits + mr r5,r7 ; Get the length into the right register + +bcopy_phys1: ; enter from bcopy_physvir with pf64Bit already in cr6 mfmsr r9 ; Get the MSR - crclr noncache ; Set cached - rlwinm. r8,r9,0,MSR_DR_BIT,MSR_DR_BIT ; Is data translation on? - - cmplw cr1,r4,r3 ; Compare "to" and "from" + bt++ pf64Bitb,bcopy_phys64 ; skip if 64-bit (only they take hint) + +; 32-bit CPUs + + sub. r0,r3,r4 ; to==from? + rlwinm r8,r9,0,MSR_DR_BIT,MSR_DR_BIT ; was translation on? + cmpwi cr1,r8,0 ; set cr1 beq if translation was off + oris r8,r8,hi16(MASK(MSR_VEC)) ; Get vector enable cmplwi cr7,r5,0 ; Check if we have a 0 length + beqlr- ; bail if to==from + ori r8,r8,lo16(MASK(MSR_FP)) ; Get FP mr r6,r3 ; Set source - beqlr- cr1 ; Bail if "to" and "from" are the same - xor r9,r9,r8 ; Turn off translation if it is on (should be) + andc r9,r9,r8 ; Turn off translation if it is on (should be) and FP, VEC beqlr- cr7 ; Bail if length is 0 - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - crclr killbats ; Make sure we do not trash BATs on the way out - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off + crclr restorex ; Make sure we do not trash BATs on the way out mtmsr r9 ; Set DR translation off isync ; Wait for it - crnot fixxlate,cr0_eq ; Remember to turn on translation if it was - b copyit ; Go copy it... + crnot fixxlate,cr1_eq ; Remember to turn on translation if it was + b copyit32 ; Go copy it... + +; 64-bit: turn DR off and SF on, remember if we need to restore on way out. + +bcopy_phys64: ; r9 = MSR + + srdi r2,r3,31 ; (HACK) Get a 1 if source is in I/O memory + srdi. r0,r9,63-MSR_SF_BIT ; set cr0 beq on if SF was off when we were called + rlwinm r8,r9,MSR_DR_BIT+1,31,31 ; r8 <- DR bit right justified + cmpld cr1,r3,r4 ; to==from? 
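
bcopy_phys and bcopy_physvir now take long long physical addresses split across 32-bit register pairs, and the rlwinm/rlwimi pairs above simply reassemble the 64-bit value (32-bit processors effectively keep only the low half). Roughly, in C; join_addr64 is an illustrative name, and the addr64_t typedef is assumed here to be a plain 64-bit integer:

#include <stdint.h>

typedef uint64_t addr64_t;   /* assumption: matches the kernel typedef */

/* Model of the register-pair reassembly at the top of bcopy_phys:
 * the source address arrives as (high, low) 32-bit halves in r3/r4. */
static addr64_t join_addr64(uint32_t hi, uint32_t lo)
{
    return ((addr64_t)hi << 32) | lo;
}
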
+ li r0,1 ; Note - we use this in a couple places below + lis r6,hi16(MASK(MSR_VEC)) ; Get vector enable + cmpwi cr7,r5,0 ; length==0 ? + ori r6,r6,lo16(MASK(MSR_FP)|MASK(MSR_DR)) ; Add in FP and DR + beqlr-- cr1 ; bail if to==from + srdi r10,r4,31 ; (HACK) Get a 1 if sink is in I/O memory + rldimi r9,r0,63,MSR_SF_BIT ; set SF on + beqlr-- cr7 ; bail if length==0 + andc r9,r9,r6 ; turn DR, VEC, FP off + cmpwi cr1,r8,0 ; was DR on? + crmove restorex,cr0_eq ; if SF was off, remember to turn back off before we return + mtmsrd r9 ; turn 64-bit addressing on, data translation off + cmpldi cr0,r2,1 ; (HACK) Is source in I/O memory? + isync ; wait for it to happen + mr r6,r3 ; Set source + cmpldi cr7,r10,1 ; (HACK) Is sink in I/O memory? + crnot fixxlate,cr1_eq ; if DR was on, remember to turn back on before we return + + cror flipcache,cr0_eq,cr7_eq ; (HACK) See if either source or sink is in I/O area + + rlwinm r10,r9,MSR_EE_BIT+1,31,31 ; (HACK GLORIOUS HACK) Isolate the EE bit + sldi r11,r0,31-MSR_EE_BIT ; (HACK GLORIOUS HACK)) Get a mask for the EE bit + sldi r0,r0,32+8 ; (HACK) Get the right bit to turn off caching + bf++ flipcache,copyit64 ; (HACK) No need to mess with caching... + +; +; HACK GLORIOUS HACK - when we force of caching, we need to also force off +; interruptions. We are out of CR bits, so we need to stash the entry EE +; somewheres. It is in the XER.... We NEED to change this!!!! +; + + mtxer r10 ; (HACK GLORIOUS HACK) Remember EE + andc r9,r9,r11 ; (HACK GLORIOUS HACK) Turn off EE bit + mfspr r2,hid4 ; (HACK) Get HID4 + crset noncache ; (HACK) Set non-cached + mtmsrd r9 ; (HACK GLORIOUS HACK) Force off EE + or r2,r2,r0 ; (HACK) Set bit to make real accesses cache-inhibited + sync ; (HACK) Sync up + li r0,1 + mtspr hid4,r2 ; (HACK) Make real accesses cache-inhibited + isync ; (HACK) Toss prefetches + + lis r12,0xE000 ; (HACK) Get the unlikeliest ESID possible + srdi r12,r12,1 ; (HACK) Make 0x7FFFFFFFF0000000 + slbie r12 ; (HACK) Make sure the ERAT is cleared + + sync ; (HACK) + isync ; (HACK) + + b copyit64 + ; ; void bcopy(from, to, nbytes) @@ -159,14 +265,19 @@ LEXT(bcopy) crclr noncache ; Set cached -bcpswap: cmplw cr1,r4,r3 ; Compare "to" and "from" - mr. r5,r5 ; Check if we have a 0 length +bcpswap: + crclr flipcache ; (HACK) No cache flip needed + mfsprg r8,2 ; get processor feature flags + sub. r0,r4,r3 ; test for to==from in mode-independent way + mtcrf 0x02,r8 ; move pf64Bit to cr6 so we can test + cmpwi cr1,r5,0 ; Check if we have a 0 length + crclr restorex ; Make sure we do not trash BATs on the way out mr r6,r3 ; Set source - crclr killbats ; Make sure we do not trash BATs on the way out - beqlr- cr1 ; Bail if "to" and "from" are the same - beqlr- ; Bail if length is 0 crclr fixxlate ; Set translation already ok - b copyit ; Go copy it... + beqlr- ; Bail if "to" and "from" are the same + beqlr- cr1 ; Bail if length is 0 + bt++ pf64Bitb,copyit64 ; handle 64-bit processor + b copyit32 ; Go copy it... ; ; When we move the memory, forward overlays must be handled. We @@ -174,19 +285,32 @@ bcpswap: cmplw cr1,r4,r3 ; Compare "to" and "from" ; We need to preserve R3 because it needs to be returned for memcpy. ; We can be interrupted and lose control here. ; -; There is no stack, so in order to used floating point, we would -; need to take the FP exception. Any potential gains by using FP +; There is no stack, so in order to use vectors, we would +; need to take the vector exception. Any potential gains by using vectors ; would be more than eaten up by this. 
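
bcpswap above picks a copy engine by testing the pf64Bit feature flag fetched from SPRG2 ("mfsprg r8,2", then "bt++ pf64Bitb,copyit64"). In C the dispatch amounts to the sketch below; the flag value and function signatures are illustrative, not xnu's:

#include <stdint.h>

#define PF_64BIT 0x40000000u   /* illustrative bit, not xnu's pf64Bit value */

extern void copyit32(const void *src, void *dst, uint32_t len);
extern void copyit64(const void *src, void *dst, uint32_t len);

/* Dispatch on the per-processor feature flags, as bcpswap does. */
static void bcopy_dispatch(uint32_t pf_flags,
                           const void *src, void *dst, uint32_t len)
{
    if (pf_flags & PF_64BIT)
        copyit64(src, dst, len);   /* 128-byte cache lines */
    else
        copyit32(src, dst, len);   /* 32-byte cache lines */
}
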
; -; Later, we should used Altivec for large moves. +; NOTE: this code is called in three "modes": +; - on 32-bit processors (32-byte cache line) +; - on 64-bit processors running in 32-bit mode (128-byte cache line) +; - on 64-bit processors running in 64-bit mode (128-byte cache line) +; +; ALSO NOTE: bcopy is called from copyin and copyout etc +; with the "thread_recover" ptr set. This means bcopy must not set up a +; stack frame or touch non-volatile registers, and also means that it +; cannot rely on turning off interrupts, because we expect to get DSIs +; and have execution aborted by a "longjmp" to the thread_recover +; routine. ; .align 5 .globl EXT(memcpy) - + ; NB: memcpy is only called in 32-bit mode, albeit on both 32- and 64-bit + ; processors... LEXT(memcpy) - + crclr flipcache ; (HACK) No cache flip needed + mfsprg r8,2 ; get processor feature flags cmplw cr1,r3,r4 ; "to" and "from" the same? + mtcrf 0x02,r8 ; move pf64Bit to cr6 so we can test mr r6,r4 ; Set the "from" mr. r5,r5 ; Length zero? crclr noncache ; Set cached @@ -194,9 +318,10 @@ LEXT(memcpy) crclr fixxlate ; Set translation already ok beqlr- cr1 ; "to" and "from" are the same beqlr- ; Length is 0 - crclr killbats ; Make sure we do not trash BATs on the way out + crclr restorex ; Make sure we do not trash BATs on the way out + bt++ pf64Bitb,copyit64 ; handle 64-bit processors -copyit: sub r12,r4,r6 ; Get potential overlap (negative if backward move) +copyit32: sub r12,r4,r6 ; Get potential overlap (negative if backward move) lis r8,0x7FFF ; Start up a mask srawi r11,r12,31 ; Propagate the sign bit dcbt br0,r6 ; Touch in the first source line @@ -209,7 +334,7 @@ copyit: sub r12,r4,r6 ; Get potential overlap (negative if backward move) cmplwi cr7,r9,32 ; See if at least a line between source and sink dcbtst br0,r4 ; Touch in the first sink line cmplwi cr1,r5,32 ; Are we moving more than a line? - cror noncache,noncache,28 ; Set to not DCBZ output line if not enough space + cror noncache,noncache,cr7_lt ; Set to not DCBZ output line if not enough space blt- fwdovrlap ; This is a forward overlapping area, handle it... ; @@ -225,6 +350,7 @@ copyit: sub r12,r4,r6 ; Get potential overlap (negative if backward move) ; We can not do this if noncache is set because we will take an ; alignment exception. +G4word: ; enter from 64-bit case with word aligned uncached operands neg r0,r4 ; Get the number of bytes to move to align to a line boundary rlwinm. r0,r0,0,27,31 ; Clean it up and test it and r0,r0,r8 ; limit to the maximum front end move @@ -361,17 +487,45 @@ nohalf: bf 31,bcpydone ; Leave cuz we are all done... lbz r7,0(r6) ; Get the byte stb r7,0(r4) ; Save the single -bcpydone: bt- killbats,bcclrbat ; Jump if we need to clear bats... - bflr fixxlate ; Leave now if we do not need to fix translation... +bcpydone: mfmsr r9 ; Get the MSR + bf++ flipcache,bcpydone0 ; (HACK) No need to mess with caching... 
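
Two idioms in copyit32 above read more clearly in C. The overlap test ("sub r12,r4,r6" here, "cmpld cr1,r8,r5" in the 64-bit path) relies on a single unsigned compare: a forward copy is unsafe exactly when dest - source, taken as an unsigned value, is below the length. The front-end alignment count ("neg r0,r4" then "rlwinm. r0,r0,0,27,31") is (-dest) mod 32. A sketch with illustrative names:

#include <stddef.h>
#include <stdint.h>

/* True when the destination overlays the tail of the source, so the
 * move must run backwards.  If dst < src the unsigned difference is
 * huge and the test fails, matching the assembly. */
static int must_copy_backwards(const void *src, const void *dst, size_t len)
{
    return (uintptr_t)dst - (uintptr_t)src < len;
}

/* Bytes needed to bring dst up to the next 32-byte line boundary. */
static size_t bytes_to_line(const void *dst)
{
    return (size_t)(-(uintptr_t)dst) & 31;
}
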
+ + li r0,1 ; (HACK) Get a 1 + mfxer r10 ; (HACK GLORIOUS HACK) Get the entry EE + sldi r0,r0,32+8 ; (HACK) Get the right bit to turn off caching + mfspr r2,hid4 ; (HACK) Get HID4 + rlwinm r10,r10,31-MSR_EE_BIT,MSR_EE_BIT,MSR_EE_BIT ; (HACK GLORIOUS HACK) Set the EE bit + andc r2,r2,r0 ; (HACK) Clear bit to make real accesses cache-inhibited + or r9,r9,r10 ; (HACK GLORIOUS HACK) Set the EE in MSR + sync ; (HACK) Sync up + mtspr hid4,r2 ; (HACK) Make real accesses not cache-inhibited + isync ; (HACK) Toss prefetches + + lis r12,0xE000 ; (HACK) Get the unlikeliest ESID possible + srdi r12,r12,1 ; (HACK) Make 0x7FFFFFFFF0000000 + slbie r12 ; (HACK) Make sure the ERAT is cleared + + mtmsr r9 ; (HACK GLORIOUS HACK) Set EE properly + +bcpydone0: + lis r0,hi16(MASK(MSR_VEC)) ; Get the vector bit + ori r0,r0,lo16(MASK(MSR_FP)) ; Get the float bit + bf++ fixxlate,bcpydone1 ; skip if we do not need to fix translation... ori r9,r9,lo16(MASK(MSR_DR)) ; Turn data translation on - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off + andc r9,r9,r0 ; Make sure that FP and VEC are off mtmsr r9 ; Just do it isync ; Hang in there - blr ; Leave cuz we are all done... - -bcclrbat: li r0,0 ; Get set to invalidate upper half + +bcpydone1: + bflr++ restorex ; done if we do not have to fix up addressing + mfsprg r8,2 ; get the feature flags again + mtcrf 0x02,r8 ; put pf64Bit where we can test it + bt++ pf64Bitb,bcpydone2 ; skip if 64-bit processor + + ; 32-bit processor, so clear out the BATs we set up for bcopy_physvir + + li r0,0 ; Get set to invalidate upper half sync ; Make sure all is well mtdbatu 0,r0 ; Clear sink upper DBAT mtdbatu 1,r0 ; Clear source upper DBAT @@ -379,6 +533,16 @@ bcclrbat: li r0,0 ; Get set to invalidate upper half isync blr + ; 64-bit processor, so turn off 64-bit mode we turned on to do bcopy_phys + +bcpydone2: + mfmsr r9 ; get MSR again + andc r9,r9,r0 ; Make sure that FP and VEC are off + rldicl r9,r9,0,MSR_SF_BIT+1 ; clear SF + mtmsrd r9 + isync + blr + ; ; 0123456789ABCDEF0123456789ABCDEF @@ -399,7 +563,8 @@ bcclrbat: li r0,0 ; Get set to invalidate upper half ; and on in order. That means that when we are at the second to last DW we ; have to wait until the whole line is in cache before we can proceed. ; - + +G4reverseWord: ; here from 64-bit code with word aligned uncached operands fwdovrlap: add r4,r5,r4 ; Point past the last sink byte add r6,r5,r6 ; Point past the last source byte and r0,r4,r8 ; Apply movement limit @@ -541,8 +706,311 @@ bnoword: bf 30,bnohalf ; No halfword to do... ; Move backend byte -bnohalf: bflr 31 ; Leave cuz we are all done... +bnohalf: bf 31,bcpydone ; Leave cuz we are all done... lbz r7,-1(r6) ; Get the byte stb r7,-1(r4) ; Save the single b bcpydone ; Go exit cuz we are all done... + + +// Here on 64-bit processors, which have a 128-byte cache line. This can be +// called either in 32 or 64-bit mode, which makes the test for reverse moves +// a little tricky. We've already filtered out the (sou==dest) and (len==0) +// special cases. +// +// When entered: +// r4 = destination (32 or 64-bit ptr) +// r5 = length (always 32 bits) +// r6 = source (32 or 64-bit ptr) +// cr5 = noncache, fixxlate, flipcache, and restorex flags set + + .align 5 +copyit64: + lis r2,0x4000 // r2 = 0x00000000 40000000 + neg r12,r4 // start to compute #bytes to align dest + bt-- noncache,noncache1 // (HACK) Do not even try anything cached... 
+ dcbt 0,r6 // touch in 1st block of source +noncache1: + add. r2,r2,r2 // if 0x00000000 80000000 < 0, we are in 32-bit mode + cntlzw r9,r5 // get highest power-of-2 in length + rlwinm r7,r12,0,25,31 // r7 <- bytes to 128-byte align dest + bt-- noncache,noncache2 // (HACK) Do not even try anything cached... + dcbtst 0,r4 // touch in 1st destination cache block +noncache2: + sraw r2,r2,r9 // get mask with 1s for leading 0s in length, plus 1 more 1-bit + bge copyit64a // skip if we are running in 64-bit mode + rlwinm r4,r4,0,0,31 // running in 32-bit mode, so truncate ptrs and lengths to 32 bits + rlwinm r5,r5,0,0,31 + rlwinm r6,r6,0,0,31 +copyit64a: // now we can use 64-bit compares even if running in 32-bit mode + sub r8,r4,r6 // get (dest-source) + andc r7,r7,r2 // limit bytes to align by operand length + cmpld cr1,r8,r5 // if (dest-source) +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include /* (TEST/DEBUG) */ + +#define patper 253 + + +int main(void); +void clrarea(unsigned int *source, unsigned int *sink); +int tstcopy(void *src, void *snk, unsigned int lgn); +void clrarea2(unsigned int *source, unsigned int *sink); +int tstcopy2(void *src, void *snk, unsigned int lgn); +int tstcopy3(void *src, void *snk, unsigned int lgn); +int tstcopy4(void *src, void *snk, unsigned int lgn); +int tstcopy5(void *src, void *snk, unsigned int lgn); +int dumbcopy(void *src, void *snk, unsigned int lgn); + + +unsigned int gtick(void); + + +void bcopytest(void); +void bcopytest(void) { + + void *srcptr, *snkptr, *asrc, *asnk; + int bsrc, bsnk, size, iterations, i, ret, n; + unsigned int makerand, timein, timeout, ticks; + volatile int dbg = 0; + unsigned int *sink, *source; + + long long tottime, totbytes; + long long tottime2, totbytes2; + + kern_return_t retr; + + db_printf("bcopy test\n"); + + retr = kmem_alloc_wired(kernel_map, (vm_offset_t *)&sink, (1024*1024)+4096); /* Get sink area */ + if(retr != KERN_SUCCESS) { /* Did we find any memory at all? */ + panic("bcopytest: Whoops... no memory for sink\n"); + } + + retr = kmem_alloc_wired(kernel_map, (vm_offset_t *)&source, (1024*1024)+4096); /* Get source area */ + if(retr != KERN_SUCCESS) { /* Did we find any memory at all? */ + panic("bcopytest: Whoops... 
no memory for source\n"); + } + + db_printf("Source at %08X; Sink at %08X\n", source, sink); + + srcptr = (void *)&source[0]; + snkptr = (void *)&sink[0]; + +#if 1 + db_printf("Testing non-overlap case; source bndry = 0 to 7F; sink bndry = 0 - 7F; lgn = 1 to 256\n"); + for(bsrc = 0; bsrc < 128; bsrc++) { /* Step the source by 1 */ + for(bsnk = 0; bsnk < 128; bsnk++) { /* Step the sink by 1 */ + for(size = 1; size <= 256; size++) { /* Step the size by 1 */ + + clrarea(source, sink); /* Reset source and clear sink */ + if(size == 255) { + dbg = 99; + } + if(tstcopy((void *)((unsigned int)srcptr + bsrc), (void *)((unsigned int)snkptr + bsnk), size)) { + db_printf("Test failed; source = %02X; sink = %02X; length = %d\n", bsrc, bsnk, size); + db_printf("failed\n"); + } + } + } + } + db_printf("Non-overlap test complete\n"); +#endif + + +#if 1 + db_printf("Testing overlap\n"); + for(bsrc = 1; bsrc < 128; bsrc++) { /* Step the source by 1 */ + for(bsnk = 0; bsnk < 128; bsnk++) { /* Step the sink by 1 */ + for(size = 1; size <= 256; size++) { /* Step the size by 1 */ + + clrarea2(source, sink); /* Reset source and clear sink */ + if(bsrc < bsnk) { + dbg = 88; + } + else { + dbg = 99; + } + if(tstcopy2((void *)((unsigned int)srcptr + bsrc), (void *)((unsigned int)srcptr + bsnk), size)) { + db_printf("Test failed; source = %02X; sink = %02X; length = %d\n", bsrc, bsnk, size); + db_printf("failed\n"); + } + } + } + } + db_printf("Overlap test complete\n"); +#endif + +#if 1 + db_printf("Starting exhaustive tests\n"); + for(i = 0; i < 262144 * 4; i++) { /* Set all 1MB of source and dest to known pattern */ + ((unsigned char *)srcptr)[i] = i % patper; /* Make a non-power-of-two length pattern */ + ((unsigned char *)snkptr)[i] = i % patper; /* Make a non-power-of-two length pattern */ + } + + db_printf("No overlap; source < sink, length = 0 to 1023\nSource ="); + +#if 1 + for(bsrc = 0; bsrc < 128; bsrc++) { /* Step source by 1 */ + db_printf(" %3d", bsrc); /* Show where we're at */ + for(bsnk = 0; bsnk < 128; bsnk++) { /* Step sink by 1 */ + for(size = 0; size < 1025; size++) { /* Step size from 0 to 1023 */ + asrc = (void *)((unsigned int)srcptr + bsrc); /* Start byte address */ + asnk = (void *)((unsigned int)srcptr + bsnk + 2048); /* End byte address */ + ret = tstcopy5(asrc, asnk, size); /* Copy and validate */ + if(ret) { + db_printf("\nTest failed - source = %3d, sink = %3d size = %d\n", bsrc, bsnk, size); + db_printf("failed\n"); + } + } + } + } +#endif + + db_printf("\n"); + db_printf("No overlap; source > sink, length = 0 to 1023\nSource ="); + +#if 1 + for(bsrc = 0; bsrc < 128; bsrc++) { /* Step source by 1 */ + db_printf(" %3d", bsrc); /* Show where we're at */ + for(bsnk = 0; bsnk < 128; bsnk++) { /* Step sink by 1 */ + for(size = 0; size < 1025; size++) { /* Step size from 0 to 1023 */ + asrc = (void *)((unsigned int)srcptr + bsrc + 2048); /* Start byte address */ + asnk = (void *)((unsigned int)srcptr + bsnk); /* End byte address */ + ret = tstcopy5(asrc, asnk, size); /* Copy and validate */ + if(ret) { + db_printf("\nTest failed - source = %3d, sink = %3d size = %d\n", bsrc, bsnk, size); + db_printf("failed\n"); + } + } + } + } +#endif + + db_printf("\n"); + db_printf("Overlap; source = sink + N (N = 0 to 127), length = 0 to 1023\nN ="); + +#if 1 + for(n = 0; n < 128; n++) { /* Step n by 1 */ + db_printf(" %3d", n); /* Show where we're at */ + for(bsnk = 0; bsnk < 128; bsnk++) { /* Step sink by 1 */ + for(size = 0; size < 1025; size++) { /* Step size from 0 to 1023 */ + asrc = (void 
*)((unsigned int)srcptr + bsnk + n); /* Start byte address */ + asnk = (void *)((unsigned int)srcptr + bsnk); /* End byte address */ + ret = tstcopy5(asrc, asnk, size); /* Copy and validate */ + if(ret) { + db_printf("\nTest failed - source = %3d, sink = %3d size = %d\n", bsrc, bsnk, size); + db_printf("failed\n"); + } + } + } + } +#endif + + db_printf("\n"); + db_printf("Overlap; source + N = sink (N = 0 to 127), length = 0 to 1023\nSource ="); + +#if 1 + for(bsrc = 0; bsrc < 128; bsrc++) { /* Step source by 1 */ + db_printf(" %3d", bsrc); /* Show where we're at */ + for(n = 0; n < 128; n++) { /* Step N by 1 */ + for(size = 0; size < 1025; size++) { /* Step size from 0 to 1023 */ + asrc = (void *)((unsigned int)srcptr + bsnk); /* Start byte address */ + asnk = (void *)((unsigned int)srcptr + bsnk + n); /* End byte address */ + ret = tstcopy5(asrc, asnk, size); /* Copy and validate */ + if(ret) { + db_printf("\nTest failed - source = %3d, n = %3d size = %d\n", bsrc, n, size); + db_printf("failed\n"); + } + } + } + } +#endif + + + db_printf("\n"); + db_printf("Overlap; source = sink + N + 128 (N = 0 to 127), length = 0 to 1023\nN ="); + +#if 1 + for(n = 0; n < 128; n++) { /* Step n by 1 */ + db_printf(" %3d", n); /* Show where we're at */ + for(bsnk = 0; bsnk < 128; bsnk++) { /* Step sink by 1 */ + for(size = 0; size < 1025; size++) { /* Step size from 0 to 1023 */ + asrc = (void *)((unsigned int)srcptr + bsnk + n + 128); /* Start byte address */ + asnk = (void *)((unsigned int)srcptr + bsnk); /* End byte address */ + ret = tstcopy5(asrc, asnk, size); /* Copy and validate */ + if(ret) { + db_printf("\nTest failed - source = %3d, sink = %3d size = %d\n", bsrc, bsnk, size); + db_printf("failed\n"); + } + } + } + } +#endif + + db_printf("\n"); + db_printf("Overlap; source + N + 128 = sink (N = 0 to 127), length = 0 to 1023\nSource ="); + +#if 1 + for(bsrc = 0; bsrc < 128; bsrc++) { /* Step source by 1 */ + db_printf(" %3d", bsrc); /* Show where we're at */ + for(n = 0; n < 128; n++) { /* Step N by 1 */ + for(size = 0; size < 1025; size++) { /* Step size from 0 to 1023 */ + asrc = (void *)((unsigned int)srcptr + bsnk); /* Start byte address */ + asnk = (void *)((unsigned int)srcptr + bsnk + n + 128); /* End byte address */ + ret = tstcopy5(asrc, asnk, size); /* Copy and validate */ + if(ret) { + db_printf("\nTest failed - source = %3d, n = %3d size = %d\n", bsrc, n, size); + db_printf("failed\n"); + } + } + } + } +#endif + + db_printf("\n"); + db_printf("Overlap; source = sink + N + 256 (N = 0 to 127), length = 0 to 1023\nSource ="); + +#if 1 + for(n = 0; n < 128; n++) { /* Step n by 1 */ + db_printf(" %3d", n); /* Show where we're at */ + for(bsnk = 0; bsnk < 128; bsnk++) { /* Step sink by 1 */ + for(size = 0; size < 1025; size++) { /* Step size from 0 to 1023 */ + asrc = (void *)((unsigned int)srcptr + bsnk + n + 256); /* Start byte address */ + asnk = (void *)((unsigned int)srcptr + bsnk); /* End byte address */ + ret = tstcopy5(asrc, asnk, size); /* Copy and validate */ + if(ret) { + db_printf("\nTest failed - source = %3d, sink = %3d size = %d\n", bsrc, bsnk, size); + db_printf("failed\n"); + } + } + } + } +#endif + + db_printf("\n"); + db_printf("Overlap; source + N + 256 = sink (N = 0 to 127), length = 0 to 1023\nSource ="); +#if 1 + for(bsrc = 0; bsrc < 128; bsrc++) { /* Step source by 1 */ + db_printf(" %3d", bsrc); /* Show where we're at */ + for(n = 0; n < 128; n++) { /* Step N by 1 */ + for(size = 0; size < 1025; size++) { /* Step size from 0 to 1023 */ + asrc = (void *)((unsigned 
int)srcptr + bsnk); /* Start byte address */ + asnk = (void *)((unsigned int)srcptr + bsnk + n + 256); /* End byte address */ + ret = tstcopy5(asrc, asnk, size); /* Copy and validate */ + if(ret) { + db_printf("\nTest failed - source = %3d, n = %3d size = %d\n", bsrc, n, size); + db_printf("failed\n"); + } + } + } + } +#endif + + + + + + +#endif + +#if 0 + iterations = 1000; + tottime = 0; + totbytes = 0; + + db_printf("Random test starting; iterations = %d\n", iterations); + for(i = 0; i < 262144 * 4; i++) { /* Clear all 2MB of source (and dest for this test) */ + ((unsigned char *)srcptr)[i] = i & 255; + } + + for(i = 0; i < iterations; i++) { /* Test until we are done */ + makerand = rand() << 16 | (rand() & 0x0000FFFF); + bsrc = makerand & 0x0007FFFF; /* Generate source */ + makerand = rand() << 16 | (rand() & 0x0000FFFF); + bsnk = makerand & 0x0007FFFF; /* Generate sink */ + makerand = rand() << 16 | (rand() & 0x0000FFFF); + size = makerand & 0x0007FFFF; /* Generate length */ +#if 1 + db_printf("rt %7d: src = %08X; sink = %08X; length = %7d\n", i, ((unsigned int)srcptr + bsrc), + ((unsigned int)srcptr + bsnk), size); +#endif + + asrc = (void *)((unsigned int)srcptr + bsrc); + asnk = (void *)((unsigned int)srcptr + bsnk); + timein = gtick(); + ret = tstcopy3(asrc, asnk, size); + timeout = gtick(); + if(ret) { + db_printf("Test failed; source = %02X; sink = %02X; length = %d\n", bsrc, bsnk, size); + db_printf("failed\n"); + + } + ticks = timeout - timein; /* Get time in ticks for copy */ + tottime += ticks; + totbytes += size; + + rate = (double) totbytes / (double)tottime; /* Get bytes per tick */ +// rate = rate * (double)11250000.0; /* Bytes per second */ +// rate = rate * (double)16500000.0; /* Bytes per second */ + rate = rate * (double)tbfreq; /* Bytes per second */ + rate = rate / (double)1000000.0; /* Get number of MBs */ + + db_printf("Total bytes = %lld; total time = %lld; rate = %f10\n", totbytes, tottime, rate); + + } +#endif + + + +#if 0 + iterations = 100; + tottime = 0; + totbytes = 0; + + db_printf("Random test starting; iterations = %d\n", iterations); + for(i = 0; i < 262144 * 4; i++) { /* Clear all 2MB of source (and dest for this test) */ + ((unsigned char *)srcptr)[i] = i & 255; + } + + for(i = 0; i < iterations; i++) { /* Test until we are done */ + makerand = rand() << 16 | (rand() & 0x0000FFFF); + bsrc = makerand & 0x0007FFFF; /* Generate source */ + makerand = rand() << 16 | (rand() & 0x0000FFFF); + bsnk = makerand & 0x0007FFFF; /* Generate sink */ + makerand = rand() << 16 | (rand() & 0x0000FFFF); + size = makerand & 0x0007FFFF; /* Generate length */ +#if 1 + db_printf("rt %7d: src = %08X; sink = %08X; length = %7d\n", i, ((unsigned int)srcptr + bsrc), + ((unsigned int)srcptr + bsnk), size); +#endif + + asrc = (void *)((unsigned int)srcptr + bsrc); + asnk = (void *)((unsigned int)srcptr + bsnk); + timein = gtick(); + ret = tstcopy4(asrc, asnk, size); + timeout = gtick(); + if(ret) { + db_printf("Test failed; source = %02X; sink = %02X; length = %d\n", bsrc, bsnk, size); + db_printf("failed\n"); + + } + ticks = timeout - timein; /* Get time in ticks for copy */ + tottime += ticks; + totbytes += size; + + rate = (double) totbytes / (double)tottime; /* Get bytes per tick */ +// rate = rate * (double)11250000.0; /* Bytes per second */ +// rate = rate * (double)16500000.0; /* Bytes per second */ + rate = rate * (double)tbfreq; /* Bytes per second */ + rate = rate / (double)1000000.0; /* Get number of MBs */ + + db_printf("Total bytes = %lld; total time = %lld; rate 
= %f10\n", totbytes, tottime, rate); + + } +#endif + +#if 0 + iterations = 100; + tottime = 0; + totbytes = 0; + + db_printf("Random test starting; iterations = %d\n", iterations); + for(i = 0; i < 262144 * 4; i++) { /* Clear all 2MB of source (and dest for this test) */ + ((unsigned char *)srcptr)[i] = i & 255; + } + + for(i = 0; i < iterations; i++) { /* Test until we are done */ + makerand = rand() << 16 | (rand() & 0x0000FFFF); + bsrc = makerand & 0x0007FFFF; /* Generate source */ + makerand = rand() << 16 | (rand() & 0x0000FFFF); + bsnk = makerand & 0x0007FFFF; /* Generate sink */ + makerand = rand() << 16 | (rand() & 0x0000FFFF); + size = makerand & 0x0007FFFF; /* Generate length */ +#if 1 + db_printf("rt %7d: src = %08X; sink = %08X; length = %7d\n", i, ((unsigned int)srcptr + bsrc), + ((unsigned int)srcptr + bsnk), size); +#endif + + asrc = (void *)((unsigned int)srcptr + bsrc); + asnk = (void *)((unsigned int)srcptr + bsnk); + timein = gtick(); + ret = dumbcopy(asrc, asnk, size); + timeout = gtick(); + if(ret) { + db_printf("Test failed; source = %02X; sink = %02X; length = %d\n", bsrc, bsnk, size); + db_printf("failed\n"); + + } + ticks = timeout - timein; /* Get time in ticks for copy */ + tottime += ticks; + totbytes += size; + + rate = (double) totbytes / (double)tottime; /* Get bytes per tick */ + rate = rate * (double)tbfreq; /* Bytes per second */ + rate = rate / (double)1000000.0; /* Get number of MBs */ + + db_printf("Total bytes = %lld; total time = %lld; rate = %f10\n", totbytes, tottime, rate); + + } +#endif + + kmem_free(kernel_map, (vm_offset_t) sink, (1024*1024)+4096); /* Release this mapping block */ + kmem_free(kernel_map, (vm_offset_t) source, (1024*1024)+4096); /* Release this mapping block */ + + if(dbg == 22) db_printf("Gabbagoogoo\n"); + return; +} + +void clrarea(unsigned int *source, unsigned int *sink) { + + unsigned int i; + + for(i=0; i < 1024; i++) { /* Init source & sink */ + source[i] = 0x55555555; /* Known pattern */ + sink[i] = 0xAAAAAAAA; /* Known pattern */ + } + return; +} + +void clrarea2(unsigned int *source, unsigned int *sink) { + + unsigned int i; + unsigned char *ss; + + ss = (unsigned char *)&source[0]; + + for(i=0; i < 1024 * 4; i++) { /* Init source/sink */ + ss[i] = i & 0xFF; /* Known pattern */ + } + return; +} + +int tstcopy(void *src, void *snk, unsigned int lgn) { + + unsigned int i, crap; + + bcopy(src, snk, lgn); + + for(i = 0; i < lgn; i++) { + if(((unsigned char *)snk)[i] != 0x55) { + crap = (unsigned int)&((unsigned char *)snk)[i]; + db_printf("bad copy at sink[%d] (%08X) it is %02X\n", i,crap, ((unsigned char *)snk)[i]); + return 1; + } + } + if(((unsigned char *)snk)[lgn] != 0xAA) { /* Is it right? */ + crap = (unsigned int)&((unsigned char *)snk)[i]; + db_printf("Copied too far at sink[%d] (%08X) it is %02X\n", i, crap, ((unsigned char *)snk)[lgn]); + return 1; + } + return 0; + +} + +int tstcopy2(void *src, void *snk, unsigned int lgn) { + + unsigned int i, crap; + unsigned char ic, ec; + + ic = ((unsigned char *)src)[0]; + ec = ((unsigned char *)snk)[lgn]; + + bcopy(src, snk, lgn); + + for(i = 0; i < lgn; i++) { + if(((unsigned char *)snk)[i] != ic) { + crap = (unsigned int)&((unsigned char *)snk)[i]; + db_printf("bad copy at sink[%d] (%08X) it is %02X\n", i,crap, ((unsigned char *)snk)[i]); + return 1; + } + ic = (ic + 1) & 0xFF; + } + + if(((unsigned char *)snk)[lgn] != ec) { /* Is it right? 
*/ + crap = (unsigned int)&((unsigned char *)snk)[i]; + db_printf("Copied too far at sink[%d] (%08X) it is %02X\n", i, crap, ((unsigned char *)snk)[lgn]); + return 1; + } + return 0; + +} + +int tstcopy3(void *src, void *snk, unsigned int lgn) { + + unsigned int i, crap; + unsigned char ic, ec, oic; + + oic = ((unsigned char *)snk)[0]; + ic = ((unsigned char *)src)[0]; + ec = ((unsigned char *)snk)[lgn]; + + bcopy(src, snk, lgn); + + for(i = 0; i < lgn; i++) { + if(((unsigned char *)snk)[i] != ic) { + crap = (unsigned int)&((unsigned char *)snk)[i]; + db_printf("bad copy at sink[%d] (%08X) it is %02X\n", i, crap, ((unsigned char *)snk)[i]); + return 1; + } + ic = (ic + 1) & 0xFF; + } + + if(((unsigned char *)snk)[lgn] != ec) { /* Is it right? */ + crap = (unsigned int)&((unsigned char *)snk)[i]; + db_printf("Copied too far at sink[%d] (%08X) it is %02X\n", i, crap, ((unsigned char *)snk)[lgn]); + return 1; + } + + for(i=0; i < lgn; i++) { /* Restore pattern */ + ((unsigned char *)snk)[i] = oic; + oic = (oic + 1) & 0xFF; + } + + return 0; + +} + +int tstcopy4(void *src, void *snk, unsigned int lgn) { + + bcopy(src, snk, lgn); + return 0; + +} + +int tstcopy5(void *src, void *snk, unsigned int lgn) { + + unsigned int i, crap; + unsigned char ic, ec, oic, pc; + + oic = ((unsigned char *)snk)[0]; /* Original first sink character */ + ic = ((unsigned char *)src)[0]; /* Original first source character */ + ec = ((unsigned char *)snk)[lgn]; /* Original character just after last sink character */ + pc = ((unsigned char *)snk)[-1]; /* Original character just before sink */ + + bcopy(src, snk, lgn); + + if(((unsigned char *)snk)[lgn] != ec) { /* Did we copy too far forward? */ + crap = (unsigned int)&((unsigned char *)snk)[lgn]; + db_printf("Copied too far at sink[%d] (%08X) it is %02X\n", lgn, crap, ((unsigned char *)snk)[lgn]); + return 1; + } + + if(((unsigned char *)snk)[-1] != pc) { /* Did we copy too far backward? */ + crap = (unsigned int)&((unsigned char *)snk)[-1]; + db_printf("Copied too far at sink[%d] (%08X) it is %02X\n", -1, crap, ((unsigned char *)snk)[-1]); + return 1; + } + + for(i = 0; i < lgn; i++) { /* Check sink byte sequence */ + if(((unsigned char *)snk)[i] != ic) { + crap = (unsigned int)&((unsigned char *)snk)[i]; + db_printf("bad copy at sink[%d] (%08X) it is %02X\n", i, crap, ((unsigned char *)snk)[i]); + return 1; + } + ic = (ic + 1) % patper; + } + + for(i=0; i < lgn; i++) { /* Restore pattern */ + ((unsigned char *)snk)[i] = oic; + oic = (oic + 1) % patper; + } + + return 0; + +} + +int dumbcopy(void *src, void *snk, unsigned int lgn) { + unsigned int i; + char *p = (char *)snk; + char *q = (char *)src; + + for(i = 0; i < lgn; i++) { + *p++ = *q++; + } + return 0; + +} + + + + + + + + + + + + + diff --git a/osfmk/ppc/bsd_asm.s b/osfmk/ppc/bsd_asm.s deleted file mode 100644 index ffee9264c..000000000 --- a/osfmk/ppc/bsd_asm.s +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file.
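The tstcopy5 routine above is the strictest of the validators: it snapshots guard bytes on both sides of the sink before the copy, then checks for overrun, underrun, and pattern integrity. A minimal user-space analogue of the same guard-byte technique, for experimenting outside the kernel debugger, might look like the sketch below; memcpy and printf stand in for bcopy and db_printf, and the pattern period PATPER is an assumption (256 matches the simplest fill used by the earlier tests). Note that the kernel harness also exercises overlapping moves, which would require memmove here.

    #include <stdio.h>
    #include <string.h>

    #define PATPER 256                      /* assumed pattern period */

    /* Guard-byte copy check modelled on tstcopy5: snk must point at least
       one byte into a live buffer so snk[-1] and snk[lgn] are valid guards. */
    int check_copy(unsigned char *src, unsigned char *snk, unsigned int lgn)
    {
        unsigned char pre  = snk[-1];       /* byte just before the sink */
        unsigned char post = snk[lgn];      /* byte just after the sink */
        unsigned char expect = src[0];      /* first byte of the rolling pattern */
        unsigned int i;

        memcpy(snk, src, lgn);              /* memmove if src and snk overlap */

        if (snk[lgn] != post) {             /* copied too far forward? */
            printf("overrun at sink[%u]: %02X\n", lgn, snk[lgn]);
            return 1;
        }
        if (snk[-1] != pre) {               /* copied too far backward? */
            printf("underrun before sink[0]: %02X\n", snk[-1]);
            return 1;
        }
        for (i = 0; i < lgn; i++) {         /* sink must hold the pattern */
            if (snk[i] != expect) {
                printf("bad copy at sink[%u]: %02X\n", i, snk[i]);
                return 1;
            }
            expect = (unsigned char)((expect + 1) % PATPER);
        }
        return 0;
    }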
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - -/* - * void cthread_set_self(cproc_t p) - * - * set's thread state "user_value" - * - * This op is invoked as follows: - * li r0, CthreadSetSelfNumber // load the fast-trap number - * sc // invoke fast-trap - * blr - * - * Entry: VM switched ON - * Interrupts OFF - * original r1-3 saved in sprg1-3 - * original srr0 and srr1 saved in per_proc_info structure - * original cr saved in per_proc_info structure - * exception type saved in per_proc_info structure - * r1 = scratch - * r2 = virt addr of per_proc_info - * r3 = exception type (one of EXC_...) - * - */ - .text - .align 5 -ENTRY(CthreadSetSelfNumber, TAG_NO_FRAME_USED) - lwz r1, PP_CPU_DATA(r2) - lwz r1, CPU_ACTIVE_THREAD(r1) - lwz r1, THREAD_TOP_ACT(r1) - lwz r1, ACT_MACT_PCB(r1) - - mfsprg r3, 3 - stw r3, CTHREAD_SELF(r1) - - /* Prepare to rfi to the exception exit routine, which is - * in physical address space */ - addis r3, 0, HIGH_CADDR(EXT(exception_exit)) - addi r3, r3, LOW_ADDR(EXT(exception_exit)) - - lwz r3, 0(r3) - mtsrr0 r3 - li r3, MSR_VM_OFF - mtsrr1 r3 - - lwz r3, PP_SAVE_SRR1(r2) /* load the last register... */ - lwz r2, PP_SAVE_SRR0(r2) /* For trampoline */ - lwz r1, PCB_SR0(r1) /* For trampoline... */ - - rfi - - -/* - * ur_cthread_t ur_cthread_self(void) - * - * return thread state "user_value" - * - * This op is invoked as follows: - * li r0, UrCthreadSelfNumber // load the fast-trap number - * sc // invoke fast-trap - * blr - * - * Entry: VM switched ON - * Interrupts OFF - * original r1-3 saved in sprg1-3 - * original srr0 and srr1 saved in per_proc_info structure - * original cr saved in per_proc_info structure - * exception type saved in per_proc_info structure - * r1 = scratch - * r2 = virt addr of per_proc_info - * r3 = exception type (one of EXC_...) - * - */ - .text - .align 5 -ENTRY(UrCthreadSelfNumber, TAG_NO_FRAME_USED) - lwz r1, PP_CPU_DATA(r2) - lwz r1, CPU_ACTIVE_THREAD(r1) - lwz r1, THREAD_TOP_ACT(r1) - lwz r1, ACT_MACT_PCB(r1) - - lwz r3, CTHREAD_SELF(r1) - mtsprg 3, r3 - - - /* Prepare to rfi to the exception exit routine, which is - * in physical address space */ - addis r3, 0, HIGH_CADDR(EXT(exception_exit)) - addi r3, r3, LOW_ADDR(EXT(exception_exit)) - lwz r3, 0(r3) - mtsrr0 r3 - li r3, MSR_VM_OFF - mtsrr1 r3 - - lwz r3, PP_SAVE_SRR1(r2) /* load the last register... */ - lwz r2, PP_SAVE_SRR0(r2) /* For trampoline */ - lwz r1, PCB_SR0(r1) /* For trampoline... */ - - rfi diff --git a/osfmk/ppc/bsd_ppc.c b/osfmk/ppc/bsd_ppc.c deleted file mode 100644 index a04f60c9a..000000000 --- a/osfmk/ppc/bsd_ppc.c +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). 
You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -struct proc; - -#define ERESTART -1 /* restart syscall */ -#define EJUSTRETURN -2 /* don't modify regs, just return */ - -struct unix_syscallargs { - int flavor; - int r3; - int arg1, arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9; -}; -struct sysent { /* system call table */ - unsigned short sy_narg; /* number of args */ - char sy_parallel; /* can execute in parallel */ - char sy_funnel; /* funnel type */ - unsigned long (*sy_call)(void *, void *, int *); /* implementing function */ -}; - -#define KERNEL_FUNNEL 1 -#define NETWORK_FUNNEL 2 - -extern funnel_t * kernel_flock; -extern funnel_t * network_flock; - -extern struct sysent sysent[]; - -void *get_bsdtask_info( - task_t); - -int set_bsduthreadargs ( - thread_act_t, struct pcb *, - struct unix_syscallargs *); - -void * get_bsduthreadarg( - thread_act_t); - -void -unix_syscall( - struct pcb * pcb, - int, int, int, int, int, int, int ); - -/* - * Function: unix_syscall - * - * Inputs: pcb - pointer to Process Control Block - * arg1 - arguments to mach system calls - * arg2 - * arg3 - * arg4 - * arg5 - * arg6 - * arg7 - * - * Outputs: none - */ -void -unix_syscall( - struct pcb * pcb, - int arg1, - int arg2, - int arg3, - int arg4, - int arg5, - int arg6, - int arg7 - ) -{ - struct ppc_saved_state *regs; - thread_act_t thread; - struct sysent *callp; - int nargs, error; - unsigned short code; - struct proc *p; - void *vt; - int * vtint; - int *rval; - int funnel_type; - struct proc *current_proc(); - - struct unix_syscallargs sarg; - extern int nsysent; - - regs = &pcb->ss; - code = regs->r0; - - thread = current_act(); - p = current_proc(); - rval = (int *)get_bsduthreadrval(thread); - - /* - * Set up call pointer - */ - callp = (code >= nsysent) ? &sysent[63] : &sysent[code]; - - sarg. flavor = (callp == sysent)? 1: 0; - if (sarg.flavor) { - code = regs->r3; - callp = (code >= nsysent) ? &sysent[63] : &sysent[code]; - } else - sarg. 
r3 = regs->r3; - - if (code != 180) { - if (sarg.flavor) - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, - arg1, arg2, arg3, arg4, 0); - else - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, - sarg.r3, arg1, arg2, arg3, 0); - } - sarg.arg1 = arg1; - sarg.arg2 = arg2; - sarg.arg3 = arg3; - sarg.arg4 = arg4; - sarg.arg5 = arg5; - sarg.arg6 = arg6; - sarg.arg7 = arg7; - - if(callp->sy_funnel == NETWORK_FUNNEL) { - (void) thread_funnel_set(network_flock, TRUE); - } else { - (void) thread_funnel_set(kernel_flock, TRUE); - } - - set_bsduthreadargs(thread,pcb,&sarg); - - if (callp->sy_narg > 8) - panic("unix_syscall: max arg count exceeded"); - - rval[0] = 0; - - /* - * r4 is volatile, if we set it to regs->r4 here the child - * will have parents r4 after execve - */ - rval[1] = 0; - - error = 0; /* Start with a good value */ - - /* - ** the PPC runtime calls cerror after every unix system call, so - ** assume no error and adjust the "pc" to skip this call. - ** It will be set back to the cerror call if an error is detected. - */ - regs->srr0 += 4; - vt = get_bsduthreadarg(thread); - counter_always(c_syscalls_unix++); - current_task()->syscalls_unix++; - - ktrsyscall(p, code, callp->sy_narg, vt); - - error = (*(callp->sy_call))(p, (void *)vt, rval); - - regs = find_user_regs(thread); - if (regs == (struct ppc_saved_state *)0) - panic("No user savearea while returning from system call"); - - if (error == ERESTART) { - regs->srr0 -= 8; - } else if (error != EJUSTRETURN) { - if (error) { - regs->r3 = error; - /* set the "pc" to execute cerror routine */ - regs->srr0 -= 4; - } else { /* (not error) */ - regs->r3 = rval[0]; - regs->r4 = rval[1]; - } - } - /* else (error == EJUSTRETURN) { nothing } */ - - ktrsysret(p, code, error, rval[0]); - - (void) thread_funnel_set(current_thread()->funnel_lock, FALSE); - - if (code != 180) { - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, - error, rval[0], rval[1], 0, 0); - } - - thread_exception_return(); - /* NOTREACHED */ -} - -unix_syscall_return(error) -{ - struct ppc_saved_state *regs; - thread_act_t thread; - struct sysent *callp; - int nargs; - unsigned short code; - int *rval; - struct proc *p; - void *vt; - int * vtint; - struct pcb *pcb; - struct proc *current_proc(); - - struct unix_syscallargs sarg; - extern int nsysent; - - thread = current_act(); - p = current_proc(); - rval = (int *)get_bsduthreadrval(thread); - pcb = thread->mact.pcb; - regs = &pcb->ss; - - if (thread_funnel_get() == THR_FUNNEL_NULL) - panic("Unix syscall return without funnel held"); - - /* - * Get index into sysent table - */ - code = regs->r0; - - if (error == ERESTART) { - regs->srr0 -= 8; - } else if (error != EJUSTRETURN) { - if (error) { - regs->r3 = error; - /* set the "pc" to execute cerror routine */ - regs->srr0 -= 4; - } else { /* (not error) */ - regs->r3 = rval[0]; - regs->r4 = rval[1]; - } - } - /* else (error == EJUSTRETURN) { nothing } */ - - ktrsysret(p, code, error, rval[0]); - - (void) thread_funnel_set(current_thread()->funnel_lock, FALSE); - - if (code != 180) { - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, - error, rval[0], rval[1], 0, 0); - } - - thread_exception_return(); - /* NOTREACHED */ -} - diff --git a/osfmk/ppc/bzero.s b/osfmk/ppc/bzero.s index 247670799..82e83de1c 100644 --- a/osfmk/ppc/bzero.s +++ b/osfmk/ppc/bzero.s @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -22,269 +22,286 @@ * * @APPLE_LICENSE_HEADER_END@ */ - /* - * @OSF_FREE_COPYRIGHT@ - */ #include -#include /* For CACHE_LINE_SIZE */ - -/* - * void bzero(char *addr, unsigned int length) - * - * bzero implementation for PowerPC - * - assumes cacheable memory (i.e. uses DCBZ) - * - assumes non-pic code - * - * returns start address in r3, as per memset (called by memset) - */ - -ENTRY(bzero, TAG_NO_FRAME_USED) - - cmpwi cr0, r4, 0 /* no bytes to zero? */ - mr r7, r3 - mr r8, r3 /* use r8 as counter to where we are */ - beqlr- - cmpwi cr0, r4, CACHE_LINE_SIZE /* clear less than a block? */ - li r0, 0 /* use r0 as source of zeros */ - blt .L_bzeroEndWord - -/* first, clear bytes up to the next word boundary */ - addis r6, 0, HIGH_CADDR(.L_bzeroBeginWord) - addi r6, r6, LOW_ADDR(.L_bzeroBeginWord) - /* extract byte offset as word offset */ - rlwinm. r5, r8, 2, 28, 29 - addi r8, r8, -1 /* adjust for update */ - beq .L_bzeroBeginWord /* no bytes to zero */ - subfic r5, r5, 16 /* compute the number of instructions */ - sub r6, r6, r5 /* back from word clear to execute */ - mtctr r6 - bctr - - stbu r0, 1(r8) - stbu r0, 1(r8) - stbu r0, 1(r8) - -/* clear words up to the next block boundary */ -.L_bzeroBeginWord: - addis r6, 0, HIGH_CADDR(.L_bzeroBlock) - addi r6, r6, LOW_ADDR(.L_bzeroBlock) - addi r8, r8, 1 - rlwinm. r5, r8, 0, 27, 29 /* extract word offset */ - addi r8, r8, -4 /* adjust for update */ - beq .L_bzeroBlock /* no words to zero */ - /* compute the number of instructions */ - subfic r5, r5, CACHE_LINE_SIZE - sub r6, r6, r5 /* back from word clear to execute */ - mtctr r6 - bctr - - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - - /* clear cache blocks */ -.L_bzeroBlock: - addi r8, r8, 4 /* remove update adjust */ - sub r5, r8, r7 /* bytes zeroed */ - sub r4, r4, r5 - srwi. r5, r4, CACHE_LINE_POW2 /* blocks to zero */ - beq .L_bzeroEndWord - mtctr r5 - -.L_bzeroBlock1: - dcbz 0, r8 - addi r8, r8, CACHE_LINE_SIZE - bdnz .L_bzeroBlock1 - - /* clear remaining words */ -.L_bzeroEndWord: - addis r6, 0, HIGH_CADDR(.L_bzeroEndByte) - addi r6, r6, LOW_ADDR(.L_bzeroEndByte) - rlwinm. r5, r4, 0, 27, 29 /* extract word offset */ - addi r8, r8, -4 /* adjust for update */ - beq .L_bzeroEndByte /* no words to zero */ - sub r6, r6, r5 /* back from word clear to execute */ - mtctr r6 - bctr - - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - - /* clear remaining bytes */ -.L_bzeroEndByte: - addis r6, 0, HIGH_CADDR(.L_bzeroEnd) - addi r6, r6, LOW_ADDR(.L_bzeroEnd) - /* extract byte offset as word offset */ - rlwinm. r5, r4, 2, 28, 29 - addi r8, r8, 3 /* adjust for update */ - beqlr - sub r6, r6, r5 /* back from word clear to execute */ - mtctr r6 - bctr - - stbu r0, 1(r8) - stbu r0, 1(r8) - stbu r0, 1(r8) - -.L_bzeroEnd: - blr - -/* - * void *memset(void *from, int c, vm_size_t nbytes) - * - * almost everywhere in the kernel - * this appears to be called with argument c==0. We optimise for those - * cases and call bzero if we can. - * - */ - -ENTRY(memset, TAG_NO_FRAME_USED) - - mr. 
ARG3, ARG1 - mr ARG1, ARG2 - /* optimised case - do a bzero */ - beq+ EXT(bzero) - - /* If count is zero, return straight away */ - cmpi cr0, ARG1, 0 - beqlr- - - /* Now, ARG0 = addr, ARG1=len, ARG3=value */ - - subi ARG2, ARG0, 1 /* use ARG2 as our counter */ - -0: - subi ARG1, ARG1, 1 - cmpi cr0, ARG1, 0 - stbu ARG3, 1(ARG2) - bne+ 0b - - /* Return original address in ARG0 */ - - blr - -/* - * void bzero_nc(char *addr, unsigned int length) - * - * bzero implementation for PowerPC - * - assumes non-pic code - * - * returns start address in r3, as per memset (called by memset) - */ - -ENTRY(bzero_nc, TAG_NO_FRAME_USED) - - cmpwi cr0, r4, 0 /* no bytes to zero? */ - mr r7, r3 - mr r8, r3 /* use r8 as counter to where we are */ - beqlr- - cmpwi cr0, r4, CACHE_LINE_SIZE /* clear less than a block? */ - li r0, 0 /* use r0 as source of zeros */ - blt .L_bzeroNCEndWord - -/* first, clear bytes up to the next word boundary */ - addis r6, 0, HIGH_CADDR(.L_bzeroNCBeginWord) - addi r6, r6, LOW_ADDR(.L_bzeroNCBeginWord) - /* extract byte offset as word offset */ - rlwinm. r5, r8, 2, 28, 29 - addi r8, r8, -1 /* adjust for update */ - beq .L_bzeroNCBeginWord /* no bytes to zero */ - subfic r5, r5, 16 /* compute the number of instructions */ - sub r6, r6, r5 /* back from word clear to execute */ - mtctr r6 - bctr - - stbu r0, 1(r8) - stbu r0, 1(r8) - stbu r0, 1(r8) - -/* clear words up to the next block boundary */ -.L_bzeroNCBeginWord: - addis r6, 0, HIGH_CADDR(.L_bzeroNCBlock) - addi r6, r6, LOW_ADDR(.L_bzeroNCBlock) - addi r8, r8, 1 - rlwinm. r5, r8, 0, 27, 29 /* extract word offset */ - addi r8, r8, -4 /* adjust for update */ - beq .L_bzeroNCBlock /* no words to zero */ - /* compute the number of instructions */ - subfic r5, r5, CACHE_LINE_SIZE - sub r6, r6, r5 /* back from word clear to execute */ - mtctr r6 - bctr - - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - - /* clear cache blocks */ -.L_bzeroNCBlock: - addi r8, r8, 4 /* remove update adjust */ - sub r5, r8, r7 /* bytes zeroed */ - sub r4, r4, r5 - srwi. r5, r4, CACHE_LINE_POW2 /* blocks to zero */ - beq .L_bzeroNCEndWord - mtctr r5 - -.L_bzeroNCBlock1: - stw r0, 0(r8) - stw r0, 4(r8) - stw r0, 8(r8) - stw r0, 12(r8) - stw r0, 16(r8) - stw r0, 20(r8) - stw r0, 24(r8) - stw r0, 28(r8) - addi r8, r8, CACHE_LINE_SIZE - bdnz .L_bzeroNCBlock1 - - /* clear remaining words */ -.L_bzeroNCEndWord: - addis r6, 0, HIGH_CADDR(.L_bzeroNCEndByte) - addi r6, r6, LOW_ADDR(.L_bzeroNCEndByte) - rlwinm. r5, r4, 0, 27, 29 /* extract word offset */ - addi r8, r8, -4 /* adjust for update */ - beq .L_bzeroNCEndByte /* no words to zero */ - sub r6, r6, r5 /* back from word clear to execute */ - mtctr r6 - bctr - - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - stwu r0, 4(r8) - - /* clear remaining bytes */ -.L_bzeroNCEndByte: - addis r6, 0, HIGH_CADDR(.L_bzeroNCEnd) - addi r6, r6, LOW_ADDR(.L_bzeroNCEnd) - /* extract byte offset as word offset */ - rlwinm. 
r5, r4, 2, 28, 29 - addi r8, r8, 3 /* adjust for update */ - beqlr - sub r6, r6, r5 /* back from word clear to execute */ - mtctr r6 - bctr - - stbu r0, 1(r8) - stbu r0, 1(r8) - stbu r0, 1(r8) +#include +#include + + .text + .align 2 + .globl _memset + .globl _bzero + .globl _bzero_nc + .globl _bzero_phys + + +// *********************** +// * B Z E R O _ P H Y S * +// *********************** +// +// void bzero_phys(addr64_t phys_addr, uint32_t length); +// +// Takes a phys addr in (r3,r4), and length in r5. We leave cache on. + + .align 5 +LEXT(bzero_phys) + mflr r12 // save return address + rlwinm r3,r3,0,1,0 // coallesce long-long in (r3,r4) into reg64_t in r3 + rlwimi r3,r4,0,0,31 + mr r4,r5 // put length where bzero() expects it + bl EXT(ml_set_physical_get_ffs) // turn DR off, SF on, features in cr6, old MSR in r11 + bl EXT(bzero) // use normal bzero() routine + mtlr r12 // restore return + b EXT(ml_restore) // restore MSR, turning DR on and SF off + + +// ******************* +// * B Z E R O _ N C * +// ******************* +// +// void bzero_nc(char *addr, unsigned int length); +// +// For use with uncached memory. Doesn't seem to be used at all, so probably not +// performance critical. NB: we must avoid unaligned stores, because some +// machines (eg, 970) take alignment exceptions on _any_ unaligned op to uncached +// memory. Of course, we must also avoid dcbz. + +LEXT(bzero_nc) + cmplwi cr1,r4,20 // too short to bother with 16-byte loops? + cmplwi cr7,r4,0 // check for (len==0) + li r6,0 // get a 0 + bge cr1,bznc1 // skip if length >=20 + mtctr r4 // set up byte loop + beqlr-- cr7 // done if len=0 + +// Short operands, loop over bytes. + +bznc0: + stb r6,0(r3) + addi r3,r3,1 + bdnz bznc0 + blr + +// Handle operands long enough to do doubleword stores; we must doubleword +// align, to avoid alignment exceptions. + +bznc1: + neg r7,r3 // start to compute #bytes to align + mfsprg r10,2 // get feature flags + andi. r0,r7,7 // get #bytes to doubleword align + mr r5,r3 // make copy of operand ptr as bcopy expects + mtcrf 0x02,r10 // put pf64Bitb etc in cr6 + beq bzero_tail // already doubleword aligned + sub r4,r4,r0 // adjust count + mtctr r0 // set up loop +bznc2: // zero bytes until doubleword aligned + stb r6,0(r5) + addi r5,r5,1 + bdnz bznc2 + b bzero_tail // join bzero, now that r5 is aligned + + +// ************* *************** +// * B Z E R O * and * M E M S E T * +// ************* *************** +// +// void * memset(void *b, int c, size_t len); +// void bzero(void *b, size_t len); +// +// These routines support G3, G4, and the 970, and run in both 32 and +// 64-bit mode. Lengths (size_t) are always 32 bits. +// +// Register use: +// r0 = temp +// r2 = temp +// r3 = original ptr, not changed since memset returns it +// r4 = count of bytes to set +// r5 = working operand ptr ("rp") +// r6 = value to store (usually 0) +// r7-r9 = temps +// r10 = feature flags +// r11 = old MSR (if bzero_phys) +// r12 = return address (if bzero_phys) +// cr6 = feature flags (pf64Bit, pf128Byte, and pf32Byte) + + .align 5 +LEXT(memset) // void * memset(void *b, int c, size_t len); + andi. r6,r4,0xFF // copy value to working register, test for 0 + mr r4,r5 // move length to working register + bne-- memset1 // skip if nonzero +LEXT(bzero) // void bzero(void *b, size_t len); + dcbtst 0,r3 // touch in 1st cache block + mfsprg r10,2 // get features + li r6,0 // get a 0 + neg r7,r3 // start to compute #bytes to align + andi. 
r0,r10,pf128Byte+pf32Byte // get cache line size + mtcrf 0x02,r10 // put pf128Byte etc in cr6 + cmplw r4,r0 // operand length >= cache line size? + mr r5,r3 // make copy of operand ptr (can't change r3) + blt bzero_tail // too short for dcbz (or dcbz128) + rlwinm r0,r7,0,0x1F // get #bytes to 32-byte align + rlwinm r9,r7,0,0x7F // get #bytes to 128-byte align + bt++ pf128Byteb,bzero_128 // skip if 128-byte processor + +// Operand length >=32 and cache line size is 32. +// r0 = #bytes to 32-byte align +// r4 = length +// r5 = ptr to operand +// r6 = 0 + + sub r2,r4,r0 // adjust length + cmpwi cr1,r0,0 // already 32-byte aligned? + srwi. r8,r2,5 // get #32-byte chunks + beq bzero_tail // not long enough to dcbz + mtctr r8 // set up loop count + rlwinm r4,r2,0,27,31 // mask down to leftover byte count + beq cr1,bz_dcbz32 // skip if already 32-byte aligned + +// 32-byte align. We just store 32 0s, rather than test and use conditional +// branches. This is usually faster, because there are no mispredicts. + + stw r6,0(r5) // zero next 32 bytes + stw r6,4(r5) + stw r6,8(r5) + stw r6,12(r5) + stw r6,16(r5) + stw r6,20(r5) + stw r6,24(r5) + stw r6,28(r5) + add r5,r5,r0 // now r5 is 32-byte aligned + b bz_dcbz32 + +// Loop doing 32-byte version of DCBZ instruction. + + .align 4 // align the inner loop +bz_dcbz32: + dcbz 0,r5 // zero another 32 bytes + addi r5,r5,32 + bdnz bz_dcbz32 + +// Store trailing bytes. This routine is used both by bzero and memset. +// r4 = #bytes to store (may be large if memset) +// r5 = address +// r6 = value to store (in all 8 bytes) +// cr6 = pf64Bit etc flags + +bzero_tail: + srwi. r0,r4,4 // get #(16-byte-chunks) + mtcrf 0x01,r4 // remaining byte count to cr7 + beq bzt3 // no 16-byte chunks + mtctr r0 // set up loop count + bt++ pf64Bitb,bzt2 // skip if 64-bit processor + b bzt1 + .align 5 +bzt1: // loop over 16-byte chunks on 32-bit processor + stw r6,0(r5) + stw r6,4(r5) + stw r6,8(r5) + stw r6,12(r5) + addi r5,r5,16 + bdnz bzt1 + b bzt3 + .align 5 +bzt2: // loop over 16-byte chunks on 64-bit processor + std r6,0(r5) + std r6,8(r5) + addi r5,r5,16 + bdnz bzt2 + bf 28,bzt4 // 8-byte chunk? + std r6,0(r5) + addi r5,r5,8 + b bzt4 +bzt3: + bf 28,bzt4 // 8-byte chunk? + stw r6,0(r5) + stw r6,4(r5) + addi r5,r5,8 +bzt4: + bf 29,bzt5 // word? + stw r6,0(r5) + addi r5,r5,4 +bzt5: + bf 30,bzt6 // halfword? + sth r6,0(r5) + addi r5,r5,2 +bzt6: + bflr 31 // byte? + stb r6,0(r5) + blr + +// Operand length is >=128 and cache line size is 128. We assume that +// because the linesize is 128 bytes, this is a 64-bit processor. +// r4 = length +// r5 = ptr to operand +// r6 = 0 +// r7 = neg(r5) +// r9 = #bytes to 128-byte align + + .align 5 +bzero_128: + sub r2,r4,r9 // r2 <- length remaining after cache-line aligning + rlwinm r0,r7,0,0xF // r0 <- #bytes to 16-byte align + srwi. r8,r2,7 // r8 <- number of cache lines to 0 + std r6,0(r5) // always store 16 bytes to 16-byte align... + std r6,8(r5) // ...even if too short for dcbz128 + add r5,r5,r0 // 16-byte align ptr + sub r4,r4,r0 // adjust count + beq bzero_tail // r8==0, not long enough to dcbz128 + sub. r7,r9,r0 // get #bytes remaining to 128-byte align + rlwinm r4,r2,0,0x7F // r4 <- length remaining after dcbz128'ing + mtctr r8 // set up dcbz128 loop + beq bz_dcbz128 // already 128-byte aligned + b bz_align // enter loop over 16-byte chunks + +// 128-byte align by looping over 16-byte chunks. + + .align 5 +bz_align: // loop over 16-byte chunks + subic. r7,r7,16 // more to go? 
+ std r6,0(r5) + std r6,8(r5) + addi r5,r5,16 + bgt bz_align + + b bz_dcbz128 // enter dcbz128 loop + +// Loop over 128-byte cache lines. +// r4 = length remaining after cache lines (0..127) +// r5 = ptr (128-byte aligned) +// r6 = 0 +// ctr = count of cache lines to 0 + + .align 5 +bz_dcbz128: + dcbz128 0,r5 // zero a 128-byte cache line + addi r5,r5,128 + bdnz bz_dcbz128 + + b bzero_tail // handle leftovers + + +// Handle memset() for nonzero values. This case is relatively infrequent; +// the large majority of memset() calls are for 0. +// r3 = ptr +// r4 = count +// r6 = value in lower byte (nonzero) + +memset1: + cmplwi r4,16 // too short to bother aligning? + rlwimi r6,r6,8,16,23 // replicate value to low 2 bytes + mr r5,r3 // make working copy of operand ptr + rlwimi r6,r6,16,0,15 // value now in all 4 bytes + blt bzero_tail // length<16, we won't be using "std" + mfsprg r10,2 // get feature flags + neg r7,r5 // start to compute #bytes to align + rlwinm r6,r6,0,1,0 // value now in all 8 bytes (if 64-bit) + andi. r0,r7,7 // r6 <- #bytes to doubleword align + stw r6,0(r5) // store 8 bytes to avoid a loop + stw r6,4(r5) + mtcrf 0x02,r10 // get pf64Bit flag etc in cr6 + sub r4,r4,r0 // adjust count + add r5,r5,r0 // doubleword align ptr + b bzero_tail + + -.L_bzeroNCEnd: - blr diff --git a/osfmk/ppc/cache.s b/osfmk/ppc/cache.s index 1844b788b..94d9fe0ca 100644 --- a/osfmk/ppc/cache.s +++ b/osfmk/ppc/cache.s @@ -26,82 +26,27 @@ * @OSF_COPYRIGHT@ */ -#include - #include #include -#include +#include #include -#include -#include -/* - * extern void sync_cache(vm_offset_t pa, unsigned count); - * - * sync_cache takes a physical address and count to sync, thus - * must not be called for multiple virtual pages. - * - * it writes out the data cache and invalidates the instruction - * cache for the address range in question +/* These routines run in 32 or 64-bit addressing, and handle + * 32 and 128 byte caches. They do not use compare instructions + * on addresses, since compares are 32/64-bit-mode-specific. */ -ENTRY(sync_cache, TAG_NO_FRAME_USED) - - /* Switch off data translations */ - mfmsr r6 - rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 - mtmsr r7 - isync - - /* Check to see if the address is aligned. */ - add r8, r3,r4 - andi. 
r8,r8,(CACHE_LINE_SIZE-1) - beq- .L_sync_check - addi r4,r4,CACHE_LINE_SIZE - li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */ - andc r4,r4,r7 - andc r3,r3,r7 - -.L_sync_check: - cmpwi r4, CACHE_LINE_SIZE - ble .L_sync_one_line - - /* Make ctr hold count of how many times we should loop */ - addi r8, r4, (CACHE_LINE_SIZE-1) - srwi r8, r8, CACHE_LINE_POW2 - mtctr r8 +#define kDcbf 0x1 +#define kDcbfb 31 +#define kDcbi 0x2 +#define kDcbib 30 +#define kIcbi 0x4 +#define kIcbib 29 - /* loop to flush the data cache */ -.L_sync_data_loop: - subic r4, r4, CACHE_LINE_SIZE - dcbf r3, r4 - bdnz .L_sync_data_loop - - sync - mtctr r8 - - /* loop to invalidate the instruction cache */ -.L_sync_inval_loop: - icbi r3, r4 - addic r4, r4, CACHE_LINE_SIZE - bdnz .L_sync_inval_loop - -.L_sync_cache_done: - sync /* Finish physical writes */ - mtmsr r6 /* Restore original translations */ - isync /* Ensure data translations are on */ - blr - -.L_sync_one_line: - dcbf 0,r3 - sync - icbi 0,r3 - b .L_sync_cache_done /* * extern void flush_dcache(vm_offset_t addr, unsigned count, boolean phys); + * extern void flush_dcache64(addr64_t addr, unsigned count, boolean phys); * * flush_dcache takes a virtual or physical address and count to flush * and (can be called for multiple virtual pages). @@ -112,172 +57,222 @@ ENTRY(sync_cache, TAG_NO_FRAME_USED) * if 'phys' is non-zero then physical addresses will be used */ -ENTRY(flush_dcache, TAG_NO_FRAME_USED) - - /* optionally switch off data translations */ - - cmpwi r5, 0 - mfmsr r6 - beq+ 0f - rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 - mtmsr r7 - isync -0: - - /* Check to see if the address is aligned. */ - add r8, r3,r4 - andi. r8,r8,(CACHE_LINE_SIZE-1) - beq- .L_flush_dcache_check - addi r4,r4,CACHE_LINE_SIZE - li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */ - andc r4,r4,r7 - andc r3,r3,r7 -.L_flush_dcache_check: - cmpwi r4, CACHE_LINE_SIZE - ble .L_flush_dcache_one_line - - /* Make ctr hold count of how many times we should loop */ - addi r8, r4, (CACHE_LINE_SIZE-1) - srwi r8, r8, CACHE_LINE_POW2 - mtctr r8 + + .text + .align 5 + .globl _flush_dcache +_flush_dcache: + li r0,kDcbf // use DCBF instruction + rlwinm r3,r3,0,0,31 // truncate address in case this is a 64-bit machine + b cache_op_join // join common code -.L_flush_dcache_flush_loop: - subic r4, r4, CACHE_LINE_SIZE - dcbf r3, r4 - bdnz .L_flush_dcache_flush_loop - -.L_flush_dcache_done: - /* Sync restore msr if it was modified */ - cmpwi r5, 0 - sync /* make sure invalidates have completed */ - beq+ 0f - mtmsr r6 /* Restore original translations */ - isync /* Ensure data translations are on */ -0: - blr - -.L_flush_dcache_one_line: - xor r4,r4,r4 - dcbf 0,r3 - b .L_flush_dcache_done + .align 5 + .globl _flush_dcache64 +_flush_dcache64: + rlwinm r3,r3,0,1,0 ; Duplicate high half of long long paddr into top of reg + li r0,kDcbf // use DCBF instruction + rlwimi r3,r4,0,0,31 ; Combine bottom of long long to full 64-bits + mr r4,r5 ; Move count + mr r5,r6 ; Move physical flag + b cache_op_join // join common code /* * extern void invalidate_dcache(vm_offset_t va, unsigned count, boolean phys); + * extern void invalidate_dcache64(addr64_t va, unsigned count, boolean phys); * * invalidate_dcache takes a virtual or physical address and count to * invalidate and (can be called for multiple virtual pages). 
* * it invalidates the data cache for the address range in question */ - -ENTRY(invalidate_dcache, TAG_NO_FRAME_USED) - - /* optionally switch off data translations */ - - cmpwi r5, 0 - mfmsr r6 - beq+ 0f - rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 - mtmsr r7 - isync -0: - - /* Check to see if the address is aligned. */ - add r8, r3,r4 - andi. r8,r8,(CACHE_LINE_SIZE-1) - beq- .L_invalidate_dcache_check - addi r4,r4,CACHE_LINE_SIZE - li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */ - andc r4,r4,r7 - andc r3,r3,r7 - -.L_invalidate_dcache_check: - cmpwi r4, CACHE_LINE_SIZE - ble .L_invalidate_dcache_one_line - - /* Make ctr hold count of how many times we should loop */ - addi r8, r4, (CACHE_LINE_SIZE-1) - srwi r8, r8, CACHE_LINE_POW2 - mtctr r8 - -.L_invalidate_dcache_invalidate_loop: - subic r4, r4, CACHE_LINE_SIZE - dcbi r3, r4 - bdnz .L_invalidate_dcache_invalidate_loop - -.L_invalidate_dcache_done: - /* Sync restore msr if it was modified */ - cmpwi r5, 0 - sync /* make sure invalidates have completed */ - beq+ 0f - mtmsr r6 /* Restore original translations */ - isync /* Ensure data translations are on */ -0: - blr - -.L_invalidate_dcache_one_line: - xor r4,r4,r4 - dcbi 0,r3 - b .L_invalidate_dcache_done + + .globl _invalidate_dcache +_invalidate_dcache: + li r0,kDcbi // use DCBI instruction + rlwinm r3,r3,0,0,31 // truncate address in case this is a 64-bit machine + b cache_op_join // join common code + + + .align 5 + .globl _invalidate_dcache64 +_invalidate_dcache64: + rlwinm r3,r3,0,1,0 ; Duplicate high half of long long paddr into top of reg + li r0,kDcbi // use DCBI instruction + rlwimi r3,r4,0,0,31 ; Combine bottom of long long to full 64-bits + mr r4,r5 ; Move count + mr r5,r6 ; Move physical flag + b cache_op_join // join common code /* * extern void invalidate_icache(vm_offset_t addr, unsigned cnt, boolean phys); + * extern void invalidate_icache64(addr64_t addr, unsigned cnt, boolean phys); * * invalidate_icache takes a virtual or physical address and * count to invalidate, (can be called for multiple virtual pages). * * it invalidates the instruction cache for the address range in question. + */ + + .globl _invalidate_icache +_invalidate_icache: + li r0,kIcbi // use ICBI instruction + rlwinm r3,r3,0,0,31 // truncate address in case this is a 64-bit machine + b cache_op_join // join common code + + + .align 5 + .globl _invalidate_icache64 +_invalidate_icache64: + rlwinm r3,r3,0,1,0 ; Duplicate high half of long long paddr into top of reg + li r0,kIcbi // use ICBI instruction + rlwimi r3,r4,0,0,31 ; Combine bottom of long long to full 64-bits + mr r4,r5 ; Move count + mr r5,r6 ; Move physical flag + b cache_op_join // join common code + +/* + * extern void sync_ppage(ppnum_t pa); + * + * sync_ppage takes a physical page number + * + * it writes out the data cache and invalidates the instruction + * cache for the address range in question */ -ENTRY(invalidate_icache, TAG_NO_FRAME_USED) - - /* optionally switch off data translations */ - cmpwi r5, 0 - mfmsr r6 - beq+ 0f - rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 - mtmsr r7 - isync -0: - - /* Check to see if the address is aligned. */ - add r8, r3,r4 - andi. 
r8,r8,(CACHE_LINE_SIZE-1) - beq- .L_invalidate_icache_check - addi r4,r4,CACHE_LINE_SIZE - li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */ - andc r4,r4,r7 - andc r3,r3,r7 + .globl _sync_ppage + .align 5 +_sync_ppage: // Should be the most commonly called routine, by far + mfsprg r2,2 + li r0,kDcbf+kIcbi // we need to dcbf and then icbi + mtcrf 0x02,r2 ; Move pf64Bit to cr6 + li r5,1 // set flag for physical addresses + li r4,4096 ; Set page size + bt++ pf64Bitb,spp64 ; Skip if 64-bit (only they take the hint) + rlwinm r3,r3,12,0,19 ; Convert to physical address - 32-bit + b cache_op_join ; Join up.... + +spp64: sldi r3,r3,12 ; Convert to physical address - 64-bit + b cache_op_join ; Join up.... + -.L_invalidate_icache_check: - cmpwi r4, CACHE_LINE_SIZE - ble .L_invalidate_icache_one_line - - /* Make ctr hold count of how many times we should loop */ - addi r8, r4, (CACHE_LINE_SIZE-1) - srwi r8, r8, CACHE_LINE_POW2 - mtctr r8 -.L_invalidate_icache_invalidate_loop: - subic r4, r4, CACHE_LINE_SIZE - icbi r3, r4 - bdnz .L_invalidate_icache_invalidate_loop +/* + * extern void sync_cache_virtual(vm_offset_t addr, unsigned count); + * + * Like "sync_cache", except it takes a virtual address and byte count. + * It flushes the data cache, invalidates the I cache, and sync's. + */ + + .globl _sync_cache_virtual + .align 5 +_sync_cache_virtual: + li r0,kDcbf+kIcbi // we need to dcbf and then icbi + li r5,0 // set flag for virtual addresses + b cache_op_join // join common code + + +/* + * extern void sync_cache(vm_offset_t pa, unsigned count); + * extern void sync_cache64(addr64_t pa, unsigned count); + * + * sync_cache takes a physical address and count to sync, thus + * must not be called for multiple virtual pages. + * + * it writes out the data cache and invalidates the instruction + * cache for the address range in question + */ -.L_invalidate_icache_done: - sync /* make sure invalidates have completed */ - mtmsr r6 /* Restore original translations */ - isync /* Ensure data translations are on */ - blr + .globl _sync_cache + .align 5 +_sync_cache: + li r0,kDcbf+kIcbi // we need to dcbf and then icbi + li r5,1 // set flag for physical addresses + rlwinm r3,r3,0,0,31 // truncate address in case this is a 64-bit machine + b cache_op_join // join common code + + .globl _sync_cache64 + .align 5 +_sync_cache64: + rlwinm r3,r3,0,1,0 ; Duplicate high half of long long paddr into top of reg + li r0,kDcbf+kIcbi // we need to dcbf and then icbi + rlwimi r3,r4,0,0,31 ; Combine bottom of long long to full 64-bits + mr r4,r5 ; Copy over the length + li r5,1 // set flag for physical addresses + + + // Common code to handle the cache operations. + +cache_op_join: // here with r3=addr, r4=count, r5=phys flag, r0=bits + mfsprg r10,2 // r10 <- processor feature flags + cmpwi cr5,r5,0 // using physical addresses? + mtcrf 0x01,r0 // move kDcbf, kDcbi, and kIcbi bits to CR7 + andi. r9,r10,pf32Byte+pf128Byte // r9 <- cache line size + mtcrf 0x02,r10 // move pf64Bit bit to CR6 + subi r8,r9,1 // r8 <- (linesize-1) + beq-- cr5,cache_op_2 // skip if using virtual addresses + + bf-- pf64Bitb,cache_op_not64 // This is not a 64-bit machine + + srdi r12,r3,31 // Slide bit 32 to bit 63 + cmpldi r12,1 // Are we in the I/O mapped area? + beqlr-- // No cache ops allowed here... 
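The common code below derives its loop bounds at cache_op_2 by rounding the start address down and the end address up to cache-line boundaries, so every line the range touches is operated on exactly once regardless of alignment. A C sketch of that arithmetic, under the assumption that the line size is a power of two (32 or 128 bytes on these processors); the function and parameter names are illustrative only, since the real code keeps the operation selection in CR7 and the line size in r9:

    #include <stdint.h>

    /* Sketch of the cache_op_2 bounds computation. first/bytes bracket the
       lines to operate on; the assembly counts bytes down in linesize steps
       in its dcbf, icbi, and dcbi loops. */
    void cache_op_bounds(uintptr_t addr, uint32_t count, uint32_t linesize,
                         uintptr_t *first, uint32_t *bytes)
    {
        uintptr_t mask = (uintptr_t)(linesize - 1);
        uintptr_t end  = addr + count;      /* first byte not operated on */

        *first = addr & ~mask;              /* round start down to a line */
        end    = (end + mask) & ~mask;      /* round end up to a line */
        *bytes = (uint32_t)(end - *first);  /* zero means nothing to do */
    }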
+ +cache_op_not64: + mflr r12 // save return address + bl EXT(ml_set_physical) // turn on physical addressing + mtlr r12 // restore return address + + // get r3=first cache line, r4=first line not in set, r6=byte count + +cache_op_2: + add r7,r3,r4 // point to 1st byte not to operate on + andc r3,r3,r8 // r3 <- 1st line to operate on + add r4,r7,r8 // round up + andc r4,r4,r8 // r4 <- 1st line not to operate on + sub. r6,r4,r3 // r6 <- byte count to operate on + beq-- cache_op_exit // nothing to do + bf-- kDcbfb,cache_op_6 // no need to dcbf + + + // DCBF loop + +cache_op_5: + sub. r6,r6,r9 // more to go? + dcbf r6,r3 // flush next line to RAM + bne cache_op_5 // loop if more to go + sync // make sure the data reaches RAM + sub r6,r4,r3 // reset count + + + // ICBI loop + +cache_op_6: + bf-- kIcbib,cache_op_8 // no need to icbi +cache_op_7: + sub. r6,r6,r9 // more to go? + icbi r6,r3 // invalidate next line + bne cache_op_7 + sub r6,r4,r3 // reset count + isync + sync + + + // DCBI loop + +cache_op_8: + bf++ kDcbib,cache_op_exit // no need to dcbi +cache_op_9: + sub. r6,r6,r9 // more to go? + dcbi r6,r3 // invalidate next line + bne cache_op_9 + sync + + + // restore MSR iff necessary and done + +cache_op_exit: + beqlr-- cr5 // if using virtual addresses, no need to restore MSR + b EXT(ml_restore) // restore MSR and return -.L_invalidate_icache_one_line: - xor r4,r4,r4 - icbi 0,r3 - b .L_invalidate_icache_done diff --git a/osfmk/ppc/chud/chud_cpu.c b/osfmk/ppc/chud/chud_cpu.c new file mode 100644 index 000000000..cbc5f8fef --- /dev/null +++ b/osfmk/ppc/chud/chud_cpu.c @@ -0,0 +1,459 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +__private_extern__ +int chudxnu_avail_cpu_count(void) +{ + host_basic_info_data_t hinfo; + kern_return_t kr; + mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; + + kr = host_info(host_self(), HOST_BASIC_INFO, (integer_t *)&hinfo, &count); + if(kr == KERN_SUCCESS) { + return hinfo.avail_cpus; + } else { + return 0; + } +} + +__private_extern__ +int chudxnu_phys_cpu_count(void) +{ + host_basic_info_data_t hinfo; + kern_return_t kr; + mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; + + kr = host_info(host_self(), HOST_BASIC_INFO, (integer_t *)&hinfo, &count); + if(kr == KERN_SUCCESS) { + return hinfo.max_cpus; + } else { + return 0; + } +} + +__private_extern__ +int chudxnu_cpu_number(void) +{ + return cpu_number(); +} + +__private_extern__ +kern_return_t chudxnu_enable_cpu(int cpu, boolean_t enable) +{ + chudxnu_unbind_current_thread(); + + if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument + return KERN_FAILURE; + } + + if(processor_ptr[cpu]!=PROCESSOR_NULL && processor_ptr[cpu]!=master_processor) { + if(enable) { + return processor_start(processor_ptr[cpu]); + } else { + return processor_exit(processor_ptr[cpu]); + } + } + return KERN_FAILURE; +} + +__private_extern__ +kern_return_t chudxnu_enable_cpu_nap(int cpu, boolean_t enable) +{ + if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument + return KERN_FAILURE; + } + + if(processor_ptr[cpu]!=PROCESSOR_NULL) { + ml_enable_nap(cpu, enable); + return KERN_SUCCESS; + } + + return KERN_FAILURE; +} + +__private_extern__ +boolean_t chudxnu_cpu_nap_enabled(int cpu) +{ + boolean_t prev; + + if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument + cpu = 0; + } + + prev = ml_enable_nap(cpu, TRUE); + ml_enable_nap(cpu, prev); + + return prev; +} + +__private_extern__ +kern_return_t chudxnu_set_shadowed_spr(int cpu, int spr, uint32_t val) +{ + cpu_subtype_t cpu_subtype; + uint32_t available; + kern_return_t retval = KERN_FAILURE; + + if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument + return KERN_FAILURE; + } + + chudxnu_bind_current_thread(cpu); + + available = per_proc_info[cpu].pf.Available; + cpu_subtype = machine_slot[cpu].cpu_subtype; + + if(spr==chud_750_l2cr) { + switch(cpu_subtype) { + case CPU_SUBTYPE_POWERPC_750: + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + if(available & pfL2) { +// int enable = (val & 0x80000000) ? TRUE : FALSE; +// if(enable) { +// per_proc_info[cpu].pf.l2cr = val; +// } else { +// per_proc_info[cpu].pf.l2cr = 0; +// } + per_proc_info[cpu].pf.l2cr = val; + cacheInit(); + // mtspr(l2cr, per_proc_info[cpu].pf.l2cr); // XXXXXXX why is this necessary? XXXXXXX + retval = KERN_SUCCESS; + } else { + retval = KERN_FAILURE; + } + break; + default: + retval = KERN_INVALID_ARGUMENT; + break; + } + } + else if(spr==chud_7450_l3cr) { + switch(cpu_subtype) { + case CPU_SUBTYPE_POWERPC_7450: + if(available & pfL3) { + int enable = (val & 0x80000000) ? 
TRUE : FALSE; + if(enable) { + per_proc_info[cpu].pf.l3cr = val; + } else { + per_proc_info[cpu].pf.l3cr = 0; + } + cacheInit(); + retval = KERN_SUCCESS; + } else { + retval = KERN_FAILURE; + } + break; + default: + retval = KERN_INVALID_ARGUMENT; + break; + } + } + else if(spr==chud_750_hid0) { + switch(cpu_subtype) { + case CPU_SUBTYPE_POWERPC_750: + cacheInit(); + cacheDisable(); /* disable caches */ + __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid0), "r" (val)); + per_proc_info[cpu].pf.pfHID0 = val; + cacheInit(); /* reenable caches */ + retval = KERN_SUCCESS; + break; + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid0), "r" (val)); + per_proc_info[cpu].pf.pfHID0 = val; + retval = KERN_SUCCESS; + break; + default: + retval = KERN_INVALID_ARGUMENT; + break; + } + } + else if(spr==chud_750_hid1) { + switch(cpu_subtype) { + case CPU_SUBTYPE_POWERPC_750: + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid1), "r" (val)); + per_proc_info[cpu].pf.pfHID1 = val; + retval = KERN_SUCCESS; + break; + default: + retval = KERN_INVALID_ARGUMENT; + break; + } + } + else if(spr==chud_750fx_hid2 && cpu_subtype==CPU_SUBTYPE_POWERPC_750) { + __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750fx_hid2), "r" (val)); + per_proc_info[cpu].pf.pfHID2 = val; + retval = KERN_SUCCESS; + } + else if(spr==chud_7400_msscr0 && (cpu_subtype==CPU_SUBTYPE_POWERPC_7400 || cpu_subtype==CPU_SUBTYPE_POWERPC_7450)) { + __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7400_msscr0), "r" (val)); + per_proc_info[cpu].pf.pfMSSCR0 = val; + retval = KERN_SUCCESS; + } + else if(spr==chud_7400_msscr1 && (cpu_subtype==CPU_SUBTYPE_POWERPC_7400 || cpu_subtype==CPU_SUBTYPE_POWERPC_7450)) { // called msssr0 on 7450 + __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7400_msscr1), "r" (val)); + per_proc_info[cpu].pf.pfMSSCR1 = val; + retval = KERN_SUCCESS; + } + else if(spr==chud_7450_ldstcr && cpu_subtype==CPU_SUBTYPE_POWERPC_7450) { + __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7450_ldstcr), "r" (val)); + per_proc_info[cpu].pf.pfLDSTCR = val; + retval = KERN_SUCCESS; + } + else if(spr==chud_7450_ictrl && cpu_subtype==CPU_SUBTYPE_POWERPC_7450) { + __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7450_ictrl), "r" (val)); + per_proc_info[cpu].pf.pfICTRL = val; + retval = KERN_SUCCESS; + } else { + retval = KERN_INVALID_ARGUMENT; + } + + chudxnu_unbind_current_thread(); + return retval; +} + +__private_extern__ +kern_return_t chudxnu_set_shadowed_spr64(int cpu, int spr, uint64_t val) +{ + cpu_subtype_t cpu_subtype; + kern_return_t retval = KERN_FAILURE; + + if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument + return KERN_FAILURE; + } + + chudxnu_bind_current_thread(cpu); + + cpu_subtype = machine_slot[cpu].cpu_subtype; + + if(spr==chud_970_hid0) { + switch(cpu_subtype) { + case CPU_SUBTYPE_POWERPC_970: + chudxnu_mthid0_64(&val); + per_proc_info[cpu].pf.pfHID0 = val; + retval = KERN_SUCCESS; + break; + default: + retval = KERN_INVALID_ARGUMENT; + break; + } + } + else if(spr==chud_970_hid1) { + switch(cpu_subtype) { + case CPU_SUBTYPE_POWERPC_970: + chudxnu_mthid1_64(&val); + per_proc_info[cpu].pf.pfHID1 = val; + retval = KERN_SUCCESS; + break; + default: + retval = KERN_INVALID_ARGUMENT; + break; + } + } + else if(spr==chud_970_hid4) { + switch(cpu_subtype) { + case CPU_SUBTYPE_POWERPC_970: + chudxnu_mthid4_64(&val); + per_proc_info[cpu].pf.pfHID4 = val; + retval =
KERN_SUCCESS; + break; + default: + retval = KERN_INVALID_ARGUMENT; + break; + } + } + else if(spr==chud_970_hid5) { + switch(cpu_subtype) { + case CPU_SUBTYPE_POWERPC_970: + chudxnu_mthid5_64(&val); + per_proc_info[cpu].pf.pfHID5 = val; + retval = KERN_SUCCESS; + break; + default: + retval = KERN_INVALID_ARGUMENT; + break; + } + } else { + retval = KERN_INVALID_ARGUMENT; + } + + chudxnu_unbind_current_thread(); + + return retval; +} + +__private_extern__ +uint32_t chudxnu_get_orig_cpu_l2cr(int cpu) +{ + if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument + cpu = 0; + } + return per_proc_info[cpu].pf.l2crOriginal; +} + +__private_extern__ +uint32_t chudxnu_get_orig_cpu_l3cr(int cpu) +{ + if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument + cpu = 0; + } + return per_proc_info[cpu].pf.l3crOriginal; +} + +__private_extern__ +void chudxnu_flush_caches(void) +{ + cacheInit(); +} + +__private_extern__ +void chudxnu_enable_caches(boolean_t enable) +{ + if(!enable) { + cacheInit(); + cacheDisable(); + } else { + cacheInit(); + } +} + +__private_extern__ +kern_return_t chudxnu_perfmon_acquire_facility(task_t task) +{ + return perfmon_acquire_facility(task); +} + +__private_extern__ +kern_return_t chudxnu_perfmon_release_facility(task_t task) +{ + return perfmon_release_facility(task); +} + +__private_extern__ +uint32_t * chudxnu_get_branch_trace_buffer(uint32_t *entries) +{ + extern int pc_trace_buf[1024]; + if(entries) { + *entries = sizeof(pc_trace_buf)/sizeof(int); + } + return pc_trace_buf; +} + +__private_extern__ +boolean_t chudxnu_get_interrupts_enabled(void) +{ + return ml_get_interrupts_enabled(); +} + +__private_extern__ +boolean_t chudxnu_set_interrupts_enabled(boolean_t enable) +{ + return ml_set_interrupts_enabled(enable); +} + +__private_extern__ +boolean_t chudxnu_at_interrupt_context(void) +{ + return ml_at_interrupt_context(); +} + +__private_extern__ +void chudxnu_cause_interrupt(void) +{ + ml_cause_interrupt(); +} + +__private_extern__ +kern_return_t chudxnu_get_cpu_rupt_counters(int cpu, rupt_counters_t *rupts) +{ + if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument + return KERN_FAILURE; + } + + if(rupts) { + boolean_t oldlevel = ml_set_interrupts_enabled(FALSE); + + rupts->hwResets = per_proc_info[cpu].hwCtr.hwResets; + rupts->hwMachineChecks = per_proc_info[cpu].hwCtr.hwMachineChecks; + rupts->hwDSIs = per_proc_info[cpu].hwCtr.hwDSIs; + rupts->hwISIs = per_proc_info[cpu].hwCtr.hwISIs; + rupts->hwExternals = per_proc_info[cpu].hwCtr.hwExternals; + rupts->hwAlignments = per_proc_info[cpu].hwCtr.hwAlignments; + rupts->hwPrograms = per_proc_info[cpu].hwCtr.hwPrograms; + rupts->hwFloatPointUnavailable = per_proc_info[cpu].hwCtr.hwFloatPointUnavailable; + rupts->hwDecrementers = per_proc_info[cpu].hwCtr.hwDecrementers; + rupts->hwIOErrors = per_proc_info[cpu].hwCtr.hwIOErrors; + rupts->hwSystemCalls = per_proc_info[cpu].hwCtr.hwSystemCalls; + rupts->hwTraces = per_proc_info[cpu].hwCtr.hwTraces; + rupts->hwFloatingPointAssists = per_proc_info[cpu].hwCtr.hwFloatingPointAssists; + rupts->hwPerformanceMonitors = per_proc_info[cpu].hwCtr.hwPerformanceMonitors; + rupts->hwAltivecs = per_proc_info[cpu].hwCtr.hwAltivecs; + rupts->hwInstBreakpoints = per_proc_info[cpu].hwCtr.hwInstBreakpoints; + rupts->hwSystemManagements = per_proc_info[cpu].hwCtr.hwSystemManagements; + rupts->hwAltivecAssists = per_proc_info[cpu].hwCtr.hwAltivecAssists; + rupts->hwThermal = per_proc_info[cpu].hwCtr.hwThermal; + 
rupts->hwSoftPatches = per_proc_info[cpu].hwCtr.hwSoftPatches; + rupts->hwMaintenances = per_proc_info[cpu].hwCtr.hwMaintenances; + rupts->hwInstrumentations = per_proc_info[cpu].hwCtr.hwInstrumentations; + + ml_set_interrupts_enabled(oldlevel); + return KERN_SUCCESS; + } else { + return KERN_FAILURE; + } +} + +__private_extern__ +kern_return_t chudxnu_clear_cpu_rupt_counters(int cpu) +{ + if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument + return KERN_FAILURE; + } + + bzero(&(per_proc_info[cpu].hwCtr), sizeof(struct hwCtrs)); + return KERN_SUCCESS; +} + +__private_extern__ +kern_return_t chudxnu_passup_alignment_exceptions(boolean_t enable) +{ + if(enable) { + dgWork.dgFlags |= enaNotifyEM; + } else { + dgWork.dgFlags &= ~enaNotifyEM; + } + + return KERN_SUCCESS; +} diff --git a/osfmk/ppc/chud/chud_cpu_asm.h b/osfmk/ppc/chud/chud_cpu_asm.h new file mode 100644 index 000000000..8f0ec5f36 --- /dev/null +++ b/osfmk/ppc/chud/chud_cpu_asm.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _CHUD_CPU_ASM_H_ +#define _CHUD_CPU_ASM_H_ + +void chudxnu_mfsrr0_64(uint64_t *val); +void chudxnu_mfsrr1_64(uint64_t *val); +void chudxnu_mfdar_64(uint64_t *val); +void chudxnu_mfsdr1_64(uint64_t *val); +void chudxnu_mfsprg0_64(uint64_t *val); +void chudxnu_mfsprg1_64(uint64_t *val); +void chudxnu_mfsprg2_64(uint64_t *val); +void chudxnu_mfsprg3_64(uint64_t *val); +void chudxnu_mfasr_64(uint64_t *val); +void chudxnu_mfdabr_64(uint64_t *val); +void chudxnu_mfhid0_64(uint64_t *val); +void chudxnu_mfhid1_64(uint64_t *val); +void chudxnu_mfhid4_64(uint64_t *val); +void chudxnu_mfhid5_64(uint64_t *val); +void chudxnu_mfmmcr0_64(uint64_t *val); +void chudxnu_mfmmcr1_64(uint64_t *val); +void chudxnu_mfmmcra_64(uint64_t *val); +void chudxnu_mfsiar_64(uint64_t *val); +void chudxnu_mfsdar_64(uint64_t *val); +void chudxnu_mfimc_64(uint64_t *val); +void chudxnu_mfrmor_64(uint64_t *val); +void chudxnu_mfhrmor_64(uint64_t *val); +void chudxnu_mfhior_64(uint64_t *val); +void chudxnu_mflpidr_64(uint64_t *val); +void chudxnu_mflpcr_64(uint64_t *val); +void chudxnu_mfdabrx_64(uint64_t *val); +void chudxnu_mfhsprg0_64(uint64_t *val); +void chudxnu_mfhsprg1_64(uint64_t *val); +void chudxnu_mfhsrr0_64(uint64_t *val); +void chudxnu_mfhsrr1_64(uint64_t *val); +void chudxnu_mfhdec_64(uint64_t *val); +void chudxnu_mftrig0_64(uint64_t *val); +void chudxnu_mftrig1_64(uint64_t *val); +void chudxnu_mftrig2_64(uint64_t *val); +void chudxnu_mfaccr_64(uint64_t *val); +void chudxnu_mfscomc_64(uint64_t *val); +void chudxnu_mfscomd_64(uint64_t *val); +void chudxnu_mfmsr_64(uint64_t *val); + +void chudxnu_mtsrr0_64(uint64_t *val); +void chudxnu_mtsrr1_64(uint64_t *val); +void chudxnu_mtdar_64(uint64_t *val); +void chudxnu_mtsdr1_64(uint64_t *val); +void chudxnu_mtsprg0_64(uint64_t *val); +void chudxnu_mtsprg1_64(uint64_t *val); +void chudxnu_mtsprg2_64(uint64_t *val); +void chudxnu_mtsprg3_64(uint64_t *val); +void chudxnu_mtasr_64(uint64_t *val); +void chudxnu_mtdabr_64(uint64_t *val); +void chudxnu_mthid0_64(uint64_t *val); +void chudxnu_mthid1_64(uint64_t *val); +void chudxnu_mthid4_64(uint64_t *val); +void chudxnu_mthid5_64(uint64_t *val); +void chudxnu_mtmmcr0_64(uint64_t *val); +void chudxnu_mtmmcr1_64(uint64_t *val); +void chudxnu_mtmmcra_64(uint64_t *val); +void chudxnu_mtsiar_64(uint64_t *val); +void chudxnu_mtsdar_64(uint64_t *val); +void chudxnu_mtimc_64(uint64_t *val); +void chudxnu_mtrmor_64(uint64_t *val); +void chudxnu_mthrmor_64(uint64_t *val); +void chudxnu_mthior_64(uint64_t *val); +void chudxnu_mtlpidr_64(uint64_t *val); +void chudxnu_mtlpcr_64(uint64_t *val); +void chudxnu_mtdabrx_64(uint64_t *val); +void chudxnu_mthsprg0_64(uint64_t *val); +void chudxnu_mthsprg1_64(uint64_t *val); +void chudxnu_mthsrr0_64(uint64_t *val); +void chudxnu_mthsrr1_64(uint64_t *val); +void chudxnu_mthdec_64(uint64_t *val); +void chudxnu_mttrig0_64(uint64_t *val); +void chudxnu_mttrig1_64(uint64_t *val); +void chudxnu_mttrig2_64(uint64_t *val); +void chudxnu_mtaccr_64(uint64_t *val); +void chudxnu_mtscomc_64(uint64_t *val); +void chudxnu_mtscomd_64(uint64_t *val); +void chudxnu_mtmsr_64(uint64_t *val); + +#endif // _CHUD_CPU_ASM_H_ diff --git a/osfmk/ppc/chud/chud_cpu_asm.s b/osfmk/ppc/chud/chud_cpu_asm.s new file mode 100644 index 000000000..6d309bbeb --- /dev/null +++ b/osfmk/ppc/chud/chud_cpu_asm.s @@ -0,0 +1,573 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. 
All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#define ASSEMBLER +#include +#include +#include + + .text + .align 5 + .globl EXT(chudxnu_mfsrr0_64) +EXT(chudxnu_mfsrr0_64): + mfspr r5,chud_ppc_srr0 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfsrr1_64) +EXT(chudxnu_mfsrr1_64): + mfspr r5,chud_ppc_srr1 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfdar_64) +EXT(chudxnu_mfdar_64): + mfspr r5,chud_ppc_dar + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfsdr1_64) +EXT(chudxnu_mfsdr1_64): + mfspr r5,chud_ppc_sdr1 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfsprg0_64) +EXT(chudxnu_mfsprg0_64): + mfspr r5,chud_ppc_sprg0 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfsprg1_64) +EXT(chudxnu_mfsprg1_64): + mfspr r5,chud_ppc_sprg1 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfsprg2_64) +EXT(chudxnu_mfsprg2_64): + mfspr r5,chud_ppc_sprg2 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfsprg3_64) +EXT(chudxnu_mfsprg3_64): + mfspr r5,chud_ppc_sprg3 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfasr_64) +EXT(chudxnu_mfasr_64): + mfspr r5,chud_ppc64_asr + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfdabr_64) +EXT(chudxnu_mfdabr_64): + mfspr r5,chud_ppc_dabr + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfhid0_64) +EXT(chudxnu_mfhid0_64): + mfspr r5,chud_970_hid0 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfhid1_64) +EXT(chudxnu_mfhid1_64): + mfspr r5,chud_970_hid1 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfhid4_64) +EXT(chudxnu_mfhid4_64): + mfspr r5,chud_970_hid4 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfhid5_64) +EXT(chudxnu_mfhid5_64): + mfspr r5,chud_970_hid5 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfmmcr0_64) +EXT(chudxnu_mfmmcr0_64): + mfspr r5,chud_970_mmcr0 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfmmcr1_64) +EXT(chudxnu_mfmmcr1_64): + mfspr r5,chud_970_mmcr1 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfmmcra_64) +EXT(chudxnu_mfmmcra_64): + mfspr r5,chud_970_mmcra + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfsiar_64) +EXT(chudxnu_mfsiar_64): + mfspr r5,chud_970_siar + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfsdar_64) +EXT(chudxnu_mfsdar_64): + mfspr r5,chud_970_sdar + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfimc_64) +EXT(chudxnu_mfimc_64): + mfspr r5,chud_970_imc + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfrmor_64) +EXT(chudxnu_mfrmor_64): + mfspr r5,chud_970_rmor + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfhrmor_64) +EXT(chudxnu_mfhrmor_64): + mfspr r5,chud_970_hrmor + std r5,0(r3) + blr + + .align 5 
+ .globl EXT(chudxnu_mfhior_64) +EXT(chudxnu_mfhior_64): + mfspr r5,chud_970_hior + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mflpidr_64) +EXT(chudxnu_mflpidr_64): + mfspr r5,chud_970_lpidr + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mflpcr_64) +EXT(chudxnu_mflpcr_64): + mfspr r5,chud_970_lpcr + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfdabrx_64) +EXT(chudxnu_mfdabrx_64): + mfspr r5,chud_970_dabrx + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfhsprg0_64) +EXT(chudxnu_mfhsprg0_64): + mfspr r5,chud_970_hsprg0 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfhsprg1_64) +EXT(chudxnu_mfhsprg1_64): + mfspr r5,chud_970_hsprg1 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfhsrr0_64) +EXT(chudxnu_mfhsrr0_64): + mfspr r5,chud_970_hsrr0 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfhsrr1_64) +EXT(chudxnu_mfhsrr1_64): + mfspr r5,chud_970_hsrr1 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfhdec_64) +EXT(chudxnu_mfhdec_64): + mfspr r5,chud_970_hdec + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mftrig0_64) +EXT(chudxnu_mftrig0_64): + mfspr r5,chud_970_trig0 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mftrig1_64) +EXT(chudxnu_mftrig1_64): + mfspr r5,chud_970_trig1 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mftrig2_64) +EXT(chudxnu_mftrig2_64): + mfspr r5,chud_970_trig2 + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfaccr_64) +EXT(chudxnu_mfaccr_64): + mfspr r5,chud_ppc64_accr + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfscomc_64) +EXT(chudxnu_mfscomc_64): + mfspr r5,chud_970_scomc + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mfscomd_64) +EXT(chudxnu_mfscomd_64): + mfspr r5,chud_970_scomd + std r5,0(r3) + blr + + .align 5 + .globl EXT(chudxnu_mtsrr0_64) +EXT(chudxnu_mtsrr0_64): + ld r5,0(r4) + mtspr chud_ppc_srr0,r5 + blr + + .align 5 + .globl EXT(chudxnu_mtsrr1_64) +EXT(chudxnu_mtsrr1_64): + ld r5,0(r4) + mtspr chud_ppc_srr1,r5 + blr + + .align 5 + .globl EXT(chudxnu_mtdar_64) +EXT(chudxnu_mtdar_64): + ld r5,0(r4) + mtspr chud_ppc_dar,r5 + blr + + .align 5 + .globl EXT(chudxnu_mtsdr1_64) +EXT(chudxnu_mtsdr1_64): + ld r5,0(r4) + mtspr chud_ppc_sdr1,r5 + blr + + .align 5 + .globl EXT(chudxnu_mtsprg0_64) +EXT(chudxnu_mtsprg0_64): + ld r5,0(r4) + mtspr chud_ppc_sprg0,r5 + blr + + .align 5 + .globl EXT(chudxnu_mtsprg1_64) +EXT(chudxnu_mtsprg1_64): + ld r5,0(r4) + mtspr chud_ppc_sprg1,r5 + blr + + .align 5 + .globl EXT(chudxnu_mtsprg2_64) +EXT(chudxnu_mtsprg2_64): + ld r5,0(r4) + mtspr chud_ppc_sprg2,r5 + blr + + .align 5 + .globl EXT(chudxnu_mtsprg3_64) +EXT(chudxnu_mtsprg3_64): + ld r5,0(r4) + mtspr chud_ppc_sprg3,r5 + blr + + .align 5 + .globl EXT(chudxnu_mtasr_64) +EXT(chudxnu_mtasr_64): + ld r5,0(r4) + mtspr chud_ppc64_asr,r5 + blr + + .align 5 + .globl EXT(chudxnu_mtdabr_64) +EXT(chudxnu_mtdabr_64): + ld r5,0(r4) + mtspr chud_ppc_dabr,r5 + blr + + .align 5 + .globl EXT(chudxnu_mthid0_64) +EXT(chudxnu_mthid0_64): + ld r5,0(r4) + sync + mtspr chud_970_hid0,r5 + mfspr r5,chud_970_hid0 /* syncronization requirements */ + mfspr r5,chud_970_hid0 + mfspr r5,chud_970_hid0 + mfspr r5,chud_970_hid0 + mfspr r5,chud_970_hid0 + mfspr r5,chud_970_hid0 + blr + + .align 5 + .globl EXT(chudxnu_mthid1_64) +EXT(chudxnu_mthid1_64): + ld r5,0(r4) + mtspr chud_970_hid1,r5 /* tell you twice */ + mtspr chud_970_hid1,r5 + isync + blr + + .align 5 + .globl EXT(chudxnu_mthid4_64) +EXT(chudxnu_mthid4_64): + ld r5,0(r4) + sync /* syncronization requirements */ + mtspr chud_970_hid4,r5 + 
isync
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mthid5_64)
+EXT(chudxnu_mthid5_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_hid5,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtmmcr0_64)
+EXT(chudxnu_mtmmcr0_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_mmcr0,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtmmcr1_64)
+EXT(chudxnu_mtmmcr1_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_mmcr1,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtmmcra_64)
+EXT(chudxnu_mtmmcra_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_mmcra,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtsiar_64)
+EXT(chudxnu_mtsiar_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_siar,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtsdar_64)
+EXT(chudxnu_mtsdar_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_sdar,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtimc_64)
+EXT(chudxnu_mtimc_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_imc,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtrmor_64)
+EXT(chudxnu_mtrmor_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_rmor,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mthrmor_64)
+EXT(chudxnu_mthrmor_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_hrmor,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mthior_64)
+EXT(chudxnu_mthior_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_hior,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtlpidr_64)
+EXT(chudxnu_mtlpidr_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_lpidr,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtlpcr_64)
+EXT(chudxnu_mtlpcr_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_lpcr,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtdabrx_64)
+EXT(chudxnu_mtdabrx_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_dabrx,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mthsprg0_64)
+EXT(chudxnu_mthsprg0_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_hsprg0,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mthsprg1_64)
+EXT(chudxnu_mthsprg1_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_hsprg1,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mthsrr0_64)
+EXT(chudxnu_mthsrr0_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_hsrr0,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mthsrr1_64)
+EXT(chudxnu_mthsrr1_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_hsrr1,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mthdec_64)
+EXT(chudxnu_mthdec_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_hdec,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mttrig0_64)
+EXT(chudxnu_mttrig0_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_trig0,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mttrig1_64)
+EXT(chudxnu_mttrig1_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_trig1,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mttrig2_64)
+EXT(chudxnu_mttrig2_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_trig2,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtaccr_64)
+EXT(chudxnu_mtaccr_64):
+	ld	r5,0(r4)
+	mtspr	chud_ppc64_accr,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtscomc_64)
+EXT(chudxnu_mtscomc_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_scomc,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtscomd_64)
+EXT(chudxnu_mtscomd_64):
+	ld	r5,0(r4)
+	mtspr	chud_970_scomd,r5
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mfmsr_64)
+EXT(chudxnu_mfmsr_64):
+	mfmsr	r5
+	std	r5,0(r3)
+	blr
+
+	.align 5
+	.globl EXT(chudxnu_mtmsr_64)
+EXT(chudxnu_mtmsr_64):
+	ld	r5,0(r3)
+	mtmsrd	r5
+	blr
+
diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.h b/osfmk/ppc/chud/chud_glue.c
similarity index 93%
rename from libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.h
rename to osfmk/ppc/chud/chud_glue.c
index ddc518f3d..eed5f7dea 100644
--- a/libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.h
+++ b/osfmk/ppc/chud/chud_glue.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
* * @APPLE_LICENSE_HEADER_START@ * @@ -22,3 +22,4 @@ * * @APPLE_LICENSE_HEADER_END@ */ + diff --git a/iokit/IOKit/adb/IOADBLib.h b/osfmk/ppc/chud/chud_memory.c similarity index 60% rename from iokit/IOKit/adb/IOADBLib.h rename to osfmk/ppc/chud/chud_memory.c index 1e50fb6fb..e034514ff 100644 --- a/iokit/IOKit/adb/IOADBLib.h +++ b/osfmk/ppc/chud/chud_memory.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -23,24 +23,31 @@ * @APPLE_LICENSE_HEADER_END@ */ +#include +#include -#define kNumADBMethods 4 +__private_extern__ +uint64_t chudxnu_avail_memory_size(void) +{ + extern vm_size_t mem_size; + return mem_size; +} -enum { - kADBReadDevice = 0, - kADBWriteDevice, - kADBClaimDevice, - kADBReleaseDevice -}; +__private_extern__ +uint64_t chudxnu_phys_memory_size(void) +{ + extern uint64_t mem_actual; + return mem_actual; +} -#ifndef KERNEL +__private_extern__ +vm_offset_t chudxnu_io_map(uint64_t phys_addr, vm_size_t size) +{ + return ml_io_map(phys_addr, size); // XXXXX limited to first 2GB XXXXX +} -#include - -io_connect_t IOPMFindADBController( mach_port_t ); -IOReturn IOPMClaimADBDevice ( io_connect_t, unsigned long ); -IOReturn IOPMReleaseADBDevice ( io_connect_t, unsigned long ); -IOReturn IOPMReadADBDevice ( io_connect_t, unsigned long, unsigned long, unsigned char *, unsigned long * ); -IOReturn IOPMWriteADBDevice ( io_connect_t, unsigned long, unsigned long, unsigned char *, unsigned long ); - -#endif +__private_extern__ +uint32_t chudxnu_phys_addr_wimg(uint64_t phys_addr) +{ + return IODefaultCacheBits(phys_addr); +} diff --git a/osfmk/ppc/chud/chud_osfmk_callback.c b/osfmk/ppc/chud/chud_osfmk_callback.c new file mode 100644 index 000000000..47c4a69a4 --- /dev/null +++ b/osfmk/ppc/chud/chud_osfmk_callback.c @@ -0,0 +1,421 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +extern kern_return_t chud_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv); +extern kern_return_t chud_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count); + +__private_extern__ +void chudxnu_cancel_all_callbacks(void) +{ + extern void chudxnu_exit_callback_cancel(void); + extern void chudxnu_thread_timer_callback_cancel(void); + + chudxnu_cpu_timer_callback_cancel_all(); + chudxnu_trap_callback_cancel(); + chudxnu_interrupt_callback_cancel(); + chudxnu_perfmon_ast_callback_cancel(); + chudxnu_cpusig_callback_cancel(); + chudxnu_kdebug_callback_cancel(); + chudxnu_exit_callback_cancel(); + chudxnu_thread_timer_callback_cancel(); +} + +#pragma mark **** cpu timer **** +static timer_call_data_t cpu_timer_call[NCPUS] = {{0}, {0}}; +static uint64_t t_deadline[NCPUS] = {0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL}; + +typedef void (*chudxnu_cpu_timer_callback_func_t)(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count); +static chudxnu_cpu_timer_callback_func_t cpu_timer_callback_fn[NCPUS] = {NULL, NULL}; + +static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0, timer_call_param_t param1) +{ + int cpu; + boolean_t oldlevel; + struct ppc_thread_state64 state; + mach_msg_type_number_t count; + + oldlevel = ml_set_interrupts_enabled(FALSE); + cpu = cpu_number(); + + count = PPC_THREAD_STATE64_COUNT; + if(chudxnu_thread_get_state(current_act(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) { + if(cpu_timer_callback_fn[cpu]) { + (cpu_timer_callback_fn[cpu])(PPC_THREAD_STATE64, (thread_state_t)&state, count); + } + } + + ml_set_interrupts_enabled(oldlevel); +} + +__private_extern__ +kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units) +{ + int cpu; + boolean_t oldlevel; + + oldlevel = ml_set_interrupts_enabled(FALSE); + cpu = cpu_number(); + + timer_call_cancel(&(cpu_timer_call[cpu])); // cancel any existing callback for this cpu + + cpu_timer_callback_fn[cpu] = func; + + clock_interval_to_deadline(time, units, &(t_deadline[cpu])); + timer_call_setup(&(cpu_timer_call[cpu]), chudxnu_private_cpu_timer_callback, NULL); + timer_call_enter(&(cpu_timer_call[cpu]), t_deadline[cpu]); + + ml_set_interrupts_enabled(oldlevel); + return KERN_SUCCESS; +} + +__private_extern__ +kern_return_t chudxnu_cpu_timer_callback_cancel(void) +{ + int cpu; + boolean_t oldlevel; + + oldlevel = ml_set_interrupts_enabled(FALSE); + cpu = cpu_number(); + + timer_call_cancel(&(cpu_timer_call[cpu])); + t_deadline[cpu] = t_deadline[cpu] | ~(t_deadline[cpu]); // set to max value + cpu_timer_callback_fn[cpu] = NULL; + + ml_set_interrupts_enabled(oldlevel); + return KERN_SUCCESS; +} + +__private_extern__ +kern_return_t chudxnu_cpu_timer_callback_cancel_all(void) +{ + int cpu; + + for(cpu=0; cpu=16) { + retval = KERN_FAILURE; + } else { + retval = hw_cpu_sync(temp, LockTimeOut); /* wait for the other processor */ + if(!retval) { + retval = KERN_FAILURE; + } else { + retval = KERN_SUCCESS; + } + } + } else { + retval = KERN_INVALID_ARGUMENT; + } + + ml_set_interrupts_enabled(oldlevel); + return retval; +} + +#pragma mark **** thread timer **** + +static thread_call_t thread_timer_call = NULL; + +typedef void 
(*chudxnu_thread_timer_callback_func_t)(uint32_t arg); +static chudxnu_thread_timer_callback_func_t thread_timer_callback_fn = NULL; + +static void chudxnu_private_thread_timer_callback(thread_call_param_t param0, thread_call_param_t param1) +{ + if(thread_timer_call) { + thread_call_free(thread_timer_call); + thread_timer_call = NULL; + + if(thread_timer_callback_fn) { + (thread_timer_callback_fn)((uint32_t)param0); + } + } +} + +__private_extern__ +kern_return_t chudxnu_thread_timer_callback_enter(chudxnu_thread_timer_callback_func_t func, uint32_t arg, uint32_t time, uint32_t units) +{ + if(!thread_timer_call) { + uint64_t t_delay; + thread_timer_callback_fn = func; + thread_timer_call = thread_call_allocate((thread_call_func_t)chudxnu_private_thread_timer_callback, (thread_call_param_t)arg); + clock_interval_to_deadline(time, units, &t_delay); + thread_call_enter_delayed(thread_timer_call, t_delay); + return KERN_SUCCESS; + } else { + return KERN_FAILURE; // thread timer call already pending + } +} + +__private_extern__ +kern_return_t chudxnu_thread_timer_callback_cancel(void) +{ + if(thread_timer_call) { + thread_call_free(thread_timer_call); + thread_timer_call = NULL; + } + thread_timer_callback_fn = NULL; + return KERN_SUCCESS; +} diff --git a/osfmk/ppc/chud/chud_spr.h b/osfmk/ppc/chud/chud_spr.h new file mode 100644 index 000000000..c1224f30a --- /dev/null +++ b/osfmk/ppc/chud/chud_spr.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _CHUD_SPR_H_ +#define _CHUD_SPR_H_ + +/* PPC SPRs - 32-bit and 64-bit implementations */ +#define chud_ppc_srr0 26 +#define chud_ppc_srr1 27 +#define chud_ppc_dsisr 18 +#define chud_ppc_dar 19 +#define chud_ppc_dec 22 +#define chud_ppc_sdr1 25 +#define chud_ppc_sprg0 272 +#define chud_ppc_sprg1 273 +#define chud_ppc_sprg2 274 +#define chud_ppc_sprg3 275 +#define chud_ppc_ear 282 +#define chud_ppc_tbl 284 +#define chud_ppc_tbu 285 +#define chud_ppc_pvr 287 +#define chud_ppc_ibat0u 528 +#define chud_ppc_ibat0l 529 +#define chud_ppc_ibat1u 530 +#define chud_ppc_ibat1l 531 +#define chud_ppc_ibat2u 532 +#define chud_ppc_ibat2l 533 +#define chud_ppc_ibat3u 534 +#define chud_ppc_ibat3l 535 +#define chud_ppc_dbat0u 536 +#define chud_ppc_dbat0l 537 +#define chud_ppc_dbat1u 538 +#define chud_ppc_dbat1l 539 +#define chud_ppc_dbat2u 540 +#define chud_ppc_dbat2l 541 +#define chud_ppc_dbat3u 542 +#define chud_ppc_dbat3l 543 +#define chud_ppc_dabr 1013 +#define chud_ppc_msr 10000 /* FAKE */ + +/* PPC SPRs - 32-bit implementations */ +#define chud_ppc32_sr0 20000 /* FAKE */ +#define chud_ppc32_sr1 20001 /* FAKE */ +#define chud_ppc32_sr2 20002 /* FAKE */ +#define chud_ppc32_sr3 20003 /* FAKE */ +#define chud_ppc32_sr4 20004 /* FAKE */ +#define chud_ppc32_sr5 20005 /* FAKE */ +#define chud_ppc32_sr6 20006 /* FAKE */ +#define chud_ppc32_sr7 20007 /* FAKE */ +#define chud_ppc32_sr8 20008 /* FAKE */ +#define chud_ppc32_sr9 20009 /* FAKE */ +#define chud_ppc32_sr10 20010 /* FAKE */ +#define chud_ppc32_sr11 20011 /* FAKE */ +#define chud_ppc32_sr12 20012 /* FAKE */ +#define chud_ppc32_sr13 20013 /* FAKE */ +#define chud_ppc32_sr14 20014 /* FAKE */ +#define chud_ppc32_sr15 20015 /* FAKE */ + +/* PPC SPRs - 64-bit implementations */ +#define chud_ppc64_asr 280 + +/* PPC SPRs - 750/750CX/750CXe/750FX Specific */ +#define chud_750_upmc1 937 +#define chud_750_upmc2 938 +#define chud_750_upmc3 941 +#define chud_750_upmc4 942 +#define chud_750_mmcr0 952 +#define chud_750_pmc1 953 +#define chud_750_pmc2 954 +#define chud_750_sia 955 +#define chud_750_mmcr1 956 +#define chud_750_pmc3 957 +#define chud_750_pmc4 958 +#define chud_750_hid0 1008 +#define chud_750_hid1 1009 +#define chud_750_iabr 1010 +#define chud_750_l2cr 1017 +#define chud_750_ictc 1019 +#define chud_750_thrm1 1020 +#define chud_750_thrm2 1021 +#define chud_750_thrm3 1022 +#define chud_750fx_ibat4u 560 /* 750FX only */ +#define chud_750fx_ibat4l 561 /* 750FX only */ +#define chud_750fx_ibat5u 562 /* 750FX only */ +#define chud_750fx_ibat5l 563 /* 750FX only */ +#define chud_750fx_ibat6u 564 /* 750FX only */ +#define chud_750fx_ibat6l 565 /* 750FX only */ +#define chud_750fx_ibat7u 566 /* 750FX only */ +#define chud_750fx_ibat7l 567 /* 750FX only */ +#define chud_750fx_dbat4u 568 /* 750FX only */ +#define chud_750fx_dbat4l 569 /* 750FX only */ +#define chud_750fx_dbat5u 570 /* 750FX only */ +#define chud_750fx_dbat5l 571 /* 750FX only */ +#define chud_750fx_dbat6u 572 /* 750FX only */ +#define chud_750fx_dbat6l 573 /* 750FX only */ +#define chud_750fx_dbat7u 574 /* 750FX only */ +#define chud_750fx_dbat7l 575 /* 750FX only */ +#define chud_750fx_hid2 1016 /* 750FX only */ + +/* PPC SPRs - 7400/7410 Specific */ +#define chud_7400_upmc1 937 +#define chud_7400_upmc2 938 +#define chud_7400_upmc3 941 +#define chud_7400_upmc4 942 +#define chud_7400_mmcr2 944 +#define chud_7400_bamr 951 +#define chud_7400_mmcr0 952 +#define chud_7400_pmc1 953 +#define chud_7400_pmc2 954 +#define chud_7400_siar 955 +#define 
chud_7400_mmcr1 956 +#define chud_7400_pmc3 957 +#define chud_7400_pmc4 958 +#define chud_7400_sda 959 +#define chud_7400_hid0 1008 +#define chud_7400_hid1 1009 +#define chud_7400_iabr 1010 +#define chud_7400_msscr0 1014 +#define chud_7410_l2pmcr 1016 /* 7410 only */ +#define chud_7400_l2cr 1017 +#define chud_7400_ictc 1019 +#define chud_7400_thrm1 1020 +#define chud_7400_thrm2 1021 +#define chud_7400_thrm3 1022 +#define chud_7400_pir 1023 + +/* PPC SPRs - 7450/7455 Specific */ +#define chud_7455_sprg4 276 /* 7455 only */ +#define chud_7455_sprg5 277 /* 7455 only */ +#define chud_7455_sprg6 278 /* 7455 only */ +#define chud_7455_sprg7 279 /* 7455 only */ +#define chud_7455_ibat4u 560 /* 7455 only */ +#define chud_7455_ibat4l 561 /* 7455 only */ +#define chud_7455_ibat5u 562 /* 7455 only */ +#define chud_7455_ibat5l 563 /* 7455 only */ +#define chud_7455_ibat6u 564 /* 7455 only */ +#define chud_7455_ibat6l 565 /* 7455 only */ +#define chud_7455_ibat7u 566 /* 7455 only */ +#define chud_7455_ibat7l 567 /* 7455 only */ +#define chud_7455_dbat4u 568 /* 7455 only */ +#define chud_7455_dbat4l 569 /* 7455 only */ +#define chud_7455_dbat5u 570 /* 7455 only */ +#define chud_7455_dbat5l 571 /* 7455 only */ +#define chud_7455_dbat6u 572 /* 7455 only */ +#define chud_7455_dbat6l 573 /* 7455 only */ +#define chud_7455_dbat7u 574 /* 7455 only */ +#define chud_7455_dbat7l 575 /* 7455 only */ +#define chud_7450_upmc5 929 +#define chud_7450_upmc6 930 +#define chud_7450_upmc1 937 +#define chud_7450_upmc2 938 +#define chud_7450_upmc3 941 +#define chud_7450_upmc4 942 +#define chud_7450_mmcr2 944 +#define chud_7450_pmc5 945 +#define chud_7450_pmc6 946 +#define chud_7450_bamr 951 +#define chud_7450_mmcr0 952 +#define chud_7450_pmc1 953 +#define chud_7450_pmc2 954 +#define chud_7450_siar 955 +#define chud_7450_mmcr1 956 +#define chud_7450_pmc3 957 +#define chud_7450_pmc4 958 +#define chud_7450_tlbmiss 980 +#define chud_7450_ptehi 981 +#define chud_7450_ptelo 982 +#define chud_7450_l3pm 983 +#define chud_7450_hid0 1008 +#define chud_7450_hid1 1009 +#define chud_7450_iabr 1010 +#define chud_7450_ldstdb 1012 +#define chud_7450_msscr0 1014 +#define chud_7450_msssr0 1015 +#define chud_7450_ldstcr 1016 +#define chud_7450_l2cr 1017 +#define chud_7450_l3cr 1018 +#define chud_7450_ictc 1019 +#define chud_7450_ictrl 1011 +#define chud_7450_thrm1 1020 +#define chud_7450_thrm2 1021 +#define chud_7450_thrm3 1022 +#define chud_7450_pir 1023 + +/* PPC SPRs - 970 Specific */ +#define chud_970_vrsave 256 +#define chud_970_ummcra 770 +#define chud_970_upmc1 771 +#define chud_970_upmc2 772 +#define chud_970_upmc3 773 +#define chud_970_upmc4 774 +#define chud_970_upmc5 775 +#define chud_970_upmc6 776 +#define chud_970_upmc7 777 +#define chud_970_upmc8 778 +#define chud_970_ummcr0 779 +#define chud_970_usiar 780 +#define chud_970_usdar 781 +#define chud_970_ummcr1 782 +#define chud_970_uimc 783 +#define chud_970_mmcra 786 +#define chud_970_pmc1 787 +#define chud_970_pmc2 788 +#define chud_970_pmc3 789 +#define chud_970_pmc4 790 +#define chud_970_pmc5 791 +#define chud_970_pmc6 792 +#define chud_970_pmc7 793 +#define chud_970_pmc8 794 +#define chud_970_mmcr0 795 +#define chud_970_siar 796 +#define chud_970_sdar 797 +#define chud_970_mmcr1 798 +#define chud_970_imc 799 + +/* PPC SPRs - 7400/7410 Specific */ +#define chud_7400_msscr1 1015 + +/* PPC SPRs - 64-bit implementations */ +#define chud_ppc64_accr 29 +#define chud_ppc64_ctrl 152 + +/* PPC SPRs - 970 Specific */ +#define chud_970_scomc 276 +#define chud_970_scomd 277 +#define 
chud_970_hsprg0	304
+#define chud_970_hsprg1	305
+#define chud_970_hdec	310
+#define chud_970_hior	311
+#define chud_970_rmor	312
+#define chud_970_hrmor	313
+#define chud_970_hsrr0	314
+#define chud_970_hsrr1	315
+#define chud_970_lpcr	318
+#define chud_970_lpidr	319
+#define chud_970_trig0	976
+#define chud_970_trig1	977
+#define chud_970_trig2	978
+#define chud_970_hid0	1008
+#define chud_970_hid1	1009
+#define chud_970_hid4	1012
+#define chud_970_hid5	1014
+#define chud_970_dabrx	1015
+#define chud_970_trace	1022
+#define chud_970_pir	1023
+
+#endif // _CHUD_SPR_H_
diff --git a/osfmk/ppc/chud/chud_thread.c b/osfmk/ppc/chud/chud_thread.c
new file mode 100644
index 000000000..fe398f0fa
--- /dev/null
+++ b/osfmk/ppc/chud/chud_thread.c
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+__private_extern__
+kern_return_t chudxnu_bind_current_thread(int cpu)
+{
+	if(cpu>=0 && cpu<chudxnu_avail_cpu_count()) { // check sanity of cpu argument
+		thread_bind(current_thread(), processor_ptr[cpu]);
+		thread_block((void (*)(void))0);
+		return KERN_SUCCESS;
+	} else {
+		return KERN_INVALID_ARGUMENT;
+	}
+}
+
+__private_extern__
+kern_return_t chudxnu_unbind_current_thread(void)
+{
+	thread_bind(current_thread(), PROCESSOR_NULL);
+	return KERN_SUCCESS;
+}
+
+static savearea *chudxnu_private_get_regs(void)
+{
+	return current_act()->mact.pcb; // take the top savearea (user or kernel)
+}
+
+static savearea *chudxnu_private_get_user_regs(void)
+{
+	return find_user_regs(current_act()); // take the top user savearea (skip any kernel saveareas)
+}
+
+static savearea_fpu *chudxnu_private_get_fp_regs(void)
+{
+	fpu_save(current_act()->mact.curctx); // just in case it's live, save it
+	return current_act()->mact.curctx->FPUsave; // take the top savearea (user or kernel)
+}
+
+static savearea_fpu *chudxnu_private_get_user_fp_regs(void)
+{
+	return find_user_fpu(current_act()); // take the top user savearea (skip any kernel saveareas)
+}
+
+static savearea_vec *chudxnu_private_get_vec_regs(void)
+{
+	vec_save(current_act()->mact.curctx); // just in case it's live, save it
+	return current_act()->mact.curctx->VMXsave; // take the top savearea (user or kernel)
+}
+
+static savearea_vec *chudxnu_private_get_user_vec_regs(void)
+{
+	return find_user_vec(current_act()); // take the top user savearea (skip any kernel saveareas)
+}
+
+__private_extern__
+kern_return_t chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv)
+{
+	struct ppc_thread_state *ts;
+	struct ppc_thread_state64 *xts;
+
+	switch(flavor) {
+	case PPC_THREAD_STATE:
+		if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok?
*/ + *count = 0; + return KERN_INVALID_ARGUMENT; + } + ts = (struct ppc_thread_state *) tstate; + if(sv) { + ts->r0 = (unsigned int)sv->save_r0; + ts->r1 = (unsigned int)sv->save_r1; + ts->r2 = (unsigned int)sv->save_r2; + ts->r3 = (unsigned int)sv->save_r3; + ts->r4 = (unsigned int)sv->save_r4; + ts->r5 = (unsigned int)sv->save_r5; + ts->r6 = (unsigned int)sv->save_r6; + ts->r7 = (unsigned int)sv->save_r7; + ts->r8 = (unsigned int)sv->save_r8; + ts->r9 = (unsigned int)sv->save_r9; + ts->r10 = (unsigned int)sv->save_r10; + ts->r11 = (unsigned int)sv->save_r11; + ts->r12 = (unsigned int)sv->save_r12; + ts->r13 = (unsigned int)sv->save_r13; + ts->r14 = (unsigned int)sv->save_r14; + ts->r15 = (unsigned int)sv->save_r15; + ts->r16 = (unsigned int)sv->save_r16; + ts->r17 = (unsigned int)sv->save_r17; + ts->r18 = (unsigned int)sv->save_r18; + ts->r19 = (unsigned int)sv->save_r19; + ts->r20 = (unsigned int)sv->save_r20; + ts->r21 = (unsigned int)sv->save_r21; + ts->r22 = (unsigned int)sv->save_r22; + ts->r23 = (unsigned int)sv->save_r23; + ts->r24 = (unsigned int)sv->save_r24; + ts->r25 = (unsigned int)sv->save_r25; + ts->r26 = (unsigned int)sv->save_r26; + ts->r27 = (unsigned int)sv->save_r27; + ts->r28 = (unsigned int)sv->save_r28; + ts->r29 = (unsigned int)sv->save_r29; + ts->r30 = (unsigned int)sv->save_r30; + ts->r31 = (unsigned int)sv->save_r31; + ts->cr = (unsigned int)sv->save_cr; + ts->xer = (unsigned int)sv->save_xer; + ts->lr = (unsigned int)sv->save_lr; + ts->ctr = (unsigned int)sv->save_ctr; + ts->srr0 = (unsigned int)sv->save_srr0; + ts->srr1 = (unsigned int)sv->save_srr1; + ts->mq = 0; + ts->vrsave = (unsigned int)sv->save_vrsave; + } else { + bzero((void *)ts, sizeof(struct ppc_thread_state)); + } + *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */ + return KERN_SUCCESS; + break; + case PPC_THREAD_STATE64: + if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? 
*/ + return KERN_INVALID_ARGUMENT; + } + xts = (struct ppc_thread_state64 *) tstate; + if(sv) { + xts->r0 = sv->save_r0; + xts->r1 = sv->save_r1; + xts->r2 = sv->save_r2; + xts->r3 = sv->save_r3; + xts->r4 = sv->save_r4; + xts->r5 = sv->save_r5; + xts->r6 = sv->save_r6; + xts->r7 = sv->save_r7; + xts->r8 = sv->save_r8; + xts->r9 = sv->save_r9; + xts->r10 = sv->save_r10; + xts->r11 = sv->save_r11; + xts->r12 = sv->save_r12; + xts->r13 = sv->save_r13; + xts->r14 = sv->save_r14; + xts->r15 = sv->save_r15; + xts->r16 = sv->save_r16; + xts->r17 = sv->save_r17; + xts->r18 = sv->save_r18; + xts->r19 = sv->save_r19; + xts->r20 = sv->save_r20; + xts->r21 = sv->save_r21; + xts->r22 = sv->save_r22; + xts->r23 = sv->save_r23; + xts->r24 = sv->save_r24; + xts->r25 = sv->save_r25; + xts->r26 = sv->save_r26; + xts->r27 = sv->save_r27; + xts->r28 = sv->save_r28; + xts->r29 = sv->save_r29; + xts->r30 = sv->save_r30; + xts->r31 = sv->save_r31; + xts->cr = sv->save_cr; + xts->xer = sv->save_xer; + xts->lr = sv->save_lr; + xts->ctr = sv->save_ctr; + xts->srr0 = sv->save_srr0; + xts->srr1 = sv->save_srr1; + xts->vrsave = sv->save_vrsave; + } else { + bzero((void *)xts, sizeof(struct ppc_thread_state64)); + } + *count = PPC_THREAD_STATE64_COUNT; /* Pass back the amount we actually copied */ + return KERN_SUCCESS; + break; + default: + *count = 0; + return KERN_INVALID_ARGUMENT; + break; + } +} + +__private_extern__ +kern_return_t chudxnu_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count) +{ + struct ppc_thread_state *ts; + struct ppc_thread_state64 *xts; + + switch(flavor) { + case PPC_THREAD_STATE: + if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */ + return KERN_INVALID_ARGUMENT; + } + ts = (struct ppc_thread_state *) tstate; + if(sv) { + sv->save_r0 = (uint64_t)ts->r0; + sv->save_r1 = (uint64_t)ts->r1; + sv->save_r2 = (uint64_t)ts->r2; + sv->save_r3 = (uint64_t)ts->r3; + sv->save_r4 = (uint64_t)ts->r4; + sv->save_r5 = (uint64_t)ts->r5; + sv->save_r6 = (uint64_t)ts->r6; + sv->save_r7 = (uint64_t)ts->r7; + sv->save_r8 = (uint64_t)ts->r8; + sv->save_r9 = (uint64_t)ts->r9; + sv->save_r10 = (uint64_t)ts->r10; + sv->save_r11 = (uint64_t)ts->r11; + sv->save_r12 = (uint64_t)ts->r12; + sv->save_r13 = (uint64_t)ts->r13; + sv->save_r14 = (uint64_t)ts->r14; + sv->save_r15 = (uint64_t)ts->r15; + sv->save_r16 = (uint64_t)ts->r16; + sv->save_r17 = (uint64_t)ts->r17; + sv->save_r18 = (uint64_t)ts->r18; + sv->save_r19 = (uint64_t)ts->r19; + sv->save_r20 = (uint64_t)ts->r20; + sv->save_r21 = (uint64_t)ts->r21; + sv->save_r22 = (uint64_t)ts->r22; + sv->save_r23 = (uint64_t)ts->r23; + sv->save_r24 = (uint64_t)ts->r24; + sv->save_r25 = (uint64_t)ts->r25; + sv->save_r26 = (uint64_t)ts->r26; + sv->save_r27 = (uint64_t)ts->r27; + sv->save_r28 = (uint64_t)ts->r28; + sv->save_r29 = (uint64_t)ts->r29; + sv->save_r30 = (uint64_t)ts->r30; + sv->save_r31 = (uint64_t)ts->r31; + sv->save_cr = ts->cr; + sv->save_xer = (uint64_t)ts->xer; + sv->save_lr = (uint64_t)ts->lr; + sv->save_ctr = (uint64_t)ts->ctr; + sv->save_srr0 = (uint64_t)ts->srr0; + sv->save_srr1 = (uint64_t)ts->srr1; + sv->save_vrsave = ts->vrsave; + return KERN_SUCCESS; + } else { + return KERN_FAILURE; + } + break; + case PPC_THREAD_STATE64: + if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? 
*/
+			return KERN_INVALID_ARGUMENT;
+		}
+		xts = (struct ppc_thread_state64 *) tstate;
+		if(sv) {
+			sv->save_r0  = xts->r0;
+			sv->save_r1  = xts->r1;
+			sv->save_r2  = xts->r2;
+			sv->save_r3  = xts->r3;
+			sv->save_r4  = xts->r4;
+			sv->save_r5  = xts->r5;
+			sv->save_r6  = xts->r6;
+			sv->save_r7  = xts->r7;
+			sv->save_r8  = xts->r8;
+			sv->save_r9  = xts->r9;
+			sv->save_r10 = xts->r10;
+			sv->save_r11 = xts->r11;
+			sv->save_r12 = xts->r12;
+			sv->save_r13 = xts->r13;
+			sv->save_r14 = xts->r14;
+			sv->save_r15 = xts->r15;
+			sv->save_r16 = xts->r16;
+			sv->save_r17 = xts->r17;
+			sv->save_r18 = xts->r18;
+			sv->save_r19 = xts->r19;
+			sv->save_r20 = xts->r20;
+			sv->save_r21 = xts->r21;
+			sv->save_r22 = xts->r22;
+			sv->save_r23 = xts->r23;
+			sv->save_r24 = xts->r24;
+			sv->save_r25 = xts->r25;
+			sv->save_r26 = xts->r26;
+			sv->save_r27 = xts->r27;
+			sv->save_r28 = xts->r28;
+			sv->save_r29 = xts->r29;
+			sv->save_r30 = xts->r30;
+			sv->save_r31 = xts->r31;
+			sv->save_cr  = xts->cr;
+			sv->save_xer = xts->xer;
+			sv->save_lr  = xts->lr;
+			sv->save_ctr = xts->ctr;
+			sv->save_srr0 = xts->srr0;
+			sv->save_srr1 = xts->srr1;
+			sv->save_vrsave = xts->vrsave;
+			return KERN_SUCCESS;
+		} else {
+			return KERN_FAILURE;
+		}
+	default:
+		return KERN_INVALID_ARGUMENT;
+	}
+}
+
+__private_extern__
+kern_return_t chudxnu_thread_get_state(thread_act_t thr_act,
+					thread_flavor_t flavor,
+					thread_state_t tstate,
+					mach_msg_type_number_t *count,
+					boolean_t user_only)
+{
+	if(thr_act==current_act()) {
+		if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) {
+			struct savearea *sv;
+			if(user_only) {
+				sv = chudxnu_private_get_user_regs();
+			} else {
+				sv = chudxnu_private_get_regs();
+			}
+			return chudxnu_copy_savearea_to_threadstate(flavor, tstate, count, sv);
+		} else if(flavor==PPC_FLOAT_STATE && user_only) {
+#warning chudxnu_thread_get_state() does not yet support supervisor FP
+			return machine_thread_get_state(current_act(), flavor, tstate, count);
+		} else if(flavor==PPC_VECTOR_STATE && user_only) {
+#warning chudxnu_thread_get_state() does not yet support supervisor VMX
+			return machine_thread_get_state(current_act(), flavor, tstate, count);
+		} else {
+			*count = 0;
+			return KERN_INVALID_ARGUMENT;
+		}
+	} else {
+		return machine_thread_get_state(thr_act, flavor, tstate, count);
+	}
+}
+
+__private_extern__
+kern_return_t chudxnu_thread_set_state(thread_act_t thr_act,
+					thread_flavor_t flavor,
+					thread_state_t tstate,
+					mach_msg_type_number_t count,
+					boolean_t user_only)
+{
+	if(thr_act==current_act()) {
+		if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) {
+			struct savearea *sv;
+			if(user_only) {
+				sv = chudxnu_private_get_user_regs();
+			} else {
+				sv = chudxnu_private_get_regs();
+			}
+			return chudxnu_copy_threadstate_to_savearea(sv, flavor, tstate, &count);
+		} else if(flavor==PPC_FLOAT_STATE && user_only) {
+#warning chudxnu_thread_set_state() does not yet support supervisor FP
+			return machine_thread_set_state(current_act(), flavor, tstate, count);
+		} else if(flavor==PPC_VECTOR_STATE && user_only) {
+#warning chudxnu_thread_set_state() does not yet support supervisor VMX
+			return machine_thread_set_state(current_act(), flavor, tstate, count);
+		} else {
+			return KERN_INVALID_ARGUMENT;
+		}
+	} else {
+		return machine_thread_set_state(thr_act, flavor, tstate, count);
+	}
+}
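
As a usage sketch, a CHUD client would drive the two entry points above like this, mirroring the call already made by chudxnu_private_cpu_timer_callback() earlier in this patch (the caller itself is hypothetical, not part of the patch):

    /* Hypothetical caller -- fetches the user-mode 64-bit register state
     * of the current thread; assumes the Mach thread-state headers. */
    struct ppc_thread_state64 state;
    mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;

    if (chudxnu_thread_get_state(chudxnu_current_act(), PPC_THREAD_STATE64,
                                 (thread_state_t)&state, &count, TRUE) == KERN_SUCCESS) {
        /* state.srr0 is the user PC; state.r1 is the user stack pointer */
    }
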
+
+static inline kern_return_t chudxnu_private_task_read_bytes(task_t task, vm_offset_t addr, int size, void *data)
+{
+	kern_return_t ret;
+
+	if(task==kernel_task) {
+		if(size==sizeof(unsigned int)) {
+			addr64_t phys_addr;
+			ppnum_t pp;
+
+			pp = pmap_find_phys(kernel_pmap, addr);		/* Get the page number */
+			if(!pp) return KERN_FAILURE;			/* Not mapped... */
+
+			phys_addr = ((addr64_t)pp << 12) | (addr & 0x0000000000000FFFULL);	/* Shove in the page offset */
+
+			if(phys_addr < mem_actual) {			/* Sanity check: is it in memory? */
+				*((uint32_t *)data) = ml_phys_read_64(phys_addr);
+				return KERN_SUCCESS;
+			}
+		} else {
+			return KERN_FAILURE;
+		}
+	} else {
+		ret = KERN_SUCCESS;					/* Assume everything worked */
+		if(copyin((void *)addr, data, size)) ret = KERN_FAILURE;	/* Get memory, if non-zero rc, it didn't work */
+		return ret;
+	}
+	return KERN_FAILURE;						/* kernel address not mapped or beyond physical memory */
+}
+
+// chudxnu_current_thread_get_callstack gathers a raw callstack along with any information needed to
+// fix it up later (in case we stopped the program as it was saving values into the prev stack frame, etc.)
+// after sampling has finished.
+//
+// For an N-entry callstack:
+//
+// [0]      current pc
+// [1..N-3] stack frames (including current one)
+// [N-2]    current LR (return address if we're in a leaf function)
+// [N-1]    current r0 (in case we've saved LR in r0)
+//
+
+#define FP_LINK_OFFSET		2
+#define STACK_ALIGNMENT_MASK	0xF // PPC stack frames are supposed to be 16-byte aligned
+#define INST_ALIGNMENT_MASK	0x3 // Instructions are always 4-bytes wide
+
+#ifndef USER_MODE
+#define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
+#endif
+
+#ifndef SUPERVISOR_MODE
+#define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
+#endif
+
+#define VALID_STACK_ADDRESS(addr)	(addr>=0x1000 && (addr&STACK_ALIGNMENT_MASK)==0x0 && (supervisor ? (addr>=kernStackMin && addr<=kernStackMax) : TRUE))
+
+__private_extern__
+kern_return_t chudxnu_current_thread_get_callstack(uint32_t *callStack,
+					mach_msg_type_number_t *count,
+					boolean_t user_only)
+{
+	kern_return_t kr;
+	vm_address_t nextFramePointer = 0;
+	vm_address_t currPC, currLR, currR0;
+	vm_address_t framePointer;
+	vm_address_t prevPC = 0;
+	vm_address_t kernStackMin = min_valid_stack_address();
+	vm_address_t kernStackMax = max_valid_stack_address();
+	unsigned int *buffer = callStack;
+	int bufferIndex = 0;
+	int bufferMaxIndex = *count;
+	boolean_t supervisor;
+	struct savearea *sv;
+
+	if(user_only) {
+		sv = chudxnu_private_get_user_regs();
+	} else {
+		sv = chudxnu_private_get_regs();
+	}
+
+	if(!sv) {
+		*count = 0;
+		return KERN_FAILURE;
+	}
+
+	supervisor = SUPERVISOR_MODE(sv->save_srr1);
+
+	if(!supervisor && ml_at_interrupt_context()) { // can't do copyin() if on interrupt stack
+		*count = 0;
+		return KERN_FAILURE;
+	}
+
+	bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
+	if(bufferMaxIndex<2) {
+		*count = 0;
+		return KERN_RESOURCE_SHORTAGE;
+	}
+
+	currPC = sv->save_srr0;
+	framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
+	currLR = sv->save_lr;
+	currR0 = sv->save_r0;
+
+	bufferIndex = 0;  // start with a stack of size zero
+	buffer[bufferIndex++] = currPC; // save PC in position 0.
+
+	// Now, fill buffer with stack backtraces.
+	while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
+		vm_address_t pc = 0;
+		// The caller's return address (saved LR) lives FP_LINK_OFFSET words
+		// above the frame pointer:
+		//	saved LR
+		//	saved CR
+		//->	SP
+		// Here, we'll get the lr from the stack.
+		volatile vm_address_t fp_link = (vm_address_t)(((unsigned *)framePointer)+FP_LINK_OFFSET);
+
+		// Note that we read the pc even for the first stack frame (which, in theory,
+		// is always empty because the callee fills it in just before it lowers the
+		// stack). However, if we catch the program in between filling in the return
+		// address and lowering the stack, we want to still have a valid backtrace.
+		// FixupStack correctly disregards this value if necessary.
+
+		if(supervisor) {
+			kr = chudxnu_private_task_read_bytes(kernel_task, fp_link, sizeof(unsigned int), &pc);
+		} else {
+			kr = chudxnu_private_task_read_bytes(current_task(), fp_link, sizeof(unsigned int), &pc);
+		}
+		if(kr!=KERN_SUCCESS) {
+			// IOLog("task_read_callstack: unable to read framePointer: %08x\n",framePointer);
+			pc = 0;
+			break;
+		}
+
+		// retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
+
+		if(supervisor) {
+			kr = chudxnu_private_task_read_bytes(kernel_task, framePointer, sizeof(unsigned int), &nextFramePointer);
+		} else {
+			kr = chudxnu_private_task_read_bytes(current_task(), framePointer, sizeof(unsigned int), &nextFramePointer);
+		}
+		if(kr!=KERN_SUCCESS) {
+			nextFramePointer = 0;
+		}
+
+		if(nextFramePointer) {
+			buffer[bufferIndex++] = pc;
+			prevPC = pc;
+		}
+
+		if(nextFramePointer<framePointer || VALID_STACK_ADDRESS(nextFramePointer)==FALSE) {
+			break;
+		} else {
+			framePointer = nextFramePointer;
+		}
+	}
+
+	if(bufferIndex>=bufferMaxIndex) {
+		*count = 0;
+		return KERN_RESOURCE_SHORTAGE;
+	}
+
+	// Save link register and R0 at bottom of stack. This means that we won't worry
+	// about these values messing up stack compression. These end up being used
+	// by FixupStack.
+	buffer[bufferIndex++] = currLR;
+	buffer[bufferIndex++] = currR0;
+
+	*count = bufferIndex;
+	return KERN_SUCCESS;
+}
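
The buffer layout documented above dictates how a consumer unpacks the result: the PC sits in slot 0 and the two fixup words sit at the end. A minimal sketch of a sampler built on this routine (MAX_FRAMES and the function itself are hypothetical, not part of this patch):

    #define MAX_FRAMES 32

    static void sample_current_thread(void)
    {
        uint32_t frames[MAX_FRAMES];
        mach_msg_type_number_t count = MAX_FRAMES;

        /* on success, count >= 3 by construction */
        if (chudxnu_current_thread_get_callstack(frames, &count, TRUE) == KERN_SUCCESS) {
            uint32_t pc = frames[0];          /* current PC */
            uint32_t lr = frames[count - 2];  /* current LR: the return address in a leaf function */
            uint32_t r0 = frames[count - 1];  /* current r0, in case LR was just staged there */
            /* frames[1..count-3] are the return addresses walked off r1 */
        }
    }
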
+
+__private_extern__
+int chudxnu_task_threads(task_t task,
+			thread_act_array_t *thr_act_list,
+			mach_msg_type_number_t *count)
+{
+	mach_msg_type_number_t task_thread_count = 0;
+	kern_return_t kr;
+
+	kr = task_threads(current_task(), thr_act_list, count);
+	if(kr==KERN_SUCCESS) {
+		thread_act_t thr_act;
+		int i, state_count;
+		for(i=0; i<(*count); i++) {
+			thr_act = convert_port_to_act(((ipc_port_t *)(*thr_act_list))[i]);
+			/* undo the mig conversion task_threads does */
+			(*thr_act_list)[i] = thr_act;
+		}
+	}
+	return kr;
+}
+
+__private_extern__
+thread_act_t chudxnu_current_act(void)
+{
+	return current_act();
+}
+
+__private_extern__
+task_t chudxnu_current_task(void)
+{
+	return current_task();
+}
+
+__private_extern__
+kern_return_t chudxnu_thread_info(thread_act_t thr_act,
+				thread_flavor_t flavor,
+				thread_info_t thread_info_out,
+				mach_msg_type_number_t *thread_info_count)
+{
+	return thread_info(thr_act, flavor, thread_info_out, thread_info_count);
+}
diff --git a/osfmk/ppc/chud/chud_xnu.h b/osfmk/ppc/chud/chud_xnu.h
new file mode 100644
index 000000000..a7a0dcd1f
--- /dev/null
+++ b/osfmk/ppc/chud/chud_xnu.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _PPC_CHUD_XNU_H_ +#define _PPC_CHUD_XNU_H_ + + +#include +#include +#include + +#pragma mark **** process **** +// ******************************************************************************** +// process +// ******************************************************************************** +int chudxnu_pid_for_task(task_t task); +task_t chudxnu_task_for_pid(int pid); +int chudxnu_current_pid(void); + +#pragma mark **** thread **** +// ******************************************************************************** +// thread +// ******************************************************************************** +kern_return_t chudxnu_bind_current_thread(int cpu); + +kern_return_t chudxnu_unbind_current_thread(void); + +kern_return_t chudxnu_thread_get_state(thread_act_t thr_act, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t *count, + boolean_t user_only); + +kern_return_t chudxnu_thread_set_state(thread_act_t thr_act, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t count, + boolean_t user_only); + +kern_return_t chudxnu_current_thread_get_callstack(uint32_t *callStack, + mach_msg_type_number_t *count, + boolean_t user_only); + +task_t chudxnu_current_task(void); + +thread_act_t chudxnu_current_act(void); + +int chudxnu_task_threads(task_t task, + thread_act_array_t *thr_act_list, + mach_msg_type_number_t *count); + +kern_return_t chudxnu_thread_info(thread_act_t thr_act, + thread_flavor_t flavor, + thread_info_t thread_info_out, + mach_msg_type_number_t *thread_info_count); + +#pragma mark **** memory **** +// ******************************************************************************** +// memory +// ******************************************************************************** + +uint64_t chudxnu_avail_memory_size(void); +uint64_t chudxnu_phys_memory_size(void); + +vm_offset_t chudxnu_io_map(uint64_t phys_addr, vm_size_t size); + +uint32_t chudxnu_phys_addr_wimg(uint64_t phys_addr); + +#pragma mark **** cpu **** +// ******************************************************************************** +// cpu +// ******************************************************************************** +int chudxnu_avail_cpu_count(void); +int chudxnu_phys_cpu_count(void); +int chudxnu_cpu_number(void); + +kern_return_t chudxnu_enable_cpu(int cpu, boolean_t enable); + +kern_return_t chudxnu_enable_cpu_nap(int cpu, boolean_t enable); +boolean_t chudxnu_cpu_nap_enabled(int cpu); + +boolean_t chudxnu_get_interrupts_enabled(void); +boolean_t chudxnu_set_interrupts_enabled(boolean_t enable); +boolean_t chudxnu_at_interrupt_context(void); +void chudxnu_cause_interrupt(void); + +kern_return_t chudxnu_set_shadowed_spr(int cpu, int spr, uint32_t val); +kern_return_t chudxnu_set_shadowed_spr64(int cpu, int spr, uint64_t val); + +uint32_t chudxnu_get_orig_cpu_l2cr(int cpu); +uint32_t chudxnu_get_orig_cpu_l3cr(int cpu); + +void chudxnu_flush_caches(void); +void chudxnu_enable_caches(boolean_t enable); + +kern_return_t chudxnu_perfmon_acquire_facility(task_t); +kern_return_t chudxnu_perfmon_release_facility(task_t); + +uint32_t * chudxnu_get_branch_trace_buffer(uint32_t *entries); + +typedef struct { + uint32_t hwResets; + uint32_t hwMachineChecks; + uint32_t hwDSIs; + uint32_t hwISIs; + uint32_t hwExternals; + uint32_t hwAlignments; + uint32_t hwPrograms; + uint32_t hwFloatPointUnavailable; + uint32_t hwDecrementers; + uint32_t hwIOErrors; + uint32_t hwSystemCalls; + uint32_t hwTraces; + uint32_t 
hwFloatingPointAssists; + uint32_t hwPerformanceMonitors; + uint32_t hwAltivecs; + uint32_t hwInstBreakpoints; + uint32_t hwSystemManagements; + uint32_t hwAltivecAssists; + uint32_t hwThermal; + uint32_t hwSoftPatches; + uint32_t hwMaintenances; + uint32_t hwInstrumentations; +} rupt_counters_t; + +kern_return_t chudxnu_get_cpu_rupt_counters(int cpu, rupt_counters_t *rupts); +kern_return_t chudxnu_clear_cpu_rupt_counters(int cpu); + +kern_return_t chudxnu_passup_alignment_exceptions(boolean_t enable); + +#pragma mark **** callbacks **** +// ******************************************************************************** +// callbacks +// ******************************************************************************** + +void chudxnu_cancel_all_callbacks(void); + +// cpu timer - each cpu has its own callback +typedef kern_return_t (*chudxnu_cpu_timer_callback_func_t)(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count); +kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units); // callback is entered on current cpu +kern_return_t chudxnu_cpu_timer_callback_cancel(void); // callback is cleared on current cpu +kern_return_t chudxnu_cpu_timer_callback_cancel_all(void); // callback is cleared on all cpus + +// trap callback - one callback for system +typedef kern_return_t (*chudxnu_trap_callback_func_t)(uint32_t trapentry, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count); +kern_return_t chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func); +kern_return_t chudxnu_trap_callback_cancel(void); + +// interrupt callback - one callback for system +typedef kern_return_t (*chudxnu_interrupt_callback_func_t)(uint32_t trapentry, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count); +kern_return_t chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func); +kern_return_t chudxnu_interrupt_callback_cancel(void); + +// ast callback - one callback for system +typedef kern_return_t (*chudxnu_perfmon_ast_callback_func_t)(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count); +kern_return_t chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func); +kern_return_t chudxnu_perfmon_ast_callback_cancel(void); +kern_return_t chudxnu_perfmon_ast_send(void); + +// cpusig callback - one callback for system +typedef kern_return_t (*chudxnu_cpusig_callback_func_t)(int request, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count); +kern_return_t chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func); +kern_return_t chudxnu_cpusig_callback_cancel(void); +kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request); + +// kdebug callback - one callback for system +typedef kern_return_t (*chudxnu_kdebug_callback_func_t)(uint32_t debugid, uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint32_t arg4); +kern_return_t chudxnu_kdebug_callback_enter(chudxnu_kdebug_callback_func_t func); +kern_return_t chudxnu_kdebug_callback_cancel(void); + +// task exit callback - one callback for system +typedef kern_return_t (*chudxnu_exit_callback_func_t)(int pid); +kern_return_t chudxnu_exit_callback_enter(chudxnu_exit_callback_func_t func); +kern_return_t chudxnu_exit_callback_cancel(void); + +// thread timer callback - one callback for system +typedef kern_return_t (*chudxnu_thread_timer_callback_func_t)(uint32_t arg); +kern_return_t 
chudxnu_thread_timer_callback_enter(chudxnu_thread_timer_callback_func_t func, uint32_t arg, uint32_t time, uint32_t units);
+kern_return_t chudxnu_thread_timer_callback_cancel(void);
+
+
+#endif /* _PPC_CHUD_XNU_H_ */
diff --git a/osfmk/ppc/POWERMAC/dbdma.h b/osfmk/ppc/chud/chud_xnu_glue.h
similarity index 91%
rename from osfmk/ppc/POWERMAC/dbdma.h
rename to osfmk/ppc/chud/chud_xnu_glue.h
index b1d88470a..eed5f7dea 100644
--- a/osfmk/ppc/POWERMAC/dbdma.h
+++ b/osfmk/ppc/chud/chud_xnu_glue.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
@@ -22,4 +22,4 @@
 *
 * @APPLE_LICENSE_HEADER_END@
 */
-#include
+
diff --git a/osfmk/ppc/commpage/bcopy_64.s b/osfmk/ppc/commpage/bcopy_64.s
new file mode 100644
index 000000000..60df800ec
--- /dev/null
+++ b/osfmk/ppc/commpage/bcopy_64.s
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/* =======================================
+ * BCOPY, MEMCPY, and MEMMOVE for Mac OS X
+ * =======================================
+ *
+ * Version of 2/20/2003, for a hypothetical 64-bit processor without Altivec.
+ * This version might be used when bringing up new processors, with known
+ * Altivec bugs that need to be worked around. It is not particularly well
+ * optimized.
+ *
+ * Register usage. Note we use R2, so this code will not run in a PEF/CFM
+ * environment.
+ *   r0  = "w7" or temp
+ *   r2  = "w8"
+ *   r3  = not used, as memcpy and memmove return 1st parameter as a value
+ *   r4  = source ptr ("rs")
+ *   r5  = count of bytes to move ("rc")
+ *   r6  = "w1"
+ *   r7  = "w2"
+ *   r8  = "w3"
+ *   r9  = "w4"
+ *   r10 = "w5"
+ *   r11 = "w6"
+ *   r12 = destination ptr ("rd")
+ */
+#define rs	r4
+#define rd	r12
+#define rc	r5
+#define rv	r2
+
+#define w1	r6
+#define w2	r7
+#define w3	r8
+#define w4	r9
+#define w5	r10
+#define w6	r11
+#define w7	r0
+#define w8	r2
+
+#define ASSEMBLER
+#include
+#include
+#include
+#include
+
+	.text
+	.globl EXT(bcopy_64)
+
+#define kLong	64	// too long for inline loopless code
+
+
+// Main entry points.
+
+	.align 5
+bcopy_64:				// void bcopy(const void *src, void *dst, size_t len)
+	cmplwi	rc,kLong		// short or long?
+	sub	w1,r4,r3		// must move in reverse if (rd-rs)<rc
+#include
+#include
+#include
+#include
+
+	.text
+	.globl EXT(bcopy_970)
+
+
+#define kShort		64
+#define kVeryLong	(128*1024)
+
+
+// Main entry points.
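
The kShort and kVeryLong constants above split incoming copies into three regimes, which the strategy comments further below describe in detail. In C terms the dispatch amounts to something like this sketch (illustrative only; the actual selection is done with cmplwi and branches on rc, and the names here are not part of the patch):

    #include <stddef.h>

    enum { kShortSketch = 64, kVeryLongSketch = 128 * 1024 };  /* mirror kShort/kVeryLong */

    void *memcpy_sketch(void *dst, const void *src, size_t len)
    {
        (void)src;
        if (len < kShortSketch) {
            /* short operands: compact scalar code, no vector setup cost */
        } else if (len >= kVeryLongSketch) {
            /* operands of several pages: hand off to the commpage "bigcopy" path */
        } else {
            /* medium operands: 128-byte vector (lvx/stvx) or 32-byte scalar (ld/std)
               loops, chosen by source alignment */
        }
        return dst;  /* memcpy/memmove return the destination pointer */
    }
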
+ + .align 5 +bcopy_970: // void bcopy(const void *src, void *dst, size_t len) + cmplwi rc,kShort // short or long? + sub w1,r4,r3 // must move in reverse if (rd-rs)=kVeryLong (ie, several pages), then use the +// "bigcopy" path that pulls all the punches. This is the fastest +// case for cold-cache operands, as any this long will likely be. +// 2. If length>=128 and source is 16-byte aligned, then use the +// lvx/stvx loop over 128-byte chunks. This is the fastest +// case for hot-cache operands, 2nd fastest for cold. +// 3. If length>=128 and source is not 16-byte aligned, then use the +// lvx/vperm/stvx loop over 128-byte chunks. +// 4. If length<128 and source is 8-byte aligned, then use the +// ld/std loop over 32-byte chunks. +// 5. If length<128 and source is not 8-byte aligned, then use the +// lvx/vperm/stvx loop over 32-byte chunks. This is the slowest case. +// Registers at this point: +// r0/cr1 = count of cache lines ("chunks") that we'll cover (may be 0) +// rs = alignment unknown +// rd = 16-byte aligned +// rc = bytes remaining +// w2 = low 4 bits of (rd-rs), used to check alignment +// cr5 = beq if source is also 16-byte aligned + +LFwdAligned: + andi. w3,w2,7 // is source at least 8-byte aligned? + mtcrf 0x01,rc // move leftover count to cr7 for LShort16 + bne cr1,LFwdLongVectors // at least one 128-byte chunk, so use vectors + srwi w1,rc,5 // get 32-byte chunk count + mtcrf 0x02,rc // move bit 27 of length to cr6 for LShort32 + mtctr w1 // set up 32-byte loop (w1!=0) + beq LFwdMedAligned // source is 8-byte aligned, so use ld/std loop + mfspr rv,vrsave // get bitmap of live vector registers + oris w4,rv,0xFFF8 // we use v0-v12 + li c16,16 // get constant used in lvx + li c32,32 + mtspr vrsave,w4 // update mask + lvx v1,0,rs // prefetch 1st source quadword + lvsl vp,0,rs // get permute vector to shift left + + +// Fewer than 128 bytes but not doubleword aligned: use lvx/vperm/stvx. + +1: // loop over 32-byte chunks + lvx v2,c16,rs + lvx v3,c32,rs + addi rs,rs,32 + vperm vx,v1,v2,vp + vperm vy,v2,v3,vp + vor v1,v3,v3 // v1 <- v3 + stvx vx,0,rd + stvx vy,c16,rd + addi rd,rd,32 + bdnz 1b + + mtspr vrsave,rv // restore bitmap of live vr's + b LShort32 + + +// Fewer than 128 bytes and doubleword aligned: use ld/std. + + .align 5 +LFwdMedAligned: // loop over 32-byte chunks + ld w1,0(rs) + ld w2,8(rs) + ld w3,16(rs) + ld w4,24(rs) + addi rs,rs,32 + std w1,0(rd) + std w2,8(rd) + std w3,16(rd) + std w4,24(rd) + addi rd,rd,32 + bdnz LFwdMedAligned + + b LShort32 + + +// Forward, 128 bytes or more: use vectors. When entered: +// r0 = 128-byte chunks to move (>0) +// rd = 16-byte aligned +// cr5 = beq if source is 16-byte aligned +// cr7 = low 4 bits of rc (ie, leftover byte count 0-15) +// We set up many registers: +// ctr = number of 128-byte chunks to move +// r0/cr0 = leftover QWs to move +// cr7 = low 4 bits of rc (ie, leftover byte count 0-15) +// cr6 = beq if leftover byte count is 0 +// rv = original value of VRSave +// c16,c32,c48 = loaded + +LFwdLongVectors: + mfspr rv,vrsave // get bitmap of live vector registers + lis w3,kVeryLong>>16 // cutoff for very-long-operand special case path + cmplw cr1,rc,w3 // very long operand? + rlwinm w3,rc,0,28,31 // move last 0-15 byte count to w3 + bgea-- cr1,_COMM_PAGE_BIGCOPY // handle big copies separately + mtctr r0 // set up loop count + cmpwi cr6,w3,0 // set cr6 on leftover byte count + oris w4,rv,0xFFF8 // we use v0-v12 + rlwinm. 
r0,rc,28,29,31 // get number of quadword leftovers (0-7) and set cr0 + li c16,16 // get constants used in ldvx/stvx + mtspr vrsave,w4 // update mask + li c32,32 + li c48,48 + beq cr5,LFwdLongAligned // source is also 16-byte aligned, no need for vperm + lvsl vp,0,rs // get permute vector to shift left + lvx v1,0,rs // prefetch 1st source quadword + b LFwdLongUnaligned + + +// Forward, long, unaligned vector loop. + + .align 5 // align inner loops +LFwdLongUnaligned: // loop over 128-byte chunks + addi w4,rs,64 + lvx v2,c16,rs + lvx v3,c32,rs + lvx v4,c48,rs + lvx v5,0,w4 + lvx v6,c16,w4 + vperm vw,v1,v2,vp + lvx v7,c32,w4 + lvx v8,c48,w4 + addi rs,rs,128 + vperm vx,v2,v3,vp + addi w4,rd,64 + lvx v1,0,rs + stvx vw,0,rd + vperm vy,v3,v4,vp + stvx vx,c16,rd + vperm vz,v4,v5,vp + stvx vy,c32,rd + vperm vw,v5,v6,vp + stvx vz,c48,rd + vperm vx,v6,v7,vp + addi rd,rd,128 + stvx vw,0,w4 + vperm vy,v7,v8,vp + stvx vx,c16,w4 + vperm vz,v8,v1,vp + stvx vy,c32,w4 + stvx vz,c48,w4 + bdnz LFwdLongUnaligned + + beq 4f // no leftover quadwords + mtctr r0 +3: // loop over remaining quadwords + lvx v2,c16,rs + addi rs,rs,16 + vperm vx,v1,v2,vp + vor v1,v2,v2 // v1 <- v2 + stvx vx,0,rd + addi rd,rd,16 + bdnz 3b +4: + mtspr vrsave,rv // restore bitmap of live vr's + bne cr6,LShort16 // handle last 0-15 bytes if any + blr + + +// Forward, long, 16-byte aligned vector loop. + + .align 5 +LFwdLongAligned: // loop over 128-byte chunks + addi w4,rs,64 + lvx v1,0,rs + lvx v2,c16,rs + lvx v3,c32,rs + lvx v4,c48,rs + lvx v5,0,w4 + lvx v6,c16,w4 + lvx v7,c32,w4 + lvx v8,c48,w4 + addi rs,rs,128 + addi w4,rd,64 + stvx v1,0,rd + stvx v2,c16,rd + stvx v3,c32,rd + stvx v4,c48,rd + stvx v5,0,w4 + stvx v6,c16,w4 + stvx v7,c32,w4 + stvx v8,c48,w4 + addi rd,rd,128 + bdnz LFwdLongAligned + + beq 4f // no leftover quadwords + mtctr r0 +3: // loop over remaining quadwords (1-7) + lvx v1,0,rs + addi rs,rs,16 + stvx v1,0,rd + addi rd,rd,16 + bdnz 3b +4: + mtspr vrsave,rv // restore bitmap of live vr's + bne cr6,LShort16 // handle last 0-15 bytes if any + blr + + +// Long, reverse moves. +// rs = source +// rd = destination +// rc = count +// cr5 = beq if relatively 16-byte aligned + +LLongReverse: + add rd,rd,rc // point to end of operands + add rs,rs,rc + andi. r0,rd,0xF // #bytes to 16-byte align destination + beq 2f // already aligned + +// 16-byte align destination. + + mtctr r0 // set up for loop + sub rc,rc,r0 +1: + lbzu w1,-1(rs) + stbu w1,-1(rd) + bdnz 1b + +// Prepare for reverse vector loop. When entered: +// rd = 16-byte aligned +// cr5 = beq if source also 16-byte aligned +// We set up many registers: +// ctr/cr1 = number of 64-byte chunks to move (may be 0) +// r0/cr0 = leftover QWs to move +// cr7 = low 4 bits of rc (ie, leftover byte count 0-15) +// cr6 = beq if leftover byte count is 0 +// cm1 = -1 +// rv = original value of vrsave + +2: + mfspr rv,vrsave // get bitmap of live vector registers + srwi r0,rc,6 // get count of 64-byte chunks to move (may be 0) + oris w1,rv,0xFFF8 // we use v0-v12 + mtcrf 0x01,rc // prepare for moving last 0-15 bytes in LShortReverse16 + rlwinm w3,rc,0,28,31 // move last 0-15 byte count to w3 too + cmpwi cr1,r0,0 // set cr1 on chunk count + mtspr vrsave,w1 // update mask + mtctr r0 // set up loop count + cmpwi cr6,w3,0 // set cr6 on leftover byte count + rlwinm. 
r0,rc,28,30,31 // get number of quadword leftovers (0-3) and set cr0 + li cm1,-1 // get constants used in ldvx/stvx + + bne cr5,LReverseVecUnal // handle unaligned operands + beq cr1,2f // no chunks (if no chunks, must be leftover QWs) + li cm17,-17 + li cm33,-33 + li cm49,-49 + b 1f + +// Long, reverse 16-byte-aligned vector loop. + + .align 5 // align inner loops +1: // loop over 64-byte chunks + lvx v1,cm1,rs + lvx v2,cm17,rs + lvx v3,cm33,rs + lvx v4,cm49,rs + subi rs,rs,64 + stvx v1,cm1,rd + stvx v2,cm17,rd + stvx v3,cm33,rd + stvx v4,cm49,rd + subi rd,rd,64 + bdnz 1b + + beq 4f // no leftover quadwords +2: // r0=#QWs, rv=vrsave, cr7=(rc & F), cr6 set on cr7 + mtctr r0 +3: // loop over remaining quadwords (1-7) + lvx v1,cm1,rs + subi rs,rs,16 + stvx v1,cm1,rd + subi rd,rd,16 + bdnz 3b +4: + mtspr vrsave,rv // restore bitmap of live vr's + bne cr6,LShortReverse16 // handle last 0-15 bytes if any + blr + + +// Long, reverse, unaligned vector loop. +// ctr/cr1 = number of 64-byte chunks to move (may be 0) +// r0/cr0 = leftover QWs to move +// cr7 = low 4 bits of rc (ie, leftover byte count 0-15) +// cr6 = beq if leftover byte count is 0 +// rv = original value of vrsave +// cm1 = -1 + +LReverseVecUnal: + lvsl vp,0,rs // get permute vector to shift left + lvx v1,cm1,rs // v1 always looks ahead + li cm17,-17 + beq cr1,2f // no chunks (if no chunks, must be leftover QWs) + li cm33,-33 + li cm49,-49 + b 1f + + .align 5 // align the inner loops +1: // loop over 64-byte chunks + lvx v2,cm17,rs + lvx v3,cm33,rs + lvx v4,cm49,rs + subi rs,rs,64 + vperm vx,v2,v1,vp + lvx v1,cm1,rs + vperm vy,v3,v2,vp + stvx vx,cm1,rd + vperm vz,v4,v3,vp + stvx vy,cm17,rd + vperm vx,v1,v4,vp + stvx vz,cm33,rd + stvx vx,cm49,rd + subi rd,rd,64 + bdnz 1b + + beq 4f // no leftover quadwords +2: // r0=#QWs, rv=vrsave, v1=next QW, cr7=(rc & F), cr6 set on cr7 + mtctr r0 +3: // loop over 1-3 quadwords + lvx v2,cm17,rs + subi rs,rs,16 + vperm vx,v2,v1,vp + vor v1,v2,v2 // v1 <- v2 + stvx vx,cm1,rd + subi rd,rd,16 + bdnz 3b +4: + mtspr vrsave,rv // restore bitmap of live vr's + bne cr6,LShortReverse16 // handle last 0-15 bytes iff any + blr + + COMMPAGE_DESCRIPTOR(bcopy_970,_COMM_PAGE_BCOPY,k64Bit+kHasAltivec,0,kCommPageMTCRF) diff --git a/osfmk/ppc/commpage/bcopy_g3.s b/osfmk/ppc/commpage/bcopy_g3.s new file mode 100644 index 000000000..cfa8eddcc --- /dev/null +++ b/osfmk/ppc/commpage/bcopy_g3.s @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* ======================================= + * BCOPY, MEMCPY, and MEMMOVE for Mac OS X + * ======================================= + * + * Version of 2/20/2003, tuned for G3. + * + * Register usage. Note we use R2, so this code will not run in a PEF/CFM + * environment. + * + * r0 = "w7" or temp + * r2 = "w8" + * r3 = not used, as memcpy and memmove return 1st parameter as a value + * r4 = source ptr ("rs") + * r5 = count of bytes to move ("rc") + * r6 = "w1" + * r7 = "w2" + * r8 = "w3" + * r9 = "w4" + * r10 = "w5" + * r11 = "w6" + * r12 = destination ptr ("rd") + * f0-f3 = used for moving 8-byte aligned data + */ +#define rs r4 // NB: we depend on rs==r4 in "lswx" instructions +#define rd r12 +#define rc r5 + +#define w1 r6 +#define w2 r7 +#define w3 r8 +#define w4 r9 +#define w5 r10 +#define w6 r11 +#define w7 r0 +#define w8 r2 + +#define ASSEMBLER +#include +#include +#include +#include + + .text + .globl EXT(bcopy_g3) + + +#define kLong 33 // too long for string ops + + +// Main entry points. + + .align 5 +bcopy_g3: // void bcopy(const void *src, void *dst, size_t len) + cmplwi rc,kLong // length > 32 bytes? + sub w1,r4,r3 // must move in reverse if (rd-rs) 32 bytes? + sub w1,r3,rs // must move in reverse if (rd-rs)=1) + rlwinm rc,rc,0,0x1F // mask down to leftover bytes + mtctr r0 // set up loop count + beq 1f // dest already word aligned + +// Word align the destination. + + mtxer w4 // byte count to xer + cmpwi r0,0 // any chunks to xfer? + lswx w1,0,rs // move w4 bytes to align dest + add rs,rs,w4 + stswx w1,0,rd + add rd,rd,w4 + beq- 2f // pathologic case, no chunks to xfer + +// Forward, unaligned loop. + +1: + lwz w1,0(rs) + lwz w2,4(rs) + lwz w3,8(rs) + lwz w4,12(rs) + lwz w5,16(rs) + lwz w6,20(rs) + lwz w7,24(rs) + lwz w8,28(rs) + addi rs,rs,32 + stw w1,0(rd) + stw w2,4(rd) + stw w3,8(rd) + stw w4,12(rd) + stw w5,16(rd) + stw w6,20(rd) + stw w7,24(rd) + stw w8,28(rd) + addi rd,rd,32 + bdnz 1b +2: // rc = remaining bytes (0-31) + mtxer rc // set up count for string ops + mr r0,rd // move dest ptr out of the way + lswx r5,0,rs // load xer bytes into r5-r12 (rs==r4) + stswx r5,0,r0 // store them + blr + + + +// Forward, aligned loop. We use FPRs. + +LLongFloat: + andi. w4,w2,7 // W4 <- #bytes to doubleword-align destination + sub rc,rc,w4 // adjust count for alignment + srwi r0,rc,5 // number of 32-byte chunks to xfer + rlwinm rc,rc,0,0x1F // mask down to leftover bytes + mtctr r0 // set up loop count + beq 1f // dest already doubleword aligned + +// Doubleword align the destination. + + mtxer w4 // byte count to xer + cmpwi r0,0 // any chunks to xfer? + lswx w1,0,rs // move w4 bytes to align dest + add rs,rs,w4 + stswx w1,0,rd + add rd,rd,w4 + beq- 2f // pathologic case, no chunks to xfer +1: // loop over 32-byte chunks + lfd f0,0(rs) + lfd f1,8(rs) + lfd f2,16(rs) + lfd f3,24(rs) + addi rs,rs,32 + stfd f0,0(rd) + stfd f1,8(rd) + stfd f2,16(rd) + stfd f3,24(rd) + addi rd,rd,32 + bdnz 1b +2: // rc = remaining bytes (0-31) + mtxer rc // set up count for string ops + mr r0,rd // move dest ptr out of the way + lswx r5,0,rs // load xer bytes into r5-r12 (rs==r4) + stswx r5,0,r0 // store them + blr + + +// Long, reverse moves. 
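Both the forward loops above and the reverse loops that follow split the byte count the same way: a shift yields the 32-byte chunk count loaded into CTR, and a mask keeps the 0-31 trailing bytes that the lswx/stswx string ops then move in one shot. A sketch of that split in C, with plain byte loops standing in for the unrolled assembly and string instructions:

#include <stddef.h>

void chunked_copy_sketch(char *dst, const char *src, size_t len)
{
    size_t chunks   = len >> 5;    /* srwi   r0,rc,5       */
    size_t leftover = len & 0x1F;  /* rlwinm rc,rc,0,0x1F  */

    while (chunks--) {             /* mtctr r0 ... bdnz    */
        for (int i = 0; i < 32; i++)
            dst[i] = src[i];
        dst += 32;
        src += 32;
    }
    for (size_t i = 0; i < leftover; i++)   /* mtxer rc; lswx/stswx */
        dst[i] = src[i];
}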
+// cr5 = beq if relatively word aligned + +LLongReverse: + add rd,rd,rc // point to end of operands + 1 + add rs,rs,rc + beq cr5,LReverseFloat // aligned operands so can use FPRs + srwi r0,rc,5 // get chunk count + rlwinm rc,rc,0,0x1F // mask down to leftover bytes + mtctr r0 // set up loop count + mtxer rc // set up for trailing bytes +1: + lwz w1,-4(rs) + lwz w2,-8(rs) + lwz w3,-12(rs) + lwz w4,-16(rs) + stw w1,-4(rd) + lwz w5,-20(rs) + stw w2,-8(rd) + lwz w6,-24(rs) + stw w3,-12(rd) + lwz w7,-28(rs) + stw w4,-16(rd) + lwzu w8,-32(rs) + stw w5,-20(rd) + stw w6,-24(rd) + stw w7,-28(rd) + stwu w8,-32(rd) + bdnz 1b + + sub r4,rs,rc // point to 1st (leftmost) leftover byte (0..31) + sub r0,rd,rc // move dest ptr out of way + lswx r5,0,r4 // load xer bytes into r5-r12 + stswx r5,0,r0 // store them + blr + + +// Long, reverse aligned moves. We use FPRs. + +LReverseFloat: + andi. w4,rd,7 // W3 <- #bytes to doubleword-align destination + sub rc,rc,w4 // adjust count for alignment + srwi r0,rc,5 // number of 32-byte chunks to xfer + rlwinm rc,rc,0,0x1F // mask down to leftover bytes + mtctr r0 // set up loop count + beq 1f // dest already doubleword aligned + +// Doubleword align the destination. + + mtxer w4 // byte count to xer + cmpwi r0,0 // any chunks to xfer? + sub rs,rs,w4 // point to 1st bytes to xfer + sub rd,rd,w4 + lswx w1,0,rs // move w3 bytes to align dest + stswx w1,0,rd + beq- 2f // pathologic case, no chunks to xfer +1: + lfd f0,-8(rs) + lfd f1,-16(rs) + lfd f2,-24(rs) + lfdu f3,-32(rs) + stfd f0,-8(rd) + stfd f1,-16(rd) + stfd f2,-24(rd) + stfdu f3,-32(rd) + bdnz 1b +2: // rc = remaining bytes (0-31) + mtxer rc // set up count for string ops + sub r4,rs,rc // point to 1st (leftmost) leftover byte (0..31) + sub r0,rd,rc // move dest ptr out of way + lswx r5,0,r4 // load xer bytes into r5-r12 + stswx r5,0,r0 // store them + blr + + COMMPAGE_DESCRIPTOR(bcopy_g3,_COMM_PAGE_BCOPY,0,k64Bit+kHasAltivec,0) diff --git a/osfmk/ppc/commpage/bcopy_g4.s b/osfmk/ppc/commpage/bcopy_g4.s new file mode 100644 index 000000000..4750ae42e --- /dev/null +++ b/osfmk/ppc/commpage/bcopy_g4.s @@ -0,0 +1,621 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* ======================================= + * BCOPY, MEMCPY, and MEMMOVE for Mac OS X + * ======================================= + * + * Version of 2/20/2003, tuned for G4. The inner loops use DCBA to avoid + * reading destination cache lines. Only the 7450 actually benefits from + * this, and then only in the cold-cache case. 
On 7400s and 7455s, we + * patch the DCBAs into NOPs. + * + * Register usage. Note we use R2, so this code will not run in a PEF/CFM + * environment. Note also the rather delicate way we assign multiple uses + * to the same register. Beware. + * + * r0 = "w7" or temp (NB: cannot use r0 for any constant such as "c16") + * r2 = "w8" or vrsave ("rv") + * r3 = not used, as memcpy and memmove return 1st parameter as a value + * r4 = source ptr ("rs") + * r5 = count of bytes to move ("rc") + * r6 = "w1", "c16", or "cm17" + * r7 = "w2", "c32", or "cm33" + * r8 = "w3", "c48", or "cm49" + * r9 = "w4", or "cm1" + * r10 = "w5", "c96", or "cm97" + * r11 = "w6", "c128", or "cm129" + * r12 = destination ptr ("rd") + * v0 = permute vector ("vp") + * v1-v4 = qw's loaded from source + * v5-v7 = permuted qw's ("vw", "vx", "vy") + */ +#define rs r4 +#define rd r12 +#define rc r5 +#define rv r2 + +#define w1 r6 +#define w2 r7 +#define w3 r8 +#define w4 r9 +#define w5 r10 +#define w6 r11 +#define w7 r0 +#define w8 r2 + +#define c16 r6 +#define cm17 r6 +#define c32 r7 +#define cm33 r7 +#define c48 r8 +#define cm49 r8 +#define cm1 r9 +#define c96 r10 +#define cm97 r10 +#define c128 r11 +#define cm129 r11 + +#define vp v0 +#define vw v5 +#define vx v6 +#define vy v7 + +#define ASSEMBLER +#include +#include +#include +#include + + .text + .globl EXT(bcopy_g4) + +#define kMedium 32 // too long for inline loopless code +#define kLong 96 // long enough to justify use of Altivec + + +// Main entry points. + + .align 5 +bcopy_g4: // void bcopy(const void *src, void *dst, size_t len) + cmplwi rc,kMedium // short or long? + sub w1,r4,r3 // must move in reverse if (rd-rs)=1) + mtcrf 0x01,rc // save remaining byte count here for LShort16 + mtctr r0 // set up 16-byte loop + bne cr6,3f // source not 4-byte aligned + b 2f + + .align 4 +2: // loop over 16-byte aligned chunks + lfd f0,0(rs) + lfd f1,8(rs) + addi rs,rs,16 + stfd f0,0(rd) + stfd f1,8(rd) + addi rd,rd,16 + bdnz 2b + + b LShort16 + + .align 4 +3: // loop over 16-byte unaligned chunks + lwz w1,0(rs) + lwz w2,4(rs) + lwz w3,8(rs) + lwz w4,12(rs) + addi rs,rs,16 + stw w1,0(rd) + stw w2,4(rd) + stw w3,8(rd) + stw w4,12(rd) + addi rd,rd,16 + bdnz 3b + + b LShort16 + + +// Vector loops. First, we must 32-byte align the destination. +// w1 = (rd-rs), used to check for reverse and alignment +// w4 = #bytes to 32-byte align destination +// rc = long enough for at least one vector loop + +LFwdLong: + cmpwi w4,0 // dest already aligned? + sub rc,rc,w4 // adjust length + mtcrf 0x01,w4 // cr7 <- #bytes to align dest + rlwinm w2,w1,0,0xF // relatively 16-byte aligned? + mtcrf 0x02,w4 // finish moving #bytes to align to cr6 and cr7 + srwi r0,rc,6 // get # 64-byte chunks to xfer (>=1) + cmpwi cr5,w2,0 // set cr5 beq if relatively 16-byte aligned + beq LFwdAligned // dest is already aligned + +// 32-byte align destination. + + bf 31,1f // byte to move? + lbz w1,0(rs) + addi rs,rs,1 + stb w1,0(rd) + addi rd,rd,1 +1: + bf 30,2f // halfword? + lhz w1,0(rs) + addi rs,rs,2 + sth w1,0(rd) + addi rd,rd,2 +2: + bf 29,3f // word? + lwz w1,0(rs) + addi rs,rs,4 + stw w1,0(rd) + addi rd,rd,4 +3: + bf 28,4f // doubleword? + lwz w1,0(rs) + lwz w2,4(rs) + addi rs,rs,8 + stw w1,0(rd) + stw w2,4(rd) + addi rd,rd,8 +4: + bf 27,LFwdAligned // quadword? + lwz w1,0(rs) + lwz w2,4(rs) + lwz w3,8(rs) + lwz w4,12(rs) + addi rs,rs,16 + stw w1,0(rd) + stw w2,4(rd) + stw w3,8(rd) + stw w4,12(rd) + addi rd,rd,16 + + +// Destination is 32-byte aligned. 
+// r0 = count of 64-byte chunks to move (not 0) +// rd = 32-byte aligned +// rc = bytes remaining +// cr5 = beq if source is 16-byte aligned +// We set up many registers: +// ctr = number of 64-byte chunks to move +// r0/cr0 = leftover QWs to move +// cr7 = low 4 bits of rc (ie, leftover byte count 0-15) +// cr6 = beq if leftover byte count is 0 +// rv = original value of vrsave +// c16 etc = loaded + +LFwdAligned: + mfspr rv,vrsave // get bitmap of live vector registers + mtcrf 0x01,rc // move leftover count to cr7 for LShort16 + rlwinm w3,rc,0,28,31 // move last 0-15 byte count to w3 + mtctr r0 // set up loop count + cmpwi cr6,w3,0 // set cr6 on leftover byte count + oris w1,rv,0xFF00 // we use v0-v7 + rlwinm. r0,rc,28,30,31 // get number of quadword leftovers (0-3) and set cr0 + mtspr vrsave,w1 // update mask + li c16,16 // get constants used in ldvx/stvx + li c32,32 + li c48,48 + li c96,96 + li c128,128 + bne cr5,LForwardVecUnal // handle unaligned operands + b 1f + + .align 4 +1: // loop over 64-byte chunks + dcbt c96,rs + dcbt c128,rs + lvx v1,0,rs + lvx v2,c16,rs + lvx v3,c32,rs + lvx v4,c48,rs + addi rs,rs,64 + dcba 0,rd // patched to NOP on some machines + stvx v1,0,rd + stvx v2,c16,rd + dcba c32,rd // patched to NOP on some machines + stvx v3,c32,rd + stvx v4,c48,rd + addi rd,rd,64 + bdnz 1b + + beq 4f // no leftover quadwords + mtctr r0 +3: // loop over remaining quadwords (1-3) + lvx v1,0,rs + addi rs,rs,16 + stvx v1,0,rd + addi rd,rd,16 + bdnz 3b +4: + mtspr vrsave,rv // restore bitmap of live vr's + bne cr6,LShort16 // handle last 0-15 bytes if any + blr + + +// Long, forward, unaligned vector loop. + +LForwardVecUnal: + lvsl vp,0,rs // get permute vector to shift left + lvx v1,0,rs // prefetch 1st source quadword + b 1f + + .align 4 // align inner loops +1: // loop over 64-byte chunks + lvx v2,c16,rs + dcbt c96,rs + lvx v3,c32,rs + dcbt c128,rs + lvx v4,c48,rs + addi rs,rs,64 + vperm vw,v1,v2,vp + lvx v1,0,rs + vperm vx,v2,v3,vp + dcba 0,rd // patched to NOP on some machines + stvx vw,0,rd + vperm vy,v3,v4,vp + stvx vx,c16,rd + vperm vw,v4,v1,vp + dcba c32,rd // patched to NOP on some machines + stvx vy,c32,rd + stvx vw,c48,rd + addi rd,rd,64 + bdnz 1b + + beq- 4f // no leftover quadwords + mtctr r0 +3: // loop over remaining quadwords + lvx v2,c16,rs + addi rs,rs,16 + vperm vx,v1,v2,vp + vor v1,v2,v2 // v1 <- v2 + stvx vx,0,rd + addi rd,rd,16 + bdnz 3b +4: + mtspr vrsave,rv // restore bitmap of live vr's + bne cr6,LShort16 // handle last 0-15 bytes if any + blr + + +// Medium and long, reverse moves. We use altivec if the operands are long enough, +// else a lwz/stx loop. +// w1 = (rd-rs), used to check for reverse and alignment +// cr7 = bge if long + +LMediumReverse: + add rd,rd,rc // point to end of operands + add rs,rs,rc + andi. w4,rd,0x1F // w4 <- #bytes to 32-byte align destination + rlwinm w6,rd,0,0x3 // w6 <- #bytes to 4-byte align destination + bge cr7,LLongReverse // long enough for vectors + +// Scalar loop. 
+// w6 = #bytes to 4-byte align destination + + sub rc,rc,w6 // decrement length remaining + mtxer w6 // set up count for move + sub rs,rs,w6 // back up ptrs + sub rd,rd,w6 + srwi r0,rc,4 // get # 16-byte chunks (>=1) + mtcrf 0x01,rc // set remaining byte count here for LShortReverse16 + lswx w1,0,rs // move w6 bytes to align destination + stswx w1,0,rd + mtctr r0 // set up 16-byte loop + b 1f + + .align 4 +1: // loop over 16-byte aligned chunks + lwz w1,-4(rs) + lwz w2,-8(rs) + lwz w3,-12(rs) + lwzu w4,-16(rs) + stw w1,-4(rd) + stw w2,-8(rd) + stw w3,-12(rd) + stwu w4,-16(rd) + bdnz 1b + + b LShortReverse16 + + +// Reverse vector loops. First, we must 32-byte align the destination. +// w1 = (rd-rs), used to check for reverse and alignment +// w4/cr0 = #bytes to 32-byte align destination +// rc = long enough for at least one vector loop + +LLongReverse: + sub rc,rc,w4 // adjust length + mtcrf 0x01,w4 // cr7 <- #bytes to align dest + rlwinm w2,w1,0,0xF // relatively 16-byte aligned? + mtcrf 0x02,w4 // finish moving #bytes to align to cr6 and cr7 + srwi r0,rc,6 // get # 64-byte chunks to xfer (>=1) + cmpwi cr5,w2,0 // set cr5 beq if relatively 16-byte aligned + beq LReverseAligned // dest is already aligned + +// 32-byte align destination. + + bf 31,1f // byte to move? + lbzu w1,-1(rs) + stbu w1,-1(rd) +1: + bf 30,2f // halfword? + lhzu w1,-2(rs) + sthu w1,-2(rd) +2: + bf 29,3f // word? + lwzu w1,-4(rs) + stwu w1,-4(rd) +3: + bf 28,4f // doubleword? + lwz w1,-4(rs) + lwzu w2,-8(rs) + stw w1,-4(rd) + stwu w2,-8(rd) +4: + bf 27,LReverseAligned // quadword? + lwz w1,-4(rs) + lwz w2,-8(rs) + lwz w3,-12(rs) + lwzu w4,-16(rs) + stw w1,-4(rd) + stw w2,-8(rd) + stw w3,-12(rd) + stwu w4,-16(rd) + +// Destination is 32-byte aligned. +// r0 = count of 64-byte chunks to move (not 0) +// rd = 32-byte aligned +// rc = bytes remaining +// cr5 = beq if source is 16-byte aligned +// We set up many registers: +// ctr = number of 64-byte chunks to move +// r0/cr0 = leftover QWs to move +// cr7 = low 4 bits of rc (ie, leftover byte count 0-15) +// cr6 = beq if leftover byte count is 0 +// rv = original value of vrsave +// cm1 etc = loaded + +LReverseAligned: + mfspr rv,vrsave // get bitmap of live vector registers + mtcrf 0x01,rc // move leftover count to cr7 for LShort16 + rlwinm w3,rc,0,28,31 // move last 0-15 byte count to w3 + mtctr r0 // set up loop count + cmpwi cr6,w3,0 // set cr6 on leftover byte count + oris w1,rv,0xFF00 // we use v0-v7 + rlwinm. r0,rc,28,30,31 // get number of quadword leftovers (0-3) and set cr0 + mtspr vrsave,w1 // update mask + li cm1,-1 // get constants used in ldvx/stvx + li cm17,-17 + li cm33,-33 + li cm49,-49 + li cm97,-97 + li cm129,-129 + bne cr5,LReverseVecUnal // handle unaligned operands + b 1f + + .align 4 // align inner loops +1: // loop over 64-byte chunks + dcbt cm97,rs + dcbt cm129,rs + lvx v1,cm1,rs + lvx v2,cm17,rs + lvx v3,cm33,rs + lvx v4,cm49,rs + subi rs,rs,64 + stvx v1,cm1,rd + stvx v2,cm17,rd + stvx v3,cm33,rd + stvx v4,cm49,rd + subi rd,rd,64 + bdnz 1b + + beq 4f // no leftover quadwords + mtctr r0 +3: // loop over remaining quadwords (1-7) + lvx v1,cm1,rs + subi rs,rs,16 + stvx v1,cm1,rd + subi rd,rd,16 + bdnz 3b +4: + mtspr vrsave,rv // restore bitmap of live vr's + bne cr6,LShortReverse16 // handle last 0-15 bytes if any + blr + + +// Long, reverse, unaligned vector loop. 
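The loop below reconstructs aligned stores from misaligned loads with lvsl/vperm. Conceptually, each 16-byte store is spliced from two adjacent aligned quadwords, shifted by the source misalignment; an illustrative scalar C rendering (not vector code, and not part of this patch):

#include <stdint.h>

void vperm_merge_sketch(uint8_t out[16],
                        const uint8_t lo[16],  /* lower-addressed quadword  */
                        const uint8_t hi[16],  /* next quadword up          */
                        unsigned k)            /* source misalignment, 0-15 */
{
    /* vperm with an lvsl-generated pattern selects bytes k..15 of the
     * first operand followed by bytes 0..k-1 of the second. */
    for (unsigned i = 0; i < 16; i++)
        out[i] = (i + k < 16) ? lo[i + k] : hi[i + k - 16];
}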
+ +LReverseVecUnal: + lvsl vp,0,rs // get permute vector to shift left + lvx v1,cm1,rs // v1 always looks ahead + b 1f + + .align 4 // align the inner loops +1: // loop over 64-byte chunks + lvx v2,cm17,rs + dcbt cm97,rs + lvx v3,cm33,rs + dcbt cm129,rs + lvx v4,cm49,rs + subi rs,rs,64 + vperm vw,v2,v1,vp + lvx v1,cm1,rs + vperm vx,v3,v2,vp + stvx vw,cm1,rd + vperm vy,v4,v3,vp + stvx vx,cm17,rd + vperm vw,v1,v4,vp + stvx vy,cm33,rd + stvx vw,cm49,rd + subi rd,rd,64 + bdnz 1b + + beq 3f // no leftover quadwords + mtctr r0 +2: // loop over 1-3 quadwords + lvx v2,cm17,rs + subi rs,rs,16 + vperm vx,v2,v1,vp + vor v1,v2,v2 // v1 <- v2 + stvx vx,cm1,rd + subi rd,rd,16 + bdnz 2b +3: + mtspr vrsave,rv // restore bitmap of live vr's + bne cr6,LShortReverse16 // handle last 0-15 bytes iff any + blr + + COMMPAGE_DESCRIPTOR(bcopy_g4,_COMM_PAGE_BCOPY,kHasAltivec,k64Bit,kCommPageDCBA) diff --git a/osfmk/ppc/commpage/bigcopy_970.s b/osfmk/ppc/commpage/bigcopy_970.s new file mode 100644 index 000000000..fa9e1245a --- /dev/null +++ b/osfmk/ppc/commpage/bigcopy_970.s @@ -0,0 +1,499 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* ==================================== + * Very Long Operand BCOPY for Mac OS X + * ==================================== + * + * Version of 6/11/2003, tuned for the IBM 970. This is for operands at + * least several pages long. It is called from bcopy()/memcpy()/memmove(). + * + * We use the following additional strategies not used by the shorter + * operand paths. Mostly, we try to optimize for memory bandwidth: + * 1. Use DCBZ128 to avoid reading destination lines. Because this code + * resides on the commmpage, it can use a private interface with the + * kernel to minimize alignment exceptions if the destination is + * uncached. The kernel will clear cr7 whenever it emulates a DCBZ or + * DCBZ128 on the commpage. Thus we take at most one exception per call, + * which is amortized across the very long operand. + * 2. Copy larger chunks per iteration to minimize R/W bus turnaround + * and maximize DRAM page locality (opening a new page is expensive.) + * 3. Touch in one source chunk ahead with DCBT. This is probably the + * least important change, and probably only helps restart the + * hardware stream at the start of each source page. + * + * Register usage. Note the rather delicate way we assign multiple uses + * to the same register. Beware. 
+ * r0 = temp (NB: cannot use r0 for any constant such as "c16") + * r3 = not used, as memcpy and memmove return 1st parameter as a value + * r4 = source ptr ("rs") + * r5 = count of bytes to move ("rc") + * r6 = constant 16 ("c16") + * r7 = constant 32 (""c32") + * r8 = constant 48 (""c48") + * r9 = constant 128 (""c128") + * r10 = vrsave ("rv") + * r11 = constant 256 (""c256") + * r12 = destination ptr ("rd") + * r13 = constant 384 (""c384") + * r14 = temp ("rx") + * r15 = temp ("rt") + */ +#define rs r4 +#define rd r12 +#define rc r5 +#define rv r10 +#define rx r14 +#define rt r15 + +#define c16 r6 +#define c32 r7 +#define c48 r8 +#define c128 r9 +#define c256 r11 +#define c384 r13 + +// Offsets within the "red zone" (which is 224 bytes long): + +#define rzR13 -8 +#define rzR14 -12 +#define rzR15 -16 +#define rzV20 -32 +#define rzV21 -48 +#define rzV22 -64 +#define rzV23 -80 +#define rzV24 -96 +#define rzV25 -112 +#define rzV26 -128 +#define rzV27 -144 +#define rzV28 -160 +#define rzV29 -176 +#define rzV30 -192 +#define rzV31 -208 + + +#include +#include +#include +#include + + .text + .globl EXT(bigcopy_970) + + +// Entry point. This is a subroutine of bcopy(). When called: +// r4 = source ptr (aka "rs") +// r12 = dest ptr (aka "rd") +// r5 = length (>= 16K bytes) (aka "rc") +// +// We only do "forward" moves, ie non-overlapping or toward 0. +// +// We return with non-volatiles and r3 preserved. + + .align 5 +bigcopy_970: + stw r13,rzR13(r1) // spill non-volatile regs we use to redzone + stw r14,rzR14(r1) + stw r15,rzR15(r1) + li r0,rzV20 + neg rt,rd // start to cache-line-align destination + stvx v20,r1,r0 // we use all 32 VRs + li r0,rzV21 + stvx v21,r1,r0 + li r0,rzV22 + stvx v22,r1,r0 + li r0,rzV23 + stvx v23,r1,r0 + li r0,rzV24 + andi. rt,rt,127 // get #bytes to 128-byte align + stvx v24,r1,r0 + li r0,rzV25 + stvx v25,r1,r0 + li r0,rzV26 + sub rc,rc,rt // adjust length by #bytes to align destination + stvx v26,r1,r0 + li r0,rzV27 + stvx v27,r1,r0 + li r0,rzV28 + mtctr rt // #bytes to align destination + stvx v28,r1,r0 + li r0,rzV29 + stvx v29,r1,r0 + li r0,rzV30 + stvx v30,r1,r0 + li r0,rzV31 + stvx v31,r1,r0 + beq 2f // dest already 128-byte aligned + b 1f + + +// Cache-line-align destination. + + .align 5 +1: + lbz r0,0(rs) + addi rs,rs,1 + stb r0,0(rd) + addi rd,rd,1 + bdnz 1b + + +// Is source 16-byte aligned? Load constant offsets. + +2: + andi. r0,rs,15 // check source alignment + mfspr rv,vrsave // save caller's bitmask + li r0,-1 // we use all 32 VRs + li c16,16 // load the constant offsets for x-form ops + li c32,32 + li c48,48 + li c128,128 + li c256,256 + li c384,384 + mtspr vrsave,r0 + +// NB: the kernel clears cr7 if it emulates a dcbz128 on the commpage, +// and we dcbz only if cr7 beq is set. We check to be sure the dcbz's +// won't zero source bytes before we load them, since we zero before +// loading as this is faster than zeroing after loading and before storing. + + cmpw cr7,r0,r0 // initialize cr7 beq to use dcbz128 + sub rt,rs,rd // get (rs-rd) + cmplwi cr1,rt,512 // are we moving down less than 512 bytes? + +// Start fetching in source cache lines. 
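The guard completed just below the prefetches exists because dcbz128 zeroes an entire destination line before the source bytes covering it have been loaded: if the destination trails the source by fewer than 512 bytes, the zeroing would destroy unread source data. A sketch of that test in C, assuming flat pointers:

#include <stdint.h>
#include <stdbool.h>

static bool can_dcbz_destination(const char *src, const char *dst)
{
    /* "sub rt,rs,rd; cmplwi cr1,rt,512": an unsigned compare, so a
     * source below the destination wraps to a huge value and passes;
     * that is safe here because bigcopy is only entered for moves that
     * are non-overlapping or toward lower addresses. */
    return (uintptr_t)(src - dst) >= 512;
}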
+ + dcbt c128,rs // first line already touched in + dcbt c256,rs + dcbt c384,rs + + bge++ cr1,3f // skip if not moving down less than 512 bytes + cmpw cr7,c16,c32 // cannot dcbz since it would zero source bytes +3: + beq LalignedLoop // handle aligned sources + lvsl v0,0,rs // get permute vector for left shift + lvxl v1,0,rs // prime the loop + b LunalignedLoop // enter unaligned loop + + +// Main loop for unaligned operands. We loop over 384-byte chunks (3 cache lines) +// since we need a few VRs for permuted destination QWs and the permute vector. + + .align 5 +LunalignedLoop: + subi rc,rc,384 // decrement byte count + addi rx,rs,384 // get address of next chunk + lvxl v2,c16,rs + lvxl v3,c32,rs + bne-- cr7,1f // skip dcbz's if cr7 beq has been turned off by kernel + dcbz128 0,rd // (also skip if moving down less than 512 bytes) + bne-- cr7,1f // catch it first time through + dcbz128 c128,rd + dcbz128 c256,rd +1: + addi rt,rs,64 + dcbt 0,rx // touch in next chunk + dcbt c128,rx + dcbt c256,rx + lvxl v4,c48,rs + addi rs,rs,128 + lvxl v5,0,rt + cmplwi rc,384 // another chunk to go? + lvxl v6,c16,rt + lvxl v7,c32,rt + lvxl v8,c48,rt + addi rt,rs,64 + vperm v25,v1,v2,v0 + lvxl v9,0,rs + lvxl v10,c16,rs + vperm v26,v2,v3,v0 + lvxl v11,c32,rs + lvxl v12,c48,rs + vperm v27,v3,v4,v0 + addi rs,rs,128 + lvxl v13,0,rt + lvxl v14,c16,rt + vperm v28,v4,v5,v0 + lvxl v15,c32,rt + lvxl v16,c48,rt + vperm v29,v5,v6,v0 + addi rt,rs,64 + lvxl v17,0,rs + lvxl v18,c16,rs + vperm v30,v6,v7,v0 + lvxl v19,c32,rs + lvxl v20,c48,rs + vperm v31,v7,v8,v0 + addi rs,rs,128 + lvxl v21,0,rt + lvxl v22,c16,rt + vperm v2,v8,v9,v0 + lvxl v23,c32,rt + lvxl v24,c48,rt + vperm v3,v9,v10,v0 + lvx v1,0,rs // get 1st qw of next chunk + vperm v4,v10,v11,v0 + + addi rt,rd,64 + stvxl v25,0,rd + stvxl v26,c16,rd + vperm v5,v11,v12,v0 + stvxl v27,c32,rd + stvxl v28,c48,rd + vperm v6,v12,v13,v0 + addi rd,rd,128 + stvxl v29,0,rt + stvxl v30,c16,rt + vperm v7,v13,v14,v0 + stvxl v31,c32,rt + stvxl v2,c48,rt + vperm v8,v14,v15,v0 + addi rt,rd,64 + stvxl v3,0,rd + stvxl v4,c16,rd + vperm v9,v15,v16,v0 + stvxl v5,c32,rd + stvxl v6,c48,rd + vperm v10,v16,v17,v0 + addi rd,rd,128 + stvxl v7,0,rt + vperm v11,v17,v18,v0 + stvxl v8,c16,rt + stvxl v9,c32,rt + vperm v12,v18,v19,v0 + stvxl v10,c48,rt + addi rt,rd,64 + vperm v13,v19,v20,v0 + stvxl v11,0,rd + stvxl v12,c16,rd + vperm v14,v20,v21,v0 + stvxl v13,c32,rd + vperm v15,v21,v22,v0 + stvxl v14,c48,rd + vperm v16,v22,v23,v0 + addi rd,rd,128 + stvxl v15,0,rt + vperm v17,v23,v24,v0 + stvxl v16,c16,rt + vperm v18,v24,v1,v0 + stvxl v17,c32,rt + stvxl v18,c48,rt + bge++ LunalignedLoop // loop if another 384 bytes to go + +// End of unaligned main loop. Handle up to 384 leftover bytes. + + srwi. r0,rc,5 // get count of 32-byte chunks remaining + beq Ldone // none + rlwinm rc,rc,0,0x1F // mask count down to 0..31 leftover bytes + mtctr r0 +1: // loop over 32-byte chunks + lvx v2,c16,rs + lvx v3,c32,rs + addi rs,rs,32 + vperm v8,v1,v2,v0 + vperm v9,v2,v3,v0 + vor v1,v3,v3 // v1 <- v3 + stvx v8,0,rd + stvx v9,c16,rd + addi rd,rd,32 + bdnz 1b + + b Ldone + + +// Aligned loop. Destination is 128-byte aligned, and source is 16-byte +// aligned. Loop over 512-byte chunks (4 cache lines.) 
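A skeleton of this loop in C, with the cache operations as hypothetical helpers (the real code issues dcbt and dcbz128 directly). The point of dcbz128 is bandwidth: claiming each destination line with a zero avoids fetching it from DRAM, so the copy costs roughly one read plus one write per byte rather than two reads and a write.

#include <stddef.h>
#include <string.h>

#define CHUNK 512  /* four 128-byte cache lines per iteration */

static void touch_in(const void *p) { (void)p;              /* dcbt in the asm    */ }
static void zero_line(void *line)   { memset(line, 0, 128); /* dcbz128 in the asm */ }

void aligned_loop_sketch(char *dst, const char *src, size_t chunks)
{
    while (chunks--) {
        touch_in(src + CHUNK);              /* touch in the next source chunk */
        for (int i = 0; i < CHUNK; i += 128)
            zero_line(dst + i);             /* claim dest lines without reads */
        memcpy(dst, src, CHUNK);            /* the lvxl/stvxl pairs below     */
        src += CHUNK;
        dst += CHUNK;
    }
}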
+ + .align 5 +LalignedLoop: + subi rc,rc,512 // decrement count + addi rx,rs,512 // address of next chunk + lvxl v1,0,rs + lvxl v2,c16,rs + bne-- cr7,1f // skip dcbz's if cr7 beq has been turned off by kernel + dcbz128 0,rd // (also skip if moving down less than 512 bytes) + bne-- cr7,1f // catch it first time through + dcbz128 c128,rd + dcbz128 c256,rd + dcbz128 c384,rd +1: + addi rt,rs,64 + dcbt 0,rx // touch in next chunk + dcbt c128,rx + dcbt c256,rx + dcbt c384,rx + lvxl v3,c32,rs + lvxl v4,c48,rs + addi rs,rs,128 + lvxl v5,0,rt + cmplwi rc,512 // another chunk to go? + lvxl v6,c16,rt + lvxl v7,c32,rt + lvxl v8,c48,rt + addi rt,rs,64 + lvxl v9,0,rs + lvxl v10,c16,rs + lvxl v11,c32,rs + lvxl v12,c48,rs + addi rs,rs,128 + lvxl v13,0,rt + lvxl v14,c16,rt + lvxl v15,c32,rt + lvxl v16,c48,rt + addi rt,rs,64 + lvxl v17,0,rs + lvxl v18,c16,rs + lvxl v19,c32,rs + lvxl v20,c48,rs + addi rs,rs,128 + lvxl v21,0,rt + lvxl v22,c16,rt + lvxl v23,c32,rt + lvxl v24,c48,rt + addi rt,rs,64 + lvxl v25,0,rs + lvxl v26,c16,rs + lvxl v27,c32,rs + lvxl v28,c48,rs + addi rs,rs,128 + lvxl v29,0,rt + lvxl v30,c16,rt + lvxl v31,c32,rt + lvxl v0,c48,rt + + addi rt,rd,64 + stvxl v1,0,rd + stvxl v2,c16,rd + stvxl v3,c32,rd + stvxl v4,c48,rd + addi rd,rd,128 + stvxl v5,0,rt + stvxl v6,c16,rt + stvxl v7,c32,rt + stvxl v8,c48,rt + addi rt,rd,64 + stvxl v9,0,rd + stvxl v10,c16,rd + stvxl v11,c32,rd + stvxl v12,c48,rd + addi rd,rd,128 + stvxl v13,0,rt + stvxl v14,c16,rt + stvxl v15,c32,rt + stvxl v16,c48,rt + addi rt,rd,64 + stvxl v17,0,rd + stvxl v18,c16,rd + stvxl v19,c32,rd + stvxl v20,c48,rd + addi rd,rd,128 + stvxl v21,0,rt + stvxl v22,c16,rt + stvxl v23,c32,rt + stvxl v24,c48,rt + addi rt,rd,64 + stvxl v25,0,rd + stvxl v26,c16,rd + stvxl v27,c32,rd + stvxl v28,c48,rd + addi rd,rd,128 + stvxl v29,0,rt + stvxl v30,c16,rt + stvxl v31,c32,rt + stvxl v0,c48,rt + bge++ LalignedLoop // loop if another 512 bytes to go + +// End of aligned main loop. Handle up to 511 leftover bytes. + + srwi. r0,rc,5 // get count of 32-byte chunks remaining + beq Ldone // none + rlwinm rc,rc,0,0x1F // mask count down to 0..31 leftover bytes + mtctr r0 +1: // loop over 32-byte chunks + lvx v1,0,rs + lvx v2,c16,rs + addi rs,rs,32 + stvx v1,0,rd + stvx v2,c16,rd + addi rd,rd,32 + bdnz 1b + + +// Done, except for 0..31 leftovers at end. Restore non-volatiles. +// rs = source ptr +// rd = dest ptr +// rc = count (0..31) +// rv = caller's vrsave + +Ldone: + cmpwi rc,0 // any leftover bytes? + lwz r13,rzR13(r1) // restore non-volatiles from redzone + lwz r14,rzR14(r1) + lwz r15,rzR15(r1) + li r0,rzV20 + lvx v20,r1,r0 + li r0,rzV21 + lvx v21,r1,r0 + li r0,rzV22 + lvx v22,r1,r0 + li r0,rzV23 + lvx v23,r1,r0 + li r0,rzV24 + lvx v24,r1,r0 + li r0,rzV25 + lvx v25,r1,r0 + li r0,rzV26 + lvx v26,r1,r0 + li r0,rzV27 + lvx v27,r1,r0 + li r0,rzV28 + lvx v28,r1,r0 + li r0,rzV29 + lvx v29,r1,r0 + li r0,rzV30 + lvx v30,r1,r0 + li r0,rzV31 + lvx v31,r1,r0 + mtspr vrsave,rv // restore caller's bitmask + beqlr // done if no leftover bytes + + +// Handle 1..31 leftover bytes at end. + + mtctr rc // set up loop count + b 1f + + .align 5 +1: + lbz r0,0(rs) + addi rs,rs,1 + stb r0,0(rd) + addi rd,rd,1 + bdnz 1b + + blr + + + COMMPAGE_DESCRIPTOR(bigcopy_970,_COMM_PAGE_BIGCOPY,0,0,0) // load on all machines for now + diff --git a/osfmk/ppc/commpage/bzero_128.s b/osfmk/ppc/commpage/bzero_128.s new file mode 100644 index 000000000..f97db603d --- /dev/null +++ b/osfmk/ppc/commpage/bzero_128.s @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#define ASSEMBLER +#include +#include +#include +#include + + .text + .align 2 + .globl EXT(bzero_128) + + +// ********************* +// * B Z E R O _ 1 2 8 * +// ********************* +// +// For 64-bit processors with a 128-byte cache line. +// +// Register use: +// r0 = zero +// r3 = original ptr, not changed since memset returns it +// r4 = count of bytes to set +// r9 = working operand ptr +// We do not touch r2 and r10-r12, which some callers depend on. + + .align 5 +bzero_128: // void bzero(void *b, size_t len); + cmplwi cr7,r4,128 // too short for DCBZ128? + li r0,0 // get a 0 + neg r5,r3 // start to compute #bytes to align + mr r9,r3 // make copy of operand ptr (can't change r3) + blt cr7,Ltail // length < 128, too short for DCBZ + +// At least 128 bytes long, so compute alignment and #cache blocks. + + andi. r5,r5,0x7F // r5 <- #bytes to 128-byte align + sub r4,r4,r5 // adjust length + srwi r8,r4,7 // r8 <- 128-byte chunks + rlwinm r4,r4,0,0x7F // mask length down to remaining bytes + mtctr r8 // set up loop count + beq Ldcbz // skip if already aligned (r8!=0) + +// 128-byte align + + mtcrf 0x01,r5 // start to move #bytes to align to cr6 and cr7 + cmpwi cr1,r8,0 // any 128-byte cache lines to 0? + mtcrf 0x02,r5 + + bf 31,1f // byte? + stb r0,0(r9) + addi r9,r9,1 +1: + bf 30,2f // halfword? + sth r0,0(r9) + addi r9,r9,2 +2: + bf 29,3f // word? + stw r0,0(r9) + addi r9,r9,4 +3: + bf 28,4f // doubleword? + std r0,0(r9) + addi r9,r9,8 +4: + bf 27,5f // quadword? + std r0,0(r9) + std r0,8(r9) + addi r9,r9,16 +5: + bf 26,6f // 32-byte chunk? + std r0,0(r9) + std r0,8(r9) + std r0,16(r9) + std r0,24(r9) + addi r9,r9,32 +6: + bf 25,7f // 64-byte chunk? + std r0,0(r9) + std r0,8(r9) + std r0,16(r9) + std r0,24(r9) + std r0,32(r9) + std r0,40(r9) + std r0,48(r9) + std r0,56(r9) + addi r9,r9,64 +7: + beq cr1,Ltail // no chunks to dcbz128 + +// Loop doing 128-byte version of DCBZ instruction. +// NB: if the memory is cache-inhibited, the kernel will clear cr7 +// when it emulates the alignment exception. Eventually, we may want +// to check for this case. + +Ldcbz: + dcbz128 0,r9 // zero another 32 bytes + addi r9,r9,128 + bdnz Ldcbz + +// Store trailing bytes. +// r0 = 0 +// r4 = count +// r9 = ptr + +Ltail: + srwi. r5,r4,4 // r5 <- 16-byte chunks to 0 + mtcrf 0x01,r4 // remaining byte count to cr7 + mtctr r5 + beq 2f // skip if no 16-byte chunks +1: // loop over 16-byte chunks + std r0,0(r9) + std r0,8(r9) + addi r9,r9,16 + bdnz 1b +2: + bf 28,4f // 8-byte chunk? 
+ std r0,0(r9) + addi r9,r9,8 +4: + bf 29,5f // word? + stw r0,0(r9) + addi r9,r9,4 +5: + bf 30,6f // halfword? + sth r0,0(r9) + addi r9,r9,2 +6: + bflr 31 // byte? + stb r0,0(r9) + blr + + COMMPAGE_DESCRIPTOR(bzero_128,_COMM_PAGE_BZERO,kCache128+k64Bit,0,kCommPageMTCRF) diff --git a/osfmk/ppc/commpage/bzero_32.s b/osfmk/ppc/commpage/bzero_32.s new file mode 100644 index 000000000..820120061 --- /dev/null +++ b/osfmk/ppc/commpage/bzero_32.s @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#define ASSEMBLER +#include +#include +#include +#include + + .text + .align 2 + .globl EXT(bzero_32) + + +// ******************* +// * B Z E R O _ 3 2 * +// ******************* +// +// For 32-bit processors with a 32-byte cache line. +// +// Register use: +// r0 = zero +// r3 = original ptr, not changed since memset returns it +// r4 = count of bytes to set +// r9 = working operand ptr +// We do not touch r2 and r10-r12, which some callers depend on. + + .align 5 +bzero_32: // void bzero(void *b, size_t len); + cmplwi cr7,r4,32 // too short for DCBZ? + li r0,0 // get a 0 + neg r5,r3 // start to compute #bytes to align + mr r9,r3 // make copy of operand ptr (can't change r3) + blt cr7,Ltail // length < 32, too short for DCBZ + +// At least 32 bytes long, so compute alignment and #cache blocks. + + andi. r5,r5,0x1F // r5 <- #bytes to 32-byte align + sub r4,r4,r5 // adjust length + srwi r8,r4,5 // r8 <- #32-byte chunks + cmpwi cr1,r8,0 // any chunks? + mtctr r8 // set up loop count + beq 1f // skip if already 32-byte aligned (r8!=0) + +// 32-byte align. We just store 32 0s, rather than test and use conditional +// branches. We've already stored the first few bytes above. + + stw r0,0(r9) + stw r0,4(r9) + stw r0,8(r9) + stw r0,12(r9) + stw r0,16(r9) + stw r0,20(r9) + stw r0,24(r9) + stw r0,28(r9) + add r9,r9,r5 // now rp is 32-byte aligned + beq cr1,Ltail // skip if no 32-byte chunks + +// Loop doing 32-byte version of DCBZ instruction. +// NB: we take alignment exceptions on cache-inhibited memory. +// The kernel could be changed to zero cr7 when emulating a +// dcbz (as it does on 64-bit processors), so we could avoid all +// but the first. + +1: + andi. r5,r4,0x1F // will there be trailing bytes? + b 2f + .align 4 +2: + dcbz 0,r9 // zero another 32 bytes + addi r9,r9,32 + bdnz 2b + + beqlr // no trailing bytes + +// Store trailing bytes. + +Ltail: + andi. 
r5,r4,0x10 // test bit 27 separately + mtcrf 0x01,r4 // remaining byte count to cr7 + + beq 2f // no 16-byte chunks + stw r0,0(r9) + stw r0,4(r9) + stw r0,8(r9) + stw r0,12(r9) + addi r9,r9,16 +2: + bf 28,4f // 8-byte chunk? + stw r0,0(r9) + stw r0,4(r9) + addi r9,r9,8 +4: + bf 29,5f // word? + stw r0,0(r9) + addi r9,r9,4 +5: + bf 30,6f // halfword? + sth r0,0(r9) + addi r9,r9,2 +6: + bflr 31 // byte? + stb r0,0(r9) + blr + + COMMPAGE_DESCRIPTOR(bzero_32,_COMM_PAGE_BZERO,kCache32,0,0) diff --git a/osfmk/ppc/commpage/cacheflush.s b/osfmk/ppc/commpage/cacheflush.s new file mode 100644 index 000000000..a556ad6c2 --- /dev/null +++ b/osfmk/ppc/commpage/cacheflush.s @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#define ASSEMBLER +#include +#include // EXT, LEXT +#include +#include + + .text + .align 2 + .globl EXT(commpage_flush_dcache) + .globl EXT(commpage_flush_icache) + + +// ********************************************* +// * C O M M P A G E _ F L U S H _ D C A C H E * +// ********************************************* +// +// r3 = ptr to 1st byte to flush +// r4 = length to flush (may be 0) + +commpage_flush_dcache: + cmpwi r4,0 // length 0? + lhz r5,_COMM_PAGE_CACHE_LINESIZE(0) + subi r9,r5,1 // get (linesize-1) + and r0,r3,r9 // get offset within line of 1st byte + add r4,r4,r0 // adjust length so we flush them all + add r4,r4,r9 // round length up... + andc r4,r4,r9 // ...to multiple of cache lines + beqlr-- // length was 0, so exit +1: + sub. r4,r4,r5 // more to go? + dcbf 0,r3 // flush another line + add r3,r3,r5 + bne 1b + sync // make sure lines are flushed before we return + blr + + COMMPAGE_DESCRIPTOR(commpage_flush_dcache,_COMM_PAGE_FLUSH_DCACHE,0,0,0) // matches all CPUs + + +// ********************************************* +// * C O M M P A G E _ F L U S H _ I C A C H E * +// ********************************************* +// +// r3 = ptr to 1st byte to flush +// r4 = length to flush (may be 0) + +commpage_flush_icache: + cmpwi r4,0 // length 0? + lhz r5,_COMM_PAGE_CACHE_LINESIZE(0) + subi r9,r5,1 // get (linesize-1) + and r0,r3,r9 // get offset within line of 1st byte + add r4,r4,r0 // adjust length so we flush them all + mr r7,r3 // copy ptr + add r4,r4,r9 // round length up... + andc r4,r4,r9 // ...to multiple of cache lines + mr r6,r4 // copy length + beqlr-- // length was 0, so exit +1: + sub. r4,r4,r5 // more to go? + dcbf 0,r3 // flush another line + add r3,r3,r5 + bne 1b + sync // make sure lines are flushed +2: + sub. r6,r6,r5 // more to go? 
+ icbi 0,r7 + add r7,r7,r5 + bne 2b + isync // make sure we haven't prefetched old instructions + + blr + + COMMPAGE_DESCRIPTOR(commpage_flush_icache,_COMM_PAGE_FLUSH_ICACHE,0,0,0) // matches all CPUs + + diff --git a/osfmk/ppc/commpage/commpage.c b/osfmk/ppc/commpage/commpage.c index 907ae701d..e61140246 100644 --- a/osfmk/ppc/commpage/commpage.c +++ b/osfmk/ppc/commpage/commpage.c @@ -24,9 +24,22 @@ */ /* - * This is a simplifed version of the commpage support from 10.3. - * The supported feature is the tuning of _cpu_capabilities. - * There is no shared page for user processes. + * Here's what to do if you want to add a new routine to the comm page: + * + * 1. Add a definition for it's address in osfmk/ppc/cpu_capabilities.h, + * being careful to reserve room for future expansion. + * + * 2. Write one or more versions of the routine, each with it's own + * commpage_descriptor. The tricky part is getting the "special", + * "musthave", and "canthave" fields right, so that exactly one + * version of the routine is selected for every machine. + * The source files should be in osfmk/ppc/commpage/. + * + * 3. Add a ptr to your new commpage_descriptor(s) in the "routines" + * array in commpage_populate(). Of course, you'll also have to + * declare them "extern" in commpage_populate(). + * + * 4. Write the code in Libc to use the new routine. */ #include @@ -35,9 +48,80 @@ #include #include #include +#include +#include +#include + +static char *next = NULL; // next available byte in comm page +static int cur_routine = 0; // comm page address of "current" routine +static int matched; // true if we've found a match for "current" routine int _cpu_capabilities = 0; // define the capability vector +char *commPagePtr = NULL; // virtual address of comm page in kernel map + + +/* Allocate the commpages and add to the shared submap created by vm: + * 1. allocate pages in the kernel map (RW) + * 2. wire them down + * 3. make a memory entry out of them + * 4. 
map that entry into the shared comm region map (R-only) + */ +static void* +commpage_allocate( void ) +{ + extern vm_map_t com_region_map; // the shared submap, set up in vm init + vm_offset_t kernel_addr; // address of commpage in kernel map + vm_offset_t zero = 0; + vm_size_t size = _COMM_PAGE_AREA_USED; // size actually populated + ipc_port_t handle; + + if (com_region_map == NULL) + panic("commpage map is null"); + + if (vm_allocate(kernel_map,&kernel_addr,_COMM_PAGE_AREA_USED,VM_FLAGS_ANYWHERE)) + panic("cannot allocate commpage"); + + if (vm_map_wire(kernel_map,kernel_addr,kernel_addr+_COMM_PAGE_AREA_USED,VM_PROT_DEFAULT,FALSE)) + panic("cannot wire commpage"); + + if (mach_make_memory_entry( kernel_map, // target map + &size, // size + kernel_addr, // offset (address in kernel map) + VM_PROT_DEFAULT, // map it RW + &handle, // this is the object handle we get + NULL )) // parent_entry + panic("cannot make entry for commpage"); + + if (vm_map_64( com_region_map, // target map (shared submap) + &zero, // address (map into 1st page in submap) + _COMM_PAGE_AREA_USED, // size + 0, // mask + VM_FLAGS_FIXED, // flags (it must be 1st page in submap) + handle, // port is the memory entry we just made + 0, // offset (map 1st page in memory entry) + FALSE, // copy + VM_PROT_READ, // cur_protection (R-only in user map) + VM_PROT_READ, // max_protection + VM_INHERIT_SHARE )) // inheritance + panic("cannot map commpage"); + + ipc_port_release(handle); + + return (void*) kernel_addr; // return address in kernel map +} + + +/* Get address (in kernel map) of a commpage field. */ + +static void* +commpage_addr_of( + int addr_at_runtime ) +{ + return (void*) (commPagePtr + addr_at_runtime - _COMM_PAGE_BASE_ADDRESS); +} + + /* Determine number of CPUs on this system. We cannot rely on * machine_info.max_cpus this early in the boot. */ @@ -86,6 +170,186 @@ commpage_init_cpu_capabilities( void ) } +/* Copy data into commpage. */ + + void +commpage_stuff( + int address, + void *source, + int length ) +{ + char *dest = commpage_addr_of(address); + + if (dest < next) + panic("commpage overlap: %08 - %08X", dest, next); + + bcopy((char*)source,dest,length); + + next = (dest + length); +} + + +/* Modify commpage code in-place for this specific platform. */ + +static void +commpage_change( + uint32_t *ptr, + int bytes, + uint32_t search_mask, + uint32_t search_pattern, + uint32_t new_mask, + uint32_t new_pattern, + int (*check)(uint32_t instruction) ) +{ + int words = bytes >> 2; + uint32_t word; + int found_one = 0; + + while( (--words) >= 0 ) { + word = *ptr; + if ((word & search_mask)==search_pattern) { + if ((check==NULL) || (check(word))) { // check instruction if necessary + found_one = 1; + word &= ~new_mask; + word |= new_pattern; + *ptr = word; + } + } + ptr++; + } + + if (!found_one) + panic("commpage opcode not found"); +} + + +/* Check to see if exactly one bit is set in a MTCRF instruction's FXM field. + */ +static int +commpage_onebit( + uint32_t mtcrf ) +{ + int x = (mtcrf >> 12) & 0xFF; // isolate the FXM field of the MTCRF + + if (x==0) + panic("commpage bad mtcrf"); + + return (x & (x-1))==0 ? 1 : 0; // return 1 iff exactly 1 bit set in FXM field +} + + +/* Handle kCommPageDCBA bit: this routine uses DCBA. If the machine we're + * running on doesn't benefit from use of that instruction, map them to NOPs + * in the commpage. 
+ */ +static void +commpage_handle_dcbas( + int address, + int length ) +{ + uint32_t *ptr, search_mask, search, replace_mask, replace; + + if ((_cpu_capabilities & kDcbaAvailable) == 0) { + ptr = commpage_addr_of(address); + + search_mask = 0xFC0007FE; // search x-form opcode bits + search = 0x7C0005EC; // for a DCBA + replace_mask = 0xFFFFFFFF; // replace all bits... + replace = 0x60000000; // ...with a NOP + + commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL); + } +} + + +/* Handle kCommPageSYNC bit: this routine uses SYNC or LWSYNC. If we're + * running on a UP machine, map them to NOPs. + */ +static void +commpage_handle_syncs( + int address, + int length ) +{ + uint32_t *ptr, search_mask, search, replace_mask, replace; + + if (_NumCPUs() == 1) { + ptr = commpage_addr_of(address); + + search_mask = 0xFC0007FE; // search x-form opcode bits + search = 0x7C0004AC; // for a SYNC or LWSYNC + replace_mask = 0xFFFFFFFF; // replace all bits... + replace = 0x60000000; // ...with a NOP + + commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL); + } +} + + +/* Handle kCommPageMTCRF bit. When this was written (3/03), the assembler did not + * recognize the special form of MTCRF instructions, in which exactly one bit is set + * in the 8-bit mask field. Bit 11 of the instruction should be set in this case, + * since the 970 and probably other 64-bit processors optimize it. Once the assembler + * has been updated this code can be removed, though it need not be. + */ +static void +commpage_handle_mtcrfs( + int address, + int length ) +{ + uint32_t *ptr, search_mask, search, replace_mask, replace; + + if (_cpu_capabilities & k64Bit) { + ptr = commpage_addr_of(address); + + search_mask = 0xFC0007FE; // search x-form opcode bits + search = 0x7C000120; // for a MTCRF + replace_mask = 0x00100000; // replace bit 11... + replace = 0x00100000; // ...with a 1-bit + + commpage_change(ptr,length,search_mask,search,replace_mask,replace,commpage_onebit); + } +} + + +/* Copy a routine into comm page if it matches running machine. + */ +static void +commpage_stuff_routine( + commpage_descriptor *rd ) +{ + char *routine_code; + int must,cant; + + if (rd->commpage_address != cur_routine) { + if ((cur_routine!=0) && (matched==0)) + panic("commpage no match"); + cur_routine = rd->commpage_address; + matched = 0; + } + + must = _cpu_capabilities & rd->musthave; + cant = _cpu_capabilities & rd->canthave; + + if ((must == rd->musthave) && (cant == 0)) { + if (matched) + panic("commpage duplicate matches"); + matched = 1; + routine_code = ((char*)rd) + rd->code_offset; + + commpage_stuff(rd->commpage_address,routine_code,rd->code_length); + + if (rd->special & kCommPageDCBA) + commpage_handle_dcbas(rd->commpage_address,rd->code_length); + + if (rd->special & kCommPageSYNC) + commpage_handle_syncs(rd->commpage_address,rd->code_length); + + if (rd->special & kCommPageMTCRF) + commpage_handle_mtcrfs(rd->commpage_address,rd->code_length); + } +} + + /* Fill in commpage: called once, during kernel initialization, from the * startup thread before user-mode code is running. 
* See the top of this file for a list of what you have to do to add @@ -94,5 +358,136 @@ commpage_init_cpu_capabilities( void ) void commpage_populate( void ) { + char c1; + short c2; + addr64_t c8; + static double two52 = 1048576.0 * 1048576.0 * 4096.0; // 2**52 + static double ten6 = 1000000.0; // 10**6 + commpage_descriptor **rd; + short version = _COMM_PAGE_THIS_VERSION; + + + commPagePtr = (char*) commpage_allocate(); + commpage_init_cpu_capabilities(); + + + /* Stuff in the constants. We move things into the comm page in strictly + * ascending order, so we can check for overlap and panic if so. + */ + + commpage_stuff(_COMM_PAGE_VERSION,&version,2); + + commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(int)); + + c1 = (_cpu_capabilities & kHasAltivec) ? -1 : 0; + commpage_stuff(_COMM_PAGE_ALTIVEC,&c1,1); + + c1 = (_cpu_capabilities & k64Bit) ? -1 : 0; + commpage_stuff(_COMM_PAGE_64_BIT,&c1,1); + + if (_cpu_capabilities & kCache32) + c2 = 32; + else if (_cpu_capabilities & kCache64) + c2 = 64; + else if (_cpu_capabilities & kCache128) + c2 = 128; + commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2); + + commpage_stuff(_COMM_PAGE_2_TO_52,&two52,8); + + commpage_stuff(_COMM_PAGE_10_TO_6,&ten6,8); + + c8 = 0; // 0 timestamp means "disabled" + commpage_stuff(_COMM_PAGE_TIMEBASE,&c8,8); + commpage_stuff(_COMM_PAGE_TIMESTAMP,&c8,8); + commpage_stuff(_COMM_PAGE_SEC_PER_TICK,&c8,8); + + + /* Now the routines. We try each potential routine in turn, + * and copy in any that "match" the platform we are running on. + * We require that exactly one routine match for each slot in the + * comm page, and panic if not. + * + * The check for overlap assumes that these routines are + * in strictly ascending order, sorted by address in the + * comm page. 
+ */
+
+    extern commpage_descriptor mach_absolute_time_32;
+    extern commpage_descriptor mach_absolute_time_64;
+    extern commpage_descriptor spinlock_32_try_mp;
+    extern commpage_descriptor spinlock_32_try_up;
+    extern commpage_descriptor spinlock_64_try_mp;
+    extern commpage_descriptor spinlock_64_try_up;
+    extern commpage_descriptor spinlock_32_lock_mp;
+    extern commpage_descriptor spinlock_32_lock_up;
+    extern commpage_descriptor spinlock_64_lock_mp;
+    extern commpage_descriptor spinlock_64_lock_up;
+    extern commpage_descriptor spinlock_32_unlock_mp;
+    extern commpage_descriptor spinlock_32_unlock_up;
+    extern commpage_descriptor spinlock_64_unlock_mp;
+    extern commpage_descriptor spinlock_64_unlock_up;
+    extern commpage_descriptor pthread_getspecific_sprg3;
+    extern commpage_descriptor pthread_getspecific_uftrap;
+    extern commpage_descriptor gettimeofday_32;
+    extern commpage_descriptor gettimeofday_64;
+    extern commpage_descriptor commpage_flush_dcache;
+    extern commpage_descriptor commpage_flush_icache;
+    extern commpage_descriptor pthread_self_sprg3;
+    extern commpage_descriptor pthread_self_uftrap;
+    extern commpage_descriptor spinlock_relinquish;
+    extern commpage_descriptor bzero_32;
+    extern commpage_descriptor bzero_128;
+    extern commpage_descriptor bcopy_g3;
+    extern commpage_descriptor bcopy_g4;
+    extern commpage_descriptor bcopy_970;
+    extern commpage_descriptor bcopy_64;
+    extern commpage_descriptor bigcopy_970;
+
+    static commpage_descriptor *routines[] = {
+        &mach_absolute_time_32,
+        &mach_absolute_time_64,
+        &spinlock_32_try_mp,
+        &spinlock_32_try_up,
+        &spinlock_64_try_mp,
+        &spinlock_64_try_up,
+        &spinlock_32_lock_mp,
+        &spinlock_32_lock_up,
+        &spinlock_64_lock_mp,
+        &spinlock_64_lock_up,
+        &spinlock_32_unlock_mp,
+        &spinlock_32_unlock_up,
+        &spinlock_64_unlock_mp,
+        &spinlock_64_unlock_up,
+        &pthread_getspecific_sprg3,
+        &pthread_getspecific_uftrap,
+        &gettimeofday_32,
+        &gettimeofday_64,
+        &commpage_flush_dcache,
+        &commpage_flush_icache,
+        &pthread_self_sprg3,
+        &pthread_self_uftrap,
+        &spinlock_relinquish,
+        &bzero_32,
+        &bzero_128,
+        &bcopy_g3,
+        &bcopy_g4,
+        &bcopy_970,
+        &bcopy_64,
+        &bigcopy_970,
+        NULL };
+
+    for( rd = routines; *rd != NULL ; rd++ )
+        commpage_stuff_routine(*rd);
+
+    if (!matched)
+        panic("commpage no match on last routine");
+
+    if (next > (commPagePtr + _COMM_PAGE_AREA_USED))
+        panic("commpage overflow");
+
+    sync_cache_virtual((vm_offset_t) commPagePtr,_COMM_PAGE_AREA_USED);  // make all that new code executable
+
 }
 
diff --git a/osfmk/ppc/commpage/commpage_asm.s b/osfmk/ppc/commpage/commpage_asm.s
index 5ec82596b..3c30b32a5 100644
--- a/osfmk/ppc/commpage/commpage_asm.s
+++ b/osfmk/ppc/commpage/commpage_asm.s
@@ -37,6 +37,26 @@
 #define kLoopCnt       5       // Iterations of the timing loop
 #define kDCBA          22      // Bit in cr5 used as a flag in timing loop
 
+
+// commpage_set_timestamp() uses the red zone for temporary storage:
+
+#define rzSaveF1       -8      // caller's FPR1
+#define rzSaveF2       -16     // caller's FPR2
+#define rzSaveF3       -24     // caller's FPR3
+#define rzSaveF4       -32     // caller's FPR4
+#define rzSaveF5       -40     // caller's FPR5
+#define rzNewTimeBase  -48     // used to load 64-bit TBR into a FPR
+
+
+// commpage_set_timestamp() uses the following data.  kkTicksPerSec remembers
+// the number used to compute _COMM_PAGE_SEC_PER_TICK.  Since this constant
+// rarely changes, we use it to avoid needless recomputation.  It is a double
+// value, pre-initialized with an exponent of 2**52.
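The "exponent of 2**52" trick referenced above recurs throughout these routines, so it is worth spelling out. Here is a minimal C sketch of the idea (an illustration only, not part of the patch; the helper name u32_to_double is invented, and big-endian word order is assumed, as on PowerPC). Because 32-bit PowerPC has no fixed-to-float instruction, a 32-bit unsigned integer written into the low word of a double whose high word is 0x43300000 (the exponent of 2**52) represents exactly 2**52 + n, and subtracting 2**52 recovers double(n):

    #include <stdint.h>

    /* Illustration only: convert a 32-bit unsigned integer to a double the
     * way the commpage assembly does, assuming a big-endian target.
     */
    static double u32_to_double(uint32_t n)
    {
        union { double d; uint32_t half[2]; } u;
        u.half[0] = 0x43300000;             /* high word: exponent of 2**52 */
        u.half[1] = n;                      /* low word: the integer payload */
        return u.d - 4503599627370496.0;    /* subtract 2**52 to normalize */
    }

The assembly performs the same steps with stw/lfd pairs through the red zone, since PowerPC has no direct GPR-to-FPR move.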
+ +#define kkBinary0 0 // offset in data to long long 0 (a constant) +#define kkDouble1 8 // offset in data to double 1.0 (a constant) +#define kkTicksPerSec 16 // offset in data to double(ticks_per_sec) + .data .align 3 // three doubleword fields Ldata: @@ -49,6 +69,102 @@ Ldata: .text .align 2 .globl EXT(commpage_time_dcba) + .globl EXT(commpage_set_timestamp) + + +/* *********************************************** + * * C O M M P A G E _ S E T _ T I M E S T A M P * + * *********************************************** + * + * Update the gettimeofday() shared data on the commpage, as follows: + * _COMM_PAGE_TIMESTAMP = a BSD-style pair of uint_32's for secs and usecs + * _COMM_PAGE_TIMEBASE = the timebase at which the timestamp was valid + * _COMM_PAGE_SEC_PER_TICK = multiply timebase ticks by this to get seconds (double) + * The convention is that if the timebase is 0, the data is invalid. Because other + * CPUs are reading the three values asynchronously and must get a consistent set, + * it is critical that we update them with the following protocol: + * 1. set timebase to 0 (atomically), to invalidate all three values + * 2. eieio (to create a barrier in stores to cacheable memory) + * 3. change timestamp and "secs per tick" + * 4. eieio + * 5. set timebase nonzero (atomically) + * This works because readers read the timebase, then the timestamp and divisor, sync + * if MP, then read the timebase a second time and check to be sure it is equal to the first. + * + * We could save a few cycles on 64-bit machines by special casing them, but it probably + * isn't necessary because this routine shouldn't be called very often. + * + * When called: + * r3 = upper half of timebase (timebase is disabled if 0) + * r4 = lower half of timebase + * r5 = seconds part of timestamp + * r6 = useconds part of timestamp + * r7 = divisor (ie, timebase ticks per sec) + * We set up: + * r8 = ptr to our static data (kkBinary0, kkDouble1, kkTicksPerSec) + * r9 = ptr to comm page in kernel map + * + * --> Interrupts must be disabled and rtclock locked when called. <-- + */ + + .align 5 +LEXT(commpage_set_timestamp) // void commpage_set_timestamp(tbr,secs,usecs,divisor) + mfmsr r11 // get MSR + ori r2,r11,MASK(MSR_FP) // turn FP on + mtmsr r2 + isync // wait until MSR changes take effect + + or. r0,r3,r4 // is timebase 0? (thus disabled) + lis r8,hi16(Ldata) // point to our data + lis r9,ha16(EXT(commPagePtr)) // get ptr to address of commpage in kernel map + stfd f1,rzSaveF1(r1) // save a FPR in the red zone + ori r8,r8,lo16(Ldata) + lwz r9,lo16(EXT(commPagePtr))(r9) // r9 <- commPagePtr + lfd f1,kkBinary0(r8) // get fixed 0s + li r0,_COMM_PAGE_BASE_ADDRESS // get va in user space of commpage + cmpwi cr1,r9,0 // is commpage allocated yet? + sub r9,r9,r0 // r9 <- commpage address, biased by user va + beq-- cr1,3f // skip if not allocated + stfd f1,_COMM_PAGE_TIMEBASE(r9) // turn off the timestamp (atomically) + eieio // make sure all CPUs see it is off + beq 3f // all we had to do is turn off timestamp + + lwz r0,kkTicksPerSec+4(r8) // get last ticks_per_sec (or 0 if first) + stw r3,rzNewTimeBase(r1) // store new timebase so we can lfd + stw r4,rzNewTimeBase+4(r1) + cmpw r0,r7 // do we need to recompute _COMM_PAGE_SEC_PER_TICK? 
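+                                       // The timebase is now zero (step 1 of the protocol above) and
+                                       // the eieio barrier has been issued (step 2).  The stores below
+                                       // are step 3; the second eieio and the nonzero timebase store
+                                       // (steps 4 and 5) are at label 2 below.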
+        stw     r5,_COMM_PAGE_TIMESTAMP(r9)     // store the new timestamp
+        stw     r6,_COMM_PAGE_TIMESTAMP+4(r9)
+        lfd     f1,rzNewTimeBase(r1)            // get timebase in a FPR so we can store atomically
+        beq++   2f                              // same ticks_per_sec, no need to recompute
+
+        stw     r7,kkTicksPerSec+4(r8)          // must recompute SEC_PER_TICK
+        stfd    f2,rzSaveF2(r1)                 // we'll need a few more temp FPRs
+        stfd    f3,rzSaveF3(r1)
+        stfd    f4,rzSaveF4(r1)
+        stfd    f5,rzSaveF5(r1)
+        lfd     f2,_COMM_PAGE_2_TO_52(r9)       // f2 <- double(2**52)
+        lfd     f3,kkTicksPerSec(r8)            // float new ticks_per_sec + 2**52
+        lfd     f4,kkDouble1(r8)                // f4 <- double(1.0)
+        mffs    f5                              // save caller's FPSCR
+        mtfsfi  7,0                             // clear Inexact Exception bit, set round-to-nearest
+        fsub    f3,f3,f2                        // get ticks_per_sec
+        fdiv    f3,f4,f3                        // divide 1 by ticks_per_sec to get SEC_PER_TICK
+        stfd    f3,_COMM_PAGE_SEC_PER_TICK(r9)
+        mtfsf   0xFF,f5                         // restore FPSCR
+        lfd     f2,rzSaveF2(r1)                 // restore FPRs
+        lfd     f3,rzSaveF3(r1)
+        lfd     f4,rzSaveF4(r1)
+        lfd     f5,rzSaveF5(r1)
+2:                                              // f1 == new timestamp
+        eieio                                   // wait until the stores take
+        stfd    f1,_COMM_PAGE_TIMEBASE(r9)      // then turn the timestamp back on (atomically)
+3:                                              // here once all fields updated
+        lfd     f1,rzSaveF1(r1)                 // restore last FPR
+        mtmsr   r11                             // turn FP back off
+        isync
+        blr
+
 
 /* ***************************************
  * * C O M M P A G E _ T I M E _ D C B A *
diff --git a/osfmk/ppc/commpage/gettimeofday.s b/osfmk/ppc/commpage/gettimeofday.s
new file mode 100644
index 000000000..f16855913
--- /dev/null
+++ b/osfmk/ppc/commpage/gettimeofday.s
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#define ASSEMBLER
+#include
+#include                // EXT, LEXT
+#include
+#include
+
+#define USEC_PER_SEC    1000000
+
+
+/* The red zone is used to move data between GPRs and FPRs: */
+
+#define rzTicks         -8      // elapsed ticks since timestamp (double)
+#define rzSeconds       -16     // seconds since timestamp (double)
+#define rzUSeconds      -24     // useconds since timestamp (double)
+
+
+        .text
+        .align  2
+        .globl  EXT(gettimeofday_32)
+        .globl  EXT(gettimeofday_64)
+
+
+// *********************************
+// * G E T T I M E O F D A Y _ 3 2 *
+// *********************************
+//
+// This is a subroutine of gettimeofday.c that gets the seconds and microseconds
+// in user mode, usually without having to make a system call.  We do not deal with
+// the timezone.  The kernel maintains the following values in the comm page:
+//
+//      _COMM_PAGE_TIMESTAMP = a BSD-style pair of uint32_t's for seconds and microseconds
+//
+//      _COMM_PAGE_TIMEBASE = the timebase at which the timestamp was valid
+//
+//      _COMM_PAGE_SEC_PER_TICK = multiply timebase ticks by this to get seconds (double)
+//
+//      _COMM_PAGE_2_TO_52 = double precision constant 2**52
+//
+//      _COMM_PAGE_10_TO_6 = double precision constant 10**6
+//
+// We have to be careful to read these values atomically.  The kernel updates them
+// asynchronously to account for drift or time changes (eg, ntp.)  We adopt the
+// convention that (timebase==0) means the timestamp is invalid, in which case we
+// return a bad status so our caller can make the system call.
+//
+//      r3 = ptr to user's timeval structure (should not be null)
+
+gettimeofday_32:                                // int gettimeofday_32(timeval *tp);
+0:
+        lwz     r5,_COMM_PAGE_TIMEBASE+0(0)     // r5,r6 = TBR at timestamp
+        lwz     r6,_COMM_PAGE_TIMEBASE+4(0)
+        lwz     r7,_COMM_PAGE_TIMESTAMP+0(0)    // r7 = timestamp seconds
+        lwz     r8,_COMM_PAGE_TIMESTAMP+4(0)    // r8 = timestamp microseconds
+        lfd     f1,_COMM_PAGE_SEC_PER_TICK(0)
+1:
+        mftbu   r10                             // r10,r11 = current timebase
+        mftb    r11
+        mftbu   r12
+        cmplw   r10,r12
+        bne-    1b
+        or.     r0,r5,r6                        // timebase 0? (ie, is timestamp invalid?)
+
+        sync                                    // create a barrier (patched to NOP if UP)
+
+        lwz     r0,_COMM_PAGE_TIMEBASE+0(0)     // then load data a 2nd time
+        lwz     r12,_COMM_PAGE_TIMEBASE+4(0)
+        lwz     r2,_COMM_PAGE_TIMESTAMP+0(0)
+        lwz     r9,_COMM_PAGE_TIMESTAMP+4(0)
+        cmplw   cr6,r5,r0                       // did we read a consistent set?
+        cmplw   cr7,r6,r12
+        beq-    3f                              // timestamp is disabled so return bad status
+        cmplw   cr1,r2,r7
+        cmplw   cr5,r9,r8
+        crand   cr0_eq,cr6_eq,cr7_eq
+        crand   cr1_eq,cr1_eq,cr5_eq
+        crand   cr0_eq,cr0_eq,cr1_eq
+        bne-    0b                              // loop until we have a consistent set of data
+
+        subfc   r11,r6,r11                      // compute ticks since timestamp
+        lwz     r9,_COMM_PAGE_2_TO_52(0)        // get exponent for (2**52)
+        subfe   r10,r5,r10                      // complete 64-bit subtract
+        lfd     f2,_COMM_PAGE_2_TO_52(0)        // f2 <- (2**52)
+        srwi.   r0,r10,2                        // if more than 2**34 ticks have elapsed...
+        stw     r11,rzTicks+4(r1)               // store elapsed ticks into red zone
+        or      r10,r10,r9                      // convert long-long in (r10,r11) into double
+        bne-    3f                              // ...call kernel to reprime timestamp
+
+        stw     r10,rzTicks(r1)                 // complete double
+        lis     r12,hi16(USEC_PER_SEC)
+        ori     r12,r12,lo16(USEC_PER_SEC)
+
+        lfd     f3,rzTicks(r1)                  // get elapsed ticks since timestamp + 2**52
+        fsub    f4,f3,f2                        // subtract 2**52 and normalize
+        fmul    f5,f4,f1                        // f5 <- elapsed seconds since timestamp
+        lfd     f3,_COMM_PAGE_10_TO_6(0)        // get 10**6
+        fctiwz  f6,f5                           // convert to integer
+        stfd    f6,rzSeconds(r1)                // store integer seconds into red zone
+        stw     r9,rzSeconds(r1)                // prepare to reload as floating pt
+        lfd     f6,rzSeconds(r1)                // get seconds + 2**52
+        fsub    f6,f6,f2                        // f6 <- integral seconds
+        fsub    f6,f5,f6                        // f6 <- fractional part of elapsed seconds
+        fmul    f6,f6,f3                        // f6 <- fractional elapsed useconds
+        fctiwz  f6,f6                           // convert useconds to integer
+        stfd    f6,rzUSeconds(r1)               // store useconds into red zone
+
+        lwz     r5,rzSeconds+4(r1)              // r5 <- seconds since timestamp
+        lwz     r6,rzUSeconds+4(r1)             // r6 <- useconds since timestamp
+        add     r7,r7,r5                        // add elapsed seconds to timestamp seconds
+        add     r8,r8,r6                        // ditto useconds
+
+        cmplw   r8,r12                          // r8 >= USEC_PER_SEC ?
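+                                                // timestamp usecs and elapsed usecs are each < 10**6,
+                                                // so at most one carry into the seconds is needed: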
+ blt 2f // no + addi r7,r7,1 // add 1 to secs + sub r8,r8,r12 // subtract USEC_PER_SEC from usecs +2: + stw r7,0(r3) // store secs//usecs into user's timeval + stw r8,4(r3) + li r3,0 // return success + blr +3: // too long since last timestamp or this code is disabled + li r3,1 // return bad status so our caller will make syscall + blr + + COMMPAGE_DESCRIPTOR(gettimeofday_32,_COMM_PAGE_GETTIMEOFDAY,0,k64Bit,kCommPageSYNC) + + +// ********************************* +// * G E T T I M E O F D A Y _ 6 4 * +// ********************************* + +gettimeofday_64: // int gettimeofday_64(timeval *tp); +0: + ld r6,_COMM_PAGE_TIMEBASE(0) // r6 = TBR at timestamp + ld r8,_COMM_PAGE_TIMESTAMP(0) // r8 = timestamp (seconds,useconds) + lfd f1,_COMM_PAGE_SEC_PER_TICK(0) + mftb r10 // r10 = get current timebase + lwsync // create a barrier if MP (patched to NOP if UP) + ld r11,_COMM_PAGE_TIMEBASE(0) // then get data a 2nd time + ld r12,_COMM_PAGE_TIMESTAMP(0) + cmpdi cr1,r6,0 // is the timestamp disabled? + cmpld cr6,r6,r11 // did we read a consistent set? + cmpld cr7,r8,r12 + beq-- cr1,3f // exit if timestamp disabled + crand cr6_eq,cr7_eq,cr6_eq + sub r11,r10,r6 // compute elapsed ticks from timestamp + bne-- cr6,0b // loop until we have a consistent set of data + + srdi. r0,r11,35 // has it been more than 2**35 ticks since last timestamp? + std r11,rzTicks(r1) // put ticks in redzone where we can "lfd" it + bne-- 3f // timestamp too old, so reprime + + lfd f3,rzTicks(r1) // get elapsed ticks since timestamp (fixed pt) + fcfid f4,f3 // float the tick count + fmul f5,f4,f1 // f5 <- elapsed seconds since timestamp + lfd f3,_COMM_PAGE_10_TO_6(0) // get 10**6 + fctidz f6,f5 // convert integer seconds to fixed pt + stfd f6,rzSeconds(r1) // save fixed pt integer seconds in red zone + fcfid f6,f6 // float the integer seconds + fsub f6,f5,f6 // f6 <- fractional part of elapsed seconds + fmul f6,f6,f3 // f6 <- fractional elapsed useconds + fctidz f6,f6 // convert useconds to fixed pt integer + stfd f6,rzUSeconds(r1) // store useconds into red zone + + lis r12,hi16(USEC_PER_SEC) // r12 <- 10**6 + srdi r7,r8,32 // extract seconds from doubleword timestamp + lwz r5,rzSeconds+4(r1) // r5 <- seconds since timestamp + ori r12,r12,lo16(USEC_PER_SEC) + lwz r6,rzUSeconds+4(r1) // r6 <- useconds since timestamp + add r7,r7,r5 // add elapsed seconds to timestamp seconds + add r8,r8,r6 // ditto useconds + + cmplw r8,r12 // r8 >= USEC_PER_SEC ? + blt 2f // no + addi r7,r7,1 // add 1 to secs + sub r8,r8,r12 // subtract USEC_PER_SEC from usecs +2: + stw r7,0(r3) // store secs//usecs into user's timeval + stw r8,4(r3) + li r3,0 // return success + blr +3: // too long since last timestamp or this code is disabled + li r3,1 // return bad status so our caller will make syscall + blr + + COMMPAGE_DESCRIPTOR(gettimeofday_64,_COMM_PAGE_GETTIMEOFDAY,k64Bit,0,kCommPageSYNC) + + diff --git a/osfmk/ppc/commpage/mach_absolute_time.s b/osfmk/ppc/commpage/mach_absolute_time.s new file mode 100644 index 000000000..865f91e9c --- /dev/null +++ b/osfmk/ppc/commpage/mach_absolute_time.s @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#define ASSEMBLER +#include +#include // EXT, LEXT +#include +#include + + .text + .align 2 + .globl EXT(mach_absolute_time_32) + .globl EXT(mach_absolute_time_64) + + +// ********************************************* +// * M A C H _ A B S O L U T E _ T I M E _ 3 2 * +// ********************************************* + +mach_absolute_time_32: +1: + mftbu r3 + mftb r4 + mftbu r5 + cmplw r3,r5 + beqlr+ + b 1b + + COMMPAGE_DESCRIPTOR(mach_absolute_time_32,_COMM_PAGE_ABSOLUTE_TIME,0,k64Bit,0) + + +// ********************************************* +// * M A C H _ A B S O L U T E _ T I M E _ 6 4 * +// ********************************************* +// +// Why bother to special case for 64-bit? Because the "mftb" variants +// are 10 cycles each, and they are serialized. + +mach_absolute_time_64: + mftb r4 + srdi r3,r4,32 + blr + + COMMPAGE_DESCRIPTOR(mach_absolute_time_64,_COMM_PAGE_ABSOLUTE_TIME,k64Bit,0,0) + + diff --git a/osfmk/ppc/commpage/pthread.s b/osfmk/ppc/commpage/pthread.s new file mode 100644 index 000000000..a53c7ef3d --- /dev/null +++ b/osfmk/ppc/commpage/pthread.s @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include // EXT, LEXT +#include +#include + + .text + .align 2 + .globl EXT(pthread_getspecific_sprg3) + .globl EXT(pthread_getspecific_uftrap) + .globl EXT(pthread_self_sprg3) + .globl EXT(pthread_self_uftrap) + +#define USER_SPRG3 259 // user-mode-readable encoding for SPRG3 + + +// ***************************************************** +// * P T H R E A D _ G E T S P E C I F I C _ S P R G 3 * +// ***************************************************** +// +// For processors with user-readable SPRG3. 
Called with: +// r3 = word number +// r4 = offset to thread specific data (_PTHREAD_TSD_OFFSET) + +pthread_getspecific_sprg3: + slwi r5,r3,2 // convert word# to byte offset + mfspr r3,USER_SPRG3 // get per-thread cookie + add r5,r5,r4 // add in offset to first word + lwzx r3,r3,r5 // get the thread-specific word + blr + + COMMPAGE_DESCRIPTOR(pthread_getspecific_sprg3,_COMM_PAGE_PTHREAD_GETSPECIFIC,k64Bit,0,0) + + +// *************************************** +// * P T H R E A D _ S E L F _ S P R G 3 * +// *************************************** +// +// For processors with user-readable SPRG3. + +pthread_self_sprg3: + mfspr r3,USER_SPRG3 // get per-thread cookie + blr + + COMMPAGE_DESCRIPTOR(pthread_self_sprg3,_COMM_PAGE_PTHREAD_SELF,k64Bit,0,0) + + +// ******************************************************* +// * P T H R E A D _ G E T S P E C I F I C _ U F T R A P * +// ******************************************************* +// +// For processors that use the Ultra-Fast-Trap to get the thread-specific ptr. +// Called with: +// r3 = word number +// r4 = offset to thread specific data (_PTHREAD_TSD_OFFSET) + +pthread_getspecific_uftrap: + slwi r5,r3,2 // convert word# to byte offset + li r0,0x7FF2 // magic "pthread_self" ultra-fast trap code + sc + add r5,r5,r4 // add in offset to first word + lwzx r3,r3,r5 // get the thread-specific word + blr + + COMMPAGE_DESCRIPTOR(pthread_getspecific_uftrap,_COMM_PAGE_PTHREAD_GETSPECIFIC,0,k64Bit,0) + + +// ***************************************** +// * P T H R E A D _ S E L F _ U F T R A P * +// ***************************************** +// +// For processors that use the Ultra-Fast-Trap to get the thread-specific ptr. + +pthread_self_uftrap: + li r0,0x7FF2 // magic "pthread_self" ultra-fast trap code + sc // get r3==TLDP + blr + + COMMPAGE_DESCRIPTOR(pthread_self_uftrap,_COMM_PAGE_PTHREAD_SELF,0,k64Bit,0) diff --git a/osfmk/ppc/commpage/spinlocks.s b/osfmk/ppc/commpage/spinlocks.s new file mode 100644 index 000000000..e30999da1 --- /dev/null +++ b/osfmk/ppc/commpage/spinlocks.s @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include // EXT, LEXT +#include +#include + + .text + .align 2 + .globl EXT(spinlock_32_try_mp) + .globl EXT(spinlock_32_try_up) + .globl EXT(spinlock_32_lock_mp) + .globl EXT(spinlock_32_lock_up) + .globl EXT(spinlock_32_unlock_mp) + .globl EXT(spinlock_32_unlock_up) + + .globl EXT(spinlock_64_try_mp) + .globl EXT(spinlock_64_try_up) + .globl EXT(spinlock_64_lock_mp) + .globl EXT(spinlock_64_lock_up) + .globl EXT(spinlock_64_unlock_mp) + .globl EXT(spinlock_64_unlock_up) + + .globl EXT(spinlock_relinquish) + +#define MP_SPIN_TRIES 1000 + + +// The user mode spinlock library. There are many versions, +// in order to take advantage of a few special cases: +// - no barrier instructions (SYNC,ISYNC) are needed if UP +// - 64-bit processors can use LWSYNC instead of SYNC (if MP) +// - branch hints appropriate to the processor (+ vs ++ etc) +// - potentially custom relinquish strategies (not used at present) +// - fixes for errata as necessary + + +spinlock_32_try_mp: + mr r5, r3 + li r3, 1 +1: + lwarx r4,0,r5 + cmpwi r4,0 + bne- 2f + stwcx. r5,0,r5 + isync // cancel speculative execution + beqlr+ + b 1b +2: + li r3,0 + blr + + COMMPAGE_DESCRIPTOR(spinlock_32_try_mp,_COMM_PAGE_SPINLOCK_TRY,0,k64Bit+kUP,0) + + +spinlock_32_try_up: + mr r5, r3 + li r3, 1 +1: + lwarx r4,0,r5 + cmpwi r4,0 + bne- 2f + stwcx. r5,0,r5 + beqlr+ + b 1b +2: + li r3,0 + blr + + COMMPAGE_DESCRIPTOR(spinlock_32_try_up,_COMM_PAGE_SPINLOCK_TRY,kUP,k64Bit,0) + + +spinlock_32_lock_mp: + li r5,MP_SPIN_TRIES +1: + lwarx r4,0,r3 + cmpwi r4,0 + bne- 2f + stwcx. r3,0,r3 + isync // cancel speculative execution + beqlr+ // we return void + b 1b +2: + subic. r5,r5,1 // try again before relinquish? + bne 1b + ba _COMM_PAGE_RELINQUISH + + COMMPAGE_DESCRIPTOR(spinlock_32_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,0,k64Bit+kUP,0) + + +spinlock_32_lock_up: +1: + lwarx r4,0,r3 + cmpwi r4,0 + bnea- _COMM_PAGE_RELINQUISH // always depress on UP (let lock owner run) + stwcx. r3,0,r3 + beqlr+ // we return void + b 1b + + COMMPAGE_DESCRIPTOR(spinlock_32_lock_up,_COMM_PAGE_SPINLOCK_LOCK,kUP,k64Bit,0) + + +spinlock_32_unlock_mp: + li r4,0 + sync // complete prior stores before unlock + stw r4,0(r3) + blr + + COMMPAGE_DESCRIPTOR(spinlock_32_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,0,k64Bit+kUP,0) + + +spinlock_32_unlock_up: + li r4,0 + stw r4,0(r3) + blr + + COMMPAGE_DESCRIPTOR(spinlock_32_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,kUP,k64Bit,0) + + +spinlock_64_try_mp: + mr r5, r3 + li r3, 1 +1: + lwarx r4,0,r5 + cmpwi r4,0 + bne-- 2f + stwcx. r5,0,r5 + isync // cancel speculative execution + beqlr++ + b 1b +2: + li r6,-4 + stwcx. r5,r6,r1 // clear the pending reservation (using red zone) + li r3,0 // Pass failure + blr + + COMMPAGE_DESCRIPTOR(spinlock_64_try_mp,_COMM_PAGE_SPINLOCK_TRY,k64Bit,kUP,0) + + +spinlock_64_try_up: + mr r5, r3 + li r3, 1 +1: + lwarx r4,0,r5 + cmpwi r4,0 + bne-- 2f + stwcx. r5,0,r5 + beqlr++ + b 1b +2: + li r6,-4 + stwcx. r5,r6,r1 // clear the pending reservation (using red zone) + li r3,0 + blr + + COMMPAGE_DESCRIPTOR(spinlock_64_try_up,_COMM_PAGE_SPINLOCK_TRY,k64Bit+kUP,0,0) + + +spinlock_64_lock_mp: + li r5,MP_SPIN_TRIES +1: + lwarx r4,0,r3 + cmpwi r4,0 + bne-- 2f + stwcx. r3,0,r3 + isync // cancel speculative execution + beqlr++ // we return void + b 1b +2: + li r6,-4 + stwcx. r3,r6,r1 // clear the pending reservation (using red zone) + subic. r5,r5,1 // try again before relinquish? 
+ bne-- 1b // mispredict this one (a cheap back-off) + ba _COMM_PAGE_RELINQUISH + + COMMPAGE_DESCRIPTOR(spinlock_64_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,k64Bit,kUP,0) + + +spinlock_64_lock_up: +1: + lwarx r4,0,r3 + cmpwi r4,0 + bne-- 2f + stwcx. r3,0,r3 + beqlr++ // we return void + b 1b +2: // always relinquish on UP (let lock owner run) + li r6,-4 + stwcx. r3,r6,r1 // clear the pending reservation (using red zone) + ba _COMM_PAGE_RELINQUISH + + COMMPAGE_DESCRIPTOR(spinlock_64_lock_up,_COMM_PAGE_SPINLOCK_LOCK,k64Bit+kUP,0,0) + + +spinlock_64_unlock_mp: + li r4,0 + lwsync // complete prior stores before unlock + stw r4,0(r3) + blr + + COMMPAGE_DESCRIPTOR(spinlock_64_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit,kUP,0) + + +spinlock_64_unlock_up: + li r4,0 + stw r4,0(r3) + blr + + COMMPAGE_DESCRIPTOR(spinlock_64_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit+kUP,0,0) + + +spinlock_relinquish: + mr r12,r3 // preserve lockword ptr across relinquish + li r3,0 // THREAD_NULL + li r4,1 // SWITCH_OPTION_DEPRESS + li r5,1 // timeout (ms) + li r0,-61 // SYSCALL_THREAD_SWITCH + sc // relinquish + mr r3,r12 + ba _COMM_PAGE_SPINLOCK_LOCK + + COMMPAGE_DESCRIPTOR(spinlock_relinquish,_COMM_PAGE_RELINQUISH,0,0,0) + diff --git a/osfmk/ppc/console_feed.c b/osfmk/ppc/console_feed.c index 4c80ebf2d..e8abca4dc 100644 --- a/osfmk/ppc/console_feed.c +++ b/osfmk/ppc/console_feed.c @@ -39,7 +39,7 @@ #include #include #include -#include +#include #if MACH_KDB #include diff --git a/osfmk/ppc/cpu.c b/osfmk/ppc/cpu.c index be9336002..05b49814b 100644 --- a/osfmk/ppc/cpu.c +++ b/osfmk/ppc/cpu.c @@ -41,8 +41,12 @@ #include #include #include +#include #include #include +#include +#include +#include /* TODO: BOGUS TO BE REMOVED */ int real_ncpus = 1; @@ -95,20 +99,15 @@ cpu_control( cpu_subtype != cmd->cmd_cpu_subtype) return(KERN_FAILURE); + if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) { + return(KERN_RESOURCE_SHORTAGE); /* cpu performance facility in use by another task */ + } + switch (cmd->cmd_op) { case PROCESSOR_PM_CLR_PMC: /* Clear Performance Monitor Counters */ switch (cpu_subtype) { - case CPU_SUBTYPE_POWERPC_604: - { - oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */ - mtpmc1(0x0); - mtpmc2(0x0); - ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ - return(KERN_SUCCESS); - } - case CPU_SUBTYPE_POWERPC_604e: case CPU_SUBTYPE_POWERPC_750: case CPU_SUBTYPE_POWERPC_7400: case CPU_SUBTYPE_POWERPC_7450: @@ -127,21 +126,6 @@ cpu_control( case PROCESSOR_PM_SET_REGS: /* Set Performance Monitor Registors */ switch (cpu_subtype) { - case CPU_SUBTYPE_POWERPC_604: - if (count < (PROCESSOR_CONTROL_CMD_COUNT - + PROCESSOR_PM_REGS_COUNT_POWERPC_604)) - return(KERN_FAILURE); - else - { - perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs; - oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */ - mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK); - mtpmc1(PERFMON_PMC1(perf_regs)); - mtpmc2(PERFMON_PMC2(perf_regs)); - ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ - return(KERN_SUCCESS); - } - case CPU_SUBTYPE_POWERPC_604e: case CPU_SUBTYPE_POWERPC_750: if (count < (PROCESSOR_CONTROL_CMD_COUNT + PROCESSOR_PM_REGS_COUNT_POWERPC_750)) @@ -184,17 +168,6 @@ cpu_control( case PROCESSOR_PM_SET_MMCR: switch (cpu_subtype) { - case CPU_SUBTYPE_POWERPC_604: - if (count < (PROCESSOR_CONTROL_CMD_COUNT + - PROCESSOR_PM_REGS_COUNT_POWERPC_604)) - return(KERN_FAILURE); - else - { - perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs; - mtmmcr0(PERFMON_MMCR0(perf_regs) & 
MMCR0_SUPPORT_MASK); - return(KERN_SUCCESS); - } - case CPU_SUBTYPE_POWERPC_604e: case CPU_SUBTYPE_POWERPC_750: if (count < (PROCESSOR_CONTROL_CMD_COUNT + PROCESSOR_PM_REGS_COUNT_POWERPC_750)) @@ -245,11 +218,6 @@ cpu_info_count( switch (flavor) { case PROCESSOR_PM_REGS_INFO: switch (cpu_subtype) { - case CPU_SUBTYPE_POWERPC_604: - *count = PROCESSOR_PM_REGS_COUNT_POWERPC_604; - return(KERN_SUCCESS); - - case CPU_SUBTYPE_POWERPC_604e: case CPU_SUBTYPE_POWERPC_750: *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750; @@ -297,21 +265,6 @@ cpu_info( perf_regs = (processor_pm_regs_t) info; switch (cpu_subtype) { - case CPU_SUBTYPE_POWERPC_604: - - if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_604) - return(KERN_FAILURE); - - oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */ - PERFMON_MMCR0(perf_regs) = mfmmcr0(); - PERFMON_PMC1(perf_regs) = mfpmc1(); - PERFMON_PMC2(perf_regs) = mfpmc2(); - ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ - - *count = PROCESSOR_PM_REGS_COUNT_POWERPC_604; - return(KERN_SUCCESS); - - case CPU_SUBTYPE_POWERPC_604e: case CPU_SUBTYPE_POWERPC_750: if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750) @@ -448,8 +401,8 @@ cpu_start( { struct per_proc_info *proc_info; kern_return_t ret; + mapping *mp; - extern void (*exception_handlers[])(void); extern vm_offset_t intstack; extern vm_offset_t debstack; @@ -473,20 +426,20 @@ cpu_start( proc_info->debstack_top_ss = proc_info->debstackptr; #endif /* MACH_KDP || MACH_KDB */ proc_info->interrupts_enabled = 0; - proc_info->active_kloaded = (unsigned int)&active_kloaded[cpu]; - proc_info->active_stacks = (unsigned int)&active_stacks[cpu]; proc_info->need_ast = (unsigned int)&need_ast[cpu]; proc_info->FPU_owner = 0; proc_info->VMX_owner = 0; - + mp = (mapping *)(&proc_info->ppCIOmp); + mp->mpFlags = 0x01000000 | mpSpecial | 1; + mp->mpSpace = invalSpace; if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) { /* TODO: get mutex lock reset_handler_lock */ resethandler_target.type = RESET_HANDLER_START; - resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu); - resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info); + resethandler_target.call_paddr = (vm_offset_t)_start_cpu; /* Note: these routines are always V=R */ + resethandler_target.arg__paddr = (vm_offset_t)proc_info; /* Note: these routines are always V=R */ ml_phys_write((vm_offset_t)&ResetHandler + 0, resethandler_target.type); @@ -519,6 +472,8 @@ cpu_start( } } +perfTrap perfCpuSigHook = 0; /* Pointer to CHUD cpu signal hook routine */ + /* * Here is where we implement the receiver of the signaling protocol. * We wait for the signal status area to be passed to us. Then we snarf @@ -568,7 +523,7 @@ cpu_signal_handler( switch (holdParm0) { /* Decode SIGP message order */ case SIGPast: /* Should we do an AST? */ - pproc->numSIGPast++; /* Count this one */ + pproc->hwCtr.numSIGPast++; /* Count this one */ #if 0 kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number()); #endif @@ -577,7 +532,7 @@ cpu_signal_handler( case SIGPcpureq: /* CPU specific function? 
*/ - pproc->numSIGPcpureq++; /* Count this one */ + pproc->hwCtr.numSIGPcpureq++; /* Count this one */ switch (holdParm1) { /* Select specific function */ case CPRQtemp: /* Get the temperature */ @@ -618,6 +573,25 @@ cpu_signal_handler( return; + case CPRQsegload: + return; + + case CPRQchud: + parmAddr = (unsigned int *)holdParm2; /* Get the destination address */ + if(perfCpuSigHook) { + struct savearea *ssp = current_act()->mact.pcb; + if(ssp) { + (perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0); + } + } + parmAddr[1] = 0; + parmAddr[0] = 0; /* Show we're done */ + return; + + case CPRQscom: + fwSCOM((scomcomm *)holdParm2); /* Do the function */ + return; + default: panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1); return; @@ -626,14 +600,14 @@ cpu_signal_handler( case SIGPdebug: /* Enter the debugger? */ - pproc->numSIGPdebug++; /* Count this one */ + pproc->hwCtr.numSIGPdebug++; /* Count this one */ debugger_is_slave[cpu]++; /* Bump up the count to show we're here */ hw_atomic_sub(&debugger_sync, 1); /* Show we've received the 'rupt */ __asm__ volatile("tw 4,r3,r3"); /* Enter the debugger */ return; /* All done now... */ case SIGPwake: /* Wake up CPU */ - pproc->numSIGPwake++; /* Count this one */ + pproc->hwCtr.numSIGPwake++; /* Count this one */ return; /* No need to do anything, the interrupt does it all... */ default: @@ -693,12 +667,12 @@ cpu_signal( if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) { /* Is there an unreceived message already pending? */ if(signal == SIGPwake) { /* SIGPwake can merge into all others... */ - mpproc->numSIGPmwake++; /* Account for merged wakes */ + mpproc->hwCtr.numSIGPmwake++; /* Account for merged wakes */ return KERN_SUCCESS; } if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) { /* We can merge ASTs */ - mpproc->numSIGPmast++; /* Account for merged ASTs */ + mpproc->hwCtr.numSIGPmast++; /* Account for merged ASTs */ return KERN_SUCCESS; /* Don't bother to send this one... */ } @@ -706,7 +680,7 @@ cpu_signal( if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) { busybitset = 1; - mpproc->numSIGPmwake++; + mpproc->hwCtr.numSIGPmwake++; } } } @@ -714,7 +688,7 @@ cpu_signal( if((busybitset == 0) && (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy, (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) { /* Try to lock the message block with a .5ms timeout */ - mpproc->numSIGPtimo++; /* Account for timeouts */ + mpproc->hwCtr.numSIGPtimo++; /* Account for timeouts */ return KERN_FAILURE; /* Timed out, take your ball and go home... 
*/ } @@ -738,7 +712,8 @@ void cpu_doshutdown( void) { - processor_doshutdown(current_processor()); + enable_preemption(); + processor_offline(current_processor()); } void @@ -746,17 +721,14 @@ cpu_sleep( void) { struct per_proc_info *proc_info; - unsigned int cpu; + unsigned int cpu, i; + unsigned int wait_ncpus_sleep, ncpus_sleep; facility_context *fowner; - extern void (*exception_handlers[])(void); extern vm_offset_t intstack; extern vm_offset_t debstack; extern void _restart_cpu(void); cpu = cpu_number(); -#if 0 - kprintf("******* About to sleep cpu %d\n", cpu); -#endif proc_info = &per_proc_info[cpu]; @@ -782,8 +754,8 @@ cpu_sleep( extern void _start_cpu(void); resethandler_target.type = RESET_HANDLER_START; - resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu); - resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info); + resethandler_target.call_paddr = (vm_offset_t)_start_cpu; /* Note: these routines are always V=R */ + resethandler_target.arg__paddr = (vm_offset_t)proc_info; /* Note: these routines are always V=R */ ml_phys_write((vm_offset_t)&ResetHandler + 0, resethandler_target.type); @@ -795,6 +767,16 @@ cpu_sleep( __asm__ volatile("sync"); __asm__ volatile("isync"); } + + wait_ncpus_sleep = real_ncpus-1; + ncpus_sleep = 0; + while (wait_ncpus_sleep != ncpus_sleep) { + ncpus_sleep = 0; + for(i=1; i < real_ncpus ; i++) { + if ((*(volatile short *)&per_proc_info[i].cpu_flags) & SleepState) + ncpus_sleep++; + } + } } PE_cpu_machine_quiesce(proc_info->cpu_id); diff --git a/osfmk/ppc/cpu_capabilities.h b/osfmk/ppc/cpu_capabilities.h index 2b43b9da4..d142b9086 100644 --- a/osfmk/ppc/cpu_capabilities.h +++ b/osfmk/ppc/cpu_capabilities.h @@ -81,5 +81,115 @@ extern int _cpu_capabilities; static __inline__ int _NumCPUs( void ) { return (_cpu_capabilities & kNumCPUs) >> kNumCPUsShift; } #endif /* __ASSEMBLER__ */ + + +/* + * The shared kernel/user "comm page(s)": + * + * The last eight pages of every address space are reserved for the kernel/user + * "comm area". Because they can be addressed via a sign-extended 16-bit field, + * it is particularly efficient to access code or data in the comm area with + * absolute branches (ba, bla, bca) or absolute load/stores ("lwz r0,-4096(0)"). + * Because the comm area can be reached from anywhere, dyld is not needed. + * Although eight pages are reserved, presently only two are populated and mapped. + * + * Routines on the comm page(s) can be thought of as the firmware for extended processor + * instructions, whose opcodes are special forms of "bla". Ie, they are cpu + * capabilities. During system initialization, the kernel populates the comm page with + * code customized for the particular processor and platform. + * + * Because Mach VM cannot map the last page of an address space, the max length of + * the comm area is seven pages. 
+ */ + +#define _COMM_PAGE_BASE_ADDRESS (-8*4096) // start at page -8, ie 0xFFFF8000 +#define _COMM_PAGE_AREA_LENGTH ( 7*4096) // reserved length of entire comm area +#define _COMM_PAGE_AREA_USED ( 2*4096) // we use two pages so far + +/* data in the comm page */ + +#define _COMM_PAGE_SIGNATURE (_COMM_PAGE_BASE_ADDRESS+0x000) // first few bytes are a signature +#define _COMM_PAGE_VERSION (_COMM_PAGE_BASE_ADDRESS+0x01E) // 16-bit version# +#define _COMM_PAGE_THIS_VERSION 1 // this is version 1 of the commarea format + +#define _COMM_PAGE_CPU_CAPABILITIES (_COMM_PAGE_BASE_ADDRESS+0x020) // mirror of extern int _cpu_capabilities +#define _COMM_PAGE_NCPUS (_COMM_PAGE_BASE_ADDRESS+0x021) // number of configured CPUs +#define _COMM_PAGE_ALTIVEC (_COMM_PAGE_BASE_ADDRESS+0x024) // nonzero if Altivec available +#define _COMM_PAGE_64_BIT (_COMM_PAGE_BASE_ADDRESS+0x025) // nonzero if 64-bit processor +#define _COMM_PAGE_CACHE_LINESIZE (_COMM_PAGE_BASE_ADDRESS+0x026) // cache line size (16-bit field) + +#define _COMM_PAGE_UNUSED1 (_COMM_PAGE_BASE_ADDRESS+0x030) // 16 unused bytes + +#define _COMM_PAGE_2_TO_52 (_COMM_PAGE_BASE_ADDRESS+0x040) // double float constant 2**52 +#define _COMM_PAGE_10_TO_6 (_COMM_PAGE_BASE_ADDRESS+0x048) // double float constant 10**6 + +#define _COMM_PAGE_UNUSED2 (_COMM_PAGE_BASE_ADDRESS+0x050) // 16 unused bytes + +#define _COMM_PAGE_TIMEBASE (_COMM_PAGE_BASE_ADDRESS+0x060) // used by gettimeofday() +#define _COMM_PAGE_TIMESTAMP (_COMM_PAGE_BASE_ADDRESS+0x068) // used by gettimeofday() +#define _COMM_PAGE_SEC_PER_TICK (_COMM_PAGE_BASE_ADDRESS+0x070) // used by gettimeofday() + +#define _COMM_PAGE_UNUSED3 (_COMM_PAGE_BASE_ADDRESS+0x080) // 384 unused bytes + + /* jump table (bla to this address, which may be a branch to the actual code somewhere else) */ + /* When new jump table entries are added, corresponding symbols should be added below */ + +#define _COMM_PAGE_ABSOLUTE_TIME (_COMM_PAGE_BASE_ADDRESS+0x200) // mach_absolute_time() +#define _COMM_PAGE_SPINLOCK_TRY (_COMM_PAGE_BASE_ADDRESS+0x220) // spinlock_try() +#define _COMM_PAGE_SPINLOCK_LOCK (_COMM_PAGE_BASE_ADDRESS+0x260) // spinlock_lock() +#define _COMM_PAGE_SPINLOCK_UNLOCK (_COMM_PAGE_BASE_ADDRESS+0x2a0) // spinlock_unlock() +#define _COMM_PAGE_PTHREAD_GETSPECIFIC (_COMM_PAGE_BASE_ADDRESS+0x2c0) // pthread_getspecific() +#define _COMM_PAGE_GETTIMEOFDAY (_COMM_PAGE_BASE_ADDRESS+0x2e0) // used by gettimeofday() +#define _COMM_PAGE_FLUSH_DCACHE (_COMM_PAGE_BASE_ADDRESS+0x4e0) // sys_dcache_flush() +#define _COMM_PAGE_FLUSH_ICACHE (_COMM_PAGE_BASE_ADDRESS+0x520) // sys_icache_invalidate() +#define _COMM_PAGE_PTHREAD_SELF (_COMM_PAGE_BASE_ADDRESS+0x580) // pthread_self() +#define _COMM_PAGE_UNUSED4 (_COMM_PAGE_BASE_ADDRESS+0x5a0) // 32 unused bytes +#define _COMM_PAGE_RELINQUISH (_COMM_PAGE_BASE_ADDRESS+0x5c0) // used by spinlocks + +#define _COMM_PAGE_UNUSED5 (_COMM_PAGE_BASE_ADDRESS+0x5e0) // 32 unused bytes + +#define _COMM_PAGE_BZERO (_COMM_PAGE_BASE_ADDRESS+0x600) // bzero() +#define _COMM_PAGE_BCOPY (_COMM_PAGE_BASE_ADDRESS+0x780) // bcopy() +#define _COMM_PAGE_MEMCPY (_COMM_PAGE_BASE_ADDRESS+0x7a0) // memcpy() +#define _COMM_PAGE_MEMMOVE (_COMM_PAGE_BASE_ADDRESS+0x7a0) // memmove() + +#define _COMM_PAGE_UNUSED6 (_COMM_PAGE_BASE_ADDRESS+0xF80) // 128 unused bytes + +#define _COMM_PAGE_BIGCOPY (_COMM_PAGE_BASE_ADDRESS+0x1000)// very-long-operand copies + +#define _COMM_PAGE_END (_COMM_PAGE_BASE_ADDRESS+0x1600)// end of common page + +#ifdef __ASSEMBLER__ +#ifdef __COMM_PAGE_SYMBOLS + +#define 
CREATE_COMM_PAGE_SYMBOL(symbol_name, symbol_address) \ + .org (symbol_address - _COMM_PAGE_BASE_ADDRESS) @\ +symbol_name: nop + + .text // Required to make a well behaved symbol file + + CREATE_COMM_PAGE_SYMBOL(___mach_absolute_time, _COMM_PAGE_ABSOLUTE_TIME) + CREATE_COMM_PAGE_SYMBOL(___spin_lock_try, _COMM_PAGE_SPINLOCK_TRY) + CREATE_COMM_PAGE_SYMBOL(___spin_lock, _COMM_PAGE_SPINLOCK_LOCK) + CREATE_COMM_PAGE_SYMBOL(___spin_unlock, _COMM_PAGE_SPINLOCK_UNLOCK) + CREATE_COMM_PAGE_SYMBOL(___pthread_getspecific, _COMM_PAGE_PTHREAD_GETSPECIFIC) + CREATE_COMM_PAGE_SYMBOL(___gettimeofday, _COMM_PAGE_GETTIMEOFDAY) + CREATE_COMM_PAGE_SYMBOL(___sys_dcache_flush, _COMM_PAGE_FLUSH_DCACHE) + CREATE_COMM_PAGE_SYMBOL(___sys_icache_invalidate, _COMM_PAGE_FLUSH_ICACHE) + CREATE_COMM_PAGE_SYMBOL(___pthread_self, _COMM_PAGE_PTHREAD_SELF) + CREATE_COMM_PAGE_SYMBOL(___spin_lock_relinquish, _COMM_PAGE_RELINQUISH) + CREATE_COMM_PAGE_SYMBOL(___bzero, _COMM_PAGE_BZERO) + CREATE_COMM_PAGE_SYMBOL(___bcopy, _COMM_PAGE_BCOPY) + CREATE_COMM_PAGE_SYMBOL(___memcpy, _COMM_PAGE_MEMCPY) +// CREATE_COMM_PAGE_SYMBOL(___memmove, _COMM_PAGE_MEMMOVE) + CREATE_COMM_PAGE_SYMBOL(___bigcopy, _COMM_PAGE_BIGCOPY) + CREATE_COMM_PAGE_SYMBOL(___end_comm_page, _COMM_PAGE_END) + + .data // Required to make a well behaved symbol file + .long 0 // Required to make a well behaved symbol file + +#endif /* __COMM_PAGE_SYMBOLS */ +#endif /* __ASSEMBLER__ */ + #endif /* __APPLE_API_PRIVATE */ #endif /* _PPC_CPU_CAPABILITIES_H */ diff --git a/osfmk/ppc/cpu_data.h b/osfmk/ppc/cpu_data.h index 5e5ef54bb..34c2ff7ea 100644 --- a/osfmk/ppc/cpu_data.h +++ b/osfmk/ppc/cpu_data.h @@ -30,6 +30,12 @@ #ifndef PPC_CPU_DATA #define PPC_CPU_DATA +typedef struct +{ + int preemption_level; + int simple_lock_count; + int interrupt_level; +} cpu_data_t; #define disable_preemption _disable_preemption #define enable_preemption _enable_preemption @@ -38,18 +44,25 @@ #define mp_enable_preemption _enable_preemption #define mp_enable_preemption_no_check _enable_preemption_no_check -extern cpu_data_t* get_cpu_data(void); - extern __inline__ thread_act_t current_act(void) { thread_act_t act; __asm__ volatile("mfsprg %0,1" : "=r" (act)); return act; -} +}; + +/* + * Note that the following function is ONLY guaranteed when preemption or interrupts are disabled + */ +extern __inline__ struct per_proc_info *getPerProc(void) +{ + struct per_proc_info *perproc; + __asm__ volatile("mfsprg %0,0" : "=r" (perproc)); + return perproc; +}; #define current_thread() current_act()->thread -extern void set_machine_current_thread(thread_t); extern void set_machine_current_act(thread_act_t); extern int get_preemption_level(void); diff --git a/osfmk/ppc/cswtch.s b/osfmk/ppc/cswtch.s index cc81fc49e..b5bf7db2c 100644 --- a/osfmk/ppc/cswtch.s +++ b/osfmk/ppc/cswtch.s @@ -41,66 +41,36 @@ .text /* - * void load_context(thread_t thread) + * void machine_load_context(thread_t thread) * - * Load the context for the first kernel thread, and go. - * - * NOTE - if DEBUG is set, the former routine is a piece - * of C capable of printing out debug info before calling the latter, - * otherwise both entry points are identical. + * Load the context for the first thread to run on a + * cpu, and go. */ .align 5 - .globl EXT(load_context) - -LEXT(load_context) - - .globl EXT(Load_context) - -LEXT(Load_context) - -/* - * Since this is the first thread, we came in on the interrupt - * stack. 
The first thread never returns, so there is no need to - e worry about saving its frame, hence we can reset the istackptr - * back to the saved_state structure at it's top - */ - + .globl EXT(machine_load_context) -/* - * get new thread pointer and set it into the active_threads pointer - * - */ - +LEXT(machine_load_context) mfsprg r6,0 lwz r0,PP_INTSTACK_TOP_SS(r6) stw r0,PP_ISTACKPTR(r6) - stw r3,PP_ACTIVE_THREAD(r6) - -/* Find the new stack and store it in active_stacks */ - - lwz r12,PP_ACTIVE_STACKS(r6) - lwz r1,THREAD_KERNEL_STACK(r3) - lwz r9,THREAD_TOP_ACT(r3) /* Point to the active activation */ - mtsprg 1,r9 - stw r1,0(r12) + lwz r9,THREAD_TOP_ACT(r3) /* Set up the current thread */ + mtsprg 1,r9 li r0,0 /* Clear a register */ - lwz r8,ACT_MACT_PCB(r9) /* Get the savearea used */ - rlwinm r7,r8,0,0,19 /* Switch to savearea base */ - lwz r11,SAVprev(r8) /* Get the previous savearea */ + lwz r3,ACT_MACT_PCB(r9) /* Get the savearea used */ mfmsr r5 /* Since we are passing control, get our MSR values */ - lwz r1,saver1(r8) /* Load new stack pointer */ - stw r0,saver3(r8) /* Make sure we pass in a 0 for the continuation */ - lwz r7,SACvrswap(r7) /* Get the translation from virtual to real */ + lwz r11,SAVprev+4(r3) /* Get the previous savearea */ + lwz r1,saver1+4(r3) /* Load new stack pointer */ + stw r0,saver3+4(r3) /* Make sure we pass in a 0 for the continuation */ stw r0,FM_BACKPTR(r1) /* zero backptr */ - stw r5,savesrr1(r8) /* Pass our MSR to the new guy */ - xor r3,r7,r8 /* Get the physical address of the new context save area */ + stw r5,savesrr1+4(r3) /* Pass our MSR to the new guy */ stw r11,ACT_MACT_PCB(r9) /* Unstack our savearea */ - b EXT(exception_exit) /* Go end it all... */ + stw r0,ACT_PREEMPT_CNT(r9) /* Enable preemption */ + b EXT(exception_exit) /* Go for it */ -/* struct thread_shuttle *Switch_context(struct thread_shuttle *old, - * void (*cont)(void), - * struct thread_shuttle *new) +/* thread_t Switch_context(thread_t old, + * void (*cont)(void), + * thread_t new) * * Switch from one thread to another. If a continuation is supplied, then * we do not need to save callee save registers. @@ -122,19 +92,18 @@ LEXT(Call_continuation) /* * Get the old kernel stack, and store into the thread structure. * See if a continuation is supplied, and skip state save if so. - * NB. Continuations are no longer used, so this test is omitted, - * as should the second argument, but it is in generic code. - * We always save state. This does not hurt even if continuations - * are put back in. + * + * Note that interrupts must be disabled before we get here (i.e., splsched) */ /* Context switches are double jumps. We pass the following to the * context switch firmware call: * - * R3 = switchee's savearea + * R3 = switchee's savearea, virtual if continuation, low order physical for full switch * R4 = old thread * R5 = new SRR0 * R6 = new SRR1 + * R7 = high order physical address of savearea for full switch * * savesrr0 is set to go to switch_in * savesrr1 is set to uninterruptible with translation on @@ -147,168 +116,139 @@ LEXT(Call_continuation) LEXT(Switch_context) mfsprg r12,0 ; Get the per_proc block - lwz r10,PP_ACTIVE_STACKS(r12) ; Get the pointer to the current stack #if DEBUG - lwz r11,PP_ISTACKPTR(r12) ; (DEBUG/TRACE) make sure we are not - mr. r11,r11 ; (DEBUG/TRACE) on the interrupt - bne+ notonintstack ; (DEBUG/TRACE) stack + lwz r0,PP_ISTACKPTR(r12) ; (DEBUG/TRACE) make sure we are not + mr. 
r0,r0 ; (DEBUG/TRACE) on the interrupt + bne++ notonintstack ; (DEBUG/TRACE) stack BREAKPOINT_TRAP notonintstack: #endif - stw r4,THREAD_CONTINUATION(r3) ; Set continuation into the thread - cmpwi cr1,r4,0 ; used waaaay down below - lwz r7,0(r10) ; Get the current stack -/* - * Make the new thread the current thread. - */ - - stw r7,THREAD_KERNEL_STACK(r3) ; Remember the current stack in the thread (do not need???) - stw r5, PP_ACTIVE_THREAD(r12) ; Make the new thread current - - lwz r11,THREAD_KERNEL_STACK(r5) ; Get the new stack pointer - lwz r5,THREAD_TOP_ACT(r5) ; Get the new activation - mtsprg 1,r5 - lwz r7,CTHREAD_SELF(r5) ; Pick up the user assist word lwz r8,ACT_MACT_PCB(r5) ; Get the PCB for the new guy + lwz r9,cioSpace(r5) ; Get copyin/out address space + cmpwi cr1,r4,0 ; Remember if there is a continuation - used waaaay down below + lwz r7,CTHREAD_SELF(r5) ; Pick up the user assist word + lwz r11,ACT_MACT_BTE(r5) ; Get BlueBox Task Environment + lwz r6,cioRelo(r5) ; Get copyin/out relocation top + mtsprg 1,r5 + lwz r2,cioRelo+4(r5) ; Get copyin/out relocation bottom -#if 0 - lwz r0,SAVflags(r8) ; (TEST/DEBUG) - rlwinm r0,r0,24,24,31 ; (TEST/DEBUG) - cmplwi r0,SAVempty ; (TEST/DEBUG) - bne+ nnnn ; (TEST/DEBUG) - b . ; (TEST/DEBUG) -nnnn: ; (TEST/DEBUG) -#endif - - stw r11,0(r10) ; Save the new kernel stack address stw r7,UAW(r12) ; Save the assist word for the "ultra fast path" - - lwz r11,ACT_MACT_BTE(r5) ; Get BlueBox Task Environment - + lwz r7,ACT_MACT_SPF(r5) ; Get the special flags - lwz r10,ACT_KLOADED(r5) + sth r9,ppCIOmp+mpSpace(r12) ; Save the space + stw r6,ppCIOmp+mpNestReloc(r12) ; Save top part of physical address + stw r2,ppCIOmp+mpNestReloc+4(r12) ; Save bottom part of physical address stw r11,ppbbTaskEnv(r12) ; Save the bb task env - li r0,0 - cmpwi cr0,r10,0 - lwz r10,PP_ACTIVE_KLOADED(r12) + lwz r2,traceMask(0) ; Get the enabled traces stw r7,spcFlags(r12) ; Set per_proc copy of the special flags - beq cr0,.L_sw_ctx_not_kld - - stw r5,0(r10) - b .L_sw_ctx_cont - -.L_sw_ctx_not_kld: - stw r0,0(r10) /* act_kloaded = 0 */ - -.L_sw_ctx_cont: - lis r10,hi16(EXT(trcWork)) ; Get top of trace mask - rlwinm r7,r8,0,0,19 /* Switch to savearea base */ - ori r10,r10,lo16(EXT(trcWork)) ; Get bottom of mask - lwz r11,SAVprev(r8) /* Get the previous of the switchee's savearea */ - lwz r10,traceMask(r10) ; Get the enabled traces lis r0,hi16(CutTrace) ; Trace FW call - mr. r10,r10 ; Any tracing going on? + mr. r2,r2 ; Any tracing going on? + lwz r11,SAVprev+4(r8) ; Get the previous of the switchee savearea ori r0,r0,lo16(CutTrace) ; Trace FW call - beq+ cswNoTrc ; No trace today, dude...
mr r10,r3 ; Save across trace lwz r2,THREAD_TOP_ACT(r3) ; Trace old activation mr r3,r11 ; Trace prev savearea sc ; Cut trace entry of context switch mr r3,r10 ; Restore -cswNoTrc: mfmsr r6 /* Get the MSR because the switched to thread should inherit it */ - lwz r7,SACvrswap(r7) /* Get the translation from virtual to real */ - stw r11,ACT_MACT_PCB(r5) /* Dequeue the savearea we're switching to */ - - rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 /* Turn off the FP */ - lwz r2,curctx(r5) ; Grab our current context pointer - rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 /* Turn off the vector */ - mr r4,r3 /* Save our old thread to pass back */ - - lhz r0,PP_CPU_NUMBER(r12) ; Get our CPU number +cswNoTrc: lwz r2,curctx(r5) ; Grab our current context pointer lwz r10,FPUowner(r12) ; Grab the owner of the FPU lwz r9,VMXowner(r12) ; Grab the owner of the vector + lhz r0,PP_CPU_NUMBER(r12) ; Get our CPU number + mfmsr r6 ; Get the MSR because the switched to thread should inherit it + stw r11,ACT_MACT_PCB(r5) ; Dequeue the savearea we are switching to + li r0,1 ; Get set to hold off quickfret + + rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Turn off the FP cmplw r10,r2 ; Do we have the live float context? lwz r10,FPUlevel(r2) ; Get the live level + mr r4,r3 ; Save our old thread to pass back cmplw cr5,r9,r2 ; Do we have the live vector context? - bne+ cswnofloat ; Float is not ours... + rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Turn off the vector + stw r0,holdQFret(r12) ; Make sure we hold off releasing quickfret + bne++ cswnofloat ; Float is not ours... cmplw r10,r11 ; Is the level the same? lwz r5,FPUcpu(r2) ; Get the owning cpu - bne+ cswnofloat ; Level not the same, this is not live... + bne++ cswnofloat ; Level not the same, this is not live... cmplw r5,r0 ; Still owned by this cpu? lwz r10,FPUsave(r2) ; Get the level - bne+ cswnofloat ; CPU claimed by someone else... + bne++ cswnofloat ; CPU claimed by someone else... mr. r10,r10 ; Is there a savearea here? ori r6,r6,lo16(MASK(MSR_FP)) ; Enable floating point - beq- cswnofloat ; No savearea to check... + beq-- cswnofloat ; No savearea to check... lwz r3,SAVlevel(r10) ; Get the level - lwz r5,SAVprev(r10) ; Get the previous of this savearea + lwz r5,SAVprev+4(r10) ; Get the previous of this savearea cmplw r3,r11 ; Is it for the current level? - bne+ cswnofloat ; Nope... + bne++ cswnofloat ; Nope... 
stw r5,FPUsave(r2) ; Pop off this savearea - rlwinm r5,r10,0,0,19 ; Move back to start of page - lwz r5,SACvrswap(r5) ; Get the virtual to real conversion - la r9,quickfret(r12) ; Point to the quickfret chain header - xor r5,r10,r5 ; Convert savearea address to real + + rlwinm r3,r10,0,0,19 ; Move back to start of page + + lwz r5,quickfret(r12) ; Get the first in quickfret list (top) + lwz r9,quickfret+4(r12) ; Get the first in quickfret list (bottom) + lwz r7,SACvrswap(r3) ; Get the virtual to real conversion (top) + lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom) + stw r5,SAVprev(r10) ; Link the old in (top) + stw r9,SAVprev+4(r10) ; Link the old in (bottom) + xor r3,r10,r3 ; Convert to physical + stw r7,quickfret(r12) ; Set the first in quickfret list (top) + stw r3,quickfret+4(r12) ; Set the first in quickfret list (bottom) #if FPVECDBG lis r0,hi16(CutTrace) ; (TEST/DEBUG) + mr r7,r2 ; (TEST/DEBUG) li r2,0x4401 ; (TEST/DEBUG) oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG) sc ; (TEST/DEBUG) lhz r0,PP_CPU_NUMBER(r12) ; (TEST/DEBUG) + mr r2,r7 ; (TEST/DEBUG) #endif -; -; Note: we need to do the atomic operation here because, even though -; it is impossible with the current implementation, that we may take a -; PTE miss between the load of the quickfret anchor and the subsequent -; store. The interrupt handler will dequeue everything on the list and -; we could end up using stale data. I do not like doing this... -; - -cswfpudq: lwarx r3,0,r9 ; Pick up the old chain head - stw r3,SAVprev(r10) ; Move it to the current guy - stwcx. r5,0,r9 ; Save it - bne- cswfpudq ; Someone chaged the list... - -cswnofloat: bne+ cr5,cswnovect ; Vector is not ours... +cswnofloat: bne++ cr5,cswnovect ; Vector is not ours... lwz r10,VMXlevel(r2) ; Get the live level cmplw r10,r11 ; Is the level the same? lwz r5,VMXcpu(r2) ; Get the owning cpu - bne+ cswnovect ; Level not the same, this is not live... + bne++ cswnovect ; Level not the same, this is not live... cmplw r5,r0 ; Still owned by this cpu? lwz r10,VMXsave(r2) ; Get the level - bne+ cswnovect ; CPU claimed by someone else... + bne++ cswnovect ; CPU claimed by someone else... mr. r10,r10 ; Is there a savearea here? oris r6,r6,hi16(MASK(MSR_VEC)) ; Enable vector - beq- cswnovect ; No savearea to check... + beq-- cswnovect ; No savearea to check... lwz r3,SAVlevel(r10) ; Get the level - lwz r5,SAVprev(r10) ; Get the previous of this savearea + lwz r5,SAVprev+4(r10) ; Get the previous of this savearea cmplw r3,r11 ; Is it for the current level? - bne+ cswnovect ; Nope... + bne++ cswnovect ; Nope... 
stw r5,VMXsave(r2) ; Pop off this savearea - rlwinm r5,r10,0,0,19 ; Move back to start of page - lwz r5,SACvrswap(r5) ; Get the virtual to real conversion - la r9,quickfret(r12) ; Point to the quickfret chain header - xor r5,r10,r5 ; Convert savearea address to real + rlwinm r3,r10,0,0,19 ; Move back to start of page + + lwz r5,quickfret(r12) ; Get the first in quickfret list (top) + lwz r9,quickfret+4(r12) ; Get the first in quickfret list (bottom) + lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top) + lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom) + stw r5,SAVprev(r10) ; Link the old in (top) + stw r9,SAVprev+4(r10) ; Link the old in (bottom) + xor r3,r10,r3 ; Convert to physical + stw r2,quickfret(r12) ; Set the first in quickfret list (top) + stw r3,quickfret+4(r12) ; Set the first in quickfret list (bottom) #if FPVECDBG lis r0,hi16(CutTrace) ; (TEST/DEBUG) @@ -317,31 +257,23 @@ cswnofloat: bne+ cr5,cswnovect ; Vector is not ours... sc ; (TEST/DEBUG) #endif -; -; Note: we need to do the atomic operation here because, even though -; it is impossible with the current implementation, that we may take a -; PTE miss between the load of the quickfret anchor and the subsequent -; store. The interrupt handler will dequeue everything on the list and -; we could end up using stale data. I do not like doing this... -; +cswnovect: li r0,0 ; Get set to release quickfret holdoff + rlwinm r11,r8,0,0,19 ; Switch to savearea base + lis r9,hi16(EXT(switch_in)) ; Get top of switch in routine + lwz r5,savesrr0+4(r8) ; Set up the new SRR0 + lwz r7,SACvrswap(r11) ; Get the high order V to R translation + lwz r11,SACvrswap+4(r11) ; Get the low order V to R translation + ori r9,r9,lo16(EXT(switch_in)) ; Bottom half of switch in + stw r0,holdQFret(r12) ; Make sure we release quickfret holdoff + stw r9,savesrr0+4(r8) ; Make us jump to the switch in routine -cswvecdq: lwarx r3,0,r9 ; Pick up the old chain head - stw r3,SAVprev(r10) ; Move it to the current guy - stwcx. r5,0,r9 ; Save it - bne- cswvecdq ; Someone chaged the list... 
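The two hunks above replace the old cswfpudq/cswvecdq reservation loops: instead of a lwarx/stwcx. retry to guard against the interrupt handler draining quickfret mid-update, the new code sets holdQFret at cswNoTrc, links the savearea in with plain stores, and clears the hold-off again at cswnovect. A minimal C sketch of the idea, with illustrative names only (per_proc, hold_qfret, quickfret_push are stand-ins, not xnu's declarations):

    #include <stdint.h>

    struct savearea {
        uint32_t sav_prev_hi;          /* SAVprev: high word of the 64-bit link */
        uint32_t sav_prev_lo;          /* SAVprev+4: low word of the link */
        /* ... saved register state ... */
    };

    struct per_proc {
        volatile uint32_t hold_qfret;  /* holdQFret: nonzero defers list drain */
        uint32_t quickfret_hi;         /* quickfret: head of free chain (top) */
        uint32_t quickfret_lo;         /* quickfret+4: head of chain (bottom) */
    };

    /* Push sv onto the quickfret free chain. The caller has already set
     * pp->hold_qfret = 1 (as at cswNoTrc), so the interrupt path will not
     * dequeue the list between these four stores. phys_hi/phys_lo are the
     * savearea's physical address, from SACvrswap XOR the virtual address. */
    static void quickfret_push(struct per_proc *pp, struct savearea *sv,
                               uint32_t phys_hi, uint32_t phys_lo)
    {
        sv->sav_prev_hi = pp->quickfret_hi;  /* link the old head in (top) */
        sv->sav_prev_lo = pp->quickfret_lo;  /* link the old head in (bottom) */
        pp->quickfret_hi = phys_hi;          /* set the new head (top) */
        pp->quickfret_lo = phys_lo;          /* set the new head (bottom) */
    }

The hold-off trades one word store for the reservation loop; once cswnovect stores 0 back to holdQFret, the interrupt handler is free to drain the chain again.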
- -cswnovect: lis r9,hi16(EXT(switch_in)) /* Get top of switch in routine */ - lwz r5,savesrr0(r8) /* Set up the new SRR0 */ - ori r9,r9,lo16(EXT(switch_in)) /* Bottom half of switch in */ + lwz r9,SAVflags(r8) /* Get the flags */ lis r0,hi16(SwitchContextCall) /* Top part of switch context */ - stw r9,savesrr0(r8) /* Make us jump to the switch in routine */ - li r10,MSR_SUPERVISOR_INT_OFF /* Get the switcher's MSR */ - lwz r9,SAVflags(r8) /* Get the flags */ - stw r10,savesrr1(r8) /* Set up for switch in */ - rlwinm r9,r9,0,15,13 /* Reset the syscall flag */ ori r0,r0,lo16(SwitchContextCall) /* Bottom part of switch context */ - xor r3,r7,r8 /* Get the physical address of the new context save area */ + stw r10,savesrr1+4(r8) /* Set up for switch in */ + rlwinm r9,r9,0,15,13 /* Reset the syscall flag */ + xor r3,r11,r8 /* Get the physical address of the new context save area */ stw r9,SAVflags(r8) /* Set the flags */ bne cr1,swtchtocont ; Switch to the continuation @@ -363,9 +295,11 @@ cswnovect: lis r9,hi16(EXT(switch_in)) /* Get top of switch in routine */ swtchtocont: - stw r5,savesrr0(r8) ; Set the pc - stw r6,savesrr1(r8) ; Set the next MSR to use - stw r4,saver3(r8) ; Make sure we pass back the old thread + + stw r5,savesrr0+4(r8) ; Set the pc + stw r6,savesrr1+4(r8) ; Set the next MSR to use + stw r4,saver3+4(r8) ; Make sure we pass back the old thread + mr r3,r8 ; Pass in the virtual address of savearea b EXT(exception_exit) ; Blocking on continuation, toss old context... @@ -378,7 +312,7 @@ swtchtocont: * with translation on. If we could, this should be done in lowmem_vectors * before translation is turned on. But we can't, dang it! * - * R3 = switcher's savearea + * R3 = switcher's savearea (32-bit virtual) * saver4 = old thread in switcher's save * saver5 = new SRR0 in switcher's save * saver6 = new SRR1 in switcher's save @@ -391,31 +325,30 @@ swtchtocont: .globl EXT(switch_in) LEXT(switch_in) - - lwz r4,saver4(r3) /* Get the old thread */ - lwz r9,THREAD_TOP_ACT(r4) /* Get the switched from ACT */ - lwz r5,saver5(r3) /* Get the srr0 value */ - lwz r10,ACT_MACT_PCB(r9) /* Get the top PCB on the old thread */ - lwz r6,saver6(r3) /* Get the srr1 value */ - stw r3,ACT_MACT_PCB(r9) /* Put the new one on top */ - stw r10,SAVprev(r3) /* Chain on the old one */ + lwz r4,saver4+4(r3) ; Get the old thread + lwz r5,saver5+4(r3) ; Get the srr0 value + + mfsprg r0,2 ; Get feature flags + lwz r9,THREAD_TOP_ACT(r4) ; Get the switched from ACT + lwz r6,saver6+4(r3) ; Get the srr1 value + rlwinm. r0,r0,0,pf64Bitb,pf64Bitb ; Check for 64-bit + lwz r10,ACT_MACT_PCB(r9) ; Get the top PCB on the old thread - mr r3,r4 /* Pass back the old thread */ + stw r3,ACT_MACT_PCB(r9) ; Put the new one on top + stw r10,SAVprev+4(r3) ; Chain on the old one - mtsrr0 r5 /* Set return point */ - mtsrr1 r6 /* Set return MSR */ - rfi /* Jam... */ - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 + mr r3,r4 ; Pass back the old thread + mtsrr0 r5 ; Set return point + mtsrr1 r6 ; Set return MSR + + bne++ siSixtyFour ; Go do 64-bit... + rfi ; Jam... + +siSixtyFour: + rfid ; Jam... 
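In outline, the reworked switch_in pushes the switcher's savearea onto the old activation's PCB chain (note the link now lives in the low word, SAVprev+4) and returns through rfid on 64-bit parts. A hedged C rendering, with stand-in struct and field names rather than the real THREAD_TOP_ACT/ACT_MACT_PCB layout:

    #include <stdint.h>

    struct savearea {
        uint64_t sav_prev;               /* SAVprev; 32-bit link in low word */
        uint64_t saver4, saver5, saver6; /* low words used by 32-bit kernels */
    };
    struct activation { struct savearea *pcb; };        /* ACT_MACT_PCB */
    struct thread     { struct activation *top_act; };  /* THREAD_TOP_ACT */

    static struct thread *switch_in_sketch(struct savearea *sv)
    {
        struct thread *old = (struct thread *)(uintptr_t)(uint32_t)sv->saver4;
        struct activation *act = old->top_act;

        sv->sav_prev = (uint32_t)(uintptr_t)act->pcb;  /* chain the old top on */
        act->pcb = sv;                                 /* put the new one on top */

        /* SRR0 <- saver5, SRR1 <- saver6, r3 <- old thread; then the new
         * wrinkle: when the pf64Bit feature flag (from sprg2) is set, return
         * with rfid, which restores the full 64-bit MSR, else plain rfi. */
        return old;
    }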
/* * void fpu_save(facility_context ctx) @@ -431,12 +364,14 @@ LEXT(switch_in) LEXT(fpu_save) + lis r2,hi16(MASK(MSR_VEC)) ; Get the vector enable + li r12,lo16(MASK(MSR_EE)) ; Get the EE bit + ori r2,r2,lo16(MASK(MSR_FP)) ; Get FP mfmsr r0 ; Get the MSR - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r2,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; But do interrupts only for now + andc r0,r0,r2 ; Clear FP, VEC + andc r2,r0,r12 ; Clear EE ori r2,r2,MASK(MSR_FP) ; Enable the floating point feature for now also - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off mtmsr r2 ; Set the MSR isync @@ -451,7 +386,7 @@ LEXT(fpu_save) mr. r3,r12 ; (TEST/DEBUG) li r2,0x6F00 ; (TEST/DEBUG) li r5,0 ; (TEST/DEBUG) - beq- noowneryet ; (TEST/DEBUG) + beq-- noowneryet ; (TEST/DEBUG) lwz r4,FPUlevel(r12) ; (TEST/DEBUG) lwz r5,FPUsave(r12) ; (TEST/DEBUG) @@ -464,17 +399,17 @@ noowneryet: oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG) fsretry: mr. r12,r12 ; Anyone own the FPU? lhz r11,PP_CPU_NUMBER(r6) ; Get our CPU number - beq- fsret ; Nobody owns the FPU, no save required... + beq-- fsret ; Nobody owns the FPU, no save required... cmplw cr1,r3,r12 ; Is the specified context live? isync ; Force owner check first lwz r9,FPUcpu(r12) ; Get the cpu that context was last on - bne- cr1,fsret ; No, it is not... + bne-- cr1,fsret ; No, it is not... cmplw cr1,r9,r11 ; Was the context for this processor? - beq- cr1,fsgoodcpu ; Facility last used on this processor... + beq-- cr1,fsgoodcpu ; Facility last used on this processor... b fsret ; Someone else claimed it... @@ -488,7 +423,7 @@ fsgoodcpu: lwz r3,FPUsave(r12) ; Get the current FPU savearea for the threa lwz r8,SAVlevel(r3) ; Get the level this savearea is for cmplw cr1,r9,r8 ; Correct level? - beq- cr1,fsret ; The current level is already saved, bail out... + beq-- cr1,fsret ; The current level is already saved, bail out... fsneedone: bl EXT(save_get) ; Get a savearea for the context @@ -497,78 +432,26 @@ fsneedone: bl EXT(save_get) ; Get a savearea for the context lwz r12,FPUowner(r6) ; Get back our thread stb r4,SAVflags+2(r3) ; Mark this savearea as a float mr. r12,r12 ; See if we were disowned while away. Very, very small chance of it... - beq- fsbackout ; If disowned, just toss savearea... + beq-- fsbackout ; If disowned, just toss savearea... lwz r4,facAct(r12) ; Get the activation associated with live context - mtlr r2 ; Restore return lwz r8,FPUsave(r12) ; Get the current top floating point savearea stw r4,SAVact(r3) ; Indicate the right activation for this context lwz r9,FPUlevel(r12) ; Get our current level indicator again stw r3,FPUsave(r12) ; Set this as the most current floating point context - stw r8,SAVprev(r3) ; And then chain this in front + stw r8,SAVprev+4(r3) ; And then chain this in front stw r9,SAVlevel(r3) ; Show level in savearea -; -; Save the current FPU state into the PCB of the thread that owns it. 
-; - - la r11,savefp0(r3) ; Point to the 1st line - dcbz 0,r11 ; Allocate the first savearea line - - la r11,savefp4(r3) ; Point to the 2nd line - stfd f0,savefp0(r3) - dcbz 0,r11 ; Allocate it - stfd f1,savefp1(r3) - stfd f2,savefp2(r3) - la r11,savefp8(r3) ; Point to the 3rd line - stfd f3,savefp3(r3) - dcbz 0,r11 ; Allocate it - stfd f4,savefp4(r3) - stfd f5,savefp5(r3) - stfd f6,savefp6(r3) - la r11,savefp12(r3) ; Point to the 4th line - stfd f7,savefp7(r3) - dcbz 0,r11 ; Allocate it - stfd f8,savefp8(r3) - stfd f9,savefp9(r3) - stfd f10,savefp10(r3) - la r11,savefp16(r3) ; Point to the 5th line - stfd f11,savefp11(r3) - dcbz 0,r11 ; Allocate it - stfd f12,savefp12(r3) - stfd f13,savefp13(r3) - stfd f14,savefp14(r3) - la r11,savefp20(r3) ; Point to the 6th line - stfd f15,savefp15(r3) - stfd f16,savefp16(r3) - stfd f17,savefp17(r3) - stfd f18,savefp18(r3) - la r11,savefp24(r3) ; Point to the 7th line - stfd f19,savefp19(r3) - dcbz 0,r11 ; Allocate it - stfd f20,savefp20(r3) - stfd f21,savefp21(r3) - stfd f22,savefp22(r3) - la r11,savefp28(r3) ; Point to the 8th line - stfd f23,savefp23(r3) - dcbz 0,r11 ; Allocate it - stfd f24,savefp24(r3) - stfd f25,savefp25(r3) - stfd f26,savefp26(r3) - stfd f27,savefp27(r3) - stfd f28,savefp28(r3) - - stfd f29,savefp29(r3) - stfd f30,savefp30(r3) - stfd f31,savefp31(r3) - + bl fp_store ; save all 32 FPRs in the save area at r3 + mtlr r2 ; Restore return + fsret: mtmsr r0 ; Put interrupts on if they were and floating point off isync blr -fsbackout: mr r12,r0 ; Save the original MSR - b EXT(save_ret_join) ; Toss savearea and return from there... +fsbackout: mr r4,r0 ; restore the original MSR + b EXT(save_ret_wMSR) ; Toss savearea and return from there... /* * fpu_switch() @@ -602,13 +485,12 @@ LEXT(fpu_switch) #endif /* DEBUG */ mfsprg r26,0 ; Get the per_processor block - mfmsr r19 ; Get the current MSR + mfmsr r19 ; Get the current MSR + mfsprg r17,1 ; Get the current thread mr r25,r4 ; Save the entry savearea lwz r22,FPUowner(r26) ; Get the thread that owns the FPU - lwz r10,PP_ACTIVE_THREAD(r26) ; Get the pointer to the active thread ori r19,r19,lo16(MASK(MSR_FP)) ; Enable the floating point feature - lwz r17,THREAD_TOP_ACT(r10) ; Now get the activation that is running mtmsr r19 ; Enable floating point instructions isync @@ -670,6 +552,7 @@ fswretry: mr. r22,r22 ; See if there is any live FP status mr r5,r31 ; (TEST/DEBUG) oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG) sc ; (TEST/DEBUG) + li r3,0 ; (TEST/DEBUG) #endif beq+ fsnosave ; Same level, so already saved... @@ -679,17 +562,55 @@ fsmstsave: stw r3,FPUowner(r26) ; Kill the context now eieio ; Make sure everyone sees it bl EXT(save_get) ; Go get a savearea - la r11,savefp0(r3) ; Point to the 1st line in new savearea - lwz r12,facAct(r22) ; Get the activation associated with the context - dcbz 0,r11 ; Allocate cache - stw r3,FPUsave(r22) ; Set this as the latest context savearea for the thread + mr. r31,r31 ; Are we saving the user state? + la r15,FPUsync(r22) ; Point to the sync word + beq++ fswusave ; Yeah, no need for lock... +; +; Here we make sure that the live context is not tossed while we are +; trying to push it. This can happen only for kernel context and +; then only by a race with act_machine_sv_free. +; +; We only need to hold this for a very short time, so no sniffing needed. +; If we find any change to the level, we just abandon. +; +fswsync: lwarx r19,0,r15 ; Get the sync word + li r0,1 ; Get the lock + cmplwi cr1,r19,0 ; Is it unlocked? + stwcx. 
r0,0,r15 ; Store lock and test reservation + cror cr0_eq,cr1_eq,cr0_eq ; Combine lost reservation and previously locked + bne-- fswsync ; Try again if lost reservation or locked... + + isync ; Toss speculation + + lwz r0,FPUlevel(r22) ; Pick up the level again + li r7,0 ; Get unlock value + cmplw r0,r31 ; Same level? + beq++ fswusave ; Yeah, we expect it to be... + + stw r7,FPUsync(r22) ; Unlock lock. No need to sync here + + bl EXT(save_ret) ; Toss save area because we are abandoning save + b fsnosave ; Skip the save... - stw r30,SAVprev(r3) ; Point us to the old context + .align 5 + +fswusave: lwz r12,facAct(r22) ; Get the activation associated with the context + stw r3,FPUsave(r22) ; Set this as the latest context savearea for the thread + mr. r31,r31 ; Check again if we were user level + stw r30,SAVprev+4(r3) ; Point us to the old context stw r31,SAVlevel(r3) ; Tag our level li r7,SAVfloat ; Get the floating point ID stw r12,SAVact(r3) ; Make sure we point to the right guy stb r7,SAVflags+2(r3) ; Set that we have a floating point save area + li r7,0 ; Get the unlock value + + beq-- fswnulock ; Skip unlock if user (we did not lock it)... + eieio ; Make sure that these updates make it out + stw r7,FPUsync(r22) ; Unlock it. + +fswnulock: + #if FPVECDBG lis r0,hi16(CutTrace) ; (TEST/DEBUG) li r2,0x7F03 ; (TEST/DEBUG) @@ -697,58 +618,7 @@ fsmstsave: stw r3,FPUowner(r26) ; Kill the context now sc ; (TEST/DEBUG) #endif -; -; Now we will actually save the old context -; - - la r11,savefp4(r3) ; Point to the 2nd line - stfd f0,savefp0(r3) - dcbz 0,r11 ; Allocate cache - stfd f1,savefp1(r3) - stfd f2,savefp2(r3) - la r11,savefp8(r3) ; Point to the 3rd line - stfd f3,savefp3(r3) - dcbz 0,r11 ; Allocate cache - stfd f4,savefp4(r3) - stfd f5,savefp5(r3) - stfd f6,savefp6(r3) - la r11,savefp12(r3) ; Point to the 4th line - stfd f7,savefp7(r3) - dcbz 0,r11 ; Allocate cache - stfd f8,savefp8(r3) - stfd f9,savefp9(r3) - stfd f10,savefp10(r3) - la r11,savefp16(r3) ; Point to the 5th line - stfd f11,savefp11(r3) - dcbz 0,r11 ; Allocate cache - stfd f12,savefp12(r3) - stfd f13,savefp13(r3) - stfd f14,savefp14(r3) - la r11,savefp20(r3) ; Point to the 6th line - stfd f15,savefp15(r3) - dcbz 0,r11 ; Allocate cache - stfd f16,savefp16(r3) - stfd f17,savefp17(r3) - stfd f18,savefp18(r3) - la r11,savefp24(r3) ; Point to the 7th line - stfd f19,savefp19(r3) - dcbz 0,r11 ; Allocate cache - stfd f20,savefp20(r3) - - stfd f21,savefp21(r3) - stfd f22,savefp22(r3) - la r11,savefp28(r3) ; Point to the 8th line - stfd f23,savefp23(r3) - dcbz 0,r11 ; allocate it - stfd f24,savefp24(r3) - stfd f25,savefp25(r3) - stfd f26,savefp26(r3) - stfd f27,savefp27(r3) - dcbz 0,r11 ; allocate it - stfd f28,savefp28(r3) - stfd f29,savefp29(r3) - stfd f30,savefp30(r3) - stfd f31,savefp31(r3) + bl fp_store ; store all 32 FPRs ; ; The context is all saved now and the facility is free. @@ -783,28 +653,30 @@ fsnosave: lwz r15,ACT_MACT_PCB(r17) ; Get the current level of the "new" one ori r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc li r16,FPUowner ; Displacement to float owner add r19,r18,r19 ; Point to the owner per_proc - li r0,0 fsinvothr: lwarx r18,r16,r19 ; Get the owner - cmplw r18,r29 ; Does he still have this context? - bne fsinvoths ; Nope... - stwcx. r0,r16,r19 ; Try to invalidate it - bne- fsinvothr ; Try again if there was a collision... 
+ sub r0,r18,r29 ; Subtract one from the other + sub r11,r29,r18 ; Subtract the other from the one + or r11,r11,r0 ; Combine them + srawi r11,r11,31 ; Get a 0 if equal or -1 if not + and r18,r18,r11 ; Make 0 if same, unchanged if not + stwcx. r18,r16,r19 ; Try to invalidate it + bne-- fsinvothr ; Try again if there was a collision... -fsinvoths: cmplwi cr1,r14,0 ; Do we possibly have some context to load? + cmplwi cr1,r14,0 ; Do we possibly have some context to load? la r11,savefp0(r14) ; Point to first line to bring in stw r15,FPUlevel(r29) ; Set the "new" active level eieio stw r29,FPUowner(r26) ; Mark us as having the live context - beq+ cr1,MakeSureThatNoTerroristsCanHurtUsByGod ; No "new" context to load... + beq++ cr1,MakeSureThatNoTerroristsCanHurtUsByGod ; No "new" context to load... dcbt 0,r11 ; Touch line in - lwz r3,SAVprev(r14) ; Get the previous context + lwz r3,SAVprev+4(r14) ; Get the previous context lwz r0,SAVlevel(r14) ; Get the level of first facility savearea cmplw r0,r15 ; Top level correct to load? - bne- MakeSureThatNoTerroristsCanHurtUsByGod ; No, go initialize... + bne-- MakeSureThatNoTerroristsCanHurtUsByGod ; No, go initialize... stw r3,FPUsave(r29) ; Pop the context (we will toss the savearea later) @@ -815,6 +687,9 @@ fsinvoths: cmplwi cr1,r14,0 ; Do we possibly have some context to load? sc ; (TEST/DEBUG) #endif +// Note this code is used both by 32- and 128-byte processors. This means six extra DCBTs +// are executed on a 128-byte machine, but that is better than a mispredicted branch. + la r11,savefp4(r14) ; Point to next line dcbt 0,r11 ; Touch line in lfd f0, savefp0(r14) @@ -865,17 +740,15 @@ fsinvoths: cmplwi cr1,r14,0 ; Do we possibly have some context to load? mr r3,r14 ; Get the old savearea (we popped it before) bl EXT(save_ret) ; Toss it -fsenable: lwz r8,savesrr1(r25) ; Get the msr of the interrupted guy - rlwinm r5,r25,0,0,19 ; Get the page address of the savearea +fsenable: lwz r8,savesrr1+4(r25) ; Get the msr of the interrupted guy ori r8,r8,MASK(MSR_FP) ; Enable the floating point feature lwz r10,ACT_MACT_SPF(r17) ; Get the act special flags lwz r11,spcFlags(r26) ; Get per_proc spec flags cause not in sync with act - lwz r5,SACvrswap(r5) ; Get Virtual to Real translation oris r10,r10,hi16(floatUsed|floatCng) ; Set that we used floating point oris r11,r11,hi16(floatUsed|floatCng) ; Set that we used floating point rlwinm. r0,r8,0,MSR_PR_BIT,MSR_PR_BIT ; See if we are doing this for user state - stw r8,savesrr1(r25) ; Set the msr of the interrupted guy - xor r3,r25,r5 ; Get the real address of the savearea + stw r8,savesrr1+4(r25) ; Set the msr of the interrupted guy + mr r3,r25 ; Pass the virtual address of savearea beq- fsnuser ; We are not user state... stw r10,ACT_MACT_SPF(r17) ; Set the activation copy stw r11,spcFlags(r26) ; Set per_proc copy @@ -959,13 +832,14 @@ fsthesame: beq- cr1,fsenable ; Not saved yet, nothing to pop, go enable and exit... lwz r11,SAVlevel(r30) ; Get the level of top saved context - lwz r14,SAVprev(r30) ; Get the previous savearea + lwz r14,SAVprev+4(r30) ; Get the previous savearea cmplw r11,r31 ; Are live and saved the same? bne+ fsenable ; Level not the same, nothing to pop, go enable and exit... mr r3,r30 ; Get the old savearea (we popped it before) + stw r14,FPUsave(r22) ; Pop the savearea from the stack bl EXT(save_ret) ; Toss it b fsenable ; Go enable and exit...
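The sequence that replaces the old compare-and-branch at fsinvothr above (and recurs below at tlfinvothr, vsinvothr, tlvinvothr, and vtinvothr) computes "zero the owner only if it still names this context" branch-free, inside the lwarx/stwcx. reservation window. A C model of the arithmetic, assuming the illustrative name clear_if_mine:

    #include <stdint.h>

    static uint32_t clear_if_mine(uint32_t owner, uint32_t me)
    {
        uint32_t diff = (owner - me) | (me - owner);  /* 0 iff owner == me */
        int32_t  mask = (int32_t)diff >> 31;          /* 0 if equal, -1 if not */
        return owner & (uint32_t)mask;                /* 0 if same, else unchanged */
    }

For any nonzero x, x | -x has the sign bit set, so the arithmetic shift yields all-ones exactly when the two values differ. Always executing the stwcx. (storing back either 0 or the unchanged owner) keeps a single loop exit and removes a data-dependent branch from the reservation loop.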
@@ -980,12 +854,13 @@ fsthesame: LEXT(toss_live_fpu) - + lis r0,hi16(MASK(MSR_VEC)) ; Get VEC mfmsr r9 ; Get the MSR - rlwinm r0,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interuptions + ori r0,r0,lo16(MASK(MSR_FP)) ; Add in FP rlwinm. r8,r9,0,MSR_FP_BIT,MSR_FP_BIT ; Are floats on right now? - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Make sure vectors are turned off - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Make sure floats are turned off + andc r9,r9,r0 ; Force off VEC and FP + ori r0,r0,lo16(MASK(MSR_EE)) ; Turn off EE + andc r0,r9,r0 ; Turn off EE now mtmsr r0 ; No interruptions isync beq+ tlfnotours ; Floats off, can not be live here... @@ -1000,9 +875,9 @@ LEXT(toss_live_fpu) lwz r6,FPUowner(r8) ; Get the thread that owns the floats li r0,0 ; Clear this just in case we need it cmplw r6,r3 ; Are we tossing our own context? - bne- tlfnotours ; Nope... + bne-- tlfnotours ; Nope... - fsub f1,f1,f1 ; Make a 0 + lfd f1,Zero(0) ; Make a 0 mtfsf 0xFF,f1 ; Clear it tlfnotours: lwz r11,FPUcpu(r3) ; Get the cpu on which we last loaded context @@ -1011,15 +886,18 @@ tlfnotours: lwz r11,FPUcpu(r3) ; Get the cpu on which we last loaded contex ori r12,r12,lo16(EXT(per_proc_info)) ; Set base per_proc li r10,FPUowner ; Displacement to float owner add r11,r12,r11 ; Point to the owner per_proc - li r0,0 ; Set a 0 to invalidate context tlfinvothr: lwarx r12,r10,r11 ; Get the owner - cmplw r12,r3 ; Does he still have this context? - bne+ tlfexit ; Nope, leave... - stwcx. r0,r10,r11 ; Try to invalidate it - bne- tlfinvothr ; Try again if there was a collision... -tlfexit: mtmsr r9 ; Restore interruptions + sub r0,r12,r3 ; Subtract one from the other + sub r8,r3,r12 ; Subtract the other from the one + or r8,r8,r0 ; Combine them + srawi r8,r8,31 ; Get a 0 if equal or -1 if not + and r12,r12,r8 ; Make 0 if same, unchanged if not + stwcx. r12,r10,r11 ; Try to invalidate it + bne-- tlfinvothr ; Try again if there was a collision... + + mtmsr r9 ; Restore interruptions isync ; Could be turning off floats here blr ; Leave... @@ -1052,11 +930,14 @@ tlfexit: mtmsr r9 ; Restore interruptions LEXT(vec_save) + + lis r2,hi16(MASK(MSR_VEC)) ; Get VEC mfmsr r0 ; Get the MSR - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Make sure vectors are turned off when we leave - rlwinm r2,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; But do interrupts only for now + ori r2,r2,lo16(MASK(MSR_FP)) ; Add in FP + andc r0,r0,r2 ; Force off VEC and FP + ori r2,r2,lo16(MASK(MSR_EE)) ; Clear EE + andc r2,r0,r2 ; Clear EE for now oris r2,r2,hi16(MASK(MSR_VEC)) ; Enable the vector facility for now also - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force off fp mtmsr r2 ; Set the MSR isync @@ -1115,17 +996,17 @@ vsgoodcpu: lwz r3,VMXsave(r12) ; Get the current vector savearea for the th bne+ vsret ; VRsave is non-zero so we need to keep what is saved... - lwz r4,SAVprev(r3) ; Pick up the previous area + lwz r4,SAVprev+4(r3) ; Pick up the previous area lwz r5,SAVlevel(r4) ; Get the level associated with save stw r4,VMXsave(r12) ; Dequeue this savearea + li r4,0 ; Clear stw r5,VMXlevel(r12) ; Save the level - li r3,0 ; Clear - stw r3,VMXowner(r12) ; Show no live context here + stw r4,VMXowner(r12) ; Show no live context here eieio -vsbackout: mr r12,r0 ; Set the saved MSR - b EXT(save_ret_join) ; Toss the savearea and return from there... +vsbackout: mr r4,r0 ; restore the saved MSR + b EXT(save_ret_wMSR) ; Toss the savearea and return from there... .align 5 @@ -1141,286 +1022,22 @@ vsneedone: mr. r10,r10 ; Is VRsave set to 0? mr. 
r12,r12 ; See if we were disowned while away. Very, very small chance of it... beq- vsbackout ; If disowned, just toss savearea... lwz r4,facAct(r12) ; Get the activation associated with live context - mtlr r2 ; Restore return lwz r8,VMXsave(r12) ; Get the current top vector savearea stw r4,SAVact(r3) ; Indicate the right activation for this context lwz r9,VMXlevel(r12) ; Get our current level indicator again stw r3,VMXsave(r12) ; Set this as the most current floating point context - stw r8,SAVprev(r3) ; And then chain this in front + stw r8,SAVprev+4(r3) ; And then chain this in front stw r9,SAVlevel(r3) ; Set level in savearea + mfcr r12 ; save CRs across call to vr_store + lwz r10,liveVRS(r6) ; Get the right VRSave register + + bl vr_store ; store live VRs into savearea as required (uses r4-r11) - mfcr r2 ; Save non-volatile CRs - lwz r10,liveVRS(r6) ; Get the right VRSave register - lis r9,0x5555 ; Mask with odd bits set - rlwinm r11,r10,1,0,31 ; Shift over 1 - ori r9,r9,0x5555 ; Finish mask - or r4,r10,r11 ; After this, even bits show which lines to zap - - andc r11,r4,r9 ; Clear out odd bits - - la r6,savevr0(r3) ; Point to line 0 - rlwinm r4,r11,15,0,15 ; Move line 8-15 flags to high order odd bits - or r4,r11,r4 ; Set the odd bits - ; (bit 0 is line 0, bit 1 is line 8, - ; bit 2 is line 1, bit 3 is line 9, etc. - rlwimi r4,r10,16,16,31 ; Put vrsave 0 - 15 into positions 16 - 31 - la r7,savevr2(r3) ; Point to line 1 - mtcrf 255,r4 ; Load up the CRs - stw r10,savevrvalid(r3) ; Save the validity information - mr r8,r6 ; Start registers off -; -; Save the current vector state -; - - bf 0,snol0 ; No line 0 to do... - dcba br0,r6 ; Allocate cache line 0 - -snol0: - la r6,savevr4(r3) ; Point to line 2 - bf 2,snol1 ; No line 1 to do... - dcba br0,r7 ; Allocate cache line 1 - -snol1: - la r7,savevr6(r3) ; Point to line 3 - bf 4,snol2 ; No line 2 to do... - dcba br0,r6 ; Allocate cache line 2 - -snol2: - li r11,16 ; Get offset for odd registers - bf 16,snovr0 ; Do not save VR0... - stvxl v0,br0,r8 ; Save VR0 - -snovr0: - la r9,savevr2(r3) ; Point to V2/V3 pair - bf 17,snovr1 ; Do not save VR1... - stvxl v1,r11,r8 ; Save VR1 - -snovr1: - la r6,savevr8(r3) ; Point to line 4 - bf 6,snol3 ; No line 3 to do... - dcba br0,r7 ; Allocate cache line 3 - -snol3: - la r8,savevr4(r3) ; Point to V4/V5 pair - bf 18,snovr2 ; Do not save VR2... - stvxl v2,br0,r9 ; Save VR2 - -snovr2: - bf 19,snovr3 ; Do not save VR3... - stvxl v3,r11,r9 ; Save VR3 - -snovr3: -; -; Note: CR4 is now free -; - la r7,savevr10(r3) ; Point to line 5 - bf 8,snol4 ; No line 4 to do... - dcba br0,r6 ; Allocate cache line 4 - -snol4: - la r9,savevr6(r3) ; Point to R6/R7 pair - bf 20,snovr4 ; Do not save VR4... - stvxl v4,br0,r8 ; Save VR4 - -snovr4: - bf 21,snovr5 ; Do not save VR5... - stvxl v5,r11,r8 ; Save VR5 - -snovr5: - mtcrf 0x08,r10 ; Set CRs for registers 16-19 - la r6,savevr12(r3) ; Point to line 6 - bf 10,snol5 ; No line 5 to do... - dcba br0,r7 ; Allocate cache line 5 - -snol5: - la r8,savevr8(r3) ; Point to V8/V9 pair - bf 22,snovr6 ; Do not save VR6... - stvxl v6,br0,r9 ; Save VR6 - -snovr6: - bf 23,snovr7 ; Do not save VR7... - stvxl v7,r11,r9 ; Save VR7 - -snovr7: -; -; Note: CR5 is now free -; - la r7,savevr14(r3) ; Point to line 7 - bf 12,snol6 ; No line 6 to do... - dcba br0,r6 ; Allocate cache line 6 - -snol6: - la r9,savevr10(r3) ; Point to V10/V11 pair - bf 24,snovr8 ; Do not save VR8... - stvxl v8,br0,r8 ; Save VR8 - -snovr8: - bf 25,snovr9 ; Do not save VR9... 
- stvxl v9,r11,r8 ; Save VR9 - -snovr9: - mtcrf 0x04,r10 ; Set CRs for registers 20-23 - la r6,savevr16(r3) ; Point to line 8 - bf 14,snol7 ; No line 7 to do... - dcba br0,r7 ; Allocate cache line 7 - -snol7: - la r8,savevr12(r3) ; Point to V12/V13 pair - bf 26,snovr10 ; Do not save VR10... - stvxl v10,br0,r9 ; Save VR10 - -snovr10: - bf 27,snovr11 ; Do not save VR11... - stvxl v11,r11,r9 ; Save VR11 - -snovr11: - -; -; Note: CR6 is now free -; - la r7,savevr18(r3) ; Point to line 9 - bf 1,snol8 ; No line 8 to do... - dcba br0,r6 ; Allocate cache line 8 - -snol8: - la r9,savevr14(r3) ; Point to V14/V15 pair - bf 28,snovr12 ; Do not save VR12... - stvxl v12,br0,r8 ; Save VR12 - -snovr12: - bf 29,snovr13 ; Do not save VR13... - stvxl v13,r11,r8 ; Save VR13 - -snovr13: - mtcrf 0x02,r10 ; Set CRs for registers 24-27 - la r6,savevr20(r3) ; Point to line 10 - bf 3,snol9 ; No line 9 to do... - dcba br0,r7 ; Allocate cache line 9 - -snol9: - la r8,savevr16(r3) ; Point to V16/V17 pair - bf 30,snovr14 ; Do not save VR14... - stvxl v14,br0,r9 ; Save VR14 - -snovr14: - bf 31,snovr15 ; Do not save VR15... - stvxl v15,r11,r9 ; Save VR15 - -snovr15: -; -; Note: CR7 is now free -; - la r7,savevr22(r3) ; Point to line 11 - bf 5,snol10 ; No line 10 to do... - dcba br0,r6 ; Allocate cache line 10 - -snol10: - la r9,savevr18(r3) ; Point to V18/V19 pair - bf 16,snovr16 ; Do not save VR16... - stvxl v16,br0,r8 ; Save VR16 - -snovr16: - bf 17,snovr17 ; Do not save VR17... - stvxl v17,r11,r8 ; Save VR17 - -snovr17: - mtcrf 0x01,r10 ; Set CRs for registers 28-31 -; -; Note: All registers have been or are accounted for in CRs -; - la r6,savevr24(r3) ; Point to line 12 - bf 7,snol11 ; No line 11 to do... - dcba br0,r7 ; Allocate cache line 11 - -snol11: - la r8,savevr20(r3) ; Point to V20/V21 pair - bf 18,snovr18 ; Do not save VR18... - stvxl v18,br0,r9 ; Save VR18 - -snovr18: - bf 19,snovr19 ; Do not save VR19... - stvxl v19,r11,r9 ; Save VR19 - -snovr19: - la r7,savevr26(r3) ; Point to line 13 - bf 9,snol12 ; No line 12 to do... - dcba br0,r6 ; Allocate cache line 12 - -snol12: - la r9,savevr22(r3) ; Point to V22/V23 pair - bf 20,snovr20 ; Do not save VR20... - stvxl v20,br0,r8 ; Save VR20 - -snovr20: - bf 21,snovr21 ; Do not save VR21... - stvxl v21,r11,r8 ; Save VR21 - -snovr21: - la r6,savevr28(r3) ; Point to line 14 - bf 11,snol13 ; No line 13 to do... - dcba br0,r7 ; Allocate cache line 13 - -snol13: - la r8,savevr24(r3) ; Point to V24/V25 pair - bf 22,snovr22 ; Do not save VR22... - stvxl v22,br0,r9 ; Save VR22 - -snovr22: - bf 23,snovr23 ; Do not save VR23... - stvxl v23,r11,r9 ; Save VR23 - -snovr23: - la r7,savevr30(r3) ; Point to line 15 - bf 13,snol14 ; No line 14 to do... - dcba br0,r6 ; Allocate cache line 14 - -snol14: - la r9,savevr26(r3) ; Point to V26/V27 pair - bf 24,snovr24 ; Do not save VR24... - stvxl v24,br0,r8 ; Save VR24 - -snovr24: - bf 25,snovr25 ; Do not save VR25... - stvxl v25,r11,r8 ; Save VR25 - -snovr25: - bf 15,snol15 ; No line 15 to do... - dcba br0,r7 ; Allocate cache line 15 - -snol15: -; -; Note: All cache lines allocated now -; - la r8,savevr28(r3) ; Point to V28/V29 pair - bf 26,snovr26 ; Do not save VR26... - stvxl v26,br0,r9 ; Save VR26 - -snovr26: - bf 27,snovr27 ; Do not save VR27... - stvxl v27,r11,r9 ; Save VR27 - -snovr27: - la r7,savevr30(r3) ; Point to V30/V31 pair - bf 28,snovr28 ; Do not save VR28... - stvxl v28,br0,r8 ; Save VR28 - -snovr28: - bf 29,snovr29 ; Do not save VR29... - stvxl v29,r11,r8 ; Save VR29 - -snovr29: - bf 30,snovr30 ; Do not save VR30... 
- stvxl v30,br0,r7 ; Save VR30 - -snovr30: - bf 31,snovr31 ; Do not save VR31... - stvxl v31,r11,r7 ; Save VR31 - -snovr31: - mtcrf 255,r2 ; Restore all cr - -vsret: mtmsr r0 ; Put interrupts on if they were and vector off + mtcrf 255,r12 ; Restore the non-volatile CRs + mtlr r2 ; restore return address + +vsret: mtmsr r0 ; Put interrupts on if they were and vector off isync blr @@ -1457,13 +1074,12 @@ LEXT(vec_switch) #endif /* DEBUG */ mfsprg r26,0 ; Get the per_processor block - mfmsr r19 ; Get the current MSR + mfmsr r19 ; Get the current MSR + mfsprg r17,1 ; Get the current thread mr r25,r4 ; Save the entry savearea - lwz r22,VMXowner(r26) ; Get the thread that owns the vector - lwz r10,PP_ACTIVE_THREAD(r26) ; Get the pointer to the active thread oris r19,r19,hi16(MASK(MSR_VEC)) ; Enable the vector feature - lwz r17,THREAD_TOP_ACT(r10) ; Now get the activation that is running + lwz r22,VMXowner(r26) ; Get the thread that owns the vector mtmsr r19 ; Enable vector instructions isync @@ -1534,7 +1150,7 @@ vsvretry: mr. r22,r22 ; See if there is any live vector status bne- cr2,vsnosave ; Live context saved and VRSave not 0, no save and keep context... - lwz r4,SAVprev(r30) ; Pick up the previous area + lwz r4,SAVprev+4(r30) ; Pick up the previous area li r5,0 ; Assume this is the only one (which should be the ususal case) mr. r4,r4 ; Was this the only one? stw r4,VMXsave(r22) ; Dequeue this savearea @@ -1557,15 +1173,55 @@ vsmstsave: stw r8,VMXowner(r26) ; Clear owner bl EXT(save_get) ; Go get a savearea - lwz r12,facAct(r22) ; Get the activation associated with the context - stw r3,VMXsave(r22) ; Set this as the latest context savearea for the thread + mr. r31,r31 ; Are we saving the user state? + la r15,VMXsync(r22) ; Point to the sync word + beq++ vswusave ; Yeah, no need for lock... +; +; Here we make sure that the live context is not tossed while we are +; trying to push it. This can happen only for kernel context and +; then only by a race with act_machine_sv_free. +; +; We only need to hold this for a very short time, so no sniffing needed. +; If we find any change to the level, we just abandon. +; +vswsync: lwarx r19,0,r15 ; Get the sync word + li r0,1 ; Get the lock + cmplwi cr1,r19,0 ; Is it unlocked? + stwcx. r0,0,r15 ; Store lock and test reservation + cror cr0_eq,cr1_eq,cr0_eq ; Combine lost reservation and previously locked + bne-- vswsync ; Try again if lost reservation or locked... - stw r30,SAVprev(r3) ; Point us to the old context + isync ; Toss speculation + + lwz r0,VMXlevel(r22) ; Pick up the level again + li r7,0 ; Get unlock value + cmplw r0,r31 ; Same level? + beq++ vswusave ; Yeah, we expect it to be... + + stw r7,VMXsync(r22) ; Unlock lock. No need to sync here + + bl EXT(save_ret) ; Toss save area because we are abandoning save + b vsnosave ; Skip the save... + + .align 5 + +vswusave: lwz r12,facAct(r22) ; Get the activation associated with the context + stw r3,VMXsave(r22) ; Set this as the latest context savearea for the thread + mr. r31,r31 ; Check again if we were user level + stw r30,SAVprev+4(r3) ; Point us to the old context stw r31,SAVlevel(r3) ; Tag our level li r7,SAVvector ; Get the vector ID stw r12,SAVact(r3) ; Make sure we point to the right guy stb r7,SAVflags+2(r3) ; Set that we have a vector save area + li r7,0 ; Get the unlock value + + beq-- vswnulock ; Skip unlock if user (we did not lock it)... + eieio ; Make sure that these updates make it out + stw r7,VMXsync(r22) ; Unlock it. 
+ +vswnulock: + #if FPVECDBG lis r0,hi16(CutTrace) ; (TEST/DEBUG) li r2,0x5F03 ; (TEST/DEBUG) @@ -1574,273 +1230,7 @@ vsmstsave: stw r8,VMXowner(r26) ; Clear owner #endif lwz r10,liveVRS(r26) ; Get the right VRSave register - lis r9,0x5555 ; Mask with odd bits set - rlwinm r11,r10,1,0,31 ; Shift over 1 - ori r9,r9,0x5555 ; Finish mask - or r21,r10,r11 ; After this, even bits show which lines to zap - - andc r13,r21,r9 ; Clear out odd bits - - la r11,savevr0(r3) ; Point to line 0 - rlwinm r24,r13,15,0,15 ; Move line 8-15 flags to high order odd bits - or r24,r13,r24 ; Set the odd bits - ; (bit 0 is line 0, bit 1 is line 8, - ; bit 2 is line 1, bit 3 is line 9, etc. - rlwimi r24,r10,16,16,31 ; Put vrsave 0 - 15 into positions 16 - 31 - la r21,savevr2(r3) ; Point to line 1 - mtcrf 255,r24 ; Load up the CRs - stw r10,savevrvalid(r3) ; Save the validity information - mr r12,r11 ; Start registers off -; -; Save the current vector state -; - - bf 0,nol0 ; No line 0 to do... - dcba br0,r11 ; Allocate cache line 0 - -nol0: - la r11,savevr4(r3) ; Point to line 2 - bf 2,nol1 ; No line 1 to do... - dcba br0,r21 ; Allocate cache line 1 - -nol1: - la r21,savevr6(r3) ; Point to line 3 - bf 4,nol2 ; No line 2 to do... - dcba br0,r11 ; Allocate cache line 2 - -nol2: - li r14,16 ; Get offset for odd registers - bf 16,novr0 ; Do not save VR0... - stvxl v0,br0,r12 ; Save VR0 - -novr0: - la r13,savevr2(r3) ; Point to V2/V3 pair - bf 17,novr1 ; Do not save VR1... - stvxl v1,r14,r12 ; Save VR1 - -novr1: - la r11,savevr8(r3) ; Point to line 4 - bf 6,nol3 ; No line 3 to do... - dcba br0,r21 ; Allocate cache line 3 - -nol3: - la r12,savevr4(r3) ; Point to V4/V5 pair - bf 18,novr2 ; Do not save VR2... - stvxl v2,br0,r13 ; Save VR2 - -novr2: - bf 19,novr3 ; Do not save VR3... - stvxl v3,r14,r13 ; Save VR3 - -novr3: -; -; Note: CR4 is now free -; - la r21,savevr10(r3) ; Point to line 5 - bf 8,nol4 ; No line 4 to do... - dcba br0,r11 ; Allocate cache line 4 - -nol4: - la r13,savevr6(r3) ; Point to R6/R7 pair - bf 20,novr4 ; Do not save VR4... - stvxl v4,br0,r12 ; Save VR4 - -novr4: - bf 21,novr5 ; Do not save VR5... - stvxl v5,r14,r12 ; Save VR5 - -novr5: - mtcrf 0x08,r10 ; Set CRs for registers 16-19 - la r11,savevr12(r3) ; Point to line 6 - bf 10,nol5 ; No line 5 to do... - dcba br0,r21 ; Allocate cache line 5 - -nol5: - la r12,savevr8(r3) ; Point to V8/V9 pair - bf 22,novr6 ; Do not save VR6... - stvxl v6,br0,r13 ; Save VR6 - -novr6: - bf 23,novr7 ; Do not save VR7... - stvxl v7,r14,r13 ; Save VR7 - -novr7: -; -; Note: CR5 is now free -; - la r21,savevr14(r3) ; Point to line 7 - bf 12,nol6 ; No line 6 to do... - dcba br0,r11 ; Allocate cache line 6 - -nol6: - la r13,savevr10(r3) ; Point to V10/V11 pair - bf 24,novr8 ; Do not save VR8... - stvxl v8,br0,r12 ; Save VR8 - -novr8: - bf 25,novr9 ; Do not save VR9... - stvxl v9,r14,r12 ; Save VR9 - -novr9: - mtcrf 0x04,r10 ; Set CRs for registers 20-23 - la r11,savevr16(r3) ; Point to line 8 - bf 14,nol7 ; No line 7 to do... - dcba br0,r21 ; Allocate cache line 7 - -nol7: - la r12,savevr12(r3) ; Point to V12/V13 pair - bf 26,novr10 ; Do not save VR10... - stvxl v10,br0,r13 ; Save VR10 - -novr10: - bf 27,novr11 ; Do not save VR11... - stvxl v11,r14,r13 ; Save VR11 - -novr11: - -; -; Note: CR6 is now free -; - la r21,savevr18(r3) ; Point to line 9 - bf 1,nol8 ; No line 8 to do... - dcba br0,r11 ; Allocate cache line 8 - -nol8: - la r13,savevr14(r3) ; Point to V14/V15 pair - bf 28,novr12 ; Do not save VR12... 
- stvxl v12,br0,r12 ; Save VR12 - -novr12: - bf 29,novr13 ; Do not save VR13... - stvxl v13,r14,r12 ; Save VR13 - -novr13: - mtcrf 0x02,r10 ; Set CRs for registers 24-27 - la r11,savevr20(r3) ; Point to line 10 - bf 3,nol9 ; No line 9 to do... - dcba br0,r21 ; Allocate cache line 9 - -nol9: - la r12,savevr16(r3) ; Point to V16/V17 pair - bf 30,novr14 ; Do not save VR14... - stvxl v14,br0,r13 ; Save VR14 - -novr14: - bf 31,novr15 ; Do not save VR15... - stvxl v15,r14,r13 ; Save VR15 - -novr15: -; -; Note: CR7 is now free -; - la r21,savevr22(r3) ; Point to line 11 - bf 5,nol10 ; No line 10 to do... - dcba br0,r11 ; Allocate cache line 10 - -nol10: - la r13,savevr18(r3) ; Point to V18/V19 pair - bf 16,novr16 ; Do not save VR16... - stvxl v16,br0,r12 ; Save VR16 - -novr16: - bf 17,novr17 ; Do not save VR17... - stvxl v17,r14,r12 ; Save VR17 - -novr17: - mtcrf 0x01,r10 ; Set CRs for registers 28-31 -; -; Note: All registers have been or are accounted for in CRs -; - la r11,savevr24(r3) ; Point to line 12 - bf 7,nol11 ; No line 11 to do... - dcba br0,r21 ; Allocate cache line 11 - -nol11: - la r12,savevr20(r3) ; Point to V20/V21 pair - bf 18,novr18 ; Do not save VR18... - stvxl v18,br0,r13 ; Save VR18 - -novr18: - bf 19,novr19 ; Do not save VR19... - stvxl v19,r14,r13 ; Save VR19 - -novr19: - la r21,savevr26(r3) ; Point to line 13 - bf 9,nol12 ; No line 12 to do... - dcba br0,r11 ; Allocate cache line 12 - -nol12: - la r13,savevr22(r3) ; Point to V22/V23 pair - bf 20,novr20 ; Do not save VR20... - stvxl v20,br0,r12 ; Save VR20 - -novr20: - bf 21,novr21 ; Do not save VR21... - stvxl v21,r14,r12 ; Save VR21 - -novr21: - la r11,savevr28(r3) ; Point to line 14 - bf 11,nol13 ; No line 13 to do... - dcba br0,r21 ; Allocate cache line 13 - -nol13: - la r12,savevr24(r3) ; Point to V24/V25 pair - bf 22,novr22 ; Do not save VR22... - stvxl v22,br0,r13 ; Save VR22 - -novr22: - bf 23,novr23 ; Do not save VR23... - stvxl v23,r14,r13 ; Save VR23 - -novr23: - la r21,savevr30(r3) ; Point to line 15 - bf 13,nol14 ; No line 14 to do... - dcba br0,r11 ; Allocate cache line 14 - -nol14: - la r13,savevr26(r3) ; Point to V26/V27 pair - bf 24,novr24 ; Do not save VR24... - stvxl v24,br0,r12 ; Save VR24 - -novr24: - bf 25,novr25 ; Do not save VR25... - stvxl v25,r14,r12 ; Save VR25 - -novr25: - bf 15,nol15 ; No line 15 to do... - dcba br0,r21 ; Allocate cache line 15 - -nol15: -; -; Note: All cache lines allocated now -; - la r12,savevr28(r3) ; Point to V28/V29 pair - bf 26,novr26 ; Do not save VR26... - stvxl v26,br0,r13 ; Save VR26 - -novr26: - bf 27,novr27 ; Do not save VR27... - stvxl v27,r14,r13 ; Save VR27 - -novr27: - la r13,savevr30(r3) ; Point to V30/V31 pair - bf 28,novr28 ; Do not save VR28... - stvxl v28,br0,r12 ; Save VR28 - -novr28: - bf 29,novr29 ; Do not save VR29... - stvxl v29,r14,r12 ; Save VR29 - -novr29: - bf 30,novr30 ; Do not save VR30... - stvxl v30,br0,r13 ; Save VR30 - -novr30: - bf 31,novr31 ; Do not save VR31... - stvxl v31,r14,r13 ; Save VR31 - -novr31: - + bl vr_store ; store VRs into savearea according to vrsave (uses r4-r11) ; @@ -1889,27 +1279,29 @@ vsnosave: vspltisb v31,-10 ; Get 0xF6F6F6F6 li r16,VMXowner ; Displacement to vector owner add r19,r18,r19 ; Point to the owner per_proc vrlb v31,v31,v29 ; Get 0xDEADDEAD - li r0,0 vsinvothr: lwarx r18,r16,r19 ; Get the owner - cmplw r18,r29 ; Does he still have this context? - bne vsinvoths ; Nope... - stwcx. r0,r16,r19 ; Try to invalidate it - bne- vsinvothr ; Try again if there was a collision... 
- + + sub r0,r18,r29 ; Subtract one from the other + sub r11,r29,r18 ; Subtract the other from the one + or r11,r11,r0 ; Combine them + srawi r11,r11,31 ; Get a 0 if equal or -1 if not + and r18,r18,r11 ; Make 0 if same, unchanged if not + stwcx. r18,r16,r19 ; Try to invalidate it + bne-- vsinvothr ; Try again if there was a collision... -vsinvoths: cmplwi cr1,r14,0 ; Do we possibly have some context to load? + cmplwi cr1,r14,0 ; Do we possibly have some context to load? vmrghh v31,v30,v31 ; Get 0x7FFFDEAD. V31 keeps this value until the bitter end stw r15,VMXlevel(r29) ; Set the "new" active level eieio stw r29,VMXowner(r26) ; Mark us as having the live context - beq- cr1,ProtectTheAmericanWay ; Nothing to restore, first time use... + beq-- cr1,ProtectTheAmericanWay ; Nothing to restore, first time use... - lwz r3,SAVprev(r14) ; Get the previous context + lwz r3,SAVprev+4(r14) ; Get the previous context lwz r0,SAVlevel(r14) ; Get the level of first facility savearea cmplw r0,r15 ; Top level correct to load? - bne- ProtectTheAmericanWay ; No, go initialize... + bne-- ProtectTheAmericanWay ; No, go initialize... stw r3,VMXsave(r29) ; Pop the context (we will toss the savearea later) @@ -1920,390 +1312,23 @@ vsinvoths: cmplwi cr1,r14,0 ; Do we possibly have some context to load? sc ; (TEST/DEBUG) #endif - lwz r22,savevrsave(r25) ; Get the most current VRSAVE lwz r10,savevrvalid(r14) ; Get the valid VRs in the savearea - lis r9,0x5555 ; Mask with odd bits set + lwz r22,savevrsave(r25) ; Get the most current VRSAVE and r10,r10,r22 ; Figure out just what registers need to be loaded - ori r9,r9,0x5555 ; Finish mask - rlwinm r11,r10,1,0,31 ; Shift over 1 - or r12,r10,r11 ; After this, even bits show which lines to touch - andc r13,r12,r9 ; Clear out odd bits - - la r20,savevr0(r14) ; Point to line 0 - rlwinm r3,r13,15,0,15 ; Move line 8-15 flags to high order odd bits - la r21,savevr2(r3) ; Point to line 1 - or r3,r13,r3 ; Set the odd bits - ; (bit 0 is line 0, bit 1 is line 8, - ; bit 2 is line 1, bit 3 is line 9, etc. - rlwimi r3,r10,16,16,31 ; Put vrsave 0 - 15 into positions 16 - 31 - mtcrf 255,r3 ; Load up the CRs - mr r22,r20 ; Start registers off -; -; Load the new vector state -; - - bf 0,lnol0 ; No line 0 to do... - dcbt br0,r20 ; Touch cache line 0 - -lnol0: - la r20,savevr4(r14) ; Point to line 2 - bf 2,lnol1 ; No line 1 to do... - dcbt br0,r21 ; Touch cache line 1 - -lnol1: - la r21,savevr6(r14) ; Point to line 3 - bf 4,lnol2 ; No line 2 to do... - dcbt br0,r20 ; Touch cache line 2 - -lnol2: - li r30,16 ; Get offset for odd registers - bf 16,lnovr0 ; Do not restore VR0... - lvxl v0,br0,r22 ; Restore VR0 - -lnovr0: - la r23,savevr2(r14) ; Point to V2/V3 pair - bf 17,lnovr1 ; Do not restore VR1... - lvxl v1,r30,r22 ; Restore VR1 - -lnovr1: - la r20,savevr8(r14) ; Point to line 4 - bf 6,lnol3 ; No line 3 to do... - dcbt br0,r21 ; Touch cache line 3 - -lnol3: - la r22,savevr4(r14) ; Point to V4/V5 pair - bf 18,lnovr2 ; Do not restore VR2... - lvxl v2,br0,r23 ; Restore VR2 - -lnovr2: - bf 19,lnovr3 ; Do not restore VR3... - lvxl v3,r30,r23 ; Restore VR3 - -lnovr3: -; -; Note: CR4 is now free -; - la r21,savevr10(r14) ; Point to line 5 - bf 8,lnol4 ; No line 4 to do... - dcbt br0,r20 ; Touch cache line 4 - -lnol4: - la r23,savevr6(r14) ; Point to R6/R7 pair - bf 20,lnovr4 ; Do not restore VR4... - lvxl v4,br0,r22 ; Restore VR4 - -lnovr4: - bf 21,lnovr5 ; Do not restore VR5...
- lvxl v5,r30,r22 ; Restore VR5 - -lnovr5: - mtcrf 0x08,r10 ; Set CRs for registers 16-19 - la r20,savevr12(r14) ; Point to line 6 - bf 10,lnol5 ; No line 5 to do... - dcbt br0,r21 ; Touch cache line 5 - -lnol5: - la r22,savevr8(r14) ; Point to V8/V9 pair - bf 22,lnovr6 ; Do not restore VR6... - lvxl v6,br0,r23 ; Restore VR6 - -lnovr6: - bf 23,lnovr7 ; Do not restore VR7... - lvxl v7,r30,r23 ; Restore VR7 - -lnovr7: -; -; Note: CR5 is now free -; - la r21,savevr14(r14) ; Point to line 7 - bf 12,lnol6 ; No line 6 to do... - dcbt br0,r20 ; Touch cache line 6 - -lnol6: - la r23,savevr10(r14) ; Point to V10/V11 pair - bf 24,lnovr8 ; Do not restore VR8... - lvxl v8,br0,r22 ; Restore VR8 - -lnovr8: - bf 25,lnovr9 ; Do not save VR9... - lvxl v9,r30,r22 ; Restore VR9 - -lnovr9: - mtcrf 0x04,r10 ; Set CRs for registers 20-23 - la r20,savevr16(r14) ; Point to line 8 - bf 14,lnol7 ; No line 7 to do... - dcbt br0,r21 ; Touch cache line 7 - -lnol7: - la r22,savevr12(r14) ; Point to V12/V13 pair - bf 26,lnovr10 ; Do not restore VR10... - lvxl v10,br0,r23 ; Restore VR10 - -lnovr10: - bf 27,lnovr11 ; Do not restore VR11... - lvxl v11,r30,r23 ; Restore VR11 - -lnovr11: - -; -; Note: CR6 is now free -; - la r21,savevr18(r14) ; Point to line 9 - bf 1,lnol8 ; No line 8 to do... - dcbt br0,r20 ; Touch cache line 8 - -lnol8: - la r23,savevr14(r14) ; Point to V14/V15 pair - bf 28,lnovr12 ; Do not restore VR12... - lvxl v12,br0,r22 ; Restore VR12 - -lnovr12: - bf 29,lnovr13 ; Do not restore VR13... - lvxl v13,r30,r22 ; Restore VR13 - -lnovr13: - mtcrf 0x02,r10 ; Set CRs for registers 24-27 - la r20,savevr20(r14) ; Point to line 10 - bf 3,lnol9 ; No line 9 to do... - dcbt br0,r21 ; Touch cache line 9 - -lnol9: - la r22,savevr16(r14) ; Point to V16/V17 pair - bf 30,lnovr14 ; Do not restore VR14... - lvxl v14,br0,r23 ; Restore VR14 - -lnovr14: - bf 31,lnovr15 ; Do not restore VR15... - lvxl v15,r30,r23 ; Restore VR15 - -lnovr15: -; -; Note: CR7 is now free -; - la r21,savevr22(r14) ; Point to line 11 - bf 5,lnol10 ; No line 10 to do... - dcbt br0,r20 ; Touch cache line 10 - -lnol10: - la r23,savevr18(r14) ; Point to V18/V19 pair - bf 16,lnovr16 ; Do not restore VR16... - lvxl v16,br0,r22 ; Restore VR16 - -lnovr16: - bf 17,lnovr17 ; Do not restore VR17... - lvxl v17,r30,r22 ; Restore VR17 - -lnovr17: - mtcrf 0x01,r10 ; Set CRs for registers 28-31 -; -; Note: All registers have been or are accounted for in CRs -; - la r20,savevr24(r14) ; Point to line 12 - bf 7,lnol11 ; No line 11 to do... - dcbt br0,r21 ; Touch cache line 11 - -lnol11: - la r22,savevr20(r14) ; Point to V20/V21 pair - bf 18,lnovr18 ; Do not restore VR18... - lvxl v18,br0,r23 ; Restore VR18 - -lnovr18: - bf 19,lnovr19 ; Do not restore VR19... - lvxl v19,r30,r23 ; Restore VR19 - -lnovr19: - la r21,savevr26(r14) ; Point to line 13 - bf 9,lnol12 ; No line 12 to do... - dcbt br0,r20 ; Touch cache line 12 - -lnol12: - la r23,savevr22(r14) ; Point to V22/V23 pair - bf 20,lnovr20 ; Do not restore VR20... - lvxl v20,br0,r22 ; Restore VR20 - -lnovr20: - bf 21,lnovr21 ; Do not restore VR21... - lvxl v21,r30,r22 ; Restore VR21 - -lnovr21: - la r20,savevr28(r14) ; Point to line 14 - bf 11,lnol13 ; No line 13 to do... - dcbt br0,r21 ; Touch cache line 13 - -lnol13: - la r22,savevr24(r14) ; Point to V24/V25 pair - bf 22,lnovr22 ; Do not restore VR22... - lvxl v22,br0,r23 ; Restore VR22 - -lnovr22: - bf 23,lnovr23 ; Do not restore VR23... - lvxl v23,r30,r23 ; Restore VR23 - -lnovr23: - la r21,savevr30(r14) ; Point to line 15 - bf 13,lnol14 ; No line 14 to do... 
- dcbt br0,r20 ; Touch cache line 14 - -lnol14: - la r23,savevr26(r14) ; Point to V26/V27 pair - bf 24,lnovr24 ; Do not restore VR24... - lvxl v24,br0,r22 ; Restore VR24 - -lnovr24: - bf 25,lnovr25 ; Do not restore VR25... - lvxl v25,r30,r22 ; Restore VR25 - -lnovr25: - bf 15,lnol15 ; No line 15 to do... - dcbt br0,r21 ; Touch cache line 15 - -lnol15: -; -; Note: All needed cache lines have been touched now -; - la r22,savevr28(r14) ; Point to V28/V29 pair - bf 26,lnovr26 ; Do not restore VR26... - lvxl v26,br0,r23 ; Restore VR26 - -lnovr26: - bf 27,lnovr27 ; Do not restore VR27... - lvxl v27,r30,r23 ; Restore VR27 - -lnovr27: - la r23,savevr30(r14) ; Point to V30/V31 pair - bf 28,lnovr28 ; Do not restore VR28... - lvxl v28,br0,r22 ; Restore VR28 - -lnovr28: - bf 29,lnovr29 ; Do not restore VR29... - lvxl v29,r30,r22 ; Restore VR29 - -lnovr29: - bf 30,lnovr30 ; Do not restore VR30... - lvxl v30,br0,r23 ; Restore VR30 - -lnovr30: -; -; Everything is restored now except for VR31. We need it to get -; the QNaNBarbarian value to put into idle vector registers. -; Note: V31 was set above to QNaNbarbarian -; - - cmpwi r10,-1 ; Handle the quick case of all registers in use - beq- mstlvr31 ; Not likely, but all are in use... - mtcrf 255,r10 ; Get mask of valid registers - - bt 0,ni0 ; Register is ok already... - vor v0,v31,v31 ; Copy into the next register -ni0: - bt 1,ni1 ; Register is ok already... - vor v1,v31,v31 ; Copy into the next register -ni1: - bt 2,ni2 ; Register is ok already... - vor v2,v31,v31 ; Copy into the next register -ni2: - bt 3,ni3 ; Register is ok already... - vor v3,v31,v31 ; Copy into the next register -ni3: - bt 4,ni4 ; Register is ok already... - vor v4,v31,v31 ; Copy into the next register -ni4: - bt 5,ni5 ; Register is ok already... - vor v5,v31,v31 ; Copy into the next register -ni5: - bt 6,ni6 ; Register is ok already... - vor v6,v31,v31 ; Copy into the next register -ni6: - bt 7,ni7 ; Register is ok already... - vor v7,v31,v31 ; Copy into the next register -ni7: - bt 8,ni8 ; Register is ok already... - vor v8,v31,v31 ; Copy into the next register -ni8: - bt 9,ni9 ; Register is ok already... - vor v9,v31,v31 ; Copy into the next register -ni9: - bt 10,ni10 ; Register is ok already... - vor v10,v31,v31 ; Copy into the next register -ni10: - bt 11,ni11 ; Register is ok already... - vor v11,v31,v31 ; Copy into the next register -ni11: - bt 12,ni12 ; Register is ok already... - vor v12,v31,v31 ; Copy into the next register -ni12: - bt 13,ni13 ; Register is ok already... - vor v13,v31,v31 ; Copy into the next register -ni13: - bt 14,ni14 ; Register is ok already... - vor v14,v31,v31 ; Copy into the next register -ni14: - bt 15,ni15 ; Register is ok already... - vor v15,v31,v31 ; Copy into the next register -ni15: - bt 16,ni16 ; Register is ok already... - vor v16,v31,v31 ; Copy into the next register -ni16: - bt 17,ni17 ; Register is ok already... - vor v17,v31,v31 ; Copy into the next register -ni17: - bt 18,ni18 ; Register is ok already... - vor v18,v31,v31 ; Copy into the next register -ni18: - bt 19,ni19 ; Register is ok already... - vor v19,v31,v31 ; Copy into the next register -ni19: - bt 20,ni20 ; Register is ok already... - vor v20,v31,v31 ; Copy into the next register -ni20: - bt 21,ni21 ; Register is ok already... - vor v21,v31,v31 ; Copy into the next register -ni21: - bt 22,ni22 ; Register is ok already... - vor v22,v31,v31 ; Copy into the next register -ni22: - bt 23,ni23 ; Register is ok already... 
- vor v23,v31,v31 ; Copy into the next register -ni23: - bt 24,ni24 ; Register is ok already... - vor v24,v31,v31 ; Copy into the next register -ni24: - bt 25,ni25 ; Register is ok already... - vor v25,v31,v31 ; Copy into the next register -ni25: - bt 26,ni26 ; Register is ok already... - vor v26,v31,v31 ; Copy into the next register -ni26: - bt 27,ni27 ; Register is ok already... - vor v27,v31,v31 ; Copy into the next register -ni27: - bt 28,ni28 ; Register is ok already... - vor v28,v31,v31 ; Copy into the next register -ni28: - bt 29,ni29 ; Register is ok already... - vor v29,v31,v31 ; Copy into the next register -ni29: - bt 30,ni30 ; Register is ok already... - vor v30,v31,v31 ; Copy into the next register -ni30: - bf 31,lnovr31 ; V31 is empty, no need to restore... - -mstlvr31: lvxl v31,r30,r23 ; Restore VR31 - -lnovr31: mr r3,r14 ; Get the old savearea (we popped it before) - bl EXT(save_ret) ; Toss it + mr r3,r14 ; r3 <- ptr to savearea with VRs + bl vr_load ; load VRs from save area based on vrsave in r10 + + bl EXT(save_ret) ; Toss the save area after loading VRs -vrenable: lwz r8,savesrr1(r25) ; Get the msr of the interrupted guy - rlwinm r5,r25,0,0,19 ; Get the page address of the savearea +vrenable: lwz r8,savesrr1+4(r25) ; Get the msr of the interrupted guy oris r8,r8,hi16(MASK(MSR_VEC)) ; Enable the vector facility lwz r10,ACT_MACT_SPF(r17) ; Get the act special flags lwz r11,spcFlags(r26) ; Get per_proc spec flags cause not in sync with act - lwz r5,SACvrswap(r5) ; Get Virtual to Real translation oris r10,r10,hi16(vectorUsed|vectorCng) ; Set that we used vectors oris r11,r11,hi16(vectorUsed|vectorCng) ; Set that we used vectors rlwinm. r0,r8,0,MSR_PR_BIT,MSR_PR_BIT ; See if we are doing this for user state - stw r8,savesrr1(r25) ; Set the msr of the interrupted guy - xor r3,r25,r5 ; Get the real address of the savearea + stw r8,savesrr1+4(r25) ; Set the msr of the interrupted guy + mr r3,r25 ; Pass virtual address of the savearea beq- vrnuser ; We are not user state... stw r10,ACT_MACT_SPF(r17) ; Set the activation copy stw r11,spcFlags(r26) ; Set per_proc copy @@ -2385,13 +1410,14 @@ vsthesame: beq- cr1,vrenable ; Not saved yet, nothing to pop, go enable and exit... lwz r11,SAVlevel(r30) ; Get the level of top saved context - lwz r14,SAVprev(r30) ; Get the previous savearea + lwz r14,SAVprev+4(r30) ; Get the previous savearea cmplw r11,r31 ; Are live and saved the same? bne+ vrenable ; Level not the same, nothing to pop, go enable and exit... mr r3,r30 ; Get the old savearea (we popped it before) + stw r11,VMXsave(r22) ; Pop the vector stack bl EXT(save_ret) ; Toss it b vrenable ; Go enable and exit... @@ -2406,11 +1432,13 @@ vsthesame: LEXT(toss_live_vec) + lis r0,hi16(MASK(MSR_VEC)) ; Get VEC mfmsr r9 ; Get the MSR - rlwinm r0,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interuptions - rlwinm. r8,r9,0,MSR_VEC_BIT,MSR_VEC_BIT ; Is vector on right now? - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Make sure vector is turned off - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Make sure fpu is turned off + ori r0,r0,lo16(MASK(MSR_FP)) ; Add in FP + rlwinm. r8,r9,0,MSR_VEC_BIT,MSR_VEC_BIT ; Are vectors on right now? + andc r9,r9,r0 ; Force off VEC and FP + ori r0,r0,lo16(MASK(MSR_EE)) ; Turn off EE + andc r0,r9,r0 ; Turn off EE now mtmsr r0 ; No interruptions isync beq+ tlvnotours ; Vector off, can not be live here... 
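This MSR preamble, which also opens fpu_save, vec_save, and toss_live_fpu above, now builds one VEC|FP mask and clears it with andc instead of chained rlwinm bit clears. A C model using the architectural MSR bit values (the helper name is illustrative):

    #include <stdint.h>

    #define MSR_VEC 0x02000000u   /* AltiVec available */
    #define MSR_FP  0x00002000u   /* floating point available */
    #define MSR_EE  0x00008000u   /* external interrupts enabled */

    /* Returns the working MSR for the body of the routine; *restore gets
     * the value moved back at exit (VEC/FP forced off, EE as it was). */
    static uint32_t msr_preamble(uint32_t msr, uint32_t *restore)
    {
        uint32_t fac = MSR_VEC | MSR_FP;
        *restore = msr & ~fac;        /* andc r9,r9,r0: force off VEC and FP */
        return *restore & ~MSR_EE;    /* andc r0,r9,r0: run with EE off too */
    }

fpu_save and vec_save additionally OR the one facility they need back into the working value before the mtmsr.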
@@ -2442,12 +1470,16 @@ tlvnotours: lwz r11,VMXcpu(r3) ; Get the cpu on which we last loaded contex li r0,0 ; Set a 0 to invalidate context tlvinvothr: lwarx r12,r10,r11 ; Get the owner - cmplw r12,r3 ; Does he still have this context? - bne+ tlvexit ; Nope, leave... - stwcx. r0,r10,r11 ; Try to invalidate it - bne- tlvinvothr ; Try again if there was a collision... -tlvexit: mtmsr r9 ; Restore interruptions + sub r0,r12,r3 ; Subtract one from the other + sub r8,r3,r12 ; Subtract the other from the one + or r8,r8,r0 ; Combine them + srawi r8,r8,31 ; Get a 0 if equal or -1 if not + and r12,r12,r8 ; Make 0 if same, unchanged if not + stwcx. r12,r10,r11 ; Try to invalidate it + bne-- tlvinvothr ; Try again if there was a collision... + + mtmsr r9 ; Restore interruptions isync ; Could be turning off vectors here blr ; Leave.... @@ -2477,22 +1509,26 @@ LEXT(vec_trash) ori r12,r12,lo16(EXT(per_proc_info)) ; Set base per_proc li r10,VMXowner ; Displacement to vector owner add r11,r12,r11 ; Point to the owner per_proc - li r0,0 ; Set a 0 to invalidate context vtinvothr: lwarx r12,r10,r11 ; Get the owner - cmplw r12,r3 ; Does he still have this context? - bne vtnotlive ; Nope, not live anywhere... - stwcx. r0,r10,r11 ; Try to invalidate it - bne- vtinvothr ; Try again if there was a collision... -vtnotlive: beqlr+ cr1 ; Leave if there is no savearea + sub r0,r12,r3 ; Subtract one from the other + sub r8,r3,r12 ; Subtract the other from the one + or r8,r8,r0 ; Combine them + srawi r8,r8,31 ; Get a 0 if equal or -1 if not + and r12,r12,r8 ; Make 0 if same, unchanged if not + stwcx. r12,r10,r11 ; Try to invalidate it + bne-- vtinvothr ; Try again if there was a collision... + + + beqlr++ cr1 ; Leave if there is no savearea lwz r8,SAVlevel(r9) ; Get the level of the savearea cmplw r8,r11 ; Savearea for the current level? - bnelr+ ; No, nothing to release... + bnelr++ ; No, nothing to release... - lwz r8,SAVprev(r9) ; Pick up the previous area + lwz r8,SAVprev+4(r9) ; Pick up the previous area mr. r8,r8 ; Is there a previous? - beq- vtnoprev ; Nope... + beq-- vtnoprev ; Nope... lwz r7,SAVlevel(r8) ; Get the level associated with save vtnoprev: stw r8,VMXsave(r3) ; Dequeue this savearea @@ -2511,8 +1547,7 @@ vtnoprev: stw r8,VMXsave(r3) ; Dequeue this savearea LEXT(fctx_test) - mfsprg r3,0 ; Get the per_proc block - lwz r3,PP_ACTIVE_THREAD(r3) ; Get the thread pointer + mfsprg r3,1 ; Get the current thread mr. r3,r3 ; Are we actually up and running? beqlr- ; No... @@ -2524,3 +1559,660 @@ LEXT(fctx_test) mtspr vrsave,r5 ; Set VRSave vor v0,v0,v0 ; Use vectors blr + + +// ******************* +// * f p _ s t o r e * +// ******************* +// +// Store FPRs into a save area. Called by fpu_save and fpu_switch. +// +// When called: +// floating pt is enabled +// r3 = ptr to save area +// +// We destroy: +// r11. + +fp_store: + mfsprg r11,2 ; get feature flags + mtcrf 0x02,r11 ; put cache line size bits in cr6 + la r11,savefp0(r3) ; point to 1st line + dcbz128 0,r11 ; establish 1st line no matter what linesize is + bt-- pf32Byteb,fp_st32 ; skip if a 32-byte machine +// Store the FPRs on a 128-byte machine. 
+ + stfd f0,savefp0(r3) + stfd f1,savefp1(r3) + la r11,savefp16(r3) ; Point to the 2nd cache line + stfd f2,savefp2(r3) + stfd f3,savefp3(r3) + dcbz128 0,r11 ; establish 2nd line + stfd f4,savefp4(r3) + stfd f5,savefp5(r3) + stfd f6,savefp6(r3) + stfd f7,savefp7(r3) + stfd f8,savefp8(r3) + stfd f9,savefp9(r3) + stfd f10,savefp10(r3) + stfd f11,savefp11(r3) + stfd f12,savefp12(r3) + stfd f13,savefp13(r3) + stfd f14,savefp14(r3) + stfd f15,savefp15(r3) + stfd f16,savefp16(r3) + stfd f17,savefp17(r3) + stfd f18,savefp18(r3) + stfd f19,savefp19(r3) + stfd f20,savefp20(r3) + stfd f21,savefp21(r3) + stfd f22,savefp22(r3) + stfd f23,savefp23(r3) + stfd f24,savefp24(r3) + stfd f25,savefp25(r3) + stfd f26,savefp26(r3) + stfd f27,savefp27(r3) + stfd f28,savefp28(r3) + stfd f29,savefp29(r3) + stfd f30,savefp30(r3) + stfd f31,savefp31(r3) + blr + +// Store FPRs on a 32-byte machine. + +fp_st32: + la r11,savefp4(r3) ; Point to the 2nd line + stfd f0,savefp0(r3) + dcbz 0,r11 ; Allocate cache + stfd f1,savefp1(r3) + stfd f2,savefp2(r3) + la r11,savefp8(r3) ; Point to the 3rd line + stfd f3,savefp3(r3) + dcbz 0,r11 ; Allocate cache + stfd f4,savefp4(r3) + stfd f5,savefp5(r3) + stfd f6,savefp6(r3) + la r11,savefp12(r3) ; Point to the 4th line + stfd f7,savefp7(r3) + dcbz 0,r11 ; Allocate cache + stfd f8,savefp8(r3) + stfd f9,savefp9(r3) + stfd f10,savefp10(r3) + la r11,savefp16(r3) ; Point to the 5th line + stfd f11,savefp11(r3) + dcbz 0,r11 ; Allocate cache + stfd f12,savefp12(r3) + stfd f13,savefp13(r3) + stfd f14,savefp14(r3) + la r11,savefp20(r3) ; Point to the 6th line + stfd f15,savefp15(r3) + dcbz 0,r11 ; Allocate cache + stfd f16,savefp16(r3) + stfd f17,savefp17(r3) + stfd f18,savefp18(r3) + la r11,savefp24(r3) ; Point to the 7th line + stfd f19,savefp19(r3) + dcbz 0,r11 ; Allocate cache + stfd f20,savefp20(r3) + + stfd f21,savefp21(r3) + stfd f22,savefp22(r3) + la r11,savefp28(r3) ; Point to the 8th line + stfd f23,savefp23(r3) + dcbz 0,r11 ; allocate it + stfd f24,savefp24(r3) + stfd f25,savefp25(r3) + stfd f26,savefp26(r3) + stfd f27,savefp27(r3) + + stfd f28,savefp28(r3) + stfd f29,savefp29(r3) + stfd f30,savefp30(r3) + stfd f31,savefp31(r3) + blr + + +// ******************* +// * v r _ s t o r e * +// ******************* +// +// Store VRs into savearea, according to bits set in passed vrsave bitfield. This routine is used +// both by vec_save and vec_switch. In order to minimize conditional branches and touching in +// unnecessary cache blocks, we either save all or none of the VRs in a block. We have separate paths +// for each cache block size. +// +// When called: +// interrupts are off, vectors are enabled +// r3 = ptr to save area +// r10 = vrsave (not 0) +// +// We destroy: +// r4 - r11, all CRs. + +vr_store: + mfsprg r9,2 ; get feature flags + stw r10,savevrvalid(r3) ; Save the validity information in savearea + slwi r8,r10,1 ; Shift over 1 + mtcrf 0x02,r9 ; put cache line size bits in cr6 where we can test + or r8,r10,r8 ; r8 <- even bits show which pairs are in use + bt-- pf32Byteb,vr_st32 ; skip if 32-byte cacheline processor + + +; Save vectors on a 128-byte linesize processor. We save all or none of the 8 registers in each of +; the four cache lines. This minimizes mispredicted branches yet handles cache lines optimally. 
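The all-or-none grouping described above is driven by "smearing" vrsave so that the leading bit of each group reports whether any VR in the group is live; the smeared mask is dropped into the CR with mtcrf and tested with bf in the code that follows. A C model of the computation (group_mask and line_in_use are illustrative names; PowerPC numbers bits from the MSB):

    #include <stdint.h>

    static uint32_t group_mask(uint32_t vrsave, unsigned group)
    {
        uint32_t m = vrsave;          /* bit n (0x80000000 >> n) => VRn live */
        for (unsigned s = 1; s < group; s <<= 1)
            m |= m << s;              /* bit n now covers VRn..VRn+2s-1 */
        return m;                     /* bit n covers VRn..VRn+group-1 */
    }

    /* 128-byte machines test bits 0, 8, 16, 24 (a line holds 8 VRs);
     * 32-byte machines test the even bits (a line holds a pair). */
    static int line_in_use(uint32_t vrsave, unsigned first_vr, unsigned group)
    {
        return (group_mask(vrsave, group) & (0x80000000u >> first_vr)) != 0;
    }

This mirrors the slwi/or cascade at the top of vr_store: pairs first, then groups of four and eight on the 128-byte path.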
+ + slwi r7,r8,2 ; shift groups-of-2 over by 2 + li r4,16 ; load offsets for X-form stores + or r8,r7,r8 ; show if any in group of 4 are in use + li r5,32 + slwi r7,r8,4 ; shift groups-of-4 over by 4 + li r6,48 + or r11,r7,r8 ; show if any in group of 8 are in use + li r7,64 + mtcrf 0x80,r11 ; set CRs one at a time (faster) + li r8,80 + mtcrf 0x20,r11 + li r9,96 + mtcrf 0x08,r11 + li r10,112 + mtcrf 0x02,r11 + + bf 0,vr_st64b ; skip if none of vr0-vr7 are in use + la r11,savevr0(r3) ; get address of this group of registers in save area + dcbz128 0,r11 ; zero the line + stvxl v0,0,r11 ; save 8 VRs in the line + stvxl v1,r4,r11 + stvxl v2,r5,r11 + stvxl v3,r6,r11 + stvxl v4,r7,r11 + stvxl v5,r8,r11 + stvxl v6,r9,r11 + stvxl v7,r10,r11 + +vr_st64b: + bf 8,vr_st64c ; skip if none of vr8-vr15 are in use + la r11,savevr8(r3) ; get address of this group of registers in save area + dcbz128 0,r11 ; zero the line + stvxl v8,0,r11 ; save 8 VRs in the line + stvxl v9,r4,r11 + stvxl v10,r5,r11 + stvxl v11,r6,r11 + stvxl v12,r7,r11 + stvxl v13,r8,r11 + stvxl v14,r9,r11 + stvxl v15,r10,r11 + +vr_st64c: + bf 16,vr_st64d ; skip if none of vr16-vr23 are in use + la r11,savevr16(r3) ; get address of this group of registers in save area + dcbz128 0,r11 ; zero the line + stvxl v16,0,r11 ; save 8 VRs in the line + stvxl v17,r4,r11 + stvxl v18,r5,r11 + stvxl v19,r6,r11 + stvxl v20,r7,r11 + stvxl v21,r8,r11 + stvxl v22,r9,r11 + stvxl v23,r10,r11 + +vr_st64d: + bflr 24 ; done if none of vr24-vr31 are in use + la r11,savevr24(r3) ; get address of this group of registers in save area + dcbz128 0,r11 ; zero the line + stvxl v24,0,r11 ; save 8 VRs in the line + stvxl v25,r4,r11 + stvxl v26,r5,r11 + stvxl v27,r6,r11 + stvxl v28,r7,r11 + stvxl v29,r8,r11 + stvxl v30,r9,r11 + stvxl v31,r10,r11 + blr + +; Save vectors on a 32-byte linesize processor. We save in 16 groups of 2: we either save both +; or neither in each group. This cuts down on conditional branches. 
+; r8 = bitmask with bit n set (for even n) if either of that pair of VRs is in use +; r3 = savearea + +vr_st32: + mtcrf 0xFF,r8 ; set CR bits so we can branch on them + li r4,16 ; load offset for X-form stores + + bf 0,vr_st32b ; skip if neither VR in this pair is in use + la r11,savevr0(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v0,0,r11 ; save the two VRs in the line + stvxl v1,r4,r11 + +vr_st32b: + bf 2,vr_st32c ; skip if neither VR in this pair is in use + la r11,savevr2(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v2,0,r11 ; save the two VRs in the line + stvxl v3,r4,r11 + +vr_st32c: + bf 4,vr_st32d ; skip if neither VR in this pair is in use + la r11,savevr4(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v4,0,r11 ; save the two VRs in the line + stvxl v5,r4,r11 + +vr_st32d: + bf 6,vr_st32e ; skip if neither VR in this pair is in use + la r11,savevr6(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v6,0,r11 ; save the two VRs in the line + stvxl v7,r4,r11 + +vr_st32e: + bf 8,vr_st32f ; skip if neither VR in this pair is in use + la r11,savevr8(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v8,0,r11 ; save the two VRs in the line + stvxl v9,r4,r11 + +vr_st32f: + bf 10,vr_st32g ; skip if neither VR in this pair is in use + la r11,savevr10(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v10,0,r11 ; save the two VRs in the line + stvxl v11,r4,r11 + +vr_st32g: + bf 12,vr_st32h ; skip if neither VR in this pair is in use + la r11,savevr12(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v12,0,r11 ; save the two VRs in the line + stvxl v13,r4,r11 + +vr_st32h: + bf 14,vr_st32i ; skip if neither VR in this pair is in use + la r11,savevr14(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v14,0,r11 ; save the two VRs in the line + stvxl v15,r4,r11 + +vr_st32i: + bf 16,vr_st32j ; skip if neither VR in this pair is in use + la r11,savevr16(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v16,0,r11 ; save the two VRs in the line + stvxl v17,r4,r11 + +vr_st32j: + bf 18,vr_st32k ; skip if neither VR in this pair is in use + la r11,savevr18(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v18,0,r11 ; save the two VRs in the line + stvxl v19,r4,r11 + +vr_st32k: + bf 20,vr_st32l ; skip if neither VR in this pair is in use + la r11,savevr20(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v20,0,r11 ; save the two VRs in the line + stvxl v21,r4,r11 + +vr_st32l: + bf 22,vr_st32m ; skip if neither VR in this pair is in use + la r11,savevr22(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v22,0,r11 ; save the two VRs in the line + stvxl v23,r4,r11 + +vr_st32m: + bf 24,vr_st32n ; skip if neither VR in this pair is in use + la r11,savevr24(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the 
line wo reading it + stvxl v24,0,r11 ; save the two VRs in the line + stvxl v25,r4,r11 + +vr_st32n: + bf 26,vr_st32o ; skip if neither VR in this pair is in use + la r11,savevr26(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v26,0,r11 ; save the two VRs in the line + stvxl v27,r4,r11 + +vr_st32o: + bf 28,vr_st32p ; skip if neither VR in this pair is in use + la r11,savevr28(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v28,0,r11 ; save the two VRs in the line + stvxl v29,r4,r11 + +vr_st32p: + bflr 30 ; done if neither VR in this pair is in use + la r11,savevr30(r3) ; get address of this group of registers in save area + dcba 0,r11 ; establish the line wo reading it + stvxl v30,0,r11 ; save the two VRs in the line + stvxl v31,r4,r11 + blr + + +// ***************** +// * v r _ l o a d * +// ***************** +// +// Load live VRs from a savearea, according to bits set in a passed vector. This is the reverse +// of "vr_store". Like it, we avoid touching unnecessary cache blocks and minimize conditional +// branches by loading all VRs from a cache line, if we have to load any. If we don't load the VRs +// in a cache line, we bug them. Note that this behavior is slightly different from earlier kernels, +// which would bug all VRs that aren't live. +// +// When called: +// interrupts are off, vectors are enabled +// r3 = ptr to save area +// r10 = vector of live regs to load (ie, savevrsave & savevrvalid, may be 0) +// v31 = bugbug constant (0x7FFFDEAD7FFFDEAD7FFFDEAD7FFFDEAD) +// +// We destroy: +// r4 - r11, all CRs. + +vr_load: + mfsprg r9,2 ; get feature flags + li r6,1 ; assuming 32-byte, get (#VRs)-1 in a cacheline + mtcrf 0x02,r9 ; set cache line size bits in cr6 + lis r7,0xC000 ; assuming 32-byte, set bits 0-1 + bt-- pf32Byteb,vr_ld0 ; skip if 32-bit processor + li r6,7 ; 128-byte machines have 8 VRs in a cacheline + lis r7,0xFF00 ; so set bits 0-7 + +// Loop touching in cache blocks we will load from. +// r3 = savearea ptr +// r5 = we light bits for the VRs we will be loading +// r6 = 1 if 32-byte, 7 if 128-byte +// r7 = 0xC0000000 if 32-byte, 0xFF000000 if 128-byte +// r10 = live VR bits +// v31 = bugbug constant + +vr_ld0: + li r5,0 ; initialize set of VRs to load + la r11,savevr0(r3) ; get address of register file + b vr_ld2 ; enter loop in middle + + .align 5 +vr_ld1: ; loop over each cache line we will load + dcbt r4,r11 ; start prefetch of the line + andc r10,r10,r9 ; turn off the bits in this line + or r5,r5,r9 ; we will load all these +vr_ld2: ; initial entry pt + cntlzw r4,r10 ; get offset to next live VR + andc r4,r4,r6 ; cacheline align it + srw. r9,r7,r4 ; position bits for VRs in that cache line + slwi r4,r4,4 ; get byte offset within register file to that line + bne vr_ld1 ; loop if more bits in r10 + + bf-- pf128Byteb,vr_ld32 ; skip if not 128-byte lines + +// Handle a processor with 128-byte cache lines. Four groups of 8 VRs. 
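+// (The touch-ahead loop above can be read as this hypothetical C -- clz32,
+// dcbt, lineMask, and lineBits are illustrative stand-ins, not real kernel
+// interfaces:
+//
+//		while (live) {						// live = vrsave & savevrvalid
+//			n = clz32(live) & ~lineMask;	// first live VR, aligned to its line
+//			dcbt(savevr + n*16);			// start prefetching that line
+//			live   &= ~(lineBits >> n);		// drop every VR in the line
+//			loadme |=  (lineBits >> n);		// remember to load the whole line
+//		}
+//
+// so each line is touched at most once, and r5 ends up flagging the lines to load.)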
+//	r3 = savearea ptr
+//	r5 = 1st bit in each cacheline is 1 iff any reg in that line must be loaded
+//	r11 = addr(savevr0)
+//	v31 = bugbug constant
+
+		mtcrf	0x80,r5						; set up bits for conditional branches
+		li		r4,16						; load offsets for X-form stores
+		li		r6,48
+		mtcrf	0x20,r5						; load CRs one at a time, which is faster
+		li		r7,64
+		li		r8,80
+		mtcrf	0x08,r5
+		li		r9,96
+		li		r10,112
+		mtcrf	0x02,r5
+		li		r5,32
+
+		bt		0,vr_ld128a					; skip if this line must be loaded
+		vor		v0,v31,v31					; no VR must be loaded, so bug them all
+		vor		v1,v31,v31
+		vor		v2,v31,v31
+		vor		v3,v31,v31
+		vor		v4,v31,v31
+		vor		v5,v31,v31
+		vor		v6,v31,v31
+		vor		v7,v31,v31
+		b		vr_ld128b
+vr_ld128a:									; must load from this line
+		lvxl	v0,0,r11
+		lvxl	v1,r4,r11
+		lvxl	v2,r5,r11
+		lvxl	v3,r6,r11
+		lvxl	v4,r7,r11
+		lvxl	v5,r8,r11
+		lvxl	v6,r9,r11
+		lvxl	v7,r10,r11
+
+vr_ld128b:									; here to handle next cache line
+		la		r11,savevr8(r3)				; load offset to it
+		bt		8,vr_ld128c					; skip if this line must be loaded
+		vor		v8,v31,v31					; no VR must be loaded, so bug them all
+		vor		v9,v31,v31
+		vor		v10,v31,v31
+		vor		v11,v31,v31
+		vor		v12,v31,v31
+		vor		v13,v31,v31
+		vor		v14,v31,v31
+		vor		v15,v31,v31
+		b		vr_ld128d
+vr_ld128c:									; must load from this line
+		lvxl	v8,0,r11
+		lvxl	v9,r4,r11
+		lvxl	v10,r5,r11
+		lvxl	v11,r6,r11
+		lvxl	v12,r7,r11
+		lvxl	v13,r8,r11
+		lvxl	v14,r9,r11
+		lvxl	v15,r10,r11
+
+vr_ld128d:									; here to handle next cache line
+		la		r11,savevr16(r3)			; load offset to it
+		bt		16,vr_ld128e				; skip if this line must be loaded
+		vor		v16,v31,v31					; no VR must be loaded, so bug them all
+		vor		v17,v31,v31
+		vor		v18,v31,v31
+		vor		v19,v31,v31
+		vor		v20,v31,v31
+		vor		v21,v31,v31
+		vor		v22,v31,v31
+		vor		v23,v31,v31
+		b		vr_ld128f
+vr_ld128e:									; must load from this line
+		lvxl	v16,0,r11
+		lvxl	v17,r4,r11
+		lvxl	v18,r5,r11
+		lvxl	v19,r6,r11
+		lvxl	v20,r7,r11
+		lvxl	v21,r8,r11
+		lvxl	v22,r9,r11
+		lvxl	v23,r10,r11
+
+vr_ld128f:									; here to handle next cache line
+		la		r11,savevr24(r3)			; load offset to it
+		bt		24,vr_ld128g				; skip if this line must be loaded
+		vor		v24,v31,v31					; no VR must be loaded, so bug them all
+		vor		v25,v31,v31
+		vor		v26,v31,v31
+		vor		v27,v31,v31
+		vor		v28,v31,v31
+		vor		v29,v31,v31
+		vor		v30,v31,v31
+		blr
+vr_ld128g:									; must load from this line
+		lvxl	v24,0,r11
+		lvxl	v25,r4,r11
+		lvxl	v26,r5,r11
+		lvxl	v27,r6,r11
+		lvxl	v28,r7,r11
+		lvxl	v29,r8,r11
+		lvxl	v30,r9,r11
+		lvxl	v31,r10,r11
+		blr
+
+// Handle a processor with 32-byte cache lines.  Sixteen groups of two VRs.
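+// (A note on the bug pattern: each 32-bit lane of the 0x7FFFDEAD constant in
+// v31 has an all-ones exponent field and a nonzero mantissa, so read as
+// single-precision data every lane is a NaN -- presumably chosen so that use
+// of a VR that was deliberately not reloaded shows up quickly and legibly.)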
+// r5 = 1st bit in each cacheline is 1 iff any reg in that line must be loaded +// r11 = addr(savevr0) + +vr_ld32: + mtcrf 0xFF,r5 ; set up bits for conditional branches + li r4,16 ; load offset for X-form stores + + bt 0,vr_ld32load0 ; skip if we must load this line + vor v0,v31,v31 ; neither VR is live, so bug them both + vor v1,v31,v31 + b vr_ld32test2 +vr_ld32load0: ; must load VRs in this line + lvxl v0,0,r11 + lvxl v1,r4,r11 + +vr_ld32test2: ; here to handle next cache line + la r11,savevr2(r3) ; get offset to next cache line + bt 2,vr_ld32load2 ; skip if we must load this line + vor v2,v31,v31 ; neither VR is live, so bug them both + vor v3,v31,v31 + b vr_ld32test4 +vr_ld32load2: ; must load VRs in this line + lvxl v2,0,r11 + lvxl v3,r4,r11 + +vr_ld32test4: ; here to handle next cache line + la r11,savevr4(r3) ; get offset to next cache line + bt 4,vr_ld32load4 ; skip if we must load this line + vor v4,v31,v31 ; neither VR is live, so bug them both + vor v5,v31,v31 + b vr_ld32test6 +vr_ld32load4: ; must load VRs in this line + lvxl v4,0,r11 + lvxl v5,r4,r11 + +vr_ld32test6: ; here to handle next cache line + la r11,savevr6(r3) ; get offset to next cache line + bt 6,vr_ld32load6 ; skip if we must load this line + vor v6,v31,v31 ; neither VR is live, so bug them both + vor v7,v31,v31 + b vr_ld32test8 +vr_ld32load6: ; must load VRs in this line + lvxl v6,0,r11 + lvxl v7,r4,r11 + +vr_ld32test8: ; here to handle next cache line + la r11,savevr8(r3) ; get offset to next cache line + bt 8,vr_ld32load8 ; skip if we must load this line + vor v8,v31,v31 ; neither VR is live, so bug them both + vor v9,v31,v31 + b vr_ld32test10 +vr_ld32load8: ; must load VRs in this line + lvxl v8,0,r11 + lvxl v9,r4,r11 + +vr_ld32test10: ; here to handle next cache line + la r11,savevr10(r3) ; get offset to next cache line + bt 10,vr_ld32load10 ; skip if we must load this line + vor v10,v31,v31 ; neither VR is live, so bug them both + vor v11,v31,v31 + b vr_ld32test12 +vr_ld32load10: ; must load VRs in this line + lvxl v10,0,r11 + lvxl v11,r4,r11 + +vr_ld32test12: ; here to handle next cache line + la r11,savevr12(r3) ; get offset to next cache line + bt 12,vr_ld32load12 ; skip if we must load this line + vor v12,v31,v31 ; neither VR is live, so bug them both + vor v13,v31,v31 + b vr_ld32test14 +vr_ld32load12: ; must load VRs in this line + lvxl v12,0,r11 + lvxl v13,r4,r11 + +vr_ld32test14: ; here to handle next cache line + la r11,savevr14(r3) ; get offset to next cache line + bt 14,vr_ld32load14 ; skip if we must load this line + vor v14,v31,v31 ; neither VR is live, so bug them both + vor v15,v31,v31 + b vr_ld32test16 +vr_ld32load14: ; must load VRs in this line + lvxl v14,0,r11 + lvxl v15,r4,r11 + +vr_ld32test16: ; here to handle next cache line + la r11,savevr16(r3) ; get offset to next cache line + bt 16,vr_ld32load16 ; skip if we must load this line + vor v16,v31,v31 ; neither VR is live, so bug them both + vor v17,v31,v31 + b vr_ld32test18 +vr_ld32load16: ; must load VRs in this line + lvxl v16,0,r11 + lvxl v17,r4,r11 + +vr_ld32test18: ; here to handle next cache line + la r11,savevr18(r3) ; get offset to next cache line + bt 18,vr_ld32load18 ; skip if we must load this line + vor v18,v31,v31 ; neither VR is live, so bug them both + vor v19,v31,v31 + b vr_ld32test20 +vr_ld32load18: ; must load VRs in this line + lvxl v18,0,r11 + lvxl v19,r4,r11 + +vr_ld32test20: ; here to handle next cache line + la r11,savevr20(r3) ; get offset to next cache line + bt 20,vr_ld32load20 ; skip if we must load this line 
+ vor v20,v31,v31 ; neither VR is live, so bug them both + vor v21,v31,v31 + b vr_ld32test22 +vr_ld32load20: ; must load VRs in this line + lvxl v20,0,r11 + lvxl v21,r4,r11 + +vr_ld32test22: ; here to handle next cache line + la r11,savevr22(r3) ; get offset to next cache line + bt 22,vr_ld32load22 ; skip if we must load this line + vor v22,v31,v31 ; neither VR is live, so bug them both + vor v23,v31,v31 + b vr_ld32test24 +vr_ld32load22: ; must load VRs in this line + lvxl v22,0,r11 + lvxl v23,r4,r11 + +vr_ld32test24: ; here to handle next cache line + la r11,savevr24(r3) ; get offset to next cache line + bt 24,vr_ld32load24 ; skip if we must load this line + vor v24,v31,v31 ; neither VR is live, so bug them both + vor v25,v31,v31 + b vr_ld32test26 +vr_ld32load24: ; must load VRs in this line + lvxl v24,0,r11 + lvxl v25,r4,r11 + +vr_ld32test26: ; here to handle next cache line + la r11,savevr26(r3) ; get offset to next cache line + bt 26,vr_ld32load26 ; skip if we must load this line + vor v26,v31,v31 ; neither VR is live, so bug them both + vor v27,v31,v31 + b vr_ld32test28 +vr_ld32load26: ; must load VRs in this line + lvxl v26,0,r11 + lvxl v27,r4,r11 + +vr_ld32test28: ; here to handle next cache line + la r11,savevr28(r3) ; get offset to next cache line + bt 28,vr_ld32load28 ; skip if we must load this line + vor v28,v31,v31 ; neither VR is live, so bug them both + vor v29,v31,v31 + b vr_ld32test30 +vr_ld32load28: ; must load VRs in this line + lvxl v28,0,r11 + lvxl v29,r4,r11 + +vr_ld32test30: ; here to handle next cache line + la r11,savevr30(r3) ; get offset to next cache line + bt 30,vr_ld32load30 ; skip if we must load this line + vor v30,v31,v31 ; neither VR is live, so bug them both + blr +vr_ld32load30: ; must load VRs in this line + lvxl v30,0,r11 + lvxl v31,r4,r11 + blr diff --git a/osfmk/ppc/db_asm.s b/osfmk/ppc/db_asm.s index d439fbb7c..7eeeb4e4b 100644 --- a/osfmk/ppc/db_asm.s +++ b/osfmk/ppc/db_asm.s @@ -32,60 +32,6 @@ #include #include -/* void - * db_phys_copy(src, dst, bytecount) - * vm_offset_t src; - * vm_offset_t dst; - * int bytecount - * - * This routine will copy bytecount bytes from physical address src to physical - * address dst. - */ -ENTRY(db_phys_copy, TAG_NO_FRAME_USED) - - /* Switch off data translations */ - mfmsr r6 - rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 - mtmsr r7 - isync /* Ensure data translations are off */ - - subi r3, r3, 4 - subi r4, r4, 4 - - cmpwi r5, 3 - ble- .L_db_phys_copy_bytes -.L_db_phys_copy_loop: - lwz r0, 4(r3) - addi r3, r3, 4 - subi r5, r5, 4 - stw r0, 4(r4) - addi r4, r4, 4 - cmpwi r5, 3 - bgt+ .L_db_phys_copy_loop - - /* If no leftover bytes, we're done now */ - cmpwi r5, 0 - beq+ .L_db_phys_copy_done - -.L_db_phys_copy_bytes: - addi r3, r3, 3 - addi r4, r4, 3 -.L_db_phys_copy_byte_loop: - lbz r0, 1(r3) - addi r3, r3, 1 - subi r5, r5, 1 - stb r0, 1(r4) - addi r4, r4, 1 - cmpwi r5, 0 - bne+ .L_db_phys_copy_loop - -.L_db_phys_copy_done: - mtmsr r6 /* Restore original translations */ - isync /* Ensure data translations are off */ - - blr /* void * db_phys_cmp(src_a, src_b, bytecount) @@ -97,11 +43,15 @@ ENTRY(db_phys_copy, TAG_NO_FRAME_USED) * address src_b. 
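 *
 * (As a rough, hypothetical C model -- with data translation switched off so
 * the two pointers are treated as physical addresses:
 *
 *		while (bytecount--) if (*src_a++ != *src_b++) return FALSE;
 *		return TRUE;
 *
 * the routine itself may work in larger units; this is just the shape.)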
*/ +#warning THIS IS BROKEN FOR 64-BIT + /* Switch off data translations */ + lis r7,hi16(MASK(MSR_VEC)) + ori r7,r7,lo16(MASK(MSR_FP)) mfmsr r6 - rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 + andc r6,r6,r7 ; Force FP and vec off + ori r7,r7,lo16(MASK(MSR_DR)) ; Set the DR bit + andc r7,r6,r7 ; Force DR off mtmsr r7 isync /* Ensure data translations are off */ diff --git a/osfmk/ppc/db_disasm.c b/osfmk/ppc/db_disasm.c index 83f96b0f6..810aeb110 100644 --- a/osfmk/ppc/db_disasm.c +++ b/osfmk/ppc/db_disasm.c @@ -69,6 +69,7 @@ db_disasm( db_task_printsym(db_disasm_symaddr, DB_STGY_ANY, task); db_printf(">"); } + db_printf("\n"); /* Make sure we have a new line for multiline displays */ dis_done(); return (loc+4); } diff --git a/osfmk/ppc/db_interface.c b/osfmk/ppc/db_interface.c index 3961ece78..ccce9c5b6 100644 --- a/osfmk/ppc/db_interface.c +++ b/osfmk/ppc/db_interface.c @@ -118,11 +118,6 @@ extern void kdbprinttrap( int code, int *pc, int sp); -extern int db_user_to_kernel_address( - task_t task, - vm_offset_t addr, - unsigned *kaddr, - int flag); extern void db_write_bytes_user_space( vm_offset_t addr, int size, @@ -253,68 +248,22 @@ kdbprinttrap( /* * */ -vm_offset_t db_vtophys( +addr64_t db_vtophys( pmap_t pmap, vm_offset_t va) { - register mapping *mp; - register vm_offset_t pa; + ppnum_t pp; + addr64_t pa; - pa = (vm_offset_t)LRA(pmap->space,(void *)va); - - if (pa != 0) - return(pa); - - mp = hw_lock_phys_vir(pmap->space, va); - if((unsigned int)mp&1) { - return 0; - } + pp = pmap_find_phys(pmap, (addr64_t)va); - if(!mp) { /* If it was not a normal page */ - pa = hw_cvp_blk(pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */ - return pa; /* Return physical address */ - } - - mp = hw_cpv(mp); /* Convert to virtual address */ - - if(!mp->physent) { - pa = (vm_offset_t)((mp->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); - } else { - pa = (vm_offset_t)((mp->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); - hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK); - } + if (pp == 0) return(0); /* Couldn't find it */ + + pa = ((addr64_t)pp << 12) | (addr64_t)(va & 0xFFF); /* Get physical address */ return(pa); } -int -db_user_to_kernel_address( - task_t task, - vm_offset_t addr, - unsigned *kaddr, - int flag) -{ - unsigned int sr_val, raddr; - - raddr = (unsigned int)db_vtophys(task->map->pmap, trunc_page(addr)); /* Get the real address */ - - if (!raddr) { - if (flag) { - db_printf("\nno memory is assigned to address %08x\n", addr); - db_error(0); - /* NOTREACHED */ - } - return -1; - } - sr_val = SEG_REG_PROT | task->map->pmap->space - | ((addr >> 8) & 0x00F00000); - - mtsr(SR_COPYIN_NUM, sr_val); - sync(); - *kaddr = (addr & 0x0fffffff) | (SR_COPYIN_NUM << 28); - return(0); -} - /* * Read bytes from task address space for debugger. 
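 *
 * (Rough shape of the loop below, as a hypothetical sketch: translate source
 * and destination to physical with db_vtophys, clamp each pass so it stays on
 * a single page at both ends --
 *
 *		max = min(size, round_page_64(phys_src + 1) - phys_src,
 *		                round_page_64(phys_dst + 1) - phys_dst);
 *
 * then phys_copy() the chunk and sync_cache64() the destination.)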
*/ @@ -326,8 +275,8 @@ db_read_bytes( task_t task) { int n,max; - unsigned phys_dst; - unsigned phys_src; + addr64_t phys_dst; + addr64_t phys_src; pmap_t pmap; while (size > 0) { @@ -336,16 +285,15 @@ db_read_bytes( else pmap = kernel_pmap; - phys_src = (unsigned int)db_vtophys(pmap, trunc_page(addr)); + phys_src = db_vtophys(pmap, (vm_offset_t)addr); if (phys_src == 0) { db_printf("\nno memory is assigned to src address %08x\n", addr); db_error(0); /* NOTREACHED */ } - phys_src = phys_src| (addr & page_mask); - phys_dst = (unsigned int)db_vtophys(kernel_pmap, trunc_page(data)); + phys_dst = db_vtophys(kernel_pmap, (vm_offset_t)data); if (phys_dst == 0) { db_printf("\nno memory is assigned to dst address %08x\n", data); @@ -353,22 +301,20 @@ db_read_bytes( /* NOTREACHED */ } - phys_dst = phys_dst | (((vm_offset_t) data) & page_mask); - /* don't over-run any page boundaries - check src range */ - max = ppc_round_page(phys_src) - phys_src; + max = round_page_64(phys_src + 1) - phys_src; if (max > size) max = size; /* Check destination won't run over boundary either */ - n = ppc_round_page(phys_dst) - phys_dst; - if (n < max) - max = n; + n = round_page_64(phys_dst + 1) - phys_dst; + + if (n < max) max = n; size -= max; addr += max; phys_copy(phys_src, phys_dst, max); /* resync I+D caches */ - sync_cache(phys_dst, max); + sync_cache64(phys_dst, max); phys_src += max; phys_dst += max; @@ -386,13 +332,13 @@ db_write_bytes( task_t task) { int n,max; - unsigned phys_dst; - unsigned phys_src; + addr64_t phys_dst; + addr64_t phys_src; pmap_t pmap; while (size > 0) { - phys_src = (unsigned int)db_vtophys(kernel_pmap, trunc_page(data)); + phys_src = db_vtophys(kernel_pmap, (vm_offset_t)data); if (phys_src == 0) { db_printf("\nno memory is assigned to src address %08x\n", data); @@ -400,27 +346,24 @@ db_write_bytes( /* NOTREACHED */ } - phys_src = phys_src | (((vm_offset_t) data) & page_mask); - /* space stays as kernel space unless in another task */ if (task == NULL) pmap = kernel_pmap; else pmap = task->map->pmap; - phys_dst = (unsigned int)db_vtophys(pmap, trunc_page(addr)); + phys_dst = db_vtophys(pmap, (vm_offset_t)addr); if (phys_dst == 0) { db_printf("\nno memory is assigned to dst address %08x\n", addr); db_error(0); /* NOTREACHED */ } - phys_dst = phys_dst| (addr & page_mask); /* don't over-run any page boundaries - check src range */ - max = ppc_round_page(phys_src) - phys_src; + max = round_page_64(phys_src + 1) - phys_src; if (max > size) max = size; /* Check destination won't run over boundary either */ - n = ppc_round_page(phys_dst) - phys_dst; + n = round_page_64(phys_dst + 1) - phys_dst; if (n < max) max = n; size -= max; @@ -428,7 +371,7 @@ db_write_bytes( phys_copy(phys_src, phys_dst, max); /* resync I+D caches */ - sync_cache(phys_dst, max); + sync_cache64(phys_dst, max); phys_src += max; phys_dst += max; @@ -445,18 +388,16 @@ db_check_access( unsigned int kern_addr; if (task == kernel_task || task == TASK_NULL) { - if (kernel_task == TASK_NULL) - return(TRUE); + if (kernel_task == TASK_NULL) return(TRUE); task = kernel_task; } else if (task == TASK_NULL) { - if (current_act() == THR_ACT_NULL) - return(FALSE); + if (current_act() == THR_ACT_NULL) return(FALSE); task = current_act()->task; } + while (size > 0) { - if (db_user_to_kernel_address(task, addr, &kern_addr, 0) < 0) - return(FALSE); - n = ppc_trunc_page(addr+PPC_PGBYTES) - addr; + if(!pmap_find_phys(task->map->pmap, (addr64_t)addr)) return (FALSE); /* Fail if page not mapped */ + n = trunc_page_32(addr+PPC_PGBYTES) - addr; 
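+		/* n is now the run from addr to the end of its page; it is clamped to the remaining size below */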
if (n > size) n = size; size -= n; @@ -472,7 +413,7 @@ db_phys_eq( task_t task2, vm_offset_t addr2) { - vm_offset_t physa, physb; + addr64_t physa, physb; if ((addr1 & (PPC_PGBYTES-1)) != (addr2 & (PPC_PGBYTES-1))) /* Is byte displacement the same? */ return FALSE; @@ -483,8 +424,8 @@ db_phys_eq( task1 = current_act()->task; /* If so, use that one */ } - if(!(physa = db_vtophys(task1->map->pmap, trunc_page(addr1)))) return FALSE; /* Get real address of the first */ - if(!(physb = db_vtophys(task2->map->pmap, trunc_page(addr2)))) return FALSE; /* Get real address of the second */ + if(!(physa = db_vtophys(task1->map->pmap, (vm_offset_t)trunc_page_32(addr1)))) return FALSE; /* Get real address of the first */ + if(!(physb = db_vtophys(task2->map->pmap, (vm_offset_t)trunc_page_32(addr2)))) return FALSE; /* Get real address of the second */ return (physa == physb); /* Check if they are equal, then return... */ } @@ -492,6 +433,16 @@ db_phys_eq( #define DB_USER_STACK_ADDR (0xc0000000) #define DB_NAME_SEARCH_LIMIT (DB_USER_STACK_ADDR-(PPC_PGBYTES*3)) +boolean_t db_phys_cmp( + vm_offset_t a1, + vm_offset_t a2, + vm_size_t s1) { + + db_printf("db_phys_cmp: not implemented\n"); + return 0; +} + + int db_search_null( task_t task, @@ -503,65 +454,37 @@ db_search_null( register unsigned vaddr; register unsigned *kaddr; - kaddr = (unsigned *)*skaddr; - for (vaddr = *svaddr; vaddr > evaddr; ) { - if (vaddr % PPC_PGBYTES == 0) { - vaddr -= sizeof(unsigned); - if (db_user_to_kernel_address(task, vaddr, skaddr, 0) < 0) - return(-1); - kaddr = (unsigned *)*skaddr; - } else { - vaddr -= sizeof(unsigned); - kaddr--; - } - if ((*kaddr == 0) ^ (flag == 0)) { - *svaddr = vaddr; - *skaddr = (unsigned)kaddr; - return(0); - } - } + db_printf("db_search_null: not implemented\n"); + return(-1); } +unsigned char *getProcName(struct proc *proc); + void db_task_name( task_t task) { - register char *p; + register unsigned char *p; register int n; unsigned int vaddr, kaddr; + unsigned char tname[33]; + int i; - vaddr = DB_USER_STACK_ADDR; - kaddr = 0; - - /* - * skip nulls at the end - */ - if (db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 0) < 0) { - db_printf(DB_NULL_TASK_NAME); - return; - } - /* - * search start of args - */ - if (db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 1) < 0) { - db_printf(DB_NULL_TASK_NAME); - return; - } - - n = DB_TASK_NAME_LEN-1; - p = (char *)kaddr + sizeof(unsigned); - for (vaddr += sizeof(int); vaddr < DB_USER_STACK_ADDR && n > 0; - vaddr++, p++, n--) { - if (vaddr % PPC_PGBYTES == 0) { - if (db_user_to_kernel_address(task, vaddr, &kaddr, 0) <0) - return; - p = (char*)kaddr; - } - db_printf("%c", (*p < ' ' || *p > '~')? 
' ': *p);
-	}
-	while (n-- >= 0)	/* compare with >= 0 for one more space */
-		db_printf(" ");
+	p = 0;
+	tname[0] = 0;
+	
+	if(task->bsd_info) p = getProcName((struct proc *)(task->bsd_info));	/* Point to task name */
+	
+	if(p) {
+		for(i = 0; i < 32; i++) {				/* Move no more than 32 bytes */
+			tname[i] = p[i];
+			if(p[i] == 0) break;
+		}
+		tname[i] = 0;
+		db_printf("%s", tname);
+	}
+	else db_printf("no name");
 }
 
 void
diff --git a/osfmk/ppc/db_low_trace.c b/osfmk/ppc/db_low_trace.c
index 5af46e4d9..32f830407 100644
--- a/osfmk/ppc/db_low_trace.c
+++ b/osfmk/ppc/db_low_trace.c
@@ -61,14 +61,11 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 
-void db_dumpphys(struct phys_entry *pp);				/* Dump from physent */
-void db_dumppca(struct mapping *mp);					/* PCA */
+void db_dumppca(unsigned int ptegindex);
 void db_dumpmapping(struct mapping *mp);				/* Dump out a mapping */
-void db_dumppmap(pmap_t pmap);							/* Dump out a pmap */
 
 extern kmod_info_t *kmod;								/* Find the kmods */
 
 db_addr_t	db_low_trace_prev = 0;
@@ -87,11 +84,11 @@ void db_low_trace(db_expr_t addr, int have_addr, db_expr_t count, char * modif)
 
 	int			c, i;
 	unsigned int tempx, cnt;
-	unsigned int xbuf[8];
-	unsigned int	xTraceCurr, xTraceStart, xTraceEnd, cxltr, xxltr;
+	unsigned int	xTraceCurr, xTraceStart, xTraceEnd, cxltr;
 	db_addr_t	next_addr;
 	LowTraceRecord xltr;
 	unsigned char cmark;
+	addr64_t	xxltr;
 
 	cnt = 16;											/* Default to 16 entries */
 
@@ -111,29 +108,36 @@ void db_low_trace(db_expr_t addr, int have_addr, db_expr_t count, char * modif)
 		return;											/* Leave... */
 	}
 
-	if((unsigned int)addr&0x0000003F) {					/* Proper alignment? */
-		db_printf("address not aligned on trace entry boundary (0x40)\n");	/* Tell 'em */
+	if((unsigned int)addr&0x0000007F) {					/* Proper alignment? */
+		db_printf("address not aligned on trace entry boundary (0x80)\n");	/* Tell 'em */
 		return;											/* Leave... */
 	}
 
-	xxltr=(unsigned int)addr;							/* Set the start */
-	cxltr=((xTraceCurr==xTraceStart ? xTraceEnd : xTraceCurr)-sizeof(LowTraceRecord));	/* Get address of newest entry */
+	xxltr = addr;										/* Set the start */
+	cxltr = ((xTraceCurr == xTraceStart ? xTraceEnd : xTraceCurr) - sizeof(LowTraceRecord));	/* Get address of newest entry */
 
 	db_low_trace_prev = addr;							/* Starting point */
 
 	for(i=0; i < cnt; i++) {							/* Dump the 16 (or all) entries */
 
-		ReadReal(xxltr, (unsigned int *)&xltr);			/* Get the first half */
-		ReadReal(xxltr+32, &(((unsigned int *)&xltr)[8]));	/* Get the second half */
+		ReadReal((addr64_t)xxltr, (unsigned int *)&xltr);	/* Get the first quarter */
+		ReadReal((addr64_t)xxltr + 32, &(((unsigned int *)&xltr)[8]));	/* Get the second quarter */
+		ReadReal((addr64_t)xxltr + 64, &(((unsigned int *)&xltr)[16]));	/* Get the third quarter */
+		ReadReal((addr64_t)xxltr + 96, &(((unsigned int *)&xltr)[24]));	/* Get the fourth quarter */
 
-		db_printf("\n%s%08X %1X %08X %08X - %04X\n", (xxltr!=cxltr ? " " : "*"),
+		db_printf("\n%s%08llX %1X %08X %08X - %04X\n", (xxltr != cxltr ? " " : "*"),
 			xxltr, 
 			xltr.LTR_cpu, xltr.LTR_timeHi, xltr.LTR_timeLo,
-			(xltr.LTR_excpt&0x8000 ? 0xFFFF : xltr.LTR_excpt*64));	/* Print the first line */
-		db_printf("     %08X %08X %08X %08X %08X %08X %08X\n",
-			xltr.LTR_cr, xltr.LTR_srr0, xltr.LTR_srr1, xltr.LTR_dar, xltr.LTR_save, xltr.LTR_lr, xltr.LTR_ctr);
-		db_printf("     %08X %08X %08X %08X %08X %08X\n",
-			xltr.LTR_r0, xltr.LTR_r1, xltr.LTR_r2, xltr.LTR_r3, xltr.LTR_r4, xltr.LTR_r5);
+			(xltr.LTR_excpt & 0x8000 ? 
0xFFFF : xltr.LTR_excpt * 64));	/* Print the first line */
+		
+		db_printf("     DAR/DSISR/CR: %016llX %08X %08X\n", xltr.LTR_dar, xltr.LTR_dsisr, xltr.LTR_cr);
+		
+		db_printf("     SRR0/SRR1  %016llX %016llX\n", xltr.LTR_srr0, xltr.LTR_srr1);
+		db_printf("     LR/CTR     %016llX %016llX\n", xltr.LTR_lr, xltr.LTR_ctr);
+		
+		db_printf("     R0/R1/R2   %016llX %016llX %016llX\n", xltr.LTR_r0, xltr.LTR_r1, xltr.LTR_r2);
+		db_printf("     R3/R4/R5   %016llX %016llX %016llX\n", xltr.LTR_r3, xltr.LTR_r4, xltr.LTR_r5);
+		db_printf("     R6/sv/rsv  %016llX %016llX %08X\n", xltr.LTR_r6, xltr.LTR_save, xltr.LTR_rsvd0);
 
 		if((cnt != 16) && (xxltr == xTraceCurr)) break;	/* If whole table dump, exit when we hit start again... */
 
@@ -158,14 +162,69 @@ void db_display_long(db_expr_t addr, int have_addr, db_expr_t count, char * modi
 	int	i;
 
 	for(i=0; i<8; i++) {								/* Print 256 bytes */
-		db_printf("%08X   %08X %08X %08X %08X  %08X %08X %08X %08X\n", addr,	/* Print a line */
+		db_printf("%016llX %08X %08X %08X %08X %08X %08X %08X %08X\n", addr,	/* Print a line */
 			((unsigned long *)addr)[0], ((unsigned long *)addr)[1], ((unsigned long *)addr)[2], ((unsigned long *)addr)[3], 
 			((unsigned long *)addr)[4], ((unsigned long *)addr)[5], ((unsigned long *)addr)[6], ((unsigned long *)addr)[7]);
-		addr=(db_expr_t)((unsigned int)addr+0x00000020);	/* Point to next address */
+		addr=(db_expr_t)(addr+0x00000020);				/* Point to next address */
 	}
 	db_next = addr;
+}
+
+unsigned char xtran[256] = {
+/*	 x0   x1   x2   x3   x4   x5   x6   x7   x8   x9   xA   xB   xC   xD   xE   xF  */
+	'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',	/* 0x */
+	'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',	/* 1x */
+	' ', '!', '"', '#', '$', '%', '&',0x27, '(', ')', '*', '+', ',', '-', '.', '/',	/* 2x */
+	'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',	/* 3x */
+	'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',	/* 4x */
+	'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[',0x5C, ']', '^', '_',	/* 5x */
+	'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',	/* 6x */
+	'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '.',	/* 7x */
+	'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',	/* 8x */
+	'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',	/* 9x */
+	'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',	/* Ax */
+	'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',	/* Bx */
+	'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',	/* Cx */
+	'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',	/* Dx */
+	'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',	/* Ex */
+	'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',	/* Fx */
+};
+
+/*
+ *		Print out 256 bytes in characters
+ *
+ *
+ *		dc [entaddr]
+ */
+void db_display_char(db_expr_t addr, int have_addr, db_expr_t count, char * modif) {
+
+	int i, j, k;
+	unsigned char xlt[256], *xaddr;
+
+	xaddr = (unsigned char *)addr;
+	
+	
+	for(i = 0; i < 8; i++) {							/* Print 256 bytes */
+		j = 0;
+		for(k = 0; k < 32; k++) {
+			xlt[j] = xtran[*xaddr];
+			xaddr++;
+			j++;
+			if((k & 3) == 3) {
+				xlt[j] = ' ';
+				j++;
+			}
+		}
+		xlt[j] = 0;
+		
+		db_printf("%016llX %s\n", (addr64_t)(xaddr - 32), xlt);	/* Print a line */
+	}
+
+	db_next = (db_expr_t)xaddr;
+	
+	
 }
 
 /*
@@ -181,15 +240,13 @@ void 
db_display_real(db_expr_t addr, int have_addr, db_expr_t count, char * modi
 
 	unsigned int xbuf[8];
 
 	for(i=0; i<8; i++) {								/* Print 256 bytes */
-		ReadReal((unsigned int)addr, &xbuf[0]);			/* Get the real storage data */
-		db_printf("%08X %08X %08X %08X %08X %08X %08X %08X %08X\n", addr,	/* Print a line */
+		ReadReal(addr, &xbuf[0]);						/* Get the real storage data */
+		db_printf("%016llX %08X %08X %08X %08X %08X %08X %08X %08X\n", addr,	/* Print a line */
 			xbuf[0], xbuf[1], xbuf[2], xbuf[3], 
 			xbuf[4], xbuf[5], xbuf[6], xbuf[7]);
-		addr=(db_expr_t)((unsigned int)addr+0x00000020);	/* Point to next address */
+		addr = addr + 0x00000020;						/* Point to next address */
 	}
 	db_next = addr;
-
-
 }
 
 unsigned int	dvspace = 0;
 
@@ -202,30 +259,92 @@ unsigned int dvspace = 0;
  */
 void db_display_mappings(db_expr_t addr, int have_addr, db_expr_t count, char * modif) {
 
-	int				i;
-	unsigned int	xspace;
+	db_expr_t		xspace;
+	pmap_t			pmap;
+	addr64_t		lnextva;
 
-	mapping		*mp, *mpv;
-	vm_offset_t	pa;
+	mapping		*mp;
+
+	if (db_expression(&xspace)) {					/* Get the address space requested */
+		if(xspace >= maxAdrSp) {
+			db_printf("requested address space (%llX) larger than max (%X)\n", xspace, maxAdrSp - 1);
+			return;
+		}
+		dvspace = xspace;							/* Get the space or set default */
+	}
 
-	if (db_expression(&xspace)) dvspace = xspace;	/* Get the space or set default */
+	db_printf("mapping information for %016llX in space %8X:\n", addr, dvspace);
+
+	pmap = pmapTrans[dvspace].pmapVAddr;			/* Find the pmap address */
+	if(!pmap) {										/* The pmap is not in use */
+		db_printf("The space %X is not assigned to a pmap\n", dvspace);	/* Say we are wrong */
+		return;
+	}
+
+	mp = hw_find_map(pmap, (addr64_t)addr, &lnextva);	/* Try to find the mapping for this address */
+	if((unsigned int)mp == mapRtBadLk) {			/* Did we lock up ok? */
+		db_printf("Timeout locking physical entry for virtual address %016llX\n", addr);
+		return;
+	}
 
-	db_printf("mapping information for %08X in space %08X:\n", addr, dvspace);
-	mp = hw_lock_phys_vir(dvspace, addr);			/* Lock the physical entry for this mapping */
 	if(!mp) {										/* Did we find one? */
 		db_printf("Not mapped\n");
 		return;										/* Didn't find any, return FALSE... */
 	}
-	if((unsigned int)mp&1) {						/* Did we timeout? */
-		db_printf("Timeout locking physical entry for virtual address (%08X)\n", addr);	/* Yeah, scream about it! */
-		return;										/* Bad hair day, return FALSE... */
+
+	mapping_drop_busy(mp);							/* The mapping shouldn't be changing */
+
+	db_dumpmapping(mp);								/* Dump it all out */
+
+	return;											/* Tell them we did it */
+
+
+}
+
+/*
+ *		Print out hash table data
+ *
+ *
+ *		dh vaddr [space] (defaults to last entered)
+ */
+void db_display_hash(db_expr_t addr, int have_addr, db_expr_t count, char * modif) {
+
+	db_expr_t		xspace;
+	unsigned int	seg, vsid, ptegindex, htsize;
+	pmap_t			pmap;
+	addr64_t		lnextva, llva, vpn, esid;
+	uint64_t		hash;
+	int				s4bit;
+
+	llva = (addr64_t)((unsigned int)addr);			/* Make sure we are 64-bit now */
+
+	s4bit = !((per_proc_info[0].pf.Available & pf64Bit) == 0);	/* Are we a big guy? 
*/ + if (db_expression(&xspace)) { /* Get the address space requested */ + if(xspace >= maxAdrSp) { + db_printf("requested address space (%llX) larger than max (%X)\n", xspace, maxAdrSp - 1); + return; + } + dvspace = xspace; /* Get the space or set default */ } - printf("dumpaddr: space=%08X; vaddr=%08X\n", dvspace, addr); /* Say what address were dumping */ - mpv = hw_cpv(mp); /* Get virtual address of mapping */ - dumpmapping(mpv); - if(mpv->physent) { - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */ + + pmap = pmapTrans[dvspace].pmapVAddr; /* Find the pmap address */ + if(!pmap) { /* The pmap is not in use */ + db_printf("The space %X is not assigned to a pmap\n", dvspace); /* Say we are wrong */ + return; } + + hash = (uint64_t)pmap->space | ((uint64_t)pmap->space << maxAdrSpb) | ((uint64_t)pmap->space << (2 * maxAdrSpb)); /* Get hash value */ + hash = hash & 0x0000001FFFFFFFFF; /* Make sure we stay within supported ranges */ + + esid = ((llva >> 14) & -maxAdrSp) ^ hash; /* Get ESID */ + llva = ((llva >> 12) & 0xFFFF) ^ esid; /* Get index into hash table */ + + if(s4bit) htsize = hash_table_size >> 7; /* Get number of entries in hash table for 64-bit */ + else htsize = hash_table_size >> 6; /* get number of entries in hash table for 32-bit */ + + ptegindex = llva & (htsize - 1); /* Get the index to the pteg and pca */ + db_dumppca(ptegindex); /* dump the info */ + return; /* Tell them we did it */ @@ -239,133 +358,193 @@ void db_display_mappings(db_expr_t addr, int have_addr, db_expr_t count, char * void db_display_pmap(db_expr_t addr, int have_addr, db_expr_t count, char * modif) { pmap_t pmap; + int i; + unsigned int v0, v1, st0, st1; - pmap = kernel_pmap; /* Start at the beginning */ + pmap = (pmap_t)addr; + if(!have_addr) pmap = kernel_pmap; /* Start at the beginning */ - db_printf("PMAP (real) Next Prev VRMask Space Bmaps Flags Ref spaceNum Resident Wired\n"); -// xxxxxxxx rrrrrrrr xxxxxxxx pppppppp vvvvvvvv ssssssss bbbbbbbb cccccccc vvvvvvvv nnnnnnnn rrrrrrrr wwwwwwwww + db_printf("PMAP (real) Next Prev Space Flags Ref spaceNum Resident Wired\n"); +// xxxxxxxx rrrrrrrrrrrrrrrr xxxxxxxx pppppppp ssssssss cccccccc vvvvvvvv nnnnnnnn rrrrrrrr wwwwwwwww while(1) { /* Do them all */ - db_printf("%08X %08X %08X %08X %08X %08X %08X %08X %08X %08X %08X %08X\n", - pmap, (unsigned int)pmap ^ pmap->pmapvr, - pmap->pmap_link.next, pmap->pmap_link.prev, pmap->pmapvr, - pmap->space, pmap->bmaps, pmap->vflags, pmap->ref_count, pmap->spaceNum, + db_printf("%08X %016llX %08X %08X %08X %08X %08X %08X %08X %08X\n", + pmap, (addr64_t)pmap ^ pmap->pmapvr, + pmap->pmap_link.next, pmap->pmap_link.prev, + pmap->space, pmap->pmapFlags, pmap->ref_count, pmap->spaceNum, pmap->stats.resident_count, pmap->stats.wired_count); + db_printf("lists = %d, rand = %08X, visits = %016llX, searches = %08X\n", + pmap->pmapCurLists, pmap->pmapRandNum, + pmap->pmapSearchVisits, pmap->pmapSearchCnt); -// xxxxxxxx rrrrrrrr xxxxxxxx pppppppp vvvvvvvv ssssssss bbbbbbbb cccccccc vvvvvvvv nnnnnnnn rrrrrrrr wwwwwwwww - db_printf(" SRs: %08X %08X %08X %08X %08X %08X %08X %08X\n", pmap->pmapSegs[0], pmap->pmapSegs[1], pmap->pmapSegs[2], pmap->pmapSegs[3], - pmap->pmapSegs[4], pmap->pmapSegs[5], pmap->pmapSegs[6], pmap->pmapSegs[7]); - db_printf(" %08X %08X %08X %08X %08X %08X %08X %08X\n", pmap->pmapSegs[8], pmap->pmapSegs[9], pmap->pmapSegs[10], pmap->pmapSegs[11], - pmap->pmapSegs[12], pmap->pmapSegs[13], pmap->pmapSegs[14], pmap->pmapSegs[15]); - - 
db_printf(" spmaps: %08X %08X %08X %08X %08X %08X %08X %08X\n", pmap->pmapPmaps[0], pmap->pmapPmaps[1], pmap->pmapPmaps[2], pmap->pmapPmaps[3], - pmap->pmapPmaps[4], pmap->pmapPmaps[5], pmap->pmapPmaps[6], pmap->pmapPmaps[7]); - db_printf(" %08X %08X %08X %08X %08X %08X %08X %08X\n", pmap->pmapPmaps[8], pmap->pmapPmaps[9], pmap->pmapPmaps[10], pmap->pmapPmaps[11], - pmap->pmapPmaps[12], pmap->pmapPmaps[13], pmap->pmapPmaps[14], pmap->pmapPmaps[15]); + db_printf("cctl = %08X, SCSubTag = %016llX\n", + pmap->pmapCCtl, pmap->pmapSCSubTag); + + for(i = 0; i < 16; i +=2) { + v0 = (pmap->pmapCCtl >> (31 - i) & 1); /* Get high order bit */ + v1 = (pmap->pmapCCtl >> (30 - i) & 1); /* Get high order bit */ + st0 = (pmap->pmapSCSubTag >> (60 - (4 * i))) & 0xF; /* Get the sub-tag */ + st1 = (pmap->pmapSCSubTag >> (56 - (4 * i))) & 0xF; /* Get the sub-tag */ + + db_printf(" %01X %01X %016llX/%016llX %01X %01X %016llX/%016llX\n", + v0, st0, pmap->pmapSegCache[i].sgcESID, pmap->pmapSegCache[i].sgcVSID, + v1, st1, pmap->pmapSegCache[i+1].sgcESID, pmap->pmapSegCache[i+1].sgcVSID); + } - pmap = (pmap_t)pmap->pmap_link.next; /* Skip to the next */ db_printf("\n"); + if(have_addr) break; /* Do only one if address supplied */ + pmap = (pmap_t)pmap->pmap_link.next; /* Skip to the next */ if(pmap == kernel_pmap) break; /* We've wrapped, we're done */ } return; } -/* - * print information about the passed in pmap block - */ -void db_dumppmap(pmap_t pmap) { - - db_printf("Dump of pmap block: %08X\n", pmap); - db_printf(" pmap_link: %08X %08X\n", pmap->pmap_link.next, pmap->pmap_link.prev); - db_printf(" pmapvr: %08X\n", pmap->pmapvr); - db_printf(" space: %08X\n", pmap->space); - db_printf(" bmaps: %08X\n", pmap->bmaps); - db_printf(" ref_count: %08X\n", pmap->ref_count); - db_printf(" spaceNum: %08X\n", pmap->spaceNum); - db_printf(" resident_count: %08X\n", pmap->stats.resident_count); - db_printf(" wired_count: %08X\n", pmap->stats.wired_count); - db_printf("\n"); +/* + * Checks the pmap skip lists + * + * + * cp pmap + */ +void db_check_pmaps(db_expr_t addr, int have_addr, db_expr_t count, char * modif) { + int i; + unsigned int ret; + uint64_t dumpa[32]; + pmap_t pmap; + + pmap = (pmap_t)addr; + if(!have_addr) pmap = kernel_pmap; /* If no map supplied, start with kernel */ + + while(1) { /* Do them all */ + ret = mapSkipListVerifyC(pmap, &dumpa); /* Check out the map */ + if(!ret) db_printf("Skiplists verified ok, pmap = %08X\n", pmap); + else { + db_printf("Verification failure at %08X, pmap = %08X\n", ret, pmap); + for(i = 0; i < 32; i += 4) { + db_printf("R%02d %016llX %016llX %016llX %016llX\n", i, + dumpa[i], dumpa[i + 1], dumpa[i + 2], dumpa[i + 3]); + } + } + if(have_addr) break; /* Do only one if address supplied */ + pmap = (pmap_t)pmap->pmap_link.next; /* Skip to the next */ + if(pmap == kernel_pmap) break; /* We've wrapped, we're done */ + } + return; } + /* - * Prints out a mapping control block + * Displays iokit junk * + * dp */ - -void db_dumpmapping(struct mapping *mp) { /* Dump out a mapping */ - db_printf("Dump of mapping block: %08X\n", mp); /* Header */ - db_printf(" next: %08X\n", mp->next); - db_printf(" hashnext: %08X\n", mp->hashnext); - db_printf(" PTEhash: %08X\n", mp->PTEhash); - db_printf(" PTEent: %08X\n", mp->PTEent); - db_printf(" physent: %08X\n", mp->physent); - db_printf(" PTEv: %08X\n", mp->PTEv); - db_printf(" PTEr: %08X\n", mp->PTEr); - db_printf(" pmap: %08X\n", mp->pmap); - - if(mp->physent) { /* Print physent if it exists */ - db_printf("Associated physical entry: %08X 
%08X\n", mp->physent->phys_link, mp->physent->pte1); - } - else { - db_printf("Associated physical entry: none\n"); - } - - db_dumppca(mp); /* Dump out the PCA information */ - +void db_piokjunk(void); + +void db_display_iokit(db_expr_t addr, int have_addr, db_expr_t count, char * modif) { + + db_piokjunk(); + return; } /* - * Prints out a PTEG control area + * Prints out a mapping control block * */ -void db_dumppca(struct mapping *mp) { /* PCA */ - - PCA *pca; - unsigned int *pteg, sdr; - - pca = (PCA *)((unsigned int)mp->PTEhash&-64); /* Back up to the start of the PCA */ - __asm__ volatile("mfsdr1 %0" : "=r" (sdr)); - db_printf(" SDR1: %08X\n", sdr); - pteg=(unsigned int *)((unsigned int)pca-(((sdr&0x0000FFFF)+1)<<16)); - db_printf(" Dump of PCA: %08X\n", pca); /* Header */ - db_printf(" PCAlock: %08X\n", pca->PCAlock); - db_printf(" PCAallo: %08X\n", pca->flgs.PCAallo); - db_printf(" PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], pca->PCAhash[3]); - db_printf(" %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]); - db_printf("Dump of PTEG: %08X\n", pteg); /* Header */ - db_printf(" %08X %08X %08X %08X\n", pteg[0], pteg[1], pteg[2], pteg[3]); - db_printf(" %08X %08X %08X %08X\n", pteg[4], pteg[5], pteg[6], pteg[7]); - db_printf(" %08X %08X %08X %08X\n", pteg[8], pteg[9], pteg[10], pteg[11]); - db_printf(" %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]); +void db_dumpmapping(struct mapping *mp) { /* Dump out a mapping */ + + pmap_t pmap; + int i; + + db_printf("Dump of mapping block: %08X, pmap: %08X (%016llX)\n", mp, pmapTrans[mp->mpSpace].pmapVAddr, + pmapTrans[mp->mpSpace].pmapPAddr); /* Header */ + db_printf(" mpFlags: %08X\n", mp->mpFlags); + db_printf(" mpSpace: %04X\n", mp->mpSpace); + db_printf(" mpBSize: %04X\n", mp->mpBSize); + db_printf(" mpPte: %08X\n", mp->mpPte); + db_printf(" mpPAddr: %08X\n", mp->mpPAddr); + db_printf(" mpVAddr: %016llX\n", mp->mpVAddr); + db_printf(" mpAlias: %016llX\n", mp->mpAlias); + db_printf(" mpList00: %016llX\n", mp->mpList0); + + for(i = 1; i < (mp->mpFlags & mpLists); i++) { /* Dump out secondary physical skip lists */ + db_printf(" mpList%02d: %016llX\n", i, mp->mpList[i - 1]); + } + return; } /* - * Dumps starting with a physical entry + * Prints out a PTEG and PCA + * */ -void db_dumpphys(struct phys_entry *pp) { /* Dump from physent */ +void db_dumppca(unsigned int ptegindex) { - mapping *mp; - PCA *pca; - unsigned int *pteg; + addr64_t pteg, pca, llva; + unsigned int xpteg[32], xpca[8], space, hash, pva, seg, api, va; + int i, s4bit; + unsigned long long llslot, llseg, llhash; - db_printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1); - mp = hw_cpv(pp->phys_link); - while(mp) { - db_dumpmapping(mp); - db_dumppca(mp); - mp = hw_cpv(mp->next); - } + s4bit = !((per_proc_info[0].pf.Available & pf64Bit) == 0); /* Are we a big guy? 
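   (pf64Bit set: the hash table uses 128-byte PTEGs holding 16-byte PTEs)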
*/
+
+	pteg = hash_table_base + (ptegindex << 6);		/* Point to the PTEG */
+	if(s4bit) pteg = hash_table_base + (ptegindex << 7);	/* Point to the PTEG */
+	pca = hash_table_base - ((ptegindex + 1) * 4);	/* Point to the PCA */
+	db_printf("PTEG = %016llX, PCA = %016llX (index = %08X)\n", pteg, pca, ptegindex);
+	ReadReal(pteg, &xpteg[0]);						/* Get first half of the pteg */
+	ReadReal(pteg + 0x20, &xpteg[8]);				/* Get second half of the pteg */
+	ReadReal(pca, &xpca[0]);						/* Get pca */
+	
+	db_printf("PCA: free = %02X, steal = %02X, auto = %02X, misc = %02X\n", 
+		((xpca[0] >> 24) & 255), ((xpca[0] >> 16) & 255), ((xpca[0] >> 8) & 255), xpca[0] & 255);
+	
+	if(!s4bit) {									/* Little guy? */
+	
+		for(i = 0; i < 16; i += 2) {				/* Step through pteg */
+			db_printf("%08X %08X - ", xpteg[i], xpteg[i + 1]);	/* Dump the pteg slot */
+			
+			if(xpteg[i] & 0x80000000) db_printf("  valid - ");	/* Is it valid? */
+			else db_printf("invalid - ");			/* Nope, invalid */
+			
+			space = (xpteg[i] >> 7) & (maxAdrSp - 1);	/* Extract the space */
+			hash = space | (space << maxAdrSpb) | (space << (2 * maxAdrSpb));	/* Get the hash */
+			pva = ptegindex ^ hash;					/* Get part of the vaddr */
+			seg = (xpteg[i] >> 7) ^ hash;			/* Get the segment number */
+			api = (xpteg[i] & 0x3F);				/* Get the API */
+			va = ((seg << (28 - maxAdrSpb)) & 0xF0000000) | (api << 22) | ((pva << 12) & 0x003FF000);	/* Get the vaddr */
+			db_printf("va = %08X\n", va);
+		}
+	}
+	else {
+		ReadReal(pteg + 0x40, &xpteg[16]);			/* Get the third quarter of the pteg */
+		ReadReal(pteg + 0x60, &xpteg[24]);			/* Get the fourth quarter of the pteg */
+		
+		for(i = 0; i < 32; i += 4) {				/* Step through pteg */
+			db_printf("%08X%08X %08X%08X - ", xpteg[i], xpteg[i + 1], xpteg[i + 2], xpteg[i + 3]);	/* Dump the pteg slot */
+			
+			if(xpteg[i + 1] & 1) db_printf("  valid - ");	/* Is it valid? */
+			else db_printf("invalid - ");			/* Nope, invalid */
+			
+			llslot = ((long long)xpteg[i] << 32) | (long long)xpteg[i + 1];	/* Make a long long version of this */
+			space = (llslot >> 12) & (maxAdrSp - 1);	/* Extract the space */
+			llhash = (unsigned long long)space | ((unsigned long long)space << maxAdrSpb) | ((unsigned long long)space << (2 * maxAdrSpb));	/* Get the hash */
+			llhash = llhash & 0x0000001FFFFFFFFF;	/* Make sure we stay within supported ranges */
+			pva = (unsigned long long)ptegindex ^ llhash;	/* Get part of the vaddr */
+			llseg = (llslot >> 12) ^ llhash;		/* Get the segment number */
+			api = (llslot >> 7) & 0x1F;				/* Get the API */
+			llva = ((llseg << (28 - maxAdrSpb)) & 0xFFFFFFFFF0000000ULL) | (api << 23) | ((pva << 12) & 0x007FF000);	/* Get the vaddr */
+			db_printf("va = %016llX\n", llva);
+		}
+	}
+	
+	return;
+	
+}
@@ -379,16 +558,30 @@ void db_dumpphys(struct phys_entry *pp) { /* Dump from physent */
 
  */
 void db_display_virtual(db_expr_t addr, int have_addr, db_expr_t count, char * modif) {
 
-	int				i, size, lines, rlines;
+	int				i, size, lines, rlines;
 	unsigned int	xbuf[8];
-	unsigned int	xspace;
+	db_expr_t		xspace;
+	pmap_t			pmap;
 
 	mapping		*mp, *mpv;
-	vm_offset_t	pa;
+	addr64_t	pa;
+	ppnum_t		pnum;
+
+	if (db_expression(&xspace)) {					/* Parse the space ID */
+		if(xspace >= (1 << maxAdrSpb)) {			/* Check if they gave us a sane space number */
+			db_printf("Invalid space ID: %llX - max is %X\n", xspace, (1 << maxAdrSpb) - 1);
+			return;
+		}
+		dvspace = xspace;							/* Get the space or set default */
+	}
 
-	if (db_expression(&xspace)) dvspace = xspace;	/* Get the space or set default */
+	pmap = (pmap_t)pmapTrans[dvspace].pmapVAddr;	/* Find the pmap address */
+	if((unsigned int)pmap == 0) {					/* Is there actually a pmap here? 
*/ + db_printf("Address space not found: %X\n", dvspace); /* Complain */ + return; + } - addr&=-32; + addr &= -32; size = 4096 - (addr & 0x00000FFF); /* Bytes left on page */ lines = size / 32; /* Number of lines in first or only part */ @@ -396,63 +589,47 @@ void db_display_virtual(db_expr_t addr, int have_addr, db_expr_t count, char * m rlines = 8 - lines; if(rlines < 0) lines = 0; - db_printf("Dumping %08X (space=%08X); ", addr, dvspace); - mp = hw_lock_phys_vir(dvspace, addr); /* Lock the physical entry for this mapping */ - if(!mp) { /* Did we find one? */ + db_printf("Dumping %016llX (pmap = %08X, space = %X); ", addr, pmap, dvspace); + + pnum = pmap_find_phys(pmap, (addr64_t)addr); /* Phynd the Physical */ + if(!pnum) { /* Did we find one? */ db_printf("Not mapped\n"); return; /* Didn't find any, return FALSE... */ } - if((unsigned int)mp&1) { /* Did we timeout? */ - db_printf("Timeout locking physical entry for virtual address (%08X)\n", addr); /* Yeah, scream about it! */ - return; /* Bad hair day, return FALSE... */ - } - mpv = hw_cpv(mp); /* Get virtual address of mapping */ - if(!mpv->physent) { /* Was there a physical entry? */ - pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)addr & (PAGE_SIZE-1))); /* Get physical address from physent */ - } - else { - pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)addr & (PAGE_SIZE-1))); /* Get physical address from physent */ - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ - } - db_printf("phys=%08X\n", pa); + + pa = (addr64_t)(pnum << 12) | (addr64_t)(addr & 0xFFF); /* Get the physical address */ + db_printf("phys = %016llX\n", pa); + for(i=0; iphysent) { /* Was there a physical entry? */ - pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)addr & (PAGE_SIZE-1))); /* Get physical address from physent */ - } - else { - pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)addr & (PAGE_SIZE-1))); /* Get physical address from physent */ - hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ - } - db_printf("phys=%08X\n", pa); + + pa = (addr64_t)(pnum << 12) | (addr64_t)((unsigned int)addr & 0xFFF); /* Get the physical address */ + db_printf("phys = %016llX\n", pa); + for(i=0; itasks.next; task != (task_t)&pset->tasks.next; task = (task_t)task->pset_tasks.next) { /* Go through the tasks */ taskact = 0; /* Reset activation count */ db_printf("\nTask %4d @%08X:\n", tottasks, task); /* Show where we're at */ - for(act = (thread_act_t)task->thr_acts.next; act != (thread_act_t)&task->thr_acts; act = (thread_act_t)act->thr_acts.next) { /* Go through activations */ + for(act = (thread_act_t)task->threads.next; act != (thread_act_t)&task->threads; act = (thread_act_t)act->task_threads.next) { /* Go through activations */ db_printf(" Act %4d @%08X - p: %08X current context: %08X\n", taskact, act, act->mact.pcb, act->mact.curctx); @@ -497,8 +674,8 @@ void db_display_save(db_expr_t addr, int have_addr, db_expr_t count, char * modi while(save) { /* Do them all */ totsaves++; /* Count savearea */ - db_printf(" Norm %08X: %08X %08X - tot = %d\n", save, save->save_srr0, save->save_srr1, totsaves); - save = save->save_hdr.save_prev; /* Next one */ + db_printf(" Norm %08X: %016llX %016llX - tot = %d\n", save, save->save_srr0, save->save_srr1, totsaves); + save = (savearea *)save->save_hdr.save_prev; /* Next one */ if(chainsize++ > chainmax) { /* See if we might be in a loop */ db_printf(" Chain 
terminated by count (%d) before %08X\n", chainmax, save);
 					break;
@@ -510,7 +687,7 @@ void db_display_save(db_expr_t addr, int have_addr, db_expr_t count, char * modi
 			while(save) {							/* Do them all */
 				totsaves++;							/* Count savearea */
 				db_printf("     FPU  %08X: %08X - tot = %d\n", save, save->save_hdr.save_level, totsaves);
-				save = save->save_hdr.save_prev;	/* Next one */
+				save = (savearea *)save->save_hdr.save_prev;	/* Next one */
 				if(chainsize++ > chainmax) {		/* See if we might be in a loop */
 					db_printf("      Chain terminated by count (%d) before %08X\n", chainmax, save);
 					break;
@@ -522,7 +699,7 @@ void db_display_save(db_expr_t addr, int have_addr, db_expr_t count, char * modi
 			while(save) {							/* Do them all */
 				totsaves++;							/* Count savearea */
 				db_printf("     Vec  %08X: %08X - tot = %d\n", save, save->save_hdr.save_level, totsaves);
-				save = save->save_hdr.save_prev;	/* Next one */
+				save = (savearea *)save->save_hdr.save_prev;	/* Next one */
 				if(chainsize++ > chainmax) {		/* See if we might be in a loop */
 					db_printf("      Chain terminated by count (%d) before %08X\n", chainmax, save);
 					break;
@@ -531,7 +708,7 @@ void db_display_save(db_expr_t addr, int have_addr, db_expr_t count, char * modi
 
 		if(CTable = act->mact.vmmControl) {			/* Are there virtual machines? */
 			
-			for(vmid = 0; vmid < kVmmMaxContextsPerThread; vmid++) {
+			for(vmid = 0; vmid < kVmmMaxContexts; vmid++) {
 			
 				if(!(CTable->vmmc[vmid].vmmFlags & vmmInUse)) continue;	/* Skip if vm is not in use */
 				
@@ -547,7 +724,7 @@ void db_display_save(db_expr_t addr, int have_addr, db_expr_t count, char * modi
 				while(save) {						/* Do them all */
 					totsaves++;						/* Count savearea */
 					db_printf("     FPU  %08X: %08X - tot = %d\n", save, save->save_hdr.save_level, totsaves);
-					save = save->save_hdr.save_prev;	/* Next one */
+					save = (savearea *)save->save_hdr.save_prev;	/* Next one */
 					if(chainsize++ > chainmax) {	/* See if we might be in a loop */
 						db_printf("      Chain terminated by count (%d) before %08X\n", chainmax, save);
 						break;
@@ -559,7 +736,7 @@ void db_display_save(db_expr_t addr, int have_addr, db_expr_t count, char * modi
 				while(save) {						/* Do them all */
 					totsaves++;						/* Count savearea */
 					db_printf("     Vec  %08X: %08X - tot = %d\n", save, save->save_hdr.save_level, totsaves);
-					save = save->save_hdr.save_prev;	/* Next one */
+					save = (savearea *)save->save_hdr.save_prev;	/* Next one */
 					if(chainsize++ > chainmax) {	/* See if we might be in a loop */
 						db_printf("      Chain terminated by count (%d) before %08X\n", chainmax, save);
 						break;
@@ -604,6 +781,8 @@ void db_display_xregs(db_expr_t addr, int have_addr, db_expr_t count, char * mod
 	db_printf("THRM2: %08X\n", dbspecrs[45]);
 	db_printf("THRM3: %08X\n", dbspecrs[46]);
 	db_printf("ICTC:  %08X\n", dbspecrs[47]);
+	db_printf("L2CR2: %08X\n", dbspecrs[48]);
+	db_printf("DABR:  %08X\n", dbspecrs[49]);
 	db_printf("\n");
 
 	db_printf("DBAT: %08X %08X %08X %08X\n", dbspecrs[2], dbspecrs[3], dbspecrs[4], dbspecrs[5]);
@@ -644,6 +823,192 @@ void db_display_xregs(db_expr_t addr, int have_addr, db_expr_t count, char * mod
 }
 
 
+/*
+ *		Check mappings and hash table for consistency
+ *
+ *		cm
+ */
+void db_check_mappings(db_expr_t addr, int have_addr, db_expr_t count, char * modif) {
+
+	addr64_t	pteg, pca, llva, lnextva;
+	unsigned int	xpteg[32], xpca[8], space, hash, pva, seg, api, va, free, free2, xauto, PTEGcnt, wimgkk, wimgxx, slotoff;
+	int			i, j, fnderr, slot, slot2, k, s4bit;
+	pmap_t		pmap;
+	mapping		*mp;
+	ppnum_t		ppn, pa, aoff;
+	unsigned long long llslot, llseg, llhash;
+
+	s4bit = 0;										/* Assume dinky? 
*/ + if(per_proc_info[0].pf.Available & pf64Bit) s4bit = 1; /* Are we a big guy? */ + + PTEGcnt = hash_table_size / 64; /* Get the number of PTEGS */ + if(s4bit) PTEGcnt = PTEGcnt / 2; /* PTEGs are twice as big */ + + pteg = hash_table_base; /* Start of hash table */ + pca = hash_table_base - 4; /* Start of PCA */ + + for(i = 0; i < PTEGcnt; i++) { /* Step through them all */ + + fnderr = 0; + + ReadReal(pteg, &xpteg[0]); /* Get first half of the pteg */ + ReadReal(pteg + 0x20, &xpteg[8]); /* Get second half of the pteg */ + if(s4bit) { /* See if we need the other half */ + ReadReal(pteg + 0x40, &xpteg[16]); /* Get third half of the pteg */ + ReadReal(pteg + 0x60, &xpteg[24]); /* Get fourth half of the pteg */ + } + ReadReal(pca, &xpca[0]); /* Get pca */ + + if(xpca[0] & 0x00000001) { /* Is PCA locked? */ + db_printf("Unexpected locked PCA\n"); /* Yeah, this may be bad */ + fnderr = 1; /* Remember to print the pca/pteg pair later */ + } + + free = 0x80000000; + + for(j = 0; j < 7; j++) { /* Search for duplicates */ + slot = j * 2; /* Point to the slot */ + if(s4bit) slot = slot * 2; /* Adjust for bigger slots */ + if(!(xpca[0] & free)) { /* Check more if slot is allocated */ + for(k = j + 1; k < 8; k++) { /* Search remaining slots */ + slot2 = k * 2; /* Point to the slot */ + if(s4bit) slot2 = slot2 * 2; /* Adjust for bigger slots */ + if((xpteg[slot] == xpteg[slot2]) + && (!s4bit || (xpteg[slot + 1] == xpteg[slot2 + 1]))) { /* Do we have duplicates? */ + db_printf("Duplicate tags in pteg, slot %d and slot %d\n", j, k); + fnderr = 1; + } + } + } + free = free >> 1; /* Move slot over */ + } + + free = 0x80000000; + xauto = 0x00008000; + + for(j = 0; j < 8; j++) { /* Step through the slots */ + + slot = j * 2; /* Point to the slot */ + if(s4bit) slot = slot * 2; /* Hagfish? */ + if(xpca[0] & free) { /* Check if marked free */ + if((!s4bit && (xpteg[slot] & 0x80000000)) /* Is a supposedly free slot valid? */ + || (s4bit && (xpteg[slot + 1] & 1))) { + db_printf("Free slot still valid - %d\n", j); + fnderr = 1; + } + } + else { /* We have an in use slot here */ + + if(!(!s4bit && (xpteg[slot] & 0x80000000)) /* Is a supposedly in use slot valid? */ + && !(s4bit && (xpteg[slot + 1] & 1))) { + db_printf("Inuse slot not valid - %d\n", j); + fnderr = 1; + } + else { /* Slot is valid, check mapping */ + if(!s4bit) { /* Not Hagfish? 
*/ + space = (xpteg[slot] >> 7) & (maxAdrSp - 1); /* Extract the space */ + hash = space | (space << maxAdrSpb) | (space << (2 * maxAdrSpb)); /* Get the hash */ + pva = i ^ hash; /* Get part of the vaddr */ + seg = (xpteg[slot] >> 7) ^ hash; /* Get the segment number */ + api = (xpteg[slot] & 0x3F); /* Get the API */ + va = ((seg << (28 - maxAdrSpb)) & 0xF0000000) | (api << 22) | ((pva << 12) & 0x003FF000); /* Get the vaddr */ + llva = (addr64_t)va; /* Make this a long long */ + wimgxx = xpteg[slot + 1] & 0x7F; /* Get the wimg and pp */ + ppn = xpteg[slot + 1] >> 12; /* Get physical page number */ + slotoff = (i * 64) + (j * 8) | 1; /* Get offset to slot and valid bit */ + } + else { /* Yes, Hagfish */ + llslot = ((long long)xpteg[slot] << 32) | (long long)xpteg[slot + 1]; /* Make a long long version of this */ + space = (llslot >> 12) & (maxAdrSp - 1); /* Extract the space */ + llhash = (unsigned long long)space | ((unsigned long long)space << maxAdrSpb) | ((unsigned long long)space << (2 * maxAdrSpb)); /* Get the hash */ + llhash = llhash & 0x0000001FFFFFFFFF; /* Make sure we stay within supported ranges */ + pva = i ^ llhash; /* Get part of the vaddr */ + llseg = ((llslot >> 12) ^ llhash); /* Get the segment number */ + api = (llslot >> 7) & 0x1F; /* Get the API */ + llva = ((llseg << (28 - maxAdrSpb)) & 0xFFFFFFFFF0000000ULL) | (api << 23) | ((pva << 12) & 0x007FF000); /* Get the vaddr */ + wimgxx = xpteg[slot + 3] & 0x7F; /* Get the wimg and pp */ + ppn = (xpteg[slot + 2] << 20) | (xpteg[slot + 3] >> 12); /* Get physical page number */ + slotoff = (i * 128) + (j * 16) | 1; /* Get offset to slot and valid bit */ + } + + pmap = pmapTrans[space].pmapVAddr; /* Find the pmap address */ + if(!pmap) { /* The pmap is not in use */ + db_printf("The space %08X is not assigned to a pmap, slot = %d\n", space, slot); /* Say we are wrong */ + fnderr = 1; + goto dcmout; + } + + mp = hw_find_map(pmap, llva, &lnextva); /* Try to find the mapping for this address */ +// db_printf("%08X - %017llX\n", mp, llva); + if((unsigned int)mp == mapRtBadLk) { /* Did we lock up ok? */ + db_printf("Timeout locking mapping for for virtual address %016ll8X, slot = %d\n", llva, j); + return; + } + + if(!mp) { /* Did we find one? */ + db_printf("Not mapped, slot = %d, va = %08X\n", j, (unsigned int)llva); + fnderr = 1; + goto dcmout; + } + + if((mp->mpFlags & 0xFF000000) > 0x01000000) { /* Is busy count too high? */ + db_printf("Busy count too high, slot = %d\n", j); + fnderr = 1; + } + + if(mp->mpFlags & mpBlock) { /* Is this a block map? */ + if(!(xpca[0] & xauto)) { /* Is it marked as such? */ + db_printf("mapping marked as block, PCA is not, slot = %d\n", j); + fnderr = 1; + } + } + else { /* Is a block */ + if(xpca[0] & xauto) { /* Is it marked as such? */ + db_printf("mapping not marked as block, PCA is, slot = %d\n", j); + fnderr = 1; + } + if(mp->mpPte != slotoff) { /* See if mapping PTEG offset is us */ + db_printf("mapping does not point to PTE, slot = %d\n", j); + fnderr = 1; + } + } + + wimgkk = (unsigned int)mp->mpVAddr; /* Get last half of vaddr where keys, etc are */ + wimgkk = (wimgkk ^ wimgxx) & 0x7F; /* XOR to find differences from PTE */ + if(wimgkk) { /* See if key in PTE is what we want */ + db_printf("key or WIMG does not match, slot = %d\n", j); + fnderr = 1; + } + + aoff = (ppnum_t)((llva >> 12) - (mp->mpVAddr >> 12)); /* Get the offset from vaddr */ + pa = aoff + mp->mpPAddr; /* Get the physical page number we expect */ + if(pa != ppn) { /* Is physical address expected? 
*/ + db_printf("Physical address does not match, slot = %d\n", j); + fnderr = 1; + } + + mapping_drop_busy(mp); /* We're done with the mapping */ + } + + } +dcmout: + free = free >> 1; + xauto = xauto >> 1; + } + + + if(fnderr)db_dumppca(i); /* Print if error */ + + pteg = pteg + 64; /* Go to the next one */ + if(s4bit) pteg = pteg + 64; /* Hagfish? */ + pca = pca - 4; /* Go to the next one */ + + + } + + return; +} + /* * Displays all of the kmods in the system. * diff --git a/osfmk/ppc/db_low_trace.h b/osfmk/ppc/db_low_trace.h index 518bfdc6b..4de489146 100644 --- a/osfmk/ppc/db_low_trace.h +++ b/osfmk/ppc/db_low_trace.h @@ -59,6 +59,13 @@ void db_display_long( char *modif ); +void db_display_char( + db_expr_t addr, + int have_addr, + db_expr_t count, + char *modif +); + void db_display_real( db_expr_t addr, int have_addr, @@ -68,10 +75,14 @@ void db_display_real( void db_display_virtual(db_expr_t addr, int have_addr, db_expr_t count, char * modif); void db_display_mappings(db_expr_t addr, int have_addr, db_expr_t count, char * modif); +void db_display_hash(db_expr_t addr, int have_addr, db_expr_t count, char * modif); void db_display_pmap(db_expr_t addr, int have_addr, db_expr_t count, char * modif); +void db_display_iokit(db_expr_t addr, int have_addr, db_expr_t count, char * modif); void db_display_save(db_expr_t addr, int have_addr, db_expr_t count, char * modif); void db_display_xregs(db_expr_t addr, int have_addr, db_expr_t count, char * modif); void db_display_kmod(db_expr_t addr, int have_addr, db_expr_t count, char * modif); void db_gsnoop(db_expr_t addr, int have_addr, db_expr_t count, char * modif); +void db_check_mappings(db_expr_t addr, int have_addr, db_expr_t count, char * modif); +void db_check_pmaps(db_expr_t addr, int have_addr, db_expr_t count, char * modif); #endif /* !_DDB_DB_LTR_H_ */ diff --git a/osfmk/ppc/db_machdep.h b/osfmk/ppc/db_machdep.h index f7e6d29f9..cde656d26 100644 --- a/osfmk/ppc/db_machdep.h +++ b/osfmk/ppc/db_machdep.h @@ -68,8 +68,8 @@ #include #include -typedef vm_offset_t db_addr_t; /* address - unsigned */ -typedef int db_expr_t; /* expression - signed */ +typedef addr64_t db_addr_t; /* address - unsigned */ +typedef uint64_t db_expr_t; /* expression - signed??? 
try unsigned */ typedef struct savearea db_regs_t; db_regs_t ddb_regs; /* register state */ @@ -104,7 +104,7 @@ int db_inst_store(unsigned long); db_phys_eq(task1,addr1,task2,addr2) #define DB_VALID_KERN_ADDR(addr) \ ((addr) >= VM_MIN_KERNEL_ADDRESS && \ - (addr) < VM_MAX_KERNEL_ADDRESS) + (addr) < vm_last_addr) #define DB_VALID_ADDRESS(addr,user) \ ((!(user) && DB_VALID_KERN_ADDR(addr)) || \ ((user) && (addr) < VM_MAX_ADDRESS)) @@ -130,10 +130,6 @@ extern db_addr_t db_disasm( db_addr_t loc, boolean_t altfmt, task_t task); -extern vm_offset_t db_kvtophys( - space_t space, - vm_offset_t va); - extern void db_read_bytes( vm_offset_t addr, int size, @@ -186,11 +182,6 @@ extern void kdb_on( int cpu); extern void cnpollc( boolean_t on); - -extern void db_phys_copy( - vm_offset_t, - vm_offset_t, - vm_size_t); extern boolean_t db_phys_cmp( vm_offset_t, diff --git a/osfmk/ppc/db_trace.c b/osfmk/ppc/db_trace.c index 8029df721..df2fee21c 100644 --- a/osfmk/ppc/db_trace.c +++ b/osfmk/ppc/db_trace.c @@ -61,30 +61,17 @@ extern char FixedStackStart[], FixedStackEnd[]; ((((vm_offset_t)(va)) >= (vm_offset_t)&FixedStackStart) && \ (((vm_offset_t)(va)) < ((vm_offset_t)&FixedStackEnd))) -#if 0 - -#define INKERNELSTACK(va, th) \ - (th == THR_ACT_NULL || \ - (((vm_offset_t)(va)) >= th->thread->kernel_stack && \ - (((vm_offset_t)(va)) < th->thread->kernel_stack + \ - KERNEL_STACK_SIZE)) || \ - INFIXEDSTACK(va)) -#else #define INKERNELSTACK(va, th) 1 -#endif - -#ifdef __MACHO__ struct db_ppc_frame { struct db_ppc_frame *f_frame; int pad1; - db_addr_t f_retaddr; + uint32_t f_retaddr; int pad3; int pad4; int pad5; - db_addr_t f_arg[DB_NUMARGS_MAX]; + uint32_t f_arg[DB_NUMARGS_MAX]; }; -#endif #define TRAP 1 #define INTERRUPT 2 @@ -127,45 +114,45 @@ extern int _setjmp( */ struct db_variable db_regs[] = { /* XXX "pc" is an alias to "srr0"... 
*/ - { "pc", (int *)&ddb_regs.save_srr0, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "srr0", (int *)&ddb_regs.save_srr0, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "srr1", (int *)&ddb_regs.save_srr1, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r0", (int *)&ddb_regs.save_r0, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r1", (int *)&ddb_regs.save_r1, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r2", (int *)&ddb_regs.save_r2, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r3", (int *)&ddb_regs.save_r3, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r4", (int *)&ddb_regs.save_r4, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r5", (int *)&ddb_regs.save_r5, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r6", (int *)&ddb_regs.save_r6, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r7", (int *)&ddb_regs.save_r7, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r8", (int *)&ddb_regs.save_r8, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r9", (int *)&ddb_regs.save_r9, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r10", (int *)&ddb_regs.save_r10, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r11", (int *)&ddb_regs.save_r11, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r12", (int *)&ddb_regs.save_r12, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r13", (int *)&ddb_regs.save_r13, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r14", (int *)&ddb_regs.save_r14, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r15", (int *)&ddb_regs.save_r15, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r16", (int *)&ddb_regs.save_r16, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r17", (int *)&ddb_regs.save_r17, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r18", (int *)&ddb_regs.save_r18, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r19", (int *)&ddb_regs.save_r19, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r20", (int *)&ddb_regs.save_r20, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r21", (int *)&ddb_regs.save_r21, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r22", (int *)&ddb_regs.save_r22, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r23", (int *)&ddb_regs.save_r23, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r24", (int *)&ddb_regs.save_r24, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r25", (int *)&ddb_regs.save_r25, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r26", (int *)&ddb_regs.save_r26, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r27", (int *)&ddb_regs.save_r27, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r28", (int *)&ddb_regs.save_r28, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r29", (int *)&ddb_regs.save_r29, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r30", (int *)&ddb_regs.save_r30, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "r31", (int *)&ddb_regs.save_r31, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "cr", (int *)&ddb_regs.save_cr, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "xer", (int *)&ddb_regs.save_xer, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "lr", (int *)&ddb_regs.save_lr, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, - { "ctr", (int *)&ddb_regs.save_ctr, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "pc", &ddb_regs.save_srr0, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "srr0", &ddb_regs.save_srr0, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "srr1", &ddb_regs.save_srr1, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r0", &ddb_regs.save_r0, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r1", &ddb_regs.save_r1, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r2", &ddb_regs.save_r2, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r3", &ddb_regs.save_r3, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r4", &ddb_regs.save_r4, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r5", &ddb_regs.save_r5, db_ppc_reg_value, 0, 0, 0, 0, 
TRUE }, + { "r6", &ddb_regs.save_r6, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r7", &ddb_regs.save_r7, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r8", &ddb_regs.save_r8, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r9", &ddb_regs.save_r9, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r10", &ddb_regs.save_r10, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r11", &ddb_regs.save_r11, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r12", &ddb_regs.save_r12, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r13", &ddb_regs.save_r13, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r14", &ddb_regs.save_r14, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r15", &ddb_regs.save_r15, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r16", &ddb_regs.save_r16, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r17", &ddb_regs.save_r17, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r18", &ddb_regs.save_r18, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r19", &ddb_regs.save_r19, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r20", &ddb_regs.save_r20, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r21", &ddb_regs.save_r21, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r22", &ddb_regs.save_r22, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r23", &ddb_regs.save_r23, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r24", &ddb_regs.save_r24, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r25", &ddb_regs.save_r25, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r26", &ddb_regs.save_r26, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r27", &ddb_regs.save_r27, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r28", &ddb_regs.save_r28, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r29", &ddb_regs.save_r29, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r30", &ddb_regs.save_r30, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r31", &ddb_regs.save_r31, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "cr", &ddb_regs.save_cr, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "xer", &ddb_regs.save_xer, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "lr", &ddb_regs.save_lr, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "ctr", &ddb_regs.save_ctr, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, }; struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]); @@ -176,8 +163,10 @@ db_ppc_reg_value( int flag, db_var_aux_param_t ap) { - int *dp = 0; - db_expr_t null_reg = 0; + db_expr_t *dp = 0; + db_expr_t null_reg = 0; + uint32_t *dp32; + register thread_act_t thr_act = ap->thr_act; int cpu; @@ -187,71 +176,78 @@ db_ppc_reg_value( db_error("no user registers\n"); } if (thr_act == current_act()) { - if (IS_USER_TRAP((&ddb_regs))) - dp = vp->valuep; - else if (INFIXEDSTACK(ddb_regs.save_r1)) - db_error("cannot get/set user registers in nested interrupt\n"); + if (IS_USER_TRAP((&ddb_regs))) dp = vp->valuep; + else if (INFIXEDSTACK(ddb_regs.save_r1)) + db_error("cannot get/set user registers in nested interrupt\n"); } - } else { - if (thr_act == THR_ACT_NULL || thr_act == current_act()) { - dp = vp->valuep; - } else { - if (thr_act->thread && - !(thr_act->thread->state & TH_STACK_HANDOFF) && - thr_act->thread->kernel_stack) { - int cpu; - - for (cpu = 0; cpu < NCPUS; cpu++) { - if (cpu_to_processor(cpu)->state == PROCESSOR_RUNNING && - cpu_to_processor(cpu)->cpu_data->active_thread == thr_act->thread && saved_state[cpu]) { - dp = (int *) (((int)saved_state[cpu]) + - (((int) vp->valuep) - - (int) &ddb_regs)); - break; - } - } + } + else { + if (thr_act == THR_ACT_NULL || thr_act == current_act()) { + dp = vp->valuep; + } + else { + if (thr_act->thread && + !(thr_act->thread->state & TH_STACK_HANDOFF) && + thr_act->thread->kernel_stack) { + + int 
cpu; - if (dp == 0) - dp = &null_reg; - } else if (thr_act->thread && - (thr_act->thread->state&TH_STACK_HANDOFF)){ - /* only PC is valid */ - if (vp->valuep == (int *) &ddb_regs.save_srr0) { - dp = (int *)(&thr_act->thread->continuation); - } else { - dp = &null_reg; - } - } + for (cpu = 0; cpu < NCPUS; cpu++) { + if (cpu_to_processor(cpu)->state == PROCESSOR_RUNNING && + cpu_to_processor(cpu)->active_thread == thr_act->thread && saved_state[cpu]) { + + dp = (db_expr_t)(((uint32_t)saved_state[cpu]) + + (((uint32_t) vp->valuep) - + (uint32_t) &ddb_regs)); + break; + } + } + + if (dp == 0) dp = &null_reg; + } + else if (thr_act->thread && (thr_act->thread->state & TH_STACK_HANDOFF)){ + /* only PC is valid */ + if (vp->valuep == (int *) &ddb_regs.save_srr0) { + dp = (int *)(&thr_act->thread->continuation); + } + else { + dp = &null_reg; + } + } } } if (dp == 0) { - int cpu; if (!db_option(ap->modif, 'u')) { - for (cpu = 0; cpu < NCPUS; cpu++) { - if (cpu_to_processor(cpu)->state == PROCESSOR_RUNNING && - cpu_to_processor(cpu)->cpu_data->active_thread == thr_act->thread && saved_state[cpu]) { - dp = (int *) (((int)saved_state[cpu]) + - (((int) vp->valuep) - - (int) &ddb_regs)); - break; - } - } + for (cpu = 0; cpu < NCPUS; cpu++) { + if (cpu_to_processor(cpu)->state == PROCESSOR_RUNNING && + cpu_to_processor(cpu)->active_thread == thr_act->thread && saved_state[cpu]) { + dp = (int *) (((int)saved_state[cpu]) + + (((int) vp->valuep) - (int) &ddb_regs)); + break; + } + } } if (dp == 0) { - if (!thr_act || thr_act->mact.pcb == 0) - db_error("no pcb\n"); - dp = (int *)((int)thr_act->mact.pcb + - ((int)vp->valuep - (int)&ddb_regs)); + if (!thr_act || thr_act->mact.pcb == 0) db_error("no pcb\n"); + dp = (int *)((int)thr_act->mact.pcb + ((int)vp->valuep - (int)&ddb_regs)); } } - if (flag == DB_VAR_SET) - *dp = *valuep; - else - *valuep = *dp; + + if(vp->valuep == (int *) &ddb_regs.save_cr) { /* Is this the CR we are doing? 
*/ + dp32 = (uint32_t *)dp; /* Make this easier */ + if (flag == DB_VAR_SET) *dp32 = *valuep; + else *valuep = *dp32; + } + else { /* Normal 64-bit registers */ + if (flag == DB_VAR_SET) *dp = *valuep; + else *valuep = *(unsigned long long *)dp; + } + return(0); } + void db_find_trace_symbols(void) { @@ -377,6 +373,13 @@ db_nextframe( /* falling down for unknown case */ default: miss_frame: + + if(!pmap_find_phys(kernel_pmap, (addr64_t)*fp)) { /* Check if this is valid */ + db_printf("Frame not mapped %08X\n",*fp); /* Say not found */ + *fp = 0; /* Show not found */ + break; /* Out of here */ + } + if ((*fp)->f_frame) *ip = (db_addr_t) db_get_task_value((int)&(*fp)->f_frame->f_retaddr, @@ -439,7 +442,7 @@ db_stack_trace_cmd( if (!have_addr && !trace_thread) { have_addr = TRUE; trace_thread = TRUE; - act_list = &(current_task()->thr_acts); + act_list = &(current_task()->threads); addr = (db_expr_t) queue_first(act_list); } else if (trace_thread) { @@ -447,11 +450,11 @@ db_stack_trace_cmd( if (!db_check_act_address_valid((thread_act_t)addr)) { if (db_lookup_task((task_t)addr) == -1) return; - act_list = &(((task_t)addr)->thr_acts); + act_list = &(((task_t)addr)->threads); addr = (db_expr_t) queue_first(act_list); } else { - act_list = &(((thread_act_t)addr)->task->thr_acts); + act_list = &(((thread_act_t)addr)->task->threads); thcount = db_lookup_task_act(((thread_act_t)addr)->task, (thread_act_t)addr); } @@ -465,7 +468,7 @@ db_stack_trace_cmd( return; } have_addr = TRUE; - act_list = &th->task->thr_acts; + act_list = &th->task->threads; addr = (db_expr_t) queue_first(act_list); } } @@ -504,7 +507,7 @@ next_thread: } if (trace_all_threads) db_printf("---------- Thread 0x%x (#%d of %d) ----------\n", - addr, thcount, th->task->thr_act_count); + addr, thcount, th->task->thread_count); next_activation: @@ -546,7 +549,7 @@ next_activation: for (cpu = 0; cpu < NCPUS; cpu++) { if (cpu_to_processor(cpu)->state == PROCESSOR_RUNNING && - cpu_to_processor(cpu)->cpu_data->active_thread == th->thread && + cpu_to_processor(cpu)->active_thread == th->thread && saved_state[cpu]) { break; } @@ -780,7 +783,7 @@ next_activation: if (trace_all_threads) { if (top_act != THR_ACT_NULL) th = top_act; - th = (thread_act_t) queue_next(&th->thr_acts); + th = (thread_act_t) queue_next(&th->task_threads); if (! 
queue_end(act_list, (queue_entry_t) th)) { db_printf("\n"); addr = (db_expr_t) th; diff --git a/osfmk/ppc/endian.h b/osfmk/ppc/endian.h index 15f706bb5..276714921 100644 --- a/osfmk/ppc/endian.h +++ b/osfmk/ppc/endian.h @@ -38,7 +38,7 @@ #define BIG_ENDIAN 4321 /* most-significant byte first (IBM, net) */ #define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long (pdp) */ -#if _BIG_ENDIAN /* Predefined by compiler */ +#ifdef __BIG_ENDIAN__ /* Predefined by compiler */ #define BYTE_ORDER BIG_ENDIAN /* byte order we use on ppc */ #define ENDIAN BIG #else diff --git a/osfmk/ppc/exception.h b/osfmk/ppc/exception.h index b44f941d1..16e3cf5e5 100644 --- a/osfmk/ppc/exception.h +++ b/osfmk/ppc/exception.h @@ -49,8 +49,9 @@ #include /* Per processor CPU features */ +#pragma pack(4) /* Make sure the structure stays as we defined it */ struct procFeatures { - unsigned int Available; + unsigned int Available; /* 0x000 */ #define pfFloat 0x80000000 #define pfFloatb 0 #define pfAltivec 0x40000000 @@ -69,12 +70,24 @@ struct procFeatures { #define pfThermalb 7 #define pfThermInt 0x00800000 #define pfThermIntb 8 -#define pfNoL2PFNap 0x00008000 -#define pfNoL2PFNapb 16 -#define pfSlowNap 0x00004000 -#define pfSlowNapb 17 -#define pfNoMuMMCK 0x00002000 -#define pfNoMuMMCKb 18 +#define pfSlowNap 0x00400000 +#define pfSlowNapb 9 +#define pfNoMuMMCK 0x00200000 +#define pfNoMuMMCKb 10 +#define pfNoL2PFNap 0x00100000 +#define pfNoL2PFNapb 11 +#define pfSCOMFixUp 0x00080000 +#define pfSCOMFixUpb 12 +#define pfHasDcba 0x00040000 +#define pfHasDcbab 13 +#define pfL1fa 0x00010000 +#define pfL1fab 15 +#define pfL2 0x00008000 +#define pfL2b 16 +#define pfL2fa 0x00004000 +#define pfL2fab 17 +#define pfL2i 0x00002000 +#define pfL2ib 18 #define pfLClck 0x00001000 #define pfLClckb 19 #define pfWillNap 0x00000800 @@ -83,49 +96,50 @@ struct procFeatures { #define pfNoMSRirb 21 #define pfL3pdet 0x00000200 #define pfL3pdetb 22 -#define pfL1i 0x00000100 -#define pfL1ib 23 -#define pfL1d 0x00000080 -#define pfL1db 24 -#define pfL1fa 0x00000040 -#define pfL1fab 25 -#define pfL2 0x00000020 -#define pfL2b 26 -#define pfL2fa 0x00000010 -#define pfL2fab 27 -#define pfL2i 0x00000008 -#define pfL2ib 28 +#define pf128Byte 0x00000080 +#define pf128Byteb 24 +#define pf32Byte 0x00000020 +#define pf32Byteb 26 +#define pf64Bit 0x00000010 +#define pf64Bitb 27 #define pfL3 0x00000004 #define pfL3b 29 #define pfL3fa 0x00000002 #define pfL3fab 30 #define pfValid 0x00000001 #define pfValidb 31 - unsigned short rptdProc; - unsigned short lineSize; - unsigned int l1iSize; - unsigned int l1dSize; - unsigned int l2cr; - unsigned int l2Size; - unsigned int l3cr; - unsigned int l3Size; - unsigned int pfHID0; - unsigned int pfHID1; - unsigned int pfHID2; - unsigned int pfHID3; - unsigned int pfMSSCR0; - unsigned int pfMSSCR1; - unsigned int pfICTRL; - unsigned int pfLDSTCR; - unsigned int pfLDSTDB; - unsigned int l2crOriginal; - unsigned int l3crOriginal; - unsigned int pfBootConfig; - unsigned int reserved[4]; + unsigned short rptdProc; /* 0x004 */ + unsigned short lineSize; /* 0x006 */ + unsigned int l1iSize; /* 0x008 */ + unsigned int l1dSize; /* 0x00C */ + unsigned int l2cr; /* 0x010 */ + unsigned int l2Size; /* 0x014 */ + unsigned int l3cr; /* 0x018 */ + unsigned int l3Size; /* 0x01C */ + unsigned int pfMSSCR0; /* 0x020 */ + unsigned int pfMSSCR1; /* 0x024 */ + unsigned int pfICTRL; /* 0x028 */ + unsigned int pfLDSTCR; /* 0x02C */ + unsigned int pfLDSTDB; /* 0x030 */ + unsigned int pfMaxVAddr; /* 0x034 */ + unsigned int pfMaxPAddr; /* 0x038 
*/ + unsigned int pfPTEG; /* 0x03C */ + uint64_t pfHID0; /* 0x040 */ + uint64_t pfHID1; /* 0x048 */ + uint64_t pfHID2; /* 0x050 */ + uint64_t pfHID3; /* 0x058 */ + uint64_t pfHID4; /* 0x060 */ + uint64_t pfHID5; /* 0x068 */ + unsigned int l2crOriginal; /* 0x070 */ + unsigned int l3crOriginal; /* 0x074 */ + unsigned int pfBootConfig; /* 0x07C */ + unsigned int reserved[1]; /* 0x80 */ }; +#pragma pack() typedef struct procFeatures procFeatures; +#pragma pack(4) /* Make sure the structure stays as we defined it */ struct thrmControl { unsigned int maxTemp; /* Maximum temprature before damage */ unsigned int throttleTemp; /* Temprature at which to throttle down */ @@ -134,11 +148,130 @@ struct thrmControl { unsigned int thrm3val; /* Value for thrm3 register */ unsigned int rsvd[3]; /* Pad to cache line */ }; +#pragma pack() typedef struct thrmControl thrmControl; +/* + * + * Various performance counters + */ +#pragma pack(4) /* Make sure the structure stays as we defined it */ +struct hwCtrs { + + unsigned int hwInVains; /* In vain */ + unsigned int hwResets; /* Reset */ + unsigned int hwMachineChecks; /* Machine check */ + unsigned int hwDSIs; /* DSIs */ + unsigned int hwISIs; /* ISIs */ + unsigned int hwExternals; /* Externals */ + unsigned int hwAlignments; /* Alignment */ + unsigned int hwPrograms; /* Program */ + unsigned int hwFloatPointUnavailable; /* Floating point */ + unsigned int hwDecrementers; /* Decrementer */ + unsigned int hwIOErrors; /* I/O error */ + unsigned int hwrsvd0; /* Reserved */ + unsigned int hwSystemCalls; /* System call */ + unsigned int hwTraces; /* Trace */ + unsigned int hwFloatingPointAssists; /* Floating point assist */ + unsigned int hwPerformanceMonitors; /* Performance monitor */ + unsigned int hwAltivecs; /* VMX */ + unsigned int hwrsvd1; /* Reserved */ + unsigned int hwrsvd2; /* Reserved */ + unsigned int hwrsvd3; /* Reserved */ + unsigned int hwInstBreakpoints; /* Instruction breakpoint */ + unsigned int hwSystemManagements; /* System management */ + unsigned int hwAltivecAssists; /* Altivec Assist */ + unsigned int hwThermal; /* Thermals */ + unsigned int hwrsvd5; /* Reserved */ + unsigned int hwrsvd6; /* Reserved */ + unsigned int hwrsvd7; /* Reserved */ + unsigned int hwrsvd8; /* Reserved */ + unsigned int hwrsvd9; /* Reserved */ + unsigned int hwrsvd10; /* Reserved */ + unsigned int hwrsvd11; /* Reserved */ + unsigned int hwrsvd12; /* Reserved */ + unsigned int hwrsvd13; /* Reserved */ + unsigned int hwTrace601; /* Trace */ + unsigned int hwSIGPs; /* SIGP */ + unsigned int hwPreemptions; /* Preemption */ + unsigned int hwContextSwitchs; /* Context switch */ + unsigned int hwShutdowns; /* Shutdowns */ + unsigned int hwChokes; /* System ABENDs */ + unsigned int hwDataSegments; /* Data Segment Interruptions */ + unsigned int hwInstructionSegments; /* Instruction Segment Interruptions */ + unsigned int hwSoftPatches; /* Soft Patch interruptions */ + unsigned int hwMaintenances; /* Maintenance interruptions */ + unsigned int hwInstrumentations; /* Instrumentation interruptions */ + unsigned int hwrsvd14; /* Reswerved */ +/* 0x0B4 */ + + unsigned int hwspare0[17]; /* Reserved */ + unsigned int hwRedrives; /* Number of redriven interrupts */ + unsigned int hwSteals; /* PTE Steals */ +/* 0x100 */ + + unsigned int hwMckHang; /* ? */ + unsigned int hwMckSLBPE; /* ? */ + unsigned int hwMckTLBPE; /* ? */ + unsigned int hwMckERCPE; /* ? */ + unsigned int hwMckL1DPE; /* ? */ + unsigned int hwMckL1TPE; /* ? */ + unsigned int hwMckUE; /* ? 
*/ + unsigned int hwMckIUE; /* ? */ + unsigned int hwMckIUEr; /* ? */ + unsigned int hwMckDUE; /* ? */ + unsigned int hwMckDTW; /* ? */ + unsigned int hwMckUnk; /* ? */ + unsigned int hwMckExt; /* ? */ + unsigned int hwMckICachePE; /* ? */ + unsigned int hwMckITagPE; /* ? */ + unsigned int hwMckIEratPE; /* ? */ + unsigned int hwMckDEratPE; /* ? */ + unsigned int hwspare2[15]; /* Pad to next 128 bndry */ +/* 0x180 */ + + unsigned int napStamp[2]; /* Time base when we napped */ + unsigned int napTotal[2]; /* Total nap time in ticks */ + unsigned int numSIGPast; /* Number of SIGP asts recieved */ + unsigned int numSIGPcpureq; /* Number of SIGP cpu requests recieved */ + unsigned int numSIGPdebug; /* Number of SIGP debugs recieved */ + unsigned int numSIGPwake; /* Number of SIGP wakes recieved */ + unsigned int numSIGPtimo; /* Number of SIGP send timeouts */ + unsigned int numSIGPmast; /* Number of SIGPast messages merged */ + unsigned int numSIGPmwake; /* Number of SIGPwake messages merged */ + + unsigned int hwspare3[21]; /* Pad to 512 */ + +}; +#pragma pack() + +typedef struct hwCtrs hwCtrs; + +struct patch_entry { + unsigned int *addr; + unsigned int data; + unsigned int type; + unsigned int value; +}; + +typedef struct patch_entry patch_entry_t; + +#define PATCH_INVALID 0 +#define PATCH_PROCESSOR 1 +#define PATCH_FEATURE 2 + +#define PATCH_TABLE_SIZE 12 + +#define PatchExt32 0x80000000 +#define PatchExt32b 0 +#define PatchLwsync 0x40000000 +#define PatchLwsyncb 1 + /* When an exception is taken, this info is accessed via sprg0 */ /* We should always have this one on a cache line boundary */ + +#pragma pack(4) /* Make sure the structure stays as we defined it */ struct per_proc_info { unsigned short cpu_number; unsigned short cpu_flags; /* Various low-level flags */ @@ -148,44 +281,40 @@ struct per_proc_info { vm_offset_t debstackptr; vm_offset_t debstack_top_ss; - unsigned int tempwork1; /* Temp work area - monitor use carefully */ - unsigned int save_exception_type; + unsigned int spcFlags; /* Special thread flags */ + unsigned int Uassist; /* User Assist Word */ unsigned int old_thread; /* PPC cache line boundary here - 020 */ - unsigned int active_kloaded; /* pointer to active_kloaded[CPU_NO] */ - unsigned int active_stacks; /* pointer to active_stacks[CPU_NO] */ + unsigned int rsrvd020[2]; unsigned int need_ast; /* pointer to need_ast[CPU_NO] */ /* * Note: the following two pairs of words need to stay in order and each pair must * be in the same reservation (line) granule */ - struct facility_context *FPU_owner; /* Owner of the FPU on this cpu */ - unsigned int pprsv1; - struct facility_context *VMX_owner; /* Owner of the VMX on this cpu */ - unsigned int pprsv2; - unsigned int next_savearea; /* pointer to the next savearea */ + struct facility_context *FPU_owner; /* Owner of the FPU on this cpu */ + unsigned int liveVRSave; /* VRSave assiciated with live vector registers */ + struct facility_context *VMX_owner; /* Owner of the VMX on this cpu */ + unsigned int holdQFret; /* Hold off releasing quickfret list */ + unsigned int save_exception_type; /* PPC cache line boundary here - 040 */ - unsigned int quickfret; /* List of saveareas to release */ - unsigned int lclfree; /* Pointer to local savearea list */ + addr64_t quickfret; /* List of saveareas to release */ + addr64_t lclfree; /* Pointer to local savearea list */ unsigned int lclfreecnt; /* Entries in local savearea list */ - unsigned int Lastpmap; /* Last user pmap loaded */ - unsigned int userspace; /* Last loaded user memory 
space ID */ - unsigned int userpmap; /* User pmap - real address */ - unsigned int liveVRSave; /* VRSave assiciated with live vector registers */ - unsigned int spcFlags; /* Special thread flags */ + unsigned int spcTRc; /* Special trace count */ + unsigned int spcTRp; /* Special trace buffer pointer */ + unsigned int ppbbTaskEnv; /* BlueBox Task Environment */ /* PPC cache line boundary here - 060 */ boolean_t interrupts_enabled; - unsigned int ppbbTaskEnv; /* BlueBox Task Environment */ IOInterruptHandler interrupt_handler; void * interrupt_nub; unsigned int interrupt_source; void * interrupt_target; void * interrupt_refCon; - time_base_enable_t time_base_enable; + uint64_t next_savearea; /* pointer to the next savearea */ /* PPC cache line boundary here - 080 */ unsigned int MPsigpStat; /* Signal Processor status (interlocked update for this one) */ @@ -205,6 +334,9 @@ struct per_proc_info { #define SIGPwake 3 /* Wake up a sleeping processor */ #define CPRQtemp 0 /* Get temprature of processor */ #define CPRQtimebase 1 /* Get timebase of processor */ +#define CPRQsegload 2 /* Segment registers reload */ +#define CPRQscom 3 /* SCOM */ +#define CPRQchud 4 /* CHUD perfmon */ unsigned int MPsigpParm0; /* SIGP parm 0 */ unsigned int MPsigpParm1; /* SIGP parm 1 */ unsigned int MPsigpParm2; /* SIGP parm 2 */ @@ -215,41 +347,79 @@ struct per_proc_info { /* PPC cache line boundary here - 0A0 */ procFeatures pf; /* Processor features */ - /* PPC cache line boundary here - 100 */ - thrmControl thrm; /* Thermal controls */ - /* PPC cache line boundary here - 120 */ - unsigned int napStamp[2]; /* Time base when we napped */ - unsigned int napTotal[2]; /* Total nap time in ticks */ - unsigned int numSIGPast; /* Number of SIGP asts recieved */ - unsigned int numSIGPcpureq; /* Number of SIGP cpu requests recieved */ - unsigned int numSIGPdebug; /* Number of SIGP debugs recieved */ - unsigned int numSIGPwake; /* Number of SIGP wakes recieved */ + thrmControl thrm; /* Thermal controls */ /* PPC cache line boundary here - 140 */ - unsigned int numSIGPtimo; /* Number of SIGP send timeouts */ - unsigned int numSIGPmast; /* Number of SIGPast messages merged */ - unsigned int numSIGPmwake; /* Number of SIGPwake messages merged */ - unsigned int spcTRc; /* Special trace count */ - unsigned int spcTRp; /* Special trace buffer pointer */ - unsigned int Uassist; /* User Assist Word */ - vm_offset_t VMMareaPhys; /* vmm state page physical addr */ - unsigned int FAMintercept; /* vmm FAM Exceptions to intercept */ + unsigned int ppRsvd140[8]; /* Reserved */ /* PPC cache line boundary here - 160 */ + time_base_enable_t time_base_enable; + unsigned int ppRsvd164[4]; /* Reserved */ cpu_data_t pp_cpu_data; /* cpu data info */ - unsigned int rsrvd170[4]; /* Reserved slots */ /* PPC cache line boundary here - 180 */ - unsigned int rsrvd180[8]; /* Reserved slots */ + unsigned int ppRsvd180[2]; /* Reserved */ + uint64_t validSegs; /* Valid SR/STB slots */ + addr64_t ppUserPmap; /* Current user state pmap (physical address) */ + unsigned int ppUserPmapVirt; /* Current user state pmap (virtual address) */ + unsigned int ppMapFlags; /* Mapping flags */ /* PPC cache line boundary here - 1A0 */ - unsigned int rsrvd1A0[8]; /* Reserved slots */ + unsigned short ppInvSeg; /* Forces complete invalidate of SRs/SLB (this must stay with ppInvSeg) */ + unsigned short ppCurSeg; /* Set to 1 if user segments, 0 if kernel (this must stay with ppInvSeg) */ + unsigned int ppSegSteal; /* Count of segment slot steals */ + ppnum_t VMMareaPhys; 
/* vmm state page physical addr */ + unsigned int VMMXAFlgs; /* vmm extended flags */ + unsigned int FAMintercept; /* vmm FAM Exceptions to intercept */ + unsigned int rsrvd1B4[3]; /* Reserved slots */ /* PPC cache line boundary here - 1C0 */ - unsigned int rsrvd1C0[8]; /* Reserved slots */ + unsigned int ppCIOmp[16]; /* Linkage mapping for copyin/out - 64 bytes */ + + /* PPC cache line boundary here - 200 */ + uint64_t tempr0; /* temporary savearea */ + uint64_t tempr1; + uint64_t tempr2; + uint64_t tempr3; + + uint64_t tempr4; + uint64_t tempr5; + uint64_t tempr6; + uint64_t tempr7; + + uint64_t tempr8; + uint64_t tempr9; + uint64_t tempr10; + uint64_t tempr11; + + uint64_t tempr12; + uint64_t tempr13; + uint64_t tempr14; + uint64_t tempr15; + + uint64_t tempr16; + uint64_t tempr17; + uint64_t tempr18; + uint64_t tempr19; + + uint64_t tempr20; + uint64_t tempr21; + uint64_t tempr22; + uint64_t tempr23; - /* PPC cache line boundary here - 1E0 */ + uint64_t tempr24; + uint64_t tempr25; + uint64_t tempr26; + uint64_t tempr27; + + uint64_t tempr28; + uint64_t tempr29; + uint64_t tempr30; + uint64_t tempr31; + + + /* PPC cache line boundary here - 300 */ double emfp0; /* Copies of floating point registers */ double emfp1; /* Used for emulation purposes */ double emfp2; @@ -290,12 +460,12 @@ struct per_proc_info { double emfp30; double emfp31; -/* - 2E0 */ +/* - 400 */ unsigned int emfpscr_pad; unsigned int emfpscr; unsigned int empadfp[6]; -/* - 300 */ +/* - 420 */ unsigned int emvr0[4]; /* Copies of vector registers used both */ unsigned int emvr1[4]; /* for full vector emulation or */ unsigned int emvr2[4]; /* as saveareas while assisting denorms */ @@ -330,25 +500,37 @@ struct per_proc_info { unsigned int emvr31[4]; unsigned int emvscr[4]; unsigned int empadvr[4]; -/* - 520 */ +/* - 640 */ +/* note implicit dependence on kSkipListMaxLists, which must be <= 28 */ + addr64_t skipListPrev[28]; /* prev ptrs saved as side effect of calling mapSearchFull() */ + +/* - 720 */ unsigned int patcharea[56]; -/* - 600 */ +/* - 800 */ + + hwCtrs hwCtr; /* Hardware exception counters */ +/* - A00 */ + + unsigned int pppadpage[384]; /* Pad to end of page */ +/* - 1000 */ + }; -#define pp_active_thread pp_cpu_data.active_thread #define pp_preemption_count pp_cpu_data.preemption_level #define pp_simple_lock_count pp_cpu_data.simple_lock_count #define pp_interrupt_level pp_cpu_data.interrupt_level +#pragma pack() + extern struct per_proc_info per_proc_info[NCPUS]; + extern char *trap_type[]; -#endif /* ndef ASSEMBLER */ -/* with this savearea should be redriven */ +#endif /* ndef ASSEMBLER */ /* with this savearea should be redriven */ /* cpu_flags defs */ #define SIGPactive 0x8000 @@ -356,8 +538,11 @@ extern char *trap_type[]; #define turnEEon 0x2000 #define traceBE 0x1000 /* user mode BE tracing in enabled */ #define traceBEb 3 /* bit number for traceBE */ -#define BootDone 0x0100 +#define SleepState 0x0800 +#define SleepStateb 4 +#define mcountOff 0x0400 #define SignalReady 0x0200 +#define BootDone 0x0100 #define loadMSR 0x7FF4 #define T_VECTOR_SIZE 4 /* function pointer size */ @@ -396,7 +581,7 @@ extern char *trap_type[]; #define T_INVALID_EXCP10 (0x1D * T_VECTOR_SIZE) #define T_INVALID_EXCP11 (0x1E * T_VECTOR_SIZE) #define T_INVALID_EXCP12 (0x1F * T_VECTOR_SIZE) -#define T_INVALID_EXCP13 (0x20 * T_VECTOR_SIZE) +#define T_EMULATE (0x20 * T_VECTOR_SIZE) #define T_RUNMODE_TRACE (0x21 * T_VECTOR_SIZE) /* 601 only */ @@ -406,12 +591,20 @@ extern char *trap_type[]; #define T_SHUTDOWN (0x25 * T_VECTOR_SIZE) 
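[The hunks above repack per_proc_info: every field now carries an explicit byte-offset comment, the 64-bit temporaries tempr0..tempr31 and the hwCtrs counter block are appended, and pppadpage pads the structure to exactly one 4 KB page, all under #pragma pack(4). The pack pragma is what makes the offset comments trustworthy: the low-level exception paths reach these fields by hard-coded displacements, so the compiler must not insert alignment padding ahead of the new uint64_t members. A minimal, self-contained sketch of the effect — toy structure and field names, not from the patch, on ABIs where long long is naturally 8-byte aligned:

#include <stddef.h>
#include <stdio.h>

struct natural {                      /* default alignment rules */
    unsigned int       flags;         /* offset 0x0 */
    unsigned long long tempr;         /* offset 0x8 after compiler padding */
};

#pragma pack(4)
struct packed4 {                      /* the layout style the patch relies on */
    unsigned int       flags;         /* offset 0x0 */
    unsigned long long tempr;         /* offset 0x4, no padding inserted */
};
#pragma pack()

int main(void)
{
    printf("natural: tempr at 0x%lx, size 0x%lx\n",
           (unsigned long)offsetof(struct natural, tempr),
           (unsigned long)sizeof(struct natural));
    printf("pack(4): tempr at 0x%lx, size 0x%lx\n",
           (unsigned long)offsetof(struct packed4, tempr),
           (unsigned long)sizeof(struct packed4));
    return 0;
}

Without pack(4), a single invisible padding word would shift every later field — and every "/* - 300 */"-style boundary comment in the header — by four bytes, silently breaking the assembly displacements.]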
#define T_CHOKE (0x26 * T_VECTOR_SIZE) +#define T_DATA_SEGMENT (0x27 * T_VECTOR_SIZE) +#define T_INSTRUCTION_SEGMENT (0x28 * T_VECTOR_SIZE) + +#define T_SOFT_PATCH (0x29 * T_VECTOR_SIZE) +#define T_MAINTENANCE (0x2A * T_VECTOR_SIZE) +#define T_INSTRUMENTATION (0x2B * T_VECTOR_SIZE) +#define T_ARCHDEP0 (0x2C * T_VECTOR_SIZE) + #define T_AST (0x100 * T_VECTOR_SIZE) #define T_MAX T_CHOKE /* Maximum exception no */ #define T_FAM 0x00004000 -#define EXCEPTION_VECTOR(exception) (exception * 0x100 /T_VECTOR_SIZE ) +#define EXCEPTION_VECTOR(exception) (exception * 0x100 / T_VECTOR_SIZE ) /* * System choke (failure) codes @@ -424,17 +617,21 @@ extern char *trap_type[]; #define failNoSavearea 4 #define failSaveareaCorr 5 #define failBadLiveContext 6 +#define failSkipLists 7 +#define failUnalignedStk 8 /* Always must be last - update failNames table in model_dep.c as well */ -#define failUnknown 7 +#define failUnknown 9 #ifndef ASSEMBLER +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct resethandler { unsigned int type; vm_offset_t call_paddr; vm_offset_t arg__paddr; } resethandler_t; +#pragma pack() extern resethandler_t ResetHandler; @@ -442,5 +639,7 @@ extern resethandler_t ResetHandler; #define RESET_HANDLER_NULL 0x0 #define RESET_HANDLER_START 0x1 +#define RESET_HANDLER_BUPOR 0x2 +#define RESET_HANDLER_IGNORE 0x3 #endif /* _PPC_EXCEPTION_H_ */ diff --git a/osfmk/ppc/genassym.c b/osfmk/ppc/genassym.c index 9cd0d9cbc..de20106ce 100644 --- a/osfmk/ppc/genassym.c +++ b/osfmk/ppc/genassym.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -50,6 +50,7 @@ #include #include #include +#include #include #include #include @@ -63,14 +64,15 @@ #include #include #include -#include #include -#include #include #include #include +#include #include #include +#include +#include #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE)0)->MEMBER) @@ -80,13 +82,21 @@ int main(int argc, char *argv[]) { /* Process Control Block */ - - DECLARE("ACT_MACT_KSP", offsetof(struct thread_activation *, mact.ksp)); - DECLARE("ACT_MACT_BEDA", offsetof(struct thread_activation *, mact.bbDescAddr)); - DECLARE("ACT_MACT_BTS", offsetof(struct thread_activation *, mact.bbTableStart)); - DECLARE("ACT_MACT_BTE", offsetof(struct thread_activation *, mact.bbTaskEnv)); - DECLARE("ACT_MACT_SPF", offsetof(struct thread_activation *, mact.specFlags)); - DECLARE("qactTimer", offsetof(struct thread_activation *, mact.qactTimer)); + DECLARE("ACT_MACT_KSP", offsetof(thread_act_t, mact.ksp)); + DECLARE("ACT_MACT_BEDA", offsetof(thread_act_t, mact.bbDescAddr)); + DECLARE("ACT_MACT_BTS", offsetof(thread_act_t, mact.bbTableStart)); + DECLARE("ACT_MACT_BTE", offsetof(thread_act_t, mact.bbTaskEnv)); + DECLARE("ACT_MACT_SPF", offsetof(thread_act_t, mact.specFlags)); + DECLARE("ACT_PREEMPT_CNT", offsetof(thread_act_t, mact.preemption_count)); + DECLARE("qactTimer", offsetof(thread_act_t, mact.qactTimer)); + DECLARE("cioSpace", offsetof(thread_act_t, mact.cioSpace)); + DECLARE("cioRelo", offsetof(thread_act_t, mact.cioRelo)); + DECLARE("cioSwitchAway", cioSwitchAway); + DECLARE("cioSwitchAwayb", cioSwitchAwayb); + DECLARE("bbTrap", offsetof(thread_act_t, mact.bbTrap)); + DECLARE("bbSysCall", offsetof(thread_act_t, mact.bbSysCall)); + DECLARE("bbInterrupt", offsetof(thread_act_t, mact.bbInterrupt)); + DECLARE("bbPending", offsetof(thread_act_t, mact.bbPending)); DECLARE("floatUsed", 
floatUsed); DECLARE("vectorUsed", vectorUsed); @@ -98,13 +108,10 @@ int main(int argc, char *argv[]) DECLARE("vectorCngbit", vectorCngbit); DECLARE("userProtKey", userProtKey); DECLARE("userProtKeybit", userProtKeybit); - DECLARE("trapUnalign", trapUnalign); - DECLARE("trapUnalignbit", trapUnalignbit); - DECLARE("notifyUnalign", notifyUnalign); - DECLARE("notifyUnalignbit", notifyUnalignbit); DECLARE("bbThread", bbThread); DECLARE("bbThreadbit", bbThreadbit); + DECLARE("bbNoMachSC", bbNoMachSC); DECLARE("bbNoMachSCbit",bbNoMachSCbit); DECLARE("bbPreemptive", bbPreemptive); DECLARE("bbPreemptivebit", bbPreemptivebit); @@ -115,35 +122,30 @@ int main(int argc, char *argv[]) DECLARE("FamVMenabit", FamVMenabit); DECLARE("FamVMmode", FamVMmode); DECLARE("FamVMmodebit", FamVMmodebit); + DECLARE("perfMonitor", perfMonitor); + DECLARE("perfMonitorbit", perfMonitorbit); /* Per Proc info structure */ DECLARE("PP_CPU_NUMBER", offsetof(struct per_proc_info *, cpu_number)); DECLARE("PP_CPU_FLAGS", offsetof(struct per_proc_info *, cpu_flags)); + DECLARE("SleepState", SleepState); DECLARE("PP_ISTACKPTR", offsetof(struct per_proc_info *, istackptr)); DECLARE("PP_INTSTACK_TOP_SS", offsetof(struct per_proc_info *, intstack_top_ss)); DECLARE("PP_DEBSTACKPTR", offsetof(struct per_proc_info *, debstackptr)); DECLARE("PP_DEBSTACK_TOP_SS", offsetof(struct per_proc_info *, debstack_top_ss)); - DECLARE("PP_TEMPWORK1", offsetof(struct per_proc_info *, tempwork1)); - DECLARE("PP_USERSPACE", offsetof(struct per_proc_info *, userspace)); - DECLARE("PP_USERPMAP", offsetof(struct per_proc_info *, userpmap)); - DECLARE("PP_LASTPMAP", offsetof(struct per_proc_info *, Lastpmap)); DECLARE("FPUowner", offsetof(struct per_proc_info *, FPU_owner)); DECLARE("VMXowner", offsetof(struct per_proc_info *, VMX_owner)); + DECLARE("holdQFret", offsetof(struct per_proc_info *, holdQFret)); DECLARE("PP_SAVE_EXCEPTION_TYPE", offsetof(struct per_proc_info *, save_exception_type)); - DECLARE("PP_ACTIVE_KLOADED", offsetof(struct per_proc_info *, active_kloaded)); - DECLARE("PP_ACTIVE_STACKS", offsetof(struct per_proc_info *, active_stacks)); DECLARE("PP_NEED_AST", offsetof(struct per_proc_info *, need_ast)); DECLARE("quickfret", offsetof(struct per_proc_info *, quickfret)); DECLARE("lclfree", offsetof(struct per_proc_info *, lclfree)); DECLARE("lclfreecnt", offsetof(struct per_proc_info *, lclfreecnt)); DECLARE("PP_INTS_ENABLED", offsetof(struct per_proc_info *, interrupts_enabled)); DECLARE("UAW", offsetof(struct per_proc_info *, Uassist)); - DECLARE("VMMareaPhys", offsetof(struct per_proc_info *, VMMareaPhys)); - DECLARE("FAMintercept", offsetof(struct per_proc_info *, FAMintercept)); DECLARE("next_savearea", offsetof(struct per_proc_info *, next_savearea)); - DECLARE("PP_ACTIVE_THREAD", offsetof(struct per_proc_info *, pp_active_thread)); - DECLARE("PP_PREEMPT_CNT", offsetof(struct per_proc_info *, pp_preemption_count)); + DECLARE("PP_CPU_DATA", offsetof(struct per_proc_info *, pp_cpu_data)); DECLARE("PP_SIMPLE_LOCK_CNT", offsetof(struct per_proc_info *, pp_simple_lock_count)); DECLARE("PP_INTERRUPT_LVL", offsetof(struct per_proc_info *, pp_interrupt_level)); DECLARE("ppbbTaskEnv", offsetof(struct per_proc_info *, ppbbTaskEnv)); @@ -167,30 +169,20 @@ int main(int argc, char *argv[]) DECLARE("pfCanNapb", pfCanNapb); DECLARE("pfCanDoze", pfCanDoze); DECLARE("pfCanDozeb", pfCanDozeb); - DECLARE("pfCanDoze", pfCanDoze); - DECLARE("pfCanDozeb", pfCanDozeb); DECLARE("pfThermal", pfThermal); DECLARE("pfThermalb", pfThermalb); 
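[genassym.c is not linked into the kernel; it is a generator whose DECLARE(name, value) table turns C-side offsetof() expressions into assembler-visible constants, which is why each structure change in this patch is mirrored by a matching block of DECLARE edits here. A minimal sketch of the idiom — toy structure and a printf-based DECLARE are stand-ins; the real build harvests the constants from compiled output rather than printing them, and xnu's own offsetof takes a pointer type:

#include <stddef.h>
#include <stdio.h>

struct toy_act {                     /* stand-in for the thread activation */
    void        *task;
    void        *pcb;
    unsigned int preemption_count;
};

#define DECLARE(sym, val) \
    printf("#define %s 0x%lx\n", (sym), (unsigned long)(val))

int main(void)
{
    DECLARE("ACT_TASK",        offsetof(struct toy_act, task));
    DECLARE("ACT_MACT_PCB",    offsetof(struct toy_act, pcb));
    DECLARE("ACT_PREEMPT_CNT", offsetof(struct toy_act, preemption_count));
    return 0;
}

Running the sketch emits a small header of #define lines; hand-written .s files include the generated header, so their displacements track the C layout automatically instead of being maintained by hand.]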
DECLARE("pfThermInt", pfThermInt); DECLARE("pfThermIntb", pfThermIntb); - DECLARE("pfWillNap", pfWillNap); - DECLARE("pfWillNapb", pfWillNapb); - DECLARE("pfNoMSRir", pfNoMSRir); - DECLARE("pfNoMSRirb", pfNoMSRirb); - DECLARE("pfNoL2PFNap", pfNoL2PFNap); - DECLARE("pfNoL2PFNapb", pfNoL2PFNapb); DECLARE("pfSlowNap", pfSlowNap); DECLARE("pfSlowNapb", pfSlowNapb); DECLARE("pfNoMuMMCK", pfNoMuMMCK); DECLARE("pfNoMuMMCKb", pfNoMuMMCKb); - DECLARE("pfLClck", pfLClck); - DECLARE("pfLClckb", pfLClckb); - DECLARE("pfL3pdet", pfL3pdet); - DECLARE("pfL3pdetb", pfL3pdetb); - DECLARE("pfL1i", pfL1i); - DECLARE("pfL1ib", pfL1ib); - DECLARE("pfL1d", pfL1d); - DECLARE("pfL1db", pfL1db); + DECLARE("pfNoL2PFNap", pfNoL2PFNap); + DECLARE("pfNoL2PFNapb", pfNoL2PFNapb); + DECLARE("pfSCOMFixUp", pfSCOMFixUp); + DECLARE("pfSCOMFixUpb", pfSCOMFixUpb); + DECLARE("pfHasDcba", pfHasDcba); + DECLARE("pfHasDcbab", pfHasDcbab); DECLARE("pfL1fa", pfL1fa); DECLARE("pfL1fab", pfL1fab); DECLARE("pfL2", pfL2); @@ -199,6 +191,20 @@ int main(int argc, char *argv[]) DECLARE("pfL2fab", pfL2fab); DECLARE("pfL2i", pfL2i); DECLARE("pfL2ib", pfL2ib); + DECLARE("pfLClck", pfLClck); + DECLARE("pfLClckb", pfLClckb); + DECLARE("pfWillNap", pfWillNap); + DECLARE("pfWillNapb", pfWillNapb); + DECLARE("pfNoMSRir", pfNoMSRir); + DECLARE("pfNoMSRirb", pfNoMSRirb); + DECLARE("pfL3pdet", pfL3pdet); + DECLARE("pfL3pdetb", pfL3pdetb); + DECLARE("pf128Byte", pf128Byte); + DECLARE("pf128Byteb", pf128Byteb); + DECLARE("pf32Byte", pf32Byte); + DECLARE("pf32Byteb", pf32Byteb); + DECLARE("pf64Bit", pf64Bit); + DECLARE("pf64Bitb", pf64Bitb); DECLARE("pfL3", pfL3); DECLARE("pfL3b", pfL3b); DECLARE("pfL3fa", pfL3fa); @@ -217,6 +223,8 @@ int main(int argc, char *argv[]) DECLARE("pfHID1", offsetof(struct per_proc_info *, pf.pfHID1)); DECLARE("pfHID2", offsetof(struct per_proc_info *, pf.pfHID2)); DECLARE("pfHID3", offsetof(struct per_proc_info *, pf.pfHID3)); + DECLARE("pfHID4", offsetof(struct per_proc_info *, pf.pfHID4)); + DECLARE("pfHID5", offsetof(struct per_proc_info *, pf.pfHID5)); DECLARE("pfMSSCR0", offsetof(struct per_proc_info *, pf.pfMSSCR0)); DECLARE("pfMSSCR1", offsetof(struct per_proc_info *, pf.pfMSSCR1)); DECLARE("pfICTRL", offsetof(struct per_proc_info *, pf.pfICTRL)); @@ -225,6 +233,9 @@ int main(int argc, char *argv[]) DECLARE("pfl2crOriginal", offsetof(struct per_proc_info *, pf.l2crOriginal)); DECLARE("pfl3crOriginal", offsetof(struct per_proc_info *, pf.l3crOriginal)); DECLARE("pfBootConfig", offsetof(struct per_proc_info *, pf.pfBootConfig)); + DECLARE("pfPTEG", offsetof(struct per_proc_info *, pf.pfPTEG)); + DECLARE("pfMaxVAddr", offsetof(struct per_proc_info *, pf.pfMaxVAddr)); + DECLARE("pfMaxPAddr", offsetof(struct per_proc_info *, pf.pfMaxPAddr)); DECLARE("pfSize", sizeof(procFeatures)); DECLARE("thrmmaxTemp", offsetof(struct per_proc_info *, thrm.maxTemp)); @@ -234,8 +245,52 @@ int main(int argc, char *argv[]) DECLARE("thrm3val", offsetof(struct per_proc_info *, thrm.thrm3val)); DECLARE("thrmSize", sizeof(thrmControl)); - DECLARE("napStamp", offsetof(struct per_proc_info *, napStamp)); - DECLARE("napTotal", offsetof(struct per_proc_info *, napTotal)); + DECLARE("validSegs", offsetof(struct per_proc_info *, validSegs)); + DECLARE("ppUserPmapVirt", offsetof(struct per_proc_info *, ppUserPmapVirt)); + DECLARE("ppUserPmap", offsetof(struct per_proc_info *, ppUserPmap)); + DECLARE("ppMapFlags", offsetof(struct per_proc_info *, ppMapFlags)); + DECLARE("ppInvSeg", offsetof(struct per_proc_info *, ppInvSeg)); + DECLARE("ppCurSeg", 
offsetof(struct per_proc_info *, ppCurSeg)); + DECLARE("ppSegSteal", offsetof(struct per_proc_info *, ppSegSteal)); + + DECLARE("VMMareaPhys", offsetof(struct per_proc_info *, VMMareaPhys)); + DECLARE("VMMXAFlgs", offsetof(struct per_proc_info *, VMMXAFlgs)); + DECLARE("FAMintercept", offsetof(struct per_proc_info *, FAMintercept)); + + DECLARE("ppCIOmp", offsetof(struct per_proc_info *, ppCIOmp)); + + DECLARE("tempr0", offsetof(struct per_proc_info *, tempr0)); + DECLARE("tempr1", offsetof(struct per_proc_info *, tempr1)); + DECLARE("tempr2", offsetof(struct per_proc_info *, tempr2)); + DECLARE("tempr3", offsetof(struct per_proc_info *, tempr3)); + DECLARE("tempr4", offsetof(struct per_proc_info *, tempr4)); + DECLARE("tempr5", offsetof(struct per_proc_info *, tempr5)); + DECLARE("tempr6", offsetof(struct per_proc_info *, tempr6)); + DECLARE("tempr7", offsetof(struct per_proc_info *, tempr7)); + DECLARE("tempr8", offsetof(struct per_proc_info *, tempr8)); + DECLARE("tempr9", offsetof(struct per_proc_info *, tempr9)); + DECLARE("tempr10", offsetof(struct per_proc_info *, tempr10)); + DECLARE("tempr11", offsetof(struct per_proc_info *, tempr11)); + DECLARE("tempr12", offsetof(struct per_proc_info *, tempr12)); + DECLARE("tempr13", offsetof(struct per_proc_info *, tempr13)); + DECLARE("tempr14", offsetof(struct per_proc_info *, tempr14)); + DECLARE("tempr15", offsetof(struct per_proc_info *, tempr15)); + DECLARE("tempr16", offsetof(struct per_proc_info *, tempr16)); + DECLARE("tempr17", offsetof(struct per_proc_info *, tempr17)); + DECLARE("tempr18", offsetof(struct per_proc_info *, tempr18)); + DECLARE("tempr19", offsetof(struct per_proc_info *, tempr19)); + DECLARE("tempr20", offsetof(struct per_proc_info *, tempr20)); + DECLARE("tempr21", offsetof(struct per_proc_info *, tempr21)); + DECLARE("tempr22", offsetof(struct per_proc_info *, tempr22)); + DECLARE("tempr23", offsetof(struct per_proc_info *, tempr23)); + DECLARE("tempr24", offsetof(struct per_proc_info *, tempr24)); + DECLARE("tempr25", offsetof(struct per_proc_info *, tempr25)); + DECLARE("tempr26", offsetof(struct per_proc_info *, tempr26)); + DECLARE("tempr27", offsetof(struct per_proc_info *, tempr27)); + DECLARE("tempr28", offsetof(struct per_proc_info *, tempr28)); + DECLARE("tempr29", offsetof(struct per_proc_info *, tempr29)); + DECLARE("tempr30", offsetof(struct per_proc_info *, tempr30)); + DECLARE("tempr31", offsetof(struct per_proc_info *, tempr31)); DECLARE("emfp0", offsetof(struct per_proc_info *, emfp0)); DECLARE("emfp1", offsetof(struct per_proc_info *, emfp1)); @@ -305,9 +360,92 @@ int main(int argc, char *argv[]) DECLARE("emvr30", offsetof(struct per_proc_info *, emvr30)); DECLARE("emvr31", offsetof(struct per_proc_info *, emvr31)); DECLARE("empadvr", offsetof(struct per_proc_info *, empadvr)); + DECLARE("skipListPrev", offsetof(struct per_proc_info *, skipListPrev)); DECLARE("ppSize", sizeof(struct per_proc_info)); DECLARE("patcharea", offsetof(struct per_proc_info *, patcharea)); + DECLARE("hwCounts", offsetof(struct per_proc_info *, hwCtr)); + DECLARE("hwInVains", offsetof(struct per_proc_info *, hwCtr.hwInVains)); + DECLARE("hwResets", offsetof(struct per_proc_info *, hwCtr.hwResets)); + DECLARE("hwMachineChecks", offsetof(struct per_proc_info *, hwCtr.hwMachineChecks)); + DECLARE("hwDSIs", offsetof(struct per_proc_info *, hwCtr.hwDSIs)); + DECLARE("hwISIs", offsetof(struct per_proc_info *, hwCtr.hwISIs)); + DECLARE("hwExternals", offsetof(struct per_proc_info *, hwCtr.hwExternals)); + DECLARE("hwAlignments", 
offsetof(struct per_proc_info *, hwCtr.hwAlignments)); + DECLARE("hwPrograms", offsetof(struct per_proc_info *, hwCtr.hwPrograms)); + DECLARE("hwFloatPointUnavailable", offsetof(struct per_proc_info *, hwCtr.hwFloatPointUnavailable)); + DECLARE("hwDecrementers", offsetof(struct per_proc_info *, hwCtr.hwDecrementers)); + DECLARE("hwIOErrors", offsetof(struct per_proc_info *, hwCtr.hwIOErrors)); + DECLARE("hwrsvd0", offsetof(struct per_proc_info *, hwCtr.hwrsvd0)); + DECLARE("hwSystemCalls", offsetof(struct per_proc_info *, hwCtr.hwSystemCalls)); + DECLARE("hwTraces", offsetof(struct per_proc_info *, hwCtr.hwTraces)); + DECLARE("hwFloatingPointAssists", offsetof(struct per_proc_info *, hwCtr.hwFloatingPointAssists)); + DECLARE("hwPerformanceMonitors", offsetof(struct per_proc_info *, hwCtr.hwPerformanceMonitors)); + DECLARE("hwAltivecs", offsetof(struct per_proc_info *, hwCtr.hwAltivecs)); + DECLARE("hwrsvd1", offsetof(struct per_proc_info *, hwCtr.hwrsvd1)); + DECLARE("hwrsvd2", offsetof(struct per_proc_info *, hwCtr.hwrsvd2)); + DECLARE("hwrsvd3", offsetof(struct per_proc_info *, hwCtr.hwrsvd3)); + DECLARE("hwInstBreakpoints", offsetof(struct per_proc_info *, hwCtr.hwInstBreakpoints)); + DECLARE("hwSystemManagements", offsetof(struct per_proc_info *, hwCtr.hwSystemManagements)); + DECLARE("hwAltivecAssists", offsetof(struct per_proc_info *, hwCtr.hwAltivecAssists)); + DECLARE("hwThermal", offsetof(struct per_proc_info *, hwCtr.hwThermal)); + DECLARE("hwrsvd5", offsetof(struct per_proc_info *, hwCtr.hwrsvd5)); + DECLARE("hwrsvd6", offsetof(struct per_proc_info *, hwCtr.hwrsvd6)); + DECLARE("hwrsvd7", offsetof(struct per_proc_info *, hwCtr.hwrsvd7)); + DECLARE("hwrsvd8", offsetof(struct per_proc_info *, hwCtr.hwrsvd8)); + DECLARE("hwrsvd9", offsetof(struct per_proc_info *, hwCtr.hwrsvd9)); + DECLARE("hwrsvd10", offsetof(struct per_proc_info *, hwCtr.hwrsvd10)); + DECLARE("hwrsvd11", offsetof(struct per_proc_info *, hwCtr.hwrsvd11)); + DECLARE("hwrsvd12", offsetof(struct per_proc_info *, hwCtr.hwrsvd12)); + DECLARE("hwrsvd13", offsetof(struct per_proc_info *, hwCtr.hwrsvd13)); + DECLARE("hwTrace601", offsetof(struct per_proc_info *, hwCtr.hwTrace601)); + DECLARE("hwSIGPs", offsetof(struct per_proc_info *, hwCtr.hwSIGPs)); + DECLARE("hwPreemptions", offsetof(struct per_proc_info *, hwCtr.hwPreemptions)); + DECLARE("hwContextSwitchs", offsetof(struct per_proc_info *, hwCtr.hwContextSwitchs)); + DECLARE("hwShutdowns", offsetof(struct per_proc_info *, hwCtr.hwShutdowns)); + DECLARE("hwChokes", offsetof(struct per_proc_info *, hwCtr.hwChokes)); + DECLARE("hwDataSegments", offsetof(struct per_proc_info *, hwCtr.hwDataSegments)); + DECLARE("hwInstructionSegments", offsetof(struct per_proc_info *, hwCtr.hwInstructionSegments)); + DECLARE("hwSoftPatches", offsetof(struct per_proc_info *, hwCtr.hwSoftPatches)); + DECLARE("hwMaintenances", offsetof(struct per_proc_info *, hwCtr.hwMaintenances)); + DECLARE("hwInstrumentations", offsetof(struct per_proc_info *, hwCtr.hwInstrumentations)); + DECLARE("hwRedrives", offsetof(struct per_proc_info *, hwCtr.hwRedrives)); + DECLARE("hwSteals", offsetof(struct per_proc_info *, hwCtr.hwSteals)); + + DECLARE("hwMckHang", offsetof(struct per_proc_info *, hwCtr.hwMckHang)); + DECLARE("hwMckSLBPE", offsetof(struct per_proc_info *, hwCtr.hwMckSLBPE)); + DECLARE("hwMckTLBPE", offsetof(struct per_proc_info *, hwCtr.hwMckTLBPE)); + DECLARE("hwMckERCPE", offsetof(struct per_proc_info *, hwCtr.hwMckERCPE)); + DECLARE("hwMckL1DPE", offsetof(struct per_proc_info *, 
hwCtr.hwMckL1DPE));
+ DECLARE("hwMckL1TPE", offsetof(struct per_proc_info *, hwCtr.hwMckL1TPE));
+ DECLARE("hwMckUE", offsetof(struct per_proc_info *, hwCtr.hwMckUE));
+ DECLARE("hwMckIUE", offsetof(struct per_proc_info *, hwCtr.hwMckIUE));
+ DECLARE("hwMckIUEr", offsetof(struct per_proc_info *, hwCtr.hwMckIUEr));
+ DECLARE("hwMckDUE", offsetof(struct per_proc_info *, hwCtr.hwMckDUE));
+ DECLARE("hwMckDTW", offsetof(struct per_proc_info *, hwCtr.hwMckDTW));
+ DECLARE("hwMckUnk", offsetof(struct per_proc_info *, hwCtr.hwMckUnk));
+ DECLARE("hwMckExt", offsetof(struct per_proc_info *, hwCtr.hwMckExt));
+ DECLARE("hwMckICachePE", offsetof(struct per_proc_info *, hwCtr.hwMckICachePE));
+ DECLARE("hwMckITagPE", offsetof(struct per_proc_info *, hwCtr.hwMckITagPE));
+ DECLARE("hwMckIEratPE", offsetof(struct per_proc_info *, hwCtr.hwMckIEratPE));
+ DECLARE("hwMckDEratPE", offsetof(struct per_proc_info *, hwCtr.hwMckDEratPE));
+
+ DECLARE("napStamp", offsetof(struct per_proc_info *, hwCtr.napStamp));
+ DECLARE("napTotal", offsetof(struct per_proc_info *, hwCtr.napTotal));
+
+ DECLARE("patchAddr", offsetof(struct patch_entry *, addr));
+ DECLARE("patchData", offsetof(struct patch_entry *, data));
+ DECLARE("patchType", offsetof(struct patch_entry *, type));
+ DECLARE("patchValue", offsetof(struct patch_entry *, value));
+ DECLARE("peSize", sizeof(patch_entry_t));
+ DECLARE("PATCH_PROCESSOR", PATCH_PROCESSOR);
+ DECLARE("PATCH_FEATURE", PATCH_FEATURE);
+ DECLARE("PATCH_TABLE_SIZE", PATCH_TABLE_SIZE);
+ DECLARE("PatchExt32", PatchExt32);
+ DECLARE("PatchExt32b", PatchExt32b);
+ DECLARE("PatchLwsync", PatchLwsync);
+ DECLARE("PatchLwsyncb", PatchLwsyncb);
+
 DECLARE("RESETHANDLER_TYPE", offsetof(struct resethandler *, type));
 DECLARE("RESETHANDLER_CALL", offsetof(struct resethandler *, call_paddr));
 DECLARE("RESETHANDLER_ARG", offsetof(struct resethandler *, arg__paddr));
@@ -318,47 +456,44 @@ int main(int argc, char *argv[])
 #define IKSBASE (u_int)STACK_IKS(0)

 /* values from kern/thread.h */
- DECLARE("THREAD_TOP_ACT", offsetof(struct thread_shuttle *, top_act));
- DECLARE("THREAD_KERNEL_STACK", offsetof(struct thread_shuttle *, kernel_stack));
- DECLARE("THREAD_CONTINUATION", offsetof(struct thread_shuttle *, continuation));
- DECLARE("THREAD_RECOVER", offsetof(struct thread_shuttle *, recover));
+ DECLARE("THREAD_TOP_ACT", offsetof(thread_t, top_act));
+ DECLARE("THREAD_KERNEL_STACK", offsetof(thread_act_t, kernel_stack));
+ DECLARE("THREAD_RECOVER", offsetof(thread_act_t, recover));
 DECLARE("THREAD_FUNNEL_LOCK",
- offsetof(struct thread_shuttle *, funnel_lock));
+ offsetof(thread_act_t, funnel_lock));
 DECLARE("THREAD_FUNNEL_STATE",
- offsetof(struct thread_shuttle *, funnel_state));
+ offsetof(thread_act_t, funnel_state));
 DECLARE("LOCK_FNL_MUTEX", offsetof(struct funnel_lock *, fnl_mutex));
 #if MACH_LDEBUG
- DECLARE("THREAD_MUTEX_COUNT", offsetof(struct thread_shuttle *, mutex_count));
+ DECLARE("THREAD_MUTEX_COUNT", offsetof(thread_t, mutex_count));
 #endif /* MACH_LDEBUG */
- DECLARE("THREAD_PSET", offsetof(struct thread_shuttle *, processor_set));
- DECLARE("THREAD_LINKS", offsetof(struct thread_shuttle *, links));
- DECLARE("THREAD_PSTHRN", offsetof(struct thread_shuttle *, pset_threads.next));

 /* values from kern/thread_act.h */
- DECLARE("ACT_TASK", offsetof(struct thread_activation *, task));
- DECLARE("ACT_THREAD", offsetof(struct thread_activation *, thread));
- DECLARE("ACT_LOWER", offsetof(struct thread_activation *, lower));
- DECLARE("ACT_MACT_PCB", offsetof(struct thread_activation *, mact.pcb));
- DECLARE("ACT_AST", offsetof(struct thread_activation *, ast));
- DECLARE("ACT_VMMAP", offsetof(struct thread_activation *, map));
- DECLARE("ACT_KLOADED", offsetof(struct thread_activation *, kernel_loaded));
- DECLARE("ACT_KLOADING", offsetof(struct thread_activation *, kernel_loading));
- DECLARE("vmmCEntry", offsetof(struct thread_activation *, mact.vmmCEntry));
- DECLARE("vmmControl", offsetof(struct thread_activation *, mact.vmmControl));
- DECLARE("curctx", offsetof(struct thread_activation *, mact.curctx));
- DECLARE("deferctx", offsetof(struct thread_activation *, mact.deferctx));
- DECLARE("facctx", offsetof(struct thread_activation *, mact.facctx));
+ DECLARE("ACT_TASK", offsetof(thread_act_t, task));
+ DECLARE("ACT_THREAD", offsetof(thread_act_t, thread));
+ DECLARE("ACT_LOWER", offsetof(thread_act_t, lower));
+ DECLARE("ACT_MACT_PCB", offsetof(thread_act_t, mact.pcb));
+ DECLARE("ACT_MACT_UPCB", offsetof(thread_act_t, mact.upcb));
+ DECLARE("ACT_AST", offsetof(thread_act_t, ast));
+ DECLARE("ACT_VMMAP", offsetof(thread_act_t, map));
+ DECLARE("vmmCEntry", offsetof(thread_act_t, mact.vmmCEntry));
+ DECLARE("vmmControl", offsetof(thread_act_t, mact.vmmControl));
+ DECLARE("curctx", offsetof(thread_act_t, mact.curctx));
+ DECLARE("deferctx", offsetof(thread_act_t, mact.deferctx));
+ DECLARE("facctx", offsetof(thread_act_t, mact.facctx));
 #ifdef MACH_BSD
- DECLARE("CTHREAD_SELF", offsetof(struct thread_activation *, mact.cthread_self));
+ DECLARE("CTHREAD_SELF", offsetof(thread_act_t, mact.cthread_self));
 #endif
 DECLARE("FPUsave", offsetof(struct facility_context *,FPUsave));
 DECLARE("FPUlevel", offsetof(struct facility_context *,FPUlevel));
 DECLARE("FPUcpu", offsetof(struct facility_context *,FPUcpu));
+ DECLARE("FPUsync", offsetof(struct facility_context *,FPUsync));
 DECLARE("VMXsave", offsetof(struct facility_context *,VMXsave));
 DECLARE("VMXlevel", offsetof(struct facility_context *,VMXlevel));
 DECLARE("VMXcpu", offsetof(struct facility_context *,VMXcpu));
+ DECLARE("VMXsync", offsetof(struct facility_context *,VMXsync));
 DECLARE("facAct", offsetof(struct facility_context *,facAct));

 /* Values from vmachmon.h */
@@ -376,9 +511,21 @@ int main(int argc, char *argv[])
 DECLARE("kVmmGetFloatState", kVmmGetFloatState);
 DECLARE("kVmmGetVectorState", kVmmGetVectorState);
 DECLARE("kVmmSetTimer", kVmmSetTimer);
+ DECLARE("kVmmGetTimer", kVmmGetTimer);
 DECLARE("kVmmExecuteVM", kVmmExecuteVM);
 DECLARE("kVmmProtectPage", kVmmProtectPage);
-
+ DECLARE("kVmmMapList", kVmmMapList);
+ DECLARE("kVmmUnmapList", kVmmUnmapList);
+ DECLARE("kVmmSetXA", kVmmSetXA);
+ DECLARE("kVmmGetXA", kVmmGetXA);
+ DECLARE("kVmmMapPage64", kVmmMapPage64);
+ DECLARE("kVmmGetPageMapping64", kVmmGetPageMapping64);
+ DECLARE("kVmmUnmapPage64", kVmmUnmapPage64);
+ DECLARE("kVmmGetPageDirtyFlag64", kVmmGetPageDirtyFlag64);
+ DECLARE("kVmmMapExecute64", kVmmMapExecute64);
+ DECLARE("kVmmProtectExecute64", kVmmProtectExecute64);
+ DECLARE("kVmmMapList64", kVmmMapList64);
+ DECLARE("kVmmUnmapList64", kVmmUnmapList64);
 DECLARE("kvmmExitToHost", kvmmExitToHost);
 DECLARE("kvmmResumeGuest", kvmmResumeGuest);
 DECLARE("kvmmGetGuestRegister", kvmmGetGuestRegister);
@@ -393,6 +540,7 @@ int main(int argc, char *argv[])
 DECLARE("kVmmReturnProgramException", kVmmReturnProgramException);
 DECLARE("kVmmReturnSystemCall", kVmmReturnSystemCall);
 DECLARE("kVmmReturnTraceException", kVmmReturnTraceException);
+ DECLARE("kVmmInvalidAdSpace", kVmmInvalidAdSpace);
 DECLARE("kVmmProtXtnd", kVmmProtXtnd);
 DECLARE("kVmmProtNARW", kVmmProtNARW);
@@ -401,97 +549,167 @@ int main(int argc, char *argv[])
 DECLARE("kVmmProtRORO", kVmmProtRORO);

 DECLARE("vmmFlags", offsetof(struct vmmCntrlEntry *, vmmFlags));
+ DECLARE("vmmXAFlgs", offsetof(struct vmmCntrlEntry *, vmmXAFlgs));
+ DECLARE("vmmPmap", offsetof(struct vmmCntrlEntry *, vmmPmap));
 DECLARE("vmmInUseb", vmmInUseb);
 DECLARE("vmmInUse", vmmInUse);
- DECLARE("vmmPmap", offsetof(struct vmmCntrlEntry *, vmmPmap));
 DECLARE("vmmContextKern", offsetof(struct vmmCntrlEntry *, vmmContextKern));
 DECLARE("vmmContextPhys", offsetof(struct vmmCntrlEntry *, vmmContextPhys));
 DECLARE("vmmContextUser", offsetof(struct vmmCntrlEntry *, vmmContextUser));
 DECLARE("vmmFacCtx", offsetof(struct vmmCntrlEntry *, vmmFacCtx));
- DECLARE("vmmLastMap", offsetof(struct vmmCntrlEntry *, vmmLastMap));
+ DECLARE("vmmLastMap", offsetof(struct vmmCntrlTable *, vmmLastMap));
+ DECLARE("vmmGFlags", offsetof(struct vmmCntrlTable *, vmmGFlags));
+ DECLARE("vmmc", offsetof(struct vmmCntrlTable *, vmmc));
+ DECLARE("vmmAdsp", offsetof(struct vmmCntrlTable *, vmmAdsp));
+ DECLARE("vmmLastAdSp", vmmLastAdSp);
 DECLARE("vmmFAMintercept", offsetof(struct vmmCntrlEntry *, vmmFAMintercept));
 DECLARE("vmmCEntrySize", sizeof(struct vmmCntrlEntry));
- DECLARE("kVmmMaxContextsPerThread", kVmmMaxContextsPerThread);
+ DECLARE("kVmmMaxContexts", kVmmMaxContexts);

 DECLARE("interface_version", offsetof(struct vmm_state_page_t *, interface_version));
 DECLARE("thread_index", offsetof(struct vmm_state_page_t *, thread_index));
 DECLARE("vmmStat", offsetof(struct vmm_state_page_t *, vmmStat));
 DECLARE("vmmCntrl", offsetof(struct vmm_state_page_t *, vmmCntrl));
+ DECLARE("vmm_proc_state", offsetof(struct vmm_state_page_t *, vmm_proc_state));
+ DECLARE("return_code", offsetof(struct vmm_state_page_t *, return_code));
+
+ DECLARE("return_params", offsetof(struct vmm_state_page_t *, vmmRet.vmmrp32.return_params));
+ DECLARE("return_paramsX", offsetof(struct vmm_state_page_t *, vmmRet.vmmrp64.return_params));
+
+#if 0
 DECLARE("return_params", offsetof(struct vmm_state_page_t *, return_params));
 DECLARE("vmm_proc_state", offsetof(struct vmm_state_page_t *, vmm_proc_state));
+#endif

 DECLARE("vmmppcVRs", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcVRs));
 DECLARE("vmmppcVSCR", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcVSCR));
- DECLARE("vmmppcVSCRshadow", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcVSCRshadow));
 DECLARE("vmmppcFPRs", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcFPRs));
 DECLARE("vmmppcFPSCR", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcFPSCR));
- DECLARE("vmmppcFPSCRshadow", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcFPSCRshadow));
-
- DECLARE("vmmppcpc", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcPC));
- DECLARE("vmmppcmsr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcMSR));
- DECLARE("vmmppcr0", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x00));
- DECLARE("vmmppcr1", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x04));
- DECLARE("vmmppcr2", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x08));
- DECLARE("vmmppcr3", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x0C));
- DECLARE("vmmppcr4", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x10));
- DECLARE("vmmppcr5", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x14));
-
- DECLARE("vmmppcr6", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x18));
- DECLARE("vmmppcr7", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x1C));
- DECLARE("vmmppcr8", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x20));
- DECLARE("vmmppcr9", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x24));
- DECLARE("vmmppcr10", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x28));
- DECLARE("vmmppcr11", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x2C));
- DECLARE("vmmppcr12", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x30));
- DECLARE("vmmppcr13", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x34));
-
- DECLARE("vmmppcr14", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x38));
- DECLARE("vmmppcr15", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x3C));
- DECLARE("vmmppcr16", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x40));
- DECLARE("vmmppcr17", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x44));
- DECLARE("vmmppcr18", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x48));
- DECLARE("vmmppcr19", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x4C));
- DECLARE("vmmppcr20", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x50));
- DECLARE("vmmppcr21", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x54));
-
- DECLARE("vmmppcr22", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x58));
- DECLARE("vmmppcr23", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x5C));
- DECLARE("vmmppcr24", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x60));
- DECLARE("vmmppcr25", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x64));
- DECLARE("vmmppcr26", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x68));
- DECLARE("vmmppcr27", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x6C));
- DECLARE("vmmppcr28", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x70));
- DECLARE("vmmppcr29", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x74));
-
- DECLARE("vmmppcr30", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x78));
- DECLARE("vmmppcr31", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcGPRs+0x7C));
- DECLARE("vmmppccr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcCR));
- DECLARE("vmmppcxer", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcXER));
- DECLARE("vmmppclr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcLR));
- DECLARE("vmmppcctr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcCTR));
- DECLARE("vmmppcmq", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcMQ));
- DECLARE("vmmppcvrsave", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcVRSave));
+
+ DECLARE("vmmppcpc", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcPC));
+ DECLARE("vmmppcmsr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcMSR));
+ DECLARE("vmmppcr0", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x00));
+ DECLARE("vmmppcr1", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x04));
+ DECLARE("vmmppcr2", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x08));
+ DECLARE("vmmppcr3", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x0C));
+ DECLARE("vmmppcr4", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x10));
+ DECLARE("vmmppcr5", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x14));
+
+ DECLARE("vmmppcr6", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x18));
+ DECLARE("vmmppcr7", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x1C));
+ DECLARE("vmmppcr8", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x20));
+ DECLARE("vmmppcr9", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x24));
+ DECLARE("vmmppcr10", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x28));
+ DECLARE("vmmppcr11", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x2C));
+ DECLARE("vmmppcr12", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x30));
+ DECLARE("vmmppcr13", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x34));
+
+ DECLARE("vmmppcr14", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x38));
+ DECLARE("vmmppcr15", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x3C));
+ DECLARE("vmmppcr16", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x40));
+ DECLARE("vmmppcr17", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x44));
+ DECLARE("vmmppcr18", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x48));
+ DECLARE("vmmppcr19", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x4C));
+ DECLARE("vmmppcr20", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x50));
+ DECLARE("vmmppcr21", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x54));
+
+ DECLARE("vmmppcr22", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x58));
+ DECLARE("vmmppcr23", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x5C));
+ DECLARE("vmmppcr24", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x60));
+ DECLARE("vmmppcr25", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x64));
+ DECLARE("vmmppcr26", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x68));
+ DECLARE("vmmppcr27", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x6C));
+ DECLARE("vmmppcr28", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x70));
+ DECLARE("vmmppcr29", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x74));
+
+ DECLARE("vmmppcr30", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x78));
+ DECLARE("vmmppcr31", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcGPRs+0x7C));
+ DECLARE("vmmppccr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcCR));
+ DECLARE("vmmppcxer", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcXER));
+ DECLARE("vmmppclr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcLR));
+ DECLARE("vmmppcctr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcCTR));
+ DECLARE("vmmppcmq", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcMQ));
+ DECLARE("vmmppcvrsave", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs32.ppcVRSave));
+
+ DECLARE("vmmppcXpc", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcPC));
+ DECLARE("vmmppcXmsr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcMSR));
+ DECLARE("vmmppcXr0", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x00));
+ DECLARE("vmmppcXr1", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x08));
+ DECLARE("vmmppcXr2", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x10));
+ DECLARE("vmmppcXr3", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x18));
+ DECLARE("vmmppcXr4", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x20));
+ DECLARE("vmmppcXr5", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x28));
+
+ DECLARE("vmmppcXr6", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x30));
+ DECLARE("vmmppcXr7", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x38));
+ DECLARE("vmmppcXr8", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x40));
+ DECLARE("vmmppcXr9", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x48));
+ DECLARE("vmmppcXr10", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x50));
+ DECLARE("vmmppcXr11", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x58));
+ DECLARE("vmmppcXr12", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x60));
+ DECLARE("vmmppcXr13", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x68));
+
+ DECLARE("vmmppcXr14", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x70));
+ DECLARE("vmmppcXr15", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x78));
+ DECLARE("vmmppcXr16", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x80));
+ DECLARE("vmmppcXr17", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x88));
+ DECLARE("vmmppcXr18", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x90));
+ DECLARE("vmmppcXr19", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0x98));
+ DECLARE("vmmppcXr20", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0xA0));
+ DECLARE("vmmppcXr21", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0xA8));
+
+ DECLARE("vmmppcXr22", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0xB0));
+ DECLARE("vmmppcXr23", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0xB8));
+ DECLARE("vmmppcXr24", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0xC0));
+ DECLARE("vmmppcXr25", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0xC8));
+ DECLARE("vmmppcXr26", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0xD0));
+ DECLARE("vmmppcXr27", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0xD8));
+ DECLARE("vmmppcXr28", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0xE0));
+ DECLARE("vmmppcXr29", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0xE8));
+
+ DECLARE("vmmppcXr30", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0xF0));
+ DECLARE("vmmppcXr31", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcGPRs+0xF8));
+ DECLARE("vmmppcXcr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcCR));
+ DECLARE("vmmppcXxer", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcXER));
+ DECLARE("vmmppcXlr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcLR));
+ DECLARE("vmmppcXctr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcCTR));
+ DECLARE("vmmppcXvrsave", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcRegs.ppcRegs64.ppcVRSave));

 DECLARE("vmmppcvscr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcVSCR+0x00));
 DECLARE("vmmppcfpscrpad", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcFPSCR));
 DECLARE("vmmppcfpscr", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcFPSCR+4));

- DECLARE("famguestr0", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.guest_register));
- DECLARE("famguestr1", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.guest_register+0x4));
- DECLARE("famguestr2", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.guest_register+0x8));
- DECLARE("famguestr3", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.guest_register+0xC));
- DECLARE("famguestr4", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.guest_register+0x10));
- DECLARE("famguestr5", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.guest_register+0x14));
- DECLARE("famguestr6", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.guest_register+0x18));
- DECLARE("famguestr7", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.guest_register+0x1C));
- DECLARE("famguestpc", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.guest_pc));
- DECLARE("famguestmsr", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.guest_msr));
-
- DECLARE("famdispcode", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.fastassist_dispatch_code));
- DECLARE("famrefcon", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.fastassist_refcon));
- DECLARE("famparam", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.fastassist_parameter));
- DECLARE("famhandler", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.fastassist_dispatch));
- DECLARE("famintercepts", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.fastassist_intercepts));
+ DECLARE("famguestr0", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.guest_register));
+ DECLARE("famguestr1", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.guest_register+0x4));
+ DECLARE("famguestr2", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.guest_register+0x8));
+ DECLARE("famguestr3", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.guest_register+0xC));
+ DECLARE("famguestr4", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.guest_register+0x10));
+ DECLARE("famguestr5", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.guest_register+0x14));
+ DECLARE("famguestr6", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.guest_register+0x18));
+ DECLARE("famguestr7", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.guest_register+0x1C));
+ DECLARE("famguestpc", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.guest_pc));
+ DECLARE("famguestmsr", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.guest_msr));
+ DECLARE("famdispcode", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.fastassist_dispatch_code));
+ DECLARE("famrefcon", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.fastassist_refcon));
+ DECLARE("famparam", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.fastassist_parameter));
+ DECLARE("famhandler", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.fastassist_dispatch));
+ DECLARE("famintercepts", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs32.fastassist_intercepts));
+
+ DECLARE("famguestXr0", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.guest_register));
+ DECLARE("famguestXr1", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.guest_register+0x8));
+ DECLARE("famguestXr2", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.guest_register+0x10));
+ DECLARE("famguestXr3", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.guest_register+0x18));
+ DECLARE("famguestXr4", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.guest_register+0x20));
+ DECLARE("famguestXr5", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.guest_register+0x28));
+ DECLARE("famguestXr6", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.guest_register+0x30));
+ DECLARE("famguestXr7", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.guest_register+0x38));
+ DECLARE("famguestXpc", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.guest_pc));
+ DECLARE("famguestXmsr", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.guest_msr));
+ DECLARE("famdispcodeX", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.fastassist_dispatch_code));
+ DECLARE("famrefconX", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.fastassist_refcon));
+ DECLARE("famparamX", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.fastassist_parameter));
+ DECLARE("famhandlerX", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.fastassist_dispatch));
+ DECLARE("faminterceptsX", offsetof(struct vmm_state_page_t *, vmm_fastassist_state.vmmfs64.fastassist_intercepts));

 DECLARE("vmmFloatCngd", vmmFloatCngd);
 DECLARE("vmmFloatCngdb", vmmFloatCngdb);
@@ -499,8 +717,6 @@ int main(int argc, char *argv[])
 DECLARE("vmmVectCngdb", vmmVectCngdb);
 DECLARE("vmmTimerPop", vmmTimerPop);
 DECLARE("vmmTimerPopb", vmmTimerPopb);
- DECLARE("vmmMapDone", vmmMapDone);
- DECLARE("vmmMapDoneb", vmmMapDoneb);
 DECLARE("vmmFAMmode", vmmFAMmode);
 DECLARE("vmmFAMmodeb", vmmFAMmodeb);
 DECLARE("vmmSpfSave", vmmSpfSave);
@@ -515,14 +731,15 @@ int main(int argc, char *argv[])
 DECLARE("vmmVectVAssb", vmmVectVAssb);
 DECLARE("vmmXStart", vmmXStart);
 DECLARE("vmmXStartb", vmmXStartb);
- DECLARE("vmmXStop", vmmXStop);
+ DECLARE("vmmXStop", vmmXStop);
 DECLARE("vmmXStopb", vmmXStopb);
- DECLARE("vmmKey", vmmKey);
- DECLARE("vmmKeyb", vmmKeyb);
+ DECLARE("vmmKey", vmmKey);
+ DECLARE("vmmKeyb", vmmKeyb);
 DECLARE("vmmFamSet", vmmFamSet);
 DECLARE("vmmFamSetb", vmmFamSetb);
 DECLARE("vmmFamEna", vmmFamEna);
 DECLARE("vmmFamEnab", vmmFamEnab);
+ DECLARE("vmm64Bit", vmm64Bit);

 /* values from kern/task.h */
 DECLARE("TASK_SYSCALLS_MACH",
@@ -534,13 +751,50 @@ int main(int argc, char *argv[])
 DECLARE("VMMAP_PMAP", offsetof(struct vm_map *, pmap));

 /* values from machine/pmap.h */
- DECLARE("PMAP_SPACE", offsetof(struct pmap *, space));
- DECLARE("PMAP_BMAPS", offsetof(struct pmap *, bmaps));
- DECLARE("PMAP_PMAPVR", offsetof(struct pmap *, pmapvr));
- DECLARE("PMAP_VFLAGS", offsetof(struct pmap *, vflags));
- DECLARE("PMAP_USAGE", offsetof(struct pmap *, pmapUsage));
- DECLARE("PMAP_SEGS", offsetof(struct pmap *, pmapSegs));
- DECLARE("PMAP_SIZE", pmapSize);
+ DECLARE("pmapSpace", offsetof(struct pmap *, space));
+ DECLARE("spaceNum", offsetof(struct pmap *, spaceNum));
+ DECLARE("pmapSXlk", offsetof(struct pmap *, pmapSXlk));
+ DECLARE("pmapCCtl", offsetof(struct pmap *, pmapCCtl));
+ DECLARE("pmapCCtlVal", pmapCCtlVal);
+ DECLARE("pmapCCtlLck", pmapCCtlLck);
+ DECLARE("pmapCCtlLckb", pmapCCtlLckb);
+ DECLARE("pmapCCtlGen", pmapCCtlGen);
+ DECLARE("pmapSegCacheCnt", pmapSegCacheCnt);
+ DECLARE("pmapSegCacheUse", pmapSegCacheUse);
+ DECLARE("pmapvr", offsetof(struct pmap *, pmapvr));
+ DECLARE("pmapFlags", offsetof(struct pmap *, pmapFlags));
+ DECLARE("pmapKeys", pmapKeys);
+ DECLARE("pmapKeyDef", pmapKeyDef);
+ DECLARE("pmapSCSubTag", offsetof(struct pmap *, pmapSCSubTag));
+ DECLARE("pmapSegCache", offsetof(struct pmap *, pmapSegCache));
+ DECLARE("pmapCurLists", offsetof(struct pmap *, pmapCurLists));
+ DECLARE("pmapRandNum", offsetof(struct pmap *, pmapRandNum));
+ DECLARE("pmapSkipLists", offsetof(struct pmap *, pmapSkipLists));
+ DECLARE("pmapSearchVisits", offsetof(struct pmap *, pmapSearchVisits));
+ DECLARE("pmapSearchCnt", offsetof(struct pmap *, pmapSearchCnt));
+ DECLARE("pmapSize", pmapSize);
+ DECLARE("kSkipListFanoutShift", kSkipListFanoutShift);
+ DECLARE("kSkipListMaxLists", kSkipListMaxLists);
+ DECLARE("invalSpace", invalSpace);
+
+ DECLARE("sgcESID", offsetof(struct sgc *, sgcESID));
+ DECLARE("sgcESmsk", sgcESmsk);
+ DECLARE("sgcVSID", offsetof(struct sgc *, sgcVSID));
+ DECLARE("sgcVSmsk", sgcVSmsk);
+ DECLARE("sgcVSKeys", sgcVSKeys);
+ DECLARE("sgcVSKeyUsr", sgcVSKeyUsr);
+ DECLARE("sgcVSNoEx", sgcVSNoEx);
+ DECLARE("pmapPAddr", offsetof(struct pmapTransTab *, pmapPAddr));
+ DECLARE("pmapVAddr", offsetof(struct pmapTransTab *, pmapVAddr));
+ DECLARE("pmapTransSize", sizeof(pmapTransTab));
+ DECLARE("pmapResidentCnt", offsetof(struct pmap *, stats.resident_count));
+
+ DECLARE("maxAdrSp", maxAdrSp);
+ DECLARE("maxAdrSpb", maxAdrSpb);
+
+ /* values from kern/processor.h */
+ DECLARE("psthreads", offsetof(struct processor_set *, threads));
+ DECLARE("psthreadcnt", offsetof(struct processor_set *, thread_count));

 /* values from kern/processor.h */
 DECLARE("psthreads", offsetof(struct processor_set *, threads));
@@ -558,16 +812,9 @@ int main(int argc, char *argv[])
 DECLARE("MACH_TRAP_FUNCTION", offsetof(mach_trap_t *, mach_trap_function));

- DECLARE("HOST_SELF", offsetof(host_t, host_self));
-
- DECLARE("PPCcallmax", sizeof(PPCcalls));
+ DECLARE("MACH_TRAP_TABLE_COUNT", MACH_TRAP_TABLE_COUNT);

- /* values from cpu_data.h */
- DECLARE("CPU_ACTIVE_THREAD", offsetof(cpu_data_t *, active_thread));
- DECLARE("CPU_PREEMPTION_LEVEL", offsetof(cpu_data_t *, preemption_level));
- DECLARE("CPU_SIMPLE_LOCK_COUNT",
- offsetof(cpu_data_t *, simple_lock_count));
- DECLARE("CPU_INTERRUPT_LEVEL",offsetof(cpu_data_t *, interrupt_level));
+ DECLARE("PPCcallmax", sizeof(PPCcalls));

 /* Misc values used by assembler */
 DECLARE("AST_ALL", AST_ALL);
@@ -604,6 +851,8 @@ int main(int argc, char *argv[])
 DECLARE("LTR_srr0", offsetof(struct LowTraceRecord *, LTR_srr0));
 DECLARE("LTR_srr1", offsetof(struct LowTraceRecord *, LTR_srr1));
 DECLARE("LTR_dar", offsetof(struct LowTraceRecord *, LTR_dar));
+ DECLARE("LTR_dsisr", offsetof(struct LowTraceRecord *, LTR_dsisr));
+ DECLARE("LTR_rsvd0", offsetof(struct LowTraceRecord *, LTR_rsvd0));
 DECLARE("LTR_save", offsetof(struct LowTraceRecord *, LTR_save));
 DECLARE("LTR_lr", offsetof(struct LowTraceRecord *, LTR_lr));
 DECLARE("LTR_ctr", offsetof(struct LowTraceRecord *, LTR_ctr));
@@ -613,6 +862,7 @@ int main(int argc, char *argv[])
 DECLARE("LTR_r3", offsetof(struct LowTraceRecord *, LTR_r3));
 DECLARE("LTR_r4", offsetof(struct LowTraceRecord *, LTR_r4));
 DECLARE("LTR_r5", offsetof(struct LowTraceRecord *, LTR_r5));
+ DECLARE("LTR_r6", offsetof(struct LowTraceRecord *, LTR_r6));
 DECLARE("LTR_size", sizeof(struct LowTraceRecord));

 /* Values from pexpert.h */
@@ -620,58 +870,117 @@ int main(int argc, char *argv[])
 DECLARE("PECFIbusrate", offsetof(struct clock_frequency_info_t *, bus_clock_rate_hz));

 /* Values from pmap_internals.h and mappings.h */
- DECLARE("mmnext", offsetof(struct mapping *, next));
- DECLARE("mmhashnext", offsetof(struct mapping *, hashnext));
- DECLARE("mmPTEhash", offsetof(struct mapping *, PTEhash));
- DECLARE("mmPTEent", offsetof(struct mapping *, PTEent));
- DECLARE("mmPTEv", offsetof(struct mapping *, PTEv));
- DECLARE("mmPTEr", offsetof(struct mapping *, PTEr));
- DECLARE("mmphysent", offsetof(struct mapping *, physent));
- DECLARE("mmpmap", offsetof(struct mapping *, pmap));
-
- DECLARE("bmnext", offsetof(struct blokmap *, next));
- DECLARE("bmstart", offsetof(struct blokmap *, start));
- DECLARE("bmend", offsetof(struct blokmap *, end));
- DECLARE("bmcurrent", offsetof(struct blokmap *, current));
- DECLARE("bmPTEr", offsetof(struct blokmap *, PTEr));
- DECLARE("bmspace", offsetof(struct blokmap *, space));
- DECLARE("blkFlags", offsetof(struct blokmap *, blkFlags));
- DECLARE("blkPerm", blkPerm);
- DECLARE("blkRem", blkRem);
- DECLARE("blkPermbit", blkPermbit);
- DECLARE("blkRembit", blkRembit);
- DECLARE("BLKREMMAX", BLKREMMAX);
-
+
+ DECLARE("mpFlags", offsetof(struct mapping *, mpFlags));
+ DECLARE("mpBusy", mpBusy);
+ DECLARE("mpPIndex", mpPIndex);
+ DECLARE("mpSpecial", mpSpecial);
+ DECLARE("mpSpecialb", mpSpecialb);
+ DECLARE("mpFIP", mpFIP);
+ DECLARE("mpFIPb", mpFIPb);
+ DECLARE("mpRemovable", mpRemovable);
+ DECLARE("mpRemovableb", mpRemovableb);
+ DECLARE("mpNest", mpNest);
+ DECLARE("mpNestb", mpNestb);
+ DECLARE("mpPerm", mpPerm);
+ DECLARE("mpPermb", mpPermb);
+ DECLARE("mpBlock", mpBlock);
+ DECLARE("mpBlockb", mpBlockb);
+ DECLARE("mpRIP", mpRIP);
+ DECLARE("mpRIPb", mpRIPb);
+ DECLARE("mpRSVD1", mpRSVD1);
+ DECLARE("mpLists", mpLists);
+ DECLARE("mpListsb", mpListsb);
+
+ DECLARE("mpSpace", offsetof(struct mapping *, mpSpace));
+ DECLARE("mpBSize", offsetof(struct mapping *, mpBSize));
+ DECLARE("mpPte", offsetof(struct mapping *, mpPte));
+ DECLARE("mpHValid", mpHValid);
+ DECLARE("mpHValidb", mpHValidb);
+
+ DECLARE("mpPAddr", offsetof(struct mapping *, mpPAddr));
+ DECLARE("mpVAddr", offsetof(struct mapping *, mpVAddr));
+ DECLARE("mpHWFlags", mpHWFlags);
+ DECLARE("mpPP", mpPP);
+ DECLARE("mpPPb", mpPPb);
+ DECLARE("mpKKN", mpKKN);
+ DECLARE("mpKKNb", mpKKNb);
+ DECLARE("mpWIMG", mpWIMG);
+ DECLARE("mpWIMGb", mpWIMGb);
+ DECLARE("mpW", mpW);
+ DECLARE("mpWb", mpWb);
+ DECLARE("mpI", mpI);
+ DECLARE("mpIb", mpIb);
+ DECLARE("mpM", mpM);
+ DECLARE("mpMb", mpMb);
+ DECLARE("mpG", mpG);
+ DECLARE("mpGb", mpGb);
+ DECLARE("mpWIMGe", mpWIMGe);
+ DECLARE("mpC", mpC);
+ DECLARE("mpCb", mpCb);
+ DECLARE("mpR", mpR);
+ DECLARE("mpRb", mpRb);
+ DECLARE("mpAlias", offsetof(struct mapping *, mpAlias));
+ DECLARE("mpNestReloc", offsetof(struct mapping *, mpNestReloc));
+ DECLARE("mpBlkRemCur", offsetof(struct mapping *, mpBlkRemCur));
+ DECLARE("mpList0", offsetof(struct mapping *, mpList0));
+ DECLARE("mpList ", offsetof(struct mapping *, mpList));
+ DECLARE("mpBasicSize", mpBasicSize);
+ DECLARE("mpBasicLists", mpBasicLists);
+
 DECLARE("mbvrswap", offsetof(struct mappingblok *, mapblokvrswap));
 DECLARE("mbfree", offsetof(struct mappingblok *, mapblokfree));
 DECLARE("mapcsize", sizeof(struct mappingctl));

- DECLARE("pephyslink", offsetof(struct phys_entry *, phys_link));
- DECLARE("pepte1", offsetof(struct phys_entry *, pte1));
+ DECLARE("ppLink", offsetof(struct phys_entry *, ppLink));
+ DECLARE("ppLock", ppLock);
+ DECLARE("ppN", ppN);
+ DECLARE("ppFlags", ppFlags);
+ DECLARE("ppI", ppI);
+ DECLARE("ppIb", ppIb);
+ DECLARE("ppG", ppG);
+ DECLARE("ppGb", ppGb);
+ DECLARE("ppR", ppR);
+ DECLARE("ppRb", ppRb);
+ DECLARE("ppC", ppC);
+ DECLARE("ppCb", ppCb);
+ DECLARE("ppPP", ppPP);
+ DECLARE("ppPPb", ppPPb);
+ DECLARE("ppPPe", ppPPe);

- DECLARE("PCAlock", offsetof(struct PCA *, PCAlock));
 DECLARE("PCAallo", offsetof(struct PCA *, flgs.PCAallo));
 DECLARE("PCAfree", offsetof(struct PCA *, flgs.PCAalflgs.PCAfree));
 DECLARE("PCAauto", offsetof(struct PCA *, flgs.PCAalflgs.PCAauto));
- DECLARE("PCAslck", offsetof(struct PCA *, flgs.PCAalflgs.PCAslck));
+ DECLARE("PCAmisc", offsetof(struct PCA *, flgs.PCAalflgs.PCAmisc));
+ DECLARE("PCAlock", PCAlock);
+ DECLARE("PCAlockb", PCAlockb);
 DECLARE("PCAsteal", offsetof(struct PCA *, flgs.PCAalflgs.PCAsteal));
- DECLARE("PCAgas", offsetof(struct PCA *, PCAgas));
- DECLARE("PCAhash", offsetof(struct PCA *, PCAhash));

+ DECLARE("mrPhysTab", offsetof(struct mem_region *, mrPhysTab));
+ DECLARE("mrStart", offsetof(struct mem_region *, mrStart));
+ DECLARE("mrEnd", offsetof(struct mem_region *, mrEnd));
+ DECLARE("mrAStart", offsetof(struct mem_region *, mrAStart));
+ DECLARE("mrAEnd", offsetof(struct mem_region *, mrAEnd));
+ DECLARE("mrSize", sizeof(struct mem_region));
+
+ DECLARE("mapRemChunk", mapRemChunk);
+
+ DECLARE("mapRetCode", mapRetCode);
+ DECLARE("mapRtOK", mapRtOK);
+ DECLARE("mapRtBadLk", mapRtBadLk);
+ DECLARE("mapRtPerm", mapRtPerm);
+ DECLARE("mapRtNotFnd", mapRtNotFnd);
+ DECLARE("mapRtBlock", mapRtBlock);
+ DECLARE("mapRtNest", mapRtNest);
+ DECLARE("mapRtRemove", mapRtRemove);
+ DECLARE("mapRtMapDup", mapRtMapDup);
+
+#if 0
 DECLARE("MFpcaptr", offsetof(struct mappingflush *, pcaptr));
 DECLARE("MFmappingcnt", offsetof(struct mappingflush *, mappingcnt));
 DECLARE("MFmapping", offsetof(struct mappingflush *, mapping));
 DECLARE("MFmappingSize", sizeof(struct mfmapping));
-
- DECLARE("SVlock", offsetof(struct Saveanchor *, savelock));
- DECLARE("SVpoolfwd", offsetof(struct Saveanchor *, savepoolfwd));
- DECLARE("SVpoolbwd", offsetof(struct Saveanchor *, savepoolbwd));
- DECLARE("SVfree", offsetof(struct Saveanchor *, savefree));
- DECLARE("SVfreecnt", offsetof(struct Saveanchor *, savefreecnt));
- DECLARE("SVadjust", offsetof(struct Saveanchor *, saveadjust));
- DECLARE("SVinuse", offsetof(struct Saveanchor *, saveinuse));
- DECLARE("SVtarget", offsetof(struct Saveanchor *, savetarget));
- DECLARE("SVsize", sizeof(struct Saveanchor));
+#endif

 #if 1
 DECLARE("GDsave", offsetof(struct GDWorkArea *, GDsave));
@@ -691,9 +1000,6 @@ int main(int argc, char *argv[])
 DECLARE("GDrowbuf2", offsetof(struct GDWorkArea *, GDrowbuf2));
 #endif

- DECLARE("dgLock", offsetof(struct diagWork *, dgLock));
- DECLARE("dgFlags", offsetof(struct diagWork *, dgFlags));
- DECLARE("dgMisc0", offsetof(struct diagWork *, dgMisc0));
 DECLARE("enaExpTrace", enaExpTrace);
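A brief aside before the remaining debug-flag DECLAREs: all of these lines follow the classic genassym pattern. The file is compiled for the target and run at build time, and its output is captured as an assembler include, so the hand-written assembly elsewhere in this patch always agrees with the C structure layouts. A minimal sketch of the mechanism, with hypothetical names and a stand-in struct (the offsetof-on-a-pointer-type spelling matches the "offsetof(struct pmap *, space)" style used above and is assumed to come from a local macro like the one below):

    /* genassym-style sketch: run at build time, stdout becomes an
     * assembler include file. All names here are illustrative. */
    #include <stdio.h>
    #include <stddef.h>

    /* offsetof variant that takes a *pointer* type */
    #define poffsetof(TYPE_P, MEMBER) ((size_t)&(((TYPE_P)0)->MEMBER))

    #define DECLARE(SYM, VAL) \
        printf("#define %s\t%ld\n", (SYM), (long)(VAL))

    struct pmap_sketch {            /* stand-in for the real struct */
        unsigned int space;
        unsigned int pmapvr[2];
    };

    int main(void)
    {
        DECLARE("pmapSpace", poffsetof(struct pmap_sketch *, space));
        DECLARE("pmapvr",    poffsetof(struct pmap_sketch *, pmapvr));
        DECLARE("pmapSize",  sizeof(struct pmap_sketch));
        return 0;
    }

Note also the register-array entries above: the 32-bit state uses a 4-byte stride (ppcGPRs+0x04 per register) while the new 64-bit state uses an 8-byte stride (ppcGPRs+0x08), which is why both tables are emitted.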
DECLARE("enaExpTraceb", enaExpTraceb); DECLARE("enaUsrFCall", enaUsrFCall); @@ -704,6 +1010,8 @@ int main(int argc, char *argv[]) DECLARE("enaDiagSCsb", enaDiagSCsb); DECLARE("enaDiagEM", enaDiagEM); DECLARE("enaDiagEMb", enaDiagEMb); + DECLARE("enaNotifyEM", enaNotifyEM); + DECLARE("enaNotifyEMb", enaNotifyEMb); DECLARE("disLkType", disLkType); DECLARE("disLktypeb", disLktypeb); DECLARE("disLkThread", disLkThread); @@ -718,12 +1026,6 @@ int main(int argc, char *argv[]) DECLARE("dgMisc4", offsetof(struct diagWork *, dgMisc4)); DECLARE("dgMisc5", offsetof(struct diagWork *, dgMisc5)); - DECLARE("traceMask", offsetof(struct traceWork *, traceMask)); - DECLARE("traceCurr", offsetof(struct traceWork *, traceCurr)); - DECLARE("traceStart", offsetof(struct traceWork *, traceStart)); - DECLARE("traceEnd", offsetof(struct traceWork *, traceEnd)); - DECLARE("traceMsnd", offsetof(struct traceWork *, traceMsnd)); - DECLARE("SACnext", offsetof(struct savearea_comm *, sac_next)); DECLARE("SACprev", offsetof(struct savearea_comm *, sac_prev)); DECLARE("SACvrswap", offsetof(struct savearea_comm *, sac_vrswap)); @@ -749,11 +1051,16 @@ int main(int argc, char *argv[]) DECLARE("SAVflags", offsetof(struct savearea_comm *, save_flags)); DECLARE("SAVlevel", offsetof(struct savearea_comm *, save_level)); DECLARE("SAVtime", offsetof(struct savearea_comm *, save_time)); + DECLARE("savemisc0", offsetof(struct savearea_comm *, save_misc0)); + DECLARE("savemisc1", offsetof(struct savearea_comm *, save_misc1)); + DECLARE("savemisc2", offsetof(struct savearea_comm *, save_misc2)); + DECLARE("savemisc3", offsetof(struct savearea_comm *, save_misc3)); + DECLARE("SAVsize", sizeof(struct savearea)); DECLARE("SAVsizefpu", sizeof(struct savearea_vec)); DECLARE("SAVsizevec", sizeof(struct savearea_fpu)); DECLARE("SAVcommsize", sizeof(struct savearea_comm)); - + DECLARE("savesrr0", offsetof(struct savearea *, save_srr0)); DECLARE("savesrr1", offsetof(struct savearea *, save_srr1)); DECLARE("savecr", offsetof(struct savearea *, save_cr)); @@ -768,6 +1075,13 @@ int main(int argc, char *argv[]) DECLARE("savevrsave", offsetof(struct savearea *, save_vrsave)); DECLARE("savevscr", offsetof(struct savearea *, save_vscr)); + DECLARE("savemmcr0", offsetof(struct savearea *, save_mmcr0)); + DECLARE("savemmcr1", offsetof(struct savearea *, save_mmcr1)); + DECLARE("savemmcr2", offsetof(struct savearea *, save_mmcr2)); + DECLARE("savepmc", offsetof(struct savearea *, save_pmc)); + + DECLARE("saveinstr", offsetof(struct savearea *, save_instr)); + DECLARE("saver0", offsetof(struct savearea *, save_r0)); DECLARE("saver1", offsetof(struct savearea *, save_r1)); DECLARE("saver2", offsetof(struct savearea *, save_r2)); @@ -801,23 +1115,6 @@ int main(int argc, char *argv[]) DECLARE("saver30", offsetof(struct savearea *, save_r30)); DECLARE("saver31", offsetof(struct savearea *, save_r31)); - DECLARE("savesr0", offsetof(struct savearea *, save_sr0)); - DECLARE("savesr1", offsetof(struct savearea *, save_sr1)); - DECLARE("savesr2", offsetof(struct savearea *, save_sr2)); - DECLARE("savesr3", offsetof(struct savearea *, save_sr3)); - DECLARE("savesr4", offsetof(struct savearea *, save_sr4)); - DECLARE("savesr5", offsetof(struct savearea *, save_sr5)); - DECLARE("savesr6", offsetof(struct savearea *, save_sr6)); - DECLARE("savesr7", offsetof(struct savearea *, save_sr7)); - DECLARE("savesr8", offsetof(struct savearea *, save_sr8)); - DECLARE("savesr9", offsetof(struct savearea *, save_sr9)); - DECLARE("savesr10", offsetof(struct savearea *, 
save_sr10)); - DECLARE("savesr11", offsetof(struct savearea *, save_sr11)); - DECLARE("savesr12", offsetof(struct savearea *, save_sr12)); - DECLARE("savesr13", offsetof(struct savearea *, save_sr13)); - DECLARE("savesr14", offsetof(struct savearea *, save_sr14)); - DECLARE("savesr15", offsetof(struct savearea *, save_sr15)); - DECLARE("savefp0", offsetof(struct savearea_fpu *, save_fp0)); DECLARE("savefp1", offsetof(struct savearea_fpu *, save_fp1)); DECLARE("savefp2", offsetof(struct savearea_fpu *, save_fp2)); @@ -924,25 +1221,51 @@ int main(int argc, char *argv[]) DECLARE("procState", offsetof(struct processor *, state)); DECLARE("CPU_SUBTYPE_POWERPC_ALL", CPU_SUBTYPE_POWERPC_ALL); - DECLARE("CPU_SUBTYPE_POWERPC_601", CPU_SUBTYPE_POWERPC_601); - DECLARE("CPU_SUBTYPE_POWERPC_602", CPU_SUBTYPE_POWERPC_602); - DECLARE("CPU_SUBTYPE_POWERPC_603", CPU_SUBTYPE_POWERPC_603); - DECLARE("CPU_SUBTYPE_POWERPC_603e", CPU_SUBTYPE_POWERPC_603e); - DECLARE("CPU_SUBTYPE_POWERPC_603ev", CPU_SUBTYPE_POWERPC_603ev); - DECLARE("CPU_SUBTYPE_POWERPC_604", CPU_SUBTYPE_POWERPC_604); - DECLARE("CPU_SUBTYPE_POWERPC_604e", CPU_SUBTYPE_POWERPC_604e); - DECLARE("CPU_SUBTYPE_POWERPC_620", CPU_SUBTYPE_POWERPC_620); DECLARE("CPU_SUBTYPE_POWERPC_750", CPU_SUBTYPE_POWERPC_750); DECLARE("CPU_SUBTYPE_POWERPC_7400", CPU_SUBTYPE_POWERPC_7400); DECLARE("CPU_SUBTYPE_POWERPC_7450", CPU_SUBTYPE_POWERPC_7450); + DECLARE("CPU_SUBTYPE_POWERPC_970", CPU_SUBTYPE_POWERPC_970); DECLARE("shdIBAT", offsetof(struct shadowBAT *, IBATs)); DECLARE("shdDBAT", offsetof(struct shadowBAT *, DBATs)); - + /* Low Memory Globals */ + + DECLARE("lgVerCode", offsetof(struct lowglo *, lgVerCode)); + DECLARE("lgPPStart", offsetof(struct lowglo *, lgPPStart)); + DECLARE("trcWork", offsetof(struct lowglo *, lgTrcWork)); + DECLARE("traceMask", offsetof(struct lowglo *, lgTrcWork.traceMask)); + DECLARE("traceCurr", offsetof(struct lowglo *, lgTrcWork.traceCurr)); + DECLARE("traceStart", offsetof(struct lowglo *, lgTrcWork.traceStart)); + DECLARE("traceEnd", offsetof(struct lowglo *, lgTrcWork.traceEnd)); + DECLARE("traceMsnd", offsetof(struct lowglo *, lgTrcWork.traceMsnd)); + + DECLARE("Zero", offsetof(struct lowglo *, lgZero)); + DECLARE("saveanchor", offsetof(struct lowglo *, lgSaveanchor)); + + DECLARE("SVlock", offsetof(struct lowglo *, lgSaveanchor.savelock)); + DECLARE("SVpoolfwd", offsetof(struct lowglo *, lgSaveanchor.savepoolfwd)); + DECLARE("SVpoolbwd", offsetof(struct lowglo *, lgSaveanchor.savepoolbwd)); + DECLARE("SVfree", offsetof(struct lowglo *, lgSaveanchor.savefree)); + DECLARE("SVfreecnt", offsetof(struct lowglo *, lgSaveanchor.savefreecnt)); + DECLARE("SVadjust", offsetof(struct lowglo *, lgSaveanchor.saveadjust)); + DECLARE("SVinuse", offsetof(struct lowglo *, lgSaveanchor.saveinuse)); + DECLARE("SVtarget", offsetof(struct lowglo *, lgSaveanchor.savetarget)); + DECLARE("SVsize", sizeof(struct Saveanchor)); + + DECLARE("tlbieLock", offsetof(struct lowglo *, lgTlbieLck)); + + DECLARE("dgFlags", offsetof(struct lowglo *, lgdgWork.dgFlags)); + DECLARE("dgLock", offsetof(struct lowglo *, lgdgWork.dgLock)); + DECLARE("dgMisc0", offsetof(struct lowglo *, lgdgWork.dgMisc0)); - return(0); /* For ANSI C :-) */ - - + DECLARE("lgKillResv", offsetof(struct lowglo *, lgKillResv)); + + DECLARE("scomcpu", offsetof(struct scomcomm *, scomcpu)); + DECLARE("scomfunc", offsetof(struct scomcomm *, scomfunc)); + DECLARE("scomreg", offsetof(struct scomcomm *, scomreg)); + DECLARE("scomstat", offsetof(struct scomcomm *, scomstat)); + DECLARE("scomdata", 
offsetof(struct scomcomm *, scomdata)); + return(0); /* For ANSI C :-) */ } diff --git a/osfmk/ppc/hw_counters.h b/osfmk/ppc/hw_counters.h index 712c9f3e5..ac85d507d 100644 --- a/osfmk/ppc/hw_counters.h +++ b/osfmk/ppc/hw_counters.h @@ -33,6 +33,7 @@ #error This file is only useful on PowerPC. #endif +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct hw_counters { @@ -58,8 +59,9 @@ typedef struct hw_counters { unsigned int hw_rsvd3; /* Reserved */ unsigned int hw_InstBreakpoints; /* Instruction breakpoint */ unsigned int hw_SystemManagements; /* System management */ - unsigned int hw_rsvd4; /* Reserved */ - unsigned int hw_AltivecAssists; /* Altivec Assist */ + unsigned int hw_AltivecAssists; /* Altivec Assist */ + unsigned int hw_Thermal; /* Thermals */ + unsigned int hw_rsvd5; /* Reserved */ unsigned int hw_rsvd6; /* Reserved */ unsigned int hw_rsvd7; /* Reserved */ unsigned int hw_rsvd8; /* Reserved */ @@ -68,15 +70,23 @@ typedef struct hw_counters { unsigned int hw_rsvd11; /* Reserved */ unsigned int hw_rsvd12; /* Reserved */ unsigned int hw_rsvd13; /* Reserved */ - unsigned int hw_rsvd14; /* Reserved */ unsigned int hw_Trace601; /* Trace */ unsigned int hw_SIGPs; /* SIGP */ unsigned int hw_Preemptions; /* Preemption */ unsigned int hw_ContextSwitchs; /* Context switch */ + unsigned int hw_Shutdowns; /* Shutdowns */ + unsigned int hw_Chokes; /* System ABENDs */ + unsigned int hw_DataSegments; /* Data Segment Interruptions */ + unsigned int hw_InstructionSegments; /* Instruction Segment Interruptions */ + unsigned int hw_SoftPatches; /* Soft Patch interruptions */ + unsigned int hw_Maintenances; /* Maintenance interruptions */ + unsigned int hw_Instrumentations; /* Instrumentation interruptions */ + unsigned int hw_rsvd14; /* Reswerved */ - unsigned int hw_spare[27]; /* Pad to 256 bytes */ + unsigned int hw_spare[19]; /* Pad to 256 bytes */ } hw_counters; +#pragma pack() extern hw_counters hw_counts(NCPUS); diff --git a/osfmk/ppc/hw_exception.s b/osfmk/ppc/hw_exception.s index 312bb42ce..ee177d39f 100644 --- a/osfmk/ppc/hw_exception.s +++ b/osfmk/ppc/hw_exception.s @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -53,7 +54,8 @@ #define VERIFYSAVE 0 #define FPVECDBG 0 - +#define INSTRUMENT 0 + /* * thandler(type) * @@ -97,17 +99,17 @@ LEXT(thandler) ; Trap handler lwz r1,PP_ISTACKPTR(r25) ; Get interrupt stack pointer + mfsprg r13,1 ; Get the current thread cmpwi cr0,r1,0 ; Are we on interrupt stack? - lwz r6,PP_ACTIVE_THREAD(r25) ; Get the pointer to the currently active thread + lwz r6,ACT_THREAD(r13) ; Get the shuttle beq- cr0,EXT(ihandler) ; If on interrupt stack, treat this as interrupt... - lwz r13,THREAD_TOP_ACT(r6) ; Point to the active activation lwz r26,ACT_MACT_SPF(r13) ; Get special flags lwz r8,ACT_MACT_PCB(r13) ; Get the last savearea used rlwinm. r26,r26,0,bbThreadbit,bbThreadbit ; Do we have Blue Box Assist active? lwz r1,ACT_MACT_KSP(r13) ; Get the top of kernel stack bnel- checkassist ; See if we should assist this stw r4,ACT_MACT_PCB(r13) ; Point to our savearea - stw r8,SAVprev(r4) ; Queue the new save area in the front + stw r8,SAVprev+4(r4) ; Queue the new save area in the front #if VERIFYSAVE bl versave ; (TEST/DEBUG) @@ -116,16 +118,17 @@ LEXT(thandler) ; Trap handler lwz r9,THREAD_KERNEL_STACK(r6) ; Get our kernel stack start cmpwi cr1,r1,0 ; Are we already on kernel stack? 
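Before going further into thandler, it is worth noting what the #pragma pack(4) added to hw_counters.h above buys: the hw_counters fields are bumped from assembly at fixed byte offsets (via the genassym constants), so the structure must keep exactly its declared layout, 64 ints in 256 bytes, with no compiler-inserted padding. A reduced, self-contained sketch of the idea (field names here are a hypothetical subset):

    #include <stdio.h>

    #pragma pack(4)                 /* pin the layout, as in the patch */
    typedef struct hw_counters_sketch {
        unsigned int hw_Exceptions; /* illustrative fields only */
        unsigned int hw_Traps;
        unsigned int hw_spare[62];  /* pad to 256 bytes */
    } hw_counters_sketch;
    #pragma pack()

    int main(void)
    {
        /* If padding crept in, every assembler offset would be wrong. */
        printf("sizeof = %lu\n",
               (unsigned long)sizeof(hw_counters_sketch)); /* expect 256 */
        return 0;
    }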
 stw r13,SAVact(r4) ; Mark the savearea as belonging to this activation
- lwz r26,saver1(r4) ; Get the stack at interrupt time
+ lwz r26,saver1+4(r4) ; Get the stack at interrupt time
 bne+ cr1,.L_kstackfree ; We are not on kernel stack yet...
 subi r1,r26,FM_REDZONE ; Make a red zone on interrupt time kernel stack

 .L_kstackfree:
- lwz r7,savesrr1(r4) ; Pick up the entry MSR
+ lwz r7,savesrr1+4(r4) ; Pick up the entry MSR
 sub r9,r1,r9 ; Get displacement into the kernel stack
 li r0,0 ; Make this 0
+ rlwinm. r0,r9,0,28,31 ; Verify that we have a 16-byte aligned stack (and get a 0)
 cmplwi cr2,r9,KERNEL_STACK_SIZE ; Do we still have room on the stack?
 beq cr1,.L_state_on_kstack ; using above test for pcb/stack
@@ -133,12 +136,13 @@ LEXT(thandler) ; Trap handler

 .L_state_on_kstack:
 lwz r9,savevrsave(r4) ; Get the VRSAVE register
+ bne-- kernelStackUnaligned ; Stack is unaligned...
 rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
 subi r1,r1,FM_SIZE ; Push a header onto the current stack
- bgt- cr2,kernelStackBad ; Kernel stack is bogus...
+ bgt-- cr2,kernelStackBad ; Kernel stack is bogus...
 kernelStackNotBad: ; Vector was off
- beq+ tvecoff ; Vector off, do not save vrsave...
+ beq++ tvecoff ; Vector off, do not save vrsave...
 stw r9,liveVRS(r25) ; Set the live value

tvecoff: stw r26,FM_BACKPTR(r1) ; Link back to the previous frame
@@ -148,7 +152,7 @@ tvecoff: stw r26,FM_BACKPTR(r1) ; Link back to the previous frame
 * which links back to the trapped routine. The second is
 * that which the C routine below will need
 */
- lwz r3,savesrr0(r4) ; Get the point of interruption
+ lwz r3,savesrr0+4(r4) ; Get the point of interruption
 stw r3,FM_LR_SAVE(r1) ; save old instr ptr as LR value
 stwu r1, -FM_SIZE(r1) ; and make new frame
 #endif /* DEBUG */
@@ -175,12 +179,17 @@ tvecoff: stw r26,FM_BACKPTR(r1) ; Link back to the previous frame
 rlwinm. r0,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? (cr0_eq == 1 if yes)
 cmpi cr2,r3,T_PREEMPT ; Is this a preemption?
+
+ beq-- .L_check_VM
+ stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
+.L_check_VM:
 crandc cr0_eq,cr7_eq,cr0_eq ; Do not intercept if we are in the kernel (cr0_eq == 1 if yes)

- lwz r6,savedar(r4) ; Get the DAR
+ lwz r6,savedar(r4) ; Get the DAR (top)
+ lwz r7,savedar+4(r4) ; Get the DAR (bottom)

- beq- cr2, .L_call_trap ; Do not turn on interrupts for T_PREEMPT
+ beq- cr2,.L_call_trap ; Do not turn on interrupts for T_PREEMPT
 beq- exitFromVM ; Any true trap but T_MACHINE_CHECK exits us from the VM...
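The recurring "+4" in savesrr1+4, saver1+4, and savedar+4 above reflects the savearea register slots being widened to 64 bits in this release: on big-endian PowerPC, the low 32 bits of a 64-bit slot sit four bytes past the slot's offset, so 32-bit loads address offset+4. A small demonstration of the layout rule (host-endianness dependent; on a big-endian machine this prints 89abcdef, while a little-endian host would show the other half):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint64_t save_r1 = 0x0123456789abcdefULL;   /* a 64-bit slot */
        uint32_t low;

        /* offset+4 == low word of the slot on a big-endian machine */
        memcpy(&low, (const uint8_t *)&save_r1 + 4, sizeof low);
        printf("%08x\n", (unsigned int)low);
        return 0;
    }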
 /* syscall exception might warp here if there's nothing left
@@ -191,10 +200,10 @@ tvecoff: stw r26,FM_BACKPTR(r1) ; Link back to the previous frame

 bl EXT(trap)

+ lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
 mfmsr r7 ; Get the MSR
- rlwinm r7,r7,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
- rlwinm r7,r7,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
- rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear the interrupt enable mask
+ ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
+ andc r7,r7,r10 ; Turn off VEC, FP, and EE
 mtmsr r7 ; Disable for interrupts
 mfsprg r10,0 ; Restore the per_proc info
 /*
@@ -204,21 +213,21 @@
 */
 thread_return:
- lwz r4,SAVprev(r3) ; Pick up the previous savearea
 lwz r11,SAVflags(r3) ; Get the flags of the current savearea
- lwz r8,savesrr1(r3) ; Get the MSR we are going to
+ lwz r0,savesrr1+4(r3) ; Get the MSR we are going to
+ lwz r4,SAVprev+4(r3) ; Pick up the previous savearea
+ mfsprg r8,1 ; Get the current thread
 rlwinm r11,r11,0,15,13 ; Clear the syscall flag
- lwz r1,PP_ACTIVE_THREAD(r10) ; Get the active thread
- rlwinm. r8,r8,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going to the user?
- mfsprg r8,1 ; Get the current activation
+ rlwinm. r0,r0,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going to the user?
+ lwz r1,ACT_THREAD(r8) ; Get the shuttle
 stw r11,SAVflags(r3) ; Save back the flags (with reset stack cleared)
-
+
+ lwz r5,THREAD_KERNEL_STACK(r1) ; Get the base pointer to the stack
 stw r4,ACT_MACT_PCB(r8) ; Point to the previous savearea (or 0 if none)
+ addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty

- beq- chkfac ; We are not leaving the kernel yet...
+ beq-- chkfac ; We are not leaving the kernel yet...

- lwz r5,THREAD_KERNEL_STACK(r1) ; Get the base pointer to the stack
- addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
 stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
 b chkfac ; Go end it all...
@@ -241,6 +250,12 @@ kernelStackBad:
 li r3,failStack ; Bad stack code
 sc ; System ABEND

+kernelStackUnaligned:
+ lis r0,hi16(Choke) ; Choke code
+ ori r0,r0,lo16(Choke) ; and the rest
+ li r3,failUnalignedStk ; Unaligned stack code
+ sc ; System ABEND
+

 /*
 * shandler(type)
@@ -276,43 +291,44 @@
 .globl EXT(shandler)
LEXT(shandler) ; System call handler

+ lwz r7,savesrr1+4(r4) ; Get the SRR1 value
 mfsprg r25,0 ; Get the per proc area
- lwz r0,saver0(r4) ; Get the original syscall number
+ lwz r0,saver0+4(r4) ; Get the original syscall number
 lwz r17,PP_ISTACKPTR(r25) ; Get interrupt stack pointer
+ mfsprg r13,1 ; Get the current thread
 rlwinm r15,r0,0,0,19 ; Clear the bottom of call number for fast check
 mr. r17,r17 ; Are we on interrupt stack?
- lwz r7,savesrr1(r4) ; Get the SRR1 value
- beq- EXT(ihandler) ; On interrupt stack, not allowed...
 lwz r9,savevrsave(r4) ; Get the VRsave register
+ beq-- EXT(ihandler) ; On interrupt stack, not allowed...
 rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
- lwz r16,PP_ACTIVE_THREAD(r25) ; Get the thread pointer
- mfsprg r13,1 ; Pick up the active thread
+ lwz r16,ACT_THREAD(r13) ; Get the shuttle

- beq+ svecoff ; Vector off, do not save vrsave...
+ beq++ svecoff ; Vector off, do not save vrsave...
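A pattern worth calling out in these hunks is the systematic change of branch suffixes from "-"/"+" to "--"/"++". On PowerPC these suffixes set the static branch-prediction hint bits, and the doubled forms assert a stronger prediction, presumably worthwhile on the newer 64-bit processors this release targets (that reading is an inference from the diff, not something the patch states). The closest C-level analogy is GCC's __builtin_expect, sketched below with a hypothetical check mirroring the interrupt-stack test above:

    #include <stdio.h>

    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    static int handle(int on_interrupt_stack)
    {
        if (unlikely(on_interrupt_stack))   /* like "beq--": rarely taken */
            return -1;
        return 0;                           /* like "beq++": expected path */
    }

    int main(void)
    {
        printf("%d\n", handle(0));
        return 0;
    }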
stw r9,liveVRS(r25) ; Set the live value ; ; Check if SCs are being redirected for the BlueBox or to VMM ; svecoff: lwz r6,ACT_MACT_SPF(r13) ; Pick up activation special flags - mtcrf 0x41,r6 ; Check special flags + mtcrf 0x40,r6 ; Check special flags + mtcrf 0x01,r6 ; Check special flags crmove cr6_eq,runningVMbit ; Remember if we are in VMM - bne cr6,sVMchecked ; Not running VM + bne++ cr6,sVMchecked ; Not running VM lwz r18,spcFlags(r25) ; Load per_proc special flags rlwinm. r18,r18,0,FamVMmodebit,FamVMmodebit ; Is FamVMmodebit set? beq sVMchecked ; Not in FAM cmpwi r0,0x6004 ; Is it vmm_dispatch syscall: bne sVMchecked - lwz r26,saver3(r4) ; Get the original syscall number + lwz r26,saver3+4(r4) ; Get the original syscall number cmpwi cr6,r26,kvmmExitToHost ; vmm_exit_to_host request sVMchecked: - bf+ bbNoMachSCbit,noassist ; Take branch if SCs are not redirected + bf++ bbNoMachSCbit,noassist ; Take branch if SCs are not redirected lwz r26,ACT_MACT_BEDA(r13) ; Pick up the pointer to the blue box exception area b EXT(atomic_switch_syscall) ; Go to the assist... noassist: cmplwi r15,0x7000 ; Do we have a fast path trap? lwz r14,ACT_MACT_PCB(r13) ; Now point to the PCB - beql+ fastpath ; We think it is a fastpath... + beql fastpath ; We think it is a fastpath... lwz r1,ACT_MACT_KSP(r13) ; Get the kernel stack pointer #if DEBUG @@ -322,16 +338,17 @@ noassist: cmplwi r15,0x7000 ; Do we have a fast path trap? #endif /* DEBUG */ stw r4,ACT_MACT_PCB(r13) ; Point to our savearea + stw r4,ACT_MACT_UPCB(r13) ; Store user savearea li r0,0 ; Clear this out - stw r14,SAVprev(r4) ; Queue the new save area in the front + stw r14,SAVprev+4(r4) ; Queue the new save area in the front stw r13,SAVact(r4) ; Point the savearea at its activation #if VERIFYSAVE bl versave ; (TEST/DEBUG) #endif + lwz r15,saver1+4(r4) ; Grab interrupt time stack mr r30,r4 ; Save pointer to the new context savearea - lwz r15,saver1(r4) ; Grab interrupt time stack stw r0,ACT_MACT_KSP(r13) ; Mark stack as busy with 0 val stw r15,FM_BACKPTR(r1) ; Link stack frame backwards @@ -340,44 +357,38 @@ noassist: cmplwi r15,0x7000 ; Do we have a fast path trap? * which links back to the trapped routine. The second is * that which the C routine below will need */ - lwz r8,savesrr0(r30) ; Get the point of interruption + lwz r8,savesrr0+4(r30) ; Get the point of interruption stw r8,FM_LR_SAVE(r1) ; Save old instr ptr as LR value stwu r1, -FM_SIZE(r1) ; and make new frame #endif /* DEBUG */ - mfmsr r11 ; Get the MSR lwz r15,SAVflags(r30) ; Get the savearea flags + lwz r0,saver0+4(r30) ; Get R0 back + mfmsr r11 ; Get the MSR + stwu r1,-(FM_SIZE+ARG_SIZE)(r1) ; Make a stack frame ori r11,r11,lo16(MASK(MSR_EE)) ; Turn on interruption enabled bit - lwz r0,saver0(r30) ; Get R0 back - oris r15,r15,SAVsyscall >> 16 ; Mark that it this is a syscall rlwinm r10,r0,0,0,19 ; Keep only the top part - stwu r1,-(FM_SIZE+ARG_SIZE)(r1) ; Make a stack frame + oris r15,r15,SAVsyscall >> 16 ; Mark that it this is a syscall cmplwi r10,0x6000 ; Is it the special ppc-only guy? stw r15,SAVflags(r30) ; Save syscall marker - beq- cr6,exitFromVM ; It is time to exit from alternate context... + beq-- cr6,exitFromVM ; It is time to exit from alternate context... - beq- ppcscall ; Call the ppc-only system call handler... + beq-- ppcscall ; Call the ppc-only system call handler... + mr. r0,r0 ; What kind is it? mtmsr r11 ; Enable interruptions - lwz r0,saver0(r30) ; Get the system call selector - mr. r0,r0 ; What kind is it? 
- blt- .L_kernel_syscall ; System call number if negative, this is a mach call... + blt-- .L_kernel_syscall ; System call number if negative, this is a mach call... + lwz r8,ACT_TASK(r13) ; Get our task cmpwi cr0,r0,0x7FFA ; Special blue box call? - beq- .L_notify_interrupt_syscall ; Yeah, call it... + beq-- .L_notify_interrupt_syscall ; Yeah, call it... - lwz r8,ACT_TASK(r13) ; Get our task - lis r10,hi16(EXT(c_syscalls_unix)) ; Get top half of counter address lwz r7,TASK_SYSCALLS_UNIX(r8) ; Get the current count - ori r10,r10,lo16(EXT(c_syscalls_unix)) ; Get low half of counter address - addi r7,r7,1 ; Bump it - lwz r9,0(r10) ; Get counter - stw r7,TASK_SYSCALLS_UNIX(r8) ; Save it mr r3,r30 ; Get PCB/savearea mr r4,r13 ; current activation - addi r9,r9,1 ; Add 1 - stw r9,0(r10) ; Save it back + addi r7,r7,1 ; Bump it + stw r7,TASK_SYSCALLS_UNIX(r8) ; Save it bl EXT(unix_syscall) ; Check out unix... .L_call_server_syscall_exception: @@ -389,9 +400,16 @@ noassist: cmplwi r15,0x7000 ; Do we have a fast path trap? b EXT(doexception) ; Go away, never to return... .L_notify_interrupt_syscall: - lwz r3,saver3(r30) ; Get the new PC address to pass in + lwz r3,saver3+4(r30) ; Get the new PC address to pass in bl EXT(syscall_notify_interrupt) - b .L_syscall_return +/* + * Ok, return from C function, R3 = return value + * + * saved state is still in R30 and the active thread is in R16 . + */ + mr r31,r16 ; Move the current thread pointer + stw r3,saver3+4(r30) ; Stash the return code + b .L_thread_syscall_ret_check_ast ; ; Handle PPC-only system call interface @@ -435,9 +453,19 @@ ppcscall: rlwinm r11,r0,2,18,29 ; Make an index into the table mr r3,r30 ; Pass the savearea mr r4,r13 ; Pass the activation mr. r11,r11 ; See if there is a function here - mtlr r11 ; Set the function address + mtctr r11 ; Set the function address beq- .L_call_server_syscall_exception ; Disabled call... - blrl ; Call it +#if INSTRUMENT + mfspr r4,pmc1 ; Get stamp + stw r4,0x6100+(9*16)+0x0(0) ; Save it + mfspr r4,pmc2 ; Get stamp + stw r4,0x6100+(9*16)+0x4(0) ; Save it + mfspr r4,pmc3 ; Get stamp + stw r4,0x6100+(9*16)+0x8(0) ; Save it + mfspr r4,pmc4 ; Get stamp + stw r4,0x6100+(9*16)+0xC(0) ; Save it +#endif + bctrl ; Call it .globl EXT(ppcscret) @@ -447,10 +475,50 @@ LEXT(ppcscret) bgt+ .L_thread_syscall_ret_check_ast ; Take normal AST checking return.... mfsprg r10,0 ; Get the per_proc blt+ .L_thread_syscall_return ; Return, but no ASTs.... - lwz r0,saver0(r30) ; Restore the system call number + lwz r0,saver0+4(r30) ; Restore the system call number b .L_call_server_syscall_exception ; Go to common exit... + +/* + * we get here for mach system calls + * when kdebug tracing is enabled + */ + +ksystrace: + mr r4,r30 ; Pass in saved state + bl EXT(syscall_trace) + + cmplw r31,r29 ; Is this syscall in the table? + add r31,r27,r28 ; Point right to the syscall table entry + + bge- .L_call_server_syscall_exception ; The syscall number is invalid + + lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address +; +; NOTE: We do not support more than 8 parameters for PPC. The only +; system call to use more than 8 is mach_msg_overwrite_trap and it +; uses 9. We pass a 0 in as number 9. 
+; + lwz r3,saver3+4(r30) ; Restore r3 + lwz r4,saver4+4(r30) ; Restore r4 + mtctr r0 ; Set the function call address + lwz r5,saver5+4(r30) ; Restore r5 + lwz r6,saver6+4(r30) ; Restore r6 + lwz r7,saver7+4(r30) ; Restore r7 + li r0,0 ; Clear this out + lwz r8,saver8+4(r30) ; Restore r8 + lwz r9,saver9+4(r30) ; Restore r9 + lwz r10,saver10+4(r30) ; Restore r10 + stw r0,FM_ARG0(r1) ; Clear that 9th parameter just in case some fool uses it + bctrl ; perform the actual syscall + + mr r4,r30 ; Pass in the savearea + bl EXT(syscall_trace_end) ; Trace the exit of the system call + b .L_mach_return + + + /* Once here, we know that the syscall was -ve * we should still have r1=ksp, * r16 = pointer to current thread, @@ -459,203 +527,86 @@ LEXT(ppcscret) * r30 = pointer to saved state (in pcb) */ - .align 5 + .align 5 .L_kernel_syscall: ; ; Call a function that can print out our syscall info ; Note that we don t care about any volatiles yet ; - mr r4,r30 - bl EXT(syscall_trace) - lwz r0,saver0(r30) ; Get the system call selector */ - neg r31,r0 ; Make system call number positive and put in r31 - lis r29,hi16(EXT(mach_trap_count)) ; High part of valid trap number - ori r29,r29,lo16(EXT(mach_trap_count)) ; Low part of valid trap number - lis r28,hi16(EXT(mach_trap_table)) ; High part of trap table - lwz r29,0(r29) ; Get the first invalid system call number - ori r28,r28,lo16(EXT(mach_trap_table)) ; Low part of trap table - - cmplw r31,r29 ; See if we have a valid system call number - slwi r31,r31,MACH_TRAP_OFFSET_POW2 ; Get offset into table - - bge- .L_call_server_syscall_exception ; System call number of bogus - - add r31,r31,r28 ; Point to the system call entry - lis r28,hi16(EXT(kern_invalid)) ; Get the high invalid routine address - lwz r0,MACH_TRAP_FUNCTION(r31) ; Grab the system call routine address - ori r28,r28,lo16(EXT(kern_invalid)) ; Get the low part of the invalid routine address - lwz r29,MACH_TRAP_ARGC(r31) ; Get the number of arguments in the call - cmplw r0,r28 ; Is this an invalid entry? - beq- .L_call_server_syscall_exception ; Yes, it is invalid... - -/* get arg count. If argc > 8 then not all args were in regs, - * so we must perform copyin. - */ - cmpwi cr0,r29,8 ; Do we have more than 8 arguments? - ble+ .L_syscall_got_args ; Nope, no copy in needed... - -/* argc > 8 - perform a copyin */ -/* if the syscall came from kernel space, we can just copy */ - - lwz r0,savesrr1(r30) ; Pick up exception time MSR - andi. r0,r0,MASK(MSR_PR) ; Check the priv bit - bne+ .L_syscall_arg_copyin ; We are not priviliged... - -/* we came from a privilaged task, just do a copy */ -/* get user's stack pointer */ - - lwz r28,saver1(r30) ; Get the stack pointer - - subi r29,r29,8 ; Get the number of arguments to copy - - addi r28,r28,COPYIN_ARG0_OFFSET-4 ; Point to source - 4 - addi r27,r1,FM_ARG0-4 ; Point to sink - 4 - -.L_syscall_copy_word_loop: - addic. r29,r29,-1 ; Count down the number of arguments left - lwz r0,4(r28) ; Pick up the argument from the stack - addi r28,r28,4 ; Point to the next source - stw r0,4(r27) ; Store the argument - addi r27,r27,4 ; Point to the next sink - bne+ .L_syscall_copy_word_loop ; Move all arguments... - b .L_syscall_got_args ; Go call it now... 
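The block being deleted in this hunk is the old spill-argument fetch: a trap with more than eight parameters kept the overflow on the caller's stack, and the kernel either block-copied it (privileged caller) or did a fault-guarded copyin through SR_COPYIN (user caller). Roughly, in C; `copyin_words` and the parameter names are hypothetical.

```c
#include <string.h>

#define NREG_ARGS 8                 /* arguments that arrive in r3-r10 */

/* Hypothetical guarded copy: returns 0, or nonzero if a user address
 * faults (the assembly used a THREAD_RECOVER trampoline for this). */
extern int copyin_words(const unsigned *user_src, unsigned *kern_dst,
                        unsigned nwords);

static int
fetch_stacked_args(unsigned argc, int caller_is_kernel,
                   const unsigned *caller_stack, unsigned *argbuf)
{
    if (argc <= NREG_ARGS)
        return 0;                   /* everything already fit in registers */

    unsigned extra = argc - NREG_ARGS;

    if (caller_is_kernel) {
        /* privileged caller: addresses are mapped, a plain copy will do */
        memcpy(argbuf, caller_stack, extra * sizeof(unsigned));
        return 0;
    }
    /* user caller: pay the price of a real copyin with fault recovery */
    return copyin_words(caller_stack, argbuf, extra);
}
```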
- - -/* we came from a user task, pay the price of a real copyin */ -/* set recovery point */ - - .align 5 - -.L_syscall_arg_copyin: - lwz r8,ACT_VMMAP(r13) ; Get the vm_map for this activation - lis r28,hi16(.L_syscall_copyin_recover) - lwz r8,VMMAP_PMAP(r8) ; Get the pmap - ori r28,r28,lo16(.L_syscall_copyin_recover) - addi r8,r8,PMAP_SEGS ; Point to the pmap SR slots - stw r28,THREAD_RECOVER(r16) ; R16 still holds thread ptr - -/* We can manipulate the COPYIN segment register quite easily - * here, but we've also got to make sure we don't go over a - * segment boundary - hence some mess. - * Registers from 12-29 are free for our use. - */ - - - lwz r28,saver1(r30) ; Get the stack pointer - subi r29,r29,8 ; Get the number of arguments to copy - addi r28,r28,COPYIN_ARG0_OFFSET ; Set source in user land - -/* set up SR_COPYIN to allow us to copy, we may need to loop - * around if we change segments. We know that this previously - * pointed to user space, so the sid doesn't need setting. - */ - - rlwinm r7,r28,6,26,29 ; Get index to the segment slot - -.L_syscall_copyin_seg_loop: - lwzx r10,r8,r7 ; Get the source SR value - rlwinm r26,r28,0,4,31 ; Clear the segment number from source address - mtsr SR_COPYIN,r10 ; Set the copyin SR - isync - - oris r26,r26,(SR_COPYIN_NUM << (28-16)) ; Insert the copyin segment number into source address - - addi r27,r1,FM_ARG0-4 ; Point to area - 4 where we will store the arguments + lwz r10,ACT_TASK(r13) ; Get our task + lwz r0,saver0+4(r30) + lis r8,hi16(EXT(kdebug_enable)) ; Get top of kdebug_enable + lis r28,hi16(EXT(mach_trap_table)) ; Get address of table + ori r8,r8,lo16(EXT(kdebug_enable)) ; Get bottom of kdebug_enable + lwz r8,0(r8) ; Get kdebug_enable + + lwz r7,TASK_SYSCALLS_MACH(r10) ; Get the current count + neg r31,r0 ; Make this positive + slwi r27,r31,MACH_TRAP_OFFSET_POW2 ; Convert index to offset + ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table + addi r7,r7,1 ; Bump TASK_SYSCALLS_MACH count + cmplwi r8,0 ; Is kdebug_enable non-zero + stw r7,TASK_SYSCALLS_MACH(r10) ; Save count + bne-- ksystrace ; yes, tracing enabled + + cmplwi r31,MACH_TRAP_TABLE_COUNT ; Is this syscall in the table? + add r31,r27,r28 ; Point right to the syscall table entry + + bge-- .L_call_server_syscall_exception ; The syscall number is invalid -.L_syscall_copyin_word_loop: - lwz r0,0(r26) ; MAY CAUSE PAGE FAULT! - subi r29,r29,1 ; Decrement count - addi r26,r26,4 ; Bump input - stw r0,4(r27) ; Save the copied in word - mr. r29,r29 ; Are they all moved? - addi r27,r27,4 ; Bump output - beq+ .L_syscall_copyin_done ; Escape if we are done... - - rlwinm. r0,r26,0,4,29 ; Did we just step into a new segment? - addi r28,r28,4 ; Bump up user state address also - bne+ .L_syscall_copyin_word_loop ; We are still on the same segment... - - addi r7,r7,4 ; Bump to next slot - b .L_syscall_copyin_seg_loop ; On new segment! 
remap - -/* Don't bother restoring SR_COPYIN, we can leave it trashed */ -/* clear thread recovery as we're done touching user data */ - - .align 5 - -.L_syscall_copyin_done: - li r0,0 - stw r0,THREAD_RECOVER(r16) ; R16 still holds thread ptr - -.L_syscall_got_args: - lwz r0,MACH_TRAP_FUNCTION(r31) ; Get function address - lwz r8,ACT_TASK(r13) ; Get our task - lis r10,hi16(EXT(c_syscalls_mach)) ; Get top half of counter address - lwz r7,TASK_SYSCALLS_MACH(r8) ; Get the current count - lwz r3,saver3(r30) ; Restore r3 - addi r7,r7,1 ; Bump it - ori r10,r10,lo16(EXT(c_syscalls_mach)) ; Get low half of counter address - stw r7,TASK_SYSCALLS_MACH(r8) ; Save it - lwz r4,saver4(r30) ; Restore r4 - lwz r9,0(r10) ; Get counter - mtctr r0 ; Set function address - lwz r5,saver5(r30) ; Restore r5 - lwz r6,saver6(r30) ; Restore r6 - addi r9,r9,1 ; Add 1 - lwz r7,saver7(r30) ; Restore r7 - lwz r8,saver8(r30) ; Restore r8 - stw r9,0(r10) ; Save it back - lwz r9,saver9(r30) ; Restore r9 - lwz r10,saver10(r30) ; Restore r10 - + lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address ; -; Note that all arguments from the system call are passed into the function +; NOTE: We do not support more than 8 parameters for PPC. The only +; system call to use more than 8 is mach_msg_overwrite_trap and it +; uses 9. We pass a 0 in as number 9. ; - - bctrl ; Perform the actual syscall - -/* 'standard' syscall returns here - INTERRUPTS ARE STILL ON */ - -/* r3 contains value that we're going to return to the user - */ + lwz r3,saver3+4(r30) ; Restore r3 + lwz r4,saver4+4(r30) ; Restore r4 + lwz r5,saver5+4(r30) ; Restore r5 + mtctr r0 ; Set the function call address + lwz r6,saver6+4(r30) ; Restore r6 + lwz r7,saver7+4(r30) ; Restore r7 + lwz r8,saver8+4(r30) ; Restore r8 + li r0,0 ; Clear this out + lwz r9,saver9+4(r30) ; Restore r9 + lwz r10,saver10+4(r30) ; Restore r10 + stw r0,FM_ARG0(r1) ; Clear that 9th parameter just in case some fool uses it + bctrl ; perform the actual syscall /* * Ok, return from C function, R3 = return value * * get the active thread's PCB pointer and thus pointer to user state - * saved state is still in R30 and the active thread is in R16 . 
+ * saved state is still in R30 and the active thread is in R16
 */
-/* Store return value into saved state structure, since
- * we need to pick up the value from here later - the
- * syscall may perform a thread_set_syscall_return
+.L_mach_return:
+ mr r31,r16 ; Move the current thread pointer
+ stw r3,saver3+4(r30) ; Stash the return code
+ cmpi cr0,r3,KERN_INVALID_ARGUMENT ; deal with invalid system calls
+ beq- cr0,.L_mach_invalid_ret ; otherwise fall through into the normal return path
+.L_mach_invalid_arg:
+
+
+/* 'standard' syscall returns here - INTERRUPTS ARE STILL ON
+ * the syscall may perform a thread_set_syscall_return
 * followed by a thread_exception_return, ending up
 * at thread_syscall_return below, with SS_R3 having
 * been set up already
- */
-
-/* When we are here, r16 should point to the current thread,
+ *
+ * When we are here, r31 should point to the current thread,
 * r30 should point to the current pcb
+ * r3 contains value that we're going to return to the user
+ * which has already been stored back into the save area
 */
-
-/* save off return value, we must load it
- * back anyway for thread_exception_return
- */
-
-.L_syscall_return:
- mr r31,r16 ; Move the current thread pointer
- stw r3,saver3(r30) ; Stash the return code
-
- mr r4,r30 ; Pass in the savearea
- bl EXT(syscall_trace_end) ; Trace the exit of the system call
-
+
.L_thread_syscall_ret_check_ast:
+ lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
 mfmsr r12 ; Get the current MSR
- rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
- rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
- rlwinm r12,r12,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions enable bit
+ ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
+ andc r12,r12,r10 ; Turn off VEC, FP, and EE
 mtmsr r12 ; Turn interruptions off
 mfsprg r10,0 ; Get the per_processor block
@@ -665,7 +616,7 @@ LEXT(ppcscret)
 lwz r4,PP_NEED_AST(r10) ; Get the pointer to the ast requests
 lwz r4,0(r4) ; Get the flags
 cmpi cr0,r4, 0 ; Any pending asts?
- beq+ cr0,.L_syscall_no_ast ; Nope...
+ beq++ cr0,.L_syscall_no_ast ; Nope...
/* Yes there is, call ast_taken
 * pretending that the user thread took an AST exception here,
@@ -674,9 +625,9 @@
#if DEBUG
/* debug assert - make sure that we're not returning to kernel */
- lwz r3,savesrr1(r30)
+ lwz r3,savesrr1+4(r30)
 andi. r3,r3,MASK(MSR_PR)
- bne+ scrnotkern ; returning to user level, check
+ bne++ scrnotkern ; returning to user level, check
 lis r0,hi16(Choke) ; Choke code
 ori r0,r0,lo16(Choke) ; and the rest
@@ -691,6 +642,27 @@ scrnotkern:
 bl EXT(ast_taken) ; Process the pending ast
 b .L_thread_syscall_ret_check_ast ; Go see if there was another...
+.L_mach_invalid_ret:
+/*
+ * need to figure out why we got a KERN_INVALID_ARGUMENT
+ * if it was due to a non-existent system call
+ * then we want to throw an exception... otherwise
+ * we want to pass the error code back to the caller
+ */
+ lwz r0,saver0+4(r30) ; reload the original syscall number
+ neg r28,r0 ; Make this positive
+ slwi r27,r28,MACH_TRAP_OFFSET_POW2 ; Convert index to offset
+ lis r28,hi16(EXT(mach_trap_table)) ; Get address of table
+ ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
+ add r28,r27,r28 ; Point right to the syscall table entry
+ lwz r27,MACH_TRAP_FUNCTION(r28) ; Pick up the function address
+ lis r28,hi16(EXT(kern_invalid)) ; Get high half of invalid syscall function
+ ori r28,r28,lo16(EXT(kern_invalid)) ; Get low half of invalid syscall function
+ cmpw cr0,r27,r28 ; Check if this is an invalid system call
+ beq-- .L_call_server_syscall_exception ; We have a bad system call
+ b .L_mach_invalid_arg ; a system call returned KERN_INVALID_ARGUMENT
+
+
/* thread_exception_return returns to here, almost all
 * registers intact. It expects a full context restore
 * of what it hasn't restored itself (ie. what we use).
@@ -706,40 +678,18 @@ scrnotkern:
.L_thread_syscall_return:
 mr r3,r30 ; Get savearea to the correct register for common exit
- mfsprg r8,1 ; Now find the current activation
 lwz r11,SAVflags(r30) ; Get the flags
 lwz r5,THREAD_KERNEL_STACK(r31) ; Get the base pointer to the stack
+ lwz r4,SAVprev+4(r30) ; Get the previous save area
 rlwinm r11,r11,0,15,13 ; Clear the syscall flag
- lwz r4,SAVprev(r30) ; Get the previous save area
- stw r11,SAVflags(r30) ; Stick back the flags
+ mfsprg r8,1 ; Now find the current activation
 addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
- stw r4,ACT_MACT_PCB(r8) ; Save previous save area
+ stw r11,SAVflags(r30) ; Stick back the flags
 stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
+ stw r4,ACT_MACT_PCB(r8) ; Save previous save area
 b chkfac ; Go end it all...
- .align 5
-
-.L_syscall_copyin_recover:
-
-/* This is the catcher for any data faults in the copyin
- * of arguments from the user's stack.
- * r30 still holds a pointer to the PCB
- *
- * call syscall_error(EXC_BAD_ACCESS, EXC_PPC_VM_PROT_READ, sp, ssp),
- *
- * we already had a frame so we can do this
- */
-
- li r3,EXC_BAD_ACCESS ; Set bad access code
- li r4,EXC_PPC_VM_PROT_READ ; Set protection exception
- lwz r5,saver1(r30) ; Point to the stack
- mr r6,r30 ; Pass savearea
-
- bl EXT(syscall_error) ; Generate error...
- b .L_syscall_return ; Continue out...
-
-
/*
 * thread_exception_return()
 *
@@ -754,10 +704,10 @@
LEXT(thread_bootstrap_return) ; NOTE: THIS IS GOING AWAY IN A FEW DAYS....
LEXT(thread_exception_return) ; Directly return to user mode
.L_thread_exc_ret_check_ast:
+ lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
 mfmsr r3 ; Get the MSR
- rlwinm r3,r3,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
- rlwinm r3,r3,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
- rlwinm r3,r3,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear EE
+ ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
+ andc r3,r3,r10 ; Turn off VEC, FP, and EE
 mtmsr r3 ; Disable interrupts
/* Check to see if there's an outstanding AST */
@@ -770,7 +720,7 @@ LEXT(thread_exception_return) ; Directly return to user mode
 lwz r4,PP_NEED_AST(r10)
 lwz r4,0(r4)
 cmpi cr0,r4, 0
- beq cr0,.L_exc_ret_no_ast
+ beq+ cr0,.L_exc_ret_no_ast
/* Yes there is, call ast_taken
 * pretending that the user thread took an AST exception here,
@@ -800,7 +750,7 @@ LEXT(thread_exception_return) ; Directly return to user mode
 * get the active thread's PCB pointer and thus pointer to user state
 */
- lwz r3,savesrr1(r30)
+ lwz r3,savesrr1+4(r30)
 andi.
r3,r3,MASK(MSR_PR) bne+ ret_user2 ; We are ok... @@ -817,9 +767,9 @@ ret_user2: * which takes PCB pointer in R3, not in r30! */ lwz r0,SAVflags(r30) ; Grab the savearea flags - mr r3,r30 ; Copy pcb pointer into r3 in case we need it andis. r0,r0,SAVsyscall>>16 ; Are we returning from a syscall? - beq- cr0,thread_return ; Nope, must be a thread return... + mr r3,r30 ; Copy pcb pointer into r3 in case we need it + beq-- cr0,thread_return ; Nope, must be a thread return... b .L_thread_syscall_return ; Join up with the system call return... ; @@ -835,7 +785,7 @@ makeDummyCtx: li r4,SAVgeneral ; Get the general context type li r0,0 ; Get a 0 stb r4,SAVflags+2(r3) ; Set type - addi r2,r3,savevscr ; Point past what we are clearing + addi r2,r3,savefpscr+4 ; Point past what we are clearing mr r4,r3 ; Save the start cleardummy: stw r0,0(r4) ; Clear stuff @@ -845,7 +795,7 @@ cleardummy: stw r0,0(r4) ; Clear stuff lis r2,hi16(MSR_EXPORT_MASK_SET) ; Set the high part of the user MSR ori r2,r2,lo16(MSR_EXPORT_MASK_SET) ; And the low part - stw r2,savesrr1(r3) ; Set the default user MSR + stw r2,savesrr1+4(r3) ; Set the default user MSR b thread_return ; Go let em try to execute, hah! @@ -869,33 +819,32 @@ LEXT(ihandler) ; Interrupt handler */ * interrupt stack. */ - lwz r10,savesrr1(r4) ; Get SRR1 + lwz r10,savesrr1+4(r4) ; Get SRR1 lwz r7,savevrsave(r4) ; Get the VRSAVE register mfsprg r25,0 ; Get the per_proc block li r14,0 ; Zero this for now rlwinm. r13,r10,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on? lwz r1,PP_ISTACKPTR(r25) ; Get the interrupt stack - li r13,0 ; Zero this for now - lwz r16,PP_ACTIVE_THREAD(r25) ; Get the thread pointer + mfsprg r13,1 ; Get the current thread + li r16,0 ; Zero this for now beq+ ivecoff ; Vector off, do not save vrsave... stw r7,liveVRS(r25) ; Set the live value ivecoff: li r0,0 ; Get a constant 0 - cmplwi cr1,r16,0 ; Are we still booting? - -ifpoff: mr. r1,r1 ; Is it active? - beq- cr1,ihboot1 ; We are still coming up... - lwz r13,THREAD_TOP_ACT(r16) ; Pick up the active thread + rlwinm r5,r10,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? + mr. r1,r1 ; Is it active? + cmplwi cr2,r5,0 ; cr2_eq == 1 if yes + lwz r16,ACT_THREAD(r13) ; Get the shuttle lwz r14,ACT_MACT_PCB(r13) ; Now point to the PCB - -ihboot1: lwz r9,saver1(r4) ; Pick up the rupt time stack - stw r14,SAVprev(r4) ; Queue the new save area in the front + lwz r9,saver1+4(r4) ; Pick up the rupt time stack + stw r14,SAVprev+4(r4) ; Queue the new save area in the front stw r13,SAVact(r4) ; Point the savearea at its activation - beq- cr1,ihboot4 ; We are still coming up... stw r4,ACT_MACT_PCB(r13) ; Point to our savearea + beq cr2,ifromk + stw r4,ACT_MACT_UPCB(r13) ; Store user savearea -ihboot4: bne .L_istackfree ; Nope... +ifromk: bne .L_istackfree ; Nope... /* We're already on the interrupt stack, get back the old * stack pointer and make room for a frame @@ -912,19 +861,27 @@ ihboot4: bne .L_istackfree ; Nope... subi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Adjust to start of stack sub r5,r1,r5 ; Get displacement into debug stack cmplwi cr2,r5,KERNEL_STACK_SIZE-FM_SIZE ; Check if we are on debug stack - blt+ ihsetback ; Yeah, that is ok too... + blt+ cr2,ihsetback ; Yeah, that is ok too... 
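The debug-stack test just above uses a single unsigned comparison of the SP's displacement from the stack base, which rejects both underflow and overflow at once. The same idiom in C, with stand-in values for KERNEL_STACK_SIZE and FM_SIZE (the real ones come from the ppc assym headers):

```c
#include <stdbool.h>
#include <stdint.h>

#define KERNEL_STACK_SIZE 16384     /* stand-in value for the sketch */
#define FM_SIZE           64        /* stand-in minimum frame size */

static bool
sp_on_stack(uintptr_t sp, uintptr_t stack_top)
{
    /* subi r5,r5,KERNEL_STACK_SIZE-FM_SIZE; sub r5,r1,r5; cmplwi cr2,... */
    uintptr_t base = stack_top - (KERNEL_STACK_SIZE - FM_SIZE);

    /* if sp < base the subtraction wraps, so one unsigned compare
     * covers both "below the stack" and "above the stack" */
    return (sp - base) < (KERNEL_STACK_SIZE - FM_SIZE);
}
```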
lis r0,hi16(Choke) ; Choke code ori r0,r0,lo16(Choke) ; and the rest li r3,failStack ; Bad stack code sc ; System ABEND +intUnalignedStk: + lis r0,hi16(Choke) ; Choke code + ori r0,r0,lo16(Choke) ; and the rest + li r3,failUnalignedStk ; Unaligned stack code + sc ; System ABEND + .align 5 .L_istackfree: - lwz r10,SAVflags(r4) + rlwinm. r0,r1,0,28,31 ; Check if stack is aligned (and get 0) + lwz r10,SAVflags(r4) ; Get savearea flags + bne-- intUnalignedStk ; Stack is unaligned... stw r0,PP_ISTACKPTR(r25) ; Mark the stack in use - oris r10,r10,HIGH_ADDR(SAVrststk) ; Indicate we reset stack when we return from this one + oris r10,r10,hi16(SAVrststk) ; Indicate we reset stack when we return from this one stw r10,SAVflags(r4) ; Stick it back /* @@ -949,13 +906,13 @@ ihbootnover: ; (TEST/DEBUG) * which links back to the trapped routine. The second is * that which the C routine below will need */ - lwz r5,savesrr0(r4) ; Get interrupt address + lwz r5,savesrr0+4(r4) ; Get interrupt address stw r5,FM_LR_SAVE(r1) ; save old instr ptr as LR value stwu r1,-FM_SIZE(r1) ; Make another new frame for C routine #endif /* DEBUG */ lwz r5,savedsisr(r4) ; Get the DSISR - lwz r6,savedar(r4) ; Get the DAR + lwz r6,savedar+4(r4) ; Get the DAR bl EXT(interrupt) @@ -968,26 +925,22 @@ ihbootnover: ; (TEST/DEBUG) LEXT(ihandler_ret) ; Marks our return point from debugger entry + lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable mfmsr r0 ; Get our MSR - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r0,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Flip off the interrupt enabled bit + ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE + andc r0,r0,r10 ; Turn off VEC, FP, and EE mtmsr r0 ; Make sure interrupts are disabled mfsprg r10,0 ; Get the per_proc block lwz r7,SAVflags(r3) ; Pick up the flags - lwz r8,PP_ACTIVE_THREAD(r10) ; and the active thread - lwz r9,SAVprev(r3) ; Get previous save area + mfsprg r8,1 ; Get the current thread + lwz r9,SAVprev+4(r3) ; Get previous save area cmplwi cr1,r8,0 ; Are we still initializing? - lwz r12,savesrr1(r3) ; Get the MSR we will load on return - beq- cr1,ihboot2 ; Skip if we are still in init... + lwz r12,savesrr1+4(r3) ; Get the MSR we will load on return lwz r8,THREAD_TOP_ACT(r8) ; Pick up the active thread - -ihboot2: andis. r11,r7,hi16(SAVrststk) ; Is this the first on the stack? - beq- cr1,ihboot3 ; Skip if we are still in init... + andis. r11,r7,hi16(SAVrststk) ; Is this the first on the stack? stw r9,ACT_MACT_PCB(r8) ; Point to previous context savearea - -ihboot3: mr r4,r3 ; Move the savearea pointer + mr r4,r3 ; Move the savearea pointer beq .L_no_int_ast2 ; Get going if not the top-o-stack... @@ -1000,7 +953,7 @@ ihboot3: mr r4,r3 ; Move the savearea pointer lwz r9,PP_INTSTACK_TOP_SS(r10) ; Get the empty stack value andc r7,r7,r11 ; Remove the stack reset bit in case we pass this one stw r9,PP_ISTACKPTR(r10) ; Save that saved state ptr - lwz r3,PP_PREEMPT_CNT(r10) ; Get preemption level + lwz r3,ACT_PREEMPT_CNT(r8) ; Get preemption level stw r7,SAVflags(r4) ; Save the flags cmplwi r3, 0 ; Check for preemption bne .L_no_int_ast ; Do not preempt if level is not zero @@ -1037,7 +990,7 @@ ihboot3: mr r4,r3 ; Move the savearea pointer rlwinm r7,r7,0,15,13 ; Clear the syscall flag li r4,0 ; Assume for a moment that we are in init stw r7,SAVflags(r3) ; Set the flags with cleared syscall flag - beq- cr1,chkfac ; Jump away if we are in init... 
+ beq-- cr1,chkfac ; Jump away if we are in init...
 lwz r4,ACT_MACT_PCB(r8) ; Get the new level marker
@@ -1063,31 +1016,27 @@ ihboot3: mr r4,r3 ; Move the savearea pointer
; are going to user state. CR2_eq will be set to indicate deferred.
;
-chkfac: mr r31,r10 ; Move per_proc address
- mr r30,r4 ; Preserve new level
- lwz r29,savesrr1(r3) ; Get the current MSR
+chkfac: lwz r29,savesrr1+4(r3) ; Get the current MSR
 mr. r28,r8 ; Are we still in boot?
+ mr r31,r10 ; Move per_proc address
+ mr r30,r4 ; Preserve new level
 mr r27,r3 ; Save the old level
- beq- chkenax ; Yeah, skip it all...
+ beq-- chkenax ; Yeah, skip it all...
 rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going into user state?
-#if 0
- beq+ lllll ; (TEST/DEBUG)
- BREAKPOINT_TRAP ; (TEST/DEBUG)
-lllll:
-#endif
-
 lwz r20,curctx(r28) ; Get our current context
 lwz r26,deferctx(r28) ; Get any deferred context switch
+ li r0,1 ; Get set to hold off quickfret
 rlwinm r29,r29,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Turn off floating point for now
 lwz r21,FPUlevel(r20) ; Get the facility level
 cmplwi cr2,r26,0 ; Are we going into a deferred context later?
 rlwinm r29,r29,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Turn off vector for now
 crnor cr2_eq,cr0_eq,cr2_eq ; Set cr2_eq if going to user state and there is deferred
- cmplw r27,r21 ; Are we returning from the active level?
 lhz r19,PP_CPU_NUMBER(r31) ; Get our CPU number
- bne+ fpuchkena ; Nope...
+ cmplw r27,r21 ; Are we returning from the active level?
+ stw r0,holdQFret(r31) ; Make sure we hold off releasing quickfret
+ bne++ fpuchkena ; Nope...
;
; First clean up any live context we are returning from
@@ -1099,20 +1048,27 @@ lllll:
 eieio ; Make sure this gets out before owner clear
+#if ppSize != 4096
+#error per_proc_info is not 4k in size
+#endif
+
 lis r23,hi16(EXT(per_proc_info)) ; Set base per_proc
- mulli r22,r22,ppSize ; Find offset to the owner per_proc
+ slwi r22,r22,12 ; Find offset to the owner per_proc
 ori r23,r23,lo16(EXT(per_proc_info)) ; Set base per_proc
 li r24,FPUowner ; Displacement to FPU owner
 add r22,r23,r22 ; Point to the owner per_proc
- li r0,0 ; We need this in a bit
fpuinvothr: lwarx r23,r24,r22 ; Get the owner
- cmplw r23,r20 ; Does he still have this context?
- bne fpuinvoths ; Nope...
- stwcx. r0,r24,r22 ; Try to invalidate it
- bne- fpuinvothr ; Try again if there was a collision...
-
-fpuinvoths: isync
+
+ sub r0,r23,r20 ; Subtract one from the other
+ sub r21,r20,r23 ; Subtract the other from the one
+ or r21,r21,r0 ; Combine them
+ srawi r21,r21,31 ; Get a 0 if equal or -1 if not
+ and r23,r23,r21 ; Make 0 if same, unchanged if not
+ stwcx. r23,r24,r22 ; Try to invalidate it
+ bne-- fpuinvothr ; Try again if there was a collision...
+
+ isync
;
; Now if there is a savearea associated with the popped context, release it.
@@ -1122,24 +1078,31 @@ fpuinvoths: isync
 lwz r22,FPUsave(r20) ; Get pointer to the first savearea
 li r21,0 ; Assume we popped all the way out
 mr. r22,r22 ; Is there anything there?
- beq+ fpusetlvl ; No, see if we need to enable...
+ beq++ fpusetlvl ; No, see if we need to enable...
 lwz r21,SAVlevel(r22) ; Get the level of that savearea
 cmplw r21,r27 ; Is this the saved copy of the live stuff?
 bne fpusetlvl ; No, leave as is...
- lwz r24,SAVprev(r22) ; Pick up the previous area
+ lwz r24,SAVprev+4(r22) ; Pick up the previous area
 li r21,0 ; Assume we popped all the way out
 mr. r24,r24 ; Any more context stacked?
- beq- fpuonlyone ; Nope...
+ beq-- fpuonlyone ; Nope...
lwz r21,SAVlevel(r24) ; Get the level associated with save fpuonlyone: stw r24,FPUsave(r20) ; Dequeue this savearea rlwinm r3,r22,0,0,19 ; Find main savearea header - lwz r3,SACvrswap(r3) ; Get the virtual to real conversion - la r9,quickfret(r31) ; Point to the quickfret chain header + + lwz r8,quickfret(r31) ; Get the first in quickfret list (top) + lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom) + lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top) + lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom) + stw r8,SAVprev(r22) ; Link the old in (top) + stw r9,SAVprev+4(r22) ; Link the old in (bottom) xor r3,r22,r3 ; Convert to physical + stw r2,quickfret(r31) ; Set the first in quickfret list (top) + stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom) #if FPVECDBG lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) @@ -1148,11 +1111,6 @@ fpuonlyone: stw r24,FPUsave(r20) ; Dequeue this savearea sc ; (TEST/DEBUG) #endif -fpufpucdq: lwarx r0,0,r9 ; Pick up the old chain head - stw r0,SAVprev(r22) ; Move it to the current guy - stwcx. r3,0,r9 ; Save it - bne- fpufpucdq ; Someone chaged the list... - fpusetlvl: stw r21,FPUlevel(r20) ; Save the level ; @@ -1162,33 +1120,65 @@ fpusetlvl: stw r21,FPUlevel(r20) ; Save the level ; going into user state. ; -fpuchkena: bt- cr2_eq,fpuhasdfrd ; Skip if deferred, R26 already set up... +fpuchkena: bt-- cr2_eq,fpuhasdfrd ; Skip if deferred, R26 already set up... mr r26,r20 ; Use the non-deferred value -fpuhasdfrd: lwz r21,FPUowner(r31) ; Get the ID of the live context +fpuhasdfrd: +#if 0 + rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; (TEST/DEBUG) Going into user state? + beq fpunusrstt ; (TEST/DEBUG) Nope... + lwz r23,FPUlevel(r26) ; (TEST/DEBUG) Get the level ID + lwz r24,FPUsave(r26) ; (TEST/DEBUG) Get the first savearea + mr. r23,r23 ; (TEST/DEBUG) Should be level 0 + beq++ fpulvl0 ; (TEST/DEBUG) Yes... + BREAKPOINT_TRAP ; (TEST/DEBUG) + +fpulvl0: mr. r24,r24 ; (TEST/DEBUG) Any context? + beq fpunusrstt ; (TEST/DEBUG) No... + lwz r23,SAVlevel(r24) ; (TEST/DEBUG) Get level of context + lwz r21,SAVprev+4(r24) ; (TEST/DEBUG) Get previous pointer + mr. r23,r23 ; (TEST/DEBUG) Is this our user context? + beq++ fpulvl0b ; (TEST/DEBUG) Yes... + BREAKPOINT_TRAP ; (TEST/DEBUG) + +fpulvl0b: mr. r21,r21 ; (TEST/DEBUG) Is there a forward chain? + beq++ fpunusrstt ; (TEST/DEBUG) Nope... + BREAKPOINT_TRAP ; (TEST/DEBUG) + +fpunusrstt: ; (TEST/DEBUG) +#endif + + lwz r21,FPUowner(r31) ; Get the ID of the live context lwz r23,FPUlevel(r26) ; Get the level ID - cmplw cr3,r26,r21 ; Do we have the live context? lwz r24,FPUcpu(r26) ; Get the CPU that the context was last dispatched on - bne- cr3,chkvec ; No, can not possibly enable... + cmplw cr3,r26,r21 ; Do we have the live context? cmplw r30,r23 ; Are we about to launch the live level? + bne-- cr3,chkvec ; No, can not possibly enable... cmplw cr1,r19,r24 ; Was facility used on this processor last? - bne- chkvec ; No, not live... - bne- cr1,chkvec ; No, wrong cpu, have to enable later.... + bne-- chkvec ; No, not live... + bne-- cr1,chkvec ; No, wrong cpu, have to enable later.... lwz r24,FPUsave(r26) ; Get the first savearea mr. r24,r24 ; Any savearea? - beq+ fpuena ; Nope... + beq++ fpuena ; Nope... lwz r25,SAVlevel(r24) ; Get the level of savearea - lwz r0,SAVprev(r24) ; Get the previous + lwz r0,SAVprev+4(r24) ; Get the previous cmplw r30,r25 ; Is savearea for the level we are launching? - bne+ fpuena ; No, just go enable... + bne++ fpuena ; No, just go enable... 
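The rewritten fpuinvothr loop above (and its vmxinvothr twin below) drops the conditional branch from inside the lwarx/stwcx. window: the owner word is zeroed only when it equals the departing context, computed purely with arithmetic. A C rendering of that compare-and-select, assuming 32-bit registers and an arithmetic right shift (which srawi guarantees):

```c
#include <stdint.h>

static uint32_t
clear_if_match(uint32_t owner, uint32_t ctx)
{
    /* (a-b) | (b-a) is zero iff a == b; otherwise its sign bit is set,
     * so shifting it right arithmetically by 31 gives 0 or 0xFFFFFFFF. */
    uint32_t diff = (owner - ctx) | (ctx - owner);
    uint32_t keep = (uint32_t)((int32_t)diff >> 31);  /* 0 if equal, ~0 if not */

    return owner & keep;    /* 0 when we owned it, unchanged when we did not */
}
```

This keeps the reservation window short and free of branches, which appears to be the point of the rewrite.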
 stw r0,FPUsave(r26) ; Pop the chain
 rlwinm r3,r24,0,0,19 ; Find main savearea header
- lwz r3,SACvrswap(r3) ; Get the virtual to real conversion
- la r9,quickfret(r31) ; Point to the quickfret chain header
+
+ lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
+ lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
+ lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
+ lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
+ stw r8,SAVprev(r24) ; Link the old in (top)
+ stw r9,SAVprev+4(r24) ; Link the old in (bottom)
 xor r3,r24,r3 ; Convert to physical
+ stw r2,quickfret(r31) ; Set the first in quickfret list (top)
+ stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
#if FPVECDBG
 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
@@ -1196,34 +1186,11 @@ fpuhasdfrd: lwz r21,FPUowner(r31) ; Get the ID of the live context
 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
 sc ; (TEST/DEBUG)
#endif
-
-fpuhascdq: lwarx r0,0,r9 ; Pick up the old chain head
- stw r0,SAVprev(r24) ; Move it to the current guy
- stwcx. r3,0,r9 ; Save it
- bne- fpuhascdq ; Someone chaged the list...
fpuena: ori r29,r29,lo16(MASK(MSR_FP)) ; Enable facility
chkvec:
-#if 0
- rlwinm. r21,r29,0,MSR_PR_BIT,MSR_PR_BIT ; (TEST/DEBUG)
- beq+ ppppp ; (TEST/DEBUG)
- lwz r21,FPUlevel(r26) ; (TEST/DEBUG)
- mr. r21,r21 ; (TEST/DEBUG)
- bne- qqqqq ; (TEST/DEBUG)
- lwz r21,FPUsave(r26) ; (TEST/DEBUG)
- mr. r21,r21 ; (TEST/DEBUG)
- beq+ ppppp ; (TEST/DEBUG)
- lwz r22,SAVlevel(r21) ; (TEST/DEBUG)
- mr. r22,r22 ; (TEST/DEBUG)
- beq+ ppppp ; (TEST/DEBUG)
-qqqqq:
- BREAKPOINT_TRAP ; (TEST/DEBUG)
-
-ppppp: ; (TEST/DEBUG)
-#endif
-
 lwz r21,VMXlevel(r20) ; Get the facility level
 cmplw r27,r21 ; Are we returning from the active level?
@@ -1241,19 +1208,22 @@ ppppp: ; (TEST/DEBUG)
 eieio ; Make sure this gets out before owner clear
 lis r23,hi16(EXT(per_proc_info)) ; Set base per_proc
- mulli r22,r22,ppSize ; Find offset to the owner per_proc
+ slwi r22,r22,12 ; Find offset to the owner per_proc
 ori r23,r23,lo16(EXT(per_proc_info)) ; Set base per_proc
 li r24,VMXowner ; Displacement to VMX owner
 add r22,r23,r22 ; Point to the owner per_proc
- li r0,0 ; We need this in a bit
vmxinvothr: lwarx r23,r24,r22 ; Get the owner
- cmplw r23,r20 ; Does he still have this context?
- bne vmxinvoths ; Nope...
- stwcx. r0,r24,r22 ; Try to invalidate it
- bne- vmxinvothr ; Try again if there was a collision...
-
-vmxinvoths: isync
+
+ sub r0,r23,r20 ; Subtract one from the other
+ sub r21,r20,r23 ; Subtract the other from the one
+ or r21,r21,r0 ; Combine them
+ srawi r21,r21,31 ; Get a 0 if equal or -1 if not
+ and r23,r23,r21 ; Make 0 if same, unchanged if not
+ stwcx. r23,r24,r22 ; Try to invalidate it
+ bne-- vmxinvothr ; Try again if there was a collision...
+
+ isync
;
; Now if there is a savearea associated with the popped context, release it.
@@ -1263,24 +1233,31 @@ vmxinvoths: isync
 lwz r22,VMXsave(r20) ; Get pointer to the first savearea
 li r21,0 ; Assume we popped all the way out
 mr. r22,r22 ; Is there anything there?
- beq+ vmxsetlvl ; No, see if we need to enable...
+ beq++ vmxsetlvl ; No, see if we need to enable...
 lwz r21,SAVlevel(r22) ; Get the level of that savearea
 cmplw r21,r27 ; Is this the saved copy of the live stuff?
 bne vmxsetlvl ; No, leave as is...
- lwz r24,SAVprev(r22) ; Pick up the previous area
+ lwz r24,SAVprev+4(r22) ; Pick up the previous area
 li r21,0 ; Assume we popped all the way out
 mr. r24,r24 ; Any more context?
- beq- vmxonlyone ; Nope...
+ beq-- vmxonlyone ; Nope...
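The repeated quickfret sequences in these hunks replace the old lwarx/stwcx. list push with plain stores of a two-word (64-bit-ready) link: the old head is linked behind the dying savearea, and the savearea's physical address, virtual XOR SACvrswap, becomes the new head, with the holdQFret flag set earlier keeping the consumer away in the meantime. A sketch of that push, with an illustrative struct layout:

```c
#include <stdint.h>

struct savearea {                /* illustrative fragment of the layout */
    uint64_t sav_prev;           /* SAVprev / SAVprev+4 */
};

struct per_proc {                /* illustrative fragment */
    uint64_t quickfret;          /* head of the deferred-free list */
};

static void
quickfret_push(struct per_proc *pp, struct savearea *sav, uint64_t vrswap)
{
    sav->sav_prev = pp->quickfret;               /* link the old head behind us */

    /* publish our physical address: virtual XOR the page's SACvrswap
     * (the upper word of a 32-bit virtual address is zero, so one
     * 64-bit XOR yields the two words the assembly stores) */
    pp->quickfret = (uint64_t)(uintptr_t)sav ^ vrswap;
}
```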
 lwz r21,SAVlevel(r24) ; Get the level associated with save
vmxonlyone: stw r24,VMXsave(r20) ; Dequeue this savearea
 rlwinm r3,r22,0,0,19 ; Find main savearea header
- lwz r3,SACvrswap(r3) ; Get the virtual to real conversion
- la r9,quickfret(r31) ; Point to the quickfret chain header
- xor r3,r22,r3 ; Convert to physical
+
+ lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
+ lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
+ lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
+ lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
+ stw r8,SAVprev(r22) ; Link the old in (top)
+ stw r9,SAVprev+4(r22) ; Link the old in (bottom)
+ xor r3,r22,r3 ; Convert to physical
+ stw r2,quickfret(r31) ; Set the first in quickfret list (top)
+ stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
#if FPVECDBG
 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
@@ -1288,11 +1265,6 @@ vmxonlyone: stw r24,VMXsave(r20) ; Dequeue this savearea
 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
 sc ; (TEST/DEBUG)
#endif
-
-vmxhscdq: lwarx r0,0,r9 ; Pick up the old chain head
- stw r0,SAVprev(r22) ; Move it to the current guy
- stwcx. r3,0,r9 ; Save it
- bne- vmxhscdq ; Someone chaged the list...
vmxsetlvl: stw r21,VMXlevel(r20) ; Save the level
@@ -1304,26 +1276,33 @@ vmxchkena: lwz r21,VMXowner(r31) ; Get the ID of the live context
 lwz r23,VMXlevel(r26) ; Get the level ID
 cmplw r26,r21 ; Do we have the live context?
 lwz r24,VMXcpu(r26) ; Get the CPU that the context was last dispatched on
- bne- setena ; No, can not possibly enable...
+ bne-- setena ; No, can not possibly enable...
 cmplw r30,r23 ; Are we about to launch the live level?
 cmplw cr1,r19,r24 ; Was facility used on this processor last?
- bne- setena ; No, not live...
- bne- cr1,setena ; No, wrong cpu, have to enable later....
+ bne-- setena ; No, not live...
+ bne-- cr1,setena ; No, wrong cpu, have to enable later....
 lwz r24,VMXsave(r26) ; Get the first savearea
 mr. r24,r24 ; Any savearea?
- beq+ vmxena ; Nope...
+ beq++ vmxena ; Nope...
 lwz r25,SAVlevel(r24) ; Get the level of savearea
- lwz r0,SAVprev(r24) ; Get the previous
+ lwz r0,SAVprev+4(r24) ; Get the previous
 cmplw r30,r25 ; Is savearea for the level we are launching?
- bne+ vmxena ; No, just go enable...
+ bne++ vmxena ; No, just go enable...
 stw r0,VMXsave(r26) ; Pop the chain
 rlwinm r3,r24,0,0,19 ; Find main savearea header
- lwz r3,SACvrswap(r3) ; Get the virtual to real conversion
- la r9,quickfret(r31) ; Point to the quickfret chain header
+
+ lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
+ lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
+ lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
+ lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
+ stw r8,SAVprev(r24) ; Link the old in (top)
+ stw r9,SAVprev+4(r24) ; Link the old in (bottom)
 xor r3,r24,r3 ; Convert to physical
+ stw r2,quickfret(r31) ; Set the first in quickfret list (top)
+ stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
#if FPVECDBG
 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
@@ -1332,20 +1311,19 @@ vmxchkena: lwz r21,VMXowner(r31) ; Get the ID of the live context
 sc ; (TEST/DEBUG)
#endif
-vmxckcdq: lwarx r0,0,r9 ; Pick up the old chain head
- stw r0,SAVprev(r24) ; Move it to the current guy
- stwcx. r3,0,r9 ; Save it
- bne- vmxckcdq ; Someone chaged the list...
-
vmxena: oris r29,r29,hi16(MASK(MSR_VEC)) ; Enable facility
-
-setena: rlwinm.
r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; Are we about to launch user state? - rlwinm r20,r29,(((31-vectorCngbit)+(MSR_VEC_BIT+1))&31),vectorCngbit,vectorCngbit ; Set flag if we enabled vector - stw r29,savesrr1(r27) ; Turn facility on or off +setena: lwz r18,cioSpace(r28) ; Get the space ID in case we are launching user + rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; Are we about to launch user state? + li r0,0 ; Get set to release quickfret holdoff crmove cr7_eq,cr0_eq ; Remember if we are going to user state - lwz r19,deferctx(r28) ; Get any deferred facility context switch rlwimi. r20,r29,(((31-floatCngbit)+(MSR_FP_BIT+1))&31),floatCngbit,floatCngbit ; Set flag if we enabled floats + lwz r19,deferctx(r28) ; Get any deferred facility context switch + rlwinm r20,r29,(((31-vectorCngbit)+(MSR_VEC_BIT+1))&31),vectorCngbit,vectorCngbit ; Set flag if we enabled vector + stw r29,savesrr1+4(r27) ; Turn facility on or off + stw r0,holdQFret(r31) ; Release quickfret + oris r18,r18,hi16(cioSwitchAway) ; Set the switch-away bit in case we go to user + beq setenaa ; Neither float nor vector turned on.... lwz r5,ACT_MACT_SPF(r28) ; Get activation copy @@ -1365,15 +1343,15 @@ setenaa: mfdec r24 ; Get decrementer nodefer: lwz r22,qactTimer(r28) ; Get high order quick activation timer mr. r24,r24 ; See if it has popped already... lwz r23,qactTimer+4(r28) ; Get low order qact timer - ble- chkenax ; We have popped or are just about to... + ble- chkifuser ; We have popped or are just about to... segtb: mftbu r20 ; Get the upper time base mftb r21 ; Get the low mftbu r19 ; Get upper again or. r0,r22,r23 ; Any time set? cmplw cr1,r20,r19 ; Did they change? - beq+ chkenax ; No time set.... - bne- cr1,segtb ; Timebase ticked, get them again... + beq++ chkifuser ; No time set.... + bne-- cr1,segtb ; Timebase ticked, get them again... subfc r6,r21,r23 ; Subtract current from qact time li r0,0 ; Make a 0 @@ -1381,23 +1359,25 @@ segtb: mftbu r20 ; Get the upper time base subfze r0,r0 ; Get a 0 if qact was bigger than current, -1 otherwise andc. r12,r5,r0 ; Set 0 if qact has passed andc r13,r6,r0 ; Set 0 if qact has passed - bne chkenax ; If high order is non-zero, this is too big for a decrementer + bne chkifuser ; If high order is non-zero, this is too big for a decrementer cmplw r13,r24 ; Is this earlier than the decrementer? (logical compare takes care of high bit on) - bge+ chkenax ; No, do not reset decrementer... + bge++ chkifuser ; No, do not reset decrementer... mtdec r13 ; Set our value +chkifuser: beq-- cr7,chkenax ; Skip this if we are going to kernel... + stw r18,cioSpace(r28) ; Half-invalidate to force MapUserAddressSpace to reload SRs + chkenax: #if DEBUG lwz r20,SAVact(r27) ; (TEST/DEBUG) Make sure our restore - lwz r21,PP_ACTIVE_THREAD(r31) ; (TEST/DEBUG) with the current act. + mfsprg r21, 1 ; (TEST/DEBUG) with the current act. cmpwi r21,0 ; (TEST/DEBUG) - beq- yeswereok ; (TEST/DEBUG) - lwz r21,THREAD_TOP_ACT(r21) ; (TEST/DEBUG) + beq-- yeswereok ; (TEST/DEBUG) cmplw r21,r20 ; (TEST/DEBUG) - beq+ yeswereok ; (TEST/DEBUG) + beq++ yeswereok ; (TEST/DEBUG) lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest @@ -1408,13 +1388,40 @@ chkenax: yeswereok: #endif - rlwinm r5,r27,0,0,19 ; Round savearea down to page bndry - lwz r5,SACvrswap(r5) ; Get the conversion from virtual to real - xor r3,r27,r5 ; Flip to physical address + mr r3,r27 ; Pass savearea back b EXT(exception_exit) ; We are all done now... 
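The qactTimer arithmetic a few lines back decides whether the decrementer must be shortened for a pending quick-activation timer: take the 64-bit distance from now to the deadline (the assembly re-reads the timebase until its upper half is stable), bail if the deadline has passed or does not fit in 32 bits, and otherwise program the earlier of the two expiries. Approximately, in C:

```c
#include <stdint.h>

static uint32_t
trim_decrementer(uint64_t qact_deadline, uint64_t timebase_now, uint32_t dec)
{
    if (qact_deadline == 0)
        return dec;                     /* no quick-activation timer armed */

    uint64_t delta = qact_deadline - timebase_now;

    if ((int64_t)delta <= 0)            /* popped, or just about to */
        return dec;
    if (delta >> 32)                    /* too far out for a 32-bit DEC */
        return dec;
    if ((uint32_t)delta < dec)
        dec = (uint32_t)delta;          /* fire at the earlier deadline */

    return dec;                         /* the value the asm would mtdec */
}
```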
+; +; Null PPC call - performance testing, does absolutely nothing +; + + .align 5 + + .globl EXT(ppcNull) + +LEXT(ppcNull) + + li r3,-1 ; Make sure we test no asts + blr + + +; +; Instrumented null PPC call - performance testing, does absolutely nothing +; Forces various timestamps to be returned. +; + + .align 5 + + .globl EXT(ppcNullinst) + +LEXT(ppcNullinst) + + li r3,-1 ; Make sure we test no asts + blr + + /* * Here's where we handle the fastpath stuff * We'll do what we can here because registers are already @@ -1433,8 +1440,10 @@ yeswereok: .align 5 -fastpath: cmplwi cr3,r0,0x7FF1 ; Is it CthreadSetSelfNumber? - bnelr- cr3 ; Not a fast path... +fastpath: cmplwi cr3,r0,0x7FF5 ; Is this a null fastpath? + beq-- cr3,fastexutl ; Yes, bail fast... + cmplwi cr3,r0,0x7FF1 ; Is it CthreadSetSelfNumber? + bnelr-- cr3 ; Not a fast path... /* * void cthread_set_self(cproc_t p) @@ -1450,17 +1459,15 @@ fastpath: cmplwi cr3,r0,0x7FF1 ; Is it CthreadSetSelfNumber? CthreadSetSelfNumber: - lwz r5,saver3(r4) /* Retrieve the self number */ + lwz r5,saver3+4(r4) /* Retrieve the self number */ stw r5,CTHREAD_SELF(r13) /* Remember it */ stw r5,UAW(r25) /* Prime the per_proc_info with it */ .globl EXT(fastexit) EXT(fastexit): - rlwinm r9,r4,0,0,19 /* Round down to the base savearea block */ - lwz r9,SACvrswap(r9) /* Get the conversion from virtual to real */ - xor r3,r4,r9 /* Switch savearea to physical addressing */ - b EXT(exception_exit) /* Go back to the caller... */ +fastexutl: mr r3,r4 ; Pass back savearea + b EXT(exception_exit) ; Go back to the caller... /* @@ -1474,12 +1481,12 @@ EXT(fastexit): checkassist: lwz r0,saveexception(r4) ; Get the exception code - lwz r23,savesrr1(r4) ; Get the interrupted MSR + lwz r23,savesrr1+4(r4) ; Get the interrupted MSR lwz r26,ACT_MACT_BEDA(r13) ; Get Blue Box Descriptor Area mtcrf 0x18,r23 ; Check what SRR1 says lwz r24,ACT_MACT_BTS(r13) ; Get the table start cmplwi r0,T_AST ; Check for T_AST trap - lwz r27,savesrr0(r4) ; Get trapped address + lwz r27,savesrr0+4(r4) ; Get trapped address crnand cr1_eq,SRR1_PRG_TRAP_BIT,MSR_PR_BIT ; We need both trap and user state sub r24,r27,r24 ; See how far into it we are cror cr0_eq,cr0_eq,cr1_eq ; Need to bail if AST or not trap or not user state @@ -1509,7 +1516,7 @@ exitFromVM: mr r30,r4 ; Get the savearea LEXT(retFromVM) mfsprg r10,0 ; Restore the per_proc info mr r8,r3 ; Get the activation - lwz r4,SAVprev(r30) ; Pick up the previous savearea + lwz r4,SAVprev+4(r30) ; Pick up the previous savearea mr r3,r30 ; Put savearea in proper register for common code lwz r11,SAVflags(r30) ; Get the flags of the current savearea rlwinm r11,r11,0,15,13 ; Clear the syscall flag @@ -1543,15 +1550,13 @@ LEXT(retFromVM) .align 5 .globl EXT(chandler) -LEXT(chandler) /* Choke handler */ +LEXT(chandler) ; Choke handler - lis r25,hi16(EXT(trcWork)) ; (TEST/DEBUG) - li r31,0 ; (TEST/DEBUG) - ori r25,r25,lo16(EXT(trcWork)) ; (TEST/DEBUG) - stw r31,traceMask(r25) ; (TEST/DEBUG) + li r31,0 ; Get a 0 + mfsprg r25,0 ; Get the per_proc + stw r31,traceMask(0) ; Force tracing off right now - mfsprg r25,0 ; Get the per_proc lwz r1,PP_DEBSTACKPTR(r25) ; Get debug stack pointer cmpwi r1,-1 ; Are we already choking? @@ -1567,7 +1572,7 @@ chokespin: addi r31,r31,1 ; Spin and hope for an analyzer connection... chokefirst: li r0,-1 ; Set choke value mr. r1,r1 ; See if we are on debug stack yet - lwz r10,saver1(r4) ; + lwz r10,saver1+4(r4) ; stw r0,PP_DEBSTACKPTR(r25) ; Show we are choking bne chokestart ; We are not on the debug stack yet... 
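fastpath now recognizes two selectors before any full kernel context is built: 0x7FF5, the null call added above for performance measurement, and 0x7FF1, CthreadSetSelfNumber, which records the caller's value in the activation and primes the per_proc user-assist word. As a sketch, with an illustrative function shape and out-parameters:

```c
#include <stdbool.h>
#include <stdint.h>

static bool
fastpath_dispatch(uint32_t selector, uint32_t value,
                  uint32_t *cthread_self_slot,   /* CTHREAD_SELF(r13) */
                  uint32_t *user_assist_word)    /* UAW(r25) */
{
    switch (selector) {
    case 0x7FF5:                    /* null fast path: do nothing, exit fast */
        return true;
    case 0x7FF1:                    /* CthreadSetSelfNumber */
        *cthread_self_slot = value; /* remember it in the activation */
        *user_assist_word  = value; /* prime the per_proc with it */
        return true;
    default:
        return false;               /* not a fast path, take the slow exit */
    }
}
```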
@@ -1592,7 +1597,23 @@ chokestart: li r0,0 ; Get a zero ; versave: -#if 1 +#if 0 + lis r22,hi16(EXT(DebugWork)) ; (TEST/DEBUG) + ori r22,r22,lo16(EXT(DebugWork)) ; (TEST/DEBUG) + lwz r23,0(r22) ; (TEST/DEBUG) + mr. r23,r23 ; (TEST/DEBUG) + beqlr- ; (TEST/DEBUG) + mfsprg r20,0 ; (TEST/DEBUG) + lwz r21,pfAvailable(r20) ; (TEST/DEBUG) + mr. r21,r21 ; (TEST/DEBUG) + bnelr+ ; (TEST/DEBUG) + + stw r22,0(r22) ; (TEST/DEBUG) Lock out more checks + BREAKPOINT_TRAP ; (TEST/DEBUG) Get into debugger +#endif + +#if 0 + ;; This code is broken and migration will make the matter even worse ; ; Make sure that all savearea chains have the right type on them ; @@ -1671,190 +1692,9 @@ versavetype: stw r22,0(r22) ; (TEST/DEBUG) Lock out more checks BREAKPOINT_TRAP ; (TEST/DEBUG) Get into debugger -versvok: lwz r20,SAVprev(r20) ; (TEST/DEBUG) Get the previous one +versvok: lwz r20,SAVprev+4(r20) ; (TEST/DEBUG) Get the previous one b versavetype ; (TEST/DEBUG) Go check its type... #endif -#if 0 -; -; Make sure there are no circular links in the float chain -; And that FP is marked busy in it. -; And the only the top is marked invalid. -; And that the owning PCB is correct. -; - - lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG) - lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) - ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG) - ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) - li r20,0 ; (TEST/DEBUG) - lwz r26,0(r27) ; (TEST/DEBUG) - lwz r27,psthreadcnt(r28) ; (TEST/DEBUG) - mr. r26,r26 ; (TEST/DEBUG) - lwz r28,psthreads(r28) ; (TEST/DEBUG) - bnelr- ; (TEST/DEBUG) - -fcknxtth: mr. r27,r27 ; (TEST/DEBUG) - beqlr- ; (TEST/DEBUG) - - lwz r26,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) - -fckact: mr. r26,r26 ; (TEST/DEBUG) - bne+ fckact2 ; (TEST/DEBUG) - - lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Next in line - subi r27,r27,1 ; (TEST/DEBUG) - b fcknxtth ; (TEST/DEBUG) - -fckact2: lwz r20,ACT_MACT_FPU(r26) ; (TEST/DEBUG) Get FPU chain - li r29,1 ; (TEST/DEBUG) - li r22,0 ; (TEST/DEBUG) - -fckact3: mr. r20,r20 ; (TEST/DEBUG) Are there any? - beq+ fckact5 ; (TEST/DEBUG) No... - - addi r22,r22,1 ; (TEST/DEBUG) Count chain depth - - lwz r21,SAVflags(r20) ; (TEST/DEBUG) Get the flags - rlwinm. r21,r21,0,1,1 ; (TEST/DEBUG) FP busy? - bne+ fckact3a ; (TEST/DEBUG) Yeah... - lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) - ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) - stw r27,0(r27) ; (TEST/DEBUG) - BREAKPOINT_TRAP ; (TEST/DEBUG) Die - -fckact3a: cmplwi r22,1 ; (TEST/DEBUG) At first SA? - beq+ fckact3b ; (TEST/DEBUG) Yeah, invalid is ok... - lwz r21,SAVlvlfp(r20) ; (TEST/DEBUG) Get level - cmplwi r21,1 ; (TEST/DEBUG) Is it invalid? - bne+ fckact3b ; (TEST/DEBUG) Nope, it is ok... - lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) - ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) - stw r27,0(r27) ; (TEST/DEBUG) - BREAKPOINT_TRAP ; (TEST/DEBUG) Die - -fckact3b: lwz r21,SAVact(r20) ; (TEST/DEBUG) Get the owner - cmplw r21,r26 ; (TEST/DEBUG) Correct activation? - beq+ fckact3c ; (TEST/DEBUG) Yup... - lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) - ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) - stw r27,0(r27) ; (TEST/DEBUG) - BREAKPOINT_TRAP ; (TEST/DEBUG) Die - -fckact3c: ; (TEST/DEBUG) - lbz r21,SAVflags+3(r20) ; (TEST/DEBUG) Pick up the test byte - mr. r21,r21 ; (TEST/DEBUG) marked? - beq+ fckact4 ; (TEST/DEBUG) No, good... 
- - lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) - ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) - stw r27,0(r27) ; (TEST/DEBUG) - BREAKPOINT_TRAP ; (TEST/DEBUG) - -fckact4: stb r29,SAVflags+3(r20) ; (TEST/DEBUG) Set the test byte - lwz r20,SAVprefp(r20) ; (TEST/DEBUG) Next in list - b fckact3 ; (TEST/DEBUG) Try it... - -fckact5: lwz r20,ACT_MACT_FPU(r26) ; (TEST/DEBUG) Get FPU chain - li r29,0 ; (TEST/DEBUG) - -fckact6: mr. r20,r20 ; (TEST/DEBUG) Are there any? - beq+ fcknact ; (TEST/DEBUG) No... - - stb r29,SAVflags+3(r20) ; (TEST/DEBUG) Clear the test byte - lwz r20,SAVprefp(r20) ; (TEST/DEBUG) Next in list - b fckact6 ; (TEST/DEBUG) Try it... - -fcknact: lwz r26,ACT_LOWER(r26) ; (TEST/DEBUG) Next activation - b fckact ; (TEST/DEBUG) -#endif - -#if 0 -; -; Make sure in use count matches found savearea. This is -; not always accurate. There is a variable "fuzz" factor in count. - - lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG) - lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) - ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG) - ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) - li r20,0 ; (TEST/DEBUG) - lwz r26,0(r27) ; (TEST/DEBUG) - lwz r27,psthreadcnt(r28) ; (TEST/DEBUG) - mr. r26,r26 ; (TEST/DEBUG) - lwz r28,psthreads(r28) ; (TEST/DEBUG) - bnelr- ; (TEST/DEBUG) - -cknxtth: mr. r27,r27 ; (TEST/DEBUG) - beq- cktotal ; (TEST/DEBUG) - - lwz r26,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) - -ckact: mr. r26,r26 ; (TEST/DEBUG) - bne+ ckact2 ; (TEST/DEBUG) - - lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Next in line - subi r27,r27,1 ; (TEST/DEBUG) - b cknxtth ; (TEST/DEBUG) - -ckact2: lwz r29,ACT_MACT_PCB(r26) ; (TEST/DEBUG) - -cknorm: mr. r29,r29 ; (TEST/DEBUG) - beq- cknormd ; (TEST/DEBUG) - - addi r20,r20,1 ; (TEST/DEBUG) Count normal savearea - - lwz r29,SAVprev(r29) ; (TEST/DEBUG) - b cknorm ; (TEST/DEBUG) - -cknormd: lwz r29,ACT_MACT_FPU(r26) ; (TEST/DEBUG) - -ckfpu: mr. r29,r29 ; (TEST/DEBUG) - beq- ckfpud ; (TEST/DEBUG) - - lwz r21,SAVflags(r29) ; (TEST/DEBUG) - rlwinm. r21,r21,0,0,0 ; (TEST/DEBUG) See if already counted - bne- cknfpu ; (TEST/DEBUG) - - addi r20,r20,1 ; (TEST/DEBUG) Count fpu savearea - -cknfpu: lwz r29,SAVprefp(r29) ; (TEST/DEBUG) - b ckfpu ; (TEST/DEBUG) - -ckfpud: lwz r29,ACT_MACT_VMX(r26) ; (TEST/DEBUG) - -ckvmx: mr. r29,r29 ; (TEST/DEBUG) - beq- ckvmxd ; (TEST/DEBUG) - - lwz r21,SAVflags(r29) ; (TEST/DEBUG) - rlwinm. r21,r21,0,0,1 ; (TEST/DEBUG) See if already counted - bne- cknvmx ; (TEST/DEBUG) - - addi r20,r20,1 ; (TEST/DEBUG) Count vector savearea - -cknvmx: lwz r29,SAVprevec(r29) ; (TEST/DEBUG) - b ckvmx ; (TEST/DEBUG) - -ckvmxd: lwz r26,ACT_LOWER(r26) ; (TEST/DEBUG) Next activation - b ckact ; (TEST/DEBUG) - -cktotal: lis r28,hi16(EXT(saveanchor)) ; (TEST/DEBUG) - lis r27,hi16(EXT(real_ncpus)) ; (TEST/DEBUG) - ori r28,r28,lo16(EXT(saveanchor)) ; (TEST/DEBUG) - ori r27,r27,lo16(EXT(real_ncpus)) ; (TEST/DEBUG) - - lwz r21,SVinuse(r28) ; (TEST/DEBUG) - lwz r27,0(r27) ; (TEST/DEBUG) Get the number of CPUs - sub. r29,r21,r20 ; (TEST/DEBUG) Get number accounted for - blt- badsave ; (TEST/DEBUG) Have too many in use... 
- sub r26,r29,r27 ; (TEST/DEBUG) Should be 1 unaccounted for for each processor - cmpwi r26,10 ; (TEST/DEBUG) Allow a 10 area slop factor - bltlr+ ; (TEST/DEBUG) - -badsave: lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) - ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) - stw r27,0(r27) ; (TEST/DEBUG) - BREAKPOINT_TRAP ; (TEST/DEBUG) -#endif #endif diff --git a/osfmk/ppc/hw_lock.s b/osfmk/ppc/hw_lock.s index ca6d2268f..1b2352d36 100644 --- a/osfmk/ppc/hw_lock.s +++ b/osfmk/ppc/hw_lock.s @@ -27,9 +27,6 @@ #include #include #include - -#include - #include #include #include @@ -42,45 +39,34 @@ #define ILK_LOCKED 0x01 #define WAIT_FLAG 0x02 -#define SLOCK_FAST 0x02 #define TH_FN_OWNED 0x01 -; -; NOTE: make sure that PREEMPTSTACK in aligned_data is -; set the same as it is here. This is the number of -; traceback entries we can handle per processor -; -; A value of 0 disables the stack. -; -#define PREEMPTSTACK 0 #define CHECKNMI 0 #define CHECKLOCKS 1 -#include - -#define PROLOG(space) \ - stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \ - mflr r0 __ASMNL__ \ - stw r3,FM_ARG0(r1) __ASMNL__ \ +#define PROLOG(space) \ + stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \ + mflr r0 __ASMNL__ \ + stw r3,FM_ARG0(r1) __ASMNL__ \ stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__ -#define EPILOG \ - lwz r1,0(r1) __ASMNL__ \ - lwz r0,FM_LR_SAVE(r1) __ASMNL__ \ - mtlr r0 __ASMNL__ +#define EPILOG \ + lwz r1,0(r1) __ASMNL__ \ + lwz r0,FM_LR_SAVE(r1) __ASMNL__ \ + mtlr r0 __ASMNL__ #if MACH_LDEBUG && CHECKLOCKS /* * Routines for general lock debugging. */ -/* Gets lock check flags in CR6: CR bits 24-27 */ +/* + * Gets lock check flags in CR6: CR bits 24-27 + */ -#define CHECK_SETUP(rg) \ - lis rg,hi16(EXT(dgWork)) __ASMNL__ \ - ori rg,rg,lo16(EXT(dgWork)) __ASMNL__ \ - lbz rg,dgFlags(rg) __ASMNL__ \ - mtcrf 2,rg __ASMNL__ +#define CHECK_SETUP(rg) \ + lbz rg,dgFlags(0) __ASMNL__ \ + mtcrf 2,rg __ASMNL__ /* @@ -88,15 +74,15 @@ * mismatch. Detects calls to Mutex functions with * type simplelock and vice versa. 
*/ -#define CHECK_MUTEX_TYPE() \ - bt 24+disLktypeb,1f __ASMNL__ \ - lwz r10,MUTEX_TYPE(r3) __ASMNL__ \ - cmpwi r10,MUTEX_TAG __ASMNL__ \ - beq+ 1f __ASMNL__ \ - lis r3,hi16(not_a_mutex) __ASMNL__ \ - ori r3,r3,lo16(not_a_mutex) __ASMNL__ \ - bl EXT(panic) __ASMNL__ \ - lwz r3,FM_ARG0(r1) __ASMNL__ \ +#define CHECK_MUTEX_TYPE() \ + bt 24+disLktypeb,1f __ASMNL__ \ + lwz r10,MUTEX_TYPE(r3) __ASMNL__ \ + cmpwi r10,MUTEX_TAG __ASMNL__ \ + beq+ 1f __ASMNL__ \ + lis r3,hi16(not_a_mutex) __ASMNL__ \ + ori r3,r3,lo16(not_a_mutex) __ASMNL__ \ + bl EXT(panic) __ASMNL__ \ + lwz r3,FM_ARG0(r1) __ASMNL__ \ 1: .data @@ -104,15 +90,15 @@ not_a_mutex: STRINGD "not a mutex!\n\000" .text -#define CHECK_SIMPLE_LOCK_TYPE() \ - bt 24+disLktypeb,1f __ASMNL__ \ - lwz r10,SLOCK_TYPE(r3) __ASMNL__ \ - cmpwi r10,USLOCK_TAG __ASMNL__ \ - beq+ 1f __ASMNL__ \ - lis r3,hi16(not_a_slock) __ASMNL__ \ - ori r3,r3,lo16(not_a_slock) __ASMNL__ \ - bl EXT(panic) __ASMNL__ \ - lwz r3,FM_ARG0(r1) __ASMNL__ \ +#define CHECK_SIMPLE_LOCK_TYPE() \ + bt 24+disLktypeb,1f __ASMNL__ \ + lhz r10,SLOCK_TYPE(r3) __ASMNL__ \ + cmpwi r10,USLOCK_TAG __ASMNL__ \ + beq+ 1f __ASMNL__ \ + lis r3,hi16(not_a_slock) __ASMNL__ \ + ori r3,r3,lo16(not_a_slock) __ASMNL__ \ + bl EXT(panic) __ASMNL__ \ + lwz r3,FM_ARG0(r1) __ASMNL__ \ 1: .data @@ -120,24 +106,26 @@ not_a_slock: STRINGD "not a simple lock!\n\000" .text -#define CHECK_NO_SIMPLELOCKS() \ - bt 24+disLkNmSimpb,2f __ASMNL__ \ - mfmsr r11 __ASMNL__ \ - rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 __ASMNL__ \ - rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 __ASMNL__ \ - rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \ - mtmsr r10 __ASMNL__ \ - isync __ASMNL__ \ - mfsprg r10,0 __ASMNL__ \ - lwz r10,PP_SIMPLE_LOCK_CNT(r10) __ASMNL__ \ - cmpwi r10,0 __ASMNL__ \ - beq+ 1f __ASMNL__ \ - lis r3,hi16(simple_locks_held) __ASMNL__ \ - ori r3,r3,lo16(simple_locks_held) __ASMNL__ \ - bl EXT(panic) __ASMNL__ \ - lwz r3,FM_ARG0(r1) __ASMNL__ \ -1: __ASMNL__ \ - mtmsr r11 __ASMNL__ \ +#define CHECK_NO_SIMPLELOCKS() \ + bt 24+disLkNmSimpb,2f __ASMNL__ \ + lis r10,hi16(MASK(MSR_VEC)) __ASMNL__ \ + ori r10,r10,lo16(MASK(MSR_FP)) __ASMNL__ \ + mfmsr r11 __ASMNL__ \ + andc r11,r11,r10 __ASMNL__ \ + ori r10,r10,lo16(MASK(MSR_EE)) __ASMNL__ \ + andc r10,r11,r10 __ASMNL__ \ + mtmsr r10 __ASMNL__ \ + isync __ASMNL__ \ + mfsprg r10,0 __ASMNL__ \ + lwz r10,PP_SIMPLE_LOCK_CNT(r10) __ASMNL__ \ + cmpwi r10,0 __ASMNL__ \ + beq+ 1f __ASMNL__ \ + lis r3,hi16(simple_locks_held) __ASMNL__ \ + ori r3,r3,lo16(simple_locks_held) __ASMNL__ \ + bl EXT(panic) __ASMNL__ \ + lwz r3,FM_ARG0(r1) __ASMNL__ \ +1: __ASMNL__ \ + mtmsr r11 __ASMNL__ \ 2: .data @@ -148,55 +136,58 @@ simple_locks_held: /* * Verifies return to the correct thread in "unlock" situations. 
*/ - -#define CHECK_THREAD(thread_offset) \ - bt 24+disLkThreadb,2f __ASMNL__ \ - mfmsr r11 __ASMNL__ \ - rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 __ASMNL__ \ - rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 __ASMNL__ \ - rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \ - mtmsr r10 __ASMNL__ \ - isync __ASMNL__ \ - mfsprg r10,0 __ASMNL__ \ - lwz r10,PP_ACTIVE_THREAD(r10) __ASMNL__ \ - cmpwi r10,0 __ASMNL__ \ - beq- 1f __ASMNL__ \ - lwz r9,thread_offset(r3) __ASMNL__ \ - cmpw r9,r10 __ASMNL__ \ - beq+ 1f __ASMNL__ \ - lis r3,hi16(wrong_thread) __ASMNL__ \ - ori r3,r3,lo16(wrong_thread) __ASMNL__ \ - bl EXT(panic) __ASMNL__ \ - lwz r3,FM_ARG0(r1) __ASMNL__ \ -1: __ASMNL__ \ - mtmsr r11 __ASMNL__ \ +#define CHECK_THREAD(thread_offset) \ + bt 24+disLkThreadb,2f __ASMNL__ \ + lis r10,hi16(MASK(MSR_VEC)) __ASMNL__ \ + ori r10,r10,lo16(MASK(MSR_FP)) __ASMNL__ \ + mfmsr r11 __ASMNL__ \ + andc r11,r11,r10 __ASMNL__ \ + ori r10,r10,lo16(MASK(MSR_EE)) __ASMNL__ \ + andc r10,r11,r10 __ASMNL__ \ + mtmsr r10 __ASMNL__ \ + isync __ASMNL__ \ + mfsprg r10,1 __ASMNL__ \ + lwz r10,ACT_THREAD(r10) __ASMNL__ \ + cmpwi r10,0 __ASMNL__ \ + beq- 1f __ASMNL__ \ + lwz r9,thread_offset(r3) __ASMNL__ \ + cmpw r9,r10 __ASMNL__ \ + beq+ 1f __ASMNL__ \ + lis r3,hi16(wrong_thread) __ASMNL__ \ + ori r3,r3,lo16(wrong_thread) __ASMNL__ \ + bl EXT(panic) __ASMNL__ \ + lwz r3,FM_ARG0(r1) __ASMNL__ \ +1: __ASMNL__ \ + mtmsr r11 __ASMNL__ \ 2: .data wrong_thread: STRINGD "wrong thread!\n\000" .text -#define CHECK_MYLOCK(thread_offset) \ - bt 24+disLkMyLckb,2f __ASMNL__ \ - mfmsr r11 __ASMNL__ \ - rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 __ASMNL__ \ - rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 __ASMNL__ \ - rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \ - mtmsr r10 __ASMNL__ \ - isync __ASMNL__ \ - mfsprg r10,0 __ASMNL__ \ - lwz r10,PP_ACTIVE_THREAD(r10) __ASMNL__ \ - cmpwi r10,0 __ASMNL__ \ - beq- 1f __ASMNL__ \ - lwz r9, thread_offset(r3) __ASMNL__ \ - cmpw r9,r10 __ASMNL__ \ - bne+ 1f __ASMNL__ \ - lis r3, HIGH_ADDR(mylock_attempt) __ASMNL__ \ - ori r3,r3,LOW_ADDR(mylock_attempt) __ASMNL__ \ - bl EXT(panic) __ASMNL__ \ - lwz r3,FM_ARG0(r1) __ASMNL__ \ -1: __ASMNL__ \ - mtmsr r11 __ASMNL__ \ +#define CHECK_MYLOCK(thread_offset) \ + bt 24+disLkMyLckb,2f __ASMNL__ \ + lis r10,hi16(MASK(MSR_VEC)) __ASMNL__ \ + ori r10,r10,lo16(MASK(MSR_FP)) __ASMNL__ \ + mfmsr r11 __ASMNL__ \ + andc r11,r11,r10 __ASMNL__ \ + ori r10,r10,lo16(MASK(MSR_EE)) __ASMNL__ \ + andc r10,r11,r10 __ASMNL__ \ + mtmsr r10 __ASMNL__ \ + isync __ASMNL__ \ + mfsprg r10,1 __ASMNL__ \ + lwz r10,ACT_THREAD(r10) __ASMNL__ \ + cmpwi r10,0 __ASMNL__ \ + beq- 1f __ASMNL__ \ + lwz r9, thread_offset(r3) __ASMNL__ \ + cmpw r9,r10 __ASMNL__ \ + bne+ 1f __ASMNL__ \ + lis r3, hi16(mylock_attempt) __ASMNL__ \ + ori r3,r3,lo16(mylock_attempt) __ASMNL__ \ + bl EXT(panic) __ASMNL__ \ + lwz r3,FM_ARG0(r1) __ASMNL__ \ +1: __ASMNL__ \ + mtmsr r11 __ASMNL__ \ 2: .data @@ -216,16 +207,17 @@ mylock_attempt: #endif /* MACH_LDEBUG */ /* - * void hw_lock_init(hw_lock_t) + * void hw_lock_init(hw_lock_t) * - * Initialize a hardware lock. These locks should be cache aligned and a multiple - * of cache size. + * Initialize a hardware lock. 
*/ + .align 5 + .globl EXT(hw_lock_init) -ENTRY(hw_lock_init, TAG_NO_FRAME_USED) +LEXT(hw_lock_init) - li r0, 0 /* set lock to free == 0 */ - stw r0, 0(r3) /* Initialize the lock */ + li r0, 0 ; set lock to free == 0 + stw r0, 0(r3) ; Initialize the lock blr /* @@ -234,429 +226,409 @@ ENTRY(hw_lock_init, TAG_NO_FRAME_USED) * Unconditionally release lock. * Release preemption level. */ - - .align 5 .globl EXT(hw_lock_unlock) LEXT(hw_lock_unlock) -#if 0 - lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ - lis r5,0xFFFF /* (TEST/DEBUG) */ - oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ - sc /* (TEST/DEBUG) */ -#endif - sync /* Flush writes done under lock */ - li r0, 0 /* set lock to free */ + .globl EXT(hwulckPatch_isync) +LEXT(hwulckPatch_isync) + isync + .globl EXT(hwulckPatch_eieio) +LEXT(hwulckPatch_eieio) + eieio + li r0, 0 ; set lock to free stw r0, 0(r3) - b epStart /* Go enable preemption... */ - - -/* - * Special case for internal use. Uses same lock code, but sets up so - * that there will be no disabling of preemption after locking. Generally - * used for mutex locks when obtaining the interlock although there is - * nothing stopping other uses. - */ + b epStart ; Go enable preemption... -lockLock: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */ - ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */ - cmplwi cr1,r1,0 /* Set flag to disable disable preemption */ - lwz r4,0(r4) /* Get the timerout value */ - b lockComm /* Join on up... */ - /* * void hw_lock_lock(hw_lock_t) * - * Acquire lock, spinning until it becomes available. - * Return with preemption disabled. - * Apparently not used except by mach_perf. - * We will just set a default timeout and jump into the NORMAL timeout lock. + * Acquire lock, spinning until it becomes available. + * Return with preemption disabled. + * We will just set a default timeout and jump into the NORMAL timeout lock. */ - .align 5 .globl EXT(hw_lock_lock) LEXT(hw_lock_lock) - -lockDisa: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */ - ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */ - cmplw cr1,r1,r1 /* Set flag to enable disable preemption */ - lwz r4,0(r4) /* Get the timerout value */ - b lockComm /* Join on up... */ +lockDisa: + li r4,0 ; no timeout value + b lckcomm ; Join on up... /* - * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout) - * - * Try to acquire spin-lock. Return success (1) or failure (0). - * Attempt will fail after timeout ticks of the timebase. - * We try fairly hard to get this lock. We disable for interruptions, but - * reenable after a "short" timeout (128 ticks, we may want to change this). - * After checking to see if the large timeout value (passed in) has expired and a - * sufficient number of cycles have gone by (to insure pending 'rupts are taken), - * we return either in abject failure, or disable and go back to the lock sniff routine. - * If the sniffer finds the lock free, it jumps right up and tries to grab it. - * - * One programming note: NEVER DO NOTHING IN HERE NO HOW THAT WILL FORCE US TO CALL - * THIS WITH TRANSLATION OR INTERRUPTIONS EITHER ON OR OFF, GOSH DARN IT! + * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout) * + * Try to acquire spin-lock. Return success (1) or failure (0). + * Attempt will fail after timeout ticks of the timebase. + * We try fairly hard to get this lock. We disable for interruptions, but + * reenable after a "short" timeout (128 ticks, we may want to change this). 
+ * After checking to see if the large timeout value (passed in) has expired and a + * sufficient number of cycles have gone by (to insure pending 'rupts are taken), + * we return either in abject failure, or disable and go back to the lock sniff routine. + * If the sniffer finds the lock free, it jumps right up and tries to grab it. */ .align 5 .globl EXT(hw_lock_to) LEXT(hw_lock_to) -#if 0 - lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ - lis r5,0xEEEE /* (TEST/DEBUG) */ - oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ - sc /* (TEST/DEBUG) */ -#endif - #if CHECKNMI - mflr r12 ; (TEST/DEBUG) - bl EXT(ml_sense_nmi) ; (TEST/DEBUG) - mtlr r12 ; (TEST/DEBUG) + mflr r12 ; (TEST/DEBUG) + bl EXT(ml_sense_nmi) ; (TEST/DEBUG) + mtlr r12 ; (TEST/DEBUG) #endif - cmplw cr1,r1,r1 /* Set flag to enable disable preemption */ - -lockComm: mfmsr r9 /* Get the MSR value */ - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - mr r5,r3 /* Get the address of the lock */ - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */ - - mtmsr r7 /* Turn off interruptions */ - isync ; May have turned off vec and fp here - mftb r8 /* Get the low part of the time base */ +lckcomm: + mfsprg r6,1 ; Get the current activation + lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level + addi r5,r5,1 ; Bring up the disable count + stw r5,ACT_PREEMPT_CNT(r6) ; Save it back + mr r5,r3 ; Get the address of the lock + li r8,0 ; Set r8 to zero + +lcktry: lwarx r6,0,r5 ; Grab the lock value + andi. r3,r6,ILK_LOCKED ; Is it locked? + ori r6,r6,ILK_LOCKED ; Set interlock + bne-- lckspin ; Yeah, wait for it to clear... + stwcx. r6,0,r5 ; Try to seize that there durn lock + bne-- lcktry ; Couldn't get it... + li r3,1 ; return true + isync ; Make sure we don't use a speculativily loaded value + blr ; Go on home... + +lckspin: li r6,lgKillResv ; Get killing field + stwcx. r6,0,r6 ; Kill reservation -lcktry: lwarx r6,0,r5 /* Grab the lock value */ - andi. r3,r6,ILK_LOCKED /* Is it locked? */ - ori r6,r6,ILK_LOCKED /* Set interlock */ - bne- lcksniff /* Yeah, wait for it to clear... */ - stwcx. r6,0,r5 /* Try to seize that there durn lock */ - bne- lcktry /* Couldn't get it... */ - li r3,1 /* return true */ - isync /* Make sure we don't use a speculativily loaded value */ - beq+ cr1,daPreComm /* We got it, go disable preemption if we're supposed to... */ - mtmsr r9 ; Restore interrupt state - blr /* Go on home... */ + mr. r4,r4 ; Test timeout value + bne++ lockspin0 + lis r4,hi16(EXT(LockTimeOut)) ; Get the high part + ori r4,r4,lo16(EXT(LockTimeOut)) ; And the low part + lwz r4,0(r4) ; Get the timeout value +lockspin0: + mr. r8,r8 ; Is r8 set to zero + bne++ lockspin1 ; If yes, first spin attempt + lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable + mfmsr r9 ; Get the MSR value + ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable + ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too + andc r9,r9,r0 ; Clear FP and VEC + andc r7,r9,r7 ; Clear EE as well + mtmsr r7 ; Turn off interruptions + isync ; May have turned off vec and fp here + mftb r8 ; Get timestamp on entry + b lcksniff + +lockspin1: mtmsr r7 ; Turn off interruptions + mftb r8 ; Get timestamp on entry + +lcksniff: lwz r3,0(r5) ; Get that lock in here + andi. r3,r3,ILK_LOCKED ; Is it free yet? + beq++ lckretry ; Yeah, try for it again... - .align 5 - -lcksniff: lwz r3,0(r5) /* Get that lock in here */ - andi. r3,r3,ILK_LOCKED /* Is it free yet? 
*/ - beq+ lcktry /* Yeah, try for it again... */ - - mftb r10 /* Time stamp us now */ - sub r10,r10,r8 /* Get the elapsed time */ - cmplwi r10,128 /* Have we been spinning for 128 tb ticks? */ - blt+ lcksniff /* Not yet... */ + mftb r10 ; Time stamp us now + sub r10,r10,r8 ; Get the elapsed time + cmplwi r10,128 ; Have we been spinning for 128 tb ticks? + blt++ lcksniff ; Not yet... - mtmsr r9 /* Say, any interrupts pending? */ + mtmsr r9 ; Say, any interrupts pending? -/* The following instructions force the pipeline to be interlocked to that only one - instruction is issued per cycle. The insures that we stay enabled for a long enough - time; if it's too short, pending interruptions will not have a chance to be taken */ - - subi r4,r4,128 /* Back off elapsed time from timeout value */ - or r4,r4,r4 /* Do nothing here but force a single cycle delay */ - mr. r4,r4 /* See if we used the whole timeout */ - li r3,0 /* Assume a timeout return code */ - or r4,r4,r4 /* Do nothing here but force a single cycle delay */ - - ble- lckfail /* We failed */ - mtmsr r7 /* Disable for interruptions */ - mftb r8 /* Get the low part of the time base */ - b lcksniff /* Now that we've opened an enable window, keep trying... */ +; The following instructions force the pipeline to be interlocked to that only one +; instruction is issued per cycle. The insures that we stay enabled for a long enough +; time; if it's too short, pending interruptions will not have a chance to be taken -lckfail: /* We couldn't get the lock */ - li r3,0 /* Set failure return code */ - blr /* Return, head hanging low... */ + subi r4,r4,128 ; Back off elapsed time from timeout value + or r4,r4,r4 ; Do nothing here but force a single cycle delay + mr. r4,r4 ; See if we used the whole timeout + li r3,0 ; Assume a timeout return code + or r4,r4,r4 ; Do nothing here but force a single cycle delay + + ble-- lckfail ; We failed + b lockspin1 ; Now that we've opened an enable window, keep trying... +lckretry: + mtmsr r9 ; Restore interrupt state + li r8,1 ; Insure that R8 is not 0 + b lcktry +lckfail: ; We couldn't get the lock + li r3,0 ; Set failure return code + blr ; Return, head hanging low... /* - * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout) - * - * Try to acquire spin-lock. The second parameter is the bit mask to test and set. - * multiple bits may be set. Return success (1) or failure (0). - * Attempt will fail after timeout ticks of the timebase. - * We try fairly hard to get this lock. We disable for interruptions, but - * reenable after a "short" timeout (128 ticks, we may want to shorten this). - * After checking to see if the large timeout value (passed in) has expired and a - * sufficient number of cycles have gone by (to insure pending 'rupts are taken), - * we return either in abject failure, or disable and go back to the lock sniff routine. - * If the sniffer finds the lock free, it jumps right up and tries to grab it. - * - * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY - * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND - * RESTORE FROM THE STACK. + * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout) * + * Try to acquire spin-lock. The second parameter is the bit mask to test and set. + * multiple bits may be set. Return success (1) or failure (0). + * Attempt will fail after timeout ticks of the timebase. + * We try fairly hard to get this lock. 
We disable for interruptions, but + * reenable after a "short" timeout (128 ticks, we may want to shorten this). + * After checking to see if the large timeout value (passed in) has expired and a + * sufficient number of cycles have gone by (to insure pending 'rupts are taken), + * we return either in abject failure, or disable and go back to the lock sniff routine. + * If the sniffer finds the lock free, it jumps right up and tries to grab it. */ - .align 5 - - nop ; Force loop alignment to cache line - nop - nop - nop - .globl EXT(hw_lock_bit) LEXT(hw_lock_bit) - mfmsr r9 /* Get the MSR value */ - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */ + li r10,0 - mtmsr r7 /* Turn off interruptions */ - isync ; May have turned off vec and fp here - - mftb r8 /* Get the low part of the time base */ - -bittry: lwarx r6,0,r3 /* Grab the lock value */ - and. r0,r6,r4 /* See if any of the lock bits are on */ - or r6,r6,r4 /* Turn on the lock bits */ - bne- bitsniff /* Yeah, wait for it to clear... */ - stwcx. r6,0,r3 /* Try to seize that there durn lock */ - beq+ bitgot /* We got it, yahoo... */ - b bittry /* Just start up again if the store failed... */ +bittry: lwarx r6,0,r3 ; Grab the lock value + and. r0,r6,r4 ; See if any of the lock bits are on + or r6,r6,r4 ; Turn on the lock bits + bne-- bitspin ; Yeah, wait for it to clear... + stwcx. r6,0,r3 ; Try to seize that there durn lock + bne-- bittry ; Just start up again if the store failed... + + li r3,1 ; Set good return code + isync ; Make sure we don't use a speculativily loaded value + blr .align 5 + +bitspin: li r11,lgKillResv ; Get killing field + stwcx. r11,0,r11 ; Kill reservation -bitsniff: lwz r6,0(r3) /* Get that lock in here */ - and. r0,r6,r4 /* See if any of the lock bits are on */ - beq+ bittry /* Yeah, try for it again... */ + mr. r10,r10 ; Is r8 set to zero + li r10,1 ; Close gate + beq-- bit1sttime ; If yes, first spin attempt + +bitspin0: mtmsr r7 ; Turn off interruptions + mftb r8 ; Get the low part of the time base + +bitsniff: lwz r6,0(r3) ; Get that lock in here + and. r0,r6,r4 ; See if any of the lock bits are on + beq++ bitretry ; Yeah, try for it again... - mftb r6 /* Time stamp us now */ - sub r6,r6,r8 /* Get the elapsed time */ - cmplwi r6,128 /* Have we been spinning for 128 tb ticks? */ - blt+ bitsniff /* Not yet... */ + mftb r6 ; Time stamp us now + sub r6,r6,r8 ; Get the elapsed time + cmplwi r6,128 ; Have we been spinning for 128 tb ticks? + blt++ bitsniff ; Not yet... - mtmsr r9 /* Say, any interrupts pending? */ + mtmsr r9 ; Say, any interrupts pending? -/* The following instructions force the pipeline to be interlocked to that only one - instruction is issued per cycle. The insures that we stay enabled for a long enough - time. If it's too short, pending interruptions will not have a chance to be taken -*/ - - subi r5,r5,128 /* Back off elapsed time from timeout value */ - or r5,r5,r5 /* Do nothing here but force a single cycle delay */ - mr. r5,r5 /* See if we used the whole timeout */ - or r5,r5,r5 /* Do nothing here but force a single cycle delay */ +; The following instructions force the pipeline to be interlocked to that only one +; instruction is issued per cycle. The insures that we stay enabled for a long enough +; time. 
If it's too short, pending interruptions will not have a chance to be taken + + subi r5,r5,128 ; Back off elapsed time from timeout value + or r5,r5,r5 ; Do nothing here but force a single cycle delay + mr. r5,r5 ; See if we used the whole timeout + or r5,r5,r5 ; Do nothing here but force a single cycle delay - ble- bitfail /* We failed */ - mtmsr r7 /* Disable for interruptions */ - mftb r8 /* Get the low part of the time base */ - b bitsniff /* Now that we've opened an enable window, keep trying... */ + bgt++ bitspin0 ; Now that we've opened an enable window, keep trying... + + li r3,0 ; Set failure return code + blr ; Return, head hanging low... + +bitretry: mtmsr r9 ; Enable for interruptions + b bittry + +bit1sttime: lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable + mfmsr r9 ; Get the MSR value + ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable + ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too + andc r9,r9,r0 ; Clear FP and VEC + andc r7,r9,r7 ; Clear EE as well + mtmsr r7 ; Turn off interruptions + isync ; May have turned off vec and fp here + mftb r8 ; Get the low part of the time base + b bitsniff .align 5 -bitgot: mtmsr r9 /* Enable for interruptions */ - li r3,1 /* Set good return code */ - isync /* Make sure we don't use a speculativily loaded value */ - blr - -bitfail: li r3,0 /* Set failure return code */ - blr /* Return, head hanging low... */ - /* - * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit) + * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit) * - * Release bit based spin-lock. The second parameter is the bit mask to clear. - * Multiple bits may be cleared. + * Release bit based spin-lock. The second parameter is the bit mask to clear. + * Multiple bits may be cleared. * - * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY - * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND - * RESTORE FROM THE STACK. */ - .align 5 .globl EXT(hw_unlock_bit) LEXT(hw_unlock_bit) - sync - -ubittry: lwarx r0,0,r3 /* Grab the lock value */ - andc r0,r0,r4 /* Clear the lock bits */ - stwcx. r0,0,r3 /* Try to clear that there durn lock */ - bne- ubittry /* Try again, couldn't save it... */ + .globl EXT(hwulckbPatch_isync) +LEXT(hwulckbPatch_isync) + isync + .globl EXT(hwulckbPatch_eieio) +LEXT(hwulckbPatch_eieio) + eieio +ubittry: lwarx r0,0,r3 ; Grab the lock value + andc r0,r0,r4 ; Clear the lock bits + stwcx. r0,0,r3 ; Try to clear that there durn lock + bne- ubittry ; Try again, couldn't save it... - blr /* Leave... */ + blr ; Leave... /* - * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value, + * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value, * unsigned int newb, unsigned int timeout) * - * Try to acquire spin-lock. The second parameter is the bit mask to check. - * The third is the value of those bits and the 4th is what to set them to. - * Return success (1) or failure (0). - * Attempt will fail after timeout ticks of the timebase. - * We try fairly hard to get this lock. We disable for interruptions, but - * reenable after a "short" timeout (128 ticks, we may want to shorten this). - * After checking to see if the large timeout value (passed in) has expired and a - * sufficient number of cycles have gone by (to insure pending 'rupts are taken), - * we return either in abject failure, or disable and go back to the lock sniff routine. - * If the sniffer finds the lock free, it jumps right up and tries to grab it. - * + * Try to acquire spin-lock. 
The second parameter is the bit mask to check. + * The third is the value of those bits and the 4th is what to set them to. + * Return success (1) or failure (0). + * Attempt will fail after timeout ticks of the timebase. + * We try fairly hard to get this lock. We disable for interruptions, but + * reenable after a "short" timeout (128 ticks, we may want to shorten this). + * After checking to see if the large timeout value (passed in) has expired and a + * sufficient number of cycles have gone by (to insure pending 'rupts are taken), + * we return either in abject failure, or disable and go back to the lock sniff routine. + * If the sniffer finds the lock free, it jumps right up and tries to grab it. */ - .align 5 - - nop ; Force loop alignment to cache line - nop - nop - nop - .globl EXT(hw_lock_mbits) LEXT(hw_lock_mbits) - mfmsr r9 ; Get the MSR value - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Get MSR that is uninterruptible + li r10,0 - mtmsr r8 ; Turn off interruptions - isync ; May have turned off vectors or float here - mftb r10 ; Get the low part of the time base - -mbittry: lwarx r12,0,r3 ; Grab the lock value - and r0,r12,r4 ; Clear extra bits - andc r12,r12,r4 ; Clear all bits in the bit mask - or r12,r12,r6 ; Turn on the lock bits - cmplw r0,r5 ; Are these the right bits? - bne- mbitsniff ; Nope, wait for it to clear... - stwcx. r12,0,r3 ; Try to seize that there durn lock - beq+ mbitgot ; We got it, yahoo... - b mbittry ; Just start up again if the store failed... +mbittry: lwarx r12,0,r3 ; Grab the lock value + and r0,r12,r4 ; Clear extra bits + andc r12,r12,r4 ; Clear all bits in the bit mask + or r12,r12,r6 ; Turn on the lock bits + cmplw r0,r5 ; Are these the right bits? + bne-- mbitspin ; Nope, wait for it to clear... + stwcx. r12,0,r3 ; Try to seize that there durn lock + beq++ mbitgot ; We got it, yahoo... + b mbittry ; Just start up again if the store failed... .align 5 +mbitspin: li r11,lgKillResv ; Point to killing field + stwcx. r11,0,r11 ; Kill it -mbitsniff: lwz r12,0(r3) ; Get that lock in here - and r0,r12,r4 ; Clear extra bits - cmplw r0,r5 ; Are these the right bits? - beq+ mbittry ; Yeah, try for it again... + mr. r10,r10 ; Is r10 set to zero + bne++ mbitspin0 ; If yes, first spin attempt + lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable + mfmsr r9 ; Get the MSR value + ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable + ori r8,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too + andc r9,r9,r0 ; Clear FP and VEC + andc r8,r9,r8 ; Clear EE as well + mtmsr r8 ; Turn off interruptions + isync ; May have turned off vectors or float here + mftb r10 ; Get the low part of the time base + b mbitsniff +mbitspin0: + mtmsr r8 ; Turn off interruptions + mftb r10 ; Get the low part of the time base +mbitsniff: + lwz r12,0(r3) ; Get that lock in here + and r0,r12,r4 ; Clear extra bits + cmplw r0,r5 ; Are these the right bits? + beq++ mbitretry ; Yeah, try for it again... - mftb r11 ; Time stamp us now - sub r11,r11,r10 ; Get the elapsed time - cmplwi r11,128 ; Have we been spinning for 128 tb ticks? - blt+ mbitsniff ; Not yet... + mftb r11 ; Time stamp us now + sub r11,r11,r10 ; Get the elapsed time + cmplwi r11,128 ; Have we been spinning for 128 tb ticks? + blt++ mbitsniff ; Not yet... - mtmsr r9 ; Say, any interrupts pending? + mtmsr r9 ; Say, any interrupts pending? 
; The following instructions force the pipeline to be interlocked so that only one
; instruction is issued per cycle. This ensures that we stay enabled for a long enough
; time. If it is too short, pending interruptions will not have a chance to be taken

-			subi	r7,r7,128				; Back off elapsed time from timeout value
-			or		r7,r7,r7				; Do nothing here but force a single cycle delay
-			mr.		r7,r7					; See if we used the whole timeout
-			or		r7,r7,r7				; Do nothing here but force a single cycle delay
+			subi	r7,r7,128				; Back off elapsed time from timeout value
+			or		r7,r7,r7				; Do nothing here but force a single cycle delay
+			mr.		r7,r7					; See if we used the whole timeout
+			or		r7,r7,r7				; Do nothing here but force a single cycle delay

-			ble--	mbitfail				; We failed
-			mtmsr	r8						; Disable for interruptions
-			mftb	r10						; Get the low part of the time base
-			b		mbitsniff				; Now that we have opened an enable window, keep trying...
+			ble--	mbitfail				; We failed
+			b		mbitspin0				; Now that we have opened an enable window, keep trying...
+mbitretry:
+			mtmsr	r9						; Enable for interruptions
+			li		r10,1					; Make sure this is non-zero
+			b		mbittry

			.align	5
-
-mbitgot:	mtmsr	r9						; Enable for interruptions
-			li		r3,1					; Set good return code
-			isync							; Make sure we do not use a speculativily loaded value
+mbitgot:
+			li		r3,1					; Set good return code
+			isync							; Make sure we do not use a speculatively loaded value
			blr

-mbitfail:	li		r3,0					; Set failure return code
-			blr								; Return, head hanging low...
+mbitfail:	li		r3,0					; Set failure return code
+			blr								; Return, head hanging low...

/*
 *		unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
 *
- *		Spin until word hits 0 or timeout.
- *		Return success (1) or failure (0).
- *		Attempt will fail after timeout ticks of the timebase.
+ *		Spin until word hits 0 or timeout.
+ *		Return success (1) or failure (0).
+ *		Attempt will fail after timeout ticks of the timebase.
 *
- *		The theory is that a processor will bump a counter as it signals
- *		other processors. Then it will spin untl the counter hits 0 (or
- *		times out). The other processors, as it receives the signal will
- *		decrement the counter.
- *
- *		The other processors use interlocked update to decrement, this one
- *		does not need to interlock.
+ *		The theory is that a processor will bump a counter as it signals
+ *		other processors. Then it will spin until the counter hits 0 (or
+ *		times out). The other processors, as they receive the signal, will
+ *		decrement the counter.
 *
+ *		The other processors use interlocked update to decrement, this one
+ *		does not need to interlock.
 */
-			.align	5
-
			.globl	EXT(hw_cpu_sync)

LEXT(hw_cpu_sync)

-			mftb	r10						; Get the low part of the time base
-			mr		r9,r3					; Save the sync word address
-			li		r3,1					; Assume we work
+			mftb	r10						; Get the low part of the time base
+			mr		r9,r3					; Save the sync word address
+			li		r3,1					; Assume we work

-csynctry:	lwz		r11,0(r9)				; Grab the sync value
-			mr.		r11,r11					; Counter hit 0?
-			beqlr-							; Yeah, we are sunk...
-			mftb	r12						; Time stamp us now
+csynctry:	lwz		r11,0(r9)				; Grab the sync value
+			mr.		r11,r11					; Counter hit 0?
+			beqlr-							; Yeah, we are sunk...
+			mftb	r12						; Time stamp us now

-			sub		r12,r12,r10				; Get the elapsed time
-			cmplw	r4,r12					; Have we gone too long?
-			bge+	csynctry				; Not yet...
+			sub		r12,r12,r10				; Get the elapsed time
+			cmplw	r4,r12					; Have we gone too long?
+			bge+	csynctry				; Not yet...

-			li		r3,0					; Set failure...
-			blr								; Return, head hanging low...
+			li		r3,0					; Set failure...
+			blr								; Return, head hanging low...
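The rendezvous pattern the hw_cpu_sync block comment describes is easy to misread, so a minimal C sketch follows. It is illustrative only: signal_other_cpus, signal_and_wait, and ack_signal are hypothetical names invented here, not part of this patch; the hw_cpu_sync and hw_atomic_sub prototypes are the ones given in this file's comments.

    #include <stdint.h>

    extern unsigned int hw_cpu_sync(unsigned int *word, unsigned int timeout);
    extern uint32_t     hw_atomic_sub(uint32_t *dest, uint32_t delt);
    extern void         panic(const char *str, ...);
    extern void         signal_other_cpus(void);   /* hypothetical IPI helper */

    static uint32_t sync_word;                     /* outstanding acknowledgements */

    /* Signaling processor: bump the counter once per target, send the
       signals, then spin until every target has pulled it back to zero
       (or the timebase timeout expires).  Plain stores suffice on this
       side; only the responders need an interlocked update. */
    void signal_and_wait(unsigned int ncpus, unsigned int timeout_ticks)
    {
        sync_word = ncpus;
        signal_other_cpus();
        if (!hw_cpu_sync((unsigned int *)&sync_word, timeout_ticks))
            panic("cpu rendezvous timed out");
    }

    /* Each target processor, on taking the signal: interlocked decrement. */
    void ack_signal(void)
    {
        (void)hw_atomic_sub(&sync_word, 1);
    }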
/* * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout) * - * Spin until word changes or timeout. - * Return success (1) or failure (0). - * Attempt will fail after timeout ticks of the timebase. - * - * This is used to insure that a processor passes a certain point. - * An example of use is to monitor the last interrupt time in the - * per_proc block. This can be used to insure that the other processor - * has seen at least one interrupt since a specific time. + * Spin until word changes or timeout. + * Return success (1) or failure (0). + * Attempt will fail after timeout ticks of the timebase. * + * This is used to insure that a processor passes a certain point. + * An example of use is to monitor the last interrupt time in the + * per_proc block. This can be used to insure that the other processor + * has seen at least one interrupt since a specific time. */ - .align 5 - .globl EXT(hw_cpu_wcng) LEXT(hw_cpu_wcng) - mftb r10 ; Get the low part of the time base - mr r9,r3 ; Save the sync word address - li r3,1 ; Assume we work + mftb r10 ; Get the low part of the time base + mr r9,r3 ; Save the sync word address + li r3,1 ; Assume we work -wcngtry: lwz r11,0(r9) ; Grab the value - cmplw r11,r4 ; Do they still match? - bnelr- ; Nope, cool... - mftb r12 ; Time stamp us now +wcngtry: lwz r11,0(r9) ; Grab the value + cmplw r11,r4 ; Do they still match? + bnelr- ; Nope, cool... + mftb r12 ; Time stamp us now - sub r12,r12,r10 ; Get the elapsed time - cmplw r5,r12 ; Have we gone too long? - bge+ wcngtry ; Not yet... + sub r12,r12,r10 ; Get the elapsed time + cmplw r5,r12 ; Have we gone too long? + bge+ wcngtry ; Not yet... - li r3,0 ; Set failure... - blr ; Return, head hanging low... + li r3,0 ; Set failure... + blr ; Return, head hanging low... /* - * unsigned int hw_lock_try(hw_lock_t) + * unsigned int hw_lock_try(hw_lock_t) * - * Try to acquire spin-lock. Return success (1) or failure (0) - * Returns with preemption disabled on success. + * Try to acquire spin-lock. Return success (1) or failure (0) + * Returns with preemption disabled on success. * */ .align 5 @@ -664,115 +636,101 @@ wcngtry: lwz r11,0(r9) ; Grab the value LEXT(hw_lock_try) -#if 0 - lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ - lis r5,0x9999 /* (TEST/DEBUG) */ - oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ - sc /* (TEST/DEBUG) */ -#endif - mfmsr r9 /* Save the MSR value */ - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruption bit */ + lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable + mfmsr r9 ; Get the MSR value + ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable + ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too + andc r9,r9,r0 ; Clear FP and VEC + andc r7,r9,r7 ; Clear EE as well -#if MACH_LDEBUG - lis r5, 0x10 /* roughly 1E6 */ - mtctr r5 -#endif /* MACH_LDEBUG */ - - mtmsr r7 /* Disable interruptions and thus, preemption */ - isync ; May have turned off fp/vec here -.L_lock_try_loop: + mtmsr r7 ; Disable interruptions and thus, preemption -#if MACH_LDEBUG - bdnz+ 0f /* Count attempts */ - mtmsr r9 /* Restore enablement */ - BREAKPOINT_TRAP /* Get to debugger */ - mtmsr r7 /* Disable interruptions and thus, preemption */ -0: -#endif /* MACH_LDEBUG */ + lwz r5,0(r3) ; Quick load + andi. r6,r5,ILK_LOCKED ; TEST... + bne-- .L_lock_try_failed ; No go... 
- lwarx r5,0,r3 /* Ld from addr of arg and reserve */ +.L_lock_try_loop: + lwarx r5,0,r3 ; Ld from addr of arg and reserve - andi. r6,r5,ILK_LOCKED /* TEST... */ + andi. r6,r5,ILK_LOCKED ; TEST... ori r5,r5,ILK_LOCKED - bne- .L_lock_try_failed /* branch if taken. Predict free */ + bne-- .L_lock_try_failedX ; branch if taken. Predict free - stwcx. r5,0,r3 /* And SET (if still reserved) */ - mfsprg r6,0 /* Get the per_proc block */ - bne- .L_lock_try_loop /* If set failed, loop back */ + stwcx. r5,0,r3 ; And SET (if still reserved) + bne-- .L_lock_try_loop ; If set failed, loop back isync - lwz r5,PP_PREEMPT_CNT(r6) /* Get the preemption level */ - addi r5,r5,1 /* Bring up the disable count */ - stw r5,PP_PREEMPT_CNT(r6) /* Save it back */ + mfsprg r6,1 ; Get current activation + lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level + addi r5,r5,1 ; Bring up the disable count + stw r5,ACT_PREEMPT_CNT(r6) ; Save it back - mtmsr r9 /* Allow interruptions now */ - li r3,1 /* Set that the lock was free */ + mtmsr r9 ; Allow interruptions now + li r3,1 ; Set that the lock was free blr +.L_lock_try_failedX: + li r6,lgKillResv ; Killing field + stwcx. r6,0,r6 ; Kill reservation + .L_lock_try_failed: - mtmsr r9 /* Allow interruptions now */ - li r3,0 /* FAILURE - lock was taken */ + mtmsr r9 ; Allow interruptions now + li r3,0 ; FAILURE - lock was taken blr /* - * unsigned int hw_lock_held(hw_lock_t) - * - * Return 1 if lock is held - * Doesn't change preemption state. - * N.B. Racy, of course. + * unsigned int hw_lock_held(hw_lock_t) * + * Return 1 if lock is held + * Doesn't change preemption state. + * N.B. Racy, of course. */ .align 5 .globl EXT(hw_lock_held) LEXT(hw_lock_held) -#if 0 - lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ - lis r5,0x8888 /* (TEST/DEBUG) */ - oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ - sc /* (TEST/DEBUG) */ -#endif - isync /* Make sure we don't use a speculativily fetched lock */ - lwz r3, 0(r3) /* Return value of lock */ + isync ; Make sure we don't use a speculativily fetched lock + lwz r3, 0(r3) ; Get lock value + andi. r6,r3,ILK_LOCKED ; Extract the ILK_LOCKED bit blr /* * uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest) * - * Compare old to area if equal, store new, and return true - * else return false and no store - * This is an atomic operation - * + * Compare old to area if equal, store new, and return true + * else return false and no store + * This is an atomic operation */ .align 5 .globl EXT(hw_compare_and_store) LEXT(hw_compare_and_store) - mr r6,r3 /* Save the old value */ + mr r6,r3 ; Save the old value -cstry: lwarx r9,0,r5 /* Grab the area value */ - li r3,1 /* Assume it works */ - cmplw cr0,r9,r6 /* Does it match the old value? */ - bne- csfail /* No, it must have changed... */ - stwcx. r4,0,r5 /* Try to save the new value */ - bne- cstry /* Didn't get it, try again... */ - isync /* Just hold up prefetch */ - blr /* Return... */ +cstry: lwarx r9,0,r5 ; Grab the area value + li r3,1 ; Assume it works + cmplw cr0,r9,r6 ; Does it match the old value? + bne-- csfail ; No, it must have changed... + stwcx. r4,0,r5 ; Try to save the new value + bne-- cstry ; Didn't get it, try again... + isync ; Just hold up prefetch + blr ; Return... -csfail: li r3,0 /* Set failure */ - blr /* Better luck next time... */ +csfail: li r3,lgKillResv ; Killing field + stwcx. r3,0,r3 ; Blow reservation + + li r3,0 ; Set failure + blr ; Better luck next time... 
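Since compare-and-store is a common building block for lock-free updates, a short hypothetical C usage sketch may help (the function and names here are invented; only the hw_compare_and_store prototype is taken from this file). The key contract is that it returns nonzero only when the store actually happened, so the caller rereads and retries on failure:

    #include <stdint.h>

    extern uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval,
                                         uint32_t *dest);

    /* Atomically increment a counter, saturating at UINT32_MAX.  If another
       processor changes the word between our load and our attempted store,
       hw_compare_and_store fails and we recompute from the fresh value. */
    uint32_t saturating_inc(uint32_t *p)
    {
        uint32_t oldval, newval;
        do {
            oldval = *p;
            newval = (oldval == 0xFFFFFFFFu) ? oldval : oldval + 1;
        } while (!hw_compare_and_store(oldval, newval, p));
        return newval;
    }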
/* * uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt) * - * Atomically add the second parameter to the first. - * Returns the result. + * Atomically add the second parameter to the first. + * Returns the result. * */ .align 5 @@ -780,20 +738,20 @@ csfail: li r3,0 /* Set failure */ LEXT(hw_atomic_add) - mr r6,r3 /* Save the area */ + mr r6,r3 ; Save the area -addtry: lwarx r3,0,r6 /* Grab the area value */ - add r3,r3,r4 /* Add the value */ - stwcx. r3,0,r6 /* Try to save the new value */ - bne- addtry /* Didn't get it, try again... */ - blr /* Return... */ +addtry: lwarx r3,0,r6 ; Grab the area value + add r3,r3,r4 ; Add the value + stwcx. r3,0,r6 ; Try to save the new value + bne-- addtry ; Didn't get it, try again... + blr ; Return... /* * uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt) * - * Atomically subtract the second parameter from the first. - * Returns the result. + * Atomically subtract the second parameter from the first. + * Returns the result. * */ .align 5 @@ -801,41 +759,40 @@ addtry: lwarx r3,0,r6 /* Grab the area value */ LEXT(hw_atomic_sub) - mr r6,r3 /* Save the area */ + mr r6,r3 ; Save the area -subtry: lwarx r3,0,r6 /* Grab the area value */ - sub r3,r3,r4 /* Subtract the value */ - stwcx. r3,0,r6 /* Try to save the new value */ - bne- subtry /* Didn't get it, try again... */ - blr /* Return... */ +subtry: lwarx r3,0,r6 ; Grab the area value + sub r3,r3,r4 ; Subtract the value + stwcx. r3,0,r6 ; Try to save the new value + bne-- subtry ; Didn't get it, try again... + blr ; Return... /* * uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask) * - * Atomically ORs the second parameter into the first. - * Returns the result. - * + * Atomically ORs the second parameter into the first. + * Returns the result. */ .align 5 .globl EXT(hw_atomic_or) LEXT(hw_atomic_or) - mr r6,r3 ; Save the area + mr r6,r3 ; Save the area -ortry: lwarx r3,0,r6 ; Grab the area value - or r3,r3,r4 ; OR the value - stwcx. r3,0,r6 ; Try to save the new value - bne- ortry ; Did not get it, try again... - blr ; Return... +ortry: lwarx r3,0,r6 ; Grab the area value + or r3,r3,r4 ; OR the value + stwcx. r3,0,r6 ; Try to save the new value + bne-- ortry ; Did not get it, try again... + blr ; Return... /* * uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask) * - * Atomically ANDs the second parameter with the first. - * Returns the result. + * Atomically ANDs the second parameter with the first. + * Returns the result. * */ .align 5 @@ -843,22 +800,22 @@ ortry: lwarx r3,0,r6 ; Grab the area value LEXT(hw_atomic_and) - mr r6,r3 ; Save the area + mr r6,r3 ; Save the area -andtry: lwarx r3,0,r6 ; Grab the area value - and r3,r3,r4 ; AND the value - stwcx. r3,0,r6 ; Try to save the new value - bne- andtry ; Did not get it, try again... - blr ; Return... +andtry: lwarx r3,0,r6 ; Grab the area value + and r3,r3,r4 ; AND the value + stwcx. r3,0,r6 ; Try to save the new value + bne-- andtry ; Did not get it, try again... + blr ; Return... 
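All four interlocked arithmetic/logic primitives above return the post-update value, which is what lets patterns like reference counting work without a separate reread. A hypothetical C sketch (the obj structure and wrappers are invented for illustration; the prototypes match the comments above):

    #include <stdint.h>

    extern uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt);
    extern uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt);
    extern uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask);
    extern uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask);

    struct obj {
        uint32_t refcnt;
        uint32_t flags;
    };
    #define OBJ_DIRTY 0x00000001u

    void obj_ref(struct obj *o)   { (void)hw_atomic_add(&o->refcnt, 1); }

    /* Nonzero exactly when the caller dropped the last reference; the
       returned post-decrement value tells us without rereading refcnt. */
    int  obj_unref(struct obj *o) { return hw_atomic_sub(&o->refcnt, 1) == 0; }

    void obj_set_dirty(struct obj *o)   { (void)hw_atomic_or(&o->flags, OBJ_DIRTY); }
    void obj_clear_dirty(struct obj *o) { (void)hw_atomic_and(&o->flags, ~OBJ_DIRTY); }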
/* * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp) * - * Atomically inserts the element at the head of the list - * anchor is the pointer to the first element - * element is the pointer to the element to insert - * disp is the displacement into the element to the chain pointer + * Atomically inserts the element at the head of the list + * anchor is the pointer to the first element + * element is the pointer to the element to insert + * disp is the displacement into the element to the chain pointer * */ .align 5 @@ -866,91 +823,92 @@ andtry: lwarx r3,0,r6 ; Grab the area value LEXT(hw_queue_atomic) - mr r7,r4 /* Make end point the same as start */ - mr r8,r5 /* Copy the displacement also */ - b hw_queue_comm /* Join common code... */ + mr r7,r4 ; Make end point the same as start + mr r8,r5 ; Copy the displacement also + b hw_queue_comm ; Join common code... /* * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp) * - * Atomically inserts the list of elements at the head of the list - * anchor is the pointer to the first element - * first is the pointer to the first element to insert - * last is the pointer to the last element to insert - * disp is the displacement into the element to the chain pointer - * + * Atomically inserts the list of elements at the head of the list + * anchor is the pointer to the first element + * first is the pointer to the first element to insert + * last is the pointer to the last element to insert + * disp is the displacement into the element to the chain pointer */ .align 5 .globl EXT(hw_queue_atomic_list) LEXT(hw_queue_atomic_list) - mr r7,r5 /* Make end point the same as start */ - mr r8,r6 /* Copy the displacement also */ + mr r7,r5 ; Make end point the same as start + mr r8,r6 ; Copy the displacement also hw_queue_comm: - lwarx r9,0,r3 /* Pick up the anchor */ - stwx r9,r8,r7 /* Chain that to the end of the new stuff */ - eieio ; Make sure this store makes it before the anchor update - stwcx. r4,0,r3 /* Try to chain into the front */ - bne- hw_queue_comm /* Didn't make it, try again... */ - - blr /* Return... */ + lwarx r9,0,r3 ; Pick up the anchor + stwx r9,r8,r7 ; Chain that to the end of the new stuff + eieio ; Make sure this store makes it before the anchor update + stwcx. r4,0,r3 ; Try to chain into the front + bne-- hw_queue_comm ; Didn't make it, try again... + + blr ; Return... /* * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp) * - * Atomically removes the first element in a list and returns it. - * anchor is the pointer to the first element - * disp is the displacement into the element to the chain pointer - * Returns element if found, 0 if empty. - * + * Atomically removes the first element in a list and returns it. + * anchor is the pointer to the first element + * disp is the displacement into the element to the chain pointer + * Returns element if found, 0 if empty. */ .align 5 .globl EXT(hw_dequeue_atomic) LEXT(hw_dequeue_atomic) - mr r5,r3 /* Save the anchor */ + mr r5,r3 ; Save the anchor hw_dequeue_comm: - lwarx r3,0,r5 /* Pick up the anchor */ - mr. r3,r3 /* Is the list empty? */ - beqlr- /* Leave it list empty... */ - lwzx r9,r4,r3 /* Get the next in line */ - stwcx. r9,0,r5 /* Try to chain into the front */ - beqlr+ ; Got the thing, go away with it... - b hw_dequeue_comm ; Did not make it, try again... + lwarx r3,0,r5 ; Pick up the anchor + mr. r3,r3 ; Is the list empty? + beq-- hdcFail ; Leave it list empty... 
+ lwzx r9,r4,r3 ; Get the next in line + stwcx. r9,0,r5 ; Try to chain into the front + beqlr++ ; Got the thing, go away with it... + b hw_dequeue_comm ; Did not make it, try again... + +hdcFail: li r4,lgKillResv ; Killing field + stwcx. r4,0,r4 ; Dump reservation + blr ; Leave... + /* - * void mutex_init(mutex_t* l, etap_event_t etap) + * void mutex_init(mutex_t* l, etap_event_t etap) + * */ + .align 5 + .globl EXT(mutex_init) -ENTRY(mutex_init,TAG_NO_FRAME_USED) +LEXT(mutex_init) PROLOG(0) li r10, 0 - stw r10, LOCK_DATA(r3) /* clear lock word */ - sth r10, MUTEX_WAITERS(r3) /* init waiter count */ + stw r10, LOCK_DATA(r3) ; clear lock word + sth r10, MUTEX_WAITERS(r3) ; init waiter count sth r10, MUTEX_PROMOTED_PRI(r3) #if MACH_LDEBUG - stw r10, MUTEX_PC(r3) /* init caller pc */ - stw r10, MUTEX_THREAD(r3) /* and owning thread */ + stw r10, MUTEX_PC(r3) ; init caller pc + stw r10, MUTEX_THREAD(r3) ; and owning thread li r10, MUTEX_TAG - stw r10, MUTEX_TYPE(r3) /* set lock type */ + stw r10, MUTEX_TYPE(r3) ; set lock type #endif /* MACH_LDEBUG */ - -#if ETAP_LOCK_TRACE - bl EXT(etap_mutex_init) /* init ETAP data */ -#endif /* ETAP_LOCK_TRACE */ - EPILOG blr /* - * void mutex_lock(mutex_t*) + * void mutex_lock(mutex_t*) + * */ - .align 5 .globl EXT(mutex_lock) LEXT(mutex_lock) @@ -959,21 +917,30 @@ LEXT(mutex_lock) LEXT(_mutex_lock) #if !MACH_LDEBUG - mfsprg r6,1 /* load the current thread */ + mfsprg r6,1 ; load the current thread + lwz r5,0(r3) ; Get the lock quickly + mr. r5,r5 ; Quick check + bne-- L_mutex_lock_slow ; Can not get it right now... + L_mutex_lock_loop: - lwarx r5,0,r3 /* load the mutex lock */ + lwarx r5,0,r3 ; load the mutex lock mr. r5,r5 - bne- L_mutex_lock_slow /* go to the slow path */ - stwcx. r6,0,r3 /* grab the lock */ - bne- L_mutex_lock_loop /* loop back if failed */ - isync /* stop prefeteching */ + bne-- L_mutex_lock_slowX ; go to the slow path + stwcx. r6,0,r3 ; grab the lock + bne-- L_mutex_lock_loop ; loop back if failed + isync ; stop prefeteching blr + +L_mutex_lock_slowX: + li r5,lgKillResv ; Killing field + stwcx. r5,0,r5 ; Kill reservation + L_mutex_lock_slow: #endif #if CHECKNMI - mflr r12 ; (TEST/DEBUG) - bl EXT(ml_sense_nmi) ; (TEST/DEBUG) - mtlr r12 ; (TEST/DEBUG) + mflr r12 ; (TEST/DEBUG) + bl EXT(ml_sense_nmi) ; (TEST/DEBUG) + mtlr r12 ; (TEST/DEBUG) #endif PROLOG(12) @@ -983,7 +950,9 @@ L_mutex_lock_slow: bne L_mutex_lock_assert_wait_1 lis r3,hi16(L_mutex_lock_assert_wait_panic_str) ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str) + PROLOG(0) bl EXT(panic) + BREAKPOINT_TRAP ; We die here anyway .data L_mutex_lock_assert_wait_panic_str: @@ -993,37 +962,20 @@ L_mutex_lock_assert_wait_panic_str: L_mutex_lock_assert_wait_1: lwz r3,FM_ARG0(r1) #endif - -#if ETAP_LOCK_TRACE - li r0, 0 - stw r0,SWT_HI(r1) /* set wait time to 0 (HI) */ - stw r0,SWT_LO(r1) /* set wait time to 0 (LO) */ - stw r0,MISSED(r1) /* clear local miss marker */ -#endif /* ETAP_LOCK_TRACE */ - CHECK_SETUP(r12) CHECK_MUTEX_TYPE() CHECK_NO_SIMPLELOCKS() - .L_ml_retry: -#if 0 - mfsprg r4,0 /* (TEST/DEBUG) */ - lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ - lwz r4,PP_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */ - lis r5,0xAAAA /* (TEST/DEBUG) */ - oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ - sc /* (TEST/DEBUG) */ -#endif - - bl lockDisa /* Go get a lock on the mutex's interlock lock */ - mr. r4,r3 /* Did we get it? */ - lwz r3,FM_ARG0(r1) /* Restore the lock address */ - bne+ mlGotInt /* We got it just fine... */ + bl lockDisa ; Go get a lock on the mutex's interlock lock + mr. 
r4,r3 ; Did we get it? + lwz r3,FM_ARG0(r1) ; Restore the lock address + bne+ mlGotInt ; We got it just fine... - lis r3,HIGH_ADDR(mutex_failed1) ; Get the failed mutex message - ori r3,r3,LOW_ADDR(mutex_failed1) ; Get the failed mutex message - bl EXT(panic) ; Call panic - BREAKPOINT_TRAP ; We die here anyway, can not get the lock + lis r3,hi16(mutex_failed1) ; Get the failed mutex message + ori r3,r3,lo16(mutex_failed1) ; Get the failed mutex message + PROLOG(0) + bl EXT(panic) ; Call panic + BREAKPOINT_TRAP ; We die here anyway, can not get the lock .data mutex_failed1: @@ -1032,29 +984,30 @@ mutex_failed1: mlGotInt: -/* Note that there is no reason to do a load and reserve here. We already - hold the interlock lock and no one can touch this field unless they - have that, so, we're free to play */ - - lwz r4,LOCK_DATA(r3) /* Get the mutex's lock field */ - rlwinm. r9,r4,30,2,31 /* So, can we have it? */ - bne- mlInUse /* Nope, sombody's playing already... */ +; Note that there is no reason to do a load and reserve here. We already +; hold the interlock lock and no one can touch this field unless they +; have that, so, we're free to play + + lwz r4,LOCK_DATA(r3) ; Get the mutex's lock field + rlwinm. r9,r4,30,2,31 ; So, can we have it? + bne- mlInUse ; Nope, sombody's playing already... #if MACH_LDEBUG - mfmsr r11 ; Note: no need to deal with fp or vec here - rlwinm r5,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 + li r5,lo16(MASK(MSR_EE)) ; Get the EE bit + mfmsr r11 ; Note: no need to deal with fp or vec here + andc r5,r11,r5 mtmsr r5 - mfsprg r9,0 /* Get the per_proc block */ - lwz r5,0(r1) /* Get previous save frame */ - lwz r5,FM_LR_SAVE(r5) /* Get our caller's address */ - lwz r8, PP_ACTIVE_THREAD(r9) /* Get the active thread */ - stw r5,MUTEX_PC(r3) /* Save our caller */ - mr. r8,r8 /* Is there any thread? */ - stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */ - beq- .L_ml_no_active_thread /* No owning thread... */ - lwz r9,THREAD_MUTEX_COUNT(r8) /* Get the mutex count */ - addi r9,r9,1 /* Bump it up */ - stw r9,THREAD_MUTEX_COUNT(r8) /* Stash it back */ + mfsprg r9,1 ; Get the current activation + lwz r5,0(r1) ; Get previous save frame + lwz r5,FM_LR_SAVE(r5) ; Get our caller's address + lwz r8, ACT_THREAD(r9) ; Get the active thread + stw r5,MUTEX_PC(r3) ; Save our caller + mr. r8,r8 ; Is there any thread? + stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread + beq- .L_ml_no_active_thread ; No owning thread... + lwz r9,THREAD_MUTEX_COUNT(r8) ; Get the mutex count + addi r9,r9,1 ; Bump it up + stw r9,THREAD_MUTEX_COUNT(r8) ; Stash it back .L_ml_no_active_thread: mtmsr r11 #endif /* MACH_LDEBUG */ @@ -1065,112 +1018,86 @@ mlGotInt: lwz r3,FM_ARG0(r1) beq mlUnlock ori r5,r5,WAIT_FLAG -mlUnlock: - sync - stw r5,LOCK_DATA(r3) /* grab the mutexlock and free the interlock */ - -#if ETAP_LOCK_TRACE - mflr r4 - lwz r5,SWT_HI(r1) - lwz r6,SWT_LO(r1) - bl EXT(etap_mutex_hold) /* collect hold timestamp */ -#endif /* ETAP_LOCK_TRACE */ - EPILOG /* Restore all saved registers */ +mlUnlock: eieio + stw r5,LOCK_DATA(r3) ; grab the mutexlock and free the interlock - b epStart /* Go enable preemption... */ + EPILOG ; Restore all saved registers + b epStart ; Go enable preemption... -/* - * We come to here when we have a resource conflict. In other words, - * the mutex is held. - */ +; We come to here when we have a resource conflict. In other words, +; the mutex is held. mlInUse: -#if ETAP_LOCK_TRACE - lwz r7,MISSED(r1) - cmpwi r7,0 /* did we already take a wait timestamp ? 
*/ - bne .L_ml_block /* yup. carry-on */ - bl EXT(etap_mutex_miss) /* get wait timestamp */ - stw r3,SWT_HI(r1) /* store timestamp */ - stw r4,SWT_LO(r1) - li r7, 1 /* mark wait timestamp as taken */ - stw r7,MISSED(r1) - lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */ -.L_ml_block: -#endif /* ETAP_LOCK_TRACE */ - CHECK_SETUP(r12) - CHECK_MYLOCK(MUTEX_THREAD) /* Assert we don't own the lock already */ - + CHECK_MYLOCK(MUTEX_THREAD) ; Assert we don't own the lock already */ -/* Note that we come in here with the interlock set. The wait routine - * will unlock it before waiting. - */ - ori r4,r4,WAIT_FLAG /* Set the wait flag */ - stw r4,LOCK_DATA(r3) - rlwinm r4,r4,0,0,29 /* Extract the lock owner */ - bl EXT(mutex_lock_wait) /* Wait for our turn at the lock */ +; Note that we come in here with the interlock set. The wait routine +; will unlock it before waiting. + + ori r4,r4,WAIT_FLAG ; Set the wait flag + stw r4,LOCK_DATA(r3) + rlwinm r4,r4,0,0,29 ; Extract the lock owner + bl EXT(mutex_lock_wait) ; Wait for our turn at the lock - lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */ - b .L_ml_retry /* and try again... */ + lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog) + b .L_ml_retry ; and try again... /* - * void _mutex_try(mutex_t*) + * void _mutex_try(mutex_t*) * */ - .align 5 .globl EXT(mutex_try) LEXT(mutex_try) .globl EXT(_mutex_try) LEXT(_mutex_try) #if !MACH_LDEBUG - mfsprg r6,1 /* load the current thread */ + mfsprg r6,1 ; load the current thread + lwz r5,0(r3) ; Get the lock value + mr. r5,r5 ; Quick check + bne-- L_mutex_try_slow ; Can not get it now... + L_mutex_try_loop: - lwarx r5,0,r3 /* load the lock value */ + lwarx r5,0,r3 ; load the lock value mr. r5,r5 - bne- L_mutex_try_slow /* branch to the slow path */ - stwcx. r6,0,r3 /* grab the lock */ - bne- L_mutex_try_loop /* retry if failed */ - isync /* stop prefetching */ + bne-- L_mutex_try_slowX ; branch to the slow path + stwcx. r6,0,r3 ; grab the lock + bne-- L_mutex_try_loop ; retry if failed + isync ; stop prefetching li r3, 1 blr + +L_mutex_try_slowX: + li r5,lgKillResv ; Killing field + stwcx. r5,0,r5 ; Kill reservation + L_mutex_try_slow: + #endif - PROLOG(8) /* reserve space for SWT_HI and SWT_LO */ + PROLOG(8) ; reserve space for SWT_HI and SWT_LO -#if ETAP_LOCK_TRACE - li r5, 0 - stw r5, STW_HI(r1) /* set wait time to 0 (HI) */ - stw r5, SWT_LO(r1) /* set wait time to 0 (LO) */ -#endif /* ETAP_LOCK_TRACE */ - -#if 0 - lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ - lis r5,0xBBBB /* (TEST/DEBUG) */ - oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ - sc /* (TEST/DEBUG) */ -#endif CHECK_SETUP(r12) CHECK_MUTEX_TYPE() CHECK_NO_SIMPLELOCKS() - lwz r6,LOCK_DATA(r3) /* Quick check */ - rlwinm. r6,r6,30,2,31 /* to see if someone has this lock already */ - bne- mtFail /* Someone's got it already... */ - - bl lockDisa /* Go get a lock on the mutex's interlock lock */ - mr. r4,r3 /* Did we get it? */ - lwz r3,FM_ARG0(r1) /* Restore the lock address */ - bne+ mtGotInt /* We got it just fine... */ - - lis r3,HIGH_ADDR(mutex_failed2) ; Get the failed mutex message - ori r3,r3,LOW_ADDR(mutex_failed2) ; Get the failed mutex message - bl EXT(panic) ; Call panic - BREAKPOINT_TRAP ; We die here anyway, can not get the lock + lwz r6,LOCK_DATA(r3) ; Quick check + rlwinm. r6,r6,30,2,31 ; to see if someone has this lock already + bne- mtFail ; Someone's got it already... + + bl lockDisa ; Go get a lock on the mutex's interlock lock + mr. r4,r3 ; Did we get it? 
*/ + lwz r3,FM_ARG0(r1) ; Restore the lock address + bne+ mtGotInt ; We got it just fine... + + lis r3,hi16(mutex_failed2) ; Get the failed mutex message + ori r3,r3,lo16(mutex_failed2) ; Get the failed mutex message + PROLOG(0) + bl EXT(panic) ; Call panic + BREAKPOINT_TRAP ; We die here anyway, can not get the lock .data mutex_failed2: @@ -1179,29 +1106,34 @@ mutex_failed2: mtGotInt: -/* Note that there is no reason to do a load and reserve here. We already - hold the interlock and no one can touch at this field unless they - have that, so, we're free to play */ +; Note that there is no reason to do a load and reserve here. We already +; hold the interlock and no one can touch at this field unless they +; have that, so, we're free to play - lwz r4,LOCK_DATA(r3) /* Get the mutex's lock field */ - rlwinm. r9,r4,30,2,31 /* So, can we have it? */ - bne- mtInUse /* Nope, sombody's playing already... */ + lwz r4,LOCK_DATA(r3) ; Get the mutex's lock field + rlwinm. r9,r4,30,2,31 ; So, can we have it? + bne- mtInUse ; Nope, sombody's playing already... #if MACH_LDEBUG - mfmsr r11 - rlwinm r5,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 + lis r9,hi16(MASK(MSR_VEC)) ; Get vector enable + mfmsr r11 ; Get the MSR value + ori r9,r9,lo16(MASK(MSR_FP)) ; Get FP enable + ori r5,r9,lo16(MASK(MSR_EE)) ; Get EE bit on too + andc r11,r11,r9 ; Clear FP and VEC + andc r5,r11,r5 ; Clear EE as well + mtmsr r5 - mfsprg r9,0 /* Get the per_proc block */ - lwz r5,0(r1) /* Get previous save frame */ - lwz r5,FM_LR_SAVE(r5) /* Get our caller's address */ - lwz r8, PP_ACTIVE_THREAD(r9) /* Get the active thread */ - stw r5,MUTEX_PC(r3) /* Save our caller */ - mr. r8,r8 /* Is there any thread? */ - stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */ - beq- .L_mt_no_active_thread /* No owning thread... */ - lwz r9, THREAD_MUTEX_COUNT(r8) /* Get the mutex count */ - addi r9, r9, 1 /* Bump it up */ - stw r9, THREAD_MUTEX_COUNT(r8) /* Stash it back */ + mfsprg r9,1 ; Get the current activation + lwz r5,0(r1) ; Get previous save frame + lwz r5,FM_LR_SAVE(r5) ; Get our caller's address + lwz r8,ACT_THREAD(r9) ; Get the active thread + stw r5,MUTEX_PC(r3) ; Save our caller + mr. r8,r8 ; Is there any thread? + stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread + beq- .L_mt_no_active_thread ; No owning thread... + lwz r9, THREAD_MUTEX_COUNT(r8) ; Get the mutex count + addi r9, r9, 1 ; Bump it up + stw r9, THREAD_MUTEX_COUNT(r8) ; Stash it back .L_mt_no_active_thread: mtmsr r11 #endif /* MACH_LDEBUG */ @@ -1212,87 +1144,108 @@ mtGotInt: lwz r3,FM_ARG0(r1) beq mtUnlock ori r5,r5,WAIT_FLAG -mtUnlock: - sync /* Push it all out */ - stw r5,LOCK_DATA(r3) /* grab the mutexlock and free the interlock */ -#if ETAP_LOCK_TRACE - lwz r4,0(r1) /* Back chain the stack */ - lwz r5,SWT_HI(r1) - lwz r4,FM_LR_SAVE(r4) /* Get our caller's address */ - lwz r6,SWT_LO(r1) - bl EXT(etap_mutex_hold) /* collect hold timestamp */ -#endif /* ETAP_LOCK_TRACE */ +mtUnlock: eieio + stw r5,LOCK_DATA(r3) ; grab the mutexlock and free the interlock - bl epStart /* Go enable preemption... */ + bl epStart ; Go enable preemption... li r3, 1 - EPILOG /* Restore all saved registers */ - blr /* Return... */ + EPILOG ; Restore all saved registers + blr ; Return... -/* - * We come to here when we have a resource conflict. In other words, - * the mutex is held. - */ +; We come to here when we have a resource conflict. In other words, +; the mutex is held. 
mtInUse: - rlwinm r4,r4,0,0,30 /* Get the unlock value */ - stw r4,LOCK_DATA(r3) /* free the interlock */ - bl epStart /* Go enable preemption... */ + rlwinm r4,r4,0,0,30 ; Get the unlock value + stw r4,LOCK_DATA(r3) ; free the interlock + bl epStart ; Go enable preemption... -mtFail: li r3,0 /* Set failure code */ - EPILOG /* Restore all saved registers */ - blr /* Return... */ +mtFail: li r3,0 ; Set failure code + EPILOG ; Restore all saved registers + blr ; Return... /* - * void mutex_unlock(mutex_t* l) + * void mutex_unlock_rwcmb(mutex_t* l) + * */ + .align 5 + .globl EXT(mutex_unlock_rwcmb) + +LEXT(mutex_unlock_rwcmb) + .globl EXT(mulckPatch_isync) +LEXT(mulckPatch_isync) + isync + .globl EXT(mulckPatch_eieio) +LEXT(mulckPatch_eieio) + eieio + lwz r5,0(r3) ; Get the lock + rlwinm. r4,r5,0,30,31 ; Quick check + bne-- L_mutex_unlock_slow ; Can not get it now... + +L_mutex_unlock_rwcmb_loop: + lwarx r5,0,r3 + rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set + li r5,0 ; Clear the mutexlock + bne-- L_mutex_unlock_rwcmb_slowX + stwcx. r5,0,r3 + bne-- L_mutex_unlock_rwcmb_loop + blr + +L_mutex_unlock_rwcmb_slowX: + li r5,lgKillResv ; Killing field + stwcx. r5,0,r5 ; Dump reservation + b L_mutex_unlock_slow ; Join slow path... + +/* + * void mutex_unlock(mutex_t* l) + * + */ .align 5 .globl EXT(mutex_unlock) LEXT(mutex_unlock) #if !MACH_LDEBUG sync + lwz r5,0(r3) ; Get the lock + rlwinm. r4,r5,0,30,31 ; Quick check + bne-- L_mutex_unlock_slow ; Can not get it now... + L_mutex_unlock_loop: lwarx r5,0,r3 - rlwinm. r4,r5,0,30,31 /* Bail if pending waiter or interlock set */ - li r5,0 /* Clear the mutexlock */ - bne- L_mutex_unlock_slow + rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set + li r5,0 ; Clear the mutexlock + bne-- L_mutex_unlock_slowX stwcx. r5,0,r3 - bne- L_mutex_unlock_loop + bne-- L_mutex_unlock_loop blr -L_mutex_unlock_slow: +L_mutex_unlock_slowX: + li r5,lgKillResv ; Killing field + stwcx. r5,0,r5 ; Dump reservation + #endif + +L_mutex_unlock_slow: + PROLOG(0) -#if ETAP_LOCK_TRACE - bl EXT(etap_mutex_unlock) /* collect ETAP data */ - lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */ -#endif /* ETAP_LOCK_TRACE */ - CHECK_SETUP(r12) CHECK_MUTEX_TYPE() CHECK_THREAD(MUTEX_THREAD) -#if 0 - mfsprg r4,0 /* (TEST/DEBUG) */ - lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ - lwz r4,PP_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */ - lis r5,0xCCCC /* (TEST/DEBUG) */ - oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ - sc /* (TEST/DEBUG) */ -#endif - bl lockDisa /* Go get a lock on the mutex's interlock lock */ - mr. r4,r3 /* Did we get it? */ - lwz r3,FM_ARG0(r1) /* Restore the lock address */ - bne+ muGotInt /* We got it just fine... */ - - lis r3,HIGH_ADDR(mutex_failed3) ; Get the failed mutex message - ori r3,r3,LOW_ADDR(mutex_failed3) ; Get the failed mutex message - bl EXT(panic) ; Call panic - BREAKPOINT_TRAP ; We die here anyway, can not get the lock + bl lockDisa ; Go get a lock on the mutex's interlock lock + mr. r4,r3 ; Did we get it? + lwz r3,FM_ARG0(r1) ; Restore the lock address + bne+ muGotInt ; We got it just fine... + + lis r3,hi16(mutex_failed3) ; Get the failed mutex message + ori r3,r3,lo16(mutex_failed3) ; Get the failed mutex message + PROLOG(0) + bl EXT(panic) ; Call panic + BREAKPOINT_TRAP ; We die here anyway, can not get the lock .data mutex_failed3: @@ -1302,22 +1255,27 @@ mutex_failed3: muGotInt: lwz r4,LOCK_DATA(r3) - andi. r5,r4,WAIT_FLAG /* are there any waiters ? */ + andi. r5,r4,WAIT_FLAG ; are there any waiters ? 
rlwinm r4,r4,0,0,29 - beq+ muUnlock /* Nope, we're done... */ + beq+ muUnlock ; Nope, we're done... - bl EXT(mutex_unlock_wakeup) /* yes, wake a thread */ - lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */ - lwz r5,LOCK_DATA(r3) /* load the lock */ + bl EXT(mutex_unlock_wakeup) ; yes, wake a thread + lwz r3,FM_ARG0(r1) ; restore r3 (saved in prolog) + lwz r5,LOCK_DATA(r3) ; load the lock muUnlock: #if MACH_LDEBUG - mfmsr r11 - rlwinm r9,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 + lis r8,hi16(MASK(MSR_VEC)) ; Get vector enable + mfmsr r11 ; Get the MSR value + ori r8,r8,lo16(MASK(MSR_FP)) ; Get FP enable + ori r9,r8,lo16(MASK(MSR_EE)) ; Get EE bit on too + andc r11,r11,r8 ; Clear FP and VEC + andc r9,r11,r9 ; Clear EE as well + mtmsr r9 - mfsprg r9,0 - lwz r9,PP_ACTIVE_THREAD(r9) - stw r9,MUTEX_THREAD(r3) /* disown thread */ + mfsprg r9,1 + lwz r9,ACT_THREAD(r9) + stw r9,MUTEX_THREAD(r3) ; disown thread cmpwi r9,0 beq- .L_mu_no_active_thread lwz r8,THREAD_MUTEX_COUNT(r9) @@ -1327,139 +1285,193 @@ muUnlock: mtmsr r11 #endif /* MACH_LDEBUG */ - andi. r5,r5,WAIT_FLAG /* Get the unlock value */ - sync /* Make sure it's all there before we release */ - stw r5,LOCK_DATA(r3) /* unlock the interlock and lock */ - - EPILOG /* Deal with the stack now, enable_preemption doesn't always want one */ - b epStart /* Go enable preemption... */ + andi. r5,r5,WAIT_FLAG ; Get the unlock value + eieio + stw r5,LOCK_DATA(r3) ; unlock the interlock and lock + + EPILOG ; Deal with the stack now, enable_preemption doesn't always want one + b epStart ; Go enable preemption... /* - * void interlock_unlock(hw_lock_t lock) + * boolean_t mutex_preblock(mutex_t*, thread_t) */ + .align 5 + .globl EXT(mutex_preblock) + +LEXT(mutex_preblock) + mr r6,r3 + lwz r5,LOCK_DATA(r3) + mr. r3,r5 + beqlr+ + mr r3,r6 + + PROLOG(0) + stw r4,(FM_ARG0-4)(r1) + bl EXT(hw_lock_try) + mr. r4,r3 + lwz r3,FM_ARG0(r1) + bne+ mpbGotInt + + li r3,0 + + EPILOG + + blr + +mpbGotInt: + lwz r6,LOCK_DATA(r3) + rlwinm. r5,r6,0,0,30 + bne+ mpbInUse + + stw r5,LOCK_DATA(r3) + + bl epStart + + li r3,0 + + EPILOG + + blr + +mpbInUse: + lwz r4,(FM_ARG0-4)(r1) + rlwinm r5,r6,0,0,29 + bl EXT(mutex_preblock_wait) + lwz r4,FM_ARG0(r1) + mr. r3,r3 + lwz r5,LOCK_DATA(r4) + rlwinm r5,r5,0,0,30 + beq- mpbUnlock0 + ori r5,r5,WAIT_FLAG + + eieio + stw r5,LOCK_DATA(r4) + + bl epStart + + li r3,1 + + EPILOG + + blr + +mpbUnlock0: + eieio + stw r5,LOCK_DATA(r4) + + bl epStart + + li r3,0 + + EPILOG + + blr + +/* + * void interlock_unlock(hw_lock_t lock) + */ .align 5 .globl EXT(interlock_unlock) LEXT(interlock_unlock) -#if 0 - lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ - lis r5,0xDDDD /* (TEST/DEBUG) */ - oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ - sc /* (TEST/DEBUG) */ -#endif lwz r10,LOCK_DATA(r3) rlwinm r10,r10,0,0,30 - sync + eieio stw r10,LOCK_DATA(r3) - b epStart /* Go enable preemption... */ + b epStart ; Go enable preemption... -/* - * Here is where we enable preemption. We need to be protected - * against ourselves, we can't chance getting interrupted and modifying - * our processor wide preemption count after we'sve loaded it up. So, - * we need to disable all 'rupts. Actually, we could use a compare - * and swap to do this, but, since there are no MP considerations - * (we are dealing with a CPU local field) it is much, much faster - * to disable. 
+/*
+ *		void _enable_preemption_no_check(void)
 *
- *		Note that if we are not genned MP, the calls here will be no-opped via
- *		a #define and since the _mp forms are the same, likewise a #define
- *		will be used to route to the other forms
+ *		This version does not check if we get preempted or not
 */
-
-/* This version does not check if we get preempted or not */
-
	.align	4
	.globl	EXT(_enable_preemption_no_check)

LEXT(_enable_preemption_no_check)

-	cmplw	cr1,r1,r1				/* Force zero cr so we know not to check if preempted */
-	b	epCommn					/* Join up with the other enable code... */
+	cmplw	cr1,r1,r1				; Force zero cr so we know not to check if preempted
+	b	epCommn					; Join up with the other enable code...

-/* This version checks if we get preempted or not */
-
+/*
+ *		void _enable_preemption(void)
+ *
+ *		This version checks if we get preempted or not
+ */
	.align	5
	.globl	EXT(_enable_preemption)

LEXT(_enable_preemption)

-epStart:	cmplwi	cr1,r1,0			/* Force non-zero cr so we know to check if preempted */
-
-/*
- *	Common enable preemption code
- */
-
-epCommn:	mfmsr	r9				/* Save the old MSR */
-	rlwinm	r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
-	rlwinm	r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
-	rlwinm	r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
-	mtmsr	r8					/* Interrupts off */
-	isync						; May have mess with vec/fp here
-
-	mfsprg	r3,0					/* Get the per_proc block */
-	li	r8,-1					/* Get a decrimenter */
-	lwz	r5,PP_PREEMPT_CNT(r3)			/* Get the preemption level */
-	add.	r5,r5,r8				/* Bring down the disable count */
-#if 0
-	mfsprg	r4,1					; (TEST/DEBUG) Note the next 3 keep from interrpting too early
-	mr.	r4,r4					; (TEST/DEBUG)
-	beq-	epskptrc0				; (TEST/DEBUG)
-	lis	r0,hi16(CutTrace)			; (TEST/DEBUG)
-	lis	r4,0xBBBB				; (TEST/DEBUG)
-	oris	r0,r0,lo16(CutTrace)			; (TEST/DEBUG)
-	sc						; (TEST/DEBUG)
-epskptrc0:	mr.	r5,r5				; (TEST/DEBUG)
-#endif
-#if MACH_LDEBUG
-	blt-	epTooFar				/* Yeah, we did... */
-#endif /* MACH_LDEBUG */
-	stw	r5,PP_PREEMPT_CNT(r3)			/* Save it back */
-
-	beq+	epCheckPreempt				/* Go check if we need to be preempted... */
-
-epNoCheck:	mtmsr	r9				/* Restore the interrupt level */
-	blr						/* Leave... */
-
-#if MACH_LDEBUG
+;		Here is where we enable preemption.  We need to be protected
+;		against ourselves, we can't chance getting interrupted and modifying
+;		our processor wide preemption count after we've loaded it up. So,
+;		we need to disable all 'rupts.  Actually, we could use a compare
+;		and swap to do this, but, since there are no MP considerations
+;		(we are dealing with a CPU local field) it is much, much faster
+;		to disable.
+;
+;		Note that if we are not genned MP, the calls here will be no-opped via
+;		a #define and since the _mp forms are the same, likewise a #define
+;		will be used to route to the other forms
+
+epStart:
+	cmplwi	cr1,r1,0				; Force non-zero cr so we know to check if preempted
+
+epCommn:
+	mfsprg	r3,1					; Get current activation
+	li	r8,-1					; Get a decrementer
+	lwz	r5,ACT_PREEMPT_CNT(r3)			; Get the preemption level
+	add.	r5,r5,r8				; Bring down the disable count
+	blt-	epTooFar				; Yeah, we did...
+	stw	r5,ACT_PREEMPT_CNT(r3)			; Save it back
+	crandc	cr0_eq,cr0_eq,cr1_eq
+	beq+	epCheckPreempt				; Go check if we need to be preempted...
+	blr						; Leave...
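+
+;		epTooFar is reached when the count underflows, i.e. an _enable_preemption
+;		without a matching _disable_preemption; the offending level is passed to
+;		panic in r4.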
epTooFar: - lis r6,HIGH_ADDR(EXT(panic)) /* First half of panic call */ - lis r3,HIGH_ADDR(epTooFarStr) /* First half of panic string */ - ori r6,r6,LOW_ADDR(EXT(panic)) /* Second half of panic call */ - ori r3,r3,LOW_ADDR(epTooFarStr) /* Second half of panic string */ - mtlr r6 /* Get the address of the panic routine */ - mtmsr r9 /* Restore interruptions */ - blrl /* Panic... */ + mr r4,r5 + lis r3,hi16(epTooFarStr) ; First half of panic string + ori r3,r3,lo16(epTooFarStr) ; Second half of panic string + PROLOG(0) + bl EXT(panic) + BREAKPOINT_TRAP ; We die here anyway .data epTooFarStr: - STRINGD "_enable_preemption: preemption_level <= 0!\000" - .text -#endif /* MACH_LDEBUG */ + STRINGD "_enable_preemption: preemption_level %d\n\000" + .text .align 5 - epCheckPreempt: - lwz r7,PP_NEED_AST(r3) /* Get the AST request address */ - li r5,AST_URGENT /* Get the requests we do honor */ - lwz r7,0(r7) /* Get the actual, real live, extra special AST word */ - lis r0,HIGH_ADDR(DoPreemptCall) /* Just in case, get the top of firmware call */ - and. r7,r7,r5 ; Should we preempt? - ori r0,r0,LOW_ADDR(DoPreemptCall) /* Merge in bottom part */ - beq+ epCPno ; No preemption here... - - andi. r3,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off - -epCPno: mtmsr r9 /* Allow interrupts if we can */ - beqlr+ ; We probably will not preempt... - sc /* Do the preemption */ - blr /* Now, go away now... */ + lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable + mfmsr r9 ; Get the MSR value + ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable + andi. r3,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off + beq+ epCPno ; No preemption here... + ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too + andc r9,r9,r0 ; Clear FP and VEC + andc r7,r9,r7 ; Clear EE as well + mtmsr r7 ; Turn off interruptions + isync ; May have turned off vec and fp here + mfsprg r3,0 ; Get per_proc + lwz r7,PP_NEED_AST(r3) ; Get the AST request address + li r5,AST_URGENT ; Get the requests we do honor + lwz r7,0(r7) ; Get the actual, real live, extra special AST word + lis r0,hi16(DoPreemptCall) ; Just in case, get the top of firmware call + and. r7,r7,r5 ; Should we preempt? + ori r0,r0,lo16(DoPreemptCall) ; Merge in bottom part + mtmsr r9 ; Allow interrupts if we can +epCPno: + beqlr+ ; We probably will not preempt... + sc ; Do the preemption + blr ; Now, go away now... /* + * void disable_preemption(void) + * * Here is where we disable preemption. Since preemption is on a * per processor basis (a thread runs on one CPU at a time) we don't * need any cross-processor synchronization. We do, however, need to @@ -1468,355 +1480,275 @@ epCPno: mtmsr r9 /* Allow interrupts if we can */ * disablement, and this is platform specific code, we'll just kick the * MSR. We'll save a couple of orders of magnitude over using SPLs. */ - .align 5 - - nop ; Use these 5 nops to force daPreComm - nop ; to a line boundary. 
- nop - nop - nop - .globl EXT(_disable_preemption) LEXT(_disable_preemption) -daPreAll: mfmsr r9 /* Save the old MSR */ - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ - mtmsr r8 /* Interrupts off */ - isync ; May have mess with fp/vec - -daPreComm: mfsprg r6,0 /* Get the per_proc block */ - lwz r5,PP_PREEMPT_CNT(r6) /* Get the preemption level */ - addi r5,r5,1 /* Bring up the disable count */ - stw r5,PP_PREEMPT_CNT(r6) /* Save it back */ -#if 0 - mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep from interrpting too early - mr. r4,r4 ; (TEST/DEBUG) - beq- epskptrc1 ; (TEST/DEBUG) - lis r0,hi16(CutTrace) ; (TEST/DEBUG) - lis r4,0xAAAA ; (TEST/DEBUG) - oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG) - sc ; (TEST/DEBUG) -epskptrc1: ; (TEST/DEBUG) -#endif - -; -; Set PREEMPTSTACK above to enable a preemption traceback stack. -; -; NOTE: make sure that PREEMPTSTACK in aligned_data is -; set the same as it is here. This is the number of -; traceback entries we can handle per processor -; -; A value of 0 disables the stack. -; -#if PREEMPTSTACK - cmplwi r5,PREEMPTSTACK ; Maximum depth - lwz r6,CPU_ACTIVE_THREAD(r6) ; Get the pointer to the currently active thread - bgt- nopredeb ; Too many to stack... - mr. r6,r6 ; During boot? - beq- nopredeb ; Yes, do not do backtrace... - lwz r6,THREAD_TOP_ACT(r6) ; Point to the active activation - lwz r6,ACT_MACT_PCB(r6) ; Get the last savearea used - mr. r0,r6 ; Any saved context? - beq- nosaveds ; No... - lwz r0,saver1(r6) ; Get end of savearea chain - -nosaveds: li r11,0 ; Clear callers callers callers return - li r10,0 ; Clear callers callers callers callers return - li r8,0 ; Clear callers callers callers callers callers return - lwz r2,0(r1) ; Get callers callers stack frame - lwz r12,8(r2) ; Get our callers return - lwz r4,0(r2) ; Back chain - - xor r2,r4,r2 ; Form difference - cmplwi r2,8192 ; Within a couple of pages? - mr r2,r4 ; Move register - bge- nosaveher2 ; No, no back chain then... - lwz r11,8(r2) ; Get our callers return - lwz r4,0(r2) ; Back chain - - xor r2,r4,r2 ; Form difference - cmplwi r2,8192 ; Within a couple of pages? - mr r2,r4 ; Move register - bge- nosaveher2 ; No, no back chain then... - lwz r10,8(r2) ; Get our callers return - lwz r4,0(r2) ; Back chain - - xor r2,r4,r2 ; Form difference - cmplwi r2,8192 ; Within a couple of pages? - mr r2,r4 ; Move register - bge- nosaveher2 ; No, no back chain then... - lwz r8,8(r2) ; Get our callers return - -nosaveher2: - addi r5,r5,-1 ; Get index to slot - mfspr r6,pir ; Get our processor - mflr r4 ; Get our return - rlwinm r6,r6,8,0,23 ; Index to processor slot - lis r2,hi16(EXT(DBGpreempt)) ; Stack high order - rlwinm r5,r5,4,0,27 ; Index to stack slot - ori r2,r2,lo16(EXT(DBGpreempt)) ; Stack low order - add r2,r2,r5 ; Point to slot - add r2,r2,r6 ; Move to processor - stw r4,0(r2) ; Save our return - stw r11,4(r2) ; Save callers caller - stw r10,8(r2) ; Save callers callers caller - stw r8,12(r2) ; Save callers callers callers caller -nopredeb: -#endif - mtmsr r9 /* Allow interruptions now */ - - blr /* Return... */ + mfsprg r6,1 ; Get the current activation + lwz r5,ACT_PREEMPT_CNT(r6) ; Get the preemption level + addi r5,r5,1 ; Bring up the disable count + stw r5,ACT_PREEMPT_CNT(r6) ; Save it back + blr ; Return... 
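Taken together, _disable_preemption and _enable_preemption above maintain a per-activation nesting count, and preemption is only re-examined when the count returns to zero with interrupts enabled. A minimal, runnable C model of that discipline (act_preempt_cnt, need_ast and preempt_trap are illustrative stand-ins for ACT_PREEMPT_CNT, PP_NEED_AST and the DoPreemptCall trap, not kernel interfaces):

    #include <stdio.h>

    #define AST_URGENT 0x1

    static int act_preempt_cnt = 0;   /* models the ACT_PREEMPT_CNT field */
    static int need_ast = 0;          /* models the per-processor AST word */
    static int msr_ee = 1;            /* models the MSR[EE] interrupt-enable bit */

    static void preempt_trap(void) { puts("preempted"); } /* models sc to DoPreemptCall */

    void disable_preemption(void) { act_preempt_cnt++; }

    void enable_preemption(void)
    {
        if (--act_preempt_cnt < 0) {        /* the epTooFar case */
            printf("panic: _enable_preemption: preemption_level %d\n", act_preempt_cnt);
            return;
        }
        if (act_preempt_cnt == 0 && msr_ee && (need_ast & AST_URGENT))
            preempt_trap();                 /* take the pending urgent AST now */
    }

    int main(void)
    {
        disable_preemption();               /* bracket a per-processor access */
        need_ast = AST_URGENT;              /* an urgent AST arrives meanwhile */
        enable_preemption();                /* count hits 0: the trap fires here */
        return 0;
    }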
/* - * Return the active thread for both inside and outside osfmk consumption + * int get_preemption_level(void) + * + * Return the current preemption level */ - .align 5 - .globl EXT(current_thread) - -LEXT(current_thread) - -#if 1 - mfsprg r3,1 - lwz r3,ACT_THREAD(r3) - blr -#else - mfmsr r9 /* Save the old MSR */ - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ - mtmsr r8 /* Interrupts off */ - isync - mfsprg r6,0 /* Get the per_proc */ - lwz r3,PP_ACTIVE_THREAD(r6) /* Get the active thread */ - mfsprg r4,1 - lwz r4,ACT_THREAD(r4) - cmplw cr0,r4,r3 - beq current_thread_cont - lis r5,hi16(L_current_thread_paniced) - ori r5,r5,lo16(L_current_thread_paniced) - lwz r6,0(r5) - mr. r6,r6 - bne current_thread_cont - stw r9,0(r5) - mr r5,r4 - mr r4,r3 - lis r3,hi16(L_current_thread_panic) - ori r3,r3,lo16(L_current_thread_panic) - bl EXT(panic) + .globl EXT(get_preemption_level) - .data -L_current_thread_panic: - STRINGD "current_thread: spr1 not sync %x %x %x\n\000" -L_current_thread_paniced: - .long 0 - .text -current_thread_cont: -#endif - mtmsr r9 /* Restore interruptions to entry */ - blr /* Return... */ +LEXT(get_preemption_level) + + mfsprg r6,1 ; Get current activation + lwz r3,ACT_PREEMPT_CNT(r6) ; Get the preemption level + blr ; Return... /* - * Set the active thread + * int get_simple_lock_count(void) + * + * Return the simple lock count + * */ .align 5 - .globl EXT(set_machine_current_thread) -LEXT(set_machine_current_thread) - - mfmsr r9 /* Save the old MSR */ - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ - mtmsr r8 /* Interrupts off */ - isync ; May have messed with fp/vec - mfsprg r6,0 /* Get the per_proc */ - stw r3,PP_ACTIVE_THREAD(r6) /* Set the active thread */ - mtmsr r9 /* Restore interruptions to entry */ - blr /* Return... */ + .globl EXT(get_simple_lock_count) -/* - * Set the current activation - */ - .align 5 - .globl EXT(set_machine_current_act) -LEXT(set_machine_current_act) - mtsprg 1,r3 /* Set spr1 with the active thread */ - blr /* Return... */ +LEXT(get_simple_lock_count) + +#if MACH_LDEBUG + lis r3,hi16(MASK(MSR_VEC)) ; Get vector enable + mfmsr r9 ; Get the MSR value + ori r3,r3,lo16(MASK(MSR_FP)) ; Get FP enable + ori r8,r3,lo16(MASK(MSR_EE)) ; Get EE bit on too + andc r9,r9,r3 ; Clear FP and VEC + andc r8,r9,r8 ; Clear EE as well + mtmsr r8 ; Interrupts off + isync ; May have messed with vec/fp + mfsprg r6,0 ; Get the per_proc + lwz r3,PP_SIMPLE_LOCK_CNT(r6) ; Get the simple lock count + mtmsr r9 ; Restore interruptions to entry +#else + li r3,0 ; simple lock count not updated +#endif + blr ; Return... /* - * Return the current activation + * void ppc_usimple_lock_init(simple_lock_t, etap_event_t) + * + * Initialize a simple lock. 
 */
	.align	5
-	.globl	EXT(current_act)
-LEXT(current_act)
-	mfsprg	r3,1
-	blr
-
+	.globl	EXT(ppc_usimple_lock_init)

+LEXT(ppc_usimple_lock_init)
+
+	li	r0, 0				; set lock to free == 0
+	stw	r0, 0(r3)			; Initialize the lock
+	blr
+
/*
- *		Return the current preemption level
+ *		void ppc_usimple_lock(simple_lock_t)
+ *
 */
-	.align	5
-	.globl	EXT(get_preemption_level)
+	.globl	EXT(ppc_usimple_lock)

-LEXT(get_preemption_level)
-
-	mfmsr	r9				/* Save the old MSR */
-	rlwinm	r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
-	rlwinm	r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
-	rlwinm	r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
-	mtmsr	r8				/* Interrupts off */
-	isync
-	mfsprg	r6,0				/* Get the per_proc */
-	lwz	r3,PP_PREEMPT_CNT(r6)		/* Get the preemption level */
-	mtmsr	r9				/* Restore interruptions to entry */
-	blr					/* Return... */
+LEXT(ppc_usimple_lock)

+#if CHECKNMI
+	mflr	r12				; (TEST/DEBUG)
+	bl	EXT(ml_sense_nmi)		; (TEST/DEBUG)
+	mtlr	r12				; (TEST/DEBUG)
+#endif

-/*
- *		Return the cpu_data
- */
-
-	.align	5
-	.globl	EXT(get_cpu_data)
-
-LEXT(get_cpu_data)
-
-	mfsprg	r3,0				/* Get the per_proc */
-	addi	r3,r3,PP_ACTIVE_THREAD		/* Get the pointer to the CPU data from per proc */
-	blr					/* Return... */
+	mfsprg	r6,1				; Get the current activation
+	lwz	r5,ACT_PREEMPT_CNT(r6)		; Get the preemption level
+	addi	r5,r5,1				; Bring up the disable count
+	stw	r5,ACT_PREEMPT_CNT(r6)		; Save it back
+	mr	r5,r3				; Get the address of the lock
+	li	r8,0				; Set r8 to zero
+	li	r4,0				; Set r4 to zero
+
+slcktry:	lwarx	r11,0,r5		; Grab the lock value
+	andi.	r3,r11,ILK_LOCKED		; Is it locked?
+	ori	r11,r6,ILK_LOCKED		; Set interlock
+	bne--	slckspin			; Yeah, wait for it to clear...
+	stwcx.	r11,0,r5			; Try to seize that there durn lock
+	bne--	slcktry				; Couldn't get it...
+	isync					; Make sure we don't use a speculatively loaded value
+	blr					; Go on home...
+
+slckspin:	li	r11,lgKillResv		; Killing field
+	stwcx.	r11,0,r11			; Kill reservation
+
+	mr.	r4,r4				; Test timeout value
+	bne++	slockspin0
+	lis	r4,hi16(EXT(LockTimeOut))	; Get the high part
+	ori	r4,r4,lo16(EXT(LockTimeOut))	; And the low part
+	lwz	r4,0(r4)			; Get the timeout value
+
+slockspin0:	mr.	r8,r8			; Is r8 set to zero
+	bne++	slockspin1			; If yes, first spin attempt
+	lis	r0,hi16(MASK(MSR_VEC))		; Get vector enable
+	mfmsr	r9				; Get the MSR value
+	ori	r0,r0,lo16(MASK(MSR_FP))	; Get FP enable
+	ori	r7,r0,lo16(MASK(MSR_EE))	; Get EE bit on too
+	andc	r9,r9,r0			; Clear FP and VEC
+	andc	r7,r9,r7			; Clear EE as well
+	mtmsr	r7				; Turn off interruptions
+	isync					; May have turned off vec and fp here
+	mftb	r8				; Get timestamp on entry
+	b	slcksniff
+
+slockspin1:	mtmsr	r7			; Turn off interruptions
+	mftb	r8				; Get timestamp on entry
+
+slcksniff:	lwz	r3,0(r5)		; Get that lock in here
+	andi.	r3,r3,ILK_LOCKED		; Is it free yet?
+	beq++	slckretry			; Yeah, try for it again...
+
+	mftb	r10				; Time stamp us now
+	sub	r10,r10,r8			; Get the elapsed time
+	cmplwi	r10,128				; Have we been spinning for 128 tb ticks?
+	blt++	slcksniff			; Not yet...
+
+	mtmsr	r9				; Say, any interrupts pending?

+;		The following instructions force the pipeline to be interlocked so that only one
+;		instruction is issued per cycle.  This ensures that we stay enabled for a long enough
+;		time; if it's too short, pending interruptions will not have a chance to be taken

-/*
- *		Return the simple lock count
- */
-
-	.align	5
-	.globl	EXT(get_simple_lock_count)

+	subi	r4,r4,128			; Back off elapsed time from timeout value
+	or	r4,r4,r4			; Do nothing here but force a single cycle delay
+	mr.	r4,r4				; See if we used the whole timeout
+	li	r3,0				; Assume a timeout return code
+	or	r4,r4,r4			; Do nothing here but force a single cycle delay
+
+	ble--	slckfail			; We failed
+	b	slockspin1			; Now that we've opened an enable window, keep trying...
+slckretry:
+	mtmsr	r9				; Restore interrupt state
+	li	r8,1				; Show already through once
+	b	slcktry
+slckfail:					; We couldn't get the lock
+	lis	r3,hi16(slckpanic_str)
+	ori	r3,r3,lo16(slckpanic_str)
+	mr	r4,r5
+	mflr	r5
+	PROLOG(0)
+	bl	EXT(panic)
+	BREAKPOINT_TRAP				; We die here anyway

-LEXT(get_simple_lock_count)
-
-	mfmsr	r9				/* Save the old MSR */
-	rlwinm	r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
-	rlwinm	r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
-	rlwinm	r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
-	mtmsr	r8				/* Interrupts off */
-	isync					; May have messed with vec/fp
-	mfsprg	r6,0				/* Get the per_proc */
-	lwz	r3,PP_SIMPLE_LOCK_CNT(r6)	/* Get the simple lock count */
-	mtmsr	r9				/* Restore interruptions to entry */
-	blr					/* Return... */

+	.data
+slckpanic_str:
+	STRINGD	"ppc_usimple_lock: simple lock deadlock detection l=0x%08X, pc=0x%08X\n\000"
+	.text

/*
- * fast_usimple_lock():
+ *		unsigned int ppc_usimple_lock_try(simple_lock_t)
 *
- * If EE is off, get the simple lock without incrementing the preemption count and
- * mark The simple lock with SLOCK_FAST.
- * If EE is on, call usimple_lock().
 */
	.align	5
-	.globl	EXT(fast_usimple_lock)
+	.globl	EXT(ppc_usimple_lock_try)

-LEXT(fast_usimple_lock)
+LEXT(ppc_usimple_lock_try)

#if CHECKNMI
-	b	EXT(usimple_lock)		; (TEST/DEBUG)
+	mflr	r12				; (TEST/DEBUG)
+	bl	EXT(ml_sense_nmi)		; (TEST/DEBUG)
+	mtlr	r12				; (TEST/DEBUG)
#endif
-	mfmsr	r9
-	andi.	r7,r9,lo16(MASK(MSR_EE))
-	bne-	L_usimple_lock_c
-L_usimple_lock_loop:
-	lwarx	r4,0,r3
-	li	r5,ILK_LOCKED|SLOCK_FAST
-	mr.	r4,r4
-	bne-	L_usimple_lock_c
-	stwcx.	r5,0,r3
-	bne-	L_usimple_lock_loop
-	isync
-	blr
-L_usimple_lock_c:
-	b	EXT(usimple_lock)
+	lis	r0,hi16(MASK(MSR_VEC))		; Get vector enable
+	mfmsr	r9				; Get the MSR value
+	ori	r0,r0,lo16(MASK(MSR_FP))	; Get FP enable
+	ori	r7,r0,lo16(MASK(MSR_EE))	; Get EE bit on too
+	andc	r9,r9,r0			; Clear FP and VEC
+	andc	r7,r9,r7			; Clear EE as well
+	mtmsr	r7				; Disable interruptions and thus, preemption
+	mfsprg	r6,1				; Get current activation
+
+	lwz	r11,0(r3)			; Get the lock
+	andi.	r5,r11,ILK_LOCKED		; Check it...
+	bne--	slcktryfail			; Quickly fail...
+
+slcktryloop:
+	lwarx	r11,0,r3			; Ld from addr of arg and reserve
+
+	andi.	r5,r11,ILK_LOCKED		; TEST...
+	ori	r5,r6,ILK_LOCKED
+	bne--	slcktryfailX			; branch if taken. Predict free
+
+	stwcx.	r5,0,r3				; And SET (if still reserved)
+	bne--	slcktryloop			; If set failed, loop back
+
+	isync
+
+	lwz	r5,ACT_PREEMPT_CNT(r6)		; Get the preemption level
+	addi	r5,r5,1				; Bring up the disable count
+	stw	r5,ACT_PREEMPT_CNT(r6)		; Save it back
+
+	mtmsr	r9				; Allow interruptions now
+	li	r3,1				; Set that the lock was free
+	blr
+
+slcktryfailX:
+	li	r5,lgKillResv			; Killing field
+	stwcx.	r5,0,r5				; Kill reservation
+
+slcktryfail:
+	mtmsr	r9				; Allow interruptions now
+	li	r3,0				; FAILURE - lock was taken
+	blr
+

/*
- * fast_usimple_lock_try():
+ *		void ppc_usimple_unlock_rwcmb(simple_lock_t)
 *
- * If EE is off, try to get the simple lock. The preemption count doesn't get incremented and
- * if successfully held, the simple lock is marked with SLOCK_FAST.
- * If EE is on, call usimple_lock_try() */ .align 5 - .globl EXT(fast_usimple_lock_try) + .globl EXT(ppc_usimple_unlock_rwcmb) -LEXT(fast_usimple_lock_try) +LEXT(ppc_usimple_unlock_rwcmb) #if CHECKNMI - b EXT(usimple_lock_try) ; (TEST/DEBUG) + mflr r12 ; (TEST/DEBUG) + bl EXT(ml_sense_nmi) ; (TEST/DEBUG) + mtlr r12 ; (TEST/DEBUG) #endif - mfmsr r9 - andi. r7,r9,lo16(MASK(MSR_EE)) - bne- L_usimple_lock_try_c -L_usimple_lock_try_loop: - lwarx r4,0,r3 - li r5,ILK_LOCKED|SLOCK_FAST - mr. r4,r4 - bne- L_usimple_lock_try_fail - stwcx. r5,0,r3 - bne- L_usimple_lock_try_loop - li r3,1 - isync - blr -L_usimple_lock_try_fail: - li r3,0 - blr -L_usimple_lock_try_c: - b EXT(usimple_lock_try) + li r0,0 + .globl EXT(sulckPatch_isync) +LEXT(sulckPatch_isync) + isync + .globl EXT(sulckPatch_eieio) +LEXT(sulckPatch_eieio) + eieio + stw r0, LOCK_DATA(r3) + + b epStart ; Go enable preemption... /* - * fast_usimple_unlock(): + * void ppc_usimple_unlock_rwmb(simple_lock_t) * - * If the simple lock is marked SLOCK_FAST, release it without decrementing the preemption count. - * Call usimple_unlock() otherwise. */ .align 5 - .globl EXT(fast_usimple_unlock) + .globl EXT(ppc_usimple_unlock_rwmb) -LEXT(fast_usimple_unlock) +LEXT(ppc_usimple_unlock_rwmb) #if CHECKNMI - b EXT(usimple_unlock) ; (TEST/DEBUG) + mflr r12 ; (TEST/DEBUG) + bl EXT(ml_sense_nmi) ; (TEST/DEBUG) + mtlr r12 ; (TEST/DEBUG) #endif - lwz r5,LOCK_DATA(r3) - li r0,0 - cmpi cr0,r5,ILK_LOCKED|SLOCK_FAST - bne- L_usimple_unlock_c - sync -#if 0 - mfmsr r9 - andi. r7,r9,lo16(MASK(MSR_EE)) - beq L_usimple_unlock_cont - lis r3,hi16(L_usimple_unlock_panic) - ori r3,r3,lo16(L_usimple_unlock_panic) - bl EXT(panic) + li r0,0 + sync + stw r0, LOCK_DATA(r3) - .data -L_usimple_unlock_panic: - STRINGD "fast_usimple_unlock: interrupts not disabled\n\000" - .text -L_usimple_unlock_cont: -#endif - stw r0, LOCK_DATA(r3) - blr -L_usimple_unlock_c: - b EXT(usimple_unlock) + b epStart ; Go enable preemption... /* - * enter_funnel_section(): + * void enter_funnel_section(funnel_t *) * */ .align 5 @@ -1825,46 +1757,45 @@ L_usimple_unlock_c: LEXT(enter_funnel_section) #if !MACH_LDEBUG - lis r10,hi16(EXT(kdebug_enable)) - ori r10,r10,lo16(EXT(kdebug_enable)) - lwz r10,0(r10) - lis r11,hi16(EXT(split_funnel_off)) - ori r11,r11,lo16(EXT(split_funnel_off)) - lwz r11,0(r11) - or. r10,r11,r10 ; Check kdebug_enable or split_funnel_off - bne- L_enter_funnel_section_slow1 ; If set, call the slow path - mfsprg r6,1 ; Get the current activation - lwz r7,LOCK_FNL_MUTEX(r3) - mfmsr r11 - rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 - mtmsr r10 ; Turn off EE - isync ; May have messed with vec/fp - mr r9,r6 + lis r10,hi16(EXT(kdebug_enable)) + ori r10,r10,lo16(EXT(kdebug_enable)) + lwz r10,0(r10) + lis r11,hi16(EXT(split_funnel_off)) + ori r11,r11,lo16(EXT(split_funnel_off)) + lwz r11,0(r11) + or. r10,r11,r10 ; Check kdebug_enable or split_funnel_off + bne- L_enter_funnel_section_slow ; If set, call the slow path + mfsprg r6,1 ; Get the current activation + lwz r7,LOCK_FNL_MUTEX(r3) + + lwz r5,0(r7) ; Get lock quickly + mr. r5,r5 ; Locked? + bne-- L_enter_funnel_section_slow ; Yup... + L_enter_funnel_section_loop: - lwarx r5,0,r7 ; Load the mutex lock - mr. r5,r5 - bne- L_enter_funnel_section_slow ; Go to the slow path - stwcx. 
	r6,0,r7				; Grab the lock
-	bne-	L_enter_funnel_section_loop	; Loop back if failed
-	isync					; Stop prefeteching
-	lwz	r6,ACT_THREAD(r6)		; Get the current thread
-	li	r7,TH_FN_OWNED
-	stw	r7,THREAD_FUNNEL_STATE(r6)	; Set the funnel state
-	stw	r3,THREAD_FUNNEL_LOCK(r6)	; Set the funnel lock reference
-	mtmsr	r11
-	blr
+	lwarx	r5,0,r7				; Load the mutex lock
+	mr.	r5,r5
+	bne--	L_enter_funnel_section_slowX	; Go to the slow path
+	stwcx.	r6,0,r7				; Grab the lock
+	bne--	L_enter_funnel_section_loop	; Loop back if failed
+	isync					; Stop prefetching
+	lwz	r6,ACT_THREAD(r6)		; Get the current thread
+	li	r7,TH_FN_OWNED
+	stw	r3,THREAD_FUNNEL_LOCK(r6)	; Set the funnel lock reference
+	stw	r7,THREAD_FUNNEL_STATE(r6)	; Set the funnel state
+	blr
+
+L_enter_funnel_section_slowX:
+	li	r4,lgKillResv			; Killing field
+	stwcx.	r4,0,r4				; Kill reservation

L_enter_funnel_section_slow:
-	mtmsr	r11
-L_enter_funnel_section_slow1:
#endif
-	li	r4,TRUE
-	b	EXT(thread_funnel_set)
+	li	r4,TRUE
+	b	EXT(thread_funnel_set)

/*
- * exit_funnel_section():
+ *		void exit_funnel_section(void)
 *
 */
	.align	5
	.globl	EXT(exit_funnel_section)

LEXT(exit_funnel_section)

+	mfsprg	r6,1				; Get the current activation
+	lwz	r6,ACT_THREAD(r6)		; Get the current thread
+	lwz	r3,THREAD_FUNNEL_LOCK(r6)	; Get the funnel lock
+	mr.	r3,r3				; Check on funnel held
+	beq-	L_exit_funnel_section_ret	;
+
#if !MACH_LDEBUG
-	mfsprg	r6,1				; Get the current activation
-	lwz	r6,ACT_THREAD(r6)		; Get the current thread
-	lwz	r3,THREAD_FUNNEL_LOCK(r6)	; Get the funnel lock
-	mr.	r3,r3				; Check on funnel held
-	beq-	L_exit_funnel_section_ret	;
-	lis	r10,hi16(EXT(kdebug_enable))
-	ori	r10,r10,lo16(EXT(kdebug_enable))
-	lwz	r10,0(r10)
-	mr.	r10,r10
-	bne-	L_exit_funnel_section_slow1	; If set, call the slow path
-	lwz	r7,LOCK_FNL_MUTEX(r3)		; Get the funnel mutex lock
-	mfmsr	r11
-	rlwinm	r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
-	rlwinm	r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
-	rlwinm	r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
-	mtmsr	r10				; Turn off EE
-	isync					; May have messed with fp/vec
-	sync
+	lis	r10,hi16(EXT(kdebug_enable))
+	ori	r10,r10,lo16(EXT(kdebug_enable))
+	lwz	r10,0(r10)
+	mr.	r10,r10
+	bne-	L_exit_funnel_section_slow	; If set, call the slow path
+	lwz	r7,LOCK_FNL_MUTEX(r3)		; Get the funnel mutex lock
+	.globl	EXT(retfsectPatch_isync)
+LEXT(retfsectPatch_isync)
+	isync
+	.globl	EXT(retfsectPatch_eieio)
+LEXT(retfsectPatch_eieio)
+	eieio
+
+	lwz	r5,0(r7)			; Get lock
+	rlwinm.	r4,r5,0,30,31			; Quick check for bail if pending waiter or interlock set
+	bne--	L_exit_funnel_section_slow	; No can get...
+
L_exit_funnel_section_loop:
-	lwarx	r5,0,r7
-	rlwinm.	r4,r5,0,30,31			; Bail if pending waiter or interlock set
-	li	r5,0				; Clear the mutexlock
-	bne-	L_exit_funnel_section_slow
-	stwcx.	r5,0,r7				; Release the funnel mutexlock
-	bne-	L_exit_funnel_section_loop
-	li	r7,0
-	stw	r7,THREAD_FUNNEL_STATE(r6)	; Clear the funnel state
-	stw	r7,THREAD_FUNNEL_LOCK(r6)	; Clear the funnel lock reference
-	mtmsr	r11
-L_exit_funnel_section_ret:
-	blr
+	lwarx	r5,0,r7
+	rlwinm.	r4,r5,0,30,31			; Bail if pending waiter or interlock set
+	li	r5,0				; Clear the mutexlock
+	bne--	L_exit_funnel_section_slowX
+	stwcx.	r5,0,r7				; Release the funnel mutexlock
+	bne--	L_exit_funnel_section_loop
+	li	r7,0
+	stw	r7,THREAD_FUNNEL_STATE(r6)	; Clear the funnel state
+	stw	r7,THREAD_FUNNEL_LOCK(r6)	; Clear the funnel lock reference
+	blr					; Return
+
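The funnel fast paths above amount to an uncontended compare-and-swap on the funnel's mutex word, falling back to thread_funnel_set() whenever tracing is enabled or the lock is contended. A rough, runnable C model of that shape (funnel_word and thread_funnel_set_slow are illustrative stand-ins, not the kernel's API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic unsigned funnel_word;        /* 0 = free, else the owner's activation */

    static bool thread_funnel_set_slow(bool on) /* models the b EXT(thread_funnel_set) tail call */
    {
        printf("slow path: funnel %s\n", on ? "acquire" : "release");
        return true;
    }

    void enter_funnel(unsigned self)
    {
        unsigned expected = 0;
        /* the lwarx/stwcx. loop is an acquire CAS; the isync is the acquire barrier */
        if (!atomic_compare_exchange_strong(&funnel_word, &expected, self))
            thread_funnel_set_slow(true);       /* contended (or tracing on): slow path */
    }

    void exit_funnel(unsigned self)
    {
        unsigned expected = self;
        /* release only when no waiter/interlock bits are set (the rlwinm. test above) */
        if (!atomic_compare_exchange_strong(&funnel_word, &expected, 0))
            thread_funnel_set_slow(false);
    }

    int main(void)
    {
        enter_funnel(1);
        exit_funnel(1);
        return 0;
    }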
r4,0,r4 ; Kill it + L_exit_funnel_section_slow: - mtmsr r11 -L_exit_funnel_section_slow1: #endif - li r4,FALSE - b EXT(thread_funnel_set) + li r4,FALSE + b EXT(thread_funnel_set) +L_exit_funnel_section_ret: + blr + +; +; This is bring up code +; + .align 5 + .globl EXT(condStop) + +LEXT(condStop) + +XcondStop: cmplw r3,r4 ; Check if these are equal + beq-- XcondStop ; Stop here until they are different + blr ; Return. diff --git a/osfmk/ppc/hw_perfmon.c b/osfmk/ppc/hw_perfmon.c new file mode 100644 index 000000000..7cb2d123e --- /dev/null +++ b/osfmk/ppc/hw_perfmon.c @@ -0,0 +1,945 @@ +/* + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include + +decl_simple_lock_data(,hw_perfmon_lock) +static task_t hw_perfmon_owner = TASK_NULL; +static int hw_perfmon_thread_count = 0; + +/* Notes: + * -supervisor/user level filtering is unnecessary because of the way PMCs and MMCRs are context switched + * (can only count user events anyway) + * -marked filtering is unnecssary because each thread has its own virtualized set of PMCs and MMCRs + * -virtual counter PMI is passed up as a breakpoint exception + */ + +int perfmon_init(void) +{ + simple_lock_init(&hw_perfmon_lock, FALSE); + return KERN_SUCCESS; +} + +/* PMC Facility Owner: + * TASK_NULL - no one owns it + * kernel_task - owned by hw_perfmon + * other task - owned by another task + */ + +int perfmon_acquire_facility(task_t task) +{ + kern_return_t retval = KERN_SUCCESS; + + simple_lock(&hw_perfmon_lock); + + if(hw_perfmon_owner==task) { +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_acquire_facility - ACQUIRED: already owner\n"); +#endif + retval = KERN_SUCCESS; + /* already own it */ + } else if(hw_perfmon_owner==TASK_NULL) { /* no one owns it */ + hw_perfmon_owner = task; + hw_perfmon_thread_count = 0; +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_acquire_facility - ACQUIRED: no current owner - made new owner\n"); +#endif + retval = KERN_SUCCESS; + } else { /* someone already owns it */ + if(hw_perfmon_owner==kernel_task) { + if(hw_perfmon_thread_count==0) { /* kernel owns it but no threads using it */ + hw_perfmon_owner = task; + hw_perfmon_thread_count = 0; +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_acquire_facility - ACQUIRED: kernel is current owner but no threads using it\n"); +#endif + retval = KERN_SUCCESS; + } else { +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_acquire_facility - DENIED: kernel is current owner and facility in use\n"); +#endif + retval = KERN_RESOURCE_SHORTAGE; + } + } 
else { /* non-kernel owner */ +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_acquire_facility - DENIED: another active task owns the facility\n"); +#endif + retval = KERN_RESOURCE_SHORTAGE; + } + } + + simple_unlock(&hw_perfmon_lock); + return retval; +} + +int perfmon_release_facility(task_t task) +{ + kern_return_t retval = KERN_SUCCESS; + task_t old_perfmon_owner = hw_perfmon_owner; + + simple_lock(&hw_perfmon_lock); + + if(task!=hw_perfmon_owner) { + retval = KERN_NO_ACCESS; + } else { + if(old_perfmon_owner==kernel_task) { + if(hw_perfmon_thread_count>0) { +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_release_facility - NOT RELEASED: kernel task is owner and has active perfmon threads\n"); +#endif + retval = KERN_NO_ACCESS; + } else { +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_release_facility - RELEASED: kernel task was owner\n"); +#endif + hw_perfmon_owner = TASK_NULL; + retval = KERN_SUCCESS; + } + } else { +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_release_facility - RELEASED: user task was owner\n"); +#endif + hw_perfmon_owner = TASK_NULL; + retval = KERN_SUCCESS; + } + } + + simple_unlock(&hw_perfmon_lock); + return retval; +} + +int perfmon_enable(thread_act_t thr_act) +{ + struct savearea *sv = thr_act->mact.pcb; + kern_return_t kr; + kern_return_t retval = KERN_SUCCESS; + int curPMC; + + if(thr_act->mact.specFlags & perfMonitor) { + return KERN_SUCCESS; /* already enabled */ + } else if(perfmon_acquire_facility(kernel_task)!=KERN_SUCCESS) { + return KERN_RESOURCE_SHORTAGE; /* facility is in use */ + } else { /* kernel_task owns the faciltity and this thread has not yet been counted */ + simple_lock(&hw_perfmon_lock); + hw_perfmon_thread_count++; + simple_unlock(&hw_perfmon_lock); + } + + sv->save_mmcr1 = 0; + sv->save_mmcr2 = 0; + + switch(machine_slot[0].cpu_subtype) { + case CPU_SUBTYPE_POWERPC_750: + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + { + ppc32_mmcr0_reg_t mmcr0_reg; + + mmcr0_reg.value = 0; + mmcr0_reg.field.disable_counters_always = TRUE; + mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */ + sv->save_mmcr0 = mmcr0_reg.value; + } + break; + case CPU_SUBTYPE_POWERPC_970: + { + ppc64_mmcr0_reg_t mmcr0_reg; + + mmcr0_reg.value = 0; + mmcr0_reg.field.disable_counters_always = TRUE; + mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */ + sv->save_mmcr0 = mmcr0_reg.value; + } + break; + default: + retval = KERN_FAILURE; + break; + } + + if(retval==KERN_SUCCESS) { + for(curPMC=0; curPMCsave_pmc[curPMC] = 0; + thr_act->mact.pmcovfl[curPMC] = 0; + } + thr_act->mact.perfmonFlags = 0; + thr_act->mact.specFlags |= perfMonitor; /* enable perf monitor facility for this thread */ + if(thr_act==current_act()) { + per_proc_info[cpu_number()].spcFlags |= perfMonitor; /* update per_proc */ + } + } + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_enable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2); +#endif + + return retval; +} + +int perfmon_disable(thread_act_t thr_act) +{ + struct savearea *sv = thr_act->mact.pcb; + int curPMC; + + if(!(thr_act->mact.specFlags & perfMonitor)) { + return KERN_NO_ACCESS; /* not enabled */ + } else { + simple_lock(&hw_perfmon_lock); + hw_perfmon_thread_count--; + simple_unlock(&hw_perfmon_lock); + perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */ + } + + thr_act->mact.specFlags &= ~perfMonitor; /* disable perf monitor facility for this thread */ + if(thr_act==current_act()) { + per_proc_info[cpu_number()].spcFlags &= 
~perfMonitor; /* update per_proc */ + } + sv->save_mmcr0 = 0; + sv->save_mmcr1 = 0; + sv->save_mmcr2 = 0; + + for(curPMC=0; curPMCsave_pmc[curPMC] = 0; + thr_act->mact.pmcovfl[curPMC] = 0; + thr_act->mact.perfmonFlags = 0; + } + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2); +#endif + + return KERN_SUCCESS; +} + +int perfmon_clear_counters(thread_act_t thr_act) +{ + struct savearea *sv = thr_act->mact.pcb; + int curPMC; + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_clear_counters (CPU%d)\n", cpu_number()); +#endif + + /* clear thread copy */ + for(curPMC=0; curPMCsave_pmc[curPMC] = 0; + thr_act->mact.pmcovfl[curPMC] = 0; + } + + return KERN_SUCCESS; +} + +int perfmon_write_counters(thread_act_t thr_act, uint64_t *pmcs) +{ + struct savearea *sv = thr_act->mact.pcb; + int curPMC; + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_write_counters (CPU%d): mmcr0 = %016llX, pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]); +#endif + + /* update thread copy */ + for(curPMC=0; curPMCsave_pmc[curPMC] = pmcs[curPMC] & 0x7FFFFFFF; + thr_act->mact.pmcovfl[curPMC] = (pmcs[curPMC]>>31) & 0xFFFFFFFF; + } + + return KERN_SUCCESS; +} + +int perfmon_read_counters(thread_act_t thr_act, uint64_t *pmcs) +{ + struct savearea *sv = thr_act->mact.pcb; + int curPMC; + + /* retrieve from thread copy */ + for(curPMC=0; curPMCmact.pmcovfl[curPMC]; + pmcs[curPMC] = pmcs[curPMC]<<31; + pmcs[curPMC] |= (sv->save_pmc[curPMC] & 0x7FFFFFFF); + } + + /* zero any unused counters on this platform */ + switch(machine_slot[0].cpu_subtype) { + case CPU_SUBTYPE_POWERPC_750: + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + pmcs[PMC_7] = 0; + pmcs[PMC_8] = 0; + break; + default: + break; + } + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_read_counters (CPU%d): mmcr0 = %016llX pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]); +#endif + + return KERN_SUCCESS; +} + +int perfmon_start_counters(thread_act_t thr_act) +{ + struct savearea *sv = thr_act->mact.pcb; + kern_return_t retval = KERN_SUCCESS; + + switch(machine_slot[0].cpu_subtype) { + case CPU_SUBTYPE_POWERPC_750: + case CPU_SUBTYPE_POWERPC_7400: + { + ppc32_mmcr0_reg_t mmcr0_reg; + mmcr0_reg.value = sv->save_mmcr0; + mmcr0_reg.field.disable_counters_always = FALSE; + /* XXXXX PMI broken on 750, 750CX, 750FX, 7400 and 7410 v1.2 and earlier XXXXX */ + mmcr0_reg.field.on_pmi_stop_counting = FALSE; + mmcr0_reg.field.enable_pmi = FALSE; + mmcr0_reg.field.enable_pmi_on_pmc1 = FALSE; + mmcr0_reg.field.enable_pmi_on_pmcn = FALSE; + sv->save_mmcr0 = mmcr0_reg.value; + } + break; + case CPU_SUBTYPE_POWERPC_7450: + { + ppc32_mmcr0_reg_t mmcr0_reg; + mmcr0_reg.value = sv->save_mmcr0; + mmcr0_reg.field.disable_counters_always = FALSE; + mmcr0_reg.field.on_pmi_stop_counting = TRUE; + mmcr0_reg.field.enable_pmi = TRUE; + mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE; + mmcr0_reg.field.enable_pmi_on_pmcn = TRUE; + sv->save_mmcr0 = mmcr0_reg.value; + } + break; + case CPU_SUBTYPE_POWERPC_970: + { + ppc64_mmcr0_reg_t mmcr0_reg; + mmcr0_reg.value = sv->save_mmcr0; + mmcr0_reg.field.disable_counters_always = FALSE; + mmcr0_reg.field.on_pmi_stop_counting = TRUE; + 
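+			/* counters freeze on overflow and a PMI is raised; the PMI is fielded by perfmon_handle_pmi() */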
mmcr0_reg.field.enable_pmi = TRUE; + mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE; + mmcr0_reg.field.enable_pmi_on_pmcn = TRUE; + sv->save_mmcr0 = mmcr0_reg.value; + } + break; + default: + retval = KERN_FAILURE; + break; + } + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_start_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2); +#endif + + return retval; +} + +int perfmon_stop_counters(thread_act_t thr_act) +{ + struct savearea *sv = thr_act->mact.pcb; + kern_return_t retval = KERN_SUCCESS; + + switch(machine_slot[0].cpu_subtype) { + case CPU_SUBTYPE_POWERPC_750: + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + { + ppc32_mmcr0_reg_t mmcr0_reg; + mmcr0_reg.value = sv->save_mmcr0; + mmcr0_reg.field.disable_counters_always = TRUE; + sv->save_mmcr0 = mmcr0_reg.value; + } + break; + case CPU_SUBTYPE_POWERPC_970: + { + ppc64_mmcr0_reg_t mmcr0_reg; + mmcr0_reg.value = sv->save_mmcr0; + mmcr0_reg.field.disable_counters_always = TRUE; + sv->save_mmcr0 = mmcr0_reg.value; + } + break; + default: + retval = KERN_FAILURE; + break; + } + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_stop_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2); +#endif + + return retval; +} + +int perfmon_set_event(thread_act_t thr_act, int pmc, int event) +{ + struct savearea *sv = thr_act->mact.pcb; + kern_return_t retval = KERN_SUCCESS; + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_set_event b4 (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2); +#endif + + switch(machine_slot[0].cpu_subtype) { + case CPU_SUBTYPE_POWERPC_750: + case CPU_SUBTYPE_POWERPC_7400: + { + ppc32_mmcr0_reg_t mmcr0_reg; + ppc32_mmcr1_reg_t mmcr1_reg; + + mmcr0_reg.value = sv->save_mmcr0; + mmcr1_reg.value = sv->save_mmcr1; + + switch(pmc) { + case PMC_1: + mmcr0_reg.field.pmc1_event = event; + sv->save_mmcr0 = mmcr0_reg.value; + break; + case PMC_2: + mmcr0_reg.field.pmc2_event = event; + sv->save_mmcr0 = mmcr0_reg.value; + break; + case PMC_3: + mmcr1_reg.field.pmc3_event = event; + sv->save_mmcr1 = mmcr1_reg.value; + break; + case PMC_4: + mmcr1_reg.field.pmc4_event = event; + sv->save_mmcr1 = mmcr1_reg.value; + break; + default: + retval = KERN_FAILURE; + break; + } + } + break; + case CPU_SUBTYPE_POWERPC_7450: + { + ppc32_mmcr0_reg_t mmcr0_reg; + ppc32_mmcr1_reg_t mmcr1_reg; + + mmcr0_reg.value = sv->save_mmcr0; + mmcr1_reg.value = sv->save_mmcr1; + + switch(pmc) { + case PMC_1: + mmcr0_reg.field.pmc1_event = event; + sv->save_mmcr0 = mmcr0_reg.value; + break; + case PMC_2: + mmcr0_reg.field.pmc2_event = event; + sv->save_mmcr0 = mmcr0_reg.value; + break; + case PMC_3: + mmcr1_reg.field.pmc3_event = event; + sv->save_mmcr1 = mmcr1_reg.value; + break; + case PMC_4: + mmcr1_reg.field.pmc4_event = event; + sv->save_mmcr1 = mmcr1_reg.value; + break; + case PMC_5: + mmcr1_reg.field.pmc5_event = event; + sv->save_mmcr1 = mmcr1_reg.value; + break; + case PMC_6: + mmcr1_reg.field.pmc6_event = event; + sv->save_mmcr1 = mmcr1_reg.value; + break; + default: + retval = KERN_FAILURE; + break; + } + } + break; + case CPU_SUBTYPE_POWERPC_970: + { + ppc64_mmcr0_reg_t mmcr0_reg; + ppc64_mmcr1_reg_t mmcr1_reg; + + mmcr0_reg.value = sv->save_mmcr0; + mmcr1_reg.value = sv->save_mmcr1; + + switch(pmc) { + case PMC_1: + mmcr0_reg.field.pmc1_event = event; + sv->save_mmcr0 = mmcr0_reg.value; + break; + case PMC_2: + 
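+				/* PMC1/PMC2 event selects live in MMCR0; PMC3-PMC8 selects live in MMCR1 */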
mmcr0_reg.field.pmc2_event = event; + sv->save_mmcr0 = mmcr0_reg.value; + break; + case PMC_3: + mmcr1_reg.field.pmc3_event = event; + sv->save_mmcr1 = mmcr1_reg.value; + break; + case PMC_4: + mmcr1_reg.field.pmc4_event = event; + sv->save_mmcr1 = mmcr1_reg.value; + break; + case PMC_5: + mmcr1_reg.field.pmc5_event = event; + sv->save_mmcr1 = mmcr1_reg.value; + break; + case PMC_6: + mmcr1_reg.field.pmc6_event = event; + sv->save_mmcr1 = mmcr1_reg.value; + break; + case PMC_7: + mmcr1_reg.field.pmc7_event = event; + sv->save_mmcr1 = mmcr1_reg.value; + break; + case PMC_8: + mmcr1_reg.field.pmc8_event = event; + sv->save_mmcr1 = mmcr1_reg.value; + break; + default: + retval = KERN_FAILURE; + break; + } + } + break; + default: + retval = KERN_FAILURE; + break; + } + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_set_event (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2); +#endif + + return retval; +} + +int perfmon_set_event_func(thread_act_t thr_act, uint32_t f) +{ + struct savearea *sv = thr_act->mact.pcb; + kern_return_t retval = KERN_SUCCESS; + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_set_event_func - func=%s\n", + f==PPC_PERFMON_FUNC_FPU ? "FUNC" : + f==PPC_PERFMON_FUNC_ISU ? "ISU" : + f==PPC_PERFMON_FUNC_IFU ? "IFU" : + f==PPC_PERFMON_FUNC_VMX ? "VMX" : + f==PPC_PERFMON_FUNC_IDU ? "IDU" : + f==PPC_PERFMON_FUNC_GPS ? "GPS" : + f==PPC_PERFMON_FUNC_LSU0 ? "LSU0" : + f==PPC_PERFMON_FUNC_LSU1A ? "LSU1A" : + f==PPC_PERFMON_FUNC_LSU1B ? "LSU1B" : + f==PPC_PERFMON_FUNC_SPECA ? "SPECA" : + f==PPC_PERFMON_FUNC_SPECB ? "SPECB" : + f==PPC_PERFMON_FUNC_SPECC ? "SPECC" : + "UNKNOWN"); +#endif /* HWPERFMON_DEBUG */ + + switch(machine_slot[0].cpu_subtype) { + case CPU_SUBTYPE_POWERPC_750: + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + retval = KERN_FAILURE; /* event functional unit only applies to 970 */ + break; + case CPU_SUBTYPE_POWERPC_970: + { + ppc64_mmcr1_reg_t mmcr1_reg; + ppc_func_unit_t func_unit; + + func_unit.value = f; + mmcr1_reg.value = sv->save_mmcr1; + + mmcr1_reg.field.ttm0_select = func_unit.field.TTM0SEL; + mmcr1_reg.field.ttm1_select = func_unit.field.TTM1SEL; + mmcr1_reg.field.ttm2_select = 0; /* not used */ + mmcr1_reg.field.ttm3_select = func_unit.field.TTM3SEL; + mmcr1_reg.field.speculative_event = func_unit.field.SPECSEL; + mmcr1_reg.field.lane0_select = func_unit.field.TD_CP_DBGxSEL; + mmcr1_reg.field.lane1_select = func_unit.field.TD_CP_DBGxSEL; + mmcr1_reg.field.lane2_select = func_unit.field.TD_CP_DBGxSEL; + mmcr1_reg.field.lane3_select = func_unit.field.TD_CP_DBGxSEL; + + sv->save_mmcr1 = mmcr1_reg.value; + } + break; + default: + retval = KERN_FAILURE; + break; + } + + return retval; +} + +int perfmon_set_threshold(thread_act_t thr_act, int threshold) +{ + struct savearea *sv = thr_act->mact.pcb; + kern_return_t retval = KERN_SUCCESS; + + switch(machine_slot[0].cpu_subtype) { + case CPU_SUBTYPE_POWERPC_750: + { + ppc32_mmcr0_reg_t mmcr0_reg; + + mmcr0_reg.value = sv->save_mmcr0; + + if(threshold>63) { /* no multiplier on 750 */ + int newThreshold = 63; +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold); +#endif + threshold = newThreshold; + } + mmcr0_reg.field.threshold_value = threshold; + + sv->save_mmcr0 = mmcr0_reg.value; + } + break; + + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + { + ppc32_mmcr0_reg_t mmcr0_reg; + 
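+			/* 7400/7450: the 6-bit threshold lives in MMCR0; the 2x/32x multiplier lives in MMCR2 */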
ppc32_mmcr2_reg_t mmcr2_reg; + + mmcr0_reg.value = sv->save_mmcr0; + mmcr2_reg.value = sv->save_mmcr2; + + if(threshold<=(2*63)) { /* 2x multiplier */ + if(threshold%2 != 0) { + int newThreshold = 2*(threshold/2); +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 2x multiplier - using threshold of %d instead\n", threshold, newThreshold); +#endif + threshold = newThreshold; + } + mmcr2_reg.field.threshold_multiplier = 0; + } else if(threshold<=(32*63)) { /* 32x multiplier */ + if(threshold%32 != 0) { + int newThreshold = 32*(threshold/32); +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 32x multiplier - using threshold of %d instead\n", threshold, newThreshold); +#endif + threshold = newThreshold; + } + mmcr2_reg.field.threshold_multiplier = 1; + } else { + int newThreshold = 32*63; +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold); +#endif + threshold = newThreshold; + mmcr2_reg.field.threshold_multiplier = 1; + } + mmcr0_reg.field.threshold_value = threshold; + + sv->save_mmcr0 = mmcr0_reg.value; + sv->save_mmcr2 = mmcr2_reg.value; + + } + break; + case CPU_SUBTYPE_POWERPC_970: + { + ppc64_mmcr0_reg_t mmcr0_reg; + + mmcr0_reg.value = sv->save_mmcr0; + + if(threshold>63) { /* multiplier is in HID1 on 970 - not context switching HID1 so always 1x */ + int newThreshold = 63; +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold); +#endif + threshold = newThreshold; + } + mmcr0_reg.field.threshold_value = threshold; + + sv->save_mmcr0 = mmcr0_reg.value; + } + break; + default: + retval = KERN_FAILURE; + break; + } + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_set_threshold - threshold=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", threshold, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2); +#endif + + return retval; +} + +int perfmon_set_tbsel(thread_act_t thr_act, int tbsel) +{ + struct savearea *sv = thr_act->mact.pcb; + kern_return_t retval = KERN_SUCCESS; + + switch(machine_slot[0].cpu_subtype) { + case CPU_SUBTYPE_POWERPC_750: + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + { + ppc32_mmcr0_reg_t mmcr0_reg; + + mmcr0_reg.value = sv->save_mmcr0; + switch(tbsel) { + case 0x0: + case 0x1: + case 0x2: + case 0x3: + mmcr0_reg.field.timebase_bit_selector = tbsel; + break; + default: + retval = KERN_FAILURE; + } + sv->save_mmcr0 = mmcr0_reg.value; + } + break; + case CPU_SUBTYPE_POWERPC_970: + { + ppc64_mmcr0_reg_t mmcr0_reg; + + mmcr0_reg.value = sv->save_mmcr0; + switch(tbsel) { + case 0x0: + case 0x1: + case 0x2: + case 0x3: + mmcr0_reg.field.timebase_bit_selector = tbsel; + break; + default: + retval = KERN_FAILURE; + } + sv->save_mmcr0 = mmcr0_reg.value; + } + break; + default: + retval = KERN_FAILURE; + break; + } + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_set_tbsel - tbsel=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", tbsel, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2); +#endif + + return retval; +} + +int perfmon_control(struct savearea *ssp) +{ + mach_port_t thr_port = CAST_DOWN(mach_port_t, ssp->save_r3); + int action = (int)ssp->save_r4; + int pmc = (int)ssp->save_r5; + int val = (int)ssp->save_r6; + uint64_t *usr_pmcs_p = CAST_DOWN(uint64_t *, ssp->save_r7); + thread_act_t thr_act = THREAD_NULL; + 
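+	/* arguments arrive in r3-r7: thread port, action, PMC number,
+	 * event/threshold/tbsel/count, and a user buffer for
+	 * uint64_t[MAX_CPUPMC_COUNT] (see the usage notes in hw_perfmon.h) */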
uint64_t kern_pmcs[MAX_CPUPMC_COUNT]; + kern_return_t retval = KERN_SUCCESS; + int error; + boolean_t oldlevel; + + thr_act = (thread_act_t) port_name_to_act(thr_port); // convert user space thread port name to a thread_act_t + if(!thr_act) { + ssp->save_r3 = KERN_INVALID_ARGUMENT; + return 1; /* Return and check for ASTs... */ + } + + if(thr_act!=current_act()) { + thread_suspend(thr_act); + } + +#ifdef HWPERFMON_DEBUG + // kprintf("perfmon_control: action=0x%x pmc=%d val=%d pmcs=0x%x\n", action, pmc, val, usr_pmcs_p); +#endif + + oldlevel = ml_set_interrupts_enabled(FALSE); + + /* individual actions which do not require perfmon facility to be enabled */ + if(action==PPC_PERFMON_DISABLE) { + retval = perfmon_disable(thr_act); + } + else if(action==PPC_PERFMON_ENABLE) { + retval = perfmon_enable(thr_act); + } + + else { /* individual actions which do require perfmon facility to be enabled */ + if(!(thr_act->mact.specFlags & perfMonitor)) { /* perfmon not enabled */ +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_control: ERROR - perfmon not enabled for this thread\n"); +#endif + retval = KERN_NO_ACCESS; + goto perfmon_return; + } + + if(action==PPC_PERFMON_SET_EVENT) { + retval = perfmon_set_event(thr_act, pmc, val); + } + else if(action==PPC_PERFMON_SET_THRESHOLD) { + retval = perfmon_set_threshold(thr_act, val); + } + else if(action==PPC_PERFMON_SET_TBSEL) { + retval = perfmon_set_tbsel(thr_act, val); + } + else if(action==PPC_PERFMON_SET_EVENT_FUNC) { + retval = perfmon_set_event_func(thr_act, val); + } + else if(action==PPC_PERFMON_ENABLE_PMI_BRKPT) { + if(val) { + thr_act->mact.perfmonFlags |= PERFMONFLAG_BREAKPOINT_FOR_PMI; + } else { + thr_act->mact.perfmonFlags &= ~PERFMONFLAG_BREAKPOINT_FOR_PMI; + } + retval = KERN_SUCCESS; + } + + /* combinable actions */ + else { + if(action & PPC_PERFMON_STOP_COUNTERS) { + error = perfmon_stop_counters(thr_act); + if(error!=KERN_SUCCESS) { + retval = error; + goto perfmon_return; + } + } + if(action & PPC_PERFMON_CLEAR_COUNTERS) { + error = perfmon_clear_counters(thr_act); + if(error!=KERN_SUCCESS) { + retval = error; + goto perfmon_return; + } + } + if(action & PPC_PERFMON_WRITE_COUNTERS) { + if(error = copyin((void *)usr_pmcs_p, (void *)kern_pmcs, MAX_CPUPMC_COUNT*sizeof(uint64_t))) { + retval = error; + goto perfmon_return; + } + error = perfmon_write_counters(thr_act, kern_pmcs); + if(error!=KERN_SUCCESS) { + retval = error; + goto perfmon_return; + } + } + if(action & PPC_PERFMON_READ_COUNTERS) { + error = perfmon_read_counters(thr_act, kern_pmcs); + if(error!=KERN_SUCCESS) { + retval = error; + goto perfmon_return; + } + if(error = copyout((void *)kern_pmcs, (void *)usr_pmcs_p, MAX_CPUPMC_COUNT*sizeof(uint64_t))) { + retval = error; + goto perfmon_return; + } + } + if(action & PPC_PERFMON_START_COUNTERS) { + error = perfmon_start_counters(thr_act); + if(error!=KERN_SUCCESS) { + retval = error; + goto perfmon_return; + } + } + } + } + + perfmon_return: + ml_set_interrupts_enabled(oldlevel); + +#ifdef HWPERFMON_DEBUG + kprintf("perfmon_control (CPU%d): mmcr0 = %016llX, pmc1=%X pmc2=%X pmc3=%X pmc4=%X pmc5=%X pmc6=%X pmc7=%X pmc8=%X\n", cpu_number(), ssp->save_mmcr0, ssp->save_pmc[PMC_1], ssp->save_pmc[PMC_2], ssp->save_pmc[PMC_3], ssp->save_pmc[PMC_4], ssp->save_pmc[PMC_5], ssp->save_pmc[PMC_6], ssp->save_pmc[PMC_7], ssp->save_pmc[PMC_8]); +#endif + + if(thr_act!=current_act()) { + thread_resume(thr_act); + } + +#ifdef HWPERFMON_DEBUG + if(retval!=KERN_SUCCESS) { + kprintf("perfmon_control - ERROR: retval=%d\n", retval); + } +#endif /* 
HWPERFMON_DEBUG */
+
+	ssp->save_r3 = retval;
+	return 1; /* Return and check for ASTs... */
+}
+
+int perfmon_handle_pmi(struct savearea *ssp)
+{
+	int curPMC;
+	kern_return_t retval = KERN_SUCCESS;
+	thread_act_t thr_act = current_act();
+
+#ifdef HWPERFMON_DEBUG
+	kprintf("perfmon_handle_pmi: got rupt\n");
+#endif
+
+	if(!(thr_act->mact.specFlags & perfMonitor)) { /* perfmon not enabled */
+#ifdef HWPERFMON_DEBUG
+		kprintf("perfmon_handle_pmi: ERROR - perfmon not enabled for this thread\n");
+#endif
+		return KERN_FAILURE;
+	}
+
+	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
+		if(thr_act->mact.pcb->save_pmc[curPMC] & 0x80000000) {
+			if(thr_act->mact.pmcovfl[curPMC]==0xFFFFFFFF && (thr_act->mact.perfmonFlags & PERFMONFLAG_BREAKPOINT_FOR_PMI)) {
+				doexception(EXC_BREAKPOINT, EXC_PPC_PERFMON, (unsigned int)ssp->save_srr0); // pass up a breakpoint exception
+				return KERN_SUCCESS;
+			} else {
+				thr_act->mact.pmcovfl[curPMC]++;
+				thr_act->mact.pcb->save_pmc[curPMC] = 0;
+			}
+		}
+	}
+
+	if(retval==KERN_SUCCESS) {
+		switch(machine_slot[0].cpu_subtype) {
+			case CPU_SUBTYPE_POWERPC_7450:
+			{
+				ppc32_mmcr0_reg_t mmcr0_reg;
+
+				mmcr0_reg.value = thr_act->mact.pcb->save_mmcr0;
+				mmcr0_reg.field.disable_counters_always = FALSE;
+				mmcr0_reg.field.enable_pmi = TRUE;
+				thr_act->mact.pcb->save_mmcr0 = mmcr0_reg.value;
+			}
+				retval = KERN_SUCCESS;
+				break;
+			case CPU_SUBTYPE_POWERPC_970:
+			{
+				ppc64_mmcr0_reg_t mmcr0_reg;
+
+				mmcr0_reg.value = thr_act->mact.pcb->save_mmcr0;
+				mmcr0_reg.field.disable_counters_always = FALSE;
+				mmcr0_reg.field.enable_pmi = TRUE;
+				thr_act->mact.pcb->save_mmcr0 = mmcr0_reg.value;
+			}
+				retval = KERN_SUCCESS;
+				break;
+			default:
+				retval = KERN_FAILURE;
+				break;
+		}
+	}
+
+	return retval;
+}
diff --git a/osfmk/ppc/hw_perfmon.h b/osfmk/ppc/hw_perfmon.h
new file mode 100644
index 000000000..1f70b3266
--- /dev/null
+++ b/osfmk/ppc/hw_perfmon.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+#ifndef _HW_PERFMON_H_
+#define _HW_PERFMON_H_
+
+#ifndef __ppc__
+#error This file is only useful on PowerPC.
+#endif + +#define MAX_CPUPMC_COUNT 8 + +#define PMC_1 0 +#define PMC_2 1 +#define PMC_3 2 +#define PMC_4 3 +#define PMC_5 4 +#define PMC_6 5 +#define PMC_7 6 +#define PMC_8 7 + +/* these actions can be combined and simultaneously performed with a single call to perfmon_control() */ +typedef enum { + PPC_PERFMON_CLEAR_COUNTERS = 0x0002, + PPC_PERFMON_START_COUNTERS = 0x0004, + PPC_PERFMON_STOP_COUNTERS = 0x0008, + PPC_PERFMON_READ_COUNTERS = 0x0010, + PPC_PERFMON_WRITE_COUNTERS = 0x0020 +} perfmon_multi_action_t; + +/* these actions can not be combined and each requires a separate call to perfmon_control() */ +typedef enum { + PPC_PERFMON_ENABLE = 0x00010000, + PPC_PERFMON_DISABLE = 0x00020000, + PPC_PERFMON_SET_EVENT = 0x00030000, + PPC_PERFMON_SET_THRESHOLD = 0x00040000, + PPC_PERFMON_SET_TBSEL = 0x00050000, + PPC_PERFMON_SET_EVENT_FUNC = 0x00060000, + PPC_PERFMON_ENABLE_PMI_BRKPT = 0x00070000 +} perfmon_single_action_t; + +/* used to select byte lane and speculative events (currently 970 only) */ +typedef enum { /* SPECSEL[0:1] TD_CP_DBGxSEL[0:1] TTM3SEL[0:1] TTM1SEL[0:1] TTM0SEL[0:1] */ + PPC_PERFMON_FUNC_FPU = 0, /* 00 00 00 00 00 */ + PPC_PERFMON_FUNC_ISU = 1, /* 00 00 00 00 01 */ + PPC_PERFMON_FUNC_IFU = 2, /* 00 00 00 00 10 */ + PPC_PERFMON_FUNC_VMX = 3, /* 00 00 00 00 11 */ + PPC_PERFMON_FUNC_IDU = 64, /* 00 01 00 00 00 */ + PPC_PERFMON_FUNC_GPS = 76, /* 00 01 00 11 00 */ + PPC_PERFMON_FUNC_LSU0 = 128, /* 00 10 00 00 00 */ + PPC_PERFMON_FUNC_LSU1A = 192, /* 00 11 00 00 00 */ + PPC_PERFMON_FUNC_LSU1B = 240, /* 00 11 11 00 00 */ + PPC_PERFMON_FUNC_SPECA = 256, /* 01 00 00 00 00 */ + PPC_PERFMON_FUNC_SPECB = 512, /* 10 00 00 00 00 */ + PPC_PERFMON_FUNC_SPECC = 768, /* 11 00 00 00 00 */ +} perfmon_functional_unit_t; + +#ifdef MACH_KERNEL_PRIVATE +int perfmon_acquire_facility(task_t task); +int perfmon_release_facility(task_t task); + +extern int perfmon_disable(thread_act_t thr_act); +extern int perfmon_init(void); +extern int perfmon_control(struct savearea *save); +extern int perfmon_handle_pmi(struct savearea *ssp); + +/* perfmonFlags */ +#define PERFMONFLAG_BREAKPOINT_FOR_PMI 0x1 + +#endif /* MACH_KERNEL_PRIVATE */ + +/* + * From user space: + * + * int perfmon_control(thread_t thread, perfmon_action_t action, int pmc, u_int32_t val, u_int64_t *pmcs); + * + * r3: thread + * r4: action + * r5: pmc + * r6: event/threshold/tbsel/count + * r7: pointer to space for PMC counts: uint64_t[MAX_CPUPMC_COUNT] + * + * perfmon_control(thread, PPC_PERFMON_CLEAR_COUNTERS, 0, 0, NULL); + * perfmon_control(thread, PPC_PERFMON_START_COUNTERS, 0, 0, NULL); + * perfmon_control(thread, PPC_PERFMON_STOP_COUNTERS, 0, 0, NULL); + * perfmon_control(thread, PPC_PERFMON_READ_COUNTERS, 0, 0, uint64_t *pmcs); + * perfmon_control(thread, PPC_PERFMON_WRITE_COUNTERS, 0, 0, uint64_t *pmcs); + * perfmon_control(thread, PPC_PERFMON_ENABLE, 0, 0, NULL); + * perfmon_control(thread, PPC_PERFMON_DISABLE, 0, 0, NULL); + * perfmon_control(thread, PPC_PERFMON_SET_EVENT, int pmc, int event, NULL); + * perfmon_control(thread, PPC_PERFMON_SET_THRESHOLD, 0, int threshold, NULL); + * perfmon_control(thread, PPC_PERFMON_SET_TBSEL, 0, int tbsel, NULL); + * perfmon_control(thread, PPC_PERFMON_SET_EVENT_FUNC, 0, perfmon_functional_unit_t func, NULL); + * perfmon_control(thread, PPC_PERFMON_ENABLE_PMI_BRKPT, 0, boolean_t enable, NULL); + * + */ + +#endif /* _HW_PERFMON_H_ */ diff --git a/osfmk/ppc/hw_perfmon_mmcr.h b/osfmk/ppc/hw_perfmon_mmcr.h new file mode 100644 index 000000000..13d636dcd --- /dev/null +++ 
b/osfmk/ppc/hw_perfmon_mmcr.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _HW_PERFMON_MMCR_H_ +#define _HW_PERFMON_MMCR_H_ + +#ifndef __ppc__ +#error This file is only useful on PowerPC. +#endif + +typedef struct { + uint32_t disable_counters_always : 1; /* 0: disable counters */ + uint32_t disable_counters_supervisor : 1; /* 1: disable counters (supervisor) */ + uint32_t disable_counters_user : 1; /* 2: disable counters (user) */ + uint32_t disable_counters_marked : 1; /* 3: disable counters (marked bit == 1) */ + uint32_t disable_counters_unmarked : 1; /* 4: disable counters (marked bit == 0) */ + uint32_t enable_pmi : 1; /* 5: performance monitor interrupt enable */ + uint32_t on_pmi_stop_counting : 1; /* 6: disable counters (pmi) */ + uint32_t timebase_bit_selector : 2; /* 7-8: TBL bit for TB events */ + uint32_t enable_timebase_pmi : 1; /* 9: enable pmi on TBL bit transition */ + uint32_t threshold_value : 6; /* 10-15: threshold value */ + uint32_t enable_pmi_on_pmc1 : 1; /* 16: enable pmi on pmc1 overflow */ + uint32_t enable_pmi_on_pmcn : 1; /* 17: enable pmi on any pmc except pmc1 overflow */ + uint32_t enable_pmi_trigger : 1; /* 18: enable triggering of pmcn by pmc1 overflow */ + uint32_t pmc1_event : 7; /* 19-25: pmc1 event select */ + uint32_t pmc2_event : 6; /* 26-31: pmc2 event select */ +} ppc32_mmcr0_bits_t; + +typedef union { + uint32_t value; + ppc32_mmcr0_bits_t field; +} ppc32_mmcr0_reg_t; + +typedef struct { + uint32_t pmc3_event : 5; + uint32_t pmc4_event : 5; + uint32_t pmc5_event : 5; + uint32_t pmc6_event : 6; + uint32_t /*reserved*/ : 11; +} ppc32_mmcr1_bits_t; + +typedef union { + uint32_t value; + ppc32_mmcr1_bits_t field; +} ppc32_mmcr1_reg_t; + +typedef struct { + uint32_t threshold_multiplier : 1; + uint32_t /*reserved*/ : 31; +} ppc32_mmcr2_bits_t; + +typedef union { + uint32_t value; + ppc32_mmcr2_bits_t field; +} ppc32_mmcr2_reg_t; + +typedef struct { + uint32_t /* reserved */ : 32; /* 0-31: reserved */ + uint32_t disable_counters_always : 1; /* 32: disable counters */ + uint32_t disable_counters_supervisor : 1; /* 33: disable counters (supervisor) */ + uint32_t disable_counters_user : 1; /* 34: disable counters (user) */ + uint32_t disable_counters_marked : 1; /* 35: disable counters (marked bit == 1) */ + uint32_t disable_counters_unmarked : 1; /* 36: disable counters (marked bit == 0) */ + uint32_t enable_pmi : 1; /* 37: performance monitor interrupt enable */ + uint32_t on_pmi_stop_counting : 1; /* 38: disable 
counters (pmi) */ + uint32_t timebase_bit_selector : 2; /* 39-40: TBL bit for timebase events */ + uint32_t enable_timebase_pmi : 1; /* 41: enable pmi on TBL bit transition */ + uint32_t threshold_value : 6; /* 42-47: threshold value */ + uint32_t enable_pmi_on_pmc1 : 1; /* 48: enable pmi on pmc1 overflow */ + uint32_t enable_pmi_on_pmcn : 1; /* 49: enable pmi on any pmc except pmc1 overflow */ + uint32_t enable_pmi_trigger : 1; /* 50: enable triggering of pmcn by pmc1 overflow */ + uint32_t pmc1_event : 5; /* 51-55: pmc1 event select */ + uint32_t perfmon_event_occurred : 1; /* 56: performance monitor event has occurred */ + uint32_t /* reserved */ : 1; /* 57: reserved */ + uint32_t pmc2_event : 5; /* 58-62: pmc2 event select */ + uint32_t disable_counters_hypervisor : 1; /* 63: disable counters (hypervisor) */ +} ppc64_mmcr0_bits_t; + +typedef union { + uint64_t value; + ppc64_mmcr0_bits_t field; +} ppc64_mmcr0_reg_t; + +typedef struct { + uint32_t ttm0_select : 2; /* 0-1: FPU/ISU/IFU/VMX unit select */ + uint32_t /* reserved */ : 1; /* 2: reserved */ + uint32_t ttm1_select : 2; /* 3-4: IDU/ISU/ISU unit select */ + uint32_t /* reserved */ : 1; /* 5: reserved */ + uint32_t ttm2_select : 2; /* 6-7: IFU/LSU0 unit select */ + uint32_t /* reserved */ : 1; /* 8: reserved */ + uint32_t ttm3_select : 2; /* 9-10: LSU1 select */ + uint32_t /* reserved */ : 1; /* 11: reserved */ + uint32_t lane0_select : 2; /* 12-13: Byte lane 0 unit select (TD_CP_DBG0SEL) */ + uint32_t lane1_select : 2; /* 14-15: Byte lane 1 unit select (TD_CP_DBG1SEL) */ + uint32_t lane2_select : 2; /* 16-17: Byte lane 2 unit select (TD_CP_DBG2SEL) */ + uint32_t lane3_select : 2; /* 18-19: Byte lane 3 unit select (TD_CP_DBG3SEL) */ + uint32_t /* reserved */ : 4; /* 20-23: reserved */ + uint32_t pmc1_adder_lane_select : 1; /* 24: PMC1 Event Adder Lane Select (PMC1_ADDER_SELECT) */ + uint32_t pmc2_adder_lane_select : 1; /* 25: PMC2 Event Adder Lane Select (PMC2_ADDER_SELECT) */ + uint32_t pmc6_adder_lane_select : 1; /* 26: PMC6 Event Adder Lane Select (PMC6_ADDER_SELECT) */ + uint32_t pmc5_adder_lane_select : 1; /* 27: PMC5 Event Adder Lane Select (PMC5_ADDER_SELECT) */ + uint32_t pmc8_adder_lane_select : 1; /* 28: PMC8 Event Adder Lane Select (PMC8_ADDER_SELECT) */ + uint32_t pmc7_adder_lane_select : 1; /* 29: PMC7 Event Adder Lane Select (PMC7_ADDER_SELECT) */ + uint32_t pmc3_adder_lane_select : 1; /* 30: PMC3 Event Adder Lane Select (PMC3_ADDER_SELECT) */ + uint32_t pmc4_adder_lane_select : 1; /* 31: PMC4 Event Adder Lane Select (PMC4_ADDER_SELECT) */ + uint32_t pmc3_event : 5; /* 32-36: pmc3 event select */ + uint32_t pmc4_event : 5; /* 37-41: pmc4 event select */ + uint32_t pmc5_event : 5; /* 42-46: pmc5 event select */ + uint32_t pmc6_event : 5; /* 47-51: pmc6 event select */ + uint32_t pmc7_event : 5; /* 52-56: pmc7 event select */ + uint32_t pmc8_event : 5; /* 57-61: pmc8 event select */ + uint32_t speculative_event : 2; /* 62-63: SPeCulative count event SELector */ +} ppc64_mmcr1_bits_t; + +typedef union { + uint64_t value; + ppc64_mmcr1_bits_t field; +} ppc64_mmcr1_reg_t; + +typedef struct { + uint32_t /* reserved */ : 32; /* 0-31: reserved */ + uint32_t siar_sdar_same_instruction : 1; /* 32: SIAR and SDAR are from same instruction */ + uint32_t disable_counters_pmc1_pmc4 : 1; /* 33: disable counters PMC1-PMC4 */ + uint32_t disable_counters_pmc5_pmc8 : 1; /* 34: disable counters PMC5-PMC8 */ + uint32_t problem_state_siar : 1; /* 35: MSR[PR] bit when SIAR set */ + uint32_t hypervisor_state_siar : 1; /* 36: MSR[HV] bit when 
SIAR set */ + uint32_t /* reserved */ : 3; /* 37-39: reserved */ + uint32_t threshold_start_event : 3; /* 40-42: threshold start event */ + uint32_t threshold_end_event : 3; /* 43-45: threshold end event */ + uint32_t /* reserved */ : 3; /* 46-48: reserved */ + uint32_t imr_select : 1; /* 49: imr select */ + uint32_t imr_mark : 2; /* 50-51: imr mark */ + uint32_t imr_mask : 4; /* 52-55: imr mask */ + uint32_t imr_match : 4; /* 56-59: imr match */ + uint32_t disable_counters_tags_inactive : 1; /* 60: disable counters in tags inactive mode */ + uint32_t disable_counters_tags_active : 1; /* 61: disable counters in tags active mode */ + uint32_t disable_counters_wait_state : 1; /* 62: freeze counters in wait state (CNTL[31]=0) */ + uint32_t sample_enable : 1; /* 63: sampling enabled */ +} ppc64_mmcra_bits_t; + +typedef union { + uint64_t value; + ppc64_mmcra_bits_t field; +} ppc64_mmcra_reg_t; + +/* PPC_PERFMON_FUNC_* values are taken apart to fill in the appropriate configuration bitfields: */ +typedef struct { + uint32_t /* reserved */ : 22; + uint32_t SPECSEL : 2; + uint32_t TD_CP_DBGxSEL : 2; + uint32_t TTM3SEL : 2; + uint32_t TTM1SEL : 2; + uint32_t TTM0SEL : 2; +} ppc_func_bits_t; + +typedef union { + uint32_t value; + ppc_func_bits_t field; +} ppc_func_unit_t; + +#endif /* _HW_PERFMON_MMCR_H_ */ diff --git a/osfmk/ppc/hw_vm.s b/osfmk/ppc/hw_vm.s index 2bb659aae..0a4c4c2ea 100644 --- a/osfmk/ppc/hw_vm.s +++ b/osfmk/ppc/hw_vm.s @@ -34,3065 +34,3936 @@ #include #include #include -#include #include -#define PERFTIMES 0 + +#define INSTRUMENT 0 .text -/* - * - * Random notes and musings... - * - * Access to mappings via the PTEG hash must be done with the list locked. - * Access via the physical entries is controlled by the physent lock. - * Access to mappings is controlled by the PTEG lock once they are queued. - * If they are not on the list, they don't really exist, so - * only one processor at a time can find them, so no access control is needed. - * - * The second half of the PTE is kept in the physical entry. It is done this - * way, because there may be multiple mappings that refer to the same physical - * page (i.e., address aliases or synonymns). We must do it this way, because - * maintenance of the reference and change bits becomes nightmarish if each mapping - * has its own. One side effect of this, and not necessarily a bad one, is that - * all mappings for a single page can have a single WIMG, protection state, and RC bits. - * The only "bad" thing, is the reference bit. With a single copy, we can not get - * a completely accurate working set calculation, i.e., we can't tell which mapping was - * used to reference the page, all we can tell is that the physical page was - * referenced. - * - * The master copys of the reference and change bits are kept in the phys_entry. - * Other than the reference and change bits, changes to the phys_entry are not - * allowed if it has any mappings. The master reference and change bits must be - * changed via atomic update. - * - * Invalidating a PTE merges the RC bits into the phys_entry. - * - * Before checking the reference and/or bits, ALL mappings to the physical page are - * invalidated. - * - * PTEs are never explicitly validated, they are always faulted in. They are also - * not visible outside of the hw_vm modules. Complete seperation of church and state. - * - * Removal of a mapping is invalidates its PTE. - * - * So, how do we deal with mappings to I/O space? We don't have a physent for it. 
- * Within the mapping is a copy of the second half of the PTE. This is used - * ONLY when there is no physical entry. It is swapped into the PTE whenever - * it is built. There is no need to swap it back out, because RC is not - * maintained for these mappings. - * - * So, I'm starting to get concerned about the number of lwarx/stcwx loops in - * this. Satisfying a mapped address with no stealing requires one lock. If we - * steal an entry, there's two locks and an atomic update. Invalidation of an entry - * takes one lock and, if there is a PTE, another lock and an atomic update. Other - * operations are multiples (per mapping) of the above. Maybe we should look for - * an alternative. So far, I haven't found one, but I haven't looked hard. - */ +; +; 0 0 1 2 3 4 4 5 6 +; 0 8 6 4 2 0 8 6 3 +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - EA +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; +; 0 0 1 +; 0 8 6 +; +--------+--------+--------+ +; |//////BB|BBBBBBBB|BBBB////| - SID - base +; +--------+--------+--------+ +; +; 0 0 1 +; 0 8 6 +; +--------+--------+--------+ +; |////////|11111111|111111//| - SID - copy 1 +; +--------+--------+--------+ +; +; 0 0 1 +; 0 8 6 +; +--------+--------+--------+ +; |////////|//222222|22222222| - SID - copy 2 +; +--------+--------+--------+ +; +; 0 0 1 +; 0 8 6 +; +--------+--------+--------+ +; |//////33|33333333|33//////| - SID - copy 3 - not needed +; +--------+--------+--------+ for 65 bit VPN +; +; 0 0 1 2 3 4 4 5 5 +; 0 8 6 4 2 0 8 1 5 +; +--------+--------+--------+--------+--------+--------+--------+ +; |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////| - SID Hash - this is all +; +--------+--------+--------+--------+--------+--------+--------+ SID copies ORed +; 0 0 1 2 3 4 4 5 5 +; 0 8 6 4 2 0 8 1 5 +; +--------+--------+--------+--------+--------+--------+--------+ +; |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////| - Shifted high order EA +; +--------+--------+--------+--------+--------+--------+--------+ left shifted "segment" +; part of EA to make +; room for SID base +; +; +; 0 0 1 2 3 4 4 5 5 +; 0 8 6 4 2 0 8 1 5 +; +--------+--------+--------+--------+--------+--------+--------+ +; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////| - VSID - SID Hash XORed +; +--------+--------+--------+--------+--------+--------+--------+ with shifted EA +; +; 0 0 1 2 3 4 4 5 6 7 7 +; 0 8 6 4 2 0 8 6 4 2 9 +; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ +; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - VPN +; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ +; -/* hw_add_map(struct mapping *mp, space_t space, vm_offset_t va) - Adds a mapping - * - * Adds a mapping to the PTEG hash list. +/* addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping * - * Interrupts must be disabled before calling. + * Maps a page or block into a pmap * - * Using the space and the virtual address, we hash into the hash table - * and get a lock on the PTEG hash chain. Then we chain the - * mapping to the front of the list. 
+ * Returns 0 if add worked or the vaddr of the first overlap if not * + * Make mapping - not block or I/O - note: this is low-level, upper should remove duplicates + * + * 1) bump mapping busy count + * 2) lock pmap share + * 3) find mapping full path - finds all possible list previous elements + * 4) upgrade pmap to exclusive + * 5) add mapping to search list + * 6) find physent + * 7) lock physent + * 8) add to physent + * 9) unlock physent + * 10) unlock pmap + * 11) drop mapping busy count + * + * + * Make mapping - block or I/O - note: this is low-level, upper should remove duplicates + * + * 1) bump mapping busy count + * 2) lock pmap share + * 3) find mapping full path - finds all possible list previous elements + * 4) upgrade pmap to exclusive + * 5) add mapping to search list + * 6) unlock pmap + * 7) drop mapping busy count + * */ .align 5 .globl EXT(hw_add_map) LEXT(hw_add_map) - -#if PERFTIMES && DEBUG - mr r7,r3 - mflr r11 - li r3,20 - bl EXT(dbgLog2) ; Start of hw_add_map - mr r3,r7 - mtlr r11 -#endif - - mfmsr r0 /* Get the MSR */ - eqv r6,r6,r6 /* Fill the bottom with foxes */ - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r11,r4,6,6,25 /* Position the space for the VSID */ - mfspr r10,sdr1 /* Get hash table base and size */ - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwimi r11,r5,30,2,5 /* Insert the segment no. to make a VSID */ - mfsprg r12,2 ; Get feature flags - rlwimi r6,r10,16,0,15 /* Make table size -1 out of mask */ - rlwinm r7,r5,26,10,25 /* Isolate the page index */ - or r8,r10,r6 /* Point to the last byte in table */ - rlwinm r9,r5,4,0,3 ; Move nybble 1 up to 0 - xor r7,r7,r11 /* Get primary hash */ - mtcrf 0x04,r12 ; Set the features - andi. r12,r0,0x7FCF /* Disable translation and interruptions */ - rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */ - addi r8,r8,1 /* Point to the PTEG Control Area */ - xor r9,r9,r5 ; Splooch vaddr nybble 0 and 1 together - and r7,r7,r6 /* Wrap the hash */ - rlwimi r11,r5,10,26,31 /* Move API into pte ID */ - rlwinm r9,r9,6,27,29 ; Get splooched bits in place - add r8,r8,r7 /* Point to our PCA entry */ - rlwinm r10,r4,2,27,29 ; Get low 3 bits of the VSID for look-aside hash - - bt pfNoMSRirb,hamNoMSR ; No MSR... 
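The numbered procedure above is easier to follow in C than in the register scheduling below. The following sketch is illustrative only: sxlkShared, sxlkPromote, sxlkConvert, sxlkUnlock, mapSearchFull and mapInsert are the routines this code actually calls, but the C signatures, the return-code macro, and hw_add_map_sketch itself are hypothetical stand-ins, not part of the patch.

	#include <stdint.h>

	struct pmap;    /* opaque here; the real layouts live in the pmap headers */
	struct mapping;

	/* Real routine names from this patch, hypothetical simplified signatures: */
	extern int  sxlkShared(struct pmap *);   /* nonzero on lock timeout                 */
	extern int  sxlkPromote(struct pmap *);  /* shared -> exclusive, nonzero on failure */
	extern int  sxlkConvert(struct pmap *);  /* drop shared, wait for exclusive         */
	extern void sxlkUnlock(struct pmap *);
	extern struct mapping *mapSearchFull(struct pmap *, uint64_t va);
	extern void mapInsert(struct pmap *, struct mapping *);
	extern uint64_t mapVAddr(const struct mapping *);   /* hypothetical accessor        */

	#define ADD_BADLOCK 1ULL   /* simplified stand-in for mapRtBadLk */

	uint64_t hw_add_map_sketch(struct pmap *pmap, struct mapping *mp, uint64_t va)
	{
		int have_exclusive = 0;

		if (sxlkShared(pmap) != 0)                  /* lock the search list shared  */
			return ADD_BADLOCK;
		for (;;) {
			struct mapping *over = mapSearchFull(pmap, va); /* full search keeps prevs */
			if (over != NULL) {
				sxlkUnlock(pmap);
				return mapVAddr(over);      /* report the first overlap     */
			}
			if (have_exclusive || sxlkPromote(pmap) == 0)
				break;                      /* we now hold it exclusive     */
			if (sxlkConvert(pmap) != 0)         /* promote failed: drop shared, */
				return ADD_BADLOCK;         /* wait for exclusive instead   */
			have_exclusive = 1;                 /* the list may have changed    */
		}                                           /* meanwhile, so rescan         */
		mapInsert(pmap, mp);                        /* add to the search list       */
		sxlkUnlock(pmap);
		return 0;                                   /* zero means the add worked    */
	}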
- - mtmsr r12 ; Translation and all off - isync ; Toss prefetch - b hamNoMSRx + + stwu r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1) ; Make some space on the stack + mflr r0 ; Save the link register + stw r17,FM_ARG0+0x00(r1) ; Save a register + stw r18,FM_ARG0+0x04(r1) ; Save a register + stw r19,FM_ARG0+0x08(r1) ; Save a register + mfsprg r19,2 ; Get feature flags + stw r20,FM_ARG0+0x0C(r1) ; Save a register + stw r21,FM_ARG0+0x10(r1) ; Save a register + mtcrf 0x02,r19 ; move pf64Bit cr6 + stw r22,FM_ARG0+0x14(r1) ; Save a register + stw r23,FM_ARG0+0x18(r1) ; Save a register + stw r24,FM_ARG0+0x1C(r1) ; Save a register + stw r25,FM_ARG0+0x20(r1) ; Save a register + stw r26,FM_ARG0+0x24(r1) ; Save a register + stw r27,FM_ARG0+0x28(r1) ; Save a register + stw r28,FM_ARG0+0x2C(r1) ; Save a register + stw r29,FM_ARG0+0x30(r1) ; Save a register + stw r30,FM_ARG0+0x34(r1) ; Save a register + stw r31,FM_ARG0+0x38(r1) ; Save a register + stw r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return + + rlwinm r11,r4,0,0,19 ; Round down to get mapping block address + mr r28,r3 ; Save the pmap + mr r31,r4 ; Save the mapping + bt++ pf64Bitb,hamSF1 ; skip if 64-bit (only they take the hint) + lwz r20,pmapvr+4(r3) ; Get conversion mask for pmap + lwz r21,mbvrswap+4(r11) ; Get conversion mask for mapping + + b hamSF1x ; Done... + +hamSF1: ld r20,pmapvr(r3) ; Get conversion mask for pmap + ld r21,mbvrswap(r11) ; Get conversion mask for mapping + +hamSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit + + mr r17,r11 ; Save the MSR + xor r28,r28,r20 ; Convert the pmap to physical addressing + xor r31,r31,r21 ; Convert the mapping to physical addressing + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkShared ; Go get a shared lock on the mapping lists + mr. r3,r3 ; Did we get the lock? + lwz r24,mpFlags(r31) ; Pick up the flags + bne-- hamBadLock ; Nope... + + li r21,0 ; Remember that we have the shared lock -hamNoMSR: mr r4,r0 ; Save R0 - mr r2,r3 ; Save - li r0,loadMSR ; Get the MSR setter SC - mr r3,r12 ; Get new MSR - sc ; Set it - mr r0,r4 ; Restore - mr r3,r2 ; Restore -hamNoMSRx: +; +; Note that we do a full search (i.e., no shortcut level skips, etc.) +; here so that we will know the previous elements so we can dequeue them +; later. +; - la r4,PCAhash(r8) /* Point to the mapping hash area */ - xor r9,r9,r10 ; Finish splooching nybble 0, 1, and the low bits of the VSID - isync /* Get rid of anything prefetched before we ref storage */ -/* - * We've now got the address of our PCA, the hash chain anchor, our API subhash, - * and word 0 of the PTE (the virtual part). - * - * Now, we just lock the PCA. 
- */ +hamRescan: lwz r4,mpVAddr(r31) ; Get the new vaddr top half + lwz r5,mpVAddr+4(r31) ; Get the new vaddr bottom half + mr r3,r28 ; Pass in pmap to search + lhz r23,mpBSize(r31) ; Get the block size for later + mr r29,r4 ; Save top half of vaddr for later + mr r30,r5 ; Save bottom half of vaddr for later + +#if INSTRUMENT + mfspr r0,pmc1 ; INSTRUMENT - saveinstr[16] - Take stamp before mapSearchFull + stw r0,0x6100+(16*16)+0x0(0) ; INSTRUMENT - Save it + mfspr r0,pmc2 ; INSTRUMENT - Get stamp + stw r0,0x6100+(16*16)+0x4(0) ; INSTRUMENT - Save it + mfspr r0,pmc3 ; INSTRUMENT - Get stamp + stw r0,0x6100+(16*16)+0x8(0) ; INSTRUMENT - Save it + mfspr r0,pmc4 ; INSTRUMENT - Get stamp + stw r0,0x6100+(16*16)+0xC(0) ; INSTRUMENT - Save it +#endif + + bl EXT(mapSearchFull) ; Go see if we can find it + +#if INSTRUMENT + mfspr r0,pmc1 ; INSTRUMENT - saveinstr[17] - Take stamp after mapSearchFull + stw r0,0x6100+(17*16)+0x0(0) ; INSTRUMENT - Save it + mfspr r0,pmc2 ; INSTRUMENT - Get stamp + stw r0,0x6100+(17*16)+0x4(0) ; INSTRUMENT - Save it + mfspr r0,pmc3 ; INSTRUMENT - Get stamp + stw r0,0x6100+(17*16)+0x8(0) ; INSTRUMENT - Save it + mfspr r0,pmc4 ; INSTRUMENT - Get stamp + stw r0,0x6100+(17*16)+0xC(0) ; INSTRUMENT - Save it +#endif + + andi. r0,r24,mpNest ; See if we are a nest + rlwinm r23,r23,12,0,19 ; Convert standard block size to bytes + lis r0,0x8000 ; Get 0xFFFFFFFF80000000 + li r22,0 ; Assume high part of size is 0 + beq++ hamNoNest ; This is not a nest... + + rlwinm r22,r23,16,16,31 ; Convert partially converted size to segments + rlwinm r23,r23,16,0,3 ; Finish shift + +hamNoNest: add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit + mr. r3,r3 ; Did we find a mapping here? + or r0,r0,r30 ; Make sure a carry will propagate all the way in 64-bit + crmove cr5_eq,cr0_eq ; Remember whether we found the mapping + addc r9,r0,r23 ; Add size to get last page in new range + or. r0,r4,r5 ; Are we beyond the end? + adde r8,r29,r22 ; Add the rest of the length on + bne-- cr5,hamOverlay ; Yeah, this is no good, can not double map... + rlwinm r9,r9,0,0,31 ; Clean top half of sum + beq++ hamFits ; We are at the end... + + cmplw cr1,r9,r5 ; Is the bottom part of our end less? + cmplw r8,r4 ; Is our end before the next (top part) + crand cr0_eq,cr0_eq,cr1_lt ; Is the second half less and the first half equal? + cror cr0_eq,cr0_eq,cr0_lt ; Or is the top half less + + bf-- cr0_eq,hamOverlay ; No, we do not fit, there is an overlay... - li r12,1 /* Get the locked value */ - dcbt 0,r4 /* We'll need the hash area in a sec, so get it */ - add r4,r4,r9 /* Point to the right mapping hash slot */ - -ptegLckx: lwarx r10,0,r8 /* Get the PTEG lock */ - mr. r10,r10 /* Is it locked? */ - bne- ptegLckwx /* Yeah... */ - stwcx. r12,0,r8 /* Take take it */ - bne- ptegLckx /* Someone else was trying, try again... */ - b ptegSXgx /* All done... */ - - .align 4 - -ptegLckwx: mr. r10,r10 /* Check if it's already held */ - beq+ ptegLckx /* It's clear... */ - lwz r10,0(r8) /* Get lock word again... */ - b ptegLckwx /* Wait... 
*/ - - .align 4 - -ptegSXgx: isync /* Make sure we haven't used anything yet */ - - lwz r7,0(r4) /* Pick up the anchor of hash list */ - stw r3,0(r4) /* Save the new head */ - stw r7,mmhashnext(r3) /* Chain in the old head */ - - stw r4,mmPTEhash(r3) /* Point to the head of the hash list */ - - sync /* Make sure the chain is updated */ - stw r10,0(r8) /* Unlock the hash list */ - mtmsr r0 /* Restore translation and interruptions */ - isync /* Toss anything done with DAT off */ -#if PERFTIMES && DEBUG - mflr r11 - mr r4,r3 - li r3,21 - bl EXT(dbgLog2) ; end of hw_add_map - mr r3,r4 - mtlr r11 -#endif - blr /* Leave... */ - - -/* mp=hw_lock_phys_vir(space, va) - Finds and locks a physical entry by vaddr. - * - * Returns the mapping with the associated physent locked if found, or a - * zero and no lock if not. It we timed out trying to get a the lock on - * the physical entry, we retun a 1. A physical entry can never be on an - * odd boundary, so we can distinguish between a mapping and a timeout code. - * - * Interrupts must be disabled before calling. - * - * Using the space and the virtual address, we hash into the hash table - * and get a lock on the PTEG hash chain. Then we search the chain for the - * mapping for our virtual address. From there, we extract the pointer to - * the physical entry. - * - * Next comes a bit of monkey business. we need to get a lock on the physical - * entry. But, according to our rules, we can't get it after we've gotten the - * PTEG hash lock, we could deadlock if we do. So, we need to release the - * hash lock. The problem is, though, that as soon as we release it, some - * other yahoo may remove our mapping between the time that we release the - * hash lock and obtain the phys entry lock. So, we can't count on the - * mapping once we release the lock. Instead, after we lock the phys entry, - * we search the mapping list (phys_link) for our translation. If we don't find it, - * we unlock the phys entry, bail out, and return a 0 for the mapping address. If we - * did find it, we keep the lock and return the address of the mapping block. - * - * What happens when a mapping is found, but there is no physical entry? - * This is what happens when there is I/O area mapped. It one of these mappings - * is found, the mapping is returned, as is usual for this call, but we don't - * try to lock anything. There could possibly be some problems here if another - * processor releases the mapping while we still alre using it. Hope this - * ain't gonna happen. - * - * Taaa-dahhh! Easy as pie, huh? - * - * So, we have a few hacks hacks for running translate off in here. - * First, when we call the lock routine, we have carnel knowlege of the registers is uses. - * That way, we don't need a stack frame, which we can't have 'cause the stack is in - * virtual storage. But wait, as if that's not enough... We need one more register. So, - * we cram the LR into the CTR and return from there. - * - */ - .align 5 - .globl EXT(hw_lock_phys_vir) - -LEXT(hw_lock_phys_vir) - -#if PERFTIMES && DEBUG - mflr r11 - mr r5,r3 - li r3,22 - bl EXT(dbgLog2) ; Start of hw_add_map - mr r3,r5 - mtlr r11 -#endif - mfmsr r12 /* Get the MSR */ - eqv r6,r6,r6 /* Fill the bottom with foxes */ - mfsprg r9,2 ; Get feature flags - rlwinm r11,r3,6,6,25 /* Position the space for the VSID */ - rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - mfspr r5,sdr1 /* Get hash table base and size */ - rlwimi r11,r4,30,2,5 /* Insert the segment no. 
to make a VSID */ - mtcrf 0x04,r9 ; Set the features - rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwimi r6,r5,16,0,15 /* Make table size -1 out of mask */ - andi. r0,r12,0x7FCF /* Disable translation and interruptions */ - rlwinm r9,r4,4,0,3 ; Move nybble 1 up to 0 - rlwinm r7,r4,26,10,25 /* Isolate the page index */ - or r8,r5,r6 /* Point to the last byte in table */ - xor r7,r7,r11 /* Get primary hash */ - rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */ - addi r8,r8,1 /* Point to the PTEG Control Area */ - xor r9,r9,r4 ; Splooch vaddr nybble 0 and 1 together - and r7,r7,r6 /* Wrap the hash */ - rlwimi r11,r4,10,26,31 /* Move API into pte ID */ - rlwinm r9,r9,6,27,29 ; Get splooched bits in place - add r8,r8,r7 /* Point to our PCA entry */ - rlwinm r10,r3,2,27,29 ; Get low 3 bits of the VSID for look-aside hash - - bt pfNoMSRirb,hlpNoMSR ; No MSR... - - mtmsr r0 ; Translation and all off - isync ; Toss prefetch - b hlpNoMSRx +; +; Here we try to convert to an exclusive lock. This will fail if someone else +; has it shared. +; +hamFits: mr. r21,r21 ; Do we already have the exclusive lock? + la r3,pmapSXlk(r28) ; Point to the pmap search lock -hlpNoMSR: mr r3,r0 ; Get the new MSR - li r0,loadMSR ; Get the MSR setter SC - sc ; Set it -hlpNoMSRx: - - la r3,PCAhash(r8) /* Point to the mapping hash area */ - xor r9,r9,r10 ; Finish splooching nybble 0, 1, and the low bits of the VSID - isync /* Make sure translation is off before we ref storage */ - -/* - * We've now got the address of our PCA, the hash chain anchor, our API subhash, - * and word 0 of the PTE (the virtual part). - * - * Now, we just lock the PCA and find our mapping, if it exists. - */ + bne-- hamGotX ; We already have the exclusive... + + bl sxlkPromote ; Try to promote shared to exclusive + mr. r3,r3 ; Could we? + beq++ hamGotX ; Yeah... + +; +; Since we could not promote our lock, we need to convert to it. +; That means that we drop the shared lock and wait to get it +; exclusive. Since we release the lock, we need to do the look up +; again. +; - dcbt 0,r3 /* We'll need the hash area in a sec, so get it */ - add r3,r3,r9 /* Point to the right mapping hash slot */ + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkConvert ; Convert shared to exclusive + mr. r3,r3 ; Could we? + bne-- hamBadLock ; Nope, we must have timed out... -ptegLcka: lwarx r10,0,r8 /* Get the PTEG lock */ - li r5,1 /* Get the locked value */ - mr. r10,r10 /* Is it locked? */ - bne- ptegLckwa /* Yeah... */ - stwcx. r5,0,r8 /* Take take it */ - bne- ptegLcka /* Someone else was trying, try again... */ - b ptegSXga /* All done... */ + li r21,1 ; Remember that we have the exclusive lock + b hamRescan ; Go look again... - .align 4 + .align 5 -ptegLckwa: mr. r10,r10 /* Check if it's already held */ - beq+ ptegLcka /* It's clear... */ - lwz r10,0(r8) /* Get lock word again... */ - b ptegLckwa /* Wait... 
*/ +hamGotX: +#if INSTRUMENT + mfspr r3,pmc1 ; INSTRUMENT - saveinstr[18] - Take stamp before mapSearchFull + stw r3,0x6100+(18*16)+0x0(0) ; INSTRUMENT - Save it + mfspr r3,pmc2 ; INSTRUMENT - Get stamp + stw r3,0x6100+(18*16)+0x4(0) ; INSTRUMENT - Save it + mfspr r3,pmc3 ; INSTRUMENT - Get stamp + stw r3,0x6100+(18*16)+0x8(0) ; INSTRUMENT - Save it + mfspr r3,pmc4 ; INSTRUMENT - Get stamp + stw r4,0x6100+(18*16)+0xC(0) ; INSTRUMENT - Save it +#endif + mr r3,r28 ; Get the pmap to insert into + mr r4,r31 ; Point to the mapping + bl EXT(mapInsert) ; Insert the mapping into the list + +#if INSTRUMENT + mfspr r4,pmc1 ; INSTRUMENT - saveinstr[19] - Take stamp before mapSearchFull + stw r4,0x6100+(19*16)+0x0(0) ; INSTRUMENT - Save it + mfspr r4,pmc2 ; INSTRUMENT - Get stamp + stw r4,0x6100+(19*16)+0x4(0) ; INSTRUMENT - Save it + mfspr r4,pmc3 ; INSTRUMENT - Get stamp + stw r4,0x6100+(19*16)+0x8(0) ; INSTRUMENT - Save it + mfspr r4,pmc4 ; INSTRUMENT - Get stamp + stw r4,0x6100+(19*16)+0xC(0) ; INSTRUMENT - Save it +#endif + + lhz r8,mpSpace(r31) ; Get the address space + mfsdr1 r7 ; Get the hash table base/bounds + lwz r4,pmapResidentCnt(r28) ; Get the mapped page count + andi. r0,r24,mpNest|mpBlock ; Is this a nest or block? + + rlwimi r8,r8,14,4,17 ; Double address space + rlwinm r9,r30,20,16,31 ; Isolate the page number + rlwinm r10,r30,18,14,17 ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14) + rlwimi r8,r8,28,0,3 ; Get the last nybble of the hash + rlwimi r10,r29,18,0,13 ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size) + rlwinm r7,r7,0,16,31 ; Isolate length mask (or count) + addi r4,r4,1 ; Bump up the mapped page count + xor r10,r10,r8 ; Calculate the low 32 bits of the VSID + stw r4,pmapResidentCnt(r28) ; Set the mapped page count + xor r9,r9,r10 ; Get the hash to the PTEG + + bne-- hamDoneNP ; This is a block or nest, therefore, no physent... + + bl mapPhysFindLock ; Go find and lock the physent + + bt++ pf64Bitb,ham64 ; This is 64-bit... + + lwz r11,ppLink+4(r3) ; Get the alias chain pointer + rlwinm r7,r7,16,0,15 ; Get the PTEG wrap size + slwi r9,r9,6 ; Make PTEG offset + ori r7,r7,0xFFC0 ; Stick in the bottom part + rlwinm r12,r11,0,0,25 ; Clean it up + and r9,r9,r7 ; Wrap offset into table + mr r4,r31 ; Set the link to install + stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid) + stw r12,mpAlias+4(r31) ; Move to the mapping + bl mapPhyCSet32 ; Install the link + b hamDone ; Go finish up... 
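The VSID and PTEG arithmetic in the rlwimi/xor cluster above is dense, so here is an approximate C rendering. The function name and types are illustrative; it assumes the 14-bit address-space id and 4K pages implied by the masks, and it produces the pre-wrap hash that the code then folds into the actual table size (slwi by 6 for 64-byte PTEGs in the 32-bit path, sldi by 7 in the 64-bit path below).

	#include <stdint.h>

	/* Sketch only: mirrors "Double address space" through "Get the hash to the PTEG". */
	static uint32_t pteg_hash32(uint32_t space, uint64_t ea)
	{
		uint32_t sflat = space | (space << 14) | (space << 28);  /* replicate the space id    */
		uint32_t seg   = ((uint32_t)(ea >> 28) & 0x3FFFF) << 14; /* EA[18:35], lined up        */
		uint32_t vsid  = seg ^ sflat;                            /* low 32 bits of the VSID    */
		uint32_t pgidx = (uint32_t)(ea >> 12) & 0xFFFF;          /* page index within segment  */
		return vsid ^ pgidx;                                     /* PTEG selector, pre-wrap    */
	}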
+ + .align 5 - .align 4 +ham64: li r0,0xFF ; Get mask to clean up alias pointer + subfic r7,r7,46 ; Get number of leading zeros + eqv r4,r4,r4 ; Get all ones + ld r11,ppLink(r3) ; Get the alias chain pointer + rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC0000000000000003F + srd r4,r4,r7 ; Get the wrap mask + sldi r9,r9,7 ; Change hash to PTEG offset + andc r11,r11,r0 ; Clean out the lock and flags + and r9,r9,r4 ; Wrap to PTEG + mr r4,r31 + stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid) + std r11,mpAlias(r31) ; Set the alias pointer in the mapping + + bl mapPhyCSet64 ; Install the link + +hamDone: bl mapPhysUnlock ; Unlock the physent chain -ptegSXga: isync /* Make sure we haven't used anything yet */ +hamDoneNP: la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list - mflr r0 /* Get the LR */ - lwz r9,0(r3) /* Pick up the first mapping block */ - mtctr r0 /* Stuff it into the CTR */ + mr r3,r31 ; Get the mapping pointer + bl mapDropBusy ; Drop the busy count -findmapa: + li r3,0 ; Set successful return + li r4,0 ; Set successful return - mr. r3,r9 /* Did we hit the end? */ - bne+ chkmapa /* Nope... */ - - stw r3,0(r8) /* Unlock the PTEG lock - Note: we never saved anything while we - had the lock, so we don't need a sync - before we unlock it */ +hamReturn: bt++ pf64Bitb,hamR64 ; Yes... -vbail: mtmsr r12 /* Restore translation and interruptions */ - isync /* Make sure translation is cool */ -#if PERFTIMES && DEBUG - mflr r11 - mr r4,r3 - li r3,23 - bl EXT(dbgLog2) ; Start of hw_add_map - mr r3,r4 - mtlr r11 -#endif - bctr /* Return in abject failure... */ - - .align 4 - -chkmapa: lwz r10,mmPTEv(r3) /* Pick up our virtual ID */ - lwz r9,mmhashnext(r3) /* Pick up next mapping block */ - cmplw r10,r11 /* Have we found ourself? */ - bne- findmapa /* Nope, still wandering... */ - - lwz r9,mmphysent(r3) /* Get our physical entry pointer */ - li r5,0 /* Clear this out */ - mr. r9,r9 /* Is there, like, a physical entry? */ - stw r5,0(r8) /* Unlock the PTEG lock - Note: we never saved anything while we - had the lock, so we don't need a sync - before we unlock it */ - - beq- vbail /* If there is no physical entry, it's time - to leave... */ - -/* Here we want to call hw_lock_bit. We don't want to use the stack, 'cause it's - * in virtual storage, and we're in real. So, we've carefully looked at the code - * in hw_lock_bit (and unlock) and cleverly don't use any of the registers that it uses. - * Be very, very aware of how you change this code. By the way, it uses: - * R0, R6, R7, R8, and R9. R3, R4, and R5 contain parameters - * Unfortunatly, we need to stash R9 still. So... Since we know we will not be interrupted - * ('cause we turned off interruptions and translation is off) we will use SPRG3... - */ - - lwz r10,mmPTEhash(r3) /* Save the head of the hash-alike chain. We need it to find ourselves later */ - lis r5,HIGH_ADDR(EXT(LockTimeOut)) /* Get address of timeout value */ - la r3,pephyslink(r9) /* Point to the lock word */ - ori r5,r5,LOW_ADDR(EXT(LockTimeOut)) /* Get second half of address */ - li r4,PHYS_LOCK /* Get the lock bit value */ - lwz r5,0(r5) /* Pick up the timeout value */ - mtsprg 3,r9 /* Save R9 in SPRG3 */ - - bl EXT(hw_lock_bit) /* Go do the lock */ - - mfsprg r9,3 /* Restore pointer to the phys_entry */ - mr. r3,r3 /* Did we timeout? */ - lwz r4,pephyslink(r9) /* Pick up first mapping block */ - beq- penterr /* Bad deal, we timed out... */ + mtmsr r17 ; Restore enables/translation/etc. 
+ isync + b hamReturnC ; Join common... - rlwinm r4,r4,0,0,26 ; Clear out the flags from first link +hamR64: mtmsrd r17 ; Restore enables/translation/etc. + isync -findmapb: mr. r3,r4 /* Did we hit the end? */ - bne+ chkmapb /* Nope... */ +hamReturnC: +#if INSTRUMENT + mfspr r0,pmc1 ; INSTRUMENT - saveinstr[20] - Take stamp before mapSearchFull + stw r0,0x6100+(20*16)+0x0(0) ; INSTRUMENT - Save it + mfspr r0,pmc2 ; INSTRUMENT - Get stamp + stw r0,0x6100+(20*16)+0x4(0) ; INSTRUMENT - Save it + mfspr r0,pmc3 ; INSTRUMENT - Get stamp + stw r0,0x6100+(20*16)+0x8(0) ; INSTRUMENT - Save it + mfspr r0,pmc4 ; INSTRUMENT - Get stamp + stw r0,0x6100+(20*16)+0xC(0) ; INSTRUMENT - Save it +#endif + lwz r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return + lwz r17,FM_ARG0+0x00(r1) ; Save a register + lwz r18,FM_ARG0+0x04(r1) ; Save a register + lwz r19,FM_ARG0+0x08(r1) ; Save a register + lwz r20,FM_ARG0+0x0C(r1) ; Save a register + mtlr r0 ; Restore the return + lwz r21,FM_ARG0+0x10(r1) ; Save a register + lwz r22,FM_ARG0+0x14(r1) ; Save a register + lwz r23,FM_ARG0+0x18(r1) ; Save a register + lwz r24,FM_ARG0+0x1C(r1) ; Save a register + lwz r25,FM_ARG0+0x20(r1) ; Save a register + lwz r26,FM_ARG0+0x24(r1) ; Save a register + lwz r27,FM_ARG0+0x28(r1) ; Save a register + lwz r28,FM_ARG0+0x2C(r1) ; Save a register + lwz r29,FM_ARG0+0x30(r1) ; Save a register + lwz r30,FM_ARG0+0x34(r1) ; Save a register + lwz r31,FM_ARG0+0x38(r1) ; Save a register + lwz r1,0(r1) ; Pop the stack - la r3,pephyslink(r9) /* Point to where the lock is */ - li r4,PHYS_LOCK /* Get the lock bit value */ - bl EXT(hw_unlock_bit) /* Go unlock the physentry */ + blr ; Leave... - li r3,0 /* Say we failed */ - b vbail /* Return in abject failure... */ -penterr: li r3,1 /* Set timeout */ - b vbail /* Return in abject failure... */ - .align 5 -chkmapb: lwz r6,mmPTEv(r3) /* Pick up our virtual ID */ - lwz r4,mmnext(r3) /* Pick up next mapping block */ - cmplw r6,r11 /* Have we found ourself? */ - lwz r5,mmPTEhash(r3) /* Get the start of our hash chain */ - bne- findmapb /* Nope, still wandering... */ - cmplw r5,r10 /* On the same hash chain? */ - bne- findmapb /* Nope, keep looking... */ +hamOverlay: lwz r22,mpFlags(r3) ; Get the overlay flags + li r0,mpC|mpR ; Get a mask to turn off RC bits + lwz r23,mpFlags(r31) ; Get the requested flags + lwz r20,mpVAddr(r3) ; Get the overlay address + lwz r8,mpVAddr(r31) ; Get the requested address + lwz r21,mpVAddr+4(r3) ; Get the overlay address + lwz r9,mpVAddr+4(r31) ; Get the requested address + lhz r10,mpBSize(r3) ; Get the overlay length + lhz r11,mpBSize(r31) ; Get the requested length + lwz r24,mpPAddr(r3) ; Get the overlay physical address + lwz r25,mpPAddr(r31) ; Get the requested physical address + andc r21,r21,r0 ; Clear RC bits + andc r9,r9,r0 ; Clear RC bits + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list + + rlwinm. r0,r22,0,mpRIPb,mpRIPb ; Are we in the process of removing this one? + mr r3,r20 ; Save the top of the colliding address + rlwinm r4,r21,0,0,19 ; Save the bottom of the colliding address + + bne++ hamRemv ; Removing, go say so so we help... + + cmplw r20,r8 ; High part of vaddr the same? + cmplw cr1,r21,r9 ; Low part? + crand cr5_eq,cr0_eq,cr1_eq ; Remember if same + + cmplw r10,r11 ; Size the same? + cmplw cr1,r24,r25 ; Physical address? 
+ crand cr5_eq,cr5_eq,cr0_eq ; Remember + crand cr5_eq,cr5_eq,cr1_eq ; Remember if same + + xor r23,r23,r22 ; Check for differences in flags + ori r23,r23,mpFIP ; "Fault in Progress" is ok to be different + xori r23,r23,mpFIP ; Force mpFIP off + rlwinm. r0,r23,0,mpSpecialb,mpListsb-1 ; See if any important flags are different + crand cr5_eq,cr5_eq,cr0_eq ; Merge in final check + bf-- cr5_eq,hamReturn ; This is not the same, so we just return a collision... + + ori r4,r4,mapRtMapDup ; Set duplicate + b hamReturn ; And leave... + +hamRemv: ori r4,r4,mapRtRemove ; We are in the process of removing the collision + b hamReturn ; Come back yall... + + .align 5 + +hamBadLock: li r3,0 ; Set lock time out error code + li r4,mapRtBadLk ; Set lock time out error code + b hamReturn ; Leave.... + + - b vbail /* Return in glorious triumph... */ /* - * hw_rem_map(mapping) - remove a mapping from the system. - * - * Upon entry, R3 contains a pointer to a mapping block and the associated - * physical entry is locked if there is one. + * mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system. * - * If the mapping entry indicates that there is a PTE entry, we invalidate - * if and merge the reference and change information into the phys_entry. + * Upon entry, R3 contains a pointer to a pmap. Since vaddr is + * a 64-bit quantity, it is a long long so it is in R4 and R5. + * + * We return the virtual address of the removed mapping in + * R3. * - * Next, we remove the mapping from the phys_ent and the PTEG hash list. + * Note that this is designed to be called from 32-bit mode with a stack. * - * Unlock any locks that are left, and exit. + * We disable translation and all interruptions here. This keeps us + * from having to worry about a deadlock due to having anything locked + * and needing it to process a fault. * * Note that this must be done with both interruptions off and VM off * - * Note that this code depends upon the VSID being of the format 00SXXXXX - * where S is the segment number. 
- * - * + * Remove mapping via pmap, regular page, no pte + * + * 1) lock pmap share + * 2) find mapping full path - finds all possible list previous elements + * 3) upgrade pmap to exclusive + * 4) bump mapping busy count + * 5) remove mapping from search list + * 6) unlock pmap + * 7) lock physent + * 8) remove from physent + * 9) unlock physent + * 10) drop mapping busy count + * 11) drain mapping busy count + * + * + * Remove mapping via pmap, regular page, with pte + * + * 1) lock pmap share + * 2) find mapping full path - finds all possible list previous elements + * 3) upgrade lock to exclusive + * 4) bump mapping busy count + * 5) lock PTEG + * 6) invalidate pte and tlbie + * 7) atomic merge rc into physent + * 8) unlock PTEG + * 9) remove mapping from search list + * 10) unlock pmap + * 11) lock physent + * 12) remove from physent + * 13) unlock physent + * 14) drop mapping busy count + * 15) drain mapping busy count + * + * + * Remove mapping via pmap, I/O or block + * + * 1) lock pmap share + * 2) find mapping full path - finds all possible list previous elements + * 3) upgrade lock to exclusive + * 4) bump mapping busy count + * 5) mark remove-in-progress + * 6) check and bump remove chunk cursor if needed + * 7) unlock pmap + * 8) if something to invalidate, go to step 11 + + * 9) drop busy + * 10) return with mapRtRemove to force higher level to call again + + * 11) Lock PTEG + * 12) invalidate ptes, no tlbie + * 13) unlock PTEG + * 14) repeat 11 - 13 for all pages in chunk + * 15) if not final chunk, go to step 9 + * 16) invalidate tlb entries for the whole block map but no more than the full tlb + * 17) lock pmap share + * 18) find mapping full path - finds all possible list previous elements + * 19) upgrade lock to exclusive + * 20) remove mapping from search list + * 21) drop mapping busy count + * 22) drain mapping busy count + * + */ .align 5 .globl EXT(hw_rem_map) LEXT(hw_rem_map) -#if PERFTIMES && DEBUG - mflr r11 - mr r4,r3 - li r3,24 - bl EXT(dbgLog2) ; Start of hw_add_map - mr r3,r4 - mtlr r11 -#endif - mfsprg r9,2 ; Get feature flags - mfmsr r0 /* Save the MSR */ - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ - mtcrf 0x04,r9 ; Set the features - rlwinm r12,r12,0,28,25 /* Clear IR and DR */ - bt pfNoMSRirb,lmvNoMSR ; No MSR... 
- - mtmsr r12 ; Translation and all off - isync ; Toss prefetch - b lmvNoMSRx - -lmvNoMSR: - mr r6,r0 - mr r4,r3 - li r0,loadMSR ; Get the MSR setter SC - mr r3,r12 ; Get new MSR - sc ; Set it - mr r3,r4 - mr r0,r6 +; +; NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE +; THE HW_PURGE_* ROUTINES ALSO +; -lmvNoMSRx: +#define hrmStackSize ((31-15+1)*4)+4 + stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack + mflr r0 ; Save the link register + stw r15,FM_ARG0+0x00(r1) ; Save a register + stw r16,FM_ARG0+0x04(r1) ; Save a register + stw r17,FM_ARG0+0x08(r1) ; Save a register + stw r18,FM_ARG0+0x0C(r1) ; Save a register + stw r19,FM_ARG0+0x10(r1) ; Save a register + mfsprg r19,2 ; Get feature flags + stw r20,FM_ARG0+0x14(r1) ; Save a register + stw r21,FM_ARG0+0x18(r1) ; Save a register + mtcrf 0x02,r19 ; move pf64Bit cr6 + stw r22,FM_ARG0+0x1C(r1) ; Save a register + stw r23,FM_ARG0+0x20(r1) ; Save a register + stw r24,FM_ARG0+0x24(r1) ; Save a register + stw r25,FM_ARG0+0x28(r1) ; Save a register + stw r26,FM_ARG0+0x2C(r1) ; Save a register + stw r27,FM_ARG0+0x30(r1) ; Save a register + stw r28,FM_ARG0+0x34(r1) ; Save a register + stw r29,FM_ARG0+0x38(r1) ; Save a register + stw r30,FM_ARG0+0x3C(r1) ; Save a register + stw r31,FM_ARG0+0x40(r1) ; Save a register + stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr + stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return + + bt++ pf64Bitb,hrmSF1 ; skip if 64-bit (only they take the hint) + lwz r9,pmapvr+4(r3) ; Get conversion mask + b hrmSF1x ; Done... + +hrmSF1: ld r9,pmapvr(r3) ; Get conversion mask + +hrmSF1x: + bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit + + xor r28,r3,r9 ; Convert the pmap to physical addressing - - lwz r6,mmPTEhash(r3) /* Get pointer to hash list anchor */ - lwz r5,mmPTEv(r3) /* Get the VSID */ - dcbt 0,r6 /* We'll need that chain in a bit */ +; +; Here is where we join in from the hw_purge_* routines +; - rlwinm r7,r6,0,0,25 /* Round hash list down to PCA boundary */ - li r12,1 /* Get the locked value */ - subi r6,r6,mmhashnext /* Make the anchor look like an entry */ +hrmJoin: mfsprg r19,2 ; Get feature flags again (for alternate entries) -ptegLck1: lwarx r10,0,r7 /* Get the PTEG lock */ - mr. r10,r10 /* Is it locked? */ - bne- ptegLckw1 /* Yeah... */ - stwcx. r12,0,r7 /* Try to take it */ - bne- ptegLck1 /* Someone else was trying, try again... */ - b ptegSXg1 /* All done... */ + mr r17,r11 ; Save the MSR + mr r29,r4 ; Top half of vaddr + mr r30,r5 ; Bottom half of vaddr - .align 4 - -ptegLckw1: mr. r10,r10 /* Check if it's already held */ - beq+ ptegLck1 /* It's clear... */ - lwz r10,0(r7) /* Get lock word again... */ - b ptegLckw1 /* Wait... */ + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkShared ; Go get a shared lock on the mapping lists + mr. r3,r3 ; Did we get the lock? + bne-- hrmBadLock ; Nope... - .align 4 - -ptegSXg1: isync /* Make sure we haven't used anything yet */ - - lwz r12,mmhashnext(r3) /* Prime with our forward pointer */ - lwz r4,mmPTEent(r3) /* Get the pointer to the PTE now that the lock's set */ +; +; Note that we do a full search (i.e., no shortcut level skips, etc.) +; here so that we will know the previous elements so we can dequeue them +; later. Note: we get back mpFlags in R7. +; -srchmaps: mr. r10,r6 /* Save the previous entry */ - bne+ mapok /* No error... 
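Before wading into the search code, it helps to see the contract from the caller's side. Per the procedure comments above, the routine hands back mapRtRemove when it has finished only one chunk of a block mapping, mapRtBadLk on a search-lock timeout, and stores the next mapped vaddr through the pointer argument. A hypothetical C caller (the binding, the simplified codes, and the panic text here are illustrative, not from the patch) would drive it like this:

	#include <stdint.h>

	typedef uint64_t addr64_t;
	struct pmap;

	extern addr64_t hw_rem_map(struct pmap *, addr64_t va, addr64_t *next);
	extern void panic(const char *str);

	#define REM_BADLOCK ((addr64_t)1)   /* stand-in for mapRtBadLk  */
	#define REM_AGAIN   ((addr64_t)2)   /* stand-in for mapRtRemove */

	static void remove_one_mapping(struct pmap *pmap, addr64_t va)
	{
		addr64_t next = va;
		for (;;) {
			addr64_t rc = hw_rem_map(pmap, va, &next);
			if (rc == REM_AGAIN)        /* one chunk of a block removed...     */
				continue;           /* ...call again for the rest          */
			if (rc == REM_BADLOCK)      /* search-lock timeout                 */
				panic("remove_one_mapping: pmap search lock timeout");
			break;                      /* zero: not found; else removed vaddr */
		}
	}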
*/ + mr r3,r28 ; Pass in pmap to search + mr r4,r29 ; High order of address + mr r5,r30 ; Low order of address + bl EXT(mapSearchFull) ; Go see if we can find it + + andi. r0,r7,lo16(mpPerm|mpSpecial|mpNest) ; Is this nested, special, or a perm mapping? + mr r20,r7 ; Remember mpFlags + rlwinm r0,r7,0,mpRemovableb,mpRemovableb ; Are we allowed to remove it? + crmove cr5_eq,cr0_eq ; Remember if we should remove this + mr. r31,r3 ; Did we? (And remember mapping address for later) + cmplwi cr1,r0,0 ; Are we allowed to remove? + mr r15,r4 ; Save top of next vaddr + crorc cr5_eq,cr5_eq,cr1_eq ; cr5_eq is true if this is not removable + mr r16,r5 ; Save bottom of next vaddr + beq hrmNotFound ; Nope, not found... + + bf-- cr5_eq,hrmPerm ; This one can't be removed... +; +; Here we try to promote to an exclusive lock. This will fail if someone else +; has it shared. +; - lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! */ - ori r0,r0,LOW_ADDR(Choke) - sc /* Firmware Heimlich manuever */ - - .align 4 - -mapok: lwz r6,mmhashnext(r6) /* Look at the next one */ - cmplwi cr5,r4,0 /* Is there a PTE? */ - cmplw r6,r3 /* Have we found ourselves? */ - bne+ srchmaps /* Nope, get your head together... */ + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkPromote ; Try to promote shared to exclusive + mr. r3,r3 ; Could we? + beq++ hrmGotX ; Yeah... - stw r12,mmhashnext(r10) /* Remove us from the queue */ - rlwinm r9,r5,1,0,3 /* Move in the segment */ - rlwinm r8,r4,6,4,19 /* Line PTEG disp up to a page */ - rlwinm r11,r5,5,4,19 /* Line up the VSID */ - lwz r10,mmphysent(r3) /* Point to the physical entry */ +; +; Since we could not promote our lock, we need to convert to it. +; That means that we drop the shared lock and wait to get it +; exclusive. Since we release the lock, we need to do the look up +; again. +; + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkConvert ; Convert shared to exclusive + mr. r3,r3 ; Could we? + bne-- hrmBadLock ; Nope, we must have timed out... + + mr r3,r28 ; Pass in pmap to search + mr r4,r29 ; High order of address + mr r5,r30 ; Low order of address + bl EXT(mapSearchFull) ; Rescan the list + + andi. r0,r7,lo16(mpPerm|mpSpecial|mpNest) ; Is this nested, special, or a perm mapping? + rlwinm r0,r7,0,mpRemovableb,mpRemovableb ; Are we allowed to remove it? + crmove cr5_eq,cr0_eq ; Remember if we should remove this + mr. r31,r3 ; Did we lose it when we converted? + cmplwi cr1,r0,0 ; Are we allowed to remove? + mr r20,r7 ; Remember mpFlags + crorc cr5_eq,cr5_eq,cr1_eq ; cr5_eq is true if this is not removable + mr r15,r4 ; Save top of next vaddr + mr r16,r5 ; Save bottom of next vaddr + beq-- hrmNotFound ; Yeah, we did, someone tossed it for us... - beq+ cr5,nopte /* There's no PTE to invalidate... */ + bf-- cr5_eq,hrmPerm ; This one can't be removed... + +; +; We have an exclusive lock on the mapping chain. And we +; also have the busy count bumped in the mapping so it can +; not vanish on us. 
+; + +hrmGotX: mr r3,r31 ; Get the mapping + bl mapBumpBusy ; Bump up the busy count - xor r8,r8,r11 /* Back hash to virt index */ - lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */ - rlwimi r9,r5,22,4,9 /* Move in the API */ - ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */ - mfspr r11,pvr /* Find out what kind of machine we are */ - rlwimi r9,r8,0,10,19 /* Create the virtual address */ - rlwinm r11,r11,16,16,31 /* Isolate CPU type */ +; +; Invalidate any PTEs associated with this +; mapping (more than one if a block) and accumulate the reference +; and change bits. +; +; Here is also where we need to split 32- and 64-bit processing +; - stw r5,0(r4) /* Make the PTE invalid */ + lwz r21,mpPte(r31) ; Grab the offset to the PTE + rlwinm r23,r29,0,1,0 ; Copy high order vaddr to high if 64-bit machine + mfsdr1 r29 ; Get the hash table base and size + rlwinm r0,r20,0,mpBlockb,mpBlockb ; Is this a block mapping? + andi. r2,r20,lo16(mpSpecial|mpNest) ; Is this nest or special mapping? + cmplwi cr5,r0,0 ; Remember if this is a block mapping + rlwinm r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE + ori r2,r2,0xFFFF ; Get mask to clean out hash table base (works for both 32- and 64-bit) + cmpwi cr1,r0,0 ; Have we made a PTE for this yet? + rlwinm r21,r21,0,0,30 ; Clear out valid bit + crorc cr0_eq,cr1_eq,cr0_eq ; No need to look at PTE if none or a special mapping + rlwimi r23,r30,0,0,31 ; Insert low under high part of address + andc r29,r29,r2 ; Clean up hash table base + li r22,0 ; Clear this on out (also sets RC to 0 if we bail) + mr r30,r23 ; Move the now merged vaddr to the correct register + add r26,r29,r21 ; Point to the PTEG slot + + bt++ pf64Bitb,hrmSplit64 ; Go do 64-bit version... + + rlwinm r9,r21,28,4,29 ; Convert PTEG to PCA entry + bne- cr5,hrmBlock32 ; Go treat block specially... + subfic r9,r9,-4 ; Get the PCA entry offset + bt- cr0_eq,hrmPysDQ32 ; Skip next if no possible PTE... + add r7,r9,r29 ; Point to the PCA slot - cmplwi cr1,r11,3 /* Is this a 603? */ - sync /* Make sure the invalid is stored */ - -tlbhang1: lwarx r5,0,r12 /* Get the TLBIE lock */ - rlwinm r11,r4,29,29,31 /* Get the bit position of entry */ - mr. r5,r5 /* Is it locked? */ - lis r6,0x8000 /* Start up a bit mask */ - li r5,1 /* Get our lock word */ - bne- tlbhang1 /* It's locked, go wait... */ - stwcx. r5,0,r12 /* Try to get it */ - bne- tlbhang1 /* We was beat... */ + + bl mapLockPteg ; Go lock up the PTEG (Note: we need to save R6 to set PCA) + + lwz r21,mpPte(r31) ; Get the quick pointer again + lwz r5,0(r26) ; Get the top of PTE - srw r6,r6,r11 /* Make a "free slot" mask */ - lwz r5,PCAallo(r7) /* Get the allocation control bits */ - rlwinm r11,r6,24,8,15 /* Make the autogen bit to turn off */ - or r5,r5,r6 /* turn on the free bit */ - rlwimi r11,r11,24,16,23 /* Get lock bit mask to turn it off */ + rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE + rlwinm r21,r21,0,0,30 ; Clear out valid bit + rlwinm r5,r5,0,1,31 ; Turn off valid bit in PTE + stw r21,mpPte(r31) ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake) + beq- hrmUlckPCA32 ; Pte is gone, no need to invalidate... 
- andc r5,r5,r11 /* Turn off the lock and autogen bits in allocation flags */ - li r11,0 /* Lock clear value */ + stw r5,0(r26) ; Invalidate the PTE - tlbie r9 /* Invalidate it everywhere */ + li r9,tlbieLock ; Get the TLBIE lock + sync ; Make sure the invalid PTE is actually in memory + +hrmPtlb32: lwarx r5,0,r9 ; Get the TLBIE lock + mr. r5,r5 ; Is it locked? + li r5,1 ; Get locked indicator + bne- hrmPtlb32 ; It is locked, go spin... + stwcx. r5,0,r9 ; Try to get it + bne- hrmPtlb32 ; We was beat... + + rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP? + + tlbie r30 ; Invalidate all corresponding TLB entries - beq- cr1,its603a /* It's a 603, skip the tlbsync... */ - - eieio /* Make sure that the tlbie happens first */ - tlbsync /* wait for everyone to catch up */ - isync + beq- hrmNTlbs ; Jump if we can not do a TLBSYNC.... -its603a: sync /* Make sure of it all */ - stw r11,0(r12) /* Clear the tlbie lock */ - eieio /* Make sure those RC bit are loaded */ - stw r5,PCAallo(r7) /* Show that the slot is free */ - stw r11,mmPTEent(r3) /* Clear the pointer to the PTE */ + eieio ; Make sure that the tlbie happens first + tlbsync ; Wait for everyone to catch up + sync ; Make sure of it all + +hrmNTlbs: li r0,0 ; Clear this + rlwinm r2,r21,29,29,31 ; Get slot number (8 byte entries) + stw r0,tlbieLock(0) ; Clear the tlbie lock + lis r0,0x8000 ; Get bit for slot 0 + eieio ; Make sure those RC bits have been stashed in PTE + + srw r0,r0,r2 ; Get the allocation hash mask + lwz r22,4(r26) ; Get the latest reference and change bits + or r6,r6,r0 ; Show that this slot is free + +hrmUlckPCA32: + eieio ; Make sure all updates come first + stw r6,0(r7) ; Unlock the PTEG +; +; Now, it is time to remove the mapping and unlock the chain. +; But first, we need to make sure no one else is using this +; mapping so we drain the busy now +; -nopte: mr. r10,r10 /* See if there is a physical entry */ - la r9,pephyslink(r10) /* Point to the physical mapping chain */ - beq- nophys /* No physical entry, we're done... */ - beq- cr5,nadamrg /* No PTE to merge... */ +hrmPysDQ32: mr r3,r31 ; Point to the mapping + bl mapDrainBusy ; Go wait until mapping is unused - lwz r6,4(r4) /* Get the latest reference and change bits */ - la r12,pepte1(r10) /* Point right at the master copy */ - rlwinm r6,r6,0,23,24 /* Extract just the RC bits */ + mr r3,r28 ; Get the pmap to remove from + mr r4,r31 ; Point to the mapping + bl EXT(mapRemove) ; Remove the mapping from the list -mrgrc: lwarx r8,0,r12 /* Get the master copy */ - or r8,r8,r6 /* Merge in latest RC */ - stwcx. r8,0,r12 /* Save it back */ - bne- mrgrc /* If it changed, try again... */ -nadamrg: li r11,0 /* Clear this out */ - lwz r12,mmnext(r3) /* Prime with our next */ + lwz r4,pmapResidentCnt(r28) ; Get the mapped page count + andi. r0,r20,lo16(mpSpecial|mpNest) ; Is this nest or special mapping? + cmplwi cr1,r0,0 ; Special thingie? + la r3,pmapSXlk(r28) ; Point to the pmap search lock + subi r4,r4,1 ; Drop down the mapped page count + stw r4,pmapResidentCnt(r28) ; Set the mapped page count + bl sxlkUnlock ; Unlock the search list + + bne-- cr1,hrmRetn32 ; This one has no real memory associated with it so we are done... 
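The 32-bit shootdown just above, restated as C-flavored pseudocode. tlbie, tlbsync, sync and eieio are the actual PowerPC instructions, modeled here as hypothetical intrinsics; tlbieLock is the real global spin lock taken above; the PTE word constants follow the classic 32-bit hashed page table format, and the exact ordering of the lock release differs slightly in the assembly.

	#include <stdint.h>

	extern void ppc_sync(void), ppc_eieio(void), ppc_tlbsync(void);
	extern void ppc_tlbie(uint32_t va);
	extern void lock_tlbie(void), unlock_tlbie(void);  /* the lwarx/stwcx. spin above */

	#define PTE_VALID 0x80000000u   /* bit 0 of the high PTE word          */
	#define PTE_RC    0x00000180u   /* reference and change, low PTE word  */

	static uint32_t invalidate_pte32(volatile uint32_t pte[2], uint32_t va, int smp_capable)
	{
		pte[0] &= ~PTE_VALID;       /* invalidate the PTE in the hash table    */
		ppc_sync();                 /* be sure the invalid PTE reaches memory  */
		lock_tlbie();               /* one tlbie stream allowed system-wide    */
		ppc_tlbie(va);              /* drop matching TLB entries               */
		if (smp_capable) {
			ppc_eieio();        /* order the tlbie ahead of the tlbsync    */
			ppc_tlbsync();      /* wait for the other processors           */
			ppc_sync();
		}
		unlock_tlbie();
		return pte[1] & PTE_RC;     /* reference/change bits for the physent merge */
	}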
- sync ; Make sure all is saved + bl mapPhysFindLock ; Go find and lock the physent - stw r11,0(r7) /* Unlock the hash chain now so we don't - lock out another processor during - our next little search */ + lwz r9,ppLink+4(r3) ; Get first mapping + + mr r4,r22 ; Get the RC bits we just got + bl mapPhysMerge ; Go merge the RC bits + + rlwinm r9,r9,0,0,25 ; Clear the flags from the mapping pointer -srchpmap: mr. r10,r9 /* Save the previous entry */ - bne+ mapok1 /* No error... */ + cmplw r9,r31 ; Are we the first on the list? + bne- hrmNot1st ; Nope... - lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! */ - ori r0,r0,LOW_ADDR(Choke) - sc /* Firmware Heimlich maneuver */ + li r9,0 ; Get a 0 + lwz r4,mpAlias+4(r31) ; Get our new forward pointer + stw r9,mpAlias+4(r31) ; Make sure we are off the chain + bl mapPhyCSet32 ; Go set the physent link and preserve flags - .align 4 + b hrmPhyDQd ; Join up and unlock it all... -mapok1: lwz r9,mmnext(r9) /* Look at the next one */ - rlwinm r8,r9,0,27,31 ; Save the flags (including the lock) - rlwinm r9,r9,0,0,26 ; Clear out the flags from first link - cmplw r9,r3 /* Have we found ourselves? */ - bne+ srchpmap /* Nope, get your head together... */ + .align 5 - rlwimi r12,r8,0,27,31 ; Insert the lock and flags */ - stw r12,mmnext(r10) /* Remove us from the queue */ +hrmPerm: li r8,-4096 ; Get the value we need to round down to a page + and r8,r8,r31 ; Get back to a page + lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap - mtmsr r0 /* Interrupts and translation back on */ - isync -#if PERFTIMES && DEBUG - mflr r11 - li r3,25 - bl EXT(dbgLog2) ; Start of hw_add_map - mtlr r11 -#endif - blr /* Return... */ + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list + + xor r3,r31,r8 ; Flip mapping address to virtual + ori r3,r3,mapRtPerm ; Set permanent mapping error + b hrmErRtn + +hrmBadLock: li r3,mapRtBadLk ; Set bad lock + b hrmErRtn + +hrmEndInSight: + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list + +hrmDoneChunk: + mr r3,r31 ; Point to the mapping + bl mapDropBusy ; Drop the busy here since we need to come back + li r3,mapRtRemove ; Say we are still removing this + b hrmErRtn - .align 4 + .align 5 + +hrmNotFound: + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list + li r3,0 ; Make sure we know we did not find it -nophys: li r4,0 /* Make sure this is 0 */ - sync /* Make sure that chain is updated */ - stw r4,0(r7) /* Unlock the hash chain */ - mtmsr r0 /* Interrupts and translation back on */ - isync -#if PERFTIMES && DEBUG - mflr r11 - li r3,25 - bl EXT(dbgLog2) ; Start of hw_add_map - mtlr r11 -#endif - blr /* Return... */ +hrmErRtn: bt++ pf64Bitb,hrmSF1z ; skip if 64-bit (only they take the hint) + mtmsr r17 ; Restore enables/translation/etc. + isync + b hrmRetnCmn ; Join the common return code... -/* - * hw_prot(physent, prot) - Change the protection of a physical page - * - * Upon entry, R3 contains a pointer to a physical entry which is locked. - * R4 contains the PPC protection bits. - * - * The first thing we do is to slam the new protection into the phys entry. - * Then we scan the mappings and process each one. - * - * Acquire the lock on the PTEG hash list for the mapping being processed. - * - * If the current mapping has a PTE entry, we invalidate - * it and merge the reference and change information into the phys_entry. - * - * Next, slam the protection bits into the entry and unlock the hash list. 
- * - * Note that this must be done with both interruptions off and VM off - * - * - */ +hrmSF1z: mtmsrd r17 ; Restore enables/translation/etc. + isync + b hrmRetnCmn ; Join the common return code... .align 5 - .globl EXT(hw_prot) -LEXT(hw_prot) -#if PERFTIMES && DEBUG - mflr r11 - mr r7,r3 -// lwz r5,4(r3) - li r5,0x1111 - li r3,26 - bl EXT(dbgLog2) ; Start of hw_add_map - mr r3,r7 - mtlr r11 -#endif - mfsprg r9,2 ; Get feature flags - mfmsr r0 /* Save the MSR */ - li r5,pepte1 /* Get displacement to the second word of master pte */ - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ - mtcrf 0x04,r9 ; Set the features - rlwinm r12,r12,0,28,25 /* Clear IR and DR */ - - bt pfNoMSRirb,hpNoMSR ; No MSR... - - mtmsr r12 ; Translation and all off - isync ; Toss prefetch - b hpNoMSRx +hrmNot1st: mr. r8,r9 ; Remember and test current node + beq- hrmPhyDQd ; Could not find our node, someone must have unmapped us... + lwz r9,mpAlias+4(r9) ; Chain to the next + cmplw r9,r31 ; Is this us? + bne- hrmNot1st ; Not us... + + lwz r9,mpAlias+4(r9) ; Get our forward pointer + stw r9,mpAlias+4(r8) ; Unchain us -hpNoMSR: - mr r10,r0 - mr r7,r3 - li r0,loadMSR ; Get the MSR setter SC - mr r3,r12 ; Get new MSR - sc ; Set it - mr r0,r10 - mr r3,r7 -hpNoMSRx: + nop ; For alignment + +hrmPhyDQd: bl mapPhysUnlock ; Unlock the physent chain +hrmRetn32: rlwinm r8,r31,0,0,19 ; Find start of page + mr r3,r31 ; Copy the pointer to the mapping + lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap + bl mapDrainBusy ; Go wait until mapping is unused + xor r3,r31,r8 ; Flip mapping address to virtual - lwz r10,pephyslink(r3) /* Get the first mapping block */ - rlwinm r10,r10,0,0,26 ; Clear out the flags from first link + mtmsr r17 ; Restore enables/translation/etc. + isync -/* - * Note that we need to to do the interlocked update here because another processor - * can be updating the reference and change bits even though the physical entry - * is locked. All modifications to the PTE portion of the physical entry must be - * done via interlocked update. - */ +hrmRetnCmn: lwz r6,FM_ARG0+0x44(r1) ; Get address to save next mapped vaddr + lwz r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return + lwz r17,FM_ARG0+0x08(r1) ; Restore a register + lwz r18,FM_ARG0+0x0C(r1) ; Restore a register + mr. r6,r6 ; Should we pass back the "next" vaddr? + lwz r19,FM_ARG0+0x10(r1) ; Restore a register + lwz r20,FM_ARG0+0x14(r1) ; Restore a register + mtlr r0 ; Restore the return + + rlwinm r16,r16,0,0,19 ; Clean to a page boundary + beq hrmNoNextAdr ; Do not pass back the next vaddr... 
+ stw r15,0(r6) ; Pass back the top of the next vaddr + stw r16,4(r6) ; Pass back the bottom of the next vaddr + +hrmNoNextAdr: + lwz r15,FM_ARG0+0x00(r1) ; Restore a register + lwz r16,FM_ARG0+0x04(r1) ; Restore a register + lwz r21,FM_ARG0+0x18(r1) ; Restore a register + rlwinm r3,r3,0,0,31 ; Clear top of register if 64-bit + lwz r22,FM_ARG0+0x1C(r1) ; Restore a register + lwz r23,FM_ARG0+0x20(r1) ; Restore a register + lwz r24,FM_ARG0+0x24(r1) ; Restore a register + lwz r25,FM_ARG0+0x28(r1) ; Restore a register + lwz r26,FM_ARG0+0x2C(r1) ; Restore a register + lwz r27,FM_ARG0+0x30(r1) ; Restore a register + lwz r28,FM_ARG0+0x34(r1) ; Restore a register + lwz r29,FM_ARG0+0x38(r1) ; Restore a register + lwz r30,FM_ARG0+0x3C(r1) ; Restore a register + lwz r31,FM_ARG0+0x40(r1) ; Restore a register + lwz r1,0(r1) ; Pop the stack + blr ; Leave... + +; +; Here is where we come when all is lost. Somehow, we failed a mapping function +; that must work... All hope is gone. Alas, we die....... +; -protcng: lwarx r8,r5,r3 /* Get the master copy */ - rlwimi r8,r4,0,30,31 /* Move in the protection bits */ - stwcx. r8,r5,r3 /* Save it back */ - bne- protcng /* If it changed, try again... */ +hrmPanic: lis r0,hi16(Choke) ; System abend + ori r0,r0,lo16(Choke) ; System abend + li r3,failMapping ; Show that we failed some kind of mapping thing + sc +; +; Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed +; in the range. Then, if we did not finish, return a code indicating that we need to +; be called again. Eventually, we will finish and then, we will do a TLBIE for each +; PTEG up to the point where we have cleared it all (64 for 32-bit architecture) +; +; A potential speed up is that we stop the invalidate loop once we have walked through +; the hash table once. This really is not worth the trouble because we need to have +; mapped 1/2 of physical RAM in an individual block. Way unlikely. +; +; We should rethink this and see if we think it will be faster to check PTE and +; only invalidate the specific PTE rather than all block map PTEs in the PTEG. +; -protnext: mr. r10,r10 /* Are there any more mappings? */ - beq- protdone /* Naw... */ + .align 5 - lwz r7,mmPTEhash(r10) /* Get pointer to hash list anchor */ - lwz r5,mmPTEv(r10) /* Get the virtual address */ - rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */ +hrmBlock32: + lhz r23,mpSpace(r31) ; Get the address space hash + lhz r25,mpBSize(r31) ; Get the number of pages in block + lwz r9,mpBlkRemCur(r31) ; Get our current remove position + ori r0,r20,mpRIP ; Turn on the remove in progress flag + mfsdr1 r29 ; Get the hash table base and size + rlwinm r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb ; Get high order of hash + lwz r27,mpVAddr+4(r31) ; Get the base vaddr + sub r4,r25,r9 ; Get number of pages left + cmplw cr1,r9,r25 ; Have we already hit the end? 
+ addi r10,r9,mapRemChunk ; Point to the start of the next chunk + addi r2,r4,-mapRemChunk ; See if mapRemChunk or more + rlwinm r26,r29,16,7,15 ; Get the hash table size + srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more + stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on + subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk) + cmpwi cr7,r2,0 ; Remember if we have finished + slwi r0,r9,12 ; Make cursor into page offset + or r24,r24,r23 ; Get full hash + and r4,r4,r2 ; If more than a chunk, bring this back to 0 + rlwinm r29,r29,0,0,15 ; Isolate the hash table base + add r27,r27,r0 ; Adjust vaddr to start of current chunk + addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize) + + bgt- cr1,hrmEndInSight ; Someone is already doing the last hunk... + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end) + bl sxlkUnlock ; Unlock the search list while we are invalidating + + rlwinm r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb ; Isolate the segment + rlwinm r30,r27,26,6,25 ; Shift vaddr to PTEG offset (and remember VADDR in R27) + xor r24,r24,r8 ; Get the proper VSID + rlwinm r21,r27,26,10,25 ; Shift page index to PTEG offset (and remember VADDR in R27) + ori r26,r26,lo16(0xFFC0) ; Stick in the rest of the length + rlwinm r22,r4,6,10,25 ; Shift size to PTEG offset + rlwinm r24,r24,6,0,25 ; Shift hash to PTEG units + add r22,r22,r30 ; Get end address (in PTEG units) + +hrmBInv32: rlwinm r23,r30,0,10,25 ; Isolate just the page index + xor r23,r23,r24 ; Hash it + and r23,r23,r26 ; Wrap it into the table + rlwinm r3,r23,28,4,29 ; Change to PCA offset + subfic r3,r3,-4 ; Get the PCA entry offset + add r7,r3,r29 ; Point to the PCA slot + cmplw cr5,r30,r22 ; Check if we reached the end of the range + addi r30,r30,64 ; bump to the next vaddr + + bl mapLockPteg ; Lock the PTEG + + rlwinm. r4,r6,16,0,7 ; Position, save, and test block mappings in PCA + add r5,r23,r29 ; Point to the PTEG + li r0,0 ; Set an invalid PTE value + beq+ hrmBNone32 ; No block map PTEs in this PTEG... + mtcrf 0x80,r4 ; Set CRs to select PTE slots + mtcrf 0x40,r4 ; Set CRs to select PTE slots - li r12,1 /* Get the locked value */ + bf 0,hrmSlot0 ; No autogen here + stw r0,0x00(r5) ; Invalidate PTE -protLck1: lwarx r11,0,r7 /* Get the PTEG lock */ - mr. r11,r11 /* Is it locked? */ - bne- protLckw1 /* Yeah... */ - stwcx. r12,0,r7 /* Try to take it */ - bne- protLck1 /* Someone else was trying, try again... */ - b protSXg1 /* All done... */ - - .align 4 +hrmSlot0: bf 1,hrmSlot1 ; No autogen here + stw r0,0x08(r5) ; Invalidate PTE -protLckw1: mr. r11,r11 /* Check if it's already held */ - beq+ protLck1 /* It's clear... */ - lwz r11,0(r7) /* Get lock word again... */ - b protLckw1 /* Wait... */ - - .align 4 +hrmSlot1: bf 2,hrmSlot2 ; No autogen here + stw r0,0x10(r5) ; Invalidate PTE -protSXg1: isync /* Make sure we haven't used anything yet */ +hrmSlot2: bf 3,hrmSlot3 ; No autogen here + stw r0,0x18(r5) ; Invalidate PTE - lwz r6,mmPTEent(r10) /* Get the pointer to the PTE now that the lock's set */ +hrmSlot3: bf 4,hrmSlot4 ; No autogen here + stw r0,0x20(r5) ; Invalidate PTE - rlwinm r9,r5,1,0,3 /* Move in the segment */ - lwz r2,mmPTEr(r10) ; Get the mapping copy of the PTE - mr. 
r6,r6 /* See if there is a PTE here */ - rlwinm r8,r5,31,2,25 /* Line it up */ - rlwimi r2,r4,0,30,31 ; Move protection bits into the mapping copy - - beq+ protul /* There's no PTE to invalidate... */ - - xor r8,r8,r6 /* Back hash to virt index */ - rlwimi r9,r5,22,4,9 /* Move in the API */ - lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */ - rlwinm r5,r5,0,1,31 /* Clear the valid bit */ - ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */ - mfspr r11,pvr /* Find out what kind of machine we are */ - rlwimi r9,r8,6,10,19 /* Create the virtual address */ - rlwinm r11,r11,16,16,31 /* Isolate CPU type */ - - stw r5,0(r6) /* Make the PTE invalid */ - cmplwi cr1,r11,3 /* Is this a 603? */ - sync /* Make sure the invalid is stored */ - -tlbhangp: lwarx r11,0,r12 /* Get the TLBIE lock */ - rlwinm r8,r6,29,29,31 /* Get the bit position of entry */ - mr. r11,r11 /* Is it locked? */ - lis r5,0x8000 /* Start up a bit mask */ - li r11,1 /* Get our lock word */ - bne- tlbhangp /* It's locked, go wait... */ - stwcx. r11,0,r12 /* Try to get it */ - bne- tlbhangp /* We was beat... */ - - li r11,0 /* Lock clear value */ +hrmSlot4: bf 5,hrmSlot5 ; No autogen here + stw r0,0x28(r5) ; Invalidate PTE - tlbie r9 /* Invalidate it everywhere */ +hrmSlot5: bf 6,hrmSlot6 ; No autogen here + stw r0,0x30(r5) ; Invalidate PTE - beq- cr1,its603p /* It's a 603, skip the tlbsync... */ - - eieio /* Make sure that the tlbie happens first */ - tlbsync /* wait for everyone to catch up */ - isync - -its603p: stw r11,0(r12) /* Clear the lock */ - srw r5,r5,r8 /* Make a "free slot" mask */ - sync /* Make sure of it all */ +hrmSlot6: bf 7,hrmSlot7 ; No autogen here + stw r0,0x38(r5) ; Invalidate PTE - lwz r6,4(r6) /* Get the latest reference and change bits */ - stw r11,mmPTEent(r10) /* Clear the pointer to the PTE */ - rlwinm r6,r6,0,23,24 /* Extract the RC bits */ - lwz r9,PCAallo(r7) /* Get the allocation control bits */ - rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */ - rlwimi r2,r6,0,23,24 ; Put the latest RC bit in mapping copy - or r9,r9,r5 /* Set the slot free */ - rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */ - andc r9,r9,r8 /* Clear the auto and lock bits */ - li r5,pepte1 /* Get displacement to the second word of master pte */ - stw r9,PCAallo(r7) /* Store the allocation controls */ - -protmod: lwarx r11,r5,r3 /* Get the master copy */ - or r11,r11,r6 /* Merge in latest RC */ - stwcx. r11,r5,r3 /* Save it back */ - bne- protmod /* If it changed, try again... */ - -protul: li r4,0 /* Get a 0 */ - stw r2,mmPTEr(r10) ; Save the updated mapping PTE - lwz r10,mmnext(r10) /* Get the next */ +hrmSlot7: rlwinm r0,r4,16,16,23 ; Move in use to autogen + or r6,r6,r4 ; Flip on the free bits that corrospond to the autogens we cleared + andc r6,r6,r0 ; Turn off all the old autogen bits - sync ; Make sure stores are complete +hrmBNone32: eieio ; Make sure all updates come first - stw r4,0(r7) /* Unlock the hash chain */ - b protnext /* Go get the next one */ + stw r6,0(r7) ; Unlock and set the PCA - .align 4 + bne+ cr5,hrmBInv32 ; Go invalidate the next... -protdone: mtmsr r0 /* Interrupts and translation back on */ - isync -#if PERFTIMES && DEBUG - mflr r11 - li r3,27 - bl EXT(dbgLog2) ; Start of hw_add_map - mtlr r11 -#endif - blr /* Return... */ + bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again... 
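For reference, the hrmBInv32 walk just shown relies on the 32-bit PowerPC primary hash: the low 19 bits of the VSID XORed with the 16-bit page index, wrapped into the hash table, one 64-byte PTEG (eight 8-byte PTEs) per hash value. The PCA words sit at negative offsets below the table base, which is why the loop computes its PCA slot with subfic r3,r3,-4. A hedged C sketch of the index computation, with illustrative parameter names and the wrap shown as a single mask for simplicity:

    #include <stdint.h>

    /* htab_base and hash_mask would be derived from SDR1, as the mfsdr1
       above does; this is a sketch, not xnu's interface. */
    static uintptr_t pteg_address(uintptr_t htab_base, uint32_t hash_mask,
                                  uint32_t vsid, uint32_t ea)
    {
        uint32_t page_index = (ea >> 12) & 0xFFFF;     /* EA bits 4..19 */
        uint32_t hash = (vsid & 0x7FFFF) ^ page_index; /* primary hash  */
        return htab_base + ((uintptr_t)(hash & hash_mask) << 6);
    }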
+ mr r3,r31 ; Copy the pointer to the mapping + bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one -/* - * hw_prot_virt(mapping, prot) - Change the protection of single page - * - * Upon entry, R3 contains a pointer (real) to a mapping. - * R4 contains the PPC protection bits. - * - * Acquire the lock on the PTEG hash list for the mapping being processed. - * - * If the current mapping has a PTE entry, we invalidate - * it and merge the reference and change information into the phys_entry. - * - * Next, slam the protection bits into the entry, merge the RC bits, - * and unlock the hash list. - * - * Note that this must be done with both interruptions off and VM off - * - * - */ + sync ; Make sure memory is consistent + + subi r5,r25,63 ; Subtract TLB size from page count (note we are 0 based here) + li r6,63 ; Assume full invalidate for now + srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise + andc r6,r6,r5 ; Clear max if we have less to do + and r5,r25,r5 ; Clear count if we have more than max + lwz r27,mpVAddr+4(r31) ; Get the base vaddr again + li r7,tlbieLock ; Get the TLBIE lock + or r5,r5,r6 ; Get number of TLBIEs needed + +hrmBTLBlck: lwarx r2,0,r7 ; Get the TLBIE lock + mr. r2,r2 ; Is it locked? + li r2,1 ; Get our lock value + bne- hrmBTLBlck ; It is locked, go wait... + stwcx. r2,0,r7 ; Try to get it + bne- hrmBTLBlck ; We was beat... + +hrmBTLBi: addic. r5,r5,-1 ; See if we did them all + tlbie r27 ; Invalidate it everywhere + addi r27,r27,0x1000 ; Up to the next page + bge+ hrmBTLBi ; Make sure we have done it all... + + rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP? + li r2,0 ; Lock clear value + + sync ; Make sure all is quiet + beq- hrmBNTlbs ; Jump if we can not do a TLBSYNC.... + + eieio ; Make sure that the tlbie happens first + tlbsync ; Wait for everyone to catch up + sync ; Wait for quiet again + +hrmBNTlbs: stw r2,tlbieLock(0) ; Clear the tlbie lock + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkShared ; Go get a shared lock on the mapping lists + mr. r3,r3 ; Did we get the lock? + bne- hrmPanic ; Nope... + + lwz r4,mpVAddr(r31) ; High order of address + lwz r5,mpVAddr+4(r31) ; Low order of address + mr r3,r28 ; Pass in pmap to search + mr r29,r4 ; Save this in case we need it (only promote fails) + mr r30,r5 ; Save this in case we need it (only promote fails) + bl EXT(mapSearchFull) ; Go see if we can find it + + mr. r3,r3 ; Did we? (And remember mapping address for later) + mr r15,r4 ; Save top of next vaddr + mr r16,r5 ; Save bottom of next vaddr + beq- hrmPanic ; Nope, not found... + + cmplw r3,r31 ; Same mapping? + bne- hrmPanic ; Not good... + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkPromote ; Try to promote shared to exclusive + mr. r3,r3 ; Could we? + mr r3,r31 ; Restore the mapping pointer + beq+ hrmBDone1 ; Yeah... + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkConvert ; Convert shared to exclusive + mr. r3,r3 ; Could we? + bne-- hrmPanic ; Nope, we must have timed out... + + mr r3,r28 ; Pass in pmap to search + mr r4,r29 ; High order of address + mr r5,r30 ; Low order of address + bl EXT(mapSearchFull) ; Rescan the list + + mr. r3,r3 ; Did we lose it when we converted? + mr r15,r4 ; Save top of next vaddr + mr r16,r5 ; Save bottom of next vaddr + beq-- hrmPanic ; Yeah, we did, someone tossed it for us... 
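The lock-promotion dance above is easy to lose in the assembly, so here is its control flow in hedged C form. The sxlk_* and map_* names merely stand in for sxlkShared/sxlkPromote/sxlkConvert and friends, and the semantics are inferred from the comments: promotion can fail if another sharer raced us, and conversion can momentarily expose the list, hence the rescan.

    typedef struct pmap pmap_t;
    typedef struct mapping mapping_t;

    /* Stand-ins for the assembly-level primitives used above. */
    extern void sxlk_shared(pmap_t *);
    extern int  sxlk_promote(pmap_t *);  /* 0 if shared -> exclusive worked */
    extern int  sxlk_convert(pmap_t *);  /* slow path; 0 unless timed out   */
    extern void sxlk_unlock(pmap_t *);
    extern mapping_t *map_search_full(pmap_t *, unsigned long long va);
    extern void map_remove(pmap_t *, mapping_t *);
    extern void panic(const char *);

    static mapping_t *block_dequeue(pmap_t *pmap, unsigned long long va)
    {
        sxlk_shared(pmap);                  /* shared lock for the lookup */
        mapping_t *mp = map_search_full(pmap, va);
        if (mp == NULL)
            panic("mapping vanished");      /* the hrmPanic case */

        if (sxlk_promote(pmap) != 0) {      /* promotion raced */
            if (sxlk_convert(pmap) != 0)
                panic("sxlk convert timed out");
            mp = map_search_full(pmap, va); /* list may have changed */
            if (mp == NULL)
                panic("someone tossed it for us");
        }
        map_remove(pmap, mp);               /* exclusive: safe to dequeue */
        sxlk_unlock(pmap);
        return mp;
    }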
+ +hrmBDone1: bl mapDrainBusy ; Go wait until mapping is unused + + mr r3,r28 ; Get the pmap to remove from + mr r4,r31 ; Point to the mapping + bl EXT(mapRemove) ; Remove the mapping from the list + + lwz r4,pmapResidentCnt(r28) ; Get the mapped page count + la r3,pmapSXlk(r28) ; Point to the pmap search lock + subi r4,r4,1 ; Drop down the mapped page count + stw r4,pmapResidentCnt(r28) ; Set the mapped page count + bl sxlkUnlock ; Unlock the search list + + b hrmRetn32 ; We are all done, get out... +; +; Here we handle the 64-bit version of hw_rem_map +; + .align 5 - .globl EXT(hw_prot_virt) - -LEXT(hw_prot_virt) -#if PERFTIMES && DEBUG - mflr r11 - mr r7,r3 -// lwz r5,4(r3) - li r5,0x1111 - li r3,40 - bl EXT(dbgLog2) ; Start of hw_add_map - mr r3,r7 - mtlr r11 -#endif - mfsprg r9,2 ; Get feature flags - mfmsr r0 /* Save the MSR */ - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ - mtcrf 0x04,r9 ; Set the features - rlwinm r12,r12,0,28,25 /* Clear IR and DR */ + +hrmSplit64: rlwinm r9,r21,27,5,29 ; Convert PTEG to PCA entry + bne-- cr5,hrmBlock64 ; Go treat block specially... + subfic r9,r9,-4 ; Get the PCA entry offset + bt-- cr0_eq,hrmPysDQ64 ; Skip next if no possible PTE... + add r7,r9,r29 ; Point to the PCA slot + + bl mapLockPteg ; Go lock up the PTEG + + lwz r21,mpPte(r31) ; Get the quick pointer again + ld r5,0(r26) ; Get the top of PTE + + rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE + rlwinm r21,r21,0,0,30 ; Clear out valid bit + sldi r23,r5,16 ; Shift AVPN up to EA format + rldicr r5,r5,0,62 ; Clear the valid bit + rldimi r23,r30,0,36 ; Insert the page portion of the VPN + stw r21,mpPte(r31) ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake) + beq-- hrmUlckPCA64 ; Pte is gone, no need to invalidate... + + std r5,0(r26) ; Invalidate the PTE + + li r9,tlbieLock ; Get the TLBIE lock + + sync ; Make sure the invalid PTE is actually in memory + +hrmPtlb64: lwarx r5,0,r9 ; Get the TLBIE lock + rldicl r23,r23,0,16 ; Clear bits 0:15 cause they say to + mr. r5,r5 ; Is it locked? + li r5,1 ; Get locked indicator + bne-- hrmPtlb64w ; It is locked, go spin... + stwcx. r5,0,r9 ; Try to get it + bne-- hrmPtlb64 ; We was beat... + + tlbie r23 ; Invalidate it all corresponding TLB entries - bt pfNoMSRirb,hpvNoMSR ; No MSR... 
+ eieio ; Make sure that the tlbie happens first + tlbsync ; Wait for everyone to catch up + isync + + ptesync ; Make sure of it all + li r0,0 ; Clear this + rlwinm r2,r21,28,29,31 ; Get slot number (16 byte entries) + stw r0,tlbieLock(0) ; Clear the tlbie lock + oris r0,r0,0x8000 ; Assume slot 0 + eieio ; Make sure those RC bit have been stashed in PTE + srw r0,r0,r2 ; Get slot mask to deallocate - mtmsr r12 ; Translation and all off - isync ; Toss prefetch - b hpvNoMSRx + lwz r22,12(r26) ; Get the latest reference and change bits + or r6,r6,r0 ; Make the guy we killed free -hpvNoMSR: - mr r5,r0 - mr r7,r3 - li r0,loadMSR ; Get the MSR setter SC - mr r3,r12 ; Get new MSR - sc ; Set it - mr r3,r7 - mr r0,r5 -hpvNoMSRx: +hrmUlckPCA64: + eieio ; Make sure all updates come first + + stw r6,0(r7) ; Unlock and change the PCA + +hrmPysDQ64: mr r3,r31 ; Point to the mapping + bl mapDrainBusy ; Go wait until mapping is unused + + mr r3,r28 ; Get the pmap to insert into + mr r4,r31 ; Point to the mapping + bl EXT(mapRemove) ; Remove the mapping from the list + + andi. r0,r20,lo16(mpSpecial|mpNest) ; Is this nest or special mapping? + lwz r4,pmapResidentCnt(r28) ; Get the mapped page count + cmplwi cr1,r0,0 ; Special thingie? + la r3,pmapSXlk(r28) ; Point to the pmap search lock + subi r4,r4,1 ; Drop down the mapped page count + stw r4,pmapResidentCnt(r28) ; Set the mapped page count + bl sxlkUnlock ; Unlock the search list + + bne-- cr1,hrmRetn64 ; This one has no real memory associated with it so we are done... + bl mapPhysFindLock ; Go find and lock the physent + li r0,0xFF ; Get mask to clean up mapping pointer + ld r9,ppLink(r3) ; Get first mapping + rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC0000000000000003F + mr r4,r22 ; Get the RC bits we just got -/* - * Note that we need to to do the interlocked update here because another processor - * can be updating the reference and change bits even though the physical entry - * is locked. All modifications to the PTE portion of the physical entry must be - * done via interlocked update. - */ + bl mapPhysMerge ; Go merge the RC bits - lwz r7,mmPTEhash(r3) /* Get pointer to hash list anchor */ - lwz r5,mmPTEv(r3) /* Get the virtual address */ - rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */ - - li r12,1 /* Get the locked value */ - -protvLck1: lwarx r11,0,r7 /* Get the PTEG lock */ - mr. r11,r11 /* Is it locked? */ - bne- protvLckw1 /* Yeah... */ - stwcx. r12,0,r7 /* Try to take it */ - bne- protvLck1 /* Someone else was trying, try again... */ - b protvSXg1 /* All done... */ + andc r9,r9,r0 ; Clean up the mapping pointer - .align 4 - -protvLckw1: mr. r11,r11 /* Check if it's already held */ - beq+ protvLck1 /* It's clear... */ - lwz r11,0(r7) /* Get lock word again... */ - b protvLckw1 /* Wait... */ + cmpld r9,r31 ; Are we the first on the list? + bne- hrmNot1st64 ; Nope... - .align 4 - -protvSXg1: isync /* Make sure we haven't used anything yet */ - - lwz r6,mmPTEent(r3) /* Get the pointer to the PTE now that the lock's set */ - lwz r2,mmPTEr(r3) ; Get the mapping copy if the real part + li r9,0 ; Get a 0 + ld r4,mpAlias(r31) ; Get our forward pointer + + std r9,mpAlias(r31) ; Make sure we are off the chain + bl mapPhyCSet64 ; Go set the physent link and preserve flags - rlwinm r9,r5,1,0,3 /* Move in the segment */ - cmplwi cr7,r6,0 ; Any PTE to invalidate? - rlwimi r2,r4,0,30,31 ; Move in the new protection bits - rlwinm r8,r5,31,2,25 /* Line it up */ - - beq+ cr7,pvnophys /* There's no PTE to invalidate... 
*/ - - xor r8,r8,r6 /* Back hash to virt index */ - rlwimi r9,r5,22,4,9 /* Move in the API */ - lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */ - rlwinm r5,r5,0,1,31 /* Clear the valid bit */ - ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */ - mfspr r11,pvr /* Find out what kind of machine we are */ - rlwimi r9,r8,6,10,19 /* Create the virtual address */ - rlwinm r11,r11,16,16,31 /* Isolate CPU type */ - - stw r5,0(r6) /* Make the PTE invalid */ - cmplwi cr1,r11,3 /* Is this a 603? */ - sync /* Make sure the invalid is stored */ + b hrmPhyDQd64 ; Join up and unlock it all... + +hrmPtlb64w: li r5,lgKillResv ; Point to some spare memory + stwcx. r5,0,r5 ; Clear the pending reservation -tlbhangpv: lwarx r11,0,r12 /* Get the TLBIE lock */ - rlwinm r8,r6,29,29,31 /* Get the bit position of entry */ - mr. r11,r11 /* Is it locked? */ - lis r5,0x8000 /* Start up a bit mask */ - li r11,1 /* Get our lock word */ - bne- tlbhangpv /* It's locked, go wait... */ - stwcx. r11,0,r12 /* Try to get it */ - bne- tlbhangpv /* We was beat... */ - li r11,0 /* Lock clear value */ +hrmPtlb64x: lwz r5,0(r9) ; Do a regular load to avoid taking reservation + mr. r5,r5 ; is it locked? + beq++ hrmPtlb64 ; Nope... + b hrmPtlb64x ; Sniff some more... + + .align 5 + +hrmNot1st64: + mr. r8,r9 ; Remember and test current node + beq- hrmNotFound ; Could not find our node... + ld r9,mpAlias(r9) ; Chain to the next + cmpld r9,r31 ; Is this us? + bne- hrmNot1st64 ; Not us... + + ld r9,mpAlias(r9) ; Get our forward pointer + std r9,mpAlias(r8) ; Unchain us + + nop ; For alignment + +hrmPhyDQd64: + bl mapPhysUnlock ; Unlock the physent chain - tlbie r9 /* Invalidate it everywhere */ +hrmRetn64: rldicr r8,r31,0,51 ; Find start of page + mr r3,r31 ; Copy the pointer to the mapping + lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap + bl mapDrainBusy ; Go wait until mapping is unused - beq- cr1,its603pv /* It's a 603, skip the tlbsync... */ - - eieio /* Make sure that the tlbie happens first */ - tlbsync /* wait for everyone to catch up */ - isync + xor r3,r31,r8 ; Flip mapping address to virtual -its603pv: stw r11,0(r12) /* Clear the lock */ - srw r5,r5,r8 /* Make a "free slot" mask */ - sync /* Make sure of it all */ - - lwz r6,4(r6) /* Get the latest reference and change bits */ - stw r11,mmPTEent(r3) /* Clear the pointer to the PTE */ - rlwinm r6,r6,0,23,24 /* Extract the RC bits */ - lwz r9,PCAallo(r7) /* Get the allocation control bits */ - rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */ - lwz r10,mmphysent(r3) ; Get any physical entry - or r9,r9,r5 /* Set the slot free */ - rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */ - andc r9,r9,r8 /* Clear the auto and lock bits */ - mr. r10,r10 ; Is there a physical entry? - li r5,pepte1 /* Get displacement to the second word of master pte */ - stw r9,PCAallo(r7) /* Store the allocation controls */ - rlwimi r2,r6,0,23,24 ; Stick in RC bits - beq- pvnophys ; No physical entry... - -protvmod: lwarx r11,r5,r10 /* Get the master copy */ - or r11,r11,r6 /* Merge in latest RC */ - stwcx. r11,r5,r10 /* Save it back */ - bne- protvmod /* If it changed, try again... */ - -pvnophys: li r4,0 /* Get a 0 */ - stw r2,mmPTEr(r3) ; Set the real part of the PTE - - sync ; Make sure everything is stored - - stw r4,0(r7) /* Unlock the hash chain */ - mtmsr r0 ; Restore interrupts and translation + mtmsrd r17 ; Restore enables/translation/etc. 
isync - -#if PERFTIMES && DEBUG - mflr r11 - li r3,41 - bl EXT(dbgLog2) - mtlr r11 -#endif - blr /* Return... */ + + b hrmRetnCmn ; Join the common return path... -/* - * hw_attr_virt(mapping, attr) - Change the attributes of single page - * - * Upon entry, R3 contains a pointer (real) to a mapping. - * R4 contains the WIMG bits. - * - * Acquire the lock on the PTEG hash list for the mapping being processed. - * - * If the current mapping has a PTE entry, we invalidate - * it and merge the reference and change information into the phys_entry. - * - * Next, slam the WIMG bits into the entry, merge the RC bits, - * and unlock the hash list. - * - * Note that this must be done with both interruptions off and VM off - * - * - */ +; +; Check hrmBlock32 for comments. +; .align 5 - .globl EXT(hw_attr_virt) + +hrmBlock64: + lhz r24,mpSpace(r31) ; Get the address space hash + lhz r25,mpBSize(r31) ; Get the number of pages in block + lwz r9,mpBlkRemCur(r31) ; Get our current remove position + ori r0,r20,mpRIP ; Turn on the remove in progress flag + mfsdr1 r29 ; Get the hash table base and size + ld r27,mpVAddr(r31) ; Get the base vaddr + rlwinm r5,r29,0,27,31 ; Isolate the size + sub r4,r25,r9 ; Get number of pages left + cmplw cr1,r9,r25 ; Have we already hit the end? + addi r10,r9,mapRemChunk ; Point to the start of the next chunk + addi r2,r4,-mapRemChunk ; See if mapRemChunk or more + stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on + srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more + subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk) + cmpwi cr7,r2,0 ; Remember if we are doing the last chunk + and r4,r4,r2 ; If more than a chunk, bring this back to 0 + srdi r27,r27,12 ; Change address into page index + addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize) + add r27,r27,r9 ; Adjust vaddr to start of current chunk + + bgt-- cr1,hrmEndInSight ; Someone is already doing the last hunk... + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end) + bl sxlkUnlock ; Unlock the search list while we are invalidating + + rlwimi r24,r24,14,4,17 ; Insert a copy of space hash + eqv r26,r26,r26 ; Get all foxes here + rldimi r24,r24,28,8 ; Make a couple copies up higher + rldicr r29,r29,0,47 ; Isolate just the hash table base + subfic r5,r5,46 ; Get number of leading zeros + srd r26,r26,r5 ; Shift the size bits over + mr r30,r27 ; Get start of chunk to invalidate + rldicr r26,r26,0,56 ; Make length in PTEG units + add r22,r4,r30 ; Get end page number + +hrmBInv64: srdi r0,r30,2 ; Shift page index over to form ESID + rldicr r0,r0,0,49 ; Clean all but segment portion + rlwinm r2,r30,0,16,31 ; Get the current page index + xor r0,r0,r24 ; Form VSID + xor r8,r2,r0 ; Hash the vaddr + sldi r8,r8,7 ; Make into PTEG offset + and r23,r8,r26 ; Wrap into the hash table + rlwinm r3,r23,27,5,29 ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here) + subfic r3,r3,-4 ; Get the PCA entry offset + add r7,r3,r29 ; Point to the PCA slot + + cmplw cr5,r30,r22 ; Have we reached the end of the range? + + bl mapLockPteg ; Lock the PTEG + + rlwinm. r4,r6,16,0,7 ; Extract the block mappings in this here PTEG and see if there are any + add r5,r23,r29 ; Point to the PTEG + li r0,0 ; Set an invalid PTE value + beq++ hrmBNone64 ; No block map PTEs in this PTEG... 
+ mtcrf 0x80,r4 ; Set CRs to select PTE slots + mtcrf 0x40,r4 ; Set CRs to select PTE slots -LEXT(hw_attr_virt) -#if PERFTIMES && DEBUG - mflr r11 - mr r7,r3 -// lwz r5,4(r3) - li r5,0x1111 - li r3,40 - bl EXT(dbgLog2) ; Start of hw_add_map - mr r3,r7 - mtlr r11 -#endif - mfsprg r9,2 ; Get feature flags - mfmsr r0 /* Save the MSR */ - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - mtcrf 0x04,r9 ; Set the features - rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ - rlwinm r12,r12,0,28,25 /* Clear IR and DR */ - bt pfNoMSRirb,havNoMSR ; No MSR... + bf 0,hrmSlot0s ; No autogen here + std r0,0x00(r5) ; Invalidate PTE - mtmsr r12 ; Translation and all off - isync ; Toss prefetch - b havNoMSRx - -havNoMSR: - mr r5,r0 - mr r7,r3 - li r0,loadMSR ; Get the MSR setter SC - mr r3,r12 ; Get new MSR - sc ; Set it - mr r3,r7 - mr r0,r5 -havNoMSRx: +hrmSlot0s: bf 1,hrmSlot1s ; No autogen here + std r0,0x10(r5) ; Invalidate PTE -/* - * Note that we need to to do the interlocked update here because another processor - * can be updating the reference and change bits even though the physical entry - * is locked. All modifications to the PTE portion of the physical entry must be - * done via interlocked update. - */ - - lwz r7,mmPTEhash(r3) /* Get pointer to hash list anchor */ - lwz r5,mmPTEv(r3) /* Get the virtual address */ - rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */ +hrmSlot1s: bf 2,hrmSlot2s ; No autogen here + std r0,0x20(r5) ; Invalidate PTE - li r12,1 /* Get the locked value */ +hrmSlot2s: bf 3,hrmSlot3s ; No autogen here + std r0,0x30(r5) ; Invalidate PTE -attrvLck1: lwarx r11,0,r7 /* Get the PTEG lock */ - mr. r11,r11 /* Is it locked? */ - bne- attrvLckw1 /* Yeah... */ - stwcx. r12,0,r7 /* Try to take it */ - bne- attrvLck1 /* Someone else was trying, try again... */ - b attrvSXg1 /* All done... */ - - .align 4 +hrmSlot3s: bf 4,hrmSlot4s ; No autogen here + std r0,0x40(r5) ; Invalidate PTE -attrvLckw1: mr. r11,r11 /* Check if it's already held */ - beq+ attrvLck1 /* It's clear... */ - lwz r11,0(r7) /* Get lock word again... */ - b attrvLckw1 /* Wait... */ - - .align 4 +hrmSlot4s: bf 5,hrmSlot5s ; No autogen here + std r0,0x50(r5) ; Invalidate PTE -attrvSXg1: isync /* Make sure we haven't used anything yet */ +hrmSlot5s: bf 6,hrmSlot6s ; No autogen here + std r0,0x60(r5) ; Invalidate PTE - lwz r6,mmPTEent(r3) /* Get the pointer to the PTE now that the lock's set */ - lwz r2,mmPTEr(r3) ; Get the mapping copy if the real part +hrmSlot6s: bf 7,hrmSlot7s ; No autogen here + std r0,0x70(r5) ; Invalidate PTE - rlwinm r9,r5,1,0,3 /* Move in the segment */ - mr. r6,r6 /* See if there is a PTE here */ - rlwimi r2,r4,0,25,28 ; Move in the new attribute bits - rlwinm r8,r5,31,2,25 /* Line it up and check if empty */ - - beq+ avnophys /* There's no PTE to invalidate... */ - - xor r8,r8,r6 /* Back hash to virt index */ - rlwimi r9,r5,22,4,9 /* Move in the API */ - lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */ - rlwinm r5,r5,0,1,31 /* Clear the valid bit */ - ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */ - mfspr r11,pvr /* Find out what kind of machine we are */ - rlwimi r9,r8,6,10,19 /* Create the virtual address */ - rlwinm r11,r11,16,16,31 /* Isolate CPU type */ - stw r5,0(r6) /* Make the PTE invalid */ - cmplwi cr1,r11,3 /* Is this a 603? 
*/ - sync /* Make sure the invalid is stored */ - -tlbhangav: lwarx r11,0,r12 /* Get the TLBIE lock */ - rlwinm r8,r6,29,29,31 /* Get the bit position of entry */ - mr. r11,r11 /* Is it locked? */ - lis r5,0x8000 /* Start up a bit mask */ - li r11,1 /* Get our lock word */ - bne- tlbhangav /* It's locked, go wait... */ - stwcx. r11,0,r12 /* Try to get it */ - bne- tlbhangav /* We was beat... */ +hrmSlot7s: rlwinm r0,r4,16,16,23 ; Move in use to autogen + or r6,r6,r4 ; Flip on the free bits that corrospond to the autogens we cleared + andc r6,r6,r0 ; Turn off all the old autogen bits + +hrmBNone64: eieio ; Make sure all updates come first + stw r6,0(r7) ; Unlock and set the PCA + + addi r30,r30,1 ; bump to the next PTEG + bne++ cr5,hrmBInv64 ; Go invalidate the next... + + bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again... + + mr r3,r31 ; Copy the pointer to the mapping + bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one + + sync ; Make sure memory is consistent + + subi r5,r25,255 ; Subtract TLB size from page count (note we are 0 based here) + li r6,255 ; Assume full invalidate for now + srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise + andc r6,r6,r5 ; Clear max if we have less to do + and r5,r25,r5 ; Clear count if we have more than max + sldi r24,r24,28 ; Get the full XOR value over to segment position + ld r27,mpVAddr(r31) ; Get the base vaddr + li r7,tlbieLock ; Get the TLBIE lock + or r5,r5,r6 ; Get number of TLBIEs needed - li r11,0 /* Lock clear value */ +hrmBTLBlcl: lwarx r2,0,r7 ; Get the TLBIE lock + mr. r2,r2 ; Is it locked? + li r2,1 ; Get our lock value + bne-- hrmBTLBlcm ; It is locked, go wait... + stwcx. r2,0,r7 ; Try to get it + bne-- hrmBTLBlcl ; We was beat... + +hrmBTLBj: sldi r2,r27,maxAdrSpb ; Move to make room for address space ID + rldicr r2,r2,0,35-maxAdrSpb ; Clear out the extra + addic. r5,r5,-1 ; See if we did them all + xor r2,r2,r24 ; Make the VSID + rldimi r2,r27,0,36 ; Insert the page portion of the VPN + rldicl r2,r2,0,16 ; Clear bits 0:15 cause they say we gotta - tlbie r9 /* Invalidate it everywhere */ + tlbie r2 ; Invalidate it everywhere + addi r27,r27,0x1000 ; Up to the next page + bge++ hrmBTLBj ; Make sure we have done it all... - beq- cr1,its603av /* It's a 603, skip the tlbsync... */ + sync ; Make sure all is quiet - eieio /* Make sure that the tlbie happens first */ - tlbsync /* wait for everyone to catch up */ + eieio ; Make sure that the tlbie happens first + tlbsync ; wait for everyone to catch up isync - -its603av: stw r11,0(r12) /* Clear the lock */ - srw r5,r5,r8 /* Make a "free slot" mask */ - sync /* Make sure of it all */ - - lwz r6,4(r6) /* Get the latest reference and change bits */ - stw r11,mmPTEent(r3) /* Clear the pointer to the PTE */ - rlwinm r6,r6,0,23,24 /* Extract the RC bits */ - lwz r9,PCAallo(r7) /* Get the allocation control bits */ - rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */ - lwz r10,mmphysent(r3) ; Get any physical entry - or r9,r9,r5 /* Set the slot free */ - rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */ - andc r9,r9,r8 /* Clear the auto and lock bits */ - mr. r10,r10 ; Is there a physical entry? - li r5,pepte1 /* Get displacement to the second word of master pte */ - stw r9,PCAallo(r7) /* Store the allocation controls */ - rlwimi r2,r6,0,23,24 ; Stick in RC bits - beq- avnophys ; No physical entry... 
- -attrvmod: lwarx r11,r5,r10 /* Get the master copy */ - or r11,r11,r6 /* Merge in latest RC */ - stwcx. r11,r5,r10 /* Save it back */ - bne- attrvmod /* If it changed, try again... */ - -avnophys: li r4,0 /* Get a 0 */ - stw r2,mmPTEr(r3) ; Set the real part of the PTE - - sync ; Make sure that everything is updated - - stw r4,0(r7) /* Unlock the hash chain */ - - rlwinm r2,r2,0,0,19 ; Clear back to page boundary - -attrflsh: cmplwi r4,(4096-32) ; Are we about to do the last line on page? - dcbst r2,r4 ; Flush cache because we changed attributes - addi r4,r4,32 ; Bump up cache - blt+ attrflsh ; Do the whole page... - sync - li r4,0 -attrimvl: cmplwi r4,(4096-32) ; Are we about to do the last line on page? - dcbi r2,r4 ; Invalidate dcache because we changed attributes - icbi r2,r4 ; Invalidate icache because we changed attributes - addi r4,r4,32 ; Bump up cache - blt+ attrimvl ; Do the whole page... - sync + li r2,0 ; Lock clear value - mtmsr r0 ; Restore interrupts and translation - isync + ptesync ; Wait for quiet again + sync ; Make sure that is done + + stw r2,tlbieLock(0) ; Clear the tlbie lock + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkShared ; Go get a shared lock on the mapping lists + mr. r3,r3 ; Did we get the lock? + bne- hrmPanic ; Nope... + + lwz r4,mpVAddr(r31) ; High order of address + lwz r5,mpVAddr+4(r31) ; Low order of address + mr r3,r28 ; Pass in pmap to search + mr r29,r4 ; Save this in case we need it (only promote fails) + mr r30,r5 ; Save this in case we need it (only promote fails) + bl EXT(mapSearchFull) ; Go see if we can find it + + mr. r3,r3 ; Did we? (And remember mapping address for later) + mr r15,r4 ; Save top of next vaddr + mr r16,r5 ; Save bottom of next vaddr + beq- hrmPanic ; Nope, not found... + + cmpld r3,r31 ; Same mapping? + bne- hrmPanic ; Not good... + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkPromote ; Try to promote shared to exclusive + mr. r3,r3 ; Could we? + mr r3,r31 ; Restore the mapping pointer + beq+ hrmBDone2 ; Yeah... + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkConvert ; Convert shared to exclusive + mr. r3,r3 ; Could we? + bne-- hrmPanic ; Nope, we must have timed out... + + mr r3,r28 ; Pass in pmap to search + mr r4,r29 ; High order of address + mr r5,r30 ; Low order of address + bl EXT(mapSearchFull) ; Rescan the list + + mr. r3,r3 ; Did we lose it when we converted? + mr r15,r4 ; Save top of next vaddr + mr r16,r5 ; Save bottom of next vaddr + beq-- hrmPanic ; Yeah, we did, someone tossed it for us... + +hrmBDone2: bl mapDrainBusy ; Go wait until mapping is unused + + mr r3,r28 ; Get the pmap to remove from + mr r4,r31 ; Point to the mapping + bl EXT(mapRemove) ; Remove the mapping from the list + + lwz r4,pmapResidentCnt(r28) ; Get the mapped page count + la r3,pmapSXlk(r28) ; Point to the pmap search lock + subi r4,r4,1 ; Drop down the mapped page count + stw r4,pmapResidentCnt(r28) ; Set the mapped page count + bl sxlkUnlock ; Unlock the search list + + b hrmRetn64 ; We are all done, get out... + +hrmBTLBlcm: li r2,lgKillResv ; Get space unreserve line + stwcx. r2,0,r2 ; Unreserve it + +hrmBTLBlcn: lwz r2,0(r7) ; Get the TLBIE lock + mr. r2,r2 ; Is it held? + beq++ hrmBTLBlcl ; Nope... + b hrmBTLBlcn ; Yeah... -#if PERFTIMES && DEBUG - mflr r11 - li r3,41 - bl EXT(dbgLog2) - mtlr r11 -#endif - blr /* Return... 
*/ /* - * hw_pte_comm(physent) - Do something to the PTE pointing to a physical page - * - * Upon entry, R3 contains a pointer to a physical entry which is locked. - * Note that this must be done with both interruptions off and VM off - * - * First, we set up CRs 5 and 7 to indicate which of the 7 calls this is. + * mapping *hw_purge_phys(physent) - remove a mapping from the system * - * Now we scan the mappings to invalidate any with an active PTE. + * Upon entry, R3 contains a pointer to a physent. * - * Acquire the lock on the PTEG hash list for the mapping being processed. + * This function removes the first mapping from a physical entry + * alias list. It locks the list, extracts the vaddr and pmap from + * the first entry. It then jumps into the hw_rem_map function. + * NOTE: since we jump into rem_map, we need to set up the stack + * identically. Also, we set the next parm to 0 so we do not + * try to save a next vaddr. + * + * We return the virtual address of the removed mapping as a + * R3. * - * If the current mapping has a PTE entry, we invalidate - * it and merge the reference and change information into the phys_entry. + * Note that this is designed to be called from 32-bit mode with a stack. * - * Next, unlock the hash list and go on to the next mapping. + * We disable translation and all interruptions here. This keeps is + * from having to worry about a deadlock due to having anything locked + * and needing it to process a fault. * + * Note that this must be done with both interruptions off and VM off + * + * + * Remove mapping via physical page (mapping_purge) + * + * 1) lock physent + * 2) extract vaddr and pmap + * 3) unlock physent + * 4) do "remove mapping via pmap" + * * - * */ .align 5 - .globl EXT(hw_inv_all) - -LEXT(hw_inv_all) + .globl EXT(hw_purge_phys) + +LEXT(hw_purge_phys) + stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack + mflr r0 ; Save the link register + stw r15,FM_ARG0+0x00(r1) ; Save a register + stw r16,FM_ARG0+0x04(r1) ; Save a register + stw r17,FM_ARG0+0x08(r1) ; Save a register + stw r18,FM_ARG0+0x0C(r1) ; Save a register + stw r19,FM_ARG0+0x10(r1) ; Save a register + stw r20,FM_ARG0+0x14(r1) ; Save a register + stw r21,FM_ARG0+0x18(r1) ; Save a register + stw r22,FM_ARG0+0x1C(r1) ; Save a register + stw r23,FM_ARG0+0x20(r1) ; Save a register + stw r24,FM_ARG0+0x24(r1) ; Save a register + stw r25,FM_ARG0+0x28(r1) ; Save a register + li r6,0 ; Set no next address return + stw r26,FM_ARG0+0x2C(r1) ; Save a register + stw r27,FM_ARG0+0x30(r1) ; Save a register + stw r28,FM_ARG0+0x34(r1) ; Save a register + stw r29,FM_ARG0+0x38(r1) ; Save a register + stw r30,FM_ARG0+0x3C(r1) ; Save a register + stw r31,FM_ARG0+0x40(r1) ; Save a register + stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr + stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return + + bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit + + bl mapPhysLock ; Lock the physent + + bt++ pf64Bitb,hppSF ; skip if 64-bit (only they take the hint) + + lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping + li r0,0x3F ; Set the bottom stuff to clear + b hppJoin ; Join the common... + +hppSF: li r0,0xFF + ld r12,ppLink(r3) ; Get the pointer to the first mapping + rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC0000000000000003F + +hppJoin: andc. 
r12,r12,r0 ; Clean and test link + beq-- hppNone ; There are no more mappings on physical page + + lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table + lhz r7,mpSpace(r12) ; Get the address space hash + ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table + slwi r0,r7,2 ; Multiply space by 4 + lwz r4,mpVAddr(r12) ; Get the top of the vaddr + slwi r7,r7,3 ; Multiply space by 8 + lwz r5,mpVAddr+4(r12) ; and the bottom + add r7,r7,r0 ; Get correct displacement into translate table + lwz r28,0(r28) ; Get the actual translation map - li r9,0x800 /* Indicate invalidate all */ - li r2,0 ; No inadvertant modifications please - b hw_pte_comm /* Join in the fun... */ - + add r28,r28,r7 ; Point to the pmap translation + + bl mapPhysUnlock ; Time to unlock the physical entry + + bt++ pf64Bitb,hppSF2 ; skip if 64-bit (only they take the hint) + + lwz r28,pmapPAddr+4(r28) ; Get the physical address of the pmap + b hrmJoin ; Go remove the mapping... + +hppSF2: ld r28,pmapPAddr(r28) ; Get the physical address of the pmap + b hrmJoin ; Go remove the mapping... .align 5 - .globl EXT(hw_tst_mod) + +hppNone: bl mapPhysUnlock ; Time to unlock the physical entry + + bt++ pf64Bitb,hppSF3 ; skip if 64-bit (only they take the hint)... + + mtmsr r11 ; Restore enables/translation/etc. + isync + b hppRetnCmn ; Join the common return code... -LEXT(hw_tst_mod) +hppSF3: mtmsrd r11 ; Restore enables/translation/etc. + isync - lwz r8,pepte1(r3) ; Get the saved PTE image - li r9,0x400 /* Indicate test modify */ - li r2,0 ; No inadvertant modifications please - rlwinm. r8,r8,25,31,31 ; Make change bit into return code - beq+ hw_pte_comm ; Assume we do not know if it is set... - mr r3,r8 ; Set the return code - blr ; Return quickly... +; +; NOTE: we have not used any registers other than the volatiles to this point +; - .align 5 - .globl EXT(hw_tst_ref) +hppRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return -LEXT(hw_tst_ref) - lwz r8,pepte1(r3) ; Get the saved PTE image - li r9,0x200 /* Indicate test reference bit */ - li r2,0 ; No inadvertant modifications please - rlwinm. r8,r8,24,31,31 ; Make reference bit into return code - beq+ hw_pte_comm ; Assume we do not know if it is set... - mr r3,r8 ; Set the return code - blr ; Return quickly... + li r3,0 ; Clear high order mapping address because we are 32-bit + mtlr r12 ; Restore the return + lwz r1,0(r1) ; Pop the stack + blr ; Leave... /* - * Note that the following are all in one CR for ease of use later + * mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system. + * + * Upon entry, R3 contains a pointer to a pmap. Since vaddr is + * a 64-bit quantity, it is a long long so it is in R4 and R5. + * + * We return the virtual address of the removed mapping as a + * R3. + * + * Note that this is designed to be called from 32-bit mode with a stack. + * + * We disable translation and all interruptions here. This keeps is + * from having to worry about a deadlock due to having anything locked + * and needing it to process a fault. + * + * Note that this must be done with both interruptions off and VM off + * + * Remove a mapping which can be reestablished by VM + * */ - .align 4 - .globl EXT(hw_set_mod) -LEXT(hw_set_mod) - - li r9,0x008 /* Indicate set modify bit */ - li r2,0x4 ; Set set C, clear none - b hw_pte_comm /* Join in the fun... 
*/
+ .align 5
+ .globl EXT(hw_purge_map)
+
+LEXT(hw_purge_map)
+ stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
+ mflr r0 ; Save the link register
+ stw r15,FM_ARG0+0x00(r1) ; Save a register
+ stw r16,FM_ARG0+0x04(r1) ; Save a register
+ stw r17,FM_ARG0+0x08(r1) ; Save a register
+ stw r18,FM_ARG0+0x0C(r1) ; Save a register
+ stw r19,FM_ARG0+0x10(r1) ; Save a register
+ mfsprg r19,2 ; Get feature flags
+ stw r20,FM_ARG0+0x14(r1) ; Save a register
+ stw r21,FM_ARG0+0x18(r1) ; Save a register
+ mtcrf 0x02,r19 ; move pf64Bit cr6
+ stw r22,FM_ARG0+0x1C(r1) ; Save a register
+ stw r23,FM_ARG0+0x20(r1) ; Save a register
+ stw r24,FM_ARG0+0x24(r1) ; Save a register
+ stw r25,FM_ARG0+0x28(r1) ; Save a register
+ stw r26,FM_ARG0+0x2C(r1) ; Save a register
+ stw r27,FM_ARG0+0x30(r1) ; Save a register
+ stw r28,FM_ARG0+0x34(r1) ; Save a register
+ stw r29,FM_ARG0+0x38(r1) ; Save a register
+ stw r30,FM_ARG0+0x3C(r1) ; Save a register
+ stw r31,FM_ARG0+0x40(r1) ; Save a register
+ stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
+ stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
+
+ bt++ pf64Bitb,hpmSF1 ; skip if 64-bit (only they take the hint)
+ lwz r9,pmapvr+4(r3) ; Get conversion mask
+ b hpmSF1x ; Done...
+
+hpmSF1: ld r9,pmapvr(r3) ; Get conversion mask
+
+hpmSF1x:
+ bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
+
+ xor r28,r3,r9 ; Convert the pmap to physical addressing
+
+ mr r17,r11 ; Save the MSR
+
+ la r3,pmapSXlk(r28) ; Point to the pmap search lock
+ bl sxlkExclusive ; Go get an exclusive lock on the mapping lists
+ mr. r3,r3 ; Did we get the lock?
+ bne-- hrmBadLock ; Nope...
+;
+; Note that we do a full search (i.e., no shortcut level skips, etc.)
+; here so that we will know the previous elements so we can dequeue them
+; later.
+;
+hpmSearch:
+ mr r3,r28 ; Pass in pmap to search
+ mr r29,r4 ; Top half of vaddr
+ mr r30,r5 ; Bottom half of vaddr
+ bl EXT(mapSearchFull) ; Rescan the list
+ mr. r31,r3 ; Did we? (And remember mapping address for later)
+ or r0,r4,r5 ; Are we beyond the end?
+ mr r15,r4 ; Save top of next vaddr
+ cmplwi cr1,r0,0 ; See if there is another
+ mr r16,r5 ; Save bottom of next vaddr
+ bne-- hpmGotOne ; We found one, go check it out...
+
+hpmCNext: bne++ cr1,hpmSearch ; There is another to check...
+ b hrmNotFound ; No more in pmap to check...
+
+hpmGotOne: lwz r20,mpFlags(r3) ; Get the flags
+ andi. r9,r20,lo16(mpSpecial|mpNest|mpPerm|mpBlock) ; Are we allowed to remove it?
+ beq++ hrmGotX ; Found, branch to remove the mapping...
+ b hpmCNext ; Nope...
+/*
+ * mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
+ *
+ * Upon entry, R3 contains a pointer to a pmap.
+ * pa is a pointer to the physent
+ *
+ * This function removes the first mapping for a specific pmap from a physical entry
+ * alias list. It locks the list, extracts the vaddr and pmap from
+ * the first appropriate entry. It then jumps into the hw_rem_map function.
+ * NOTE: since we jump into rem_map, we need to set up the stack
+ * identically. Also, we set the next parm to 0 so we do not
+ * try to save a next vaddr.
+ *
+ * We return the virtual address of the removed mapping in
+ * R3.
+ *
+ * Note that this is designed to be called from 32-bit mode with a stack.
+ *
+ * We disable translation and all interruptions here. This keeps us
+ * from having to worry about a deadlock due to having anything locked
+ * and needing it to process a fault.
+ * + * Note that this must be done with both interruptions off and VM off + * + * + * Remove mapping via physical page (mapping_purge) + * + * 1) lock physent + * 2) extract vaddr and pmap + * 3) unlock physent + * 4) do "remove mapping via pmap" + * + * + */ - .align 4 - .globl EXT(hw_clr_mod) + .align 5 + .globl EXT(hw_purge_space) + +LEXT(hw_purge_space) + stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack + mflr r0 ; Save the link register + stw r15,FM_ARG0+0x00(r1) ; Save a register + stw r16,FM_ARG0+0x04(r1) ; Save a register + stw r17,FM_ARG0+0x08(r1) ; Save a register + mfsprg r2,2 ; Get feature flags + stw r18,FM_ARG0+0x0C(r1) ; Save a register + stw r19,FM_ARG0+0x10(r1) ; Save a register + stw r20,FM_ARG0+0x14(r1) ; Save a register + stw r21,FM_ARG0+0x18(r1) ; Save a register + stw r22,FM_ARG0+0x1C(r1) ; Save a register + mtcrf 0x02,r2 ; move pf64Bit cr6 + stw r23,FM_ARG0+0x20(r1) ; Save a register + stw r24,FM_ARG0+0x24(r1) ; Save a register + stw r25,FM_ARG0+0x28(r1) ; Save a register + stw r26,FM_ARG0+0x2C(r1) ; Save a register + stw r27,FM_ARG0+0x30(r1) ; Save a register + li r6,0 ; Set no next address return + stw r28,FM_ARG0+0x34(r1) ; Save a register + stw r29,FM_ARG0+0x38(r1) ; Save a register + stw r30,FM_ARG0+0x3C(r1) ; Save a register + stw r31,FM_ARG0+0x40(r1) ; Save a register + stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr + stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return + + bt++ pf64Bitb,hpsSF1 ; skip if 64-bit (only they take the hint) + + lwz r9,pmapvr+4(r4) ; Get conversion mask for pmap + + b hpsSF1x ; Done... + +hpsSF1: ld r9,pmapvr(r4) ; Get conversion mask for pmap + +hpsSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit + + xor r4,r4,r9 ; Convert the pmap to physical addressing + + bl mapPhysLock ; Lock the physent + + lwz r8,pmapSpace(r4) ; Get the space hash + + bt++ pf64Bitb,hpsSF ; skip if 64-bit (only they take the hint) + + lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping + +hpsSrc32: rlwinm. r12,r12,0,0,25 ; Clean and test mapping address + beq hpsNone ; Did not find one... + + lhz r10,mpSpace(r12) ; Get the space + + cmplw r10,r8 ; Is this one of ours? + beq hpsFnd ; Yes... + + lwz r12,mpAlias+4(r12) ; Chain on to the next + b hpsSrc32 ; Check it out... -LEXT(hw_clr_mod) + .align 5 + +hpsSF: li r0,0xFF + ld r12,ppLink(r3) ; Get the pointer to the first mapping + rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC0000000000000003F + +hpsSrc64: andc. r12,r12,r0 ; Clean and test mapping address + beq hpsNone ; Did not find one... + + lhz r10,mpSpace(r12) ; Get the space + + cmplw r10,r8 ; Is this one of ours? + beq hpsFnd ; Yes... + + ld r12,mpAlias(r12) ; Chain on to the next + b hpsSrc64 ; Check it out... + + .align 5 - li r9,0x004 /* Indicate clear modify bit */ - li r2,0x1 ; Set set none, clear C - b hw_pte_comm /* Join in the fun... */ +hpsFnd: mr r28,r4 ; Set the pmap physical address + lwz r4,mpVAddr(r12) ; Get the top of the vaddr + lwz r5,mpVAddr+4(r12) ; and the bottom + + bl mapPhysUnlock ; Time to unlock the physical entry + b hrmJoin ; Go remove the mapping... + + .align 5 + +hpsNone: bl mapPhysUnlock ; Time to unlock the physical entry + bt++ pf64Bitb,hpsSF3 ; skip if 64-bit (only they take the hint)... - .align 4 - .globl EXT(hw_set_ref) + mtmsr r11 ; Restore enables/translation/etc. + isync + b hpsRetnCmn ; Join the common return code... 
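The hpsSrc32/hpsSrc64 loops above scan the physent's alias chain for the first mapping whose space hash matches the pmap being purged, masking the flag bits out of each link word before chasing it (the low six bits on 32-bit, plus the top two bits on 64-bit via the rotated 0xFF clean-up mask). A sketch of that walk with stand-in types and field names:

    #include <stdint.h>
    #include <stddef.h>

    struct mapping {
        uint16_t        mp_space;   /* address-space hash (mpSpace)       */
        struct mapping *mp_alias;   /* next alias for this page (mpAlias) */
    };

    /* Flag bits carried in the link word; the 64-bit path above also
       clears the top two bits. */
    #define MP_LINK_FLAGS ((uintptr_t)0x3F)

    static struct mapping *find_by_space(struct mapping *link, uint16_t space)
    {
        for (;;) {
            struct mapping *mp =
                (struct mapping *)((uintptr_t)link & ~MP_LINK_FLAGS);
            if (mp == NULL)
                return NULL;        /* hpsNone: nothing for this space */
            if (mp->mp_space == space)
                return mp;          /* hpsFnd: go remove this one */
            link = mp->mp_alias;    /* chain on to the next */
        }
    }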
-LEXT(hw_set_ref)
-
- li r9,0x002 /* Indicate set reference */
- li r2,0x8 ; Set set R, clear none
- b hw_pte_comm /* Join in the fun... */
+hpsSF3: mtmsrd r11 ; Restore enables/translation/etc.
+ isync
- .align 5
- .globl EXT(hw_clr_ref)
+;
+; NOTE: we have not used any registers other than the volatiles to this point
+;
-LEXT(hw_clr_ref)
-
- li r9,0x001 /* Indicate clear reference bit */
- li r2,0x2 ; Set set none, clear R
- b hw_pte_comm /* Join in the fun... */
+hpsRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
+
+ li r3,0 ; Set return code
+ mtlr r12 ; Restore the return
+ lwz r1,0(r1) ; Pop the stack
+ blr ; Leave...
 /*
- * This is the common stuff.
+ * mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
+ *
+ * Upon entry, R3 contains a pointer to a physent.
+ * space is the space ID from the pmap in question
+ *
+ * We return the virtual address of the found mapping in
+ * R3. Note that the mapping's busy count is bumped.
+ *
+ * Note that this is designed to be called from 32-bit mode with a stack.
+ *
+ * We disable translation and all interruptions here. This keeps us
+ * from having to worry about a deadlock due to having anything locked
+ * and needing it to process a fault.
+ *
 */
 .align 5
+ .globl EXT(hw_find_space)
+
+LEXT(hw_find_space)
+ stwu r1,-(FM_SIZE)(r1) ; Make some space on the stack
+ mflr r0 ; Save the link register
+ mr r8,r4 ; Remember the space
+ stw r0,(FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
+
+ bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
-hw_pte_comm: /* Common routine for pte tests and manips */
+ bl mapPhysLock ; Lock the physent
-#if PERFTIMES && DEBUG
- mflr r11
- mr r7,r3
- lwz r4,4(r3)
- mr r5,r9
- li r3,28
- bl EXT(dbgLog2) ; Start of hw_add_map
- mr r3,r7
- mtlr r11
-#endif
- mfsprg r8,2 ; Get feature flags
- lwz r10,pephyslink(r3) /* Get the first mapping block */
- mfmsr r0 /* Save the MSR */
- rlwinm. r10,r10,0,0,26 ; Clear out the flags from first link and see if we are mapped
- rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
- rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
- rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
- mtcrf 0x04,r8 ; Set the features
- rlwinm r12,r12,0,28,25 /* Clear IR and DR */
- beq- comnmap ; No mapping
- dcbt br0,r10 ; Touch the first mapping in before the isync
-
-comnmap:
-
- bt pfNoMSRirb,hpcNoMSR ; No MSR...
-
- mtmsr r12 ; Translation and all off
- isync ; Toss prefetch
- b hpcNoMSRx
+ bt++ pf64Bitb,hfsSF ; skip if 64-bit (only they take the hint)
+
+ lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
-hpcNoMSR:
- mr r5,r0
- mr r7,r3
- li r0,loadMSR ; Get the MSR setter SC
- mr r3,r12 ; Get new MSR
- sc ; Set it
- mr r3,r7
- mr r0,r5
-hpcNoMSRx:
+hfsSrc32: rlwinm. r12,r12,0,0,25 ; Clean and test mapping address
+ beq hfsNone ; Did not find one...
+
+ lhz r10,mpSpace(r12) ; Get the space
+
+ cmplw r10,r8 ; Is this one of ours?
+ beq hfsFnd ; Yes...
+
+ lwz r12,mpAlias+4(r12) ; Chain on to the next
+ b hfsSrc32 ; Check it out...
- mtcrf 0x05,r9 /* Set the call type flags into cr5 and 7 */
 .align 5
+
+hfsSF: li r0,0xFF
+ ld r12,ppLink(r3) ; Get the pointer to the first mapping
+ rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
+
+hfsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
+ beq hfsNone ; Did not find one...
+
+ lhz r10,mpSpace(r12) ; Get the space
+
+ cmplw r10,r8 ; Is this one of ours?
+ beq hfsFnd ; Yes... + + ld r12,mpAlias(r12) ; Chain on to the next + b hfsSrc64 ; Check it out... + + .align 5 + +hfsFnd: mr r8,r3 ; Save the physent + mr r3,r12 ; Point to the mapping + bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disapear - beq- commdone ; Nothing us mapped to this page... - b commnext ; Jump to first pass (jump here so we can align loop) + mr r3,r8 ; Get back the physical entry + li r7,0xFFF ; Get a page size mask + bl mapPhysUnlock ; Time to unlock the physical entry - .align 5 + andc r3,r12,r7 ; Move the mapping back down to a page + lwz r3,mbvrswap+4(r3) ; Get last half of virtual to real swap + xor r12,r3,r12 ; Convert to virtual + b hfsRet ; Time to return + + .align 5 + +hfsNone: bl mapPhysUnlock ; Time to unlock the physical entry + +hfsRet: bt++ pf64Bitb,hfsSF3 ; skip if 64-bit (only they take the hint)... -commnext: lwz r11,mmnext(r10) ; Get the pointer to the next mapping (if any) - lwz r7,mmPTEhash(r10) /* Get pointer to hash list anchor */ - lwz r5,mmPTEv(r10) /* Get the virtual address */ - mr. r11,r11 ; More mappings to go? - rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */ - beq- commnxtch ; No more mappings... - dcbt br0,r11 ; Touch the next mapping + mtmsr r11 ; Restore enables/translation/etc. + isync + b hfsRetnCmn ; Join the common return code... -commnxtch: li r12,1 /* Get the locked value */ +hfsSF3: mtmsrd r11 ; Restore enables/translation/etc. + isync -commLck1: lwarx r11,0,r7 /* Get the PTEG lock */ - mr. r11,r11 /* Is it locked? */ - bne- commLckw1 /* Yeah... */ - stwcx. r12,0,r7 /* Try to take it */ - bne- commLck1 /* Someone else was trying, try again... */ - b commSXg1 /* All done... */ - - .align 4 +; +; NOTE: we have not used any registers other than the volatiles to this point +; -commLckw1: mr. r11,r11 /* Check if it's already held */ - beq+ commLck1 /* It's clear... */ - lwz r11,0(r7) /* Get lock word again... */ - b commLckw1 /* Wait... */ - - .align 4 +hfsRetnCmn: mr r3,r12 ; Get the mapping or a 0 if we failed + lwz r12,(FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return + + mtlr r12 ; Restore the return + lwz r1,0(r1) ; Pop the stack + blr ; Leave... -commSXg1: isync /* Make sure we haven't used anything yet */ - lwz r6,mmPTEent(r10) /* Get the pointer to the PTE now that the lock's set */ +; +; mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap +; Returns 0 if not found or the virtual address of the mapping if +; if is. Also, the mapping has the busy count bumped. +; + .align 5 + .globl EXT(hw_find_map) - rlwinm r9,r5,1,0,3 /* Move in the segment */ - mr. r6,r6 /* See if there is a PTE entry here */ - rlwinm r8,r5,31,2,25 /* Line it up and check if empty */ - - beq+ commul /* There's no PTE to invalidate... */ - - xor r8,r8,r6 /* Back hash to virt index */ - rlwimi r9,r5,22,4,9 /* Move in the API */ - lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */ - rlwinm r5,r5,0,1,31 /* Clear the valid bit */ - ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */ - rlwimi r9,r8,6,10,19 /* Create the virtual address */ - - stw r5,0(r6) /* Make the PTE invalid */ - mfspr r4,pvr /* Find out what kind of machine we are */ - sync /* Make sure the invalid is stored */ - -tlbhangco: lwarx r11,0,r12 /* Get the TLBIE lock */ - rlwinm r8,r6,29,29,31 /* Get the bit position of entry */ - mr. r11,r11 /* Is it locked? */ - lis r5,0x8000 /* Start up a bit mask */ - li r11,1 /* Get our lock word */ - bne- tlbhangco /* It's locked, go wait... */ - stwcx. 
r11,0,r12 /* Try to get it */ - bne- tlbhangco /* We was beat... */ - - rlwinm r4,r4,16,16,31 /* Isolate CPU type */ - li r11,0 /* Lock clear value */ - cmplwi r4,3 /* Is this a 603? */ +LEXT(hw_find_map) + stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack + mflr r0 ; Save the link register + stw r25,FM_ARG0+0x00(r1) ; Save a register + stw r26,FM_ARG0+0x04(r1) ; Save a register + mr r25,r6 ; Remember address of next va + stw r27,FM_ARG0+0x08(r1) ; Save a register + stw r28,FM_ARG0+0x0C(r1) ; Save a register + stw r29,FM_ARG0+0x10(r1) ; Save a register + stw r30,FM_ARG0+0x14(r1) ; Save a register + stw r31,FM_ARG0+0x18(r1) ; Save a register + stw r0,(FM_ALIGN((31-26+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return - tlbie r9 /* Invalidate it everywhere */ + lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap + lwz r7,pmapvr+4(r3) ; Get the second part - beq- its603co /* It's a 603, skip the tlbsync... */ - - eieio /* Make sure that the tlbie happens first */ - tlbsync /* wait for everyone to catch up */ - isync - -its603co: stw r11,0(r12) /* Clear the lock */ - srw r5,r5,r8 /* Make a "free slot" mask */ - sync /* Make sure of it all */ - lwz r6,4(r6) /* Get the latest reference and change bits */ - lwz r9,PCAallo(r7) /* Get the allocation control bits */ - stw r11,mmPTEent(r10) /* Clear the pointer to the PTE */ - rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */ - or r9,r9,r5 /* Set the slot free */ - rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */ - rlwinm r4,r6,0,23,24 /* Extract the RC bits */ - andc r9,r9,r8 /* Clear the auto and lock bits */ - li r5,pepte1 /* Get displacement to the second word of master pte */ - stw r9,PCAallo(r7) /* Store the allocation controls */ - -commmod: lwarx r11,r5,r3 /* Get the master copy */ - or r11,r11,r4 /* Merge in latest RC */ - stwcx. r11,r5,r3 /* Save it back */ - bne- commmod /* If it changed, try again... */ - b commulnl ; Skip loading the old real part... + bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit + + mr r27,r11 ; Remember the old MSR + mr r26,r12 ; Remember the feature bits -commul: lwz r6,mmPTEr(r10) ; Get the real part + xor r28,r3,r7 ; Change the common 32- and 64-bit half -commulnl: rlwinm r12,r2,5,23,24 ; Get the "set" bits - rlwinm r11,r2,7,23,24 ; Get the "clear" bits + bf-- pf64Bitb,hfmSF1 ; skip if 32-bit... - or r6,r6,r12 ; Set the bits to come on - andc r6,r6,r11 ; Clear those to come off + rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top - stw r6,mmPTEr(r10) ; Set the new RC +hfmSF1: mr r29,r4 ; Save top half of vaddr + mr r30,r5 ; Save the bottom half + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkShared ; Go get a shared lock on the mapping lists + mr. r3,r3 ; Did we get the lock? + bne-- hfmBadLock ; Nope... - lwz r10,mmnext(r10) /* Get the next */ - li r4,0 /* Make sure this is 0 */ - mr. r10,r10 ; Is there another mapping? + mr r3,r28 ; get the pmap address + mr r4,r29 ; Get bits 0:31 to look for + mr r5,r30 ; Get bits 32:64 + + bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags) - sync ; Make sure that all is saved + rlwinm r0,r7,0,mpRIPb,mpRIPb ; Find remove in progress bit + mr. r31,r3 ; Save the mapping if we found it + cmplwi cr1,r0,0 ; Are we removing? 
+ mr r29,r4 ; Save next va high half
+ crorc cr0_eq,cr0_eq,cr1_eq ; Not found or removing
+ mr r30,r5 ; Save next va low half
+ li r6,0 ; Assume we did not find it
+ li r26,0xFFF ; Get a mask to relocate to start of mapping page

- stw r4,0(r7) /* Unlock the hash chain */
- bne+ commnext ; Go get the next if there is one...
-
-/*
- * Now that all PTEs have been invalidated and the master RC bits are updated,
- * we go ahead and figure out what the original call was and do that. Note that
- * another processor could be messing around and may have entered one of the
- * PTEs we just removed into the hash table. Too bad... You takes yer chances.
- * If there's a problem with that, it's because some higher level was trying to
- * do something with a mapping that it shouldn't. So, the problem's really
- * there, nyaaa, nyaaa, nyaaa... nyaaa, nyaaa... nyaaa! So there!
- */
+ bt-- cr0_eq,hfmNotFnd ; We did not find it...

-commdone: li r5,pepte1 /* Get displacement to the second word of master pte */
- blt cr5,commfini /* We're finished, it was invalidate all... */
- bgt cr5,commtst /* It was a test modified... */
- beq cr5,commtst /* It was a test reference... */
+ bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear

-/*
- * Note that we need to to do the interlocked update here because another processor
- * can be updating the reference and change bits even though the physical entry
- * is locked. All modifications to the PTE portion of the physical entry must be
- * done via interlocked update.
- */
+ andc r4,r31,r26 ; Get back to the mapping page start

- rlwinm r12,r2,5,23,24 ; Get the "set" bits
- rlwinm r11,r2,7,23,24 ; Get the "clear" bits
+; Note: we can treat 32- and 64-bit the same here. Because we are going from
+; physical to virtual and we only do 32-bit virtual, we only need the low order
+; word of the xor.

-commcng: lwarx r8,r5,r3 /* Get the master copy */
- or r8,r8,r12 ; Set the bits to come on
- andc r8,r8,r11 ; Clear those to come off
- stwcx. r8,r5,r3 /* Save it back */
- bne- commcng /* If it changed, try again... */
+ lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap
+ li r6,-1 ; Indicate we found it and it is not being removed
+ xor r31,r31,r4 ; Flip to virtual

- mtmsr r0 /* Interrupts and translation back on */
- isync
-#if PERFTIMES && DEBUG
- mflr r11
- mr r4,r3
- li r3,29
- bl EXT(dbgLog2) ; Start of hw_add_map
- mr r3,r4
- mtlr r11
-#endif
- blr /* Return... */
-
- .align 4
-
-commtst: lwz r8,pepte1(r3) /* Get the PTE */
- bne- cr5,commtcb ; This is for the change bit...
- mtmsr r0 ; Interrupts and translation back on
- rlwinm r3,r8,24,31,31 ; Copy reference bit to bit 31
- isync ; Toss prefetching
-#if PERFTIMES && DEBUG
- mflr r11
- mr r4,r3
- li r3,29
- bl EXT(dbgLog2) ; Start of hw_add_map
- mr r3,r4
- mtlr r11
-#endif
- blr ; Return...
+hfmNotFnd: la r3,pmapSXlk(r28) ; Point to the pmap search lock
+ bl sxlkUnlock ; Unlock the search list

- .align 4
+ rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit
+ and r3,r3,r6 ; Clear if not found or removing

-commtcb: rlwinm r3,r8,25,31,31 ; Copy change bit to bit 31
+hfmReturn: bt++ pf64Bitb,hfmR64 ; Yes...

-commfini: mtmsr r0 ; Interrupts and translation back on
- isync ; Toss prefetching
+ mtmsr r27 ; Restore enables/translation/etc.
+ isync
+ b hfmReturnC ; Join common...
+
+hfmR64: mtmsrd r27 ; Restore enables/translation/etc.
+ isync
+
+hfmReturnC: stw r29,0(r25) ; Save the top of the next va
+ stw r30,4(r25) ; Save the bottom of the next va
+ lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
+ lwz r25,FM_ARG0+0x00(r1) ; Restore a register
+ lwz r26,FM_ARG0+0x04(r1) ; Restore a register
+ and r3,r3,r6 ; Clear return if the mapping is being removed
+ lwz r27,FM_ARG0+0x08(r1) ; Restore a register
+ mtlr r0 ; Restore the return
+ lwz r28,FM_ARG0+0x0C(r1) ; Restore a register
+ lwz r29,FM_ARG0+0x10(r1) ; Restore a register
+ lwz r30,FM_ARG0+0x14(r1) ; Restore a register
+ lwz r31,FM_ARG0+0x18(r1) ; Restore a register
+ lwz r1,0(r1) ; Pop the stack
+ blr ; Leave...
+
+ .align 5
+
+hfmBadLock: li r3,1 ; Set lock time out error code
+ b hfmReturn ; Leave....

 /*
- * unsigned int hw_test_rc(mapping *mp, boolean_t reset);
+ * unsigned int hw_walk_phys(pp, preop, op, postop, parm)
+ * walks all mappings for a physical page and performs
+ * specified operations on each.
 *
- * Test the RC bits for a specific mapping. If reset is non-zero, clear them.
- * We return the RC value in the mapping if there is no PTE or if C is set.
- * (Note: R is always set with C.) Otherwise we invalidate the PTE and
- * collect the RC bits from there, also merging them into the global copy.
- *
- * For now, we release the PTE slot and leave it invalid. In the future, we
- * may consider re-validating and not releasing the slot. It would be faster,
- * but our current implementation says that we will have not PTEs valid
- * without the reference bit set.
+ * pp is unlocked physent
+ * preop is operation to perform on physent before walk. This would be
+ * used to set cache attribute or protection
+ * op is the operation to perform on each mapping during walk
+ * postop is operation to perform on the physent after walk. This would be
+ * used to set or reset the RC bits.
+ *
+ * We return the RC bits from before postop is run.
+ *
+ * Note that this is designed to be called from 32-bit mode with a stack.
 *
- * We will special case C==1 && not reset to just return the RC.
+ * We disable translation and all interruptions here. This keeps us
+ * from having to worry about a deadlock due to having anything locked
+ * and needing it to process a fault.
 *
- * Probable state is worst performance state: C bit is off and there is a PTE.
+ * We lock the physent, execute preop, and then walk each mapping in turn.
+ * If there is a PTE, it is invalidated and the RC merged into the physent.
+ * Then we call the op function.
+ * Then we revalidate the PTE.
+ * Once all mappings are finished, we save the physent RC and call the
+ * postop routine. Then we unlock the physent and return the RC.
+ *
+ *
 */

-#define htrReset 31
-
 .align 5
- .globl EXT(hw_test_rc)
+ .globl EXT(hw_walk_phys)
+
+LEXT(hw_walk_phys)
+ stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
+ mflr r0 ; Save the link register
+ stw r25,FM_ARG0+0x00(r1) ; Save a register
+ stw r26,FM_ARG0+0x04(r1) ; Save a register
+ stw r27,FM_ARG0+0x08(r1) ; Save a register
+ stw r28,FM_ARG0+0x0C(r1) ; Save a register
+ mr r25,r7 ; Save the parm
+ stw r29,FM_ARG0+0x10(r1) ; Save a register
+ stw r30,FM_ARG0+0x14(r1) ; Save a register
+ stw r31,FM_ARG0+0x18(r1) ; Save a register
+ stw r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
+
+ bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
+
+ mr r26,r11 ; Save the old MSR
+ lis r27,hi16(hwpOpBase) ; Get high order of op base
+ slwi r4,r4,7 ; Convert preop to displacement
+ ori r27,r27,lo16(hwpOpBase) ; Get low order of op base
+ slwi r5,r5,7 ; Convert op to displacement
+ add r12,r4,r27 ; Point to the preop routine
+ slwi r28,r6,7 ; Convert postop to displacement
+ mtctr r12 ; Set preop routine
+ add r28,r28,r27 ; Get the address of the postop routine
+ add r27,r5,r27 ; Get the address of the op routine

-LEXT(hw_test_rc)
+ bl mapPhysLock ; Lock the physent

- mfsprg r9,2 ; Get feature flags
- mfmsr r0 ; Save the MSR
- rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
- mr. r4,r4 ; See if we have a reset to do later
- rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
- rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruption mask
- crnot htrReset,cr0_eq ; Remember reset
- mtcrf 0x04,r9 ; Set the features
- rlwinm r12,r12,0,28,25 ; Clear IR and DR
+ mr r29,r3 ; Save the physent address
+
+ bt++ pf64Bitb,hwp64 ; skip if 64-bit (only they take the hint)
+
+ bctrl ; Call preop routine
+ bne- hwpEarly32 ; preop says to bail now...

- bt pfNoMSRirb,htrNoMSR ; No MSR...
+ mtctr r27 ; Set up the op function address
+ lwz r31,ppLink+4(r3) ; Grab the pointer to the first mapping
+
+hwpSrc32: rlwinm. r31,r31,0,0,25 ; Clean and test mapping address
+ beq hwpNone32 ; Did not find one...

- mtmsr r12 ; Translation and all off
- isync ; Toss prefetch
- b htrNoMSRx
+;
+; Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4,
+; PTE low in R5. The PCA address is in R7. The PTEG comes back locked.
+; If there is no PTE, PTE low is obtained from the mapping
+;
+ bl mapInvPte32 ; Invalidate and lock PTE, also merge into physent
+
+ bctrl ; Call the op function
+
+ crmove cr1_eq,cr0_eq ; Save the return code
+
+ mr. r3,r3 ; Was there a previously valid PTE?
+ beq- hwpNxt32 ; Nope...

-htrNoMSR:
- mr r2,r0
- mr r7,r3
- li r0,loadMSR ; Get the MSR setter SC
- mr r3,r12 ; Get new MSR
- sc ; Set it
- mr r3,r7
- mr r0,r2
-htrNoMSRx:
+ stw r5,4(r3) ; Store second half of PTE
+ eieio ; Make sure we do not reorder
+ stw r4,0(r3) ; Revalidate the PTE
+
+ eieio ; Make sure all updates come first
+ stw r6,0(r7) ; Unlock the PCA

- lwz r2,mmPTEr(r3) ; Get the real part
- lwz r7,mmPTEhash(r3) ; Get pointer to hash list anchor
- rlwinm. r12,r2,0,24,24 ; Is the change bit on?
- lwz r5,mmPTEv(r3) ; Get the virtual address
- crnor cr0_eq,cr0_eq,htrReset ; Set if C=1 && not reset
- rlwinm r7,r7,0,0,25 ; Round hash list down to PCA boundary
- bt cr0_eq,htrcset ; Special case changed but no reset case...
+hwpNxt32: bne- cr1,hwpEarly32 ; op says to bail now...
+ lwz r31,mpAlias+4(r31) ; Chain on to the next
+ b hwpSrc32 ; Check it out...
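The shape of this scan is easier to see in C. The sketch below is illustrative only: the struct layout, flag mask, and helper type are invented here to mirror the assembly's conventions (flag bits kept in the low-order bits of ppLink, mappings chained through mpAlias), and the PTE invalidate/revalidate brackets around the op call are reduced to a comment.

    #include <stddef.h>
    #include <stdint.h>

    struct mapping {
        struct mapping *mpAlias;                 /* next mapping on the physent chain */
    };

    struct physent {
        uintptr_t ppLink;                        /* chain head; low bits hold flags */
    };

    #define PP_FLAGBITS 0x3Ful                   /* assumed flag mask in the link word */

    typedef int (*hwp_op_t)(struct mapping *mp); /* nonzero = keep scanning */

    /* Walk every mapping on a physical page, applying op to each (cf. hwpSrc32). */
    static int hw_walk_sketch(struct physent *pp, hwp_op_t op)
    {
        struct mapping *mp = (struct mapping *)(pp->ppLink & ~PP_FLAGBITS);

        while (mp != NULL) {                     /* like the rlwinm./beq pair */
            /* real code: mapInvPte32, call op through CTR, revalidate the PTE */
            if (!op(mp))
                return 0;                        /* op said bail (hwpEarly32) */
            mp = mp->mpAlias;                    /* chain on to the next */
        }
        return 1;                                /* end of chain (hwpNone32) */
    }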
- li r12,1 ; Get the locked value + .align 5 -htrLck1: lwarx r11,0,r7 ; Get the PTEG lock - mr. r11,r11 ; Is it locked? - bne- htrLckw1 ; Yeah... - stwcx. r12,0,r7 ; Try to take it - bne- htrLck1 ; Someone else was trying, try again... - b htrSXg1 ; All done... +hwpNone32: mtctr r28 ; Get the post routine address - .align 4 + lwz r30,ppLink+4(r29) ; Save the old RC + mr r3,r29 ; Get the physent address + bctrl ; Call post routine -htrLckw1: mr. r11,r11 ; Check if it is already held - beq+ htrLck1 ; It is clear... - lwz r11,0(r7) ; Get lock word again... - b htrLckw1 ; Wait... + bl mapPhysUnlock ; Unlock the physent + + mtmsr r26 ; Restore translation/mode/etc. + isync - .align 4 + b hwpReturn ; Go restore registers and return... -htrSXg1: isync ; Make sure we have not used anything yet + .align 5 - lwz r6,mmPTEent(r3) ; Get the pointer to the PTE now that the lock is set - lwz r2,mmPTEr(r3) ; Get the mapping copy of the real part +hwpEarly32: lwz r30,ppLink+4(r29) ; Save the old RC + mr r3,r29 ; Get the physent address + bl mapPhysUnlock ; Unlock the physent + + mtmsr r26 ; Restore translation/mode/etc. + isync + + b hwpReturn ; Go restore registers and return... - rlwinm r9,r5,1,0,3 ; Move in the segment - mr. r6,r6 ; Any PTE to invalidate? - rlwinm r8,r5,31,2,25 ; Line it up + .align 5 - beq+ htrnopte ; There is no PTE to invalidate... - - xor r8,r8,r6 ; Back hash to virt index - rlwimi r9,r5,22,4,9 ; Move in the API - lis r12,HIGH_ADDR(EXT(tlb_system_lock)) ; Get the TLBIE lock - rlwinm r5,r5,0,1,31 ; Clear the valid bit - ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) ; Grab up the bottom part - mfspr r11,pvr ; Find out what kind of machine we are - rlwimi r9,r8,6,10,19 ; Create the virtual address - rlwinm r11,r11,16,16,31 ; Isolate CPU type - - stw r5,0(r6) ; Make the PTE invalid - cmplwi cr1,r11,3 ; Is this a 603? - sync ; Make sure the invalid is stored - -htrtlbhang: lwarx r11,0,r12 ; Get the TLBIE lock - rlwinm r8,r6,29,29,31 ; Get the bit position of entry - mr. r11,r11 ; Is it locked? - lis r5,0x8000 ; Start up a bit mask - li r11,1 ; Get our lock word - bne- htrtlbhang ; It is locked, go wait... - stwcx. r11,0,r12 ; Try to get it - bne- htrtlbhang ; We was beat... +hwp64: bctrl ; Call preop routine + bne-- hwpEarly64 ; preop says to bail now... - li r11,0 ; Lock clear value + mtctr r27 ; Set up the op function address + + li r0,0xFF + ld r31,ppLink(r3) ; Get the pointer to the first mapping + rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC0000000000000003F + +hwpSrc64: andc. r31,r31,r0 ; Clean and test mapping address + beq hwpNone64 ; Did not find one... +; +; Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4, +; PTE low in R5. PTEG comes back locked if there is one +; + bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent - tlbie r9 ;Invalidate it everywhere + bctrl ; Call the op function - beq- cr1,htr603 ; It is a 603, skip the tlbsync... + crmove cr1_eq,cr0_eq ; Save the return code - eieio ; Make sure that the tlbie happens first - tlbsync ; wait for everyone to catch up - isync + mr. r3,r3 ; Was there a previously valid PTE? + beq-- hwpNxt64 ; Nope... 
+
+ std r5,8(r3) ; Save bottom of PTE
+ eieio ; Make sure we do not reorder
+ std r4,0(r3) ; Revalidate the PTE

-htr603: stw r11,0(r12) ; Clear the lock
- srw r5,r5,r8 ; Make a "free slot" mask
- sync ; Make sure of it all
-
- lwz r6,4(r6) ; Get the latest reference and change bits
- stw r11,mmPTEent(r3) ; Clear the pointer to the PTE
- rlwinm r6,r6,0,23,24 ; Extract the RC bits
- lwz r9,PCAallo(r7) ; Get the allocation control bits
- rlwinm r8,r5,24,8,15 ; Make the autogen bit to turn off
- lwz r10,mmphysent(r3) ; Get any physical entry
- or r9,r9,r5 ; Set the slot free
- rlwimi r8,r8,24,16,23 ; Get lock bit mask to turn it off
- andc r9,r9,r8 ; Clear the auto and lock bits
- mr. r10,r10 ; Is there a physical entry?
- li r5,pepte1 ; Get displacement to the second word of master pte
- stw r9,PCAallo(r7) ; Store the allocation controls
- rlwimi r2,r6,0,23,24 ; Stick in RC bits
- beq- htrnopte ; No physical entry...
-
-htrmrc: lwarx r11,r5,r10 ; Get the master copy
- or r11,r11,r6 ; Merge in latest RC
- stwcx. r11,r5,r10 ; Save it back
- bne- htrmrc ; If it changed, try again...
-
-htrnopte: rlwinm r5,r2,25,30,31 ; Position RC and mask off
- bf htrReset,htrnorst ; No reset to do...
- rlwinm r2,r2,0,25,22 ; Clear the RC if requested
-
-htrnorst: li r4,0 ; Get a 0
- stw r2,mmPTEr(r3) ; Set the real part of the PTE
-
- sync ; Make sure that stuff is all stored
-
- stw r4,0(r7) ; Unlock the hash chain
+ eieio ; Make sure all updates come first
+ stw r6,0(r7) ; Unlock the PCA
+
+hwpNxt64: bne-- cr1,hwpEarly64 ; op says to bail now...
+ ld r31,mpAlias(r31) ; Chain on to the next
+ li r0,0xFF
+ rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
+ b hwpSrc64 ; Check it out...

- mr r3,r5 ; Get the old RC to pass back
- mtmsr r0 ; Restore interrupts and translation
+ .align 5
+
+hwpNone64: mtctr r28 ; Get the post routine address
+
+ lwz r30,ppLink+4(r29) ; Save the old RC
+ mr r3,r29 ; Get the physent address
+ bctrl ; Call post routine
+
+ bl mapPhysUnlock ; Unlock the physent
+
+ mtmsrd r26 ; Restore translation/mode/etc.
 isync
- blr ; Return...
+ b hwpReturn ; Go restore registers and return...
+
+ .align 5

- .align 4
+hwpEarly64: lwz r30,ppLink+4(r29) ; Save the old RC
+ mr r3,r29 ; Get the physent address
+ bl mapPhysUnlock ; Unlock the physent
+
+ mtmsrd r26 ; Restore translation/mode/etc.
+ isync
+
+hwpReturn: lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
+ lwz r25,FM_ARG0+0x00(r1) ; Restore a register
+ lwz r26,FM_ARG0+0x04(r1) ; Restore a register
+ mr r3,r30 ; Pass back the RC
+ lwz r27,FM_ARG0+0x08(r1) ; Restore a register
+ lwz r28,FM_ARG0+0x0C(r1) ; Restore a register
+ mtlr r0 ; Restore the return
+ lwz r29,FM_ARG0+0x10(r1) ; Restore a register
+ lwz r30,FM_ARG0+0x14(r1) ; Restore a register
+ lwz r31,FM_ARG0+0x18(r1) ; Restore a register
+ lwz r1,0(r1) ; Pop the stack
+ blr ; Leave...

-htrcset: rlwinm r3,r2,25,30,31 ; Position RC and mask off
- mtmsr r0 ; Restore interrupts and translation
- isync
- blr ; Return...
+;
+; The preop/op/postop function table.
+; Each function must fit in its 128-byte slot (.align 7) and so be no more
+; than 32 instructions. If more than that, we must fix address calculations
+; at the start of hwpOpBase
+;
+; The routine must set CR0_EQ in order to continue scan.
+; If CR0_EQ is not set, an early return from the function is made.
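In C terms, the table that follows is an array of fixed-stride handlers indexed by the op code; the assembly reaches a handler by computing hwpOpBase + (op << 7) and branching through CTR. A hypothetical sketch (the handler bodies here are stand-ins, not the real functions):

    #include <stdbool.h>

    typedef bool (*hwp_func_t)(void);        /* true = CR0_EQ set, continue scan */

    static bool hwpNoop(void)     { return true;  }  /* function 0 stand-in */
    static bool hwpStopScan(void) { return false; }  /* early-return stand-in */

    static const hwp_func_t hwpOps[] = {
        hwpNoop,                             /* 0 - no operation */
        hwpStopScan,                         /* 1..16 in the real table */
    };

    static bool hwp_dispatch(unsigned int op)
    {
        return hwpOps[op]();                 /* like: mtctr r12 / bctrl */
    }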
+; -/* - * hw_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) - Sets the default physical page attributes - * - * Note that this must be done with both interruptions off and VM off - * Move the passed in attributes into the pte image in the phys entry - * - * - */ + .align 7 + +hwpOpBase: + +; Function 0 - No operation + +hwpNoop: cmplw r0,r0 ; Make sure CR0_EQ is set + blr ; Just return... .align 5 - .globl EXT(hw_phys_attr) -LEXT(hw_phys_attr) +; This is the continuation of function 4 - Set attributes in mapping -#if PERFTIMES && DEBUG - mflr r11 - mr r8,r3 - mr r7,r5 - mr r5,r4 -// lwz r4,4(r3) - li r4,0x1111 - li r3,30 - bl EXT(dbgLog2) ; Start of hw_add_map - mr r3,r8 - mr r4,r5 - mr r5,r7 - mtlr r11 -#endif - mfsprg r9,2 ; Get feature flags - mfmsr r0 /* Save the MSR */ - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - andi. r5,r5,0x0078 /* Clean up the WIMG */ - rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ - mtcrf 0x04,r9 ; Set the features - rlwimi r5,r4,0,30,31 /* Move the protection into the wimg register */ - la r6,pepte1(r3) /* Point to the default pte */ - rlwinm r12,r12,0,28,25 /* Clear IR and DR */ - - bt pfNoMSRirb,hpaNoMSR ; No MSR... - - mtmsr r12 ; Translation and all off - isync ; Toss prefetch - b hpaNoMSRx +; We changed the attributes of a mapped page. Make sure there are no cache paradoxes. +; NOTE: Do we have to deal with i-cache here? + +hwpSAM: li r11,4096 ; Get page size -hpaNoMSR: - mr r10,r0 - mr r4,r3 - li r0,loadMSR ; Get the MSR setter SC - mr r3,r12 ; Get new MSR - sc ; Set it - mr r3,r4 - mr r0,r10 -hpaNoMSRx: - -atmattr: lwarx r10,0,r6 /* Get the pte */ - rlwimi r10,r5,0,25,31 /* Move in the new attributes */ - stwcx. r10,0,r6 /* Try it on for size */ - bne- atmattr /* Someone else was trying, try again... */ - - mtmsr r0 /* Interrupts and translation back on */ - isync -#if PERFTIMES && DEBUG - mflr r11 - mr r4,r10 - li r3,31 - bl EXT(dbgLog2) ; Start of hw_add_map - mtlr r11 -#endif - blr /* All done... */ +hwpSAMinvd: sub. r11,r11,r9 ; Back off a line + dcbf r11,r5 ; Flush the line in the data cache + bgt++ hwpSAMinvd ; Go do the rest of it... + + sync ; Make sure it is done + li r11,4096 ; Get page size + +hwpSAMinvi: sub. r11,r11,r9 ; Back off a line + icbi r11,r5 ; Flush the line in the icache + bgt++ hwpSAMinvi ; Go do the rest of it... + + sync ; Make sure it is done + cmpw r0,r0 ; Make sure we return CR0_EQ + blr ; Return... -/* - * handlePF - handle a page fault interruption - * - * If the fault can be handled, this routine will RFI directly, - * otherwise it will return with all registers as in entry. - * - * Upon entry, state and all registers have been saved in savearea. - * This is pointed to by R13. - * IR and DR are off, interrupts are masked, - * Floating point be disabled. - * R3 is the interrupt code. - * - * If we bail, we must restore cr5, and all registers except 6 and - * 3. - * - */ - - .align 5 - .globl EXT(handlePF) -LEXT(handlePF) +; Function 1 - Set protection in physent -/* - * This first part does a quick check to see if we can handle the fault. - * We can't handle any kind of protection exceptions here, so we pass - * them up to the next level. - * - * The mapping lists are kept in MRS (most recently stolen) - * order on queues anchored within from the - * PTEG to which the virtual address hashes. 
This is further segregated by - * the low-order 3 bits of the VSID XORed with the segment number and XORed - * with bits 4-7 of the vaddr in an attempt to keep the searches - * short. - * - * MRS is handled by moving the entry to the head of its list when stolen in the - * assumption that it will be revalidated soon. Entries are created on the head - * of the list because they will be used again almost immediately. - * - * We need R13 set to the savearea, R3 set to the interrupt code, and R2 - * set to the per_proc. - * - * NOTE: In order for a page-fault redrive to work, the translation miss - * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur - * before we come here. - */ + .set .,hwpOpBase+(1*128) ; Generate error if previous function too long + +hwpSPrtPhy: li r5,ppLink+4 ; Get offset for flag part of physent - cmplwi r3,T_INSTRUCTION_ACCESS /* See if this is for the instruction */ - lwz r8,savesrr1(r13) ; Get the MSR to determine mode - beq- gotIfetch ; We have an IFETCH here... +hwpSPrtPhX: lwarx r4,r5,r29 ; Get the old flags + rlwimi r4,r25,0,ppPPb-32,ppPPe-32 ; Stick in the new protection + stwcx. r4,r5,r29 ; Try to stuff it + bne-- hwpSPrtPhX ; Try again... +; Note: CR0_EQ is set because of stwcx. + blr ; Return... - lwz r7,savedsisr(r13) /* Get the DSISR */ - lwz r6,savedar(r13) /* Get the fault address */ - b ckIfProt ; Go check if this is a protection fault... -gotIfetch: mr r7,r8 ; IFETCH info is in SRR1 - lwz r6,savesrr0(r13) /* Get the instruction address */ +; Function 2 - Set protection in mapping -ckIfProt: rlwinm. r7,r7,0,1,1 ; Is this a protection exception? - beqlr- ; Yes... (probably not though) + .set .,hwpOpBase+(2*128) ; Generate error if previous function too long -/* - * We will need to restore registers if we bail after this point. - * Note that at this point several SRs have been changed to the kernel versions. - * Therefore, for these we must build these values. - */ +hwpSPrtMap: lwz r9,mpFlags(r31) ; Get the mapping flags + lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping + rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent? + li r0,lo16(mpPP) ; Get protection bits + crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent + rlwinm r2,r25,0,mpPPb-32,mpPPb-32+2 ; Position new protection + beqlr-- ; Leave if permanent mapping (before we trash R5)... + andc r5,r5,r0 ; Clear the old prot bits + or r5,r5,r2 ; Move in the prot bits + rlwimi r8,r5,0,20,31 ; Copy into the mapping copy + cmpw r0,r0 ; Make sure we return CR0_EQ + stw r8,mpVAddr+4(r31) ; Set the flag part of mapping + blr ; Leave... + +; Function 3 - Set attributes in physent -#if PERFTIMES && DEBUG - mflr r11 - mr r5,r6 - mr r4,r3 - li r3,32 - bl EXT(dbgLog2) ; Start of hw_add_map - mr r3,r4 - mtlr r11 - mfsprg r2,0 -#endif - lwz r3,PP_USERPMAP(r2) ; Get the user pmap (not needed if kernel access, but optimize for user??) - rlwinm. r8,r8,0,MSR_PR_BIT,MSR_PR_BIT ; Supervisor state access? - rlwinm r5,r6,6,26,29 ; Get index to the segment slot - eqv r1,r1,r1 ; Fill the bottom with foxes - bne+ notsuper ; Go do the user mode interrupt stuff... - - cmplwi cr1,r5,SR_COPYIN_NUM*4 ; See if this is the copyin/copyout segment - rlwinm r3,r6,24,8,11 ; Make the kernel VSID - bne+ cr1,havevsid ; We are done if we do not want the copyin/out guy... - - mfsr r3,SR_COPYIN ; Get the copy vsid - b havevsid ; Join up... 
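For background, the PTEG lookup assembled at havevsid below follows the standard 32-bit PowerPC hashed-page-table scheme. A simplified C rendering (variable names are invented, the HTABORG/HTABMASK split of SDR1 is per the PowerPC architecture, and the PCA bookkeeping the code also performs is omitted):

    #include <stdint.h>

    /* Compute the physical address of the primary PTEG for an effective
     * address, given SDR1 and the VSID already selected for its segment. */
    static uint32_t pteg_address(uint32_t sdr1, uint32_t vsid, uint32_t ea)
    {
        uint32_t page_index = (ea >> 12) & 0xFFFF;      /* EA bits 4:19 */
        uint32_t hash = (vsid & 0x7FFFF) ^ page_index;  /* primary hash */
        uint32_t htaborg = sdr1 & 0xFFFF0000;           /* table base */
        uint32_t htabmask = sdr1 & 0x1FF;               /* table size mask */

        return htaborg
            | (((hash >> 10) & htabmask) << 16)         /* high hash bits */
            | ((hash & 0x3FF) << 6);                    /* 64-byte PTEG slots */
    }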
- - .align 5 - -notsuper: addi r5,r5,PMAP_SEGS ; Get offset to table - lwzx r3,r3,r5 ; Get the VSID - -havevsid: mfspr r5,sdr1 /* Get hash table base and size */ - cror cr1_eq,cr0_eq,cr0_eq ; Remember if kernel fault for later - rlwinm r9,r6,2,2,5 ; Move nybble 1 up to 0 (keep aligned with VSID) - rlwimi r1,r5,16,0,15 /* Make table size -1 out of mask */ - rlwinm r3,r3,6,2,25 /* Position the space for the VSID */ - rlwinm r7,r6,26,10,25 /* Isolate the page index */ - xor r9,r9,r3 ; Splooch vaddr nybble 0 (from VSID) and 1 together - or r8,r5,r1 /* Point to the last byte in table */ - xor r7,r7,r3 /* Get primary hash */ - rlwinm r3,r3,1,1,24 /* Position VSID for pte ID */ - addi r8,r8,1 /* Point to the PTEG Control Area */ - rlwinm r9,r9,8,27,29 ; Get splooched bits in place - and r7,r7,r1 /* Wrap the hash */ - rlwimi r3,r6,10,26,31 /* Move API into pte ID */ - add r8,r8,r7 /* Point to our PCA entry */ - rlwinm r12,r3,27,27,29 ; Get low 3 bits of the VSID for look-aside hash - la r11,PCAhash(r8) /* Point to the mapping hash area */ - xor r9,r9,r12 ; Finish splooching nybble 0, 1, and the low bits of the VSID + .set .,hwpOpBase+(3*128) ; Generate error if previous function too long +hwpSAtrPhy: li r5,ppLink+4 ; Get offset for flag part of physent -/* - * We have about as much as we need to start searching the autogen (aka block maps) - * and mappings. From here on, any kind of failure will bail, and - * contention will either bail or restart from here. - * - * - */ - - li r12,1 /* Get the locked value */ - dcbt 0,r11 /* We'll need the hash area in a sec, so get it */ - add r11,r11,r9 /* Point to the right mapping hash slot */ - -ptegLck: lwarx r10,0,r8 /* Get the PTEG lock */ - mr. r10,r10 /* Is it locked? */ - bne- ptegLckw /* Yeah... */ - stwcx. r12,0,r8 /* Take take it */ - bne- ptegLck /* Someone else was trying, try again... */ - b ptegSXg /* All done... */ +hwpSAtrPhX: lwarx r4,r5,r29 ; Get the old flags + rlwimi r4,r25,0,ppIb-32,ppGb-32 ; Stick in the new attributes + stwcx. r4,r5,r29 ; Try to stuff it + bne-- hwpSAtrPhX ; Try again... +; Note: CR0_EQ is set because of stwcx. + blr ; Return... - .align 4 +; Function 4 - Set attributes in mapping -ptegLckw: mr. r10,r10 /* Check if it's already held */ - beq+ ptegLck /* It's clear... */ - lwz r10,0(r8) /* Get lock word again... */ - b ptegLckw /* Wait... */ + .set .,hwpOpBase+(4*128) ; Generate error if previous function too long + +hwpSAtrMap: lwz r9,mpFlags(r31) ; Get the mapping flags + lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping + li r2,0x10 ; Force on coherent + rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent? + li r0,lo16(mpWIMG) ; Get wimg mask + crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent + rlwimi r2,r2,mpIb-ppIb,mpIb-32,mpIb-32 ; Copy in the cache inhibited bit + beqlr-- ; Leave if permanent mapping (before we trash R5)... + andc r5,r5,r0 ; Clear the old wimg + rlwimi r2,r2,32-(mpGb-ppGb),mpGb-32,mpGb-32 ; Copy in the guarded bit + mfsprg r9,2 ; Feature flags + or r5,r5,r2 ; Move in the new wimg + rlwimi r8,r5,0,20,31 ; Copy into the mapping copy + lwz r2,mpPAddr(r31) ; Get the physical address + li r0,0xFFF ; Start a mask + andi. 
r9,r9,pf32Byte+pf128Byte ; Get cache line size + rlwinm r5,r0,0,1,0 ; Copy to top half + stw r8,mpVAddr+4(r31) ; Set the flag part of mapping + rlwinm r2,r2,12,1,0 ; Copy to top and rotate to make physical address with junk left + and r5,r5,r2 ; Clean stuff in top 32 bits + andc r2,r2,r0 ; Clean bottom too + rlwimi r5,r2,0,0,31 ; Insert low 23 to make full physical address + b hwpSAM ; Join common - .align 5 +; NOTE: we moved the remainder of the code out of here because it +; did not fit in the 128 bytes allotted. It got stuck into the free space +; at the end of the no-op function. + + + - nop ; Force ISYNC to last instruction in IFETCH - nop - nop +; Function 5 - Clear reference in physent -ptegSXg: isync /* Make sure we haven't used anything yet */ + .set .,hwpOpBase+(5*128) ; Generate error if previous function too long - lwz r9,0(r11) /* Pick up first mapping block */ - mr r5,r11 /* Get the address of the anchor */ - mr r7,r9 /* Save the first in line */ - b findmap ; Take space and force loop to cache line - -findmap: mr. r12,r9 /* Are there more? */ - beq- tryAuto /* Nope, nothing in mapping list for us... */ - - lwz r10,mmPTEv(r12) /* Get unique PTE identification */ - lwz r9,mmhashnext(r12) /* Get the chain, just in case */ - cmplw r10,r3 /* Did we hit our PTE? */ - lwz r0,mmPTEent(r12) /* Get the pointer to the hash table entry */ - mr r5,r12 /* Save the current as previous */ - bne- findmap ; Nothing here, try the next... - -; Cache line boundary here - - cmplwi cr1,r0,0 /* Is there actually a PTE entry in the hash? */ - lwz r2,mmphysent(r12) /* Get the physical entry */ - bne- cr1,MustBeOK /* There's an entry in the hash table, so, this must - have been taken care of already... */ - lis r4,0x8000 ; Tell PTE inserter that this was not an auto - cmplwi cr2,r2,0 /* Is there a physical entry? */ - li r0,0x0100 /* Force on the reference bit whenever we make a PTE valid */ - bne+ cr2,gotphys /* Skip down if we have a physical entry */ - li r0,0x0180 /* When there is no physical entry, force on - both R and C bits to keep hardware from - updating the PTE to set them. We don't - keep track of RC for I/O areas, so this is ok */ - -gotphys: lwz r2,mmPTEr(r12) ; Get the second part of the PTE - b insert /* Go insert into the PTEG... */ - -MustBeOK: li r10,0 /* Get lock clear value */ - li r3,T_IN_VAIN /* Say that we handled it */ - stw r10,PCAlock(r8) /* Clear the PTEG lock */ - -#if PERFTIMES && DEBUG - mflr r11 - mr r4,r3 - li r3,33 - bl EXT(dbgLog2) ; Start of hw_add_map - mr r3,r4 - mtlr r11 -#endif - blr /* Blow back and handle exception */ +hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent +hwpCRefPhX: lwarx r4,r5,r29 ; Get the old flags + rlwinm r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R + stwcx. r4,r5,r29 ; Try to stuff it + bne-- hwpCRefPhX ; Try again... +; Note: CR0_EQ is set because of stwcx. + blr ; Return... -/* - * We couldn't find it in the mapping list. As a last try, we will - * see if we can autogen it from the block mapped list. - * - * A block mapped area is defined as a contiguous virtual area that is mapped to - * a contiguous physical area. The olde-tyme IBM VM/XA Interpretive Execution - * architecture referred to this as a V=F, or Virtual = Fixed area. - * - * We consider a V=F area to be a single entity, adjacent areas can not be merged - * or overlapped. The protection and memory attributes are the same and reference - * and change indications are not kept. 
The areas are not considered part of the - * physical RAM of the machine and do not have any associated physical table - * entries. Their primary use is intended for mapped I/O areas (e.g., framebuffers) - * although certain areas of RAM, such as the kernel V=R memory, can be mapped. - * - * We also have a problem in the case of copyin/out: that access is done - * within the kernel for a user address. Unfortunately, the user isn't - * necessarily the current guy. That means that we don't have access to the - * right autogen list. We can't support this kind of access. So, we need to do - * a quick check here and cause a fault if an attempt to copyin or out to - * any autogenned area. - * - * The lists must be kept short. - * - * NOTE: kernel_pmap_store must be in V=R storage!!!!!!!!!!!!!! - */ - - .align 5 +; Function 6 - Clear reference in mapping -tryAuto: rlwinm. r11,r3,0,5,24 ; Check if this is a kernel VSID - lis r10,HIGH_ADDR(EXT(kernel_pmap_store)+PMAP_BMAPS) ; Get the top part of kernel block map anchor - crandc cr0_eq,cr1_eq,cr0_eq ; Set if kernel access and non-zero VSID (copyin or copyout) - mfsprg r11,0 ; Get the per_proc area - beq- cr0,realFault ; Can not autogen for copyin/copyout... - ori r10,r10,LOW_ADDR(EXT(kernel_pmap_store)+PMAP_BMAPS) ; Get the bottom part - beq- cr1,bmInKernel ; We are in kernel... (cr1 set way back at entry) - - lwz r10,PP_USERPMAP(r11) ; Get the user pmap - la r10,PMAP_BMAPS(r10) ; Point to the chain anchor - b bmInKernel ; Jump over alignment gap... - nop - nop - nop - nop - nop - nop -bmInKernel: -#ifndef CHIP_ERRATA_MAX_V1 - lwarx r9,0,r10 -#endif /* CHIP_ERRATA_MAX_V1 */ - -bmapLck: lwarx r9,0,r10 ; Get the block map anchor and lock - rlwinm. r5,r9,0,31,31 ; Is it locked? - ori r5,r5,1 ; Set the lock - bne- bmapLckw ; Yeah... - stwcx. r5,0,r10 ; Lock the bmap list - bne- bmapLck ; Someone else was trying, try again... - b bmapSXg ; All done... - - .align 4 - -bmapLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held - beq+ bmapLck ; Not no more... - lwz r9,0(r10) ; Get lock word again... - b bmapLckw ; Check it out... - - .align 5 - - nop ; Force ISYNC to last instruction in IFETCH - nop - nop - -bmapSXg: rlwinm. r4,r9,0,0,26 ; Clear out flags and lock - isync ; Make sure we have not used anything yet - bne+ findAuto ; We have something, let us go... - -bmapNone: stw r9,0(r10) ; Unlock it, we have nothing here - ; No sync here because we have not changed anything - -/* - * When we come here, we know that we can't handle this. Restore whatever - * state that we trashed and go back to continue handling the interrupt. - */ + .set .,hwpOpBase+(6*128) ; Generate error if previous function too long -realFault: li r10,0 /* Get lock clear value */ - lwz r3,saveexception(r13) /* Figure out the exception code again */ - stw r10,PCAlock(r8) /* Clear the PTEG lock */ -#if PERFTIMES && DEBUG - mflr r11 - mr r4,r3 - li r3,33 - bl EXT(dbgLog2) ; Start of hw_add_map - mr r3,r4 - mtlr r11 -#endif - blr /* Blow back and handle exception */ - - .align 5 - -findAuto: mr. r4,r4 ; Is there more? - beq- bmapNone ; No more... - lwz r5,bmstart(r4) ; Get the bottom of range - lwz r11,bmend(r4) ; Get the top of range - cmplw cr0,r6,r5 ; Are we before the entry? - cmplw cr1,r6,r11 ; Are we after the entry? - cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range - bne+ cr1,faGot ; Found it... - - lwz r4,bmnext(r4) ; Get the next one - b findAuto ; Check it out... - -faGot: - lwz r7,blkFlags(r4) ; Get the flags - rlwinm. 
r7,r7,0,blkRembit,blkRembit ; is this mapping partially removed - bne bmapNone ; Pending remove, bail out - rlwinm r6,r6,0,0,19 ; Round to page - lwz r2,bmPTEr(r4) ; Get the real part of the PTE - sub r5,r6,r5 ; Get offset into area - stw r9,0(r10) ; Unlock it, we are done with it (no sync needed) - add r2,r2,r5 ; Adjust the real address - - lis r4,0x8080 /* Indicate that this was autogened */ - li r0,0x0180 /* Autogenned areas always set RC bits. - This keeps the hardware from having - to do two storage writes */ - -/* - * Here where we insert the PTE into the hash. The PTE image is in R3, R2. - * The PTEG allocation controls are a bit map of the state of the PTEG. The - * PCAlock bits are a temporary lock for the specified PTE. PCAfree indicates that - * the PTE slot is empty. PCAauto means that it comes from an autogen area. These - * guys do not keep track of reference and change and are actually "wired". - * They're easy to maintain. PCAsteal - * is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these - * fields fit in a single word and are loaded and stored under control of the - * PTEG control area lock (PCAlock). - * - * Note that PCAauto does not contribute to the steal calculations at all. Originally - * it did, autogens were second in priority. This can result in a pathalogical - * case where an instruction can not make forward progress, or one PTE slot - * thrashes. - * - * Physically, the fields are arranged: - * 0: PCAfree - * 1: PCAauto - * 2: PCAlock - * 3: PCAsteal - */ - -insert: lwz r10,PCAallo(r8) /* Get the PTEG controls */ - eqv r6,r6,r6 /* Get all ones */ - mr r11,r10 /* Make a copy */ - rlwimi r6,r10,8,16,23 /* Insert sliding steal position */ - rlwimi r11,r11,24,24,31 /* Duplicate the locked field */ - addi r6,r6,-256 /* Form mask */ - rlwimi r11,r11,16,0,15 /* This gives us a quadrupled lock mask */ - rlwinm r5,r10,31,24,0 /* Slide over the mask for next time */ - mr r9,r10 /* Make a copy to test */ - not r11,r11 /* Invert the quadrupled lock */ - or r2,r2,r0 /* Force on R, and maybe C bit */ - and r9,r9,r11 /* Remove the locked guys */ - rlwimi r5,r5,8,24,24 /* Wrap bottom bit to top in mask */ - rlwimi r9,r11,0,16,31 /* Put two copies of the unlocked entries at the end */ - rlwinm r6,r6,0,16,7 ; Remove the autogens from the priority calculations - rlwimi r10,r5,0,24,31 /* Move steal map back in */ - and r9,r9,r6 /* Set the starting point for stealing */ - -/* So, now we have in R9: - byte 0 = ~locked & free - byte 1 = 0 - byte 2 = ~locked & (PCAsteal - 1) - byte 3 = ~locked - - Each bit position represents (modulo 8) a PTE. If it is 1, it is available for - allocation at its priority level, left to right. - - Additionally, the PCA steal field in R10 has been rotated right one bit. -*/ - - - rlwinm r21,r10,8,0,7 ; Isolate just the old autogen bits - cntlzw r6,r9 /* Allocate a slot */ - mr r14,r12 /* Save our mapping for later */ - cmplwi r6,32 ; Was there anything available? - rlwinm r7,r6,29,30,31 /* Get the priority slot we got this from */ - rlwinm r6,r6,0,29,31 ; Isolate bit position - srw r11,r4,r6 /* Position the PTEG control bits */ - slw r21,r21,r6 ; Move corresponding old autogen flag to bit 0 - mr r22,r11 ; Get another copy of the selected slot - - beq- realFault /* Arghh, no slots! Take the long way 'round... */ - - /* Remember, we've already set up the mask pattern - depending upon how we got here: - if got here from simple mapping, R4=0x80000000, - if we got here from autogen it is 0x80800000. 
*/ - - rlwinm r6,r6,3,26,28 /* Start calculating actual PTE address */ - rlwimi r22,r22,24,8,15 ; Duplicate selected slot in second byte - rlwinm. r11,r11,0,8,15 /* Isolate just the auto bit (remember about it too) */ - andc r10,r10,r22 /* Turn off the free and auto bits */ - add r6,r8,r6 /* Get position into PTEG control area */ - cmplwi cr1,r7,1 /* Set the condition based upon the old PTE type */ - sub r6,r6,r1 /* Switch it to the hash table */ - or r10,r10,r11 /* Turn auto on if it is (PTEG control all set up now) */ - subi r6,r6,1 /* Point right */ - stw r10,PCAallo(r8) /* Allocate our slot */ - dcbt br0,r6 ; Touch in the PTE - bne wasauto /* This was autogenned... */ - - stw r6,mmPTEent(r14) /* Link the mapping to the PTE slot */ - -/* - * So, now we're here and what exactly do we have? We've got: - * 1) a full PTE entry, both top and bottom words in R3 and R2 - * 2) an allocated slot in the PTEG. - * 3) R8 still points to the PTEG Control Area (PCA) - * 4) R6 points to the PTE entry. - * 5) R1 contains length of the hash table-1. We use this to back-translate - * a PTE to a virtual address so we can invalidate TLBs. - * 6) R11 has a copy of the PCA controls we set. - * 7a) R7 indicates what the PTE slot was before we got to it. 0 shows - * that it was empty and 2 or 3, that it was - * a we've stolen a live one. CR1 is set to LT for empty and GT - * otherwise. - * 7b) Bit 0 of R21 is 1 if the stolen PTE was autogenned - * 8) So far as our selected PTE, it should be valid if it was stolen - * and invalid if not. We could put some kind of assert here to - * check, but I think that I'd rather leave it in as a mysterious, - * non-reproducable bug. - * 9) The new PTE's mapping has been moved to the front of its PTEG hash list - * so that it's kept in some semblance of a MRU list. - * 10) R14 points to the mapping we're adding. - * - * So, what do we have to do yet? - * 1) If we stole a slot, we need to invalidate the PTE completely. - * 2) If we stole one AND it was not an autogen, - * copy the entire old PTE (including R and C bits) to its mapping. - * 3) Set the new PTE in the PTEG and make sure it is valid. - * 4) Unlock the PTEG control area. - * 5) Go back to the interrupt handler, changing the interrupt - * code to "in vain" which will restore the registers and bail out. - * - */ -wasauto: oris r3,r3,0x8000 /* Turn on the valid bit */ - blt+ cr1,slamit /* It was empty, go slam it on in... 
*/ - - lwz r10,0(r6) /* Grab the top part of the PTE */ - rlwinm r12,r6,6,4,19 /* Match up the hash to a page boundary */ - rlwinm r5,r10,5,4,19 /* Extract the VSID to a page boundary */ - rlwinm r10,r10,0,1,31 /* Make it invalid */ - xor r12,r5,r12 /* Calculate vaddr */ - stw r10,0(r6) /* Invalidate the PTE */ - rlwinm r5,r10,7,27,29 ; Move nybble 0 up to subhash position - rlwimi r12,r10,1,0,3 /* Move in the segment portion */ - lis r9,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */ - xor r5,r5,r10 ; Splooch nybble 0 and 1 - rlwimi r12,r10,22,4,9 /* Move in the API */ - ori r9,r9,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */ - rlwinm r4,r10,27,27,29 ; Get low 3 bits of the VSID for look-aside hash - - sync /* Make sure the invalid is stored */ - - xor r4,r4,r5 ; Finish splooching nybble 0, 1, and the low bits of the VSID - -tlbhang: lwarx r5,0,r9 /* Get the TLBIE lock */ - - rlwinm r4,r4,0,27,29 ; Clean up splooched hash value +hwpCRefMap: li r0,lo16(mpR) ; Get reference bit + lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping + andc r5,r5,r0 ; Clear in PTE copy + andc r8,r8,r0 ; and in the mapping + cmpw r0,r0 ; Make sure we return CR0_EQ + stw r8,mpVAddr+4(r31) ; Set the flag part of mapping + blr ; Return... - mr. r5,r5 /* Is it locked? */ - add r4,r4,r8 /* Point to the offset into the PCA area */ - li r5,1 /* Get our lock word */ - bne- tlbhang /* It's locked, go wait... */ - - la r4,PCAhash(r4) /* Point to the start of the hash chain for the PTE we're replacing */ - - stwcx. r5,0,r9 /* Try to get it */ - bne- tlbhang /* We was beat... */ - mfspr r7,pvr /* Find out what kind of machine we are */ - li r5,0 /* Lock clear value */ - rlwinm r7,r7,16,16,31 /* Isolate CPU type */ +; Function 7 - Clear change in physent - tlbie r12 /* Invalidate it everywhere */ + .set .,hwpOpBase+(7*128) ; Generate error if previous function too long - cmplwi r7,3 /* Is this a 603? */ - stw r5,0(r9) /* Clear the lock */ - - beq- its603 /* It's a 603, skip the tlbsync... */ - - eieio /* Make sure that the tlbie happens first */ - tlbsync /* wait for everyone to catch up */ - isync - -its603: rlwinm. r21,r21,0,0,0 ; See if we just stole an autogenned entry - sync /* Make sure of it all */ +hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent - bne slamit ; The old was an autogen, time to slam the new in... - - lwz r9,4(r6) /* Get the real portion of old PTE */ - lwz r7,0(r4) /* Get the first element. We can't get to here - if we aren't working with a mapping... */ - mr r0,r7 ; Save pointer to first element - -findold: mr r1,r11 ; Save the previous guy - mr. r11,r7 /* Copy and test the chain */ - beq- bebad /* Assume it's not zero... */ +hwpCCngPhX: lwarx r4,r5,r29 ; Get the old flags + rlwinm r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C + stwcx. r4,r5,r29 ; Try to stuff it + bne-- hwpCCngPhX ; Try again... +; Note: CR0_EQ is set because of stwcx. + blr ; Return... - lwz r5,mmPTEv(r11) /* See if this is the old active one */ - cmplw cr2,r11,r14 /* Check if this is actually the new one */ - cmplw r5,r10 /* Is this us? (Note: valid bit kept off in mappings) */ - lwz r7,mmhashnext(r11) /* Get the next one in line */ - beq- cr2,findold /* Don't count the new one... */ - cmplw cr2,r11,r0 ; Check if we are first on the list - bne+ findold /* Not it (and assume the worst)... */ - lwz r12,mmphysent(r11) /* Get the pointer to the physical entry */ - beq- cr2,nomove ; We are first, no need to requeue... 
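The physent flag updates above (and the set variants that follow) all use the same lwarx/stwcx. read-modify-write loop. Sketched as a compare-and-swap loop with GCC-style builtins (the bit value is a placeholder, not the real ppC assignment):

    #include <stdbool.h>
    #include <stdint.h>

    #define PP_C_BIT (1u << 20)              /* hypothetical change bit */

    static void phys_clear_change(volatile uint32_t *flags)
    {
        uint32_t old = *flags;               /* like lwarx */
        uint32_t val;

        do {
            val = old & ~PP_C_BIT;           /* clear C, like the rlwinm */
        } while (!__atomic_compare_exchange_n(flags, &old, val, true,
            __ATOMIC_RELAXED, __ATOMIC_RELAXED));
                                             /* like stwcx. plus bne-- retry */
    }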
+; Function 8 - Clear change in mapping - stw r11,0(r4) ; Chain us to the head - stw r0,mmhashnext(r11) ; Chain the old head to us - stw r7,mmhashnext(r1) ; Unlink us + .set .,hwpOpBase+(8*128) ; Generate error if previous function too long + +hwpCCngMap: li r0,lo16(mpC) ; Get change bit + lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping + andc r5,r5,r0 ; Clear in PTE copy + andc r8,r8,r0 ; and in the mapping + cmpw r0,r0 ; Make sure we return CR0_EQ + stw r8,mpVAddr+4(r31) ; Set the flag part of mapping + blr ; Return... -nomove: li r5,0 /* Clear this on out */ - - mr. r12,r12 /* Is there a physical entry? */ - stw r5,mmPTEent(r11) ; Clear the PTE entry pointer - li r5,pepte1 /* Point to the PTE last half */ - stw r9,mmPTEr(r11) ; Squirrel away the whole thing (RC bits are in here) - - beq- mrgmrcx ; No physical entry for this one... - rlwinm r11,r9,0,23,24 /* Keep only the RC bits */ +; Function 9 - Set reference in physent -mrgmrcx: lwarx r9,r5,r12 /* Get the master copy */ - or r9,r9,r11 /* Merge in latest RC */ - stwcx. r9,r5,r12 /* Save it back */ - bne- mrgmrcx /* If it changed, try again... */ + .set .,hwpOpBase+(9*128) ; Generate error if previous function too long -/* - * Here's where we finish up. We save the real part of the PTE, eieio it, to make sure it's - * out there before the top half (with the valid bit set). - */ +hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent + +hwpSRefPhX: lwarx r4,r5,r29 ; Get the old flags + ori r4,r4,lo16(ppR) ; Set the reference + stwcx. r4,r5,r29 ; Try to stuff it + bne-- hwpSRefPhX ; Try again... +; Note: CR0_EQ is set because of stwcx. + blr ; Return... -slamit: stw r2,4(r6) /* Stash the real part */ - li r4,0 /* Get a lock clear value */ - eieio /* Erect a barricade */ - stw r3,0(r6) /* Stash the virtual part and set valid on */ - - stw r4,PCAlock(r8) /* Clear the PCA lock */ - - li r3,T_IN_VAIN /* Say that we handled it */ - sync /* Go no further until the stores complete */ -#if PERFTIMES && DEBUG - mflr r11 - mr r4,r3 - li r3,33 - bl EXT(dbgLog2) ; Start of hw_add_map - mr r3,r4 - mtlr r11 -#endif - blr /* Back to the fold... */ - -bebad: lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! */ - ori r0,r0,LOW_ADDR(Choke) - sc /* Firmware Heimlich maneuver */ -/* - * This walks the hash table or DBATs to locate the physical address of a virtual one. - * The space is provided. If it is the kernel space, the DBATs are searched first. Failing - * that, the hash table is accessed. Zero is returned for failure, so it must be special cased. - * This is usually used for debugging, so we try not to rely - * on anything that we don't have to. - */ +; Function 10 - Set reference in mapping -ENTRY(LRA, TAG_NO_FRAME_USED) + .set .,hwpOpBase+(10*128) ; Generate error if previous function too long + +hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping + ori r5,r5,lo16(mpR) ; Set reference in PTE low + ori r8,r8,lo16(mpR) ; Set reference in mapping + cmpw r0,r0 ; Make sure we return CR0_EQ + stw r8,mpVAddr+4(r31) ; Set the flag part of mapping + blr ; Return... + +; Function 11 - Set change in physent - mfsprg r8,2 ; Get feature flags - mfmsr r10 /* Save the current MSR */ - rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - mtcrf 0x04,r8 ; Set the features - xoris r5,r3,HIGH_ADDR(PPC_SID_KERNEL) /* Clear the top half if equal */ - andi. 
r9,r10,0x7FCF /* Turn off interrupts and translation */ - eqv r12,r12,r12 /* Fill the bottom with foxes */ + .set .,hwpOpBase+(11*128) ; Generate error if previous function too long - bt pfNoMSRirb,lraNoMSR ; No MSR... +hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent - mtmsr r9 ; Translation and all off - isync ; Toss prefetch - b lraNoMSRx +hwpSCngPhX: lwarx r4,r5,r29 ; Get the old flags + ori r4,r4,lo16(ppC) ; Set the change bit + stwcx. r4,r5,r29 ; Try to stuff it + bne-- hwpSCngPhX ; Try again... +; Note: CR0_EQ is set because of stwcx. + blr ; Return... -lraNoMSR: - mr r7,r3 - li r0,loadMSR ; Get the MSR setter SC - mr r3,r9 ; Get new MSR - sc ; Set it - mr r3,r7 -lraNoMSRx: - - cmplwi r5,LOW_ADDR(PPC_SID_KERNEL) /* See if this is kernel space */ - rlwinm r11,r3,6,6,25 /* Position the space for the VSID */ - isync /* Purge pipe */ - bne- notkernsp /* This is not for the kernel... */ - - mfspr r5,dbat0u /* Get the virtual address and length */ - eqv r8,r8,r8 /* Get all foxes */ - rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */ - rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */ - beq- ckbat1 /* not valid, skip this one... */ - sub r7,r4,r7 /* Subtract out the base */ - rlwimi r8,r5,15,0,14 /* Get area length - 1 */ - mfspr r6,dbat0l /* Get the real part */ - cmplw r7,r8 /* Check if it is in the range */ - bng+ fndbat /* Yup, she's a good un... */ - -ckbat1: mfspr r5,dbat1u /* Get the virtual address and length */ - eqv r8,r8,r8 /* Get all foxes */ - rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */ - rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */ - beq- ckbat2 /* not valid, skip this one... */ - sub r7,r4,r7 /* Subtract out the base */ - rlwimi r8,r5,15,0,14 /* Get area length - 1 */ - mfspr r6,dbat1l /* Get the real part */ - cmplw r7,r8 /* Check if it is in the range */ - bng+ fndbat /* Yup, she's a good un... */ - -ckbat2: mfspr r5,dbat2u /* Get the virtual address and length */ - eqv r8,r8,r8 /* Get all foxes */ - rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */ - rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */ - beq- ckbat3 /* not valid, skip this one... */ - sub r7,r4,r7 /* Subtract out the base */ - rlwimi r8,r5,15,0,14 /* Get area length - 1 */ - mfspr r6,dbat2l /* Get the real part */ - cmplw r7,r8 /* Check if it is in the range */ - bng- fndbat /* Yup, she's a good un... */ - -ckbat3: mfspr r5,dbat3u /* Get the virtual address and length */ - eqv r8,r8,r8 /* Get all foxes */ - rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */ - rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */ - beq- notkernsp /* not valid, skip this one... */ - sub r7,r4,r7 /* Subtract out the base */ - rlwimi r8,r5,15,0,14 /* Get area length - 1 */ - mfspr r6,dbat3l /* Get the real part */ - cmplw r7,r8 /* Check if it is in the range */ - bgt+ notkernsp /* No good... */ - -fndbat: rlwinm r6,r6,0,0,14 /* Clean up the real address */ - mtmsr r10 /* Restore state */ - add r3,r7,r6 /* Relocate the offset to real */ - isync /* Purge pipe */ - blr /* Bye, bye... */ - -notkernsp: mfspr r5,sdr1 /* Get hash table base and size */ - rlwimi r11,r4,30,2,5 /* Insert the segment no. 
to make a VSID */ - rlwimi r12,r5,16,0,15 /* Make table size -1 out of mask */ - rlwinm r7,r4,26,10,25 /* Isolate the page index */ - andc r5,r5,r12 /* Clean up the hash table */ - xor r7,r7,r11 /* Get primary hash */ - rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */ - and r7,r7,r12 /* Wrap the hash */ - rlwimi r11,r4,10,26,31 /* Move API into pte ID */ - add r5,r7,r5 /* Point to the PTEG */ - oris r11,r11,0x8000 /* Slam on valid bit so's we don't match an invalid one */ - - li r9,8 /* Get the number of PTEs to check */ - lwz r6,0(r5) /* Preload the virtual half */ - -fndpte: subi r9,r9,1 /* Count the pte */ - lwz r3,4(r5) /* Get the real half */ - cmplw cr1,r6,r11 /* Is this what we want? */ - lwz r6,8(r5) /* Start to get the next virtual half */ - mr. r9,r9 /* Any more to try? */ - addi r5,r5,8 /* Bump to next slot */ - beq cr1,gotxlate /* We found what we were looking for... */ - bne+ fndpte /* Go try the next PTE... */ - - mtmsr r10 /* Restore state */ - li r3,0 /* Show failure */ - isync /* Purge pipe */ - blr /* Leave... */ - -gotxlate: mtmsr r10 /* Restore state */ - rlwimi r3,r4,0,20,31 /* Cram in the page displacement */ - isync /* Purge pipe */ - blr /* Return... */ +; Function 12 - Set change in mapping + .set .,hwpOpBase+(12*128) ; Generate error if previous function too long +hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping + ori r5,r5,lo16(mpC) ; Set change in PTE low + ori r8,r8,lo16(mpC) ; Set chage in mapping + cmpw r0,r0 ; Make sure we return CR0_EQ + stw r8,mpVAddr+4(r31) ; Set the flag part of mapping + blr ; Return... -/* - * struct blokmap *hw_add_blk(pmap_t pmap, struct blokmap *bmr) - * - * This is used to add a block mapping entry to the MRU list whose top - * node is anchored at bmaps. This is a real address and is also used as - * the lock. - * - * Overlapping areas are not allowed. If we find one, we return it's address and - * expect the upper layers to panic. We only check this for a debug build... - * - */ +; Function 13 - Test reference in physent - .align 5 - .globl EXT(hw_add_blk) + .set .,hwpOpBase+(13*128) ; Generate error if previous function too long + +hwpTRefPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent + rlwinm. r0,r0,0,ppRb-32,ppRb-32 ; Isolate reference bit and see if 0 + blr ; Return (CR0_EQ set to continue if reference is off)... -LEXT(hw_add_blk) - mfsprg r9,2 ; Get feature flags - lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation - mfmsr r0 /* Save the MSR */ - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ - mtcrf 0x04,r9 ; Set the features - xor r3,r3,r6 ; Get real address of bmap anchor - rlwinm r12,r12,0,28,25 /* Clear IR and DR */ - la r3,PMAP_BMAPS(r3) ; Point to bmap header - - bt pfNoMSRirb,habNoMSR ; No MSR... +; Function 14 - Test reference in mapping - mtmsr r12 ; Translation and all off - isync ; Toss prefetch - b habNoMSRx - -habNoMSR: - mr r9,r0 - mr r8,r3 - li r0,loadMSR ; Get the MSR setter SC - mr r3,r12 ; Get new MSR - sc ; Set it - mr r3,r8 - mr r0,r9 -habNoMSRx: + .set .,hwpOpBase+(14*128) ; Generate error if previous function too long -abLck: lwarx r9,0,r3 ; Get the block map anchor and lock - rlwinm. r8,r9,0,31,31 ; Is it locked? - ori r8,r9,1 ; Set the lock - bne- abLckw ; Yeah... - stwcx. r8,0,r3 ; Lock the bmap list - bne- abLck ; Someone else was trying, try again... - b abSXg ; All done... - - .align 4 +hwpTRefMap: rlwinm. 
r0,r5,0,mpRb-32,mpRb-32 ; Isolate reference bit and see if 0
+ blr ; Return (CR0_EQ set to continue if reference is off)...
+
+; Function 15 - Test change in physent

-abLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held
- beq+ abLck ; Not no more...
- lwz r9,0(r3) ; Get lock word again...
- b abLckw ; Check it out...

+ .set .,hwpOpBase+(15*128) ; Generate error if previous function too long

- .align 5

+hwpTCngPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
+ rlwinm. r0,r0,0,ppCb-32,ppCb-32 ; Isolate change bit and see if 0
+ blr ; Return (CR0_EQ set to continue if change is off)...
+
+
+; Function 16 - Test change in mapping
+
+ .set .,hwpOpBase+(16*128) ; Generate error if previous function too long

- nop ; Force ISYNC to last instruction in IFETCH
- nop

+hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32 ; Isolate change bit and see if 0
+ blr ; Return (CR0_EQ set to continue if change is off)...
+
+ .set .,hwpOpBase+(17*128) ; Generate error if previous function too long
+

-abSXg: rlwinm r11,r9,0,0,26 ; Clear out flags and lock
- isync ; Make sure we have not used anything yet
 ;
+; int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping.
+;
+; Returns:
+; mapRtOK - if all is ok
+; mapRtBadLk - if mapping lock fails
+; mapRtPerm - if mapping is permanent
+; mapRtNotFnd - if mapping is not found
+; mapRtBlock - if mapping is a block
 ;
-;
+ .align 5
+ .globl EXT(hw_protect)

- lwz r7,bmstart(r4) ; Get start
- lwz r8,bmend(r4) ; Get end
- mr r2,r11 ; Get chain
-
-abChk: mr. r10,r2 ; End of chain?
- beq abChkD ; Yes, chain is ok...
- lwz r5,bmstart(r10) ; Get start of current area
- lwz r6,bmend(r10) ; Get end of current area
-
- cmplw cr0,r8,r5 ; Is the end of the new before the old?
- cmplw cr1,r8,r6 ; Is the end of the new after the old?
- cmplw cr6,r6,r7 ; Is the end of the old before the new?
- cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in old
- cmplw cr7,r6,r8 ; Is the end of the old after the new?
- lwz r2,bmnext(r10) ; Get pointer to the next
- cror cr6_eq,cr6_lt,cr7_gt ; Set cr2_eq if old not in new
- crand cr1_eq,cr1_eq,cr6_eq ; Set cr1_eq if no overlap
- beq+ cr1,abChk ; Ok check the next...
-
- lwz r8,blkFlags(r10) ; Get the flags
- rlwinm. r8,r8,0,blkRembit,blkRembit ; Check the blkRem bit
- beq abRet ; Is the mapping partially removed
- ori r10,r10,2 ; Indicate that this block is partially removed
-abRet:
- stw r9,0(r3) ; Unlock
- mtmsr r0 ; Restore xlation and rupts
- mr r3,r10 ; Pass back the overlap
- isync ;
- blr ; Return...

+LEXT(hw_protect)
+ stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
+ mflr r0 ; Save the link register
+ stw r24,FM_ARG0+0x00(r1) ; Save a register
+ stw r25,FM_ARG0+0x04(r1) ; Save a register
+ mr r25,r7 ; Remember address of next va
+ stw r26,FM_ARG0+0x08(r1) ; Save a register
+ stw r27,FM_ARG0+0x0C(r1) ; Save a register
+ stw r28,FM_ARG0+0x10(r1) ; Save a register
+ mr r24,r6 ; Save the new protection flags
+ stw r29,FM_ARG0+0x14(r1) ; Save a register
+ stw r30,FM_ARG0+0x18(r1) ; Save a register
+ stw r31,FM_ARG0+0x1C(r1) ; Save a register
+ stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

-abChkD: stw r11,bmnext(r4) ; Chain this on in
- rlwimi r4,r9,0,27,31 ; Copy in locks and flags
- sync ; Make sure that is done
-
- stw r4,0(r3) ; Unlock and chain the new first one
- mtmsr r0 ; Restore xlation and rupts
- li r3,0 ; Pass back a no failure return code
- isync
- blr ; Return...
+ lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap + lwz r7,pmapvr+4(r3) ; Get the second part -/* - * struct blokmap *hw_rem_blk(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) - * - * This is used to remove a block mapping entry from the list that - * is anchored at bmaps. bmaps is a virtual address and is also used as - * the lock. - * - * Note that this function clears a single block that contains - * any address within the range sva to eva (inclusive). To entirely - * clear any range, hw_rem_blk must be called repeatedly until it - * returns a 0. - * - * The block is removed from the list and all hash table entries - * corresponding to the mapped block are invalidated and the TLB - * entries are purged. If the block is large, this could take - * quite a while. We need to hash every possible address in the - * range and lock down the PCA. - * - * If we attempt to remove a permanent entry, we will not do it. - * The block address will be ored with 1 and returned. - * - * - */ + bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit - .align 5 - .globl EXT(hw_rem_blk) + mr r27,r11 ; Remember the old MSR + mr r26,r12 ; Remember the feature bits -LEXT(hw_rem_blk) + xor r28,r3,r7 ; Change the common 32- and 64-bit half - mfsprg r9,2 ; Get feature flags - lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation - mfmsr r0 /* Save the MSR */ - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ - mtcrf 0x04,r9 ; Set the features - xor r3,r3,r6 ; Get real address of bmap anchor - rlwinm r12,r12,0,28,25 /* Clear IR and DR */ - la r3,PMAP_BMAPS(r3) ; Point to the bmap chain head + bf-- pf64Bitb,hpSF1 ; skip if 32-bit... + + rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top - bt pfNoMSRirb,hrbNoMSR ; No MSR... +hpSF1: mr r29,r4 ; Save top half of vaddr + mr r30,r5 ; Save the bottom half + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkShared ; Go get a shared lock on the mapping lists + mr. r3,r3 ; Did we get the lock? + bne-- hpBadLock ; Nope... - mtmsr r12 ; Translation and all off - isync ; Toss prefetch - b hrbNoMSRx + mr r3,r28 ; get the pmap address + mr r4,r29 ; Get bits 0:31 to look for + mr r5,r30 ; Get bits 32:64 -hrbNoMSR: - mr r9,r0 - mr r8,r3 - li r0,loadMSR ; Get the MSR setter SC - mr r3,r12 ; Get new MSR - sc ; Set it - mr r3,r8 - mr r0,r9 -hrbNoMSRx: - li r7,0 - cmp cr5,r0,r7 ; Request to invalidate the ptes - b rbLck + bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags) -rbunlink: - lwz r4,bmstart(r10) ; Get start of current mapping - lwz r5,bmend(r10) ; Get end of current mapping - cmp cr5,r3,r3 ; Request to unlink the mapping - -rbLck: lwarx r9,0,r3 ; Get the block map anchor and lock - rlwinm. r8,r9,0,31,31 ; Is it locked? - ori r8,r9,1 ; Set the lock - bne- rbLckw ; Yeah... - stwcx. r8,0,r3 ; Lock the bmap list - bne- rbLck ; Someone else was trying, try again... - b rbSXg ; All done... + andi. r7,r7,lo16(mpSpecial|mpNest|mpPerm|mpBlock|mpRIP) ; Are we allowed to change it or is it being removed? + mr. r31,r3 ; Save the mapping if we found it + cmplwi cr1,r7,0 ; Anything special going on? + mr r29,r4 ; Save next va high half + mr r30,r5 ; Save next va low half - .align 4 + beq-- hpNotFound ; Not found... -rbLckw: rlwinm. r11,r9,0,31,31 ; Check if it is still held - beq+ rbLck ; Not no more... 
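The abLck spin loop above, like the rbLck and cbLck loops elsewhere in the block-map code this patch removes, treats the anchor word as both the list head pointer and the lock: mappings are aligned, so bit 31 (the low bit) is free to serve as the lock bit. A sketch of the same idea using GCC atomic builtins (names hypothetical; the original uses lwarx/stwcx. directly):

    #include <stdint.h>

    static uintptr_t bmap_anchor;             /* head pointer, low bit = lock */

    static uintptr_t bmap_lock(void)
    {
        uintptr_t old = __atomic_load_n(&bmap_anchor, __ATOMIC_RELAXED);
        for (;;) {
            while (old & 1)                   /* held: reread until clear */
                old = __atomic_load_n(&bmap_anchor, __ATOMIC_RELAXED);
            if (__atomic_compare_exchange_n(&bmap_anchor, &old, old | 1, 0,
                                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                return old;                   /* head pointer, lock bit clear */
        }
    }

    static void bmap_unlock(uintptr_t new_head)
    {
        /* The release order plays the role of the sync before the plain
         * store in the assembly version. */
        __atomic_store_n(&bmap_anchor, new_head & ~(uintptr_t)1,
                         __ATOMIC_RELEASE);
    }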
- lwz r9,0(r3) ; Get lock word again... - b rbLckw ; Check it out... + bne-- cr1,hpNotAllowed ; Something special is happening... - .align 5 + bt++ pf64Bitb,hpDo64 ; Split for 64 bit + + bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent + + rlwimi r5,r24,0,mpPPb-32,mpPPb-32+2 ; Stick in the new pp + mr. r3,r3 ; Was there a previously valid PTE? + + stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest) + + beq-- hpNoOld32 ; Nope... - nop ; Force ISYNC to last instruction in IFETCH - nop + stw r5,4(r3) ; Store second half of PTE + eieio ; Make sure we do not reorder + stw r4,0(r3) ; Revalidate the PTE + + eieio ; Make sure all updates come first + stw r6,0(r7) ; Unlock PCA + +hpNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list -rbSXg: rlwinm. r2,r9,0,0,26 ; Clear out flags and lock - mr r10,r3 ; Keep anchor as previous pointer - isync ; Make sure we have not used anything yet + li r3,mapRtOK ; Set normal return + b hpR32 ; Join common... + + .align 5 - beq- rbMT ; There is nothing in the list -rbChk: mr r12,r10 ; Save the previous - mr. r10,r2 ; End of chain? - beq rbMT ; Yes, nothing to do... - lwz r11,bmstart(r10) ; Get start of current area - lwz r6,bmend(r10) ; Get end of current area +hpDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent + + rldimi r5,r24,0,mpPPb ; Stick in the new pp + mr. r3,r3 ; Was there a previously valid PTE? + + stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest) + + beq-- hpNoOld64 ; Nope... - cmplw cr0,r5,r11 ; Is the end of range before the start of the area? - cmplw cr1,r4,r6 ; Is the start of range after the end of the area? - cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range - lwz r2,bmnext(r10) ; Get the next one - beq+ cr1,rbChk ; Not this one, check the next... + std r5,8(r3) ; Store second half of PTE + eieio ; Make sure we do not reorder + std r4,0(r3) ; Revalidate the PTE - cmplw cr1,r12,r3 ; Is the current mapping the first one? + eieio ; Make sure all updates come first + stw r6,0(r7) ; Unlock PCA - bne cr5,rbblkRem ; Do we have to unchain the mapping +hpNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list - bne cr1,rbnFirst ; Yes, is this the first mapping? - rlwimi r9,r2,0,0,26 ; Yes, Change the lock value - ori r2,r9,1 ; Turn on the lock bit -rbnFirst: - stw r2,bmnext(r12) ; Unchain us - sync - b rbDone + li r3,mapRtOK ; Set normal return + b hpR64 ; Join common... -rbblkRem: - - lwz r8,blkFlags(r10) ; Get the flags + .align 5 + +hpReturn: bt++ pf64Bitb,hpR64 ; Yes... + +hpR32: mtmsr r27 ; Restore enables/translation/etc. + isync + b hpReturnC ; Join common... + +hpR64: mtmsrd r27 ; Restore enables/translation/etc. + isync + +hpReturnC: stw r29,0(r25) ; Save the top of the next va + stw r30,4(r25) ; Save the bottom of the next va + lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return + lwz r24,FM_ARG0+0x00(r1) ; Save a register + lwz r25,FM_ARG0+0x04(r1) ; Save a register + lwz r26,FM_ARG0+0x08(r1) ; Save a register + mtlr r0 ; Restore the return + lwz r27,FM_ARG0+0x0C(r1) ; Save a register + lwz r28,FM_ARG0+0x10(r1) ; Save a register + lwz r29,FM_ARG0+0x14(r1) ; Save a register + lwz r30,FM_ARG0+0x18(r1) ; Save a register + lwz r31,FM_ARG0+0x1C(r1) ; Save a register + lwz r1,0(r1) ; Pop the stack + blr ; Leave... + + .align 5 + +hpBadLock: li r3,mapRtBadLk ; Set lock time out error code + b hpReturn ; Leave.... - rlwinm. 
r7,r8,0,blkPermbit,blkPermbit ; is this a permanent block? +hpNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list - bne- rbPerm ; This is permanent, do not remove... + li r3,mapRtNotFnd ; Set that we did not find the requested page + b hpReturn ; Leave.... + +hpNotAllowed: + rlwinm. r0,r7,0,mpRIPb,mpRIPb ; Is it actually being removed? + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bne-- hpNotFound ; Yeah... + bl sxlkUnlock ; Unlock the search list + + li r3,mapRtBlock ; Assume it was a block + andi. r7,r7,lo16(mpBlock) ; Is this a block? + bne++ hpReturn ; Yes, leave... + + li r3,mapRtPerm ; Set that we hit a permanent page + b hpReturn ; Leave.... - rlwinm. r7,r8,0,blkRembit,blkRembit ; is this mapping partially removed - beq rbblkRemcont ; If not, check the max size - lwz r11,bmcurrent(r10) ; If yes, resume for the current page +; +; int hw_test_rc(pmap, va, reset) - tests RC on a specific va +; +; Returns following code ORed with RC from mapping +; mapRtOK - if all is ok +; mapRtBadLk - if mapping lock fails +; mapRtNotFnd - if mapping is not found +; + .align 5 + .globl EXT(hw_test_rc) - cmp cr5,r11,r6 ; No partial remove left - beq cr5, rbpendret ; But there is a pending remove +LEXT(hw_test_rc) + stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack + mflr r0 ; Save the link register + stw r24,FM_ARG0+0x00(r1) ; Save a register + stw r25,FM_ARG0+0x04(r1) ; Save a register + stw r26,FM_ARG0+0x08(r1) ; Save a register + stw r27,FM_ARG0+0x0C(r1) ; Save a register + stw r28,FM_ARG0+0x10(r1) ; Save a register + mr r24,r6 ; Save the reset request + stw r29,FM_ARG0+0x14(r1) ; Save a register + stw r30,FM_ARG0+0x18(r1) ; Save a register + stw r31,FM_ARG0+0x1C(r1) ; Save a register + stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return -rbblkRemcont: - bne rbblkRemcont1 ; Is it the first remove + lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap + lwz r7,pmapvr+4(r3) ; Get the second part - oris r8,r8,hi16(blkRem) ; Yes - stw r8,blkFlags(r10) ; set the blkRem bit in blkFlags -rbblkRemcont1: - lis r5,hi16(BLKREMMAX*4096) ; Load maximun size tear down - ori r5,r5,lo16(BLKREMMAX*4096) ; Load maximun size tear down - sub r7,r6,r11 ; Get the remaining size to tear down - cmp cr5,r7,r5 ; Compare against the maximun size - ble cr5,rbfullblk ; If less or equal, go remove the mapping + bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit - add r7,r11,r5 ; Add the max size tear down to the current page - stw r7,bmcurrent(r10) ; Update the current page - subi r6,r7,1 ; Set the current end of the partial tear down - b rbcont + mr r27,r11 ; Remember the old MSR + mr r26,r12 ; Remember the feature bits -rbfullblk: - stw r6,bmcurrent(r10) ; Update the current page + xor r28,r3,r7 ; Change the common 32- and 64-bit half -rbcont: - lwz r8,bmspace(r10) ; Get the VSID - sync - stw r9,0(r3) ; Unlock and chain the new first one - - eqv r4,r4,r4 ; Fill the bottom with foxes - mfspr r12,sdr1 ; Get hash table base and size - rlwinm r8,r8,6,0,25 ; Align VSID to PTEG - rlwimi r4,r12,16,0,15 ; Make table size - 1 out of mask - andc r12,r12,r4 ; Clean up address of hash table - rlwinm r5,r11,26,6,25 ; Rotate virtual start address into PTEG units - add r12,r12,r4 ; Point to PCA - 1 - rlwinm r6,r6,26,6,25 ; Rotate virtual end address into PTEG units - addi r12,r12,1 ; Point to PCA base - sub r6,r6,r5 ; Get the total number of PTEGs to clear - cmplw r6,r4 ; See if this wraps 
all the way around - blt rbHash ; Nope, length is right - subi r6,r4,32+31 ; Back down to correct length - -rbHash: rlwinm r5,r5,0,10,25 ; Keep only the page index - xor r2,r8,r5 ; Hash into table - and r2,r2,r4 ; Wrap into the table - add r2,r2,r12 ; Point right at the PCA - -rbLcka: lwarx r7,0,r2 ; Get the PTEG lock - mr. r7,r7 ; Is it locked? - bne- rbLckwa ; Yeah... - li r7,1 ; Get the locked value - stwcx. r7,0,r2 ; Take it - bne- rbLcka ; Someone else was trying, try again... - b rbSXga ; All done... - -rbLckwa: mr. r7,r7 ; Check if it is already held - beq+ rbLcka ; It is clear... - lwz r7,0(r2) ; Get lock word again... - b rbLckwa ; Wait... - -rbSXga: isync ; Make sure nothing used yet - lwz r7,PCAallo(r2) ; Get the allocation word - rlwinm. r11,r7,8,0,7 ; Isolate the autogenerated PTEs - or r7,r7,r11 ; Release the autogen slots - beq+ rbAintNone ; There are not any here - mtcrf 0xC0,r11 ; Set the branch masks for autogens - sub r11,r2,r4 ; Move back to the hash table + 1 - rlwinm r7,r7,0,16,7 ; Clear the autogen field - subi r11,r11,1 ; Point to the PTEG - stw r7,PCAallo(r2) ; Update the flags - li r7,0 ; Get an invalid PTE value - - bf 0,rbSlot1 ; No autogen here - stw r7,0x00(r11) ; Invalidate PTE -rbSlot1: bf 1,rbSlot2 ; No autogen here - stw r7,0x08(r11) ; Invalidate PTE -rbSlot2: bf 2,rbSlot3 ; No autogen here - stw r7,0x10(r11) ; Invalidate PTE -rbSlot3: bf 3,rbSlot4 ; No autogen here - stw r7,0x18(r11) ; Invalidate PTE -rbSlot4: bf 4,rbSlot5 ; No autogen here - stw r7,0x20(r11) ; Invalidate PTE -rbSlot5: bf 5,rbSlot6 ; No autogen here - stw r7,0x28(r11) ; Invalidate PTE -rbSlot6: bf 6,rbSlot7 ; No autogen here - stw r7,0x30(r11) ; Invalidate PTE -rbSlot7: bf 7,rbSlotx ; No autogen here - stw r7,0x38(r11) ; Invalidate PTE -rbSlotx: - -rbAintNone: li r7,0 ; Clear this out - sync ; To make SMP happy - addic. r6,r6,-64 ; Decrement the count - stw r7,PCAlock(r2) ; Release the PTEG lock - addi r5,r5,64 ; Move up by adjusted page number - bge+ rbHash ; Not done... - - sync ; Make sure the memory is quiet + bf-- pf64Bitb,htrSF1 ; skip if 32-bit... -; -; Here we take the easy way out and just purge the entire TLB. This is -; certainly faster and definitly easier than blasting just the correct ones -; in the range, we only need one lock and one TLBSYNC. We would hope -; that most blocks are more than 64 pages (256K) and on every machine -; up to Book E, 64 TLBIEs will invalidate the entire table. -; + rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top - li r5,64 ; Get number of TLB entries to purge - lis r12,HIGH_ADDR(EXT(tlb_system_lock)) ; Get the TLBIE lock - li r6,0 ; Start at 0 - ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) ; Grab up the bottom part +htrSF1: mr r29,r4 ; Save top half of vaddr + mr r30,r5 ; Save the bottom half -rbTlbL: lwarx r2,0,r12 ; Get the TLBIE lock - mr. r2,r2 ; Is it locked? - li r2,1 ; Get our lock value - bne- rbTlbL ; It is locked, go wait... - stwcx. r2,0,r12 ; Try to get it - bne- rbTlbL ; We was beat... - -rbTlbN: addic. r5,r5,-1 ; See if we did them all - tlbie r6 ; Invalidate it everywhere - addi r6,r6,0x1000 ; Up to the next page - bgt+ rbTlbN ; Make sure we have done it all... - - mfspr r5,pvr ; Find out what kind of machine we are - li r2,0 ; Lock clear value - - rlwinm r5,r5,16,16,31 ; Isolate CPU type - cmplwi r5,3 ; Is this a 603? - sync ; Make sure all is quiet - beq- rbits603a ; It is a 603, skip the tlbsync... 
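The loop above is the easy-way-out purge the comment describes: sixty-four back-to-back tlbie operations, one per page, cover every TLB congruence class on the processors this code supports, and a single tlbsync then waits for all processors to catch up. A sketch, assuming a GCC-style compiler targeting 32-bit PowerPC, the tlbie lock already held, and a CPU with tlbsync (the 603, which lacks it, is the beq- rbits603a case above):

    /* Purge the whole TLB rather than each mapped page individually. */
    static void tlb_purge_all(void)
    {
        /* 64 page-sized steps hit every TLB congruence class. */
        for (unsigned long ea = 0; ea < 64UL * 4096UL; ea += 4096UL)
            __asm__ volatile ("tlbie %0" : : "r" (ea) : "memory");

        __asm__ volatile ("eieio; tlbsync; sync" : : : "memory");
    }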
+ la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkShared ; Go get a shared lock on the mapping lists + mr. r3,r3 ; Did we get the lock? + li r25,0 ; Clear RC + bne-- htrBadLock ; Nope... + + mr r3,r28 ; get the pmap address + mr r4,r29 ; Get bits 0:31 to look for + mr r5,r30 ; Get bits 32:64 - eieio ; Make sure that the tlbie happens first - tlbsync ; wait for everyone to catch up - isync + bl EXT(mapSearch) ; Go see if we can find it (R7 comes back with mpFlags) -rbits603a: sync ; Wait for quiet again - stw r2,0(r12) ; Unlock invalidates + andi. r0,r7,lo16(mpSpecial|mpNest|mpPerm|mpBlock|mpRIP) ; Are we allowed to change it or is it being removed? + mr. r31,r3 ; Save the mapping if we found it + cmplwi cr1,r0,0 ; Are we removing it? + crorc cr0_eq,cr0_eq,cr1_eq ; Did we not find it or is it being removed? - sync ; Make sure that is done + bt-- cr0_eq,htrNotFound ; Not found, something special, or being removed... - ble cr5,rbunlink ; If all ptes are flush, go unlink the mapping - mtmsr r0 ; Restore xlation and rupts - mr r3,r10 ; Pass back the removed block in progress - ori r3,r3,2 ; Indicate that the block remove isn't completed yet - isync - blr ; Return... - -rbpendret: - stw r9,0(r3) ; Unlock - mtmsr r0 ; Restore xlation and rupts - mr r3,r10 ; Pass back the removed block in progress - ori r3,r3,2 ; Indicate that the block remove isn't completed yet - isync - blr ; Return... + bt++ pf64Bitb,htrDo64 ; Split for 64 bit + + bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent + + cmplwi cr1,r24,0 ; Do we want to clear RC? + lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field + mr. r3,r3 ; Was there a previously valid PTE? + li r0,lo16(mpR|mpC) ; Get bits to clear + and r25,r5,r0 ; Save the RC bits + beq++ cr1,htrNoClr32 ; Nope... + + andc r12,r12,r0 ; Clear mapping copy of RC + andc r5,r5,r0 ; Clear PTE copy of RC + sth r12,mpVAddr+6(r31) ; Set the new RC -rbMT: stw r9,0(r3) ; Unlock - mtmsr r0 ; Restore xlation and rupts - li r3,0 ; Say we did not find one - isync - blr ; Return... +htrNoClr32: beq-- htrNoOld32 ; No previously valid PTE... -rbPerm: stw r9,0(r3) ; Unlock - mtmsr r0 ; Restore xlation and rupts - ori r3,r10,1 ; Say we did not remove it - isync - blr ; Return... + sth r5,6(r3) ; Store updated RC + eieio ; Make sure we do not reorder + stw r4,0(r3) ; Revalidate the PTE -rbDone: stw r9,0(r3) ; Unlock - mtmsr r0 ; Restore xlation and rupts - mr r3,r10 ; Pass back the removed block - isync - blr ; Return... + eieio ; Make sure all updates come first + stw r6,0(r7) ; Unlock PCA -/* - * hw_select_mappings(struct mappingflush *mappingflush) - * - * Input: PCA addr - * Ouput: up to 8 user mappings - * - * hw_select_mappings() scans every PCA mapping hash lists and select - * the last user mapping if it exists. - * - */ +htrNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list + li r3,mapRtOK ; Set normal return + b htrR32 ; Join common... - .align 5 - .globl EXT(hw_select_mappings) + .align 5 + + +htrDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent + + cmplwi cr1,r24,0 ; Do we want to clear RC? + lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field + mr. r3,r3 ; Was there a previously valid PTE? 
+ li r0,lo16(mpR|mpC) ; Get bits to clear -LEXT(hw_select_mappings) - mr r5,r3 ; Get the mapping flush addr - mfmsr r12 ; Get the MSR - rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - mfsprg r9,2 ; Get feature flags - andi. r0,r12,0x7FCF ; Disable translation and interruptions - mtcrf 0x04,r9 ; Set the features - bt pfNoMSRirb,hvmNoMSR ; No MSR... - mtmsr r0 - isync - b hvmNoMSRx -hvmNoMSR: - mr r3,r0 ; Get the new MSR - li r0,loadMSR ; Get the MSR setter SC - sc -hvmNoMSRx: - mr r0,r12 - li r11,1 ; Get the locked value - -hvmptegLckx: - lwz r3,MFpcaptr(r5) ; Get the PCA pointer - lwarx r10,0,r3 ; Get the PTEG lock - mr. r10,r10 ; Is it locked? - bne- hvmptegLckwx ; Yeah... - stwcx. r11,0,r3 ; Take take it - bne- hvmptegLckx ; Someone else was trying, try again... - b hvmptegSXgx ; All done... - - .align 4 - -hvmptegLckwx: - mr. r10,r10 ; Check if it is already held - beq+ hvmptegLckx ; It's clear... - lwz r10,0(r3) ; Get lock word again... - b hvmptegLckwx ; Wait... - - .align 4 - -hvmptegSXgx: - isync ; Make sure we haven't used anything yet - - li r11,8 ; set count to 8 - - lwz r6,PCAhash(r3) ; load the first mapping hash list - la r12,PCAhash(r3) ; Point to the mapping hash area - la r4,MFmapping(r5) ; Point to the mapping flush mapping area - li r7,0 ; Load zero - stw r7,MFmappingcnt(r5) ; Set the current count to 0 -hvmnexthash: - li r10,0 ; Mapping test - -hvmfindmap: - mr. r6,r6 ; Test if the hash list current pointer is zero - beq hvmfindmapret ; Did we hit the end of the hash list - lwz r7,mmPTEv(r6) ; Pick up our virtual ID - rlwinm r8,r7,5,0,19 ; Pick VSID 20 lower bits - mr. r8,r8 - beq hvmfindmapnext ; Skip Kernel VSIDs - rlwinm r8,r7,1,0,3 ; Extract the Segment index - rlwinm r9,r7,22,4,9 ; Extract API 6 upper bits - or r8,r8,r9 ; Add to the virtual address - rlwinm r9,r7,31,6,25 ; Pick VSID 19 lower bits - xor r9,r9,r3 ; Exclusive or with the PCA address - rlwinm r9,r9,6,10,19 ; Extract API 10 lower bits - or r8,r8,r9 ; Add to the virtual address - - stw r8,4(r4) ; Store the virtual address - lwz r8,mmpmap(r6) ; Get the pmap - stw r8,0(r4) ; Store the pmap - li r10,1 ; Found one - -hvmfindmapnext: - lwz r6,mmhashnext(r6) ; Pick up next mapping block - b hvmfindmap ; Scan the next mapping -hvmfindmapret: - mr. r10,r10 ; Found mapping - beq hvmnexthashprep ; If not, do not update the mappingflush array - lwz r7,MFmappingcnt(r5) ; Get the current count - addi r7,r7,1 ; Increment the current count - stw r7,MFmappingcnt(r5) ; Store the current count - addi r4,r4,MFmappingSize ; Point to the next mapping flush entry -hvmnexthashprep: - addi r12,r12,4 ; Load the next hash list - lwz r6,0(r12) ; Load the next hash list entry - subi r11,r11,1 ; Decrement hash list index - mr. r11,r11 ; Test for a remaining hash list - bne hvmnexthash ; Loop to scan the next hash list - - li r10,0 - stw r10,0(r3) ; Unlock the hash list - mtmsr r0 ; Restore translation and interruptions - isync - blr + and r25,r5,r0 ; Save the RC bits + beq++ cr1,htrNoClr64 ; Nope... + + andc r12,r12,r0 ; Clear mapping copy of RC + andc r5,r5,r0 ; Clear PTE copy of RC + sth r12,mpVAddr+6(r31) ; Set the new RC -/* - * vm_offset_t hw_cvp_blk(pmap_t pmap, vm_offset_t va) - * - * This is used to translate a virtual address within a block mapping entry - * to a physical address. If not found, 0 is returned. - * - */ +htrNoClr64: beq-- htrNoOld64 ; Nope, no pevious pte... 
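What both halves of hw_test_rc are doing here, in C terms, is capturing the R and C bits and, when a reset was requested, clearing them in both the mapping's copy and the live PTE's copy (the paired andc instructions above). A minimal sketch with illustrative bit values:

    #include <stdint.h>

    #define mpR 0x08u                /* referenced -- illustrative positions */
    #define mpC 0x04u                /* changed */

    /* Capture and optionally clear RC; the real code applies this to both
     * the mapping copy and the PTE copy while the PTEG is locked. */
    static uint32_t test_rc(uint32_t *rc_word, int reset)
    {
        uint32_t rc = *rc_word & (mpR | mpC);
        if (reset)
            *rc_word &= ~(mpR | mpC);
        return rc;                   /* OR'd into the mapRt* return code later */
    }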
+ + sth r5,14(r3) ; Store updated RC + eieio ; Make sure we do not reorder + std r4,0(r3) ; Revalidate the PTE - .align 5 - .globl EXT(hw_cvp_blk) + eieio ; Make sure all updates come first + stw r6,0(r7) ; Unlock PCA -LEXT(hw_cvp_blk) +htrNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list + li r3,mapRtOK ; Set normal return + b htrR64 ; Join common... - mfsprg r9,2 ; Get feature flags - lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation - mfmsr r0 /* Save the MSR */ - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ - mtcrf 0x04,r9 ; Set the features - xor r3,r3,r6 ; Get real address of bmap anchor - rlwinm r12,r12,0,28,25 /* Clear IR and DR */ - la r3,PMAP_BMAPS(r3) ; Point to chain header + .align 5 + +htrReturn: bt++ pf64Bitb,htrR64 ; Yes... - bt pfNoMSRirb,hcbNoMSR ; No MSR... +htrR32: mtmsr r27 ; Restore enables/translation/etc. + isync + b htrReturnC ; Join common... - mtmsr r12 ; Translation and all off - isync ; Toss prefetch - b hcbNoMSRx +htrR64: mtmsrd r27 ; Restore enables/translation/etc. + isync -hcbNoMSR: - mr r9,r0 - mr r8,r3 - li r0,loadMSR ; Get the MSR setter SC - mr r3,r12 ; Get new MSR - sc ; Set it - mr r3,r8 - mr r0,r9 -hcbNoMSRx: - -cbLck: lwarx r9,0,r3 ; Get the block map anchor and lock - rlwinm. r8,r9,0,31,31 ; Is it locked? - ori r8,r9,1 ; Set the lock - bne- cbLckw ; Yeah... - stwcx. r8,0,r3 ; Lock the bmap list - bne- cbLck ; Someone else was trying, try again... - b cbSXg ; All done... - - .align 4 - -cbLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held - beq+ cbLck ; Not no more... - lwz r9,0(r3) ; Get lock word again... - b cbLckw ; Check it out... - - .align 5 - - nop ; Force ISYNC to last instruction in IFETCH - nop - nop - nop - nop - -cbSXg: rlwinm. r11,r9,0,0,26 ; Clear out flags and lock - li r2,0 ; Assume we do not find anything - isync ; Make sure we have not used anything yet - -cbChk: mr. r11,r11 ; Is there more? - beq- cbDone ; No more... - lwz r5,bmstart(r11) ; Get the bottom of range - lwz r12,bmend(r11) ; Get the top of range - cmplw cr0,r4,r5 ; Are we before the entry? - cmplw cr1,r4,r12 ; Are we after of the entry? - cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range - beq- cr1,cbNo ; We are not in the range... - - lwz r2,bmPTEr(r11) ; Get the real part of the PTE - sub r5,r4,r5 ; Get offset into area - rlwinm r2,r2,0,0,19 ; Clean out everything but the page - add r2,r2,r5 ; Adjust the real address - -cbDone: stw r9,0(r3) ; Unlock it, we are done with it (no sync needed) - mtmsr r0 ; Restore translation and interrupts... - isync ; Make sure it is on - mr r3,r2 ; Set return physical address +htrReturnC: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return + or r3,r3,r25 ; Send the RC bits back + lwz r24,FM_ARG0+0x00(r1) ; Save a register + lwz r25,FM_ARG0+0x04(r1) ; Save a register + lwz r26,FM_ARG0+0x08(r1) ; Save a register + mtlr r0 ; Restore the return + lwz r27,FM_ARG0+0x0C(r1) ; Save a register + lwz r28,FM_ARG0+0x10(r1) ; Save a register + lwz r29,FM_ARG0+0x14(r1) ; Save a register + lwz r30,FM_ARG0+0x18(r1) ; Save a register + lwz r31,FM_ARG0+0x1C(r1) ; Save a register + lwz r1,0(r1) ; Pop the stack blr ; Leave... .align 5 -cbNo: lwz r11,bmnext(r11) ; Link next - b cbChk ; Check it out... +htrBadLock: li r3,mapRtBadLk ; Set lock time out error code + b htrReturn ; Leave.... 
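The sth/eieio/stw (and std) sequences above follow the required order for putting back a PTE that mapInvPte32/64 took out: the half without the valid bit is stored first, a fence follows, then the half carrying the valid bit, then another fence before the PCA unlock store. A sketch of the 32-bit case (field layout illustrative, eieio via inline asm, PowerPC target assumed):

    #include <stdint.h>

    typedef struct {
        uint32_t w0;                 /* VSID, API, and the valid bit */
        uint32_t w1;                 /* RPN, R, C, WIMG, PP */
    } pte32_t;

    #define PTE_VALID 0x80000000u

    static inline void eieio(void) { __asm__ volatile ("eieio" ::: "memory"); }

    /* Revalidate an invalidated PTE: data word, fence, valid word, fence. */
    static void pte_revalidate(volatile pte32_t *pte, uint32_t w0, uint32_t w1)
    {
        pte->w1 = w1;                /* not yet observable by the table walk */
        eieio();
        pte->w0 = w0 | PTE_VALID;    /* now the hardware may use the entry */
        eieio();                     /* order before the PCA unlock store */
    }

The common exit then packs the result: the or r3,r3,r25 above returns one of the mapRt* codes with the sampled RC bits OR'd on top, so a caller can mask the two fields apart.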
+htrNotFound: + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list -/* - * hw_set_user_space(pmap) - * hw_set_user_space_dis(pmap) - * - * Indicate whether memory space needs to be switched. - * We really need to turn off interrupts here, because we need to be non-preemptable + li r3,mapRtNotFnd ; Set that we did not find the requested page + b htrReturn ; Leave.... + + + +; +; mapPhysFindLock - find physent list and lock it +; R31 points to mapping +; + .align 5 + +mapPhysFindLock: + lbz r4,mpFlags+1(r31) ; Get the index into the physent bank table + lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part) + rlwinm r4,r4,2,0,29 ; Change index into byte offset + addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry + add r3,r3,r4 ; Point to table entry + lwz r5,mpPAddr(r31) ; Get physical page number + lwz r7,mrStart(r3) ; Get the start of range + lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank + sub r6,r5,r7 ; Get index to physent + rlwinm r6,r6,3,0,28 ; Get offset to physent + add r3,r3,r6 ; Point right to the physent + b mapPhysLock ; Join in the lock... + +; +; mapPhysLock - lock a physent list +; R3 contains list header +; + .align 5 + +mapPhysLockS: + li r2,lgKillResv ; Get a spot to kill reservation + stwcx. r2,0,r2 ; Kill it... + +mapPhysLockT: + lwz r2,ppLink(r3) ; Get physent chain header + rlwinm. r2,r2,0,0,0 ; Is lock clear? + bne-- mapPhysLockT ; Nope, still locked... + +mapPhysLock: + lwarx r2,0,r3 ; Get the lock + rlwinm. r0,r2,0,0,0 ; Is it locked? + oris r0,r2,0x8000 ; Set the lock bit + bne-- mapPhysLockS ; It is locked, spin on it... + stwcx. r0,0,r3 ; Try to stuff it back... + bne-- mapPhysLock ; Collision, try again... + isync ; Clear any speculations + blr ; Leave... + + +; +; mapPhysUnlock - unlock a physent list +; R3 contains list header +; + .align 5 + +mapPhysUnlock: + lwz r0,ppLink(r3) ; Get physent chain header + rlwinm r0,r0,0,1,31 ; Clear the lock bit + eieio ; Make sure unlock comes last + stw r0,ppLink(r3) ; Unlock the list + blr + +; +; mapPhysMerge - merge the RC bits into the master copy +; R3 points to the physent +; R4 contains the RC bits +; +; Note: we just return if RC is 0 +; + .align 5 + +mapPhysMerge: + rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits + la r5,ppLink+4(r3) ; Point to the RC field + beqlr-- ; Leave if RC is 0... + +mapPhysMergeT: + lwarx r6,0,r5 ; Get the RC part + or r6,r6,r4 ; Merge in the RC + stwcx. r6,0,r5 ; Try to stuff it back... + bne-- mapPhysMergeT ; Collision, try again... + blr ; Leave... + +; +; Sets the physent link pointer and preserves all flags +; The list is locked +; R3 points to physent +; R4 has link to set +; + + .align 5 + +mapPhyCSet32: + la r5,ppLink+4(r3) ; Point to the link word + +mapPhyCSetR: + lwarx r2,0,r5 ; Get the link and flags + rlwimi r4,r2,0,26,31 ; Insert the flags + stwcx. r4,0,r5 ; Stick them back + bne-- mapPhyCSetR ; Someone else did something, try again... + blr ; Return... + + .align 5 + +mapPhyCSet64: + li r0,0xFF ; Get mask to clean up mapping pointer + rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC0000000000000003F + +mapPhyCSet64x: + ldarx r2,0,r3 ; Get the link and flags + and r5,r2,r0 ; Isolate the flags + or r6,r4,r5 ; Add them to the link + stdcx. r6,0,r3 ; Stick them back + bne-- mapPhyCSet64x ; Someone else did something, try again... + blr ; Return... 
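The physent ppLink word manipulated by the routines above does triple duty: a lock bit at the top, the mapping chain pointer in the middle, and flag bits at both ends (hence the 0xC0000000000000003F clean-up mask in mapPhyCSet64). Two of the operations, sketched with GCC atomics (bit values illustrative; the kernel uses lwarx/ldarx loops):

    #include <stdint.h>

    #define PP_TOP  0xC000000000000000ULL   /* lock plus a high flag bit */
    #define PP_LOW  0x000000000000003FULL   /* low flag bits */

    /* mapPhysMerge: fold captured RC bits into the master copy, skipping
     * the atomic entirely when there is nothing to merge. */
    static void phys_merge_rc(uint64_t *link, uint64_t rc_bits)
    {
        if (rc_bits != 0)
            __atomic_fetch_or(link, rc_bits, __ATOMIC_RELAXED);
    }

    /* mapPhyCSet: swap in a new chain pointer, preserving lock and flags. */
    static void phys_set_link(uint64_t *link, uint64_t new_ptr)
    {
        uint64_t old = __atomic_load_n(link, __ATOMIC_RELAXED);
        uint64_t wanted;
        do {
            wanted = (old & (PP_TOP | PP_LOW)) | new_ptr;
        } while (!__atomic_compare_exchange_n(link, &old, wanted, 0,
                                              __ATOMIC_RELEASE,
                                              __ATOMIC_RELAXED));
    }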
+
+;
+; mapBumpBusy - increment the busy count on a mapping
+; R3 points to mapping
+;
+
+ .align 5
+
+mapBumpBusy:
+ lwarx r4,0,r3 ; Get mpBusy
+ addis r4,r4,0x0100 ; Bump the busy count
+ stwcx. r4,0,r3 ; Save it back
+ bne-- mapBumpBusy ; This did not work, try again...
+ blr ; Leave...
+
+;
+; mapDropBusy - decrement the busy count on a mapping
+; R3 points to mapping
+;
+
+ .globl EXT(mapping_drop_busy)
+ .align 5
+
+LEXT(mapping_drop_busy)
+mapDropBusy:
+ lwarx r4,0,r3 ; Get mpBusy
+ addis r4,r4,0xFF00 ; Drop the busy count
+ stwcx. r4,0,r3 ; Save it back
+ bne-- mapDropBusy ; This did not work, try again...
+ blr ; Leave...
+
+;
+; mapDrainBusy - drain the busy count on a mapping
+; R3 points to mapping
+; Note: we already have a busy for ourselves. Only one
+; busy per processor is allowed, so we just spin here
+; waiting for the count to drop to 1.
+; Also, the mapping can not be on any lists when we do this
+; so all we are doing is waiting until it can be released.
+;
+
+ .align 5
+
+mapDrainBusy:
+ lwz r4,mpFlags(r3) ; Get mpBusy
+ rlwinm r4,r4,8,24,31 ; Clean it up
+ cmplwi r4,1 ; Is it just our busy?
+ beqlr++ ; Yeah, it is clear...
+ b mapDrainBusy ; Try again...
+
+
+
+;
+; handleDSeg - handle a data segment fault
+; handleISeg - handle an instruction segment fault
+;
+; All that we do here is to map these to DSI or ISI and ensure
+; that the hash bit is not set. This forces the fault code
+; to also handle the missing segment.
+;
+; At entry R2 contains per_proc, R13 contains savarea pointer,
+; and R11 is the exception code.
+;
+
+ .align 5
+ .globl EXT(handleDSeg)
+
+LEXT(handleDSeg)
+
+ li r11,T_DATA_ACCESS ; Change fault to DSI
+ stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
+ b EXT(handlePF) ; Join common...
+
+ .align 5
+ .globl EXT(handleISeg)
+
+LEXT(handleISeg)
+
+ li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI
+ stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
+ b EXT(handlePF) ; Join common...
+
+
+/*
+ * handlePF - handle a page fault interruption
+ *
+ * At entry R2 contains per_proc, R13 contains savarea pointer,
+ * and R11 is the exception code.
+ *
+ * This first part does a quick check to see if we can handle the fault.
+ * We cannot handle any kind of protection exceptions here, so we pass
+ * them up to the next level.
+ *
+ * NOTE: In order for a page-fault redrive to work, the translation miss
+ * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
+ * before we come here.
+ */
+
+ .align 5
+ .globl EXT(handlePF)
+
+LEXT(handlePF)
+
+ mfsprg r12,2 ; Get feature flags
+ cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction
+ lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode
+ mtcrf 0x02,r12 ; move pf64Bit to cr6
+ lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here
+ lwz r18,SAVflags(r13) ; Get the flags
+
+ beq-- gotIfetch ; We have an IFETCH here...
+
+ lwz r27,savedsisr(r13) ; Get the DSISR
+ lwz r29,savedar(r13) ; Get the first half of the DAR
+ lwz r30,savedar+4(r13) ; And second half
+
+ b ckIfProt ; Go check if this is a protection fault...
+
+gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value
+ lwz r29,savesrr0(r13) ; Get the first half of the instruction address
+ lwz r30,savesrr0+4(r13) ; And second half
+ stw r27,savedsisr(r13) ; Save the "constructed" DSISR
+
+ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception?
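The mapBumpBusy/mapDropBusy/mapDrainBusy routines above keep the busy count in the top byte of the 32-bit mpFlags word, so a single atomic add of 0x01000000 (the addis of 0x0100 or 0xFF00) adjusts the count without disturbing the flag bits, and draining is just spinning until the extracted byte falls to our own single busy. The same logic with GCC atomics (field name hypothetical):

    #include <stdint.h>

    #define MP_BUSY_ONE 0x01000000u  /* one count, in the top byte of mpFlags */

    static void map_bump_busy(uint32_t *mp_flags)
    {
        __atomic_fetch_add(mp_flags, MP_BUSY_ONE, __ATOMIC_RELAXED);
    }

    static void map_drop_busy(uint32_t *mp_flags)
    {
        __atomic_fetch_sub(mp_flags, MP_BUSY_ONE, __ATOMIC_RELAXED);
    }

    /* The mapping is off all lists, so the count can only fall; spin until
     * our own busy is the only one left. */
    static void map_drain_busy(const uint32_t *mp_flags)
    {
        while ((__atomic_load_n(mp_flags, __ATOMIC_RELAXED) >> 24) != 1)
            ;
    }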
+ li r20,64 ; Set a limit of 64 nests for sanity check + bne-- hpfExit ; Yes... (probably not though) + +; +; Note: if the RI is on, we are accessing user space from the kernel, therefore we +; should be loading the user pmap here. +; + + andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space? + lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel + mr r19,r2 ; Remember the per_proc + ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address) + mr r23,r30 ; Save the low part of faulting address + beq-- hpfInKern ; Skip if we are in the kernel + la r8,ppUserPmap(r19) ; Point to the current user pmap + +hpfInKern: mr r22,r29 ; Save the high part of faulting address + + bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit... + +; +; On 32-bit machines we emulate a segment exception by loading unused SRs with a +; predefined value that corresponds to no address space. When we see that value +; we turn off the PTE miss bit in the DSISR to drive the code later on that will +; cause the proper SR to be loaded. +; + + lwz r28,4(r8) ; Pick up the pmap + rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive? + mr r25,r28 ; Save the original pmap (in case we nest) + bne hpfNest ; Segs are not ours if so... + mfsrin r4,r30 ; Get the SR that was used for translation + cmplwi r4,invalSpace ; Is this a simulated segment fault? + bne++ hpfNest ; No... + + rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR + b hpfNest ; Join on up... + + .align 5 + + nop ; Push hpfNest to a 32-byte boundary + nop ; Push hpfNest to a 32-byte boundary + nop ; Push hpfNest to a 32-byte boundary + nop ; Push hpfNest to a 32-byte boundary + nop ; Push hpfNest to a 32-byte boundary + nop ; Push hpfNest to a 32-byte boundary + +hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit) + mr r25,r28 ; Save the original pmap (in case we nest) + +; +; This is where we loop descending nested pmaps +; + +hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock + addi r20,r20,-1 ; Count nest try + bl sxlkShared ; Go get a shared lock on the mapping lists + mr. r3,r3 ; Did we get the lock? + bne-- hpfBadLock ; Nope... + + mr r3,r28 ; Get the pmap pointer + mr r4,r22 ; Get top of faulting vaddr + mr r5,r23 ; Get bottom of faulting vaddr + bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags) + + rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one? + mr. r31,r3 ; Save the mapping if we found it + cmplwi cr1,r0,0 ; Check for removal + crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing + + bt-- cr0_eq,hpfNotFound ; Not found or removing... + + rlwinm. r0,r7,0,mpNestb,mpNestb ; Are we nested? + mr r26,r7 ; Get the flags for this mapping (passed back from search call) + + lhz r21,mpSpace(r31) ; Get the space + + beq++ hpfFoundIt ; No, we found our guy... + + +#if pmapTransSize != 12 +#error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize +#endif + rlwinm. r0,r26,0,mpSpecialb,mpSpecialb ; Special handling? + cmplwi cr1,r20,0 ; Too many nestings? + bne-- hpfSpclNest ; Do we need to do special handling? 
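The hpfNest loop above (continued at hpfCSrch below) descends through nested pmaps: each nested mapping supplies a vaddr relocation and, through the pmapTrans table, the next pmap, with a budget of 64 levels acting as a loop detector. In C it is roughly the following, with all names hypothetical:

    #include <stddef.h>
    #include <stdint.h>

    enum { MAX_NEST = 64 };            /* the r20 sanity budget above */

    typedef struct pmap pmap_t;
    typedef struct mapping {
        int       is_nest;             /* mpNest analogue */
        pmap_t   *child;               /* pmap nested into this range */
        uint64_t  reloc;               /* mpNestReloc analogue */
    } mapping_t;

    struct pmap {
        mapping_t *(*search)(pmap_t *, uint64_t);
    };

    /* Descend through nested pmaps, relocating the faulting address at
     * each level, until a leaf mapping is found or the budget runs out. */
    static mapping_t *resolve_mapping(pmap_t *pmap, uint64_t va)
    {
        for (int budget = MAX_NEST; budget > 0; budget--) {
            mapping_t *mp = pmap->search(pmap, va);
            if (mp == NULL || !mp->is_nest)
                return mp;             /* a leaf, or truly not mapped */
            va  += mp->reloc;          /* shift into the child's space */
            pmap = mp->child;
        }
        return NULL;                   /* nesting loop: the hpfNestTooMuch case */
    }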
+
+hpfCSrch: lhz r21,mpSpace(r31) ; Get the space
+ lwz r8,mpNestReloc(r31) ; Get the vaddr relocation
+ lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half
+ la r3,pmapSXlk(r28) ; Point to the old pmap search lock
+ lis r0,0x8000 ; Get 0xFFFFFFFF80000000
+ lis r10,hi16(EXT(pmapTrans)) ; Get the translate table
+ add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
+ blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop...
+ or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit
+ slwi r11,r21,3 ; Multiply space by 8
+ ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part
+ addc r23,r23,r9 ; Relocate bottom half of vaddr
+ lwz r10,0(r10) ; Get the actual translation map
+ slwi r12,r21,2 ; Multiply space by 4
+ add r10,r10,r11 ; Add in the higher part of the index
+ rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit)
+ adde r22,r22,r8 ; Relocate the top half of the vaddr
+ add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry
+ bl sxlkUnlock ; Unlock the search list
+
+ lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
+ bf-- pf64Bitb,hpfNest ; Done if 32-bit...
+
+ ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
+ b hpfNest ; Go try the new pmap...
+
+;
+; Error condition. We only allow 64 nestings. This keeps us from having to
+; check for recursive nests when we install them.
+;
+
+ .align 5
+
+hpfNestTooMuch:
+ lwz r20,savedsisr(r13) ; Get the DSISR
+ la r3,pmapSXlk(r28) ; Point to the pmap search lock
+ bl sxlkUnlock ; Unlock the search list (R3 good from above)
+ ori r20,r20,1 ; Indicate that there was a nesting problem
+ stw r20,savedsisr(r13) ; Stash it
+ lwz r11,saveexception(r13) ; Restore the exception code
+ b EXT(PFSExit) ; Yes... (probably not though)
+
+;
+; Error condition - lock failed - this is fatal
+;
+
+ .align 5
+
+hpfBadLock:
+ lis r0,hi16(Choke) ; System abend
+ ori r0,r0,lo16(Choke) ; System abend
+ li r3,failMapping ; Show mapping failure
+ sc
+;
+; Did not find any kind of mapping
+;
+
+ .align 5
+
+hpfNotFound:
+ la r3,pmapSXlk(r28) ; Point to the pmap search lock
+ bl sxlkUnlock ; Unlock it
+ lwz r11,saveexception(r13) ; Restore the exception code
+
+hpfExit: ; We need this because we can not do a relative branch
+ b EXT(PFSExit) ; Yes... (probably not though)
+
+
+;
+; Here is where we handle special mappings. So far, the only use is to load a
+; processor specific segment register for copy in/out handling.
+;
+; The only (so far implemented) special map is used for copyin/copyout.
+; We keep a mapping of a "linkage" mapping in the per_proc.
+; The linkage mapping is basically a nested pmap that is switched in
+; as part of context switch. It relocates the appropriate user address
+; space slice into the right place in the kernel.
+;
+
+ .align 5
+
+hpfSpclNest:
+ la r31,ppCIOmp(r19) ; Just point to the mapping
+ oris r27,r27,hi16(dsiSpcNest) ; Show that we had a special nesting here
+ b hpfCSrch ; Go continue search...
+
+
+;
+; We have now found a mapping for the address we faulted on.
+;
+
+;
+; Here we go about calculating what the VSID should be. We concatenate
+; the space ID (14 bits wide) 3 times. We then slide the vaddr over
+; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
+; Then we XOR an expanded space ID and the shifted vaddr. This gives us
+; the VSID.
+; +; This is used both for segment handling and PTE handling +; + + +#if maxAdrSpb != 14 +#error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!! +#endif + + .align 5 + +hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment + rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment? + rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID + rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order + rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over + rlwinm r0,r27,0,dsiSpcNestb,dsiSpcNestb ; Isolate special nest flag + rlwimi r21,r21,14,4,17 ; Make a second copy of space above first + cmplwi cr5,r0,0 ; Did we just do a special nesting? + rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35 + crorc cr0_eq,cr0_eq,cr5_eq ; Force outselves through the seg load code if special nest + rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register + xor r14,r14,r20 ; Calculate the top half of VSID + xor r15,r15,r21 ; Calculate the bottom half of the VSID + rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing) + rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry + rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top + rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position + or r12,r12,r15 ; Add key into the bottom of VSID +; +; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12 + + bne++ hpfPteMiss ; Nope, normal PTE miss... + +; +; Here is the only place that we make an entry in the pmap segment cache. +; +; Note that we do not make an entry in the segment cache for special +; nested mappings. This makes the copy in/out segment get refreshed +; when switching threads. +; +; The first thing that we do is to look up the ESID we are going to load +; into a segment in the pmap cache. If it is already there, this is +; a segment that appeared since the last time we switched address spaces. +; If all is correct, then it was another processors that made the cache +; entry. If not, well, it is an error that we should die on, but I have +; not figured a good way to trap it yet. +; +; If we get a hit, we just bail, otherwise, lock the pmap cache, select +; an entry based on the generation number, update the cache entry, and +; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit +; entries that correspond to the last 4 bits (32:35 for 64-bit and +; 0:3 for 32-bit) of the ESID. +; +; Then we unlock and bail. +; +; First lock it. Then select a free slot or steal one based on the generation +; number. Then store it, update the allocation flags, and unlock. +; +; The cache entry contains an image of the ESID/VSID pair we would load for +; 64-bit architecture. For 32-bit, it is a simple transform to an SR image. +; +; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not +; the current one, which may have changed because we nested. +; +; Also remember that we do not store the valid bit in the ESID. If we +; od, this will break some other stuff. +; + + bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault.... + + mr r3,r25 ; Point to the pmap + mr r4,r22 ; ESID high half + mr r5,r23 ; ESID low half + bl pmapCacheLookup ; Go see if this is in the cache already + + mr. r3,r3 ; Did we find it? 
+ mr r4,r11 ; Copy this to a different register + + bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry... + + lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table + lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table + + cntlzw r7,r4 ; Find a free slot + + subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one + rlwinm r30,r30,0,0,3 ; Clean up the ESID + srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not + addi r5,r4,1 ; Bump the generation number + and r7,r7,r6 ; Clear bit number if none empty + andc r8,r4,r6 ; Clear generation count if we found an empty + rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word + or r7,r7,r8 ; Select a slot number + li r8,0 ; Clear + andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using + oris r8,r8,0x8000 ; Get the high bit on + la r9,pmapSegCache(r25) ; Point to the segment cache + slwi r6,r7,4 ; Get index into the segment cache + slwi r2,r7,2 ; Get index into the segment cache sub-tag index + srw r8,r8,r7 ; Get the mask + cmplwi r2,32 ; See if we are in the first or second half of sub-tag + li r0,0 ; Clear + rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out + oris r0,r0,0xF000 ; Get the sub-tag mask + add r9,r9,r6 ; Point to the cache slot + srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift work for either half) + srw r5,r30,r2 ; Slide sub-tag to right slot (shift work for either half) + + stw r29,sgcESID(r9) ; Save the top of the ESID + andc r10,r10,r0 ; Clear sub-tag slot in case we are in top + andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom + stw r30,sgcESID+4(r9) ; Save the bottom of the ESID + or r10,r10,r5 ; Stick in subtag in case top half + or r11,r11,r5 ; Stick in subtag in case bottom half + stw r14,sgcVSID(r9) ; Save the top of the VSID + andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated + stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key + bge hpfSCSTbottom ; Go save the bottom part of sub-tag + + stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag + b hpfNoCacheEnt ; Go finish up... + +hpfSCSTbottom: + stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag + + +hpfNoCacheEnt: + eieio ; Make sure cache is updated before lock + stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number + + +hpfNoCacheEnt2: + lwz r4,ppMapFlags(r19) ; Get the protection key modifier + bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment... + +; +; Make and enter 32-bit segment register +; + + lwz r16,validSegs(r19) ; Get the valid SR flags + xor r12,r12,r4 ; Alter the storage key before loading segment register + rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting + rlwinm r6,r12,19,1,3 ; Insert the keys and N bit + lis r0,0x8000 ; Set bit 0 + rlwimi r6,r12,20,12,31 ; Insert 4:23 the VSID + srw r0,r0,r2 ; Get bit corresponding to SR + rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents + or r16,r16,r0 ; Show that SR is valid + + mtsrin r6,r30 ; Set the actual SR + + stw r16,validSegs(r19) ; Set the valid SR flags + + b hpfPteMiss ; SR loaded, go do a PTE... + +; +; Make and enter 64-bit segment look-aside buffer entry. +; Note that the cache entry is the right format except for valid bit. +; We also need to convert from long long to 64-bit register values. 
+; + + + .align 5 + +hpfLoadSeg64: + ld r16,validSegs(r19) ; Get the valid SLB entry flags + sldi r8,r29,32 ; Move high order address over + sldi r10,r14,32 ; Move high part of VSID over + + not r3,r16 ; Make valids be 0s + li r0,1 ; Prepare to set bit 0 + + cntlzd r17,r3 ; Find a free SLB + xor r12,r12,r4 ; Alter the storage key before loading segment table entry + or r9,r8,r30 ; Form full 64-bit address + cmplwi r17,63 ; Did we find a free SLB entry? + sldi r0,r0,63 ; Get bit 0 set + or r10,r10,r12 ; Move in low part and keys + addi r17,r17,1 ; Skip SLB 0 always + blt++ hpfFreeSeg ; Yes, go load it... + +; +; No free SLB entries, select one that is in use and invalidate it +; + lwz r4,ppSegSteal(r19) ; Get the next slot to steal + addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only + addi r4,r4,1 ; Set next slot to steal + slbmfee r7,r17 ; Get the entry that is in the selected spot + subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap + rldicr r7,r7,0,35 ; Clear the valid bit and the rest + srawi r2,r2,31 ; Get -1 if steal index still in range + slbie r7 ; Invalidate the in-use SLB entry + and r4,r4,r2 ; Reset steal index when it should wrap + isync ; + + stw r4,ppSegSteal(r19) ; Set the next slot to steal +; +; We are now ready to stick the SLB entry in the SLB and mark it in use +; + +hpfFreeSeg: + subi r4,r17,1 ; Adjust shift to account for skipping slb 0 + mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear + srd r0,r0,r4 ; Set bit mask for allocation + oris r9,r9,0x0800 ; Turn on the valid bit + or r16,r16,r0 ; Turn on the allocation flag + rldimi r9,r17,0,58 ; Copy in the SLB entry selector + + beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest... + slbie r7 ; Blow away a potential duplicate + +hpfNoBlow: slbmte r10,r9 ; Make that SLB entry + + std r16,validSegs(r19) ; Mark as valid + b hpfPteMiss ; STE loaded, go do a PTE... + +; +; The segment has been set up and loaded if need be. Now we are ready to build the +; PTE and get it into the hash table. +; +; Note that there is actually a race here. If we start fault processing on +; a different pmap, i.e., we have descended into a nested pmap, it is possible +; that the nest could have been removed from the original pmap. We would +; succeed with this translation anyway. I do not think we need to worry +; about this (famous last words) because nobody should be unnesting anything +; if there are still people activily using them. It should be up to the +; higher level VM system to put the kibosh on this. +; +; There is also another race here: if we fault on the same mapping on more than +; one processor at the same time, we could end up with multiple PTEs for the same +; mapping. This is not a good thing.... We really only need one of the +; fault handlers to finish, so what we do is to set a "fault in progress" flag in +; the mapping. If we see that set, we just abandon the handler and hope that by +; the time we restore context and restart the interrupted code, the fault has +; been resolved by the other guy. If not, we will take another fault. +; + +; +; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not. +; It is required to stay there until after we call mapSelSlot!!!! +; + + .align 5 + +hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field + lwz r12,mpPte(r31) ; Get the quick pointer to PTE + li r3,mpHValid ; Get the PTE valid bit + andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side? 
+ ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag + crnot cr1_eq,cr0_eq ; Remember if FIP was on + and. r12,r12,r3 ; Isolate the valid bit + crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail... + beq-- hpfAbandon ; Yes, other processor is or already has handled this... + andi. r0,r2,mpBlock ; Is this a block mapping? + crmove cr7_eq,cr0_eq ; Remember if we have a block mapping + stwcx. r2,0,r31 ; Store the flags + bne-- hpfPteMiss ; Collision, try again... + + bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff... + +; +; At this point we are about to do the 32-bit PTE generation. +; +; The following is the R14:R15 pair that contains the "shifted" VSID: +; +; 1 2 3 4 4 5 6 +; 0 8 6 4 2 0 8 6 3 +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////| +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; +; The 24 bits of the 32-bit architecture VSID is in the following: +; +; 1 2 3 4 4 5 6 +; 0 8 6 4 2 0 8 6 3 +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////| +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; + + +hpfBldPTE32: + lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion) + lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping + + mfsdr1 r27 ; Get the hash table base address + + rlwinm r0,r23,0,4,19 ; Isolate just the page index + rlwinm r18,r23,10,26,31 ; Extract the API + xor r19,r15,r0 ; Calculate hash << 12 + mr r2,r25 ; Save the flag part of the mapping + rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image + rlwinm r16,r27,16,7,15 ; Extract the hash table size + rlwinm r25,r25,0,0,19 ; Clear out the flags + slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing no supported) + sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map) + ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask + rlwinm r27,r27,0,0,15 ; Extract the hash table base + rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table + add r24,r24,r25 ; Adjust to true physical address + rlwimi r18,r15,27,5,24 ; Move bits 32:31 of the "shifted" VSID into the PTE image + rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot + and r19,r19,r16 ; Wrap hash table offset into the hash table + ori r24,r24,lo16(mpR) ; Turn on the reference bit right now + rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA + add r19,r19,r27 ; Point to the PTEG + subfic r20,r20,-4 ; Get negative offset to PCA + oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on + add r20,r20,r27 ; Point to the PCA slot + +; +; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower. +; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA. +; +; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible +; that some other processor beat us and stuck in a PTE or that +; all we had was a simple segment exception and the PTE was there the whole time. +; If we find one a pointer, we are done. +; + + mr r7,r20 ; Copy the PCA pointer + bl mapLockPteg ; Lock the PTEG + + lwz r12,mpPte(r31) ; Get the offset to the PTE + mr r17,r6 ; Remember the PCA image + mr r16,r6 ; Prime the post-select PCA image + andi. 
r0,r12,mpHValid ; Is there a PTE here already? + li r21,8 ; Get the number of slots + + bne- cr7,hpfNoPte32 ; Skip this for a block mapping... + + bne- hpfBailOut ; Someone already did this for us... + +; +; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a +; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was +; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE. +; R4 returns the slot index. +; +; REMEMBER: CR7 indicates that we are building a block mapping. +; + +hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots + mr r6,r17 ; Get back the original PCA + rlwimi r6,r16,0,8,15 ; Insert the updated steal slot + blt- hpfBailOut ; Holy Cow, all slots are locked... + + bl mapSelSlot ; Go select a slot (note that the PCA image is already set up) + + cmplwi cr5,r3,1 ; Did we steal a slot? + rlwinm r5,r4,3,26,28 ; Convert index to slot offset + add r19,r19,r5 ; Point directly to the PTE + mr r16,r6 ; Remember the PCA image after selection + blt+ cr5,hpfInser32 ; Nope, no steal... + + lwz r6,0(r19) ; Get the old PTE + lwz r7,4(r19) ; Get the real part of the stealee + rlwinm r6,r6,0,1,31 ; Clear the valid bit + bgt cr5,hpfNipBM ; Do not try to lock a non-existant physent for a block mapping... + srwi r3,r7,12 ; Change phys address to a ppnum + bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page) + cmplwi cr1,r3,0 ; Check if this is in RAM + bne- hpfNoPte32 ; Could not get it, try for another... + + crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map + +hpfNipBM: stw r6,0(r19) ; Set the invalid PTE + + sync ; Make sure the invalid is stored + li r9,tlbieLock ; Get the TLBIE lock + rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part + +hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock + mfsprg r4,0 ; Get the per_proc + rlwinm r8,r6,25,18,31 ; Extract the space ID + rlwinm r11,r6,25,18,31 ; Extract the space ID + lwz r7,hwSteals(r4) ; Get the steal count + srwi r2,r6,7 ; Align segment number with hash + rlwimi r11,r11,14,4,17 ; Get copy above ourselves + mr. r0,r0 ; Is it locked? + srwi r0,r19,6 ; Align PTEG offset for back hash + xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits) + xor r11,r11,r0 ; Hash backwards to partial vaddr + rlwinm r12,r2,14,0,3 ; Shift segment up + mfsprg r2,2 ; Get feature flags + li r0,1 ; Get our lock word + rlwimi r12,r6,22,4,9 ; Move up the API + bne- hpfTLBIE32 ; It is locked, go wait... + rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr + + stwcx. r0,0,r9 ; Try to get it + bne- hpfTLBIE32 ; We was beat... + addi r7,r7,1 ; Bump the steal count + + rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box? + li r0,0 ; Lock clear value + + tlbie r12 ; Invalidate it everywhere + + stw r0,tlbieLock(0) ; Clear the tlbie lock + + beq- hpfNoTS32 ; Can not have MP on this machine... + + eieio ; Make sure that the tlbie happens first + tlbsync ; Wait for everyone to catch up + sync ; Make sure of it all + +hpfNoTS32: stw r7,hwSteals(r4) ; Save the steal count + bgt cr5,hpfInser32 ; We just stole a block mapping... + + lwz r4,4(r19) ; Get the RC of the just invalidated PTE + + la r11,ppLink+4(r3) ; Point to the master RC copy + lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping + rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC + +hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC + or r0,r0,r2 ; Merge in the new RC + stwcx. 
r0,0,r11 ; Try to stick it back + bne- hpfMrgRC32 ; Try again if we collided... + + +hpfFPnch: rlwinm. r7,r7,0,0,25 ; Clean and test mapping address + beq- hpfLostPhys ; We could not find our mapping. Kick the bucket... + + lhz r10,mpSpace(r7) ; Get the space + lwz r9,mpVAddr+4(r7) ; And the vaddr + cmplw cr1,r10,r8 ; Is this one of ours? + xor r9,r12,r9 ; Compare virtual address + cmplwi r9,0x1000 ; See if we really match + crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match + beq+ hpfFPnch2 ; Yes, found ours... + + lwz r7,mpAlias+4(r7) ; Chain on to the next + b hpfFPnch ; Check it out... + +hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG + stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG) + bl mapPhysUnlock ; Unlock the physent now + +hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on + + stw r24,4(r19) ; Stuff in the real part of the PTE + eieio ; Make sure this gets there first + + stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid + mr r17,r16 ; Get the PCA image to save + b hpfFinish ; Go join the common exit code... + + +; +; At this point we are about to do the 64-bit PTE generation. +; +; The following is the R14:R15 pair that contains the "shifted" VSID: +; +; 1 2 3 4 4 5 6 +; 0 8 6 4 2 0 8 6 3 +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////| +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; +; + + .align 5 + +hpfBldPTE64: + ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping + lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping + + mfsdr1 r27 ; Get the hash table base address + + sldi r11,r22,32 ; Slide top of adjusted EA over + sldi r14,r14,32 ; Slide top of VSID over + rlwinm r5,r27,0,27,31 ; Isolate the size + eqv r16,r16,r16 ; Get all foxes here + rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN + mr r2,r10 ; Save the flag part of the mapping + or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value + rldicr r27,r27,0,45 ; Clean up the hash table base + or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value + rlwinm r0,r11,0,4,19 ; Clear out everything but the page + subfic r5,r5,46 ; Get number of leading zeros + xor r19,r0,r15 ; Calculate hash + ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE + srd r16,r16,r5 ; Shift over to get length of table + srdi r19,r19,5 ; Convert page offset to hash table offset + rldicr r16,r16,0,56 ; Clean up lower bits in hash table size + rldicr r10,r10,0,51 ; Clear out flags + sldi r24,r24,12 ; Change ppnum to physical address + sub r11,r11,r10 ; Get the offset from the base mapping + and r19,r19,r16 ; Wrap into hash table + add r24,r24,r11 ; Get actual physical address of this page + srdi r20,r19,5 ; Convert PTEG offset to PCA offset + rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc. + subfic r20,r20,-4 ; Get negative offset to PCA + ori r24,r24,lo16(mpR) ; Force on the reference bit + add r20,r20,r27 ; Point to the PCA slot + add r19,r19,r27 ; Point to the PTEG + +; +; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower. +; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA. +; +; We need to check PTE pointer (mpPte) again after we lock the PTEG. 
It is possible +; that some other processor beat us and stuck in a PTE or that +; all we had was a simple segment exception and the PTE was there the whole time. +; If we find one a pointer, we are done. +; + + mr r7,r20 ; Copy the PCA pointer + bl mapLockPteg ; Lock the PTEG + + lwz r12,mpPte(r31) ; Get the offset to the PTE + mr r17,r6 ; Remember the PCA image + mr r18,r6 ; Prime post-selection PCA image + andi. r0,r12,mpHValid ; See if we have a PTE now + li r21,8 ; Get the number of slots + + bne-- cr7,hpfNoPte64 ; Skip this for a block mapping... + + bne-- hpfBailOut ; Someone already did this for us... + +; +; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a +; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was +; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE. +; R4 returns the slot index. +; +; REMEMBER: CR7 indicates that we are building a block mapping. +; + +hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots + mr r6,r17 ; Restore original state of PCA + rlwimi r6,r18,0,8,15 ; Insert the updated steal slot + blt- hpfBailOut ; Holy Cow, all slots are locked... + + bl mapSelSlot ; Go select a slot + + cmplwi cr5,r3,1 ; Did we steal a slot? + rlwinm r5,r4,4,25,27 ; Convert index to slot offset + mr r18,r6 ; Remember the PCA image after selection + add r19,r19,r5 ; Point directly to the PTE + lwz r10,hwSteals(r2) ; Get the steal count + blt++ cr5,hpfInser64 ; Nope, no steal... + + ld r6,0(r19) ; Get the old PTE + ld r7,8(r19) ; Get the real part of the stealee + rldicr r6,r6,0,62 ; Clear the valid bit + bgt cr5,hpfNipBMx ; Do not try to lock a non-existant physent for a block mapping... + srdi r3,r7,12 ; Change page address to a page address + bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page) + cmplwi cr1,r3,0 ; Check if this is in RAM + bne-- hpfNoPte64 ; Could not get it, try for another... + + crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map + +hpfNipBMx: std r6,0(r19) ; Set the invalid PTE + li r9,tlbieLock ; Get the TLBIE lock + + srdi r11,r6,5 ; Shift VSID over for back hash + mfsprg r4,0 ; Get the per_proc + xor r11,r11,r19 ; Hash backwards to get low bits of VPN + sync ; Make sure the invalid is stored + + sldi r12,r6,16 ; Move AVPN to EA position + sldi r11,r11,5 ; Move this to the page position + +hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock + mr. r0,r0 ; Is it locked? + li r0,1 ; Get our lock word + bne-- hpfTLBIE65 ; It is locked, go wait... + + stwcx. r0,0,r9 ; Try to get it + rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN + rldicl r8,r6,52,50 ; Isolate the address space ID + bne-- hpfTLBIE64 ; We was beat... + addi r10,r10,1 ; Bump the steal count + + rldicl r11,r12,0,16 ; Clear cause the book says so + li r0,0 ; Lock clear value + + tlbie r11 ; Invalidate it everywhere + + stw r0,tlbieLock(0) ; Clear the tlbie lock + + mr r7,r8 ; Get a copy of the space ID + eieio ; Make sure that the tlbie happens first + rldimi r7,r7,14,36 ; Copy address space to make hash value + tlbsync ; Wait for everyone to catch up + rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top + isync + srdi r2,r6,26 ; Shift original segment down to bottom + + ptesync ; Make sure of it all + xor r7,r7,r2 ; Compute original segment + + stw r10,hwSteals(r4) ; Save the steal count + bgt cr5,hpfInser64 ; We just stole a block mapping... 
+ + rldimi r12,r7,28,0 ; Insert decoded segment + rldicl r4,r12,0,13 ; Trim to max supported address + + ld r12,8(r19) ; Get the RC of the just invalidated PTE + + la r11,ppLink+4(r3) ; Point to the master RC copy + ld r7,ppLink(r3) ; Grab the pointer to the first mapping + rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC + +hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC + li r12,0xFF ; Get mask to clean up alias pointer + or r0,r0,r2 ; Merge in the new RC + rldicl r12,r12,62,0 ; Rotate clean up mask to get 0xC00000000000003F + stwcx. r0,0,r11 ; Try to stick it back + bne-- hpfMrgRC64 ; Try again if we collided... + +hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address + beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket... + + lhz r10,mpSpace(r7) ; Get the space + ld r9,mpVAddr(r7) ; And the vaddr + cmplw cr1,r10,r8 ; Is this one of ours? + xor r9,r4,r9 ; Compare virtual address + cmpldi r9,0x1000 ; See if we really match + crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match + beq++ hpfFPnch2x ; Yes, found ours... + + ld r7,mpAlias(r7) ; Chain on to the next + b hpfFPnchx ; Check it out... + + .align 5 + +hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area + stwcx. r7,0,r7 ; Kill reservation + +hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock + mr. r0,r0 ; Is it locked? + beq++ hpfTLBIE64 ; Nope, try again... + b hpfTLBIE63 ; Yup, wait for it... + + + +hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG + stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though) + bl mapPhysUnlock ; Unlock the physent now + + +hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE + eieio ; Make sure this gets there first + std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid + mr r17,r18 ; Get the PCA image to set + b hpfFinish ; Go join the common exit code... + +hpfLostPhys: + lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead + ori r0,r0,lo16(Choke) ; System abend + sc + +; +; This is the common code we execute when we are finished setting up the PTE. +; + + .align 5 + +hpfFinish: sub r4,r19,r27 ; Get offset of PTE + ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset + bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map + stw r4,mpPte(r31) ; Remember our PTE + +hpfBailOut: eieio ; Make sure all updates come first + stw r17,0(r20) ; Unlock and set the final PCA + +; +; This is where we go if we have started processing the fault, but find that someone +; else has taken care of it. +; + +hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags + rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag + sth r2,mpFlags+2(r31) ; Set it + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list + + li r11,T_IN_VAIN ; Say that it was handled + b EXT(PFSExit) ; Leave... + +; +; This is where we go when we find that someone else +; is in the process of handling the fault. +; + +hpfAbandon: li r3,lgKillResv ; Kill off any reservation + stwcx. r3,0,r3 ; Do it + + la r3,pmapSXlk(r28) ; Point to the pmap search lock + bl sxlkUnlock ; Unlock the search list + + li r11,T_IN_VAIN ; Say that it was handled + b EXT(PFSExit) ; Leave... + + + +/* + * hw_set_user_space(pmap) + * hw_set_user_space_dis(pmap) + * + * Indicate whether memory space needs to be switched. + * We really need to turn off interrupts here, because we need to be non-preemptable * * hw_set_user_space_dis is used when interruptions are already disabled.
Mind the * register usage here. The VMM switch code in vmachmon.s that calls this @@ -3100,255 +3971,1582 @@ cbNo: lwz r11,bmnext(r11) ; Link next */ - + + .align 5 + .globl EXT(hw_set_user_space) + +LEXT(hw_set_user_space) + + lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable + mfmsr r10 ; Get the current MSR + ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP + ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE + andc r10,r10,r8 ; Turn off VEC, FP for good + andc r9,r10,r9 ; Turn off EE also + mtmsr r9 ; Disable them + isync ; Make sure FP and vec are off + mfsprg r6,0 ; Get the per_proc_info address + lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address + mfsprg r4,2 ; The the feature flags + lwz r7,pmapvr(r3) ; Get the v to r translation + lwz r8,pmapvr+4(r3) ; Get the v to r translation + mtcrf 0x80,r4 ; Get the Altivec flag + xor r4,r3,r8 ; Get bottom of the real address of bmap anchor + cmplw cr1,r3,r2 ; Same address space as before? + stw r7,ppUserPmap(r6) ; Show our real pmap address + crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine + stw r4,ppUserPmap+4(r6) ; Show our real pmap address + stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address + mtmsr r10 ; Restore interruptions + beqlr-- cr1 ; Leave if the same address space or not Altivec + + dssall ; Need to kill all data streams if adrsp changed + sync + blr ; Return... + + .align 5 + .globl EXT(hw_set_user_space_dis) + +LEXT(hw_set_user_space_dis) + + lwz r7,pmapvr(r3) ; Get the v to r translation + mfsprg r4,2 ; The the feature flags + lwz r8,pmapvr+4(r3) ; Get the v to r translation + mfsprg r6,0 ; Get the per_proc_info address + lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address + mtcrf 0x80,r4 ; Get the Altivec flag + xor r4,r3,r8 ; Get bottom of the real address of bmap anchor + cmplw cr1,r3,r2 ; Same address space as before? + stw r7,ppUserPmap(r6) ; Show our real pmap address + crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine + stw r4,ppUserPmap+4(r6) ; Show our real pmap address + stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address + beqlr-- cr1 ; Leave if the same + + dssall ; Need to kill all data streams if adrsp changed + sync + blr ; Return... + +/* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry + * + * Lock must already be held on mapping block list + * returns 0 if all slots filled. + * returns n if a slot is found and it is not the last + * returns -n if a slot is found and it is the last + * when n and -n are returned, the corresponding bit is cleared + * the mapping is zeroed out before return + * + */ + + .align 5 + .globl EXT(mapalc1) + +LEXT(mapalc1) + lwz r4,mbfree(r3) ; Get the 1st mask + lis r0,0x8000 ; Get the mask to clear the first free bit + lwz r5,mbfree+4(r3) ; Get the 2nd mask + mr r12,r3 ; Save the block ptr + cntlzw r3,r4 ; Get first 1-bit in 1st word + srw. r9,r0,r3 ; Get bit corresponding to first free one + cntlzw r10,r5 ; Get first free field in second word + andc r4,r4,r9 ; Turn 1-bit off in 1st word + bne mapalc1f ; Found one in 1st word + + srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word + li r3,0 ; assume failure return + andc r5,r5,r9 ; Turn it off + beqlr-- ; There are no 1 bits left... + addi r3,r10,32 ; set the correct number + +mapalc1f: + or. r0,r4,r5 ; any more bits set? 
+ stw r4,mbfree(r12) ; update bitmasks + stw r5,mbfree+4(r12) + + slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block + addi r7,r6,32 + dcbz r6,r12 ; clear the 64-byte mapping + dcbz r7,r12 + + bnelr++ ; return if another bit remains set + + neg r3,r3 ; indicate we just returned the last bit + blr + + +/* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zeros a free 2-bit mapping entry + * + * Lock must already be held on mapping block list + * returns 0 if all slots filled. + * returns n if a slot is found and it is not the last + * returns -n if a slot is found and it is the last + * when n and -n are returned, the corresponding bits are cleared + * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)). + * the mapping is zeroed out before return + */ + + .align 5 + .globl EXT(mapalc2) +LEXT(mapalc2) + lwz r4,mbfree(r3) ; Get the first mask + lis r0,0x8000 ; Get the mask to clear the first free bit + lwz r5,mbfree+4(r3) ; Get the second mask + mr r12,r3 ; Save the block ptr + slwi r6,r4,1 ; shift first word over + and r6,r4,r6 ; lite start of double bit runs in 1st word + slwi r7,r5,1 ; shift 2nd word over + cntlzw r3,r6 ; Get first free 2-bit run in 1st word + and r7,r5,r7 ; lite start of double bit runs in 2nd word + srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word + cntlzw r10,r7 ; Get first free field in second word + srwi r11,r9,1 ; shift over for 2nd bit in 1st word + andc r4,r4,r9 ; Turn off 1st bit in 1st word + andc r4,r4,r11 ; turn off 2nd bit in 1st word + bne mapalc2a ; Found two consecutive free bits in 1st word + + srw. r9,r0,r10 ; Get bit corresponding to first free one in second word + li r3,0 ; assume failure + srwi r11,r9,1 ; get mask for 2nd bit + andc r5,r5,r9 ; Turn off 1st bit in 2nd word + andc r5,r5,r11 ; turn off 2nd bit in 2nd word + beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either + addi r3,r10,32 ; set the correct number + +mapalc2a: + or. r0,r4,r5 ; any more bits set? + stw r4,mbfree(r12) ; update bitmasks + stw r5,mbfree+4(r12) + slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block + addi r7,r6,32 + addi r8,r6,64 + addi r9,r6,96 + dcbz r6,r12 ; zero out the 128-byte mapping + dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines + dcbz r8,r12 ; because the mapping may not be 128-byte aligned + dcbz r9,r12 + + bnelr++ ; return if another bit remains set + + neg r3,r3 ; indicate we just returned the last bit + blr + +mapalc2c: + rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31 + and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free? + beqlr ; no, we failed + rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word + rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word + li r3,31 ; get index of this field + b mapalc2a + + +; +; This routine initializes the hash table and PCA. +; It is done here because we may need to be 64-bit to do it.
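A rough C model of what the routine below establishes may help. It assumes the PCA grows downward from the hash-table base (which is what the negative indexing in the code implies), that each 64-byte PTEG has one 4-byte PCA word, and that 0xFF010000 encodes "all eight slots free, steal position at the end" per the PCAfree/PCAsteal byte layout documented with mapSelSlot later in this file. hash_init and its parameters are illustrative names, not kernel interfaces.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define PCA_INIT 0xFF010000u            /* PCAfree = 0xFF, PCAsteal = 0x01 */

    static void hash_init(uint8_t *table, size_t size)
    {
        uint32_t *pca = (uint32_t *)(void *)table;  /* PCA sits below the PTEGs */
        size_t    ptegs = size / 64;                /* one PCA word per 64-byte PTEG */

        memset(table, 0, size);                     /* the dcbz loops below */
        for (size_t i = 1; i <= ptegs; i++)
            pca[-(ptrdiff_t)i] = PCA_INIT;          /* entries at base-4, base-8, ... */
    }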
+; + + .align 5 + .globl EXT(hw_hash_init) + +LEXT(hw_hash_init) + + mfsprg r10,2 ; Get feature flags + lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address + mtcrf 0x02,r10 ; move pf64Bit to cr6 + lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address + lis r4,0xFF01 ; Set all slots free and start steal at end + ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address + ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address + + lwz r12,0(r12) ; Get hash table size + li r3,0 ; Get start + bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint) + + lwz r11,4(r11) ; Get hash table base + +hhiNext32: cmplw r3,r12 ; Have we reached the end? + bge- hhiCPCA32 ; Yes... + dcbz r3,r11 ; Clear the line + addi r3,r3,32 ; Next one... + b hhiNext32 ; Go on... + +hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4 + li r3,-4 ; Displacement to first PCA entry + neg r12,r12 ; Get negative end of PCA + +hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry + subi r3,r3,4 ; Next slot + cmpw r3,r12 ; Have we finished? + bge+ hhiNPCA32 ; Not yet... + blr ; Leave... + +hhiSF: mfmsr r9 ; Save the MSR + li r8,1 ; Get a 1 + mr r0,r9 ; Get a copy of the MSR + ld r11,0(r11) ; Get hash table base + rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0) + mtmsrd r0 ; Turn on SF + isync + + +hhiNext64: cmpld r3,r12 ; Have we reached the end? + bge-- hhiCPCA64 ; Yes... + dcbz128 r3,r11 ; Clear the line + addi r3,r3,128 ; Next one... + b hhiNext64 ; Go on... + +hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4 + li r3,-4 ; Displacement to first PCA entry + neg r12,r12 ; Get negative end of PCA + +hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry + subi r3,r3,4 ; Next slot + cmpd r3,r12 ; Have we finished? + bge++ hhiNPCA64 ; Not yet... + + mtmsrd r9 ; Turn off SF if it was off + isync + blr ; Leave... + + +; +; This routine sets up the hardware to start translation. +; Note that we do NOT start translation. 
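For the 32-bit path below, the SDR1 image is the classic HTABORG/HTABMASK encoding. A hedged sketch, assuming the base is suitably aligned and the size is a power of two no smaller than 64 KB (sdr1_image is an illustrative name; the assembly does not mask the base, it simply assumes the low bits are clear):

    #include <stdint.h>

    static uint32_t sdr1_image(uint32_t base, uint32_t size)
    {
        uint32_t htabmask = (size - 1) >> 16;        /* the rlwimi of (size - 1) */
        return (base & 0xFFFF0000u) | (htabmask & 0x1FFu);
    }

The 64-bit path encodes the size differently: SDR1 carries the table base plus a small field holding log2(size) - 18, which is what the cntlzw/subfic pair computes.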
+; + + .align 5 + .globl EXT(hw_setup_trans) + +LEXT(hw_setup_trans) + + mfsprg r11,0 ; Get the per_proc block + mfsprg r12,2 ; Get feature flags + li r0,0 ; Get a 0 + li r2,1 ; And a 1 + mtcrf 0x02,r12 ; Move pf64Bit to cr6 + stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid + stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux + sth r2,ppInvSeg(r11) ; Force a reload of the SRs + sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel + + bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint) + + li r9,0 ; Clear out a register + sync + isync + mtdbatu 0,r9 ; Invalidate maps + mtdbatl 0,r9 ; Invalidate maps + mtdbatu 1,r9 ; Invalidate maps + mtdbatl 1,r9 ; Invalidate maps + mtdbatu 2,r9 ; Invalidate maps + mtdbatl 2,r9 ; Invalidate maps + mtdbatu 3,r9 ; Invalidate maps + mtdbatl 3,r9 ; Invalidate maps + + mtibatu 0,r9 ; Invalidate maps + mtibatl 0,r9 ; Invalidate maps + mtibatu 1,r9 ; Invalidate maps + mtibatl 1,r9 ; Invalidate maps + mtibatu 2,r9 ; Invalidate maps + mtibatl 2,r9 ; Invalidate maps + mtibatu 3,r9 ; Invalidate maps + mtibatl 3,r9 ; Invalidate maps + + lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address + lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address + ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address + ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address + lwz r11,4(r11) ; Get hash table base + lwz r12,0(r12) ; Get hash table size + subi r12,r12,1 ; Back off by 1 + rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image + + mtsdr1 r11 ; Ok, we now have the hash table set up + sync + + li r12,invalSpace ; Get the invalid segment value + li r10,0 ; Start low + +hstsetsr: mtsrin r12,r10 ; Set the SR + addis r10,r10,0x1000 ; Bump the segment + mr. r10,r10 ; Are we finished? + bne+ hstsetsr ; Nope... + sync + blr ; Return... + +; +; 64-bit version +; + +hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address + lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address + ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address + ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address + ld r11,0(r11) ; Get hash table base + lwz r12,0(r12) ; Get hash table size + cntlzw r10,r12 ; Get the number of bits + subfic r10,r10,13 ; Get the extra bits we need + or r11,r11,r10 ; Add the size field to SDR1 + + mtsdr1 r11 ; Ok, we now have the hash table set up + sync + + li r0,0 ; Set an SLB slot index of 0 + slbia ; Trash all SLB entries (except for entry 0 that is) + slbmfee r7,r0 ; Get the entry that is in SLB index 0 + rldicr r7,r7,0,35 ; Clear the valid bit and the rest + slbie r7 ; Invalidate it + + blr ; Return... + + +; +; This routine turns on translation for the first time on a processor +; + + .align 5 + .globl EXT(hw_start_trans) + +LEXT(hw_start_trans) + + + mfmsr r10 ; Get the msr + ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation + + mtmsr r10 ; Everything falls apart here + isync + + blr ; Back to it. + + + +; +; This routine validates a segment register. +; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va) +; +; r3 = virtual pmap +; r4 = segment[0:31] +; r5 = segment[32:63] +; r6 = va[0:31] +; r7 = va[32:63] +; +; Note that we transform the addr64_t (long long) parameters into single 64-bit values. +; Note that there is no reason to apply the key modifier here because this is only +; used for kernel accesses. 
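The heart of what follows is VSID formation: several overlapping copies of the address space ID are folded together and XORed with the shifted effective address, so that each segment of each space hashes to a distinct VSID. A deliberately loose C illustration of that shape (not a bit-exact copy of the rlwimi sequence; make_vsid and the 52-bit mask are illustrative):

    #include <stdint.h>

    static uint64_t make_vsid(uint32_t space, uint64_t ea)
    {
        uint64_t hash = space;
        hash |= hash << 14;                      /* duplicate the space ID above itself */
        hash |= hash << 28;                      /* ...giving four overlapping copies */
        return (hash ^ (ea >> 28)) & 0x000FFFFFFFFFFFFFULL;  /* fold in the ESID */
    }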
+; + + .align 5 + .globl EXT(hw_map_seg) + +LEXT(hw_map_seg) + + lwz r0,pmapSpace(r3) ; Get the space, we will need it soon + lwz r9,pmapFlags(r3) ; Get the flags for the keys now + mfsprg r10,2 ; Get feature flags + mfsprg r12,0 ; Get the per_proc + +; +; Note: the following code would problably be easier to follow if I split it, +; but I just wanted to see if I could write this to work on both 32- and 64-bit +; machines combined. +; + +; +; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines) +; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines) + + rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0;31] - no-op for 32-bit + rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID + mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6 + srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest + rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:25] + rlwimi r0,r0,14,4,17 ; Dup address space ID above itself + rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines) + rlwinm r2,r0,28,0,31 ; Rotate rotate low nybble to top of low half + rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32 + rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines) + + rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space + ; concatenated together. There is garbage + ; at the top for 64-bit but we will clean + ; that out later. + rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit + + +; +; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or +; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines +; + +; +; What we have now is: +; +; 0 0 1 2 3 4 4 5 6 +; 0 8 6 4 2 0 8 6 3 - for 64-bit machines +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; 0 0 1 2 3 - for 32-bit machines +; 0 8 6 4 1 +; +; 0 0 1 2 3 4 4 5 6 +; 0 8 6 4 2 0 8 6 3 - for 64-bit machines +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; 0 0 1 2 3 - for 32-bit machines +; 0 8 6 4 1 +; +; 0 0 1 2 3 4 4 5 6 +; 0 8 6 4 2 0 8 6 3 - for 64-bit machines +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment +; +--------+--------+--------+--------+--------+--------+--------+--------+ +; 0 0 1 2 3 - for 32-bit machines +; 0 8 6 4 1 + + + xor r8,r8,r2 ; Calculate VSID + + bf-- pf64Bitb,hms32bit ; Skip out if 32-bit... 
+ + li r0,1 ; Prepare to set bit 0 (also to clear EE) + mfmsr r6 ; Get current MSR + li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits + mtmsrd r0,1 ; Set only the EE bit to 0 + rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on + mfmsr r11 ; Get the MSR right now, after disabling EE + andc r2,r11,r2 ; Turn off translation now + rldimi r2,r0,63,0 ; Get bit 64-bit turned on + or r11,r11,r6 ; Turn on the EE bit if it was on + mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on + isync ; Hang out a bit + + ld r6,validSegs(r12) ; Get the valid SLB entry flags + sldi r9,r9,9 ; Position the key and noex bit + + rldimi r5,r8,12,0 ; Form the VSID/key + + not r3,r6 ; Make valids be 0s + + cntlzd r7,r3 ; Find a free SLB + cmplwi r7,63 ; Did we find a free SLB entry? + + slbie r4 ; Since this ESID may still be in an SLBE, kill it + + oris r4,r4,0x0800 ; Turn on the valid bit in ESID + addi r7,r7,1 ; Make sure we skip slb 0 + blt++ hmsFreeSeg ; Yes, go load it... + +; +; No free SLB entries, select one that is in use and invalidate it +; + lwz r2,ppSegSteal(r12) ; Get the next slot to steal + addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only + addi r2,r2,1 ; Set next slot to steal + slbmfee r3,r7 ; Get the entry that is in the selected spot + subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap + rldicr r3,r3,0,35 ; Clear the valid bit and the rest + srawi r8,r8,31 ; Get -1 if steal index still in range + slbie r3 ; Invalidate the in-use SLB entry + and r2,r2,r8 ; Reset steal index when it should wrap + isync ; + + stw r2,ppSegSteal(r12) ; Set the next slot to steal +; +; We are now ready to stick the SLB entry in the SLB and mark it in use +; + +hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0 + rldimi r4,r7,0,58 ; Copy in the SLB entry selector + srd r0,r0,r2 ; Set bit mask for allocation + rldicl r5,r5,0,15 ; Clean out the unsupported bits + or r6,r6,r0 ; Turn on the allocation flag + + slbmte r5,r4 ; Make that SLB entry + + std r6,validSegs(r12) ; Mark as valid + mtmsrd r11 ; Restore the MSR + isync + blr ; Back to it... + + .align 5 + +hms32bit: rlwinm r8,r8,0,8,31 ; Clean up the VSID + rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting + lis r0,0x8000 ; Set bit 0 + rlwimi r8,r9,28,1,3 ; Insert the keys and N bit + srw r0,r0,r2 ; Get bit corresponding to SR + addi r7,r12,validSegs ; Point to the valid segment flags directly + + mtsrin r8,r4 ; Set the actual SR + isync ; Need to make sure this is done + +hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags + or r6,r6,r0 ; Show that SR is valid + stwcx. r6,0,r7 ; Set the valid SR flags + bne-- hmsrupt ; Had an interrupt, need to get flags again... + + blr ; Back to it... + + +; +; This routine invalidates a segment register. +; + + .align 5 + .globl EXT(hw_blow_seg) + +LEXT(hw_blow_seg) + + mfsprg r10,2 ; Get feature flags + mfsprg r12,0 ; Get the per_proc + mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6 + + addi r7,r12,validSegs ; Point to the valid segment flags directly + rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean + + bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit... 
+ + li r0,1 ; Prepare to set bit 0 (also to clear EE) + mfmsr r6 ; Get current MSR + li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits + mtmsrd r0,1 ; Set only the EE bit to 0 + rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on + mfmsr r11 ; Get the MSR right now, after disabling EE + andc r2,r11,r2 ; Turn off translation now + rldimi r2,r0,63,0 ; Get bit 64-bit turned on + or r11,r11,r6 ; Turn on the EE bit if it was on + mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on + isync ; Hang out a bit + + rldimi r9,r3,32,0 ; Insert the top part of the ESID + + slbie r9 ; Invalidate the associated SLB entry + + mtmsrd r11 ; Restore the MSR + isync + blr ; Back to it. + + .align 5 + +hbs32bit: lwarx r4,0,r7 ; Get and reserve the valid segment flags + rlwinm r6,r9,4,28,31 ; Convert segment to number + lis r2,0x8000 ; Set up a mask + srw r2,r2,r6 ; Make a mask + and. r0,r4,r2 ; See if this is even valid + li r5,invalSpace ; Set the invalid address space VSID + beqlr ; Leave if already invalid... + + mtsrin r5,r9 ; Slam the segment register + isync ; Need to make sure this is done + +hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment + stwcx. r4,0,r7 ; Set the valid SR flags + beqlr++ ; Stored ok, no interrupt, time to leave... + + lwarx r4,0,r7 ; Get and reserve the valid segment flags again + b hbsrupt ; Try again... + +; +; This routine invalidates the entire pmap segment cache +; +; Translation is on, interrupts may or may not be enabled. +; + + .align 5 + .globl EXT(invalidateSegs) + +LEXT(invalidateSegs) + + la r10,pmapCCtl(r3) ; Point to the segment cache control + eqv r2,r2,r2 ; Get all foxes + +isInv: lwarx r4,0,r10 ; Get the segment cache control value + rlwimi r4,r2,0,0,15 ; Slam in all invalid bits + rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked? + bne-- isInv0 ; Yes, try again... + + stwcx. r4,0,r10 ; Try to invalidate it + bne-- isInv ; Someone else just stuffed it... + blr ; Leave... + + +isInv0: li r4,lgKillResv ; Get reservation kill zone + stwcx. r4,0,r4 ; Kill reservation + +isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control + rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked? + bne-- isInv ; Nope... + b isInv1 ; Still locked, do it again... + +; +; This routine switches segment registers between kernel and user. +; We have some assumptions and rules: +; We are in the exception vectors +; pf64Bitb is set up +; R3 contains the MSR we are going to +; We can not use R4, R13, R20, R21, R29 +; R13 is the savearea +; R29 has the per_proc +; +; We return R3 as 0 if we did not switch between kernel and user +; We also maintain and apply the user state key modifier used by VMM support; +; If we go to the kernel it is set to 0, otherwise it follows the bit +; in spcFlags.
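The kernel/user pmap choice below is branch-free: a 0-or-1 "going to user" bit becomes a 0 or all-ones mask, and the two candidate pointers are combined under it. The same idiom in C (names illustrative):

    #include <stdint.h>

    static uintptr_t select_pmap(uintptr_t kernel_pmap, uintptr_t user_pmap,
                                 uintptr_t going_to_user /* 0 or 1 */)
    {
        uintptr_t mask = 0 - going_to_user;       /* 0 for kernel, ~0 for user */
        return (kernel_pmap & ~mask) | (user_pmap & mask);
    }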
+; + .align 5 - .globl EXT(hw_set_user_space) + .globl EXT(switchSegs) -LEXT(hw_set_user_space) +LEXT(switchSegs) + + lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator) + lwz r9,spcFlags(r29) ; Pick up the special user state flags + rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit + rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit + lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel + or r2,r2,r3 ; This will 1 if we will be using user segments + li r3,0 ; Get a selection mask + cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg + ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address) + sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user + la r19,ppUserPmap(r29) ; Point to the current user pmap + +; The following line is an exercise of a generally unreadable but recompile-friendly programing practice + rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key + + andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise + and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise + and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise + or r8,r8,r19 ; Get the pointer to the pmap we are using + + beqlr ; We are staying in the same mode, do not touch segs... + + lwz r28,0(r8) ; Get top half of pmap address + lwz r10,4(r8) ; Get bottom half + + stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg + rlwinm r28,r28,0,1,0 ; Copy top to top + stw r30,ppMapFlags(r29) ; Set the key modifier + rlwimi r28,r10,0,0,31 ; Insert bottom + + la r10,pmapCCtl(r28) ; Point to the segment cache control + la r9,pmapSegCache(r28) ; Point to the segment cache + +ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control + rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock? + ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit + bne-- ssgLock0 ; Yup, this is in use... + + stwcx. r16,0,r10 ; Try to set the lock + bne-- ssgLock ; Did we get contention? + + not r11,r15 ; Invert the invalids to valids + li r17,0 ; Set a mask for the SRs we are loading + isync ; Make sure we are all caught up + + bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it... + + li r0,0 ; Clear + slbia ; Trash all SLB entries (except for entry 0 that is) + li r17,1 ; Get SLB index to load (skip slb 0) + oris r0,r0,0x8000 ; Get set for a mask + b ssg64Enter ; Start on a cache line... - mfmsr r10 /* Get the current MSR */ - rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r9,r10,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off 'rupts */ - mtmsr r9 /* Disable 'em */ - lwz r7,PMAP_PMAPVR(r3) ; Get the v to r translation - lwz r4,PMAP_SPACE(r3) ; Get the space - mfsprg r6,0 /* Get the per_proc_info address */ - xor r3,r3,r7 ; Get real address of bmap anchor - stw r4,PP_USERSPACE(r6) /* Show our new address space */ - stw r3,PP_USERPMAP(r6) ; Show our real pmap address - mtmsr r10 /* Restore interruptions */ - blr /* Return... */ - .align 5 - .globl EXT(hw_set_user_space_dis) -LEXT(hw_set_user_space_dis) +ssgLock0: li r15,lgKillResv ; Killing field + stwcx. 
r15,0,r15 ; Kill reservation - lwz r7,PMAP_PMAPVR(r3) ; Get the v to r translation - lwz r4,PMAP_SPACE(r3) ; Get the space - mfsprg r6,0 ; Get the per_proc_info address - xor r3,r3,r7 ; Get real address of bmap anchor - stw r4,PP_USERSPACE(r6) ; Show our new address space - stw r3,PP_USERPMAP(r6) ; Show our real pmap address - blr ; Return... - +ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls + rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock? + beq++ ssgLock ; Yup, this is in use... + b ssgLock1 ; Nope, try again... +; +; This is the 32-bit address space switch code. +; We take a reservation on the segment cache and walk through. +; For each entry, we load the specified entries and remember which +; we did with a mask. Then, we figure out which segments should be +; invalid and then see which actually are. Then we load those with the +; defined invalid VSID. +; Afterwards, we unlock the segment cache. +; -/* struct mapping *hw_cpv(struct mapping *mp) - Converts a physcial mapping CB address to virtual - * - */ + .align 5 + +ssg32Enter: cntlzw r12,r11 ; Find the next slot in use + cmplwi r12,pmapSegCacheUse ; See if we are done + slwi r14,r12,4 ; Index to the cache slot + lis r0,0x8000 ; Get set for a mask + add r14,r14,r9 ; Point to the entry + + bge- ssg32Done ; All done... + + lwz r5,sgcESID+4(r14) ; Get the ESID part + srw r2,r0,r12 ; Form a mask for the one we are loading + lwz r7,sgcVSID+4(r14) ; And get the VSID bottom + + andc r11,r11,r2 ; Clear the bit + lwz r6,sgcVSID(r14) ; And get the VSID top + + rlwinm r2,r5,4,28,31 ; Change the segment number to a number + + xor r7,r7,r30 ; Modify the key before we actually set it + srw r0,r0,r2 ; Get a mask for the SR we are loading + rlwinm r8,r7,19,1,3 ; Insert the keys and N bit + or r17,r17,r0 ; Remember the segment + rlwimi r8,r7,20,12,31 ; Insert 4:23 the VSID + rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents + + mtsrin r8,r5 ; Load the segment + b ssg32Enter ; Go enter the next... + + .align 5 + +ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags + stw r15,pmapCCtl(r28) ; Unlock the segment cache controls + + lis r0,0x8000 ; Get set for a mask + li r2,invalSpace ; Set the invalid address space VSID + + nop ; Align loop + nop ; Align loop + andc r16,r16,r17 ; Get list of SRs that were valid before but not now + nop ; Align loop + +ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate + cmplwi r18,16 ; Have we finished? + srw r22,r0,r18 ; Get the mask bit + rlwinm r23,r18,28,0,3 ; Get the segment register we need + andc r16,r16,r22 ; Get rid of the guy we just did + bge ssg32Really ; Yes, we are really done now... + + mtsrin r2,r23 ; Invalidate the SR + b ssg32Inval ; Do the next... + + .align 5 + +ssg32Really: + stw r17,validSegs(r29) ; Set the valid SR flags + li r3,1 ; Set kernel/user transition + blr + +; +; This is the 64-bit address space switch code. +; First we blow away all of the SLB entries. +; Walk through, +; loading the SLB. Afterwards, we release the cache lock +; +; Note that because we have to treat SLBE 0 specially, we do not ever use it... +; Its a performance thing... +; .align 5 - .globl EXT(hw_cpv) -LEXT(hw_cpv) +ssg64Enter: cntlzw r12,r11 ; Find the next slot in use + cmplwi r12,pmapSegCacheUse ; See if we are done + slwi r14,r12,4 ; Index to the cache slot + srw r16,r0,r12 ; Form a mask for the one we are loading + add r14,r14,r9 ; Point to the entry + andc r11,r11,r16 ; Clear the bit + bge-- ssg64Done ; All done... 
+ + ld r5,sgcESID(r14) ; Get the ESID part + ld r6,sgcVSID(r14) ; And get the VSID part + oris r5,r5,0x0800 ; Turn on the valid bit + or r5,r5,r17 ; Insert the SLB slot + xor r6,r6,r30 ; Modify the key before we actually set it + addi r17,r17,1 ; Bump to the next slot + slbmte r6,r5 ; Make that SLB entry + b ssg64Enter ; Go enter the next... - rlwinm. r4,r3,0,0,19 ; Round back to the mapping block allocation control block - mfmsr r10 ; Get the current MSR - beq- hcpvret ; Skip if we are passed a 0... - rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - andi. r9,r10,0x7FEF ; Turn off interrupts and data translation - mtmsr r9 ; Disable DR and EE - isync + .align 5 - lwz r4,mbvrswap(r4) ; Get the conversion value - mtmsr r10 ; Interrupts and DR back on - isync - xor r3,r3,r4 ; Convert to physical +ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls -hcpvret: rlwinm r3,r3,0,0,26 ; Clean out any flags + eqv r16,r16,r16 ; Load up with all foxes + subfic r17,r17,64 ; Get the number of 1 bits we need + + sld r16,r16,r17 ; Get a mask for the used SLB entries + li r3,1 ; Set kernel/user transition + std r16,validSegs(r29) ; Set the valid SR flags blr +; +; mapSetUp - this function sets initial state for all mapping functions. +; We turn off all translations (physical), disable interruptions, and +; enter 64-bit mode if applicable. +; +; We also return the original MSR in r11, the feature flags in R12, +; and CR6 set up so we can do easy branches for 64-bit +; + + .align 5 + .globl EXT(mapSetUp) + +LEXT(mapSetUp) + + lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask + mfsprg r12,2 ; Get feature flags + ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well + mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6 + mfmsr r11 ; Save the MSR + mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6 + andc r11,r11,r0 ; Clear VEC and FP for good + ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR + li r2,1 ; Prepare for 64 bit + andc r0,r11,r0 ; Clear the rest + bt pfNoMSRirb,msuNoMSR ; No MSR... + bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint) -/* struct mapping *hw_cvp(struct mapping *mp) - Converts a virtual mapping CB address to physcial - * - * Translation must be on for this - * - */ + mtmsr r0 ; Translation and all off + isync ; Toss prefetch + blr ; Return... + + .align 5 + +msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0) + mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR + isync ; synchronize + blr ; Return... + + .align 5 + +msuNoMSR: mr r2,r3 ; Save R3 across call + mr r3,r0 ; Get the new MSR value + li r0,loadMSR ; Get the MSR setter SC + sc ; Set it + mr r3,r2 ; Restore R3 + blr ; Go back all set up... 
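The next routine, mapFindPhyTry, walks the pmap_mem_regions bank table to find the physent for a page before try-locking it. The search itself reduces to the following C sketch; the struct and field names paraphrase mrPhysTab/mrStart/mrEnd and the 8-byte physent stride visible in the assembly, and are not the kernel's actual declarations:

    #include <stddef.h>
    #include <stdint.h>

    struct mem_region {
        uint64_t *phys_tab;   /* per-page physents; NULL marks the end of the table */
        uint32_t  start;      /* first ppnum covered by this bank */
        uint32_t  end;        /* last ppnum covered by this bank */
    };

    static uint64_t *find_physent(const struct mem_region *r, uint32_t ppnum)
    {
        for (; r->phys_tab != NULL; r++) {
            if (ppnum >= r->start && ppnum <= r->end)
                return &r->phys_tab[ppnum - r->start];   /* 8 bytes per physent */
        }
        return NULL;          /* no physent for this page */
    }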
+ + +; +; Find the physent based on a physical page and try to lock it (but not too hard) +; Note that this table always has an entry that with a 0 table pointer at the end +; +; R3 contains ppnum on entry +; R3 is 0 if no entry was found +; R3 is physent if found +; cr0_eq is true if lock was obtained or there was no entry to lock +; cr0_eq is false of there was an entry and it was locked +; + + .align 5 + +mapFindPhyTry: + lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table + mr r2,r3 ; Save our target + ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table + +mapFindPhz: lwz r3,mrPhysTab(r9) ; Get the actual table address + lwz r5,mrStart(r9) ; Get start of table entry + lwz r0,mrEnd(r9) ; Get end of table entry + addi r9,r9,mrSize ; Point to the next slot + cmplwi cr2,r3,0 ; Are we at the end of the table? + cmplw r2,r5 ; See if we are in this table + cmplw cr1,r2,r0 ; Check end also + sub r4,r2,r5 ; Calculate index to physical entry + beq-- cr2,mapFindNo ; Leave if we did not find an entry... + cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry + slwi r4,r4,3 ; Get offset to physical entry + + blt-- mapFindPhz ; Did not find it... + + add r3,r3,r4 ; Point right to the slot + +mapFindOv: lwz r2,0(r3) ; Get the lock contents right now + rlwinm. r0,r2,0,0,0 ; Is it locked? + bnelr-- ; Yes it is... + + lwarx r2,0,r3 ; Get the lock + rlwinm. r0,r2,0,0,0 ; Is it locked? + oris r0,r2,0x8000 ; Set the lock bit + bne-- mapFindKl ; It is locked, go get rid of reservation and leave... + stwcx. r0,0,r3 ; Try to stuff it back... + bne-- mapFindOv ; Collision, try again... + isync ; Clear any speculations + blr ; Leave... + +mapFindKl: li r2,lgKillResv ; Killing field + stwcx. r2,0,r2 ; Trash reservation... + crclr cr0_eq ; Make sure we do not think we got the lock + blr ; Leave... + +mapFindNo: crset cr0_eq ; Make sure that we set this + li r3,0 ; Show that we did not find it + blr ; Leave... +; +; pmapCacheLookup - This function will look up an entry in the pmap segment cache. +; +; How the pmap cache lookup works: +; +; We use a combination of three things: a mask of valid entries, a sub-tag, and the +; ESID (aka the "tag"). The mask indicates which of the cache slots actually contain +; an entry. The sub-tag is a 16 entry 4 bit array that contains the low order 4 bits +; of the ESID, bits 32:36 of the effective for 64-bit and 0:3 for 32-bit. The cache +; entry contains the full 36 bit ESID. +; +; The purpose of the sub-tag is to limit the number of searches necessary when looking +; for an existing cache entry. Because there are 16 slots in the cache, we could end up +; searching all 16 if an match is not found. +; +; Essentially, we will search only the slots that have a valid entry and whose sub-tag +; matches. More than likely, we will eliminate almost all of the searches. +; +; Inputs: +; R3 = pmap +; R4 = ESID high half +; R5 = ESID low half +; +; Outputs: +; R3 = pmap cache slot if found, 0 if not +; R10 = pmapCCtl address +; R11 = pmapCCtl image +; pmapCCtl locked on exit +; + + .align 5 + +pmapCacheLookup: + la r10,pmapCCtl(r3) ; Point to the segment cache control + +pmapCacheLookuq: + lwarx r11,0,r10 ; Get the segment cache control value + rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked? + ori r0,r11,lo16(pmapCCtlLck) ; Turn on the lock bit + bne-- pmapCacheLookur ; Nope... + stwcx. r0,0,r10 ; Try to take the lock + bne-- pmapCacheLookuq ; Someone else just stuffed it, try again... 
+ + isync ; Make sure we get reservation first + lwz r9,pmapSCSubTag(r3) ; Get the high part of the sub-tag + rlwimi r5,r5,28,4,7 ; Copy sub-tag just to right of itself (XX------) + lwz r10,pmapSCSubTag+4(r3) ; And the bottom half + rlwimi r5,r5,24,8,15 ; Copy doubled sub-tag to right of itself (XXXX----) + lis r8,0x8888 ; Get some eights + rlwimi r5,r5,16,16,31 ; Copy quadrupled sub-tags to the right + ori r8,r8,0x8888 ; Fill the rest with eights + + eqv r10,r10,r5 ; Get 0xF where we hit in bottom half + eqv r9,r9,r5 ; Get 0xF where we hit in top half + + rlwinm r2,r10,1,0,30 ; Shift over 1 + rlwinm r0,r9,1,0,30 ; Shift over 1 + and r2,r2,r10 ; AND the even/odd pair into the even + and r0,r0,r9 ; AND the even/odd pair into the even + rlwinm r10,r2,2,0,28 ; Shift over 2 + rlwinm r9,r0,2,0,28 ; Shift over 2 + and r10,r2,r10 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ... + and r9,r0,r9 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ... + + and r10,r10,r8 ; Clear out extras + and r9,r9,r8 ; Clear out extras + + rlwinm r0,r10,3,1,28 ; Slide adjacent next to each other + rlwinm r2,r9,3,1,28 ; Slide adjacent next to each other + or r10,r0,r10 ; Merge them + or r9,r2,r9 ; Merge them + rlwinm r0,r10,6,2,26 ; Slide adjacent pairs next to each other + rlwinm r2,r9,6,2,26 ; Slide adjacent pairs next to each other + or r10,r0,r10 ; Merge them + or r9,r2,r9 ; Merge them + rlwimi r10,r10,12,4,7 ; Stick in the low-order adjacent quad + rlwimi r9,r9,12,4,7 ; Stick in the low-order adjacent quad + not r6,r11 ; Turn invalid into valid + rlwimi r9,r10,24,8,15 ; Merge in the adjacent octs giving a hit mask + + la r10,pmapSegCache(r3) ; Point at the cache slots + and. r6,r9,r6 ; Get mask of valid and hit + li r0,0 ; Clear + li r3,0 ; Assume not found + oris r0,r0,0x8000 ; Start a mask + beqlr++ ; Leave, should usually be no hits... + +pclNextEnt: cntlzw r5,r6 ; Find an in use one + cmplwi cr1,r5,pmapSegCacheUse ; Did we find one? + rlwinm r7,r5,4,0,27 ; Index to the cache entry + srw r2,r0,r5 ; Get validity mask bit + add r7,r7,r10 ; Point to the cache slot + andc r6,r6,r2 ; Clear the validity bit we just tried + bgelr-- cr1 ; Leave if there are no more to check... + + lwz r5,sgcESID(r7) ; Get the top half + + cmplw r5,r4 ; Only need to check top because sub-tag is the entire other half + + bne++ pclNextEnt ; Nope, try again... + + mr r3,r7 ; Point to the slot + blr ; Leave.... .align 5 - .globl EXT(hw_cvp) -LEXT(hw_cvp) +pmapCacheLookur: + li r11,lgKillResv ; The killing spot + stwcx. r11,0,r11 ; Kill the reservation - rlwinm r4,r3,0,0,19 ; Round back to the mapping block allocation control block - rlwinm r3,r3,0,0,26 ; Clean out any flags - lwz r4,mbvrswap(r4) ; Get the conversion value - xor r3,r3,r4 ; Convert to virtual +pmapCacheLookus: + lwz r11,pmapCCtl(r3) ; Get the segment cache control + rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked? + beq++ pmapCacheLookup ; Nope... + b pmapCacheLookus ; Yup, keep waiting... + + + + +; +; This routine, given a mapping, will find and lock the PTEG +; If mpPte does not point to a PTE (checked before and after lock), it will unlock the +; PTEG and return. In this case we will have undefined in R4 +; and the low 12 bits of mpVAddr valid in R5. R3 will contain 0. +; +; If the mapping is still valid, we will invalidate the PTE and merge +; the RC bits into the physent and also save them into the mapping. 
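Stepping back to pmapCacheLookup above: the sub-tag screen is a small SWAR computation. XNORing the packed 4-bit sub-tags against the search nibble replicated eight times leaves 0xF in every matching slot; two AND-fold steps then collapse each nibble to a single hit bit, which is what the 0x88888888 masking accomplishes. A minimal C version for one 32-bit sub-tag word (subtag_hits is an illustrative name):

    #include <stdint.h>

    static uint32_t subtag_hits(uint32_t subtags, uint32_t nibble /* 0..15 */)
    {
        uint32_t probe = nibble * 0x11111111u;   /* replicate the nibble */
        uint32_t eq    = ~(subtags ^ probe);     /* eqv: 0xF where equal */
        eq &= eq << 1;                           /* AND adjacent bits... */
        eq &= eq << 2;                           /* ...then adjacent pairs */
        return eq & 0x88888888u;                 /* one flag bit per 4-bit slot */
    }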
+; +; We then return with R3 pointing to the PTE slot, R4 is the +; top of the PTE and R5 is the bottom. R6 contains the PCA. +; R7 points to the PCA entry. +; +; Note that we should NEVER be called on a block or special mapping. +; We could do many bad things. +; + + .align 5 + +mapInvPte32: + lwz r0,mpPte(r31) ; Grab the PTE offset + mfsdr1 r7 ; Get the pointer to the hash table + lwz r5,mpVAddr+4(r31) ; Grab the virtual address + rlwinm r10,r7,0,0,15 ; Clean up the hash table base + andi. r3,r0,mpHValid ; Is there a possible PTE? + srwi r7,r0,4 ; Convert to PCA units + rlwinm r7,r7,0,0,29 ; Clean up PCA offset + mflr r2 ; Save the return + subfic r7,r7,-4 ; Convert to -4 based negative index + add r7,r10,r7 ; Point to the PCA directly + beqlr-- ; There was no PTE to start with... + + bl mapLockPteg ; Lock the PTEG + + lwz r0,mpPte(r31) ; Grab the PTE offset + mtlr r2 ; Restore the LR + andi. r3,r0,mpHValid ; Is there a possible PTE? + beq- mIPUnlock ; There is no PTE, someone took it so just unlock and leave... + + rlwinm r3,r0,0,0,30 ; Clear the valid bit + add r3,r3,r10 ; Point to actual PTE + lwz r4,0(r3) ; Get the top of the PTE + + li r8,tlbieLock ; Get the TLBIE lock + rlwinm r0,r4,0,1,31 ; Clear the valid bit + stw r0,0(r3) ; Invalidate the PTE + + sync ; Make sure everyone sees the invalidate + +mITLBIE32: lwarx r0,0,r8 ; Get the TLBIE lock + mfsprg r2,2 ; Get feature flags + mr. r0,r0 ; Is it locked? + li r0,1 ; Get our lock word + bne- mITLBIE32 ; It is locked, go wait... + + stwcx. r0,0,r8 ; Try to get it + bne- mITLBIE32 ; We was beat... + + rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box? + li r0,0 ; Lock clear value + + tlbie r5 ; Invalidate it everywhere + + stw r0,tlbieLock(0) ; Clear the tlbie lock + + beq- mINoTS32 ; Can not have MP on this machine... + + eieio ; Make sure that the tlbie happens first + tlbsync ; Wait for everyone to catch up + sync ; Make sure of it all + +mINoTS32: lwz r5,4(r3) ; Get the real part + srwi r10,r5,12 ; Change physical address to a ppnum + +mINmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table + lwz r0,mpVAddr+4(r31) ; Get the flags part of the field + lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table + ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table + rlwinm r11,r11,2,0,29 ; Change index into byte offset + add r11,r11,r8 ; Point to the bank table + lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer + lwz r11,mrStart(r11) ; Get the start of bank + rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC + addi r2,r2,4 ; Offset to last half of field + stw r0,mpVAddr+4(r31) ; Set the new RC into the field + sub r11,r10,r11 ; Get the index into the table + rlwinm r11,r11,3,0,28 ; Get offset to the physent + + +mImrgRC: lwarx r10,r11,r2 ; Get the master RC + rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC + or r0,r0,r10 ; Merge in the new RC + stwcx. r0,r11,r2 ; Try to stick it back + bne-- mImrgRC ; Try again if we collided... + + blr ; Leave with the PCA still locked up... + +mIPUnlock: eieio ; Make sure all updates come first + + stw r6,0(r7) ; Unlock blr +; +; 64-bit version +; + .align 5 -/* int mapalc(struct mappingblok *mb) - Finds, allocates, and checks a free mapping entry in a block - * - * Lock must already be held on mapping block list - * returns 0 if all slots filled. 
- * returns n if a slot is found and it is not the last - * returns -n if a slot os found and it is the last - * when n and -n are returned, the corresponding bit is cleared - * - */ +mapInvPte64: + lwz r0,mpPte(r31) ; Grab the PTE offset + ld r5,mpVAddr(r31) ; Grab the virtual address + mfsdr1 r7 ; Get the pointer to the hash table + rldicr r10,r7,0,45 ; Clean up the hash table base + andi. r3,r0,mpHValid ; Is there a possible PTE? + srdi r7,r0,5 ; Convert to PCA units + rldicr r7,r7,0,61 ; Clean up PCA + subfic r7,r7,-4 ; Convert to -4 based negative index + mflr r2 ; Save the return + add r7,r10,r7 ; Point to the PCA directly + beqlr-- ; There was no PTE to start with... + + bl mapLockPteg ; Lock the PTEG + + lwz r0,mpPte(r31) ; Grab the PTE offset again + mtlr r2 ; Restore the LR + andi. r3,r0,mpHValid ; Is there a possible PTE? + beq-- mIPUnlock ; There is no PTE, someone took it so just unlock and leave... + + rlwinm r3,r0,0,0,30 ; Clear the valid bit + add r3,r3,r10 ; Point to the actual PTE + ld r4,0(r3) ; Get the top of the PTE + + li r8,tlbieLock ; Get the TLBIE lock + rldicr r0,r4,0,62 ; Clear the valid bit + std r0,0(r3) ; Invalidate the PTE + + rldicr r2,r4,16,35 ; Shift the AVPN over to match VPN + sync ; Make sure everyone sees the invalidate + rldimi r2,r5,0,36 ; Cram in the page portion of the EA + +mITLBIE64: lwarx r0,0,r8 ; Get the TLBIE lock + mr. r0,r0 ; Is it locked? + li r0,1 ; Get our lock word + bne-- mITLBIE64a ; It is locked, toss reservation and wait... + + stwcx. r0,0,r8 ; Try to get it + bne-- mITLBIE64 ; We was beat... + + rldicl r2,r2,0,16 ; Clear bits 0:15 because we are under orders + + li r0,0 ; Lock clear value + + tlbie r2 ; Invalidate it everywhere + + stw r0,tlbieLock(0) ; Clear the tlbie lock + + eieio ; Make sure that the tlbie happens first + tlbsync ; Wait for everyone to catch up + isync + ptesync ; Wait for quiet again + +mINoTS64: sync ; Make sure of it all + + ld r5,8(r3) ; Get the real part + srdi r10,r5,12 ; Change physical address to a ppnum + b mINmerge ; Join the common 32-64-bit code... + +mITLBIE64a: li r5,lgKillResv ; Killing field + stwcx. r5,0,r5 ; Kill reservation + +mITLBIE64b: lwz r0,0(r8) ; Get the TLBIE lock + mr. r0,r0 ; Is it locked? + beq++ mITLBIE64 ; Nope, try again... + b mITLBIE64b ; Yup, wait for it... + +; +; mapLockPteg - Locks a PTEG +; R7 points to PCA entry +; R6 contains PCA on return +; +; .align 5 - .globl EXT(mapalc) + +mapLockPteg: + lwarx r6,0,r7 ; Pick up the PCA + rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked? + ori r0,r6,PCAlock ; Set the lock bit + bne-- mLSkill ; It is locked... + + stwcx. r0,0,r7 ; Try to lock the PTEG + bne-- mapLockPteg ; We collided... + + isync ; Nostradamus lied + blr ; Leave... + +mLSkill: li r6,lgKillResv ; Get killing field + stwcx. r6,0,r6 ; Kill it -LEXT(mapalc) +mapLockPteh: + lwz r6,0(r7) ; Pick up the PCA + rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked? + beq++ mapLockPteg ; Nope, try again... + b mapLockPteh ; Yes, wait for it... - lwz r4,mbfree(r3) ; Get the first mask - lis r0,0x8000 ; Get the mask to clear the first free bit - lwz r5,mbfree+4(r3) ; Get the second mask - mr r12,r3 ; Save the return - cntlzw r8,r4 ; Get first free field - lwz r6,mbfree+8(r3) ; Get the third mask - srw. r9,r0,r8 ; Get bit corresponding to first free one - lwz r7,mbfree+12(r3) ; Get the fourth mask - cntlzw r10,r5 ; Get first free field in second word - andc r4,r4,r9 ; Turn it off - bne malcfnd0 ; Found one... + +; +; The mapSelSlot function selects a PTEG slot to use. 
As input, it expects R6 +; to contain the PCA. When it returns, R3 contains 0 if an unoccupied slot was +; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE. +; R4 returns the slot index. +; +; CR7 also indicates that we have a block mapping +; +; The PTEG allocation controls are a bit map of the state of the PTEG. +; PCAfree indicates that the PTE slot is empty. +; PCAauto means that it comes from an autogen area. These +; guys do not keep track of reference and change and are actually "wired". +; They are easy to maintain. PCAsteal +; is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these +; fields fit in a single word and are loaded and stored under control of the +; PTEG control area lock (PCAlock). +; +; Note that PCAauto does not contribute to the steal calculations at all. Originally +; it did, autogens were second in priority. This can result in a pathalogical +; case where an instruction can not make forward progress, or one PTE slot +; thrashes. +; +; Note that the PCA must be locked when we get here. +; +; Physically, the fields are arranged: +; 0: PCAfree +; 1: PCAsteal +; 2: PCAauto +; 3: PCAmisc +; +; +; At entry, R6 contains new unlocked PCA image (real PCA is locked and untouched) +; +; At exit: +; +; R3 = 0 - no steal +; R3 = 1 - steal regular +; R3 = 2 - steal autogen +; R4 contains slot number +; R6 contains updated PCA image +; + + .align 5 - srw. r9,r0,r10 ; Get bit corresponding to first free one in second word - cntlzw r11,r6 ; Get first free field in third word - andc r5,r5,r9 ; Turn it off - bne malcfnd1 ; Found one... +mapSelSlot: lis r10,0 ; Clear autogen mask + li r9,0 ; Start a mask + beq cr7,mSSnotblk ; Skip if this is not a block mapping + ori r10,r10,lo16(0xFFFF) ; Make sure we mark a block mapping (autogen) + +mSSnotblk: rlwinm r11,r6,16,24,31 ; Isolate just the steal mask + oris r9,r9,0x8000 ; Get a mask + cntlzw r4,r6 ; Find a slot or steal one + ori r9,r9,lo16(0x8000) ; Insure that we have 0x80008000 + rlwinm r4,r4,0,29,31 ; Isolate bit position + rlwimi r11,r11,8,16,23 ; Get set to march a 1 back into top of 8 bit rotate + srw r2,r9,r4 ; Get mask to isolate selected inuse and autogen flags + srwi r11,r11,1 ; Slide steal mask right + and r8,r6,r2 ; Isolate the old in use and autogen bits + andc r6,r6,r2 ; Allocate the slot and also clear autogen flag + addi r0,r8,0x7F00 ; Push autogen flag to bit 16 + and r2,r2,r10 ; Keep the autogen part if autogen + addis r8,r8,0xFF00 ; Push in use to bit 0 and invert + or r6,r6,r2 ; Add in the new autogen bit + rlwinm r0,r0,17,31,31 ; Get a 1 if the old was autogenned (always 0 if not in use) + rlwinm r8,r8,1,31,31 ; Isolate old in use + rlwimi r6,r11,16,8,15 ; Stick the new steal slot in + + add r3,r0,r8 ; Get 0 if no steal, 1 if steal normal, 2 if steal autogen + blr ; Leave... - srw. r9,r0,r11 ; Get bit corresponding to first free one in third word - cntlzw r10,r7 ; Get first free field in fourth word - andc r6,r6,r9 ; Turn it off - bne malcfnd2 ; Found one... +; +; Shared/Exclusive locks +; +; A shared/exclusive lock allows multiple shares of a lock to be taken +; but only one exclusive. A shared lock can be "promoted" to exclusive +; when it is the only share. If there are multiple sharers, the lock +; must be "converted". A promotion drops the share and gains exclusive as +; an atomic operation. If anyone else has a share, the operation fails. +; A conversion first drops the share and then takes an exclusive lock. 
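The share/promote behavior described above maps naturally onto a single lock word whose high bit (the 0x8000,0000 image the code loads) means "held exclusively" and whose low bits count shares. A sketch using C11 atomics in place of lwarx/stwcx., with the spin-on-failure left to the caller (function names illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SXLK_EXCL 0x80000000u

    static bool sxlk_shared_try(_Atomic uint32_t *lk)     /* one sxlkShared pass */
    {
        uint32_t old = atomic_load(lk);
        do {
            if (old & SXLK_EXCL)
                return false;                    /* exclusively held: caller waits */
        } while (!atomic_compare_exchange_weak(lk, &old, old + 1));
        return true;
    }

    static bool sxlk_promote_try(_Atomic uint32_t *lk)    /* the sxlkPromote test */
    {
        uint32_t expect = 1;                     /* we must be the only sharer */
        return atomic_compare_exchange_strong(lk, &expect, SXLK_EXCL);
    }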
+; +; We will want to add a timeout to this eventually. +; +; R3 is set to 0 for success, non-zero for failure +; + +; +; Convert a share into an exclusive +; + + .align 5 - srw. r9,r0,r10 ; Get bit corresponding to first free one in second word - li r3,0 ; Assume abject failure - andc r7,r7,r9 ; Turn it off - beqlr ; There are none any left... +sxlkConvert: + + lis r0,0x8000 ; Get the locked lock image +#if 0 + mflr r0 ; (TEST/DEBUG) + oris r0,r0,0x8000 ; (TEST/DEBUG) +#endif + +sxlkCTry: lwarx r2,0,r3 ; Get the lock word + cmplwi r2,1 ; Does it just have our share? + subi r2,r2,1 ; Drop our share in case we do not get it + bne-- sxlkCnotfree ; No, we need to unlock... + stwcx. r0,0,r3 ; Try to take it exclusively + bne-- sxlkCTry ; Collision, try again... - addi r3,r10,96 ; Set the correct bit number - stw r7,mbfree+12(r12) ; Actually allocate the slot + isync + li r3,0 ; Set RC + blr ; Leave... + +sxlkCnotfree: + stwcx. r2,0,r3 ; Try to drop our share... + bne-- sxlkCTry ; Try again if we collided... + b sxlkExclusive ; Go take it exclusively... + +; +; Promote shared to exclusive +; + + .align 5 -mapafin: or r4,r4,r5 ; Merge the first two allocation maps - or r6,r6,r7 ; Then the last two - or. r4,r4,r6 ; Merge both halves - bnelr+ ; Return if some left for next time... +sxlkPromote: + lis r0,0x8000 ; Get the locked lock image +#if 0 + mflr r0 ; (TEST/DEBUG) + oris r0,r0,0x8000 ; (TEST/DEBUG) +#endif + +sxlkPTry: lwarx r2,0,r3 ; Get the lock word + cmplwi r2,1 ; Does it just have our share? + bne-- sxlkPkill ; No, just fail (R3 is non-zero)... + stwcx. r0,0,r3 ; Try to take it exclusively + bne-- sxlkPTry ; Collision, try again... - neg r3,r3 ; Indicate we just allocated the last one + isync + li r3,0 ; Set RC blr ; Leave... + +sxlkPkill: li r2,lgKillResv ; Point to killing field + stwcx. r2,0,r2 ; Kill reservation + blr ; Leave + + + +; +; Take lock exclusivily +; + + .align 5 -malcfnd0: stw r4,mbfree(r12) ; Actually allocate the slot - mr r3,r8 ; Set the correct bit number - b mapafin ; Exit now... +sxlkExclusive: + lis r0,0x8000 ; Get the locked lock image +#if 0 + mflr r0 ; (TEST/DEBUG) + oris r0,r0,0x8000 ; (TEST/DEBUG) +#endif + +sxlkXTry: lwarx r2,0,r3 ; Get the lock word + mr. r2,r2 ; Is it locked? + bne-- sxlkXWait ; Yes... + stwcx. r0,0,r3 ; Try to take it + bne-- sxlkXTry ; Collision, try again... -malcfnd1: stw r5,mbfree+4(r12) ; Actually allocate the slot - addi r3,r10,32 ; Set the correct bit number - b mapafin ; Exit now... + isync ; Toss anything younger than us + li r3,0 ; Set RC + blr ; Leave... -malcfnd2: stw r6,mbfree+8(r12) ; Actually allocate the slot - addi r3,r11,64 ; Set the correct bit number - b mapafin ; Exit now... + .align 5 + +sxlkXWait: li r2,lgKillResv ; Point to killing field + stwcx. r2,0,r2 ; Kill reservation +sxlkXWaiu: lwz r2,0(r3) ; Get the lock again + mr. r2,r2 ; Is it free yet? + beq++ sxlkXTry ; Yup... + b sxlkXWaiu ; Hang around a bit more... -/* - * Log out all memory usage - */ +; +; Take a share of the lock +; .align 5 - .globl EXT(logmem) + +sxlkShared: lwarx r2,0,r3 ; Get the lock word + rlwinm. r0,r2,0,0,0 ; Is it locked exclusively? + addi r2,r2,1 ; Up the share count + bne-- sxlkSWait ; Yes... + stwcx. r2,0,r3 ; Try to take it + bne-- sxlkShared ; Collision, try again... + + isync ; Toss anything younger than us + li r3,0 ; Set RC + blr ; Leave... + + .align 5 -LEXT(logmem) +sxlkSWait: li r2,lgKillResv ; Point to killing field + stwcx. 
r2,0,r2 ; Kill reservation - mfmsr r2 ; Get the MSR - lis r10,hi16(EXT(DebugWork)) ; High part of area - rlwinm r2,r2,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - lis r12,hi16(EXT(mem_actual)) ; High part of actual - rlwinm r2,r2,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - andi. r0,r2,0x7FCF ; Interrupts and translation off - ori r10,r10,lo16(EXT(DebugWork)) ; Get the entry - mtmsr r0 ; Turn stuff off - ori r12,r12,lo16(EXT(mem_actual)) ; Get the actual - li r0,1 ; Get a one - +sxlkSWaiu: lwz r2,0(r3) ; Get the lock again + rlwinm. r0,r2,0,0,0 ; Is it locked exclusively? + beq++ sxlkShared ; Nope... + b sxlkSWaiu ; Hang around a bit more... + +; +; Unlock either exclusive or shared. +; + + .align 5 + +sxlkUnlock: eieio ; Make sure we order our stores out + +sxlkUnTry: lwarx r2,0,r3 ; Get the lock + rlwinm. r0,r2,0,0,0 ; Do we hold it exclusively? + subi r2,r2,1 ; Remove our share if we have one + li r0,0 ; Clear this + bne-- sxlkUExclu ; We hold exclusive... + + stwcx. r2,0,r3 ; Try to lose our share + bne-- sxlkUnTry ; Collision... + blr ; Leave... + +sxlkUExclu: stwcx. r0,0,r3 ; Unlock and release reservation + beqlr++ ; Leave if ok... + b sxlkUnTry ; Could not store, try over... + + + .align 5 + .globl EXT(fillPage) + +LEXT(fillPage) + + mfsprg r0,2 ; Get feature flags + mtcrf 0x02,r0 ; move pf64Bit to cr + + rlwinm r4,r4,0,1,0 ; Copy fill to top of 64-bit register + lis r2,0x0200 ; Get vec + mr r6,r4 ; Copy + ori r2,r2,0x2000 ; Get FP + mr r7,r4 ; Copy + mfmsr r5 ; Get MSR + mr r8,r4 ; Copy + andc r5,r5,r2 ; Clear out permanent turn-offs + mr r9,r4 ; Copy + ori r2,r2,0x8030 ; Clear IR, DR and EE + mr r10,r4 ; Copy + andc r0,r5,r2 ; Kill them + mr r11,r4 ; Copy + mr r12,r4 ; Copy + bt++ pf64Bitb,fpSF1 ; skip if 64-bit (only they take the hint) + + slwi r3,r3,12 ; Make into a physical address + mtmsr r2 ; Interrupts and translation off + isync + + li r2,4096/32 ; Get number of cache lines + +fp32again: dcbz 0,r3 ; Clear + addic. r2,r2,-1 ; Count down + stw r4,0(r3) ; Fill + stw r6,4(r3) ; Fill + stw r7,8(r3) ; Fill + stw r8,12(r3) ; Fill + stw r9,16(r3) ; Fill + stw r10,20(r3) ; Fill + stw r11,24(r3) ; Fill + stw r12,28(r3) ; Fill + addi r3,r3,32 ; Point next + bgt+ fp32again ; Keep going + + mtmsr r5 ; Restore all isync + blr ; Return... + + .align 5 + +fpSF1: li r2,1 + sldi r2,r2,63 ; Get 64-bit bit + or r0,r0,r2 ; Turn on 64-bit + sldi r3,r3,12 ; Make into a physical address - stw r0,4(r10) ; Force logging off - lwz r0,0(r12) ; Get the end of memory - - lis r12,hi16(EXT(mem_size)) ; High part of defined memory - ori r12,r12,lo16(EXT(mem_size)) ; Low part of defined memory - lwz r12,0(r12) ; Make it end of defined - - cmplw r0,r12 ; Is there room for the data? - ble- logmemexit ; No, do not even try... 
- - stw r12,0(r12) ; Set defined memory size - stw r0,4(r12) ; Set the actual amount of memory - - lis r3,hi16(EXT(hash_table_base)) ; Hash table address - lis r4,hi16(EXT(hash_table_size)) ; Hash table size - lis r5,hi16(EXT(pmap_mem_regions)) ; Memory regions - lis r6,hi16(EXT(mapCtl)) ; Mappings - ori r3,r3,lo16(EXT(hash_table_base)) - ori r4,r4,lo16(EXT(hash_table_size)) - ori r5,r5,lo16(EXT(pmap_mem_regions)) - ori r6,r6,lo16(EXT(mapCtl)) - lwz r3,0(r3) - lwz r4,0(r4) - lwz r5,4(r5) ; Get the pointer to the phys_ent table - lwz r6,0(r6) ; Get the pointer to the current mapping block - stw r3,8(r12) ; Save the hash table address - stw r4,12(r12) ; Save the hash table size - stw r5,16(r12) ; Save the physent pointer - stw r6,20(r12) ; Save the mappings - - addi r11,r12,0x1000 ; Point to area to move hash table and PCA - - add r4,r4,r4 ; Double size for both - -copyhash: lwz r7,0(r3) ; Copy both of them - lwz r8,4(r3) - lwz r9,8(r3) - lwz r10,12(r3) - subic. r4,r4,0x10 - addi r3,r3,0x10 - stw r7,0(r11) - stw r8,4(r11) - stw r9,8(r11) - stw r10,12(r11) - addi r11,r11,0x10 - bgt+ copyhash - - rlwinm r4,r12,20,12,31 ; Get number of phys_ents - -copyphys: lwz r7,0(r5) ; Copy physents - lwz r8,4(r5) - subic. r4,r4,1 - addi r5,r5,8 - stw r7,0(r11) - stw r8,4(r11) - addi r11,r11,8 - bgt+ copyphys - - addi r11,r11,4095 ; Round up to next page - rlwinm r11,r11,0,0,19 - - lwz r4,4(r6) ; Get the size of the mapping area - -copymaps: lwz r7,0(r6) ; Copy the mappings - lwz r8,4(r6) - lwz r9,8(r6) - lwz r10,12(r6) - subic. r4,r4,0x10 - addi r6,r6,0x10 - stw r7,0(r11) - stw r8,4(r11) - stw r9,8(r11) - stw r10,12(r11) - addi r11,r11,0x10 - bgt+ copymaps - - sub r11,r11,r12 ; Get the total length we saved - stw r11,24(r12) ; Save the size - -logmemexit: mtmsr r2 ; Back to normal - li r3,0 + mtmsrd r0 ; Interrupts and translation off isync + + li r2,4096/128 ; Get number of cache lines + +fp64again: dcbz128 0,r3 ; Clear + addic. r2,r2,-1 ; Count down + std r4,0(r3) ; Fill + std r6,8(r3) ; Fill + std r7,16(r3) ; Fill + std r8,24(r3) ; Fill + std r9,32(r3) ; Fill + std r10,40(r3) ; Fill + std r11,48(r3) ; Fill + std r12,56(r3) ; Fill + std r4,64+0(r3) ; Fill + std r6,64+8(r3) ; Fill + std r7,64+16(r3) ; Fill + std r8,64+24(r3) ; Fill + std r9,64+32(r3) ; Fill + std r10,64+40(r3) ; Fill + std r11,64+48(r3) ; Fill + std r12,64+56(r3) ; Fill + addi r3,r3,128 ; Point next + bgt+ fp64again ; Keep going + + mtmsrd r5 ; Restore all + isync + blr ; Return... + + .align 5 + .globl EXT(mapLog) + +LEXT(mapLog) + + mfmsr r12 + lis r11,hi16(EXT(mapdebug)) + ori r11,r11,lo16(EXT(mapdebug)) + lwz r10,0(r11) + mr. r10,r10 + bne++ mLxx + mr r10,r3 +mLxx: rlwinm r0,r12,0,MSR_DR_BIT+1,MSR_DR_BIT-1 + mtmsr r0 + isync + stw r4,0(r10) + stw r4,4(r10) + stw r5,8(r10) + stw r6,12(r10) + mtmsr r12 + isync + addi r10,r10,16 + stw r10,0(r11) blr + +#if 1 + .align 5 + .globl EXT(checkBogus) + +LEXT(checkBogus) + + BREAKPOINT_TRAP + blr ; No-op normally + +#endif + + diff --git a/osfmk/ppc/instrumentation.h b/osfmk/ppc/instrumentation.h new file mode 100644 index 000000000..1800a8485 --- /dev/null +++ b/osfmk/ppc/instrumentation.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + +/* + * Here be the instrumentation page layout + * Lovingly crafted by Bill Angell using traditional methods +*/ + +#ifndef _INSTRUMENTATION_H_ +#define _INSTRUMENTATION_H_ + +#define INTRUMENTATION 1 + + +#define inBase 0x6000 + +#define inEntry 0 +#define inAtGetTb 1 +#define inBeforeTrace 2 +#define inAfterSAAlloc 3 +#define inBeforeFilter 4 +#define inEatRuptQfret 5 +#define inEatRuptSAfree 6 +#define inPassupSwtchSeg 7 +#define inExceptionExit 8 +#define inMiddleOfSC 9 +#define inEatRuptSwtchSeg 10 +#define inPassup 11 +#define inCopyout 12 +#define inMUASbefore 13 +#define inMUAS + +#endif /* _INSTRUMENTATION_H_ */ diff --git a/osfmk/ppc/interrupt.c b/osfmk/ppc/interrupt.c index 3895c3800..2074435fc 100644 --- a/osfmk/ppc/interrupt.c +++ b/osfmk/ppc/interrupt.c @@ -33,15 +33,15 @@ #include #include #include +#include #include #include #include #include -#if NCPUS > 1 -#include -#endif /* NCPUS > 1 */ #include +perfTrap perfIntHook = 0; /* Pointer to performance trap hook routine */ + struct savearea * interrupt( int type, struct savearea *ssp, @@ -54,6 +54,10 @@ struct savearea * interrupt( thread_act_t act; disable_preemption(); + + if(perfIntHook) { /* Is there a hook? */ + if(perfIntHook(type, ssp, dsisr, dar) == KERN_SUCCESS) return ssp; /* If it succeeds, we are done... */ + } #if 0 { @@ -111,7 +115,7 @@ struct savearea * interrupt( case T_DECREMENTER: KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE, - isync_mfdec(), ssp->save_srr0, 0, 0, 0); + isync_mfdec(), (unsigned int)ssp->save_srr0, 0, 0, 0); #if 0 if (pcsample_enable) { @@ -139,7 +143,7 @@ struct savearea * interrupt( counter_always(c_incoming_interrupts++); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START, - current_cpu, ssp->save_srr0, 0, 0, 0); + current_cpu, (unsigned int)ssp->save_srr0, 0, 0, 0); per_proc_info[current_cpu].interrupt_handler( per_proc_info[current_cpu].interrupt_target, diff --git a/osfmk/ppc/io_map.c b/osfmk/ppc/io_map.c index 00f08cdbc..4a1824f32 100644 --- a/osfmk/ppc/io_map.c +++ b/osfmk/ppc/io_map.c @@ -45,6 +45,8 @@ extern vm_offset_t virtual_avail; * outside the usual physical memory. If phys_addr is NULL then * steal the appropriate number of physical pages from the vm * system and map them.
+ * + * Note, this will onl */ vm_offset_t io_map(phys_addr, size) @@ -61,38 +63,68 @@ io_map(phys_addr, size) assert (kernel_map != VM_MAP_NULL); /* VM must be initialised */ #endif - if (phys_addr != 0) { - /* make sure we map full contents of all the pages concerned */ - size = round_page(size + (phys_addr & PAGE_MASK)); + if (phys_addr != 0) { /* If they supplied a physical address, use it */ - /* Steal some free virtual addresses */ + size = round_page_32(size + (phys_addr & PAGE_MASK)); /* Make sure we map all of it */ - (void) kmem_alloc_pageable(kernel_map, &start, size); + (void) kmem_alloc_pageable(kernel_map, &start, size); /* Get some virtual addresses to use */ - pmap_map_block(kernel_pmap, start, phys_addr, size, - VM_PROT_READ|VM_PROT_WRITE, PTE_WIMG_IO, 0); /* Set up a block mapped area */ + (void)mapping_make(kernel_pmap, (addr64_t)start, (ppnum_t)(phys_addr >> 12), + (mmFlgBlock | mmFlgUseAttr | mmFlgCInhib | mmFlgGuarded), /* Map as I/O page */ + size >> 12, VM_PROT_READ|VM_PROT_WRITE); - return (start + (phys_addr & PAGE_MASK)); + return (start + (phys_addr & PAGE_MASK)); /* Pass back the mapped virtual address */ } else { - /* Steal some free virtual addresses */ - (void) kmem_alloc_pageable(kernel_map, &start, size); + (void) kmem_alloc_pageable(kernel_map, &start, size); /* Get some virtual addresses */ mapping_prealloc(size); /* Make sure there are enough free mappings */ - /* Steal some physical pages and map them one by one */ + for (i = 0; i < size ; i += PAGE_SIZE) { m = VM_PAGE_NULL; - while ((m = vm_page_grab()) == VM_PAGE_NULL) - VM_PAGE_WAIT(); + while ((m = vm_page_grab()) == VM_PAGE_NULL) { /* Get a physical page */ + VM_PAGE_WAIT(); /* Wait if we didn't have one */ + } vm_page_gobble(m); - (void) pmap_map_bd(start + i, - m->phys_addr, - m->phys_addr + PAGE_SIZE, - VM_PROT_READ|VM_PROT_WRITE); + + (void)mapping_make(kernel_pmap, + (addr64_t)(start + i), m->phys_page, + (mmFlgBlock | mmFlgUseAttr | mmFlgCInhib | mmFlgGuarded), /* Map as I/O page */ + 1, VM_PROT_READ|VM_PROT_WRITE); + } mapping_relpre(); /* Allow mapping release */ return start; } } + + +/* + * Allocate and map memory for devices before the VM system comes alive.
+ */ + +vm_offset_t io_map_spec(vm_offset_t phys_addr, vm_size_t size) +{ + vm_offset_t start; + int i; + unsigned int j; + vm_page_t m; + + + if(kernel_map != VM_MAP_NULL) { /* If VM system is up, redirect to normal routine */ + + return io_map(phys_addr, size); /* Map the address */ + + } + + size = round_page_32(size + (phys_addr - (phys_addr & -PAGE_SIZE))); /* Extend the length to include it all */ + start = pmap_boot_map(size); /* Get me some virtual address */ + + (void)mapping_make(kernel_pmap, (addr64_t)start, (ppnum_t)(phys_addr >> 12), + (mmFlgBlock | mmFlgUseAttr | mmFlgCInhib | mmFlgGuarded), /* Map as I/O page */ + size >> 12, VM_PROT_READ|VM_PROT_WRITE); + + return (start + (phys_addr & PAGE_MASK)); +} diff --git a/osfmk/ppc/io_map_entries.h b/osfmk/ppc/io_map_entries.h index 5c8380b30..2082bdf2e 100644 --- a/osfmk/ppc/io_map_entries.h +++ b/osfmk/ppc/io_map_entries.h @@ -33,5 +33,6 @@ extern vm_offset_t io_map( vm_offset_t phys_addr, vm_size_t size); +extern vm_offset_t io_map_spec(vm_offset_t phys_addr, vm_size_t size); #endif /* _PPC_IO_MAP_ENTRIES_H_ */ diff --git a/osfmk/ppc/lock.h b/osfmk/ppc/lock.h index 007e2e6c3..cb7e07aed 100644 --- a/osfmk/ppc/lock.h +++ b/osfmk/ppc/lock.h @@ -65,6 +65,8 @@ #include #include +#include +#include extern unsigned int LockTimeOut; /* Number of hardware ticks of a lock timeout */ @@ -76,20 +78,27 @@ extern unsigned int LockTimeOut; /* Number of hardware ticks of a lock timeout #include -#define simple_lock_init(l,t) hw_lock_init(l) #define __slock_held_func__(l) hw_lock_held(l) -extern void fast_usimple_lock(simple_lock_t); -extern void fast_usimple_unlock(simple_lock_t); -extern unsigned int fast_usimple_lock_try(simple_lock_t); +extern void ppc_usimple_lock_init(simple_lock_t,etap_event_t); +extern void ppc_usimple_lock(simple_lock_t); +extern void ppc_usimple_unlock_rwmb(simple_lock_t); +extern void ppc_usimple_unlock_rwcmb(simple_lock_t); +extern unsigned int ppc_usimple_lock_try(simple_lock_t); -#define simple_lock(l) fast_usimple_lock(l) -#define simple_unlock(l) fast_usimple_unlock(l) -#define simple_lock_try(l) fast_usimple_lock_try(l) -#define simple_lock_addr(l) (&(l)) +#define MACHINE_SIMPLE_LOCK + +#define simple_lock_init(l,t) ppc_usimple_lock_init(l,t) +#define simple_lock(l) ppc_usimple_lock(l) +#define simple_unlock(l) ppc_usimple_unlock_rwcmb(l) +#define simple_unlock_rwmb(l) ppc_usimple_unlock_rwmb(l) +#define simple_lock_try(l) ppc_usimple_lock_try(l) +#define simple_lock_addr(l) (&(l)) #define thread_sleep_simple_lock(l, e, i) \ thread_sleep_fast_usimple_lock((l), (e), (i)) +#define mutex_unlock(l) mutex_unlock_rwcmb(l) + #endif /* !(NCPUS == 1 || ETAP_LOCK_TRACE || USLOCK_DEBUG) */ #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/ppc/low_trace.h b/osfmk/ppc/low_trace.h index 90551671c..389852db9 100644 --- a/osfmk/ppc/low_trace.h +++ b/osfmk/ppc/low_trace.h @@ -39,6 +39,7 @@ #ifndef _LOW_TRACE_H_ #define _LOW_TRACE_H_ +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct LowTraceRecord { unsigned short LTR_cpu; /* 0000 - CPU address */ @@ -46,22 +47,29 @@ typedef struct LowTraceRecord { unsigned int LTR_timeHi; /* 0004 - High order time */ unsigned int LTR_timeLo; /* 0008 - Low order time */ unsigned int LTR_cr; /* 000C - CR */ - unsigned int LTR_srr0; /* 0010 - SRR0 */ - unsigned int LTR_srr1; /* 0014 - SRR1 */ - unsigned int LTR_dar; /* 0018 - DAR */ - unsigned int LTR_save; /* 001C - savearea */ - - unsigned int LTR_lr; /* 0020 - LR */ - unsigned int LTR_ctr; /* 0024 - CTR */ - 
unsigned int LTR_r0; /* 0028 - R0 */ - unsigned int LTR_r1; /* 002C - R1 */ - unsigned int LTR_r2; /* 0030 - R2 */ - unsigned int LTR_r3; /* 0034 - R3 */ - unsigned int LTR_r4; /* 0038 - R4 */ - unsigned int LTR_r5; /* 003C - R5 */ + unsigned int LTR_dsisr; /* 0010 - DSISR */ + unsigned int LTR_rsvd0; /* 0014 - reserved */ + uint64_t LTR_srr0; /* 0018 - SRR0 */ + + uint64_t LTR_srr1; /* 0020 - SRR1 */ + uint64_t LTR_dar; /* 0028 - DAR */ + uint64_t LTR_save; /* 0030 - savearea */ + uint64_t LTR_lr; /* 0038 - LR */ + + uint64_t LTR_ctr; /* 0040 - CTR */ + uint64_t LTR_r0; /* 0048 - R0 */ + uint64_t LTR_r1; /* 0050 - R1 */ + uint64_t LTR_r2; /* 0058 - R2 */ + + uint64_t LTR_r3; /* 0060 - R3 */ + uint64_t LTR_r4; /* 0068 - R4 */ + uint64_t LTR_r5; /* 0070 - R5 */ + uint64_t LTR_r6; /* 0078 - R6 */ } LowTraceRecord; +#pragma pack() +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct traceWork { unsigned int traceCurr; /* Address of next slot */ @@ -69,8 +77,10 @@ typedef struct traceWork { unsigned int traceStart; /* Start of trace table */ unsigned int traceEnd; /* End of trace table */ unsigned int traceMsnd; /* Saved trace mask */ - unsigned int traceGas[3]; + unsigned int traceSize; /* Size of trace table. Min 1 page */ + unsigned int traceGas[2]; } traceWork; +#pragma pack() extern traceWork trcWork; extern unsigned int lastTrace; /* Value of low-level exception trace controls */ diff --git a/osfmk/ppc/lowglobals.h b/osfmk/ppc/lowglobals.h new file mode 100644 index 000000000..c848bcd6d --- /dev/null +++ b/osfmk/ppc/lowglobals.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Header files for the Low Memory Globals (lg) + */ +#ifndef _LOW_MEMORY_GLOBALS_H_ +#define _LOW_MEMORY_GLOBALS_H_ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Don't change these structures unless you change the corresponding assembly code + * which is in lowmem_vectors.s + */ + +/* + * This is where we put constants, pointers, and data areas that must be accessed + * quickly through assembler. They are designed to be accessed directly with + * absolute addresses, not via a base register. This is a global area, and not + * per processor. 
+ */ + +#pragma pack(4) /* Make sure the structure stays as we defined it */ +typedef struct lowglo { + + unsigned long lgForceAddr[5*1024]; /* 0000 Force to page 5 */ + unsigned char lgVerCode[8]; /* 5000 System verification code */ + unsigned long long lgZero; /* 5008 Double constant 0 */ + unsigned int lgPPStart; /* 5010 Start of per_proc blocks */ + unsigned int lgCHUDXNUfnStart; /* 5014 CHUD XNU function glue table */ + unsigned int lgRsv018[26]; /* 5018 reserved */ + traceWork lgTrcWork; /* 5080 Tracing control block - trcWork */ + unsigned int lgRsv0A0[24]; /* 50A0 reserved */ + struct Saveanchor lgSaveanchor; /* 5100 Savearea anchor - saveanchor */ + unsigned int lgRsv140[16]; /* 5140 reserved */ + unsigned int lgTlbieLck; /* 5180 TLBIE lock */ + unsigned int lgRsv184[31]; /* 5184 reserved - push to next line */ + struct diagWork lgdgWork; /* 5200 Start of diagnostic work area */ + unsigned int lgRsv220[24]; /* 5220 reserved */ + unsigned int lgRst280[32]; /* 5280 reserved */ + unsigned int lgKillResv; /* 5300 line used to kill reservations */ + unsigned int lgKillResvpad[31]; /* 5304 pad reservation kill line */ + unsigned int lgRsv380[768]; /* 5380 reserved - push to 1 page */ + +} lowglo; + +extern lowglo lowGlo; + +#endif /* _LOW_MEMORY_GLOBALS_H_ */ diff --git a/osfmk/ppc/lowmem_vectors.s b/osfmk/ppc/lowmem_vectors.s index edbcc2829..f707b92be 100644 --- a/osfmk/ppc/lowmem_vectors.s +++ b/osfmk/ppc/lowmem_vectors.s @@ -26,38 +26,10 @@ * @OSF_COPYRIGHT@ */ -/* - * Low-memory exception vector code for PowerPC MACH - * - * These are the only routines that are ever run with - * VM instruction translation switched off. - * - * The PowerPC is quite strange in that rather than having a set - * of exception vectors, the exception handlers are installed - * in well-known addresses in low memory. This code must be loaded - * at ZERO in physical memory. The simplest way of doing this is - * to load the kernel at zero, and specify this as the first file - * on the linker command line. - * - * When this code is loaded into place, it is loaded at virtual - * address KERNELBASE, which is mapped to zero (physical). - * - * This code handles all powerpc exceptions and is always entered - * in supervisor mode with translation off. It saves the minimum - * processor state before switching back on translation and - * jumping to the approprate routine. - * - * Vectors from 0x100 to 0x3fff occupy 0x100 bytes each (64 instructions) - * - * We use some of this space to decide which stack to use, and where to - * save the context etc, before jumping to a generic handler. - */ - #include #include #include #include -#include #include #include @@ -67,21 +39,11 @@ #include #include -#define TRCSAVE 0 -#define CHECKSAVE 0 -#define PERFTIMES 0 #define ESPDEBUG 0 +#define INSTRUMENT 0 -#if TRCSAVE -#error The TRCSAVE option is broken.... Fix it -#endif - -#define featL1ena 24 -#define featSMP 25 -#define featAltivec 26 -#define wasNapping 27 -#define featFP 28 -#define specAccess 29 +#define featAltivec 29 +#define wasNapping 30 #define VECTOR_SEGMENT .section __VECTORS, __interrupts @@ -93,9 +55,19 @@ EXT(ExceptionVectorsStart): /* Used if relocating the exception vectors */ baseR: /* Used so we have more readable code */ -/* - * System reset - call debugger - */ +; +; Handle system reset. +; We do not ever expect a hard reset so we do not actually check. 
+; When we come here, we check for a RESET_HANDLER_START (which means we are +; waking up from sleep), a RESET_HANDLER_BUPOR (which is used for bring up +; when starting directly from a POR), and RESET_HANDLER_IGNORE (which means +; ignore the interrupt). +; +; Some machines (so far, 32-bit guys) will always ignore a non-START interrupt. +; The ones who do take it check if the interrupt is to be ignored. This is +; always the case until the previous reset is handled (i.e., we have exited +; from the debugger). +; . = 0xf0 .globl EXT(ResetHandler) EXT(ResetHandler): @@ -120,8 +92,46 @@ EXT(ResetHandler): mtlr r4 blr -resetexc: - mtcr r11 +resetexc: cmplwi r13,RESET_HANDLER_BUPOR ; Special bring up POR sequence? + bne resetexc2 ; No... + lis r4,hi16(EXT(resetPOR)) ; Get POR code + ori r4,r4,lo16(EXT(resetPOR)) ; The rest + mtlr r4 ; Set it + blr ; Jump to it.... + +resetexc2: cmplwi cr1,r13,RESET_HANDLER_IGNORE ; Are we ignoring these? (Software debounce) + + mfsprg r13,0 ; Get per_proc + lwz r13,pfAvailable(r13) ; Get the features + rlwinm. r13,r13,0,pf64Bitb,pf64Bitb ; Is this a 64-bit machine? + cror cr1_eq,cr0_eq,cr1_eq ; See if we want to take this + bne-- cr1,rxCont ; Yes, continue... + bne-- rxIg64 ; 64-bit path... + + mtcr r11 ; Restore the CR + mfsprg r13,2 ; Restore R13 + mfsprg r11,0 ; Get per_proc + lwz r11,pfAvailable(r11) ; Get the features + mtsprg 2,r11 ; Restore sprg2 + mfsprg r11,3 ; Restore R11 + rfi ; Return and ignore the reset + +rxIg64: mtcr r11 ; Restore the CR + mfsprg r11,0 ; Get per_proc + mtspr hsprg0,r14 ; Save a register + lwz r14,UAW(r11) ; Get the User Assist Word + mfsprg r13,2 ; Restore R13 + lwz r11,pfAvailable(r11) ; Get the features + mtsprg 2,r11 ; Restore sprg2 + mfsprg r11,3 ; Restore R11 + mtsprg 3,r14 ; Set the UAW in sprg3 + mfspr r14,hsprg0 ; Restore R14 + rfid ; Return and ignore the reset + +rxCont: mtcr r11 + li r11,RESET_HANDLER_IGNORE ; Get set to ignore + stw r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Start ignoring these + mfsprg r13,1 /* Get the exception save area */ li r11,T_RESET /* Set 'rupt code */ b .L_exception_entry /* Join common... */ @@ -131,10 +141,81 @@ resetexc: . = 0x200 .L_handler200: - mtsprg 2,r13 /* Save R13 */ - mtsprg 3,r11 /* Save R11 */ - li r11,T_MACHINE_CHECK /* Set 'rupt code */ - b .L_exception_entry /* Join common... */ + mtsprg 2,r13 ; Save R13 + mtsprg 3,r11 ; Save R11 + + .globl EXT(extPatchMCK) +LEXT(extPatchMCK) ; This is patched to a nop for 64-bit + b h200aaa ; Skip 64-bit code... + +; +; Fall through here for 970 MCKs. +; + + li r11,1 ; ? + sldi r11,r11,32+3 ; ? + mfspr r13,hid4 ; ? + or r11,r11,r13 ; ? + sync + mtspr hid4,r11 ; ? + isync + li r11,1 ; ? + sldi r11,r11,32+8 ; ? + andc r13,r13,r11 ; ? + lis r11,0xE000 ; Get the unlikeliest ESID possible + sync + mtspr hid4,r13 ; ? + isync ; ? + + srdi r11,r11,1 ; ? + slbie r11 ; ? + sync + isync + + li r11,T_MACHINE_CHECK ; Set rupt code + b .L_exception_entry ; Join common... + +; +; Preliminary checking of other MCKs +; + +h200aaa: mfsrr1 r11 ; Get the SRR1 + mfcr r13 ; Save the CR + + rlwinm. r11,r11,0,dcmck,dcmck ; ? + beq+ notDCache ; ? + + sync + mfspr r11,msscr0 ; ? + dssall ; ? + sync + isync + + oris r11,r11,hi16(dl1hwfm) ; ? + mtspr msscr0,r11 ; ? + +rstbsy: mfspr r11,msscr0 ; ? + + rlwinm. r11,r11,0,dl1hwf,dl1hwf ; ? + bne rstbsy ; ? + + sync ; ?
+ + mfsprg r11,0 ; Get the per_proc + mtcrf 255,r13 ; Restore CRs + lwz r13,hwMachineChecks(r11) ; Get old count + addi r13,r13,1 ; Count this one + stw r13,hwMachineChecks(r11) ; Set new count + lwz r11,pfAvailable(r11) ; Get the feature flags + mfsprg r13,2 ; Restore R13 + mtsprg 2,r11 ; Set the feature flags + mfsprg r11,3 ; Restore R11 + rfi ; Return + +notDCache: mtcrf 255,r13 ; Restore CRs + li r11,T_MACHINE_CHECK ; Set rupt code + b .L_exception_entry ; Join common... + /* * Data access - page fault, invalid memory rights for operation @@ -147,16 +228,39 @@ resetexc: li r11,T_DATA_ACCESS /* Set 'rupt code */ b .L_exception_entry /* Join common... */ + +/* + * Data segment + */ + + . = 0x380 +.L_handler380: + mtsprg 2,r13 ; Save R13 + mtsprg 3,r11 ; Save R11 + li r11,T_DATA_SEGMENT ; Set rupt code + b .L_exception_entry ; Join common... + /* * Instruction access - as for data access */ . = 0x400 .L_handler400: - mtsprg 2,r13 /* Save R13 */ - mtsprg 3,r11 /* Save R11 */ - li r11,T_INSTRUCTION_ACCESS /* Set 'rupt code */ - b .L_exception_entry /* Join common... */ + mtsprg 2,r13 ; Save R13 + mtsprg 3,r11 ; Save R11 + li r11,T_INSTRUCTION_ACCESS ; Set rupt code + b .L_exception_entry ; Join common... + +/* + * Instruction segment + */ + + . = 0x480 +.L_handler480: + mtsprg 2,r13 ; Save R13 + mtsprg 3,r11 ; Save R11 + li r11,T_INSTRUCTION_SEGMENT ; Set rupt code + b .L_exception_entry ; Join common... /* * External interrupt @@ -164,10 +268,10 @@ resetexc: . = 0x500 .L_handler500: - mtsprg 2,r13 /* Save R13 */ - mtsprg 3,r11 /* Save R11 */ - li r11,T_INTERRUPT /* Set 'rupt code */ - b .L_exception_entry /* Join common... */ + mtsprg 2,r13 ; Save R13 + mtsprg 3,r11 ; Save R11 + li r11,T_INTERRUPT ; Set rupt code + b .L_exception_entry ; Join common... /* * Alignment - many reasons @@ -188,6 +292,19 @@ resetexc: .L_handler700: mtsprg 2,r13 /* Save R13 */ mtsprg 3,r11 /* Save R11 */ + +#if 0 + mfsrr1 r13 ; (BRINGUP) + mfcr r11 ; (BRINGUP) + rlwinm. r13,r13,0,12,12 ; (BRINGUP) + crmove cr1_eq,cr0_eq ; (BRINGUP) + mfsrr1 r13 ; (BRINGUP) + rlwinm. r13,r13,0,MSR_PR_BIT,MSR_PR_BIT ; (BRINGUP) + crorc cr0_eq,cr1_eq,cr0_eq ; (BRINGUP) + bf-- cr0_eq,. ; (BRINGUP) + mtcrf 255,r11 ; (BRINGUP) +#endif + li r11,T_PROGRAM|T_FAM /* Set 'rupt code */ b .L_exception_entry /* Join common... */ @@ -236,29 +353,6 @@ resetexc: li r11,T_RESERVED /* Set 'rupt code */ b .L_exception_entry /* Join common... */ -#if 0 -hackxxxx1: - stmw r29,4(br0) - lwz r29,0(br0) - mr. r29,r29 - bne+ xxxx1 - lis r29,0x4000 - -xxxx1: - stw r0,0(r29) - mfsrr0 r30 - stw r30,4(r29) - mtlr r30 - stw r30,8(r29) - - addi r29,r29,12 - stw r29,0(br0) - - lmw r29,4(br0) - b hackxxxx2 -#endif - - ; ; System call - generated by the sc instruction ; @@ -268,7 +362,7 @@ xxxx1: ; 0xFFFFFFFE - BlueBox only - kcNKIsPreemptiveTaskEnv ; 0x00007FF2 - User state only - thread info ; 0x00007FF3 - User state only - floating point / vector facility status -; 0x00007FF4 - Kernel only - loadMSR +; 0x00007FF4 - Kernel only - loadMSR - not used on 64-bit machines ; ; Note: none handled if virtual machine is running ; Also, we treat SCs as kernel SCs if the RI bit is set ; @@ -276,80 +370,108 @@ xxxx1: .
= 0xC00 .L_handlerC00: + mtsprg 3,r11 ; Save R11 + mfsprg r11,2 ; Get the feature flags + mtsprg 2,r13 ; Save R13 + rlwinm r11,r11,pf64Bitb-4,4,4 ; Get the 64-bit flag mfsrr1 r13 ; Get SRR1 for loadMSR - mtsprg 3,r11 ; Save R11 - rlwimi r13,r13,MSR_PR_BIT,0,0 ; Move PR bit to non-volatile CR0 bit 0 - mfcr r11 ; Save the CR - mtcrf 0x81,r13 ; Get the moved PR and the RI for testing - crnot 0,0 ; Get !PR - cror 0,0,MSR_RI_BIT ; See if we have !PR or RI - mfsprg r13,0 ; Get the per_proc_area - bt- 0,uftInKern ; We are in the kernel... - - lwz r13,spcFlags(r13) ; Get the special flags - rlwimi r13,r13,runningVMbit+1,31,31 ; Move VM flag after the 3 blue box flags - mtcrf 1,r13 ; Set BB and VMM flags in CR7 - bt- 31,ufpVM ; fast paths running VM ... - cmplwi cr5,r0,0x7FF2 ; Ultra fast path cthread info call? - cmpwi cr6,r0,0x7FF3 ; Ultra fast path facility status? - cror cr1_eq,cr5_lt,cr6_gt ; Set true if not 0x7FF2 and not 0x7FF3 and not negative - bt- cr1_eq,notufp ; Exit if we can not be ultra fast... - - not. r0,r0 ; Flip bits and kind of subtract 1 - - cmplwi cr1,r0,1 ; Is this a bb fast path? - not r0,r0 ; Restore to entry state - bf- bbNoMachSCbit,ufpUSuft ; We are not running BlueBox... - bgt cr1,notufp ; This can not be a bb ufp... -#if 0 - b hackxxxx1 -hackxxxx2: -#endif + rlwimi r11,r13,MSR_PR_BIT-5,5,5 ; Move the PR bit to bit 1 + mfcr r13 ; Save the CR - rlwimi r11,r13,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq ; Copy preemptive task flag into user cr0_eq - mfsprg r13,0 ; Get back pre_proc + mtcrf 0x40,r11 ; Get the top 3 CR bits to 64-bit, PR, sign + cmpwi r0,lo16(-3) ; Eliminate all negatives but -1 and -2 + mfsprg r11,0 ; Get the per_proc + bf-- 5,uftInKern ; We came from the kernel... + ble-- notufp ; This is a mach call + + lwz r11,spcFlags(r11) ; Pick up the special flags + + cmpwi cr7,r0,lo16(-1) ; Is this a BlueBox call? + cmplwi cr2,r0,0x7FF2 ; Ultra fast path cthread info call? + cmplwi cr3,r0,0x7FF3 ; Ultra fast path facility status? + cror cr4_eq,cr2_eq,cr3_eq ; Is this one of the two ufts we handle here? - bne cr1,ufpIsBBpre ; This is the "isPreemptiveTask" call... - - lwz r0,ppbbTaskEnv(r13) ; Get the shadowed taskEnv from per_proc_area + ble-- cr7,uftBBCall ; We think this is a blue box call... -ufpIsBBpre: - mtcrf 0xFF,r11 ; Restore CR - mfsprg r11,3 ; Restore R11 - mfsprg r13,2 ; Restore R13 - rfi ; All done, go back... + rlwinm r11,r11,16,16,31 ; Extract spcFlags upper bits + andi. r11,r11,hi16(runningVM|FamVMena|FamVMmode) + cmpwi cr0,r11,hi16(runningVM|FamVMena|FamVMmode) ; Test in VM FAM + beq-- cr0,ufpVM ; fast paths running VM ... + bne-- cr4_eq,notufp ; Bail if this is not a uft... + ; -; Normal fast path... +; Handle normal user ultra-fast trap ; + + li r3,spcFlags ; Assume facility status - 0x7FF3 + + beq-- cr3,uftFacStat ; This is a facilities status call... -ufpUSuft: bge+ notufp ; Bail if negative... (ARRRGGG -- BRANCH TO A BRANCH!!!!!) + li r3,UAW ; This is really a thread info call - 0x7FF2 + +uftFacStat: mfsprg r11,0 ; Get the per_proc + lwzx r3,r11,r3 ; Get the UAW or spcFlags field + +uftExit: bt++ 4,uftX64 ; Go do the 64-bit exit... + + lwz r11,pfAvailable(r11) ; Get the feature flags + mtcrf 255,r13 ; Restore the CRs + mfsprg r13,2 ; Restore R13 + mtsprg 2,r11 ; Set the feature flags mfsprg r11,3 ; Restore R11 + + rfi ; Back to our guy...
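Stripped of the condition-register gymnastics, the dispatch this entry point implements is small, and the C sketch below summarizes it. Everything here is illustrative only: the structure and enum names are invented, the bbNoMachSC gating and the FAM redirect to vmm_ufp are left out, and none of the 32- versus 64-bit exit mechanics appear.

    #include <stdint.h>
    #include <stdbool.h>

    /* Invented stand-ins for the real per_proc fields. */
    struct pp_sketch { uint32_t spcFlags, UAW, ppbbTaskEnv; };
    enum uft_result { UFT_HANDLED, UFT_NORMAL_SC };

    static enum uft_result uft_dispatch(uint32_t r0, bool from_user,
                                        struct pp_sketch *pp, uint32_t *r3_out)
    {
        if (!from_user)                 /* kernel state: only loadMSR (0x7FF4) */
            return (r0 == 0x7FF4) ? UFT_HANDLED : UFT_NORMAL_SC;

        switch (r0) {
        case 0x7FF2:                    /* thread info: hand back the UAW */
            *r3_out = pp->UAW;
            return UFT_HANDLED;
        case 0x7FF3:                    /* facility status: hand back spcFlags */
            *r3_out = pp->spcFlags;
            return UFT_HANDLED;
        case (uint32_t)-1:              /* BlueBox isPreemptiveTask */
        case (uint32_t)-2:              /* BlueBox taskEnv shadow */
            *r3_out = pp->ppbbTaskEnv;  /* (-1 also reports through cr0_eq) */
            return UFT_HANDLED;
        default:
            return UFT_NORMAL_SC;       /* a genuine system call */
        }
    }

This is also why the assembly compares against lo16(-3) first: a single signed test separates the two BlueBox negatives, -1 and -2, from every other negative value before the individual cases are sorted out.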
+ +uftX64: mtspr hsprg0,r14 ; Save a register + + lwz r14,UAW(r11) ; Get the User Assist Word + lwz r11,pfAvailable(r11) ; Get the feature flags + + mtcrf 255,r13 ; Restore the CRs + mfsprg r13,2 ; Restore R13 - bne- cr5,isvecfp ; This is the facility stat call - lwz r3,UAW(r3) ; Get the assist word - rfi ; All done, scream back... (no need to restore CR or R11, they are volatile) -; -isvecfp: lwz r3,spcFlags(r3) ; Get the facility status - rfi ; Bail back... + mtsprg 2,r11 ; Set the feature flags + mfsprg r11,3 ; Restore R11 + mtsprg 3,r14 ; Set the UAW in sprg3 + mfspr r14,hsprg0 ; Restore R14 + + rfid ; Back to our guy... + ; -notufp: mtcrf 0xFF,r11 ; Restore the used CRs - li r11,T_SYSTEM_CALL|T_FAM ; Set interrupt code - b .L_exception_entry ; Join common... +; Handle BlueBox ultra-fast trap +; + +uftBBCall: andi. r11,r11,bbNoMachSC|bbPreemptive ; Clear what we do not need + cmplwi r11,bbNoMachSC ; See if we are trapping syscalls + blt-- notufp ; No... + + rlwimi r13,r11,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq ; Copy preemptive task flag into user cr0_eq + + mfsprg r11,0 ; Get the per proc + beq++ cr7,uftExit ; For MKIsPreemptiveTask we are done... + + lwz r0,ppbbTaskEnv(r11) ; Get the shadowed taskEnv from per_proc_area + b uftExit ; We are really all done now... + +; Kernel ultra-fast trap + uftInKern: cmplwi r0,0x7FF4 ; Ultra fast path loadMSR? bne- notufp ; Someone is trying to cheat... - - mtcrf 0xFF,r11 ; Restore CR - lwz r11,pfAvailable(r13) ; Pick up the feature flags + mtsrr1 r3 ; Set new MSR - mfsprg r13,2 ; Restore R13 - mtsprg 2,r11 ; Set the feature flags into sprg2 - mfsprg r11,3 ; Restore R11 - rfi ; Blast back + + b uftExit ; Go load the new MSR... + +notufp: mtcrf 0xFF,r13 ; Restore the used CRs + li r11,T_SYSTEM_CALL|T_FAM ; Set interrupt code + b .L_exception_entry ; Join common... + + + /* @@ -370,55 +492,73 @@ uftInKern: cmplwi r0,0x7FF4 ; Ultra fast path loadMSR? . = 0xD00 .L_handlerD00: - mtsprg 2,r13 ; Save R13 mtsprg 3,r11 ; Save R11 - mfsrr1 r13 ; Get the old MSR - mfcr r11 ; Get the CR - rlwinm. r13,r13,0,MSR_PR_BIT,MSR_PR_BIT ; Are we in supervisor state? - beq- notspectr ; Yes, not special trace... - mfsprg r13,0 ; Get the per_proc area - lhz r13,PP_CPU_FLAGS(r13) ; Get the flags - rlwinm. r13,r13,0,traceBEb+16,traceBEb+16 ; Special trace enabled? - bne+ specbrtr ; Yeah... - -notspectr: mtcr r11 ; Restore CR + mfsprg r11,2 ; Get the feature flags + mtsprg 2,r13 ; Save R13 + rlwinm r11,r11,pf64Bitb-4,4,4 ; Get the 64-bit flag + mfcr r13 ; Get the CR + mtcrf 0x40,r11 ; Set the CR + mfsrr1 r11 ; Get the old MSR + rlwinm. r11,r11,0,MSR_PR_BIT,MSR_PR_BIT ; Are we in supervisor state? + + mfsprg r11,0 ; Get the per_proc + lhz r11,PP_CPU_FLAGS(r11) ; Get the flags + crmove cr1_eq,cr0_eq ; Remember if we are in supervisor state + rlwinm. r11,r11,0,traceBEb+16,traceBEb+16 ; Special trace enabled? + cror cr0_eq,cr0_eq,cr1_eq ; Is trace off or supervisor state? + bf-- cr0_eq,specbrtr ; No, we need to trace... + +notspectr: mtcr r13 ; Restore CR li r11,T_TRACE|T_FAM ; Set interrupt code b .L_exception_entry ; Join common... + .align 5 + ; ; We are doing the special branch trace ; -specbrtr: mfsprg r13,0 ; Get the per_proc area - stw r1,emfp0(r13) ; Save in a scratch area - stw r2,emfp0+4(r13) ; Save in a scratch area - stw r3,emfp0+8(r13) ; Save in a scratch area - - lis r2,hi16(EXT(pc_trace_buf)) ; Get the top of the buffer - lwz r3,spcTRp(r13) ; Pick up buffer position - mr. r1,r1 ; Is it time to count? 
+specbrtr: mfsprg r11,0 ; Get the per_proc area + bt++ 4,sbxx64a ; Jump if 64-bit... + + stw r1,tempr0+4(r11) ; Save in a scratch area + stw r2,tempr1+4(r11) ; Save in a scratch area + stw r3,tempr2+4(r11) ; Save in a scratch area + b sbxx64b ; Skip... + +sbxx64a: std r1,tempr0(r11) ; Save in a scratch area + std r2,tempr1(r11) ; Save in a scratch area + std r3,tempr2(r11) ; Save in a scratch area + +sbxx64b: lis r2,hi16(EXT(pc_trace_buf)) ; Get the top of the buffer + lwz r3,spcTRp(r11) ; Pick up buffer position ori r2,r2,lo16(EXT(pc_trace_buf)) ; Get the bottom of the buffer - cmplwi cr1,r3,4092 ; Set cr1_eq if we should take exception + cmplwi cr2,r3,4092 ; Set cr2_eq if we should take exception mfsrr0 r1 ; Get the pc stwx r1,r2,r3 ; Save it in the buffer addi r3,r3,4 ; Point to the next slot rlwinm r3,r3,0,20,31 ; Wrap the slot at one page - stw r3,spcTRp(r13) ; Save the new slot - lwz r1,emfp0(r13) ; Restore work register - lwz r2,emfp0+4(r13) ; Restore work register - lwz r3,emfp0+8(r13) ; Restore work register - beq cr1,notspectr ; Buffer filled, make a rupt... - - mtcr r11 ; Restore the CR - mfsprg r13,2 ; Restore R13 - mfsprg r11,3 ; Restore R11 - rfi ; Bail back... + stw r3,spcTRp(r11) ; Save the new slot + + bt++ 4,sbxx64c ; Jump if 64-bit... + + lwz r1,tempr0+4(r11) ; Restore work register + lwz r2,tempr1+4(r11) ; Restore work register + lwz r3,tempr2+4(r11) ; Restore work register + beq cr2,notspectr ; Buffer filled, make a rupt... + b uftExit ; Go restore and leave... + +sbxx64c: ld r1,tempr0(r11) ; Restore work register + ld r2,tempr1(r11) ; Restore work register + ld r3,tempr2(r11) ; Restore work register + beq cr2,notspectr ; Buffer filled, make a rupt... + b uftExit ; Go restore and leave... /* * Floating point assist */ - . = 0xe00 + . = 0xE00 .L_handlerE00: mtsprg 2,r13 /* Save R13 */ mtsprg 3,r11 /* Save R11 */ @@ -451,329 +591,43 @@ VMXhandler: -/* - * Instruction translation miss - we inline this code. - * Upon entry (done for us by the machine): - * srr0 : addr of instruction that missed - * srr1 : bits 0-3 = saved CR0 - * 4 = lru way bit - * 16-31 = saved msr - * msr[tgpr] = 1 (so gpr0-3 become our temporary variables) - * imiss: ea that missed - * icmp : the compare value for the va that missed - * hash1: pointer to first hash pteg - * hash2: pointer to 2nd hash pteg - * - * Register usage: - * tmp0: saved counter - * tmp1: junk - * tmp2: pointer to pteg - * tmp3: current compare value - * - * This code is taken from the 603e User's Manual with - * some bugfixes and minor improvements to save bytes and cycles - * - * NOTE: Do not touch sprg2 in here - */ +; +; Instruction translation miss exception - not supported +; - . = 0x1000 + . = 0x1000 .L_handler1000: - mfspr tmp2, hash1 - mfctr tmp0 /* use tmp0 to save ctr */ - mfspr tmp3, icmp - -.L_imiss_find_pte_in_pteg: - li tmp1, 8 /* count */ - subi tmp2, tmp2, 8 /* offset for lwzu */ - mtctr tmp1 /* count... */ - -.L_imiss_pteg_loop: - lwz tmp1, 8(tmp2) /* check pte0 for match... */ - addi tmp2, tmp2, 8 - cmpw cr0, tmp1, tmp3 -#if 0 - bdnzf+ cr0, .L_imiss_pteg_loop -#else - bc 0,2, .L_imiss_pteg_loop -#endif - beq+ cr0, .L_imiss_found_pte - - /* Not found in PTEG, we must scan 2nd then give up */ - - andi. tmp1, tmp3, MASK(PTE0_HASH_ID) - bne- .L_imiss_do_no_hash_exception /* give up */ - - mfspr tmp2, hash2 - ori tmp3, tmp3, MASK(PTE0_HASH_ID) - b .L_imiss_find_pte_in_pteg - -.L_imiss_found_pte: - - lwz tmp1, 4(tmp2) /* get pte1_t */ - andi. tmp3, tmp1, MASK(PTE1_WIMG_GUARD) /* Fault?
*/ - bne- .L_imiss_do_prot_exception /* Guarded - illegal */ - - /* Ok, we've found what we need to, restore and rfi! */ - - mtctr tmp0 /* restore ctr */ - mfsrr1 tmp3 - mfspr tmp0, imiss - mtcrf 0x80, tmp3 /* Restore CR0 */ - mtspr rpa, tmp1 /* set the pte */ - ori tmp1, tmp1, MASK(PTE1_REFERENCED) /* set referenced */ - tlbli tmp0 - sth tmp1, 6(tmp2) - rfi - -.L_imiss_do_prot_exception: - /* set up srr1 to indicate protection exception... */ - mfsrr1 tmp3 - andi. tmp2, tmp3, 0xffff - addis tmp2, tmp2, MASK(SRR1_TRANS_PROT) >> 16 - b .L_imiss_do_exception - -.L_imiss_do_no_hash_exception: - /* clean up registers for protection exception... */ - mfsrr1 tmp3 - andi. tmp2, tmp3, 0xffff - addis tmp2, tmp2, MASK(SRR1_TRANS_HASH) >> 16 - - /* And the entry into the usual instruction fault handler ... */ -.L_imiss_do_exception: - - mtctr tmp0 /* Restore ctr */ - mtsrr1 tmp2 /* Set up srr1 */ - mfmsr tmp0 - xoris tmp0, tmp0, MASK(MSR_TGPR)>>16 /* no TGPR */ - mtcrf 0x80, tmp3 /* Restore CR0 */ - mtmsr tmp0 /* reset MSR[TGPR] */ - b .L_handler400 /* Instr Access */ + mtsprg 2,r13 ; Save R13 + mtsprg 3,r11 ; Save R11 + li r11,T_INVALID_EXCP0 ; Set rupt code + b .L_exception_entry ; Join common... + -/* - * Data load translation miss - * - * Upon entry (done for us by the machine): - * srr0 : addr of instruction that missed - * srr1 : bits 0-3 = saved CR0 - * 4 = lru way bit - * 5 = 1 if store - * 16-31 = saved msr - * msr[tgpr] = 1 (so gpr0-3 become our temporary variables) - * dmiss: ea that missed - * dcmp : the compare value for the va that missed - * hash1: pointer to first hash pteg - * hash2: pointer to 2nd hash pteg - * - * Register usage: - * tmp0: saved counter - * tmp1: junk - * tmp2: pointer to pteg - * tmp3: current compare value - * - * This code is taken from the 603e User's Manual with - * some bugfixes and minor improvements to save bytes and cycles - * - * NOTE: Do not touch sprg2 in here - */ - . = 0x1100 +; +; Data load translation miss exception - not supported +; + + . = 0x1100 .L_handler1100: - mfspr tmp2, hash1 - mfctr tmp0 /* use tmp0 to save ctr */ - mfspr tmp3, dcmp - -.L_dlmiss_find_pte_in_pteg: - li tmp1, 8 /* count */ - subi tmp2, tmp2, 8 /* offset for lwzu */ - mtctr tmp1 /* count... */ - -.L_dlmiss_pteg_loop: - lwz tmp1, 8(tmp2) /* check pte0 for match... */ - addi tmp2, tmp2, 8 - cmpw cr0, tmp1, tmp3 -#if 0 /* How to write this correctly? */ - bdnzf+ cr0, .L_dlmiss_pteg_loop -#else - bc 0,2, .L_dlmiss_pteg_loop -#endif - beq+ cr0, .L_dmiss_found_pte - - /* Not found in PTEG, we must scan 2nd then give up */ - - andi. tmp1, tmp3, MASK(PTE0_HASH_ID) /* already at 2nd? */ - bne- .L_dmiss_do_no_hash_exception /* give up */ - - mfspr tmp2, hash2 - ori tmp3, tmp3, MASK(PTE0_HASH_ID) - b .L_dlmiss_find_pte_in_pteg - -.L_dmiss_found_pte: - - lwz tmp1, 4(tmp2) /* get pte1_t */ - - /* Ok, we've found what we need to, restore and rfi! */ - - mtctr tmp0 /* restore ctr */ - mfsrr1 tmp3 - mfspr tmp0, dmiss - mtcrf 0x80, tmp3 /* Restore CR0 */ - mtspr rpa, tmp1 /* set the pte */ - ori tmp1, tmp1, MASK(PTE1_REFERENCED) /* set referenced */ - tlbld tmp0 /* load up tlb */ - sth tmp1, 6(tmp2) /* sth is faster? */ - rfi - - /* This code is shared with data store translation miss */ - -.L_dmiss_do_no_hash_exception: - /* clean up registers for protection exception... */ - mfsrr1 tmp3 - /* prepare to set DSISR_WRITE_BIT correctly from srr1 info */ - rlwinm tmp1, tmp3, 9, 6, 6 - addis tmp1, tmp1, MASK(DSISR_HASH) >> 16 - - /* And the entry into the usual data fault handler ... 
*/ - - mtctr tmp0 /* Restore ctr */ - andi. tmp2, tmp3, 0xffff /* Clean up srr1 */ - mtsrr1 tmp2 /* Set srr1 */ - mtdsisr tmp1 - mfspr tmp2, dmiss - mtdar tmp2 - mfmsr tmp0 - xoris tmp0, tmp0, MASK(MSR_TGPR)>>16 /* no TGPR */ - mtcrf 0x80, tmp3 /* Restore CR0 */ - sync /* Needed on some */ - mtmsr tmp0 /* reset MSR[TGPR] */ - b .L_handler300 /* Data Access */ + mtsprg 2,r13 ; Save R13 + mtsprg 3,r11 ; Save R11 + li r11,T_INVALID_EXCP1 ; Set rupt code + b .L_exception_entry ; Join common... + -/* - * Data store translation miss (similar to data load) - * - * Upon entry (done for us by the machine): - * srr0 : addr of instruction that missed - * srr1 : bits 0-3 = saved CR0 - * 4 = lru way bit - * 5 = 1 if store - * 16-31 = saved msr - * msr[tgpr] = 1 (so gpr0-3 become our temporary variables) - * dmiss: ea that missed - * dcmp : the compare value for the va that missed - * hash1: pointer to first hash pteg - * hash2: pointer to 2nd hash pteg - * - * Register usage: - * tmp0: saved counter - * tmp1: junk - * tmp2: pointer to pteg - * tmp3: current compare value - * - * This code is taken from the 603e User's Manual with - * some bugfixes and minor improvements to save bytes and cycles - * - * NOTE: Do not touch sprg2 in here - */ - . = 0x1200 +; +; Data store translation miss exception - not supported +; + + . = 0x1200 .L_handler1200: - mfspr tmp2, hash1 - mfctr tmp0 /* use tmp0 to save ctr */ - mfspr tmp3, dcmp - -.L_dsmiss_find_pte_in_pteg: - li tmp1, 8 /* count */ - subi tmp2, tmp2, 8 /* offset for lwzu */ - mtctr tmp1 /* count... */ - -.L_dsmiss_pteg_loop: - lwz tmp1, 8(tmp2) /* check pte0 for match... */ - addi tmp2, tmp2, 8 - - cmpw cr0, tmp1, tmp3 -#if 0 /* I don't know how to write this properly */ - bdnzf+ cr0, .L_dsmiss_pteg_loop -#else - bc 0,2, .L_dsmiss_pteg_loop -#endif - beq+ cr0, .L_dsmiss_found_pte - - /* Not found in PTEG, we must scan 2nd then give up */ - - andi. tmp1, tmp3, MASK(PTE0_HASH_ID) /* already at 2nd? */ - bne- .L_dmiss_do_no_hash_exception /* give up */ - - mfspr tmp2, hash2 - ori tmp3, tmp3, MASK(PTE0_HASH_ID) - b .L_dsmiss_find_pte_in_pteg - -.L_dsmiss_found_pte: - - lwz tmp1, 4(tmp2) /* get pte1_t */ - andi. tmp3, tmp1, MASK(PTE1_CHANGED) /* unchanged, check? */ - beq- .L_dsmiss_check_prot /* yes, check prot */ - -.L_dsmiss_resolved: - /* Ok, we've found what we need to, restore and rfi! */ - - mtctr tmp0 /* restore ctr */ - mfsrr1 tmp3 - mfspr tmp0, dmiss - mtcrf 0x80, tmp3 /* Restore CR0 */ - mtspr rpa, tmp1 /* set the pte */ - tlbld tmp0 /* load up tlb */ - rfi - -.L_dsmiss_check_prot: - /* PTE is unchanged, we must check that we can write */ - rlwinm. tmp3, tmp1, 30, 0, 1 /* check PP[1] */ - bge- .L_dsmiss_check_prot_user_kern - andi. tmp3, tmp1, 1 /* check PP[0] */ - beq+ .L_dsmiss_check_prot_ok - -.L_dmiss_do_prot_exception: - /* clean up registers for protection exception... */ - mfsrr1 tmp3 - /* prepare to set DSISR_WRITE_BIT correctly from srr1 info */ - rlwinm tmp1, tmp3, 9, 6, 6 - addis tmp1, tmp1, MASK(DSISR_PROT) >> 16 - - /* And the entry into the usual data fault handler ... */ - - mtctr tmp0 /* Restore ctr */ - andi. 
tmp2, tmp3, 0xffff /* Clean up srr1 */ - mtsrr1 tmp2 /* Set srr1 */ - mtdsisr tmp1 - mfspr tmp2, dmiss - mtdar tmp2 - mfmsr tmp0 - xoris tmp0, tmp0, MASK(MSR_TGPR)>>16 /* no TGPR */ - mtcrf 0x80, tmp3 /* Restore CR0 */ - sync /* Needed on some */ - mtmsr tmp0 /* reset MSR[TGPR] */ - b .L_handler300 /* Data Access */ - -/* NB - if we knew we were on a 603e we could test just the MSR_KEY bit */ -.L_dsmiss_check_prot_user_kern: - mfsrr1 tmp3 - andi. tmp3, tmp3, MASK(MSR_PR) - beq+ .L_dsmiss_check_prot_kern - mfspr tmp3, dmiss /* check user privs */ - mfsrin tmp3, tmp3 /* get excepting SR */ - andis. tmp3, tmp3, 0x2000 /* Test SR ku bit */ - beq+ .L_dsmiss_check_prot_ok - b .L_dmiss_do_prot_exception - -.L_dsmiss_check_prot_kern: - mfspr tmp3, dmiss /* check kern privs */ - mfsrin tmp3, tmp3 - andis. tmp3, tmp3, 0x4000 /* Test SR Ks bit */ - bne- .L_dmiss_do_prot_exception - -.L_dsmiss_check_prot_ok: - /* Ok, mark as referenced and changed before resolving the fault */ - ori tmp1, tmp1, (MASK(PTE1_REFERENCED)|MASK(PTE1_CHANGED)) - sth tmp1, 6(tmp2) - b .L_dsmiss_resolved /* * Instruction address breakpoint @@ -797,8 +651,20 @@ VMXhandler: li r11,T_SYSTEM_MANAGEMENT /* Set 'rupt code */ b .L_exception_entry /* Join common... */ + +/* + * Soft Patch + */ + + . = 0x1500 +.L_handler1500: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + li r11,T_SOFT_PATCH /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + ; -; Altivec Java Mode Assist interrupt +; Altivec Java Mode Assist interrupt or Maintenance interrupt ; . = 0x1600 @@ -809,7 +675,7 @@ VMXhandler: b .L_exception_entry /* Join common... */ ; -; Thermal interruption +; Altivec Java Mode Assist interrupt or Thermal interruption ; . = 0x1700 @@ -819,35 +685,44 @@ VMXhandler: li r11,T_THERMAL /* Set 'rupt code */ b .L_exception_entry /* Join common... */ +; +; Thermal interruption - 64-bit +; + + . = 0x1800 +.L_handler1800: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + li r11,T_ARCHDEP0 /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + /* * There is now a large gap of reserved traps */ /* - * Run mode/ trace exception - single stepping on 601 processors + * Instrumentation interruption */ . = 0x2000 .L_handler2000: mtsprg 2,r13 /* Save R13 */ mtsprg 3,r11 /* Save R11 */ - li r11,T_RUNMODE_TRACE /* Set 'rupt code */ + li r11,T_INSTRUMENTATION /* Set 'rupt code */ b .L_exception_entry /* Join common... */ + . = 0x2100 /* * Filter Ultra Fast Path syscalls for VMM */ ufpVM: - cmpwi cr6,r0,0x6004 ; Is it vmm_dispatch - bne cr6,notufp ; Exit If not + cmpwi cr2,r0,0x6004 ; Is it vmm_dispatch + bne cr2,notufp ; Exit if not cmpwi cr5,r3,kvmmResumeGuest ; Compare r3 with kvmmResumeGuest - cmpwi cr6,r3,kvmmSetGuestRegister ; Compare r3 with kvmmSetGuestRegister - cror cr1_eq,cr5_lt,cr6_gt ; Set true if out of VMM Fast syscall range + cmpwi cr2,r3,kvmmSetGuestRegister ; Compare r3 with kvmmSetGuestRegister + cror cr1_eq,cr5_lt,cr2_gt ; Set true if out of VMM Fast syscall range bt- cr1_eq,notufp ; Exit if out of range - rlwinm r13,r13,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit - cmpwi cr0,r13,3 ; Are FamVMena and FamVMmode set - bne+ notufp ; Exit if not in FAM b EXT(vmm_ufp) ; Ultra Fast Path syscall /* @@ -884,20 +759,28 @@ EXT(exception_entry): * Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions * are ignored.
*/ - mfsprg r13,0 /* Load per_proc */ - lwz r13,next_savearea(r13) /* Get the exception save area */ - stw r1,saver1(r13) ; Save register 1 - stw r0,saver0(r13) ; Save register 0 - dcbtst 0,r13 ; We will need this in a bit + + .globl EXT(extPatch32) + + +LEXT(extPatch32) + b extEntry64 ; Go do 64-bit (patched out for 32-bit) + mfsprg r13,0 ; Load per_proc + lwz r13,next_savearea+4(r13) ; Get the exception save area + stw r0,saver0+4(r13) ; Save register 0 + stw r1,saver1+4(r13) ; Save register 1 + mfspr r1,hid0 ; Get HID0 - mfcr r0 ; Save the CR - mtcrf 255,r1 ; Get set to test for cache and sleep + mfcr r0 ; Save the whole CR + + mtcrf 0x20,r1 ; Get set to test for sleep + cror doze,doze,nap ; Remember if we are napping bf sleep,notsleep ; Skip if we are not trying to sleep - mtcrf 255,r0 ; Restore the CR - lwz r0,saver0(r13) ; Restore R0 - lwz r1,saver1(r13) ; Restore R1 + mtcrf 0x20,r0 ; Restore the CR + lwz r0,saver0+4(r13) ; Restore R0 + lwz r1,saver1+4(r13) ; Restore R1 mfsprg r13,0 ; Get the per_proc lwz r11,pfAvailable(r13) ; Get back the feature flags mfsprg r13,2 ; Restore R13 @@ -913,26 +796,42 @@ EXT(exception_entry): .long 0 .long 0 + +; +; This is the 32-bit context saving stuff +; + .align 5 -notsleep: stw r2,saver2(r13) ; Save this one - crmove featL1ena,dce ; Copy the cache enable bit +notsleep: stw r2,saver2+4(r13) ; Save this one + bf doze,notspdo ; Skip the next if we are not napping/dozing... rlwinm r2,r1,0,nap+1,doze-1 ; Clear any possible nap and doze bits mtspr hid0,r2 ; Clear the nap/doze bits - cmplw r2,r1 ; See if we were napping - la r1,saver8(r13) ; Point to the next line in case we need it - crnot wasNapping,cr0_eq ; Remember if we were napping +notspdo: + +#if INSTRUMENT + mfspr r2,pmc1 ; INSTRUMENT - saveinstr[0] - Take earliest possible stamp + stw r2,0x6100+(0x00*16)+0x0(0) ; INSTRUMENT - Save it + mfspr r2,pmc2 ; INSTRUMENT - Get stamp + stw r2,0x6100+(0x00*16)+0x4(0) ; INSTRUMENT - Save it + mfspr r2,pmc3 ; INSTRUMENT - Get stamp + stw r2,0x6100+(0x00*16)+0x8(0) ; INSTRUMENT - Save it + mfspr r2,pmc4 ; INSTRUMENT - Get stamp + stw r2,0x6100+(0x00*16)+0xC(0) ; INSTRUMENT - Save it +#endif + + la r1,saver4(r13) ; Point to the next line in case we need it + crmove wasNapping,doze ; Remember if we were napping mfsprg r2,0 ; Get the per_proc area - bf- featL1ena,skipz1 ; L1 cache is disabled... - dcbz 0,r1 ; Reserve our line in cache + dcbz 0,r1 ; allocate r4-r7 32-byte line in cache ; ; Remember, we are setting up CR6 with feature flags ; -skipz1: - andi. r1,r11,T_FAM ; Check FAM bit - stw r3,saver3(r13) ; Save this one - stw r4,saver4(r13) ; Save this one + andi. r1,r11,T_FAM ; Check FAM bit + + stw r3,saver3+4(r13) ; Save this one + stw r4,saver4+4(r13) ; Save this one andc r11,r11,r1 ; Clear FAM bit beq+ noFAM ; Is it FAM intercept mfsrr1 r3 ; Load srr1 @@ -948,37 +847,42 @@ skipz1: srw r1,r3,r1 ; Set bit for current exception and. 
r1,r1,r4 ; And current exception with the intercept mask beq+ noFAM ; Is it FAM intercept - b EXT(vmm_fam_handler) + b EXT(vmm_fam_exc) noFAM: lwz r1,pfAvailable(r2) ; Get the CPU features flags - la r3,savesrr0(r13) ; Point to the last line - mtcrf 0xE0,r1 ; Put the features flags (that we care about) in the CR - stw r6,saver6(r13) ; Save this one - crmove featSMP,pfSMPcapb ; See if we have a PIR - stw r8,saver8(r13) ; Save this one + la r3,saver8(r13) ; Point to line with r8-r11 + mtcrf 0xE2,r1 ; Put the features flags (that we care about) in the CR + dcbz 0,r3 ; allocate r8-r11 32-byte line in cache + la r3,saver12(r13) ; point to r12-r15 line + lis r4,hi16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Set up the MSR we will use throughout. Note that ME comes on here if MCK + stw r6,saver6+4(r13) ; Save this one + ori r4,r4,lo16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Rest of MSR + stw r8,saver8+4(r13) ; Save this one crmove featAltivec,pfAltivecb ; Set the Altivec flag + mtmsr r4 ; Set MSR + isync mfsrr0 r6 ; Get the interruption SRR0 - stw r8,saver8(r13) ; Save this one - bf- featL1ena,skipz1a ; L1 cache is disabled... - dcbz 0,r3 ; Reserve our line in cache -skipz1a: crmove featFP,pfFloatb ; Remember that we have floating point - stw r7,saver7(r13) ; Save this one + la r8,savesrr0(r13) ; point to line with SRR0, SRR1, CR, XER, and LR + dcbz 0,r3 ; allocate r12-r15 32-byte line in cache + la r3,saver16(r13) ; point to next line + dcbz 0,r8 ; allocate 32-byte line with SRR0, SRR1, CR, XER, and LR + stw r7,saver7+4(r13) ; Save this one lhz r8,PP_CPU_FLAGS(r2) ; Get the flags mfsrr1 r7 ; Get the interrupt SRR1 rlwinm r8,r8,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on - stw r6,savesrr0(r13) ; Save the SRR0 + stw r6,savesrr0+4(r13) ; Save the SRR0 rlwinm r6,r7,(((31-MSR_BE_BIT)+(MSR_PR_BIT+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Move PR bit to BE bit - stw r5,saver5(r13) ; Save this one + stw r5,saver5+4(r13) ; Save this one and r8,r6,r8 ; Remove BE bit only if problem state and special tracing on mfsprg r6,2 ; Get interrupt time R13 mtsprg 2,r1 ; Set the feature flags andc r7,r7,r8 ; Clear BE bit if special trace is on and PR is set mfsprg r8,3 ; Get rupt time R11 - stw r7,savesrr1(r13) ; Save SRR1 - rlwinm. r7,r7,MSR_RI_BIT,MSR_RI_BIT ; Is this a special case access fault? - stw r6,saver13(r13) ; Save rupt R1 - crnot specAccess,cr0_eq ; Set that we are doing a special access if RI is set - stw r8,saver11(r13) ; Save rupt time R11 + stw r7,savesrr1+4(r13) ; Save SRR1 + stw r8,saver11+4(r13) ; Save rupt time R11 + stw r6,saver13+4(r13) ; Save rupt R13 + dcbz 0,r3 ; allocate 32-byte line with r16-r19 + la r3,saver20(r13) ; point to next line getTB: mftbu r6 ; Get the upper timebase mftb r7 ; Get the lower timebase @@ -986,18 +890,26 @@ getTB: mftbu r6 ; Get the upper timebase cmplw r6,r8 ; Did the top tick? bne- getTB ; Yeah, need to get it again...
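The getTB loop is the standard idiom for reading a 64-bit timebase through a pair of 32-bit SPRs. The C rendering below is a sketch of the same retry, with two inline-asm wrappers standing in for the mftbu/mftb instructions (so it only compiles for PowerPC targets).

    #include <stdint.h>

    static inline uint32_t tb_upper(void)
    { uint32_t v; __asm__ volatile("mftbu %0" : "=r"(v)); return v; }

    static inline uint32_t tb_lower(void)
    { uint32_t v; __asm__ volatile("mftb %0" : "=r"(v)); return v; }

    /* Read the timebase as two halves; if the upper half changed while the
       lower was read, a carry crossed the 32-bit boundary and the pair is
       torn, so read again. */
    static inline uint64_t read_timebase(void)
    {
        uint32_t hi, lo, hi2;
        do {
            hi  = tb_upper();
            lo  = tb_lower();
            hi2 = tb_upper();
        } while (hi != hi2);
        return ((uint64_t)hi << 32) | lo;
    }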
+#if INSTRUMENT + mfspr r6,pmc1 ; INSTRUMENT - saveinstr[1] - Save halfway context save stamp + stw r6,0x6100+(0x01*16)+0x0(0) ; INSTRUMENT - Save it + mfspr r6,pmc2 ; INSTRUMENT - Get stamp + stw r6,0x6100+(0x01*16)+0x4(0) ; INSTRUMENT - Save it + mfspr r6,pmc3 ; INSTRUMENT - Get stamp + stw r6,0x6100+(0x01*16)+0x8(0) ; INSTRUMENT - Save it + mfspr r6,pmc4 ; INSTRUMENT - Get stamp + stw r6,0x6100+(0x01*16)+0xC(0) ; INSTRUMENT - Save it +#endif + stw r8,ruptStamp(r2) ; Save the top of time stamp stw r8,SAVtime(r13) ; Save the top of time stamp - la r6,saver16(r13) ; Point to the next cache line stw r7,ruptStamp+4(r2) ; Save the bottom of time stamp stw r7,SAVtime+4(r13) ; Save the bottom of time stamp - bf- featL1ena,skipz2 ; L1 cache is disabled... - dcbz 0,r6 ; Allocate in cache -skipz2: - stw r9,saver9(r13) ; Save this one + dcbz 0,r3 ; allocate 32-byte line with r20-r23 + stw r9,saver9+4(r13) ; Save this one - stw r10,saver10(r13) ; Save this one + stw r10,saver10+4(r13) ; Save this one mflr r4 ; Get the LR mfxer r10 ; Get the XER @@ -1015,175 +927,492 @@ skipz2: adde r8,r8,r5 ; Add high and carry to total stw r6,napTotal+4(r2) ; Save the low total stw r8,napTotal(r2) ; Save the high total - stw r3,savesrr0(r13) ; Modify to return to nap/doze exit + stw r3,savesrr0+4(r13) ; Modify to return to nap/doze exit - rlwinm. r3,r1,0,pfSlowNapb,pfSlowNapb ; Should HID1 be restored? + rlwinm. r3,r1,0,pfSlowNapb,pfSlowNapb ; Should HID1 be restored? beq notInSlowNap lwz r3,pfHID1(r2) ; Get saved HID1 value - mtspr hid1, r3 ; Restore HID1 + mtspr hid1,r3 ; Restore HID1 notInSlowNap: - rlwinm. r3,r1,0,pfNoL2PFNapb,pfNoL2PFNapb ; Should MSSCR0 be restored? + rlwinm. r3,r1,0,pfNoL2PFNapb,pfNoL2PFNapb ; Should MSSCR0 be restored? beq notNapping lwz r3,pfMSSCR0(r2) ; Get saved MSSCR0 value - mtspr msscr0, r3 ; Restore MSSCR0 + mtspr msscr0,r3 ; Restore MSSCR0 sync isync -notNapping: stw r12,saver12(r13) ; Save this one +notNapping: stw r12,saver12+4(r13) ; Save this one - stw r14,saver14(r13) ; Save this one - stw r15,saver15(r13) ; Save this one + stw r14,saver14+4(r13) ; Save this one + stw r15,saver15+4(r13) ; Save this one la r14,saver24(r13) ; Point to the next block to save into - stw r0,savecr(r13) ; Save rupt CR mfctr r6 ; Get the CTR - stw r16,saver16(r13) ; Save this one - stw r4,savelr(r13) ; Save rupt LR + stw r16,saver16+4(r13) ; Save this one + la r15,savectr(r13) ; point to line with CTR, DAR, DSISR, Exception code, and VRSAVE + stw r4,savelr+4(r13) ; Save rupt LR - bf- featL1ena,skipz4 ; L1 cache is disabled... - dcbz 0,r14 ; Allocate next save area line -skipz4: - stw r17,saver17(r13) ; Save this one - stw r18,saver18(r13) ; Save this one - stw r6,savectr(r13) ; Save rupt CTR - stw r19,saver19(r13) ; Save this one - lis r12,hi16(KERNEL_SEG_REG0_VALUE) ; Get the high half of the kernel SR0 value + dcbz 0,r14 ; allocate 32-byte line with r24-r27 + la r16,saver28(r13) ; point to line with r28-r31 + dcbz 0,r15 ; allocate line with CTR, DAR, DSISR, Exception code, and VRSAVE + stw r17,saver17+4(r13) ; Save this one + stw r18,saver18+4(r13) ; Save this one + stw r6,savectr+4(r13) ; Save rupt CTR + stw r0,savecr(r13) ; Save rupt CR + stw r19,saver19+4(r13) ; Save this one mfdar r6 ; Get the rupt DAR - stw r20,saver20(r13) ; Save this one - - bf+ specAccess,noSRsave ; Do not save SRs if this is not a special access... 
- mfsr r14,sr0 ; Get SR0 - stw r14,savesr0(r13) ; and save - mfsr r14,sr1 ; Get SR1 - stw r14,savesr1(r13) ; and save - mfsr r14,sr2 ; get SR2 - stw r14,savesr2(r13) ; and save - mfsr r14,sr3 ; get SR3 - stw r14,savesr3(r13) ; and save - -noSRsave: mtsr sr0,r12 ; Set the kernel SR0 - stw r21,saver21(r13) ; Save this one - addis r12,r12,0x0010 ; Point to the second segment of kernel - stw r10,savexer(r13) ; Save the rupt XER - mtsr sr1,r12 ; Set the kernel SR1 - stw r30,saver30(r13) ; Save this one - addis r12,r12,0x0010 ; Point to the third segment of kernel - stw r31,saver31(r13) ; Save this one - mtsr sr2,r12 ; Set the kernel SR2 - stw r22,saver22(r13) ; Save this one - addis r12,r12,0x0010 ; Point to the third segment of kernel - stw r23,saver23(r13) ; Save this one - mtsr sr3,r12 ; Set the kernel SR3 - stw r24,saver24(r13) ; Save this one - stw r25,saver25(r13) ; Save this one + stw r20,saver20+4(r13) ; Save this one + dcbz 0,r16 ; allocate 32-byte line with r28-r31 + + stw r21,saver21+4(r13) ; Save this one + lwz r21,spcFlags(r2) ; Get the special flags from per_proc + stw r10,savexer+4(r13) ; Save the rupt XER + stw r30,saver30+4(r13) ; Save this one + lhz r30,pfrptdProc(r2) ; Get the reported processor type + stw r31,saver31+4(r13) ; Save this one + stw r22,saver22+4(r13) ; Save this one + stw r23,saver23+4(r13) ; Save this one + stw r24,saver24+4(r13) ; Save this one + stw r25,saver25+4(r13) ; Save this one mfdsisr r7 ; Get the rupt DSISR - stw r26,saver26(r13) ; Save this one - stw r27,saver27(r13) ; Save this one - li r10,emfp0 ; Point to floating point save - stw r28,saver28(r13) ; Save this one - stw r29,saver29(r13) ; Save this one - mfsr r14,sr14 ; Get the copyin/out segment register - stw r6,savedar(r13) ; Save the rupt DAR - bf- featL1ena,skipz5a ; Do not do this if no L1... - dcbz r10,r2 ; Clear and allocate an L1 slot - -skipz5a: stw r7,savedsisr(r13) ; Save the rupt code DSISR + stw r26,saver26+4(r13) ; Save this one + stw r27,saver27+4(r13) ; Save this one + andis. r21,r21,hi16(perfMonitor) ; Is the performance monitor enabled? + stw r28,saver28+4(r13) ; Save this one + cmpwi cr1, r30,CPU_SUBTYPE_POWERPC_750 ; G3? + la r27,savevscr(r13) ; point to 32-byte line with VSCR and FPSCR + cmpwi cr2,r30,CPU_SUBTYPE_POWERPC_7400 ; This guy? + stw r29,saver29+4(r13) ; Save R29 + stw r6,savedar+4(r13) ; Save the rupt DAR + li r10,savepmc ; Point to pmc savearea + + beq+ noPerfMonSave32 ; No perfmon on here... + + dcbz r10,r13 ; Clear first part of pmc area + li r10,savepmc+0x20 ; Point to pmc savearea second part + li r22,0 ; r22: zero + dcbz r10,r13 ; Clear second part of pmc area + + beq cr1,perfMonSave32_750 ; This is a G3... + + beq cr2,perfMonSave32_7400 ; Regular olde G4... 
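The three family-specific save paths that follow (7450, then 7400, then 750) differ only in which performance-monitor registers each family implements, and each path falls through into the next. Restated as a C sketch (read_clear_spr, the save_pmc/save_mmcr fields, and the function name are illustrative assumptions drawn from the branches above, not xnu symbols):

	static void save_and_clear_pmcs(struct savearea *sv, int cpu_subtype)
	{
		if (cpu_subtype == CPU_SUBTYPE_POWERPC_7450) {	/* 7450 adds PMC5/PMC6 */
			sv->save_pmc[4] = read_clear_spr(pmc5);
			sv->save_pmc[5] = read_clear_spr(pmc6);
		}
		if (cpu_subtype != CPU_SUBTYPE_POWERPC_750)	/* 7400 and 7450 add MMCR2 */
			sv->save_mmcr2 = read_clear_spr(mmcr2);

		sv->save_mmcr0 = read_clear_spr(mmcr0);		/* common to all three */
		sv->save_mmcr1 = read_clear_spr(mmcr1);
		sv->save_pmc[0] = read_clear_spr(pmc1);
		sv->save_pmc[1] = read_clear_spr(pmc2);
		sv->save_pmc[2] = read_clear_spr(pmc3);
		sv->save_pmc[3] = read_clear_spr(pmc4);
	}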
+ + mfspr r24,pmc5 ; Here for a 7450 + mfspr r25,pmc6 + stw r24,savepmc+16(r13) ; Save PMC5 + stw r25,savepmc+20(r13) ; Save PMC6 + mtspr pmc5,r22 ; Leave PMC5 clear + mtspr pmc6,r22 ; Leave PMC6 clear + +perfMonSave32_7400: + mfspr r25,mmcr2 + stw r25,savemmcr2+4(r13) ; Save MMCR2 + mtspr mmcr2,r22 ; Leave MMCR2 clear + +perfMonSave32_750: + mfspr r23,mmcr0 + mfspr r24,mmcr1 + stw r23,savemmcr0+4(r13) ; Save MMCR0 + stw r24,savemmcr1+4(r13) ; Save MMCR1 + mtspr mmcr0,r22 ; Leave MMCR0 clear + mtspr mmcr1,r22 ; Leave MMCR1 clear + mfspr r23,pmc1 + mfspr r24,pmc2 + mfspr r25,pmc3 + mfspr r26,pmc4 + stw r23,savepmc+0(r13) ; Save PMC1 + stw r24,savepmc+4(r13) ; Save PMC2 + stw r25,savepmc+8(r13) ; Save PMC3 + stw r26,savepmc+12(r13) ; Save PMC4 + mtspr pmc1,r22 ; Leave PMC1 clear + mtspr pmc2,r22 ; Leave PMC2 clear + mtspr pmc3,r22 ; Leave PMC3 clear + mtspr pmc4,r22 ; Leave PMC4 clear + +noPerfMonSave32: + dcbz 0,r27 ; allocate line with VSCR and FPSCR + + stw r7,savedsisr(r13) ; Save the rupt code DSISR stw r11,saveexception(r13) ; Save the exception code - stw r14,savesr14(r13) ; Save copyin/copyout ; -; Here we will save some floating point and vector status -; and we also set a clean default status for a new interrupt level. -; Note that we assume that emfp0 is on an altivec boundary -; and that R10 points to it (as a displacemnt from R2). +; Everything is saved at this point, except for FPRs, and VMX registers. +; Time for us to get a new savearea and then trace interrupt if it is enabled. ; - lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable bit - mfmsr r6 ; Get the current MSR value - ori r8,r8,lo16(MASK(MSR_FP)) ; Add in the float enable - li r19,0 ; Assume no Altivec - or r7,r6,r8 ; Enable floating point - li r9,0 ; Get set to clear VRSAVE - mtmsr r7 ; Do it - isync - - bf featAltivec,noavec ; No Altivec on this CPU... - addi r14,r10,16 ; Displacement to second vector register - stvxl v0,r10,r2 ; Save a register - stvxl v1,r14,r2 ; Save a second register - mfvscr v0 ; Get the vector status register - la r28,savevscr(r13) ; Point to the status area - vspltish v1,1 ; Turn on the non-Java bit and saturate - stvxl v0,0,r28 ; Save the vector status - vspltisw v0,1 ; Turn on the saturate bit - mfspr r19,vrsave ; Get the VRSAVE register - vxor v1,v1,v0 ; Turn off saturate - mtspr vrsave,r9 ; Clear VRSAVE for each interrupt level - mtvscr v1 ; Set the non-java, no saturate status for new level + lwz r25,traceMask(0) ; Get the trace mask + li r0,SAVgeneral ; Get the savearea type value + lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number + rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2 + stb r0,SAVflags+2(r13) ; Mark valid context + addi r22,r22,10 ; Adjust code so we shift into CR5 + li r23,trcWork ; Get the trace work area address + rlwnm r7,r25,r22,22,22 ; Set CR5_EQ bit position to 0 if tracing allowed + li r26,0x8 ; Get start of cpu mask + srw r26,r26,r19 ; Get bit position of cpu number + mtcrf 0x04,r7 ; Set CR5 to show trace or not + and. r26,r26,r25 ; See if we trace this cpu + crandc cr5_eq,cr5_eq,cr0_eq ; Turn off tracing if cpu is disabled +; +; At this point, we can take another exception and lose nothing. 
+;
- lvxl v0,r10,r2 ; Restore first work register
- lvxl v1,r14,r2 ; Restore second work register
+#if INSTRUMENT
+ mfspr r26,pmc1 ; INSTRUMENT - saveinstr[2] - Take stamp after save is done
+ stw r26,0x6100+(0x02*16)+0x0(0) ; INSTRUMENT - Save it
+ mfspr r26,pmc2 ; INSTRUMENT - Get stamp
+ stw r26,0x6100+(0x02*16)+0x4(0) ; INSTRUMENT - Save it
+ mfspr r26,pmc3 ; INSTRUMENT - Get stamp
+ stw r26,0x6100+(0x02*16)+0x8(0) ; INSTRUMENT - Save it
+ mfspr r26,pmc4 ; INSTRUMENT - Get stamp
+ stw r26,0x6100+(0x02*16)+0xC(0) ; INSTRUMENT - Save it
+#endif
-noavec: stw r19,savevrsave(r13) ; Save the vector register usage flags
+ bne+ cr5,xcp32xit ; Skip all of this if no tracing here...
 ;
-; We need to save the FPSCR as if it is normal context.
-; This is because pending exceptions will cause an exception even if
-; FP is disabled. We need to clear the FPSCR when we first start running in the
-; kernel.
+; We select a trace entry using a compare and swap on the next entry field.
+; Since we do not lock the actual trace buffer, there is a potential that
+; another processor could wrap and trash our entry. Who cares?
 ;
- bf- featFP,nofpexe ; No possible floating point exceptions...
+ lwz r25,traceStart(0) ; Get the start of trace table
+ lwz r26,traceEnd(0) ; Get end of trace table
+
+trcsel: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
- stfd f0,emfp0(r2) ; Save FPR0
- stfd f1,emfp1(r2) ; Save FPR1
- mffs f0 ; Get the FPSCR
- fsub f1,f1,f1 ; Make a 0
- stfd f0,savefpscrpad(r13) ; Save the FPSCR
- mtfsf 0xFF,f1 ; Clear it
- lfd f0,emfp0(r2) ; Restore FPR0
- lfd f1,emfp1(r2) ; Restore FPR1
+ addi r22,r20,LTR_size ; Point to the next trace entry
+ cmplw r22,r26 ; Do we need to wrap the trace table?
+ bne+ gotTrcEnt ; No wrap, we got us a trace entry...
+
+ mr r22,r25 ; Wrap back to start
-nofpexe: mtmsr r6 ; Turn off FP and vector
+gotTrcEnt: stwcx. r22,0,r23 ; Try to update the current pointer
+ bne- trcsel ; Collision, try again...
+
+#if ESPDEBUG
+ dcbf 0,r23 ; Force to memory
+ sync
+#endif
+
+ dcbz 0,r20 ; Clear and allocate first trace line
+
+;
+; Let us cut that trace entry now.
+;
+
+ lwz r16,ruptStamp(r2) ; Get top of time base
+ lwz r17,ruptStamp+4(r2) ; Get the bottom of time stamp
+
+ li r14,32 ; Offset to second line
+
+ lwz r0,saver0+4(r13) ; Get back interrupt time R0
+ lwz r1,saver1+4(r13) ; Get back interrupt time R1
+ lwz r8,savecr(r13) ; Get the CR value
+
+ dcbz r14,r20 ; Zap the second line
+
+ sth r19,LTR_cpu(r20) ; Stash the cpu number
+ li r14,64 ; Offset to third line
+ sth r11,LTR_excpt(r20) ; Save the exception type
+ lwz r7,saver2+4(r13) ; Get back interrupt time R2
+ lwz r3,saver3+4(r13) ; Restore this one
+
+ dcbz r14,r20 ; Zap the third line
+
+ mfdsisr r9 ; Get the DSISR
+ li r14,96 ; Offset to fourth line
+ stw r16,LTR_timeHi(r20) ; Set the upper part of TB
+ stw r17,LTR_timeLo(r20) ; Set the lower part of TB
+ lwz r10,savelr+4(r13) ; Get the LR
+ mfsrr0 r17 ; Get SRR0 back, it is still good
+
+ dcbz r14,r20 ; Zap the fourth line
+ lwz r4,saver4+4(r13) ; Restore this one
+ lwz r5,saver5+4(r13) ; Restore this one
+ mfsrr1 r18 ; SRR1 is still good in here
+
+ stw r8,LTR_cr(r20) ; Save the CR
+ lwz r6,saver6+4(r13) ; Get R6
+ mfdar r16 ; Get this back
+ stw r9,LTR_dsisr(r20) ; Save the DSISR
+ stw r17,LTR_srr0+4(r20) ; Save the SRR0
+
+ stw r18,LTR_srr1+4(r20) ; Save the SRR1
+ stw r16,LTR_dar+4(r20) ; Save the DAR
+ mfctr r17 ; Get the CTR (still good in register)
+ stw r13,LTR_save+4(r20) ; Save the savearea
+ stw r10,LTR_lr+4(r20) ; Save the LR
+
+ stw r17,LTR_ctr+4(r20) ; Save off the CTR
+ stw r0,LTR_r0+4(r20) ; Save off register 0
+ stw r1,LTR_r1+4(r20) ; Save off register 1
+ stw r7,LTR_r2+4(r20) ; Save off register 2
+
+
+ stw r3,LTR_r3+4(r20) ; Save off register 3
+ stw r4,LTR_r4+4(r20) ; Save off register 4
+ stw r5,LTR_r5+4(r20) ; Save off register 5
+ stw r6,LTR_r6+4(r20) ; Save off register 6
+
+#if ESPDEBUG
+ addi r17,r20,32 ; Second line
+ addi r16,r20,64 ; Third line
+ dcbst br0,r20 ; Force to memory
+ dcbst br0,r17 ; Force to memory
+ addi r17,r17,32 ; Fourth line
+ dcbst br0,r16 ; Force to memory
+ dcbst br0,r17 ; Force to memory
+
+ sync ; Make sure it all goes
+#endif
+xcp32xit: mr r14,r11 ; Save the interrupt code across the call
+ bl EXT(save_get_phys_32) ; Grab a savearea
+ mfsprg r2,0 ; Get the per_proc info
+ li r10,emfp0 ; Point to floating point save
+ mr r11,r14 ; Get the exception code back
+ dcbz r10,r2 ; Clear for speed
+ stw r3,next_savearea+4(r2) ; Store the savearea for the next rupt
+
+#if INSTRUMENT
+ mfspr r4,pmc1 ; INSTRUMENT - saveinstr[3] - Take stamp after next savearea
+ stw r4,0x6100+(0x03*16)+0x0(0) ; INSTRUMENT - Save it
+ mfspr r4,pmc2 ; INSTRUMENT - Get stamp
+ stw r4,0x6100+(0x03*16)+0x4(0) ; INSTRUMENT - Save it
+ mfspr r4,pmc3 ; INSTRUMENT - Get stamp
+ stw r4,0x6100+(0x03*16)+0x8(0) ; INSTRUMENT - Save it
+ mfspr r4,pmc4 ; INSTRUMENT - Get stamp
+ stw r4,0x6100+(0x03*16)+0xC(0) ; INSTRUMENT - Save it
+#endif
+ b xcpCommon ; Go join the common interrupt processing...
+
+;
+;
+; This is the 64-bit context saving stuff
+;
+
+ .align 5
+
+extEntry64: mfsprg r13,0 ; Load per_proc
+ ld r13,next_savearea(r13) ; Get the exception save area
+ std r0,saver0(r13) ; Save register 0
+ lis r0,hi16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Set up the MSR we will use throughout.
Note that ME come on here if MCK + std r1,saver1(r13) ; Save register 1 + ori r1,r0,lo16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Rest of MSR + lis r0,0x0010 ; Get rupt code transform validity mask + mtmsr r1 ; Set MSR isync + + ori r0,r0,0x0200 ; Get rupt code transform validity mask + std r2,saver2(r13) ; Save this one + lis r1,0x00F0 ; Top half of xform XOR + rlwinm r2,r11,29,27,31 ; Get high 5 bits of rupt code + std r3,saver3(r13) ; Save this one + slw r0,r0,r2 ; Move transform validity bit to bit 0 + std r4,saver4(r13) ; Save this one + std r5,saver5(r13) ; Save this one + ori r1,r1,0x04EC ; Bottom half of xform XOR + mfxer r5 ; Save the XER because we are about to muck with it + rlwinm r4,r11,1,27,28 ; Get bottom of interrupt code * 8 + lis r3,hi16(dozem|napm) ; Get the nap and doze bits + srawi r0,r0,31 ; Get 0xFFFFFFFF of xform valid, 0 otherwise + rlwnm r4,r1,r4,24,31 ; Extract the xform XOR + li r1,saver16 ; Point to the next line + and r4,r4,r0 ; Only keep transform if we are to use it + li r2,lgKillResv ; Point to the killing field + mfcr r0 ; Save the CR + stwcx. r2,0,r2 ; Kill any pending reservation + dcbz128 r1,r13 ; Blow away the line + sldi r3,r3,32 ; Position it + mfspr r1,hid0 ; Get HID0 + andc r3,r1,r3 ; Clear nap and doze + xor r11,r11,r4 ; Transform 970 rupt code to standard keeping FAM bit + cmpld r3,r1 ; See if nap and/or doze was on + std r6,saver6(r13) ; Save this one + mfsprg r2,0 ; Get the per_proc area + la r6,savesrr0(r13) ; point to line with SRR0, SRR1, CR, XER, and LR + beq++ eE64NoNap ; No nap here, skip all this... + + sync ; Make sure we are clean + mtspr hid0,r3 ; Set the updated hid0 + mfspr r1,hid0 ; Yes, this is silly, keep it here + mfspr r1,hid0 ; Yes, this is a duplicate, keep it here + mfspr r1,hid0 ; Yes, this is a duplicate, keep it here + mfspr r1,hid0 ; Yes, this is a duplicate, keep it here + mfspr r1,hid0 ; Yes, this is a duplicate, keep it here + mfspr r1,hid0 ; Yes, this is a duplicate, keep it here + +eE64NoNap: crnot wasNapping,cr0_eq ; Remember if we were napping + andi. r1,r11,T_FAM ; Check FAM bit + beq++ eEnoFAM ; Is it FAM intercept + mfsrr1 r3 ; Load srr1 + andc r11,r11,r1 ; Clear FAM bit + rlwinm. r3,r3,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? + beq+ eEnoFAM ; From supervisor state + lwz r1,spcFlags(r2) ; Load spcFlags + rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit + cmpwi cr0,r1,2 ; Check FamVMena set without FamVMmode + bne++ eEnoFAM ; Can this context be FAM intercept + lwz r4,FAMintercept(r2) ; Load exceptions mask to intercept + li r3,0 ; Clear + srwi r1,r11,2 ; divide r11 by 4 + oris r3,r3,0x8000 ; Set r3 to 0x80000000 + srw r1,r3,r1 ; Set bit for current exception + and. 
r1,r1,r4 ; And current exception with the intercept mask + beq++ eEnoFAM ; Is it FAM intercept + b EXT(vmm_fam_exc) + + .align 5 + +eEnoFAM: lwz r1,pfAvailable(r2) ; Get the CPU features flags + dcbz128 0,r6 ; allocate 128-byte line with SRR0, SRR1, CR, XER, and LR + +; +; Remember, we are setting up CR6 with feature flags +; + std r7,saver7(r13) ; Save this one + mtcrf 0x80,r1 ; Put the features flags (that we care about) in the CR + std r8,saver8(r13) ; Save this one + mtcrf 0x40,r1 ; Put the features flags (that we care about) in the CR + mfsrr0 r6 ; Get the interruption SRR0 + lhz r8,PP_CPU_FLAGS(r2) ; Get the flags + mtcrf 0x20,r1 ; Put the features flags (that we care about) in the CR + mfsrr1 r7 ; Get the interrupt SRR1 + rlwinm r8,r8,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on + std r6,savesrr0(r13) ; Save the SRR0 + mtcrf 0x02,r1 ; Put the features flags (that we care about) in the CR + rlwinm r6,r7,(((31-MSR_BE_BIT)+(MSR_PR_BIT+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Move PR bit to BE bit + and r8,r6,r8 ; Remove BE bit only if problem state and special tracing on + std r9,saver9(r13) ; Save this one + andc r7,r7,r8 ; Clear BE bit if special trace is on and PR is set + crmove featAltivec,pfAltivecb ; Set the Altivec flag + std r7,savesrr1(r13) ; Save SRR1 + mfsprg r9,3 ; Get rupt time R11 + std r10,saver10(r13) ; Save this one + mfsprg r6,2 ; Get interrupt time R13 + std r9,saver11(r13) ; Save rupt time R11 + mtsprg 2,r1 ; Set the feature flags + std r12,saver12(r13) ; Save this one + mflr r4 ; Get the LR + mftb r7 ; Get the timebase + std r6,saver13(r13) ; Save rupt R13 + std r7,ruptStamp(r2) ; Save the time stamp + std r7,SAVtime(r13) ; Save the time stamp + + bf++ wasNapping,notNappingSF ; Skip if not waking up from nap... + + ld r6,napStamp(r2) ; Pick up nap stamp + lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return + sub r7,r7,r6 ; Subtract stamp from now + ld r6,napTotal(r2) ; Pick up total + add r6,r6,r7 ; Add low to total + ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return + std r6,napTotal(r2) ; Save the high total + std r3,savesrr0(r13) ; Modify to return to nap/doze exit +notNappingSF: + std r14,saver14(r13) ; Save this one + std r15,saver15(r13) ; Save this one + stw r0,savecr(r13) ; Save rupt CR + mfctr r6 ; Get the CTR + std r16,saver16(r13) ; Save this one + std r4,savelr(r13) ; Save rupt LR + + std r17,saver17(r13) ; Save this one + li r7,savepmc ; Point to pmc area + std r18,saver18(r13) ; Save this one + lwz r17,spcFlags(r2) ; Get the special flags from per_proc + std r6,savectr(r13) ; Save rupt CTR + std r19,saver19(r13) ; Save this one + mfdar r6 ; Get the rupt DAR + std r20,saver20(r13) ; Save this one + + dcbz128 r7,r13 ; Clear out the pmc spot + + std r21,saver21(r13) ; Save this one + std r5,savexer(r13) ; Save the rupt XER + std r22,saver22(r13) ; Save this one + std r23,saver23(r13) ; Save this one + std r24,saver24(r13) ; Save this one + std r25,saver25(r13) ; Save this one + mfdsisr r7 ; Get the rupt DSISR + std r26,saver26(r13) ; Save this one + andis. r17,r17,hi16(perfMonitor) ; Is the performance monitor enabled? 
+ std r27,saver27(r13) ; Save this one + li r10,emfp0 ; Point to floating point save + std r28,saver28(r13) ; Save this one + la r27,savevscr(r13) ; point to 32-byte line with VSCR and FPSCR + std r29,saver29(r13) ; Save R29 + std r30,saver30(r13) ; Save this one + std r31,saver31(r13) ; Save this one + std r6,savedar(r13) ; Save the rupt DAR + stw r7,savedsisr(r13) ; Save the rupt code DSISR + stw r11,saveexception(r13) ; Save the exception code + + beq++ noPerfMonSave64 ; Performance monitor not on... + + li r22,0 ; r22: zero + + mfspr r23,mmcr0_gp + mfspr r24,mmcr1_gp + mfspr r25,mmcra_gp + std r23,savemmcr0(r13) ; Save MMCR0 + std r24,savemmcr1(r13) ; Save MMCR1 + std r25,savemmcr2(r13) ; Save MMCRA + mtspr mmcr0_gp,r22 ; Leave MMCR0 clear + mtspr mmcr1_gp,r22 ; Leave MMCR1 clear + mtspr mmcra_gp,r22 ; Leave MMCRA clear + mfspr r23,pmc1_gp + mfspr r24,pmc2_gp + mfspr r25,pmc3_gp + mfspr r26,pmc4_gp + stw r23,savepmc+0(r13) ; Save PMC1 + stw r24,savepmc+4(r13) ; Save PMC2 + stw r25,savepmc+8(r13) ; Save PMC3 + stw r26,savepmc+12(r13) ; Save PMC4 + mfspr r23,pmc5_gp + mfspr r24,pmc6_gp + mfspr r25,pmc7_gp + mfspr r26,pmc8_gp + stw r23,savepmc+16(r13) ; Save PMC5 + stw r24,savepmc+20(r13) ; Save PMC6 + stw r25,savepmc+24(r13) ; Save PMC7 + stw r26,savepmc+28(r13) ; Save PMC8 + mtspr pmc1_gp,r22 ; Leave PMC1 clear + mtspr pmc2_gp,r22 ; Leave PMC2 clear + mtspr pmc3_gp,r22 ; Leave PMC3 clear + mtspr pmc4_gp,r22 ; Leave PMC4 clear + mtspr pmc5_gp,r22 ; Leave PMC5 clear + mtspr pmc6_gp,r22 ; Leave PMC6 clear + mtspr pmc7_gp,r22 ; Leave PMC7 clear + mtspr pmc8_gp,r22 ; Leave PMC8 clear + +noPerfMonSave64: ; ; Everything is saved at this point, except for FPRs, and VMX registers. ; Time for us to get a new savearea and then trace interrupt if it is enabled. ; + lwz r25,traceMask(0) ; Get the trace mask li r0,SAVgeneral ; Get the savearea type value - lis r23,hi16(EXT(trcWork)) ; Get the trace work area address - mr r14,r11 ; Save the interrupt code across the call + lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number stb r0,SAVflags+2(r13) ; Mark valid context ori r23,r23,lo16(EXT(trcWork)) ; Get the rest rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2 - lwz r25,traceMask(r23) ; Get the trace mask + li r23,trcWork ; Get the trace work area address addi r22,r22,10 ; Adjust code so we shift into CR5 - - bl EXT(save_get_phys) ; Grab a savearea - - mfsprg r2,0 ; Get back the per_proc block - rlwnm r7,r25,r22,22,22 ; Set CR5_EQ bit position to 0 if tracing allowed - lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number li r26,0x8 ; Get start of cpu mask - mr r11,r14 ; Get the exception code back + rlwnm r7,r25,r22,22,22 ; Set CR5_EQ bit position to 0 if tracing allowed srw r26,r26,r19 ; Get bit position of cpu number mtcrf 0x04,r7 ; Set CR5 to show trace or not and. r26,r26,r25 ; See if we trace this cpu - stw r3,next_savearea(r2) ; Remember the savearea we just got for the next rupt crandc cr5_eq,cr5_eq,cr0_eq ; Turn off tracing if cpu is disabled -; -; At this point, we can take another exception and lose nothing. -; - - lwz r0,saver0(r13) ; Get back interrupt time R0 (we need this whether we trace or not) - bne+ cr5,skipTrace ; Skip all of this if no tracing here... + bne++ cr5,xcp64xit ; Skip all of this if no tracing here... ; ; We select a trace entry using a compare and swap on the next entry field. @@ -1191,220 +1420,484 @@ nofpexe: mtmsr r6 ; Turn off FP and vector ; another processor could wrap an trash our entry. Who cares? 
; - lwz r25,traceStart(r23) ; Get the start of trace table - lwz r26,traceEnd(r23) ; Get end of trace table - -trcsel: lwarx r20,0,r23 ; Get and reserve the next slot to allocate + lwz r25,traceStart(0) ; Get the start of trace table + lwz r26,traceEnd(0) ; Get end of trace table + +trcselSF: lwarx r20,0,r23 ; Get and reserve the next slot to allocate addi r22,r20,LTR_size ; Point to the next trace entry cmplw r22,r26 ; Do we need to wrap the trace table? - bne+ gotTrcEnt ; No wrap, we got us a trace entry... + bne+ gotTrcEntSF ; No wrap, we got us a trace entry... mr r22,r25 ; Wrap back to start -gotTrcEnt: stwcx. r22,0,r23 ; Try to update the current pointer - bne- trcsel ; Collision, try again... +gotTrcEntSF: + stwcx. r22,0,r23 ; Try to update the current pointer + bne- trcselSF ; Collision, try again... #if ESPDEBUG dcbf 0,r23 ; Force to memory sync #endif - - bf- featL1ena,skipz6 ; L1 cache is disabled... - dcbz 0,r20 ; Clear and allocate first trace line -skipz6: ; ; Let us cut that trace entry now. ; + dcbz128 0,r20 ; Zap the trace entry - li r14,32 ; Offset to second line - - lwz r16,ruptStamp(r2) ; Get top of time base - lwz r17,ruptStamp+4(r2) ; Get the bottom of time stamp - - bf- featL1ena,skipz7 ; L1 cache is disabled... - dcbz r14,r20 ; Zap the second half - -skipz7: stw r16,LTR_timeHi(r20) ; Set the upper part of TB - lwz r1,saver1(r13) ; Get back interrupt time R1 - stw r17,LTR_timeLo(r20) ; Set the lower part of TB - lwz r18,saver2(r13) ; Get back interrupt time R2 - stw r0,LTR_r0(r20) ; Save off register 0 - lwz r3,saver3(r13) ; Restore this one + ld r16,ruptStamp(r2) ; Get top of time base + ld r0,saver0(r13) ; Get back interrupt time R0 (we need this whether we trace or not) + std r16,LTR_timeHi(r20) ; Set the upper part of TB + ld r1,saver1(r13) ; Get back interrupt time R1 + ld r18,saver2(r13) ; Get back interrupt time R2 + std r0,LTR_r0(r20) ; Save off register 0 + ld r3,saver3(r13) ; Restore this one sth r19,LTR_cpu(r20) ; Stash the cpu number - stw r1,LTR_r1(r20) ; Save off register 1 - lwz r4,saver4(r13) ; Restore this one - stw r18,LTR_r2(r20) ; Save off register 2 - lwz r5,saver5(r13) ; Restore this one - stw r3,LTR_r3(r20) ; Save off register 3 + std r1,LTR_r1(r20) ; Save off register 1 + ld r4,saver4(r13) ; Restore this one + std r18,LTR_r2(r20) ; Save off register 2 + ld r5,saver5(r13) ; Restore this one + ld r6,saver6(r13) ; Get R6 + std r3,LTR_r3(r20) ; Save off register 3 lwz r16,savecr(r13) ; Get the CR value - stw r4,LTR_r4(r20) ; Save off register 4 + std r4,LTR_r4(r20) ; Save off register 4 mfsrr0 r17 ; Get SRR0 back, it is still good - stw r5,LTR_r5(r20) ; Save off register 5 + std r5,LTR_r5(r20) ; Save off register 5 + std r6,LTR_r6(r20) ; Save off register 6 mfsrr1 r18 ; SRR1 is still good in here stw r16,LTR_cr(r20) ; Save the CR - stw r17,LTR_srr0(r20) ; Save the SSR0 - stw r18,LTR_srr1(r20) ; Save the SRR1 + std r17,LTR_srr0(r20) ; Save the SSR0 + std r18,LTR_srr1(r20) ; Save the SRR1 + mfdar r17 ; Get this back - lwz r16,savelr(r13) ; Get the LR - stw r17,LTR_dar(r20) ; Save the DAR + ld r16,savelr(r13) ; Get the LR + std r17,LTR_dar(r20) ; Save the DAR mfctr r17 ; Get the CTR (still good in register) - stw r16,LTR_lr(r20) ; Save the LR -#if 0 - lwz r17,emfp1(r2) ; (TEST/DEBUG) -#endif - stw r17,LTR_ctr(r20) ; Save off the CTR - stw r13,LTR_save(r20) ; Save the savearea + std r16,LTR_lr(r20) ; Save the LR + std r17,LTR_ctr(r20) ; Save off the CTR + mfdsisr r17 ; Get the DSISR + std r13,LTR_save(r20) ; Save the savearea + stw r17,LTR_dsisr(r20) 
; Save the DSISR
 sth r11,LTR_excpt(r20) ; Save the exception type
+
 #if ESPDEBUG
- addi r17,r20,32 ; (TEST/DEBUG)
- dcbst br0,r20 ; (TEST/DEBUG)
- dcbst br0,r17 ; (TEST/DEBUG)
- sync ; (TEST/DEBUG)
+ dcbf 0,r20 ; Force to memory
+ sync ; Make sure it all goes
 #endif
+xcp64xit: mr r14,r11 ; Save the interrupt code across the call
+ bl EXT(save_get_phys_64) ; Grab a savearea
+ mfsprg r2,0 ; Get the per_proc info
+ li r10,emfp0 ; Point to floating point save
+ mr r11,r14 ; Get the exception code back
+ dcbz128 r10,r2 ; Clear for speed
+ std r3,next_savearea(r2) ; Store the savearea for the next rupt
+ b xcpCommon ; Go join the common interrupt processing...
 ;
-; We are done with the trace, except for maybe modifying the exception
-; code later on. So, that means that we need to save R20 and CR5.
-;
-; So, finish setting up the kernel registers now.
+; All of the context is saved. Now we will get a
+; fresh savearea. After this we can take an interrupt.
+;
+
+ .align 5
+
+xcpCommon:
+
+;
+; Here we will save some floating point and vector status
+; and we also set a clean default status for a new interrupt level.
+; Note that we assume that emfp0 is on an altivec boundary
+; and that R10 points to it (as a displacement from R2).
+;
+; We need to save the FPSCR as if it is normal context.
+; This is because pending exceptions will cause an exception even if
+; FP is disabled. We need to clear the FPSCR when we first start running in the
+; kernel.
+;
+
+ stfd f0,emfp0(r2) ; Save FPR0
+ stfd f1,emfp1(r2) ; Save FPR1
+ li r19,0 ; Assume no Altivec
+ mffs f0 ; Get the FPSCR
+ lfd f1,Zero(0) ; Make a 0
+ stfd f0,savefpscrpad(r13) ; Save the FPSCR
+ li r9,0 ; Get set to clear VRSAVE
+ mtfsf 0xFF,f1 ; Clear it
+ addi r14,r10,16 ; Displacement to second vector register
+ lfd f0,emfp0(r2) ; Restore FPR0
+ la r28,savevscr(r13) ; Point to the status area
+ lfd f1,emfp1(r2) ; Restore FPR1
+
+ bf featAltivec,noavec ; No Altivec on this CPU...
+
+ stvxl v0,r10,r2 ; Save a register
+ stvxl v1,r14,r2 ; Save a second register
+ mfspr r19,vrsave ; Get the VRSAVE register
+ mfvscr v0 ; Get the vector status register
+ vspltish v1,1 ; Turn on the non-Java bit and saturate
+ stvxl v0,0,r28 ; Save the vector status
+ vspltisw v0,1 ; Turn on the saturate bit
+ vxor v1,v1,v0 ; Turn off saturate
+ mtvscr v1 ; Set the non-java, no saturate status for new level
+ mtspr vrsave,r9 ; Clear VRSAVE for each interrupt level
+
+ lvxl v0,r10,r2 ; Restore first work register
+ lvxl v1,r14,r2 ; Restore second work register
+
+noavec: stw r19,savevrsave(r13) ; Save the vector register usage flags
+
+;
+; We are now done saving all of the context. Start filtering the interrupts.
+; Note that a Redrive will count as an actual interrupt.
+; Note also that we take a lot of system calls so we will start decode here.
;
-skipTrace: lhz r21,PP_CPU_NUMBER(r2) ; Get the logical processor number
- lis r12,hi16(EXT(hw_counts)) ; Get the high part of the interrupt counters
- lwz r7,savesrr1(r13) ; Get the entering MSR
- ori r12,r12,lo16(EXT(hw_counts)) ; Get the low part of the interrupt counters
- rlwinm r21,r21,8,20,23 ; Get index to processor counts
+Redrive:
+
+
+#if INSTRUMENT
+ mfspr r20,pmc1 ; INSTRUMENT - saveinstr[4] - Take stamp before exception filter
+ stw r20,0x6100+(0x04*16)+0x0(0) ; INSTRUMENT - Save it
+ mfspr r20,pmc2 ; INSTRUMENT - Get stamp
+ stw r20,0x6100+(0x04*16)+0x4(0) ; INSTRUMENT - Save it
+ mfspr r20,pmc3 ; INSTRUMENT - Get stamp
+ stw r20,0x6100+(0x04*16)+0x8(0) ; INSTRUMENT - Save it
+ mfspr r20,pmc4 ; INSTRUMENT - Get stamp
+ stw r20,0x6100+(0x04*16)+0xC(0) ; INSTRUMENT - Save it
+#endif
+ lwz r22,SAVflags(r13) ; Pick up the flags
+ lwz r0,saver0+4(r13) ; Get back interrupt time syscall number
+ mfsprg r2,0 ; Restore per_proc
+
+ li r20,lo16(xcpTable) ; Point to the vector table (note: this must be in 1st 64k of physical memory)
+ la r12,hwCounts(r2) ; Point to the exception count area
+ rlwinm r22,r22,SAVredriveb+1,31,31 ; Get a 1 if we are redriving
+ add r12,r12,r11 ; Point to the count
+ lwzx r20,r20,r11 ; Get the interrupt handler
+ lwz r25,0(r12) ; Get the old value
+ lwz r23,hwRedrives(r2) ; Get the redrive count
+ xori r24,r22,1 ; Get the NOT of the redrive
+ mtctr r20 ; Point to the interrupt handler
 mtcrf 0x80,r0 ; Set our CR0 to the high nybble of possible syscall code
- rlwinm r6,r0,1,0,31 ; Move sign bit to the end
- cmplwi cr1,r11,T_SYSTEM_CALL ; Did we get a system call?
- add r12,r12,r21 ; Point to the processor count area
+ add r25,r25,r24 ; Count this one if not a redrive
+ add r23,r23,r24 ; Count this one if it is a redrive
 crandc cr0_lt,cr0_lt,cr0_gt ; See if we have R0 equal to 0b10xx...x
- lwzx r22,r12,r11 ; Get the old value
- cmplwi cr3,r11,T_IN_VAIN ; Was this all in vain? All for nothing?
- addi r22,r22,1 ; Count this one
+ stw r25,0(r12) ; Store it back
+ stw r23,hwRedrives(r2) ; Save the redrive count
+ bctr ; Go process the exception...
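The Redrive filter above turns the exception code into a direct jump: the T_xxx codes are spaced one word apart, so r11 serves directly as a byte offset into the word table xcpTable defined just below, and the selected handler is reached through mtctr/bctr. The same dispatch in C terms (a sketch; dispatch_exception is an illustrative name, and 32-bit handler addresses are assumed):

	typedef void (*xcp_handler_t)(void);
	extern xcp_handler_t xcpTable[];	/* the table defined just below */

	static void dispatch_exception(unsigned int code)
	{
		/* code is a T_xxx value, always a multiple of 4, so it is
		   already a byte offset into a table of 32-bit entries */
		xcpTable[code / 4]();		/* lwzx + mtctr + bctr above */
	}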
+ + +; +; Exception vector filter table +; + + .align 7 + +xcpTable: + .long EatRupt ; T_IN_VAIN + .long PassUpTrap ; T_RESET + .long MachineCheck ; T_MACHINE_CHECK + .long EXT(handlePF) ; T_DATA_ACCESS + .long EXT(handlePF) ; T_INSTRUCTION_ACCESS + .long PassUpRupt ; T_INTERRUPT + .long EXT(AlignAssist) ; T_ALIGNMENT + .long EXT(Emulate) ; T_PROGRAM + .long PassUpFPU ; T_FP_UNAVAILABLE + .long PassUpRupt ; T_DECREMENTER + .long PassUpTrap ; T_IO_ERROR + .long PassUpTrap ; T_RESERVED + .long xcpSyscall ; T_SYSTEM_CALL + .long PassUpTrap ; T_TRACE + .long PassUpTrap ; T_FP_ASSIST + .long PassUpTrap ; T_PERF_MON + .long PassUpVMX ; T_VMX + .long PassUpTrap ; T_INVALID_EXCP0 + .long PassUpTrap ; T_INVALID_EXCP1 + .long PassUpTrap ; T_INVALID_EXCP2 + .long PassUpTrap ; T_INSTRUCTION_BKPT + .long PassUpRupt ; T_SYSTEM_MANAGEMENT + .long EXT(AltivecAssist) ; T_ALTIVEC_ASSIST + .long PassUpRupt ; T_THERMAL + .long PassUpTrap ; T_INVALID_EXCP5 + .long PassUpTrap ; T_INVALID_EXCP6 + .long PassUpTrap ; T_INVALID_EXCP7 + .long PassUpTrap ; T_INVALID_EXCP8 + .long PassUpTrap ; T_INVALID_EXCP9 + .long PassUpTrap ; T_INVALID_EXCP10 + .long PassUpTrap ; T_INVALID_EXCP11 + .long PassUpTrap ; T_INVALID_EXCP12 + .long PassUpTrap ; T_INVALID_EXCP13 + + .long PassUpTrap ; T_RUNMODE_TRACE + + .long PassUpRupt ; T_SIGP + .long PassUpTrap ; T_PREEMPT + .long conswtch ; T_CSWITCH + .long PassUpRupt ; T_SHUTDOWN + .long PassUpAbend ; T_CHOKE + + .long EXT(handleDSeg) ; T_DATA_SEGMENT + .long EXT(handleISeg) ; T_INSTRUCTION_SEGMENT + + .long WhoaBaby ; T_SOFT_PATCH + .long WhoaBaby ; T_MAINTENANCE + .long WhoaBaby ; T_INSTRUMENTATION + +; +; Just what the heck happened here???? +; + + .align 5 + +WhoaBaby: b . ; Open the hood and wait for help + + +; +; System call +; + + .align 5 + +xcpSyscall: lis r20,hi16(EXT(shandler)) ; Assume this is a normal one, get handler address + rlwinm r6,r0,1,0,31 ; Move sign bit to the end + ori r20,r20,lo16(EXT(shandler)) ; Assume this is a normal one, get handler address + bnl++ cr0,PassUp ; R0 not 0b10xxx...x, can not be any kind of magical system call, just pass it up... + lwz r7,savesrr1+4(r13) ; Get the entering MSR (low half) + lwz r1,dgFlags(0) ; Get the flags cmplwi cr2,r6,1 ; See if original R0 had the CutTrace request code in it - stwx r22,r12,r11 ; Store it back - beq- cr3,EatRupt ; Interrupt was all for nothing... - cmplwi cr3,r11,T_MACHINE_CHECK ; Did we get a machine check? - bne+ cr1,noCutT ; Not a system call... - bnl+ cr0,noCutT ; R0 not 0b10xxx...x, can not be any kind of magical system call... rlwinm. r7,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Did we come from user state? - lis r1,hi16(EXT(dgWork)) ; Get the diagnostics flags - beq+ FCisok ; From supervisor state... + beq++ FCisok ; From supervisor state... - ori r1,r1,lo16(EXT(dgWork)) ; Again - lwz r1,dgFlags(r1) ; Get the flags rlwinm. r1,r1,0,enaUsrFCallb,enaUsrFCallb ; Are they valid? - beq- noCutT ; No... + beq++ PassUp ; No, treat as a normal one... -FCisok: beq- cr2,isCutTrace ; This is a CutTrace system call... +FCisok: beq++ cr2,EatRupt ; This is a CutTrace system call, we are done with it... ; ; Here is where we call the firmware. If it returns T_IN_VAIN, that means ; that it has handled the interruption. Remember: thou shalt not trash R13 -; or R20 while you are away. Anything else is ok. +; while you are away. Anything else is ok. ; - lwz r3,saver3(r13) ; Restore the first parameter - bl EXT(FirmwareCall) ; Go handle the firmware call.... - - cmplwi r3,T_IN_VAIN ; Was it handled? 
- mfsprg r2,0 ; Restore the per_proc - beq+ EatRupt ; Interrupt was handled... - mr r11,r3 ; Put the rupt code into the right register - b filter ; Go to the normal system call handler... - - .align 5 - -isCutTrace: - li r7,-32768 ; Get a 0x8000 for the exception code - bne- cr5,EatRupt ; Tracing is disabled... - sth r7,LTR_excpt(r20) ; Modify the exception type to a CutTrace - b EatRupt ; Time to go home... - -; We are here because we did not have a CutTrace system call - - .align 5 - -noCutT: beq- cr3,MachineCheck ; Whoa... Machine check... + lwz r3,saver3+4(r13) ; Restore the first parameter + b EXT(FirmwareCall) ; Go handle the firmware call.... ; -; The following interrupts are the only ones that can be redriven -; by the higher level code or emulation routines. +; Here is where we return from the firmware call ; -Redrive: cmplwi cr0,r11,T_IN_VAIN ; Did the signal handler eat the signal? - mfsprg r2,0 ; Get the per_proc block - beq+ cr0,EatRupt ; Bail now if we ate the rupt... + .align 5 + .globl EXT(FCReturn) +LEXT(FCReturn) + cmplwi r3,T_IN_VAIN ; Was it handled? + beq+ EatRupt ; Interrupt was handled... + mr r11,r3 ; Put the rupt code into the right register + b Redrive ; Go through the filter again... + ; -; Here ss where we check for the other fast-path exceptions: translation exceptions, -; emulated instructions, etc. +; Here is where we return from the PTE miss and segment exception handler ; -filter: cmplwi cr3,r11,T_ALTIVEC_ASSIST ; Check for an Altivec denorm assist - cmplwi cr4,r11,T_ALIGNMENT ; See if we got an alignment exception - cmplwi cr1,r11,T_PROGRAM ; See if we got a program exception - cmplwi cr2,r11,T_INSTRUCTION_ACCESS ; Check on an ISI - bne+ cr3,noAltivecAssist ; It is not an assist... - b EXT(AltivecAssist) ; It is an assist... - .align 5 + .globl EXT(PFSExit) -noAltivecAssist: - bne+ cr4,noAlignAssist ; No alignment here... - b EXT(AlignAssist) ; Go try to emulate... +LEXT(PFSExit) - .align 5 +#if 0 + mfsprg r2,0 ; (BRINGUP) + lwz r0,savedsisr(r13) ; (BRINGUP) + andis. r0,r0,hi16(dsiAC) ; (BRINGUP) + beq++ didnthit ; (BRINGUP) + lwz r0,20(0) ; (BRINGUP) + mr. r0,r0 ; (BRINGUP) + bne-- didnthit ; (BRINGUP) +#if 0 + li r0,1 ; (BRINGUP) + stw r0,20(0) ; (BRINGUP) + lis r0,hi16(Choke) ; (BRINGUP) + ori r0,r0,lo16(Choke) ; (BRINGUP) + sc ; (BRINGUP) +#endif + + lwz r4,savesrr0+4(r13) ; (BRINGUP) + lwz r8,savesrr1+4(r13) ; (BRINGUP) + lwz r6,savedar+4(r13) ; (BRINGUP) + rlwinm. r0,r8,0,MSR_IR_BIT,MSR_IR_BIT ; (BRINGUP) + mfmsr r9 ; (BRINGUP) + ori r0,r9,lo16(MASK(MSR_DR)) ; (BRINGUP) + beq-- hghg ; (BRINGUP) + mtmsr r0 ; (BRINGUP) + isync ; (BRINGUP) + +hghg: lwz r5,0(r4) ; (BRINGUP) + beq-- hghg1 ; (BRINGUP) + mtmsr r9 ; (BRINGUP) + isync ; (BRINGUP) + +hghg1: rlwinm r7,r5,6,26,31 ; (BRINGUP) + rlwinm r27,r5,14,24,28 ; (BRINGUP) + addi r3,r13,saver0+4 ; (BRINGUP) + lwzx r3,r3,r27 ; (BRINGUP) + +#if 0 + lwz r27,patcharea+4(r2) ; (BRINGUP) + mr. r3,r3 ; (BRINGUP) + bne++ nbnbnb ; (BRINGUP) + addi r27,r27,1 ; (BRINGUP) + stw r27,patcharea+4(r2) ; (BRINGUP) +nbnbnb: +#endif + + rlwinm. 
r28,r8,0,MSR_DR_BIT,MSR_DR_BIT ; (BRINGUP) + rlwinm r27,r6,0,0,29 ; (BRINGUP) + ori r28,r9,lo16(MASK(MSR_DR)) ; (BRINGUP) + mfspr r10,dabr ; (BRINGUP) + li r0,0 ; (BRINGUP) + mtspr dabr,r0 ; (BRINGUP) + cmplwi cr1,r7,31 ; (BRINGUP) + beq-- qqq0 ; (BRINGUP) + mtmsr r28 ; (BRINGUP) +qqq0: + isync ; (BRINGUP) + + lwz r27,0(r27) ; (BRINGUP) - Get original value + + bne cr1,qqq1 ; (BRINGUP) + + rlwinm r5,r5,31,22,31 ; (BRINGUP) + cmplwi cr1,r5,151 ; (BRINGUP) + beq cr1,qqq3 ; (BRINGUP) + cmplwi cr1,r5,407 ; (BRINGUP) + beq cr1,qqq2 ; (BRINGUP) + cmplwi cr1,r5,215 ; (BRINGUP) + beq cr1,qqq0q ; (BRINGUP) + cmplwi cr1,r5,1014 ; (BRINGUP) + beq cr1,qqqm1 ; (BRINGUP) + + lis r0,hi16(Choke) ; (BRINGUP) + ori r0,r0,lo16(Choke) ; (BRINGUP) + sc ; (BRINGUP) + +qqqm1: rlwinm r7,r6,0,0,26 ; (BRINGUP) + stw r0,0(r7) ; (BRINGUP) + stw r0,4(r7) ; (BRINGUP) + stw r0,8(r7) ; (BRINGUP) + stw r0,12(r7) ; (BRINGUP) + stw r0,16(r7) ; (BRINGUP) + stw r0,20(r7) ; (BRINGUP) + stw r0,24(r7) ; (BRINGUP) + stw r0,28(r7) ; (BRINGUP) + b qqq9 + +qqq1: cmplwi r7,38 ; (BRINGUP) + bgt qqq2 ; (BRINGUP) + blt qqq3 ; (BRINGUP) -noAlignAssist: - bne+ cr1,noEmulate ; No emulation here... - b EXT(Emulate) ; Go try to emulate... +qqq0q: stb r3,0(r6) ; (BRINGUP) + b qqq9 ; (BRINGUP) + +qqq2: sth r3,0(r6) ; (BRINGUP) + b qqq9 ; (BRINGUP) + +qqq3: stw r3,0(r6) ; (BRINGUP) + +qqq9: +#if 0 + rlwinm r7,r6,0,0,29 ; (BRINGUP) + lwz r0,0(r7) ; (BRINGUP) - Get newest value +#else + lis r7,hi16(0x000792B8) ; (BRINGUP) + ori r7,r7,lo16(0x000792B8) ; (BRINGUP) + lwz r0,0(r7) ; (BRINGUP) - Get newest value +#endif + mtmsr r9 ; (BRINGUP) + mtspr dabr,r10 ; (BRINGUP) + isync ; (BRINGUP) - .align 5 +#if 0 + lwz r28,patcharea+12(r2) ; (BRINGUP) + mr. r28,r28 ; (BRINGUP) + bne++ qqq12 ; (BRINGUP) + lis r28,0x4000 ; (BRINGUP) + +qqq12: stw r27,0(r28) ; (BRINGUP) + lwz r6,savedar+4(r13) ; (BRINGUP) + stw r0,4(r28) ; (BRINGUP) + stw r4,8(r28) ; (BRINGUP) + stw r6,12(r28) ; (BRINGUP) + addi r28,r28,16 ; (BRINGUP) + mr. r3,r3 ; (BRINGUP) + stw r28,patcharea+12(r2) ; (BRINGUP) + lwz r10,patcharea+8(r2) ; (BRINGUP) + lwz r0,patcharea+4(r2) ; (BRINGUP) +#endif -noEmulate: cmplwi cr3,r11,T_CSWITCH ; Are we context switching - cmplwi r11,T_DATA_ACCESS ; Check on a DSI - beq- cr2,DSIorISI ; It is a PTE fault... - beq- cr3,conswtch ; It is a context switch... - bne+ PassUp ; It is not a PTE fault... +#if 1 + stw r0,patcharea(r2) ; (BRINGUP) +#endif -; -; This call will either handle the fault, in which case it will not -; return, or return to pass the fault up the line. -; +#if 0 + xor r28,r0,r27 ; (BRINGUP) - See how much it changed + rlwinm r28,r28,24,24,31 ; (BRINGUP) + cmplwi r28,1 ; (BRINGUP) + + ble++ qqq10 ; (BRINGUP) + + mr r7,r0 ; (BRINGUP) + li r0,1 ; (BRINGUP) + stw r0,20(0) ; (BRINGUP) + lis r0,hi16(Choke) ; (BRINGUP) + ori r0,r0,lo16(Choke) ; (BRINGUP) + sc ; (BRINGUP) +#endif -DSIorISI: mr r3,r11 ; Move the rupt code - - bl EXT(handlePF) ; See if we can handle this fault - lwz r0,savesrr1(r13) ; Get the MSR in use at exception time - mfsprg r2,0 ; Get back per_proc - cmplwi cr1,r3,T_IN_VAIN ; Was it handled? +qqq10: addi r4,r4,4 ; (BRINGUP) + stw r4,savesrr0+4(r13) ; (BRINGUP) + + li r11,T_IN_VAIN ; (BRINGUP) + b EatRupt ; (BRINGUP) + +didnthit: ; (BRINGUP) +#endif +#if 0 + lwz r0,20(0) ; (BRINGUP) + mr. 
r0,r0 ; (BRINGUP) + beq++ opopop ; (BRINGUP) + li r0,0 ; (BRINGUP) + stw r0,20(0) ; (BRINGUP) + lis r0,hi16(Choke) ; (BRINGUP) + ori r0,r0,lo16(Choke) ; (BRINGUP) + sc ; (BRINGUP) +opopop: +#endif + lwz r0,savesrr1+4(r13) ; Get the MSR in use at exception time + cmplwi cr1,r11,T_IN_VAIN ; Was it handled? rlwinm. r4,r0,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? - mr r11,r3 ; Put interrupt code back into the right register - beq+ cr1,EatRupt ; Yeah, just blast back to the user... - beq- NoFamPf + beq++ cr1,EatRupt ; Yeah, just blast back to the user... + beq-- NoFamPf + mfsprg r2,0 ; Get back per_proc lwz r1,spcFlags(r2) ; Load spcFlags rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit cmpi cr0,r1,2 ; Check FamVMena set without FamVMmode - bne- cr0,NoFamPf + bne-- cr0,NoFamPf lwz r6,FAMintercept(r2) ; Load exceptions mask to intercept + li r5,0 ; Clear srwi r1,r11,2 ; divide r11 by 4 - lis r5,0x8000 ; Set r5 to 0x80000000 + oris r5,r5,0x8000 ; Set r5 to 0x80000000 srw r1,r5,r1 ; Set bit for current exception and. r1,r1,r6 ; And current exception with the intercept mask - beq+ NoFamPf ; Is it FAM intercept - bl EXT(vmm_fam_pf_handler) + beq++ NoFamPf ; Is it FAM intercept + bl EXT(vmm_fam_pf) b EatRupt -NoFamPf: - andi. r4,r0,lo16(MASK(MSR_RI)) ; See if the recover bit is on - beq+ PassUp ; Not on, normal case... + +NoFamPf: andi. r4,r0,lo16(MASK(MSR_RI)) ; See if the recover bit is on + lis r0,0x8000 ; Get 0xFFFFFFFF80000000 + add r0,r0,r0 ; Get 0xFFFFFFFF00000000 + beq++ PassUpTrap ; Not on, normal case... ; ; Here is where we handle the "recovery mode" stuff. ; This is set by an emulation routine to trap any faults when it is fetching data or @@ -1413,35 +1906,49 @@ NoFamPf: ; If we get a fault, we turn off RI, set CR0_EQ to false, bump the PC, and set R0 ; and R1 to the DAR and DSISR, respectively. ; - lwz r4,savesrr0(r13) ; Get the failing instruction address + lwz r3,savesrr0(r13) ; Get the failing instruction address + lwz r4,savesrr0+4(r13) ; Get the failing instruction address lwz r5,savecr(r13) ; Get the condition register - addi r4,r4,4 ; Skip failing instruction - lwz r6,savedar(r13) ; Get the DAR + or r4,r4,r0 ; Fill the high part with foxes + lwz r0,savedar(r13) ; Get the DAR + addic r4,r4,4 ; Skip failing instruction + lwz r6,savedar+4(r13) ; Get the DAR + addze r3,r3 ; Propagate carry rlwinm r5,r5,0,3,1 ; Clear CR0_EQ to let emulation code know we failed lwz r7,savedsisr(r13) ; Grab the DSISR - stw r0,savesrr1(r13) ; Save the result MSR - stw r4,savesrr0(r13) ; Save resume address + stw r3,savesrr0(r13) ; Save resume address + stw r4,savesrr0+4(r13) ; Save resume address stw r5,savecr(r13) ; And the resume CR - stw r6,saver0(r13) ; Pass back the DAR - stw r7,saver1(r13) ; Pass back the DSISR + stw r0,saver0(r13) ; Pass back the DAR + stw r6,saver0+4(r13) ; Pass back the DAR + stw r7,saver1+4(r13) ; Pass back the DSISR b EatRupt ; Resume emulated code ; ; Here is where we handle the context switch firmware call. The old -; context has been saved, and the new savearea in in saver3. We will just +; context has been saved. The new savearea is in kind of hokey, the high order +; half is stored in saver7 and the low half is in saver3. 
We will just ; muck around with the savearea pointers, and then join the exit routine ; .align 5 conswtch: + li r0,0xFFF ; Get page boundary mr r29,r13 ; Save the save - rlwinm r30,r13,0,0,19 ; Get the start of the savearea block - lwz r5,saver3(r13) ; Switch to the new savearea - lwz r30,SACvrswap(r30) ; get real to virtual translation + andc r30,r13,r0 ; Round down to page boundary (64-bit safe) + lwz r5,saver3+4(r13) ; Switch to the new savearea + bf-- pf64Bitb,xcswNo64 ; Not 64-bit... + lwz r6,saver7+4(r13) ; Get the high order half + sldi r6,r6,32 ; Position high half + or r5,r5,r6 ; Merge them + +xcswNo64: lwz r30,SACvrswap+4(r30) ; get real to virtual translation mr r13,r5 ; Switch saveareas + li r0,0 ; Clear this xor r27,r29,r30 ; Flip to virtual - stw r27,saver3(r5) ; Push the new savearea to the switch to routine + stw r0,saver3(r5) ; Push the new virtual savearea to the switch to routine + stw r27,saver3+4(r5) ; Push the new virtual savearea to the switch to routine b EatRupt ; Start it up... ; @@ -1454,89 +1961,437 @@ conswtch: MachineCheck: - lwz r27,savesrr1(r13) ; ? - rlwinm. r11,r27,0,dcmck,dcmck ; ? - beq+ notDCache ; ? + bt++ pf64Bitb,mck64 ; ? - mfspr r11,msscr0 ; ? - dssall ; ? - sync - - lwz r27,savesrr1(r13) ; ? - -hiccup: cmplw r27,r27 ; ? - bne- hiccup ; ? - isync ; ? - - oris r11,r11,hi16(dl1hwfm) ; ? - mtspr msscr0,r11 ; ? - -rstbsy: mfspr r11,msscr0 ; ? - - rlwinm. r11,r11,0,dl1hwf,dl1hwf ; ? - bne rstbsy ; ? - - sync ; ? + lwz r27,savesrr1+4(r13) ; Pick up srr1 - b EatRupt ; ? - - .align 5 - -notDCache: ; ; Check if the failure was in ; ml_probe_read. If so, this is expected, so modify the PC to ; ml_proble_read_mck and then eat the exception. ; - lwz r30,savesrr0(r13) ; Get the failing PC + lwz r30,savesrr0+4(r13) ; Get the failing PC lis r28,hi16(EXT(ml_probe_read_mck)) ; High order part lis r27,hi16(EXT(ml_probe_read)) ; High order part ori r28,r28,lo16(EXT(ml_probe_read_mck)) ; Get the low part ori r27,r27,lo16(EXT(ml_probe_read)) ; Get the low part cmplw r30,r28 ; Check highest possible cmplw cr1,r30,r27 ; Check lowest - bge- PassUp ; Outside of range - blt- cr1,PassUp ; Outside of range + bge- PassUpTrap ; Outside of range + blt- cr1,PassUpTrap ; Outside of range +; +; We need to fix up the BATs here because the probe +; routine messed them all up... As long as we are at it, +; fix up to return directly to caller of probe. +; + + lis r11,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address + ori r11,r11,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address + + lwz r30,0(r11) ; Pick up DBAT 0 high + lwz r28,4(r11) ; Pick up DBAT 0 low + lwz r27,8(r11) ; Pick up DBAT 1 high + lwz r18,16(r11) ; Pick up DBAT 2 high + lwz r11,24(r11) ; Pick up DBAT 3 high + + sync + mtdbatu 0,r30 ; Restore DBAT 0 high + mtdbatl 0,r28 ; Restore DBAT 0 low + mtdbatu 1,r27 ; Restore DBAT 1 high + mtdbatu 2,r18 ; Restore DBAT 2 high + mtdbatu 3,r11 ; Restore DBAT 3 high + sync + + lwz r28,savelr+4(r13) ; Get return point + lwz r27,saver0+4(r13) ; Get the saved MSR + li r30,0 ; Get a failure RC + stw r28,savesrr0+4(r13) ; Set the return point + stw r27,savesrr1+4(r13) ; Set the continued MSR + stw r30,saver3+4(r13) ; Set return code + b EatRupt ; Yum, yum, eat it all up... 
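The fixup above turns an expected machine check inside ml_probe_read into an ordinary failure return instead of passing it up. Roughly, in C (a sketch; restore_shadow_bats and the savearea field names here are illustrative, not xnu symbols):

	static int fixup_probe_mck(struct savearea *sv, uintptr_t pc)
	{
		if (pc < (uintptr_t)ml_probe_read || pc >= (uintptr_t)ml_probe_read_mck)
			return 0;		/* a real machine check: pass it up */

		restore_shadow_bats();		/* the probe leaves the DBATs trashed */
		sv->save_srr0 = sv->save_lr;	/* resume at the probe's caller */
		sv->save_srr1 = sv->save_r0;	/* MSR the probe stashed in r0 */
		sv->save_r3 = 0;		/* failure return code for the caller */
		return 1;			/* EatRupt: restore and continue */
	}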
+ +; +; 64-bit machine checks +; + +mck64: + +; +; NOTE: WE NEED TO RETHINK RECOVERABILITY A BIT - radar 3167190 +; + + ld r23,savesrr0(r13) ; Grab the SRR0 in case we need bad instruction + ld r20,savesrr1(r13) ; Grab the SRR1 so we can decode the thing + lwz r21,savedsisr(r13) ; We might need this in a bit + ld r22,savedar(r13) ; We might need this in a bit + + lis r8,AsyMCKSrc ; Get the Async MCK Source register address + mfsprg r19,2 ; Get the feature flags + ori r8,r8,0x8000 ; Set to read data + rlwinm. r0,r19,0,pfSCOMFixUpb,pfSCOMFixUpb ; Do we need to fix the SCOM data? + + sync + + mtspr scomc,r8 ; Request the MCK source + mfspr r24,scomd ; Get the source + mfspr r8,scomc ; Get back the status (we just ignore it) + sync + isync + + lis r8,AsyMCKRSrc ; Get the Async MCK Source AND mask address + li r9,0 ; Get and AND mask of 0 + + sync + + mtspr scomd,r9 ; Set the AND mask to 0 + mtspr scomc,r8 ; Write the AND mask and clear conditions + mfspr r8,scomc ; Get back the status (we just ignore it) + sync + isync + + lis r8,cFIR ; Get the Core FIR register address + ori r8,r8,0x8000 ; Set to read data + + sync + + mtspr scomc,r8 ; Request the Core FIR + mfspr r25,scomd ; Get the source + mfspr r8,scomc ; Get back the status (we just ignore it) + sync + isync + + lis r8,cFIRrst ; Get the Core FIR AND mask address + + sync + + mtspr scomd,r9 ; Set the AND mask to 0 + mtspr scomc,r8 ; Write the AND mask and clear conditions + mfspr r8,scomc ; Get back the status (we just ignore it) + sync + isync + +; Note: bug in early chips where scom reads are shifted right by 1. We fix that here. +; Also note that we will lose bit 63 + + beq++ mckNoFix ; No fix up is needed + sldi r24,r24,1 ; Shift left 1 + sldi r25,r25,1 ; Shift left 1 + +mckNoFix: std r24,savemisc0(r13) ; Save the MCK source in case we pass the error + std r25,savemisc1(r13) ; Save the Core FIR in case we pass the error + + rlwinm. r0,r20,0,mckIFUE-32,mckIFUE-32 ; Is this some kind of uncorrectable? + bne mckUE ; Yeah... + + rlwinm. r0,r20,0,mckLDST-32,mckLDST-32 ; Some kind of load/store error? + bne mckHandleLDST ; Yes... + + rldicl. r0,r20,46,62 ; Get the error cause code + beq mckNotSure ; We need some more checks for this one... + + cmplwi r0,2 ; Check for TLB parity error + blt mckSLBparity ; This is an SLB parity error... + bgt mckhIFUE ; This is an IFetch tablewalk reload UE... + +; IFetch TLB parity error + + isync + tlbiel r23 ; Locally invalidate TLB entry for iaddr + sync ; Wait for it + b EatRupt ; All recovered... + +; SLB parity error. This could be software caused. We get one if there is +; more than 1 valid SLBE with a matching ESID. That one we do not want to +; try to recover from. Search for it and if we get it, panic. + +mckSLBparity: + crclr cr0_eq ; Make sure we are not equal so we take correct exit + + la r3,emvr0(r2) ; Use this to keep track of valid ESIDs we find + li r5,0 ; Start with index 0 + +mckSLBck: la r4,emvr0(r2) ; Use this to keep track of valid ESIDs we find + slbmfee r6,r5 ; Get the next SLBE + andis. r0,r6,0x0800 ; See if valid bit is on + beq mckSLBnx ; Skip invalid and go to next + +mckSLBck2: cmpld r4,r3 ; Have we reached the end of the table? + beq mckSLBne ; Yes, go enter this one... + ld r7,0(r4) ; Pick up the saved ESID + cmpld r6,r7 ; Is this a match? + beq mckSLBrec ; Whoops, I did bad, recover and pass up... + addi r4,r4,8 ; Next table entry + b mckSLBck2 ; Check the next... + +mckSLBnx: addi r5,r5,1 ; Point to next SLBE + cmplwi r5,64 ; Have we checked all of them? 
+ bne++ mckSLBck ; Not yet, check again...
+ b mckSLBrec ; We looked at them all, go recover...
+
+mckSLBne: std r6,0(r3) ; Save this ESID
+ addi r3,r3,8 ; Point to the new slot
+ b mckSLBnx ; Go do the next SLBE...
+
+; Recover an SLB error
+
+mckSLBrec: li r0,0 ; Set an SLB slot index of 0
+ slbia ; Trash all SLB entries (except for entry 0 that is)
+ slbmfee r7,r0 ; Get the entry that is in SLB index 0
+ rldicr r7,r7,0,35 ; Clear the valid bit and the rest
+ slbie r7 ; Invalidate it
+
+ li r3,0 ; Set the first SLBE
+
+mckSLBclr: slbmte r0,r3 ; Clear the whole entry to 0s
+ addi r3,r3,1 ; Bump index
+ cmplwi cr1,r3,64 ; Have we done them all?
+ bne++ cr1,mckSLBclr ; Not yet, do the next one...
+
+ sth r3,ppInvSeg(r2) ; Store non-zero to trigger SLB reload
+ bne++ EatRupt ; This was not a programming error, all recovered...
+ b PassUpTrap ; Pass the software error up...
+
+;
+; Handle a load/store unit error. We need to decode the DSISR
+;
+
+mckHandleLDST:
+ rlwinm. r0,r21,0,mckL1DCPE,mckL1DCPE ; An L1 data cache parity error?
+ bne++ mckL1D ; Yeah, we dealt with this back in the vector...
+
+ rlwinm. r0,r21,0,mckL1DTPE,mckL1DTPE ; An L1 tag error?
+ bne++ mckL1T ; Yeah, we dealt with this back in the vector...
+
+ rlwinm. r0,r21,0,mckUEdfr,mckUEdfr ; Is this a "deferred" UE?
+ bne mckDUE ; Yeah, go see if expected...
+
+ rlwinm. r0,r21,0,mckUETwDfr,mckUETwDfr ; Is this a "deferred" tablewalk UE?
+ bne mckDTW ; Yeah, no recovery...
+
+ rlwinm. r0,r21,0,mckSLBPE,mckSLBPE ; SLB parity error?
+ bne mckSLBparity ; Yeah, go attempt recovery....
+
+; This is a recoverable D-ERAT or TLB error
+
+ la r9,hwMckERCPE(r2) ; Get DERAT parity error count
+
+mckInvDAR: isync
+ tlbiel r22 ; Locally invalidate the TLB entry
+ sync
+
+ lwz r21,0(r9) ; Get count
+ addi r21,r21,1 ; Count this one
+ stw r21,0(r9) ; Stick it back
+
+ b EatRupt ; All recovered...
+
+;
+; When we come here, we are not quite sure what the error is. We need to
+; dig a bit further.
+;
+; R24 is interrupt source
+; R25 is Core FIR
+;
+; Note that both have been cleared already.
+;
+
+mckNotSure:
+ rldicl. r0,r24,AsyMCKfir+1,63 ; Something in the FIR?
+ bne-- mckFIR ; Yup, go check some more...
+
+ rldicl. r0,r24,AsyMCKhri+1,63 ; Hang recovery?
+ bne-- mckHangRcvr ; Yup...
+
+ rldicl. r0,r24,AsyMCKext+1,63 ; External signal?
+ bne-- mckExtMck ; Yup...
+
+;
+; We really do not know what this one is or what to do with it...
+;
+
+mckUnk: lwz r21,hwMckUnk(r2) ; Get unknown error count
+ addi r21,r21,1 ; Count it
+ stw r21,hwMckUnk(r2) ; Stuff it
+ b PassUpTrap ; Go south, young man...
+
+;
+; Hang recovery. This is just a notification so we only count.
+;
+
+mckHangRcrvr:
+ lwz r21,hwMckHang(r2) ; Get hang recovery count
+ addi r21,r21,1 ; Count this one
+ stw r21,hwMckHang(r2) ; Stick it back
+ b EatRupt ; All recovered...
+
+;
+; Externally signaled MCK. No recovery for the moment, but this may be
+; where we handle ml_probe_read problems eventually.
+;
+mckExtMck:
+ lwz r21,hwMckHang(r2) ; Get hang recovery count
+ addi r21,r21,1 ; Count this one
+ stw r21,hwMckHang(r2) ; Stick it back
+ b EatRupt ; All recovered...
+
+;
+; Machine check cause is in a FIR. Suss it out here.
+; Core FIR is in R25 and has been cleared in HW.
+;
+
+mckFIR: rldicl. r0,r25,cFIRICachePE+1,63 ; I-Cache parity error?
+ la r19,hwMckICachePE(r2) ; Point to counter
+ bne mckInvICache ; Go invalidate I-Cache...
+
+ rldicl. r0,r25,cFIRITagPE0+1,63 ; I-Cache tag parity error?
+ la r19,hwMckITagPE(r2) ; Point to counter
+ bne mckInvICache ; Go invalidate I-Cache...
+
+ rldicl.
r0,r25,cFIRITagPE1+1,63 ; I-Cache tag parity error? + la r19,hwMckITagPE(r2) ; Point to counter + bne mckInvICache ; Go invalidate I-Cache... + + rldicl. r0,r25,cFIRIEratPE+1,63 ; IERAT parity error? + la r19,hwMckIEratPE(r2) ; Point to counter + bne mckInvERAT ; Go invalidate ERATs... + + rldicl. r0,r25,cFIRIFUL2UE+1,63 ; IFetch got L2 UE? + bne mckhIFUE ; Go count and pass up... + + rldicl. r0,r25,cFIRDCachePE+1,63 ; D-Cache PE? + bne mckL1D ; Handled, just go count... + + rldicl. r0,r25,cFIRDTagPE+1,63 ; D-Cache tag PE? + bne mckL1T ; Handled, just go count... + + rldicl. r0,r25,cFIRDEratPE+1,63 ; DERAT PE? + la r19,hwMckDEratPE(r2) ; Point to counter + bne mckInvERAT ; Go invalidate ERATs... + + rldicl. r0,r25,cFIRTLBPE+1,63 ; TLB PE? + la r9,hwMckTLBPE(r2) ; Get TLB parity error count + bne mckInvDAR ; Go recover... + + rldicl. r0,r25,cFIRSLBPE+1,63 ; SLB PE? + bne mckSLBparity ; Cope with it... + + b mckUnk ; Have not a clue... + ; -; We need to fix up the BATs here because the probe -; routine messed them all up... As long as we are at it, -; fix up to return directly to caller of probe. +; General recovery for I-Cache errors. Just flush it completely. ; - - lis r11,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address - ori r11,r11,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address + + .align 7 ; Force into cache line + +mckInvICache: + lis r0,0x0080 ; Get a 0x0080 (bit 9 >> 32) + mfspr r21,hid1 ; Get the current HID1 + sldi r0,r0,32 ; Get the "forced ICBI match" bit + or r0,r0,r21 ; Set forced match - lwz r30,0(r11) ; Pick up DBAT 0 high - lwz r28,4(r11) ; Pick up DBAT 0 low - lwz r27,8(r11) ; Pick up DBAT 1 high - lwz r18,16(r11) ; Pick up DBAT 2 high - lwz r11,24(r11) ; Pick up DBAT 3 high + isync + mtspr hid1,r0 ; Stick it + mtspr hid1,r0 ; Stick it again + isync + + li r6,0 ; Start at 0 - sync - mtdbatu 0,r30 ; Restore DBAT 0 high - mtdbatl 0,r28 ; Restore DBAT 0 low - mtdbatu 1,r27 ; Restore DBAT 1 high - mtdbatu 2,r18 ; Restore DBAT 2 high - mtdbatu 3,r11 ; Restore DBAT 3 high - sync +mckIcbi: icbi 0,r6 ; Kill I$ + addi r6,r6,128 ; Next line + andis. r5,r6,1 ; Have we done them all? + beq++ mckIcbi ; Not yet... - lwz r27,saver6(r13) ; Get the saved R6 value - mtspr hid0,r27 ; Restore HID0 isync + mtspr hid1,r21 ; Restore original HID1 + mtspr hid1,r21 ; Stick it again + isync + + lwz r5,0(r19) ; Get the counter + addi r5,r5,1 ; Count it + stw r5,0(r19) ; Stuff it back + b EatRupt ; All recovered... + + +; General recovery for ERAT problems - handled in exception vector already - lwz r28,savelr(r13) ; Get return point - lwz r27,saver0(r13) ; Get the saved MSR - li r30,0 ; Get a failure RC - stw r28,savesrr0(r13) ; Set the return point - stw r27,savesrr1(r13) ; Set the continued MSR - stw r30,saver3(r13) ; Set return code - b EatRupt ; Yum, yum, eat it all up... +mckInvERAT: lwz r21,0(r19) ; Get the exception count spot + addi r21,r21,1 ; Count this one + stw r21,0(r19) ; Save count + b EatRupt ; All recovered... + +; General hang recovery - this is a notification only, just count. + +mckHangRcvr: + lwz r21,hwMckHang(r2) ; Get hang recovery count + addi r21,r21,1 ; Count this one + stw r21,hwMckHang(r2) ; Stick it back + b EatRupt ; All recovered... + + +; +; These are the uncorrectable errors, just count them then pass it along. +; + +mckUE: lwz r21,hwMckUE(r2) ; Get general uncorrectable error count + addi r21,r21,1 ; Count it + stw r21,hwMckUE(r2) ; Stuff it + b PassUpTrap ; Go south, young man... 
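mckInvICache above recovers from an I-cache parity error by flash-invalidating the entire instruction cache: with the "forced ICBI match" bit set in HID1, every icbi invalidates its line regardless of the address tag, so one pass over a 64KB window (the loop bound and 128-byte stride come straight from the code above) clears the whole cache. A C sketch of the same sequence (mfhid1/mthid1 and FORCE_ICBI_MATCH are illustrative names; the assembly also writes HID1 twice with isync around it, which this sketch glosses over):

	static void flash_invalidate_icache(void)
	{
		unsigned long long hid1 = mfhid1();		/* current HID1 */
		unsigned long ea;

		mthid1(hid1 | FORCE_ICBI_MATCH);		/* every icbi now matches */
		for (ea = 0; ea < 0x10000; ea += 128)		/* 64KB, 128-byte lines */
			__asm__ volatile("icbi 0,%0" : : "r" (ea));
		mthid1(hid1);					/* restore original HID1 */
	}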
+
+mckhIFUE: lwz r21,hwMckIUEr(r2) ; Get I-Fetch TLB reload uncorrectable error count
+ addi r21,r21,1 ; Count it
+ stw r21,hwMckIUEr(r2) ; Stuff it
+ b PassUpTrap ; Go south, young man...
+
+mckDUE: lwz r21,hwMckDUE(r2) ; Get deferred uncorrectable error count
+ addi r21,r21,1 ; Count it
+ stw r21,hwMckDUE(r2) ; Stuff it
+
+;
+; Right here is where we end up after a failure on a ml_probe_read_64.
+; We will check if that is the case, and if so, fix everything up and
+; return from it.
+
+ lis r8,hi16(EXT(ml_probe_read_64)) ; High of start
+ lis r9,hi16(EXT(ml_probe_read_mck_64)) ; High of end
+ ori r8,r8,lo16(EXT(ml_probe_read_64)) ; Low of start
+ ori r9,r9,lo16(EXT(ml_probe_read_mck_64)) ; Low of end
+ cmpld r23,r8 ; Too soon?
+ cmpld cr1,r23,r9 ; Too late?
+
+ cror cr0_lt,cr0_lt,cr1_gt ; Too soon or too late?
+ ld r3,saver12(r13) ; Get the original MSR
+ ld r5,savelr(r13) ; Get the return address
+ li r4,0 ; Get fail code
+ blt-- PassUpTrap ; This is a normal machine check, just pass up...
+ std r5,savesrr0(r13) ; Set the return address
+
+ std r3,savesrr1(r13) ; Set the return MSR
+ std r4,saver3(r13) ; Set failure return code
+ b EatRupt ; Go return from ml_probe_read_64...
+
+mckDTW: lwz r21,hwMckDTW(r2) ; Get deferred tablewalk uncorrectable error count
+ addi r21,r21,1 ; Count it
+ stw r21,hwMckDTW(r2) ; Stuff it
+ b PassUpTrap ; Go south, young man...
+
+mckL1D: lwz r21,hwMckL1DPE(r2) ; Get data cache parity error count
+ addi r21,r21,1 ; Count it
+ stw r21,hwMckL1DPE(r2) ; Stuff it
+ b PassUpTrap ; Go south, young man...
+
+mckL1T: lwz r21,hwMckL1TPE(r2) ; Get TLB parity error count
+ addi r21,r21,1 ; Count it
+ stw r21,hwMckL1TPE(r2) ; Stuff it
+ b PassUpTrap ; Go south, young man...
+
 /*
  * Here's where we come back from some instruction emulator. If we come back with
  * T_IN_VAIN, the emulation is done and we should just reload state and directly
  * go back to the interrupted code. Otherwise, we'll check to see if
  * we need to redrive with a different interrupt, i.e., DSI.
+ * Note that we are actually not redriving the rupt, rather changing it
+ * into a different one. Thus we clear the redrive bit.
  */
 
 .align 5
 
@@ -1544,18 +2399,15 @@ notDCache:
 
 LEXT(EmulExit)
 
- cmplwi r11,T_IN_VAIN ; Was it emulated?
+ cmplwi cr1,r11,T_IN_VAIN ; Was it emulated?
 lis r1,hi16(SAVredrive) ; Get redrive request
- mfsprg r2,0 ; Restore the per_proc area
- beq+ EatRupt ; Yeah, just blast back to the user...
+ beq++ cr1,EatRupt ; Yeah, just blast back to the user...
 
 lwz r4,SAVflags(r13) ; Pick up the flags
 and. r0,r4,r1 ; Check if redrive requested
- andc r4,r4,r1 ; Clear redrive
- beq+ PassUp ; No redrive, just keep on going...
+ beq++ PassUpTrap ; No redrive, just keep on going...
 
- stw r4,SAVflags(r13) ; Set the flags
 b Redrive ; Redrive the exception...
 
 ;
@@ -1565,41 +2417,106 @@ LEXT(EmulExit)
 ; memory, otherwise we would need to switch on (at least) virtual data.
 ; SRs are already set up.
 ;
-
+
+ .align 5
+
+PassUpTrap: lis r20,hi16(EXT(thandler)) ; Get thandler address
+ ori r20,r20,lo16(EXT(thandler)) ; Get thandler address
+ b PassUp ; Go pass it up...
+
+PassUpRupt: lis r20,hi16(EXT(ihandler)) ; Get ihandler address
+ ori r20,r20,lo16(EXT(ihandler)) ; Get ihandler address
+ b PassUp ; Go pass it up...
+
+ .align 5
+
+PassUpFPU: lis r20,hi16(EXT(fpu_switch)) ; Get FPU switcher address
+ ori r20,r20,lo16(EXT(fpu_switch)) ; Get FPU switcher address
+ b PassUp ; Go pass it up...
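EmulExit above reduces to a three-way decision on the code the emulator hands back. As a C sketch (eat_rupt, redrive, and pass_up_trap stand in for the EatRupt, Redrive, and PassUpTrap paths above; the savearea field name is illustrative):

	static void emul_exit(struct savearea *sv, unsigned int code)
	{
		if (code == T_IN_VAIN)
			eat_rupt(sv);		/* emulation complete; reload state, resume */
		else if (sv->save_flags & SAVredrive)
			redrive(sv, code);	/* re-enter the filter as the new exception,
						   e.g. a synthesized DSI */
		else
			pass_up_trap(sv, code);	/* hand the trap to thandler */
	}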
+
+PassUpVMX: lis r20,hi16(EXT(vec_switch)) ; Get VMX switcher address
+ ori r20,r20,lo16(EXT(vec_switch)) ; Get VMX switcher address
+ bt++ featAltivec,PassUp ; We have VMX on this CPU...
+ li r11,T_PROGRAM ; Say that it is a program exception
+ li r20,8 ; Set invalid instruction
+ stw r11,saveexception(r13) ; Set the new exception code
+ sth r20,savesrr1+4(r13) ; Set the invalid instruction SRR code
+
+ b PassUpTrap ; Go pass it up...
+
 .align 5
+
+PassUpAbend:
+ lis r20,hi16(EXT(chandler)) ; Get choke handler address
+ ori r20,r20,lo16(EXT(chandler)) ; Get choke handler address
+ b PassUp ; Go pass it up...
 
-PassUp: lis r2,hi16(EXT(exception_handlers)) ; Get exception vector address
- ori r2,r2,lo16(EXT(exception_handlers)) ; And low half
- lwzx r6,r2,r11 ; Get the actual exception handler address
+ .align 5
 
-PassUpDeb: mtsrr0 r6 ; Set up the handler address
- rlwinm r5,r13,0,0,19 ; Back off to the start of savearea block
+PassUp:
+#if INSTRUMENT
+ mfspr r29,pmc1 ; INSTRUMENT - saveinstr[11] - Take stamp at passup or eatrupt
+ stw r29,0x6100+(11*16)+0x0(0) ; INSTRUMENT - Save it
+ mfspr r29,pmc2 ; INSTRUMENT - Get stamp
+ stw r29,0x6100+(11*16)+0x4(0) ; INSTRUMENT - Save it
+ mfspr r29,pmc3 ; INSTRUMENT - Get stamp
+ stw r29,0x6100+(11*16)+0x8(0) ; INSTRUMENT - Save it
+ mfspr r29,pmc4 ; INSTRUMENT - Get stamp
+ stw r29,0x6100+(11*16)+0xC(0) ; INSTRUMENT - Save it
+#endif
+ lwz r10,SAVflags(r13) ; Pick up the flags
+
+ li r0,0xFFF ; Get a page mask
+ li r2,MASK(MSR_BE)|MASK(MSR_SE) ; Get the mask to save trace bits
+ andc r5,r13,r0 ; Back off to the start of savearea block
 mfmsr r3 ; Get our MSR
- rlwinm r3,r3,0,MSR_BE_BIT+1,MSR_SE_BIT-1 ; Clear all but the trace bits
- li r2,MSR_SUPERVISOR_INT_OFF ; Get our normal MSR value
- lwz r5,SACvrswap(r5) ; Get real to virtual conversion
- or r2,r2,r3 ; Keep the trace bits if they are on
+ rlwinm r10,r10,0,SAVredriveb+1,SAVredriveb-1 ; Clear the redrive before we pass it up
+ li r21,MSR_SUPERVISOR_INT_OFF ; Get our normal MSR value
+ and r3,r3,r2 ; Clear all but trace
+ lwz r5,SACvrswap+4(r5) ; Get real to virtual conversion
+ or r21,r21,r3 ; Keep the trace bits if they are on
+ stw r10,SAVflags(r13) ; Set the flags with the cleared redrive flag
 mr r3,r11 ; Pass the exception code in the paramter reg
- mtsrr1 r2 ; Set up our normal MSR value
 xor r4,r13,r5 ; Pass up the virtual address of context savearea
+ mfsprg r29,0 ; Get the per_proc block back
+ rlwinm r4,r4,0,0,31 ; Clean top half of virtual savearea if 64-bit
+
+ mr r3,r21 ; Pass in the MSR we will go to
+ bl EXT(switchSegs) ; Go handle the segment registers/STB
+
+#if INSTRUMENT
+ mfspr r30,pmc1 ; INSTRUMENT - saveinstr[7] - Take stamp after switchsegs
+ stw r30,0x6100+(7*16)+0x0(0) ; INSTRUMENT - Save it
+ mfspr r30,pmc2 ; INSTRUMENT - Get stamp
+ stw r30,0x6100+(7*16)+0x4(0) ; INSTRUMENT - Save it
+ mfspr r30,pmc3 ; INSTRUMENT - Get stamp
+ stw r30,0x6100+(7*16)+0x8(0) ; INSTRUMENT - Save it
+ mfspr r30,pmc4 ; INSTRUMENT - Get stamp
+ stw r30,0x6100+(7*16)+0xC(0) ; INSTRUMENT - Save it
+#endif
+ lwz r3,saveexception(r13) ; Recall the exception code
+
+ mtsrr0 r20 ; Set up the handler address
+ mtsrr1 r21 ; Set up our normal MSR value
 
- rfi ; Launch the exception handler
+ bt++ pf64Bitb,puLaunch ; Handle 64-bit machine...
 
- .long 0 ; Leave these here gol durn it!
- .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 - .long 0 + rfi ; Launch the exception handler + +puLaunch: rfid ; Launch the exception handler /* - * This routine is the only place where we return from an interruption. - * Anyplace else is wrong. Even if I write the code, it's still wrong. - * Feel free to come by and slap me if I do do it--even though I may - * have had a good reason to do it. + * This routine is the main place where we return from an interruption. + * + * This is also where we release the quickfret list. These are saveareas + * that were released as part of the exception exit path in hw_exceptions. + * In order to save an atomic operation (which actually will not work + * properly on a 64-bit machine) we use holdQFret to indicate that the list + * is in flux and should not be looked at here. This comes into play only + * when we take a PTE miss when we are queuing a savearea onto qfret. + * Quite rare but could happen. If the flag is set, this code does not + * release the list and waits until next time. * * All we need to remember here is that R13 must point to the savearea * that has the context we need to load up. Translation and interruptions @@ -1610,88 +2527,50 @@ PassUpDeb: mtsrr0 r6 ; Set up the handler address * is any tomfoolery with savearea stacks, it must be taken care of * before we get here. * - * Speaking of tomfoolery, this is where we synthesize interruptions - * if we need to. */ .align 5 EatRupt: mfsprg r29,0 ; Get the per_proc block back mr r31,r13 ; Move the savearea pointer to the far end of the register set + mfsprg r27,2 ; Get the processor features - lwz r30,quickfret(r29) ; Pick up the quick fret list, if any + lwz r3,holdQFret(r29) ; Get the release hold off flag - mfsprg r27,2 ; Get the processor features - lwz r21,savesrr1(r31) ; Get destination MSR + bt++ pf64Bitb,eat64a ; Skip down to the 64-bit version of this + +; +; This starts the 32-bit version +; + + mr. r3,r3 ; Should we hold off the quick release? + lwz r30,quickfret+4(r29) ; Pick up the quick fret list, if any + la r21,saver0(r31) ; Point to the first thing we restore + bne- ernoqfret ; Hold off set, do not release just now... erchkfret: mr. r3,r30 ; Any savearea to quickly release? beq+ ernoqfret ; No quickfrets... - lwz r30,SAVprev(r30) ; Chain back now + lwz r30,SAVprev+4(r30) ; Chain back now bl EXT(save_ret_phys) ; Put it on the free list - stw r30,quickfret(r29) ; Dequeue previous guy (really, it is ok to wait until after the release) + stw r30,quickfret+4(r29) ; Dequeue previous guy (really, it is ok to wait until after the release) b erchkfret ; Try the next one... - .align 5 -ernoqfret: mtcrf 0x60,r27 ; Set CRs with thermal facilities - rlwinm. r0,r21,0,MSR_EE_BIT,MSR_EE_BIT ; Are interruptions going to be enabled? - crandc 31,pfThermalb,pfThermIntb ; See if we have both thermometer and not interrupt facility - la r21,saver0(r31) ; Point to the first thing we restore - crandc 31,cr0_eq,31 ; Factor in enablement - bf 31,tempisok ; No thermal checking needed... - -; -; We get to here if 1) there is a thermal facility, and 2) the hardware -; will or cannot interrupt, and 3) the interrupt will be enabled after this point. -; - - mfspr r16,thrm3 ; Get thermal 3 - mfspr r14,thrm1 ; Get thermal 2 - rlwinm. r16,r16,0,thrme,thrme ; Is the themometer enabled? - mfspr r15,thrm2 ; Get thermal 2 - beq- tempisok ; No thermometer... 
- rlwinm r16,r14,2,28,31 ; Cluster THRM1s TIE, V, TIN, and TIV at bottom 4 bits - srawi r0,r15,31 ; Make a mask of 1s if temprature over - rlwinm r30,r15,2,28,31 ; Cluster THRM2s TIE, V, TIN, and TIV at bottom 4 bits -; -; Note that the following compare check that V, TIN, and TIV are set and that TIE is cleared. -; This insures that we only emulate when the hardware is not set to interrupt. -; - cmplwi cr0,r16,7 ; Is there a valid pending interruption for THRM1? - cmplwi cr1,r30,7 ; Is there a valid pending interruption for THRM2? - and r15,r15,r0 ; Keep high temp if that interrupted, zero if not - cror cr0_eq,cr0_eq,cr1_eq ; Merge both - andc r14,r14,r0 ; Keep low if high did not interrupt, zero if it did - bne+ tempisok ; Nope, temprature is in range - - li r11,T_THERMAL ; Time to emulate a thermal interruption - or r14,r14,r15 ; Get contents of interrupting register - mr r13,r31 ; Make sure savearea is pointed to correctly - stw r11,saveexception(r31) ; Set the exception code - stw r14,savedar(r31) ; Set the contents of the interrupting register into the dar - -; -; This code is here to prevent a problem that will probably never happen. If we are -; returning from an emulation routine (alignment, altivec assist, etc.) the SRs may -; not be set to the proper kernel values. Then, if we were to emulate a thermal here, -; we would end up running in the kernel with a bogus SR. So, to prevent -; this unfortunate circumstance, we slam the SRs here. (I worry too much...) -; - - lis r30,hi16(KERNEL_SEG_REG0_VALUE) ; Get the high half of the kernel SR0 value - mtsr sr0,r30 ; Set the kernel SR0 - addis r30,r30,0x0010 ; Point to the second segment of kernel - mtsr sr1,r30 ; Set the kernel SR1 - addis r30,r30,0x0010 ; Point to the third segment of kernel - mtsr sr2,r30 ; Set the kernel SR2 - addis r30,r30,0x0010 ; Point to the third segment of kernel - mtsr sr3,r30 ; Set the kernel SR3 - b Redrive ; Go process this new interruption... - +ernoqfret: +#if INSTRUMENT + mfspr r30,pmc1 ; INSTRUMENT - saveinstr[5] - Take stamp at saveareas released + stw r30,0x6100+(5*16)+0x0(0) ; INSTRUMENT - Save it + mfspr r30,pmc2 ; INSTRUMENT - Get stamp + stw r30,0x6100+(5*16)+0x4(0) ; INSTRUMENT - Save it + mfspr r30,pmc3 ; INSTRUMENT - Get stamp + stw r30,0x6100+(5*16)+0x8(0) ; INSTRUMENT - Save it + mfspr r30,pmc4 ; INSTRUMENT - Get stamp + stw r30,0x6100+(5*16)+0xC(0) ; INSTRUMENT - Save it +#endif -tempisok: dcbt 0,r21 ; Touch in the first thing we need + dcbt 0,r21 ; Touch in the first thing we need ; ; Here we release the savearea. @@ -1704,291 +2583,383 @@ tempisok: dcbt 0,r21 ; Touch in the first thing we need ; savearea to the head of the local list. Then, if it needs to trim, it will ; start with the SECOND savearea, leaving ours intact. ; -; Build the SR values depending upon destination. If we are going to the kernel, -; the SRs are almost all the way set up. SR14 (or the currently used copyin/out register) -; must be set to whatever it was at the last exception because it varies. All the rest -; have been set up already. -; -; If we are going into user space, we need to check a bit more. SR0, SR1, SR2, and -; SR14 (current implementation) must be restored always. The others must be set if -; they are different that what was loaded last time (i.e., tasks have switched). -; We check the last loaded address space ID and if the same, we skip the loads. -; This is a performance gain because SR manipulations are slow. -; -; There is also the special case when MSR_RI is set. 
This happens when we are trying to
-; make a special user state access when we are in the kernel. If we take an exception when
-; during that, the SRs may have been modified. Therefore, we need to restore them to
-; what they were before the exception because they could be non-standard. We saved them
-; during exception entry, so we will just load them here.
;
 
 mr r3,r31 ; Get the exiting savearea in parm register
 bl EXT(save_ret_phys) ; Put it on the free list
+#if INSTRUMENT
+ mfspr r3,pmc1 ; INSTRUMENT - saveinstr[6] - Take stamp after savearea released
+ stw r3,0x6100+(6*16)+0x0(0) ; INSTRUMENT - Save it
+ mfspr r3,pmc2 ; INSTRUMENT - Get stamp
+ stw r3,0x6100+(6*16)+0x4(0) ; INSTRUMENT - Save it
+ mfspr r3,pmc3 ; INSTRUMENT - Get stamp
+ stw r3,0x6100+(6*16)+0x8(0) ; INSTRUMENT - Save it
+ mfspr r3,pmc4 ; INSTRUMENT - Get stamp
+ stw r3,0x6100+(6*16)+0xC(0) ; INSTRUMENT - Save it
+#endif
 
- li r3,savesrr1 ; Get offset to the srr1 value
+ lwz r3,savesrr1+4(r31) ; Pass in the MSR we are going to
+ bl EXT(switchSegs) ; Go handle the segment registers/STB
+#if INSTRUMENT
+ mfspr r30,pmc1 ; INSTRUMENT - saveinstr[10] - Take stamp after switchsegs
+ stw r30,0x6100+(10*16)+0x0(0) ; INSTRUMENT - Save it
+ mfspr r30,pmc2 ; INSTRUMENT - Get stamp
+ stw r30,0x6100+(10*16)+0x4(0) ; INSTRUMENT - Save it
+ mfspr r30,pmc3 ; INSTRUMENT - Get stamp
+ stw r30,0x6100+(10*16)+0x8(0) ; INSTRUMENT - Save it
+ mfspr r30,pmc4 ; INSTRUMENT - Get stamp
+ stw r30,0x6100+(10*16)+0xC(0) ; INSTRUMENT - Save it
+#endif
+ li r3,savesrr1+4 ; Get offset to the srr1 value
+ lhz r9,PP_CPU_FLAGS(r29) ; Get the processor flags
 lwarx r26,r3,r31 ; Get destination MSR and take reservation along the way (just so we can blow it away)
- lwz r7,PP_USERPMAP(r29) ; Pick up the user pmap we may launch
- rlwinm. r17,r26,0,MSR_RI_BIT,MSR_RI_BIT ; See if we are returning from a special fault
+
+ rlwinm r25,r26,27,22,22 ; Move PR bit to BE
+ cmplw cr3,r14,r14 ; Set that we do not need to stop streams
 
- beq+ nSpecAcc ; Do not reload the kernel SRs if this is not a special access...
+ rlwinm r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on
+ li r21,emfp0 ; Point to the fp savearea
+ and r9,r9,r25 ; Clear BE if supervisor state
+ or r26,r26,r9 ; Flip on the BE bit for special trace if needed
+ stwcx. r26,r3,r31 ; Blow away any reservations we hold (and set BE)
+
+ lwz r25,savesrr0+4(r31) ; Get the SRR0 to use
+
+ la r28,saver4(r31) ; Point to the 32-byte line with r4-r7
+ dcbz r21,r29 ; Clear a work area
+ lwz r0,saver0+4(r31) ; Restore R0
+ dcbt 0,r28 ; Touch in r4-r7
+ lwz r1,saver1+4(r31) ; Restore R1
+ lwz r2,saver2+4(r31) ; Restore R2
+ la r28,saver8(r31) ; Point to the 32-byte line with r8-r11
+ lwz r3,saver3+4(r31) ; Restore R3
+ andis. r6,r27,hi16(pfAltivec) ; Do we have altivec on the machine?
+ dcbt 0,r28 ; touch in r8-r11 + lwz r4,saver4+4(r31) ; Restore R4 + la r28,saver12(r31) ; Point to the 32-byte line with r12-r15 + mtsrr0 r25 ; Restore the SRR0 now + lwz r5,saver5+4(r31) ; Restore R5 + mtsrr1 r26 ; Restore the SRR1 now + lwz r6,saver6+4(r31) ; Restore R6 + + dcbt 0,r28 ; touch in r12-r15 + la r28,saver16(r31) + + lwz r7,saver7+4(r31) ; Restore R7 + lwz r8,saver8+4(r31) ; Restore R8 + lwz r9,saver9+4(r31) ; Restore R9 + + dcbt 0,r28 ; touch in r16-r19 + la r28,saver20(r31) + + lwz r10,saver10+4(r31) ; Restore R10 + lwz r11,saver11+4(r31) ; Restore R11 + + dcbt 0,r28 ; touch in r20-r23 + la r28,savevscr(r31) ; Point to the status area + + lwz r12,saver12+4(r31) ; Restore R12 + lwz r13,saver13+4(r31) ; Restore R13 - lwz r14,savesr0(r31) ; Get SR0 at fault time - mtsr sr0,r14 ; Set SR0 - lwz r14,savesr1(r31) ; Get SR1 at fault time - mtsr sr1,r14 ; Set SR1 - lwz r14,savesr2(r31) ; Get SR2 at fault time - mtsr sr2,r14 ; Set SR2 - lwz r14,savesr3(r31) ; Get SR3 at fault timee - mtsr sr3,r14 ; Set SR3 - b segsdone ; We are all set up now... + la r14,savectr+4(r31) + dcbt 0,r28 ; Touch in VSCR and FPSCR + dcbt 0,r14 ; touch in CTR, DAR, DSISR, VRSAVE, and Exception code - .align 5 + lwz r26,next_savearea+4(r29) ; Get the exception save area + la r28,saver24(r31) -nSpecAcc: rlwinm. r17,r26,0,MSR_PR_BIT,MSR_PR_BIT ; See if we are going to user or system - li r14,PMAP_SEGS ; Point to segments - bne+ gotouser ; We are going into user state... + lwz r14,saver14+4(r31) ; Restore R14 + lwz r15,saver15+4(r31) ; Restore R15 - lwz r14,savesr14(r31) ; Get the copyin/out register at interrupt time - mtsr sr14,r14 ; Set SR14 - b segsdone ; We are all set up now... - - .align 5 -gotouser: dcbt r14,r7 ; Touch the segment register contents - lwz r9,spcFlags(r29) ; Pick up the special flags - lwz r16,PP_LASTPMAP(r29) ; Pick up the last loaded pmap - addi r14,r14,32 ; Second half of pmap segments - rlwinm r9,r9,userProtKeybit-2,2,2 ; Isolate the user state protection key - lwz r15,PMAP_SPACE(r7) ; Get the primary space - lwz r13,PMAP_VFLAGS(r7) ; Get the flags - dcbt r14,r7 ; Touch second page - oris r15,r15,hi16(SEG_REG_PROT) ; Set segment 0 SR value - mtcrf 0x0F,r13 ; Set CRs to correspond to the subordinate spaces - xor r15,r15,r9 ; Flip to proper segment register key - lhz r9,PP_CPU_FLAGS(r29) ; Get the processor flags + stfd f0,emfp0(r29) ; Save FP0 + lwz r27,savevrsave(r31) ; Get the vrsave + dcbt 0,r28 ; touch in r24-r27 + la r28,savevscr(r31) ; Point to the status area + lfd f0,savefpscrpad(r31) ; Get the fpscr + la r22,saver28(r31) + mtfsf 0xFF,f0 ; Restore fpscr + lfd f0,emfp0(r29) ; Restore the used register - addis r13,r15,0x0000 ; Get SR0 value - bf 16,nlsr0 ; No alternate here... - lwz r13,PMAP_SEGS+(0*4)(r7) ; Get SR0 value + beq noavec3 ; No Altivec on this CPU... -nlsr0: mtsr sr0,r13 ; Load up the SR - rlwinm r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on + stvxl v0,r21,r29 ; Save a vector register + lvxl v0,0,r28 ; Get the vector status + mtspr vrsave,r27 ; Set the vrsave + mtvscr v0 ; Set the vector status + lvxl v0,r21,r29 ; Restore work vector register - addis r13,r15,0x0010 ; Get SR1 value - bf 17,nlsr1 ; No alternate here... 
- lwz r13,PMAP_SEGS+(1*4)(r7) ; Get SR1 value - -nlsr1: mtsr sr1,r13 ; Load up the SR - or r26,r26,r9 ; Flip on the BE bit for special trace if needed +noavec3: dcbt 0,r22 ; touch in r28-r31 + + lwz r23,spcFlags(r29) ; Get the special flags from per_proc + la r17,savesrr0(r31) + la r26,saver0(r26) ; Point to the first part of the next savearea + dcbt 0,r17 ; touch in SRR0, SRR1, CR, XER, LR + lhz r28,pfrptdProc(r29) ; Get the reported processor type + + lwz r16,saver16+4(r31) ; Restore R16 + lwz r17,saver17+4(r31) ; Restore R17 + lwz r18,saver18+4(r31) ; Restore R18 + lwz r19,saver19+4(r31) ; Restore R19 + lwz r20,saver20+4(r31) ; Restore R20 + lwz r21,saver21+4(r31) ; Restore R21 + lwz r22,saver22+4(r31) ; Restore R22 + + cmpwi cr1,r28,CPU_SUBTYPE_POWERPC_750 ; G3? + + dcbz 0,r26 ; Clear and allocate next savearea we use in the off chance it is still in when we next interrupt + + andis. r23,r23,hi16(perfMonitor) ; Is the performance monitor enabled? + lwz r23,saver23+4(r31) ; Restore R23 + cmpwi cr2,r28,CPU_SUBTYPE_POWERPC_7400 ; Yer standard G4? + lwz r24,saver24+4(r31) ; Restore R24 + lwz r25,saver25+4(r31) ; Restore R25 + lwz r26,saver26+4(r31) ; Restore R26 + lwz r27,saver27+4(r31) ; Restore R27 + + beq+ noPerfMonRestore32 ; No perf monitor... + + beq- cr1,perfMonRestore32_750 ; This is a G3... + beq- cr2,perfMonRestore32_7400 ; Standard G4... + + lwz r28,savepmc+16(r31) + lwz r29,savepmc+20(r31) + mtspr pmc5,r28 ; Restore PMC5 + mtspr pmc6,r29 ; Restore PMC6 + +perfMonRestore32_7400: + lwz r28,savemmcr2+4(r31) + mtspr mmcr2,r28 ; Restore MMCR2 + +perfMonRestore32_750: + lwz r28,savepmc+0(r31) + lwz r29,savepmc+4(r31) + mtspr pmc1,r28 ; Restore PMC1 + mtspr pmc2,r29 ; Restore PMC2 + lwz r28,savepmc+8(r31) + lwz r29,savepmc+12(r31) + mtspr pmc3,r28 ; Restore PMC3 + mtspr pmc4,r29 ; Restore PMC4 + lwz r28,savemmcr1+4(r31) + lwz r29,savemmcr0+4(r31) + mtspr mmcr1,r28 ; Restore MMCR1 + mtspr mmcr0,r29 ; Restore MMCR0 + +noPerfMonRestore32: + lwz r28,savecr(r31) ; Get CR to restore + lwz r29,savexer+4(r31) ; Get XER to restore + mtcr r28 ; Restore the CR + lwz r28,savelr+4(r31) ; Get LR to restore + mtxer r29 ; Restore the XER + lwz r29,savectr+4(r31) ; Get the CTR to restore + mtlr r28 ; Restore the LR + lwz r28,saver30+4(r31) ; Get R30 + mtctr r29 ; Restore the CTR + lwz r29,saver31+4(r31) ; Get R31 + mtsprg 2,r28 ; Save R30 for later + lwz r28,saver28+4(r31) ; Restore R28 + mtsprg 3,r29 ; Save R31 for later + lwz r29,saver29+4(r31) ; Restore R29 - cmplw cr3,r7,r16 ; Are we running the same segs as last time? + mfsprg r31,0 ; Get per_proc + mfsprg r30,2 ; Restore R30 + lwz r31,pfAvailable(r31) ; Get the feature flags + mtsprg 2,r31 ; Set the feature flags + mfsprg r31,3 ; Restore R31 - addis r13,r15,0x0020 ; Get SR2 value - bf 18,nlsr2 ; No alternate here... - lwz r13,PMAP_SEGS+(2*4)(r7) ; Get SR2 value - -nlsr2: mtsr sr2,r13 ; Load up the SR + rfi ; Click heels three times and think very hard that there is no place like home... - addis r13,r15,0x0030 ; Get SR3 value - bf 19,nlsr3 ; No alternate here... - lwz r13,PMAP_SEGS+(3*4)(r7) ; Get SR3 value - -nlsr3: mtsr sr3,r13 ; Load up the SR + .long 0 ; Leave this here + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 - addis r13,r15,0x00E0 ; Get SR14 value - bf 30,nlsr14 ; No alternate here... - lwz r13,PMAP_SEGS+(14*4)(r7) ; Get SR14 value - -nlsr14: mtsr sr14,r13 ; Load up the SR - beq+ cr3,segsdone ; All done if same pmap as last time... 
- - stw r7,PP_LASTPMAP(r29) ; Remember what we just loaded - - addis r13,r15,0x0040 ; Get SR4 value - bf 20,nlsr4 ; No alternate here... - lwz r13,PMAP_SEGS+(4*4)(r7) ; Get SR4 value - -nlsr4: mtsr sr4,r13 ; Load up the SR +; +; This starts the 64-bit version +; - addis r13,r15,0x0050 ; Get SR5 value - bf 21,nlsr5 ; No alternate here... - lwz r13,PMAP_SEGS+(5*4)(r7) ; Get SR5 value - -nlsr5: mtsr sr5,r13 ; Load up the SR + .align 7 - addis r13,r15,0x0060 ; Get SR6 value - bf 22,nlsr6 ; No alternate here... - lwz r13,PMAP_SEGS+(6*4)(r7) ; Get SR6 value - -nlsr6: mtsr sr6,r13 ; Load up the SR +eat64a: ld r30,quickfret(r29) ; Pick up the quick fret list, if any - addis r13,r15,0x0070 ; Get SR7 value - bf 23,nlsr7 ; No alternate here... - lwz r13,PMAP_SEGS+(7*4)(r7) ; Get SR7 value + mr. r3,r3 ; Should we hold off the quick release? + la r21,saver0(r31) ; Point to the first thing we restore + bne-- ernoqfre64 ; Hold off set, do not release just now... -nlsr7: mtsr sr7,r13 ; Load up the SR - - addis r13,r15,0x0080 ; Get SR8 value - bf 24,nlsr8 ; No alternate here... - lwz r13,PMAP_SEGS+(8*4)(r7) ; Get SR8 value +erchkfre64: mr. r3,r30 ; Any savearea to quickly release? + beq+ ernoqfre64 ; No quickfrets... + ld r30,SAVprev(r30) ; Chain back now -nlsr8: mtsr sr8,r13 ; Load up the SR + bl EXT(save_ret_phys) ; Put it on the free list - addis r13,r15,0x0090 ; Get SR9 value - bf 25,nlsr9 ; No alternate here... - lwz r13,PMAP_SEGS+(9*4)(r7) ; Get SR9 value - -nlsr9: mtsr sr9,r13 ; Load up the SR + std r30,quickfret(r29) ; Dequeue previous guy (really, it is ok to wait until after the release) + b erchkfre64 ; Try the next one... - addis r13,r15,0x00A0 ; Get SR10 value - bf 26,nlsr10 ; No alternate here... - lwz r13,PMAP_SEGS+(10*4)(r7) ; Get SR10 value + .align 7 -nlsr10: mtsr sr10,r13 ; Load up the SR - - addis r13,r15,0x00B0 ; Get SR11 value - bf 27,nlsr11 ; No alternate here... - lwz r13,PMAP_SEGS+(11*4)(r7) ; Get SR11 value +ernoqfre64: dcbt 0,r21 ; Touch in the first thing we need -nlsr11: mtsr sr11,r13 ; Load up the SR +; +; Here we release the savearea. +; +; Important!!!! The savearea is released before we are done with it. When the +; local free savearea list (anchored at lclfree) gets too long, save_ret_phys +; will trim the list, making the extra saveareas allocatable by another processor +; The code in there must ALWAYS leave our savearea on the local list, otherwise +; we could be very, very unhappy. The code there always queues the "just released" +; savearea to the head of the local list. Then, if it needs to trim, it will +; start with the SECOND savearea, leaving ours intact. +; +; - addis r13,r15,0x00C0 ; Get SR12 value - bf 28,nlsr12 ; No alternate here... - lwz r13,PMAP_SEGS+(12*4)(r7) ; Get SR12 value + li r3,lgKillResv ; Get spot to kill reservation + stdcx. r3,0,r3 ; Blow away any reservations we hold -nlsr12: mtsr sr12,r13 ; Load up the SR + mr r3,r31 ; Get the exiting savearea in parm register + bl EXT(save_ret_phys) ; Put it on the free list - addis r13,r15,0x00D0 ; Get SR13 value - bf 29,nlsr13 ; No alternate here... - lwz r13,PMAP_SEGS+(13*4)(r7) ; Get SR13 value - -nlsr13: mtsr sr13,r13 ; Load up the SR + lwz r3,savesrr1+4(r31) ; Pass in the MSR we will be going to + bl EXT(switchSegs) ; Go handle the segment registers/STB - addis r13,r15,0x00F0 ; Get SR15 value - bf 31,nlsr15 ; No alternate here... - lwz r13,PMAP_SEGS+(15*4)(r7) ; Get SR15 value - -nlsr15: mtsr sr15,r13 ; Load up the SR - -segsdone: stwcx. 
r26,r3,r31 ; Blow away any reservations we hold + lhz r9,PP_CPU_FLAGS(r29) ; Get the processor flags + ld r26,savesrr1(r31) ; Get destination MSR + cmplw cr3,r14,r14 ; Set that we do not need to stop streams + rlwinm r25,r26,27,22,22 ; Move PR bit to BE - li r21,emfp0 ; Point to the fp savearea - lwz r25,savesrr0(r31) ; Get the SRR0 to use - la r28,saver8(r31) ; Point to the next line to use - dcbt r21,r29 ; Start moving in a work area - lwz r0,saver0(r31) ; Restore R0 - dcbt 0,r28 ; Touch it in - lwz r1,saver1(r31) ; Restore R1 - lwz r2,saver2(r31) ; Restore R2 - la r28,saver16(r31) ; Point to the next line to get - lwz r3,saver3(r31) ; Restore R3 + rlwinm r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on + li r21,emfp0 ; Point to a workarea + and r9,r9,r25 ; Clear BE if supervisor state + or r26,r26,r9 ; Flip on the BE bit for special trace if needed + + ld r25,savesrr0(r31) ; Get the SRR0 to use + la r28,saver16(r31) ; Point to the 128-byte line with r16-r31 + dcbz128 r21,r29 ; Clear a work area + ld r0,saver0(r31) ; Restore R0 + dcbt 0,r28 ; Touch in r16-r31 + ld r1,saver1(r31) ; Restore R1 + ld r2,saver2(r31) ; Restore R2 + ld r3,saver3(r31) ; Restore R3 mtcrf 0x80,r27 ; Get facility availability flags (do not touch CR1-7) - lwz r4,saver4(r31) ; Restore R4 + ld r4,saver4(r31) ; Restore R4 mtsrr0 r25 ; Restore the SRR0 now - lwz r5,saver5(r31) ; Restore R5 + ld r5,saver5(r31) ; Restore R5 mtsrr1 r26 ; Restore the SRR1 now - lwz r6,saver6(r31) ; Restore R6 - - dcbt 0,r28 ; Touch that next line on in - la r28,savevscr(r31) ; Point to the saved facility context - - lwz r7,saver7(r31) ; Restore R7 - lwz r8,saver8(r31) ; Restore R8 - lwz r9,saver9(r31) ; Restore R9 - mfmsr r26 ; Get the current MSR - dcbt 0,r28 ; Touch saved facility context - lwz r10,saver10(r31) ; Restore R10 - lwz r11,saver11(r31) ; Restore R11 - oris r26,r26,hi16(MASK(MSR_VEC)) ; Get the vector enable bit - lwz r12,saver12(r31) ; Restore R12 - ori r26,r26,lo16(MASK(MSR_FP)) ; Add in the float enable - lwz r13,saver13(r31) ; Restore R13 - la r28,saver24(r31) ; Point to the next line to do - -; -; Note that floating point and vector will be enabled from here on until the RFI -; - - mtmsr r26 ; Turn on vectors and floating point - isync - - dcbt 0,r28 ; Touch next line to do + ld r6,saver6(r31) ; Restore R6 + + ld r7,saver7(r31) ; Restore R7 + ld r8,saver8(r31) ; Restore R8 + ld r9,saver9(r31) ; Restore R9 + + la r28,savevscr(r31) ; Point to the status area + + ld r10,saver10(r31) ; Restore R10 + ld r11,saver11(r31) ; Restore R11 + ld r12,saver12(r31) ; Restore R12 + ld r13,saver13(r31) ; Restore R13 - lwz r14,saver14(r31) ; Restore R14 - lwz r15,saver15(r31) ; Restore R15 + ld r26,next_savearea(r29) ; Get the exception save area - bf pfAltivecb,noavec3 ; No Altivec on this CPU... + ld r14,saver14(r31) ; Restore R14 + ld r15,saver15(r31) ; Restore R15 + lwz r27,savevrsave(r31) ; Get the vrsave + + bf-- pfAltivecb,noavec2s ; Skip if no VMX... - la r28,savevscr(r31) ; Point to the status area stvxl v0,r21,r29 ; Save a vector register lvxl v0,0,r28 ; Get the vector status - lwz r27,savevrsave(r31) ; Get the vrsave mtvscr v0 ; Set the vector status lvxl v0,r21,r29 ; Restore work vector register - beq+ cr3,noavec2 ; SRs have not changed, no need to stop the streams... - dssall ; Kill all data streams - sync -noavec2: mtspr vrsave,r27 ; Set the vrsave - -noavec3: bf- pfFloatb,nofphere ; Skip if no floating point... 
+noavec2s: mtspr vrsave,r27 ; Set the vrsave + lwz r28,saveexception(r31) ; Get exception type stfd f0,emfp0(r29) ; Save FP0 lfd f0,savefpscrpad(r31) ; Get the fpscr mtfsf 0xFF,f0 ; Restore fpscr lfd f0,emfp0(r29) ; Restore the used register - -nofphere: lwz r16,saver16(r31) ; Restore R16 - lwz r17,saver17(r31) ; Restore R17 - lwz r18,saver18(r31) ; Restore R18 - lwz r19,saver19(r31) ; Restore R19 - lwz r20,saver20(r31) ; Restore R20 - lwz r21,saver21(r31) ; Restore R21 - lwz r22,saver22(r31) ; Restore R22 - - lwz r23,saver23(r31) ; Restore R23 - lwz r24,saver24(r31) ; Restore R24 - lwz r25,saver25(r31) ; Restore R25 - lwz r26,saver26(r31) ; Restore R26 - lwz r27,saver27(r31) ; Restore R27 - + ld r16,saver16(r31) ; Restore R16 + lwz r30,spcFlags(r29) ; Get the special flags from per_proc + ld r17,saver17(r31) ; Restore R17 + ld r18,saver18(r31) ; Restore R18 + cmplwi cr1,r28,T_RESET ; Are we returning from a reset? + ld r19,saver19(r31) ; Restore R19 + ld r20,saver20(r31) ; Restore R20 + li r27,0 ; Get a zero + ld r21,saver21(r31) ; Restore R21 + la r26,saver0(r26) ; Point to the first part of the next savearea + andis. r30,r30,hi16(perfMonitor) ; Is the performance monitor enabled? + ld r22,saver22(r31) ; Restore R22 + ld r23,saver23(r31) ; Restore R23 + bne++ cr1,er64rrst ; We are not returning from a reset... + stw r27,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Allow resets again + +er64rrst: ld r24,saver24(r31) ; Restore R24 + + dcbz128 0,r26 ; Clear and allocate next savearea we use in the off chance it is still in when we next interrupt + + ld r25,saver25(r31) ; Restore R25 + ld r26,saver26(r31) ; Restore R26 + ld r27,saver27(r31) ; Restore R27 + + beq++ noPerfMonRestore64 ; Nope... + + lwz r28,savepmc+0(r31) + lwz r29,savepmc+4(r31) + mtspr pmc1_gp,r28 ; Restore PMC1 + mtspr pmc2_gp,r29 ; Restore PMC2 + lwz r28,savepmc+8(r31) + lwz r29,savepmc+12(r31) + mtspr pmc3_gp,r28 ; Restore PMC3 + mtspr pmc4_gp,r29 ; Restore PMC4 + lwz r28,savepmc+16(r31) + lwz r29,savepmc+20(r31) + mtspr pmc5_gp,r28 ; Restore PMC5 + mtspr pmc6_gp,r29 ; Restore PMC6 + lwz r28,savepmc+24(r31) + lwz r29,savepmc+28(r31) + mtspr pmc7_gp,r28 ; Restore PMC7 + mtspr pmc8_gp,r29 ; Restore PMC8 + ld r28,savemmcr1(r31) + ld r29,savemmcr2(r31) + mtspr mmcr1_gp,r28 ; Restore MMCR1 + mtspr mmcra_gp,r29 ; Restore MMCRA + ld r28,savemmcr0(r31) + + mtspr mmcr0_gp,r28 ; Restore MMCR0 + +noPerfMonRestore64: + mfsprg r30,0 ; Get per_proc lwz r28,savecr(r31) ; Get CR to restore - - lwz r29,savexer(r31) ; Get XER to restore + ld r29,savexer(r31) ; Get XER to restore mtcr r28 ; Restore the CR - lwz r28,savelr(r31) ; Get LR to restore + ld r28,savelr(r31) ; Get LR to restore mtxer r29 ; Restore the XER - lwz r29,savectr(r31) ; Get the CTR to restore + ld r29,savectr(r31) ; Get the CTR to restore mtlr r28 ; Restore the LR - lwz r28,saver30(r31) ; Get R30 + ld r28,saver30(r31) ; Get R30 mtctr r29 ; Restore the CTR - lwz r29,saver31(r31) ; Get R31 - mtsprg 2,r28 ; Save R30 for later - lwz r28,saver28(r31) ; Restore R28 + ld r29,saver31(r31) ; Get R31 + mtspr hsprg0,r28 ; Save R30 for later + ld r28,saver28(r31) ; Restore R28 mtsprg 3,r29 ; Save R31 for later - lwz r29,saver29(r31) ; Restore R29 + ld r29,saver29(r31) ; Restore R29 - mfsprg r31,0 ; Get per_proc - mfsprg r30,2 ; Restore R30 - lwz r31,pfAvailable(r31) ; Get the feature flags + lwz r31,pfAvailable(r30) ; Get the feature flags + lwz r30,UAW(r30) ; Get the User Assist Word mtsprg 2,r31 ; Set the feature flags mfsprg r31,3 ; Restore R31 + 
mtsprg 3,r30 ; Set the UAW
+ mfspr r30,hsprg0 ; Restore R30
 
- rfi ; Click heels three times and think very hard that there is no place like home...
-
- .long 0 ; Leave this here
- .long 0
- .long 0
- .long 0
- .long 0
- .long 0
- .long 0
- .long 0
-
+ rfid ; Click heels three times and think very hard that there is no place like home...
@@ -1997,7 +2968,7 @@ nofphere: lwz r16,saver16(r31) ; Restore R16
 *
 *
 * ENTRY : IR and/or DR and/or interruptions can be on
- * R3 points to the physical address of a savearea
+ * R3 points to the virtual address of a savearea
 */
 
 .align 5
@@ -2006,66 +2977,371 @@ nofphere: lwz r16,saver16(r31) ; Restore R16
 LEXT(exception_exit)
 
 mfsprg r29,2 ; Get feature flags
- mfmsr r30 ; Get the current MSR
- mtcrf 0x04,r29 ; Set the features
- rlwinm r30,r30,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
 mr r31,r3 ; Get the savearea in the right register
- rlwinm r30,r30,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
- li r10,savesrr0 ; Point to one of the first things we touch in the savearea on exit
- andi. r30,r30,0x7FCF ; Turn off externals, IR, and DR
+ mtcrf 0x04,r29 ; Set the features
+ li r0,1 ; Get this just in case
+ mtcrf 0x02,r29 ; Set the features
+ lis r30,hi16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Set up the MSR we will use throughout. Note that ME comes on here if MCK
+ rlwinm r4,r3,0,0,19 ; Round down to savearea block base
 lis r1,hi16(SAVredrive) ; Get redrive request
-
+ mfsprg r2,0 ; Get the per_proc block
+ ori r30,r30,lo16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Rest of MSR
+ bt++ pf64Bitb,eeSixtyFour ; We are 64-bit...
+
+ lwz r4,SACvrswap+4(r4) ; Get the virtual to real translation
+
+ bt pfNoMSRirb,eeNoMSR ; No MSR...
 
 mtmsr r30 ; Translation and all off
 isync ; Toss prefetch
 b eeNoMSRx
 
+ .align 5
+
+eeSixtyFour:
+ ld r4,SACvrswap(r4) ; Get the virtual to real translation
+ rldimi r30,r0,63,MSR_SF_BIT ; Set SF bit (bit 0)
+ mtmsrd r30 ; Set 64-bit mode, turn off EE, DR, and IR
+ isync ; Toss prefetch
+ b eeNoMSRx
+
+ .align 5
+
eeNoMSR: li r0,loadMSR ; Get the MSR setter SC
 mr r3,r30 ; Get new MSR
 sc ; Set it
 
-eeNoMSRx: dcbt r10,r31 ; Touch in the first stuff we restore
- mfsprg r2,0 ; Get the per_proc block
+eeNoMSRx: xor r31,r31,r4 ; Convert the savearea to physical addressing
 lwz r4,SAVflags(r31) ; Pick up the flags
 mr r13,r31 ; Put savearea here also
 
+#if INSTRUMENT
+ mfspr r5,pmc1 ; INSTRUMENT - saveinstr[8] - stamp exception exit
+ stw r5,0x6100+(8*16)+0x0(0) ; INSTRUMENT - Save it
+ mfspr r5,pmc2 ; INSTRUMENT - Get stamp
+ stw r5,0x6100+(8*16)+0x4(0) ; INSTRUMENT - Save it
+ mfspr r5,pmc3 ; INSTRUMENT - Get stamp
+ stw r5,0x6100+(8*16)+0x8(0) ; INSTRUMENT - Save it
+ mfspr r5,pmc4 ; INSTRUMENT - Get stamp
+ stw r5,0x6100+(8*16)+0xC(0) ; INSTRUMENT - Save it
+#endif
+
+
 and. r0,r4,r1 ; Check if redrive requested
- andc r4,r4,r1 ; Clear redrive
 dcbt br0,r2 ; We will need this in just a sec
 
 beq+ EatRupt ; No redrive, just exit...
 
 lwz r11,saveexception(r13) ; Restore exception code
- stw r4,SAVflags(r13) ; Set the flags
 b Redrive ; Redrive the exception...
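exception_exit above turns the caller's virtual savearea pointer physical with a single xor against the block's SACvrswap word, after rounding the pointer down to the page-aligned block base (the rlwinm r4,r3,0,0,19). A small C sketch of that conversion, assuming a hypothetical block-header type for the SACvrswap field:

#include <stdint.h>

#define SAVE_BLOCK_MASK 0xFFFFF000u        /* savearea blocks are page aligned */

struct save_block_hdr {                    /* hypothetical; holds SACvrswap */
    uint64_t sac_vrswap;                   /* virtual XOR physical of this block */
};

uint64_t savearea_virt_to_phys(uintptr_t va)
{
    struct save_block_hdr *hdr =
        (struct save_block_hdr *)(va & SAVE_BLOCK_MASK);
    return (uint64_t)va ^ hdr->sac_vrswap; /* same XOR maps either direction */
}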
+ + + .align 12 ; Force page alignment -/* - * Start of the trace table - */ - - .align 12 /* Align to 4k boundary */ - - .globl EXT(traceTableBeg) -EXT(traceTableBeg): /* Start of trace table */ -/* .fill 2048,4,0 Make an 8k trace table for now */ - .fill 13760,4,0 /* Make an .trace table for now */ -/* .fill 240000,4,0 Make an .trace table for now */ - .globl EXT(traceTableEnd) -EXT(traceTableEnd): /* End of trace table */ - .globl EXT(ExceptionVectorsEnd) EXT(ExceptionVectorsEnd): /* Used if relocating the exception vectors */ -#ifndef HACKALERTHACKALERT -/* - * This .long needs to be here because the linker gets confused and tries to - * include the final label in a section in the next section if there is nothing - * after it - */ - .long 0 /* (HACK/HACK/HACK) */ + + + + +; +; Here is where we keep the low memory globals +; + + . = 0x5000 + .globl EXT(lowGlo) + +EXT(lowGlo): + + .ascii "Hagfish " ; 5000 Unique eyecatcher + .long 0 ; 5008 Zero + .long 0 ; 500C Zero cont... + .long EXT(per_proc_info) ; 5010 pointer to per_procs + .long 0 + .long 0 ; 5018 reserved + .long 0 ; 501C reserved + .long 0 ; 5020 reserved + .long 0 ; 5024 reserved + .long 0 ; 5028 reserved + .long 0 ; 502C reserved + .long 0 ; 5030 reserved + .long 0 ; 5034 reserved + .long 0 ; 5038 reserved + .long 0 ; 503C reserved + .long 0 ; 5040 reserved + .long 0 ; 5044 reserved + .long 0 ; 5048 reserved + .long 0 ; 504C reserved + .long 0 ; 5050 reserved + .long 0 ; 5054 reserved + .long 0 ; 5058 reserved + .long 0 ; 505C reserved + .long 0 ; 5060 reserved + .long 0 ; 5064 reserved + .long 0 ; 5068 reserved + .long 0 ; 506C reserved + .long 0 ; 5070 reserved + .long 0 ; 5074 reserved + .long 0 ; 5078 reserved + .long 0 ; 507C reserved + + .globl EXT(trcWork) +EXT(trcWork): + .long 0 ; 5080 The next trace entry to use +#if DEBUG + .long 0xFFFFFFFF ; 5084 All enabled +#else + .long 0x00000000 ; 5084 All disabled on non-debug systems #endif + .long 0 ; 5088 Start of the trace table + .long 0 ; 508C End (wrap point) of the trace + .long 0 ; 5090 Saved mask while in debugger + .long 0 ; 5094 Size of trace table (1 - 256 pages) + .long 0 ; 5098 traceGas[0] + .long 0 ; 509C traceGas[1] + + .long 0 ; 50A0 reserved + .long 0 ; 50A4 reserved + .long 0 ; 50A8 reserved + .long 0 ; 50AC reserved + .long 0 ; 50B0 reserved + .long 0 ; 50B4 reserved + .long 0 ; 50B8 reserved + .long 0 ; 50BC reserved + .long 0 ; 50C0 reserved + .long 0 ; 50C4 reserved + .long 0 ; 50C8 reserved + .long 0 ; 50CC reserved + .long 0 ; 50D0 reserved + .long 0 ; 50D4 reserved + .long 0 ; 50D8 reserved + .long 0 ; 50DC reserved + .long 0 ; 50E0 reserved + .long 0 ; 50E4 reserved + .long 0 ; 50E8 reserved + .long 0 ; 50EC reserved + .long 0 ; 50F0 reserved + .long 0 ; 50F4 reserved + .long 0 ; 50F8 reserved + .long 0 ; 50FC reserved + + .globl EXT(saveanchor) + +EXT(saveanchor): ; 5100 saveanchor + .set .,.+SVsize + + .long 0 ; 5140 reserved + .long 0 ; 5144 reserved + .long 0 ; 5148 reserved + .long 0 ; 514C reserved + .long 0 ; 5150 reserved + .long 0 ; 5154 reserved + .long 0 ; 5158 reserved + .long 0 ; 515C reserved + .long 0 ; 5160 reserved + .long 0 ; 5164 reserved + .long 0 ; 5168 reserved + .long 0 ; 516C reserved + .long 0 ; 5170 reserved + .long 0 ; 5174 reserved + .long 0 ; 5178 reserved + .long 0 ; 517C reserved + + .long 0 ; 5180 tlbieLock + + .long 0 ; 5184 reserved + .long 0 ; 5188 reserved + .long 0 ; 518C reserved + .long 0 ; 5190 reserved + .long 0 ; 5194 reserved + .long 0 ; 5198 reserved + .long 0 ; 519C reserved + .long 0 ; 51A0 reserved + 
.long 0 ; 51A4 reserved + .long 0 ; 51A8 reserved + .long 0 ; 51AC reserved + .long 0 ; 51B0 reserved + .long 0 ; 51B4 reserved + .long 0 ; 51B8 reserved + .long 0 ; 51BC reserved + .long 0 ; 51C0 reserved + .long 0 ; 51C4 reserved + .long 0 ; 51C8 reserved + .long 0 ; 51CC reserved + .long 0 ; 51D0 reserved + .long 0 ; 51D4 reserved + .long 0 ; 51D8 reserved + .long 0 ; 51DC reserved + .long 0 ; 51E0 reserved + .long 0 ; 51E4 reserved + .long 0 ; 51E8 reserved + .long 0 ; 51EC reserved + .long 0 ; 51F0 reserved + .long 0 ; 51F4 reserved + .long 0 ; 51F8 reserved + .long 0 ; 51FC reserved + + .globl EXT(dgWork) + +EXT(dgWork): + + .long 0 ; 5200 dgLock + .long 0 ; 5204 dgFlags + .long 0 ; 5208 dgMisc0 + .long 0 ; 520C dgMisc1 + .long 0 ; 5210 dgMisc2 + .long 0 ; 5214 dgMisc3 + .long 0 ; 5218 dgMisc4 + .long 0 ; 521C dgMisc5 + + .long 0 ; 5220 reserved + .long 0 ; 5224 reserved + .long 0 ; 5228 reserved + .long 0 ; 522C reserved + .long 0 ; 5230 reserved + .long 0 ; 5234 reserved + .long 0 ; 5238 reserved + .long 0 ; 523C reserved + .long 0 ; 5240 reserved + .long 0 ; 5244 reserved + .long 0 ; 5248 reserved + .long 0 ; 524C reserved + .long 0 ; 5250 reserved + .long 0 ; 5254 reserved + .long 0 ; 5258 reserved + .long 0 ; 525C reserved + .long 0 ; 5260 reserved + .long 0 ; 5264 reserved + .long 0 ; 5268 reserved + .long 0 ; 526C reserved + .long 0 ; 5270 reserved + .long 0 ; 5274 reserved + .long 0 ; 5278 reserved + .long 0 ; 527C reserved + + .long 0 ; 5280 reserved + .long 0 ; 5284 reserved + .long 0 ; 5288 reserved + .long 0 ; 528C reserved + .long 0 ; 5290 reserved + .long 0 ; 5294 reserved + .long 0 ; 5298 reserved + .long 0 ; 529C reserved + .long 0 ; 52A0 reserved + .long 0 ; 52A4 reserved + .long 0 ; 52A8 reserved + .long 0 ; 52AC reserved + .long 0 ; 52B0 reserved + .long 0 ; 52B4 reserved + .long 0 ; 52B8 reserved + .long 0 ; 52BC reserved + .long 0 ; 52C0 reserved + .long 0 ; 52C4 reserved + .long 0 ; 52C8 reserved + .long 0 ; 52CC reserved + .long 0 ; 52D0 reserved + .long 0 ; 52D4 reserved + .long 0 ; 52D8 reserved + .long 0 ; 52DC reserved + .long 0 ; 52E0 reserved + .long 0 ; 52E4 reserved + .long 0 ; 52E8 reserved + .long 0 ; 52EC reserved + .long 0 ; 52F0 reserved + .long 0 ; 52F4 reserved + .long 0 ; 52F8 reserved + .long 0 ; 52FC reserved + + .globl EXT(killresv) +EXT(killresv): + + .long 0 ; 5300 Used to kill reservations + .long 0 ; 5304 Used to kill reservations + .long 0 ; 5308 Used to kill reservations + .long 0 ; 530C Used to kill reservations + .long 0 ; 5310 Used to kill reservations + .long 0 ; 5314 Used to kill reservations + .long 0 ; 5318 Used to kill reservations + .long 0 ; 531C Used to kill reservations + .long 0 ; 5320 Used to kill reservations + .long 0 ; 5324 Used to kill reservations + .long 0 ; 5328 Used to kill reservations + .long 0 ; 532C Used to kill reservations + .long 0 ; 5330 Used to kill reservations + .long 0 ; 5334 Used to kill reservations + .long 0 ; 5338 Used to kill reservations + .long 0 ; 533C Used to kill reservations + .long 0 ; 5340 Used to kill reservations + .long 0 ; 5344 Used to kill reservations + .long 0 ; 5348 Used to kill reservations + .long 0 ; 534C Used to kill reservations + .long 0 ; 5350 Used to kill reservations + .long 0 ; 5354 Used to kill reservations + .long 0 ; 5358 Used to kill reservations + .long 0 ; 535C Used to kill reservations + .long 0 ; 5360 Used to kill reservations + .long 0 ; 5364 Used to kill reservations + .long 0 ; 5368 Used to kill reservations + .long 0 ; 536C Used to kill reservations + .long 0 ; 
5370 Used to kill reservations + .long 0 ; 5374 Used to kill reservations + .long 0 ; 5378 Used to kill reservations + .long 0 ; 537C Used to kill reservations + + .long 0 ; 5380 reserved + .long 0 ; 5384 reserved + .long 0 ; 5388 reserved + .long 0 ; 538C reserved + .long 0 ; 5390 reserved + .long 0 ; 5394 reserved + .long 0 ; 5398 reserved + .long 0 ; 539C reserved + .long 0 ; 53A0 reserved + .long 0 ; 53A4 reserved + .long 0 ; 53A8 reserved + .long 0 ; 53AC reserved + .long 0 ; 53B0 reserved + .long 0 ; 53B4 reserved + .long 0 ; 53B8 reserved + .long 0 ; 53BC reserved + .long 0 ; 53C0 reserved + .long 0 ; 53C4 reserved + .long 0 ; 53C8 reserved + .long 0 ; 53CC reserved + .long 0 ; 53D0 reserved + .long 0 ; 53D4 reserved + .long 0 ; 53D8 reserved + .long 0 ; 53DC reserved + .long 0 ; 53E0 reserved + .long 0 ; 53E4 reserved + .long 0 ; 53E8 reserved + .long 0 ; 53EC reserved + .long 0 ; 53F0 reserved + .long 0 ; 53F4 reserved + .long 0 ; 53F8 reserved + .long 0 ; 53FC reserved + + +; +; The "shared page" is used for low-level debugging +; + + . = 0x6000 + .globl EXT(sharedPage) + +EXT(sharedPage): ; Per processor data area + .long 0xC24BC195 ; Comm Area validity value + .long 0x87859393 ; Comm Area validity value + .long 0xE681A2C8 ; Comm Area validity value + .long 0x8599855A ; Comm Area validity value + .long 0xD74BD296 ; Comm Area validity value + .long 0x8388E681 ; Comm Area validity value + .long 0xA2C88599 ; Comm Area validity value + .short 0x855A ; Comm Area validity value + .short 1 ; Comm Area version number + .fill 1016*4,1,0 ; (filled with 0s) .data .align ALIGN @@ -2074,3 +3350,4 @@ EXT(exception_end): .long EXT(ExceptionVectorsEnd) -EXT(ExceptionVectorsStart) /* phys fn */ + diff --git a/osfmk/ppc/machine_routines.c b/osfmk/ppc/machine_routines.c index 86b62c3f6..51af4a3d3 100644 --- a/osfmk/ppc/machine_routines.c +++ b/osfmk/ppc/machine_routines.c @@ -33,6 +33,7 @@ #include unsigned int max_cpus_initialized = 0; +extern int forcenap; #define MAX_CPUS_SET 0x1 #define MAX_CPUS_WAIT 0x2 @@ -61,7 +62,7 @@ ml_static_malloc( return((vm_offset_t)NULL); else { vaddr = static_memory_end; - static_memory_end = round_page(vaddr+size); + static_memory_end = round_page_32(vaddr+size); return(vaddr); } } @@ -88,14 +89,14 @@ ml_static_mfree( { vm_offset_t paddr_cur, vaddr_cur; - for (vaddr_cur = round_page(vaddr); - vaddr_cur < trunc_page(vaddr+size); + for (vaddr_cur = round_page_32(vaddr); + vaddr_cur < trunc_page_32(vaddr+size); vaddr_cur += PAGE_SIZE) { paddr_cur = pmap_extract(kernel_pmap, vaddr_cur); if (paddr_cur != (vm_offset_t)NULL) { vm_page_wire_count--; - pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE); - vm_page_create(paddr_cur,paddr_cur+PAGE_SIZE); + pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE)); + vm_page_create(paddr_cur>>12,(paddr_cur+PAGE_SIZE)>>12); } } } @@ -146,40 +147,8 @@ void ml_init_interrupt(void) (void) ml_set_interrupts_enabled(current_state); } -boolean_t fake_get_interrupts_enabled(void) -{ - /* - * The scheduler is not active on this cpu. There is no need to disable - * preemption. The current thread wont be dispatched on anhother cpu. - */ - return((per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0); -} - -boolean_t fake_set_interrupts_enabled(boolean_t enable) -{ - boolean_t interrupt_state_prev; - - /* - * The scheduler is not active on this cpu. There is no need to disable - * preemption. The current thread wont be dispatched on anhother cpu. 
- */ - interrupt_state_prev = - (per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0; - if (interrupt_state_prev != enable) - per_proc_info[cpu_number()].cpu_flags ^= turnEEon; - return(interrupt_state_prev); -} - /* Get Interrupts Enabled */ boolean_t ml_get_interrupts_enabled(void) -{ - if (per_proc_info[cpu_number()].interrupts_enabled == TRUE) - return(get_interrupts_enabled()); - else - return(fake_get_interrupts_enabled()); -} - -boolean_t get_interrupts_enabled(void) { return((mfmsr() & MASK(MSR_EE)) != 0); } @@ -207,6 +176,8 @@ void ml_thread_policy( unsigned policy_id, unsigned policy_info) { + extern int srv; + if ((policy_id == MACHINE_GROUP) && ((per_proc_info[0].pf.Available) & pfSMPcap)) thread_bind(thread, master_processor); @@ -216,7 +187,8 @@ void ml_thread_policy( thread_lock(thread); - thread->sched_mode |= TH_MODE_FORCEDPREEMPT; + if (srv == 0) + thread->sched_mode |= TH_MODE_FORCEDPREEMPT; set_priority(thread, thread->priority + 1); thread_unlock(thread); @@ -251,7 +223,8 @@ void machine_signal_idle( processor_t processor) { - (void)cpu_signal(processor->slot_num, SIGPwake, 0, 0); + if (per_proc_info[processor->slot_num].pf.Available & (pfCanDoze|pfWillNap)) + (void)cpu_signal(processor->slot_num, SIGPwake, 0, 0); } kern_return_t @@ -261,21 +234,25 @@ ml_processor_register( ipi_handler_t *ipi_handler) { kern_return_t ret; - int target_cpu; + int target_cpu, cpu; + int donap; if (processor_info->boot_cpu == FALSE) { if (cpu_register(&target_cpu) != KERN_SUCCESS) return KERN_FAILURE; } else { /* boot_cpu is always 0 */ - target_cpu= 0; + target_cpu = 0; } per_proc_info[target_cpu].cpu_id = processor_info->cpu_id; per_proc_info[target_cpu].start_paddr = processor_info->start_paddr; + donap = processor_info->supports_nap; /* Assume we use requested nap */ + if(forcenap) donap = forcenap - 1; /* If there was an override, use that */ + if(per_proc_info[target_cpu].pf.Available & pfCanNap) - if(processor_info->supports_nap) + if(donap) per_proc_info[target_cpu].pf.Available |= pfWillNap; if(processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL) @@ -297,6 +274,8 @@ ml_enable_nap(int target_cpu, boolean_t nap_enabled) { boolean_t prev_value = (per_proc_info[target_cpu].pf.Available & pfCanNap) && (per_proc_info[target_cpu].pf.Available & pfWillNap); + if(forcenap) nap_enabled = forcenap - 1; /* If we are to force nap on or off, do it */ + if(per_proc_info[target_cpu].pf.Available & pfCanNap) { /* Can the processor nap? */ if (nap_enabled) per_proc_info[target_cpu].pf.Available |= pfWillNap; /* Is nap supported on this machine? 
*/
 else per_proc_info[target_cpu].pf.Available &= ~pfWillNap; /* Clear if not */
@@ -304,7 +283,7 @@ ml_enable_nap(int target_cpu, boolean_t nap_enabled)
 
 if(target_cpu == cpu_number())
 __asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available)); /* Set live value */
-
+
 return (prev_value);
 }
 
@@ -339,12 +318,6 @@ ml_get_max_cpus(void)
 return(machine_info.max_cpus);
 }
 
-int
-ml_get_current_cpus(void)
-{
- return machine_info.avail_cpus;
-}
-
 void
 ml_cpu_get_info(ml_cpu_info_t *cpu_info)
 {
diff --git a/osfmk/ppc/machine_routines.h b/osfmk/ppc/machine_routines.h
index 9133cef9f..f767dcf4e 100644
--- a/osfmk/ppc/machine_routines.h
+++ b/osfmk/ppc/machine_routines.h
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -92,25 +93,68 @@ vm_offset_t ml_vtophys(
 boolean_t ml_probe_read(
 vm_offset_t paddr, unsigned int *val);
+boolean_t ml_probe_read_64(
+ addr64_t paddr,
+ unsigned int *val);
 
 /* Read physical address byte */
 unsigned int ml_phys_read_byte(
 vm_offset_t paddr);
+unsigned int ml_phys_read_byte_64(
+ addr64_t paddr);
+
+/* Read physical address half word */
+unsigned int ml_phys_read_half(
+ vm_offset_t paddr);
+unsigned int ml_phys_read_half_64(
+ addr64_t paddr);
 
-/* Read physical address */
+/* Read physical address word */
 unsigned int ml_phys_read(
 vm_offset_t paddr);
+unsigned int ml_phys_read_64(
+ addr64_t paddr);
+unsigned int ml_phys_read_word(
+ vm_offset_t paddr);
+unsigned int ml_phys_read_word_64(
+ addr64_t paddr);
+
+/* Read physical address double word */
+unsigned long long ml_phys_read_double(
+ vm_offset_t paddr);
+unsigned long long ml_phys_read_double_64(
+ addr64_t paddr);
 
 /* Write physical address byte */
 void ml_phys_write_byte(
 vm_offset_t paddr, unsigned int data);
+void ml_phys_write_byte_64(
+ addr64_t paddr, unsigned int data);
 
-/* Write physical address */
+/* Write physical address half word */
+void ml_phys_write_half(
+ vm_offset_t paddr, unsigned int data);
+void ml_phys_write_half_64(
+ addr64_t paddr, unsigned int data);
+
+/* Write physical address word */
 void ml_phys_write(
 vm_offset_t paddr, unsigned int data);
+void ml_phys_write_64(
+ addr64_t paddr, unsigned int data);
+void ml_phys_write_word(
+ vm_offset_t paddr, unsigned int data);
+void ml_phys_write_word_64(
+ addr64_t paddr, unsigned int data);
+
+/* Write physical address double word */
+void ml_phys_write_double(
+ vm_offset_t paddr, unsigned long long data);
+void ml_phys_write_double_64(
+ addr64_t paddr, unsigned long long data);
 
 /* Struct for ml_processor_register */
-struct ml_processor_info_t {
+struct ml_processor_info {
 cpu_id_t cpu_id;
 boolean_t boot_cpu;
 vm_offset_t start_paddr;
@@ -119,7 +163,7 @@ struct ml_processor_info_t {
 time_base_enable_t time_base_enable;
 };
 
-typedef struct ml_processor_info_t ml_processor_info_t;
+typedef struct ml_processor_info ml_processor_info_t;
 
 /* Register a processor */
 kern_return_t ml_processor_register(
@@ -127,6 +171,11 @@ kern_return_t ml_processor_register(
 processor_t *processor,
 ipi_handler_t *ipi_handler);
 
+/* Zero bytes starting at a physical address */
+void bzero_phys(
+ addr64_t phys_address,
+ uint32_t length);
+
 #endif /* __APPLE_API_UNSTABLE */
 
 #ifdef __APPLE_API_PRIVATE
@@ -152,9 +201,6 @@ boolean_t fake_get_interrupts_enabled(void);
 boolean_t fake_set_interrupts_enabled(
 boolean_t enable);
 
-/* check pending timers */
-void machine_clock_assist(void);
-
 void machine_idle(void);
 
 void machine_signal_idle(
diff --git a/osfmk/ppc/machine_routines_asm.s b/osfmk/ppc/machine_routines_asm.s
index e81d931cd..32aeabef5 100644
--- a/osfmk/ppc/machine_routines_asm.s
+++ b/osfmk/ppc/machine_routines_asm.s
@@ -30,6 +30,106 @@
 #include
 #include
 
+
+/*
+ * ml_set_physical() -- turn off DR and (if 64-bit) turn SF on
+ * it is assumed that pf64Bit is already in cr6
+ * ml_set_physical_get_ffs() -- turn DR off, SF on, and get feature flags
+ * ml_set_physical_disabled() -- turn DR and EE off, SF on, get feature flags
+ * ml_set_translation_off() -- turn DR, IR, and EE off, SF on, get feature flags
+ *
+ * Callable only from assembler, these return:
+ * r2 -- new MSR
+ * r11 -- old MSR
+ * r10 -- feature flags (pf64Bit etc, ie SPRG 2)
+ * cr6 -- feature flags 24-27, ie pf64Bit, pf128Byte, and pf32Byte
+ *
+ * Uses r0 and r2. ml_set_translation_off also uses r3 and cr5.
+ */
+
+ .align 4
+ .globl EXT(ml_set_translation_off)
+LEXT(ml_set_translation_off)
+ mfsprg r10,2 // get feature flags
+ li r0,0 ; Clear this
+ mtcrf 0x02,r10 // move pf64Bit etc to cr6
+ ori r0,r0,lo16(MASK(MSR_EE)+MASK(MSR_FP)+MASK(MSR_IR)+MASK(MSR_DR)) // turn off all 4
+ mfmsr r11 // get MSR
+ oris r0,r0,hi16(MASK(MSR_VEC)) // Turn off vector too
+ mtcrf 0x04,r10 // move pfNoMSRir etc to cr5
+ andc r2,r11,r0 // turn off EE, IR, and DR
+ bt++ pf64Bitb,ml_set_physical_64 // skip if 64-bit (only they take the hint)
+ bf pfNoMSRirb,ml_set_physical_32 // skip if we can load MSR directly
+ li r0,loadMSR // Get the MSR setter SC
+ mr r3,r2 // copy new MSR to r3
+ sc // Set it
+ blr
+
+ .align 4
+ .globl EXT(ml_set_physical_disabled)
+
+LEXT(ml_set_physical_disabled)
+ li r0,0 ; Clear
+ mfsprg r10,2 // get feature flags
+ ori r0,r0,lo16(MASK(MSR_EE)) // turn EE off; FP and DR come off at the join
+ mtcrf 0x02,r10 // move pf64Bit etc to cr6
+ b ml_set_physical_join
+
+ .align 5
+ .globl EXT(ml_set_physical_get_ffs)
+
+LEXT(ml_set_physical_get_ffs)
+ mfsprg r10,2 // get feature flags
+ mtcrf 0x02,r10 // move pf64Bit etc to cr6
+
+ .globl EXT(ml_set_physical)
+LEXT(ml_set_physical)
+
+ li r0,0 // do not turn off interrupts
+
+ml_set_physical_join:
+ oris r0,r0,hi16(MASK(MSR_VEC)) // Always gonna turn off vectors
+ mfmsr r11 // get MSR
+ ori r0,r0,lo16(MASK(MSR_DR)+MASK(MSR_FP)) // always turn off DR and FP bit
+ andc r2,r11,r0 // turn off DR and maybe EE
+ bt++ pf64Bitb,ml_set_physical_64 // skip if 64-bit (only they take the hint)
+ml_set_physical_32:
+ mtmsr r2 // turn off translation
+ isync
+ blr
+
+ml_set_physical_64:
+ li r0,1 // get a 1 to slam into SF
+ rldimi r2,r0,63,MSR_SF_BIT // set SF bit (bit 0)
+ mtmsrd r2 // set 64-bit mode, turn off data relocation
+ isync // synchronize
+ blr
+
+
+/*
+ * ml_restore(old_MSR)
+ *
+ * Callable only from assembler, restores the MSR in r11 saved by ml_set_physical.
+ * We assume cr6 and r11 are as set by ml_set_physical, ie:
+ * cr6 - pf64Bit flag (feature flags 24-27)
+ * r11 - old MSR
+ */
+
+ .align 5
+ .globl EXT(ml_restore)
+
+LEXT(ml_restore)
+ bt++ pf64Bitb,ml_restore_64 // handle 64-bit cpus (only they take the hint)
+ mtmsr r11 // restore a 32-bit MSR
+ isync
+ blr
+
+ml_restore_64:
+ mtmsrd r11 // restore a 64-bit MSR
+ isync
+ blr
+
+
 /* PCI config cycle probing
 *
 * boolean_t ml_probe_read(vm_offset_t paddr, unsigned int *val)
@@ -47,16 +147,23 @@
 LEXT(ml_probe_read)
 
 mfsprg r9,2 ; Get feature flags
+
+ rlwinm. r0,r9,0,pf64Bitb,pf64Bitb ; Are we on a 64-bit machine?
+ rlwinm r3,r3,0,0,31 ; Clean up for 64-bit machines
+ bne++ mpr64bit ; Go do this the 64-bit way...
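All four ml_set_* entry points above compute one MSR transform: vectors and FP always come off, DR always comes off, EE (and IR for ml_set_translation_off) comes off for the stronger variants, and 64-bit parts additionally get SF set via rldimi. The same arithmetic in C, written against the architectural MSR bit values rather than the kernel's MASK() macros (a sketch, not the kernel's code):

#include <stdbool.h>
#include <stdint.h>

/* Architectural MSR bits, expressed as 64-bit values. */
#define MSR_SF  (1ull << 63)  /* 64-bit mode */
#define MSR_VEC (1ull << 25)  /* AltiVec enable */
#define MSR_EE  (1ull << 15)  /* external interrupt enable */
#define MSR_FP  (1ull << 13)  /* floating point enable */
#define MSR_IR  (1ull << 5)   /* instruction relocation */
#define MSR_DR  (1ull << 4)   /* data relocation */

uint64_t msr_for_physical(uint64_t old_msr, bool disable_ints,
                          bool code_untranslated, bool is64)
{
    uint64_t clear = MSR_VEC | MSR_FP | MSR_DR;      /* common to all variants */
    if (disable_ints)      clear |= MSR_EE;          /* ml_set_physical_disabled */
    if (code_untranslated) clear |= MSR_EE | MSR_IR; /* ml_set_translation_off */
    uint64_t msr = old_msr & ~clear;
    if (is64)
        msr |= MSR_SF;   /* rldimi r2,r0,63,MSR_SF_BIT in the 64-bit path */
    return msr;
}

ml_restore then simply puts the saved r11 value back with mtmsr or mtmsrd, so the pair brackets a physical-access critical section.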
+ +mpr32bit: lis r8,hi16(MASK(MSR_VEC)) ; Get the vector flag mfmsr r0 ; Save the current MSR - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off + ori r8,r8,lo16(MASK(MSR_FP)) ; Add the FP flag + neg r10,r3 ; Number of bytes to end of page - rlwinm r2,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions + andc r0,r0,r8 ; Clear VEC and FP rlwinm. r10,r10,0,20,31 ; Clear excess junk and test for page bndry + ori r8,r8,lo16(MASK(MSR_EE)|MASK(MSR_IR)|MASK(MSR_DR)) ; Drop EE, IR, and DR mr r12,r3 ; Save the load address + andc r2,r0,r8 ; Clear VEC, FP, and EE mtcrf 0x04,r9 ; Set the features cmplwi cr1,r10,4 ; At least 4 bytes left in page? - rlwinm r2,r2,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Clear translation beq- mprdoit ; We are right on the boundary... li r3,0 bltlr- cr1 ; No, just return failure... @@ -132,9 +239,6 @@ mprNoMuM: mtmsr r2 ; Turn translation back off isync - mtspr hid0, r6 ; Restore HID0 - isync - lis r10,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address ori r10,r10,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address @@ -164,121 +268,421 @@ mprNoMuM: .globl EXT(ml_probe_read_mck) LEXT(ml_probe_read_mck) -/* Read physical address + +/* PCI config cycle probing - 64-bit + * + * boolean_t ml_probe_read_64(addr64_t paddr, unsigned int *val) + * + * Read the memory location at physical address paddr. + * This is a part of a device probe, so there is a good chance we will + * have a machine check here. So we have to be able to handle that. + * We assume that machine checks are enabled both in MSR and HIDs + */ + +; Force a line boundry here + .align 6 + .globl EXT(ml_probe_read_64) + +LEXT(ml_probe_read_64) + + mfsprg r9,2 ; Get feature flags + rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32 + rlwinm. r0,r9,0,pf64Bitb,pf64Bitb ; Are we on a 64-bit machine? + rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits + + mr r4,r5 ; Move result to common register + beq-- mpr32bit ; Go do this the 32-bit way... + +mpr64bit: andi. r0,r3,3 ; Check if we are on a word boundary + li r0,0 ; Clear the EE bit (and everything else for that matter) + bne-- mprFail ; Boundary not good... 
+ mfmsr r11 ; Get the MSR + mtmsrd r0,1 ; Set the EE bit only (do not care about RI) + rlwinm r11,r11,0,MSR_EE_BIT,MSR_EE_BIT ; Isolate just the EE bit + mfmsr r10 ; Refresh our view of the MSR (VMX/FP may have changed) + or r12,r10,r11 ; Turn on EE if on before we turned it off + ori r0,r0,lo16(MASK(MSR_IR)|MASK(MSR_DR)) ; Get the IR and DR bits + li r2,1 ; Get a 1 + sldi r2,r2,63 ; Get the 64-bit bit + andc r10,r10,r0 ; Clear IR and DR + or r10,r10,r2 ; Set 64-bit + + li r0,1 ; Get a 1 + mtmsrd r10 ; Translation and EE off, 64-bit on + isync + + sldi r0,r0,32+8 ; Get the right bit to inhibit caching + + mfspr r8,hid4 ; Get HID4 + or r2,r8,r0 ; Set bit to make real accesses cache-inhibited + sync ; Sync up + mtspr hid4,r2 ; Make real accesses cache-inhibited + isync ; Toss prefetches + + lis r7,0xE000 ; Get the unlikeliest ESID possible + srdi r7,r7,1 ; Make 0x7FFFFFFFF0000000 + slbie r7 ; Make sure the ERAT is cleared + + sync + isync + + eieio ; Make sure of all previous accesses + + lwz r11,0(r3) ; Get it and maybe machine check here + + eieio ; Make sure of ordering again + sync ; Get caught up yet again + isync ; Do not go further till we are here + + sync ; Sync up + mtspr hid4,r8 ; Make real accesses not cache-inhibited + isync ; Toss prefetches + + lis r7,0xE000 ; Get the unlikeliest ESID possible + srdi r7,r7,1 ; Make 0x7FFFFFFFF0000000 + slbie r7 ; Make sure the ERAT is cleared + + mtmsrd r12 ; Restore entry MSR + isync + + stw r11,0(r4) ; Pass back the result + li r3,1 ; Indicate success + blr ; Leave... + +mprFail: li r3,0 ; Set failure + blr ; Leave... + +; Force a line boundry here. This means we will be able to check addresses better + .align 6 + .globl EXT(ml_probe_read_mck_64) +LEXT(ml_probe_read_mck_64) + + +/* Read physical address byte * * unsigned int ml_phys_read_byte(vm_offset_t paddr) + * unsigned int ml_phys_read_byte_64(addr64_t paddr) * * Read the byte at physical address paddr. Memory should not be cache inhibited. */ ; Force a line boundry here + .align 5 + .globl EXT(ml_phys_read_byte_64) + +LEXT(ml_phys_read_byte_64) + + rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32 + rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits + b ml_phys_read_byte_join + .globl EXT(ml_phys_read_byte) LEXT(ml_phys_read_byte) + rlwinm r3,r3,0,0,31 ; truncate address to 32-bits +ml_phys_read_byte_join: ; r3 = address to read (reg64_t) + mflr r11 ; Save the return + bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc. + + lbz r3,0(r3) ; Get the byte + b rdwrpost ; Clean up and leave... - mfmsr r10 ; Save the current MSR - rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r4,r10,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions - rlwinm r4,r4,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation - mtmsr r4 ; Translation and all off - isync ; Toss prefetch +/* Read physical address half word + * + * unsigned int ml_phys_read_half(vm_offset_t paddr) + * unsigned int ml_phys_read_half_64(addr64_t paddr) + * + * Read the half word at physical address paddr. Memory should not be cache inhibited. 
+ */ - lbz r3,0(r3) ; Get the byte - sync +; Force a line boundry here - mtmsr r10 ; Restore translation and rupts - isync - blr + .align 5 + .globl EXT(ml_phys_read_half_64) + +LEXT(ml_phys_read_half_64) + + rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32 + rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits + b ml_phys_read_half_join -/* Read physical address + .globl EXT(ml_phys_read_half) + +LEXT(ml_phys_read_half) + rlwinm r3,r3,0,0,31 ; truncate address to 32-bits +ml_phys_read_half_join: ; r3 = address to read (reg64_t) + mflr r11 ; Save the return + bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc. + + lhz r3,0(r3) ; Get the half word + b rdwrpost ; Clean up and leave... + + +/* Read physical address word * * unsigned int ml_phys_read(vm_offset_t paddr) + * unsigned int ml_phys_read_64(addr64_t paddr) + * unsigned int ml_phys_read_word(vm_offset_t paddr) + * unsigned int ml_phys_read_word_64(addr64_t paddr) * * Read the word at physical address paddr. Memory should not be cache inhibited. */ ; Force a line boundry here + .align 5 + .globl EXT(ml_phys_read_64) + .globl EXT(ml_phys_read_word_64) + +LEXT(ml_phys_read_64) +LEXT(ml_phys_read_word_64) + + rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32 + rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits + b ml_phys_read_word_join + .globl EXT(ml_phys_read) + .globl EXT(ml_phys_read_word) LEXT(ml_phys_read) +LEXT(ml_phys_read_word) + rlwinm r3,r3,0,0,31 ; truncate address to 32-bits +ml_phys_read_word_join: ; r3 = address to read (reg64_t) + mflr r11 ; Save the return + bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc. + + lwz r3,0(r3) ; Get the word + b rdwrpost ; Clean up and leave... - mfmsr r0 ; Save the current MSR - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r4,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions - rlwinm r4,r4,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation - mtmsr r4 ; Translation and all off - isync ; Toss prefetch +/* Read physical address double word + * + * unsigned long long ml_phys_read_double(vm_offset_t paddr) + * unsigned long long ml_phys_read_double_64(addr64_t paddr) + * + * Read the double word at physical address paddr. Memory should not be cache inhibited. + */ + +; Force a line boundry here + + .align 5 + .globl EXT(ml_phys_read_double_64) + +LEXT(ml_phys_read_double_64) + + rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32 + rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits + b ml_phys_read_double_join + + .globl EXT(ml_phys_read_double) + +LEXT(ml_phys_read_double) + rlwinm r3,r3,0,0,31 ; truncate address to 32-bits +ml_phys_read_double_join: ; r3 = address to read (reg64_t) + mflr r11 ; Save the return + bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc. - lwz r3,0(r3) ; Get the word - sync + lwz r4,4(r3) ; Get the low word + lwz r3,0(r3) ; Get the high word + b rdwrpost ; Clean up and leave... - mtmsr r0 ; Restore translation and rupts - isync - blr /* Write physical address byte * * void ml_phys_write_byte(vm_offset_t paddr, unsigned int data) + * void ml_phys_write_byte_64(addr64_t paddr, unsigned int data) * * Write the byte at physical address paddr. Memory should not be cache inhibited. 
*/ -; Force a line boundry here .align 5 + .globl EXT(ml_phys_write_byte_64) + +LEXT(ml_phys_write_byte_64) + + rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32 + rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits + mr r4,r5 ; Copy over the data + b ml_phys_write_byte_join + .globl EXT(ml_phys_write_byte) LEXT(ml_phys_write_byte) + rlwinm r3,r3,0,0,31 ; truncate address to 32-bits +ml_phys_write_byte_join: ; r3 = address to write (reg64_t), r4 = data + mflr r11 ; Save the return + bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc. + + stb r4,0(r3) ; Set the byte + b rdwrpost ; Clean up and leave... - mfmsr r0 ; Save the current MSR - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r5,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions - rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation - mtmsr r5 ; Translation and all off - isync ; Toss prefetch +/* Write physical address half word + * + * void ml_phys_write_half(vm_offset_t paddr, unsigned int data) + * void ml_phys_write_half_64(addr64_t paddr, unsigned int data) + * + * Write the half word at physical address paddr. Memory should not be cache inhibited. + */ + + .align 5 + .globl EXT(ml_phys_write_half_64) + +LEXT(ml_phys_write_half_64) + + rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32 + rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits + mr r4,r5 ; Copy over the data + b ml_phys_write_half_join + + .globl EXT(ml_phys_write_half) + +LEXT(ml_phys_write_half) + rlwinm r3,r3,0,0,31 ; truncate address to 32-bits +ml_phys_write_half_join: ; r3 = address to write (reg64_t), r4 = data + mflr r11 ; Save the return + bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc. - stb r4,0(r3) ; Set the byte - sync + sth r4,0(r3) ; Set the half word + b rdwrpost ; Clean up and leave... - mtmsr r0 ; Restore translation and rupts - isync - blr -/* Write physical address +/* Write physical address word * * void ml_phys_write(vm_offset_t paddr, unsigned int data) + * void ml_phys_write_64(addr64_t paddr, unsigned int data) + * void ml_phys_write_word(vm_offset_t paddr, unsigned int data) + * void ml_phys_write_word_64(addr64_t paddr, unsigned int data) * * Write the word at physical address paddr. Memory should not be cache inhibited. */ -; Force a line boundry here .align 5 + .globl EXT(ml_phys_write_64) + .globl EXT(ml_phys_write_word_64) + +LEXT(ml_phys_write_64) +LEXT(ml_phys_write_word_64) + + rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32 + rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits + mr r4,r5 ; Copy over the data + b ml_phys_write_word_join + .globl EXT(ml_phys_write) + .globl EXT(ml_phys_write_word) LEXT(ml_phys_write) +LEXT(ml_phys_write_word) + rlwinm r3,r3,0,0,31 ; truncate address to 32-bits +ml_phys_write_word_join: ; r3 = address to write (reg64_t), r4 = data + mflr r11 ; Save the return + bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc. + + stw r4,0(r3) ; Set the word + b rdwrpost ; Clean up and leave... 
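
Each of the _64 entry points above begins with the same rlwinm/rlwimi pair: under the 32-bit PowerPC calling convention an addr64_t argument arrives as two 32-bit halves (high half in r3, low half in r4), and the pair splices them into one 64-bit value in r3 before falling into the shared join point. A minimal C sketch of that join, for illustration only (join_addr64 is a hypothetical helper, not a kernel routine):

    #include <stdint.h>

    typedef uint64_t addr64_t;

    /* Hypothetical helper: the C equivalent of the rlwinm/rlwimi prologue.
     * The 32-bit ABI passes a 64-bit physical address as (hi, lo) halves. */
    static inline addr64_t join_addr64(uint32_t hi, uint32_t lo)
    {
        return ((addr64_t)hi << 32) | lo;   /* hi -> top 32 bits, lo -> bottom 32 bits */
    }
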
- mfmsr r0 ; Save the current MSR - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r5,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions - rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation - mtmsr r5 ; Translation and all off - isync ; Toss prefetch +/* Write physical address double word + * + * void ml_phys_write_double(vm_offset_t paddr, unsigned long long data) + * void ml_phys_write_double_64(addr64_t paddr, unsigned long long data) + * + * Write the double word at physical address paddr. Memory should not be cache inhibited. + */ + + .align 5 + .globl EXT(ml_phys_write_double_64) + +LEXT(ml_phys_write_double_64) + + rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32 + rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits + mr r4,r5 ; Copy over the high data + mr r5,r6 ; Copy over the low data + b ml_phys_write_double_join + + .globl EXT(ml_phys_write_double) + +LEXT(ml_phys_write_double) + rlwinm r3,r3,0,0,31 ; truncate address to 32-bits +ml_phys_write_double_join: ; r3 = address to write (reg64_t), r4,r5 = data (long long) + mflr r11 ; Save the return + bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc. + + stw r4,0(r3) ; Set the high word + stw r5,4(r3) ; Set the low word + b rdwrpost ; Clean up and leave... + + + .align 5 + +rdwrpre: mfsprg r12,2 ; Get feature flags + lis r8,hi16(MASK(MSR_VEC)) ; Get the vector flag + mfmsr r10 ; Save the MSR + ori r8,r8,lo16(MASK(MSR_FP)) ; Add the FP flag + mtcrf 0x02,r12 ; move pf64Bit + andc r10,r10,r8 ; Clear VEC and FP + ori r9,r8,lo16(MASK(MSR_EE)|MASK(MSR_IR)|MASK(MSR_DR)) ; Drop EE, DR, and IR + li r2,1 ; Prepare for 64 bit + andc r9,r10,r9 ; Clear VEC, FP, DR, and EE + bf-- pf64Bitb,rdwrpre32 ; Join 32-bit code... + + srdi r7,r3,31 ; Get a 1 if address is in I/O memory + rldimi r9,r2,63,MSR_SF_BIT ; set SF bit (bit 0) + cmpldi cr7,r7,1 ; Is source in I/O memory? + mtmsrd r9 ; set 64-bit mode, turn off EE, DR, and IR + isync ; synchronize + + sldi r0,r2,32+8 ; Get the right bit to turn off caching + + bnelr++ cr7 ; We are not in the I/O area, all ready... + + mfspr r8,hid4 ; Get HID4 + or r2,r8,r0 ; Set bit to make real accesses cache-inhibited + sync ; Sync up + mtspr hid4,r2 ; Make real accesses cache-inhibited + isync ; Toss prefetches + + lis r7,0xE000 ; Get the unlikeliest ESID possible + srdi r7,r7,1 ; Make 0x7FFFFFFFF0000000 + slbie r7 ; Make sure the ERAT is cleared - stw r4,0(r3) ; Set the word sync + isync + blr ; Finally, all ready... + + .align 5 + +rdwrpre32: rlwimi r9,r10,0,MSR_IR_BIT,MSR_IR_BIT ; Leave the IR bit unchanged + mtmsr r9 ; Drop EE, DR, and leave IR unchanged + isync + blr ; All set up, leave... + + .align 5 + +rdwrpost: mtlr r11 ; Restore the return + bt++ pf64Bitb,rdwrpost64 ; Join 64-bit code... + + mtmsr r10 ; Restore entry MSR (sans FP and VEC) + isync + blr ; Leave... + +rdwrpost64: bne++ cr7,rdwrpcok ; Skip enabling real mode caching if we did not change it... - mtmsr r0 ; Restore translation and rupts + sync ; Sync up + mtspr hid4,r8 ; Make real accesses not cache-inhibited + isync ; Toss prefetches + + lis r7,0xE000 ; Get the unlikeliest ESID possible + srdi r7,r7,1 ; Make 0x7FFFFFFFF0000000 + slbie r7 ; Make sure the ERAT is cleared + +rdwrpcok: mtmsrd r10 ; Restore entry MSR (sans FP and VEC) isync - blr + blr ; Leave... 
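
To make the calling conventions concrete, here is a hedged usage sketch built only from the prototypes documented in the comments above (it assumes kernel context; both addresses are made-up examples, not real devices). ml_probe_read_64 is the guarded call for device probing, since it is expected to survive a machine check and returns a boolean_t success flag, while the ml_phys_read/ml_phys_write family assumes ordinary, non-cache-inhibited memory:

    /* Sketch only: PROBE_PADDR and RAM_PADDR are hypothetical addresses. */
    #define PROBE_PADDR ((addr64_t)0x00000000F0000000ULL)  /* hypothetical device */
    #define RAM_PADDR   ((addr64_t)0x0000000000100000ULL)  /* hypothetical cacheable RAM */

    static void phys_access_example(void)
    {
        unsigned int val;

        if (ml_probe_read_64(PROBE_PADDR, &val))        /* may machine check; safe probe */
            kprintf("probe ok: %08X\n", val);

        val = ml_phys_read_64(RAM_PADDR);               /* plain word read, cacheable memory */
        ml_phys_write_64(RAM_PADDR, val | 1);           /* plain word write */
    }
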
 /* set interrupts enabled or disabled
@@ -294,38 +698,41 @@ LEXT(ml_phys_write)
 LEXT(ml_set_interrupts_enabled)
- mfsprg r7,0
- lwz r4,PP_INTS_ENABLED(r7)
- mr. r4,r4
- beq- EXT(fake_set_interrupts_enabled)
+ andi. r4,r3,1 ; Are we turning interruptions on?
+ lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
 mfmsr r5 ; Get the current MSR
- rlwinm r5,r5,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
- mr r4,r3 ; Save the old value
- rlwinm r5,r5,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
+ ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_FP)) ; Get float enable and EE enable
 rlwinm r3,r5,17,31,31 ; Set return value
- rlwimi r5,r4,15,16,16 ; Insert new EE bit
- andi. r8,r5,lo16(MASK(MSR_EE)) ; Interruptions
- bne CheckPreemption
-NoPreemption:
- mtmsr r5 ; Slam enablement
+ andc r5,r5,r0 ; Force VEC and FP off
+ bne CheckPreemption ; Interrupts going on, check ASTs...
+
+ mtmsr r5 ; Slam disable (always going disabled here)
+ isync ; Need this because FP/Vec might go off
 blr
+
+ .align 5
+
 CheckPreemption:
- lwz r8,PP_NEED_AST(r7)
- li r6,AST_URGENT
- lwz r8,0(r8)
- lwz r7,PP_PREEMPT_CNT(r7)
- lis r0,HIGH_ADDR(DoPreemptCall)
- and. r8,r8,r6
- ori r0,r0,LOW_ADDR(DoPreemptCall)
- beq+ NoPreemption
- cmpi cr0, r7, 0
+ mfsprg r7,0
+ ori r5,r5,lo16(MASK(MSR_EE)) ; Turn on the enable
+ lwz r8,PP_NEED_AST(r7) ; Get pointer to AST flags
+ mfsprg r9,1 ; Get current activation
+ li r6,AST_URGENT ; Get the type we will preempt for
+ lwz r7,ACT_PREEMPT_CNT(r9) ; Get preemption count
+ lwz r8,0(r8) ; Get AST flags
+ lis r0,hi16(DoPreemptCall) ; High part of Preempt FW call
+ cmpwi cr1,r7,0 ; Are preemptions masked off?
+ and. r8,r8,r6 ; Are we urgent?
+ crorc cr1_eq,cr0_eq,cr1_eq ; Remember if preemptions are masked or not urgent
+ ori r0,r0,lo16(DoPreemptCall) ; Bottom of FW call
+ mtmsr r5 ; Restore the MSR now, before we can preempt
- bnelr+ ; Return if no premption
+ isync ; Need this because FP/Vec might go off
+
+ beqlr++ cr1 ; Return if no preemption...
 sc ; Preempt
 blr
-
 /* Emulate a decrementer exception
 *
 * void machine_clock_assist(void)
@@ -341,8 +748,8 @@ LEXT(machine_clock_assist)
 mfsprg r7,0
 lwz r4,PP_INTS_ENABLED(r7)
 mr. r4,r4
- beq- EXT(CreateFakeDEC)
- blr
+ bnelr+ cr0
+ b EXT(CreateFakeDEC)
 /* Set machine into idle power-saving mode.
 *
@@ -354,23 +761,24 @@ LEXT(machine_clock_assist)
 *
 */
- ; Force a line boundry here
 .align 5
 .globl EXT(machine_idle_ppc)
LEXT(machine_idle_ppc)
- mfmsr r3 ; Get the current MSR
- rlwinm r3,r3,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
- rlwinm r3,r3,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
- rlwinm r5,r3,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
+ lis r0,hi16(MASK(MSR_VEC)) ; Get the vector flag
+ mfmsr r3 ; Save the MSR
+ ori r0,r0,lo16(MASK(MSR_FP)) ; Add the FP flag
+ andc r3,r3,r0 ; Clear VEC and FP
+ ori r0,r0,lo16(MASK(MSR_EE)) ; Drop EE also
+ andc r5,r3,r0 ; Clear VEC, FP, and EE
+ mtmsr r5 ; Hold up interruptions for now
 isync ; May have messed with fp/vec
 mfsprg r12,0 ; Get the per_proc_info
- mfspr r6,hid0 ; Get the current power-saving mode
 mfsprg r11,2 ; Get CPU specific features
- rlwinm r6,r6,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though)
+ mfspr r6,hid0 ; Get the current power-saving mode
 mtcrf 0xC7,r11 ; Get the facility flags
 lis r4,hi16(napm) ; Assume we can nap
@@ -391,21 +799,21 @@ yesnap: mftbu r9 ; Get the upper timebase
 stw r8,napStamp(r12) ; Set high order time stamp
 stw r7,napStamp+4(r12) ; Set low order nap stamp
- rlwinm. r7,r11,0,pfNoL2PFNapb,pfNoL2PFNapb ; Turn off L2 Prefetch before nap?
+ rlwinm. r7,r11,0,pfNoL2PFNapb,pfNoL2PFNapb ; Turn off L2 Prefetch before nap?
 beq miL2PFok
 mfspr r7,msscr0 ; Get current MSSCR0 value
- rlwinm r7,r7,0,0,l2pfes-1 ; Dissable L2 Prefetch
+ rlwinm r7,r7,0,0,l2pfes-1 ; Disable L2 Prefetch
 mtspr msscr0,r7 ; Updates MSSCR0 value
 sync
 isync
miL2PFok:
- rlwinm. r7,r11,0,pfSlowNapb,pfSlowNapb ; Should nap at slow speed?
+ rlwinm. r7,r11,0,pfSlowNapb,pfSlowNapb ; Should nap at slow speed?
 beq minoslownap
 mfspr r7,hid1 ; Get current HID1 value
- oris r7,r7,hi16(hid1psm) ; Select PLL1
+ oris r7,r7,hi16(hid1psm) ; Select PLL1
 mtspr hid1,r7 ; Update HID1 value
minoslownap:
@@ -417,11 +825,31 @@ minoslownap:
; is taken and set everything up to return directly to machine_idle_ret.
; So, make sure everything we need there is already set up...
;
+
+ li r10,hi16(dozem|napm|sleepm) ; Mask of power management bits
+
+ bf-- pf64Bitb,mipNSF1 ; skip if 32-bit...
+
+ sldi r4,r4,32 ; Position the flags
+ sldi r10,r10,32 ; Position the masks
+
+
+mipNSF1: andc r6,r6,r10 ; Clean up the old power bits
+ ori r7,r5,lo16(MASK(MSR_EE)) ; Flip on EE
 or r6,r6,r4 ; Set nap or doze
 oris r5,r7,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR
+
+ sync
 mtspr hid0,r6 ; Set up the HID for nap/doze
+ mfspr r6,hid0 ; Yes, this is silly, keep it here
+ mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
+ mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
+ mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
+ mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
+ mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
 isync ; Make sure it is set
+
 mtmsr r7 ; Enable for interrupts
 rlwinm. r11,r11,0,pfAltivecb,pfAltivecb ; Do we have altivec?
 beq- minovec ; No...
@@ -478,30 +906,46 @@ deadsleep: addi r3,r3,1 ; Make analyzer happy
 b deadsleep ; Die the death of 1000 joys...
#endif
+
 mfsprg r12,0 ; Get the per_proc_info
 mfspr r4,hid0 ; Get the current power-saving mode
 eqv r10,r10,r10 ; Get all foxes
 mfsprg r11,2 ; Get CPU specific features
- rlwinm. r5,r11,0,pfNoL2PFNapb,pfNoL2PFNapb ; Turn off L2 Prefetch before sleep?
+ rlwinm. r5,r11,0,pfNoL2PFNapb,pfNoL2PFNapb ; Turn off L2 Prefetch before sleep?
 beq mpsL2PFok
 mfspr r5,msscr0 ; Get current MSSCR0 value
- rlwinm r5,r5,0,0,l2pfes-1 ; Dissable L2 Prefetch
+ rlwinm r5,r5,0,0,l2pfes-1 ; Disable L2 Prefetch
 mtspr msscr0,r5 ; Updates MSSCR0 value
 sync
 isync
mpsL2PFok:
+ rlwinm. r5,r11,0,pf64Bitb,pf64Bitb ; PM bits are shifted on 64bit systems.
+ bne mpsPF64bit
+
+ rlwinm r4,r4,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though)
+ oris r4,r4,hi16(sleepm) ; Set sleep
+ b mpsClearDEC
+
+mpsPF64bit:
+ lis r5, hi16(dozem|napm|sleepm) ; Clear all possible power-saving modes (not DPM though)
+ sldi r5, r5, 32
+ andc r4, r4, r5
+ lis r5, hi16(napm) ; Set nap
+// lis r5, hi16(dozem) ; Set doze
+ sldi r5, r5, 32
+ or r4, r4, r5
+
+mpsClearDEC:
 mfmsr r5 ; Get the current MSR
 rlwinm r10,r10,0,1,31 ; Make 0x7FFFFFFF
- rlwinm r4,r4,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though)
 mtdec r10 ; Load decrementer with 0x7FFFFFFF
 isync ; and make sure,
 mfdec r9 ; really sure, it gets there
 mtcrf 0x07,r11 ; Get the cache flags, etc
- oris r4,r4,hi16(sleepm) ; Set sleep
 rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
;
; Note that we need translation off before we set the HID to sleep.
Otherwise @@ -522,6 +966,12 @@ mpsNoMSRx: ori r3,r5,lo16(MASK(MSR_EE)) ; Flip on EE sync mtspr hid0,r4 ; Set up the HID to sleep + mfspr r4,hid0 ; Yes, this is silly, keep it here + mfspr r4,hid0 ; Yes, this is a duplicate, keep it here + mfspr r4,hid0 ; Yes, this is a duplicate, keep it here + mfspr r4,hid0 ; Yes, this is a duplicate, keep it here + mfspr r4,hid0 ; Yes, this is a duplicate, keep it here + mfspr r4,hid0 ; Yes, this is a duplicate, keep it here mtmsr r3 ; Enable for interrupts to drain decrimenter @@ -543,8 +993,14 @@ mpsNoMSRx: mfmsr r5 ; Get the current MSR oris r5,r5,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR ; Leave EE off because power goes off shortly - -slSleepNow: sync ; Sync it all up + mfsprg r12,0 ; Get the per_proc_info + li r10,PP_CPU_FLAGS + lhz r11,PP_CPU_FLAGS(r12) ; Get the flags + ori r11,r11,SleepState ; Marked SleepState + sth r11,PP_CPU_FLAGS(r12) ; Set the flags + dcbf r10,r12 +slSleepNow: + sync ; Sync it all up mtmsr r5 ; Do sleep with interruptions enabled isync ; Take a pill b slSleepNow ; Go back to sleep if we wake up... @@ -576,14 +1032,25 @@ LEXT(cacheInit) mfsprg r11,2 ; Get CPU specific features mfmsr r7 ; Get the current MSR - rlwinm r4,r9,0,dpm+1,doze-1 ; Clear all possible power-saving modes (also disable DPM) rlwinm r7,r7,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off rlwinm r7,r7,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off rlwimi r11,r11,pfLClckb+1,31,31 ; Move pfLClck to another position (to keep from using non-volatile CRs) rlwinm r5,r7,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions mtcrf 0x87,r11 ; Get the feature flags + lis r10,hi16(dozem|napm|sleepm|dpmm) ; Mask of power management bits + bf-- pf64Bitb,cIniNSF1 ; Skip if 32-bit... + + sldi r10,r10,32 ; Position the masks + +cIniNSF1: andc r4,r9,r10 ; Clean up the old power bits mtspr hid0,r4 ; Set up the HID + mfspr r4,hid0 ; Yes, this is silly, keep it here + mfspr r4,hid0 ; Yes, this is a duplicate, keep it here + mfspr r4,hid0 ; Yes, this is a duplicate, keep it here + mfspr r4,hid0 ; Yes, this is a duplicate, keep it here + mfspr r4,hid0 ; Yes, this is a duplicate, keep it here + mfspr r4,hid0 ; Yes, this is a duplicate, keep it here bt pfNoMSRirb,ciNoMSR ; No MSR... @@ -602,11 +1069,12 @@ ciNoMSRx: dssall ; Stop streams sync -cinoDSS: lis r5,hi16(EXT(tlb_system_lock)) ; Get the TLBIE lock +cinoDSS: li r5,tlbieLock ; Get the TLBIE lock li r0,128 ; Get number of TLB entries - ori r5,r5,lo16(EXT(tlb_system_lock)) ; Grab up the bottom part li r6,0 ; Start at 0 + bf-- pf64Bitb,citlbhang ; Skip if 32-bit... + li r0,1024 ; Get the number of TLB entries citlbhang: lwarx r2,0,r5 ; Get the TLBIE lock mr. r2,r2 ; Is it locked? @@ -630,14 +1098,19 @@ cipurgeTLB: tlbie r6 ; Purge this entry sync isync -cinoSMP: stw r2,0(r5) ; Unlock TLBIE lock + bf-- pf64Bitb,cinoSMP ; Skip if 32-bit... + ptesync ; Wait for quiet again + sync + +cinoSMP: stw r2,tlbieLock(0) ; Unlock TLBIE lock + + bt++ pf64Bitb,cin64 ; Skip if 64-bit... - cror cr0_eq,pfL1ib,pfL1db ; Check for either I- or D-cache - bf- cr0_eq,cinoL1 ; No level 1 to flush... rlwinm. r0,r9,0,ice,dce ; Were either of the level 1s on? beq- cinoL1 ; No, no need to flush... - bf pfL1fab,ciswdl1 ; If no hw flush assist, go do by software... + rlwinm. r0,r11,0,pfL1fab,pfL1fab ; do we have L1 flush assist? + beq ciswdl1 ; If no hw flush assist, go do by software... 
 mfspr r8,msscr0 ; Get the memory system control register
 oris r8,r8,hi16(dl1hwfm) ; Turn on the hardware flush request
@@ -761,7 +1234,9 @@ cinoL1:
;
; Flush and disable the level 2
;
- bf pfL2b,cinol2 ; No level 2 cache to flush
+ mfsprg r10,2 ; need to check 2 features we did not put in CR
+ rlwinm. r0,r10,0,pfL2b,pfL2b ; do we have L2?
+ beq cinol2 ; No level 2 cache to flush
 mfspr r8,l2cr ; Get the L2CR
 lwz r3,pfl2cr(r12) ; Get the L2CR value
@@ -776,7 +1251,8 @@ cinoL1:
 bne- ciinvdl2 ; Yes, just invalidate and get PLL synced...
ciflushl2:
- bf pfL2fab,ciswfl2 ; Flush not in hardware...
+ rlwinm. r0,r10,0,pfL2fab,pfL2fab ; hardware-assisted L2 flush?
+ beq ciswfl2 ; Flush not in hardware...
 mr r10,r8 ; Take a copy now
@@ -827,7 +1303,7 @@ ciswfldl2a: lwz r0,0(r10) ; Load something to flush something
 addi r10,r10,32 ; Next line
 bdnz ciswfldl2a ; Do the lot...
-ciinvdl2: rlwinm r8,r3,0,l2e+1,31 ; Use the saved L2CR and clear the enable bit
+ciinvdl2: rlwinm r8,r3,0,l2e+1,31 ; Clear the enable bit
 b cinla ; Branch to next line...
 .align 5
@@ -854,7 +1330,9 @@ ciinvl2: sync
 sync
 isync
ciinvdl2a: mfspr r2,l2cr ; Get the L2CR
- bf pfL2ib,ciinvdl2b ; Flush not in hardware...
+ mfsprg r0,2 ; need to check a feature in "non-volatile" set
+ rlwinm. r0,r0,0,pfL2ib,pfL2ib ; flush in HW?
+ beq ciinvdl2b ; Flush not in hardware...
 rlwinm. r2,r2,0,l2i,l2i ; Is the invalidate still going?
 bne+ ciinvdl2a ; Assume so, this will take a looong time...
 sync
@@ -906,7 +1384,7 @@ cihwfl3: mfspr r10,l3cr ; Get back the L3CR
 rlwinm. r10,r10,0,l3hwf,l3hwf ; Is the flush over?
 bne+ cihwfl3 ; Nope, keep going...
-ciinvdl3: rlwinm r8,r3,0,l3e+1,31 ; Use saved L3CR value and clear the enable bit
+ciinvdl3: rlwinm r8,r3,0,l3e+1,31 ; Clear the enable bit
 sync ; Make sure of life, liberty, and justice
 mtspr l3cr,r8 ; Disable L3
 sync
@@ -956,11 +1434,13 @@ ciinvdl3c: addi r2,r2,-1 ; ?
 mtspr l3cr,r3 ; Enable it as desired
 sync
cinol3:
- bf pfL2b,cinol2a ; No level 2 cache to enable
+ mfsprg r0,2 ; need to check a feature in "non-volatile" set
+ rlwinm. r0,r0,0,pfL2b,pfL2b ; is there an L2 cache?
+ beq cinol2a ; No level 2 cache to enable
 lwz r3,pfl2cr(r12) ; Get the L2CR value
 cmplwi r3, 0 ; Should the L2 be all the way off?
- beq cinol2a : Yes, done with L2
+ beq cinol2a ; Yes, done with L2
 mtspr l2cr,r3 ; Enable it as desired
 sync
@@ -989,6 +1469,161 @@ cinoexit: mtspr hid0,r9 ; Turn off the invalidate (needed for some older m
 blr ; Return...
+;
+; Handle 64-bit architecture
+; This processor cannot run without caches, so we just push everything out
+; and flush. It will be relatively clean afterwards
+;
+
+ .align 5
+
+cin64:
+ li r10,hi16(dozem|napm|sleepm) ; Mask of power management bits we want cleared
+ sldi r10,r10,32 ; Position the masks
+ andc r9,r9,r10 ; Clean up the old power bits
+ mr r4,r9
+ isync
+ mtspr hid0,r4 ; Set up the HID
+ mfspr r4,hid0 ; Yes, this is silly, keep it here
+ mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
+ mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
+ mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
+ mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
+ mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
+ isync
+
+ mfspr r10,hid1 ; Save hid1
+ mfspr r4,hid4 ; Save hid4
+ mr r12,r10 ; Really save hid1
+ mr r11,r4 ; Get a working copy of hid4
+
+ li r0,0 ; Get a 0
+ eqv r2,r2,r2 ; Get all foxes
+
+ rldimi r10,r0,55,7 ; Clear I$ prefetch bits (7:8)
+
+ isync
+ mtspr hid1,r10 ; Stick it
+ mtspr hid1,r10 ; Stick it again
+ isync
+
+ rldimi r11,r2,38,25 ; Disable D$ prefetch (25:25)
+
+ sync
+ mtspr hid4,r11 ; Stick it
+ isync
+
+ li r3,8 ; Set bit 28+32
+ sldi r3,r3,32 ; Make it bit 28
+ or r3,r3,r11 ; Turn on the flash invalidate L1D$
+
+ oris r5,r11,0x0600 ; Set disable L1D$ bits
+ sync
+ mtspr hid4,r3 ; Invalidate
+ isync
+
+ mtspr hid4,r5 ; Un-invalidate and disable L1D$
+ isync
+
+ lis r8,GUSModeReg ; Get the GUS mode ring address
+ mfsprg r0,2 ; Get the feature flags
+ ori r8,r8,0x8000 ; Set to read data
+ rlwinm. r0,r0,pfSCOMFixUpb+1,31,31 ; Set shift if we need a fix me up
+
+ sync
+
+ mtspr scomc,r8 ; Request the GUS mode
+ mfspr r11,scomd ; Get the GUS mode
+ mfspr r8,scomc ; Get back the status (we just ignore it)
+ sync
+ isync
+
+ sld r11,r11,r0 ; Fix up if needed
+
+ ori r6,r11,lo16(GUSMdmapen) ; Set the bit that means direct L2 cache address
+ lis r8,GUSModeReg ; Get GUS mode register address
+
+ sync
+
+ mtspr scomd,r6 ; Set that we want direct L2 mode
+ mtspr scomc,r8 ; Tell GUS we want direct L2 mode
+ mfspr r3,scomc ; Get back the status
+ sync
+ isync
+
+ li r3,0 ; Clear start point
+
+cflushlp: lis r6,0x0040 ; Pick 4MB line as our target
+ or r6,r6,r3 ; Put in the line offset
+ lwz r5,0(r6) ; Load a line
+ addis r6,r6,8 ; Roll bit 42:44
+ lwz r5,0(r6) ; Load a line
+ addis r6,r6,8 ; Roll bit 42:44
+ lwz r5,0(r6) ; Load a line
+ addis r6,r6,8 ; Roll bit 42:44
+ lwz r5,0(r6) ; Load a line
+ addis r6,r6,8 ; Roll bit 42:44
+ lwz r5,0(r6) ; Load a line
+ addis r6,r6,8 ; Roll bit 42:44
+ lwz r5,0(r6) ; Load a line
+ addis r6,r6,8 ; Roll bit 42:44
+ lwz r5,0(r6) ; Load a line
+ addis r6,r6,8 ; Roll bit 42:44
+ lwz r5,0(r6) ; Load a line
+
+ addi r3,r3,128 ; Next line
+ andis. r5,r3,8 ; Have we done enough?
+ beq++ cflushlp ; Not yet...
+
+ sync
+
+ lis r6,0x0040 ; Pick 4MB line as our target
+
+cflushx: dcbf 0,r6 ; Flush line and invalidate
+ addi r6,r6,128 ; Next line
+ andis. r5,r6,0x0080 ; Have we done enough?
+ beq++ cflushx ; Keep on flushing...
+
+ mr r3,r10 ; Copy current hid1
+ rldimi r3,r2,54,9 ; Set force icbi match mode
+
+ li r6,0 ; Set start of ICBI range
+ isync
+ mtspr hid1,r3 ; Stick it
+ mtspr hid1,r3 ; Stick it again
+ isync
+
+cflicbi: icbi 0,r6 ; Kill I$
+ addi r6,r6,128 ; Next line
+ andis. r5,r6,1 ; Have we done them all?
+ beq++ cflicbi ; Not yet...
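
The 64-bit leg above cannot simply turn its caches off, so it displaces the L1 data cache by loading a 4MB region in 128-byte lines (the GUS direct-L2 addressing and the 42:44 bit roll walk the cache ways), then flushes the same region with dcbf, and finally empties the I-cache with icbi in force-match mode before restoring the GUS mode and HID registers below. A strongly simplified C rendering of the load-then-flush idea, ignoring the SCOM/GUS plumbing and the way-stride (all names here are illustrative, not kernel interfaces):

    #define FLUSH_BASE 0x00400000UL     /* mirrors lis r6,0x0040 */
    #define FLUSH_SIZE 0x00400000UL     /* 4MB region */
    #define CACHE_LINE 128UL            /* line increment used above */

    static void flush_l1d_sketch(void)
    {
        volatile char *p = (volatile char *)FLUSH_BASE;
        unsigned long off;

        for (off = 0; off < FLUSH_SIZE; off += CACHE_LINE)
            (void)p[off];               /* displace old lines with loads */

        for (off = 0; off < FLUSH_SIZE; off += CACHE_LINE)
            __asm__ volatile ("dcbf 0,%0" : : "r" (p + off) : "memory");  /* flush and invalidate */
    }
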
+ + lis r8,GUSModeReg ; Get GUS mode register address + + sync + + mtspr scomd,r11 ; Set that we do not want direct mode + mtspr scomc,r8 ; Tell GUS we do not want direct mode + mfspr r3,scomc ; Get back the status + sync + isync + + isync + mtspr hid1,r12 ; Restore entry hid1 + mtspr hid1,r12 ; Stick it again + isync + + sync + mtspr hid4,r4 ; Restore entry hid4 + isync + + sync + mtmsr r7 ; Restore MSR to entry + isync + blr ; Return... + + + /* Disables all caches * * void cacheDisable(void) @@ -1012,13 +1647,16 @@ LEXT(cacheDisable) cdNoAlt: sync + btlr pf64Bitb ; No way to disable a 64-bit machine... + mfspr r5,hid0 ; Get the hid rlwinm r5,r5,0,dce+1,ice-1 ; Clear the I- and D- cache enables mtspr hid0,r5 ; Turn off dem caches sync - bf pfL2b,cdNoL2 ; Skip if no L2... - + rlwinm. r0,r11,0,pfL2b,pfL2b ; is there an L2? + beq cdNoL2 ; Skip if no L2... + mfspr r5,l2cr ; Get the L2 rlwinm r5,r5,0,l2e+1,31 ; Turn off enable bit @@ -1037,7 +1675,7 @@ cinlbb: sync ; Finish memory stuff b cinlcc ; Jump back up and turn off cache... cdNoL2: - + bf pfL3b,cdNoL3 ; Skip down if no L3... mfspr r5,l3cr ; Get the L3 @@ -1262,36 +1900,84 @@ throtoff: mfspr r3,ictc ; Get the old throttle LEXT(ml_get_timebase) loop: - mftbu r4 - mftb r5 - mftbu r6 - cmpw r6, r4 - bne- loop + mftbu r4 + mftb r5 + mftbu r6 + cmpw r6, r4 + bne- loop + + stw r4, 0(r3) + stw r5, 4(r3) + + blr - stw r4, 0(r3) - stw r5, 4(r3) +/* + * unsigned int cpu_number(void) + * + * Returns the current cpu number. + */ + + .align 5 + .globl EXT(cpu_number) + +LEXT(cpu_number) + mfsprg r4,0 ; Get per-proc block + lhz r3,PP_CPU_NUMBER(r4) ; Get CPU number + blr ; Return... - blr /* - * The routine that implements cpu_number. + * void set_machine_current_act(thread_act_t) + * + * Set the current activation */ + .align 5 + .globl EXT(set_machine_current_act) - .align 5 - .globl EXT(cpu_number) - -LEXT(cpu_number) - - mfmsr r9 /* Save the old MSR */ - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r8,r9,0,17,15 /* Clear interruptions */ - mtmsr r8 /* Interrupts off */ - isync - mfsprg r7,0 /* Get per-proc block */ - lhz r3,PP_CPU_NUMBER(r7) /* Get CPU number */ - mtmsr r9 /* Restore interruptions to entry */ - blr /* Return... */ +LEXT(set_machine_current_act) + + mtsprg 1,r3 ; Set spr1 with the active thread + blr ; Return... + +/* + * thread_t current_act(void) + * thread_t current_thread(void) + * + * + * Return the current thread for outside components. + */ + .align 5 + .globl EXT(current_act) + .globl EXT(current_thread) + +LEXT(current_act) +LEXT(current_thread) + + mfsprg r3,1 + blr + + .align 5 + .globl EXT(clock_get_uptime) +LEXT(clock_get_uptime) +1: mftbu r9 + mftb r0 + mftbu r11 + cmpw r11,r9 + bne- 1b + stw r0,4(r3) + stw r9,0(r3) + blr + + + .align 5 + .globl EXT(mach_absolute_time) +LEXT(mach_absolute_time) +1: mftbu r3 + mftb r4 + mftbu r0 + cmpw r0,r3 + bne- 1b + blr /* ** ml_sense_nmi() diff --git a/osfmk/ppc/mappings.c b/osfmk/ppc/mappings.c index ee4d8503b..89672e4da 100644 --- a/osfmk/ppc/mappings.c +++ b/osfmk/ppc/mappings.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -43,18 +43,19 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include #include #include -#include #include #include @@ -62,24 +63,25 @@ #include #include -#include /* (TEST/DEBUG) */ +#include /* (TEST/DEBUG) */ #define PERFTIMES 0 -#if PERFTIMES && DEBUG -#define debugLog2(a, b, c) dbgLog2(a, b, c) -#else -#define debugLog2(a, b, c) -#endif - vm_map_t mapping_map = VM_MAP_NULL; -#define MAPPING_MAP_SIZE 33554432 /* 32MB address space */ -unsigned int incrVSID = 0; /* VSID increment value */ +unsigned int incrVSID = 0; /* VSID increment value */ unsigned int mappingdeb0 = 0; -unsigned int mappingdeb1 = 0; +unsigned int mappingdeb1 = 0; +int ppc_max_adrsp; /* Maximum address spaces */ + +addr64_t *mapdebug; /* (BRINGUP) */ +extern unsigned int DebugWork; /* (BRINGUP) */ + extern unsigned int hash_table_size; -extern vm_offset_t mem_size; + +void mapping_verify(void); +void mapping_phys_unused(ppnum_t pa); + /* * ppc_prot translates from the mach representation of protections to the PPC version. * We also allow for a direct setting of the protection bits. This extends the mach @@ -151,927 +153,487 @@ extern vm_offset_t mem_size; void mapping_init(void) { - unsigned int tmp; + unsigned int tmp, maxeff, rwidth; - __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */ - - incrVSID = 1 << ((32 - tmp + 1) >> 1); /* Get ceiling of sqrt of table size */ - incrVSID |= 1 << ((32 - tmp + 1) >> 2); /* Get ceiling of quadroot of table size */ - incrVSID |= 1; /* Set bit and add 1 */ - return; - -} - - -/* - * mapping_remove(pmap_t pmap, vm_offset_t va); - * Given a pmap and virtual address, this routine finds the mapping and removes it from - * both its PTEG hash list and the physical entry list. The mapping block will be added to - * the free list. If the free list threshold is reached, garbage collection will happen. - * We also kick back a return code to say whether or not we had one to remove. - * - * We have a strict ordering here: the mapping must be removed from the PTEG hash list before - * it can be removed from the physical entry list. This allows us to get by with only the PTEG - * hash lock at page fault time. The physical entry lock must be held while we remove the mapping - * from both lists. The PTEG lock is one of the lowest level locks. No PTE fault, interruptions, - * losing control, getting other locks, etc., are allowed when you hold it. You do, and you die. - * It's just that simple! - * - * When the phys_entry lock is held, the mappings chained to that one are guaranteed to stay around. - * However, a mapping's order on the PTEG hash chain is not. The interrupt handler uses the PTEG - * lock to control the hash cahin and may move the position of the mapping for MRU calculations. - * - * Note that mappings do not need to point to a physical entry. When they don't, it indicates - * the mapping is outside of physical memory and usually refers to a memory mapped device of - * some sort. Naturally, we can't lock what we don't have, so the phys entry lock and unlock - * routines return normally, but don't do anything. 
- */ - -boolean_t mapping_remove(pmap_t pmap, vm_offset_t va) { /* Remove a single mapping for this VADDR - Returns TRUE if a mapping was found to remove */ - - mapping *mp, *mpv; - register blokmap *blm; - spl_t s; - unsigned int *useadd, *useaddr, uindx; - int i; - struct phys_entry *pp; - mapping *mp1, *mpv1; + ppc_max_adrsp = maxAdrSp; /* Set maximum address spaces */ - debugLog2(1, va, pmap->space); /* start mapping_remove */ - - s=splhigh(); /* Don't bother me */ + maxeff = 32; /* Assume 32-bit */ + if(per_proc_info[0].pf.Available & pf64Bit) maxeff = 64; /* Is this a 64-bit machine? */ - mp = hw_lock_phys_vir(pmap->space, va); /* Lock the physical entry for this mapping */ - - if(!mp) { /* Did we find one? */ - splx(s); /* Allow 'rupts now */ - if(mp = (mapping *)hw_rem_blk(pmap, va, va)) { /* No normal pages, try to remove an odd-sized one */ - - if((unsigned int)mp & 1) { /* Make sure we don't unmap a permanent one */ - blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC)); /* Get virtual address */ - panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n", - pmap, va, blm); - } - while ((unsigned int)mp & 2) - mp = (mapping *)hw_rem_blk(pmap, va, va); -#if 0 - blm = (blokmap *)hw_cpv(mp); /* (TEST/DEBUG) */ - kprintf("mapping_remove: removed block map - bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */ - blm, blm->start, blm->end, blm->PTEr); -#endif - mapping_free(hw_cpv(mp)); /* Release it */ - debugLog2(2, 1, 0); /* End mapping_remove */ - return TRUE; /* Tell them we did it */ - } - debugLog2(2, 0, 0); /* end mapping_remove */ - return FALSE; /* Didn't find any, return FALSE... */ - } - if((unsigned int)mp&1) { /* Did we timeout? */ - panic("mapping_remove: timeout locking physical entry\n"); /* Yeah, scream about it! */ - splx(s); /* Restore the interrupt level */ - return FALSE; /* Bad hair day, return FALSE... */ - } + rwidth = per_proc_info[0].pf.pfMaxVAddr - maxAdrSpb; /* Reduce address width by width of address space ID */ + if(rwidth > maxeff) rwidth = maxeff; /* If we still have more virtual than effective, clamp at effective */ - mpv = hw_cpv(mp); /* Get virtual address of mapping */ -#if DEBUG - if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n"); -#else - (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */ -#endif - useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask]; /* Point to slot to bump */ - useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */ - (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 
0x00010000 : 1); /* Increment the even or odd slot */ - -#if 0 - for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */ - if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */ - panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n", - i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap); - } - } -#endif + vm_max_address = 0xFFFFFFFFFFFFFFFFULL >> (64 - rwidth); /* Get maximum effective address supported */ + vm_max_physical = 0xFFFFFFFFFFFFFFFFULL >> (64 - per_proc_info[0].pf.pfMaxPAddr); /* Get maximum physical address supported */ - hw_rem_map(mp); /* Remove the corresponding mapping */ - - pp = mpv->physent; - - if ((mpv->physent) && (pmap->vflags & pmapVMhost)) { - - while(mp1 = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) { /* Keep going so long as there's another */ - - mpv1 = hw_cpv(mp1); /* Get the virtual address */ -#if DEBUG - if(hw_atomic_sub(&mpv1->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n"); -#else - (void)hw_atomic_sub(&mpv1->pmap->stats.resident_count, 1); /* Decrement the resident page count */ -#endif + if(per_proc_info[0].pf.Available & pf64Bit) { /* Are we 64 bit? */ + tmp = 12; /* Size of hash space */ + } + else { + __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */ + tmp = 32 - tmp; /* Size of hash space */ + } - uindx = ((mpv1->PTEv >> 24) & 0x78) | ((mpv1->PTEv >> 3) & 7); /* Join segment number and top 2 bits of the API */ - useadd = (unsigned int *)&mpv1->pmap->pmapUsage[uindx]; /* Point to slot to bump */ - useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */ - (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Increment the even or odd slot */ + incrVSID = 1 << ((tmp + 1) >> 1); /* Get ceiling of sqrt of table size */ + incrVSID |= 1 << ((tmp + 1) >> 2); /* Get ceiling of quadroot of table size */ + incrVSID |= 1; /* Set bit and add 1 */ -#if 0 - for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */ - if((mpv1->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */ - panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n", - i * pmapUsageSize, mpv1->pmap->pmapUsage[i], mpv1->pmap); - } - } -#endif - - hw_rem_map(mp1); /* Remove the mapping */ - mapping_free(mpv1); /* Add mapping to the free list */ - } - } + return; - if(mpv->physent)hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */ - - splx(s); /* Was there something you needed? */ - - mapping_free(mpv); /* Add mapping to the free list */ - debugLog2(2, 1, 0); /* end mapping_remove */ - return TRUE; /* Tell them we did it */ } + /* - * mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) - release all mappings for this physent for the specified map + * mapping_remove(pmap_t pmap, addr64_t va); + * Given a pmap and virtual address, this routine finds the mapping and unmaps it. + * The mapping block will be added to + * the free list. If the free list threshold is reached, garbage collection will happen. * - * This guy releases any mappings that exist for a physical page on a specified map. - * We get the lock on the phys_entry, and hold it through out this whole routine. - * That way, no one can change the queue out from underneath us. We keep fetching - * the physents mapping anchor until it is null, then we're done. + * We also pass back the next higher mapped address. 
This is done so that the higher level
+ * pmap_remove function can release a range of addresses simply by calling mapping_remove
+ * in a loop until it finishes the range or is returned a vaddr of 0.
 *
- * For each mapping, we call the remove routine to remove it from the PTEG hash list and
- * decriment the pmap's residency count. Then we release the mapping back to the free list.
+ * Note that if the mapping is not found, we return the next VA ORed with 1
 *
 */
-
-void mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) { /* Remove all mappings from specified pmap for this physent */
+addr64_t mapping_remove(pmap_t pmap, addr64_t va) { /* Remove a single mapping for this VADDR
+ Returns the next mapped VA, or (next VA | 1) if no mapping was found */
- mapping *mp, *mp_next, *mpv;
- spl_t s;
- unsigned int *useadd, *useaddr, uindx;
- int i;
-
- s=splhigh(); /* Don't bother me */
-
- if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
- panic("\nmapping_purge_pmap: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
- pp, pp->phys_link, pp->pte1); /* Complain about timeout */
- }
-
- mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);
+ mapping *mp;
+ addr64_t nextva;
- while(mp) { /* Keep going so long as there's another */
+ disable_preemption(); /* Don't change threads */
- mpv = hw_cpv(mp); /* Get the virtual address */
- if(mpv->pmap != pmap) {
- mp = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
- continue;
- }
-#if DEBUG
- if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
-#else
- (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
-#endif
-
- uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7); /* Join seg # and top 2 bits of API */
- useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx]; /* Point to slot to bump */
- useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
- (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Incr the even or odd slot */
-
-
-
- mp_next = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
- hw_rem_map(mp); /* Remove the mapping */
- mapping_free(mpv); /* Add mapping to the free list */
- mp = mp_next;
- }
-
- hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
- splx(s);
- return;
-}
-/*
- * mapping_purge(struct phys_entry *pp) - release all mappings for this physent to the free list
- *
- * This guy releases any mappings that exist for a physical page.
- * We get the lock on the phys_entry, and hold it through out this whole routine.
- * That way, no one can change the queue out from underneath us. We keep fetching
- * the physents mapping anchor until it is null, then we're done.
- *
- * For each mapping, we call the remove routine to remove it from the PTEG hash list and
- * decriment the pmap's residency count. Then we release the mapping back to the free list.
- *
- */
-
-void mapping_purge(struct phys_entry *pp) { /* Remove all mappings for this physent */
-
- mapping *mp, *mpv;
- spl_t s;
- unsigned int *useadd, *useaddr, uindx;
- int i;
-
- s=splhigh(); /* Don't bother me */
- debugLog2(3, pp->pte1, 0); /* start mapping_purge */
-
- if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
- panic("\nmapping_purge: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
- pp, pp->phys_link, pp->pte1); /* Complain about timeout */
+ while(1) { /* Keep trying until we truly fail */
+ mp = hw_rem_map(pmap, va, &nextva); /* Remove a mapping from this pmap */
+ if(((unsigned int)mp & mapRetCode) != mapRtRemove) break; /* If it is gone, we are done */
 }
-
- while(mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) { /* Keep going so long as there's another */
- mpv = hw_cpv(mp); /* Get the virtual address */
-#if DEBUG
- if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
-#else
- (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
-#endif
+ enable_preemption(); /* Thread change ok */
- uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7); /* Join segment number and top 2 bits of the API */
- useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx]; /* Point to slot to bump */
- useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
- (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Increment the even or odd slot */
+ if(!mp) return (nextva | 1); /* Nothing found to unmap */
-#if 0
- for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
- if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
- panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
- i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
- }
- }
-#endif
-
+ if((unsigned int)mp & mapRetCode) { /* Was there a failure? */
- hw_rem_map(mp); /* Remove the mapping */
- mapping_free(mpv); /* Add mapping to the free list */
+ panic("mapping_remove: hw_rem_map failed - pmap = %08X, va = %016llX, code = %08X\n",
+ pmap, va, mp);
 }
-
- hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
- debugLog2(4, pp->pte1, 0); /* end mapping_purge */
- splx(s); /* Was there something you needed? */
- return; /* Tell them we did it */
-}
+ mapping_free(mp); /* Add mapping to the free list */
+ return nextva; /* Tell them we did it */
+}
/*
- * mapping_make(pmap, pp, va, spa, prot, attr, locked) - map a virtual address to a real one
+ * mapping_make(pmap, va, pa, flags, size, prot) - map a virtual address to a real one
 *
 * This routine takes the given parameters, builds a mapping block, and queues it into the
 * correct lists.
 *
- * The pp parameter can be null. This allows us to make a mapping that is not
- * associated with any physical page. We may need this for certain I/O areas.
+ * pmap (virtual address) is the pmap to map into
+ * va (virtual address) is the 64-bit virtual address that is being mapped
+ * pa (physical page number) is the physical page number (i.e., physical address >> 12). This is
+ * a 32-bit quantity.
+ * Flags:
+ * block if 1, mapping is a block, size parameter is used. Note: we do not keep
+ * reference and change information or allow protection changes of blocks.
+ * any changes must first unmap and then remap the area.
+ * use attribute Use specified attributes for map, not defaults for physical page
+ * perm Mapping is permanent
+ * cache inhibited Cache inhibited (used if use attribute or block set )
+ * guarded Guarded access (used if use attribute or block set )
+ * size size of block (not used if not block)
+ * prot VM protection bits
+ * attr Cachability/Guardedness
+ *
+ * Returns 0 if mapping was successful. Returns vaddr that overlaps/collides.
+ * Returns 1 for any other failure.
+ *
+ * Note that we make an assumption that all memory in the range of 0x0000000080000000 to 0x00000000FFFFFFFF is reserved
+ * for I/O and default the cache attributes appropriately. The caller is free to set whatever they want however.
+ *
+ * If there is any physical page that is not found in the physent table, the mapping is forced to be a
+ * block mapping of length 1. This keeps us from trying to update a physent during later mapping use,
+ * e.g., fault handling.
+ *
 *
- * If the phys_entry address is null, we neither lock or chain into it.
- * If locked is 1, we already hold the lock on the phys_entry and won't get nor release it.
 */
-mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked) { /* Make an address mapping */
+addr64_t mapping_make(pmap_t pmap, addr64_t va, ppnum_t pa, unsigned int flags, unsigned int size, vm_prot_t prot) { /* Make an address mapping */
- register mapping *mp, *mpv;
- unsigned int *useadd, *useaddr;
- spl_t s;
- int i;
+ register mapping *mp;
+ addr64_t colladdr;
+ unsigned int pindex, mflags, pattr, wimg;
+ phys_entry *physent;
+ int i, nlists;
- debugLog2(5, va, pa); /* start mapping_purge */
- mpv = mapping_alloc(); /* Get a spare mapping block */
-
- mpv->pmap = pmap; /* Initialize the pmap pointer */
- mpv->physent = pp; /* Initialize the pointer to the physical entry */
- mpv->PTEr = ((unsigned int)pa & ~(PAGE_SIZE - 1)) | attr<<3 | ppc_prot(prot); /* Build the real portion of the PTE */
- mpv->PTEv = (((unsigned int)va >> 1) & 0x78000000) | (pmap->space << 7) | (((unsigned int)va >> 22) & 0x0000003F); /* Build the VSID */
+ disable_preemption(); /* Don't change threads */
- s=splhigh(); /* Don't bother from now on */
+ pindex = 0;
+
+ mflags = 0x01000000; /* Start building mpFlags field (busy count = 1) */
- mp = hw_cvp(mpv); /* Get the physical address of this */
+ if(!(flags & mmFlgBlock)) { /* Is this a block map? */
- if(pp && !locked) { /* Is there a physical entry? Or do we already hold the lock? */
- if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
- panic("\nmapping_make: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
- pp, pp->phys_link, pp->pte1); /* Complain about timeout */
+ size = 1; /* Set size to 1 page if not block */
+
+ physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
+ if(!physent) { /* Did we find the physical page?
*/ + mflags |= mpBlock; /* Force this to a block if no physent */ + size = 1; /* Force size to 1 page */ + pattr = 0; /* Assume normal, non-I/O memory */ + if((pa & 0xFFF80000) == 0x00080000) pattr = mmFlgCInhib | mmFlgGuarded; /* If this page is in I/O range, set I/O attributes */ } - } + else pattr = ((physent->ppLink & (ppI | ppG)) >> 4); /* Get the default attributes from physent */ - if(pp) { /* See of there is a physcial entry */ - mpv->next = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS); /* Move the old anchor to the new mappings forward */ - pp->phys_link = (mapping *)((unsigned int)mp | (unsigned int)pp->phys_link & PHYS_FLAGS); /* Point the anchor at us. Now we're on the list (keep the flags) */ + if(flags & mmFlgUseAttr) pattr = flags & (mmFlgCInhib | mmFlgGuarded); /* Use requested attributes */ } - - hw_add_map(mp, pmap->space, va); /* Stick it on the PTEG hash list */ - - (void)hw_atomic_add(&mpv->pmap->stats.resident_count, 1); /* Increment the resident page count */ - useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask]; /* Point to slot to bump */ - useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */ - (void)hw_atomic_add(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Increment the even or odd slot */ -#if 0 - for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */ - if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */ - panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n", - i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap); - } + else { /* This is a block */ + + pattr = flags & (mmFlgCInhib | mmFlgGuarded); /* Use requested attributes */ + mflags |= mpBlock; /* Show that this is a block */ } -#endif - - if(pp && !locked)hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* If we have one and we didn't hold on entry, unlock the physical entry */ - - splx(s); /* Ok for interruptions now */ - debugLog2(6, pmap->space, prot); /* end mapping_purge */ - return mpv; /* Leave... */ -} - - -/* - * Enters optimal translations for odd-sized V=F blocks. - * - * Builds a block map for each power-of-two hunk o' address - * that exists. This is specific to the processor type. - * PPC uses BAT register size stuff. Future PPC might have - * something else. - * - * The supplied va is expected to be maxoptimal vs the supplied boundary. We're too - * stupid to know otherwise so we only look at the va anyhow, so there... 
- * - */ - -void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr) { /* Maps optimal autogenned blocks */ - - register blokmap *blm, *oblm; - unsigned int pg; - unsigned int maxsize, boundary, leading, trailing, cbsize, minsize, tomin; - int i, maxshft, nummax, minshft; - -#if 1 - kprintf("mapping_block_map_opt: pmap=%08X; va=%08X; pa=%08X; ; bnd=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */ - pmap, va, pa, bnd, size, prot, attr); -#endif - minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */ - maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */ + wimg = 0x2; /* Set basic PPC wimg to 0b0010 - Coherent */ + if(pattr & mmFlgCInhib) wimg |= 0x4; /* Add cache inhibited if we need to */ + if(pattr & mmFlgGuarded) wimg |= 0x1; /* Add guarded if we need to */ - minshft = 31 - cntlzw(minsize); /* Shift to position minimum size */ - maxshft = 31 - cntlzw(blokValid); /* Shift to position maximum size */ + mflags = mflags | (pindex << 16); /* Stick in the physical entry table index */ - leading = ((va + bnd - 1) & -bnd) - va; /* Get size of leading area */ - trailing = size - leading; /* Get size of trailing area */ - tomin = ((va + minsize - 1) & -minsize) - va; /* Get size needed to round up to the minimum block size */ + if(flags & mmFlgPerm) mflags |= mpPerm; /* Set permanent mapping */ -#if 1 - kprintf("mapping_block_map_opt: bnd=%08X; leading=%08X; trailing=%08X; tomin=%08X\n", bnd, leading, trailing, tomin); /* (TEST/DEBUG) */ -#endif - - if(tomin)pmap_map_block(pmap, va, pa, tomin, prot, attr, 0); /* Map up to minimum block size */ + size = size - 1; /* Change size to offset */ + if(size > 0xFFFF) return 1; /* Leave if size is too big */ - va = va + tomin; /* Adjust virtual start */ - pa = pa + tomin; /* Adjust physical start */ - leading = leading - tomin; /* Adjust leading size */ + nlists = mapSetLists(pmap); /* Set number of lists this will be on */ -/* - * Some of this code is very classic PPC. We need to fix this up. - */ - - leading = leading >> minshft; /* Position for bit testing */ - cbsize = minsize; /* Set the minimum size */ + mp = mapping_alloc(nlists); /* Get a spare mapping block with this many lists */ + + /* the mapping is zero except that the mpLists field is set */ + mp->mpFlags |= mflags; /* Add in the rest of the flags to mpLists */ + mp->mpSpace = pmap->space; /* Set the address space/pmap lookup ID */ + mp->mpBSize = size; /* Set the size */ + mp->mpPte = 0; /* Set the PTE invalid */ + mp->mpPAddr = pa; /* Set the physical page number */ + mp->mpVAddr = (va & ~mpHWFlags) | (wimg << 3) | ppc_prot(prot); /* Add the protection and attributes to the field */ - for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, small to large */ + while(1) { /* Keep trying... */ + colladdr = hw_add_map(pmap, mp); /* Go add the mapping to the pmap */ + if(!colladdr) { /* All is ok... */ + enable_preemption(); /* Ok to switch around here */ + return 0; /* Return... */ + } - if(leading & 1) { - pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */ - pa = pa + cbsize; /* Bump up physical address */ - va = va + cbsize; /* Bump up virtual address */ + if((colladdr & mapRetCode) == mapRtRemove) { /* Is our target being removed? 
*/ + (void)mapping_remove(pmap, colladdr); /* Yes, go help out */ + continue; /* Try to add it now */ + } + + if((colladdr & mapRetCode) == mapRtMapDup) { /* Is our target already mapped (collision mapping must be identical)? */ + mapping_free(mp); /* Return mapping to the free list */ + enable_preemption(); /* Ok to switch around here */ + return 0; /* Normal return */ } - - leading = leading >> 1; /* Shift up to next size */ - cbsize = cbsize << 1; /* Here too */ - - } - - nummax = trailing >> maxshft; /* Get number of max size blocks left */ - for(i=0; i < nummax - 1; i++) { /* Account for all max size block left but 1 */ - pmap_map_block(pmap, va, pa, maxsize, prot, attr, 0); /* Map up to next boundary */ - - pa = pa + maxsize; /* Bump up physical address */ - va = va + maxsize; /* Bump up virtual address */ - trailing -= maxsize; /* Back off what we just did */ - } - - cbsize = maxsize; /* Start at maximum size */ - - for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, high to low */ - if(trailing & cbsize) { - trailing &= ~cbsize; /* Remove the block we are allocating */ - pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */ - pa = pa + cbsize; /* Bump up physical address */ - va = va + cbsize; /* Bump up virtual address */ - } - cbsize = cbsize >> 1; /* Next size down */ + if(colladdr != mapRtBadLk) { /* Did it collide? */ + mapping_free(mp); /* Yeah, toss the pending mapping */ + enable_preemption(); /* Ok to switch around here */ + return colladdr; /* Pass back the overlapping address */ + } + + panic("mapping_make: hw_add_map failed - code = %08X, pmap = %08X, va = %016llX, mapping = %08X\n", + colladdr, pmap, va, mp); /* Die dead */ } - if(trailing) pmap_map_block(pmap, va, pa, trailing, prot, attr, 0); /* Map up to end */ - - return; /* Return */ + return 1; /* Leave... */ } /* - * Enters translations for odd-sized V=F blocks. + * mapping *mapping_find(pmap, va, *nextva, full) - Finds a mapping * - * Checks to insure that the request is at least ODDBLKMIN in size. If smaller, the request - * will be split into normal-sized page mappings. + * Looks up the vaddr and returns the mapping and the next mapped va + * If full is true, it will descend through all nested pmaps to find actual mapping * - * The higher level VM map should be locked to insure that we don't have a - * double diddle here. + * Must be called with interruptions disabled or we can hang trying to remove found mapping. * - * We panic if we get a block that overlaps with another. We do not merge adjacent - * blocks because removing any address within a block removes the entire block and if - * would really mess things up if we trashed too much. + * Returns 0 if not found and the virtual address of the mapping if it is + * Note that the mappings busy count is bumped. It is the responsibility of the caller + * to drop the count. If this is not done, any attempt to remove the mapping will hang. * - * Once a block is mapped, it is unmutable, that is, protection, catch mode, etc. can - * not be changed. The block must be unmapped and then remapped with the new stuff. - * We also do not keep track of reference or change flags. + * NOTE: The nextva field is not valid when full is TRUE. * - * Blocks are kept in MRU order anchored from the pmap. The chain is traversed only - * with interruptions and translation disabled and under the control of the lock located - * in the first block map. 
MRU is used because it is expected that the same entry - * will be accessed repeatedly while PTEs are being generated to cover those addresses. * */ -void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */ - - register blokmap *blm, *oblm, *oblm_virt;; - unsigned int pg; - -#if 0 - kprintf("pmap_map_block: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */ - pmap, va, pa, size, prot, attr); -#endif - - if(size < ODDBLKMIN) { /* Is this below the minimum size? */ - for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */ - mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */ -#if 0 - kprintf("pmap_map_block: mm: va=%08X; pa=%08X\n", /* (TEST/DEBUG) */ - va + pg, pa + pg); -#endif - } - return; /* All done */ - } - - blm = (blokmap *)mapping_alloc(); /* Get a block mapping */ - - blm->start = (unsigned int)va & -PAGE_SIZE; /* Get virtual block start */ - blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1); /* Get virtual block end */ - blm->current = 0; - blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */ - blm->space = pmap->space; /* Set the space (only needed for remove) */ - blm->blkFlags = flags; /* Set the block's flags */ - -#if 0 - kprintf("pmap_map_block: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */ - blm, blm->start, blm->end, blm->PTEr); -#endif - - blm = (blokmap *)hw_cvp((mapping *)blm); /* Get the physical address of this */ - -#if 0 - kprintf("pmap_map_block: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */ - blm, pmap->bmaps); -#endif - - do { - oblm = hw_add_blk(pmap, blm); - if ((unsigned int)oblm & 2) { - oblm_virt = (blokmap *)hw_cpv((mapping *)((unsigned int)oblm & 0xFFFFFFFC)); - mapping_remove(pmap, oblm_virt->start); - }; - } while ((unsigned int)oblm & 2); - - if (oblm) { - oblm = (blokmap *)hw_cpv((mapping *) oblm); /* Get the old block virtual address */ - blm = (blokmap *)hw_cpv((mapping *)blm); /* Back to the virtual address of this */ - if((oblm->start != blm->start) || /* If we have a match, then this is a fault race and */ - (oblm->end != blm->end) || /* is acceptable */ - (oblm->PTEr != blm->PTEr)) - panic("pmap_map_block: block map overlap - blm = %08X\n", oblm);/* Otherwise, Squeak loudly and carry a big stick */ - mapping_free((struct mapping *)blm); - } - -#if 0 - kprintf("pmap_map_block: pmap->bmaps=%08X\n", /* (TEST/DEBUG) */ - blm, pmap->bmaps); -#endif - - return; /* Return */ -} +mapping *mapping_find(pmap_t pmap, addr64_t va, addr64_t *nextva, int full) { /* Find an address mapping */ + register mapping *mp; + addr64_t curva; + pmap_t curpmap; + int nestdepth; -/* - * Optimally enters translations for odd-sized V=F blocks. - * - * Checks to insure that the request is at least ODDBLKMIN in size. If smaller, the request - * will be split into normal-sized page mappings. - * - * This one is different than pmap_map_block in that it will allocate it's own virtual - * target address. Rather than allocating a single block, - * it will also allocate multiple blocks that are power-of-two aligned/sized. This allows - * hardware-level mapping that takes advantage of BAT maps or large page sizes. - * - * Most considerations for pmap_map_block apply. 
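
Editorial note on the new interface: mapping_make() replaces the whole pmap_map_block() family removed above. The sketch below is a hypothetical caller, not part of the patch; it assumes the xnu-517 prototype mapping_make(pmap, va, pa, flags, size-in-pages, prot) and shows how the three documented outcomes (0 = mapped, 1 = size rejected, anything else = address of an overlapping mapping) are meant to be handled.

/* Hypothetical caller of mapping_make() -- illustrative only, not in the patch */
addr64_t map_one_wired_page(pmap_t pmap, addr64_t va, ppnum_t pa, vm_prot_t prot) {

	addr64_t colva;

	colva = mapping_make(pmap, va, pa, mmFlgPerm, 1, prot);	/* One permanent page (argument order assumed from the xnu-517 header) */

	if(colva == 0) return 0;				/* Mapped cleanly, nothing overlapped */
	if(colva == 1) panic("map_one_wired_page: size rejected");	/* Can't happen for a 1-page request */

	return colva;						/* Caller must deal with the overlap at this address */
}
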
- * - * - */ - -kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va, - vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { /* Map an optimal autogenned block */ - - register blokmap *blm, *oblm; - unsigned int pg; - kern_return_t err; - unsigned int bnd; + curpmap = pmap; /* Remember entry */ + nestdepth = 0; /* Set nest depth */ + curva = (addr64_t)va; /* Set current va */ -#if 1 - kprintf("pmap_map_block_opt: map=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */ - map, pa, size, prot, attr); -#endif + while(1) { - if(size < ODDBLKMIN) { /* Is this below the minimum size? */ - err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE); /* Make us some memories */ - if(err) { -#if DEBUG - kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err); /* Say we died */ -#endif - return(err); /* Pass back the error */ + mp = hw_find_map(curpmap, curva, nextva); /* Find the mapping for this address */ + if((unsigned int)mp == mapRtBadLk) { /* Did we lock up ok? */ + panic("mapping_find: pmap lock failure - rc = %08X, pmap = %08X\n", mp, curpmap); /* Die... */ } -#if 1 - kprintf("pmap_map_block_opt: small; vaddr = %08X\n", *va); /* (TEST/DEBUG) */ -#endif + + if(!mp || !(mp->mpFlags & mpNest) || !full) break; /* Are we a nest or are we only going one deep? */ - for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */ - mapping_make(map->pmap, 0, *va + pg, pa + pg, prot, attr, 0); /* Map this page on in */ + if(mp->mpFlags & mpSpecial) { /* Don't chain through a special mapping */ + mp = 0; /* Set not found */ + break; } - return(KERN_SUCCESS); /* All done */ - } - - err = vm_map_block(map, va, &bnd, pa, size, prot); /* Go get an optimal allocation */ - if(err == KERN_INVALID_ADDRESS) { /* Can we try a brute force block mapping? */ - err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE); /* Make us some memories */ - if(err) { -#if DEBUG - kprintf("pmap_map_block_opt: non-optimal vm_allocate() returned %d\n", err); /* Say we died */ -#endif - return(err); /* Pass back the error */ + if(nestdepth++ > 64) { /* Have we nested too far down? */ + panic("mapping_find: too many nested pmaps - va = %016llX, curva = %016llX, pmap = %08X, curpmap = %08X\n", + va, curva, pmap, curpmap); } -#if 1 - kprintf("pmap_map_block_opt: non-optimal - vaddr = %08X\n", *va); /* (TEST/DEBUG) */ -#endif - pmap_map_block(map->pmap, *va, pa, size, prot, attr, 0); /* Set up a block mapped area */ - return KERN_SUCCESS; /* All done now */ - } - - if(err != KERN_SUCCESS) { /* We couldn't get any address range to map this... */ -#if DEBUG - kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err); /* Say we couldn' do it */ -#endif - return(err); + + curva = curva + mp->mpNestReloc; /* Relocate va to new pmap */ + curpmap = (pmap_t) pmapTrans[mp->mpSpace].pmapVAddr; /* Get the address of the nested pmap */ + mapping_drop_busy(mp); /* We have everything we need from the mapping */ + } -#if 1 - kprintf("pmap_map_block_opt: optimal - vaddr=%08X; bnd=%08X\n", *va, bnd); /* (TEST/DEBUG) */ -#endif - mapping_block_map_opt(map->pmap, *va, pa, bnd, size, prot, attr); /* Go build the maps */ - return(KERN_SUCCESS); /* All done */ + return mp; /* Return the mapping if we found one */ } - -#if 0 - /* - * Enters translations for odd-sized V=F blocks and merges adjacent or overlapping - * areas. - * - * Once blocks are merged, they act like one block, i.e., if you remove it, - * it all goes... 
+ * kern_return_t mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) - change the protection of a virtual page * - * This can only be used during boot. Ain't no way we can handle SMP - * or preemption easily, so we restrict it. We don't check either. We - * assume only skilled professional programmers will attempt using this - * function. We assume no responsibility, either real or imagined, for - * injury or death resulting from unauthorized use of this function. + * This routine takes a pmap and virtual address and changes + * the protection. If there are PTEs associated with the mappings, they will be invalidated before + * the protection is changed. * - * No user servicable parts inside. Notice to be removed by end-user only, - * under penalty of applicable federal and state laws. + * We return success if we change the protection or if there is no page mapped at va. We return failure if + * the va corresponds to a block mapped area or the mapping is permanent. * - * See descriptions of pmap_map_block. Ignore the part where we say we panic for - * overlapping areas. Note that we do panic if we can't merge. * */ - -void pmap_map_block_merge(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { /* Map an autogenned block */ - - register blokmap *blm, *oblm; - unsigned int pg; - spl_t s; -#if 1 - kprintf("pmap_map_block_merge: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */ - pmap, va, pa, size, prot, attr); -#endif +int mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) { /* Change protection of a virtual page */ - s=splhigh(); /* Don't bother from now on */ - if(size < ODDBLKMIN) { /* Is this below the minimum size? */ - for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */ - mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */ - } - return; /* All done */ - } - - blm = (blokmap *)mapping_alloc(); /* Get a block mapping */ + int ret; - blm->start = (unsigned int)va & -PAGE_SIZE; /* Get virtual block start */ - blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1); /* Get virtual block end */ - blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */ - -#if 1 - kprintf("pmap_map_block_merge: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */ - blm, blm->start, blm->end, blm->PTEr); -#endif + ret = hw_protect(pmap, va, ppc_prot(prot), nextva); /* Try to change the protect here */ - blm = (blokmap *)hw_cvp((mapping *)blm); /* Get the physical address of this */ - -#if 1 - kprintf("pmap_map_block_merge: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */ - blm, pmap->bmaps); -#endif + switch (ret) { /* Decode return code */ + + case mapRtOK: /* Changed */ + case mapRtNotFnd: /* Didn't find it */ + return mapRtOK; /* Ok, return... 
*/ + break; - if(oblm = hw_add_blk(pmap, blm)) { /* Add to list and make sure we don't overlap anything */ - panic("pmap_map_block_merge: block map overlap - blm = %08X\n", oblm); /* Squeak loudly and carry a big stick */ + case mapRtBlock: /* Block map, just ignore request */ + case mapRtNest: /* Nested pmap, just ignore request */ + return ret; /* Pass back return code */ + break; + + default: + panic("mapping_protect: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, va); + } -#if 1 - kprintf("pmap_map_block_merge: pmap->bmaps=%08X\n", /* (TEST/DEBUG) */ - blm, pmap->bmaps); -#endif - splx(s); /* Ok for interruptions now */ - - return; /* Return */ } -#endif /* - * void mapping_protect_phys(phys_entry *pp, vm_prot_t prot) - change the protection of a physical page + * void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) - change the protection of a physical page * * This routine takes a physical entry and runs through all mappings attached to it and changes * the protection. If there are PTEs associated with the mappings, they will be invalidated before - * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations - * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g., + * the protection is changed. There is no limitation on changes, e.g., * higher to lower, lower to higher. * + * Any mapping that is marked permanent is not changed + * * Phys_entry is unlocked. */ -void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked) { /* Change protection of all mappings to page */ - - spl_t spl; +void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) { /* Change protection of all mappings to page */ - debugLog2(9, pp->pte1, prot); /* end remap */ - spl=splhigh(); /* No interruptions during this */ - if(!locked) { /* Do we need to lock the physent? */ - if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */ - panic("\nmapping_protect: Timeout attempting to lock physical entry at %08X: %08X %08X\n", - pp, pp->phys_link, pp->pte1); /* Complain about timeout */ - } - } - - hw_prot(pp, ppc_prot(prot)); /* Go set the protection on this physical page */ - - if(!locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ - splx(spl); /* Restore interrupt state */ - debugLog2(10, pp->pte1, 0); /* end remap */ + unsigned int pindex; + phys_entry *physent; - return; /* Leave... */ -} - -/* - * void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) - change the protection of a virtual page - * - * This routine takes a pmap and virtual address and changes - * the protection. If there are PTEs associated with the mappings, they will be invalidated before - * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations - * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g., - * higher to lower, lower to higher. - * - */ - -void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) { /* Change protection of a virtual page */ - - mapping *mp, *mpv; - spl_t s; - - debugLog2(9, vaddr, pmap); /* start mapping_protect */ - s = splhigh(); /* Don't bother me */ - - mp = hw_lock_phys_vir(pmap->space, vaddr); /* Lock the physical entry for this mapping */ - - if(!mp) { /* Did we find one? 
*/ - splx(s); /* Restore the interrupt level */ - debugLog2(10, 0, 0); /* end mapping_pmap */ - return; /* Didn't find any... */ + physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */ + if(!physent) { /* Did we find the physical page? */ + panic("mapping_protect_phys: invalid physical page %08X\n", pa); } - if((unsigned int)mp & 1) { /* Did we timeout? */ - panic("mapping_protect: timeout locking physical entry\n"); /* Yeah, scream about it! */ - splx(s); /* Restore the interrupt level */ - return; /* Bad hair day... */ - } - - hw_prot_virt(mp, ppc_prot(prot)); /* Go set the protection on this virtual mapping */ - mpv = hw_cpv(mp); /* Get virtual address of mapping */ - if(mpv->physent) { /* If there is a physical page, */ - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ - } - splx(s); /* Restore interrupt state */ - debugLog2(10, mpv->PTEr, 0); /* end remap */ - - return; /* Leave... */ -} - -/* - * mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) Sets the default physical page attributes - * - * This routine takes a physical entry and sets the physical attributes. There can be no mappings - * associated with this page when we do it. - */ - -void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) { /* Sets the default physical page attributes */ + hw_walk_phys(physent, hwpSPrtPhy, hwpSPrtMap, hwpNoop, ppc_prot(prot)); /* Set the new protection for page and mappings */ - debugLog2(11, pp->pte1, prot); /* end remap */ - - if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */ - panic("\nmapping_phys_attr: Timeout attempting to lock physical entry at %08X: %08X %08X\n", - pp, pp->phys_link, pp->pte1); /* Complain about timeout */ - } - - hw_phys_attr(pp, ppc_prot(prot), wimg); /* Go set the default WIMG and protection */ - - hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ - debugLog2(12, pp->pte1, wimg); /* end remap */ - - return; /* Leave... */ -} - -/* - * void mapping_invall(phys_entry *pp) - invalidates all ptes associated with a page - * - * This routine takes a physical entry and runs through all mappings attached to it and invalidates - * any PTEs it finds. - * - * Interruptions must be disabled and the physical entry locked at entry. - */ - -void mapping_invall(struct phys_entry *pp) { /* Clear all PTEs pointing to a physical page */ - - hw_inv_all(pp); /* Go set the change bit of a physical page */ - return; /* Leave... */ } /* - * void mapping_clr_mod(phys_entry *pp) - clears the change bit of a physical page + * void mapping_clr_mod(ppnum_t pa) - clears the change bit of a physical page * * This routine takes a physical entry and runs through all mappings attached to it and turns - * off the change bit. If there are PTEs associated with the mappings, they will be invalidated before - * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations - * either (I don't think, maybe I'll change my mind later). - * - * Interruptions must be disabled and the physical entry locked at entry. + * off the change bit. 
*/ -void mapping_clr_mod(struct phys_entry *pp) { /* Clears the change bit of a physical page */ +void mapping_clr_mod(ppnum_t pa) { /* Clears the change bit of a physical page */ + + unsigned int pindex; + phys_entry *physent; + + physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */ + if(!physent) { /* Did we find the physical page? */ + panic("mapping_clr_mod: invalid physical page %08X\n", pa); + } - hw_clr_mod(pp); /* Go clear the change bit of a physical page */ + hw_walk_phys(physent, hwpNoop, hwpCCngMap, hwpCCngPhy, 0); /* Clear change for page and mappings */ return; /* Leave... */ } /* - * void mapping_set_mod(phys_entry *pp) - set the change bit of a physical page + * void mapping_set_mod(ppnum_t pa) - set the change bit of a physical page * * This routine takes a physical entry and runs through all mappings attached to it and turns - * on the change bit. If there are PTEs associated with the mappings, they will be invalidated before - * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations - * either (I don't think, maybe I'll change my mind later). - * - * Interruptions must be disabled and the physical entry locked at entry. + * on the change bit. */ -void mapping_set_mod(struct phys_entry *pp) { /* Sets the change bit of a physical page */ +void mapping_set_mod(ppnum_t pa) { /* Sets the change bit of a physical page */ + + unsigned int pindex; + phys_entry *physent; + + physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */ + if(!physent) { /* Did we find the physical page? */ + panic("mapping_set_mod: invalid physical page %08X\n", pa); + } - hw_set_mod(pp); /* Go set the change bit of a physical page */ + hw_walk_phys(physent, hwpNoop, hwpSCngMap, hwpSCngPhy, 0); /* Set change for page and mappings */ return; /* Leave... */ } /* - * void mapping_clr_ref(struct phys_entry *pp) - clears the reference bit of a physical page + * void mapping_clr_ref(ppnum_t pa) - clears the reference bit of a physical page * * This routine takes a physical entry and runs through all mappings attached to it and turns - * off the reference bit. If there are PTEs associated with the mappings, they will be invalidated before - * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations - * either (I don't think, maybe I'll change my mind later). - * - * Interruptions must be disabled at entry. + * off the reference bit. */ -void mapping_clr_ref(struct phys_entry *pp) { /* Clears the reference bit of a physical page */ - - mapping *mp; +void mapping_clr_ref(ppnum_t pa) { /* Clears the reference bit of a physical page */ - debugLog2(13, pp->pte1, 0); /* end remap */ - if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry for this mapping */ - panic("Lock timeout getting lock on physical entry\n"); /* Just die... */ + unsigned int pindex; + phys_entry *physent; + + physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */ + if(!physent) { /* Did we find the physical page? */ + panic("mapping_clr_ref: invalid physical page %08X\n", pa); } - hw_clr_ref(pp); /* Go clear the reference bit of a physical page */ - hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock physical entry */ - debugLog2(14, pp->pte1, 0); /* end remap */ + + hw_walk_phys(physent, hwpNoop, hwpCRefMap, hwpCRefPhy, 0); /* Clear reference for page and mappings */ return; /* Leave... 
*/ } /* - * void mapping_set_ref(phys_entry *pp) - set the reference bit of a physical page + * void mapping_set_ref(ppnum_t pa) - set the reference bit of a physical page * * This routine takes a physical entry and runs through all mappings attached to it and turns - * on the reference bit. If there are PTEs associated with the mappings, they will be invalidated before - * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations - * either (I don't think, maybe I'll change my mind later). - * - * Interruptions must be disabled and the physical entry locked at entry. + * on the reference bit. */ -void mapping_set_ref(struct phys_entry *pp) { /* Sets the reference bit of a physical page */ +void mapping_set_ref(ppnum_t pa) { /* Sets the reference bit of a physical page */ + + unsigned int pindex; + phys_entry *physent; + + physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */ + if(!physent) { /* Did we find the physical page? */ + panic("mapping_set_ref: invalid physical page %08X\n", pa); + } - hw_set_ref(pp); /* Go set the reference bit of a physical page */ + hw_walk_phys(physent, hwpNoop, hwpSRefMap, hwpSRefPhy, 0); /* Set reference for page and mappings */ return; /* Leave... */ } /* - * void mapping_tst_mod(phys_entry *pp) - test the change bit of a physical page + * void mapping_tst_mod(ppnum_t pa) - test the change bit of a physical page * * This routine takes a physical entry and runs through all mappings attached to it and tests - * the changed bit. If there are PTEs associated with the mappings, they will be invalidated before - * the changed bit is tested. We don't try to save the PTE. We won't worry about the LRU calculations - * either (I don't think, maybe I'll change my mind later). - * - * Interruptions must be disabled and the physical entry locked at entry. + * the changed bit. */ -boolean_t mapping_tst_mod(struct phys_entry *pp) { /* Tests the change bit of a physical page */ +boolean_t mapping_tst_mod(ppnum_t pa) { /* Tests the change bit of a physical page */ + + unsigned int pindex, rc; + phys_entry *physent; + + physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */ + if(!physent) { /* Did we find the physical page? */ + panic("mapping_tst_mod: invalid physical page %08X\n", pa); + } - return(hw_tst_mod(pp)); /* Go test the change bit of a physical page */ + rc = hw_walk_phys(physent, hwpTCngPhy, hwpTCngMap, hwpNoop, 0); /* Test change for page and mappings */ + return ((rc & (unsigned long)ppC) != 0); /* Leave with change bit */ } /* - * void mapping_tst_ref(phys_entry *pp) - tests the reference bit of a physical page + * void mapping_tst_ref(ppnum_t pa) - tests the reference bit of a physical page * * This routine takes a physical entry and runs through all mappings attached to it and tests - * the reference bit. If there are PTEs associated with the mappings, they will be invalidated before - * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations - * either (I don't think, maybe I'll change my mind later). - * - * Interruptions must be disabled and the physical entry locked at entry. + * the reference bit. 
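
All of the set/clear/test routines in this stretch are now thin wrappers that hand a different operation triple (physent op, per-mapping op, post op) to hw_walk_phys(). A hypothetical helper shows how they compose; page_clean_dirty() is invented for illustration and is not part of the patch.

/* Hypothetical helper built on the routines above -- illustrative only */
boolean_t page_clean_dirty(ppnum_t pa) {

	boolean_t dirty;

	dirty = mapping_tst_mod(pa);		/* Gather the change bit from page and all mappings */
	if(dirty) mapping_clr_mod(pa);		/* Clear it everywhere, invalidating PTEs first */
	return dirty;				/* Tell the caller what we saw */
}
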
*/ -boolean_t mapping_tst_ref(struct phys_entry *pp) { /* Tests the reference bit of a physical page */ +boolean_t mapping_tst_ref(ppnum_t pa) { /* Tests the reference bit of a physical page */ + + unsigned int pindex, rc; + phys_entry *physent; + + physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */ + if(!physent) { /* Did we find the physical page? */ + panic("mapping_tst_ref: invalid physical page %08X\n", pa); + } - return(hw_tst_ref(pp)); /* Go test the reference bit of a physical page */ + rc = hw_walk_phys(physent, hwpTRefPhy, hwpTRefMap, hwpNoop, 0); /* Test reference for page and mappings */ + return ((rc & (unsigned long)ppR) != 0); /* Leave with reference bit */ } /* - * void mapping_phys_init(physent, wimg) - fills in the default processor dependent areas of the phys ent + * phys_entry *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) - finds the physical entry for a page * - * Currently, this sets the default word 1 of the PTE. The only bits set are the WIMG bits + * This routine takes a physical page number and returns the phys_entry associated with it. It also + * calculates the bank address associated with the entry. */ -void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg) { /* Initializes hw specific storage attributes */ +phys_entry *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) { /* Finds the physical entry for the page */ - pp->pte1 = (pa & -PAGE_SIZE) | ((wimg << 3) & 0x00000078); /* Set the WIMG and phys addr in the default PTE1 */ - - return; /* Leave... */ + phys_entry *physent; + int i; + + for(i = 0; i < pmap_mem_regions_count; i++) { /* Walk through the list */ + if(!(unsigned int)pmap_mem_regions[i].mrPhysTab) continue; /* Skip any empty lists */ + if((pp < pmap_mem_regions[i].mrStart) || (pp > pmap_mem_regions[i].mrEnd)) continue; /* This isn't ours */ + + *pindex = (i * sizeof(mem_region_t)) / 4; /* Make the word index to this list */ + + return &pmap_mem_regions[i].mrPhysTab[pp - pmap_mem_regions[i].mrStart]; /* Return the physent pointer */ + } + + return (phys_entry *)0; /* Shucks, can't find it... */ + } + + /* * mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones * @@ -1080,8 +642,8 @@ void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg * The list will be replenshed from mapCtl.mapcrel if there are enough. Otherwise, * a new one is allocated. * - * This routine allocates and/or memory and must be called from a safe place. - * Currently, vm_pageout_scan is the safest place. We insure that the + * This routine allocates and/or frees memory and must be called from a safe place. + * Currently, vm_pageout_scan is the safest place. 
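
The lookup above is just a linear scan of the memory banks followed by a dense index into the bank's physent table. A standalone toy version (hypothetical types, user-space C, not kernel code) makes the arithmetic easy to test:

/* Toy model of the banked physent lookup -- hypothetical types, illustrative only */
#include <stdio.h>

typedef struct { unsigned int start, end; int *table; } bank_t;

static int *lookup(bank_t *banks, int nbanks, unsigned int pp) {
	int i;
	for(i = 0; i < nbanks; i++) {				/* Walk through the banks */
		if(!banks[i].table) continue;			/* Skip any empty banks */
		if((pp < banks[i].start) || (pp > banks[i].end)) continue;	/* This isn't ours */
		return &banks[i].table[pp - banks[i].start];	/* Dense index within the bank */
	}
	return 0;						/* Page is not managed RAM */
}

int main(void) {
	int tab[4] = {10, 11, 12, 13};
	bank_t banks[1] = {{0x100, 0x103, tab}};
	printf("%d\n", *lookup(banks, 1, 0x102));		/* Prints 12 */
	return 0;
}
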
*/ thread_call_t mapping_adjust_call; @@ -1089,14 +651,14 @@ static thread_call_data_t mapping_adjust_call_data; void mapping_adjust(void) { /* Adjust free mappings */ - kern_return_t retr; + kern_return_t retr = KERN_SUCCESS; mappingblok *mb, *mbn; spl_t s; int allocsize, i; extern int vm_page_free_count; if(mapCtl.mapcmin <= MAPPERBLOK) { - mapCtl.mapcmin = (mem_size / PAGE_SIZE) / 16; + mapCtl.mapcmin = (sane_size / PAGE_SIZE) / 16; #if DEBUG kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin); @@ -1127,10 +689,10 @@ void mapping_adjust(void) { /* Adjust free mappings */ mapCtl.mapcreln--; /* Back off the count */ allocsize = MAPPERBLOK; /* Show we allocated one block */ } - else { /* No free ones, try to get it */ + else { /* No free ones, try to get it */ allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK; /* Get the number of pages we need */ - + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ splx(s); /* Restore 'rupts */ @@ -1141,18 +703,21 @@ void mapping_adjust(void) { /* Adjust free mappings */ } if(retr == KERN_SUCCESS) break; /* We got some memory, bail out... */ } + allocsize = allocsize * MAPPERBLOK; /* Convert pages to number of maps allocated */ s = splhigh(); /* Don't bother from now on */ if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ panic("mapping_adjust - timeout getting control lock (2)\n"); /* Tell all and die */ } } + if (retr != KERN_SUCCESS) break; /* Failed to allocate, bail out... */ for(; allocsize > 0; allocsize -= MAPPERBLOK) { /* Release one block at a time */ mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */ mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE); /* Point to the next slot */ } + if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc) mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1)); } @@ -1173,11 +738,13 @@ void mapping_adjust(void) { /* Adjust free mappings */ while((unsigned int)mbn) { /* Toss 'em all */ mb = mbn->nextblok; /* Get the next */ + kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE); /* Release this mapping block */ + mbn = mb; /* Chain to the next */ } - __asm__ volatile("sync"); /* Make sure all is well */ + __asm__ volatile("eieio"); /* Make sure all is well */ mapCtl.mapcrecurse = 0; /* We are done now */ return; } @@ -1196,18 +763,53 @@ void mapping_free(struct mapping *mp) { /* Release a mapping */ mappingblok *mb, *mbn; spl_t s; - unsigned int full, mindx; + unsigned int full, mindx, lists; - mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5; /* Get index to mapping */ + mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 6; /* Get index to mapping */ mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE); /* Point to the mapping block */ + lists = (mp->mpFlags & mpLists); /* get #lists */ + if ((lists == 0) || (lists > kSkipListMaxLists)) /* panic if out of range */ + panic("mapping_free: mpLists invalid\n"); + +#if 0 + mp->mpFlags = 0x99999999; /* (BRINGUP) */ + mp->mpSpace = 0x9999; /* (BRINGUP) */ + mp->mpBSize = 0x9999; /* (BRINGUP) */ + mp->mpPte = 0x99999998; /* (BRINGUP) */ + mp->mpPAddr = 0x99999999; /* (BRINGUP) */ + mp->mpVAddr = 0x9999999999999999ULL; /* (BRINGUP) */ + mp->mpAlias = 0x9999999999999999ULL; /* (BRINGUP) */ + mp->mpList0 = 0x9999999999999999ULL; /* (BRINGUP) */ + mp->mpList[0] = 0x9999999999999999ULL; /* (BRINGUP) */ + mp->mpList[1] = 0x9999999999999999ULL; /* (BRINGUP) */ + mp->mpList[2] = 
0x9999999999999999ULL; /* (BRINGUP) */ + + if(lists > mpBasicLists) { /* (BRINGUP) */ + mp->mpList[3] = 0x9999999999999999ULL; /* (BRINGUP) */ + mp->mpList[4] = 0x9999999999999999ULL; /* (BRINGUP) */ + mp->mpList[5] = 0x9999999999999999ULL; /* (BRINGUP) */ + mp->mpList[6] = 0x9999999999999999ULL; /* (BRINGUP) */ + mp->mpList[7] = 0x9999999999999999ULL; /* (BRINGUP) */ + mp->mpList[8] = 0x9999999999999999ULL; /* (BRINGUP) */ + mp->mpList[9] = 0x9999999999999999ULL; /* (BRINGUP) */ + mp->mpList[10] = 0x9999999999999999ULL; /* (BRINGUP) */ + } +#endif + + s = splhigh(); /* Don't bother from now on */ if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */ } - full = !(mb->mapblokfree[0] | mb->mapblokfree[1] | mb->mapblokfree[2] | mb->mapblokfree[3]); /* See if full now */ + full = !(mb->mapblokfree[0] | mb->mapblokfree[1]); /* See if full now */ mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31)); /* Flip on the free bit */ + if ( lists > mpBasicLists ) { /* if big block, light the 2nd bit too */ + mindx++; + mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31)); + mapCtl.mapcfree++; + mapCtl.mapcinuse--; + } if(full) { /* If it was full before this: */ mb->nextblok = mapCtl.mapcnext; /* Move head of list to us */ @@ -1222,8 +824,7 @@ void mapping_free(struct mapping *mp) { /* Release a mapping */ mapCtl.mapcfreec++; /* Count total calls */ if(mapCtl.mapcfree > mapCtl.mapcmin) { /* Should we consider releasing this? */ - if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1] & mb->mapblokfree[2] & mb->mapblokfree[3]) - == 0xFFFFFFFF) { /* See if empty now */ + if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1]) == 0xFFFFFFFF) { /* See if empty now */ if(mapCtl.mapcnext == mb) { /* Are we first on the list? */ mapCtl.mapcnext = mb->nextblok; /* Unchain us */ @@ -1265,70 +866,169 @@ void mapping_free(struct mapping *mp) { /* Release a mapping */ /* - * mapping_alloc(void) - obtain a mapping from the free list + * mapping_alloc(lists) - obtain a mapping from the free list * - * This routine takes a mapping off of the free list and returns it's address. + * This routine takes a mapping off of the free list and returns its address. + * The mapping is zeroed, and its mpLists count is set. The caller passes in + * the number of skiplists it would prefer; if this number is greater than + * mpBasicLists (i.e., 4) then we need to allocate a 128-byte mapping, which is + * just two consecutive free entries coalesced into one. If we cannot find + * two consecutive free entries, we clamp the list count down to mpBasicLists + * and return a basic 64-byte node. Our caller never knows the difference. * - * We do this by finding a free entry in the first block and allocating it. - * If this allocation empties the block, we remove it from the free list. + * If this allocation empties a block, we remove it from the free list. * If this allocation drops the total number of free entries below a threshold, * we allocate a new block. 
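
The free-list bookkeeping above hinges on one piece of arithmetic: a mapping's slot number is its page offset divided by 64, and slot n lives at bit (n & 31) of word (n >> 5), counted from the most significant bit. A standalone check of that math (illustrative user-space C, not kernel code):

/* Toy check of the free-bitmap arithmetic -- illustrative only */
#include <stdio.h>

#define PAGE_SIZE 4096

int main(void) {
	unsigned int freebits[2] = {0, 0};
	unsigned int addr = 0x00002A80;				/* Some 64-byte mapping's address */
	unsigned int mindx = (addr & (PAGE_SIZE - 1)) >> 6;	/* Slot 0..63 within the block page */

	freebits[mindx >> 5] |= (0x80000000u >> (mindx & 31));	/* Flip on the free bit, MSB-first */
	printf("slot %u -> word %u bit %u\n", mindx, mindx >> 5, mindx & 31);	/* slot 42 -> word 1 bit 10 */
	return 0;
}
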
* */ -mapping *mapping_alloc(void) { /* Obtain a mapping */ +mapping *mapping_alloc(int lists) { /* Obtain a mapping */ register mapping *mp; mappingblok *mb, *mbn; spl_t s; int mindx; kern_return_t retr; - + int big = (lists > mpBasicLists); /* set flag if big block req'd */ + pmap_t refpmap, ckpmap; + unsigned int space, i; + int ref_count; + addr64_t va, nextva; + extern pmap_t free_pmap_list; + extern int free_pmap_count; + decl_simple_lock_data(extern,free_pmap_lock) + boolean_t found_mapping; + boolean_t do_rescan; + s = splhigh(); /* Don't bother from now on */ if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */ } - if(!(mb = mapCtl.mapcnext)) { /* Get the first block entry */ - unsigned int i; - struct mappingflush mappingflush; - PCA *pca_min, *pca_max; - PCA *pca_base; - - pca_min = (PCA *)(hash_table_base+hash_table_size); - pca_max = (PCA *)(hash_table_base+hash_table_size+hash_table_size); - - while (mapCtl.mapcfree <= (MAPPERBLOK*2)) { - mapCtl.mapcflush.mappingcnt = 0; - pca_base = mapCtl.mapcflush.pcaptr; - do { - hw_select_mappings(&mapCtl.mapcflush); - mapCtl.mapcflush.pcaptr++; - if (mapCtl.mapcflush.pcaptr >= pca_max) - mapCtl.mapcflush.pcaptr = pca_min; - } while ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr != pca_base)); - - if ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr == pca_base)) { - hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); - panic("mapping_alloc - all mappings are wired\n"); + if(!((unsigned int)mapCtl.mapcnext)) { /* Are there any free mappings? */ + +/* + * No free mappings. First, there may be some mapping blocks on the "to be released" + * list. If so, rescue one. Otherwise, try to steal a couple blocks worth. 
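
The allocation path just below asks mapalc2(mb) for two consecutive free bits when a 128-byte mapping is wanted. mapalc2() itself is not shown in this hunk; the classic core of such a search, sketched standalone under the same MSB-first bit numbering, is b & (b << 1). A toy illustration, not the kernel's implementation:

/* Toy sketch of the two-consecutive-free-bits search -- illustrative only */
#include <stdio.h>

static int first_free_pair(unsigned int b) {		/* Returns leading slot, or -1 */
	unsigned int pairs = b & (b << 1);		/* Marks the leading bit of every adjacent free pair */
	if(pairs == 0) return -1;			/* No two consecutive free slots in this word */
	return __builtin_clz(pairs);			/* GCC/Clang builtin; the kernel uses PPC cntlzw */
}

int main(void) {
	/* Slots 0 and 2 free: no pair.  Adding slot 3 makes the pair start at slot 2. */
	printf("%d\n", first_free_pair(0xA0000000u));	/* Prints -1 */
	printf("%d\n", first_free_pair(0xB0000000u));	/* Prints 2 */
	return 0;
}
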
+ */ + + if(mbn = mapCtl.mapcrel) { /* Try to rescue a block from impending doom */ + mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */ + mapCtl.mapcreln--; /* Back off the count */ + mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */ + goto rescued; + } + + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); + + simple_lock(&free_pmap_lock); + + if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ + panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */ + } + + if (!((unsigned int)mapCtl.mapcnext)) { + + refpmap = (pmap_t)cursor_pmap->pmap_link.next; + space = mapCtl.mapcflush.spacenum; + while (refpmap != cursor_pmap) { + if(((pmap_t)(refpmap->pmap_link.next))->spaceNum > space) break; + refpmap = (pmap_t)refpmap->pmap_link.next; } - mappingflush = mapCtl.mapcflush; - hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); - splx(s); - for (i=0;i<mappingflush.mappingcnt;i++) - mapping_remove(mappingflush.mapping[i].pmap, - mappingflush.mapping[i].offset); - s = splhigh(); - if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { - panic("mapping_alloc - timeout getting control lock\n"); - } + + ckpmap = refpmap; + va = mapCtl.mapcflush.addr; + found_mapping = FALSE; + + while (mapCtl.mapcfree <= (MAPPERBLOK*2)) { + + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); + + ckpmap = (pmap_t)ckpmap->pmap_link.next; + + if ((ckpmap->stats.resident_count != 0) && (ckpmap != kernel_pmap)) { + do_rescan = TRUE; + for (i=0;i<8;i++) { + mp = hw_purge_map(ckpmap, va, &nextva); + + if((unsigned int)mp & mapRetCode) { + panic("mapping_alloc: hw_purge_map failed - pmap = %08X, va = %16llX, code = %08X\n", ckpmap, va, mp); + } + + if(!mp) { + if (do_rescan) + do_rescan = FALSE; + else + break; + } else { + mapping_free(mp); + found_mapping = TRUE; + } + + va = nextva; + } + } + + if (ckpmap == refpmap) { + if (found_mapping == FALSE) + panic("no valid pmap to purge mappings\n"); + else + found_mapping = FALSE; + } + + if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ + panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */ + } + } + + mapCtl.mapcflush.spacenum = ckpmap->spaceNum; + mapCtl.mapcflush.addr = nextva; } - mb = mapCtl.mapcnext; - } - - if(!(mindx = mapalc(mb))) { /* Allocate a slot */ - panic("mapping_alloc - empty mapping block detected at %08X\n", mb); /* Not allowed to find none */ - } + + simple_unlock(&free_pmap_lock); + } + +rescued: + + mb = mapCtl.mapcnext; + + if ( big ) { /* if we need a big (128-byte) mapping */ + mapCtl.mapcbig++; /* count attempts to allocate a big mapping */ + mbn = NULL; /* this will be prev ptr */ + mindx = 0; + while( mb ) { /* loop over mapping blocks with free entries */ + mindx = mapalc2(mb); /* try for 2 consecutive free bits in this block */ + + if ( mindx ) break; /* exit loop if we found them */ + mbn = mb; /* remember previous block */ + mb = mb->nextblok; /* move on to next block */ + } + if ( mindx == 0 ) { /* if we couldn't find 2 consecutive bits... 
*/ + mapCtl.mapcbigfails++; /* count failures */ + big = 0; /* forget that we needed a big mapping */ + lists = mpBasicLists; /* clamp list count down to the max in a 64-byte mapping */ + mb = mapCtl.mapcnext; /* back to the first block with a free entry */ + } + else { /* if we did find a big mapping */ + mapCtl.mapcfree--; /* Decrement free count twice */ + mapCtl.mapcinuse++; /* Bump in use count twice */ + if ( mindx < 0 ) { /* if we just used the last 2 free bits in this block */ + if (mbn) { /* if this wasn't the first block */ + mindx = -mindx; /* make positive */ + mbn->nextblok = mb->nextblok; /* unlink this one from the middle of block list */ + if (mb == mapCtl.mapclast) { /* if we emptied last block */ + mapCtl.mapclast = mbn; /* then prev block is now last */ + } + } + } + } + } + + if ( !big ) { /* if we need a small (64-byte) mapping */ + if(!(mindx = mapalc1(mb))) /* Allocate a 1-bit slot */ + panic("mapping_alloc - empty mapping block detected at %08X\n", mb); + } if(mindx < 0) { /* Did we just take the last one */ mindx = -mindx; /* Make positive */ @@ -1349,6 +1049,7 @@ mapping *mapping_alloc(void) { /* Obtain a mapping */ * For early boot, we are set up to only rescue one block at a time. This is because we prime * the release list with as much as we need until threads start. */ + if(mapCtl.mapcfree < mapCtl.mapcmin) { /* See if we need to replenish */ if(mbn = mapCtl.mapcrel) { /* Try to rescue a block from impending doom */ mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */ @@ -1368,7 +1069,9 @@ mapping *mapping_alloc(void) { /* Obtain a mapping */ splx(s); /* Restore 'rupts */ mp = &((mapping *)mb)[mindx]; /* Point to the allocated mapping */ - __asm__ volatile("dcbz 0,%0" : : "r" (mp)); /* Clean it up */ + mp->mpFlags = lists; /* set the list count */ + + return mp; /* Send it back... */ } @@ -1380,7 +1083,7 @@ consider_mapping_adjust() s = splhigh(); /* Don't bother from now on */ if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ - panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */ + panic("consider_mapping_adjust -- lock timeout\n"); } if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) { @@ -1399,8 +1102,15 @@ consider_mapping_adjust() /* * void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list * - * The mapping block is a page size area on a page boundary. It contains 1 header and 127 - * mappings. This call adds and initializes a block for use. + * The mapping block is a page size area on a page boundary. It contains 1 header and 63 + * mappings. This call adds and initializes a block for use. Mappings come in two sizes, + * 64 and 128 bytes (the only difference is the number of skip-lists.) When we allocate a + * 128-byte mapping we just look for two consecutive free 64-byte mappings, so most of the + * code only deals with "basic" 64-byte mappings. This works for two reasons: + * - Only one in 256 mappings is big, so they are rare. + * - If we cannot find two consecutive free mappings, we just return a small one. + * There is no problem with doing this, except a minor performance degradation. + * Therefore, all counts etc in the mapping control structure are in units of small blocks. * * The header contains a chain link, bit maps, a virtual to real translation mask, and * some statistics. 
Bit maps map each slot on the page (bit 0 is not used because it @@ -1432,33 +1142,38 @@ void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) { mappingblok *mb; spl_t s; int i; - unsigned int raddr; + addr64_t raddr; + ppnum_t pp; - mb = (mappingblok *)mbl; /* Start of area */ - + mb = (mappingblok *)mbl; /* Start of area */ if(perm >= 0) { /* See if we need to initialize the block */ if(perm) { - raddr = (unsigned int)mbl; /* Perm means V=R */ + raddr = (addr64_t)((unsigned int)mbl); /* Perm means V=R */ mb->mapblokflags = mbPerm; /* Set perm */ +// mb->mapblokflags |= (unsigned int)mb; /* (BRINGUP) */ } else { - raddr = kvtophys(mbl); /* Get real address */ + pp = pmap_find_phys(kernel_pmap, (addr64_t)mbl); /* Get the physical page */ + if(!pp) { /* What gives? Where's the page? */ + panic("mapping_free_init: could not find translation for vaddr %016llX\n", (addr64_t)mbl); + } + + raddr = (addr64_t)pp << 12; /* Convert physical page to physical address */ mb->mapblokflags = 0; /* Set not perm */ +// mb->mapblokflags |= (unsigned int)mb; /* (BRINGUP) */ } - mb->mapblokvrswap = raddr ^ (unsigned int)mbl; /* Form translation mask */ + mb->mapblokvrswap = raddr ^ (addr64_t)((unsigned int)mbl); /* Form translation mask */ mb->mapblokfree[0] = 0x7FFFFFFF; /* Set first 32 (minus 1) free */ mb->mapblokfree[1] = 0xFFFFFFFF; /* Set next 32 free */ - mb->mapblokfree[2] = 0xFFFFFFFF; /* Set next 32 free */ - mb->mapblokfree[3] = 0xFFFFFFFF; /* Set next 32 free */ } s = splhigh(); /* Don't bother from now on */ if(!locked) { /* Do we need the lock? */ if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ - panic("mapping_free_init - timeout getting control lock\n"); /* Tell all and die */ + panic("mapping_free_init: timeout getting control lock\n"); /* Tell all and die */ } } @@ -1484,7 +1199,8 @@ void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) { if(!locked) { /* Do we need to unlock? */ hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ } - splx(s); /* Restore 'rupts */ + + splx(s); /* Restore 'rupts */ return; /* All done, leave... */ } @@ -1521,9 +1237,9 @@ void mapping_prealloc(unsigned int size) { /* Preallocates mapppings for lar splx(s); /* Restore 'rupts */ return; } - if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */ + if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */ hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ - splx(s); /* Restore 'rupts */ + splx(s); /* Restore 'rupts */ return; } nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks to get */ @@ -1533,9 +1249,8 @@ void mapping_prealloc(unsigned int size) { /* Preallocates mapppings for lar for(i = 0; i < nmapb; i++) { /* Allocate 'em all */ retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */ - if(retr != KERN_SUCCESS) { /* Did we get some memory? */ + if(retr != KERN_SUCCESS) /* Did we get some memory? 
*/ break; - } mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize on to the release queue */ } if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc) @@ -1586,7 +1301,7 @@ void mapping_free_prime(void) { /* Primes the mapping block release list mappingblok *mbn; vm_offset_t mapping_min; - retr = kmem_suballoc(kernel_map, &mapping_min, MAPPING_MAP_SIZE, + retr = kmem_suballoc(kernel_map, &mapping_min, sane_size / 16, FALSE, TRUE, &mapping_map); if (retr != KERN_SUCCESS) @@ -1629,86 +1344,41 @@ mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_ /* - * vm_offset_t mapping_p2v(pmap_t pmap, phys_entry *pp) - Finds first virtual mapping of a physical page in a space + * addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa) - Finds first virtual mapping of a physical page in a space * - * Gets a lock on the physical entry. Then it searches the list of attached mappings for one with - * the same space. If it finds it, it returns the virtual address. + * First looks up the physical entry associated with the physical page. Then searches the alias + * list for a matching pmap. It grabs the virtual address from the mapping, drops busy, and returns + * that. * - * Note that this will fail if the pmap has nested pmaps in it. Fact is, I'll check - * for it and fail it myself... */ -vm_offset_t mapping_p2v(pmap_t pmap, struct phys_entry *pp) { /* Finds first virtual mapping of a physical page in a space */ - - spl_t s; - register mapping *mp, *mpv; - vm_offset_t va; +addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa) { /* Finds first virtual mapping of a physical page in a space */ - if(pmap->vflags & pmapAltSeg) return 0; /* If there are nested pmaps, fail immediately */ + spl_t s; + mapping *mp; + unsigned int pindex; + phys_entry *physent; + addr64_t va; - s = splhigh(); - if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */ - splx(s); /* Restore 'rupts */ - panic("mapping_p2v: timeout getting lock on physent\n"); /* Arrrgghhhh! */ - return(0); /* Should die before here */ - } - - va = 0; /* Assume failure */ - - for(mpv = hw_cpv(pp->phys_link); mpv; mpv = hw_cpv(mpv->next)) { /* Scan 'em all */ - - if(!(((mpv->PTEv >> 7) & 0x000FFFFF) == pmap->space)) continue; /* Skip all the rest if this is not the right space... */ - - va = ((((unsigned int)mpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000; /* Backward hash to the wrapped VADDR */ - va = va | ((mpv->PTEv << 1) & 0xF0000000); /* Move in the segment number */ - va = va | ((mpv->PTEv << 22) & 0x0FC00000); /* Add in the API for the top of the address */ - break; /* We're done now, pass virtual address back */ + physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */ + if(!physent) { /* Did we find the physical page? */ + panic("mapping_p2v: invalid physical page %08X\n", pa); } - - hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */ - splx(s); /* Restore 'rupts */ - return(va); /* Return the result or 0... 
*/ -} -/* - * kvtophys(addr) - * - * Convert a kernel virtual address to a physical address - */ -vm_offset_t kvtophys(vm_offset_t va) { - - register mapping *mp, *mpv; - register blokmap *bmp; - register vm_offset_t pa; - spl_t s; - - s=splhigh(); /* Don't bother from now on */ - mp = hw_lock_phys_vir(PPC_SID_KERNEL, va); /* Find mapping and lock the physical entry for this mapping */ + s = splhigh(); /* Make sure interruptions are disabled */ - if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */ - splx(s); /* Restore 'rupts */ - panic("kvtophys: timeout obtaining lock on physical entry (vaddr=%08X)\n", va); /* Scream bloody murder! */ - return 0; - } + mp = (mapping *) hw_find_space(physent, pmap->space); /* Go find the first mapping to the page from the requested pmap */ - if(!mp) { /* If it was not a normal page */ - pa = hw_cvp_blk(kernel_pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */ - splx(s); /* Restore 'rupts */ - return pa; /* Return physical address */ + if(mp) { /* Did we find one? */ + va = mp->mpVAddr & -4096; /* If so, get the cleaned up vaddr */ + mapping_drop_busy(mp); /* Go ahead and release the mapping now */ } + else va = 0; /* Return failure */ - mpv = hw_cpv(mp); /* Convert to virtual addressing */ + splx(s); /* Restore 'rupts */ - if(!mpv->physent) { /* Was there a physical entry? */ - pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Get physical address from physent */ - } - else { - pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Get physical address from physent */ - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ - } + return va; /* Bye, bye... */ - splx(s); /* Restore 'rupts */ - return pa; /* Return the physical address... */ } /* @@ -1720,17 +1390,27 @@ vm_offset_t kvtophys(vm_offset_t va) { vm_offset_t phystokv(vm_offset_t pa) { - struct phys_entry *pp; - vm_offset_t va; + addr64_t va; + ppnum_t pp; - pp = pmap_find_physentry(pa); /* Find the physical entry */ - if (PHYS_NULL == pp) { - return (vm_offset_t)NULL; /* If none, return null */ - } - if(!(va=mapping_p2v(kernel_pmap, pp))) { + pp = pa >> 12; /* Convert to a page number */ + + if(!(va = mapping_p2v(kernel_pmap, pp))) { return 0; /* Can't find it, return 0... */ } - return (va | (pa & (PAGE_SIZE-1))); /* Build and return VADDR... */ + + return (va | (pa & (PAGE_SIZE - 1))); /* Build and return VADDR... */ + +} + +/* + * kvtophys(addr) + * + * Convert a kernel virtual address to a physical address + */ +vm_offset_t kvtophys(vm_offset_t va) { + + return pmap_extract(kernel_pmap, va); /* Look it up in the kernel pmap */ } @@ -1752,346 +1432,213 @@ void ignore_zero_fault(boolean_t type) { /* Sets up to ignore or honor any fa } -/* - * Allocates a range of virtual addresses in a map as optimally as - * possible for block mapping. The start address is aligned such - * that a minimum number of power-of-two sized/aligned blocks is - * required to cover the entire range. +/* + * Copies data between a physical page and a virtual page, or 2 physical. This is used to + * move data from the kernel to user state. Note that the "which" parm + * says which of the parameters is physical and if we need to flush sink/source. + * Note that both addresses may be physical but only one may be virtual * - * We also use a mask of valid block sizes to determine optimality. + * The rules are that the size can be anything. 
Either address can be on any boundary + * and span pages. The physical data must be contiguous as must the virtual. * - * Note that the passed in pa is not actually mapped to the selected va, - * rather, it is used to figure the optimal boundary. The actual - * V to R mapping is done externally. + * We can block when we try to resolve the virtual address at each page boundary. + * We don't check protection on the physical page. * - * This function will return KERN_INVALID_ADDRESS if an optimal address - * can not be found. It is not necessarily a fatal error, the caller may still be - * still be able to do a non-optimal assignment. - */ - -kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa, - vm_size_t size, vm_prot_t prot) { - - vm_map_entry_t entry, next, tmp_entry, new_entry; - vm_offset_t start, end, algnpa, endadr, strtadr, curradr; - vm_offset_t boundary; - - unsigned int maxsize, minsize, leading, trailing; - - assert(page_aligned(pa)); - assert(page_aligned(size)); - - if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT); /* Dude, like we need a target map */ - - minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */ - maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */ - - boundary = 0x80000000 >> cntlzw(size); /* Get optimal boundary */ - if(boundary > maxsize) boundary = maxsize; /* Pin this at maximum supported hardware size */ - - vm_map_lock(map); /* No touchee no mapee */ - - for(; boundary > minsize; boundary >>= 1) { /* Try all optimizations until we find one */ - if(!(boundary & blokValid)) continue; /* Skip unavailable block sizes */ - algnpa = (pa + boundary - 1) & -boundary; /* Round physical up */ - leading = algnpa - pa; /* Get leading size */ - - curradr = 0; /* Start low */ - - while(1) { /* Try all possible values for this opt level */ - - curradr = curradr + boundary; /* Get the next optimal address */ - strtadr = curradr - leading; /* Calculate start of optimal range */ - endadr = strtadr + size; /* And now the end */ - - if((curradr < boundary) || /* Did address wrap here? */ - (strtadr > curradr) || /* How about this way? */ - (endadr < strtadr)) break; /* We wrapped, try next lower optimization... */ - - if(strtadr < map->min_offset) continue; /* Jump to the next higher slot... */ - if(endadr > map->max_offset) break; /* No room right now... */ - - if(vm_map_lookup_entry(map, strtadr, &entry)) continue; /* Find slot, continue if allocated... */ - - next = entry->vme_next; /* Get the next entry */ - if((next == vm_map_to_entry(map)) || /* Are we the last entry? */ - (next->vme_start >= endadr)) { /* or do we end before the next entry? */ - - new_entry = vm_map_entry_insert(map, entry, strtadr, endadr, /* Yes, carve out our entry */ - VM_OBJECT_NULL, - 0, /* Offset into object of 0 */ - FALSE, /* No copy needed */ - FALSE, /* Not shared */ - FALSE, /* Not in transition */ - prot, /* Set the protection to requested */ - prot, /* We can't change protection */ - VM_BEHAVIOR_DEFAULT, /* Use default behavior, but makes no never mind, - 'cause we don't page in this area */ - VM_INHERIT_DEFAULT, /* Default inheritance */ - 0); /* Nothing is wired */ - - vm_map_unlock(map); /* Let the world see it all */ - *va = strtadr; /* Tell everyone */ - *bnd = boundary; /* Say what boundary we are aligned to */ - return(KERN_SUCCESS); /* Leave, all is right with the world... 
*/ - } - } - } - - vm_map_unlock(map); /* Couldn't find a slot */ - return(KERN_INVALID_ADDRESS); -} - -/* - * Copies data from a physical page to a virtual page. This is used to - * move data from the kernel to user state. - * - * Note that it is invalid to have a source that spans a page boundry. - * This can block. - * We don't check protection either. - * And we don't handle a block mapped sink address either. + * Note that we will not check the entire range and if a page translation fails, + * we will stop with partial contents copied. * */ -kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size) { +kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which) { vm_map_t map; kern_return_t ret; - unsigned int spaceid; - int left, csize; - vm_offset_t pa; - register mapping *mpv, *mp; + addr64_t pa, nextva, vaddr, paddr; + register mapping *mp; spl_t s; + unsigned int sz, left, lop, csize; + int needtran, bothphys; + unsigned int pindex; + phys_entry *physent; + vm_prot_t prot; + int orig_which; - if((size == 0) || ((source ^ (source + size - 1)) & -PAGE_SIZE)) return KERN_FAILURE; /* We don't allow a source page crosser */ - map = current_act()->map; /* Get the current map */ + orig_which = which; - while(size) { - s=splhigh(); /* Don't bother me */ - - spaceid = map->pmap->pmapSegs[(unsigned int)sink >> 28]; /* Get space ID. Don't bother to clean top bits */ + map = (which & cppvKmap) ? kernel_map : current_map_fast(); - mp = hw_lock_phys_vir(spaceid, sink); /* Lock the physical entry for the sink */ - if(!mp) { /* Was it there? */ - splx(s); /* Restore the interrupt level */ - ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0); /* Didn't find it, try to fault it in... */ - if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */ + if((which & (cppvPsrc | cppvPsnk)) == 0 ) { /* Make sure that only one is virtual */ + panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */ + } + + bothphys = 1; /* Assume both are physical */ + + if(!(which & cppvPsnk)) { /* Is there a virtual page here? */ + vaddr = sink; /* Sink side is virtual */ + bothphys = 0; /* Show both aren't physical */ + prot = VM_PROT_READ | VM_PROT_WRITE; /* Sink always must be read/write */ + } else if(!(which & cppvPsrc)) { /* Source side is virtual */ + vaddr = source; /* Source side is virtual */ + bothphys = 0; /* Show both aren't physical */ + prot = VM_PROT_READ; /* Virtual source is always read only */ + } - return KERN_FAILURE; /* Didn't find any, return no good... */ - } - if((unsigned int)mp&1) { /* Did we timeout? */ - panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", sink); /* Yeah, scream about it! */ - splx(s); /* Restore the interrupt level */ - return KERN_FAILURE; /* Bad hair day, return FALSE... */ - } + needtran = 1; /* Show we need to map the virtual the first time */ + s = splhigh(); /* Don't bother me */ - mpv = hw_cpv(mp); /* Convert mapping block to virtual */ + while(size) { - if(mpv->PTEr & 1) { /* Are we write protected? yes, could indicate COW */ - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */ - splx(s); /* Restore the interrupt level */ - ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0); /* check for a COW area */ - if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */ - return KERN_FAILURE; /* Didn't find any, return no good... 
*/ + if(!bothphys && (needtran || !(vaddr & 4095LL))) { /* If first time or we stepped onto a new page, we need to translate */ + if(!needtran) { /* If this is not the first translation, we need to drop the old busy */ + mapping_drop_busy(mp); /* Release the old mapping now */ + } + needtran = 0; + + while(1) { + mp = mapping_find(map->pmap, vaddr, &nextva, 1); /* Find and busy the mapping */ + if(!mp) { /* Was it there? */ + if(per_proc_info[cpu_number()].istackptr == 0) + panic("copypv: No valid mapping on memory %s %x", "RD", vaddr); + + splx(s); /* Restore the interrupt level */ + ret = vm_fault(map, trunc_page_32((vm_offset_t)vaddr), prot, FALSE, FALSE, NULL, 0); /* Didn't find it, try to fault it in... */ + + if(ret != KERN_SUCCESS) return KERN_FAILURE; /* Didn't find any, return no good... */ + + s = splhigh(); /* Don't bother me */ + continue; /* Go try for the map again... */ + + } + if (mp->mpVAddr & mpI) { /* cache inhibited, so force the appropriate page to be flushed before */ + if (which & cppvPsrc) /* and after the copy to avoid cache paradoxes */ + which |= cppvFsnk; + else + which |= cppvFsrc; + } else + which = orig_which; + + /* Note that we have to have the destination writable. So, if we already have it, or we are mapping the source, + we can just leave. + */ + if((which & cppvPsnk) || !(mp->mpVAddr & 1)) break; /* We got it mapped R/W or the source is not virtual, leave... */ + + mapping_drop_busy(mp); /* Go ahead and release the mapping for now */ + if(per_proc_info[cpu_number()].istackptr == 0) + panic("copypv: No valid mapping on memory %s %x", "RDWR", vaddr); + splx(s); /* Restore the interrupt level */ + + ret = vm_fault(map, trunc_page_32((vm_offset_t)vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, FALSE, NULL, 0); /* check for a COW area */ + if (ret != KERN_SUCCESS) return KERN_FAILURE; /* We couldn't get it R/W, leave in disgrace... */ + s = splhigh(); /* Don't bother me */ + } + paddr = ((addr64_t)mp->mpPAddr << 12) + (vaddr - (mp->mpVAddr & -4096LL)); /* construct the physical address... this calculation works */ + /* properly on both single page and block mappings */ + if(which & cppvPsrc) sink = paddr; /* If source is physical, then the sink is virtual */ + else source = paddr; /* Otherwise the source is virtual */ } - left = PAGE_SIZE - (sink & PAGE_MASK); /* Get amount left on sink page */ - - csize = size < left ? 
size : left; /* Set amount to copy this pass */ - - pa = (vm_offset_t)((mpv->physent->pte1 & ~PAGE_MASK) | ((unsigned int)sink & PAGE_MASK)); /* Get physical address of sink */ - - bcopy_physvir((char *)source, (char *)pa, csize); /* Do a physical copy, virtually */ - - hw_set_mod(mpv->physent); /* Go set the change of the sink */ - - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */ - splx(s); /* Open up for interrupts */ - - sink += csize; /* Move up to start of next page */ - source += csize; /* Move up source */ - size -= csize; /* Set amount for next pass */ - } - return KERN_SUCCESS; -} + + lop = (unsigned int)(4096LL - (sink & 4095LL)); /* Assume sink smallest */ + if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL)); /* No, source is smaller */ + + csize = size; /* Assume we can copy it all */ + if(lop < size) csize = lop; /* Nope, we can't do it all */ + + if(which & cppvFsrc) flush_dcache64(source, csize, 1); /* If requested, flush source before move */ + if(which & cppvFsnk) flush_dcache64(sink, csize, 1); /* If requested, flush sink before move */ + bcopy_physvir(source, sink, csize); /* Do a physical copy, virtually */ + + if(which & cppvFsrc) flush_dcache64(source, csize, 1); /* If requested, flush source after move */ + if(which & cppvFsnk) flush_dcache64(sink, csize, 1); /* If requested, flush sink after move */ /* - * copy 'size' bytes from physical to physical address - * the caller must validate the physical ranges - * - * if flush_action == 0, no cache flush necessary - * if flush_action == 1, flush the source - * if flush_action == 2, flush the dest - * if flush_action == 3, flush both source and dest + * Note that for certain ram disk flavors, we may be copying outside of known memory. + * Therefore, before we try to mark it modified, we check if it exists. 
*/ -kern_return_t copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, unsigned int flush_action) { - - switch(flush_action) { - case 1: - flush_dcache(source, size, 1); - break; - case 2: - flush_dcache(dest, size, 1); - break; - case 3: - flush_dcache(source, size, 1); - flush_dcache(dest, size, 1); - break; - + if( !(which & cppvNoModSnk)) { + physent = mapping_phys_lookup(sink >> 12, &pindex); /* Get physical entry for sink */ + if(physent) mapping_set_mod((ppnum_t)(sink >> 12)); /* Make sure we know that it is modified */ + } + if( !(which & cppvNoRefSrc)) { + physent = mapping_phys_lookup(source >> 12, &pindex); /* Get physical entry for source */ + if(physent) mapping_set_ref((ppnum_t)(source >> 12)); /* Make sure we know that it is referenced */ + } + size = size - csize; /* Calculate what is left */ + vaddr = vaddr + csize; /* Move to next sink address */ + source = source + csize; /* Bump source to next physical address */ + sink = sink + csize; /* Bump sink to next physical address */ } - bcopy_phys((char *)source, (char *)dest, size); /* Do a physical copy */ - - switch(flush_action) { - case 1: - flush_dcache(source, size, 1); - break; - case 2: - flush_dcache(dest, size, 1); - break; - case 3: - flush_dcache(source, size, 1); - flush_dcache(dest, size, 1); - break; + + if(!bothphys) mapping_drop_busy(mp); /* Go ahead and release the mapping of the virtual page if any */ + splx(s); /* Open up for interrupts */ - } + return KERN_SUCCESS; } - -#if DEBUG /* - * Dumps out the mapping stuff associated with a virtual address + * Debug code */ -void dumpaddr(space_t space, vm_offset_t va) { - - mapping *mp, *mpv; - vm_offset_t pa; - spl_t s; - s=splhigh(); /* Don't bother me */ - - mp = hw_lock_phys_vir(space, va); /* Lock the physical entry for this mapping */ - if(!mp) { /* Did we find one? */ - splx(s); /* Restore the interrupt level */ - printf("dumpaddr: virtual address (%08X) not mapped\n", va); - return; /* Didn't find any, return FALSE... */ - } - if((unsigned int)mp&1) { /* Did we timeout? */ - panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", va); /* Yeah, scream about it! */ - splx(s); /* Restore the interrupt level */ - return; /* Bad hair day, return FALSE... */ - } - printf("dumpaddr: space=%08X; vaddr=%08X\n", space, va); /* Say what address were dumping */ - mpv = hw_cpv(mp); /* Get virtual address of mapping */ - dumpmapping(mpv); - if(mpv->physent) { - dumppca(mpv); - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */ - } - splx(s); /* Was there something you needed? 
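The per-pass size computation is the heart of copypv's loop: either side may sit mid-page, so each pass copies only up to the nearer 4 KB boundary. A minimal sketch of that arithmetic as a standalone helper (the name chunk_size is illustrative, not part of the kernel):

/* Sketch: how many bytes the next bcopy_physvir pass may move.
 * Mirrors the lop/csize logic in copypv above. */
static unsigned int chunk_size(addr64_t source, addr64_t sink, unsigned int remaining)
{
	unsigned int lop  = (unsigned int)(4096LL - (sink & 4095LL));	/* room left on sink page */
	unsigned int srem = (unsigned int)(4096LL - (source & 4095LL));	/* room left on source page */

	if (srem < lop) lop = srem;			/* limited by whichever page ends first */
	return (remaining < lop) ? remaining : lop;	/* never more than the caller asked for */
}

A caller passing a virtual source and a physical sink would combine the cppv flags shown above (cppvPsnk, plus cppvKmap when the virtual side lives in the kernel map, plus cppvFsnk/cppvFsrc when cache flushes are needed); the sketch only restates the chunking, not the translation or flush machinery.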
*/ - return; /* Tell them we did it */ -} +void mapping_verify(void) { + spl_t s; + mappingblok *mb, *mbn; + int relncnt; + unsigned int dumbodude; + dumbodude = 0; + + s = splhigh(); /* Don't bother from now on */ -/* - * Prints out a mapping control block - * - */ - -void dumpmapping(struct mapping *mp) { /* Dump out a mapping */ - - printf("Dump of mapping block: %08X\n", mp); /* Header */ - printf(" next: %08X\n", mp->next); - printf(" hashnext: %08X\n", mp->hashnext); - printf(" PTEhash: %08X\n", mp->PTEhash); - printf(" PTEent: %08X\n", mp->PTEent); - printf(" physent: %08X\n", mp->physent); - printf(" PTEv: %08X\n", mp->PTEv); - printf(" PTEr: %08X\n", mp->PTEr); - printf(" pmap: %08X\n", mp->pmap); - - if(mp->physent) { /* Print physent if it exists */ - printf("Associated physical entry: %08X %08X\n", mp->physent->phys_link, mp->physent->pte1); + mbn = 0; /* Start with none */ + for(mb = mapCtl.mapcnext; mb; mb = mb->nextblok) { /* Walk the free chain */ + if((mappingblok *)(mb->mapblokflags & 0x7FFFFFFF) != mb) { /* Is tag ok? */ + panic("mapping_verify: flags tag bad, free chain; mb = %08X, tag = %08X\n", mb, mb->mapblokflags); + } + mbn = mb; /* Remember the last one */ } - else { - printf("Associated physical entry: none\n"); + + if(mapCtl.mapcnext && (mapCtl.mapclast != mbn)) { /* Do we point to the last one? */ + panic("mapping_verify: last pointer bad; mb = %08X, mapclast = %08X\n", mb, mapCtl.mapclast); } - dumppca(mp); /* Dump out the PCA information */ + relncnt = 0; /* Clear count */ + for(mb = mapCtl.mapcrel; mb; mb = mb->nextblok) { /* Walk the release chain */ + dumbodude |= mb->mapblokflags; /* Just touch it to make sure it is mapped */ + relncnt++; /* Count this one */ + } - return; -} + if(mapCtl.mapcreln != relncnt) { /* Is the count on release queue ok? 
*/ + panic("mapping_verify: bad release queue count; mapcreln = %d, cnt = %d, ignore this = %08X\n", mapCtl.mapcreln, relncnt, dumbodude); + } -/* - * Prints out a PTEG control area - * - */ - -void dumppca(struct mapping *mp) { /* PCA */ - - PCA *pca; - unsigned int *pteg; - - pca = (PCA *)((unsigned int)mp->PTEhash&-64); /* Back up to the start of the PCA */ - pteg=(unsigned int *)((unsigned int)pca-(((hash_table_base&0x0000FFFF)+1)<<16)); - printf(" Dump of PCA: %08X\n", pca); /* Header */ - printf(" PCAlock: %08X\n", pca->PCAlock); - printf(" PCAallo: %08X\n", pca->flgs.PCAallo); - printf(" PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], pca->PCAhash[3]); - printf(" %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]); - printf("Dump of PTEG: %08X\n", pteg); /* Header */ - printf(" %08X %08X %08X %08X\n", pteg[0], pteg[1], pteg[2], pteg[3]); - printf(" %08X %08X %08X %08X\n", pteg[4], pteg[5], pteg[6], pteg[7]); - printf(" %08X %08X %08X %08X\n", pteg[8], pteg[9], pteg[10], pteg[11]); - printf(" %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]); - return; -} + splx(s); /* Restore 'rupts */ -/* - * Dumps starting with a physical entry - */ - -void dumpphys(struct phys_entry *pp) { /* Dump from physent */ - - mapping *mp; - PCA *pca; - unsigned int *pteg; - - printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1); - mp = hw_cpv(pp->phys_link); - while(mp) { - dumpmapping(mp); - dumppca(mp); - mp = hw_cpv(mp->next); - } - return; } -#endif +void mapping_phys_unused(ppnum_t pa) { + unsigned int pindex; + phys_entry *physent; -kern_return_t bmapvideo(vm_offset_t *info); -kern_return_t bmapvideo(vm_offset_t *info) { + physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */ + if(!physent) return; /* Did we find the physical page? */ - extern struct vc_info vinfo; + if(!(physent->ppLink & ~(ppLock | ppN | ppFlags))) return; /* No one else is here */ - (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info)); /* Copy out the video info */ - return KERN_SUCCESS; -} - -kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr); -kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { + panic("mapping_phys_unused: physical page (%08X) in use, physent = %08X\n", pa, physent); - pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr, 0); /* Map it in */ - return KERN_SUCCESS; } - -kern_return_t bmapmapr(vm_offset_t va); -kern_return_t bmapmapr(vm_offset_t va) { - mapping_remove(current_act()->task->map->pmap, va); /* Remove map */ - return KERN_SUCCESS; -} + + + + + + + + + diff --git a/osfmk/ppc/mappings.h b/osfmk/ppc/mappings.h index 7b3040f0a..438b319db 100644 --- a/osfmk/ppc/mappings.h +++ b/osfmk/ppc/mappings.h @@ -28,71 +28,183 @@ #ifndef _PPC_MAPPINGS_H_ #define _PPC_MAPPINGS_H_ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Don't change these structures unless you change the assembly code + */ + +/* + * This control block serves as anchor for all virtual mappings of the same physical + * page, i.e., aliases. There is a table for each bank (mem_region). All tables + * must reside in V=R storage and within the first 2GB of memory. Also, the + * mappings to which it points must be on at least a 64-byte boundary. 
These + * requirements allow a total of 2 bits for status and flags, and allow all address + * calculations to be 32-bit. + */ + +#pragma pack(4) /* Make sure the structure stays as we defined it */ +typedef struct phys_entry { + addr64_t ppLink; /* Physical pointer to aliased mappings and flags */ +#define ppLock 0x8000000000000000LL /* Lock for alias chain */ +#define ppN 0x4000000000000000LL /* Not executable */ +#define ppFlags 0x000000000000003FLL /* Status and flags */ +#define ppI 0x0000000000000020LL /* Cache inhibited */ +#define ppIb 58 /* Cache inhibited */ +#define ppG 0x0000000000000010LL /* Guarded */ +#define ppGb 59 /* Guarded */ +#define ppR 0x0000000000000008LL /* Referenced */ +#define ppRb 60 /* Referenced */ +#define ppC 0x0000000000000004LL /* Changed */ +#define ppCb 61 /* Changed */ +#define ppPP 0x0000000000000003LL /* Protection */ +#define ppPPb 62 /* Protection begin */ +#define ppPPe 63 /* Protection end */ +} phys_entry; +#pragma pack() + +/* Memory may be non-contiguous. This data structure contains info + * for mapping this non-contiguous space into the contiguous + * physical->virtual mapping tables. An array of this type is + * provided to the pmap system at bootstrap by ppc_vm_init. + * + */ + +#pragma pack(4) /* Make sure the structure stays as we defined it */ +typedef struct mem_region { + phys_entry *mrPhysTab; /* Base of region table */ + ppnum_t mrStart; /* Start of region */ + ppnum_t mrEnd; /* Last page in region */ + ppnum_t mrAStart; /* Next page in region to allocate */ + ppnum_t mrAEnd; /* Last page in region to allocate */ +} mem_region_t; +#pragma pack() + +#define mrSize sizeof(mem_region_t) +#define PMAP_MEM_REGION_MAX 26 + +extern mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX + 1]; +extern int pmap_mem_regions_count; + +/* Prototypes */ + + +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct PCA { /* PTEG Control Area */ - unsigned int PCAlock; /* PCA lock */ union flgs { unsigned int PCAallo; /* Allocation controls */ struct PCAalflgs { /* Keep these in order!!! */ unsigned char PCAfree; /* Indicates the slot is free */ - unsigned char PCAauto; /* Indicates that the PTE was autogenned */ - unsigned char PCAslck; /* Indicates that the slot is locked */ unsigned char PCAsteal; /* Steal scan start position */ + unsigned char PCAauto; /* Indicates that the PTE was autogenned */ + unsigned char PCAmisc; /* Misc. flags */ +#define PCAlock 1 /* This locks up the associated PTEG */ +#define PCAlockb 31 } PCAalflgs; } flgs; - unsigned int PCAgas[6]; /* Filler to 32 byte boundary */ - unsigned int PCAhash[8]; /* PTEG hash chains */ } PCA; +#pragma pack() -#define MAPFLAGS 0x0000001F -#define BMAP 0x00000001 - -typedef struct mapping { - struct mapping *next; /* MUST BE FIRST - chain off physent */ - struct mapping *hashnext; /* Next mapping in same hash group */ - unsigned int *PTEhash; /* Pointer to the head of the mapping hash list */ - unsigned int *PTEent; /* Pointer to PTE if exists */ - struct phys_entry *physent; /* Quick pointer back to the physical entry */ - unsigned int PTEv; /* Virtual half of HW PTE */ - unsigned int PTEr; /* Real half of HW PTE. This is used ONLY if - there is no physical entry associated - with this mapping, ie.e, physent==0 */ - struct pmap *pmap; /* Quick pointer back to the containing pmap */ -} mapping; - -/* - * This control block maps odd size blocks of memory. 
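Because every mapping is at least 64-byte aligned, the low six bits of ppLink are free to carry status, and the lock and no-execute bits sit at the top of the doubleword. A sketch of how the fields unpack, using only the pp* masks defined above (the helper names are illustrative, not kernel API):

/* Sketch: unpacking a phys_entry's ppLink word with the masks above. */
static int pp_is_changed(struct phys_entry *pp)
{
	return (pp->ppLink & ppC) != 0;			/* hardware change bit */
}

static addr64_t pp_alias_chain(struct phys_entry *pp)
{
	/* Strip the lock, no-execute, and flag bits to recover the
	 * physical pointer to the first mapping on the alias chain;
	 * zero means no one maps this page (this is exactly the test
	 * mapping_phys_unused performs above). */
	return pp->ppLink & ~(ppLock | ppN | ppFlags);
}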
The mapping must - * be V=F (Virtual = Fixed), i.e., virtually and physically contiguous - * multiples of hardware size pages. +/* Mappings currently come in two sizes: 64 and 128 bytes. The only difference is the + * number of skiplists (ie, mpLists): 64-byte mappings have 1-4 lists and 128-byte mappings + * have from 5-12. Only 1 in 256 mappings is large, so an average mapping is 64.25 bytes. + * All mappings are 64-byte aligned. * - * This control block overlays the mapping CB and is allocated from the - * same pool. - * - * It is expected that only a small number of these exist for each address - * space and will typically be for I/O areas. It is further assumed that - * there is a minimum size (ODDBLKMIN) for these blocks. If smaller, the - * block will be split into N normal page mappings. - * - * Binary tree for fast lookups. - */ + * Special note on mpFIP and mpRIP: + * These flags are manipulated under various locks. RIP is always set under an + * exclusive lock while FIP is shared. The only worry is that there is a possibility that + * FIP could be attempted by more than 1 processor at a time. Obviously, one will win. + * The other(s) bail all the way to user state and may refault (or not). There are only + * a few things in mpFlags that are not static, mpFIP, mpRIP, mpRemovable, and mpBusy. + * + * We organize these so that mpFIP is in a byte with static data and mpRIP and mpRemovable + * is in another. That means that we can use a store byte to update the guys without + * worrying about load and reserve. Note that mpFIP must be set atomically because it is + * under a share lock, but it may be clear with a simple store byte. So far as mpRIP + * goes, it is in the same byte as mpRemovable. However, mpRemovable is set atomically + * but never cleared, and mpRIP will not ever be set until after mpRemovable. Note that + * mpRIP is never cleared either. + * + */ +#pragma pack(4) /* Make sure the structure stays as we defined it */ +typedef struct mapping { + unsigned int mpFlags; /* 0x000 - Various flags, lock bit. These are static except for lock */ +#define mpBusy 0xFF000000 /* Busy count */ +#define mpPIndex 0x00FF0000 /* Index into physical table (in words) */ +#define mpSpecial 0x00008000 /* Special mapping - processor specific. */ +#define mpSpecialb 16 /* Special mapping - processor specific. */ +#define mpFIP 0x00004000 /* Fault in progress */ +#define mpFIPb 17 /* Fault in progress */ +#define mpNest 0x00001000 /* Mapping describes nested pmap */ +#define mpNestb 19 /* Mapping describes nested pmap */ +#define mpPerm 0x00000800 /* Mapping is permanent */ +#define mpPermb 20 /* Mapping is permanent */ +#define mpBlock 0x00000400 /* Mapping is a block map - used for V=F or I/O */ +#define mpBlockb 21 /* Mapping is a block map - used for V=F or I/O */ +#define mpRIP 0x00000080 /* Remove in progress - DO NOT MOVE */ +#define mpRIPb 24 /* Remove in progress */ +#define mpRemovable 0x00000040 /* Mapping is removable - DO NOT MOVE */ +#define mpRemovableb 25 /* Mapping is removable */ +#define mpRSVD1 0x00002330 /* Reserved for future use */ +#define mpLists 0x0000001F /* Number of skip lists mapping is on, max of 27 */ +#define mpListsb 27 /* Number of skip lists mapping is on, max of 27 */ + unsigned short mpSpace; /* 0x004 - Address space hash */ + unsigned short mpBSize; /* 0x006 - Block size - 1 in pages - max block size 256MB */ + unsigned int mpPte; /* 0x008 - Offset to PTEG in hash table. 
Offset to exact PTE if mpHValid set - NOTE: this MUST be 0 for block mappings */ +#define mpHValid 0x00000001 /* PTE is entered in hash table */ +#define mpHValidb 31 /* PTE is entered in hash table */ + ppnum_t mpPAddr; /* 0x00C - Physical page number */ + addr64_t mpVAddr; /* 0x010 - Starting virtual address */ +#define mpHWFlags 0x0000000000000FFFULL /* Reference/Change, WIMG, AC, N, protection flags from PTE */ +#define mpPP 0x0000000000000007ULL /* Protection flags */ +#define mpPPb 61 +#define mpKKN 0x0000000000000007ULL /* Segment key and no execute flag (nested pmap) */ +#define mpKKNb 61 +#define mpWIMG 0x0000000000000078ULL /* Attribute bits */ +#define mpWIMGb 57 +#define mpW 0x0000000000000040ULL +#define mpWb 57 +#define mpI 0x0000000000000020ULL +#define mpIb 58 +#define mpM 0x0000000000000010ULL +#define mpMb 59 +#define mpG 0x0000000000000008ULL +#define mpGb 60 +#define mpWIMGe 60 +#define mpC 0x0000000000000080ULL /* Change bit */ +#define mpCb 56 +#define mpR 0x0000000000000100ULL /* Reference bit */ +#define mpRb 55 + addr64_t mpAlias; /* 0x018 - Pointer to alias mappings of physical page */ +#define mpNestReloc mpAlias /* 0x018 - Redefines mpAlias relocation value of vaddr to nested pmap value */ +#define mpBlkRemCur mpAlias /* 0x018 - Next offset in block map to remove (this is 4 bytes) */ + addr64_t mpList0; /* 0x020 - Forward chain of mappings. This one is always used */ + addr64_t mpList[3]; /* 0x028 - Forward chain of mappings. Next higher order */ +/* 0x040 - End of basic mapping */ +#define mpBasicSize 64 +#define mpBasicLists 4 +/* note the dependence on kSkipListMaxLists, which must be <= #lists in a 256-byte mapping (ie, <=28) */ +/* addr64_t mpList4[8]; 0x040 - First extended list entries */ +/* 0x080 - End of first extended mapping */ +/* addr64_t mpList12[8]; 0x080 - Second extended list entries */ +/* 0x0C0 - End of second extended mapping */ +/* addr64_t mpList20[8]; 0x0C0 - Third extended list entries */ +/* 0x100 - End of third extended mapping */ -typedef struct blokmap { - struct blokmap *next; /* Next block in list */ - unsigned int start; /* Start of block */ - unsigned int end; /* End of block */ - unsigned int PTEr; /* Real half of HW PTE at base address */ - unsigned int space; /* Cached VSID */ - unsigned int blkFlags; /* Flags for this block */ -#define blkPerm 0x80000000 -#define blkRem 0x40000000 -#define blkPermbit 0 -#define blkRembit 1 - unsigned int current; /* Partial block remove current start */ - unsigned int gas4; /* Reserved */ -} blokmap; - -#define ODDBLKMIN (8 * PAGE_SIZE) -#define BLKREMMAX 128 +} mapping; +#pragma pack() #define MAPPING_NULL ((struct mapping *) 0) @@ -102,16 +214,15 @@ typedef struct blokmap { #define mapRWRW 0x00000002 #define mapRORO 0x00000003 - -typedef struct mfmapping { - struct pmap *pmap; - vm_offset_t offset; -} mfmapping; +/* All counts are in units of basic 64-byte mappings. A 128-byte mapping is + * just two adjacent 64-byte entries. 
+ */ + +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct mappingflush { - PCA *pcaptr; - unsigned int mappingcnt; - struct mfmapping mapping[8]; + addr64_t addr; /* Start address to search mapping */ + unsigned int spacenum; /* Last space num to search pmap */ + unsigned int mapfgas[1]; /* Pad to 64 bytes */ } mappingflush; typedef struct mappingctl { @@ -126,69 +237,103 @@ typedef struct mappingctl { int mapcholdoff; /* Hold off clearing release list */ unsigned int mapcfreec; /* Total calls to mapping free */ unsigned int mapcallocc; /* Total calls to mapping alloc */ + unsigned int mapcbig; /* Count times a big mapping was requested of mapping_alloc */ + unsigned int mapcbigfails; /* Times caller asked for a big one but we gave 'em a small one */ unsigned int mapcmin; /* Minimum free mappings to keep */ unsigned int mapcmaxalloc; /* Maximum number of mappings allocated at one time */ - struct mappingflush mapcflush; unsigned int mapcgas[1]; /* Pad to 64 bytes */ + struct mappingflush mapcflush; } mappingctl; +#pragma pack() -#define MAPPERBLOK 127 +/* MAPPERBLOK is the number of basic 64-byte mappings per block (i.e., per page). */ +#define MAPPERBLOK 63 #define MAPALTHRSH (4*MAPPERBLOK) #define MAPFRTHRSH (2 * ((MAPALTHRSH + MAPPERBLOK - 1) / MAPPERBLOK)) typedef struct mappingblok { - unsigned int mapblokfree[4]; /* Bit map of free mapping entrys */ - unsigned int mapblokvrswap; /* Virtual address XORed with physical address */ + unsigned int mapblokfree[2]; /* Bit map of free mapping entries */ + addr64_t mapblokvrswap; /* Virtual address XORed with physical address */ unsigned int mapblokflags; /* Various flags */ #define mbPerm 0x80000000 /* Block is permanent */ struct mappingblok *nextblok; /* Pointer to the next mapping block */ } mappingblok; +#define mapRemChunk 128 + +#define mapRetCode 0xF +#define mapRtOK 0 +#define mapRtBadLk 1 +#define mapRtPerm 2 +#define mapRtNotFnd 3 +#define mapRtBlock 4 +#define mapRtNest 5 +#define mapRtRemove 6 +#define mapRtMapDup 7 + extern mappingctl mapCtl; /* Mapping allocation control */ -extern void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg); /* Initializes hw specific storage attributes */ -extern boolean_t mapping_remove(pmap_t pmap, vm_offset_t va); /* Remove a single mapping for this VADDR */ +extern addr64_t mapping_remove(pmap_t pmap, addr64_t va); /* Remove a single mapping for this VADDR */ +extern mapping *mapping_find(pmap_t pmap, addr64_t va, addr64_t *nextva, int full); /* Finds a mapping */ extern void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked); /* Sets start and end of a block of mappings */ extern void mapping_adjust(void); /* Adjust free mapping count */ extern void mapping_free_prime(void); /* Primes the mapping block release list */ extern void mapping_prealloc(unsigned int); /* Preallocate mappings for large use */ extern void mapping_relpre(void); /* Releases preallocate request */ extern void mapping_init(void); /* Do initial stuff */ -extern void mapping_flush(void); -extern mapping *mapping_alloc(void); /* Obtain a mapping */ +extern mapping *mapping_alloc(int lists); /* Obtain a mapping */ extern void mapping_free(struct mapping *mp); /* Release a mapping */ -extern boolean_t mapping_tst_ref(struct phys_entry *pp); /* Tests the reference bit of a physical page */ -extern boolean_t mapping_tst_mod(struct phys_entry *pp); /* Tests the change bit of a physical page */ -extern void mapping_set_ref(struct phys_entry *pp); /* Sets the 
reference bit of a physical page */ -extern void mapping_clr_ref(struct phys_entry *pp); /* Clears the reference bit of a physical page */ -extern void mapping_set_mod(struct phys_entry *pp); /* Sets the change bit of a physical page */ -extern void mapping_clr_mod(struct phys_entry *pp); /* Clears the change bit of a physical page */ -extern void mapping_invall(struct phys_entry *pp); /* Clear all PTEs pointing to a physical page */ -extern void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked); /* Change protection of all mappings to page */ -extern void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot); /* Change protection of a single mapping to page */ -extern mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked); /* Make an address mapping */ -extern void mapping_purge(struct phys_entry *pp); /* Remove all mappings for this physent */ -extern void mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap); /* Remove physent mappings for this pmap */ -extern vm_offset_t mapping_p2v(pmap_t pmap, struct phys_entry *pp); /* Finds first virtual mapping of a physical page in a space */ -extern void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg); /* Sets the default physical page attributes */ -extern void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr); /* Map a block optimally */ -extern int mapalc(struct mappingblok *mb); /* Finds and allcates a mapping entry */ +extern boolean_t mapping_tst_ref(ppnum_t pa); /* Tests the reference bit of a physical page */ +extern boolean_t mapping_tst_mod(ppnum_t pa); /* Tests the change bit of a physical page */ +extern void mapping_set_ref(ppnum_t pa); /* Sets the reference bit of a physical page */ +extern void mapping_clr_ref(ppnum_t pa); /* Clears the reference bit of a physical page */ +extern void mapping_set_mod(ppnum_t pa); /* Sets the change bit of a physical page */ +extern void mapping_clr_mod(ppnum_t pa); /* Clears the change bit of a physical page */ +extern void mapping_protect_phys(ppnum_t pa, vm_prot_t prot); /* Change protection of all mappings to page */ +extern int mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva); /* Change protection of a single mapping to page */ +extern addr64_t mapping_make(pmap_t pmap, addr64_t va, ppnum_t pa, unsigned int flags, unsigned int size, vm_prot_t prot); /* Make a mapping */ +/* Flags for mapping_make */ +#define mmFlgBlock 0x80000000 /* This is a block map, use size for number of pages covered */ +#define mmFlgUseAttr 0x40000000 /* Use specified attributes */ +#define mmFlgPerm 0x20000000 /* Mapping is permanent */ +#define mmFlgCInhib 0x00000002 /* Caching inhibited - use if mmFlgUseAttr set or block */ +#define mmFlgGuarded 0x00000001 /* Access guarded - use if mmFlgUseAttr set or block */ +extern void mapping_purge(ppnum_t pa); /* Remove all mappings for this physent */ +extern addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa); /* Finds first virtual mapping of a physical page in a space */ +extern void mapping_drop_busy(struct mapping *mapping); /* Drops busy count on mapping */ +extern phys_entry *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex); /* Finds the physical entry for the page */ +extern int mapalc1(struct mappingblok *mb); /* Finds and allocates a 1-bit mapping entry */ +extern int mapalc2(struct mappingblok *mb); /* Finds and 
allocates a 2-bit mapping entry */ extern void ignore_zero_fault(boolean_t type); /* Sets up to ignore or honor any fault on page 0 access for the current thread */ -extern mapping *hw_lock_phys_vir(space_t space, vm_offset_t va); /* Finds and locks a physical entry by vaddr */ -extern mapping *hw_cpv(struct mapping *mapping); /* Converts a physical mapping control block address to virtual */ -extern mapping *hw_cvp(struct mapping *mapping); /* Converts a virtual mapping control block address to physical */ -extern void hw_rem_map(struct mapping *mapping); /* Remove a mapping from the system */ -extern void hw_add_map(struct mapping *mp, space_t space, vm_offset_t va); /* Add a mapping to the PTEG hash list */ -extern void hw_select_mappings(struct mappingflush *mappingflush); /* Select user mappings in a PTEG */ -extern blokmap *hw_rem_blk(pmap_t pmap, vm_offset_t sva, vm_offset_t eva); /* Remove a block that falls within a range */ -extern vm_offset_t hw_cvp_blk(pmap_t pmap, vm_offset_t va); /* Convert mapped block virtual to physical */ -extern blokmap *hw_add_blk(pmap_t pmap, struct blokmap *bmr); /* Add a block to the pmap */ -extern void hw_prot(struct phys_entry *pp, vm_prot_t prot); /* Change the protection of a physical page */ -extern void hw_prot_virt(struct mapping *mp, vm_prot_t prot); /* Change the protection of a virtual page */ -extern void hw_attr_virt(struct mapping *mp, unsigned int wimg); /* Change the attributes of a virtual page */ -extern void hw_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg); /* Sets the default physical page attributes */ -extern unsigned int hw_test_rc(struct mapping *mp, boolean_t reset); /* Test and optionally reset the RC bit of specific mapping */ +extern mapping *hw_rem_map(pmap_t pmap, addr64_t va, addr64_t *next); /* Remove a mapping from the system */ +extern mapping *hw_purge_map(pmap_t pmap, addr64_t va, addr64_t *next); /* Remove a regular mapping from the system */ +extern mapping *hw_purge_space(struct phys_entry *pp, pmap_t pmap); /* Remove the first mapping for a specific pmap from physentry */ +extern mapping *hw_purge_phys(struct phys_entry *pp); /* Remove the first mapping for a physentry */ +extern mapping *hw_find_map(pmap_t pmap, addr64_t va, addr64_t *nextva); /* Finds a mapping */ +extern addr64_t hw_add_map(pmap_t pmap, struct mapping *mp); /* Add a mapping to a pmap */ +extern int hw_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva); /* Change the protection of a virtual page */ +extern unsigned int hw_test_rc(pmap_t pmap, addr64_t va, boolean_t reset); /* Test and optionally reset the RC bit of specific mapping */ + +extern unsigned int hw_phys_walk(struct phys_entry *pp, unsigned int preop, unsigned int op, /* Perform function on all mappings on a physical page */ + unsigned int postop, unsigned int parm); +#define hwpNoop 0 /* No operation */ +#define hwpSPrtPhy 1 /* Sets protection in physent */ +#define hwpSPrtMap 2 /* Sets protection in mapping */ +#define hwpSAtrPhy 3 /* Sets attributes in physent */ +#define hwpSAtrMap 4 /* Sets attributes in mapping */ +#define hwpCRefPhy 5 /* Clears reference in physent */ +#define hwpCRefMap 6 /* Clears reference in mapping */ +#define hwpCCngPhy 7 /* Clears change in physent */ +#define hwpCCngMap 8 /* Clears change in mapping */ +#define hwpSRefPhy 9 /* Sets reference in physent */ +#define hwpSRefMap 10 /* Sets reference in mapping */ +#define hwpSCngPhy 11 /* Sets change in physent */ +#define hwpSCngMap 12 /* Sets change in mapping */ 
+#define hwpTRefPhy 13 /* Tests reference in physent */ +#define hwpTRefMap 14 /* Tests reference in mapping */ +#define hwpTCngPhy 15 /* Tests change in physent */ +#define hwpTCngMap 16 /* Tests change in mapping */ extern boolean_t hw_tst_mod(struct phys_entry *pp); /* Tests change bit */ extern void hw_set_mod(struct phys_entry *pp); /* Set change bit */ @@ -198,15 +343,17 @@ extern boolean_t hw_tst_ref(struct phys_entry *pp); /* Tests reference bit */ extern void hw_set_ref(struct phys_entry *pp); /* Set reference bit */ extern void hw_clr_ref(struct phys_entry *pp); /* Clear reference bit */ -extern void hw_inv_all(struct phys_entry *pp); /* Invalidate all PTEs associated with page */ extern void hw_set_user_space(pmap_t pmap); /* Indicate we need a space switch */ extern void hw_set_user_space_dis(pmap_t pmap); /* Indicate we need a space switch (already disabled) */ -kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size); /* Copy a physical page to a virtual address */ -extern void *LRA(space_t space, void *vaddr); /* Translate virtual to real using only HW tables */ -extern void dumpaddr(space_t space, vm_offset_t va); -extern void dumpmapping(struct mapping *mp); /* Print contents of a mapping */ -extern void dumppca(struct mapping *mp); /* Print contents of a PCA */ -extern void dumpphys(struct phys_entry *pp); /* Prints stuff starting at phys */ +extern void hw_setup_trans(void); /* Setup hardware for translation */ +extern void hw_start_trans(void); /* Start translation for the first time */ +extern void hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va); /* Validate a segment */ +extern void hw_blow_seg(addr64_t seg); /* Invalidate a segment */ +extern void invalidateSegs(pmap_t pmap); /* Invalidate the segment cache */ +extern struct phys_entry *pmap_find_physentry(ppnum_t pa); +extern void mapLog(unsigned int laddr, unsigned int type, addr64_t va); +extern unsigned int mapSkipListVerifyC(pmap_t pmap, unsigned long long *dumpa); +extern void fillPage(ppnum_t pa, unsigned int fill); extern unsigned int mappingdeb0; /* (TEST/DEBUG) */ extern unsigned int incrVSID; /* VSID increment value */ diff --git a/osfmk/ppc/mcount.s b/osfmk/ppc/mcount.s new file mode 100644 index 000000000..8c91e175f --- /dev/null +++ b/osfmk/ppc/mcount.s @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include + + +/* + * The compiler generates calls to this function and passes the address + * of the caller of the function [ from which mcount is called ] as the + * first parameter. + * mcount disables interrupts prior to calling _mcount() and restores + * interrupts upon return. + * To prevent recursive calls to mcount(), a flag, mcountOff, is set + * in cpu_flags per_proc. + */ + + .align 4 + .globl mcount +mcount: + mflr r0 ; Load lr + stw r0,8(r1) ; Save lr on the stack + stwu r1,-64(r1) ; Get a stack frame + mfmsr r9 ; Get msr + rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off + rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off + rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions + mtmsr r8 ; Update msr + isync + mfsprg r7,0 ; Get per_proc + lhz r6,PP_CPU_FLAGS(r7) ; Get cpu flags + ori r5,r6,mcountOff ; + cmplw r5,r6 ; is mcount off + beq mcount_ret ; return if off + sth r5,PP_CPU_FLAGS(r7) ; Update cpu_flags + stw r9,FM_ARG0(r1) ; Save MSR + mr r4, r0 + bl _mcount ; Call the C routine + lwz r9,FM_ARG0(r1) + mfsprg r7,0 ; Get per-proc block + lhz r6,PP_CPU_FLAGS(r7) ; Get cpu flags + li r5,mcountOff ; + andc r6,r6,r5 ; Clear mcountOff + sth r6,PP_CPU_FLAGS(r7) ; Save cpu_flags +mcount_ret: + addi r1,r1,64 + mtmsr r9 ; Restore MSR + lwz r0,8(r1) + mtlr r0 + blr + diff --git a/osfmk/ppc/mem.c b/osfmk/ppc/mem.c deleted file mode 100644 index 8f9bb7c72..000000000 --- a/osfmk/ppc/mem.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
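In C terms the mcount stub above behaves roughly like the following sketch. This is not the shipped code: the per-cpu flag accessors (cpu_flags_get/set/clear) are hypothetical stand-ins for the PP_CPU_FLAGS halfword, the two-argument _mcount(frompc, selfpc) interface is the conventional profiler shape implied by the register setup, and the real stub also forces the FP and vector MSR bits off.

/* Rough C model of the mcount stub; sketch only. */
void mcount_model(unsigned int frompc)	/* r3: caller of the instrumented function */
{
	unsigned int selfpc = (unsigned int)__builtin_return_address(0);	/* lr on entry */
	boolean_t old = ml_set_interrupts_enabled(FALSE);	/* msr: EE off */

	if (!(cpu_flags_get() & mcountOff)) {	/* hypothetical accessor */
		cpu_flags_set(mcountOff);	/* block recursive entry */
		_mcount(frompc, selfpc);	/* the C profiler hook */
		cpu_flags_clear(mcountOff);	/* hypothetical accessor */
	}
	(void)ml_set_interrupts_enabled(old);
}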
- * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_COPYRIGHT@ - */ - -/* A marvelous selection of support routines for virtual memory */ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include /* For pmap_pteg_overflow */ - -/* These refer to physical addresses and are set and referenced elsewhere */ - -unsigned int hash_table_base; -unsigned int hash_table_size; - -unsigned int hash_function_mask; - -struct shadowBAT shadow_BAT; - -/* gather statistics about hash table usage */ - -#if DEBUG -#define MEM_STATS 1 -#else -#define MEM_STATS 0 -#endif /* DEBUG */ - -#if MEM_STATS -/* hash table usage information */ -struct hash_table_stats { - int find_pte_in_pteg_calls; - int find_pte_in_pteg_not_found; - int find_pte_in_pteg_location[8]; - struct find_or_alloc_calls { - int found_primary; - int found_secondary; - int alloc_primary; - int alloc_secondary; - int overflow; - int not_found; - } find_or_alloc_calls[2]; - -} hash_table_stats[NCPUS]; - -#define INC_STAT(LOC) \ - hash_table_stats[cpu_number()].find_pte_in_pteg_location[LOC]++ - -#else /* MEM_STATS */ -#define INC_STAT(LOC) -#endif /* MEM_STATS */ - -/* Set up the machine registers for the given hash table. - * The table has already been zeroed. - */ -void hash_table_init(unsigned int base, unsigned int size) -{ - sync(); /* SYNC: it's not just the law, it's a good idea... */ - mtsdr1(hash_table_base | ((size-1)>>16)); /* Slam the SDR1 with the has table address */ - sync(); /* SYNC: it's not just the law, it's a good idea... */ - isync(); -} - diff --git a/osfmk/ppc/mem.h b/osfmk/ppc/mem.h index bbeb60b1e..a3e7fc692 100644 --- a/osfmk/ppc/mem.h +++ b/osfmk/ppc/mem.h @@ -34,25 +34,28 @@ #include #include -#include #include -extern vm_offset_t hash_table_base; +extern addr64_t hash_table_base; extern unsigned int hash_table_size; void hash_table_init(vm_offset_t base, vm_offset_t size); #define MAX_BAT 4 +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct ppcBAT { unsigned int upper; /* Upper half of BAT */ unsigned int lower; /* Lower half of BAT */ } ppcBAT; +#pragma pack() +#pragma pack(4) /* Make sure the structure stays as we defined it */ struct shadowBAT { ppcBAT IBATs[MAX_BAT]; /* Instruction BATs */ ppcBAT DBATs[MAX_BAT]; /* Data BAT */ }; +#pragma pack() extern struct shadowBAT shadow_BAT; diff --git a/osfmk/ppc/misc_asm.s b/osfmk/ppc/misc_asm.s index 126714d9b..3d6c1974a 100644 --- a/osfmk/ppc/misc_asm.s +++ b/osfmk/ppc/misc_asm.s @@ -50,13 +50,13 @@ ENTRY(getrpc, TAG_NO_FRAME_USED) /* Mask and unmask interrupts at the processor level */ ENTRY(interrupt_disable, TAG_NO_FRAME_USED) - mfmsr r0 - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r0, r0, 0, MSR_EE_BIT+1, MSR_EE_BIT-1 - mtmsr r0 - isync - blr + lis r8,hi16(MASK(MSR_VEC)) ; Get the vector flag + mfmsr r0 ; Save the MSR + ori r8,r8,lo16(MASK(MSR_EE)|MASK(MSR_FP)) ; Add the FP flag + andc r0,r0,r8 ; Clear VEC, FP, DR, and EE + mtmsr r0 + isync + blr ENTRY(interrupt_enable, TAG_NO_FRAME_USED) @@ -73,13 +73,13 @@ ENTRY(interrupt_enable, TAG_NO_FRAME_USED) /* Mask and unmask interrupts at the processor level */ ENTRY(db_interrupt_disable, TAG_NO_FRAME_USED) - mfmsr r0 - rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r0, r0, 0, MSR_EE_BIT+1, MSR_EE_BIT-1 - mtmsr r0 - isync - blr + lis 
r8,hi16(MASK(MSR_VEC)) ; Get the vector flag + mfmsr r0 ; Save the MSR + ori r8,r8,lo16(MASK(MSR_EE)|MASK(MSR_FP)) ; Add the FP flag + andc r0,r0,r8 ; Clear VEC, FP, DR, and EE + mtmsr r0 + isync + blr ENTRY(db_interrupt_enable, TAG_NO_FRAME_USED) mfmsr r0 @@ -95,11 +95,12 @@ ENTRY(db_interrupt_enable, TAG_NO_FRAME_USED) ENTRY(Call_Debugger, TAG_NO_FRAME_USED) + + lis r8,hi16(MASK(MSR_VEC)) ; Get the vector flag mfmsr r7 ; Get the current MSR - rlwinm r7,r7,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r7,r7,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off + ori r8,r8,lo16(MASK(MSR_EE)|MASK(MSR_FP)) ; Add the FP flag mflr r0 ; Save the return - rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions + andc r7,r7,r8 ; Clear VEC and FP mtmsr r7 ; Do it isync mfsprg r8,0 ; Get the per_proc block @@ -122,9 +123,11 @@ cdNewDeb: li r0,0 ; Clear this out bl EXT(Call_DebuggerC) ; Call the "C" phase of this - mfmsr r0 ; Get the MSR just in case it was enabled + lis r8,hi16(MASK(MSR_VEC)) ; Get the vector flag + mfmsr r0 ; Get the current MSR + ori r8,r8,lo16(MASK(MSR_EE)|MASK(MSR_FP)) ; Add the FP flag addi r1,r1,FM_SIZE ; Pop off first stack frame - rlwinm r0,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions enable bit + andc r0,r0,r8 ; Turn off all the interesting stuff mtmsr r0 mfsprg r8,0 ; Get the per_proc block address @@ -192,6 +195,10 @@ ENTRY(mtdec, TAG_NO_FRAME_USED) mtdec ARG0 blr +ENTRY(cntlzw, TAG_NO_FRAME_USED) + cntlzw r3,r3 + blr + /* Decrementer frequency and realtime|timebase processor registers * are different between ppc601 and ppc603/4, we define them all. */ @@ -291,3 +298,11 @@ ENTRY(mfsda, TAG_NO_FRAME_USED) mfspr r3,sda blr + .globl EXT(hid0get64) + +LEXT(hid0get64) + + mfspr r4,hid0 ; Get the HID0 + srdi r3,r4,32 ; Move top down + rlwinm r4,r4,0,0,31 ; Clean top + blr diff --git a/osfmk/ppc/misc_protos.h b/osfmk/ppc/misc_protos.h index d4153d54f..9c61739af 100644 --- a/osfmk/ppc/misc_protos.h +++ b/osfmk/ppc/misc_protos.h @@ -52,9 +52,10 @@ extern char *strcpy(char *dest, const char *src); extern void vprintf(const char *fmt, va_list args); extern void printf(const char *fmt, ...); +extern void bzero_nc(char* buf, int size); /* uncached-safe */ extern void bcopy_nc(char *from, char *to, int size); /* uncached-safe */ -extern void bcopy_phys(char *from, char *to, int size); /* Physical to physical copy (ints must be disabled) */ -extern void bcopy_physvir(char *from, char *to, int size); /* Physical to physical copy virtually (ints must be disabled) */ +extern void bcopy_phys(addr64_t from, addr64_t to, int size); /* Physical to physical copy (ints must be disabled) */ +extern void bcopy_physvir(addr64_t from, addr64_t to, int size); /* Physical to physical copy virtually (ints must be disabled) */ extern void ppc_init(boot_args *args); extern struct savearea *enterDebugger(unsigned int trap, @@ -62,7 +63,10 @@ extern struct savearea *enterDebugger(unsigned int trap, unsigned int dsisr); extern void draw_panic_dialog(void); -extern void ppc_vm_init(unsigned int mem_size, boot_args *args); +extern void ppc_vm_init(uint64_t mem_size, boot_args *args); + +extern int ppcNull(struct savearea *); +extern int ppcNullinst(struct savearea *); extern void autoconf(void); extern void machine_init(void); @@ -75,25 +79,29 @@ extern void interrupt_init(void); extern void interrupt_enable(void); extern void interrupt_disable(void); extern void disable_bluebox_internal(thread_act_t act); +extern uint64_t hid0get64(void); #if MACH_KDB extern void 
db_interrupt_enable(void); extern void db_interrupt_disable(void); #endif /* MACH_KDB */ extern void phys_zero(vm_offset_t, vm_size_t); -extern void phys_copy(vm_offset_t, vm_offset_t, vm_size_t); +extern void phys_copy(addr64_t, addr64_t, vm_size_t); extern void Load_context(thread_t th); -extern struct thread_shuttle *Switch_context(struct thread_shuttle *old, - void (*cont)(void), - struct thread_shuttle *new); +extern thread_t Switch_context( + thread_t old, + void (*cont)(void), + thread_t new); extern void fpu_save(struct facility_context *); extern void vec_save(struct facility_context *); extern void toss_live_fpu(struct facility_context *); extern void toss_live_vec(struct facility_context *); +extern void condStop(unsigned int, unsigned int); + extern int nsec_to_processor_clock_ticks(int nsec); extern void tick_delay(int ticks); diff --git a/osfmk/ppc/model_dep.c b/osfmk/ppc/model_dep.c index 90d8a05d9..962058728 100644 --- a/osfmk/ppc/model_dep.c +++ b/osfmk/ppc/model_dep.c @@ -86,6 +86,7 @@ #include #include #include +#include #include #include @@ -155,6 +156,8 @@ void lock_debugger(void); void dump_backtrace(unsigned int stackptr, unsigned int fence); void dump_savearea(savearea *sv, unsigned int fence); +int packAsc (unsigned char *inbuf, unsigned int length); + #if !MACH_KDB boolean_t db_breakpoints_inserted = TRUE; jmp_buf_t *db_recover = 0; @@ -185,6 +188,7 @@ char *failNames[] = { "No saveareas", /* failNoSavearea */ "Savearea corruption", /* failSaveareaCorr */ "Invalid live context", /* failBadLiveContext */ + "Unaligned stack", /* failUnalignedStk */ "Unknown failure code" /* Unknown failure code - must always be last */ }; @@ -192,7 +196,6 @@ char *invxcption = "Unknown code"; extern const char version[]; extern char *trap_type[]; -extern vm_offset_t mem_actual; #if !MACH_KDB void kdb_trap(int type, struct savearea *regs); @@ -276,6 +279,11 @@ machine_startup(boot_args *args) sched_poll_yield_shift = boot_arg; } + if (PE_parse_boot_arg("refunn", &boot_arg)) { + extern int refunnel_hint_enabled; + + refunnel_hint_enabled = boot_arg; + } machine_conf(); @@ -302,13 +310,14 @@ machine_conf(void) { machine_info.max_cpus = NCPUS; machine_info.avail_cpus = 1; - machine_info.memory_size = mem_size; + machine_info.memory_size = mem_size; /* Note that this will be 2 GB for >= 2 GB machines */ } void machine_init(void) { clock_config(); + perfmon_init(); } void slave_machine_init(void) @@ -363,6 +372,7 @@ print_backtrace(struct savearea *ssp) thread_act_t *act; savearea *sv, *svssp; int cpu; + savearea *psv; /* * We need this lock to make sure we don't hang up when we double panic on an MP. @@ -370,7 +380,7 @@ print_backtrace(struct savearea *ssp) cpu = cpu_number(); /* Just who are we anyways? */ if(pbtcpu != cpu) { /* Allow recursion */ - hw_atomic_add(&pbtcnt, 1); /* Remember we are trying */ + hw_atomic_add((uint32_t *)&pbtcnt, 1); /* Remember we are trying */ while(!hw_lock_try(&pbtlock)); /* Spin here until we can get in. If we never do, well, we're crashing anyhow... */ pbtcpu = cpu; /* Mark it as us */ } @@ -380,7 +390,7 @@ print_backtrace(struct savearea *ssp) if(current_thread()) sv = (savearea *)current_act()->mact.pcb; /* Find most current savearea if system has started */ fence = 0xFFFFFFFF; /* Show we go all the way */ - if(sv) fence = sv->save_r1; /* Stop at previous exception point */ + if(sv) fence = (unsigned int)sv->save_r1; /* Stop at previous exception point */ if(!svssp) { /* Should we start from stack? 
*/ kdb_printf("Latest stack backtrace for cpu %d:\n", cpu_number()); @@ -395,8 +405,9 @@ print_backtrace(struct savearea *ssp) else { /* Were we passed an exception? */ fence = 0xFFFFFFFF; /* Show we go all the way */ if(svssp->save_hdr.save_prev) { - if((svssp->save_hdr.save_prev <= VM_MAX_KERNEL_ADDRESS) && ((unsigned int)LRA(PPC_SID_KERNEL, (void *)svssp->save_hdr.save_prev))) { /* Valid address? */ - fence = svssp->save_hdr.save_prev->save_r1; /* Stop at previous exception point */ + if((svssp->save_hdr.save_prev <= vm_last_addr) && ((unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)svssp->save_hdr.save_prev))) { /* Valid address? */ + psv = (savearea *)((unsigned int)svssp->save_hdr.save_prev); /* Get the 64-bit back chain converted to a regular pointer */ + fence = (unsigned int)psv->save_r1; /* Stop at previous exception point */ } } @@ -414,7 +425,8 @@ print_backtrace(struct savearea *ssp) kdb_printf("Proceeding back via exception chain:\n"); while(sv) { /* Do them all... */ - if(!((sv <= VM_MAX_KERNEL_ADDRESS) && (unsigned int)LRA(PPC_SID_KERNEL, (void *)sv))) { /* Valid address? */ + if(!(((addr64_t)((uintptr_t)sv) <= vm_last_addr) && + (unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)sv)))) { /* Valid address? */ kdb_printf(" Exception state (sv=0x%08X) Not mapped or invalid. stopping...\n", sv); break; } @@ -426,21 +438,22 @@ print_backtrace(struct savearea *ssp) else { fence = 0xFFFFFFFF; /* Show we go all the way */ if(sv->save_hdr.save_prev) { - if((sv->save_hdr.save_prev <= VM_MAX_KERNEL_ADDRESS) && ((unsigned int)LRA(PPC_SID_KERNEL, (void *)sv->save_hdr.save_prev))) { /* Valid address? */ - fence = sv->save_hdr.save_prev->save_r1; /* Stop at previous exception point */ + if((sv->save_hdr.save_prev <= vm_last_addr) && ((unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)sv->save_hdr.save_prev))) { /* Valid address? */ + psv = (savearea *)((unsigned int)sv->save_hdr.save_prev); /* Get the 64-bit back chain converted to a regular pointer */ + fence = (unsigned int)psv->save_r1; /* Stop at previous exception point */ } } dump_savearea(sv, fence); /* Dump this savearea */ } - sv = sv->save_hdr.save_prev; /* Back chain */ + sv = CAST_DOWN(savearea *, sv->save_hdr.save_prev); /* Back chain */ } kdb_printf("\nKernel version:\n%s\n",version); /* Print kernel version */ pbtcpu = -1; /* Mark as unowned */ hw_lock_unlock(&pbtlock); /* Allow another back trace to happen */ - hw_atomic_sub(&pbtcnt, 1); /* Show we are done */ + hw_atomic_sub((uint32_t *) &pbtcnt, 1); /* Show we are done */ while(pbtcnt); /* Wait for completion */ @@ -455,11 +468,11 @@ void dump_savearea(savearea *sv, unsigned int fence) { else xcode = trap_type[sv->save_exception / 4]; /* Point to the type */ kdb_printf(" PC=0x%08X; MSR=0x%08X; DAR=0x%08X; DSISR=0x%08X; LR=0x%08X; R1=0x%08X; XCP=0x%08X (%s)\n", - sv->save_srr0, sv->save_srr1, sv->save_dar, sv->save_dsisr, - sv->save_lr, sv->save_r1, sv->save_exception, xcode); + (unsigned int)sv->save_srr0, (unsigned int)sv->save_srr1, (unsigned int)sv->save_dar, sv->save_dsisr, + (unsigned int)sv->save_lr, (unsigned int)sv->save_r1, sv->save_exception, xcode); if(!(sv->save_srr1 & MASK(MSR_PR))) { /* Are we in the kernel? 
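Each place the old code called LRA() now applies the same two-step test: the pointer must lie at or below vm_last_addr, and pmap_find_phys() must report a physical page behind it in the kernel pmap. A sketch of that idiom as a predicate (the helper name is hypothetical):

/* Sketch: is a kernel virtual address safe for the debugger to read? */
static boolean_t kva_is_backed(addr64_t va)
{
	if (va > vm_last_addr)
		return FALSE;				/* beyond mapped kernel VA */
	return pmap_find_phys(kernel_pmap, va) != 0;	/* zero page number => unmapped */
}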
*/ - dump_backtrace(sv->save_r1, fence); /* Dump the stack back trace from here if not user state */ + dump_backtrace((unsigned int)sv->save_r1, fence); /* Dump the stack back trace from here if not user state */ } return; @@ -481,23 +494,23 @@ void dump_backtrace(unsigned int stackptr, unsigned int fence) { if(!stackptr || (stackptr == fence)) break; /* Hit stop point or end... */ - if(stackptr & 0x0000000f) { /* Is stack pointer valid? */ + if(stackptr & 0x0000000F) { /* Is stack pointer valid? */ kdb_printf("\n backtrace terminated - unaligned frame address: 0x%08X\n", stackptr); /* No, tell 'em */ break; } - raddr = (unsigned int)LRA(PPC_SID_KERNEL, (void *)stackptr); /* Get physical frame address */ - if(!raddr || (stackptr > VM_MAX_KERNEL_ADDRESS)) { /* Is it mapped? */ + raddr = (unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)stackptr); /* Get physical frame address */ + if(!raddr || (stackptr > vm_last_addr)) { /* Is it mapped? */ kdb_printf("\n backtrace terminated - frame not mapped or invalid: 0x%08X\n", stackptr); /* No, tell 'em */ break; } - if(raddr >= mem_actual) { /* Is it within physical RAM? */ + if(!mapping_phys_lookup(raddr, &dumbo)) { /* Is it within physical RAM? */ kdb_printf("\n backtrace terminated - frame outside of RAM: v=0x%08X, p=%08X\n", stackptr, raddr); /* No, tell 'em */ break; } - ReadReal(raddr, &sframe[0]); /* Fetch the stack frame */ + ReadReal((addr64_t)((raddr << 12) | (stackptr & 4095)), &sframe[0]); /* Fetch the stack frame */ bframes[i] = sframe[LRindex]; /* Save the link register */ @@ -555,7 +568,33 @@ Debugger(const char *message) { /* everything should be printed now so copy to NVRAM */ if( debug_buf_size > 0) - pi_size = PESavePanicInfo( debug_buf, debug_buf_ptr - debug_buf); + + { + /* Do not compress the panic log unless kernel debugging + * is disabled - the panic log isn't synced to NVRAM if + * debugging is enabled, and the panic log is valuable + * whilst debugging + */ + if (!panicDebugging) + { + unsigned int bufpos; + + /* Now call the compressor */ + bufpos = packAsc (debug_buf, (unsigned int) (debug_buf_ptr - debug_buf) ); + /* If compression was successful, use the compressed length */ + if (bufpos) + { + debug_buf_ptr = debug_buf + bufpos; + } + } + /* Truncate if the buffer is larger than a certain magic + * size - this really ought to be some appropriate fraction + * of the NVRAM image buffer, and is best done in the + * savePanicInfo() or PESavePanicInfo() calls + */ + pi_size = debug_buf_ptr - debug_buf; + pi_size = PESavePanicInfo( debug_buf, ((pi_size > 2040) ? 
2040 : pi_size)); + } if( !panicDebugging && (pi_size != 0) ) { int my_cpu, debugger_cpu; @@ -619,11 +658,11 @@ void SysChoked(int type, savearea *sv) { /* The system is bad dead */ disableDebugOuput = FALSE; debug_mode = TRUE; - failcode = sv->save_r3; /* Get the failure code */ + failcode = (unsigned int)sv->save_r3; /* Get the failure code */ if(failcode > failUnknown) failcode = failUnknown; /* Set unknown code code */ - kprintf("System Failure: cpu=%d; code=%08X (%s)\n", cpu_number(), sv->save_r3, failNames[failcode]); - kdb_printf("System Failure: cpu=%d; code=%08X (%s)\n", cpu_number(), sv->save_r3, failNames[failcode]); + kprintf("System Failure: cpu=%d; code=%08X (%s)\n", cpu_number(), (unsigned int)sv->save_r3, failNames[failcode]); + kdb_printf("System Failure: cpu=%d; code=%08X (%s)\n", cpu_number(), (unsigned int)sv->save_r3, failNames[failcode]); print_backtrace(sv); /* Attempt to print backtrace */ Call_DebuggerC(type, sv); /* Attempt to get into debugger */ @@ -644,7 +683,8 @@ int Call_DebuggerC( struct savearea *saved_state) { int directcall, wait; - vm_offset_t instr_ptr; + addr64_t instr_ptr; + ppnum_t instr_pp; unsigned int instr; int my_cpu, tcpu; @@ -676,13 +716,16 @@ int Call_DebuggerC( my_cpu, debugger_is_slave[my_cpu], debugger_cpu, saved_state->save_srr0); } - if (instr_ptr = (vm_offset_t)LRA(PPC_SID_KERNEL, (void *)(saved_state->save_srr0))) { - instr = ml_phys_read(instr_ptr); /* Get the trap that caused entry */ + instr_pp = (vm_offset_t)pmap_find_phys(kernel_pmap, (addr64_t)(saved_state->save_srr0)); + + if (instr_pp) { + instr_ptr = (addr64_t)(((addr64_t)instr_pp << 12) | (saved_state->save_srr0 & 0xFFF)); /* Make physical address */ + instr = ml_phys_read_64(instr_ptr); /* Get the trap that caused entry */ } else instr = 0; #if 0 - if (debugger_debug) kprintf("Call_DebuggerC(%d): instr_ptr = %08X, instr = %08X\n", my_cpu, instr_ptr, instr); /* (TEST/DEBUG) */ + if (debugger_debug) kprintf("Call_DebuggerC(%d): instr_pp = %08X, instr_ptr = %016llX, instr = %08X\n", my_cpu, instr_pp, instr_ptr, instr); /* (TEST/DEBUG) */ #endif if (db_breakpoints_inserted) cpus_holding_bkpts++; /* Bump up the holding count */ @@ -857,4 +900,38 @@ void unlock_debugger(void) { } +struct pasc { + unsigned a: 7; + unsigned b: 7; + unsigned c: 7; + unsigned d: 7; + unsigned e: 7; + unsigned f: 7; + unsigned g: 7; + unsigned h: 7; +} __attribute__((packed)); + +typedef struct pasc pasc_t; +int packAsc (unsigned char *inbuf, unsigned int length) +{ + unsigned int i, j = 0; + pasc_t pack; + + for (i = 0; i < length; i+=8) + { + pack.a = inbuf[i]; + pack.b = inbuf[i+1]; + pack.c = inbuf[i+2]; + pack.d = inbuf[i+3]; + pack.e = inbuf[i+4]; + pack.f = inbuf[i+5]; + pack.g = inbuf[i+6]; + pack.h = inbuf[i+7]; + bcopy ((char *) &pack, inbuf + j, 7); + j += 7; + } + if (0 != (i - length)) + inbuf[j - (i - length)] &= 0xFF << (8-(i - length)); + return j-(((i-length) == 7) ? 6 : (i - length)); +} diff --git a/osfmk/ppc/movc.s b/osfmk/ppc/movc.s index 2a17ac245..6dee8f87a 100644 --- a/osfmk/ppc/movc.s +++ b/osfmk/ppc/movc.s @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -32,558 +32,876 @@ #include #include +#define INSTRUMENT 0 + +//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><> /* * void pmap_zero_page(vm_offset_t pa) * - * zero a page of physical memory. + * Zero a page of physical memory. 
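packAsc drops the high bit of each byte (assumed zero for 7-bit ASCII), packing eight characters into seven bytes in place and returning the shortened length. Assuming the big-endian bit-field layout the packed struct relies on, a hypothetical unpacker (not part of the kernel) would invert one full group like this:

/* Sketch: inverse of packAsc for one complete 8-character group. */
static void unpackAscGroup(const unsigned char p[7], unsigned char out[8])
{
	out[0] =   p[0] >> 1;
	out[1] = ((p[0] & 0x01) << 6) | (p[1] >> 2);
	out[2] = ((p[1] & 0x03) << 5) | (p[2] >> 3);
	out[3] = ((p[2] & 0x07) << 4) | (p[3] >> 4);
	out[4] = ((p[3] & 0x0F) << 3) | (p[4] >> 5);
	out[5] = ((p[4] & 0x1F) << 2) | (p[5] >> 6);
	out[6] = ((p[5] & 0x3F) << 1) | (p[6] >> 7);
	out[7] =   p[6] & 0x7F;
}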
This routine runs in 32 or 64-bit mode, + * and handles 32 and 128-byte cache lines. */ -#if DEBUG - /* C debug stub in pmap.c calls this */ -ENTRY(pmap_zero_page_assembler, TAG_NO_FRAME_USED) -#else -ENTRY(pmap_zero_page, TAG_NO_FRAME_USED) -#endif /* DEBUG */ - - mfmsr r6 /* Get the MSR */ - rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 /* Turn off DR */ - rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Disable interruptions - li r4,PPC_PGBYTES-CACHE_LINE_SIZE /* Point to the end of the page */ - mtmsr r7 /* Set MSR to DR off */ - isync /* Ensure data translations are off */ - - -.L_phys_zero_loop: - subic. r5,r4,CACHE_LINE_SIZE /* Point to the next one */ - dcbz r4, r3 /* Clear the whole thing to 0s */ - subi r4,r5,CACHE_LINE_SIZE /* Point to the next one */ - dcbz r5, r3 /* Clear the next to zeros */ - bgt+ .L_phys_zero_loop /* Keep going until we do the page... */ - - sync /* Make sure they're all done */ - li r4,PPC_PGBYTES-CACHE_LINE_SIZE /* Point to the end of the page */ - -.L_inst_inval_loop: - subic. r5,r4,CACHE_LINE_SIZE /* Point to the next one */ - icbi r4, r3 /* Clear the whole thing to 0s */ - subi r4,r5,CACHE_LINE_SIZE /* Point to the next one */ - icbi r5, r3 /* Clear the next to zeros */ - bgt+ .L_inst_inval_loop /* Keep going until we do the page... */ - - sync /* Make sure they're all done */ - - mtmsr r6 /* Restore original translations */ - isync /* Ensure data translations are on */ - blr + .align 5 + .globl EXT(pmap_zero_page) + +LEXT(pmap_zero_page) + + mflr r12 // save return address + bl EXT(ml_set_physical_disabled) // turn DR and EE off, SF on, get features in r10 + mtlr r12 // restore return address + andi. r9,r10,pf32Byte+pf128Byte // r9 <- cache line size + subfic r4,r9,PPC_PGBYTES // r4 <- starting offset in page + + bt++ pf64Bitb,page0S4 // Go do the big guys... + + slwi r3,r3,12 // get page address from page num + b page_zero_1 // Jump to line aligned loop... + + .align 5 + + nop + nop + nop + nop + nop + nop + nop + +page0S4: + sldi r3,r3,12 // get page address from page num + +page_zero_1: // loop zeroing cache lines + sub. r5,r4,r9 // more to go? + dcbz128 r3,r4 // zero either 32 or 128 bytes + sub r4,r5,r9 // generate next offset + dcbz128 r3,r5 + bne-- page_zero_1 + + b EXT(ml_restore) // restore MSR and do the isync + + +//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><> /* void * phys_copy(src, dst, bytecount) - * vm_offset_t src; - * vm_offset_t dst; + * addr64_t src; + * addr64_t dst; * int bytecount * * This routine will copy bytecount bytes from physical address src to physical - * address dst. + * address dst. It runs in 64-bit mode if necessary, but does not handle + * overlap or make any attempt to be optimal. Length must be a signed word. + * Not performance critical. 
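
In C terms the copy below amounts to this sketch (illustrative only; the real routine runs with data translation off, and in 64-bit mode when the addresses require it):

    #include <string.h>

    /* Sketch of phys_copy: word-at-a-time main loop, then a byte tail.
       memcpy stands in for lwz/stw; PPC tolerates unaligned accesses. */
    static void phys_copy_sketch(const unsigned char *src,
                                 unsigned char *dst, int count)
    {
        while (count >= 4) {                    /* the lwz/stw word loop */
            memcpy(dst, src, 4);
            src += 4; dst += 4; count -= 4;
        }
        while (count-- > 0)                     /* the lbz/stb byte tail */
            *dst++ = *src++;
    }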
*/ -ENTRY(phys_copy, TAG_NO_FRAME_USED) - - /* Switch off data translations */ - mfmsr r6 - rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 - rlwinm r7, r7, 0, MSR_EE_BIT+1, MSR_EE_BIT-1 - mtmsr r7 - isync /* Ensure data translations are off */ - - subi r3, r3, 4 - subi r4, r4, 4 - - cmpwi r5, 3 - ble- .L_phys_copy_bytes -.L_phys_copy_loop: - lwz r0, 4(r3) - addi r3, r3, 4 - subi r5, r5, 4 - stw r0, 4(r4) - addi r4, r4, 4 - cmpwi r5, 3 - bgt+ .L_phys_copy_loop - - /* If no leftover bytes, we're done now */ - cmpwi r5, 0 - beq+ .L_phys_copy_done - -.L_phys_copy_bytes: - addi r3, r3, 3 - addi r4, r4, 3 -.L_phys_copy_byte_loop: - lbz r0, 1(r3) - addi r3, r3, 1 - subi r5, r5, 1 - stb r0, 1(r4) - addi r4, r4, 1 - cmpwi r5, 0 - bne+ .L_phys_copy_byte_loop - -.L_phys_copy_done: - mtmsr r6 /* Restore original translations */ - isync /* Ensure data translations are off */ - - blr + .align 5 + .globl EXT(phys_copy) + +LEXT(phys_copy) + + rlwinm r3,r3,0,1,0 ; Duplicate high half of long long paddr into top of reg + mflr r12 // get return address + rlwimi r3,r4,0,0,31 ; Combine bottom of long long to full 64-bits + rlwinm r4,r5,0,1,0 ; Duplicate high half of long long paddr into top of reg + bl EXT(ml_set_physical_disabled) // turn DR and EE off, SF on, get features in r10 + rlwimi r4,r6,0,0,31 ; Combine bottom of long long to full 64-bits + mtlr r12 // restore return address + subic. r5,r7,4 // a word to copy? + b phys_copy_2 + + .align 5 + +phys_copy_1: // loop copying words + subic. r5,r5,4 // more to go? + lwz r0,0(r3) + addi r3,r3,4 + stw r0,0(r4) + addi r4,r4,4 +phys_copy_2: + bge phys_copy_1 + addic. r5,r5,4 // restore count + ble phys_copy_4 // no more + + // Loop is aligned here + +phys_copy_3: // loop copying bytes + subic. r5,r5,1 // more to go? + lbz r0,0(r3) + addi r3,r3,1 + stb r0,0(r4) + addi r4,r4,1 + bgt phys_copy_3 +phys_copy_4: + b EXT(ml_restore) // restore MSR and do the isync + + +//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><> /* void * pmap_copy_page(src, dst) - * vm_offset_t src; - * vm_offset_t dst; + * ppnum_t src; + * ppnum_t dst; * * This routine will copy the physical page src to physical page dst * - * This routine assumes that the src and dst are page aligned and that the - * destination is cached. - * - * We also must assume that noone will be executing within the destination - * page. We also assume that this will be used for paging + * This routine assumes that the src and dst are page numbers and that the + * destination is cached. It runs on 32 and 64 bit processors, with and + * without altivec, and with 32 and 128 byte cache lines. + * We also must assume that no-one will be executing within the destination + * page, and that this will be used for paging. Because this + * is a common routine, we have tuned loops for each processor class. * */ +#define kSFSize (FM_SIZE+160) -#if DEBUG - /* if debug, we have a little piece of C around this - * in pmap.c that gives some trace ability - */ -ENTRY(pmap_copy_page_assembler, TAG_NO_FRAME_USED) -#else ENTRY(pmap_copy_page, TAG_NO_FRAME_USED) -#endif /* DEBUG */ - -#if 0 - mfpvr r9 ; Get the PVR - rlwinm r9,r9,16,16,31 ; Isolate the PPC processor - cmplwi r9,PROCESSOR_VERSION_Max ; Do we have Altivec? - beq+ wegotaltivec ; Yeah... 
-#endif - - mfmsr r9 ; Get the MSR - rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - stwu r1,-(FM_SIZE+32)(r1) ; Make a frame for us - rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Disable interruptions - ori r7,r7,lo16(MASK(MSR_FP)) ; Turn on the FPU - mtmsr r7 ; Disable rupts and enable FPU - isync - - stfd f0,FM_SIZE+0(r1) ; Save an FP register - rlwinm r7,r7,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear the DDAT bit - stfd f1,FM_SIZE+8(r1) ; Save an FP register - addi r6,r3,PPC_PGBYTES ; Point to the start of the next page - stfd f2,FM_SIZE+16(r1) ; Save an FP register - mr r8,r4 ; Save the destination - stfd f3,FM_SIZE+24(r1) ; Save an FP register - - mtmsr r7 ; Set the new MSR - isync ; Ensure data translations are off - - dcbt br0, r3 /* Start in first input line */ - li r5, CACHE_LINE_SIZE /* Get the line size */ - -.L_pmap_copy_page_loop: - dcbz 0, r4 /* Allocate a line for the output */ - lfd f0, 0(r3) /* Get first 8 */ - lfd f1, 8(r3) /* Get second 8 */ - lfd f2, 16(r3) /* Get third 8 */ - stfd f0, 0(r4) /* Put first 8 */ - dcbt r5, r3 /* Start next line coming in */ - lfd f3, 24(r3) /* Get fourth 8 */ - stfd f1, 8(r4) /* Put second 8 */ - addi r3,r3,CACHE_LINE_SIZE /* Point to the next line in */ - stfd f2, 16(r4) /* Put third 8 */ - cmplw cr0,r3,r6 /* See if we're finished yet */ - stfd f3, 24(r4) /* Put fourth 8 */ - dcbst br0,r4 /* Force it out */ - addi r4,r4,CACHE_LINE_SIZE /* Point to the next line out */ - blt+ .L_pmap_copy_page_loop /* Copy the whole page */ - - sync /* Make sure they're all done */ - li r4,PPC_PGBYTES-CACHE_LINE_SIZE /* Point to the end of the page */ - -invalinst: - subic. r5,r4,CACHE_LINE_SIZE /* Point to the next one */ - icbi r4, r8 /* Trash the i-cache */ - subi r4,r5,CACHE_LINE_SIZE /* Point to the next one */ - icbi r5, r8 /* Trash the i-cache */ - bgt+ invalinst /* Keep going until we do the page... */ - - rlwimi r7,r9,0,MSR_DR_BIT,MSR_DR_BIT ; Set DDAT if on - sync ; Make sure all invalidates done - - mtmsr r7 ; Set DDAT correctly - isync - - lfd f0,FM_SIZE+0(r1) ; Restore an FP register - lfd f1,FM_SIZE+8(r1) ; Restore an FP register - lfd f2,FM_SIZE+16(r1) ; Restore an FP register - lfd f3,FM_SIZE+24(r1) ; Restore an FP register - - lwz r1,0(r1) ; Pop up the stack - - mtmsr r9 ; Turn off FPU now and maybe rupts back on - isync - blr - -#if 0 -; -; This is not very optimal. We just do it here for a test of -; Altivec in the kernel. -; -wegotaltivec: - mfmsr r9 ; Get the MSR - lis r8,hi16(0xC0000000) ; Make sure we keep the first 2 vector registers - rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Disable interruptions - lis r6,lo16(2*256+128) ; Specify 128 blocks of 2 vectors each - rlwinm r7,r7,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear the DDAT bit - ori r6,r6,32 ; Set a 32-byte stride - mtsprg 256,r8 ; Set VRSave - mtmsr r7 ; Disable rupts and turn xlate off - isync - - addi r11,r3,4096 ; Point to the next page - li r10,16 ; Get vector size - -avmovepg: lvxl v0,br0,r3 ; Get first half of line - dcba br0,r4 ; Allocate output - lvxl v1,r10,r3 ; Get second half of line - stvxl v0,br0,r4 ; Save first half of line - addi r3,r3,32 ; Point to the next line - icbi br0,r4 ; Make the icache go away also - stvxl v1,r10,r4 ; Save second half of line - cmplw r3,r11 ; Have we reached the next page? - dcbst br0,r4 ; Make sure the line is on its way out - addi r4,r4,32 ; Point to the next line - blt+ avmovepg ; Move the next line... 
- - li r8,0 ; Clear this - sync ; Make sure all the memory stuff is done - mtsprg 256,r8 ; Show we are not using VRs any more - mtmsr r9 ; Translation and interruptions back on - isync - blr -#endif - - - - -/* - * int - * copyin(src, dst, count) - * vm_offset_t src; - * vm_offset_t dst; - * int count; - * - */ -ENTRY2(copyin, copyinmsg, TAG_NO_FRAME_USED) - -/* Preamble allowing us to call a sub-function */ - mflr r0 - stw r0,FM_LR_SAVE(r1) - stwu r1,-(FM_SIZE+16)(r1) - - cmpli cr0,r5,0 - ble- cr0,.L_copyinout_trivial - -/* we know we have a valid copyin to do now */ -/* Set up thread_recover in case we hit an illegal address */ - - mfsprg r8,1 /* Get the current act */ - lwz r10,ACT_THREAD(r8) - lis r11,hi16(.L_copyinout_error) - lwz r8,ACT_VMMAP(r8) - ori r11,r11,lo16(.L_copyinout_error) - add r9,r3,r5 /* Get the end of the source */ - lwz r8,VMMAP_PMAP(r8) ; Get the pmap - rlwinm r12,r3,6,26,29 ; Get index to the segment slot - subi r9,r9,1 /* Make sure we don't go too far */ - add r8,r8,r12 ; Start indexing to the segment value - stw r11,THREAD_RECOVER(r10) - xor r9,r9,r3 /* Smoosh 'em together */ - lwz r8,PMAP_SEGS(r8) ; Get the source SR value - rlwinm. r9,r9,0,1,3 /* Top nybble equal? */ - mtsr SR_COPYIN,r8 ; Set the SR - isync -#if 0 - lis r0,HIGH_ADDR(EXT(dbgRegsCall)) /* (TEST/DEBUG) */ - ori r0,r0,LOW_ADDR(EXT(dbgRegsCall)) /* (TEST/DEBUG) */ - sc /* (TEST/DEBUG) */ -#endif - -/* For optimization, we check if the copyin lies on a segment - * boundary. If it doesn't, we can use a simple copy. If it - * does, we split it into two separate copies in some C code. - */ - - bne- .L_call_copyin_multiple /* Nope, we went past the segment boundary... */ - - rlwinm r3,r3,0,4,31 - oris r3,r3,(SR_COPYIN_NUM << (28-16)) /* Set the copyin segment as the source */ - - bl EXT(bcopy) + lis r2,hi16(MASK(MSR_VEC)) ; Get the vector flag + mflr r0 // get return + ori r2,r2,lo16(MASK(MSR_FP)) ; Add the FP flag + stw r0,8(r1) // save + stwu r1,-kSFSize(r1) // set up a stack frame for VRs or FPRs + mfmsr r11 // save MSR at entry + mfsprg r10,2 // get feature flags + andc r11,r11,r2 // Clear out vec and fp + ori r2,r2,lo16(MASK(MSR_EE)) // Get EE on also + andc r2,r11,r2 // Clear out EE as well + mtcrf 0x02,r10 // we need to test pf64Bit + ori r2,r2,MASK(MSR_FP) // must enable FP for G3... 
+	mtcrf	0x80,r10				// we need to test pfAltivec too
+	oris	r2,r2,hi16(MASK(MSR_VEC))		// enable altivec for G4 (ignored if G3)
+	mtmsr	r2					// turn EE off, FP and VEC on
+	isync
+	bt++	pf64Bitb,pmap_copy_64			// skip if 64-bit processor (only they take hint)
+	slwi	r3,r3,12				// get page address from page num
+	slwi	r4,r4,12				// get page address from page num
+	rlwinm	r12,r2,0,MSR_DR_BIT+1,MSR_DR_BIT-1	// get ready to turn off DR
+	bt	pfAltivecb,pmap_copy_g4			// altivec but not 64-bit means G4
+
+
+	// G3 -- copy using FPRs
+
+	stfd	f0,FM_SIZE+0(r1)			// save the 4 FPRs we use to copy
+	stfd	f1,FM_SIZE+8(r1)
+	li	r5,PPC_PGBYTES/32			// count of cache lines in a page
+	stfd	f2,FM_SIZE+16(r1)
+	mtctr	r5
+	stfd	f3,FM_SIZE+24(r1)
+	mtmsr	r12					// turn off DR after saving FPRs on stack
+	isync
+
+pmap_g3_copy_loop:					// loop over 32-byte cache lines
+	dcbz	0,r4					// avoid read of dest line
+	lfd	f0,0(r3)
+	lfd	f1,8(r3)
+	lfd	f2,16(r3)
+	lfd	f3,24(r3)
+	addi	r3,r3,32
+	stfd	f0,0(r4)
+	stfd	f1,8(r4)
+	stfd	f2,16(r4)
+	stfd	f3,24(r4)
+	dcbst	0,r4					// flush dest line to RAM
+	addi	r4,r4,32
+	bdnz	pmap_g3_copy_loop
+
+	sync						// wait for stores to take
+	subi	r4,r4,PPC_PGBYTES			// restore ptr to destination page
+	li	r6,PPC_PGBYTES-32			// point to last line in page
+pmap_g3_icache_flush:
+	subic.	r5,r6,32				// more to go?
+	icbi	r4,r6					// flush another line in icache
+	subi	r6,r5,32				// get offset to next line
+	icbi	r4,r5
+	bne	pmap_g3_icache_flush
+
+	sync
+	mtmsr	r2					// turn DR back on
+	isync
+	lfd	f0,FM_SIZE+0(r1)			// restore the FPRs
+	lfd	f1,FM_SIZE+8(r1)
+	lfd	f2,FM_SIZE+16(r1)
+	lfd	f3,FM_SIZE+24(r1)
+
+	b	pmap_g4_restore				// restore MSR and done
+
+
+	// G4 -- copy using VRs
+
+pmap_copy_g4:						// r2=(MSR-EE), r12=(r2-DR), r10=features, r11=old MSR
+	la	r9,FM_SIZE+16(r1)			// place where we save VRs to r9
+	li	r5,16					// load x-form offsets into r5-r9
+	li	r6,32					// another offset
+	stvx	v0,0,r9					// save some VRs so we can use to copy
+	li	r7,48					// another offset
+	stvx	v1,r5,r9
+	li	r0,PPC_PGBYTES/64			// we loop over 64-byte chunks
+	stvx	v2,r6,r9
+	mtctr	r0
+	li	r8,96					// get look-ahead for touch
+	stvx	v3,r7,r9
+	li	r9,128
+	mtmsr	r12					// now we've saved VRs on stack, turn off DR
+	isync						// wait for it to happen
+	b	pmap_g4_copy_loop
+
+	.align	5					// align inner loops
+pmap_g4_copy_loop:					// loop over 64-byte chunks
+	dcbt	r3,r8					// touch 3 lines ahead
+	nop						// avoid a 17-word loop...
+	dcbt	r3,r9					// touch 4 lines ahead
+	nop						// more padding
+	dcba	0,r4					// avoid pre-fetch of 1st dest line
+	lvx	v0,0,r3					// offset 0
+	lvx	v1,r5,r3				// offset 16
+	lvx	v2,r6,r3				// offset 32
+	lvx	v3,r7,r3				// offset 48
+	addi	r3,r3,64
+	dcba	r6,r4					// avoid pre-fetch of 2nd line
+	stvx	v0,0,r4					// offset 0
+	stvx	v1,r5,r4				// offset 16
+	stvx	v2,r6,r4				// offset 32
+	stvx	v3,r7,r4				// offset 48
+	dcbf	0,r4					// push line 1
+	dcbf	r6,r4					// and line 2
+	addi	r4,r4,64
+	bdnz	pmap_g4_copy_loop
+
+	sync						// wait for stores to take
+	subi	r4,r4,PPC_PGBYTES			// restore ptr to destination page
+	li	r8,PPC_PGBYTES-32			// point to last line in page
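
Because pmap_copy_page is used for paging, the destination page may soon be executed, so each variant finishes by pushing the copied bytes out of the data cache and invalidating the page in the instruction cache. The shape of that epilogue in C (a sketch only; flush_dcache_line and inval_icache_line are hypothetical stand-ins for dcbf/dcbst and icbi):

    #include <stddef.h>

    /* Hypothetical stand-ins for the dcbf/dcbst and icbi instructions. */
    static inline void flush_dcache_line(void *p) { (void)p; }
    static inline void inval_icache_line(void *p) { (void)p; }

    /* Sketch: make a freshly copied page safe to execute from. */
    static void finish_copied_page(char *dst, size_t pagebytes, size_t line)
    {
        size_t off;
        for (off = 0; off < pagebytes; off += line)
            flush_dcache_line(dst + off);   /* push copied data to RAM */
        for (off = 0; off < pagebytes; off += line)
            inval_icache_line(dst + off);   /* drop stale i-cache lines */
        /* the real code brackets this with sync/isync barriers */
    }

+pmap_g4_icache_flush:
+	subic.	r9,r8,32				// more to go?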
+	icbi	r4,r8					// flush from icache
+	subi	r8,r9,32				// get offset to next line
+	icbi	r4,r9
+	bne	pmap_g4_icache_flush
+
+	sync
+	mtmsr	r2					// turn DR back on
+	isync
+	la	r9,FM_SIZE+16(r1)			// get base of VR save area
+	lvx	v0,0,r9					// restore the VRs
+	lvx	v1,r5,r9
+	lvx	v2,r6,r9
+	lvx	v3,r7,r9
+
+pmap_g4_restore:					// r11=MSR
+	mtmsr	r11					// turn EE on, VEC and FP off
+	isync						// wait for it to happen
+	addi	r1,r1,kSFSize				// pop off our stack frame
+	lwz	r0,8(r1)				// restore return address
+	mtlr	r0
+	blr
+
+
+	// 64-bit/128-byte processor: copy using VRs
+
+pmap_copy_64:						// r10=features, r11=old MSR
+	sldi	r3,r3,12				// get page address from page num
+	sldi	r4,r4,12				// get page address from page num
+	la	r9,FM_SIZE+16(r1)			// get base of VR save area
+	li	r5,16					// load x-form offsets into r5-r9
+	li	r6,32					// another offset
+	bf	pfAltivecb,pmap_novmx_copy		// altivec suppressed...
+	stvx	v0,0,r9					// save 8 VRs so we can copy w/o bubbles
+	stvx	v1,r5,r9
+	li	r7,48					// another offset
+	li	r0,PPC_PGBYTES/128			// we loop over 128-byte chunks
+	stvx	v2,r6,r9
+	stvx	v3,r7,r9
+	addi	r9,r9,64				// advance base ptr so we can store another 4
+	mtctr	r0
+	li	r0,MASK(MSR_DR)				// get DR bit
+	stvx	v4,0,r9
+	stvx	v5,r5,r9
+	andc	r12,r2,r0				// turn off DR bit
+	li	r0,1					// get a 1 to slam into SF
+	stvx	v6,r6,r9
+	stvx	v7,r7,r9
+	rldimi	r12,r0,63,MSR_SF_BIT			// set SF bit (bit 0)
+	li	r8,-128					// offset so we can reach back one line
+	mtmsrd	r12					// now we've saved VRs, turn DR off and SF on
+	isync						// wait for it to happen
+	dcbt128	0,r3,1					// start a forward stream
+	b	pmap_64_copy_loop
+
+	.align	5					// align inner loops
+pmap_64_copy_loop:					// loop over 128-byte chunks
+	dcbz128	0,r4					// avoid read of destination line
+	lvx	v0,0,r3					// offset 0
+	lvx	v1,r5,r3				// offset 16
+	lvx	v2,r6,r3				// offset 32
+	lvx	v3,r7,r3				// offset 48
+	addi	r3,r3,64				// don't have enough GPRs so add 64 2x
+	lvx	v4,0,r3					// offset 64
+	lvx	v5,r5,r3				// offset 80
+	lvx	v6,r6,r3				// offset 96
+	lvx	v7,r7,r3				// offset 112
+	addi	r3,r3,64
+	stvx	v0,0,r4					// offset 0
+	stvx	v1,r5,r4				// offset 16
+	stvx	v2,r6,r4				// offset 32
+	stvx	v3,r7,r4				// offset 48
+	addi	r4,r4,64
+	stvx	v4,0,r4					// offset 64
+	stvx	v5,r5,r4				// offset 80
+	stvx	v6,r6,r4				// offset 96
+	stvx	v7,r7,r4				// offset 112
+	addi	r4,r4,64
+	dcbf	r8,r4					// flush the line we just wrote
+	bdnz	pmap_64_copy_loop
+
+	sync						// wait for stores to take
+	subi	r4,r4,PPC_PGBYTES			// restore ptr to destination page
+	li	r8,PPC_PGBYTES-128			// point to last line in page
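
A page number becomes a physical byte address here with a shift of 12 (sldi r3,r3,12; 4 KB pages), the same idiom the debugger path earlier builds as ((instr_pp << 12) | (srr0 & 0xFFF)). As a C helper (illustrative; the name is hypothetical):

    #include <stdint.h>

    /* Sketch: 64-bit physical address from a 4 KB page number plus offset. */
    static inline uint64_t pa_from_ppnum(uint32_t ppnum, uint32_t offset)
    {
        return ((uint64_t)ppnum << 12) | (offset & 0xFFFu);
    }

+pmap_64_icache_flush:
+	subic.	r9,r8,128				// more to go?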
+	icbi	r4,r8					// flush from icache
+	subi	r8,r9,128				// get offset to next line
+	icbi	r4,r9
+	bne	pmap_64_icache_flush
+
+	sync
+	mtmsrd	r2					// turn DR back on, SF off
+	isync
+	la	r9,FM_SIZE+16(r1)			// get base address of VR save area on stack
+	lvx	v0,0,r9					// restore the VRs
+	lvx	v1,r5,r9
+	lvx	v2,r6,r9
+	lvx	v3,r7,r9
+	addi	r9,r9,64
+	lvx	v4,0,r9
+	lvx	v5,r5,r9
+	lvx	v6,r6,r9
+	lvx	v7,r7,r9
+
+	b	pmap_g4_restore				// restore lower half of MSR and return
+
+	//
+	// Copy on 64-bit without VMX
+	//
+
+pmap_novmx_copy:
+	li	r0,PPC_PGBYTES/128			// we loop over 128-byte chunks
+	mtctr	r0
+	li	r0,MASK(MSR_DR)				// get DR bit
+	andc	r12,r2,r0				// turn off DR bit
+	li	r0,1					// get a 1 to slam into SF
+	rldimi	r12,r0,63,MSR_SF_BIT			// set SF bit (bit 0)
+	mtmsrd	r12					// now we've saved VRs, turn DR off and SF on
+	isync						// wait for it to happen
+	dcbt128	0,r3,1					// start a forward stream
+
+pmap_novmx_copy_loop:					// loop over 128-byte cache lines
+	dcbz128	0,r4					// avoid read of dest line
+
+	ld	r0,0(r3)				// Load half a line
+	ld	r12,8(r3)
+	ld	r5,16(r3)
+	ld	r6,24(r3)
+	ld	r7,32(r3)
+	ld	r8,40(r3)
+	ld	r9,48(r3)
+	ld	r10,56(r3)
+
+	std	r0,0(r4)				// Store half a line
+	std	r12,8(r4)
+	std	r5,16(r4)
+	std	r6,24(r4)
+	std	r7,32(r4)
+	std	r8,40(r4)
+	std	r9,48(r4)
+	std	r10,56(r4)
+
+	ld	r0,64(r3)				// Load half a line
+	ld	r12,72(r3)
+	ld	r5,80(r3)
+	ld	r6,88(r3)
+	ld	r7,96(r3)
+	ld	r8,104(r3)
+	ld	r9,112(r3)
+	ld	r10,120(r3)
+
+	addi	r3,r3,128
+
+	std	r0,64(r4)				// Store half a line
+	std	r12,72(r4)
+	std	r5,80(r4)
+	std	r6,88(r4)
+	std	r7,96(r4)
+	std	r8,104(r4)
+	std	r9,112(r4)
+	std	r10,120(r4)
+
+	dcbf	0,r4					// flush the line we just wrote
+	addi	r4,r4,128
+	bdnz	pmap_novmx_copy_loop
+
+	sync						// wait for stores to take
+	subi	r4,r4,PPC_PGBYTES			// restore ptr to destination page
+	li	r8,PPC_PGBYTES-128			// point to last line in page
+
+pmap_novmx_icache_flush:
+	subic.	r9,r8,128				// more to go?
+	icbi	r4,r8					// flush from icache
+	subi	r8,r9,128				// get offset to next line
+	icbi	r4,r9
+	bne	pmap_novmx_icache_flush
+
+	sync
+	mtmsrd	r2					// turn DR back on, SF off
+	isync
+
+	b	pmap_g4_restore				// restore lower half of MSR and return
+
+
+
+//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
-/* Now that copyin is done, we don't need a recovery point */
-
-	addi	r1,r1,FM_SIZE+16
-	mfsprg	r6,1				/* Get the current act */
-	lwz	r10,ACT_THREAD(r6)
-	li	r3,0
-	lwz	r0,FM_LR_SAVE(r1)
-	stw	r3,THREAD_RECOVER(r10)		/* Clear recovery */
-	mtlr	r0
-	blr
-
-/* we get here via the exception handler if an illegal
- * user memory reference was made.
- */
-.L_copyinout_error:
-
-/* Now that copyin is done, we don't need a recovery point */
-
-	mfsprg	r6,1				/* Get the current act */
-	addi	r1,r1,FM_SIZE+16
-	lwz	r10,ACT_THREAD(r6)
-	li	r4,0
-	lwz	r0,FM_LR_SAVE(r1)
-	stw	r4,THREAD_RECOVER(r10)		/* Clear recovery */
-	mtlr	r0
-	li	r3,EFAULT			; Indicate error (EFAULT)
-	blr
-
-.L_copyinout_trivial:
-	/* The copyin/out was for either 0 bytes or a negative
-	 * number of bytes, return an appropriate value (0 == SUCCESS).
-	 * cr0 still contains result of comparison of len with 0.
-	 */
-	li	r3, 0
-	beq+	cr0, .L_copyinout_negative
-	li	r3, 1
-.L_copyinout_negative:
-
-	/* unwind the stack */
-	addi	r1, r1, FM_SIZE+16
-	lwz	r0, FM_LR_SAVE(r1)
-	mtlr	r0
-
-	blr
-
-.L_call_copyin_multiple:
-
-	/* unwind the stack */
-	addi	r1, r1, FM_SIZE+16
-	lwz	r0, FM_LR_SAVE(r1)
-	mtlr	r0
-
-	b	EXT(copyin_multiple)		/* not a call - a jump! */
-
+// Stack frame format used by copyin, copyout, copyinstr and copyoutstr.
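
Pictured as a C struct sitting at r1+FM_SIZE, the frame described by the kk* offsets below looks roughly like this (illustrative only; the code works in raw byte offsets, not a struct):

    /* Sketch of the copy-routine stack frame; see the kk* defines below.
       kkFrameSize = FM_SIZE + 32, leaving pad after these six words. */
    struct kkframe_sketch {
        unsigned int buf_size;    /* kkBufSize  (FM_SIZE+0):  buffer length */
        unsigned int cr;          /* kkCR       (FM_SIZE+4):  caller's CR   */
        unsigned int source;      /* kkSource   (FM_SIZE+8):  source arg    */
        unsigned int dest;        /* kkDest     (FM_SIZE+12): dest arg      */
        unsigned int count_ptr;   /* kkCountPtr (FM_SIZE+16): *count, string ops */
        unsigned int r31_save;    /* kkR31Save  (FM_SIZE+20): caller's r31  */
    };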
+// These routines all run both on 32 and 64-bit machines, though because they are called +// by the BSD kernel they are always in 32-bit mode when entered. The mapped ptr returned +// by MapUserAddressSpace will be 64 bits however on 64-bit machines. Beware to avoid +// using compare instructions on this ptr. This mapped ptr is kept globally in r31, so there +// is no need to store or load it, which are mode-dependent operations since it could be +// 32 or 64 bits. + +#define kkFrameSize (FM_SIZE+32) + +#define kkBufSize (FM_SIZE+0) +#define kkCR (FM_SIZE+4) +#define kkSource (FM_SIZE+8) +#define kkDest (FM_SIZE+12) +#define kkCountPtr (FM_SIZE+16) +#define kkR31Save (FM_SIZE+20) + + +// nonvolatile CR bits we use as flags in cr3 + +#define kk64bit 12 +#define kkNull 13 +#define kkIn 14 +#define kkString 15 +#define kkZero 15 + + +//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><> /* * int - * copyout(src, dst, count) + * copyoutstr(src, dst, maxcount, count) * vm_offset_t src; * vm_offset_t dst; - * int count; + * vm_size_t maxcount; + * vm_size_t* count; * + * Set *count to the number of bytes copied. */ -ENTRY2(copyout, copyoutmsg, TAG_NO_FRAME_USED) +ENTRY(copyoutstr, TAG_NO_FRAME_USED) + mfcr r2 // we use nonvolatile cr3 + li r0,0 + crset kkString // flag as a string op + mr r10,r4 // for copyout, dest ptr (r4) is in user space + stw r0,0(r6) // initialize #bytes moved + crclr kkIn // flag as copyout + b copyJoin -/* Preamble allowing us to call a sub-function */ - - mflr r0 - stw r0,FM_LR_SAVE(r1) - stwu r1,-(FM_SIZE+16)(r1) - -#if 0 - stw r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */ - stw r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */ - stw r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */ - mr r6,r0 /* (TEST/DEBUG) */ - - bl EXT(tracecopyout) /* (TEST/DEBUG) */ - - lwz r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */ - lwz r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */ - lwz r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */ -#endif - - cmpli cr0,r5,0 - ble- cr0,.L_copyinout_trivial -/* we know we have a valid copyout to do now */ -/* Set up thread_recover in case we hit an illegal address */ - - - mfsprg r8,1 /* Get the current act */ - lwz r10,ACT_THREAD(r8) - lis r11,HIGH_ADDR(.L_copyinout_error) - lwz r8,ACT_VMMAP(r8) - rlwinm r12,r4,6,26,29 ; Get index to the segment slot - ori r11,r11,LOW_ADDR(.L_copyinout_error) - add r9,r4,r5 /* Get the end of the destination */ - lwz r8,VMMAP_PMAP(r8) - subi r9,r9,1 /* Make sure we don't go too far */ - add r8,r8,r12 ; Start indexing to the segment value - stw r11,THREAD_RECOVER(r10) - xor r9,r9,r4 /* Smoosh 'em together */ - lwz r8,PMAP_SEGS(r8) ; Get the source SR value - rlwinm. r9,r9,0,1,3 /* Top nybble equal? */ - mtsr SR_COPYIN,r8 - isync - - -/* For optimisation, we check if the copyout lies on a segment - * boundary. If it doesn't, we can use a simple copy. If it - * does, we split it into two separate copies in some C code. - */ - - bne- .L_call_copyout_multiple /* Nope, we went past the segment boundary... */ - - rlwinm r4,r4,0,4,31 - oris r4,r4,(SR_COPYIN_NUM << (28-16)) /* Set the copyin segment as the source */ - - bl EXT(bcopy) - -/* Now that copyout is done, we don't need a recovery point */ - mfsprg r6,1 /* Get the current act */ - addi r1,r1,FM_SIZE+16 - lwz r10,ACT_THREAD(r6) - li r3,0 - lwz r0,FM_LR_SAVE(r1) - stw r3,THREAD_RECOVER(r10) /* Clear recovery */ - mtlr r0 - blr - -.L_call_copyout_multiple: - /* unwind the stack */ - addi r1, r1, FM_SIZE+16 - lwz r0, FM_LR_SAVE(r1) - mtlr r0 - - b EXT(copyout_multiple) /* not a call - a jump! 
*/ +//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><> /* - * boolean_t - * copyinstr(src, dst, count, maxcount) + * int + * copyinstr(src, dst, maxcount, count) * vm_offset_t src; * vm_offset_t dst; * vm_size_t maxcount; * vm_size_t* count; * * Set *count to the number of bytes copied - * * If dst == NULL, don't copy, just count bytes. * Only currently called from klcopyinstr. */ ENTRY(copyinstr, TAG_NO_FRAME_USED) - -/* Preamble allowing us to call a sub-function */ - mflr r0 - stw r0,FM_LR_SAVE(r1) - stwu r1,-(FM_SIZE+16)(r1) - -#if 0 - stw r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */ - stw r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */ - stw r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */ - stw r6,FM_SIZE+12(r1) /* (TEST/DEBUG) */ - mr r7,r0 /* (TEST/DEBUG) */ - - bl EXT(tracecopystr) /* (TEST/DEBUG) */ - - lwz r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */ - lwz r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */ - lwz r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */ - stw r6,FM_SIZE+12(r1) /* (TEST/DEBUG) */ -#endif - - cmpli cr0,r5,0 - ble- cr0,.L_copyinout_trivial - -/* we know we have a valid copyin to do now */ -/* Set up thread_recover in case we hit an illegal address */ - - li r0,0 - mfsprg r8,1 /* Get the current act */ - lwz r10,ACT_THREAD(r8) - stw r0,0(r6) /* Clear result length */ - lis r11,HIGH_ADDR(.L_copyinout_error) - lwz r8,ACT_VMMAP(r8) ; Get the map for this activation - rlwinm r12,r3,6,26,29 ; Get index to the segment slot - lwz r8,VMMAP_PMAP(r8) - ori r11,r11,LOW_ADDR(.L_copyinout_error) - add r8,r8,r12 ; Start indexing to the segment value - stw r11,THREAD_RECOVER(r10) - rlwinm r3,r3,0,4,31 - lwz r7,PMAP_SEGS(r8) ; Get the source SR value - oris r3,r3,(SR_COPYIN_NUM << (28-16)) /* Set the copyin segment as the source */ - -/* Copy byte by byte for now - TODO NMGS speed this up with - * some clever (but fairly standard) logic for word copies. - * We don't use a copyinstr_multiple since copyinstr is called - * with INT_MAX in the linux server. Eugh. + mfcr r2 // we use nonvolatile cr3 + cmplwi r4,0 // dst==NULL? + li r0,0 + crset kkString // flag as a string op + mr r10,r3 // for copyin, source ptr (r3) is in user space + crmove kkNull,cr0_eq // remember if (dst==NULL) + stw r0,0(r6) // initialize #bytes moved + crset kkIn // flag as copyin (rather than copyout) + b copyJoin1 // skip over the "crclr kkNull" + + +//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><> +/* + * int + * copyout(src, dst, count) + * vm_offset_t src; + * vm_offset_t dst; + * size_t count; */ - li r9,0 /* Clear byte counter */ - -/* If the destination is NULL, don't do writes, - * just count bytes. 
We set CR7 outside the loop to save time + .align 5 + .globl EXT(copyout) + .globl EXT(copyoutmsg) + +LEXT(copyout) +LEXT(copyoutmsg) + +#if INSTRUMENT + mfspr r12,pmc1 ; INSTRUMENT - saveinstr[12] - Take stamp at copyout + stw r12,0x6100+(12*16)+0x0(0) ; INSTRUMENT - Save it + mfspr r12,pmc2 ; INSTRUMENT - Get stamp + stw r12,0x6100+(12*16)+0x4(0) ; INSTRUMENT - Save it + mfspr r12,pmc3 ; INSTRUMENT - Get stamp + stw r12,0x6100+(12*16)+0x8(0) ; INSTRUMENT - Save it + mfspr r12,pmc4 ; INSTRUMENT - Get stamp + stw r12,0x6100+(12*16)+0xC(0) ; INSTRUMENT - Save it +#endif + mfcr r2 // save caller's CR + crclr kkString // not a string version + mr r10,r4 // dest (r4) is user-space ptr + crclr kkIn // flag as copyout + b copyJoin + + +//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><> +/* + * int + * copyin(src, dst, count) + * vm_offset_t src; + * vm_offset_t dst; + * size_t count; */ - cmpwi cr7,r4,0 /* Is the destination null? */ - -nxtseg: mtsr SR_COPYIN,r7 /* Set the source SR */ - isync -.L_copyinstr_loop: - lbz r0,0(r3) /* Get the source */ - addic. r5,r5,-1 /* Have we gone far enough? */ - addi r3,r3,1 /* Bump source pointer */ - - cmpwi cr1,r0,0 /* Did we hit a null? */ - beq cr7,.L_copyinstr_no_store /* If we are just counting, skip the store... */ - - stb r0,0(r4) /* Move to sink */ - addi r4,r4,1 /* Advance sink pointer */ + .align 5 + .globl EXT(copyin) + .globl EXT(copyinmsg) + +LEXT(copyin) +LEXT(copyinmsg) + + mfcr r2 // save caller's CR + crclr kkString // not a string version + mr r10,r3 // source (r3) is user-space ptr in copyin + crset kkIn // flag as copyin + + +// Common code to handle setup for all the copy variants: +// r2 = caller's CR, since we use cr3 +// r3-r6 = parameters +// r10 = user-space ptr (r3 if copyin, r4 if copyout) +// cr3 = kkIn, kkString, kkNull flags + +copyJoin: + crclr kkNull // (dst==NULL) convention not used with this call +copyJoin1: // enter from copyinstr with kkNull set + mflr r0 // get return address + cmplwi r5,0 // buffer length 0? + lis r9,0x1000 // r9 <- 0x10000000 (256MB) + stw r0,FM_LR_SAVE(r1) // save return + cmplw cr1,r5,r9 // buffer length > 256MB ? + mfsprg r8,2 // get the features + beq-- copyinout_0 // 0 length is degenerate case + stwu r1,-kkFrameSize(r1) // set up stack frame + stw r2,kkCR(r1) // save caller's CR since we use cr3 + mtcrf 0x02,r8 // move pf64Bit to cr6 + stw r3,kkSource(r1) // save args across MapUserAddressSpace + stw r4,kkDest(r1) + stw r5,kkBufSize(r1) + crmove kk64bit,pf64Bitb // remember if this is a 64-bit processor + stw r6,kkCountPtr(r1) + stw r31,kkR31Save(r1) // we use r31 globally for mapped user ptr + li r31,0 // no mapped ptr yet + + +// Handle buffer length > 256MB. This is an error (ENAMETOOLONG) on copyin and copyout. +// The string ops are passed -1 lengths by some BSD callers, so for them we silently clamp +// the buffer length to 256MB. This isn't an issue if the string is less than 256MB +// (as most are!), but if they are >256MB we eventually return ENAMETOOLONG. This restriction +// is due to MapUserAddressSpace; we don't want to consume more than two segments for +// the mapping. + + ble++ cr1,copyin0 // skip if buffer length <= 256MB + bf kkString,copyinout_too_big // error if not string op + mr r5,r9 // silently clamp buffer length to 256MB + stw r9,kkBufSize(r1) // update saved copy too + + +// Set up thread_recover in case we hit an illegal address. 
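
The recovery convention referred to here: before touching user memory, the routine stores the address of its error exit in the thread's THREAD_RECOVER field; if a user access faults, the DSI handler resumes at that address instead of panicking and the routine returns EFAULT. In C the control flow is closest to setjmp/longjmp (a sketch under that analogy only; the real mechanism is an exception-time redirect, and copyin_sketch is a hypothetical name):

    #include <setjmp.h>
    #include <errno.h>

    static jmp_buf recover_point;    /* plays the role of THREAD_RECOVER */

    int copyin_sketch(const char *usrc, char *kdst, unsigned long n)
    {
        unsigned long i;
        if (setjmp(recover_point))   /* fault handler would longjmp() here */
            return EFAULT;
        for (i = 0; i < n; i++)
            kdst[i] = usrc[i];       /* any of these loads may fault */
        return 0;                    /* real code also disarms recovery */
    }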
+ +copyin0: + mfsprg r8,1 /* Get the current act */ + lis r2,hi16(copyinout_error) + lwz r7,ACT_THREAD(r8) + ori r2,r2,lo16(copyinout_error) + lwz r3,ACT_VMMAP(r8) // r3 <- vm_map virtual address + stw r2,THREAD_RECOVER(r7) + + +// Map user segment into kernel map, turn on 64-bit mode. +// r3 = vm map +// r5 = buffer length +// r10 = user space ptr (r3 if copyin, r4 if copyout) + + mr r6,r5 // Set length to map + li r4,0 // Note: we only do this 32-bit for now + mr r5,r10 // arg2 <- user space ptr +#if INSTRUMENT + mfspr r12,pmc1 ; INSTRUMENT - saveinstr[13] - Take stamp before mapuseraddressspace + stw r12,0x6100+(13*16)+0x0(0) ; INSTRUMENT - Save it + mfspr r12,pmc2 ; INSTRUMENT - Get stamp + stw r12,0x6100+(13*16)+0x4(0) ; INSTRUMENT - Save it + mfspr r12,pmc3 ; INSTRUMENT - Get stamp + stw r12,0x6100+(13*16)+0x8(0) ; INSTRUMENT - Save it + mfspr r12,pmc4 ; INSTRUMENT - Get stamp + stw r12,0x6100+(13*16)+0xC(0) ; INSTRUMENT - Save it +#endif + bl EXT(MapUserAddressSpace) // set r3 <- address in kernel map of user operand +#if INSTRUMENT + mfspr r12,pmc1 ; INSTRUMENT - saveinstr[14] - Take stamp after mapuseraddressspace + stw r12,0x6100+(14*16)+0x0(0) ; INSTRUMENT - Save it + mfspr r12,pmc2 ; INSTRUMENT - Get stamp + stw r12,0x6100+(14*16)+0x4(0) ; INSTRUMENT - Save it + mfspr r12,pmc3 ; INSTRUMENT - Get stamp + stw r12,0x6100+(14*16)+0x8(0) ; INSTRUMENT - Save it + mfspr r12,pmc4 ; INSTRUMENT - Get stamp + stw r12,0x6100+(14*16)+0xC(0) ; INSTRUMENT - Save it +#endif + or. r0,r3,r4 // Did we fail the mapping? + mr r31,r4 // r31 <- mapped ptr into user space (may be 64-bit) + beq-- copyinout_error // was 0, so there was an error making the mapping + bf-- kk64bit,copyin1 // skip if a 32-bit processor + + rldimi r31,r3,32,0 // slam high-order bits into mapped ptr + mfmsr r4 // if 64-bit, turn on SF so we can use returned ptr + li r0,1 + rldimi r4,r0,63,MSR_SF_BIT // light bit 0 + mtmsrd r4 // turn on 64-bit mode + isync // wait for mode to change + + +// Load r3-r5, substituting mapped ptr as appropriate. + +copyin1: + lwz r5,kkBufSize(r1) // restore length to copy + bf kkIn,copyin2 // skip if copyout + lwz r4,kkDest(r1) // copyin: source is mapped, dest is r4 at entry + mr r3,r31 // source is mapped ptr + b copyin3 +copyin2: // handle copyout + lwz r3,kkSource(r1) // source is kernel buffer (r3 at entry) + mr r4,r31 // dest is mapped ptr into user space + + +// Finally, all set up to copy: +// r3 = source ptr (mapped if copyin) +// r4 = dest ptr (mapped if copyout) +// r5 = length +// r31 = mapped ptr returned by MapUserAddressSpace +// cr3 = kkIn, kkString, kk64bit, and kkNull flags + +copyin3: + bt kkString,copyString // handle copyinstr and copyoutstr + bl EXT(bcopy) // copyin and copyout: let bcopy do the work + li r3,0 // return success + + +// Main exit point for copyin, copyout, copyinstr, and copyoutstr. Also reached +// from error recovery if we get a DSI accessing user space. Clear recovery ptr, +// and pop off frame. Note that we have kept +// the mapped ptr into user space in r31, as a reg64_t type (ie, a 64-bit ptr on +// 64-bit machines.) We must unpack r31 into an addr64_t in (r3,r4) before passing +// it to ReleaseUserAddressSpace. 
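
The unpack the comment describes, one 64-bit register into the (r3, r4) high/low pair of the 32-bit calling convention, is two shifts in C (illustrative):

    #include <stdint.h>

    /* Sketch: split an addr64_t-style value into (hi, lo) 32-bit halves. */
    static inline void unpack_addr64(uint64_t a, uint32_t *hi, uint32_t *lo)
    {
        *hi = (uint32_t)(a >> 32);
        *lo = (uint32_t)a;
    }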
+// r3 = 0, EFAULT, or ENAMETOOLONG + +copyinx: + lwz r2,kkCR(r1) // get callers cr3 + mfsprg r6,1 // Get the current act + lwz r10,ACT_THREAD(r6) + + bf-- kk64bit,copyinx1 // skip if 32-bit processor + mfmsr r12 + rldicl r12,r12,0,MSR_SF_BIT+1 // if 64-bit processor, turn 64-bit mode off + mtmsrd r12 // turn SF off and EE back on + isync // wait for the mode to change +copyinx1: + lwz r31,kkR31Save(r1) // restore callers r31 + addi r1,r1,kkFrameSize // pop off our stack frame + lwz r0,FM_LR_SAVE(r1) + li r4,0 + stw r4,THREAD_RECOVER(r10) // Clear recovery + mtlr r0 + mtcrf 0x10,r2 // restore cr3 + blr -.L_copyinstr_no_store: - addi r9,r9,1 /* Count the character */ - beq- cr1,.L_copyinstr_done /* We're done if we did a null... */ - beq- cr0,L_copyinstr_toobig /* Also if we maxed the count... */ - -/* Check to see if the copyin pointer has moved out of the - * copyin segment, if it has we must remap. +/* We get here via the exception handler if an illegal + * user memory reference was made. This error handler is used by + * copyin, copyout, copyinstr, and copyoutstr. Registers are as + * they were at point of fault, so for example cr3 flags are valid. */ - rlwinm. r0,r3,0,4,31 /* Did we wrap around to 0? */ - bne+ cr0,.L_copyinstr_loop /* Nope... */ - - lwz r7,PMAP_SEGS+4(r8) ; Get the next source SR value - addi r8,r8,4 ; Point to the next segment - oris r3,r0,(SR_COPYIN_NUM << (28-16)) /* Reset the segment number */ - b nxtseg /* Keep going... */ - -L_copyinstr_toobig: - li r3,ENAMETOOLONG - b L_copyinstr_return -.L_copyinstr_done: - li r3,0 /* Normal return */ -L_copyinstr_return: - li r4,0 /* to clear thread_recover */ - stw r9,0(r6) /* Set how many bytes we did */ - stw r4,THREAD_RECOVER(r10) /* Clear recovery exit */ - - addi r1, r1, FM_SIZE+16 - lwz r0, FM_LR_SAVE(r1) - mtlr r0 - blr +copyinout_error: + li r3,EFAULT // return error + b copyinx + +copyinout_0: // degenerate case: 0-length copy + mtcrf 0x10,r2 // restore cr3 + li r3,0 // return success + blr + +copyinout_too_big: // degenerate case + mtcrf 0x10,r2 // restore cr3 + lwz r1,0(r1) // pop off stack frame + li r3,ENAMETOOLONG + blr + + +//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><> +// Handle copyinstr and copyoutstr. At this point the stack frame is set up, +// the recovery ptr is set, the user's buffer is mapped, we're in 64-bit mode +// if necessary, and: +// r3 = source ptr, mapped if copyinstr +// r4 = dest ptr, mapped if copyoutstr +// r5 = buffer length +// r31 = mapped ptr returned by MapUserAddressSpace +// cr3 = kkIn, kkString, kkNull, and kk64bit flags +// We do word copies unless the buffer is very short, then use a byte copy loop +// for the leftovers if necessary. + +copyString: + li r12,0 // Set header bytes count to zero + cmplwi cr1,r5,20 // is buffer very short? + mtctr r5 // assuming short, set up loop count for bytes + blt cr1,copyinstr8 // too short for word loop + andi. r12,r3,0x3 // is source ptr word aligned? + bne copyinstr11 // bytes loop +copyinstr1: + srwi r6,r5,2 // get #words in buffer + mtctr r6 // set up word loop count + lis r10,hi16(0xFEFEFEFF) // load magic constants into r10 and r11 + lis r11,hi16(0x80808080) + ori r10,r10,lo16(0xFEFEFEFF) + ori r11,r11,lo16(0x80808080) + bf kkNull,copyinstr6 // enter loop that copies + b copyinstr5 // use loop that just counts + + +// Word loop(s). 
They do a word-parallel search for 0s, using the following
+// non-obvious but very efficient test:
+//		y =  data + 0xFEFEFEFF
+//		z = ~data & 0x80808080
+// If (y & z)==0, then all bytes in dataword are nonzero.  We need two copies of
+// this loop, since if we test kkNull in the loop then it becomes 9 words long.

+	.align	5				// align inner loops for speed
+copyinstr5:					// version that counts but does not copy
+	lwz	r8,0(r3)			// get next word of source
+	addi	r3,r3,4				// increment source ptr
+	add	r9,r10,r8			// r9 =  data + 0xFEFEFEFF
+	andc	r7,r11,r8			// r7 = ~data & 0x80808080
+	and.	r7,r9,r7			// r7 = r9 & r7
+	bdnzt	cr0_eq,copyinstr5		// if r7==0, then all bytes are nonzero
+
+	b	copyinstr7
+
+	.align	5				// align inner loops for speed
+copyinstr6:					// version that counts and copies
+	lwz	r8,0(r3)			// get next word of source
+	addi	r3,r3,4				// increment source ptr
+	addi	r4,r4,4				// increment dest ptr while we wait for data
+	add	r9,r10,r8			// r9 =  data + 0xFEFEFEFF
+	andc	r7,r11,r8			// r7 = ~data & 0x80808080
+	and.	r7,r9,r7			// r7 = r9 & r7
+	stw	r8,-4(r4)			// pack all 4 bytes into buffer
+	bdnzt	cr0_eq,copyinstr6		// if r7==0, then all bytes are nonzero
+
+
+// Either 0 found or buffer filled.  The above algorithm has mapped nonzero bytes to 0
+// and 0 bytes to 0x80 with one exception: 0x01 bytes preceding the first 0 are also
+// mapped to 0x80.  We must mask out these false hits before searching for an 0x80 byte.
+
+copyinstr7:
+	crnot	kkZero,cr0_eq			// 0 found iff cr0_eq is off
+	mfctr	r6				// get #words remaining in buffer
+	rlwinm	r2,r8,7,0,31			// move 0x01 bits to 0x80 position
+	slwi	r6,r6,2				// convert to #bytes remaining
+	andc	r7,r7,r2			// turn off false hits from 0x0100 worst case
+	rlwimi	r6,r5,0,30,31			// add in odd bytes leftover in buffer
+	srwi	r7,r7,8				// we want to count the 0 as a byte xferred
+	addi	r6,r6,4				// don't count last word xferred (yet)
+	cntlzw	r7,r7				// now we can find the 0 byte (ie, the 0x80)
+	srwi	r7,r7,3				// convert 8,16,24,32 to 1,2,3,4
+	sub.	r6,r6,r7			// account for nonzero bytes in last word
+	bt++	kkZero,copyinstr10		// 0 found, so done
+
+	beq	copyinstr10			// r6==0, so buffer truly full
+	mtctr	r6				// 0 not found, loop over r6 bytes
+	b	copyinstr8			// enter byte loop for last 1-3 leftover bytes
+
+
+// Byte loop.  This is used for very small buffers and for the odd bytes left over
+// after searching and copying words at a time.
+
+	.align	5				// align inner loops for speed
+copyinstr8:					// loop over bytes of source
+	lbz	r0,0(r3)			// get next byte of source
+	addi	r3,r3,1
+	addi	r4,r4,1				// increment dest addr whether we store or not
+	cmpwi	r0,0				// the 0?
+	bt--	kkNull,copyinstr9		// don't store (was copyinstr with NULL ptr)
+	stb	r0,-1(r4)
+copyinstr9:
+	bdnzf	cr0_eq,copyinstr8		// loop if byte not 0 and more room in buffer
+
+	mfctr	r6				// get #bytes left in buffer
+	crmove	kkZero,cr0_eq			// remember if 0 found or buffer filled
+
+
+// Buffer filled or 0 found.  Unwind and return.
+//	r5 = kkBufSize, ie buffer length
+//	r6 = untransferred bytes remaining in buffer
+//	r31 = mapped ptr returned by MapUserAddressSpace
+//	cr3 = kkZero set iff 0 found
+
+copyinstr10:
+	lwz	r9,kkCountPtr(r1)		// get ptr to place to store count of bytes moved
+	sub	r2,r5,r6			// get #bytes we moved, counting the 0 iff any
+	add	r2,r2,r12			// add the header bytes count
+	li	r3,0				// assume 0 return status
+	stw	r2,0(r9)			// store #bytes moved
+	bt++	kkZero,copyinx			// we did find the 0 so return 0
+	li	r3,ENAMETOOLONG			// buffer filled
+	b	copyinx				// join main exit routine
+
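The word-parallel test and the false-hit masking are easy to verify in C (an illustrative sketch of the technique used by copyinstr5/6/7, not kernel code; note the existence test is exact, and the 0x01 masking only matters when locating the first 0 with cntlzw):

    #include <stdint.h>
    #include <assert.h>

    /* Nonzero iff the 32-bit word contains a zero byte, with the same
       0x01-false-hit masking that copyinstr7 performs before counting. */
    static int word_has_zero_byte(uint32_t data)
    {
        uint32_t y = data + 0xFEFEFEFFu;    /* borrows out of the 0x00 bytes */
        uint32_t z = ~data & 0x80808080u;   /* candidates: bytes 0x00 or 0x01 */
        uint32_t hits = (y & z) & ~(data << 7);  /* drop 0x01 false hits */
        return hits != 0;
    }

    int main(void)
    {
        assert( word_has_zero_byte(0x41420043u));  /* 'A' 'B' 0 'C' */
        assert(!word_has_zero_byte(0x01020304u));  /* no zero bytes */
        return 0;
    }

+// Byte loop.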
This is used on the header bytes for unaligned source
+
+	.align	5				// align inner loops for speed
+copyinstr11:
+	li	r10,4				// load word size
+	sub	r12,r10,r12			// set the header bytes count
+	mtctr	r12				// set up bytes loop count
+copyinstr12:					// loop over bytes of source
+	lbz	r0,0(r3)			// get next byte of source
+	addi	r3,r3,1
+	addi	r4,r4,1				// increment dest addr whether we store or not
+	cmpwi	r0,0				// the 0?
+	bt--	kkNull,copyinstr13		// don't store (was copyinstr with NULL ptr)
+	stb	r0,-1(r4)
+copyinstr13:
+	bdnzf	cr0_eq,copyinstr12		// loop if byte not 0 and more room in buffer
+	sub	r5,r5,r12			// subtract the bytes copied
+	bne	cr0_eq,copyinstr1		// branch to word loop
+
+	mr	r5,r12				// Get the header bytes count
+	li	r12,0				// Clear the header bytes count
+	mfctr	r6				// get #bytes left in buffer
+	crmove	kkZero,cr0_eq			// remember if 0 found or buffer filled
+	b	copyinstr10
+
diff --git a/osfmk/ppc/net_filter.c b/osfmk/ppc/net_filter.c
deleted file mode 100644
index 650d299cd..000000000
--- a/osfmk/ppc/net_filter.c
+++ /dev/null
@@ -1,753 +0,0 @@
-/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * @OSF_COPYRIGHT@
- *
- */
-
-
-#if NET_FILTER_COMPILER
-
-
-#define USE_EXTRA_REGS	0
-
-#define	REG_ZERO	0	/* Register we keep equal to 0. */
-#define	REG_DATAADDR	3	/* Address of packet data, and filter return. */
-#define	REG_DATALEN	4	/* Length of packet data in two-byte units. */
-#define	REG_HDRADDR	5	/* Address of header data. */
-#define	REG_RET		3	/* Where to put return value. */
-
-/* Originally we dealt in virtual register numbers which were essentially
-   indexes into this array, and only converted to machine register numbers
-   when emitting instructions.  But that meant a lot of conversions, so
-   instead we deal with machine register numbers all along, even though this
-   means wasting slots in the regs[] array. */
-const unsigned char scratchregs[] = {
-    6, 7, 8, 9, 10, 11, 12,
-#if USE_EXTRA_REGS	/* Callee-saves regs available if we save them. */
-#define INITIAL_NSCRATCHREGS 8	/* Number of registers above. */
-    #error not yet written
-#endif
-};
-#define NSCRATCHREGS (sizeof scratchregs / sizeof scratchregs[0])
-#define NREGS 32
-#define NO_REG 1		/* This is the stack pointer!  Flag value. */
-
-#define MAX_LI 0x7fff		/* Max unsigned value in an LI.
*/ - -#define BCLR(cond) ((19 << 26) | (cond) | (16 << 1)) -#define BLR() BCLR(COND_ALWAYS) -#define BC(cond, off) ((16 << 26) | (cond) | ((off) << 2)) -#define COND(BO, BI) (((BO) << (16 + 5)) | ((BI) << 16)) -#define COND_ALWAYS COND(COND_IF_ALWAYS, 0) -#define COND_EQ COND(COND_IF_TRUE, COND_BIT(0, BIT_EQ)) -#define COND_NE COND(COND_IF_FALSE, COND_BIT(0, BIT_EQ)) -#define COND_LE COND(COND_IF_FALSE, COND_BIT(0, BIT_GT)) -#define COND_GE COND(COND_IF_FALSE, COND_BIT(0, BIT_LT)) -#define COND_BIT(crf, bit) \ - ((crf) * 4 + (bit)) -#define BIT_EQ 2 -#define BIT_GT 1 -#define BIT_LT 0 -#define COND_IF_FALSE 0x04 -#define COND_IF_TRUE 0x0c -#define COND_IF_ALWAYS 0x14 - -/* For arithmetical instructions, a is the dest and b is the source; - for logical instructions, a is the source and b is the dest. Ho hum. */ -#define IMMED(opcode, a, b, imm) \ - (((opcode) << 26) | ((a) << 21) | ((b) << 16) | \ - ((imm) & 0xffff)) -#define ADDI(dst, src, imm) \ - IMMED(14, dst, src, imm) -#define ADDIC(dst, src, imm) \ - IMMED(12, dst, src, imm) -#define SUBFIC(dst, src, imm) \ - IMMED(8, dst, src, imm) -#define LI(dst, imm) ADDI(dst, 0, (imm)) -#define ANDI(dst, src, imm) \ - IMMED(28, src, dst, imm) -#define ORI(dst, src, imm) \ - IMMED(24, src, dst, imm) -#define XORI(dst, src, imm) \ - IMMED(26, src, dst, imm) - -#define CMPL(lhs, rhs) ((31 << 26) | ((lhs) << 16) | ((rhs) << 11) | (32 << 1)) -#define CMPLI(lhs, imm) ((10 << 26) | ((lhs) << 16) | ((imm) & 0xffff)) - -#define INTEGER_OP(opcode, a, b, c) \ - ((31 << 26) | ((a) << 21) | ((b) << 16) | \ - ((c) << 11) | ((opcode) << 1)) -#define ARITH_OP(opcode, dst, lhs, rhs) \ - INTEGER_OP(opcode, dst, lhs, rhs) -#define ADD(dst, lhs, rhs) \ - ARITH_OP(OP_ADD, dst, lhs, rhs) -#define ADDE(dst, lhs, rhs) \ - ARITH_OP(OP_ADDE, dst, lhs, rhs) -#define SUBF(dst, lhs, rhs) \ - ARITH_OP(OP_SUBF, dst, lhs, rhs) -#define SUBFC(dst, lhs, rhs) \ - ARITH_OP(OP_SUBFC, dst, lhs, rhs) -#define SUBFE(dst, lhs, rhs) \ - ARITH_OP(OP_SUBFE, dst, lhs, rhs) -#define LOGIC_OP(opcode, dst, lhs, rhs) \ - INTEGER_OP(opcode, lhs, dst, rhs) -#define OR(dst, lhs, rhs) \ - LOGIC_OP(OP_OR, dst, lhs, rhs) -#define XOR(dst, lhs, rhs) \ - LOGIC_OP(OP_XOR, dst, lhs, rhs) -#define OP_ADD 266 -#define OP_ADDE 138 -#define OP_AND 28 -#define OP_OR 444 -#define OP_SRW 536 -#define OP_SUBF 40 -#define OP_SUBFC 8 -#define OP_SUBFE 136 -#define OP_XOR 316 -#define MR(dst, src) OR(dst, src, src) - -#define LHZ(dst, base, offset) \ - ((40 << 26) | ((dst) << 21) | ((base) << 16) | \ - ((offset) & 0xffff)) -#define LHZX(dst, base, index) \ - INTEGER_OP(279, dst, base, index) -#define MFCR(dst) INTEGER_OP(19, dst, 0, 0) - -#define RLWINM(dst, src, shiftimm, mbegin, mend) \ - ((21 << 26) | ((src) << 21) | ((dst) << 16) | \ - ((shiftimm) << 11) | ((mbegin) << 6) | ((mend) << 1)) -#define RLWNM(dst, src, shiftreg, mbegin, mend) \ - ((23 << 26) | ((src) << 21) | ((dst) << 16) | \ - ((shiftreg) << 11) | ((mbegin) << 6) | ((mend) << 1)) - -/* Every NETF_arg generates at most four instructions (4 for PUSHIND). - Every NETF_op generates at most 3 instructions (3 for EQ and NEQ). */ -#define MAX_INSTR_PER_ARG 4 -#define MAX_INSTR_PER_OP 3 -#define MAX_INSTR_PER_ITEM (MAX_INSTR_PER_ARG + MAX_INSTR_PER_OP) -int junk_filter[MAX_INSTR_PER_ITEM]; - -enum {NF_LITERAL, NF_HEADER, NF_DATA}; -struct common { /* Keeps track of values we might want to avoid reloading. */ - char type; /* NF_LITERAL: immediate; NF_HEADER: header word; - NF_DATA: data word. */ - char nuses; /* Number of remaining uses for this value. 
*/ - unsigned char reg; - /* Register this value is currently in, or NO_REG if none. */ - unsigned short value; - /* Immediate value or header or data offset. */ -}; -struct reg { /* Keeps track of the current contents of registers. */ - unsigned char commoni; - /* Index in common[] of the contained value. */ -#define NOT_COMMON_VALUE NET_MAX_FILTER /* When not a common[] value. */ - unsigned char stacktimes; - /* Number of times register appears in stack. */ -}; -struct local { /* Gather local arrays so we could kalloc() if needed. */ - struct common common[NET_MAX_FILTER]; /* Potentially common values. */ - struct reg regs[NREGS]; /* Register statuses. */ - unsigned char commonpos[NET_MAX_FILTER]; /* Index in common[] for the - value loaded in each filter - command. */ - unsigned char stackregs[NET_FILTER_STACK_DEPTH]; - /* Registers making up the - stack. */ -#if USE_EXTRA_REGS - unsigned char maxreg; -#endif -}; - -int allocate_register(struct local *s, int commoni); -int compile_preamble(int *instructions, struct local *s); - -/* Compile a packet filter into POWERPC machine code. We do everything in - the 7 caller-saves registers listed in scratchregs[], except when - USE_EXTRA_REGS is defined, in which case we may also allocate callee- - saves registers if needed. (Not yet implemented on PPC.) - - Rather than maintaining an explicit stack in memory, we allocate registers - dynamically to correspond to stack elements -- we can do this because we - know the state of the stack at every point in the filter program. We also - attempt to keep around in registers values (immediates, or header or data - words) that are used later on, to avoid having to load them again. - Since there are only 7 registers being used, we might be forced to reload - a value that we could have kept if we had more. We might even be unable - to contain the stack in the registers, in which case we return failure and - cause the filter to be interpreted by net_do_filter(). But for all current - filters I looked at, 7 registers is enough even to avoid reloads. When - USE_EXTRA_REGS is defined there are about 28 available registers, which is - plenty. - - We depend heavily on NET_MAX_FILTER and NET_FILTER_STACK_DEPTH being - small. We keep indexes to arrays sized by them in char-sized fields, - originally because we tried allocating these arrays on the stack. - Even then we overflowed the small (4K) kernel stack, so we were forced - to allocate the arrays dynamically, which is the reason for the existence - of `struct local'. - - We also depend on the filter being logically correct, for instance not - being longer than NET_MAX_FILTER or underflowing its stack. This is - supposed to have been checked by parse_net_filter() before the filter - is compiled. - - We are supposed to return 1 (TRUE) if the filter accepts the packet - and 0 (FALSE) otherwise. In fact, we may return any non-zero value - for true, which is sufficient for our caller and convenient for us. - - There are lots and lots of optimisations that we could do but don't. - This is supposedly a *micro*-kernel, after all. Here are some things - that could be added without too much headache: - - Using the condition register. We go to a lot of trouble to generate - integer truth values for EQ etc, but most of the time those values - are just ANDed or ORed together or used as arguments to COR etc. 
So - we could compute the comparison values directly into CR bits and - operate on them using the CR logical instructions without (most of - the time) ever having to generate integer equivalents. - - More registers. We could note the last uses of r3, r4, and - r5, and convert them to general registers after those uses. But if - register shortage turns out to be a problem it is probably best just - to define USE_EXTRA_REGS and have done with it. - - Minimising range checks. Every time we refer to a word in the data - part, we generate code to ensure that it is within bounds. But often - the truth of these tests is implied by earlier tests. Instead, at the - start of the filter and after every COR or CNAND we could insert - a single check when that is necessary. (After CAND and CNOR we don't - need to check since if they terminate it will be to return FALSE - anyway so all we'd do would be to return it prematurely.) - - Remembering immediate values. Instead of generating code as soon as we - see a PUSHLIT, we could remember that value and only generate code when - it is used. This would enable us to generate certain shorter - instructions (like addi) that incorporate the immediate value instead - of ever putting it in a register. - */ - -filter_fct_t -net_filter_alloc(filter_t *filter, unsigned int size, unsigned int *lenp) -{ - struct local *s; - int len, oldi, i, j, t, ncommon, sp; - int type, value, arg, op, reg, reg1, dst, commoni; - int returnfalseoffset; - int *instructions, *instp, *returnfalse; -#if USE_EXTRA_REGS - int oldmaxreg; -#endif - boolean_t compiling; - -#define SCHAR_MAX 127 /* machine/machlimits->h, anyone? */ - assert(NET_MAX_FILTER <= SCHAR_MAX); - assert(NET_FILTER_STACK_DEPTH <= SCHAR_MAX); - assert(NREGS <= SCHAR_MAX); - - assert(size < NET_MAX_FILTER); - - s = (struct local *) kalloc(sizeof *s); - -#if USE_EXTRA_REGS - s->maxreg = INITIAL_NSCRATCHREGS; -#endif - len = 0; - compiling = FALSE; - returnfalse = junk_filter; - - /* This loop runs at least twice, once with compiling==FALSE to determine - the length of the instructions we will compile, and once with - compiling==TRUE to compile them. The code generated on the two passes - must be the same. In the USE_EXTRA_REGS case, the loop can be re-run - an extra time while !compiling, if we decide to use the callee-saves - registers. This is because we may be able to generate better code with - the help of these registers than before. */ - while (1) { - - /* Identify values that we can potentially preserve in a register to - avoid having to reload them. All immediate values and references to - known offsets in the header or data are candidates. The results of - this loop are the same on every run, so with a bit of work we - could run it just once; but this is not a time-critical - application. 
*/ - ncommon = 0; - for (i = 0; i < size; i++) { - oldi = i; - arg = NETF_ARG(filter[i]); - if (arg == NETF_PUSHLIT) { - type = NF_LITERAL; - value = filter[++i]; - } else if (arg >= NETF_PUSHSTK) { - continue; - } else if (arg >= NETF_PUSHHDR) { - type = NF_HEADER; - value = arg - NETF_PUSHHDR; - } else if (arg >= NETF_PUSHWORD) { - type = NF_DATA; - value = arg - NETF_PUSHWORD; - } else { - continue; - } - for (j = 0; j < ncommon; j++) { - if (s->common[j].type == type && s->common[j].value == value) { - s->common[j].nuses++; - break; - } - } - if (j == ncommon) { - s->common[j].type = type; - s->common[j].value = value; - s->common[j].nuses = 1; - ncommon++; - } - s->commonpos[oldi] = j; - } - -#if USE_EXTRA_REGS - oldmaxreg = s->maxreg; -#endif - - /* Initially, no registers hold common values or are on the stack. */ - for (i = 0; i < ncommon; i++) - s->common[i].reg = NO_REG; - for (i = 0; i < NSCRATCHREGS; i++) { - s->regs[scratchregs[i]].commoni = NOT_COMMON_VALUE; - s->regs[scratchregs[i]].stacktimes = 0; - } - - /* Now read through the filter and generate code. */ - sp = -1; /* sp points to top element */ - for (i = 0; i < size; i++) { - if (!compiling) - instp = junk_filter; - - assert(sp >= -1); - assert(sp < NET_FILTER_STACK_DEPTH - 1); - commoni = s->commonpos[i]; - arg = NETF_ARG(filter[i]); - op = NETF_OP(filter[i]); - - /* Generate code to get the required value into a register and - set `reg' to the number of this register. */ - switch (arg) { - case NETF_PUSHLIT: - value = filter[++i]; - reg = s->common[commoni].reg; - if (reg == NO_REG) { - if ((reg = allocate_register(s, commoni)) == NO_REG) - goto fail; - assert(value >= 0); /* Comes from unsigned short. */ - *instp++ = ORI(reg, REG_ZERO, value); - } - s->common[commoni].nuses--; - break; - case NETF_NOPUSH: - reg = s->stackregs[sp--]; - s->regs[reg].stacktimes--; - break; - case NETF_PUSHZERO: - reg = REG_ZERO; - break; - case NETF_PUSHIND: - case NETF_PUSHHDRIND: - reg1 = s->stackregs[sp--]; - s->regs[reg1].stacktimes--; - if (arg == NETF_PUSHIND) - *instp++ = CMPL(reg1, REG_DATALEN); - else - *instp++ = CMPLI(reg1, - NET_HDW_HDR_MAX/sizeof (unsigned short)); - *instp = BC(COND_GE, returnfalse - instp); - instp++; - if ((reg = allocate_register(s, -1)) == NO_REG) - goto fail; - *instp++ = ADD(reg, reg1, reg1); - *instp++ = LHZX(reg, (arg == NETF_PUSHIND) ? - REG_DATAADDR : REG_HDRADDR, reg); - break; - default: - if (arg >= NETF_PUSHSTK) - reg = s->stackregs[sp - (arg - NETF_PUSHSTK)]; - else if (arg >= NETF_PUSHWORD) { - assert(2 * (NETF_PUSHHDR - NETF_PUSHWORD) <= MAX_LI); - assert(NETF_PUSHSTK - NETF_PUSHHDR <= MAX_LI); - reg = s->common[commoni].reg; - if (reg == NO_REG) { - if ((reg = allocate_register(s, commoni)) == NO_REG) - goto fail; - if (arg < NETF_PUSHHDR) { - value = arg - NETF_PUSHWORD; - *instp++ = CMPLI(REG_DATALEN, value); - *instp = BC(COND_LE, returnfalse - instp); - instp++; - reg1 = REG_DATAADDR; - } else { - value = arg - NETF_PUSHHDR; - reg1 = REG_HDRADDR; - } - *instp++ = LHZ(reg, reg1, 2 * value); - } - s->common[commoni].nuses--; - } - } - - /* Now generate code to do `op' on `reg1' (lhs) and `reg' (rhs). */ - if (op != NETF_NOP) { - reg1 = s->stackregs[sp--]; - s->regs[reg1].stacktimes--; - } - switch (op) { - case NETF_OP(NETF_CAND): - case NETF_OP(NETF_COR): - case NETF_OP(NETF_CNAND): - case NETF_OP(NETF_CNOR): - dst = -1; - case NETF_OP(NETF_NOP): - break; - default: - /* Allocate a register to put the result in. 
*/ - if ((dst = allocate_register(s, -1)) == NO_REG) - goto fail; - } - switch (op) { - case NETF_OP(NETF_NOP): - dst = reg; - break; - case NETF_OP(NETF_EQ): - case NETF_OP(NETF_NEQ): - /* We arrange for the truth value to end up in the carry - flag and then put it in the destination register by - adding-with-carry zero to itself. To set the carry, we - first make a value `x' that is zero if the values are - equal; this is either their XOR, or, if we know the - rhs is 0, the lhs. Then to set the carry only when - x==0 we do `subfic dst,x,0' (subtract x from 0, setting - carry as not-borrow, so set only if x==0); to set it when - x!=0 we do `addic dst,x,-1' (add -1 to x setting carry, - so set unless x==0). We're only interested in the carry - from these operations, not dst. - We don't test if reg1==REG_ZERO since in practice you - write NETF_PUSHLIT|NETF_EQ; the other order is eccentric - so you get an extra instruction, tough. */ - if (reg == REG_ZERO) - t = reg1; - else { - *instp++ = XOR(dst, reg1, reg); - t = dst; - } - *instp++ = (op == NETF_OP(NETF_EQ)) ? - SUBFIC(dst, t, 0) : ADDIC(dst, t, -1); - *instp++ = ADDE(dst, REG_ZERO, REG_ZERO); - break; - case NETF_OP(NETF_LT): - /* LT and GT take advantage of the fact that all numbers are - 16-bit quantities, so the sign bit after a subtraction - is a reliable indication of the relative magnitudes of - the operands. */ - *instp++ = SUBF(dst, reg, reg1); /* dst = reg1 - reg */ - *instp++ = RLWINM(dst, dst, 1, 31, 31); /* sign bit */ - break; - case NETF_OP(NETF_GT): - *instp++ = SUBF(dst, reg1, reg); /* dst = reg - reg1 */ - *instp++ = RLWINM(dst, dst, 1, 31, 31); /* sign bit */ - break; - case NETF_OP(NETF_LE): - /* LE and GE use the carry (= not-borrow) flag. When doing - a - b, there is a borrow if b > a, so carry if b <= a. */ - *instp++ = SUBFC(dst, reg1, reg); /* dst = reg - reg1 */ - *instp++ = ADDE(dst, REG_ZERO, REG_ZERO);/* ca if reg1 <= reg */ - break; - case NETF_OP(NETF_GE): - *instp++ = SUBFC(dst, reg, reg1); /* dst = reg1 - reg */ - *instp++ = ADDE(dst, REG_ZERO, REG_ZERO);/* ca if reg <= reg1 */ - break; - case NETF_OP(NETF_AND): - j = OP_AND; - goto logical; - case NETF_OP(NETF_OR): - j = OP_OR; - goto logical; - case NETF_OP(NETF_XOR): - j = OP_XOR; - goto logical; - case NETF_OP(NETF_RSH): - j = OP_SRW; -logical: - *instp++ = LOGIC_OP(j, dst, reg1, reg); - break; - case NETF_OP(NETF_ADD): - j = OP_ADD; - goto arithmetical; - case NETF_OP(NETF_SUB): - j = OP_SUBF; /* First reg subtracted from second. */ -arithmetical: - *instp++ = ARITH_OP(j, dst, reg, reg1); - *instp++ = ANDI(dst, dst, 0xffff); - break; - case NETF_OP(NETF_LSH): - *instp++ = RLWNM(dst, reg1, reg, 16, 31); - break; - case NETF_OP(NETF_COR): - case NETF_OP(NETF_CNAND): - *instp++ = CMPL(reg1, reg); - *instp++ = BCLR((op == NETF_OP(NETF_COR)) ? COND_EQ : COND_NE); - break; - case NETF_OP(NETF_CAND): - case NETF_OP(NETF_CNOR): - *instp++ = CMPL(reg1, reg); - *instp = BC((op == NETF_OP(NETF_CAND)) ? COND_NE : COND_EQ, - returnfalse - instp); - instp++; - break; - default: - printf("op == 0x%x\n", op); - panic("net_filter_alloc: bad op"); - /* Should have been caught by parse_net_filter(). */ - } - /* If the op generated a result, push it on the stack. 
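- 
-    A worked example of the comparison idioms above (hedged; the operand
-    values are invented for this note).  EQ with reg1 == reg == 5:
-    x = reg1 ^ reg = 0, so `subfic dst,x,0' computes 0 - 0 and sets the
-    carry (no borrow), and the adde of zero to itself then leaves
-    dst = 1.  With reg1 = 5, reg = 7: x = 2, 0 - 2 borrows, the carry is
-    clear, and adde leaves dst = 0.  LE works the same way from
-    `subfc dst,reg1,reg' (reg - reg1), whose carry is set exactly when
-    reg1 <= reg.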
*/ - if (dst >= 0) { - s->stackregs[++sp] = dst; - s->regs[dst].stacktimes++; - } - if (!compiling) { - assert(instp - junk_filter <= MAX_INSTR_PER_ITEM); - len += instp - junk_filter; - } - } - if (compiling) { - /* If the stack contains any values, we are supposed to return 0 or - 1 according as the top-of-stack is zero or not. Since the only - place we are called requires just zero-false/nonzero-true, we - simply copy the value into r3. If the stack is empty, we - leave the pointer value r3 intact to return TRUE. */ - if (sp >= 0) - *instp++ = MR(REG_RET, s->stackregs[sp]); - *instp++ = BLR(); - /* Branch here to return false. We could avoid adding these - instructions if they are not used, but practically every - filter does use them (for failure values when trying to - access values beyond the header or data length) so it's - not worth the effort. */ - assert(instp == returnfalse); - *instp++ = LI(REG_RET, 0); - *instp++ = BLR(); - break; - } else { - len += 1 + (sp >= 0); - /* For the reach-the-end return instruction(s). */ -#if USE_EXTRA_REGS - if (s->maxreg > oldmaxreg) { - len = 0; - continue; - } -#endif - len += compile_preamble(NULL, s); - returnfalseoffset = len; - len += 2; /* For the return-false instructions. */ - } - if ((instructions = (int *) kalloc(len * sizeof (int))) == NULL) - return NULL; - returnfalse = instructions + returnfalseoffset; - instp = instructions; - instp += compile_preamble(instp, s); - compiling = TRUE; - } - - assert(instp - instructions == len); - *lenp = len * sizeof (int); - { - kern_return_t kr; - vm_machine_attribute_val_t val = MATTR_VAL_CACHE_SYNC; - - kr = pmap_attribute(kernel_pmap, (vm_offset_t) instructions, - len * sizeof (int), MATTR_CACHE, &val); - if (kr != KERN_SUCCESS) { - printf("net_filter_alloc: pmap_attribute -> 0x%x\n", kr); - return NULL; - } - } - kfree((vm_offset_t) s, sizeof *s); - return (filter_fct_t) instructions; -fail: - assert(!compiling); - kfree((vm_offset_t) s, sizeof *s); - printf("net_filter_alloc: failed to compile (filter too complex)\n"); - printf("-- will work, but more slowly; consider enabling USE_EXTRA_REGS\n"); - return NULL; -} - - -/* Allocate a register. Registers that are already being used to make up - the virtual stack are ineligible. Among the others, we choose the one - whose value has the least number of subsequent uses (ideally, and - usually, 0) of the common value it already holds. If commoni is >= - 0, it is the index in common[] of the value we are going to put in - the allocated register, so we can update the various data structures - appropriately. */ -int -allocate_register(struct local *s, int commoni) -{ - int i, reg, bestreg, nuses, bestregnuses, maxreg; - - bestreg = NO_REG; -#if USE_EXTRA_REGS - maxreg = s->maxreg; -#else - maxreg = NSCRATCHREGS; -#endif - while (1) { - bestregnuses = NOT_COMMON_VALUE; - for (i = 0; i < maxreg; i++) { - reg = scratchregs[i]; - if (s->regs[reg].stacktimes == 0) { - nuses = (s->regs[reg].commoni == NOT_COMMON_VALUE) ? 
- 0 : s->common[s->regs[reg].commoni].nuses; - if (nuses < bestregnuses) { - bestreg = reg; - bestregnuses = nuses; - } - } - } - if (bestreg != NO_REG) - break; -#if USE_EXTRA_REGS - if (maxreg == NSCRATCHREGS) - return NO_REG; - s->maxreg = ++maxreg; -#else - return NO_REG; -#endif - } - if (bestregnuses > 0) - printf("net_filter_alloc: forced to reallocate r%d\n", bestreg); - /* With USE_EXTRA_REGS, we could push up the number of registers - here to have one extra available for common values, but it's usually - not worth the overhead of the extra save-and-restore in the preamble. - Anyway, this never happens with typical filters. */ - if (s->regs[bestreg].commoni != NOT_COMMON_VALUE) - s->common[s->regs[bestreg].commoni].reg = NO_REG; - if (commoni >= 0) { - s->regs[bestreg].commoni = commoni; - s->common[commoni].reg = bestreg; - } else - s->regs[bestreg].commoni = NOT_COMMON_VALUE; - return bestreg; -} - - -#define FIXED_PREAMBLE_INSTRUCTIONS 1 - -int -compile_preamble(int *instructions, struct local *s) -{ - int *instp; - int len = FIXED_PREAMBLE_INSTRUCTIONS; -#if USE_EXTRA_REGS -#error this hp code must be ported to the ppc - int extra_regs, i, j, t, disp; - - extra_regs = s->maxreg - INITIAL_NSCRATCHREGS; - if (extra_regs > 0) { - len = extra_regs * 2 + 4; - /* stw rp | (n-1) * stw | bl | stw | ldw rp | (n-1) * ldw | bv | ldw */ - } else - return 0; -#endif - if (instructions == NULL) - return len; - instp = instructions; - - *instp++ = LI(REG_ZERO, 0); - assert(instp - instructions == FIXED_PREAMBLE_INSTRUCTIONS); - -#if USE_EXTRA_REGS -#error this hp code must be ported to the ppc - /* Generate a wrapper function to save the callee-saves registers - before invoking the filter code we have generated. It would be - marginally better to have the filter branch directly to the - postamble code on return, but the difference is trivial and it - is easier to have it always branch to (rp). */ -#define FRAME_SIZE 128 /* This is plenty without being excessive. */ - *instp++ = STW_NEG(REG_RTN, 20, REG_SP); /* stw rp,-20(sp) */ - i = INITIAL_NSCRATCHREGS; - t = STWM(scratchregs[i], FRAME_SIZE, REG_SP); /* stwm r3,128(sp) */ - j = FRAME_SIZE; - while (++i < s->maxreg) { - *instp++ = t; - j -= sizeof (int); - t = STW_NEG(scratchregs[i], j, REG_SP); /* stw r4,-124(sp) &c */ - } - disp = extra_regs + 2; /* n * ldw | bv | ldw rp */ - *instp++ = BL(disp, REG_RTN); /* bl filter,rp */ - *instp++ = t; /* stw in delay slot */ - *instp++ = LDW_NEG(FRAME_SIZE + 20, REG_SP, REG_RTN); - /* ldw -148(sp),rp */ - while (--i > INITIAL_NSCRATCHREGS) { - *instp++ = LDW_NEG(j, REG_SP, scratchregs[i]); /* ldw -124(sp),r4 &c */ - j += sizeof (int); - } - *instp++ = BV(0, REG_RTN); /* bv (rp) */ - *instp++ = LDWM_NEG(FRAME_SIZE, REG_SP, scratchregs[i]); - /* ldwm -128(sp),r3 - in delay slot */ -#endif - - assert(instp - instructions == len); - return len; -} - -void -net_filter_free(filter_fct_t fp, unsigned int len) -{ - kfree((vm_offset_t) fp, len); -} - -#else /* NET_FILTER_COMPILER */ - -/* - * Compilation of a source network filter into ppc instructions - * - a small version that doesn't do anything, but doesn't take - * up any space either. Note that if using a single mklinux server - * with ethertalk enabled (standard situation), the filter passes - * everything through so no need to compile one. If running multiple - * servers then there is more of a need. Ethertalk (in linux server) - * should really have a packet filter, but at time of writing - * it does not.
- */ -filter_fct_t -net_filter_alloc( - filter_t *fpstart, - unsigned int fplen, - unsigned int *len) -{ - *len = 0; - return ((filter_fct_t)0); -} - -void -net_filter_free( - filter_fct_t fp, - unsigned int len) -{ - assert(fp == (filter_fct_t)0 && len == 0); -} -#endif /* NET_FILTER_COMPILER */ diff --git a/osfmk/ppc/notify_interrupt.c b/osfmk/ppc/notify_interrupt.c deleted file mode 100644 index 9a25a9594..000000000 --- a/osfmk/ppc/notify_interrupt.c +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -#include -#include -#include -#include -#include -#include -#include - -int debugNotify = 0; - -/* -** Function: NotifyInterruption -** -** Inputs: port - mach_port for main thread -** ppcInterrupHandler - interrupt handler to execute -** interruptStatePtr - current interrupt state -** emulatorDescriptor - where in emulator to notify -** originalPC - where the emulator was executing -** originalR2 - new R2 -** -** Outputs: -** -** Notes: -** -*/ - -unsigned long -syscall_notify_interrupt(mach_port_t, UInt32, UInt32 *, EmulatorDescriptor *, - void ** , void **, void *); - -unsigned long -syscall_notify_interrupt( mach_port_t port_thread, - UInt32 ppcInterruptHandler, - UInt32 * interruptStatePtr, - EmulatorDescriptor * emulatorDescriptor, - void ** originalPC, - void ** originalR2, - void *othread ) -{ - kern_return_t result; - struct ppc_saved_state *mainPCB; - thread_t thread, nthread; - thread_act_t act; - UInt32 interruptState, currentState, postIntMask; - extern thread_act_t port_name_to_act(mach_port_t); - boolean_t isSelf, runningInKernel; - static unsigned long sequence =0; - -#define COPYIN_INTSTATE() { \ - (void) copyin((char *) interruptStatePtr, (char *)&interruptState, sizeof(interruptState)); \ - if (emulatorDescriptor) \ - (void) copyin((char *) &emulatorDescriptor->postIntMask, (char *)&postIntMask, sizeof(postIntMask)); } -#define COPYOUT_INTSTATE() (void) copyout((char *) &interruptState, (char *)interruptStatePtr, sizeof(interruptState)) - - - act = port_name_to_act(port_thread); - - - if (act == THR_ACT_NULL) - return port_thread; - - runningInKernel = (act->mact.ksp == 0); - isSelf = (current_act() == act); - - if (!isSelf) { - /* First.. suspend the thread */ - result = thread_suspend(act); - - if (result) { - act_deallocate(act); - return port_thread; - } - - /* Now try to find and wait for any pending activitations - * to complete.. 
(the following is an expansion of - * thread_set_state()) - */ - - thread = act_lock_thread(act); - if (!act->active) { - act_unlock_thread(act); - act_deallocate(act); - return port_thread; - } - - thread_hold(act); - - while (1) { - if (!thread || act != thread->top_act) - break; - - act_unlock_thread(act); - (void) thread_stop_wait(thread); - nthread = act_lock_thread(act); - if (nthread == thread) - break; - thread_unstop(thread); - thread = nthread; - } - - } - - COPYIN_INTSTATE() - if (isSelf) - currentState = kOutsideMain; - else - currentState = (interruptState & kInterruptStateMask) >> kInterruptStateShift; - - if (debugNotify > 5) { - printf("\nNotifyInterruption: %X, %X, %X, %X, %X, %X\n", - port_thread, ppcInterruptHandler, interruptStatePtr, - emulatorDescriptor, originalPC, originalR2 ); - } - mainPCB = USER_REGS(act); - - switch (currentState) - { - case kNotifyPending: - case kInUninitialized: - if (debugNotify > 2) - printf("NotifyInterrupt: kInUninitialized\n"); - break; - - case kInPseudoKernel: - case kOutsideMain: - if (debugNotify > 2) - printf("NotifyInterrupt: kInPseudoKernel/kOutsideMain\n"); - interruptState = interruptState - | ((postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask); - COPYOUT_INTSTATE(); - break; - - case kInSystemContext: - if (debugNotify > 2) - printf("kInSystemContext: old CR %x, postIntMask %x, new CR %x\n", - mainPCB->cr, postIntMask, mainPCB->cr | postIntMask); - mainPCB->cr |= postIntMask; - break; - - case kInAlternateContext: - if (debugNotify > 2) - printf("kInAlternateContext: IN InterruptState %x, postIntMask %x\n", - interruptState, postIntMask); - interruptState = interruptState | ((postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask); - interruptState = (interruptState & ~kInterruptStateMask); - - if (runningInKernel) - interruptState |= (kNotifyPending << kInterruptStateShift); - else - interruptState |= (kInPseudoKernel << kInterruptStateShift); - - (void) copyout((char *)&mainPCB->srr0, (char *)originalPC, sizeof(originalPC)); - (void) copyout((char *)&mainPCB->r2, (char *)originalR2, sizeof(originalR2)); - COPYOUT_INTSTATE(); - if (debugNotify > 2) - printf("kInAlternateContext: Out interruptState %x, Old PC %x, New %x, R2 %x\n", - interruptState, mainPCB->srr0, ppcInterruptHandler, mainPCB->r2); - - mainPCB->srr0 = ppcInterruptHandler; - break; - - case kInExceptionHandler: - if (debugNotify > 2) - printf("NotifyInterrupt: kInExceptionHandler\n"); - interruptState = interruptState | ((postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask); - COPYOUT_INTSTATE(); - break; - - default: - if (debugNotify) - printf("NotifyInterrupt: default "); - printf("Interruption while running in unknown state\n"); - printf("interruptState = 0x%X\n",currentState); - break; - } - - if (!isSelf) { - if (thread && act == thread->top_act) - thread_unstop(thread); - thread_release(act); - act_unlock_thread(act); - thread_resume(act); - } - - act_deallocate(act); - - return port_thread; -} diff --git a/osfmk/ppc/pcb.c b/osfmk/ppc/pcb.c index e97068465..85fbf336c 100644 --- a/osfmk/ppc/pcb.c +++ b/osfmk/ppc/pcb.c @@ -79,6 +79,8 @@ extern int real_ncpus; /* Number of actual CPUs */ extern struct Saveanchor saveanchor; /* Aliged savearea anchor */ +void machine_act_terminate(thread_act_t act); + /* * These constants are dumb. They should not be in asm.h! 
*/ @@ -92,17 +94,6 @@ int vec_trap_count = 0; int vec_switch_count = 0; #endif -extern struct thread_shuttle *Switch_context( - struct thread_shuttle *old, - void (*cont)(void), - struct thread_shuttle *new); - - -#if MACH_LDEBUG || MACH_KDB -void log_thread_action (char *, long, long, long); -#endif - - /* * consider_machine_collect: try to collect machine-dependent pages */ @@ -121,73 +112,30 @@ consider_machine_adjust() consider_mapping_adjust(); } - -/* - * stack_attach: Attach a kernel stack to a thread. - */ -void -machine_kernel_stack_init( - struct thread_shuttle *thread, - void (*start_pos)(thread_t)) -{ - vm_offset_t stack; - unsigned int *kss; - struct savearea *sv; - - assert(thread->top_act->mact.pcb); - assert(thread->kernel_stack); - stack = thread->kernel_stack; - -#if MACH_ASSERT - if (watchacts & WA_PCB) - printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", thread,stack,start_pos); -#endif /* MACH_ASSERT */ - - kss = (unsigned int *)STACK_IKS(stack); - sv = thread->top_act->mact.pcb; /* This for the sake of C */ - - sv->save_lr = (unsigned int) start_pos; /* Set up the execution address */ - sv->save_srr0 = (unsigned int) start_pos; /* Here too */ - sv->save_srr1 = MSR_SUPERVISOR_INT_OFF; /* Set the normal running MSR */ - sv->save_r1 = (vm_offset_t) ((int)kss - KF_SIZE); /* Point to the top frame on the stack */ - sv->save_fpscr = 0; /* Clear all floating point exceptions */ - sv->save_vrsave = 0; /* Set the vector save state */ - sv->save_vscr[3] = 0x00010000; /* Supress java mode */ - - *((int *)sv->save_r1) = 0; /* Zero the frame backpointer */ - thread->top_act->mact.ksp = 0; /* Show that the kernel stack is in use already */ - -} - /* * switch_context: Switch from one thread to another, needed for * switching of space * */ -struct thread_shuttle* -switch_context( - struct thread_shuttle *old, - void (*continuation)(void), - struct thread_shuttle *new) +thread_t +machine_switch_context( + thread_t old, + thread_continue_t continuation, + thread_t new) { register thread_act_t old_act = old->top_act, new_act = new->top_act; - register struct thread_shuttle* retval; + register thread_t retval; pmap_t new_pmap; facility_context *fowner; - int my_cpu; - -#if MACH_LDEBUG || MACH_KDB - log_thread_action("switch", - (long)old, - (long)new, - (long)__builtin_return_address(0)); -#endif + struct per_proc_info *ppinfo; + + if (old == new) + panic("machine_switch_context"); - my_cpu = cpu_number(); - per_proc_info[my_cpu].old_thread = (unsigned int)old; - per_proc_info[my_cpu].cpu_flags &= ~traceBE; /* disable branch tracing if on */ - assert(old_act->kernel_loaded || - active_stacks[my_cpu] == old_act->thread->kernel_stack); + ppinfo = getPerProc(); /* Get our processor block */ + + ppinfo->old_thread = (unsigned int)old; + ppinfo->cpu_flags &= ~traceBE; /* disable branch tracing if on */ check_simple_locks(); @@ -196,13 +144,13 @@ switch_context( * so that it can be found by the other if needed */ if(real_ncpus > 1) { /* This is potentially slow, so only do when actually SMP */ - fowner = per_proc_info[my_cpu].FPU_owner; /* Cache this because it may change */ + fowner = ppinfo->FPU_owner; /* Cache this because it may change */ if(fowner) { /* Is there any live context? */ if(fowner->facAct == old->top_act) { /* Is it for us? */ fpu_save(fowner); /* Yes, save it */ } } - fowner = per_proc_info[my_cpu].VMX_owner; /* Cache this because it may change */ + fowner = ppinfo->VMX_owner; /* Cache this because it may change */ if(fowner) { /* Is there any live context? 
*/ if(fowner->facAct == old->top_act) { /* Is it for us? */ vec_save(fowner); /* Yes, save it */ @@ -210,20 +158,13 @@ switch_context( } } -#if DEBUG - if (watchacts & WA_PCB) { - printf("switch_context(0x%08x, 0x%x, 0x%08x)\n", - old,continuation,new); - } -#endif /* DEBUG */ - /* * If old thread is running VM, save per proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags * This bits can be modified in the per proc without updating the thread spcFlags */ if(old_act->mact.specFlags & runningVM) { old_act->mact.specFlags &= ~(userProtKey|FamVMmode); - old_act->mact.specFlags |= (per_proc_info[my_cpu].spcFlags) & (userProtKey|FamVMmode); + old_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode); } /* @@ -235,8 +176,9 @@ switch_context( if(new_act->mact.specFlags & runningVM) { /* Is the new guy running a VM? */ pmap_switch(new_act->mact.vmmCEntry->vmmPmap); /* Switch to the VM's pmap */ - per_proc_info[my_cpu].VMMareaPhys = (vm_offset_t)new_act->mact.vmmCEntry->vmmContextPhys; - per_proc_info[my_cpu].FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept; + ppinfo->VMMareaPhys = new_act->mact.vmmCEntry->vmmContextPhys; + ppinfo->VMMXAFlgs = new_act->mact.vmmCEntry->vmmXAFlgs; + ppinfo->FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept; } else { /* otherwise, we use the task's pmap */ new_pmap = new_act->task->map->pmap; @@ -245,14 +187,22 @@ switch_context( } } + if(old_act->mact.cioSpace != invalSpace) { /* Does our old guy have an active copyin/out? */ + old_act->mact.cioSpace |= cioSwitchAway; /* Show we switched away from this guy */ + hw_blow_seg(copyIOaddr); /* Blow off the first segment */ + hw_blow_seg(copyIOaddr + 0x10000000ULL); /* Blow off the second segment */ + } + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE, - (int)old, (int)new, old->sched_pri, new->sched_pri, 0); + old->reason, (int)new, old->sched_pri, new->sched_pri, 0); retval = Switch_context(old, continuation, new); assert(retval != (struct thread_shuttle*)NULL); - if (branch_tracing_enabled()) - per_proc_info[my_cpu].cpu_flags |= traceBE; /* restore branch tracing */ + if (branch_tracing_enabled()) { + ppinfo = getPerProc(); /* Get our processor block */ + ppinfo->cpu_flags |= traceBE; /* restore branch tracing */ + } /* We've returned from having switched context, so we should be * back in the original context. @@ -261,46 +211,20 @@ switch_context( return retval; } -/* - * Alter the thread's state so that a following thread_exception_return - * will make the thread return 'retval' from a syscall. - */ -void -thread_set_syscall_return( - struct thread_shuttle *thread, - kern_return_t retval) -{ - -#if MACH_ASSERT - if (watchacts & WA_PCB) - printf("thread_set_syscall_return(thr=%x,retval=%d)\n", thread,retval); -#endif /* MACH_ASSERT */ - - thread->top_act->mact.pcb->save_r3 = retval; -} - /* * Initialize the machine-dependent state for a new thread. 
*/ kern_return_t -thread_machine_create( - struct thread_shuttle *thread, - thread_act_t thr_act, - void (*start_pos)(thread_t)) +machine_thread_create( + thread_t thread, + task_t task) { - savearea *sv; /* Pointer to newly allocated savearea */ unsigned int *CIsTooLimited, i; - -#if MACH_ASSERT - if (watchacts & WA_PCB) - printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", thread, thr_act, start_pos); -#endif /* MACH_ASSERT */ - - hw_atomic_add(&saveanchor.savetarget, 4); /* Account for the number of saveareas we think we "need" + hw_atomic_add((uint32_t *)&saveanchor.savetarget, 4); /* Account for the number of saveareas we think we "need" for this activation */ - assert(thr_act->mact.pcb == (savearea *)0); /* Make sure there was no previous savearea */ + assert(thread->mact.pcb == (savearea *)0); /* Make sure there was no previous savearea */ sv = save_alloc(); /* Go get us a savearea */ @@ -308,16 +232,13 @@ thread_machine_create( sv->save_hdr.save_prev = 0; /* Clear the back pointer */ sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft); /* Mark as in use */ - sv->save_hdr.save_act = thr_act; /* Set who owns it */ - sv->save_vscr[3] = 0x00010000; /* Supress java mode */ - thr_act->mact.pcb = sv; /* Point to the save area */ - thr_act->mact.curctx = &thr_act->mact.facctx; /* Initialize facility context */ - thr_act->mact.facctx.facAct = thr_act; /* Initialize facility context pointer to activation */ - -#if MACH_ASSERT - if (watchacts & WA_PCB) - printf("pcb_init(%x) pcb=%x\n", thr_act, sv); -#endif /* MACH_ASSERT */ + sv->save_hdr.save_act = (struct thread_activation *)thread; /* Set who owns it */ + thread->mact.pcb = sv; /* Point to the save area */ + thread->mact.curctx = &thread->mact.facctx; /* Initialize facility context */ + thread->mact.facctx.facAct = thread; /* Initialize facility context pointer to activation */ + thread->mact.cioSpace = invalSpace; /* Initialize copyin/out space to invalid */ + thread->mact.preemption_count = 0; /* Initialize preemption counter */ + /* * User threads will pull their context from the pcb when first * returning to user mode, so fill in all the necessary values. @@ -325,13 +246,15 @@ thread_machine_create( * at the base of the kernel stack (see stack_attach()). */ - sv->save_srr1 = MSR_EXPORT_MASK_SET; /* Set the default user MSR */ + thread->mact.upcb = sv; /* Set user pcb */ + sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET; /* Set the default user MSR */ + sv->save_fpscr = 0; /* Clear all floating point exceptions */ + sv->save_vrsave = 0; /* Set the vector save state */ + sv->save_vscr[0] = 0x00000000; + sv->save_vscr[1] = 0x00000000; + sv->save_vscr[2] = 0x00000000; + sv->save_vscr[3] = 0x00010000; /* Disable java mode and clear saturated */ - CIsTooLimited = (unsigned int *)(&sv->save_sr0); /* Make a pointer 'cause C can't cast on the left */ - for(i=0; i<16; i++) { /* Initialize all SRs */ - CIsTooLimited[i] = SEG_REG_PROT | (i << 20) | thr_act->task->map->pmap->space; /* Set the SR value */ - } - return(KERN_SUCCESS); } @@ -339,24 +262,64 @@ thread_machine_create( * Machine-dependent cleanup prior to destroying a thread */ void -thread_machine_destroy( thread_t thread ) +machine_thread_destroy( + thread_t thread) { - spl_t s; + register savearea *pcb, *ppsv; + register savearea_vec *vsv, *vpsv; + register savearea_fpu *fsv, *fpsv; + register savearea *svp; + register int i; + +/* + * This function will release all context. 
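+ * 
+ * (A hedged sketch of the release loop that is repeated below for the
+ *  vector, float and general savearea chains; `sv' and `prev' are
+ *  stand-in names for this note only:
+ * 
+ *      while (sv) {
+ *          prev = CAST_DOWN(savearea *, sv->save_hdr.save_prev);
+ *          save_release(sv);
+ *          sv = prev;
+ *      }
+ * 
+ *  The live state is tossed first, via toss_live_vec() and
+ *  toss_live_fpu(), so nothing survives in the registers.)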
+ */ + + machine_act_terminate(thread); /* Make sure all virtual machines are dead first */ + +/* + * + * Walk through and release all floating point and vector contexts. Also kill live context. + * + */ + + toss_live_vec(thread->mact.curctx); /* Dump live vectors */ - if (thread->kernel_stack) { - s = splsched(); - stack_free(thread); - splx(s); + vsv = thread->mact.curctx->VMXsave; /* Get the top vector savearea */ + + while(vsv) { /* Any VMX saved state? */ + vpsv = vsv; /* Remember so we can toss this */ + vsv = CAST_DOWN(savearea_vec *, vsv->save_hdr.save_prev); /* Get one underneath our's */ + save_release((savearea *)vpsv); /* Release it */ } -} + + thread->mact.curctx->VMXsave = 0; /* Kill chain */ + + toss_live_fpu(thread->mact.curctx); /* Dump live float */ + + fsv = thread->mact.curctx->FPUsave; /* Get the top float savearea */ + + while(fsv) { /* Any float saved state? */ + fpsv = fsv; /* Remember so we can toss this */ + fsv = CAST_DOWN(savearea_fpu *, fsv->save_hdr.save_prev); /* Get one underneath our's */ + save_release((savearea *)fpsv); /* Release it */ + } + + thread->mact.curctx->FPUsave = 0; /* Kill chain */ /* - * flush out any lazily evaluated HW state in the - * owning thread's context, before termination. + * free all regular saveareas. */ -void -thread_machine_flush( thread_act_t cur_act ) -{ + + pcb = thread->mact.pcb; /* Get the general savearea */ + + while(pcb) { /* Any float saved state? */ + ppsv = pcb; /* Remember so we can toss this */ + pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev); /* Get one underneath our's */ + save_release(ppsv); /* Release it */ + } + + hw_atomic_sub((uint32_t *)&saveanchor.savetarget, 4); /* Unaccount for the number of saveareas we think we "need" */ } /* @@ -373,26 +336,28 @@ int switch_act_swapins = 0; */ void machine_switch_act( - thread_t thread, + thread_t thread, thread_act_t old, - thread_act_t new, - int cpu) + thread_act_t new) { pmap_t new_pmap; facility_context *fowner; + struct per_proc_info *ppinfo; + + ppinfo = getPerProc(); /* Get our processor block */ /* Our context might wake up on another processor, so we must * not keep hot state in our FPU, it must go back to the pcb * so that it can be found by the other if needed */ if(real_ncpus > 1) { /* This is potentially slow, so only do when actually SMP */ - fowner = per_proc_info[cpu_number()].FPU_owner; /* Cache this because it may change */ + fowner = ppinfo->FPU_owner; /* Cache this because it may change */ if(fowner) { /* Is there any live context? */ if(fowner->facAct == old) { /* Is it for us? */ fpu_save(fowner); /* Yes, save it */ } } - fowner = per_proc_info[cpu_number()].VMX_owner; /* Cache this because it may change */ + fowner = ppinfo->VMX_owner; /* Cache this because it may change */ if(fowner) { /* Is there any live context? */ if(fowner->facAct == old) { /* Is it for us? */ vec_save(fowner); /* Yes, save it */ @@ -400,9 +365,9 @@ machine_switch_act( } } - active_stacks[cpu] = thread->kernel_stack; + old->mact.cioSpace |= cioSwitchAway; /* Show we switched away from this guy */ - ast_context(new, cpu); + ast_context(new, cpu_number()); /* Activations might have different pmaps * (process->kernel->server, for example). @@ -421,14 +386,6 @@ machine_switch_act( } -void -pcb_user_to_kernel(thread_act_t act) -{ - - return; /* Not needed, I hope... */ -} - - /* * act_machine_sv_free * release saveareas associated with an act. 
if flag is true, release @@ -440,8 +397,8 @@ void act_machine_sv_free(thread_act_t act) { register savearea *pcb, *userpcb; - register savearea_vec *vsv, *vpsv; - register savearea_fpu *fsv, *fpsv; + register savearea_vec *vsv, *vpst, *vsvt; + register savearea_fpu *fsv, *fpst, *fsvt; register savearea *svp; register int i; @@ -453,40 +410,68 @@ act_machine_sv_free(thread_act_t act) * * Walk through and release all floating point and vector contexts that are not * user state. We will also blow away live context if it belongs to non-user state. + * Note that the level can not change while we are in this code. Nor can another + * context be pushed on the stack. + * + * We do nothing here if the current level is user. Otherwise, + * the live context is cleared. Then we find the user saved context. + * Next, we take the sync lock (to keep us from munging things in *_switch). + * The level is set to 0 and all stacked context other than user is dequeued. + * Then we unlock. Next, all of the old kernel contexts are released. * */ if(act->mact.curctx->VMXlevel) { /* Is the current level user state? */ + toss_live_vec(act->mact.curctx); /* Dump live vectors if is not user */ - act->mact.curctx->VMXlevel = 0; /* Mark as user state */ - } - vsv = act->mact.curctx->VMXsave; /* Get the top vector savearea */ - - while(vsv) { /* Any VMX saved state? */ - vpsv = vsv; /* Remember so we can toss this */ - if (!vsv->save_hdr.save_level) break; /* Done when hit user if any */ - vsv = (savearea_vec *)vsv->save_hdr.save_prev; /* Get one underneath our's */ - save_ret((savearea *)vpsv); /* Release it */ - } + vsv = act->mact.curctx->VMXsave; /* Get the top vector savearea */ + + while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev; /* Find user context if any */ - act->mact.curctx->VMXsave = vsv; /* Queue the user context to the top */ + if(!hw_lock_to((hw_lock_t)&act->mact.curctx->VMXsync, LockTimeOut)) { /* Get the sync lock */ + panic("act_machine_sv_free - timeout getting VMX sync lock\n"); /* Tell all and die */ + } + + vsvt = act->mact.curctx->VMXsave; /* Get the top of the chain */ + act->mact.curctx->VMXsave = vsv; /* Point to the user context */ + act->mact.curctx->VMXlevel = 0; /* Set the level to user */ + hw_lock_unlock((hw_lock_t)&act->mact.curctx->VMXsync); /* Unlock */ + + while(vsvt) { /* Clear any VMX saved state */ + if (vsvt == vsv) break; /* Done when hit user if any */ + vpst = vsvt; /* Remember so we can toss this */ + vsvt = (savearea_vec *)vsvt->save_hdr.save_prev; /* Get one underneath our's */ + save_ret((savearea *)vpst); /* Release it */ + } + + } if(act->mact.curctx->FPUlevel) { /* Is the current level user state? */ - toss_live_fpu(act->mact.curctx); /* Dump live float if is not user */ - act->mact.curctx->FPUlevel = 0; /* Mark as user state */ - } + + toss_live_fpu(act->mact.curctx); /* Dump live floats if is not user */ - fsv = act->mact.curctx->FPUsave; /* Get the top float savearea */ + fsv = act->mact.curctx->FPUsave; /* Get the top floats savearea */ + + while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev; /* Find user context if any */ - while(fsv) { /* Any float saved state? 
*/ - fpsv = fsv; /* Remember so we can toss this */ - if (!fsv->save_hdr.save_level) break; /* Done when hit user if any */ - fsv = (savearea_fpu *)fsv->save_hdr.save_prev; /* Get one underneath our's */ - save_ret((savearea *)fpsv); /* Release it */ + if(!hw_lock_to((hw_lock_t)&act->mact.curctx->FPUsync, LockTimeOut)) { /* Get the sync lock */ + panic("act_machine_sv_free - timeout getting FPU sync lock\n"); /* Tell all and die */ + } + + fsvt = act->mact.curctx->FPUsave; /* Get the top of the chain */ + act->mact.curctx->FPUsave = fsv; /* Point to the user context */ + act->mact.curctx->FPUlevel = 0; /* Set the level to user */ + hw_lock_unlock((hw_lock_t)&act->mact.curctx->FPUsync); /* Unlock */ + + while(fsvt) { /* Clear any VMX saved state */ + if (fsvt == fsv) break; /* Done when hit user if any */ + fpst = fsvt; /* Remember so we can toss this */ + fsvt = (savearea_fpu *)fsvt->save_hdr.save_prev; /* Get one underneath our's */ + save_ret((savearea *)fpst); /* Release it */ + } + } - - act->mact.curctx->FPUsave = fsv; /* Queue the user context to the top */ /* * free all regular saveareas except a user savearea, if any @@ -501,7 +486,7 @@ act_machine_sv_free(thread_act_t act) break; } svp = pcb; /* Remember this */ - pcb = pcb->save_hdr.save_prev; /* Get one underneath our's */ + pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev); /* Get one underneath our's */ save_ret(svp); /* Release it */ } @@ -509,13 +494,15 @@ act_machine_sv_free(thread_act_t act) } +void +machine_thread_set_current(thread_t thread) +{ + set_machine_current_act(thread->top_act); +} -/* - * act_virtual_machine_destroy: - * Shutdown any virtual machines associated with a thread - */ void -act_virtual_machine_destroy(thread_act_t act) +machine_act_terminate( + thread_act_t act) { if(act->mact.bbDescAddr) { /* Check if the Blue box assist is active */ disable_bluebox_internal(act); /* Kill off bluebox */ @@ -526,156 +513,14 @@ act_virtual_machine_destroy(thread_act_t act) } } -/* - * act_machine_destroy: Shutdown any state associated with a thread pcb. - */ void -act_machine_destroy(thread_act_t act) +machine_thread_terminate_self(void) { - - register savearea *pcb, *ppsv; - register savearea_vec *vsv, *vpsv; - register savearea_fpu *fsv, *fpsv; - register savearea *svp; - register int i; - -#if MACH_ASSERT - if (watchacts & WA_PCB) - printf("act_machine_destroy(0x%x)\n", act); -#endif /* MACH_ASSERT */ - -/* - * This function will release all context. - */ - - act_virtual_machine_destroy(act); /* Make sure all virtual machines are dead first */ - -/* - * - * Walk through and release all floating point and vector contexts. Also kill live context. - * - */ - - toss_live_vec(act->mact.curctx); /* Dump live vectors */ - - vsv = act->mact.curctx->VMXsave; /* Get the top vector savearea */ - - while(vsv) { /* Any VMX saved state? */ - vpsv = vsv; /* Remember so we can toss this */ - vsv = (savearea_vec *)vsv->save_hdr.save_prev; /* Get one underneath our's */ - save_release((savearea *)vpsv); /* Release it */ - } - - act->mact.curctx->VMXsave = 0; /* Kill chain */ - - toss_live_fpu(act->mact.curctx); /* Dump live float */ - - fsv = act->mact.curctx->FPUsave; /* Get the top float savearea */ - - while(fsv) { /* Any float saved state? */ - fpsv = fsv; /* Remember so we can toss this */ - fsv = (savearea_fpu *)fsv->save_hdr.save_prev; /* Get one underneath our's */ - save_release((savearea *)fpsv); /* Release it */ - } - - act->mact.curctx->FPUsave = 0; /* Kill chain */ - -/* - * free all regular saveareas. 
- */ - - pcb = act->mact.pcb; /* Get the general savearea */ - - while(pcb) { /* Any float saved state? */ - ppsv = pcb; /* Remember so we can toss this */ - pcb = pcb->save_hdr.save_prev; /* Get one underneath our's */ - save_release(ppsv); /* Release it */ - } - - hw_atomic_sub(&saveanchor.savetarget, 4); /* Unaccount for the number of saveareas we think we "need" */ - -} - - -kern_return_t -act_machine_create(task_t task, thread_act_t thr_act) -{ - /* - * Clear & Init the pcb (sets up user-mode s regs) - * We don't use this anymore. - */ - - return KERN_SUCCESS; -} - -void act_machine_init() -{ -#if MACH_ASSERT - if (watchacts & WA_PCB) - printf("act_machine_init()\n"); -#endif /* MACH_ASSERT */ - - /* Good to verify these once */ - assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX ); - - assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT ); - assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT ); - assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT ); - - /* - * If we start using kernel activations, - * would normally create kernel_thread_pool here, - * populating it from the act_zone - */ -} - -void -act_machine_return(int code) -{ - thread_act_t thr_act = current_act(); - -#if MACH_ASSERT - if (watchacts & WA_EXIT) - printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n", - code, thr_act, thr_act->ref_count, - thr_act->thread, thr_act->thread->ref_count); -#endif /* MACH_ASSERT */ - - - /* - * This code is called with nothing locked. - * It also returns with nothing locked, if it returns. - * - * This routine terminates the current thread activation. - * If this is the only activation associated with its - * thread shuttle, then the entire thread (shuttle plus - * activation) is terminated. - */ - assert( code == KERN_TERMINATED ); - assert( thr_act ); - assert(thr_act->thread->top_act == thr_act); - - /* This is the only activation attached to the shuttle... */ - - thread_terminate_self(); - - /*NOTREACHED*/ - panic("act_machine_return: TALKING ZOMBIE! (1)"); -} - -void -thread_machine_set_current(struct thread_shuttle *thread) -{ - register int my_cpu = cpu_number(); - - set_machine_current_thread(thread); - set_machine_current_act(thread->top_act); - - active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL; + machine_act_terminate(current_act()); } void -thread_machine_init(void) +machine_thread_init(void) { #ifdef MACHINE_STACK #if KERNEL_STACK_SIZE > PPC_PGBYTES @@ -703,8 +548,8 @@ int thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0, thr_act->task, thr_act->task ? 
thr_act->task->ref_count : 0); - printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n", - thr_act->alerts, thr_act->alert_mask, + printf("\tsusp=%x active=%x hi=%x lo=%x\n", + 0 /*thr_act->alerts*/, 0 /*thr_act->alert_mask*/, thr_act->suspend_count, thr_act->active, thr_act->higher, thr_act->lower); @@ -716,10 +561,7 @@ int unsigned int get_useraddr() { - - thread_act_t thr_act = current_act(); - - return(thr_act->mact.pcb->save_srr0); + return(current_act()->mact.upcb->save_srr0); } /* @@ -727,7 +569,8 @@ get_useraddr() */ vm_offset_t -stack_detach(thread_t thread) +machine_stack_detach( + thread_t thread) { vm_offset_t stack; @@ -756,9 +599,10 @@ stack_detach(thread_t thread) */ void -stack_attach(struct thread_shuttle *thread, - vm_offset_t stack, - void (*start_pos)(thread_t)) +machine_stack_attach( + thread_t thread, + vm_offset_t stack, + void (*start)(thread_t)) { thread_act_t thr_act; unsigned int *kss; @@ -766,7 +610,7 @@ stack_attach(struct thread_shuttle *thread, KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH), thread, thread->priority, - thread->sched_pri, start_pos, + thread->sched_pri, start, 0); assert(stack); @@ -778,18 +622,18 @@ stack_attach(struct thread_shuttle *thread, if ((thr_act = thread->top_act) != 0) { sv = save_get(); /* cannot block */ sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft); /* Mark as in use */ - sv->save_hdr.save_act = thr_act; - sv->save_hdr.save_prev = thr_act->mact.pcb; + sv->save_hdr.save_act = (struct thread_activation *)thr_act; + sv->save_hdr.save_prev = (addr64_t)((uintptr_t)thr_act->mact.pcb); thr_act->mact.pcb = sv; - sv->save_srr0 = (unsigned int) start_pos; + sv->save_srr0 = (unsigned int) start; /* sv->save_r3 = ARG ? */ sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE); sv->save_srr1 = MSR_SUPERVISOR_INT_OFF; sv->save_fpscr = 0; /* Clear all floating point exceptions */ sv->save_vrsave = 0; /* Set the vector save state */ sv->save_vscr[3] = 0x00010000; /* Supress java mode */ - *((int *)sv->save_r1) = 0; + *(CAST_DOWN(int *, sv->save_r1)) = 0; thr_act->mact.ksp = 0; } @@ -801,60 +645,68 @@ stack_attach(struct thread_shuttle *thread, */ void -stack_handoff(thread_t old, - thread_t new) +machine_stack_handoff( + thread_t old, + thread_t new) { vm_offset_t stack; pmap_t new_pmap; facility_context *fowner; - int my_cpu; + mapping *mp; + struct per_proc_info *ppinfo; assert(new->top_act); assert(old->top_act); + + if (old == new) + panic("machine_stack_handoff"); - my_cpu = cpu_number(); - stack = stack_detach(old); + stack = machine_stack_detach(old); new->kernel_stack = stack; - if (stack == old->stack_privilege) { - assert(new->stack_privilege); - old->stack_privilege = new->stack_privilege; - new->stack_privilege = stack; + if (stack == old->reserved_stack) { + assert(new->reserved_stack); + old->reserved_stack = new->reserved_stack; + new->reserved_stack = stack; } - per_proc_info[my_cpu].cpu_flags &= ~traceBE; + ppinfo = getPerProc(); /* Get our processor block */ + + ppinfo->cpu_flags &= ~traceBE; /* Turn off special branch trace */ if(real_ncpus > 1) { /* This is potentially slow, so only do when actually SMP */ - fowner = per_proc_info[my_cpu].FPU_owner; /* Cache this because it may change */ + fowner = ppinfo->FPU_owner; /* Cache this because it may change */ if(fowner) { /* Is there any live context? */ if(fowner->facAct == old->top_act) { /* Is it for us? 
*/ fpu_save(fowner); /* Yes, save it */ } } - fowner = per_proc_info[my_cpu].VMX_owner; /* Cache this because it may change */ + fowner = ppinfo->VMX_owner; /* Cache this because it may change */ if(fowner) { /* Is there any live context? */ if(fowner->facAct == old->top_act) { /* Is it for us? */ vec_save(fowner); /* Yes, save it */ } } } + /* * If old thread is running VM, save per proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags * This bits can be modified in the per proc without updating the thread spcFlags */ if(old->top_act->mact.specFlags & runningVM) { /* Is the current thread running a VM? */ old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode); - old->top_act->mact.specFlags |= (per_proc_info[my_cpu].spcFlags) & (userProtKey|FamVMmode); + old->top_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode); } KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE, - (int)old, (int)new, old->sched_pri, new->sched_pri, 0); + old->reason, (int)new, old->sched_pri, new->sched_pri, 0); if(new->top_act->mact.specFlags & runningVM) { /* Is the new guy running a VM? */ pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap); /* Switch to the VM's pmap */ - per_proc_info[my_cpu].VMMareaPhys = (vm_offset_t)new->top_act->mact.vmmCEntry->vmmContextPhys; - per_proc_info[my_cpu].FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept; + ppinfo->VMMareaPhys = new->top_act->mact.vmmCEntry->vmmContextPhys; + ppinfo->VMMXAFlgs = new->top_act->mact.vmmCEntry->vmmXAFlgs; + ppinfo->FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept; } else { /* otherwise, we use the task's pmap */ new_pmap = new->top_act->task->map->pmap; @@ -863,17 +715,20 @@ stack_handoff(thread_t old, } } - thread_machine_set_current(new); - active_stacks[my_cpu] = new->kernel_stack; - per_proc_info[my_cpu].Uassist = new->top_act->mact.cthread_self; + machine_thread_set_current(new); + ppinfo->Uassist = new->top_act->mact.cthread_self; - per_proc_info[my_cpu].ppbbTaskEnv = new->top_act->mact.bbTaskEnv; - per_proc_info[my_cpu].spcFlags = new->top_act->mact.specFlags; + ppinfo->ppbbTaskEnv = new->top_act->mact.bbTaskEnv; + ppinfo->spcFlags = new->top_act->mact.specFlags; + + old->top_act->mact.cioSpace |= cioSwitchAway; /* Show we switched away from this guy */ + mp = (mapping *)&ppinfo->ppCIOmp; + mp->mpSpace = invalSpace; /* Since we can't handoff in the middle of copy in/out, just invalidate */ if (branch_tracing_enabled()) - per_proc_info[my_cpu].cpu_flags |= traceBE; + ppinfo->cpu_flags |= traceBE; - if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act); /* Cut trace entry if tracing */ + if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act, 0); /* Cut trace entry if tracing */ return; } @@ -902,19 +757,3 @@ call_continuation(void (*continuation)(void) ) return; } - -void -thread_swapin_mach_alloc(thread_t thread) -{ - struct savearea *sv; - - assert(thread->top_act->mact.pcb == 0); - - sv = save_alloc(); - assert(sv); - sv->save_hdr.save_prev = 0; /* Initialize back chain */ - sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft); /* Mark as in use */ - sv->save_hdr.save_act = thread->top_act; /* Initialize owner */ - thread->top_act->mact.pcb = sv; - -} diff --git a/osfmk/ppc/pmap.c b/osfmk/ppc/pmap.c index 180e4cc11..9b2cfeed3 100644 --- a/osfmk/ppc/pmap.c +++ b/osfmk/ppc/pmap.c @@ -99,6 +99,7 @@ #include #include #include 
+#include #include #include @@ -110,7 +111,6 @@ #include #include -#include #include #include @@ -118,35 +118,21 @@ #include #include #include +#include #include -#if DB_MACHINE_COMMANDS -/* optionally enable traces of pmap operations in post-mortem trace table */ -/* #define PMAP_LOWTRACE 1 */ -#define PMAP_LOWTRACE 0 -#else /* DB_MACHINE_COMMANDS */ -/* Can not trace even if we wanted to */ -#define PMAP_LOWTRACE 0 -#endif /* DB_MACHINE_COMMANDS */ - -#define PERFTIMES 0 - -#if PERFTIMES && DEBUG -#define debugLog2(a, b, c) dbgLog2(a, b, c) -#else -#define debugLog2(a, b, c) -#endif - extern unsigned int avail_remaining; extern unsigned int mappingdeb0; extern struct Saveanchor saveanchor; /* Aliged savearea anchor */ extern int real_ncpus; /* Number of actual CPUs */ -unsigned int debugbackpocket; /* (TEST/DEBUG) */ +unsigned int debugbackpocket; /* (TEST/DEBUG) */ -vm_offset_t avail_next; vm_offset_t first_free_virt; int current_free_region; /* Used in pmap_next_page */ +pmapTransTab *pmapTrans; /* Point to the hash to pmap translations */ +struct phys_entry *phys_table; + /* forward */ void pmap_activate(pmap_t pmap, thread_t th, int which_cpu); void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu); @@ -156,28 +142,20 @@ void copy_to_phys(vm_offset_t sva, vm_offset_t dpa, int bytecount); int pmap_list_resident_pages(pmap_t pmap, vm_offset_t *listp, int space); #endif -#if DEBUG -#define PDB_USER 0x01 /* exported functions */ -#define PDB_MAPPING 0x02 /* low-level mapping routines */ -#define PDB_ENTER 0x04 /* pmap_enter specifics */ -#define PDB_COPY 0x08 /* copy page debugging */ -#define PDB_ZERO 0x10 /* zero page debugging */ -#define PDB_WIRED 0x20 /* things concerning wired entries */ -#define PDB_PTEG 0x40 /* PTEG overflows */ -#define PDB_LOCK 0x100 /* locks */ -#define PDB_IO 0x200 /* Improper use of WIMG_IO checks - PCI machines */ - -int pmdebug=0; -#endif - /* NOTE: kernel_pmap_store must be in V=R storage and aligned!!!!!!!!!!!!!! */ extern struct pmap kernel_pmap_store; pmap_t kernel_pmap; /* Pointer to kernel pmap and anchor for in-use pmaps */ +addr64_t kernel_pmap_phys; /* Pointer to kernel pmap and anchor for in-use pmaps, physical address */ pmap_t cursor_pmap; /* Pointer to last pmap allocated or previous if removed from in-use list */ +pmap_t sharedPmap; /* Pointer to common pmap for 64-bit address spaces */ struct zone *pmap_zone; /* zone of pmap structures */ boolean_t pmap_initialized = FALSE; +int ppc_max_pmaps; /* Maximum number of concurrent address spaces allowed. This is machine dependent */ +addr64_t vm_max_address; /* Maximum effective address supported */ +addr64_t vm_max_physical; /* Maximum physical address supported */ + /* * Physical-to-virtual translations are handled by inverted page table * structures, phys_tables. Multiple mappings of a single page are handled @@ -185,11 +163,6 @@ boolean_t pmap_initialized = FALSE; * for phys_tables of the physical memory we know about, but more may be * added as it is discovered (eg. by drivers). */ -struct phys_entry *phys_table; /* For debugging */ - -lock_t pmap_system_lock; - -decl_simple_lock_data(,tlb_system_lock) /* * free pmap list. 
caches the first free_pmap_max pmaps that are freed up @@ -203,121 +176,34 @@ decl_simple_lock_data(,free_pmap_lock) * Function to get index into phys_table for a given physical address */ -struct phys_entry *pmap_find_physentry(vm_offset_t pa) +struct phys_entry *pmap_find_physentry(ppnum_t pa) { int i; - struct phys_entry *entry; + unsigned int entry; - for (i = pmap_mem_regions_count-1; i >= 0; i--) { - if (pa < pmap_mem_regions[i].start) - continue; - if (pa >= pmap_mem_regions[i].end) - return PHYS_NULL; + for (i = pmap_mem_regions_count - 1; i >= 0; i--) { + if (pa < pmap_mem_regions[i].mrStart) continue; /* See if we fit in this region */ + if (pa > pmap_mem_regions[i].mrEnd) continue; /* Check the end too */ - entry = &pmap_mem_regions[i].phys_table[(pa - pmap_mem_regions[i].start) >> PPC_PGSHIFT]; - __asm__ volatile("dcbt 0,%0" : : "r" (entry)); /* We will use this in a little bit */ - return entry; + entry = (unsigned int)pmap_mem_regions[i].mrPhysTab + ((pa - pmap_mem_regions[i].mrStart) * sizeof(phys_entry)); + return (struct phys_entry *)entry; } - kprintf("DEBUG : pmap_find_physentry 0x%08x out of range\n",pa); - return PHYS_NULL; +// kprintf("DEBUG - pmap_find_physentry: page 0x%08X not found\n", pa); + return 0; } /* * kern_return_t * pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa, * boolean_t available, unsigned int attr) - * Allocate some extra physentries for the physical addresses given, - * specifying some default attribute that on the powerpc specifies - * the default cachability for any mappings using these addresses - * If the memory is marked as available, it is added to the general - * VM pool, otherwise it is not (it is reserved for card IO etc). + * + * THIS IS NOT SUPPORTED */ kern_return_t pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa, boolean_t available, unsigned int attr) { - int i,j; - spl_t s; - - /* Only map whole pages */ panic("Forget it! You can't map no more memory, you greedy puke!\n"); - - spa = trunc_page(spa); - epa = round_page(epa); - - /* First check that the region doesn't already exist */ - - assert (epa >= spa); - for (i = 0; i < pmap_mem_regions_count; i++) { - /* If we're below the next region, then no conflict */ - if (epa < pmap_mem_regions[i].start) - break; - if (spa < pmap_mem_regions[i].end) { -#if DEBUG - kprintf("pmap_add_physical_memory(0x%08x,0x%08x,0x%08x) - memory already present\n",spa,epa,attr); -#endif /* DEBUG */ - return KERN_NO_SPACE; - } - } - -#if DEBUG - kprintf("pmap_add_physical_memory; region insert spot: %d out of %d\n", i, pmap_mem_regions_count); /* (TEST/DEBUG) */ -#endif - - /* Check that we've got enough space for another region */ - if (pmap_mem_regions_count == PMAP_MEM_REGION_MAX) - return KERN_RESOURCE_SHORTAGE; - - /* Once here, i points to the mem_region above ours in physical mem */ - - /* allocate a new phys_table for this new region */ -#if DEBUG - kprintf("pmap_add_physical_memory; kalloc\n"); /* (TEST/DEBUG) */ -#endif - - phys_table = (struct phys_entry *) - kalloc(sizeof(struct phys_entry) * atop(epa-spa)); -#if DEBUG - kprintf("pmap_add_physical_memory; new phys_table: %08X\n", phys_table); /* (TEST/DEBUG) */ -#endif - - /* Initialise the new phys_table entries */ - for (j = 0; j < atop(epa-spa); j++) { - - phys_table[j].phys_link = MAPPING_NULL; - - mapping_phys_init(&phys_table[j], spa+(j*PAGE_SIZE), attr); /* Initialize the hardware specific portions */ - - } - s = splhigh(); - - /* Move all the phys_table entries up some to make room in - * the ordered list. 
- */ - for (j = pmap_mem_regions_count; j > i ; j--) - pmap_mem_regions[j] = pmap_mem_regions[j-1]; - - /* Insert a new entry with some memory to back it */ - - pmap_mem_regions[i].start = spa; - pmap_mem_regions[i].end = epa; - pmap_mem_regions[i].phys_table = phys_table; - - pmap_mem_regions_count++; - splx(s); - -#if DEBUG - for(i=0; i spa); - debugLog2(40, va, spa); /* Log pmap_map call */ - - pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_DEFAULT, blkPerm); /* Set up a permanent block mapped area */ - - debugLog2(41, epa, prot); /* Log pmap_map call */ - - return(va); -} - -/* - * pmap_map_bd(va, spa, epa, prot) - * Back-door routine for mapping kernel VM at initialisation. - * Used for mapping memory outside the known physical memory - * space, with caching disabled. Designed for use by device probes. - * - * A virtual address range starting at "va" is mapped to the physical - * address range "spa" to "epa" with machine independent protection - * "prot". - * - * "va", "spa", and "epa" are byte addresses and must be on machine - * independent page boundaries. - * - * WARNING: The current version of memcpy() can use the dcbz instruction - * on the destination addresses. This will cause an alignment exception - * and consequent overhead if the destination is caching-disabled. So - * avoid memcpy()ing into the memory mapped by this function. - * - * also, many other pmap_ routines will misbehave if you try and change - * protections or remove these mappings, they are designed to be permanent. - * - * These areas will be added to the autogen list, if possible. Existing translations - * are overridden and their mapping stuctures are released. This takes place in - * the autogen_map function. - * - * Locking: - * this routine is called only during system initialization when only - * one processor is active, so no need to take locks... - */ -vm_offset_t -pmap_map_bd( - vm_offset_t va, - vm_offset_t spa, - vm_offset_t epa, - vm_prot_t prot) -{ - register struct mapping *mp; - register struct phys_entry *pp; + addr64_t colladr; - - if (spa == epa) - return(va); + if (spa == epa) return(va); assert(epa > spa); - debugLog2(42, va, epa); /* Log pmap_map_bd call */ - - pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_IO, blkPerm); /* Set up autogen area */ - - debugLog2(43, epa, prot); /* Log pmap_map_bd exit */ + colladr = mapping_make(kernel_pmap, (addr64_t)va, (ppnum_t)(spa >> 12), (mmFlgBlock | mmFlgPerm), (epa - spa) >> 12, prot & VM_PROT_ALL); + if(colladr) { /* Was something already mapped in the range? */ + panic("pmap_map: attempt to map previously mapped range - va = %08X, pa = %08X, epa = %08X, collision = %016llX\n", + va, spa, epa, colladr); + } return(va); } @@ -417,303 +253,203 @@ pmap_map_bd( * Called with mapping done by BATs. Page_size must already be set. 
* * Parameters: - * mem_size: Total memory present + * msize: Total memory present * first_avail: First virtual address available - * first_phys_avail: First physical address available + * kmapsize: Size of kernel text and data */ void -pmap_bootstrap(unsigned int mem_size, vm_offset_t *first_avail, vm_offset_t *first_phys_avail, unsigned int kmapsize) +pmap_bootstrap(uint64_t msize, vm_offset_t *first_avail, unsigned int kmapsize) { register struct mapping *mp; vm_offset_t addr; vm_size_t size; - int i, num, j, rsize, mapsize, vmpagesz, vmmapsz; - unsigned int mask; - vm_offset_t first_used_addr; - PCA *pcaptr; - - *first_avail = round_page(*first_avail); - -#if DEBUG - kprintf("first_avail=%08X; first_phys_avail=%08X; avail_remaining=%d\n", - *first_avail, *first_phys_avail, avail_remaining); -#endif + int i, num, j, rsize, mapsize, vmpagesz, vmmapsz, bank, nbits; + uint64_t tmemsize; + uint_t htslop; + vm_offset_t first_used_addr, PCAsize; + struct phys_entry *phys_table; - assert(PAGE_SIZE == PPC_PGBYTES); + *first_avail = round_page_32(*first_avail); /* Make sure we start out on a page boundary */ + vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Set the highest address know to VM */ /* * Initialize kernel pmap */ kernel_pmap = &kernel_pmap_store; + kernel_pmap_phys = (addr64_t)&kernel_pmap_store; cursor_pmap = &kernel_pmap_store; - lock_init(&pmap_system_lock, - FALSE, /* NOT a sleep lock */ - ETAP_VM_PMAP_SYS, - ETAP_VM_PMAP_SYS_I); - simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL); kernel_pmap->pmap_link.next = (queue_t)kernel_pmap; /* Set up anchor forward */ kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap; /* Set up anchor reverse */ kernel_pmap->ref_count = 1; + kernel_pmap->pmapFlags = pmapKeyDef; /* Set the default keys */ + kernel_pmap->pmapCCtl = pmapCCtlVal; /* Initialize cache control */ kernel_pmap->space = PPC_SID_KERNEL; - kernel_pmap->pmapvr = 0; /* Virtual = Real */ - kernel_pmap->bmaps = 0; /* No block pages just yet */ - for(i=0; i < 128; i++) { /* Clear usage slots */ - kernel_pmap->pmapUsage[i] = 0; - } - for(i=0; i < 16; i++) { /* Initialize for laughs */ - kernel_pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | PPC_SID_KERNEL; - } - - /* - * Allocate: (from first_avail up) - * Aligned to its own size: - * hash table (for mem size 2**x, allocate 2**(x-10) entries) - * mapping table (same size and immediatly following hash table) - */ - /* hash_table_size must be a power of 2, recommended sizes are - * taken from PPC601 User Manual, table 6-19. We take the next - * highest size if mem_size is not a power of two. - * TODO NMGS make this configurable at boot time. - */ - - num = sizeof(pte_t) * (mem_size >> 10); + kernel_pmap->pmapvr = 0; /* Virtual = Real */ - for (hash_table_size = 64 * 1024; /* minimum size = 64Kbytes */ - hash_table_size < num; - hash_table_size *= 2) - continue; - - if (num > (sizeof(pte_t) * 524288)) - hash_table_size = hash_table_size/2; /* reduce by half above 512MB */ +/* + * The hash table wants to have one pteg for every 2 physical pages. + * We will allocate this in physical RAM, outside of kernel virtual memory, + * at the top of the highest bank that will contain it. + * Note that "bank" doesn't refer to a physical memory slot here, it is a range of + * physically contiguous memory. + * + * The PCA will go there as well, immediately before the hash table. 
+ */
+
+	nbits = cntlzw(((msize << 1) - 1) >> 32);	/* Get first bit in upper half */
+	if(nbits == 32) nbits = nbits + cntlzw((uint_t)((msize << 1) - 1));	/* If upper half was empty, find bit in bottom half */
+	tmemsize = 0x8000000000000000ULL >> nbits;	/* Get memory size rounded up to power of 2 */
+
+	if(tmemsize > 0x0000002000000000ULL) tmemsize = 0x0000002000000000ULL;	/* Make sure we don't make an unsupported hash table size */

-	/* Scale to within any physical memory layout constraints */
-	do {
-		num = atop(mem_size);	/* num now holds mem_size in pages */
+	hash_table_size = (uint_t)(tmemsize >> 13) * per_proc_info[0].pf.pfPTEG;	/* Get provisional hash_table_size */
+	if(hash_table_size < (256 * 1024)) hash_table_size = (256 * 1024);	/* Make sure we are at least minimum size */

-		/* size of all structures that we're going to allocate */
+	while(1) {				/* Try to fit hash table and PCA into contiguous memory */

-		size = (vm_size_t) (
-			(InitialSaveBloks * PAGE_SIZE) +	/* Allow space for the initial context saveareas */
-			((InitialSaveBloks / 2) * PAGE_SIZE) +	/* For backpocket saveareas */
-			hash_table_size +			/* For hash table */
-			hash_table_size +			/* For PTEG allocation table */
-			(num * sizeof(struct phys_entry))	/* For the physical entries */
-			);
+		if(hash_table_size < (256 * 1024)) {	/* Have we dropped too short? This should never, ever happen */
+			panic("pmap_bootstrap: Can't find space for hash table\n");	/* This will never print, system isn't up far enough... */
+		}

-		mapsize = size = round_page(size);	/* Get size of area to map that we just calculated */
-		mapsize = mapsize + kmapsize;		/* Account for the kernel text size */
+		PCAsize = (hash_table_size / per_proc_info[0].pf.pfPTEG) * sizeof(PCA);	/* Get total size of PCA table */
+		PCAsize = round_page_32(PCAsize);	/* Make sure it is at least a page long */
+
+		for(bank = pmap_mem_regions_count - 1; bank >= 0; bank--) {	/* Search backwards through banks */
+
+			hash_table_base = ((addr64_t)pmap_mem_regions[bank].mrEnd << 12) - hash_table_size + PAGE_SIZE;	/* Get tentative address */
+
+			htslop = hash_table_base & (hash_table_size - 1);	/* Get the extra that we will round down when we align */
+			hash_table_base = hash_table_base & -(addr64_t)hash_table_size;	/* Round down to correct boundary */
+
+			if((hash_table_base - round_page_32(PCAsize)) >= ((addr64_t)pmap_mem_regions[bank].mrStart << 12)) break;	/* Leave if we fit */
+		}
+
+		if(bank >= 0) break;			/* We are done if we found a suitable bank */
+
+		hash_table_size = hash_table_size >> 1;	/* Try the next size down */
+	}

-		vmpagesz = round_page(num * sizeof(struct vm_page));	/* Allow for all vm_pages needed to map physical mem */
-		vmmapsz = round_page((num / 8) * sizeof(struct vm_map_entry));	/* Allow for vm_maps */
+	if(htslop) {				/* If there was slop (i.e., wasted pages for alignment) add a new region */
+		for(i = pmap_mem_regions_count - 1; i >= bank; i--) {	/* Copy from end to our bank, including our bank */
+			pmap_mem_regions[i + 1].mrStart = pmap_mem_regions[i].mrStart;	/* Set the start of the bank */
+			pmap_mem_regions[i + 1].mrAStart = pmap_mem_regions[i].mrAStart;	/* Set the start of allocatable area */
+			pmap_mem_regions[i + 1].mrEnd = pmap_mem_regions[i].mrEnd;	/* Set the end address of bank */
+			pmap_mem_regions[i + 1].mrAEnd = pmap_mem_regions[i].mrAEnd;	/* Set the end address of allocatable area */
+		}

-		mapsize = mapsize + vmpagesz + vmmapsz;	/* Add the VM system estimates into the grand total */
-
-		mapsize = mapsize + (4 * 1024 * 1024);	/* Allow for 4 meg of extra mappings */
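A minimal standalone sketch of the sizing arithmetic added above (illustrative only, not kernel code: cntlzw is reimplemented portably, and PTEG_SIZE is a hypothetical stand-in for the processor-dependent per_proc_info[0].pf.pfPTEG):

/* Sketch: round a memory size up to a power of two with count-leading-zeros,
 * then derive a provisional hash table size (one PTEG per two 4K pages).
 * Assumes msize < 2^63. Build: cc -std=c99 sketch.c */
#include <stdio.h>
#include <stdint.h>

#define PTEG_SIZE 64                        /* assumed bytes per PTEG */

static unsigned int clz32(uint32_t x)       /* portable cntlzw equivalent */
{
    unsigned int n = 0;
    if (x == 0) return 32;
    while (!(x & 0x80000000U)) { x <<= 1; n++; }
    return n;
}

int main(void)
{
    uint64_t msize = 768ULL << 20;          /* example: 768MB of physical RAM */

    /* (msize << 1) - 1 sets every bit below the leading bit, so counting
     * its leading zeros yields the power of two that msize rounds up to */
    unsigned int nbits = clz32((uint32_t)(((msize << 1) - 1) >> 32));
    if (nbits == 32) nbits += clz32((uint32_t)((msize << 1) - 1));
    uint64_t tmemsize = 0x8000000000000000ULL >> nbits;

    uint64_t htsize = (tmemsize >> 13) * PTEG_SIZE;  /* one PTEG per 2 pages */
    if (htsize < 256 * 1024) htsize = 256 * 1024;    /* enforce the minimum */

    printf("rounded=0x%llx hash=0x%llx\n",
           (unsigned long long)tmemsize, (unsigned long long)htsize);
    return 0;
}

For 768MB this rounds to 1GB and yields an 8MB table, which the loop above then tries to place (halving as needed) at the top of the highest bank that can also hold the PCA.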
-		mapsize = ((mapsize / PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get number of blocks of mappings we need */
-		mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK);	/* Account for the mappings themselves */
-
-#if DEBUG
-		kprintf("pmap_bootstrap: initial vm_pages = %08X\n", vmpagesz);
-		kprintf("pmap_bootstrap: initial vm_maps = %08X\n", vmmapsz);
-		kprintf("pmap_bootstrap: size before mappings = %08X\n", size);
-		kprintf("pmap_bootstrap: kernel map size = %08X\n", kmapsize);
-		kprintf("pmap_bootstrap: mapping blocks rqrd = %08X\n", mapsize);
-#endif
+		pmap_mem_regions[i + 1].mrStart = (hash_table_base + hash_table_size) >> 12;	/* Set the start of the next bank to the start of the slop area */
+		pmap_mem_regions[i + 1].mrAStart = (hash_table_base + hash_table_size) >> 12;	/* Set the start of allocatable area to the start of the slop area */
+		pmap_mem_regions[i].mrEnd = (hash_table_base + hash_table_size - 4096) >> 12;	/* Set the end of our bank to the end of the hash table */

-		size = size + (mapsize * PAGE_SIZE);	/* Get the true size we need */
+	}
+
+	pmap_mem_regions[bank].mrAEnd = (hash_table_base - PCAsize - 4096) >> 12;	/* Set the maximum allocatable in this bank */
+
+	hw_hash_init();				/* Initialize the hash table and PCA */
+	hw_setup_trans();			/* Set up hardware registers needed for translation */
+
+/*
+ *	The hash table is now all initialized and so is the PCA. Go on to do the rest of it.
+ *	This allocation is from the bottom up.
+ */
+
+	num = atop_64(msize);			/* Get number of pages in all of memory */

-		/* hash table must be aligned to its size */
+/* Figure out how much we need to allocate */

-		addr = (*first_avail +
-			(hash_table_size-1)) & ~(hash_table_size-1);
+	size = (vm_size_t) (
+		(InitialSaveBloks * PAGE_SIZE) +	/* Allow space for the initial context saveareas */
+		(BackPocketSaveBloks * PAGE_SIZE) +	/* For backpocket saveareas */
+		trcWork.traceSize +			/* Size of trace table */
+		((((1 << maxAdrSpb) * sizeof(pmapTransTab)) + 4095) & -4096) +	/* Size of pmap translate table */
+		(((num * sizeof(struct phys_entry)) + 4095) & -4096)		/* For the physical entries */
+	);

-		if (addr + size > pmap_mem_regions[0].end) {
-			hash_table_size /= 2;
-		} else {
-			break;
-		}
-		/* If we have had to shrink hash table to too small, panic */
-		if (hash_table_size == 32 * 1024)
-			panic("cannot lay out pmap memory map correctly");
-	} while (1);
-
-#if DEBUG
-	kprintf("hash table size=%08X, total size of area=%08X, addr=%08X\n",
-		hash_table_size, size, addr);
-#endif
-	if (round_page(*first_phys_avail) < trunc_page(addr)) {
-		/* We are stepping over at least one page here, so
-		 * add this region to the free regions so that it can
-		 * be allocated by pmap_steal
-		 */
-		free_regions[free_regions_count].start = round_page(*first_phys_avail);
-		free_regions[free_regions_count].end = trunc_page(addr);
-
-		avail_remaining += (free_regions[free_regions_count].end -
-			free_regions[free_regions_count].start) /
-				PPC_PGBYTES;
-#if DEBUG
-		kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
-			free_regions[free_regions_count].start,free_regions[free_regions_count].end,
-			avail_remaining);
-#endif /* DEBUG */
-		free_regions_count++;
-	}
+	mapsize = size = round_page_32(size);	/* Get size of area to map that we just calculated */
+	mapsize = mapsize + kmapsize;		/* Account for the kernel text size */

-	/* Zero everything - this also invalidates the hash table entries */
-	bzero((char *)addr, size);
+	vmpagesz = round_page_32(num * sizeof(struct
vm_page)); /* Allow for all vm_pages needed to map physical mem */ + vmmapsz = round_page_32((num / 8) * sizeof(struct vm_map_entry)); /* Allow for vm_maps */ + + mapsize = mapsize + vmpagesz + vmmapsz; /* Add the VM system estimates into the grand total */ - /* Set up some pointers to our new structures */ + mapsize = mapsize + (4 * 1024 * 1024); /* Allow for 4 meg of extra mappings */ + mapsize = ((mapsize / PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks of mappings we need */ + mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK); /* Account for the mappings themselves */ - /* from here, addr points to the next free address */ - - first_used_addr = addr; /* remember where we started */ + size = size + (mapsize * PAGE_SIZE); /* Get the true size we need */ - /* Set up hash table address and dma buffer address, keeping - * alignment. These mappings are all 1-1, so dma_r == dma_v - * - * If hash_table_size == dma_buffer_alignment, then put hash_table - * first, since dma_buffer_size may be smaller than alignment, but - * hash table alignment==hash_table_size. - */ - hash_table_base = addr; - - addr += hash_table_size; - addr += hash_table_size; /* Add another for the PTEG Control Area */ - assert((hash_table_base & (hash_table_size-1)) == 0); + /* hash table must be aligned to its size */ - pcaptr = (PCA *)(hash_table_base+hash_table_size); /* Point to the PCA table */ - mapCtl.mapcflush.pcaptr = pcaptr; - - for(i=0; i < (hash_table_size/64) ; i++) { /* For all of PTEG control areas: */ - pcaptr[i].flgs.PCAalflgs.PCAfree=0xFF; /* Mark all slots free */ - pcaptr[i].flgs.PCAalflgs.PCAsteal=0x01; /* Initialize steal position */ - } - - savearea_init(&addr); /* Initialize the savearea chains and data */ - - /* phys_table is static to help debugging, - * this variable is no longer actually used - * outside of this scope - */ + addr = *first_avail; /* Set the address to start allocations */ + first_used_addr = addr; /* Remember where we started */ - phys_table = (struct phys_entry *) addr; + bzero((char *)addr, size); /* Clear everything that we are allocating */ -#if DEBUG - kprintf("hash_table_base =%08X\n", hash_table_base); - kprintf("phys_table =%08X\n", phys_table); - kprintf("pmap_mem_regions_count =%08X\n", pmap_mem_regions_count); -#endif + savearea_init(addr); /* Initialize the savearea chains and data */ + + addr = (vm_offset_t)((unsigned int)addr + ((InitialSaveBloks + BackPocketSaveBloks) * PAGE_SIZE)); /* Point past saveareas */ - for (i = 0; i < pmap_mem_regions_count; i++) { + trcWork.traceCurr = (unsigned int)addr; /* Set first trace slot to use */ + trcWork.traceStart = (unsigned int)addr; /* Set start of trace table */ + trcWork.traceEnd = (unsigned int)addr + trcWork.traceSize; /* Set end of trace table */ + + addr = (vm_offset_t)trcWork.traceEnd; /* Set next allocatable location */ - pmap_mem_regions[i].phys_table = phys_table; - rsize = (pmap_mem_regions[i].end - (unsigned int)pmap_mem_regions[i].start)/PAGE_SIZE; + pmapTrans = (pmapTransTab *)addr; /* Point to the pmap to hash translation table */ -#if DEBUG - kprintf("Initializing physical table for region %d\n", i); - kprintf(" table=%08X, size=%08X, start=%08X, end=%08X\n", - phys_table, rsize, pmap_mem_regions[i].start, - (unsigned int)pmap_mem_regions[i].end); -#endif + pmapTrans[PPC_SID_KERNEL].pmapPAddr = (addr64_t)((uintptr_t)kernel_pmap); /* Initialize the kernel pmap in the translate table */ + pmapTrans[PPC_SID_KERNEL].pmapVAddr = CAST_DOWN(unsigned int, kernel_pmap); /* Initialize 
the kernel pmap in the translate table */ - for (j = 0; j < rsize; j++) { - phys_table[j].phys_link = MAPPING_NULL; - mapping_phys_init(&phys_table[j], (unsigned int)pmap_mem_regions[i].start+(j*PAGE_SIZE), - PTE_WIMG_DEFAULT); /* Initializes hw specific storage attributes */ - } - phys_table = phys_table + - atop(pmap_mem_regions[i].end - pmap_mem_regions[i].start); - } + addr += ((((1 << maxAdrSpb) * sizeof(pmapTransTab)) + 4095) & -4096); /* Point past pmap translate table */ - /* restore phys_table for debug */ - phys_table = (struct phys_entry *) addr; +/* NOTE: the phys_table must be within the first 2GB of physical RAM. This makes sure we only need to do 32-bit arithmetic */ - addr += sizeof(struct phys_entry) * num; - - simple_lock_init(&tlb_system_lock, ETAP_VM_PMAP_TLB); + phys_table = (struct phys_entry *) addr; /* Get pointer to physical table */ - /* Initialise the registers necessary for supporting the hashtable */ -#if DEBUG - kprintf("*** hash_table_init: base=%08X, size=%08X\n", hash_table_base, hash_table_size); -#endif + for (bank = 0; bank < pmap_mem_regions_count; bank++) { /* Set pointer and initialize all banks of ram */ + + pmap_mem_regions[bank].mrPhysTab = phys_table; /* Set pointer to the physical table for this bank */ + + phys_table = phys_table + (pmap_mem_regions[bank].mrEnd - pmap_mem_regions[bank].mrStart + 1); /* Point to the next */ + } - hash_table_init(hash_table_base, hash_table_size); - + addr += (((num * sizeof(struct phys_entry)) + 4095) & -4096); /* Step on past the physical entries */ + /* * Remaining space is for mapping entries. Tell the initializer routine that * the mapping system can't release this block because it's permanently assigned */ - mapping_init(); /* Initialize the mapping tables */ + mapping_init(); /* Initialize the mapping tables */ for(i = addr; i < first_used_addr + size; i += PAGE_SIZE) { /* Add initial mapping blocks */ - mapping_free_init(i, 1, 0); /* Pass block address and say that this one is not releasable */ + mapping_free_init(i, 1, 0); /* Pass block address and say that this one is not releasable */ } - mapCtl.mapcmin = MAPPERBLOK; /* Make sure we only adjust one at a time */ - -#if DEBUG - - kprintf("mapping kernel memory from 0x%08x to 0x%08x, to address 0x%08x\n", - first_used_addr, round_page(first_used_addr+size), - first_used_addr); -#endif /* DEBUG */ + mapCtl.mapcmin = MAPPERBLOK; /* Make sure we only adjust one at a time */ /* Map V=R the page tables */ pmap_map(first_used_addr, first_used_addr, - round_page(first_used_addr+size), VM_PROT_READ | VM_PROT_WRITE); - -#if DEBUG - - for(i=first_used_addr; i < round_page(first_used_addr+size); i+=PAGE_SIZE) { /* Step through all these mappings */ - if(i != (j = kvtophys(i))) { /* Verify that the mapping was made V=R */ - kprintf("*** V=R mapping failed to verify: V=%08X; R=%08X\n", i, j); - } - } -#endif + round_page_32(first_used_addr + size), VM_PROT_READ | VM_PROT_WRITE); - *first_avail = round_page(first_used_addr + size); - first_free_virt = round_page(first_used_addr + size); + *first_avail = round_page_32(first_used_addr + size); /* Set next available page */ + first_free_virt = *first_avail; /* Ditto */ /* All the rest of memory is free - add it to the free * regions so that it can be allocated by pmap_steal */ - free_regions[free_regions_count].start = *first_avail; - free_regions[free_regions_count].end = pmap_mem_regions[0].end; - - avail_remaining += (free_regions[free_regions_count].end - - free_regions[free_regions_count].start) / - PPC_PGBYTES; -#if 
DEBUG - kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n", - free_regions[free_regions_count].start,free_regions[free_regions_count].end, - avail_remaining); -#endif /* DEBUG */ + pmap_mem_regions[0].mrAStart = (*first_avail >> 12); /* Set up the free area to start allocations (always in the first bank) */ - free_regions_count++; - - current_free_region = 0; - - avail_next = free_regions[current_free_region].start; - -#if DEBUG - kprintf("Number of free regions=%d\n",free_regions_count); /* (TEST/DEBUG) */ - kprintf("Current free region=%d\n",current_free_region); /* (TEST/DEBUG) */ - for(i=0;i= free_regions_count) { - /* We're into the pmap_mem_regions, handle this - * separately to free_regions - */ - - int current_pmap_mem_region = current_free_region - - free_regions_count + 1; - if (current_pmap_mem_region > pmap_mem_regions_count) - return FALSE; - *addrp = avail_next; - avail_next += PAGE_SIZE; - avail_remaining--; - if (avail_next >= pmap_mem_regions[current_pmap_mem_region].end) { - current_free_region++; - current_pmap_mem_region++; - avail_next = pmap_mem_regions[current_pmap_mem_region].start; -#if DEBUG - kprintf("pmap_next_page : next region start=0x%08x\n",avail_next); -#endif /* DEBUG */ - } - return TRUE; - } + if(current_free_region >= pmap_mem_regions_count) return FALSE; /* Return failure if we have used everything... */ - /* We're in the free_regions, allocate next page and increment - * counters - */ - *addrp = avail_next; - - avail_next += PAGE_SIZE; - avail_remaining--; - - if (avail_next >= free_regions[current_free_region].end) { - current_free_region++; - if (current_free_region < free_regions_count) - avail_next = free_regions[current_free_region].start; - else - avail_next = pmap_mem_regions[current_free_region - - free_regions_count + 1].start; -#if DEBUG - kprintf("pmap_next_page : next region start=0x%08x\n",avail_next); -#endif + for(i = current_free_region; i < pmap_mem_regions_count; i++) { /* Find the next bank with free pages */ + if(pmap_mem_regions[i].mrAStart <= pmap_mem_regions[i].mrAEnd) break; /* Found one */ } + + current_free_region = i; /* Set our current bank */ + if(i >= pmap_mem_regions_count) return FALSE; /* Couldn't find a free page */ + + *addrp = pmap_mem_regions[i].mrAStart; /* Allocate the page */ + pmap_mem_regions[i].mrAStart = pmap_mem_regions[i].mrAStart + 1; /* Set the next one to go */ + avail_remaining--; /* Drop free count */ + return TRUE; } @@ -818,8 +525,8 @@ void pmap_virtual_space( vm_offset_t *startp, vm_offset_t *endp) { - *startp = round_page(first_free_virt); - *endp = VM_MAX_KERNEL_ADDRESS; + *startp = round_page_32(first_free_virt); + *endp = vm_last_addr; } /* @@ -845,17 +552,8 @@ pmap_create(vm_size_t size) { pmap_t pmap, ckpmap, fore, aft; int s, i; - space_t sid; - unsigned int currSID; - -#if PMAP_LOWTRACE - dbgTrace(0xF1D00001, size, 0); /* (TEST/DEBUG) */ -#endif - -#if DEBUG - if (pmdebug & PDB_USER) - kprintf("pmap_create(size=%x)%c", size, size ? '\n' : ' '); -#endif + unsigned int currSID, hspace; + addr64_t physpmap; /* * A software use-only map doesn't even need a pmap structure. @@ -871,76 +569,69 @@ pmap_create(vm_size_t size) s = splhigh(); simple_lock(&free_pmap_lock); - if(free_pmap_list) { /* Any free? */ - pmap = free_pmap_list; /* Yes, allocate it */ - free_pmap_list = (pmap_t)pmap->bmaps; /* Dequeue this one (we chain free ones through bmaps) */ + if(free_pmap_list) { /* Any free? 
*/ + pmap = free_pmap_list; /* Yes, allocate it */ + free_pmap_list = (pmap_t)pmap->freepmap; /* Dequeue this one (we chain free ones through freepmap) */ free_pmap_count--; } else { - simple_unlock(&free_pmap_lock); /* Unlock just in case */ + simple_unlock(&free_pmap_lock); /* Unlock just in case */ splx(s); - pmap = (pmap_t) zalloc(pmap_zone); /* Get one */ + pmap = (pmap_t) zalloc(pmap_zone); /* Get one */ if (pmap == PMAP_NULL) return(PMAP_NULL); /* Handle out-of-memory condition */ - bzero((char *)pmap, pmapSize); /* Clean up the pmap */ + bzero((char *)pmap, pmapSize); /* Clean up the pmap */ s = splhigh(); - simple_lock(&free_pmap_lock); /* Lock it back up */ + simple_lock(&free_pmap_lock); /* Lock it back up */ - ckpmap = cursor_pmap; /* Get starting point for free ID search */ - currSID = ckpmap->spaceNum; /* Get the actual space ID number */ + ckpmap = cursor_pmap; /* Get starting point for free ID search */ + currSID = ckpmap->spaceNum; /* Get the actual space ID number */ - while(1) { /* Keep trying until something happens */ + while(1) { /* Keep trying until something happens */ - currSID = (currSID + 1) & SID_MAX; /* Get the next in the sequence */ + currSID = (currSID + 1) & (maxAdrSp - 1); /* Get the next in the sequence */ + if(((currSID * incrVSID) & (maxAdrSp - 1)) == invalSpace) continue; /* Skip the space we have reserved */ ckpmap = (pmap_t)ckpmap->pmap_link.next; /* On to the next in-use pmap */ if(ckpmap->spaceNum != currSID) break; /* If we are out of sequence, this is free */ - if(ckpmap == cursor_pmap) { /* See if we have 2^20 already allocated */ - panic("pmap_create: Maximum number (2^20) active address spaces reached\n"); /* Die pig dog */ + if(ckpmap == cursor_pmap) { /* See if we have 2^20 already allocated */ + panic("pmap_create: Maximum number (%d) active address spaces reached\n", maxAdrSp); /* Die pig dog */ } } - pmap->space = (currSID * incrVSID) & SID_MAX; /* Calculate the actual VSID */ - pmap->spaceNum = currSID; /* Set the space ID number */ - + pmap->space = (currSID * incrVSID) & (maxAdrSp - 1); /* Calculate the actual VSID */ + pmap->spaceNum = currSID; /* Set the space ID number */ /* * Now we link into the chain just before the out of sequence guy. 
 */
-	fore = (pmap_t)ckpmap->pmap_link.prev;	/* Get the current's previous */
-	pmap->pmap_link.next = (queue_t)ckpmap;	/* My next points to the current */
-	fore->pmap_link.next = (queue_t)pmap;	/* Current's previous's next points to me */
-	pmap->pmap_link.prev = (queue_t)fore;	/* My prev points to what the current pointed to */
-	ckpmap->pmap_link.prev = (queue_t)pmap;	/* Current's prev points to me */
+		fore = (pmap_t)ckpmap->pmap_link.prev;	/* Get the current's previous */
+		pmap->pmap_link.next = (queue_t)ckpmap;	/* My next points to the current */
+		fore->pmap_link.next = (queue_t)pmap;	/* Current's previous's next points to me */
+		pmap->pmap_link.prev = (queue_t)fore;	/* My prev points to what the current pointed to */
+		ckpmap->pmap_link.prev = (queue_t)pmap;	/* Current's prev points to me */

		simple_lock_init(&pmap->lock, ETAP_VM_PMAP);
-		pmap->pmapvr = (unsigned int)pmap ^ (unsigned int)pmap_extract(kernel_pmap, (vm_offset_t)pmap);	/* Get physical pointer to the pmap and make mask */
+
+		physpmap = ((addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)pmap)) << 12) | (addr64_t)((unsigned int)pmap & 0xFFF);	/* Get the physical address of the pmap */
+
+		pmap->pmapvr = (addr64_t)((uintptr_t)pmap) ^ physpmap;	/* Make V to R translation mask */
+
+		pmapTrans[pmap->space].pmapPAddr = physpmap;	/* Set translate table physical to point to us */
+		pmapTrans[pmap->space].pmapVAddr = CAST_DOWN(unsigned int, pmap);	/* Set translate table virtual to point to us */
	}
+
+	pmap->pmapFlags = pmapKeyDef;		/* Set default key */
+	pmap->pmapCCtl = pmapCCtlVal;		/* Initialize cache control */
	pmap->ref_count = 1;
	pmap->stats.resident_count = 0;
	pmap->stats.wired_count = 0;
-	pmap->bmaps = 0;			/* Clear block map pointer to 0 */
-	pmap->vflags = 0;			/* Mark all alternates invalid for now */
-	for(i=0; i < 128; i++) {		/* Clean out usage slots */
-		pmap->pmapUsage[i] = 0;
-	}
-	for(i=0; i < 16; i++) {			/* Initialize for laughs */
-		pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | pmap->space;
-	}
-
-#if PMAP_LOWTRACE
-	dbgTrace(0xF1D00002, (unsigned int)pmap, (unsigned int)pmap->space);	/* (TEST/DEBUG) */
-#endif
-
-#if DEBUG
-	if (pmdebug & PDB_USER)
-		kprintf("-> %x, space id = %d\n", pmap, pmap->space);
-#endif
-
+	pmap->pmapSCSubTag = 0x0000000000000000ULL;	/* Make sure this is clean and tidy */
	simple_unlock(&free_pmap_lock);
+
	splx(s);
	return(pmap);
}
@@ -960,15 +651,6 @@ pmap_destroy(pmap_t pmap)
	spl_t s;
	pmap_t fore, aft;

-#if PMAP_LOWTRACE
-	dbgTrace(0xF1D00003, (unsigned int)pmap, 0);	/* (TEST/DEBUG) */
-#endif
-
-#if DEBUG
-	if (pmdebug & PDB_USER)
-		kprintf("pmap_destroy(pmap=%x)\n", pmap);
-#endif
-
	if (pmap == PMAP_NULL)
		return;
@@ -983,7 +665,7 @@ pmap_destroy(pmap_t pmap)
		panic("PMAP_DESTROY: pmap not empty");
#else
	if(pmap->stats.resident_count != 0) {
-		pmap_remove(pmap, 0, 0xFFFFF000);
+		pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000ULL);
	}
#endif
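For reference, pmap_create above derives pmapvr as the XOR of the pmap's kernel virtual address and its physical address, so a single XOR translates in either direction. A toy demonstration (illustrative only, not kernel code; both addresses are made up, and their low 12 bits agree because the page offset is the same in either space):

/* Sketch of the V<->R XOR trick behind the pmapvr field. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t vaddr = 0x00000000E0042300ULL;   /* hypothetical kernel VA of a pmap */
    uint64_t paddr = 0x0000000014042300ULL;   /* hypothetical physical address */

    uint64_t pmapvr = vaddr ^ paddr;          /* the V-to-R translation mask */

    printf("V->R: 0x%016llx\n", (unsigned long long)(vaddr ^ pmapvr));  /* yields paddr */
    printf("R->V: 0x%016llx\n", (unsigned long long)(paddr ^ pmapvr));  /* yields vaddr */
    return 0;
}

@@ -997,9 +679,9 @@ pmap_destroy(pmap_t pmap)
 */
	simple_lock(&free_pmap_lock);
-	if (free_pmap_count <= free_pmap_max) {	/* Do we have enough spares? */
+	if (free_pmap_count <= free_pmap_max) {		/* Do we have enough spares?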
*/ - pmap->bmaps = (struct blokmap *)free_pmap_list; /* Queue in front */ + pmap->freepmap = free_pmap_list; /* Queue in front */ free_pmap_list = pmap; free_pmap_count++; simple_unlock(&free_pmap_lock); @@ -1011,6 +693,8 @@ pmap_destroy(pmap_t pmap) fore->pmap_link.next = pmap->pmap_link.next; /* My previous's next is my next */ aft->pmap_link.prev = pmap->pmap_link.prev; /* My next's previous is my previous */ simple_unlock(&free_pmap_lock); + pmapTrans[pmap->space].pmapPAddr = -1; /* Invalidate the translate table physical */ + pmapTrans[pmap->space].pmapVAddr = -1; /* Invalidate the translate table virtual */ zfree(pmap_zone, (vm_offset_t) pmap); } splx(s); @@ -1025,15 +709,6 @@ pmap_reference(pmap_t pmap) { spl_t s; -#if PMAP_LOWTRACE - dbgTrace(0xF1D00004, (unsigned int)pmap, 0); /* (TEST/DEBUG) */ -#endif - -#if DEBUG - if (pmdebug & PDB_USER) - kprintf("pmap_reference(pmap=%x)\n", pmap); -#endif - if (pmap != PMAP_NULL) hw_atomic_add(&pmap->ref_count, 1); /* Bump the count */ } @@ -1048,20 +723,36 @@ void pmap_remove_some_phys( vm_offset_t pa) { register struct phys_entry *pp; - register struct mapping *mp, *mpv; + register struct mapping *mp; + unsigned int pindex; + if (pmap == PMAP_NULL) { /* This should never be called with a null pmap */ + panic("pmap_remove_some_phys: null pmap\n"); + } - if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */ - - pp = pmap_find_physentry(pa); /* Get the physent for this page */ - if (pp == PHYS_NULL) return; /* Leave if not in physical RAM */ + pp = mapping_phys_lookup(pa, &pindex); /* Get physical entry */ + if (pp == 0) return; /* Leave if not in physical RAM */ - if (pmap->vflags & pmapVMhost) - mapping_purge(pp); - else - mapping_purge_pmap(pp, pmap); + while(1) { /* Keep going until we toss all pages from this pmap */ + if (pmap->pmapFlags & pmapVMhost) { + mp = hw_purge_phys(pp); /* Toss a map */ + if(!mp ) return; + if((unsigned int)mp & mapRetCode) { /* Was there a failure? */ + panic("pmap_remove_some_phys: hw_purge_phys failed - pp = %08X, pmap = %08X, code = %08X\n", + pp, pmap, mp); + } + } else { + mp = hw_purge_space(pp, pmap); /* Toss a map */ + if(!mp ) return; + if((unsigned int)mp & mapRetCode) { /* Was there a failure? */ + panic("pmap_remove_some_phys: hw_purge_pmap failed - pp = %08X, pmap = %08X, code = %08X\n", + pp, pmap, mp); + } + } + mapping_free(mp); /* Toss the mapping */ + } - return; /* Leave... */ + return; /* Leave... */ } /* @@ -1077,25 +768,13 @@ void pmap_remove_some_phys( void pmap_remove( pmap_t pmap, - vm_offset_t sva, - vm_offset_t eva) + addr64_t sva, + addr64_t eva) { - spl_t spl; - struct mapping *mp, *blm; - vm_offset_t lpage; + addr64_t va, endva; -#if PMAP_LOWTRACE - dbgTrace(0xF1D00005, (unsigned int)pmap, sva|((eva-sva)>>12)); /* (TEST/DEBUG) */ -#endif - -#if DEBUG - if (pmdebug & PDB_USER) - kprintf("pmap_remove(pmap=%x, sva=%x, eva=%x)\n", - pmap, sva, eva); -#endif + if (pmap == PMAP_NULL) return; /* Leave if software pmap */ - if (pmap == PMAP_NULL) - return; /* It is just possible that eva might have wrapped around to zero, * and sometimes we get asked to liberate something of size zero @@ -1104,49 +783,17 @@ pmap_remove( assert(eva >= sva); /* If these are not page aligned the loop might not terminate */ - assert((sva == trunc_page(sva)) && (eva == trunc_page(eva))); - - /* We liberate addresses from high to low, since the stack grows - * down. 
This means that we won't need to test addresses below - * the limit of stack growth - */ - - debugLog2(44, sva, eva); /* Log pmap_map call */ - - sva = trunc_page(sva); /* Make it clean */ - lpage = trunc_page(eva) - PAGE_SIZE; /* Point to the last page contained in the range */ - -/* - * Here we will remove all of the block mappings that overlap this range. - * hw_rem_blk removes one mapping in the range and returns. If it returns - * 0, there are no blocks in the range. - */ + assert((sva == trunc_page_64(sva)) && (eva == trunc_page_64(eva))); - while(mp = (mapping *)hw_rem_blk(pmap, sva, lpage)) { /* Keep going until no more */ - if((unsigned int)mp & 1) { /* Make sure we don't unmap a permanent one */ - blm = (struct mapping *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC)); /* Get virtual address */ - panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n", - pmap, sva, blm); - } - if (!((unsigned int)mp & 2)) - mapping_free(hw_cpv(mp)); /* Release it */ - } - while (pmap->stats.resident_count && (eva > sva)) { + va = sva & -4096LL; /* Round start down to a page */ + endva = eva & -4096LL; /* Round end down to a page */ - eva -= PAGE_SIZE; /* Back up a page */ - -#if 1 - if((0x00008000 >> (sva >> 28)) & pmap->vflags) - panic("pmap_remove: attempt to remove nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva); /* (TEST/DEBUG) panic */ -#endif - if(!(pmap->pmapUsage[(eva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */ - eva = eva & (-pmapUsageSize); /* Back up into the previous slot */ - continue; /* Check the next... */ - } - mapping_remove(pmap, eva); /* Remove the mapping for this address */ + while(1) { /* Go until we finish the range */ + va = mapping_remove(pmap, va); /* Remove the mapping and see what's next */ + va = va & -4096LL; /* Make sure the "not found" indication is clear */ + if((va == 0) || (va >= endva)) break; /* End loop if we finish range or run off the end */ } - debugLog2(45, 0, 0); /* Log pmap_map call */ } /* @@ -1158,24 +805,15 @@ pmap_remove( */ void pmap_page_protect( - vm_offset_t pa, + ppnum_t pa, vm_prot_t prot) { register struct phys_entry *pp; boolean_t remove; + unsigned int pindex; + mapping *mp; -#if PMAP_LOWTRACE - dbgTrace(0xF1D00006, (unsigned int)pa, (unsigned int)prot); /* (TEST/DEBUG) */ -#endif - -#if DEBUG - if (pmdebug & PDB_USER) - kprintf("pmap_page_protect(pa=%x, prot=%x)\n", pa, prot); -#endif - - debugLog2(46, pa, prot); /* Log pmap_page_protect call */ - switch (prot) { case VM_PROT_READ: case VM_PROT_READ|VM_PROT_EXECUTE: @@ -1188,23 +826,31 @@ pmap_page_protect( break; } - pp = pmap_find_physentry(pa); /* Get the physent for this page */ - if (pp == PHYS_NULL) return; /* Leave if not in physical RAM */ + + pp = mapping_phys_lookup(pa, &pindex); /* Get physical entry */ + if (pp == 0) return; /* Leave if not in physical RAM */ if (remove) { /* If the protection was set to none, we'll remove all mappings */ - mapping_purge(pp); /* Get rid of them all */ + + while(1) { /* Keep going until we toss all pages from this physical page */ + mp = hw_purge_phys(pp); /* Toss a map */ + if(!mp ) return; + if((unsigned int)mp & mapRetCode) { /* Was there a failure? */ + panic("pmap_page_protect: hw_purge_phys failed - pp = %08X, code = %08X\n", + pp, mp); + } + mapping_free(mp); /* Toss the mapping */ + } - debugLog2(47, 0, 0); /* Log pmap_map call */ return; /* Leave... 
*/ } - - /* When we get here, it means that we are to change the protection for a - * physical page. - */ - - mapping_protect_phys(pp, prot, 0); /* Change protection of all mappings to page. */ - debugLog2(47, 1, 0); /* Log pmap_map call */ +/* When we get here, it means that we are to change the protection for a + * physical page. + */ + + mapping_protect_phys(pa, prot & VM_PROT_ALL); /* Change protection of all mappings to page. */ + } /* @@ -1223,54 +869,24 @@ void pmap_protect( vm_offset_t eva, vm_prot_t prot) { - spl_t spl; - register struct phys_entry *pp; - register struct mapping *mp, *mpv; -#if PMAP_LOWTRACE - dbgTrace(0xF1D00008, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12))); /* (TEST/DEBUG) */ -#endif - -#if DEBUG - if (pmdebug & PDB_USER) - kprintf("pmap_protect(pmap=%x, sva=%x, eva=%x, prot=%x)\n", pmap, sva, eva, prot); - - assert(sva < eva); -#endif + addr64_t va, endva, nextva; if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */ - debugLog2(48, sva, eva); /* Log pmap_map call */ - if (prot == VM_PROT_NONE) { /* Should we kill the address range?? */ - pmap_remove(pmap, sva, eva); /* Yeah, dump 'em */ - - debugLog2(49, prot, 0); /* Log pmap_map call */ - + pmap_remove(pmap, (addr64_t)sva, (addr64_t)eva); /* Yeah, dump 'em */ return; /* Leave... */ } - sva = trunc_page(sva); /* Start up a page boundary */ - - while(sva < eva) { /* Step through */ - - if(!(pmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */ - sva = (sva + pmapUsageSize) &(-pmapUsageSize); /* Jump up into the next slot if nothing here */ - if(!sva) break; /* We tried to wrap, kill loop... */ - continue; /* Check the next... */ - } - -#if 1 - if((0x00008000 >> (sva >> 28)) & pmap->vflags) - panic("pmap_protect: attempt to protect nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva); /* (TEST/DEBUG) panic */ -#endif + va = sva & -4096LL; /* Round start down to a page */ + endva = eva & -4096LL; /* Round end down to a page */ - mapping_protect(pmap, sva, prot); /* Change the protection on the page */ - sva += PAGE_SIZE; /* On to the next page */ + while(1) { /* Go until we finish the range */ + (void)mapping_protect(pmap, va, prot & VM_PROT_ALL, &va); /* Change the protection and see what's next */ + if((va == 0) || (va >= endva)) break; /* End loop if we finish range or run off the end */ } - debugLog2(49, prot, 1); /* Log pmap_map call */ - return; /* Leave... */ } @@ -1289,61 +905,104 @@ void pmap_protect( * insert this page into the given map NOW. */ void -pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, +pmap_enter(pmap_t pmap, vm_offset_t va, ppnum_t pa, vm_prot_t prot, unsigned int flags, boolean_t wired) { - spl_t spl; - struct mapping *mp; - struct phys_entry *pp; int memattr; + pmap_t opmap; + unsigned int mflags; + addr64_t colva; -#if PMAP_LOWTRACE - dbgTrace(0xF1D00009, (unsigned int)pmap, (unsigned int)va); /* (TEST/DEBUG) */ - dbgTrace(0xF1D04009, (unsigned int)pa, (unsigned int)prot); /* (TEST/DEBUG) */ -#endif - - if (pmap == PMAP_NULL) return; /* If they gave us no pmap, just leave... */ + if (pmap == PMAP_NULL) return; /* Leave if software pmap */ - debugLog2(50, va, pa); /* Log pmap_map call */ + disable_preemption(); /* Don't change threads */ - pp = pmap_find_physentry(pa); /* Get the physent for this physical page */ + mflags = 0; /* Make sure this is initialized to nothing special */ + if(!(flags & VM_WIMG_USE_DEFAULT)) { /* Are they supplying the attributes? 
 */
+		mflags = mmFlgUseAttr | (flags & VM_MEM_GUARDED) | ((flags & VM_MEM_NOT_CACHEABLE) >> 1);	/* Convert to our mapping_make flags */
+	}
+
+/*
+ *	It is possible to hang here if another processor is remapping any pages we collide with and are removing
+ */

-	if((0x00008000 >> (va >> 28)) & pmap->vflags)
-		panic("pmap_enter: attempt to map into nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, va);	/* (TEST/DEBUG) panic */
+	while(1) {				/* Keep trying the enter until it goes in */
+
+		colva = mapping_make(pmap, va, pa, mflags, 1, prot & VM_PROT_ALL);	/* Enter the mapping into the pmap */
+
+		if(!colva) break;		/* If there were no collisions, we are done... */
+
+		mapping_remove(pmap, colva);	/* Remove the mapping that collided */
+	}

-	spl=splhigh();			/* Have to disallow interrupts between the
-					   time we possibly clear a mapping and the time
-					   we get it remapped again. An I/O SLIH could
-					   try to drive an IOR using the page before
-					   we get it mapped (Dude! This was a tough
-					   bug!!!!) */
+	enable_preemption();		/* Thread change ok */

-	mapping_remove(pmap, va);	/* Remove any other mapping at this address */
+}
+
+/*
+ *	Enters translations for odd-sized V=F blocks.
+ *
+ *	The higher level VM map should be locked to ensure that we don't have a
+ *	double diddle here.
+ *
+ *	We panic if we get a block that overlaps with another. We do not merge adjacent
+ *	blocks because removing any address within a block removes the entire block and it
+ *	would really mess things up if we trashed too much.
+ *
+ *	Once a block is mapped, it is immutable, that is, protection, catch mode, etc. can
+ *	not be changed. The block must be unmapped and then remapped with the new stuff.
+ *	We also do not keep track of reference or change flags.
+ *
+ *	Note that pmap_map_block_rc is the same but doesn't panic on collision.
+ *
+ */
+
+void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) {	/* Map an autogenned block */
+
+	int memattr;
+	unsigned int mflags;
+	addr64_t colva;

-	if(flags & VM_WIMG_USE_DEFAULT) {
-		if(pp) {
-			/* Set attr to the phys default */
-			memattr = ((pp->pte1&0x00000078) >> 3);
-		} else {
-			memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED;
-		}
-	} else {
-		memattr = flags & VM_WIMG_MASK;
-	}
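The pmap_enter loop above simply retries the insert, evicting whatever collides, until mapping_make succeeds. A toy model of that pattern (illustrative only; try_insert and evict are hypothetical stand-ins for mapping_make and mapping_remove):

/* Sketch of the enter/collide/evict retry loop. */
#include <stdio.h>
#include <stdint.h>

#define SLOTS 8

static uint64_t table[SLOTS];               /* toy "pmap": one VA per slot */

/* Returns 0 on success, or the colliding VA (like mapping_make's colva) */
static uint64_t try_insert(uint64_t va)
{
    unsigned int slot = (unsigned int)((va >> 12) % SLOTS);
    if (table[slot] != 0 && table[slot] != va) return table[slot];
    table[slot] = va;
    return 0;
}

static void evict(uint64_t va)              /* like mapping_remove */
{
    table[(unsigned int)((va >> 12) % SLOTS)] = 0;
}

int main(void)
{
    table[1] = 0x9000;                      /* pre-existing mapping that collides */

    uint64_t va = 0x1000, colva;
    while (1) {                             /* same shape as the kernel loop */
        colva = try_insert(va);
        if (!colva) break;                  /* no collision: done */
        evict(colva);                       /* remove the collider and retry */
    }
    printf("inserted 0x%llx\n", (unsigned long long)va);
    return 0;
}

+	if (pmap == PMAP_NULL) {		/* Did they give us a pmap? */
+		panic("pmap_map_block: null pmap\n");	/* No, like that's dumb... */
+	}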
-	/* Make the address mapping */
-	mp=mapping_make(pmap, pp, va, pa, prot, memattr, 0);

+//	kprintf("pmap_map_block: (%08X) va = %016llX, pa = %08X, size = %08X, prot = %08X, attr = %08X, flags = %08X\n",	/* (BRINGUP) */
+//		current_act(), va, pa, size, prot, attr, flags);	/* (BRINGUP) */

-	splx(spl);			/* I'm not busy no more - come what may */
-	debugLog2(51, prot, 0);		/* Log pmap_map call */
+	mflags = mmFlgBlock | mmFlgUseAttr | (attr & VM_MEM_GUARDED) | ((attr & VM_MEM_NOT_CACHEABLE) >> 1);	/* Convert to our mapping_make flags */
+	if(flags) mflags |= mmFlgPerm;	/* Mark permanent if requested */
+
+	colva = mapping_make(pmap, va, pa, mflags, (size >> 12), prot);	/* Enter the mapping into the pmap */
+
+	if(colva) {			/* If there was a collision, panic */
+		panic("pmap_map_block: collision at %016llX, pmap = %08X\n", colva, pmap);
+	}
+
+	return;				/* Return */
+}

-#if DEBUG
-	if (pmdebug & (PDB_USER|PDB_ENTER))
-		kprintf("leaving pmap_enter\n");
-#endif

+int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) {	/* Map an autogenned block */

+	int memattr;
+	unsigned int mflags;
+	addr64_t colva;
+
+
+	if (pmap == PMAP_NULL) {	/* Did they give us a pmap? */
+		panic("pmap_map_block_rc: null pmap\n");	/* No, like that's dumb... */
+	}
+
+	mflags = mmFlgBlock | mmFlgUseAttr | (attr & VM_MEM_GUARDED) | ((attr & VM_MEM_NOT_CACHEABLE) >> 1);	/* Convert to our mapping_make flags */
+	if(flags) mflags |= mmFlgPerm;	/* Mark permanent if requested */
+
+	colva = mapping_make(pmap, va, pa, mflags, (size >> 12), prot);	/* Enter the mapping into the pmap */
+
+	if(colva) return 0;		/* If there was a collision, fail */
+
+	return 1;			/* Return true if we worked */
}

/*
@@ -1351,101 +1010,90 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
 *	returns the physical address corresponding to the
 *	virtual address specified by pmap and va if the
 *	virtual address is mapped and 0 if it is not.
+ *	Note: we assume nothing is ever mapped to phys 0.
+ *
+ *	NOTE: This call always will fail for physical addresses greater than 0xFFFFF000.
 */
vm_offset_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	spl_t spl;
-	register struct mapping *mp, *mpv;
+	register struct mapping *mp;
	register vm_offset_t pa;
-	unsigned int seg;
-	pmap_t actpmap;
-
+	addr64_t nextva;
+	ppnum_t ppoffset;
+	unsigned int gva;

-#if PMAP_LOWTRACE
-	dbgTrace(0xF1D0000B, (unsigned int)pmap, (unsigned int)va);	/* (TEST/DEBUG) */
-#endif
-#if DEBUG
-	if (pmdebug & PDB_USER)
-		kprintf("pmap_extract(pmap=%x, va=%x)\n", pmap, va);
-#endif
-
-	seg = va >> 28;			/* Isolate segment */
-	if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg];	/* Set nested pmap if there is one */
-	else actpmap = pmap;		/* Otherwise use the one passed in */
-
-	pa = (vm_offset_t) 0;		/* Clear this to 0 */

+#ifdef BOGUSCOMPAT
+	panic("pmap_extract: THIS CALL IS BOGUS. NEVER USE IT EVER. So there...\n");	/* Don't use this */
+#else

-	debugLog2(52, actpmap->space, va);	/* Log pmap_map call */
+	gva = (unsigned int)va;		/* Make sure we don't have a sign */

	spl = splhigh();		/* We can't allow any loss of control here */
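In the 64-bit replacement code that follows, the physical page is recovered from the found mapping by page-offset arithmetic rather than a PTE walk. A standalone sketch of that arithmetic (illustrative only, not kernel code; the mapping's mpVAddr/mpPAddr fields are modeled as plain variables):

/* Sketch: derive a physical page number from a mapping's base addresses. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t mpVAddr = 0x0000000030000000ULL;   /* hypothetical mapping base VA */
    uint32_t mpPAddr = 0x00010000;              /* hypothetical base physical page number */
    uint64_t va      = 0x0000000030003FACULL;   /* address being looked up */

    /* va & -4096LL truncates to a page boundary (two's-complement mask), so
     * the difference of the rounded addresses, shifted by 12, is the page
     * offset of va within the mapping. */
    uint32_t ppoffset = (uint32_t)(((va & -4096LL) - (mpVAddr & -4096LL)) >> 12);
    uint32_t pa = mpPAddr + ppoffset;           /* physical page number */

    printf("page offset %u -> ppnum 0x%08x\n", ppoffset, pa);
    printf("byte address 0x%016llx\n",
           (unsigned long long)(((uint64_t)pa << 12) | (va & 0xFFF)));
    return 0;
}

-
-	if(mp=hw_lock_phys_vir(actpmap->space, va)) {	/* Find the mapping for this vaddr and lock physent */
-		if((unsigned int)mp&1) {	/* Did the lock on the phys entry time out? */
-			panic("pmap_extract: timeout obtaining lock on physical entry\n");	/* Scream bloody murder!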
*/ - splx(spl); /* Interruptions are cool now */ - return 0; - } - - mpv = hw_cpv(mp); /* Get virtual address of mapping */ - pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Build the physical address */ - if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ - splx(spl); /* Interruptions are cool now */ - - debugLog2(53, pa, 0); /* Log pmap_map call */ - - return pa; /* Return the physical address... */ + + mp = mapping_find(pmap, (addr64_t)gva, &nextva,1); /* Find the mapping for this address */ + + if(!mp) { /* Is the page mapped? */ + splx(spl); /* Enable interrupts */ + return 0; /* Pass back 0 if not found */ } - pa = hw_cvp_blk(pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */ - /* Note no nested pmaps here */ + ppoffset = (ppnum_t)(((gva & -4096LL) - (mp->mpVAddr & -4096LL)) >> 12); /* Get offset from va to base va */ + + + pa = mp->mpPAddr + ppoffset; /* Remember ppage because mapping may vanish after drop call */ + + mapping_drop_busy(mp); /* We have everything we need from the mapping */ splx(spl); /* Restore 'rupts */ - debugLog2(53, pa, 0); /* Log pmap_map call */ + + if(pa > maxPPage32) return 0; /* Force large addresses to fail */ + + pa = (pa << 12) | (va & 0xFFF); /* Convert physical page number to address */ + +#endif return pa; /* Return physical address or 0 */ } /* - * pmap_attribute_cache_sync - * Handle the machine attribute calls which involve sync the prcessor - * cache. + * ppnum_t pmap_find_phys(pmap, addr64_t va) + * returns the physical page corrsponding to the + * virtual address specified by pmap and va if the + * virtual address is mapped and 0 if it is not. + * Note: we assume nothing is ever mapped to phys 0. + * */ -kern_return_t -pmap_attribute_cache_sync(address, size, attribute, value) - vm_offset_t address; - vm_size_t size; - vm_machine_attribute_t attribute; - vm_machine_attribute_val_t* value; -{ - while(size) { - switch (*value) { /* What type was that again? */ - case MATTR_VAL_CACHE_SYNC: /* It is sync I+D caches */ - sync_cache(address, PAGE_SIZE); /* Sync up dem caches */ - break; /* Done with this one here... */ - - case MATTR_VAL_CACHE_FLUSH: /* It is flush from all caches */ - flush_dcache(address, PAGE_SIZE, TRUE); /* Flush out the data cache */ - invalidate_icache(address, - PAGE_SIZE, TRUE); /* Flush out the instruction cache */ - break; /* Done with this one here... */ - - case MATTR_VAL_DCACHE_FLUSH: /* It is flush from data cache(s) */ - flush_dcache(address, PAGE_SIZE, TRUE); /* Flush out the data cache */ - break; /* Done with this one here... */ - - case MATTR_VAL_ICACHE_FLUSH: /* It is flush from instr cache(s) */ - invalidate_icache(address, - PAGE_SIZE, TRUE); /* Flush out the instruction cache */ - break; /* Done with this one here... */ - } - size -= PAGE_SIZE; +ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va) { + + spl_t spl; + register struct mapping *mp; + ppnum_t pa, ppoffset; + addr64_t nextva, curva; + + spl = splhigh(); /* We can't allow any loss of control here */ + + mp = mapping_find(pmap, va, &nextva, 1); /* Find the mapping for this address */ + + if(!mp) { /* Is the page mapped? 
*/ + splx(spl); /* Enable interrupts */ + return 0; /* Pass back 0 if not found */ } - return KERN_SUCCESS;; -} + + ppoffset = (ppnum_t)(((va & -4096LL) - (mp->mpVAddr & -4096LL)) >> 12); /* Get offset from va to base va */ + + pa = mp->mpPAddr + ppoffset; /* Get the actual physical address */ + + mapping_drop_busy(mp); /* We have everything we need from the mapping */ + + splx(spl); /* Restore 'rupts */ + return pa; /* Return physical address or 0 */ +} + /* * pmap_attributes: * - * Set/Get special memory attributes; Set is not implemented. + * Set/Get special memory attributes; not implemented. * * Note: 'VAL_GET_INFO' is used to return info about a page. * If less than 1 page is specified, return the physical page @@ -1454,6 +1102,7 @@ pmap_attribute_cache_sync(address, size, attribute, value) * of resident pages and the number of shared (more than * one mapping) pages in the range; * + * */ kern_return_t pmap_attribute(pmap, address, size, attribute, value) @@ -1463,181 +1112,50 @@ pmap_attribute(pmap, address, size, attribute, value) vm_machine_attribute_t attribute; vm_machine_attribute_val_t* value; { - spl_t s; - vm_offset_t sva, eva; - vm_offset_t pa; - kern_return_t ret; - register struct mapping *mp, *mpv; - register struct phys_entry *pp; - int total, seg; - pmap_t actpmap; - - if (attribute != MATTR_CACHE) - return KERN_INVALID_ARGUMENT; - - /* We can't get the caching attribute for more than one page - * at a time - */ - if ((*value == MATTR_VAL_GET) && - (trunc_page(address) != trunc_page(address+size-1))) - return KERN_INVALID_ARGUMENT; + + return KERN_INVALID_ARGUMENT; + +} - if (pmap == PMAP_NULL) - return KERN_SUCCESS; - - sva = trunc_page(address); - eva = round_page(address + size); - ret = KERN_SUCCESS; - - debugLog2(54, address, attribute); /* Log pmap_map call */ - - switch (*value) { - case MATTR_VAL_CACHE_SYNC: /* sync I+D caches */ - case MATTR_VAL_CACHE_FLUSH: /* flush from all caches */ - case MATTR_VAL_DCACHE_FLUSH: /* flush from data cache(s) */ - case MATTR_VAL_ICACHE_FLUSH: /* flush from instr cache(s) */ - sva = trunc_page(sva); - s = splhigh(); - - while (sva < eva) { - seg = sva >> 28; /* Isolate segment */ - if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */ - else actpmap = pmap; /* Otherwise use the one passed in */ - /* - * Note: the following should work ok with nested pmaps because there are not overlayed mappings + * pmap_attribute_cache_sync(vm_offset_t pa) + * + * Invalidates all of the instruction cache on a physical page and + * pushes any dirty data from the data cache for the same physical page */ - if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */ - sva = (sva + pmapUsageSize) & (-pmapUsageSize); /* Jump up into the next slot if nothing here */ - if(!sva) break; /* We tried to wrap, kill loop... */ - continue; /* Check the next... */ - } - - if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) { /* Find the mapping for this vaddr and lock physent */ - sva += PAGE_SIZE; /* Point to the next page */ - continue; /* Skip if the page is not mapped... */ - } - - if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */ - panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */ - continue; - } - - mpv = hw_cpv(mp); /* Get virtual address of mapping */ - if((unsigned int)mpv->physent) { /* Is there a physical entry? 
*/ - pa = (vm_offset_t)mpv->physent->pte1 & -PAGE_SIZE; /* Yes, get the physical address from there */ - } - else { - pa = (vm_offset_t)(mpv->PTEr & PAGE_SIZE); /* Otherwise from the mapping */ - } - - switch (*value) { /* What type was that again? */ - case MATTR_VAL_CACHE_SYNC: /* It is sync I+D caches */ - sync_cache(pa, PAGE_SIZE); /* Sync up dem caches */ - break; /* Done with this one here... */ - - case MATTR_VAL_CACHE_FLUSH: /* It is flush from all caches */ - flush_dcache(pa, PAGE_SIZE, TRUE); /* Flush out the data cache */ - invalidate_icache(pa, PAGE_SIZE, TRUE); /* Flush out the instruction cache */ - break; /* Done with this one here... */ - - case MATTR_VAL_DCACHE_FLUSH: /* It is flush from data cache(s) */ - flush_dcache(pa, PAGE_SIZE, TRUE); /* Flush out the data cache */ - break; /* Done with this one here... */ - - case MATTR_VAL_ICACHE_FLUSH: /* It is flush from instr cache(s) */ - invalidate_icache(pa, PAGE_SIZE, TRUE); /* Flush out the instruction cache */ - break; /* Done with this one here... */ - } - if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry if it exists*/ - - sva += PAGE_SIZE; /* Point to the next page */ - } - splx(s); - break; - - case MATTR_VAL_GET_INFO: /* Get info */ - total = 0; - s = splhigh(); /* Lock 'em out */ - - if (size <= PAGE_SIZE) { /* Do they want just one page */ - seg = sva >> 28; /* Isolate segment */ - if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */ - else actpmap = pmap; /* Otherwise use the one passed in */ - if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) { /* Find the mapping for this vaddr and lock physent */ - *value = 0; /* Return nothing if no mapping */ - } - else { - if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */ - panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */ - } - mpv = hw_cpv(mp); /* Get virtual address of mapping */ - if(pp = mpv->physent) { /* Check for a physical entry */ - total = 0; /* Clear the count */ - for (mpv = (mapping *)hw_cpv((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)); mpv != NULL; mpv = hw_cpv(mp->next)) total++; /* Count the mapping */ - *value = (vm_machine_attribute_val_t) ((pp->pte1 & -PAGE_SIZE) | total); /* Pass back the physical address and the count of mappings */ - hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Clear the physical entry lock */ - } - else { /* This is the case for an I/O mapped area */ - *value = (vm_machine_attribute_val_t) ((mpv->PTEr & -PAGE_SIZE) | 1); /* Pass back the physical address and the count of mappings */ - } - } - } - else { - total = 0; - while (sva < eva) { - seg = sva >> 28; /* Isolate segment */ - if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */ - else actpmap = pmap; /* Otherwise use the one passed in */ - - if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */ - sva = (sva + pmapUsageSize) & (-pmapUsageSize); /* Jump up into the next slot if nothing here */ - if(!sva) break; /* We tried to wrap, kill loop... */ - continue; /* Check the next... */ - } - if(mp = hw_lock_phys_vir(actpmap->space, sva)) { /* Find the mapping for this vaddr and lock physent */ - if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */ - panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! 
*/ - continue; - } - mpv = hw_cpv(mp); /* Get virtual address of mapping */ - total += 65536 + (mpv->physent && ((mapping *)((unsigned int)mpv->physent->phys_link & -32))->next); /* Count the "resident" and shared pages */ - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Clear the physical entry lock */ - } - sva += PAGE_SIZE; - } - *value = total; - } - splx(s); - break; + +kern_return_t pmap_attribute_cache_sync(ppnum_t pp, vm_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value) { - case MATTR_VAL_GET: /* return current value */ - case MATTR_VAL_OFF: /* turn attribute off */ - case MATTR_VAL_ON: /* turn attribute on */ - default: - ret = KERN_INVALID_ARGUMENT; - break; + spl_t s; + unsigned int i, npages; + + npages = round_page_32(size) >> 12; /* Get the number of pages to do */ + + for(i = 0; i < npages; i++) { /* Do all requested pages */ + s = splhigh(); /* No interruptions here */ + sync_ppage(pp + i); /* Go flush data cache and invalidate icache */ + splx(s); /* Allow interruptions */ } - - debugLog2(55, 0, 0); /* Log pmap_map call */ - - return ret; + + return KERN_SUCCESS; } /* - * pmap_sync_caches_phys(vm_offset_t pa) + * pmap_sync_caches_phys(ppnum_t pa) * * Invalidates all of the instruction cache on a physical page and * pushes any dirty data from the data cache for the same physical page */ -void pmap_sync_caches_phys(vm_offset_t pa) { +void pmap_sync_caches_phys(ppnum_t pa) { spl_t s; - - s = splhigh(); /* No interruptions here */ - sync_cache(trunc_page(pa), PAGE_SIZE); /* Sync up dem caches */ - splx(s); /* Allow interruptions */ + + s = splhigh(); /* No interruptions here */ + sync_ppage(pa); /* Sync up dem caches */ + splx(s); /* Allow interruptions */ return; } @@ -1681,69 +1199,6 @@ pmap_deactivate( return; } -#if DEBUG - -/* - * pmap_zero_page - * pmap_copy page - * - * are implemented in movc.s, these - * are just wrappers to help debugging - */ - -extern void pmap_zero_page_assembler(vm_offset_t p); -extern void pmap_copy_page_assembler(vm_offset_t src, vm_offset_t dst); - -/* - * pmap_zero_page(pa) - * - * pmap_zero_page zeros the specified (machine independent) page pa. - */ -void -pmap_zero_page( - vm_offset_t p) -{ - register struct mapping *mp; - register struct phys_entry *pp; - - if (pmdebug & (PDB_USER|PDB_ZERO)) - kprintf("pmap_zero_page(pa=%x)\n", p); - - /* - * XXX can these happen? - */ - if (pmap_find_physentry(p) == PHYS_NULL) - panic("zero_page: physaddr out of range"); - - pmap_zero_page_assembler(p); -} - -/* - * pmap_copy_page(src, dst) - * - * pmap_copy_page copies the specified (machine independent) - * page from physical address src to physical address dst. - * - * We need to invalidate the cache for address dst before - * we do the copy. Apparently there won't be any mappings - * to the dst address normally. - */ -void -pmap_copy_page( - vm_offset_t src, - vm_offset_t dst) -{ - register struct phys_entry *pp; - - if (pmdebug & (PDB_USER|PDB_COPY)) - kprintf("pmap_copy_page(spa=%x, dpa=%x)\n", src, dst); - if (pmdebug & PDB_COPY) - kprintf("pmap_copy_page: phys_copy(%x, %x, %x)\n", - src, dst, PAGE_SIZE); - - pmap_copy_page_assembler(src, dst); -} -#endif /* DEBUG */ /* * pmap_pageable(pmap, s, e, pageable) @@ -1773,7 +1228,7 @@ pmap_pageable( } /* * Routine: pmap_change_wiring - * NOTE USED ANYMORE. + * NOT USED ANYMORE. 
*/ void pmap_change_wiring( @@ -1790,6 +1245,8 @@ pmap_change_wiring( * virtual address range determined by [s, e] and pmap, * s and e must be on machine independent page boundaries and * s must be less than or equal to e. + * + * Note that this function will not descend nested pmaps. */ void pmap_modify_pages( @@ -1799,38 +1256,41 @@ pmap_modify_pages( { spl_t spl; mapping *mp; + ppnum_t pa; + addr64_t va, endva, nextva; + unsigned int saveflags; -#if PMAP_LOWTRACE - dbgTrace(0xF1D00010, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12))); /* (TEST/DEBUG) */ -#endif - -#if DEBUG - if (pmdebug & PDB_USER) kprintf("pmap_modify_pages(pmap=%x, sva=%x, eva=%x)\n", pmap, sva, eva); -#endif - - if (pmap == PMAP_NULL) return; /* If no pmap, can't do it... */ - - debugLog2(56, sva, eva); /* Log pmap_map call */ + if (pmap == PMAP_NULL) return; /* If no pmap, can't do it... */ + + va = sva & -4096; /* Round to page */ + endva = eva & -4096; /* Round to page */ - spl=splhigh(); /* Don't bother me */ + while (va < endva) { /* Walk through all pages */ - for ( ; sva < eva; sva += PAGE_SIZE) { /* Cycle through the whole range */ - mp = hw_lock_phys_vir(pmap->space, sva); /* Lock the physical entry for this mapping */ - if(mp) { /* Did we find one? */ - if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */ - panic("pmap_modify_pages: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */ - continue; - } - mp = hw_cpv(mp); /* Convert to virtual addressing */ - if(!mp->physent) continue; /* No physical entry means an I/O page, we can't set attributes */ - mapping_set_mod(mp->physent); /* Set the modfied bit for this page */ - hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ + spl = splhigh(); /* We can't allow any loss of control here */ + + mp = mapping_find(pmap, (addr64_t)va, &va, 0); /* Find the mapping for this address */ + + if(!mp) { /* Is the page mapped? */ + splx(spl); /* Page not mapped, restore interruptions */ + if((va == 0) || (va >= endva)) break; /* We are done if there are no more or we hit the end... */ + continue; /* We are not done and there is more to check... */ } + + saveflags = mp->mpFlags; /* Remember the flags */ + pa = mp->mpPAddr; /* Remember ppage because mapping may vanish after drop call */ + + mapping_drop_busy(mp); /* We have everything we need from the mapping */ + + splx(spl); /* Restore 'rupts */ + + if(saveflags & (mpNest | mpBlock)) continue; /* Can't mess around with these guys... */ + + mapping_set_mod(pa); /* Set the modfied bit for this page */ + + if(va == 0) break; /* We hit the end of the pmap, might as well leave now... */ } - splx(spl); /* Restore the interrupt level */ - - debugLog2(57, 0, 0); /* Log pmap_map call */ - return; /* Leave... */ + return; /* Leave... */ } /* @@ -1843,36 +1303,9 @@ pmap_modify_pages( void pmap_clear_modify(vm_offset_t pa) { - register struct phys_entry *pp; - spl_t spl; -#if PMAP_LOWTRACE - dbgTrace(0xF1D00011, (unsigned int)pa, 0); /* (TEST/DEBUG) */ -#endif -#if DEBUG - if (pmdebug & PDB_USER) - kprintf("pmap_clear_modify(pa=%x)\n", pa); -#endif + mapping_clr_mod((ppnum_t)pa); /* Clear all change bits for physical page */ - pp = pmap_find_physentry(pa); /* Find the physent for this page */ - if (pp == PHYS_NULL) return; /* If there isn't one, just leave... 
*/ - - debugLog2(58, pa, 0); /* Log pmap_map call */ - - spl=splhigh(); /* Don't bother me */ - - if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */ - panic("pmap_clear_modify: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */ - splx(spl); /* Restore 'rupts */ - return; /* Should die before here */ - } - - mapping_clr_mod(pp); /* Clear all change bits for physical page */ - - hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */ - splx(spl); /* Restore the interrupt level */ - - debugLog2(59, 0, 0); /* Log pmap_map call */ } /* @@ -1883,40 +1316,8 @@ pmap_clear_modify(vm_offset_t pa) boolean_t pmap_is_modified(register vm_offset_t pa) { - register struct phys_entry *pp; - spl_t spl; - boolean_t ret; - - -#if PMAP_LOWTRACE - dbgTrace(0xF1D00012, (unsigned int)pa, 0); /* (TEST/DEBUG) */ -#endif -#if DEBUG - if (pmdebug & PDB_USER) - kprintf("pmap_is_modified(pa=%x)\n", pa); -#endif - - pp = pmap_find_physentry(pa); /* Find the physent for this page */ - if (pp == PHYS_NULL) return(FALSE); /* Just indicate not set... */ - - debugLog2(60, pa, 0); /* Log pmap_map call */ - - spl=splhigh(); /* Don't bother me */ - - if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */ - panic("pmap_is_modified: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */ - splx(spl); /* Restore 'rupts */ - return 0; /* Should die before here */ - } - - ret = mapping_tst_mod(pp); /* Check for modified */ - - hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */ - splx(spl); /* Restore the interrupt level */ + return mapping_tst_mod((ppnum_t)pa); /* Check for modified */ - debugLog2(61, ret, 0); /* Log pmap_map call */ - - return ret; } /* @@ -1928,29 +1329,7 @@ pmap_is_modified(register vm_offset_t pa) void pmap_clear_reference(vm_offset_t pa) { - register struct phys_entry *pp; - spl_t spl; - - -#if PMAP_LOWTRACE - dbgTrace(0xF1D00013, (unsigned int)pa, 0); /* (TEST/DEBUG) */ -#endif -#if DEBUG - if (pmdebug & PDB_USER) - kprintf("pmap_clear_reference(pa=%x)\n", pa); -#endif - - pp = pmap_find_physentry(pa); /* Find the physent for this page */ - if (pp == PHYS_NULL) return; /* If there isn't one, just leave... */ - - debugLog2(62, pa, 0); /* Log pmap_map call */ - - spl=splhigh(); /* Don't bother me */ - mapping_clr_ref(pp); /* Clear all reference bits for physical page */ - splx(spl); /* Restore the interrupt level */ - - debugLog2(63, 0, 0); /* Log pmap_map call */ - + mapping_clr_ref((ppnum_t)pa); /* Check for modified */ } /* @@ -1961,40 +1340,27 @@ pmap_clear_reference(vm_offset_t pa) boolean_t pmap_is_referenced(vm_offset_t pa) { - register struct phys_entry *pp; - spl_t spl; - boolean_t ret; - - -#if PMAP_LOWTRACE - dbgTrace(0xF1D00014, (unsigned int)pa, 0); /* (TEST/DEBUG) */ -#endif -#if DEBUG - if (pmdebug & PDB_USER) - kprintf("pmap_is_referenced(pa=%x)\n", pa); -#endif + return mapping_tst_ref((ppnum_t)pa); /* Check for referenced */ +} - pp = pmap_find_physentry(pa); /* Find the physent for this page */ - if (pp == PHYS_NULL) return(FALSE); /* Just indicate not set... */ - - debugLog2(64, pa, 0); /* Log pmap_map call */ +/* + * pmap_canExecute(ppnum_t pa) + * returns 1 if instructions can execute + * returns 0 if know not (i.e. 
#if MACH_VM_DEBUG @@ -2022,25 +1388,15 @@ pmap_copy_part_page( { register struct phys_entry *pp_src, *pp_dst; spl_t s; + addr64_t fsrc, fdst; + assert((((dst << 12) & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE); + assert((((src << 12) & PAGE_MASK) + src_offset + len) <= PAGE_SIZE); -#if PMAP_LOWTRACE - dbgTrace(0xF1D00019, (unsigned int)src+src_offset, (unsigned int)dst+dst_offset); /* (TEST/DEBUG) */ - dbgTrace(0xF1D04019, (unsigned int)len, 0); /* (TEST/DEBUG) */ -#endif - s = splhigh(); + fsrc = ((addr64_t)src << 12) + src_offset; + fdst = ((addr64_t)dst << 12) + dst_offset; - assert(((dst & PAGE_MASK)+dst_offset+len) <= PAGE_SIZE); - assert(((src & PAGE_MASK)+src_offset+len) <= PAGE_SIZE); - - /* - * Since the source and destination are physical addresses, - * turn off data translation to perform a bcopy() in bcopy_phys(). - */ - phys_copy((vm_offset_t) src+src_offset, - (vm_offset_t) dst+dst_offset, len); - - splx(s); + phys_copy(fsrc, fdst, len); /* Copy the stuff physically */ }
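[Editor's note] pmap_copy_part_page now takes page numbers (ppnum_t), so the physical byte address is rebuilt as ((addr64_t)page << 12) + offset. A quick worked example, with made-up values:

	ppnum_t     src        = 0x123456;	/* physical page number (assumed) */
	vm_offset_t src_offset = 0x80;		/* byte offset within the page */
	addr64_t    fsrc       = ((addr64_t)src << 12) + src_offset;	/* = 0x123456080 */
	/* The cast must precede the shift: 0x123456 << 12 would overflow a 32-bit int. */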
void @@ -2052,24 +1408,16 @@ pmap_zero_part_page( panic("pmap_zero_part_page"); } -boolean_t pmap_verify_free(vm_offset_t pa) { +boolean_t pmap_verify_free(ppnum_t pa) { struct phys_entry *pp; + unsigned int pindex; -#if PMAP_LOWTRACE - dbgTrace(0xF1D00007, (unsigned int)pa, 0); /* (TEST/DEBUG) */ -#endif - -#if DEBUG - if (pmdebug & PDB_USER) - kprintf("pmap_verify_free(pa=%x)\n", pa); -#endif - - if (!pmap_initialized) return(TRUE); + pp = mapping_phys_lookup(pa, &pindex); /* Get physical entry */ + if (pp == 0) return FALSE; /* If there isn't one, show no mapping... */ - pp = pmap_find_physentry(pa); /* Look up the physical entry */ - if (pp == PHYS_NULL) return FALSE; /* If there isn't one, show no mapping... */ - return ((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS) == MAPPING_NULL); /* Otherwise, return TRUE if mapping exists... */ + if(pp->ppLink & ~(ppLock | ppN | ppFlags)) return TRUE; /* We have at least one mapping */ + return FALSE; /* No mappings */ } @@ -2079,13 +1427,9 @@ void pmap_switch(pmap_t map) { unsigned int i; -#if DEBUG - if (watchacts & WA_PCB) { - kprintf("Switching to map at 0x%08x, space=%d\n", - map,map->space); - } -#endif /* DEBUG */ + hw_blow_seg(copyIOaddr); /* Blow off the first segment */ + hw_blow_seg(copyIOaddr + 0x10000000ULL); /* Blow off the second segment */ /* when changing to kernel space, don't bother * doing anything, the kernel is mapped from here already. @@ -2099,113 +1443,117 @@ void pmap_switch(pmap_t map) } /* - * kern_return_t pmap_nest(grand, subord, vaddr, size) + * kern_return_t pmap_nest(grand, subord, vstart, nstart, size) * * grand = the pmap that we will nest subord into * subord = the pmap that goes into the grand - * vaddr = start of range in pmap to be inserted - * size = size of range in pmap to be inserted + * vstart = start of range in pmap to be inserted + * nstart = start of the corresponding range in the nested pmap + * size = Size of nest area (up to 16TB) * * Inserts a pmap into another. This is used to implement shared segments. * On the current PPC processors, this is limited to segment (256MB) aligned * segment sized ranges. + * + * We actually kinda allow recursive nests. The gating factor is that we do not allow + * nesting on top of something that is already mapped, i.e., the range must be empty. + * + * Note that we depend upon higher level VM locks to ensure that things don't change while + * we are doing this. For example, VM should not be doing any pmap enters while it is nesting + * or do 2 nests at once. */
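[Editor's note] A sketch, not part of the patch, of a call that satisfies the alignment contract documented above; the pmaps and addresses are hypothetical:

	static void
	nest_shared_region_example(pmap_t grand, pmap_t subord)
	{
		addr64_t vstart = 0x0000000090000000ULL;	/* 256MB-aligned address in grand */
		addr64_t nstart = 0x0000000010000000ULL;	/* 256MB-aligned address in subord */
		uint64_t size   = 0x0000000020000000ULL;	/* 512MB: multiple of 256MB, well under 16TB */

		if (pmap_nest(grand, subord, vstart, nstart, size) != KERN_SUCCESS)
			panic("nest_shared_region_example: arguments rejected\n");	/* KERN_INVALID_VALUE on bad alignment/size */
	}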
*/ + panic("pmap_nest: size is invalid - %016llX\n", size); } - grand->pmapPmaps[seg] = subord; /* Set the pointer to the subordinate */ - grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | subord->space; /* Set the vsid to the subordinate's vsid */ - grand->vflags |= (0x00008000 >> seg); /* Set in-use bit */ + msize = (size >> 28) - 1; /* Change size to blocks of 256MB */ + + nlists = mapSetLists(grand); /* Set number of lists this will be on */ - grandr = (unsigned int)grand ^ grand->pmapvr; /* Get real address of the grand pmap */ + mp = mapping_alloc(nlists); /* Get a spare mapping block */ - simple_unlock(&grand->lock); /* Unlock the grand pmap */ + mp->mpFlags = 0x01000000 | mpNest | nlists; /* Set the flags. Make sure busy count is 1 */ + mp->mpSpace = subord->space; /* Set the address space/pmap lookup ID */ + mp->mpBSize = msize; /* Set the size */ + mp->mpPte = 0; /* Set the PTE invalid */ + mp->mpPAddr = 0; /* Set the physical page number */ + mp->mpVAddr = vstart; /* Set the address */ + mp->mpNestReloc = nstart - vstart; /* Set grand to nested vaddr relocation value */ - -/* - * Note that the following will force the segment registers to be reloaded following - * the next interrupt on all processors if they are using the pmap we just changed. - * - */ - - - for(i=0; i < real_ncpus; i++) { /* Cycle through processors */ - (void)hw_compare_and_store((unsigned int)grandr, 0, &per_proc_info[i].Lastpmap); /* Clear if ours */ + colladdr = hw_add_map(grand, mp); /* Go add the mapping to the pmap */ + + if(colladdr) { /* Did it collide? */ + vend = vstart + size - 4096; /* Point to the last page we would cover in nest */ + panic("pmap_nest: attempt to nest into a non-empty range - pmap = %08X, start = %016llX, end = %016llX\n", + grand, vstart, vend); } - - return KERN_SUCCESS; /* Bye, bye, butterfly... */ + + return KERN_SUCCESS; } - /* - * kern_return_t pmap_unnest(grand, vaddr, size) + * kern_return_t pmap_unnest(grand, vaddr) * * grand = the pmap that we will nest subord into - * vaddr = start of range in pmap to be inserted - * size = size of range in pmap to be inserted + * vaddr = start of range in pmap to be unnested * * Removes a pmap from another. This is used to implement shared segments. * On the current PPC processors, this is limited to segment (256MB) aligned * segment sized ranges. */ -kern_return_t pmap_unnest(pmap_t grand, vm_offset_t vaddr, vm_size_t size) { +kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr) { unsigned int oflags, seg, grandr, tstamp; int i, tcpu, mycpu; + addr64_t nextva; + spl_t s; + mapping *mp; - if(size != 0x10000000) return KERN_INVALID_VALUE; /* We can only do this for 256MB for now */ - if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */ - - simple_lock(&grand->lock); /* Lock the superior pmap */ - disable_preemption(); /* It's all for me! 
- /* - * kern_return_t pmap_unnest(grand, vaddr, size) + * kern_return_t pmap_unnest(grand, vaddr) * * grand = the pmap that the nested pmap will be removed from - * vaddr = start of range in pmap to be inserted - * size = size of range in pmap to be inserted + * vaddr = start of range in pmap to be unnested * * Removes a pmap from another. This is used to implement shared segments. * On the current PPC processors, this is limited to segment (256MB) aligned * segment sized ranges. */ -kern_return_t pmap_unnest(pmap_t grand, vm_offset_t vaddr, vm_size_t size) { +kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr) { unsigned int oflags, seg, grandr, tstamp; int i, tcpu, mycpu; + addr64_t nextva; + spl_t s; + mapping *mp; - if(size != 0x10000000) return KERN_INVALID_VALUE; /* We can only do this for 256MB for now */ - if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */ - - simple_lock(&grand->lock); /* Lock the superior pmap */ - disable_preemption(); /* It's all for me! */ - - seg = vaddr >> 28; /* Isolate the segment number */ - if(!((0x00008000 >> seg) & grand->vflags)) { /* See if it is already in use */ - enable_preemption(); /* Ok, your turn */ - simple_unlock(&grand->lock); /* Unlock the superior pmap */ - panic("pmap_unnest: Attempt to unnest an unnested segment\n"); - return KERN_FAILURE; /* Shame on you */ + s = splhigh(); /* Make sure interruptions are disabled */ + + mp = mapping_find(grand, vaddr, &nextva, 0); /* Find the nested map */ + + if(((unsigned int)mp & mapRetCode) != mapRtOK) { /* See if it was even nested */ + panic("pmap_unnest: Attempt to unnest an unnested segment - va = %016llX\n", vaddr); + } + + if(!(mp->mpFlags & mpNest)) { /* Did we find something other than a nest? */ + panic("pmap_unnest: Attempt to unnest something that is not a nest - va = %016llX\n", vaddr); } - grand->pmapPmaps[seg] = (pmap_t)0; /* Clear the pointer to the subordinate */ - grand->pmapSegs[seg] = grand->space; /* Set the pointer to the subordinate's vsid */ - grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | grand->space; /* Set the vsid to the grand's vsid */ - grand->vflags &= ~(0x00008000 >> seg); /* Clear in-use bit */ + if(mp->mpVAddr != vaddr) { /* Make sure the address is the same */ + panic("pmap_unnest: Attempt to unnest something that is not at start of nest - va = %016llX\n", vaddr); } - grandr = (unsigned int)grand ^ grand->pmapvr; /* Get real address of the grand pmap */ + (void)hw_atomic_or(&mp->mpFlags, mpRemovable); /* Show that this mapping is now removable */ - simple_unlock(&grand->lock); /* Unlock the superior pmap */ + mapping_drop_busy(mp); /* Go ahead and release the mapping now */ + disable_preemption(); /* It's all for me! */ + splx(s); /* Restore 'rupts */ + + (void)mapping_remove(grand, vaddr); /* Toss the nested pmap mapping */ + + invalidateSegs(grand); /* Invalidate the pmap segment cache */ + /* * Note that the following will force the segment registers to be reloaded * on all processors (if they are using the pmap we just changed) before returning. @@ -2221,72 +1569,233 @@ kern_return_t pmap_unnest(pmap_t grand, vm_offset_t vaddr, vm_size_t size) { */ - mycpu = cpu_number(); /* Who am I? Am I just a dream? */ - for(i=0; i < real_ncpus; i++) { /* Cycle through processors */ - if(hw_compare_and_store((unsigned int)grandr, 0, &per_proc_info[i].Lastpmap)) { /* Clear if ours and kick the other guy if he was using it */ - if(i == mycpu) continue; /* Don't diddle ourselves */ - tstamp = per_proc_info[i].ruptStamp[1]; /* Save the processor's last interrupt time stamp */ - if(cpu_signal(i, SIGPwake, 0, 0) != KERN_SUCCESS) { /* Make sure we see the pmap change */ + mycpu = cpu_number(); /* Who am I? Am I just a dream? */ + for(i=0; i < real_ncpus; i++) { /* Cycle through processors */ + if((unsigned int)grand == per_proc_info[i].ppUserPmapVirt) { /* Is this guy using the changed pmap? */ + + per_proc_info[i].ppInvSeg = 1; /* Show that we need to invalidate the segments */ + + if(i == mycpu) continue; /* Don't diddle ourselves */ + + tstamp = per_proc_info[i].ruptStamp[1]; /* Save the processor's last interrupt time stamp */ + if(cpu_signal(i, SIGPcpureq, CPRQsegload, 0) != KERN_SUCCESS) { /* Make sure we see the pmap change */ continue; } + if(!hw_cpu_wcng(&per_proc_info[i].ruptStamp[1], tstamp, LockTimeOut)) { /* Wait for the other processor to see the interruption */ panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i); } } - enable_preemption(); /* Others can run now */ - return KERN_SUCCESS; /* Bye, bye, butterfly... */ + enable_preemption(); /* Others can run now */ + return KERN_SUCCESS; /* Bye, bye, butterfly... */ }
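[Editor's note] The cross-processor handshake used above (flag the change, signal the CPU, wait for its interrupt timestamp to move, proving it took the interruption), reduced to a sketch. Not part of the patch; it uses only calls that appear in this patch, and the helper name is hypothetical:

	static void
	kick_cpu_and_wait(int i)
	{
		unsigned int tstamp;

		tstamp = per_proc_info[i].ruptStamp[1];		/* Timestamp before we signal */
		per_proc_info[i].ppInvSeg = 1;			/* Ask it to invalidate its segments */
		if (cpu_signal(i, SIGPcpureq, CPRQsegload, 0) != KERN_SUCCESS)
			return;					/* Could not signal; caller decides what next */
		if (!hw_cpu_wcng(&per_proc_info[i].ruptStamp[1], tstamp, LockTimeOut))
			panic("kick_cpu_and_wait: processor %d did not take the interruption\n", i);
	}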
-void pmap_ver(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { +/* + * void MapUserAddressSpaceInit(void) + * + * Initializes anything we need in order to map user address space slices into + * the kernel. Primarily used for copy in/out. + * + * Currently we only support one 512MB slot for this purpose. There are two special + * mappings defined for the purpose: the special pmap nest, and linkage mapping. + * + * The special pmap nest (which is allocated in this function) is used as a place holder + * in the kernel's pmap search list. It is 512MB long and covers the address range + * starting at copyIOaddr. It points to no actual memory and when the fault handler + * hits in it, it knows to look in the per_proc and start using the linkage + * mapping contained therein. + * + * The linkage mapping is used to glue the user address space slice into the + * kernel. It contains the relocation information used to transform the faulting + * kernel address into the user address space. It also provides the link to the + * user's pmap. This is pointed to by the per_proc and is switched in and out + * whenever there is a context switch. + * + */ + +void MapUserAddressSpaceInit(void) { + + addr64_t colladdr; + int nlists, asize; + mapping *mp; + + nlists = mapSetLists(kernel_pmap); /* Set number of lists this will be on */ + + mp = mapping_alloc(nlists); /* Get a spare mapping block */ + + mp->mpFlags = 0x01000000 | mpNest | mpSpecial | nlists; /* Set the flags. Make sure busy count is 1 */ + mp->mpSpace = kernel_pmap->space; /* Set the address space/pmap lookup ID */ + mp->mpBSize = 1; /* Set the size to 2 segments */ + mp->mpPte = 0; /* Means nothing */ + mp->mpPAddr = 0; /* Means nothing */ + mp->mpVAddr = copyIOaddr; /* Set the address range we cover */ + mp->mpNestReloc = 0; /* Means nothing */ + + colladdr = hw_add_map(kernel_pmap, mp); /* Go add the mapping to the pmap */ + + if(colladdr) { /* Did it collide? */ + panic("MapUserAddressSpaceInit: MapUserAddressSpace range already mapped\n"); + } + + return; +} + +/* + * addr64_t MapUserAddressSpace(vm_map_t map, addr64_t va, unsigned int size) + * + * map = the vm_map that we are mapping into the kernel + * va = start of the address range we are mapping + * size = size of the range. No greater than 256MB and not 0. + * Note that we do not test validity, we chose to trust our fellows... + * + * Maps a slice of a user address space into a predefined kernel range + * on a per-thread basis. In the future, the restriction of a predefined + * range will be loosened. + * + * Builds the proper linkage map to map the user range. + * We will round this down to the previous segment boundary and calculate + * the relocation to the kernel slot. + * + * We always make a segment table entry here if we need to. This is mainly because of + * copyin/out and if we don't, there will be multiple segment faults for + * each system call. I have seen upwards of 30000 per second. + * + * We do check, however, to see if the slice is already mapped and if so, + * we just exit. This is done for performance reasons. It was found that + * there was a considerable boost in copyin/out performance if we did not + * invalidate the segment at ReleaseUserAddressSpace time, so we dumped the + * restriction that you had to bracket MapUserAddressSpace. Further, there + * is yet a further boost if you didn't need to map it each time. The theory + * behind this is that many times copies are to or from the same segment and + * done multiple times within the same system call. To take advantage of that, + * we check cioSpace and cioRelo to see if we've already got it. + * + * We also need to half-invalidate the slice when we context switch or go + * back to user state. A half-invalidate does not clear the actual mapping, + * but it does force the MapUserAddressSpace function to reload the segment + * register/SLBE. If this is not done, we can end up with some pretty severe + * performance penalties. If we map a slice, and the cached space/relocation is + * the same, we won't reload the segment registers. However, since we ran someone else, + * our SR is cleared and we will take a fault. This is reasonable if we block + * while copying (e.g., we took a page fault), but it is not reasonable when we + * just start. For this reason, we half-invalidate to make sure that the SR is + * explicitly reloaded. + * + * Note that we do not go to the trouble of making a pmap segment cache + * entry for these guys because they are very short term -- 99.99% of the time + * they will be unmapped before the next context switch. + * + */
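[Editor's note] The window arithmetic described above, traced with an assumed user address; not part of the patch. copyIOaddr is 0x00000000E0000000ULL per this patch's pmap.h:

	addr64_t va     = 0x0000000012345678ULL;		/* user address (assumed) */
	addr64_t baddrs = va & 0xFFFFFFFFF0000000ULL;		/* user segment base: 0x10000000 */
	addr64_t reladd = baddrs - copyIOaddr;			/* window-to-user relocation value */
	addr64_t kva    = (va & 0x0FFFFFFFULL) | copyIOaddr;	/* kernel window address: 0xE2345678 */
	/* For any va in the segment, kva + reladd == va, which is how the fault
	   handler turns a faulting window address back into the user address. */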
- int cnt, i, j, k; - vm_offset_t xx; +addr64_t MapUserAddressSpace(vm_map_t map, addr64_t va, unsigned int size) { + + addr64_t baddrs, reladd; + thread_act_t act; + mapping *mp; + struct per_proc_info *perproc; - if(!pmap) return; + baddrs = va & 0xFFFFFFFFF0000000ULL; /* Isolate the segment */ + act = current_act(); /* Remember our activation */ - sva = trunc_page(sva); - eva = trunc_page(eva); + reladd = baddrs - copyIOaddr; /* Get the relocation from user to kernel */ - for(i = 0; i < (pmapUsageMask + 1); i++) { /* Step through them all */ - if((pmap->pmapUsage[i]) > 8192) { /* See if this is a sane number */ - panic("pmap_ver: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n", - i * pmapUsageSize, pmap->pmapUsage[i], pmap); - } + if((act->mact.cioSpace == map->pmap->space) && (act->mact.cioRelo == reladd)) { /* Already mapped? */ + return ((va & 0x0FFFFFFFULL) | copyIOaddr); /* Pass back the kernel address we are to use */ } - j = 0; - while(1) { /* Try multiple times */ - cnt = 0; - for(i = 0; i < (pmapUsageMask + 1); i++) { /* Step through them all */ - cnt = cnt + pmap->pmapUsage[i]; /* Sum all slots */ - } - if(cnt == pmap->stats.resident_count) break; /* We're ok if we match... */ + + disable_preemption(); /* Don't move... */ + perproc = getPerProc(); /* Get our per_proc_block */ + + mp = (mapping *)&perproc->ppCIOmp; /* Make up for C */ + act->mact.cioRelo = reladd; /* Relocation from user to kernel */ + mp->mpNestReloc = reladd; /* Relocation from user to kernel */ + + act->mact.cioSpace = map->pmap->space; /* Set the address space/pmap lookup ID */ + mp->mpSpace = map->pmap->space; /* Set the address space/pmap lookup ID */ + +/* + * Here we make an assumption that we are going to be using the base pmap's address space. + * If we are wrong, and that would be very, very, very rare, the fault handler will fix us up. + */ + + hw_map_seg(map->pmap, copyIOaddr, baddrs); /* Make the entry for the first segment */ + + enable_preemption(); /* Let's move */ + return ((va & 0x0FFFFFFFULL) | copyIOaddr); /* Pass back the kernel address we are to use */ +}
+ +/* + * void ReleaseUserAddressSpace(addr64_t kva) + * + * kva = kernel address of the user copy in/out slice + * + */ + +void ReleaseUserAddressSpace(addr64_t kva) { - j++; - for(i = 0; i < 100000; i++) { - k = j + i; - } - if(j >= 10) { - panic("pmap_ver: pmapUsage total (%d) does not match resident count (%d) for pmap %08X\n", - cnt, pmap->stats.resident_count, pmap); - } + int i; + addr64_t nextva, vend, kaddr, baddrs; + unsigned int msize; + thread_act_t act; + mapping *mp; + + if(kva == 0) return; /* Handle a 0 */ + + disable_preemption(); /* Don't move... */ + + act = current_act(); /* Remember our activation */ + + if(act->mact.cioSpace == invalSpace) { /* We only support one at a time */ + panic("ReleaseUserAddressSpace: attempt to release undefined copy in/out user address space slice\n"); } + + act->mact.cioSpace = invalSpace; /* Invalidate space */ + mp = (mapping *)&per_proc_info[cpu_number()].ppCIOmp; /* Make up for C */ + mp->mpSpace = invalSpace; /* Trash it in the per_proc as well */ - for(xx = sva; xx < eva; xx += PAGE_SIZE) { /* See if any slots not clear */ - if(pmap_extract(pmap, xx)) { - panic("pmap_ver: range (%08X to %08X) not empty at %08X for pmap %08X\n", - sva, eva, xx, pmap); - } + hw_blow_seg(copyIOaddr); /* Blow off the first segment */ + hw_blow_seg(copyIOaddr + 0x10000000ULL); /* Blow off the second segment */ + + enable_preemption(); /* Let's move */ + + return; /* Let's leave */ +} + + + +/* + * vm_offset_t pmap_boot_map(size) + * + * size = size of virtual address range to be mapped + * + * This function is used to assign a range of virtual addresses before VM is + * initialized. It starts at VM_MAX_KERNEL_ADDRESS and works downward. + * The variable vm_last_addr contains the current highest possible VM + * assignable address. It is a panic to attempt to call this after VM has + * started up. The only problem is that we may not have the serial or + * framebuffer mapped, so we'll never know we died......... + */ + +vm_offset_t pmap_boot_map(vm_size_t size) { + + if(kernel_map != VM_MAP_NULL) { /* Has VM already started?
*/ + panic("pmap_boot_map: VM started\n"); } + + size = round_page_32(size); /* Make sure this is in pages */ + vm_last_addr = vm_last_addr - size; /* Allocate the memory */ + return (vm_last_addr + 1); /* Return the vaddr we just allocated */ + } + /* temporary workaround */ boolean_t coredumpok(vm_map_t map, vm_offset_t va) { return TRUE; } - diff --git a/osfmk/ppc/pmap.h b/osfmk/ppc/pmap.h index 10fe1f5ac..1958a3fbd 100644 --- a/osfmk/ppc/pmap.h +++ b/osfmk/ppc/pmap.h @@ -59,46 +59,99 @@ #include #include +#define maxPPage32 0x000FFFFF /* Maximum page number in 32-bit machines */ + +typedef uint32_t shexlock; + +#pragma pack(4) /* Make sure the structure stays as we defined it */ + +struct sgc { + uint64_t sgcESID; /* ESID portion of segment cache */ +#define sgcESmsk 0xFFFFFFFFF0000000ULL /* ESID portion of segment register cache */ + uint64_t sgcVSID; /* VSID portion of segment cache */ +#define sgcVSmsk 0xFFFFFFFFFFFFF000ULL /* VSID mask */ +#define sgcVSKeys 0x0000000000000C00ULL /* Protection keys */ +#define sgcVSKeyUsr 53 /* User protection key */ +#define sgcVSNoEx 0x0000000000000200ULL /* No execute */ +}; +#pragma pack() + +typedef struct sgc sgc; + +#pragma pack(4) /* Make sure the structure stays as we defined it */ struct pmap { - queue_head_t pmap_link; /* MUST BE FIRST */ - unsigned int pmapvr; /* Virtual to real conversion mask */ - space_t space; /* space for this pmap */ -#define BMAPLOCK 0x00000001 - struct blokmap *bmaps; /* Physical pointer to odd-size page maps */ - int ref_count; /* reference count */ - unsigned int vflags; /* Alternate map validity flags */ -#define pmapBatVal 0xFF000000 -#define pmapBatDVal 0xF0000000 -#define pmapBatIVal 0x0F000000 -#define pmapFlags 0x00FF0000 -#define pmapSubord 0x00800000 -#define pmapVMhost 0x00400000 -#define pmapAltSeg 0x0000FFFF - unsigned int spaceNum; /* Space number */ -/* PPC line boundary here - 020 */ - unsigned int pmapSegs[16]; /* Contents of segment register if different than base space */ -/* PPC line boundary here - 060 */ - struct pmap *pmapPmaps[16]; /* Pointer to next lower level of pmaps */ -/* PPC line boundary here - 0A0 */ -/* Note: this must start on a word boundary */ - unsigned short pmapUsage[128]; /* Count of pages mapped into 32mb (8192 page) slots */ -#define pmapUsageShft 25 -#define pmapUsageMask 0x0000007F -#define pmapUsageSize (32*1024*1024) - -/* PPC line boundary here - 1A0 */ - struct pmap_statistics stats; /* statistics */ - decl_simple_lock_data(,lock) /* lock on map */ + queue_head_t pmap_link; /* MUST BE FIRST */ + addr64_t pmapvr; /* Virtual to real conversion mask */ + shexlock pmapSXlk; /* Shared/Exclusive lock for mapping changes */ + unsigned int space; /* space for this pmap */ +#define invalSpace 0x00000001 /* Predefined always invalid space */ + int ref_count; /* reference count */ + unsigned int pmapFlags; /* Flags */ +#define pmapKeys 0x00000007 /* Keys and no execute bit to use with this pmap */ +#define pmapKeyDef 0x00000006 /* Default keys - Sup = 1, user = 1, no ex = 0 */ +#define pmapVMhost 0x00000010 /* pmap with Virtual Machines attached to it */ + unsigned int spaceNum; /* Space number */ + unsigned int pmapCCtl; /* Cache control */ +#define pmapCCtlVal 0xFFFF0000 /* Valid entries */ +#define pmapCCtlLck 0x00008000 /* Lock bit */ +#define pmapCCtlLckb 16 /* Lock bit */ +#define pmapCCtlGen 0x00007FFF /* Generation number */ + +#define pmapSegCacheCnt 16 /* Maximum number of cache entries */ +#define pmapSegCacheUse 16 /* Number of cache entries to use */ + + struct 
pmap *freepmap; /* Free pmaps */ + + unsigned int pmapRsv1[3]; +/* 0x038 */ + uint64_t pmapSCSubTag; /* Segment cache sub-tags. This is a 16 entry 4 bit array */ +/* 0x040 */ + sgc pmapSegCache[pmapSegCacheCnt]; /* SLD values cached for quick load */ + +/* 0x140 */ +/* if fanout is 4, then shift is 1, if fanout is 8 shift is 2, etc */ +#define kSkipListFanoutShift 1 +/* with n lists, we can handle (fanout**n) pages optimally */ +#define kSkipListMaxLists 12 + unsigned char pmapCurLists; /* 0x140 - max #lists any mapping in this pmap currently has */ + unsigned char pmapRsv2[3]; + uint32_t pmapRandNum; /* 0x144 - used by mapSetLists() as a random number generator */ + addr64_t pmapSkipLists[kSkipListMaxLists]; /* 0x148 - the list headers */ +/* following statistics conditionally gathered */ + uint64_t pmapSearchVisits; /* 0x1A8 - nodes visited searching pmaps */ + uint32_t pmapSearchCnt; /* 0x1B0 - number of calls to mapSearch or mapSearchFull */ + + unsigned int pmapRsv3[3]; + +/* 0x1C0 */ + + struct pmap_statistics stats; /* statistics */ + decl_simple_lock_data(,lock) /* lock on map */ /* Need to pad out to a power of 2 - right now it is 512 bytes */ #define pmapSize 512 }; +#pragma pack() + +#pragma pack(4) +struct pmapTransTab { + addr64_t pmapPAddr; /* Physical address of pmap */ + unsigned int pmapVAddr; /* Virtual address of pmap */ +}; +#pragma pack() /* Make sure the structure stays as we defined it */ + +typedef struct pmapTransTab pmapTransTab; #define PMAP_NULL ((pmap_t) 0) extern pmap_t kernel_pmap; /* The kernel's map */ extern pmap_t cursor_pmap; /* The pmap to start allocations with */ - +extern pmap_t sharedPmap; +extern unsigned int sharedPage; +extern int ppc_max_adrsp; /* Maximum number of concurrent address spaces allowed. */ +extern addr64_t vm_max_address; /* Maximum effective address supported */ +extern addr64_t vm_max_physical; /* Maximum physical address supported */ +extern pmapTransTab *pmapTrans; /* Space to pmap translate table */ #define PMAP_SWITCH_USER(th, map, my_cpu) th->map = map; #define PMAP_ACTIVATE(pmap, th, cpu) @@ -106,10 +159,13 @@ extern pmap_t cursor_pmap; /* The pmap to start allocations with */ #define PMAP_CONTEXT(pmap,th) #define pmap_kernel_va(VA) \ - (((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS)) + (((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= vm_last_addr)) #define PPC_SID_KERNEL 0 /* Must change KERNEL_SEG_REG0_VALUE if !0 */ -#define SID_MAX ((1<<20) - 1) /* Space ID=20 bits, segment_id=SID + 4 bits */ + +#define maxAdrSp 16384 +#define maxAdrSpb 14 +#define copyIOaddr 0x00000000E0000000ULL #define pmap_kernel() (kernel_pmap) #define pmap_resident_count(pmap) ((pmap)->stats.resident_count) @@ -117,9 +173,6 @@ extern pmap_t cursor_pmap; /* The pmap to start allocations with */ #define pmap_copy(dpmap,spmap,da,len,sa) #define pmap_update() -#define pmap_phys_address(x) ((x) << PPC_PGSHIFT) -#define pmap_phys_to_frame(x) ((x) >> PPC_PGSHIFT) - #define PMAP_DEFAULT_CACHE 0 #define PMAP_INHIBIT_CACHE 1 #define PMAP_GUARDED_CACHE 2 @@ -127,14 +180,17 @@ extern pmap_t cursor_pmap; /* The pmap to start allocations with */ #define PMAP_NO_GUARD_CACHE 8 /* corresponds to cached, coherent, not writethru, not guarded */ -#define VM_WIMG_DEFAULT VM_MEM_COHERENT -#define VM_WIMG_IO VM_MEM_COHERENT | \ - VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED +#define VM_WIMG_DEFAULT (VM_MEM_COHERENT) +#define VM_WIMG_COPYBACK (VM_MEM_COHERENT) +#define VM_WIMG_IO (VM_MEM_COHERENT | \ + VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED) +#define VM_WIMG_WTHRU
(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED) +/* write combining mode, aka store gather */ +#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT) /* * prototypes. */ -extern void ppc_protection_init(void); extern vm_offset_t phystokv(vm_offset_t pa); /* Get kernel virtual address from physical */ extern vm_offset_t kvtophys(vm_offset_t va); /* Get physical address from kernel virtual */ extern vm_offset_t pmap_map(vm_offset_t va, @@ -145,18 +201,9 @@ extern kern_return_t pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa, boolean_t available, unsigned int attr); -extern vm_offset_t pmap_map_bd(vm_offset_t va, - vm_offset_t spa, - vm_offset_t epa, - vm_prot_t prot); -extern void pmap_bootstrap(unsigned int mem_size, +extern void pmap_bootstrap(uint64_t msize, vm_offset_t *first_avail, - vm_offset_t *first_phys_avail, unsigned int kmapsize); -extern void pmap_block_map(vm_offset_t pa, - vm_size_t size, - vm_prot_t prot, - int entry, - int dtlb); + unsigned int kmapsize); extern void pmap_switch(pmap_t); extern vm_offset_t pmap_extract(pmap_t pmap, @@ -164,23 +211,29 @@ extern vm_offset_t pmap_extract(pmap_t pmap, extern void pmap_remove_all(vm_offset_t pa); -extern boolean_t pmap_verify_free(vm_offset_t pa); +extern boolean_t pmap_verify_free(ppnum_t pa); extern void sync_cache(vm_offset_t pa, unsigned length); +extern void sync_cache64(addr64_t pa, unsigned length); +extern void sync_ppage(ppnum_t pa); +extern void sync_cache_virtual(vm_offset_t va, unsigned length); extern void flush_dcache(vm_offset_t va, unsigned length, boolean_t phys); +extern void flush_dcache64(addr64_t va, unsigned length, boolean_t phys); extern void invalidate_dcache(vm_offset_t va, unsigned length, boolean_t phys); +extern void invalidate_dcache64(addr64_t va, unsigned length, boolean_t phys); extern void invalidate_icache(vm_offset_t va, unsigned length, boolean_t phys); -extern void pmap_sync_caches_phys(vm_offset_t pa); -extern void invalidate_cache_for_io(vm_offset_t va, unsigned length, boolean_t phys); -extern void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, - vm_prot_t prot, int attr, unsigned int flags); /* Map a block */ -extern kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va, - vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr); /* Map a block allocating an optimal virtual address */ -extern kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa, - vm_size_t size, vm_prot_t prot); - -extern kern_return_t pmap_nest(pmap_t grand, pmap_t subord, vm_offset_t vaddr, vm_size_t size); - -extern void pmap_ver(pmap_t pmap, vm_offset_t sva, vm_offset_t eva); +extern void invalidate_icache64(addr64_t va, unsigned length, boolean_t phys); +extern void pmap_sync_caches_phys(ppnum_t pa); +extern void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags); +extern int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags); + +extern kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size); +extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); +extern addr64_t MapUserAddressSpace(vm_map_t map, addr64_t va, unsigned int size); +extern void ReleaseUserAddressSpace(addr64_t kva); +extern kern_return_t pmap_attribute_cache_sync(ppnum_t pp, vm_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value); +extern 
int pmap_canExecute(ppnum_t pa); #endif /* _PPC_PMAP_H_ */ diff --git a/osfmk/ppc/pmap_internals.h b/osfmk/ppc/pmap_internals.h deleted file mode 100644 index c7ff3bb19..000000000 --- a/osfmk/ppc/pmap_internals.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_COPYRIGHT@ - */ - -/* Things that don't need to be exported from pmap. Putting - * them here and not in pmap.h avoids major recompiles when - * modifying something either here or in proc_reg.h - */ - -#ifndef _PMAP_INTERNALS_H_ -#define _PMAP_INTERNALS_H_ - -/* - * Definition of the flags in the low 5 bits of the phys_link field of the phys_entry - */ - -#define PHYS_LOCK 0x00000001 -#define PHYS_FLAGS 0x0000001F - -#ifndef ASSEMBLER - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -/* Page table entries are stored in groups (PTEGS) in a hash table */ - -#if __PPC__ -#if _BIG_ENDIAN == 0 -error - bitfield structures are not checked for bit ordering in words -#endif /* _BIG_ENDIAN */ -#endif /* __PPC__ */ - -/* - * Don't change these structures unless you change the assembly code - */ - -struct phys_entry { - struct mapping *phys_link; /* MUST BE FIRST - chain of mappings and flags in the low 5 bits, see above */ - unsigned int pte1; /* referenced/changed/wimg - info update atomically */ -}; - - -#define PHYS_NULL ((struct phys_entry *)0) - -/* Memory may be non-contiguous. This data structure contains info - * for mapping this non-contiguous space into the contiguous - * physical->virtual mapping tables. An array of this type is - * provided to the pmap system at bootstrap by ppc_vm_init. - * - * NB : regions must be in order in this structure. 
- */ - -typedef struct mem_region { - vm_offset_t start; /* Address of base of region */ - struct phys_entry *phys_table; /* base of region's table */ - unsigned int end; /* End address+1 */ -} mem_region_t; - -/* PMAP_MEM_REGION_MAX has a PowerMac dependancy - at least the value of - * kMaxRAMBanks in ppc/POWERMAC/nkinfo.h - */ -#define PMAP_MEM_REGION_MAX 26 - -extern mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX]; -extern int pmap_mem_regions_count; - -/* keep track of free regions of physical memory so that we can offer - * them up via pmap_next_page later on - */ - -#define FREE_REGION_MAX 8 -extern mem_region_t free_regions[FREE_REGION_MAX]; -extern int free_regions_count; - -/* Prototypes */ - -struct phys_entry *pmap_find_physentry(vm_offset_t pa); - - -#if DEBUG -extern int pmdebug; -#define PDB_LOCK 0x100 -#define LOCKPRINTF(args) if (pmdebug & PDB_LOCK) printf args; else -#else /* DEBUG */ -#define LOCKPRINTF(args) -#endif /* DEBUG */ - -extern vm_offset_t hash_table_base; -extern unsigned int hash_table_size; - -#endif -#endif /* _PMAP_INTERNALS_H_ */ diff --git a/osfmk/ppc/ppc_disasm.i b/osfmk/ppc/ppc_disasm.i index a3976c9ea..688f81bbc 100644 --- a/osfmk/ppc/ppc_disasm.i +++ b/osfmk/ppc/ppc_disasm.i @@ -36,6 +36,8 @@ in 010011ddd00sss000000000000000000 mcrf $crf($d),$crf($s) in 010011cccccccccc000000000010000l $br($c,0,$l,lr,0) in 010011dddddaaaaabbbbb0oooo000010 cr$crop($o) $crb($d),$crb($a),$crb($b) in 01001100000000000000000001100100 rfi +in 01001100000000000000000000100100 rfid +in 01001100000000000000001000100100 hrfid in 01001100000000000000000100101100 isync in 010011cccccccccc000001000010000l $br($c,0,$l,ctr,0) in 010111dddddaaaaabbbbbffffftttttr rlwnm{|.}[$r] \ @@ -43,7 +45,7 @@ in 010111dddddaaaaabbbbbffffftttttr rlwnm{|.}[$r] \ in 0101xxdddddaaaaasssssffffftttttr rl{wimi|winm|?|?}[$x]{|.}[$r] \ $reg($a),$reg($d),$dec($s),$dec($f),$dec($t) in 011110dddddaaaaasssssffffff0xxSr rld{icl|icr|ic|imi}[$x]{|.}[$r] \ - $reg($a),$reg($d),$dec($[sssssS]),$dec($f) + $reg($a),$reg($d),$dec($[Ssssss]),$dec($f) in 011110dddddaaaaabbbbbffffff100xr rldc{l|r}[$x]{|.}[$r] \ $reg($a),$reg($d),$reg($b),$dec($f) in 011111ddd0laaaaabbbbb0000u000000 cmp{|l}[$u] \ @@ -56,6 +58,7 @@ in 011111dddddaaaaabbbbb000u0010w1r mulh{d|w}[$w]{u|}[$u]{|.}[$r] \ in 011111dddddaaaaabbbbbott0001010r add{c|e||?}[$t]{|o}[$o]{|.}[$r] \ $reg($d),$reg($a),$reg($b) in 011111ddddd0000000000000m0100110 mf{cr|msr}[$m] $reg($d) +in 011111ddddd0ffffffff000000100110 mfcr $hex($f),$reg($d) in 011111dddddaaaaabbbbb000w0101000 l{w|d}[$w]arx $reg($d),$reg0($a),$reg($b) in 011111dddddaaaaabbbbb0000u101010 ld{|u}[$u]x $reg($d),$reg0($a),$reg($b) in 011111dddddaaaaabbbbb0ooou101110 $ldst($o){|u}[$u]x \ @@ -72,6 +75,7 @@ in 011111dddddaaaaabbbbb0001111100r nor{|.}[$r] $reg($a),$reg($d),$reg($b) in 011111dddddaaaaabbbbbo01z001000r subf{|z}[$z]e{|o}[$o]{|.}[$r] \ $reg($d),$reg($a) in 011111ddddd0ffffffff000100100m00 mt{crf $hex($f),|msr}[$m] $reg($d) +in 011111ddddd000000000000101100100 mtmsrd $reg($d) in 011111sssssaaaaabbbbb0010u101010 std{|u}[$u]x $reg($s),$reg0($a),$reg($b) in 011111sssssaaaaabbbbb001w0101101 st{w|d}[$w]cx. 
$reg($s),$reg0($a),$reg($b) in 011111dddddaaaaa00000o011001010r addze{|o}[$o]{|.}[$r] $reg($d),$reg($a) @@ -97,6 +101,9 @@ in 011111dddddaaaaabbbbbo111u010w1r div{d|w}[$w]{u|}[$u]{|o}[$o]{|.}[$r] \ in 01111100000aaaaabbbbb01110101100 dcbi $reg0($a),$reg($b) in 011111sssssaaaaabbbbb0111011100r nand{|.}[$r] $reg($a),$reg($s),$reg($b) in 01111100000000000000001111100100 slbia +in 011111ddddd00000bbbbb01100100100 slbmte $reg($d),$reg($b) +in 011111ddddd00000bbbbb11010100110 slbmfev $reg($d),$reg($b) +in 011111ddddd00000bbbbb11100100110 slbmfee $reg($d),$reg($b) in 011111ddd00000000000010000000000 mcrxr $crf($d) in 011111dddddaaaaabbbbb10000101010 lswx $reg($d),$reg0($a),$reg($b) in 011111dddddaaaaabbbbb1w000101100 l{w|h}[$w]brx $reg($d),$reg0($a),$reg($b) @@ -108,15 +115,17 @@ in 011111sssssaaaaabbbbb1000011011r srd{|.}[$r] $reg($a),$reg($s),$reg($b) in 01111100000000000000010001101100 tlbsync in 011111ddddd0rrrr0000010010101100 mfsr $reg($d),$dec($r) in 011111dddddaaaaannnnn10010101010 lswi $reg($d),$reg0($a),$dec($n) -in 01111100000000000000010010101100 sync +in 011111000ll000000000010010101100 {sync|?|ptesync|?}[$l] in 011111ddddd00000bbbbb10100100110 mfsrin $reg($d),$reg($b) in 011111sssssaaaaabbbbb10100101010 stswx $reg($s),$reg0($a),$reg($b) in 011111sssssaaaaabbbbb1w100101100 st{w|h}[$w]brx $reg($s),$reg0($a),$reg($b) in 011111sssssaaaaabbbbb101du101110 stf{s|d}[$d]{|u}[$u]x \ $fr($s),{$reg0($a)|$reg($a)}[$u],$reg($b) in 011111sssssaaaaannnnn10110101010 stswi $reg($s),$reg0($a),$dec($n) -in 011111dddddaaaaasssss1100111000r srawi{|.}[$r] $reg($a),$reg($s),$dec($s) +in 011111dddddaaaaasssss1100111000r srawi{|.}[$r] $reg($a),$reg($d),$dec($s) +in 011111dddddaaaaasssss110011101Sr sradi{|.}[$r] $reg($a),$reg($d),$dec($[Ssssss]) in 01111100000000000000011010101100 eieio +in 00000000000000000000001000000000 attn in 011111sssssaaaaa00000111xx11010r exts{h|b|w|?}[$x]{|.}[$r] $reg($a),$reg($s) in 01111100000aaaaabbbbb11110101100 icbi $reg0($a),$reg($b) in 011111sssssaaaaabbbbb11110101110 stfiwx $fr($s),$reg0($a),$reg($b) @@ -196,6 +205,11 @@ spr 1000n11111 hid$dec($n) spr 1001011111 iabr spr 1010111111 dabr spr 1111111111 pir +spr 0000110000 hspr0 +spr 0000110001 hspr1 +spr 0000110110 hdec0 +spr 0000111010 hsrr0 +spr 0000111011 hsrr1 spr xxxxxxxxxx ? 
reg0 00000 0 diff --git a/osfmk/ppc/ppc_init.c b/osfmk/ppc/ppc_init.c index 790a9ce1a..6a79ce40d 100644 --- a/osfmk/ppc/ppc_init.c +++ b/osfmk/ppc/ppc_init.c @@ -27,6 +27,7 @@ */ #include +#include #include #include @@ -45,70 +46,52 @@ #include #include #include +#include #include -extern const char version[]; -extern const char version_variant[]; - extern unsigned int intstack_top_ss; /* declared in start.s */ -extern unsigned int debstackptr; /* declared in start.s */ +extern unsigned int debstackptr; /* declared in start.s */ extern unsigned int debstack_top_ss; /* declared in start.s */ -extern void thandler(void); /* trap handler */ -extern void ihandler(void); /* interrupt handler */ -extern void shandler(void); /* syscall handler */ -extern void chandler(void); /* system choke */ -extern void fpu_switch(void); /* fp handler */ -extern void vec_switch(void); /* vector handler */ -extern void atomic_switch_trap(void); /* fast path atomic thread switch */ - -void (*exception_handlers[])(void) = { - thandler, /* 0x000 INVALID EXCEPTION (T_IN_VAIN) */ - thandler, /* 0x100 System reset (T_RESET) */ - thandler, /* 0x200 Machine check (T_MACHINE_CHECK) */ - thandler, /* 0x300 Data access (T_DATA_ACCESS) */ - thandler, /* 0x400 Instruction access (T_INSTRUCTION_ACCESS) */ - ihandler, /* 0x500 External interrupt (T_INTERRUPT) */ - thandler, /* 0x600 Alignment (T_ALIGNMENT) */ - thandler, /* 0x700 fp exc, ill/priv instr, trap (T_PROGRAM) */ - fpu_switch, /* 0x800 Floating point disabled (T_FP_UNAVAILABLE) */ - ihandler, /* 0x900 Decrementer (T_DECREMENTER) */ - thandler, /* 0xA00 I/O controller interface (T_IO_ERROR) */ - thandler, /* 0xB00 INVALID EXCEPTION (T_RESERVED) */ - shandler, /* 0xC00 System call exception (T_SYSTEM_CALL) */ - thandler, /* 0xD00 Trace (T_TRACE) */ - thandler, /* 0xE00 FP assist (T_FP_ASSIST) */ - thandler, /* 0xF00 Performance monitor (T_PERF_MON) */ - vec_switch, /* 0xF20 VMX (T_VMX) */ - thandler, /* 0x1000 INVALID EXCEPTION (T_INVALID_EXCP0) */ - thandler, /* 0x1100 INVALID EXCEPTION (T_INVALID_EXCP1) */ - thandler, /* 0x1200 INVALID EXCEPTION (T_INVALID_EXCP2) */ - thandler, /* 0x1300 instruction breakpoint (T_INSTRUCTION_BKPT) */ - ihandler, /* 0x1400 system management (T_SYSTEM_MANAGEMENT) */ - thandler, /* 0x1600 Altivec Assist (T_ALTIVEC_ASSIST) */ - ihandler, /* 0x1700 Thermal interruption (T_THERMAL) */ - thandler, /* 0x1800 INVALID EXCEPTION (T_INVALID_EXCP5) */ - thandler, /* 0x1900 INVALID EXCEPTION (T_INVALID_EXCP6) */ - thandler, /* 0x1A00 INVALID EXCEPTION (T_INVALID_EXCP7) */ - thandler, /* 0x1B00 INVALID EXCEPTION (T_INVALID_EXCP8) */ - thandler, /* 0x1C00 INVALID EXCEPTION (T_INVALID_EXCP9) */ - thandler, /* 0x1D00 INVALID EXCEPTION (T_INVALID_EXCP10) */ - thandler, /* 0x1E00 INVALID EXCEPTION (T_INVALID_EXCP11) */ - thandler, /* 0x1F00 INVALID EXCEPTION (T_INVALID_EXCP12) */ - thandler, /* 0x1F00 INVALID EXCEPTION (T_INVALID_EXCP13) */ - thandler, /* 0x2000 Run Mode/Trace (T_RUNMODE_TRACE) */ - - ihandler, /* Software Signal processor (T_SIGP) */ - thandler, /* Software Preemption (T_PREEMPT) */ - ihandler, /* Software INVALID EXCEPTION (T_CSWITCH) */ - ihandler, /* Software Shutdown Context (T_SHUTDOWN) */ - chandler /* Software System choke (crash) (T_CHOKE) */ -}; - int pc_trace_buf[1024] = {0}; int pc_trace_cnt = 1024; +extern unsigned int extPatchMCK; +extern unsigned int extPatch32; +extern unsigned int hwulckPatch_isync; +extern unsigned int hwulckPatch_eieio; +extern unsigned int hwulckbPatch_isync; +extern unsigned int hwulckbPatch_eieio; 
+extern unsigned int mulckPatch_isync; +extern unsigned int mulckPatch_eieio; +extern unsigned int sulckPatch_isync; +extern unsigned int sulckPatch_eieio; +extern unsigned int retfsectPatch_eieio; +extern unsigned int retfsectPatch_isync; + +int forcenap = 0; + +patch_entry_t patch_table[PATCH_TABLE_SIZE] = { + &extPatch32, 0x60000000, PATCH_FEATURE, PatchExt32, + &extPatchMCK, 0x60000000, PATCH_PROCESSOR, CPU_SUBTYPE_POWERPC_970, + &hwulckPatch_isync, 0x60000000, PATCH_FEATURE, PatchLwsync, + &hwulckPatch_eieio, 0x7c2004ac, PATCH_FEATURE, PatchLwsync, + &hwulckbPatch_isync, 0x60000000, PATCH_FEATURE, PatchLwsync, + &hwulckbPatch_eieio, 0x7c2004ac, PATCH_FEATURE, PatchLwsync, + &mulckPatch_isync, 0x60000000, PATCH_FEATURE, PatchLwsync, + &mulckPatch_eieio, 0x7c2004ac, PATCH_FEATURE, PatchLwsync, + &sulckPatch_isync, 0x60000000, PATCH_FEATURE, PatchLwsync, + &sulckPatch_eieio, 0x7c2004ac, PATCH_FEATURE, PatchLwsync, +#if !MACH_LDEBUG + &retfsectPatch_isync, 0x60000000, PATCH_FEATURE, PatchLwsync, + &retfsectPatch_eieio, 0x7c2004ac, PATCH_FEATURE, PatchLwsync +#else + 0, 0, PATCH_INVALID, 0, + 0, 0, PATCH_INVALID, 0 +#endif + }; + void ppc_init(boot_args *args) { int i; @@ -116,9 +99,12 @@ void ppc_init(boot_args *args) char *str; unsigned long addr, videoAddr; unsigned int maxmem; + uint64_t xmaxmem; unsigned int cputrace; - bat_t bat; + unsigned int novmx; extern vm_offset_t static_memory_end; + thread_t thread; + mapping *mp; /* * Setup per_proc info for first cpu. @@ -131,24 +117,26 @@ void ppc_init(boot_args *args) per_proc_info[0].debstackptr = debstackptr; per_proc_info[0].debstack_top_ss = debstack_top_ss; per_proc_info[0].interrupts_enabled = 0; - per_proc_info[0].active_kloaded = (unsigned int) - &active_kloaded[0]; - set_machine_current_thread(&pageout_thread); - set_machine_current_act(&pageout_act); - pageout_thread.top_act = &pageout_act; - pageout_act.thread = &pageout_thread; - per_proc_info[0].pp_preemption_count = 1; + per_proc_info[0].pp_preemption_count = -1; per_proc_info[0].pp_simple_lock_count = 0; per_proc_info[0].pp_interrupt_level = 0; - per_proc_info[0].active_stacks = (unsigned int) - &active_stacks[0]; - per_proc_info[0].need_ast = (unsigned int) - &need_ast[0]; + per_proc_info[0].need_ast = (unsigned int)&need_ast[0]; per_proc_info[0].FPU_owner = 0; per_proc_info[0].VMX_owner = 0; + mp = (mapping *)per_proc_info[0].ppCIOmp; + mp->mpFlags = 0x01000000 | mpSpecial | 1; + mp->mpSpace = invalSpace; machine_slot[0].is_cpu = TRUE; + thread_bootstrap(); + + thread = current_act(); + thread->mact.curctx = &thread->mact.facctx; + thread->mact.facctx.facAct = thread; + thread->mact.cioSpace = invalSpace; /* Initialize copyin/out space to invalid */ + thread->mact.preemption_count = 1; + cpu_init(); /* @@ -159,95 +147,24 @@ void ppc_init(boot_args *args) master_cpu = 0; master_processor = cpu_to_processor(master_cpu); - /* Set up segment registers as VM through space 0 */ - for (i=0; i<=15; i++) { - isync(); - mtsrin((KERNEL_SEG_REG0_VALUE | (i << 20)), i * 0x10000000); - sync(); - } - - static_memory_end = round_page(args->topOfKernelData);; - /* Get platform expert set up */ - PE_init_platform(FALSE, args); - - - /* This is how the BATs get configured */ - /* IBAT[0] maps Segment 0 1:1 */ - /* DBAT[0] maps Segment 0 1:1 */ - /* DBAT[2] maps the I/O Segment 1:1 */ - /* DBAT[3] maps the Video Segment 1:1 */ - - - /* Initialize shadow IBATs */ - shadow_BAT.IBATs[0].upper=BAT_INVALID; - shadow_BAT.IBATs[0].lower=BAT_INVALID; - shadow_BAT.IBATs[1].upper=BAT_INVALID; - 
shadow_BAT.IBATs[1].lower=BAT_INVALID; - shadow_BAT.IBATs[2].upper=BAT_INVALID; - shadow_BAT.IBATs[2].lower=BAT_INVALID; - shadow_BAT.IBATs[3].upper=BAT_INVALID; - shadow_BAT.IBATs[3].lower=BAT_INVALID; - - /* Initialize shadow DBATs */ - shadow_BAT.DBATs[0].upper=BAT_INVALID; - shadow_BAT.DBATs[0].lower=BAT_INVALID; - shadow_BAT.DBATs[1].upper=BAT_INVALID; - shadow_BAT.DBATs[1].lower=BAT_INVALID; - shadow_BAT.DBATs[2].upper=BAT_INVALID; - shadow_BAT.DBATs[2].lower=BAT_INVALID; - shadow_BAT.DBATs[3].upper=BAT_INVALID; - shadow_BAT.DBATs[3].lower=BAT_INVALID; + static_memory_end = round_page_32(args->topOfKernelData);; + + PE_init_platform(FALSE, args); /* Get platform expert set up */ + if (!PE_parse_boot_arg("novmx", &novmx)) novmx=0; /* Special run without VMX? */ + if(novmx) { /* Yeah, turn it off */ + for(i = 0; i < NCPUS; i++) { /* Cycle through all potential processors */ + per_proc_info[i].pf.Available &= ~pfAltivec; /* Turn off Altivec available */ + } + __asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[0].pf.Available)); /* Set live value */ + } - /* If v_baseAddr is non zero, use DBAT3 to map the video segment */ - videoAddr = args->Video.v_baseAddr & 0xF0000000; - if (videoAddr) { - /* start off specifying 1-1 mapping of video seg */ - bat.upper.word = videoAddr; - bat.lower.word = videoAddr; - - bat.upper.bits.bl = 0x7ff; /* size = 256M */ - bat.upper.bits.vs = 1; - bat.upper.bits.vp = 0; - - bat.lower.bits.wimg = PTE_WIMG_IO; - bat.lower.bits.pp = 2; /* read/write access */ - - shadow_BAT.DBATs[3].upper = bat.upper.word; - shadow_BAT.DBATs[3].lower = bat.lower.word; - - sync();isync(); - - mtdbatu(3, BAT_INVALID); /* invalidate old mapping */ - mtdbatl(3, bat.lower.word); - mtdbatu(3, bat.upper.word); - sync();isync(); + if (!PE_parse_boot_arg("fn", &forcenap)) forcenap = 0; /* If force nap not set, make 0 */ + else { + if(forcenap < 2) forcenap = forcenap + 1; /* Else set 1 for off, 2 for on */ + else forcenap = 0; /* Clear for error case */ } - /* Use DBAT2 to map the io segment */ - addr = get_io_base_addr() & 0xF0000000; - if (addr != videoAddr) { - /* start off specifying 1-1 mapping of io seg */ - bat.upper.word = addr; - bat.lower.word = addr; - - bat.upper.bits.bl = 0x7ff; /* size = 256M */ - bat.upper.bits.vs = 1; - bat.upper.bits.vp = 0; - - bat.lower.bits.wimg = PTE_WIMG_IO; - bat.lower.bits.pp = 2; /* read/write access */ - - shadow_BAT.DBATs[2].upper = bat.upper.word; - shadow_BAT.DBATs[2].lower = bat.lower.word; - - sync();isync(); - mtdbatu(2, BAT_INVALID); /* invalidate old mapping */ - mtdbatl(2, bat.lower.word); - mtdbatu(2, bat.upper.word); - sync();isync(); - } - if (!PE_parse_boot_arg("diag", &dgWork.dgFlags)) dgWork.dgFlags=0; /* Set diagnostic flags */ if(dgWork.dgFlags & enaExpTrace) trcWork.traceMask = 0xFFFFFFFF; /* If tracing requested, enable it */ @@ -255,54 +172,29 @@ void ppc_init(boot_args *args) trcWork.traceMask = (trcWork.traceMask & 0xFFFFFFF0) | (cputrace & 0xF); /* Limit to 4 */ } -#if 0 - GratefulDebInit((bootBumbleC *)&(args->Video)); /* Initialize the GratefulDeb debugger */ -#endif - - printf_init(); /* Init this in case we need debugger */ - panic_init(); /* Init this in case we need debugger */ - - /* setup debugging output if one has been chosen */ - PE_init_kprintf(FALSE); - kprintf("kprintf initialized\n"); - - /* create the console for verbose or pretty mode */ - PE_create_console(); - - /* setup console output */ - PE_init_printf(FALSE); - - kprintf("version_variant = %s\n", version_variant); - kprintf("version = %s\n", 
version); - + if(!PE_parse_boot_arg("tb", &trcWork.traceSize)) { /* See if non-default trace buffer size */ #if DEBUG - printf("\n\n\nThis program was compiled using gcc %d.%d for powerpc\n", - __GNUC__,__GNUC_MINOR__); - - /* Processor version information */ - { - unsigned int pvr; - __asm__ ("mfpvr %0" : "=r" (pvr)); - printf("processor version register : 0x%08x\n",pvr); - } - for (i = 0; i < kMaxDRAMBanks; i++) { - if (args->PhysicalDRAM[i].size) - printf("DRAM at 0x%08x size 0x%08x\n", - args->PhysicalDRAM[i].base, - args->PhysicalDRAM[i].size); + trcWork.traceSize = 32; /* Default 32 page trace table for DEBUG */ +#else + trcWork.traceSize = 8; /* Default 8 page trace table for RELEASE */ +#endif } -#endif /* DEBUG */ - /* - * VM initialization, after this we're using page tables... - */ + if(trcWork.traceSize < 1) trcWork.traceSize = 1; /* Minimum size of 1 page */ + if(trcWork.traceSize > 256) trcWork.traceSize = 256; /* Maximum size of 256 pages */ + trcWork.traceSize = trcWork.traceSize * 4096; /* Change page count to size */ + if (!PE_parse_boot_arg("maxmem", &maxmem)) - maxmem=0; + xmaxmem=0; else - maxmem = maxmem * (1024 * 1024); + xmaxmem = (uint64_t)maxmem * (1024 * 1024); - ppc_vm_init(maxmem, args); +/* + * VM initialization, after this we're using page tables... + */ + ppc_vm_init(xmaxmem, args); + PE_init_platform(TRUE, args); machine_startup(args); @@ -313,24 +205,13 @@ ppc_init_cpu( { int i; + proc_info->cpu_flags &= ~SleepState; + if(!(proc_info->next_savearea)) /* Do we have a savearea set up already? */ - proc_info->next_savearea = (savearea *)save_get_init(); /* Get a savearea */ + proc_info->next_savearea = (uint64_t)save_get_init(); /* Get a savearea */ cpu_init(); - - proc_info->pp_preemption_count = 1; - proc_info->pp_simple_lock_count = 0; - proc_info->pp_interrupt_level = 0; - - proc_info->Lastpmap = 0; /* Clear last used space */ - - /* Set up segment registers as VM through space 0 */ - for (i=0; i<=15; i++) { - isync(); - mtsrin((KERNEL_SEG_REG0_VALUE | (i << 20)), i * 0x10000000); - sync(); - } - + ppc_vm_cpu_init(proc_info); ml_thrm_init(); /* Start thermal monitoring on this processor */ diff --git a/osfmk/ppc/ppc_vm_init.c b/osfmk/ppc/ppc_vm_init.c index 81622cb78..bf65fcc7b 100644 --- a/osfmk/ppc/ppc_vm_init.c +++ b/osfmk/ppc/ppc_vm_init.c @@ -47,42 +47,50 @@ #include #include #include -#include #include #include #include #include +#include -#ifdef __MACHO__ #include -#endif -extern unsigned int intstack[]; /* declared in start.s */ -extern unsigned int intstack_top_ss; /* declared in start.s */ +extern const char version[]; +extern const char version_variant[]; -vm_offset_t mem_size; /* Size of actual physical memory present - minus any performance buffer and possibly limited - by mem_limit in bytes */ -vm_offset_t mem_actual; /* The "One True" physical memory size - actually, it's the highest physical address + 1 */ -uint64_t max_mem; /* Size of physical memory (bytes), adjusted by maxmem */ +extern unsigned int intstack[]; /* declared in aligned_data.s */ +extern unsigned int intstack_top_ss; /* declared in aligned_data.s */ -mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX]; -int pmap_mem_regions_count = 0; /* No non-contiguous memory regions */ +addr64_t hash_table_base; /* Hash table base */ +unsigned int hash_table_size; /* Hash table size */ +vm_offset_t taproot_addr; /* (BRINGUP) */ +unsigned int taproot_size; /* (BRINGUP) */ +unsigned int serialmode; /* Serial mode keyboard and console control */ +extern int disableConsoleOutput; 
-mem_region_t free_regions[FREE_REGION_MAX]; -int free_regions_count; +struct shadowBAT shadow_BAT; -#ifndef __MACHO__ -extern unsigned long etext; -#endif +/* + * NOTE: mem_size is bogus on large memory machines. We will pin it to 0x80000000 if there is more than 2 GB + * This is left only for compatibility and max_mem should be used. + */ +vm_offset_t mem_size; /* Size of actual physical memory present + minus any performance buffer and possibly limited + by mem_limit in bytes */ +uint64_t mem_actual; /* The "One True" physical memory size + actually, it's the highest physical address + 1 */ +uint64_t max_mem; /* Size of physical memory (bytes), adjusted by maxmem */ +uint64_t sane_size; /* Memory size to use for defaults calculations */ + + +mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX + 1]; +int pmap_mem_regions_count = 0; /* Assume no non-contiguous memory regions */ unsigned int avail_remaining = 0; vm_offset_t first_avail; vm_offset_t static_memory_end; -extern vm_offset_t avail_next; +addr64_t vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Highest kernel virtual address known to the VM system */ -#ifdef __MACHO__ extern struct mach_header _mh_execute_header; vm_offset_t sectTEXTB; int sectSizeTEXT; @@ -92,234 +100,310 @@ vm_offset_t sectLINKB; int sectSizeLINK; vm_offset_t sectKLDB; int sectSizeKLD; +vm_offset_t sectPRELINKB; +int sectSizePRELINK; vm_offset_t end, etext, edata; -#endif extern unsigned long exception_entry; extern unsigned long exception_end; -void ppc_vm_init(unsigned int mem_limit, boot_args *args) +void ppc_vm_init(uint64_t mem_limit, boot_args *args) { unsigned int htabmask; - unsigned int i, j, batsize, kmapsize; - vm_offset_t addr; + unsigned int i, j, batsize, kmapsize, pvr; + vm_offset_t addr, ioAddr, videoAddr; int boot_task_end_offset; const char *cpus; mapping *mp; - vm_offset_t first_phys_avail; vm_offset_t sizeadj, oldstart; + unsigned int *xtaproot, bank_shift; + uint64_t cbsize, xhid0; - /* Now retrieve addresses for end, edata, and etext - * from MACH-O headers. - */ - sectTEXTB = (vm_offset_t)getsegdatafromheader( - &_mh_execute_header, "__TEXT", §SizeTEXT); - sectDATAB = (vm_offset_t)getsegdatafromheader( - &_mh_execute_header, "__DATA", §SizeDATA); - sectLINKB = (vm_offset_t)getsegdatafromheader( - &_mh_execute_header, "__LINKEDIT", §SizeLINK); - sectKLDB = (vm_offset_t)getsegdatafromheader( - &_mh_execute_header, "__KLD", §SizeKLD); - - etext = (vm_offset_t) sectTEXTB + sectSizeTEXT; - edata = (vm_offset_t) sectDATAB + sectSizeDATA; - end = round_page(getlastaddr()); /* Force end to next page */ -#if DEBUG - kprintf("sectTEXT: %x, size: %x\n", sectTEXTB, sectSizeTEXT); - kprintf("sectDATA: %x, size: %x\n", sectDATAB, sectSizeDATA); - kprintf("sectLINK: %x, size: %x\n", sectLINKB, sectSizeLINK); - kprintf("sectKLD: %x, size: %x\n", sectKLDB, sectSizeKLD); - kprintf("end: %x\n", end); -#endif -/* Stitch valid memory regions together - they may be contiguous - * even though they're not already glued together +/* + * Invalidate all shadow BATs */ - mem_actual = args->PhysicalDRAM[0].base + args->PhysicalDRAM[0].size; /* Initialize to the first region size */ - addr = 0; /* temp use as pointer to previous memory region... */ - for (i = 1; i < kMaxDRAMBanks; i++) { - - if (args->PhysicalDRAM[i].size == 0) continue; /* If region is empty, skip it */ - - if((args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size) > mem_actual) { /* New high? 
*/ - mem_actual = args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size; /* Take the high bid */ - } - - if (args->PhysicalDRAM[i].base == /* Does the end of the last hit the start of the next? */ - args->PhysicalDRAM[addr].base + - args->PhysicalDRAM[addr].size) { - kprintf("region 0x%08x size 0x%08x joining region 0x%08x size 0x%08x\n", - args->PhysicalDRAM[addr].base, args->PhysicalDRAM[addr].size, - args->PhysicalDRAM[i].base, args->PhysicalDRAM[i].size); - - args->PhysicalDRAM[addr].size += args->PhysicalDRAM[i].size; /* Join them */ - args->PhysicalDRAM[i].size = 0; - continue; - } - /* This is now last non-zero region to compare against */ - addr = i; - } - /* Go through the list of memory regions passed in via the args + /* Initialize shadow IBATs */ + shadow_BAT.IBATs[0].upper=BAT_INVALID; + shadow_BAT.IBATs[0].lower=BAT_INVALID; + shadow_BAT.IBATs[1].upper=BAT_INVALID; + shadow_BAT.IBATs[1].lower=BAT_INVALID; + shadow_BAT.IBATs[2].upper=BAT_INVALID; + shadow_BAT.IBATs[2].lower=BAT_INVALID; + shadow_BAT.IBATs[3].upper=BAT_INVALID; + shadow_BAT.IBATs[3].lower=BAT_INVALID; + + /* Initialize shadow DBATs */ + shadow_BAT.DBATs[0].upper=BAT_INVALID; + shadow_BAT.DBATs[0].lower=BAT_INVALID; + shadow_BAT.DBATs[1].upper=BAT_INVALID; + shadow_BAT.DBATs[1].lower=BAT_INVALID; + shadow_BAT.DBATs[2].upper=BAT_INVALID; + shadow_BAT.DBATs[2].lower=BAT_INVALID; + shadow_BAT.DBATs[3].upper=BAT_INVALID; + shadow_BAT.DBATs[3].lower=BAT_INVALID; + + + /* + * Go through the list of memory regions passed in via the boot_args * and copy valid entries into the pmap_mem_regions table, adding * further calculated entries. + * + * boot_args version 1 has address instead of page numbers + * in the PhysicalDRAM banks, set bank_shift accordingly. */ + bank_shift = 0; + if (args->Version == kBootArgsVersion1) bank_shift = 12; + pmap_mem_regions_count = 0; - mem_size = 0; /* Will use to total memory found so far */ - - for (i = 0; i < kMaxDRAMBanks; i++) { - if (args->PhysicalDRAM[i].size == 0) - continue; - - /* The following should only happen if memory size has - been artificially reduced with -m */ - if (mem_limit > 0 && - mem_size + args->PhysicalDRAM[i].size > mem_limit) - args->PhysicalDRAM[i].size = mem_limit - mem_size; + max_mem = 0; /* Will use to total memory found so far */ + mem_actual = 0; /* Actual size of memory */ + + if (mem_limit == 0) mem_limit = 0xFFFFFFFFFFFFFFFFULL; /* If there is no set limit, use all */ + + for (i = 0; i < kMaxDRAMBanks; i++) { /* Look at all of the banks */ + + cbsize = (uint64_t)args->PhysicalDRAM[i].size << (12 - bank_shift); /* Remember current size */ + + if (!cbsize) continue; /* Skip if the bank is empty */ + + mem_actual = mem_actual + cbsize; /* Get true memory size */ - /* We've found a region, tally memory */ + if(mem_limit == 0) continue; /* If we hit restriction, just keep counting */ - pmap_mem_regions[pmap_mem_regions_count].start = - args->PhysicalDRAM[i].base; - pmap_mem_regions[pmap_mem_regions_count].end = - args->PhysicalDRAM[i].base + - args->PhysicalDRAM[i].size; + if (cbsize > mem_limit) cbsize = mem_limit; /* Trim to max allowed */ + max_mem += cbsize; /* Total up what we have so far */ + mem_limit = mem_limit - cbsize; /* Calculate amount left to do */ + + pmap_mem_regions[pmap_mem_regions_count].mrStart = args->PhysicalDRAM[i].base >> bank_shift; /* Set the start of the bank */ + pmap_mem_regions[pmap_mem_regions_count].mrAStart = pmap_mem_regions[pmap_mem_regions_count].mrStart; /* Set the start of allocatable area */ + 
pmap_mem_regions[pmap_mem_regions_count].mrEnd = ((uint64_t)args->PhysicalDRAM[i].base >> bank_shift) + (cbsize >> 12) - 1; /* Set the end address of bank */ + pmap_mem_regions[pmap_mem_regions_count].mrAEnd = pmap_mem_regions[pmap_mem_regions_count].mrEnd; /* Set the end address of allocatable area */ /* Regions must be provided in ascending order */ assert ((pmap_mem_regions_count == 0) || - pmap_mem_regions[pmap_mem_regions_count].start > - pmap_mem_regions[pmap_mem_regions_count-1].start); - - if (pmap_mem_regions_count > 0) { - /* we add on any pages not in the first memory - * region to the avail_remaining count. The first - * memory region is used for mapping everything for - * bootup and is taken care of specially. - */ - avail_remaining += - args->PhysicalDRAM[i].size / PPC_PGBYTES; - } - - /* Keep track of how much memory we've found */ + pmap_mem_regions[pmap_mem_regions_count].mrStart > + pmap_mem_regions[pmap_mem_regions_count-1].mrStart); - mem_size += args->PhysicalDRAM[i].size; - - /* incremement number of regions found */ - pmap_mem_regions_count++; + pmap_mem_regions_count++; /* Count this region */ } + + mem_size = (unsigned int)max_mem; /* Get size of memory */ + if(max_mem > 0x0000000080000000ULL) mem_size = 0x80000000; /* Pin at 2 GB */ - max_mem = mem_size; + sane_size = max_mem; /* Calculate a sane value to use for init */ + if(sane_size > (addr64_t)(VM_MAX_KERNEL_ADDRESS + 1)) + sane_size = (addr64_t)(VM_MAX_KERNEL_ADDRESS + 1); /* If flush with ram, use addressible portion */ - kprintf("mem_size: %d M\n",mem_size / (1024 * 1024)); - /* - * Initialize the pmap system, using space above `first_avail' - * for the necessary data structures. - * NOTE : assume that we'll have enough space mapped in already - */ +/* + * Initialize the pmap system, using space above `first_avail' + * for the necessary data structures. + * NOTE : assume that we'll have enough space mapped in already + */ + + first_avail = static_memory_end; - first_phys_avail = static_memory_end; - first_avail = adjust_bat_limit(first_phys_avail, 0, FALSE, FALSE); +/* Now retrieve addresses for end, edata, and etext + * from MACH-O headers. 
+ */ + sectTEXTB = (vm_offset_t)getsegdatafromheader( + &_mh_execute_header, "__TEXT", &sectSizeTEXT); + sectDATAB = (vm_offset_t)getsegdatafromheader( + &_mh_execute_header, "__DATA", &sectSizeDATA); + sectLINKB = (vm_offset_t)getsegdatafromheader( + &_mh_execute_header, "__LINKEDIT", &sectSizeLINK); + sectKLDB = (vm_offset_t)getsegdatafromheader( + &_mh_execute_header, "__KLD", &sectSizeKLD); + sectPRELINKB = (vm_offset_t)getsegdatafromheader( + &_mh_execute_header, "__PRELINK", &sectSizePRELINK); + + etext = (vm_offset_t) sectTEXTB + sectSizeTEXT; + edata = (vm_offset_t) sectDATAB + sectSizeDATA; + end = round_page_32(getlastaddr()); /* Force end to next page */ - kmapsize = (round_page(exception_end) - trunc_page(exception_entry)) + /* Get size we will map later */ - (round_page(sectTEXTB+sectSizeTEXT) - trunc_page(sectTEXTB)) + - (round_page(sectDATAB+sectSizeDATA) - trunc_page(sectDATAB)) + - (round_page(sectLINKB+sectSizeLINK) - trunc_page(sectLINKB)) + - (round_page(sectKLDB+sectSizeKLD) - trunc_page(sectKLDB)) + - (round_page(static_memory_end) - trunc_page(end)); + kmapsize = (round_page_32(exception_end) - trunc_page_32(exception_entry)) + /* Get size we will map later */ + (round_page_32(sectTEXTB+sectSizeTEXT) - trunc_page_32(sectTEXTB)) + + (round_page_32(sectDATAB+sectSizeDATA) - trunc_page_32(sectDATAB)) + + (round_page_32(sectLINKB+sectSizeLINK) - trunc_page_32(sectLINKB)) + + (round_page_32(sectKLDB+sectSizeKLD) - trunc_page_32(sectKLDB)) + + (round_page_32(sectPRELINKB+sectSizePRELINK) - trunc_page_32(sectPRELINKB)) + + (round_page_32(static_memory_end) - trunc_page_32(end)); - pmap_bootstrap(mem_size,&first_avail,&first_phys_avail, kmapsize); + pmap_bootstrap(max_mem, &first_avail, kmapsize); -#ifdef __MACHO__ -#if DEBUG - kprintf("Mapping memory:\n"); - kprintf(" exception vector: %08X, %08X - %08X\n", trunc_page(exception_entry), - trunc_page(exception_entry), round_page(exception_end)); - kprintf(" sectTEXTB: %08X, %08X - %08X\n", trunc_page(sectTEXTB), - trunc_page(sectTEXTB), round_page(sectTEXTB+sectSizeTEXT)); - kprintf(" sectDATAB: %08X, %08X - %08X\n", trunc_page(sectDATAB), - trunc_page(sectDATAB), round_page(sectDATAB+sectSizeDATA)); - kprintf(" sectLINKB: %08X, %08X - %08X\n", trunc_page(sectLINKB), - trunc_page(sectLINKB), round_page(sectLINKB+sectSizeLINK)); - kprintf(" sectKLDB: %08X, %08X - %08X\n", trunc_page(sectKLDB), - trunc_page(sectKLDB), round_page(sectKLDB+sectSizeKLD)); - kprintf(" end: %08X, %08X - %08X\n", trunc_page(end), - trunc_page(end), static_memory_end); -#endif /* DEBUG */ - pmap_map(trunc_page(exception_entry), trunc_page(exception_entry), - round_page(exception_end), VM_PROT_READ|VM_PROT_EXECUTE); - pmap_map(trunc_page(sectTEXTB), trunc_page(sectTEXTB), - round_page(sectTEXTB+sectSizeTEXT), VM_PROT_READ|VM_PROT_EXECUTE); - pmap_map(trunc_page(sectDATAB), trunc_page(sectDATAB), - round_page(sectDATAB+sectSizeDATA), VM_PROT_READ|VM_PROT_WRITE); - - - /* The KLD and LINKEDIT segments are unloaded in toto after boot completes, - * but via ml_static_mfree(), through IODTFreeLoaderInfo(). Hence, we have - * to map both segments page-by-page. 
- */ - for (addr = trunc_page(sectKLDB); - addr < round_page(sectKLDB+sectSizeKLD); + pmap_map(trunc_page_32(exception_entry), trunc_page_32(exception_entry), + round_page_32(exception_end), VM_PROT_READ|VM_PROT_EXECUTE); + + pmap_map(trunc_page_32(sectTEXTB), trunc_page_32(sectTEXTB), + round_page_32(sectTEXTB+sectSizeTEXT), VM_PROT_READ|VM_PROT_EXECUTE); + + pmap_map(trunc_page_32(sectDATAB), trunc_page_32(sectDATAB), + round_page_32(sectDATAB+sectSizeDATA), VM_PROT_READ|VM_PROT_WRITE); + +/* The KLD and LINKEDIT segments are unloaded in toto after boot completes, +* but via ml_static_mfree(), through IODTFreeLoaderInfo(). Hence, we have +* to map both segments page-by-page. +*/ + + for (addr = trunc_page_32(sectPRELINKB); + addr < round_page_32(sectPRELINKB+sectSizePRELINK); addr += PAGE_SIZE) { - pmap_enter(kernel_pmap, addr, addr, + pmap_enter(kernel_pmap, addr, addr>>12, VM_PROT_READ|VM_PROT_WRITE, VM_WIMG_USE_DEFAULT, TRUE); + } - for (addr = trunc_page(sectLINKB); - addr < round_page(sectLINKB+sectSizeLINK); + for (addr = trunc_page_32(sectKLDB); + addr < round_page_32(sectKLDB+sectSizeKLD); addr += PAGE_SIZE) { - pmap_enter(kernel_pmap, addr, addr, + pmap_enter(kernel_pmap, addr, addr>>12, VM_PROT_READ|VM_PROT_WRITE, VM_WIMG_USE_DEFAULT, TRUE); + } + for (addr = trunc_page_32(sectLINKB); + addr < round_page_32(sectLINKB+sectSizeLINK); + addr += PAGE_SIZE) { + + pmap_enter(kernel_pmap, addr, addr>>12, + VM_PROT_READ|VM_PROT_WRITE, + VM_WIMG_USE_DEFAULT, TRUE); + + } + + pmap_enter(kernel_pmap, &sharedPage, (unsigned int)&sharedPage >> 12, /* Make sure the sharedPage is mapped */ + VM_PROT_READ|VM_PROT_WRITE, + VM_WIMG_USE_DEFAULT, TRUE); + + pmap_enter(kernel_pmap, &lowGlo, (unsigned int)&lowGlo >> 12, /* Make sure the low memory globals are mapped */ + VM_PROT_READ|VM_PROT_WRITE, + VM_WIMG_USE_DEFAULT, TRUE); + /* * We need to map the remainder page-by-page because some of this will * be released later, but not all. Ergo, no block mapping here */ - for(addr = trunc_page(end); addr < round_page(static_memory_end); addr += PAGE_SIZE) { - pmap_enter(kernel_pmap, addr, addr, + + for(addr = trunc_page_32(end); addr < round_page_32(static_memory_end); addr += PAGE_SIZE) { + + pmap_enter(kernel_pmap, addr, addr>>12, VM_PROT_READ|VM_PROT_WRITE, VM_WIMG_USE_DEFAULT, TRUE); - } -#endif /* __MACHO__ */ -#if DEBUG - for (i=0 ; i < free_regions_count; i++) { - kprintf("Free region start 0x%08x end 0x%08x\n", - free_regions[i].start,free_regions[i].end); } -#endif + + MapUserAddressSpaceInit(); /* Go initialize copy in/out */ /* - * Note: the shadow BAT registers were already loaded in ppc_init.c + * At this point, there is enough mapped memory and all hw mapping structures are + * allocated and initialized. Here is where we turn on translation for the + * VERY first time.... + * + * NOTE: Here is where our very first interruption will happen. + * */ + hw_start_trans(); /* Start translating */ + +#if 0 + GratefulDebInit((bootBumbleC *)&(args->Video)); /* Initialize the GratefulDeb debugger */ +#endif + + + printf_init(); /* Init this in case we need debugger */ + panic_init(); /* Init this in case we need debugger */ + PE_init_kprintf(TRUE); /* Note on PPC we only call this after VM is set up */ + + kprintf("kprintf initialized\n"); + + serialmode = 0; /* Assume normal keyboard and console */ + if(PE_parse_boot_arg("serial", &serialmode)) { /* Do we want a serial keyboard and/or console? 
*/ + kprintf("Serial mode specified: %08X\n", serialmode); + } + if(serialmode & 1) { /* Start serial if requested */ + (void)switch_to_serial_console(); /* Switch into serial mode */ + disableConsoleOutput = FALSE; /* Allow printfs to happen */ + } + + kprintf("max_mem: %ld M\n", (unsigned long)(max_mem >> 20)); + kprintf("version_variant = %s\n", version_variant); + kprintf("version = %s\n\n", version); + __asm__ ("mfpvr %0" : "=r" (pvr)); + kprintf("proc version = %08x\n", pvr); + if(per_proc_info[0].pf.Available & pf64Bit) { /* 64-bit processor? */ + xhid0 = hid0get64(); /* Get the hid0 */ + if(xhid0 & (1ULL << (63 - 19))) kprintf("Time base is externally clocked\n"); + else kprintf("Time base is internally clocked\n"); + } + + + taproot_size = PE_init_taproot(&taproot_addr); /* (BRINGUP) See if there is a taproot */ + if(taproot_size) { /* (BRINGUP) */ + kprintf("TapRoot card configured to use vaddr = %08X, size = %08X\n", taproot_addr, taproot_size); + bcopy_nc((void *)version, (void *)(taproot_addr + 16), strlen(version)); /* (BRINGUP) Pass it our kernel version */ + __asm__ volatile("eieio"); /* (BRINGUP) */ + xtaproot = (unsigned int *)taproot_addr; /* (BRINGUP) */ + xtaproot[0] = 1; /* (BRINGUP) */ + __asm__ volatile("eieio"); /* (BRINGUP) */ + } + + PE_create_console(); /* create the console for verbose or pretty mode */ - LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]); /* Load up real IBATs from shadows */ - LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]); /* Load up real DBATs from shadows */ + /* setup console output */ + PE_init_printf(FALSE); #if DEBUG - for(i=0; i<4; i++) kprintf("DBAT%1d: %08X %08X\n", - i, shadow_BAT.DBATs[i].upper, shadow_BAT.DBATs[i].lower); - for(i=0; i<4; i++) kprintf("IBAT%1d: %08X %08X\n", - i, shadow_BAT.IBATs[i].upper, shadow_BAT.IBATs[i].lower); + printf("\n\n\nThis program was compiled using gcc %d.%d for powerpc\n", + __GNUC__,__GNUC_MINOR__); + + + /* Processor version information */ + { + unsigned int pvr; + __asm__ ("mfpvr %0" : "=r" (pvr)); + printf("processor version register : %08X\n", pvr); + } + + kprintf("Args at %08X\n", args); + for (i = 0; i < pmap_mem_regions_count; i++) { + printf("DRAM at %08X size %08X\n", + args->PhysicalDRAM[i].base, + args->PhysicalDRAM[i].size); + } +#endif /* DEBUG */ + +#if DEBUG + kprintf("Mapped memory:\n"); + kprintf(" exception vector: %08X, %08X - %08X\n", trunc_page_32(exception_entry), + trunc_page_32(exception_entry), round_page_32(exception_end)); + kprintf(" sectTEXTB: %08X, %08X - %08X\n", trunc_page_32(sectTEXTB), + trunc_page_32(sectTEXTB), round_page_32(sectTEXTB+sectSizeTEXT)); + kprintf(" sectDATAB: %08X, %08X - %08X\n", trunc_page_32(sectDATAB), + trunc_page_32(sectDATAB), round_page_32(sectDATAB+sectSizeDATA)); + kprintf(" sectLINKB: %08X, %08X - %08X\n", trunc_page_32(sectLINKB), + trunc_page_32(sectLINKB), round_page_32(sectLINKB+sectSizeLINK)); + kprintf(" sectKLDB: %08X, %08X - %08X\n", trunc_page_32(sectKLDB), + trunc_page_32(sectKLDB), round_page_32(sectKLDB+sectSizeKLD)); + kprintf(" end: %08X, %08X - %08X\n", trunc_page_32(end), + trunc_page_32(end), static_memory_end); + #endif + + return; } void ppc_vm_cpu_init( struct per_proc_info *proc_info) { - hash_table_init(hash_table_base, hash_table_size); - - LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]); - LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]); - - sync();isync(); + hw_setup_trans(); /* Set up hardware needed for translation */ + hw_start_trans(); /* Start translating */ } diff --git a/osfmk/ppc/proc_reg.h 
b/osfmk/ppc/proc_reg.h index 79fce25c6..c9edb399a 100644 --- a/osfmk/ppc/proc_reg.h +++ b/osfmk/ppc/proc_reg.h @@ -34,13 +34,13 @@ /* Define some useful masks that convert from bit numbers */ #if __PPC__ -#if _BIG_ENDIAN +#ifdef __BIG_ENDIAN__ #ifndef ENDIAN_MASK #define ENDIAN_MASK(val,size) (1 << ((size-1) - val)) #endif #else #error code not ported to little endian targets yet -#endif /* _BIG_ENDIAN */ +#endif /* __BIG_ENDIAN__ */ #endif /* __PPC__ */ #define MASK32(PART) ENDIAN_MASK(PART ## _BIT, 32) @@ -55,7 +55,8 @@ /* Defines for decoding the MSR bits */ -#define MSR_SF_BIT 0 +#define MSR_SF_BIT 0 +#define MSR_HV_BIT 3 #define MSR_RES1_BIT 1 #define MSR_RES2_BIT 2 #define MSR_RES3_BIT 3 @@ -142,18 +143,33 @@ #define SR_UNUSED_BY_KERN_NUM 13 #define SR_COPYIN_NAME sr14 #define SR_COPYIN_NUM 14 +#define BAT_INVALID 0 /* DSISR bits on data access exceptions */ #define DSISR_IO_BIT 0 /* NOT USED on 601 */ #define DSISR_HASH_BIT 1 +#define DSISR_NOEX_BIT 3 #define DSISR_PROT_BIT 4 #define DSISR_IO_SPC_BIT 5 #define DSISR_WRITE_BIT 6 #define DSISR_WATCH_BIT 9 #define DSISR_EIO_BIT 11 +#define dsiMiss 0x40000000 +#define dsiMissb 1 +#define dsiNoEx 0x10000000 +#define dsiProt 0x08000000 +#define dsiInvMode 0x04000000 +#define dsiStore 0x02000000 +#define dsiAC 0x00400000 +#define dsiSeg 0x00200000 +#define dsiValid 0x5E600000 +#define dsiSpcNest 0x00010000 /* Special nest - software flag */ +#define dsiSpcNestb 15 /* Special nest - software flag */ +#define dsiSoftware 0x0000FFFF + /* SRR1 bits on data/instruction translation exceptions */ #define SRR1_TRANS_HASH_BIT 1 @@ -168,41 +184,20 @@ #define SRR1_PRG_PRV_INS_BIT 13 #define SRR1_PRG_TRAP_BIT 14 -/* BAT information */ - -/* Constants used when setting mask values */ - -#define BAT_INVALID 0 - /* * Virtual to physical mapping macros/structures. * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page. 
*/ -#define CACHE_LINE_SIZE 32 -#define CACHE_LINE_POW2 5 -#define cache_align(x) (((x) + CACHE_LINE_SIZE-1) & ~(CACHE_LINE_SIZE - 1)) - #define PTE1_WIMG_GUARD_BIT 28 /* Needed for assembler */ #define PTE1_REFERENCED_BIT 23 /* ditto */ #define PTE1_CHANGED_BIT 24 #define PTE0_HASH_ID_BIT 25 -#define PTE_NULL ((pte_t*) NULL) /* No pte found/associated with this */ -#define PTE_EMPTY 0x7fffffbf /* Value in the pte0.word of a free pte */ - -#define PTE_WIMG_CB_CACHED 0 /* cached, writeback */ -#define PTE_WIMG_CB_CACHED_GUARDED 1 /* cached, writeback, guarded */ -#define PTE_WIMG_CB_CACHED_COHERENT 2 /* cached, writeback, coherent (default) */ -#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED 3 /* cached, writeback, coherent, guarded */ -#define PTE_WIMG_UNCACHED 4 /* uncached */ -#define PTE_WIMG_UNCACHED_GUARDED 5 /* uncached, guarded */ -#define PTE_WIMG_UNCACHED_COHERENT 6 /* uncached, coherentt */ -#define PTE_WIMG_UNCACHED_COHERENT_GUARDED 7 /* uncached, coherent, guarded */ -#define PTE_WIMG_WT_CACHED 8 /* cached, writethru */ -#define PTE_WIMG_WT_CACHED_GUARDED 9 /* cached, writethru, guarded */ -#define PTE_WIMG_WT_CACHED_COHERENT 10 /* cached, writethru, coherent */ -#define PTE_WIMG_WT_CACHED_COHERENT_GUARDED 11 /* cached, writethru, coherent, guarded */ +#define PTE_WIMG_CB_CACHED_COHERENT 0 /* cached, writeback, coherent (default) */ +#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED 1 /* cached, writeback, coherent, guarded */ +#define PTE_WIMG_UNCACHED_COHERENT 2 /* uncached, coherentt */ +#define PTE_WIMG_UNCACHED_COHERENT_GUARDED 3 /* uncached, coherent, guarded */ #define PTE_WIMG_DEFAULT PTE_WIMG_CB_CACHED_COHERENT #define PTE_WIMG_IO PTE_WIMG_UNCACHED_COHERENT_GUARDED @@ -212,191 +207,14 @@ #ifndef ASSEMBLER #ifdef __GNUC__ -#if _BIG_ENDIAN == 0 -#error - bitfield structures are not checked for bit ordering in words -#endif /* _BIG_ENDIAN */ - /* Structures and types for machine registers */ -typedef union { - unsigned int word; - struct { - unsigned int htaborg : 16; - unsigned int reserved : 7; - unsigned int htabmask : 9; - } bits; -} sdr1_t; - -/* Block mapping registers. These values are model dependent. - * Eventually, we will need to up these to 64 bit values. - */ - -#define blokValid 0x1FFE0000 -#define batMin 0x00020000 -#define batMax 0x10000000 -#define batICnt 4 -#define batDCnt 4 - -/* BAT register structures. - * Not used for standard mappings, but may be used - * for mapping devices. 
Note that the 601 has a - * different BAT layout than the other PowerPC processors - */ - -typedef union { - unsigned int word; - struct { - unsigned int blpi : 15; - unsigned int reserved : 10; - unsigned int wim : 3; - unsigned int ks : 1; - unsigned int ku : 1; - unsigned int pp : 2; - } bits; -} bat601u_t; - -typedef union { - unsigned int word; - struct { - unsigned int pbn : 15; - unsigned int reserved : 10; - unsigned int valid : 1; - unsigned int bsm : 6; - } bits; -} bat601l_t; - -typedef struct bat601_t { - bat601u_t upper; - bat601l_t lower; -} bat601_t; - -typedef union { - unsigned int word; - struct { - unsigned int bepi : 15; - unsigned int reserved : 4; - unsigned int bl : 11; - unsigned int vs : 1; - unsigned int vp : 1; - } bits; -} batu_t; - -typedef union { - unsigned int word; - struct { - unsigned int brpn : 15; - unsigned int reserved : 10; - unsigned int wimg : 4; - unsigned int reserved2 : 1; - unsigned int pp : 2; - } bits; -} batl_t; - -typedef struct bat_t { - batu_t upper; - batl_t lower; -} bat_t; - -/* PTE entries - * Used extensively for standard mappings - */ - -typedef union { - unsigned int word; - struct { - unsigned int valid : 1; - unsigned int segment_id : 24; - unsigned int hash_id : 1; - unsigned int page_index : 6; /* Abbreviated */ - } bits; - struct { - unsigned int valid : 1; - unsigned int not_used : 5; - unsigned int segment_id : 19; /* Least Sig 19 bits */ - unsigned int hash_id : 1; - unsigned int page_index : 6; - } hash_bits; -} pte0_t; - -typedef union { - unsigned int word; - struct { - unsigned int phys_page : 20; - unsigned int reserved3 : 3; - unsigned int referenced : 1; - unsigned int changed : 1; - unsigned int wimg : 4; - unsigned int reserved1 : 1; - unsigned int protection : 2; - } bits; -} pte1_t; - -typedef struct pte_t { - pte0_t pte0; - pte1_t pte1; -} pte_t; - -/* - * A virtual address is decoded into various parts when looking for its PTE - */ - -typedef struct va_full_t { - unsigned int seg_num : 4; - unsigned int page_index : 16; - unsigned int byte_ofs : 12; -} va_full_t; - -typedef struct va_abbrev_t { /* use bits.abbrev for abbreviated page index */ - unsigned int seg_num : 4; - unsigned int page_index : 6; - unsigned int junk : 10; - unsigned int byte_ofs : 12; -} va_abbrev_t; - -typedef union { - unsigned int word; - va_full_t full; - va_abbrev_t abbrev; -} virtual_addr_t; - -/* A physical address can be split up into page and offset */ - -typedef struct pa_t { - unsigned int page_no : 20; - unsigned int offset : 12; -} pa_t; - -typedef union { - unsigned int word; - pa_t bits; -} physical_addr_t; /* * C-helper inline functions for accessing machine registers follow. */ -#ifdef __ELF__ -#define __CASMNL__ ";" -#else -#define __CASMNL__ "@" -#endif - -/* Return the current GOT pointer */ - -extern unsigned int get_got(void); - -extern __inline__ unsigned int get_got(void) -{ - unsigned int result; -#ifndef __ELF__ - __asm__ volatile("mr %0, r2" : "=r" (result)); -#else - __asm__ volatile("mr %0, 2" : "=r" (result)); -#endif - return result; -} - /* * Various memory/IO synchronisation instructions */ @@ -426,25 +244,6 @@ extern __inline__ unsigned int get_got(void) __asm__ volatile("isync") -/* - * This guy will make sure all tlbs on all processors finish their tlbies - */ -#define tlbsync() \ - __asm__ volatile("tlbsync") - - - /* Invalidate TLB entry. Caution, requires context synchronization. 
- */ -extern void tlbie(unsigned int val); - -extern __inline__ void tlbie(unsigned int val) -{ - __asm__ volatile("tlbie %0" : : "r" (val)); - return; -} - - - /* * Access to various system registers */ @@ -480,54 +279,6 @@ extern __inline__ unsigned int mfmsr(void) return result; } -/* mtsr and mfsr must be macros since SR must be hardcoded */ - -#if __ELF__ -#define mtsr(SR, REG) \ - __asm__ volatile("sync" __CASMNL__ "mtsr %0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG)); -#define mfsr(REG, SR) \ - __asm__ volatile("mfsr %0, %1" : "=r" (REG) : "i" (SR)); -#else -#define mtsr(SR, REG) \ - __asm__ volatile("sync" __CASMNL__ "mtsr sr%0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG)); - -#define mfsr(REG, SR) \ - __asm__ volatile("mfsr %0, sr%1" : "=r" (REG) : "i" (SR)); -#endif - - -extern void mtsrin(unsigned int val, unsigned int reg); - -extern __inline__ void mtsrin(unsigned int val, unsigned int reg) -{ - __asm__ volatile("sync" __CASMNL__ "mtsrin %0, %1" __CASMNL__ " isync" : : "r" (val), "r" (reg)); - return; -} - -extern unsigned int mfsrin(unsigned int reg); - -extern __inline__ unsigned int mfsrin(unsigned int reg) -{ - unsigned int result; - __asm__ volatile("mfsrin %0, %1" : "=r" (result) : "r" (reg)); - return result; -} - -extern void mtsdr1(unsigned int val); - -extern __inline__ void mtsdr1(unsigned int val) -{ - __asm__ volatile("mtsdr1 %0" : : "r" (val)); - return; -} - -extern void mtdar(unsigned int val); - -extern __inline__ void mtdar(unsigned int val) -{ - __asm__ volatile("mtdar %0" : : "r" (val)); - return; -} extern unsigned int mfdar(void); @@ -546,23 +297,6 @@ extern __inline__ void mtdec(unsigned int val) return; } -extern int isync_mfdec(void); - -extern __inline__ int isync_mfdec(void) -{ - int result; - __asm__ volatile("isync" __CASMNL__ "mfdec %0" : "=r" (result)); - return result; -} - -/* Read and write the value from the real-time clock - * or time base registers. Note that you have to - * use the right ones depending upon being on - * 601 or 603/604. Care about carries between - * the words and using the right registers must be - * done by the calling function. 
- */ - extern void mttb(unsigned int val); extern __inline__ void mttb(unsigned int val) @@ -597,48 +331,6 @@ extern __inline__ unsigned int mftbu(void) return result; } -extern void mtrtcl(unsigned int val); - -extern __inline__ void mtrtcl(unsigned int val) -{ - __asm__ volatile("mtspr 21,%0" : : "r" (val)); - return; -} - -extern unsigned int mfrtcl(void); - -extern __inline__ unsigned int mfrtcl(void) -{ - unsigned int result; - __asm__ volatile("mfspr %0,5" : "=r" (result)); - return result; -} - -extern void mtrtcu(unsigned int val); - -extern __inline__ void mtrtcu(unsigned int val) -{ - __asm__ volatile("mtspr 20,%0" : : "r" (val)); - return; -} - -extern unsigned int mfrtcu(void); - -extern __inline__ unsigned int mfrtcu(void) -{ - unsigned int result; - __asm__ volatile("mfspr %0,4" : "=r" (result)); - return result; -} - -extern void mtl2cr(unsigned int val); - -extern __inline__ void mtl2cr(unsigned int val) -{ - __asm__ volatile("mtspr l2cr, %0" : : "r" (val)); - return; -} - extern unsigned int mfl2cr(void); extern __inline__ unsigned int mfl2cr(void) @@ -696,18 +388,6 @@ extern unsigned long mfsda(void); /* macros since the argument n is a hard-coded constant */ -#define mtibatu(n, reg) __asm__ volatile("mtibatu " # n ", %0" : : "r" (reg)) -#define mtibatl(n, reg) __asm__ volatile("mtibatl " # n ", %0" : : "r" (reg)) - -#define mtdbatu(n, reg) __asm__ volatile("mtdbatu " # n ", %0" : : "r" (reg)) -#define mtdbatl(n, reg) __asm__ volatile("mtdbatl " # n ", %0" : : "r" (reg)) - -#define mfibatu(reg, n) __asm__ volatile("mfibatu %0, " # n : "=r" (reg)) -#define mfibatl(reg, n) __asm__ volatile("mfibatl %0, " # n : "=r" (reg)) - -#define mfdbatu(reg, n) __asm__ volatile("mfdbatu %0, " # n : "=r" (reg)) -#define mfdbatl(reg, n) __asm__ volatile("mfdbatl %0, " # n : "=r" (reg)) - #define mtsprg(n, reg) __asm__ volatile("mtsprg " # n ", %0" : : "r" (reg)) #define mfsprg(reg, n) __asm__ volatile("mfsprg %0, " # n : "=r" (reg)) diff --git a/osfmk/ppc/rtclock.c b/osfmk/ppc/rtclock.c index a44d3ef73..0ee1b0476 100644 --- a/osfmk/ppc/rtclock.c +++ b/osfmk/ppc/rtclock.c @@ -34,8 +34,6 @@ * real-time clock. 
*/ -#include - #include #include @@ -43,7 +41,10 @@ #include #include +#include + #include /* HZ */ +#include #include #include @@ -79,9 +80,6 @@ int calend_init(void); kern_return_t calend_gettime( mach_timespec_t *cur_time); -kern_return_t calend_settime( - mach_timespec_t *cur_time); - kern_return_t calend_getattr( clock_flavor_t flavor, clock_attr_t attr, @@ -89,73 +87,81 @@ kern_return_t calend_getattr( struct clock_ops calend_ops = { calend_config, calend_init, - calend_gettime, calend_settime, + calend_gettime, 0, calend_getattr, 0, 0, }; /* local data declarations */ -static struct rtclock { - mach_timespec_t calend_offset; - boolean_t calend_is_set; +static struct rtclock_calend { + uint32_t epoch; + uint32_t microepoch; - mach_timebase_info_data_t timebase_const; + uint64_t epoch1; - struct rtclock_timer { - uint64_t deadline; - boolean_t is_set; - } timer[NCPUS]; + int64_t adjtotal; + int32_t adjdelta; +} rtclock_calend; - clock_timer_func_t timer_expire; +static boolean_t rtclock_initialized; - timer_call_data_t alarm_timer; +static uint64_t rtclock_tick_deadline[NCPUS]; - /* debugging */ - uint64_t last_abstime[NCPUS]; - int last_decr[NCPUS]; +#define NSEC_PER_HZ (NSEC_PER_SEC / HZ) +static uint32_t rtclock_tick_interval; - decl_simple_lock_data(,lock) /* real-time clock device lock */ -} rtclock; +static uint32_t rtclock_sec_divisor; -static boolean_t rtclock_initialized; +static mach_timebase_info_data_t rtclock_timebase_const; -static uint64_t rtclock_tick_deadline[NCPUS]; -static uint64_t rtclock_tick_interval; +static boolean_t rtclock_timebase_initialized; + +static struct rtclock_timer { + uint64_t deadline; + uint32_t + /*boolean_t*/ is_set:1, + has_expired:1, + :0; +} rtclock_timer[NCPUS]; + +static clock_timer_func_t rtclock_timer_expire; + +static timer_call_data_t rtclock_alarm_timer; static void timespec_to_absolutetime( - mach_timespec_t timespec, - uint64_t *result); + mach_timespec_t *ts, + uint64_t *result); static int deadline_to_decrementer( - uint64_t deadline, - uint64_t now); + uint64_t deadline, + uint64_t now); -static void rtclock_alarm_timer( +static void rtclock_alarm_expire( timer_call_param_t p0, timer_call_param_t p1); /* global data declarations */ -#define RTC_TICKPERIOD (NSEC_PER_SEC / HZ) - #define DECREMENTER_MAX 0x7FFFFFFFUL #define DECREMENTER_MIN 0xAUL natural_t rtclock_decrementer_min; +decl_simple_lock_data(static,rtclock_lock) + /* * Macros to lock/unlock real-time clock device. 
*/ #define LOCK_RTC(s) \ MACRO_BEGIN \ (s) = splclock(); \ - simple_lock(&rtclock.lock); \ + simple_lock(&rtclock_lock); \ MACRO_END #define UNLOCK_RTC(s) \ MACRO_BEGIN \ - simple_unlock(&rtclock.lock); \ + simple_unlock(&rtclock_lock); \ splx(s); \ MACRO_END @@ -163,28 +169,39 @@ static void timebase_callback( struct timebase_freq_t *freq) { - natural_t numer, denom; - int n; + uint32_t numer, denom; + uint64_t abstime; spl_t s; - denom = freq->timebase_num; - n = 9; - while (!(denom % 10)) { - if (n < 1) - break; - denom /= 10; - n--; - } + if ( freq->timebase_den < 1 || freq->timebase_den > 4 || + freq->timebase_num < freq->timebase_den ) + panic("rtclock timebase_callback: invalid constant %d / %d", + freq->timebase_num, freq->timebase_den); - numer = freq->timebase_den; - while (n-- > 0) { - numer *= 10; - } + denom = freq->timebase_num; + numer = freq->timebase_den * NSEC_PER_SEC; LOCK_RTC(s); - rtclock.timebase_const.numer = numer; - rtclock.timebase_const.denom = denom; + if (!rtclock_timebase_initialized) { + commpage_set_timestamp(0,0,0,0); + + rtclock_timebase_const.numer = numer; + rtclock_timebase_const.denom = denom; + rtclock_sec_divisor = freq->timebase_num / freq->timebase_den; + + nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime); + rtclock_tick_interval = abstime; + } + else { + UNLOCK_RTC(s); + printf("rtclock timebase_callback: late old %d / %d new %d / %d", + rtclock_timebase_const.numer, rtclock_timebase_const.denom, + numer, denom); + return; + } UNLOCK_RTC(s); + + clock_timebase_init(); } /* @@ -196,9 +213,9 @@ sysclk_config(void) if (cpu_number() != master_cpu) return(1); - timer_call_setup(&rtclock.alarm_timer, rtclock_alarm_timer, NULL); + timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL); - simple_lock_init(&rtclock.lock, ETAP_MISC_RT_CLOCK); + simple_lock_init(&rtclock_lock, ETAP_MISC_RT_CLOCK); PE_register_timebase_callback(timebase_callback); @@ -219,281 +236,71 @@ sysclk_init(void) panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu); } /* Set decrementer and hence our next tick due */ - clock_get_uptime(&abstime); + abstime = mach_absolute_time(); rtclock_tick_deadline[mycpu] = abstime; rtclock_tick_deadline[mycpu] += rtclock_tick_interval; decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime); mtdec(decr); - rtclock.last_decr[mycpu] = decr; return(1); } - /* - * Initialize non-zero clock structure values. - */ - clock_interval_to_absolutetime_interval(RTC_TICKPERIOD, 1, - &rtclock_tick_interval); /* Set decrementer and our next tick due */ - clock_get_uptime(&abstime); + abstime = mach_absolute_time(); rtclock_tick_deadline[mycpu] = abstime; rtclock_tick_deadline[mycpu] += rtclock_tick_interval; decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime); mtdec(decr); - rtclock.last_decr[mycpu] = decr; rtclock_initialized = TRUE; return (1); } -#define UnsignedWide_to_scalar(x) (*(uint64_t *)(x)) -#define scalar_to_UnsignedWide(x) (*(UnsignedWide *)(x)) - -/* - * Perform a full 64 bit by 32 bit unsigned multiply, - * yielding a 96 bit product. The most significant - * portion of the product is returned as a 64 bit - * quantity, with the lower portion as a 32 bit word. 
- */ -static void -umul_64by32( - UnsignedWide now64, - uint32_t mult32, - UnsignedWide *result64, - uint32_t *result32) -{ - uint32_t mid, mid2; - - asm volatile(" mullw %0,%1,%2" : - "=r" (*result32) : - "r" (now64.lo), "r" (mult32)); - - asm volatile(" mullw %0,%1,%2" : - "=r" (mid2) : - "r" (now64.hi), "r" (mult32)); - asm volatile(" mulhwu %0,%1,%2" : - "=r" (mid) : - "r" (now64.lo), "r" (mult32)); - - asm volatile(" mulhwu %0,%1,%2" : - "=r" (result64->hi) : - "r" (now64.hi), "r" (mult32)); - - asm volatile(" addc %0,%2,%3; - addze %1,%4" : - "=r" (result64->lo), "=r" (result64->hi) : - "r" (mid), "r" (mid2), "1" (result64->hi)); -} - -/* - * Perform a partial 64 bit by 32 bit unsigned multiply, - * yielding a 64 bit product. Only the least significant - * 64 bits of the product are calculated and returned. - */ -static void -umul_64by32to64( - UnsignedWide now64, - uint32_t mult32, - UnsignedWide *result64) -{ - uint32_t mid, mid2; - - asm volatile(" mullw %0,%1,%2" : - "=r" (result64->lo) : - "r" (now64.lo), "r" (mult32)); - - asm volatile(" mullw %0,%1,%2" : - "=r" (mid2) : - "r" (now64.hi), "r" (mult32)); - asm volatile(" mulhwu %0,%1,%2" : - "=r" (mid) : - "r" (now64.lo), "r" (mult32)); - - asm volatile(" add %0,%1,%2" : - "=r" (result64->hi) : - "r" (mid), "r" (mid2)); -} - -/* - * Perform an unsigned division of a 96 bit value - * by a 32 bit value, yielding a 96 bit quotient. - * The most significant portion of the product is - * returned as a 64 bit quantity, with the lower - * portion as a 32 bit word. - */ -static void -udiv_96by32( - UnsignedWide now64, - uint32_t now32, - uint32_t div32, - UnsignedWide *result64, - uint32_t *result32) -{ - UnsignedWide t64; - - if (now64.hi > 0 || now64.lo >= div32) { - UnsignedWide_to_scalar(result64) = - UnsignedWide_to_scalar(&now64) / div32; - - umul_64by32to64(*result64, div32, &t64); - - UnsignedWide_to_scalar(&t64) = - UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64); - - *result32 = (((uint64_t)t64.lo << 32) | now32) / div32; - } - else { - UnsignedWide_to_scalar(result64) = - (((uint64_t)now64.lo << 32) | now32) / div32; - - *result32 = result64->lo; - result64->lo = result64->hi; - result64->hi = 0; - } -} - -/* - * Perform an unsigned division of a 96 bit value - * by a 32 bit value, yielding a 64 bit quotient. - * Any higher order bits of the quotient are simply - * discarded. - */ -static void -udiv_96by32to64( - UnsignedWide now64, - uint32_t now32, - uint32_t div32, - UnsignedWide *result64) -{ - UnsignedWide t64; - - if (now64.hi > 0 || now64.lo >= div32) { - UnsignedWide_to_scalar(result64) = - UnsignedWide_to_scalar(&now64) / div32; - - umul_64by32to64(*result64, div32, &t64); - - UnsignedWide_to_scalar(&t64) = - UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64); - - result64->hi = result64->lo; - result64->lo = (((uint64_t)t64.lo << 32) | now32) / div32; - } - else { - UnsignedWide_to_scalar(result64) = - (((uint64_t)now64.lo << 32) | now32) / div32; - } -} - -/* - * Perform an unsigned division of a 96 bit value - * by a 32 bit value, yielding a 32 bit quotient, - * and a 32 bit remainder. Any higher order bits - * of the quotient are simply discarded. 
- */ -static void -udiv_96by32to32and32( - UnsignedWide now64, - uint32_t now32, - uint32_t div32, - uint32_t *result32, - uint32_t *remain32) -{ - UnsignedWide t64, u64; - - if (now64.hi > 0 || now64.lo >= div32) { - UnsignedWide_to_scalar(&t64) = - UnsignedWide_to_scalar(&now64) / div32; - - umul_64by32to64(t64, div32, &t64); - - UnsignedWide_to_scalar(&t64) = - UnsignedWide_to_scalar(&now64) - UnsignedWide_to_scalar(&t64); - - UnsignedWide_to_scalar(&t64) = ((uint64_t)t64.lo << 32) | now32; - - UnsignedWide_to_scalar(&u64) = - UnsignedWide_to_scalar(&t64) / div32; - - *result32 = u64.lo; - - umul_64by32to64(u64, div32, &u64); - - *remain32 = UnsignedWide_to_scalar(&t64) - - UnsignedWide_to_scalar(&u64); - } - else { - UnsignedWide_to_scalar(&t64) = ((uint64_t)now64.lo << 32) | now32; - - UnsignedWide_to_scalar(&u64) = - UnsignedWide_to_scalar(&t64) / div32; - - *result32 = u64.lo; - - umul_64by32to64(u64, div32, &u64); - - *remain32 = UnsignedWide_to_scalar(&t64) - - UnsignedWide_to_scalar(&u64); - } -} - -/* - * Get the clock device time. This routine is responsible - * for converting the device's machine dependent time value - * into a canonical mach_timespec_t value. - * - * SMP configurations - *the processor clocks are synchronised* - */ kern_return_t -sysclk_gettime_internal( - mach_timespec_t *time) /* OUT */ +sysclk_gettime( + mach_timespec_t *time) /* OUT */ { - UnsignedWide now; - UnsignedWide t64; - uint32_t t32; - uint32_t numer, denom; - - numer = rtclock.timebase_const.numer; - denom = rtclock.timebase_const.denom; - - clock_get_uptime((uint64_t *)&now); - - umul_64by32(now, numer, &t64, &t32); + uint64_t now, t64; + uint32_t divisor; - udiv_96by32(t64, t32, denom, &t64, &t32); + now = mach_absolute_time(); - udiv_96by32to32and32(t64, t32, NSEC_PER_SEC, - &time->tv_sec, &time->tv_nsec); + time->tv_sec = t64 = now / (divisor = rtclock_sec_divisor); + now -= (t64 * divisor); + time->tv_nsec = (now * NSEC_PER_SEC) / divisor; return (KERN_SUCCESS); } -kern_return_t -sysclk_gettime( - mach_timespec_t *time) /* OUT */ +void +clock_get_system_microtime( + uint32_t *secs, + uint32_t *microsecs) { - UnsignedWide now; - UnsignedWide t64; - uint32_t t32; - uint32_t numer, denom; - spl_t s; + uint64_t now, t64; + uint32_t divisor; - LOCK_RTC(s); - numer = rtclock.timebase_const.numer; - denom = rtclock.timebase_const.denom; - UNLOCK_RTC(s); + now = mach_absolute_time(); - clock_get_uptime((uint64_t *)&now); - - umul_64by32(now, numer, &t64, &t32); + *secs = t64 = now / (divisor = rtclock_sec_divisor); + now -= (t64 * divisor); + *microsecs = (now * USEC_PER_SEC) / divisor; +} - udiv_96by32(t64, t32, denom, &t64, &t32); +void +clock_get_system_nanotime( + uint32_t *secs, + uint32_t *nanosecs) +{ + uint64_t now, t64; + uint32_t divisor; - udiv_96by32to32and32(t64, t32, NSEC_PER_SEC, - &time->tv_sec, &time->tv_nsec); + now = mach_absolute_time(); - return (KERN_SUCCESS); + *secs = t64 = now / (divisor = rtclock_sec_divisor); + now -= (t64 * divisor); + *nanosecs = (now * NSEC_PER_SEC) / divisor; } /* @@ -501,14 +308,15 @@ sysclk_gettime( */ kern_return_t sysclk_getattr( - clock_flavor_t flavor, - clock_attr_t attr, /* OUT */ + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ mach_msg_type_number_t *count) /* IN/OUT */ { - spl_t s; + spl_t s; if (*count != 1) return (KERN_FAILURE); + switch (flavor) { case CLOCK_GET_TIME_RES: /* >0 res */ @@ -516,13 +324,14 @@ sysclk_getattr( case CLOCK_ALARM_MINRES: case CLOCK_ALARM_MAXRES: LOCK_RTC(s); - *(clock_res_t *) attr = RTC_TICKPERIOD; + 
*(clock_res_t *) attr = NSEC_PER_HZ; UNLOCK_RTC(s); break; default: return (KERN_INVALID_VALUE); } + return (KERN_SUCCESS); } @@ -534,10 +343,10 @@ void sysclk_setalarm( mach_timespec_t *deadline) { - uint64_t abstime; + uint64_t abstime; - timespec_to_absolutetime(*deadline, &abstime); - timer_call_enter(&rtclock.alarm_timer, abstime); + timespec_to_absolutetime(deadline, &abstime); + timer_call_enter(&rtclock_alarm_timer, abstime); } /* @@ -566,41 +375,10 @@ calend_init(void) */ kern_return_t calend_gettime( - mach_timespec_t *curr_time) /* OUT */ + mach_timespec_t *time) /* OUT */ { - spl_t s; - - LOCK_RTC(s); - if (!rtclock.calend_is_set) { - UNLOCK_RTC(s); - return (KERN_FAILURE); - } - - (void) sysclk_gettime_internal(curr_time); - ADD_MACH_TIMESPEC(curr_time, &rtclock.calend_offset); - UNLOCK_RTC(s); - - return (KERN_SUCCESS); -} - -/* - * Set the current clock time. - */ -kern_return_t -calend_settime( - mach_timespec_t *new_time) -{ - mach_timespec_t curr_time; - spl_t s; - - LOCK_RTC(s); - (void) sysclk_gettime_internal(&curr_time); - rtclock.calend_offset = *new_time; - SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time); - rtclock.calend_is_set = TRUE; - UNLOCK_RTC(s); - - PESetGMTTimeOfDay(new_time->tv_sec); + clock_get_calendar_nanotime( + &time->tv_sec, &time->tv_nsec); return (KERN_SUCCESS); } @@ -610,19 +388,20 @@ calend_settime( */ kern_return_t calend_getattr( - clock_flavor_t flavor, - clock_attr_t attr, /* OUT */ + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ mach_msg_type_number_t *count) /* IN/OUT */ { - spl_t s; + spl_t s; if (*count != 1) return (KERN_FAILURE); + switch (flavor) { case CLOCK_GET_TIME_RES: /* >0 res */ LOCK_RTC(s); - *(clock_res_t *) attr = RTC_TICKPERIOD; + *(clock_res_t *) attr = NSEC_PER_HZ; UNLOCK_RTC(s); break; @@ -635,62 +414,456 @@ calend_getattr( default: return (KERN_INVALID_VALUE); } + return (KERN_SUCCESS); } void -clock_adjust_calendar( - clock_res_t nsec) +clock_get_calendar_microtime( + uint32_t *secs, + uint32_t *microsecs) { - spl_t s; + uint32_t epoch, microepoch; + uint64_t now, t64; + spl_t s = splclock(); + + simple_lock(&rtclock_lock); + + if (rtclock_calend.adjdelta >= 0) { + uint32_t divisor; + + now = mach_absolute_time(); + + epoch = rtclock_calend.epoch; + microepoch = rtclock_calend.microepoch; + + simple_unlock(&rtclock_lock); + + *secs = t64 = now / (divisor = rtclock_sec_divisor); + now -= (t64 * divisor); + *microsecs = (now * USEC_PER_SEC) / divisor; + + if ((*microsecs += microepoch) >= USEC_PER_SEC) { + *microsecs -= USEC_PER_SEC; + epoch += 1; + } + + *secs += epoch; + } + else { + uint32_t delta, t32; + + delta = -rtclock_calend.adjdelta; + + t64 = mach_absolute_time() - rtclock_calend.epoch1; + + *secs = rtclock_calend.epoch; + *microsecs = rtclock_calend.microepoch; + + simple_unlock(&rtclock_lock); + + t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor; + + if (t32 > delta) + *microsecs += (t32 - delta); + + if (*microsecs >= USEC_PER_SEC) { + *microsecs -= USEC_PER_SEC; + *secs += 1; + } + } + + splx(s); +} + +/* This is only called from the gettimeofday() syscall. As a side + * effect, it updates the commpage timestamp. Otherwise it is + * identical to clock_get_calendar_microtime(). Because most + * gettimeofday() calls are handled by the commpage in user mode, + * this routine should be infrequently used except when slowing down + * the clock. 
+ */ +void +clock_gettimeofday( + uint32_t *secs_p, + uint32_t *microsecs_p) +{ + uint32_t epoch, microepoch; + uint32_t secs, microsecs; + uint64_t now, t64, secs_64, usec_64; + spl_t s = splclock(); + + simple_lock(&rtclock_lock); + + if (rtclock_calend.adjdelta >= 0) { + now = mach_absolute_time(); + + epoch = rtclock_calend.epoch; + microepoch = rtclock_calend.microepoch; + + secs = secs_64 = now / rtclock_sec_divisor; + t64 = now - (secs_64 * rtclock_sec_divisor); + microsecs = usec_64 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor; + + if ((microsecs += microepoch) >= USEC_PER_SEC) { + microsecs -= USEC_PER_SEC; + epoch += 1; + } + + secs += epoch; + + /* adjust "now" to be absolute time at _start_ of usecond */ + now -= t64 - ((usec_64 * rtclock_sec_divisor) / USEC_PER_SEC); + + commpage_set_timestamp(now,secs,microsecs,rtclock_sec_divisor); + } + else { + uint32_t delta, t32; + + delta = -rtclock_calend.adjdelta; + + now = mach_absolute_time() - rtclock_calend.epoch1; + + secs = rtclock_calend.epoch; + microsecs = rtclock_calend.microepoch; + + t32 = (now * USEC_PER_SEC) / rtclock_sec_divisor; + + if (t32 > delta) + microsecs += (t32 - delta); + + if (microsecs >= USEC_PER_SEC) { + microsecs -= USEC_PER_SEC; + secs += 1; + } + /* no need to disable timestamp, it is already off */ + } + + simple_unlock(&rtclock_lock); + splx(s); + + *secs_p = secs; + *microsecs_p = microsecs; +} + +void +clock_get_calendar_nanotime( + uint32_t *secs, + uint32_t *nanosecs) +{ + uint32_t epoch, nanoepoch; + uint64_t now, t64; + spl_t s = splclock(); + + simple_lock(&rtclock_lock); + + if (rtclock_calend.adjdelta >= 0) { + uint32_t divisor; + + now = mach_absolute_time(); + + epoch = rtclock_calend.epoch; + nanoepoch = rtclock_calend.microepoch * NSEC_PER_USEC; + + simple_unlock(&rtclock_lock); + + *secs = t64 = now / (divisor = rtclock_sec_divisor); + now -= (t64 * divisor); + *nanosecs = ((now * USEC_PER_SEC) / divisor) * NSEC_PER_USEC; + + if ((*nanosecs += nanoepoch) >= NSEC_PER_SEC) { + *nanosecs -= NSEC_PER_SEC; + epoch += 1; + } + + *secs += epoch; + } + else { + uint32_t delta, t32; + + delta = -rtclock_calend.adjdelta; + + t64 = mach_absolute_time() - rtclock_calend.epoch1; + + *secs = rtclock_calend.epoch; + *nanosecs = rtclock_calend.microepoch * NSEC_PER_USEC; + + simple_unlock(&rtclock_lock); + + t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor; + + if (t32 > delta) + *nanosecs += ((t32 - delta) * NSEC_PER_USEC); + + if (*nanosecs >= NSEC_PER_SEC) { + *nanosecs -= NSEC_PER_SEC; + *secs += 1; + } + } + + splx(s); +} + +void +clock_set_calendar_microtime( + uint32_t secs, + uint32_t microsecs) +{ + uint32_t sys, microsys; + uint32_t newsecs; + spl_t s; + + newsecs = (microsecs < 500*USEC_PER_SEC)? 
+ secs: secs + 1; LOCK_RTC(s); - if (rtclock.calend_is_set) - ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec); + commpage_set_timestamp(0,0,0,0); + + clock_get_system_microtime(&sys, &microsys); + if ((int32_t)(microsecs -= microsys) < 0) { + microsecs += USEC_PER_SEC; + secs -= 1; + } + + secs -= sys; + + rtclock_calend.epoch = secs; + rtclock_calend.microepoch = microsecs; + rtclock_calend.epoch1 = 0; + rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0; UNLOCK_RTC(s); + + PESetGMTTimeOfDay(newsecs); + + host_notify_calendar_change(); } -void -clock_initialize_calendar(void) +#define tickadj (40) /* "standard" skew, us / tick */ +#define bigadj (USEC_PER_SEC) /* use 10x skew above bigadj us */ + +uint32_t +clock_set_calendar_adjtime( + int32_t *secs, + int32_t *microsecs) +{ + int64_t total, ototal; + uint32_t interval = 0; + spl_t s; + + total = (int64_t)*secs * USEC_PER_SEC + *microsecs; + + LOCK_RTC(s); + commpage_set_timestamp(0,0,0,0); + + ototal = rtclock_calend.adjtotal; + + if (rtclock_calend.adjdelta < 0) { + uint64_t now, t64; + uint32_t delta, t32; + uint32_t sys, microsys; + + delta = -rtclock_calend.adjdelta; + + sys = rtclock_calend.epoch; + microsys = rtclock_calend.microepoch; + + now = mach_absolute_time(); + + t64 = now - rtclock_calend.epoch1; + t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor; + + if (t32 > delta) + microsys += (t32 - delta); + + if (microsys >= USEC_PER_SEC) { + microsys -= USEC_PER_SEC; + sys += 1; + } + + rtclock_calend.epoch = sys; + rtclock_calend.microepoch = microsys; + + sys = t64 = now / rtclock_sec_divisor; + now -= (t64 * rtclock_sec_divisor); + microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor; + + if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) { + rtclock_calend.microepoch += USEC_PER_SEC; + sys += 1; + } + + rtclock_calend.epoch -= sys; + } + + if (total != 0) { + int32_t delta = tickadj; + + if (total > 0) { + if (total > bigadj) + delta *= 10; + if (delta > total) + delta = total; + + rtclock_calend.epoch1 = 0; + } + else { + uint64_t now, t64; + uint32_t sys, microsys; + + if (total < -bigadj) + delta *= 10; + delta = -delta; + if (delta < total) + delta = total; + + rtclock_calend.epoch1 = now = mach_absolute_time(); + + sys = t64 = now / rtclock_sec_divisor; + now -= (t64 * rtclock_sec_divisor); + microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor; + + if ((rtclock_calend.microepoch += microsys) >= USEC_PER_SEC) { + rtclock_calend.microepoch -= USEC_PER_SEC; + sys += 1; + } + + rtclock_calend.epoch += sys; + } + + rtclock_calend.adjtotal = total; + rtclock_calend.adjdelta = delta; + + interval = rtclock_tick_interval; + } + else { + rtclock_calend.epoch1 = 0; + rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0; + } + + UNLOCK_RTC(s); + + if (ototal == 0) + *secs = *microsecs = 0; + else { + *secs = ototal / USEC_PER_SEC; + *microsecs = ototal % USEC_PER_SEC; + } + + return (interval); +} + +uint32_t +clock_adjust_calendar(void) { - mach_timespec_t curr_time; - long seconds = PEGetGMTTimeOfDay(); - spl_t s; + uint32_t micronew, interval = 0; + int32_t delta; + spl_t s; LOCK_RTC(s); - (void) sysclk_gettime_internal(&curr_time); - if (curr_time.tv_nsec < 500*USEC_PER_SEC) - rtclock.calend_offset.tv_sec = seconds; + commpage_set_timestamp(0,0,0,0); + + delta = rtclock_calend.adjdelta; + + if (delta > 0) { + micronew = rtclock_calend.microepoch + delta; + if (micronew >= USEC_PER_SEC) { + micronew -= USEC_PER_SEC; + rtclock_calend.epoch += 1; + } + + rtclock_calend.microepoch = micronew; + + 
rtclock_calend.adjtotal -= delta; + if (delta > rtclock_calend.adjtotal) + rtclock_calend.adjdelta = rtclock_calend.adjtotal; + } else - rtclock.calend_offset.tv_sec = seconds + 1; - rtclock.calend_offset.tv_nsec = 0; - SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time); - rtclock.calend_is_set = TRUE; + if (delta < 0) { + uint64_t now, t64; + uint32_t t32; + + now = mach_absolute_time(); + + t64 = now - rtclock_calend.epoch1; + + rtclock_calend.epoch1 = now; + + t32 = (t64 * USEC_PER_SEC) / rtclock_sec_divisor; + + micronew = rtclock_calend.microepoch + t32 + delta; + if (micronew >= USEC_PER_SEC) { + micronew -= USEC_PER_SEC; + rtclock_calend.epoch += 1; + } + + rtclock_calend.microepoch = micronew; + + rtclock_calend.adjtotal -= delta; + if (delta < rtclock_calend.adjtotal) + rtclock_calend.adjdelta = rtclock_calend.adjtotal; + + if (rtclock_calend.adjdelta == 0) { + uint32_t sys, microsys; + + sys = t64 = now / rtclock_sec_divisor; + now -= (t64 * rtclock_sec_divisor); + microsys = (now * USEC_PER_SEC) / rtclock_sec_divisor; + + if ((int32_t)(rtclock_calend.microepoch -= microsys) < 0) { + rtclock_calend.microepoch += USEC_PER_SEC; + sys += 1; + } + + rtclock_calend.epoch -= sys; + + rtclock_calend.epoch1 = 0; + } + } + + if (rtclock_calend.adjdelta != 0) + interval = rtclock_tick_interval; + UNLOCK_RTC(s); + + return (interval); } -mach_timespec_t -clock_get_calendar_offset(void) +void +clock_initialize_calendar(void) { - mach_timespec_t result = MACH_TIMESPEC_ZERO; - spl_t s; + uint32_t sys, microsys; + uint32_t microsecs = 0, secs = PEGetGMTTimeOfDay(); + spl_t s; LOCK_RTC(s); - if (rtclock.calend_is_set) - result = rtclock.calend_offset; + commpage_set_timestamp(0,0,0,0); + + clock_get_system_microtime(&sys, &microsys); + if ((int32_t)(microsecs -= microsys) < 0) { + microsecs += USEC_PER_SEC; + secs -= 1; + } + + secs -= sys; + + rtclock_calend.epoch = secs; + rtclock_calend.microepoch = microsecs; + rtclock_calend.epoch1 = 0; + rtclock_calend.adjdelta = rtclock_calend.adjtotal = 0; UNLOCK_RTC(s); - return (result); + host_notify_calendar_change(); } void clock_timebase_info( mach_timebase_info_t info) { - spl_t s; + spl_t s; LOCK_RTC(s); - *info = rtclock.timebase_const; + rtclock_timebase_initialized = TRUE; + *info = rtclock_timebase_const; UNLOCK_RTC(s); } @@ -705,22 +878,22 @@ clock_set_timer_deadline( s = splclock(); mycpu = cpu_number(); - mytimer = &rtclock.timer[mycpu]; - clock_get_uptime(&abstime); - rtclock.last_abstime[mycpu] = abstime; + mytimer = &rtclock_timer[mycpu]; mytimer->deadline = deadline; mytimer->is_set = TRUE; - if ( mytimer->deadline < rtclock_tick_deadline[mycpu] ) { - decr = deadline_to_decrementer(mytimer->deadline, abstime); - if ( rtclock_decrementer_min != 0 && - rtclock_decrementer_min < (natural_t)decr ) - decr = rtclock_decrementer_min; - - mtdec(decr); - rtclock.last_decr[mycpu] = decr; - - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) - | DBG_FUNC_NONE, decr, 2, 0, 0, 0); + if (!mytimer->has_expired) { + abstime = mach_absolute_time(); + if ( mytimer->deadline < rtclock_tick_deadline[mycpu] ) { + decr = deadline_to_decrementer(mytimer->deadline, abstime); + if ( rtclock_decrementer_min != 0 && + rtclock_decrementer_min < (natural_t)decr ) + decr = rtclock_decrementer_min; + + mtdec(decr); + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) + | DBG_FUNC_NONE, decr, 2, 0, 0, 0); + } } splx(s); } @@ -732,8 +905,8 @@ clock_set_timer_func( spl_t s; LOCK_RTC(s); - if (rtclock.timer_expire == NULL) - rtclock.timer_expire = func; + 
if (rtclock_timer_expire == NULL) + rtclock_timer_expire = func; UNLOCK_RTC(s); } @@ -757,8 +930,8 @@ rtclock_intr( spl_t old_spl) { uint64_t abstime; - int decr[3], mycpu = cpu_number(); - struct rtclock_timer *mytimer = &rtclock.timer[mycpu]; + int decr1, decr2, mycpu = cpu_number(); + struct rtclock_timer *mytimer = &rtclock_timer[mycpu]; /* * We may receive interrupts too early, we must reject them. @@ -768,47 +941,44 @@ rtclock_intr( return; } - decr[1] = decr[2] = DECREMENTER_MAX; + decr1 = decr2 = DECREMENTER_MAX; - clock_get_uptime(&abstime); - rtclock.last_abstime[mycpu] = abstime; + abstime = mach_absolute_time(); if ( rtclock_tick_deadline[mycpu] <= abstime ) { clock_deadline_for_periodic_event(rtclock_tick_interval, abstime, &rtclock_tick_deadline[mycpu]); hertz_tick(USER_MODE(ssp->save_srr1), ssp->save_srr0); } - clock_get_uptime(&abstime); - rtclock.last_abstime[mycpu] = abstime; + abstime = mach_absolute_time(); if ( mytimer->is_set && mytimer->deadline <= abstime ) { - mytimer->is_set = FALSE; - (*rtclock.timer_expire)(abstime); + mytimer->has_expired = TRUE; mytimer->is_set = FALSE; + (*rtclock_timer_expire)(abstime); + mytimer->has_expired = FALSE; } - clock_get_uptime(&abstime); - rtclock.last_abstime[mycpu] = abstime; - decr[1] = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime); + abstime = mach_absolute_time(); + decr1 = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime); if (mytimer->is_set) - decr[2] = deadline_to_decrementer(mytimer->deadline, abstime); + decr2 = deadline_to_decrementer(mytimer->deadline, abstime); - if (decr[1] > decr[2]) - decr[1] = decr[2]; + if (decr1 > decr2) + decr1 = decr2; if ( rtclock_decrementer_min != 0 && - rtclock_decrementer_min < (natural_t)decr[1] ) - decr[1] = rtclock_decrementer_min; + rtclock_decrementer_min < (natural_t)decr1 ) + decr1 = rtclock_decrementer_min; - mtdec(decr[1]); - rtclock.last_decr[mycpu] = decr[1]; + mtdec(decr1); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) - | DBG_FUNC_NONE, decr[1], 3, 0, 0, 0); + | DBG_FUNC_NONE, decr1, 3, 0, 0, 0); } static void -rtclock_alarm_timer( +rtclock_alarm_expire( timer_call_param_t p0, timer_call_param_t p1) { @@ -819,29 +989,12 @@ rtclock_alarm_timer( clock_alarm_intr(SYSTEM_CLOCK, &timestamp); } -void -clock_get_uptime( - uint64_t *result0) -{ - UnsignedWide *result = (UnsignedWide *)result0; - uint32_t hi, lo, hic; - - do { - asm volatile(" mftbu %0" : "=r" (hi)); - asm volatile(" mftb %0" : "=r" (lo)); - asm volatile(" mftbu %0" : "=r" (hic)); - } while (hic != hi); - - result->lo = lo; - result->hi = hi; -} - static int deadline_to_decrementer( - uint64_t deadline, - uint64_t now) + uint64_t deadline, + uint64_t now) { - uint64_t delt; + uint64_t delt; if (deadline <= now) return DECREMENTER_MIN; @@ -854,36 +1007,13 @@ deadline_to_decrementer( static void timespec_to_absolutetime( - mach_timespec_t timespec, - uint64_t *result0) + mach_timespec_t *ts, + uint64_t *result) { - UnsignedWide *result = (UnsignedWide *)result0; - UnsignedWide t64; - uint32_t t32; - uint32_t numer, denom; - spl_t s; - - LOCK_RTC(s); - numer = rtclock.timebase_const.numer; - denom = rtclock.timebase_const.denom; - UNLOCK_RTC(s); - - asm volatile(" mullw %0,%1,%2" : - "=r" (t64.lo) : - "r" (timespec.tv_sec), "r" (NSEC_PER_SEC)); - - asm volatile(" mulhwu %0,%1,%2" : - "=r" (t64.hi) : - "r" (timespec.tv_sec), "r" (NSEC_PER_SEC)); + uint32_t divisor; - UnsignedWide_to_scalar(&t64) += timespec.tv_nsec; - - umul_64by32(t64, denom, &t64, &t32); - - udiv_96by32(t64, t32, 
numer, &t64, &t32); - - result->hi = t64.lo; - result->lo = t32; + *result = ((uint64_t)ts->tv_sec * (divisor = rtclock_sec_divisor)) + + ((uint64_t)ts->tv_nsec * divisor) / NSEC_PER_SEC; } void @@ -892,7 +1022,7 @@ clock_interval_to_deadline( uint32_t scale_factor, uint64_t *result) { - uint64_t abstime; + uint64_t abstime; clock_get_uptime(result); @@ -905,32 +1035,16 @@ void clock_interval_to_absolutetime_interval( uint32_t interval, uint32_t scale_factor, - uint64_t *result0) + uint64_t *result) { - UnsignedWide *result = (UnsignedWide *)result0; - UnsignedWide t64; - uint32_t t32; - uint32_t numer, denom; - spl_t s; - - LOCK_RTC(s); - numer = rtclock.timebase_const.numer; - denom = rtclock.timebase_const.denom; - UNLOCK_RTC(s); - - asm volatile(" mullw %0,%1,%2" : - "=r" (t64.lo) : - "r" (interval), "r" (scale_factor)); - asm volatile(" mulhwu %0,%1,%2" : - "=r" (t64.hi) : - "r" (interval), "r" (scale_factor)); - - umul_64by32(t64, denom, &t64, &t32); - - udiv_96by32(t64, t32, numer, &t64, &t32); - - result->hi = t64.lo; - result->lo = t32; + uint64_t nanosecs = (uint64_t)interval * scale_factor; + uint64_t t64; + uint32_t divisor; + + *result = (t64 = nanosecs / NSEC_PER_SEC) * + (divisor = rtclock_sec_divisor); + nanosecs -= (t64 * NSEC_PER_SEC); + *result += (nanosecs * divisor) / NSEC_PER_SEC; } void @@ -948,43 +1062,26 @@ absolutetime_to_nanoseconds( uint64_t abstime, uint64_t *result) { - UnsignedWide t64; - uint32_t t32; - uint32_t numer, denom; - spl_t s; + uint64_t t64; + uint32_t divisor; - LOCK_RTC(s); - numer = rtclock.timebase_const.numer; - denom = rtclock.timebase_const.denom; - UNLOCK_RTC(s); - - UnsignedWide_to_scalar(&t64) = abstime; - - umul_64by32(t64, numer, &t64, &t32); - - udiv_96by32to64(t64, t32, denom, (void *)result); + *result = (t64 = abstime / (divisor = rtclock_sec_divisor)) * NSEC_PER_SEC; + abstime -= (t64 * divisor); + *result += (abstime * NSEC_PER_SEC) / divisor; } void nanoseconds_to_absolutetime( - uint64_t nanoseconds, + uint64_t nanosecs, uint64_t *result) { - UnsignedWide t64; - uint32_t t32; - uint32_t numer, denom; - spl_t s; - - LOCK_RTC(s); - numer = rtclock.timebase_const.numer; - denom = rtclock.timebase_const.denom; - UNLOCK_RTC(s); - - UnsignedWide_to_scalar(&t64) = nanoseconds; - - umul_64by32(t64, denom, &t64, &t32); + uint64_t t64; + uint32_t divisor; - udiv_96by32to64(t64, t32, numer, (void *)result); + *result = (t64 = nanosecs / NSEC_PER_SEC) * + (divisor = rtclock_sec_divisor); + nanosecs -= (t64 * NSEC_PER_SEC); + *result += (nanosecs * divisor) / NSEC_PER_SEC; } /* @@ -1000,7 +1097,7 @@ delay_for_interval( clock_interval_to_deadline(interval, scale_factor, &end); do { - clock_get_uptime(&now); + now = mach_absolute_time(); } while (now < end); } @@ -1011,7 +1108,7 @@ clock_delay_until( uint64_t now; do { - clock_get_uptime(&now); + now = mach_absolute_time(); } while (now < deadline); } diff --git a/osfmk/ppc/savearea.c b/osfmk/ppc/savearea.c index f5ce179d6..c82affcef 100644 --- a/osfmk/ppc/savearea.c +++ b/osfmk/ppc/savearea.c @@ -47,7 +47,6 @@ #include #include #include -#include #include #include #include @@ -126,25 +125,26 @@ unsigned int backchain = 0; /* Debug flag */ * Allocate our initial context save areas. As soon as we do this, * we can take an interrupt. We do the saveareas here, 'cause they're guaranteed * to be at least page aligned. + * + * Note: these initial saveareas are all to be allocated from V=R, less than 4GB + * space. 
*/ -void savearea_init(vm_offset_t *addrx) { +void savearea_init(vm_offset_t addr) { - savearea_comm *savec, *savec2, *saveprev; - vm_offset_t save, save2, addr; + savearea_comm *savec; + vm_offset_t save; int i; saveanchor.savetarget = InitialSaveTarget; /* Initial target value */ saveanchor.saveinuse = 0; /* Number of areas in use */ - saveanchor.savefree = 0; /* Remember the start of the free chain */ + saveanchor.savefree = 0; /* Remember the start of the free chain */ saveanchor.savefreecnt = 0; /* Remember the length */ - saveanchor.savepoolfwd = (unsigned int *)&saveanchor; /* Remember pool forward */ - saveanchor.savepoolbwd = (unsigned int *)&saveanchor; /* Remember pool backward */ - - addr = *addrx; /* Make this easier for ourselves */ + saveanchor.savepoolfwd = (addr64_t)&saveanchor; /* Remember pool forward */ + saveanchor.savepoolbwd = (addr64_t)&saveanchor; /* Remember pool backward */ save = addr; /* Point to the whole block of blocks */ @@ -153,7 +153,7 @@ void savearea_init(vm_offset_t *addrx) { */ - for(i=0; i < 8; i++) { /* Initialize the back pocket saveareas */ + for(i=0; i < BackPocketSaveBloks; i++) { /* Initialize the back pocket saveareas */ savec = (savearea_comm *)save; /* Get the control area for this one */ @@ -161,7 +161,7 @@ void savearea_init(vm_offset_t *addrx) { savec->sac_vrswap = 0; /* V=R, so the translation factor is 0 */ savec->sac_flags = sac_perm; /* Mark it permanent */ savec->sac_flags |= 0x0000EE00; /* Debug eyecatcher */ - save_queue((savearea *)savec); /* Add page to savearea lists */ + save_queue((uint32_t)savec >> 12); /* Add page to savearea lists */ save += PAGE_SIZE; /* Jump up to the next one now */ } @@ -178,8 +178,8 @@ void savearea_init(vm_offset_t *addrx) { saveanchor.savefree = 0; /* Remember the start of the free chain */ saveanchor.savefreecnt = 0; /* Remember the length */ saveanchor.saveadjust = 0; /* Set none needed yet */ - saveanchor.savepoolfwd = (unsigned int *)&saveanchor; /* Remember pool forward */ - saveanchor.savepoolbwd = (unsigned int *)&saveanchor; /* Remember pool backward */ + saveanchor.savepoolfwd = (addr64_t)&saveanchor; /* Remember pool forward */ + saveanchor.savepoolbwd = (addr64_t)&saveanchor; /* Remember pool backward */ for(i=0; i < InitialSaveBloks; i++) { /* Initialize the saveareas */ @@ -189,40 +189,37 @@ void savearea_init(vm_offset_t *addrx) { savec->sac_vrswap = 0; /* V=R, so the translation factor is 0 */ savec->sac_flags = sac_perm; /* Mark it permanent */ savec->sac_flags |= 0x0000EE00; /* Debug eyecatcher */ - save_queue((savearea *)savec); /* Add page to savearea lists */ + save_queue((uint32_t)savec >> 12); /* Add page to savearea lists */ save += PAGE_SIZE; /* Jump up to the next one now */ } - *addrx = save; /* Move the free storage lowwater mark */ - /* * We now have a free list that has our initial number of entries * The local qfret lists is empty. When we call save_get below it will see that * the local list is empty and fill it for us. * - * It is ok to call save_get_phys here because even though if we are translation on, we are still V=R and - * running with BAT registers so no interruptions. Regular interruptions will be off. Using save_get - * would be wrong if the tracing was enabled--it would cause an exception. + * It is ok to call save_get here because all initial saveareas are V=R in less + * than 4GB space, so 32-bit addressing is ok. 
+ * */ - save2 = (vm_offset_t)save_get_phys(); /* This will populate the local list - and get the first one for the system */ - per_proc_info[0].next_savearea = (unsigned int)save2; /* Tell the exception handler about it */ - +/* + * This will populate the local list and get the first one for the system + */ + per_proc_info[0].next_savearea = (vm_offset_t)save_get(); + /* * The system is now able to take interruptions */ - return; - } /* - * Returns a savearea. If the free list needs size adjustment it happens here. + * Obtains a savearea. If the free list needs size adjustment it happens here. * Don't actually allocate the savearea until after the adjustment is done. */ @@ -270,15 +267,17 @@ void save_release(struct savearea *save) { /* Release a save area */ void save_adjust(void) { - savearea_comm *sctl, *sctlnext, *freepool, *freepage, *realpage; + savearea_comm *sctl, *sctlnext, *freepage; kern_return_t ret; + uint64_t vtopmask; + ppnum_t physpage; if(saveanchor.saveadjust < 0) { /* Do we need to adjust down? */ sctl = (savearea_comm *)save_trim_free(); /* Trim list to the need count, return start of trim list */ while(sctl) { /* Release the free pages back to the kernel */ - sctlnext = (savearea_comm *)sctl->save_prev; /* Get next in list */ + sctlnext = CAST_DOWN(savearea_comm *, sctl->save_prev); /* Get next in list */ kmem_free(kernel_map, (vm_offset_t) sctl, PAGE_SIZE); /* Release the page */ sctl = sctlnext; /* Chain onwards */ } @@ -294,15 +293,18 @@ void save_adjust(void) { panic("Whoops... Not a bit of wired memory left for saveareas\n"); } - realpage = (savearea_comm *)pmap_extract(kernel_pmap, (vm_offset_t)freepage); /* Get the physical */ + physpage = pmap_find_phys(kernel_pmap, (vm_offset_t)freepage); /* Find physical page */ + if(!physpage) { /* See if we actually have this mapped*/ + panic("save_adjust: wired page not mapped - va = %08X\n", freepage); /* Die */ + } bzero((void *)freepage, PAGE_SIZE); /* Clear it all to zeros */ freepage->sac_alloc = 0; /* Mark all entries taken */ - freepage->sac_vrswap = (unsigned int)freepage ^ (unsigned int)realpage; /* Form mask to convert V to R and vice versa */ + freepage->sac_vrswap = ((uint64_t)physpage << 12) ^ (uint64_t)((uintptr_t)freepage); /* XOR to calculate conversion mask */ freepage->sac_flags |= 0x0000EE00; /* Set debug eyecatcher */ - save_queue((savearea *)realpage); /* Add all saveareas on page to free list */ + save_queue(physpage); /* Add all saveareas on page to free list */ } } } diff --git a/osfmk/ppc/savearea.h b/osfmk/ppc/savearea.h index ff04a38df..6710b8eb8 100644 --- a/osfmk/ppc/savearea.h +++ b/osfmk/ppc/savearea.h @@ -32,7 +32,10 @@ #ifdef __APPLE_API_PRIVATE #ifdef MACH_KERNEL_PRIVATE -#include +#include +#include + +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct savearea_comm { /* @@ -45,32 +48,35 @@ typedef struct savearea_comm { */ -/* Keep the save_prev, sac_next, and sac_prev in these positions, some assemble code depends upon it to +/* Keep the save_prev, sac_next, and sac_prev in these positions, some assembler code depends upon it to * match up with fields in saveanchor. 
*/ - struct savearea *save_prev; /* The address of the previous (or next) savearea */ - unsigned int *sac_next; /* Points to next savearea page that has a free slot - real */ - unsigned int *sac_prev; /* Points to previous savearea page that has a free slot - real */ - unsigned int save_flags; /* Various flags */ + /* offset 0x000 */ + addr64_t save_prev; /* The address of the previous (or next) savearea */ + addr64_t sac_next; /* Points to next savearea page that has a free slot - real */ + addr64_t sac_prev; /* Points to previous savearea page that has a free slot - real */ unsigned int save_level; /* Context ID */ + unsigned int save_01C; + + /* 0x20 */ unsigned int save_time[2]; /* Context save time - for debugging or performance */ struct thread_activation *save_act; /* Associated activation */ - -/* 0x20 */ - - unsigned int sac_vrswap; /* XOR mask to swap V to R or vice versa */ - unsigned int sac_alloc; /* Bitmap of allocated slots */ + unsigned int save_02c; + uint64_t sac_vrswap; /* XOR mask to swap V to R or vice versa */ + unsigned int save_flags; /* Various flags */ unsigned int sac_flags; /* Various flags */ - unsigned int save_misc0; /* Various stuff */ - unsigned int save_misc1; /* Various stuff */ - unsigned int save_misc2; /* Various stuff */ - unsigned int save_misc3; /* Various stuff */ - unsigned int save_misc4; /* Various stuff */ - - unsigned int save_040[8]; /* Fill 32 bytes */ + + /* offset 0x040 */ + uint64_t save_misc0; /* Various stuff */ + uint64_t save_misc1; /* Various stuff */ + unsigned int sac_alloc; /* Bitmap of allocated slots */ + unsigned int save_054; + unsigned int save_misc2; + unsigned int save_misc3; /* offset 0x0060 */ } savearea_comm; +#pragma pack() #endif #ifdef BSD_KERNEL_PRIVATE @@ -84,107 +90,91 @@ typedef struct savearea_comm { * This type of savearea contains all of the general context. 
*/ +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct savearea { savearea_comm save_hdr; /* Stuff common to all saveareas */ unsigned int save_060[8]; /* Fill 32 bytes */ - /* offset 0x0080 */ - unsigned int save_r0; - unsigned int save_r1; - unsigned int save_r2; - unsigned int save_r3; - unsigned int save_r4; - unsigned int save_r5; - unsigned int save_r6; - unsigned int save_r7; - - /* offset 0x0A0 */ - unsigned int save_r8; - unsigned int save_r9; - unsigned int save_r10; - unsigned int save_r11; - unsigned int save_r12; - unsigned int save_r13; - unsigned int save_r14; - unsigned int save_r15; - - /* offset 0x0C0 */ - unsigned int save_r16; - unsigned int save_r17; - unsigned int save_r18; - unsigned int save_r19; - unsigned int save_r20; - unsigned int save_r21; - unsigned int save_r22; - unsigned int save_r23; - - /* offset 0x0E0 */ - unsigned int save_r24; - unsigned int save_r25; - unsigned int save_r26; - unsigned int save_r27; - unsigned int save_r28; - unsigned int save_r29; - unsigned int save_r30; - unsigned int save_r31; - - /* offset 0x100 */ - unsigned int save_srr0; - unsigned int save_srr1; + + /* offset 0x0080 */ + uint64_t save_r0; + uint64_t save_r1; + uint64_t save_r2; + uint64_t save_r3; + /* offset 0x0A0 */ + uint64_t save_r4; + uint64_t save_r5; + uint64_t save_r6; + uint64_t save_r7; + /* offset 0x0C0 */ + uint64_t save_r8; + uint64_t save_r9; + uint64_t save_r10; + uint64_t save_r11; + /* offset 0x0E0 */ + uint64_t save_r12; + uint64_t save_r13; + uint64_t save_r14; + uint64_t save_r15; + /* offset 0x100 */ + uint64_t save_r16; + uint64_t save_r17; + uint64_t save_r18; + uint64_t save_r19; + /* offset 0x120 */ + uint64_t save_r20; + uint64_t save_r21; + uint64_t save_r22; + uint64_t save_r23; + /* offset 0x140 */ + uint64_t save_r24; + uint64_t save_r25; + uint64_t save_r26; + uint64_t save_r27; + /* offset 0x160 */ + uint64_t save_r28; + uint64_t save_r29; + uint64_t save_r30; + uint64_t save_r31; + /* offset 0x180 */ + uint64_t save_srr0; + uint64_t save_srr1; + uint64_t save_xer; + uint64_t save_lr; + /* offset 0x1A0 */ + uint64_t save_ctr; + uint64_t save_dar; unsigned int save_cr; - unsigned int save_xer; - unsigned int save_lr; - unsigned int save_ctr; - unsigned int save_dar; unsigned int save_dsisr; - - - /* offset 0x120 */ - unsigned int save_vscr[4]; + unsigned int save_exception; + unsigned int save_vrsave; + /* offset 0x1C0 */ + unsigned int save_vscr[4]; unsigned int save_fpscrpad; unsigned int save_fpscr; - unsigned int save_exception; - unsigned int save_vrsave; - - /* offset 0x140 */ - unsigned int save_sr0; - unsigned int save_sr1; - unsigned int save_sr2; - unsigned int save_sr3; - unsigned int save_sr4; - unsigned int save_sr5; - unsigned int save_sr6; - unsigned int save_sr7; - - /* offset 0x160 */ - unsigned int save_sr8; - unsigned int save_sr9; - unsigned int save_sr10; - unsigned int save_sr11; - unsigned int save_sr12; - unsigned int save_sr13; - unsigned int save_sr14; - unsigned int save_sr15; - - /* offset 0x180 */ - unsigned int save_180[8]; - unsigned int save_1A0[8]; - unsigned int save_1C0[8]; + unsigned int save_1d8[2]; + /* offset 0x1E0 */ unsigned int save_1E0[8]; - unsigned int save_200[8]; - unsigned int save_220[8]; - unsigned int save_240[8]; - unsigned int save_260[8]; - + /* offset 0x200 - keep on 128 byte bndry */ + uint32_t save_pmc[8]; + uint64_t save_mmcr0; /* offset 0x220 */ + uint64_t save_mmcr1; + uint64_t save_mmcr2; + + unsigned int save_238[2]; + /* offset 0x240 */ + unsigned 
int save_instr[16]; /* Instrumentation */ /* offset 0x280 */ } savearea; +#pragma pack() /* * This type of savearea contains all of the floating point context. */ +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct savearea_fpu { savearea_comm save_hdr; /* Stuff common to all saveareas */ @@ -242,6 +232,7 @@ typedef struct savearea_fpu { /* offset 0x280 */ } savearea_fpu; +#pragma pack() @@ -249,6 +240,7 @@ typedef struct savearea_fpu { * This type of savearea contains all of the vector context. */ +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct savearea_vec { savearea_comm save_hdr; /* Stuff common to all saveareas */ @@ -292,10 +284,12 @@ typedef struct savearea_vec { /* offset 0x280 */ } savearea_vec; +#pragma pack() #endif /* MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE */ #ifdef MACH_KERNEL_PRIVATE +#pragma pack(4) /* Make sure the structure stays as we defined it */ struct Saveanchor { /* @@ -304,20 +298,24 @@ struct Saveanchor { * updated with translation and interrupts disabled. This is because it is * locked during exception processing and if we were to take a PTE miss while the * lock were held, well, that would be very bad now wouldn't it? + * Note that the first 24 bytes must be the same format as a savearea header. */ - unsigned int savelock; /* Lock word for savearea free list manipulation */ - unsigned int *savepoolfwd; /* Forward anchor for the free pool */ - unsigned int *savepoolbwd; /* Backward anchor for the free pool */ - volatile unsigned int savefree; /* Anchor for the global free list */ - volatile unsigned int savefreecnt; /* Number of saveareas on global free list */ - volatile int saveadjust; /* If 0 number of saveareas is ok, otherwise number to change (positive means grow, negative means shrink */ - volatile int saveinuse; /* Number of areas in use counting those on the local free list */ - volatile int savetarget; /* Number of savearea's needed */ - int savemaxcount; /* Maximum saveareas ever allocated */ - + unsigned int savelock; /* 000 Lock word for savearea free list manipulation */ + int saveRSVD4; /* 004 reserved */ + addr64_t savepoolfwd; /* 008 Forward anchor for the free pool */ + addr64_t savepoolbwd; /* 010 Backward anchor for the free pool */ + volatile addr64_t savefree; /* 018 Anchor for the global free list */ + volatile unsigned int savefreecnt; /* 020 Number of saveareas on global free list */ + volatile int saveadjust; /* 024 If 0 number of saveareas is ok, otherwise # to change (pos means grow, neg means shrink */ + volatile int saveinuse; /* 028 Number of areas in use counting those on the local free list */ + volatile int savetarget; /* 02C Number of savearea's needed */ + int savemaxcount; /* 030 Maximum saveareas ever allocated */ + unsigned int saveRSVD034[3]; /* 034 reserved */ +/* 040 */ }; +#pragma pack() #define sac_cnt (4096 / sizeof(savearea)) /* Number of saveareas per page */ @@ -335,20 +333,24 @@ struct Saveanchor { #define InitialSaveAreas (2 * FreeListMin) /* The number of saveareas to make at boot time */ #define InitialSaveTarget FreeListMin /* The number of saveareas for an initial target. This should be the minimum ever needed. 
*/ #define InitialSaveBloks (InitialSaveAreas + sac_cnt - 1) / sac_cnt /* The number of savearea blocks to allocate at boot */ +#define BackPocketSaveBloks 8 /* Number of pages of back pocket saveareas */ + +void save_queue(ppnum_t); /* Add a new savearea block to the free list */ +addr64_t save_get_init(void); /* special savearea-get for cpu initialization (returns physical address) */ +struct savearea *save_get(void); /* Obtains a savearea from the free list (returns virtual address) */ +reg64_t save_get_phys_32(void); /* Obtains a savearea from the free list (returns phys addr in r3) */ +reg64_t save_get_phys_64(void); /* Obtains a savearea from the free list (returns phys addr in r3) */ +struct savearea *save_alloc(void); /* Obtains a savearea and allocates blocks if needed */ +struct savearea *save_cpv(addr64_t); /* Converts a physical savearea address to virtual */ +void save_ret(struct savearea *); /* Returns a savearea to the free list by virtual address */ +void save_ret_wMSR(struct savearea *, reg64_t); /* returns a savearea and restores an MSR */ +void save_ret_phys(reg64_t); /* Returns a savearea to the free list by physical address */ +void save_adjust(void); /* Adjust size of the global free list */ +struct savearea_comm *save_trim_free(void); /* Remove free pages from savearea pool */ +int save_recover(void); /* returns nonzero if we can recover enough from the free pool */ +void savearea_init(vm_offset_t addr); /* Boot-time savearea initialization */ -void save_release(struct savearea *); /* Release a save area */ -struct savectl *save_dequeue(void); /* Find and dequeue one that is all empty */ -unsigned int save_queue(struct savearea *); /* Add a new savearea block to the free list */ -struct savearea *save_get(void); /* Obtains a savearea from the free list (returns virtual address) */ -struct savearea *save_get_phys(void); /* Obtains a savearea from the free list (returns physical address) */ -struct savearea *save_alloc(void); /* Obtains a savearea and allocates blocks if needed */ -struct savearea *save_cpv(struct savearea *); /* Converts a physical savearea address to virtual */ -void save_ret(struct savearea *); /* Returns a savearea to the free list */ -void save_ret_phys(struct savearea *); /* Returns a savearea to the free list */ -void save_adjust(void); /* Adjust size of the global free list */ -struct savearea_comm *save_trim_freet(void); /* Remove free pages from savearea pool */ - #endif /* MACH_KERNEL_PRIVATE */ #endif /* __APPLE_API_PRIVATE */ @@ -358,6 +360,9 @@ struct savearea_comm *save_trim_freet(void); /* Remove free pages from saveare #define SAVrststk 0x00010000 /* Indicates that the current stack should be reset to empty */ #define SAVsyscall 0x00020000 /* Indicates that the savearea is associated with a syscall */ #define SAVredrive 0x00040000 /* Indicates that the low-level fault handler associated */ +#define SAVredriveb 13 /* Indicates that the low-level fault handler associated */ +#define SAVinstrument 0x00080000 /* Indicates that we should return instrumentation data */ +#define SAVinstrumentb 12 /* Indicates that we should return instrumentation data */ #define SAVtype 0x0000FF00 /* Shows type of savearea */ #define SAVtypeshft 8 /* Shift to position type */ #define SAVempty 0x86 /* Savearea is on free list */ @@ -365,4 +370,6 @@ struct savearea_comm *save_trim_freet(void); /* Remove free pages from saveare #define SAVfloat 0x02 /* Savearea contains floating point context */ #define SAVvector 0x03 /* Savearea contains vector context */ + + 
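Two conventions in this header are easy to get wrong and worth pinning down. The offset comments (0x000, 0x020, 0x040) exist because low-level assembler reaches these fields by fixed displacement, which is what the #pragma pack(4) brackets protect; and each mask/bit pair such as SAVredrive/SAVredriveb or SAVinstrument/SAVinstrumentb encodes the PowerPC habit of numbering bits from the most significant end of a 32-bit word, so mask == 1 << (31 - bit). A minimal C11 sketch (illustrative only, not part of the patch) that checks both at compile time; the struct is a reduced stand-in that swaps the kernel types (addr64_t, struct thread_activation *) for fixed-width ones so it builds anywhere:

    #include <stddef.h>
    #include <stdint.h>

    /* Reduced stand-in for savearea_comm, field order and offsets taken
     * from the comments in the patched header. */
    #pragma pack(4)
    typedef struct savearea_comm_sketch {
        /* offset 0x000 */
        uint64_t save_prev;
        uint64_t sac_next;
        uint64_t sac_prev;
        uint32_t save_level;
        uint32_t save_01C;
        /* offset 0x020 */
        uint32_t save_time[2];
        uint32_t save_act;          /* pointer is 32 bits on this PPC kernel */
        uint32_t save_02c;
        uint64_t sac_vrswap;
        uint32_t save_flags;
        uint32_t sac_flags;
        /* offset 0x040 */
        uint64_t save_misc0;
        uint64_t save_misc1;
        uint32_t sac_alloc;
        uint32_t save_054;
        uint32_t save_misc2;
        uint32_t save_misc3;        /* next field would sit at 0x060 */
    } savearea_comm_sketch;
    #pragma pack()

    /* The assembler addresses these fields by constant displacement, so the
     * commented offsets must hold exactly. */
    _Static_assert(offsetof(savearea_comm_sketch, save_time)  == 0x020, "hdr layout");
    _Static_assert(offsetof(savearea_comm_sketch, sac_vrswap) == 0x030, "hdr layout");
    _Static_assert(offsetof(savearea_comm_sketch, save_misc0) == 0x040, "hdr layout");
    _Static_assert(sizeof(savearea_comm_sketch)               == 0x060, "hdr layout");

    /* PPC numbers bits 0..31 from the MSB, so bit b maps to 1 << (31 - b). */
    #define PPC_BIT(b) (1u << (31 - (b)))
    _Static_assert(PPC_BIT(13) == 0x00040000u, "SAVredrive/SAVredriveb");
    _Static_assert(PPC_BIT(12) == 0x00080000u, "SAVinstrument/SAVinstrumentb");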
#endif /* _PPC_SAVEAREA_H_ */ diff --git a/osfmk/ppc/savearea_asm.s b/osfmk/ppc/savearea_asm.s index cea64c381..fe332b864 100644 --- a/osfmk/ppc/savearea_asm.s +++ b/osfmk/ppc/savearea_asm.s @@ -37,574 +37,796 @@ #include #include #include -#include #include #include .text +/* Register usage conventions in this code: + * r9 = return address + * r10 = per-proc ptr + * r11 = MSR at entry + * cr6 = feature flags (ie, pf64Bit) + * + * Because much of this code deals with physical addresses, + * there are parallel paths for 32- and 64-bit machines. + */ + + /* - * This routine will add a savearea block to the free list. - * Note really well: we can take NO exceptions of any kind, - * including a PTE miss once the savearea lock is held. That's - * a guaranteed deadlock. That means we must disable for interrutions - * and turn all translation off. + * *********************** + * * s a v e _ q u e u e * + * *********************** + * + * void save_queue(ppnum_t pagenum); * + * This routine will add a savearea block to the free list. * We also queue the block to the free pool list. This is a * circular double linked list. Because this block has no free entries, * it gets queued to the end of the list - * */ - .align 5 .globl EXT(save_queue) LEXT(save_queue) + mflr r9 ; get return address + mr r8,r3 ; move pagenum out of the way + bl saveSetup ; turn translation off, 64-bit on, load many regs + bf-- pf64Bitb,saveQueue32 ; skip if 32-bit processor + + sldi r2,r8,12 ; r2 <-- phys address of page + li r8,sac_cnt ; Get the number of saveareas per page + mr r4,r2 ; Point to start of chain + li r0,SAVempty ; Get empty marker - mfsprg r9,2 ; Get the feature flags - mr r11,r3 ; Save the block - mtcrf 0x04,r9 ; Set the features - mfmsr r12 ; Get the MSR - rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - lis r10,hi16(EXT(saveanchor)) ; Get the high part of the anchor - andi. r3,r12,0x7FCF ; Turn off all translation and rupts - ori r10,r10,lo16(EXT(saveanchor)) ; Bottom half of the anchor - - bt pfNoMSRirb,sqNoMSR ; No MSR... - - mtmsr r3 ; Translation and all off - isync ; Toss prefetch - b sqNoMSRx - -sqNoMSR: li r0,loadMSR ; Get the MSR setter SC - sc ; Set it -sqNoMSRx: - - rlwinm. r3,r11,0,0,19 ; (TEST/DEBUG) -#if 0 - bne+ notrapit ; (TEST/DEBUG) - BREAKPOINT_TRAP ; (TEST/DEBUG) -notrapit: ; (TEST/DEBUG) -#endif +saveQueue64a: + addic. r8,r8,-1 ; Keep track of how many we did + stb r0,SAVflags+2(r4) ; Set empty + addi r7,r4,SAVsize ; Point to the next slot + ble- saveQueue64b ; We are done with the chain + std r7,SAVprev(r4) ; Set this chain + mr r4,r7 ; Step to the next + b saveQueue64a ; Fill the whole block... +saveQueue64b: + bl savelock ; Go lock the save anchor + + ld r7,SVfree(0) ; Get the free save area list anchor + lwz r6,SVfreecnt(0) ; Get the number of free saveareas + std r2,SVfree(0) ; Queue in the new one + addi r6,r6,sac_cnt ; Count the ones we are linking in + std r7,SAVprev(r4) ; Queue the old first one off of us + stw r6,SVfreecnt(0) ; Save the new count + b saveQueueExit + + ; Handle 32-bit processor. + +saveQueue32: + slwi r2,r8,12 ; r2 <-- phys address of page li r8,sac_cnt ; Get the number of saveareas per page - mr r4,r11 ; Point to start of chain + mr r4,r2 ; Point to start of chain li r0,SAVempty ; Get empty marker -sqchain: addic. r8,r8,-1 ; Keep track of how many we did +saveQueue32a: + addic. 
r8,r8,-1 ; Keep track of how many we did stb r0,SAVflags+2(r4) ; Set empty - addi r9,r4,SAVsize ; Point to the next slot - ble- sqchaindn ; We are done with the chain - stw r9,SAVprev(r4) ; Set this chain - mr r4,r9 ; Step to the next - b sqchain ; Fill the whole block... + addi r7,r4,SAVsize ; Point to the next slot + ble- saveQueue32b ; We are done with the chain + stw r7,SAVprev+4(r4) ; Set this chain + mr r4,r7 ; Step to the next + b saveQueue32a ; Fill the whole block... - .align 5 - -sqchaindn: mflr r9 ; Save the return address +saveQueue32b: bl savelock ; Go lock the save anchor - lwz r7,SVfree(r10) ; Get the free save area list anchor - lwz r6,SVfreecnt(r10) ; Get the number of free saveareas + lwz r7,SVfree+4(0) ; Get the free save area list anchor + lwz r6,SVfreecnt(0) ; Get the number of free saveareas - stw r11,SVfree(r10) ; Queue in the new one + stw r2,SVfree+4(0) ; Queue in the new one addi r6,r6,sac_cnt ; Count the ones we are linking in - stw r7,SAVprev(r4) ; Queue the old first one off of us - stw r6,SVfreecnt(r10) ; Save the new count - + stw r7,SAVprev+4(r4) ; Queue the old first one off of us + stw r6,SVfreecnt(0) ; Save the new count + +saveQueueExit: ; join here from 64-bit path bl saveunlock ; Unlock the list and set the adjust count - mtlr r9 ; Restore the return - mtmsr r12 ; Restore interrupts and translation - isync ; Dump any speculations #if FPVECDBG - mfsprg r2,0 ; (TEST/DEBUG) - lwz r2,next_savearea(r2) ; (TEST/DEBUG) + mfsprg r2,1 ; (TEST/DEBUG) mr. r2,r2 ; (TEST/DEBUG) - beqlr- ; (TEST/DEBUG) + beq-- saveRestore ; (TEST/DEBUG) lis r0,hi16(CutTrace) ; (TEST/DEBUG) li r2,0x2201 ; (TEST/DEBUG) oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG) sc ; (TEST/DEBUG) #endif - - blr ; Leave... + b saveRestore ; Restore interrupts and translation /* - * This routine will obtain a savearea. - * Note really well: we can take NO exceptions of any kind, - * including a PTE miss during this process. That's - * a guaranteed deadlock or screwup. That means we must disable for interrutions - * and turn all translation off. - * - * We pass back the virtual address of the one we just obtained - * or a zero if none to allocate. - * - * First we try the local list. If that is below a threshold, we will - * lock the free list and replenish. + * ***************************** + * * s a v e _ g e t _ i n i t * + * ***************************** * - * If there are no saveareas in either list, we will install the - * backpocket and choke. - * - * The save_get_phys call assumes that translation and interruptions are - * already off and that the returned address is physical. + * addr64_t save_get_init(void); * * Note that save_get_init is used in initial processor startup only. It * is used because translation is on, but no tables exist yet and we have * no V=R BAT registers that cover the entire physical memory. - * - * - * NOTE!!! NEVER USE R0, R2, or R12 IN HERE THAT WAY WE DON'T NEED A - * STACK FRAME IN FPU_SAVE, FPU_SWITCH, VEC_SAVE, OR VEC_SWITCH. 
*/ - .align 5 .globl EXT(save_get_init) LEXT(save_get_init) + mflr r9 ; get return address + bl saveSetup ; turn translation off, 64-bit on, load many regs + bfl-- pf64Bitb,saveGet32 ; Get r3 <- savearea, r5 <- page address (with SAC) + btl++ pf64Bitb,saveGet64 ; get one on a 64-bit machine + bl saveRestore ; restore translation etc + mtlr r9 + + ; unpack the physaddr in r3 into a long long in (r3,r4) + + mr r4,r3 ; copy low word of phys address to r4 + li r3,0 ; assume upper word was 0 + bflr-- pf64Bitb ; if 32-bit processor, return + srdi r3,r4,32 ; unpack reg64_t to addr64_t on 64-bit machine + rlwinm r4,r4,0,0,31 + blr + - mfsprg r9,2 ; Get the feature flags - mfmsr r12 ; Get the MSR - rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - mtcrf 0x04,r9 ; Set the features - andi. r3,r12,0x7FCF ; Turn off all translation and interrupts - - bt pfNoMSRirb,sgiNoMSR ; No MSR... - - mtmsr r3 ; Translation and all off - isync ; Toss prefetch - b sgiGetPhys ; Go get the savearea... - -sgiNoMSR: li r0,loadMSR ; Get the MSR setter SC - sc ; Set it - -sgiGetPhys: mflr r11 ; Save R11 (save_get_phys does not use this one) - bl EXT(save_get_phys) ; Get a savearea - mtlr r11 ; Restore return - - mtmsr r12 ; Restore translation and exceptions - isync ; Make sure about it - blr ; Return... - +/* + * ******************* + * * s a v e _ g e t * + * ******************* + * + * savearea *save_get(void); + * + * Allocate a savearea, returning a virtual address. NOTE: we must preserve + * r0, r2, and r12. Our callers in cswtch.s depend on this. + */ .align 5 .globl EXT(save_get) LEXT(save_get) - - crclr cr1_eq ; Clear CR1_eq to indicate we want virtual address - mfsprg r9,2 ; Get the feature flags - mfmsr r11 ; Get the MSR - rlwinm. r3,r11,0,MSR_EE_BIT,MSR_EE_BIT ; Are interrupts enabled here? - beq+ sgnomess ; Nope, do not mess with fp or vec... - rlwinm r11,r11,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r11,r11,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - -sgnomess: mtcrf 0x04,r9 ; Set the features - andi. r3,r11,0x7FCF ; Turn off all translation and interrupts - - bt pfNoMSRirb,sgNoMSR ; No MSR... - - mtmsr r3 ; Translation and all off - isync ; Toss prefetch - b csaveget - -sgNoMSR: mr r9,r0 ; Save this - li r0,loadMSR ; Get the MSR setter SC - sc ; Set it - mr r0,r9 ; Restore it - - b csaveget ; Join the common... 
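The allocation policy spelled out in the old block comment above survives unchanged in the rewritten bodies that follow: try the per-processor local list first; if it is at or below LocalSaveMin, lock the anchor and replenish it up to LocalSaveTarget from the global list; fall back to the back pocket only when both are empty. A single-threaded C model of that policy, a sketch only; the function name, threshold values, and list heads are stand-ins for the real ones in savearea.h and the per_proc area:

    #include <stddef.h>

    #define LocalSaveMin    2           /* placeholder thresholds; the real */
    #define LocalSaveTarget 8           /* values live in savearea.h        */

    struct sa { struct sa *prev; };

    static struct sa *global_free;      /* models saveanchor.savefree    */
    static unsigned   global_cnt;       /* models saveanchor.savefreecnt */
    static struct sa *local_free;       /* models per_proc lclfree       */
    static unsigned   local_cnt;        /* models per_proc lclfreecnt    */

    static struct sa *model_save_get(void)
    {
        while (local_cnt <= LocalSaveMin && global_cnt != 0) {
            /* Too low: take the anchor lock (elided here) and move entries
             * from the global list until the local list reaches the target
             * or the global list runs dry. */
            unsigned want = LocalSaveTarget - local_cnt;
            while (want-- != 0 && global_cnt != 0) {
                struct sa *s = global_free;
                global_free = s->prev;  global_cnt--;
                s->prev = local_free;   local_free = s;  local_cnt++;
            }
        }
        if (local_free == NULL)
            return NULL;                /* real code installs the back pocket and panics */
        struct sa *s = local_free;      /* pop from the local list */
        local_free = s->prev;
        local_cnt--;
        return s;
    }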
+ mflr r9 ; get return address + mr r5,r0 ; copy regs before saveSetup nails them + bl saveSetup ; turn translation off, 64-bit on, load many regs + bf-- pf64Bitb,svgt1 ; skip if 32-bit processor + + std r5,tempr0(r10) ; save r0 in per-proc across call to saveGet64 + std r2,tempr2(r10) ; and r2 + std r12,tempr4(r10) ; and r12 + bl saveGet64 ; get r3 <- savearea, r5 <- page address (with SAC) + ld r0,tempr0(r10) ; restore callers regs + ld r2,tempr2(r10) + ld r12,tempr4(r10) + b svgt2 + +svgt1: ; handle 32-bit processor + stw r5,tempr0+4(r10) ; save r0 in per-proc across call to saveGet32 + stw r2,tempr2+4(r10) ; and r2 + stw r12,tempr4+4(r10) ; and r12 + bl saveGet32 ; get r3 <- savearea, r5 <- page address (with SAC) + lwz r0,tempr0+4(r10) ; restore callers regs + lwz r2,tempr2+4(r10) + lwz r12,tempr4+4(r10) + +svgt2: + lwz r5,SACvrswap+4(r5) ; Get the virtual to real translation (only need low word) + mtlr r9 ; restore return address + xor r3,r3,r5 ; convert physaddr to virtual + rlwinm r3,r3,0,0,31 ; 0 upper word if a 64-bit machine - .align 5 - .globl EXT(save_get_phys) - -LEXT(save_get_phys) - - crset cr1_eq ; Clear CR1_ne to indicate we want physical address - -csaveget: mfsprg r9,0 ; Get the per proc - lis r10,hi16(EXT(saveanchor)) ; Get the high part of the anchor - lwz r8,lclfreecnt(r9) ; Get the count - lwz r3,lclfree(r9) ; Get the start of local savearea list - cmplwi r8,LocalSaveMin ; Are we too low? - ori r10,r10,lo16(EXT(saveanchor)) ; Bottom half of the anchor - ble- sglow ; We are too low and need to grow list... - -sgreserve: lis r10,0x5555 ; Get top of empty indication - li r6,0 ; zero value - lwz r4,SAVprev(r3) ; Chain to the next one - stw r6,SAVflags(r3) ; Clear flags - ori r10,r10,0x5555 ; And the bottom - subi r8,r8,1 ; Back down count - stw r10,SAVprev(r3) ; Trash this - stw r10,SAVlevel(r3) ; Trash this - stw r4,lclfree(r9) ; Unchain first savearea - rlwinm r5,r3,0,0,19 ; Back up to first page where SAC is - stw r10,SAVact(r3) ; Trash this - stw r8,lclfreecnt(r9) ; Set new count - - btlr+ cr1_eq ; Return now if physical request - - lwz r5,SACvrswap(r5) ; Get the virtual to real translation - - mtmsr r11 ; Restore translation and exceptions - isync ; Make sure about it - #if FPVECDBG -; Note: we do not trace the physical request because this ususally comes from the -; exception vector code - - mr r6,r0 ; (TEST/DEBUG) - mr r7,r2 ; (TEST/DEBUG) + mr r6,r0 ; (TEST/DEBUG) + mr r7,r2 ; (TEST/DEBUG) + mfsprg r2,1 ; (TEST/DEBUG) + mr. r2,r2 ; (TEST/DEBUG) + beq-- svgDBBypass ; (TEST/DEBUG) lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) li r2,0x2203 ; (TEST/DEBUG) oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) sc ; (TEST/DEBUG) - mr r0,r6 ; (TEST/DEBUG) - mr r2,r7 ; (TEST/DEBUG) +svgDBBypass: ; (TEST/DEBUG) + mr r0,r6 ; (TEST/DEBUG) + mr r2,r7 ; (TEST/DEBUG) #endif - - xor r3,r3,r5 ; Get the virtual address - blr ; Leave... + b saveRestore ; restore MSR and return to our caller + + +/* + * *********************************** + * * s a v e _ g e t _ p h y s _ 3 2 * + * *********************************** + * + * reg64_t save_get_phys(void); + * + * This is the entry normally called from lowmem_vectors.s with + * translation and interrupts already off. 
+ * MUST NOT TOUCH CR7 + */ + .align 5 + .globl EXT(save_get_phys_32) -; -; Here is the slow path which is executed when there are not enough in the local list -; - +LEXT(save_get_phys_32) + mfsprg r10,0 ; get the per-proc ptr + b saveGet32 ; Get r3 <- savearea, r5 <- page address (with SAC) + + +/* + * *********************************** + * * s a v e _ g e t _ p h y s _ 6 4 * + * *********************************** + * + * reg64_t save_get_phys_64(void); + * + * This is the entry normally called from lowmem_vectors.s with + * translation and interrupts already off, and in 64-bit mode. + * MUST NOT TOUCH CR7 + */ .align 5 - -sglow: mflr r9 ; Save the return + .globl EXT(save_get_phys_64) + +LEXT(save_get_phys_64) + mfsprg r10,0 ; get the per-proc ptr + b saveGet64 ; Get r3 <- savearea, r5 <- page address (with SAC) + + +/* + * ********************* + * * s a v e G e t 6 4 * + * ********************* + * + * This is the internal routine to allocate a savearea on a 64-bit processor. + * Note that we must not take any exceptions of any kind, including PTE misses, as that + * would deadlock trying to reenter this routine. We pass back the 64-bit physical address. + * First we try the local list. If that is below a threshold, we try the global free list, + * which requires taking a lock, and replenish. If there are no saveareas in either list, + * we will install the backpocket and choke. This routine assumes that the caller has + * turned translation off, masked interrupts, turned on 64-bit mode, and set up: + * r10 = per-proc ptr + * + * We return: + * r3 = 64-bit physical address of the savearea + * r5 = 64-bit physical address of the page the savearea is in, with SAC + * + * We destroy: + * r2-r8. + * + * MUST NOT TOUCH CR7 + */ + +saveGet64: + lwz r8,lclfreecnt(r10) ; Get the count + ld r3,lclfree(r10) ; Get the start of local savearea list + cmplwi r8,LocalSaveMin ; Are we too low? + ble-- saveGet64GetGlobal ; We are too low and need to grow list... + + ; Get it from the per-processor local list. + +saveGet64GetLocal: + li r2,0x5555 ; get r2 <-- 0x55555555 55555555, our bugbug constant + ld r4,SAVprev(r3) ; Chain to the next one + oris r2,r2,0x5555 + subi r8,r8,1 ; Back down count + rldimi r2,r2,32,0 + + std r2,SAVprev(r3) ; bug next ptr + stw r2,SAVlevel(r3) ; bug context ID + li r6,0 + std r4,lclfree(r10) ; Unchain first savearea + stw r2,SAVact(r3) ; bug activation ptr + rldicr r5,r3,0,51 ; r5 <-- page ptr, where SAC is kept + stw r8,lclfreecnt(r10) ; Set new count + stw r6,SAVflags(r3) ; clear the flags + + blr + + ; Local list was low so replenish from global list. 
+ ; r7 = return address to caller of saveGet64 + ; r8 = lclfreecnt + ; r10 = per-proc ptr + +saveGet64GetGlobal: + mflr r7 ; save return adress + subfic r5,r8,LocalSaveTarget ; Get the number of saveareas we need to grab to get to target bl savelock ; Go lock up the anchor - mtlr r9 ; Restore the return - subfic r5,r8,LocalSaveTarget ; Get the number of saveareas we need to grab to get to target - lwz r9,SVfreecnt(r10) ; Get the number on this list - lwz r8,SVfree(r10) ; Get the head of the save area list + lwz r2,SVfreecnt(0) ; Get the number on this list + ld r8,SVfree(0) ; Get the head of the save area list - sub r3,r9,r5 ; Get number left after we swipe enough for local list - srawi r3,r3,31 ; Get 0 if enough or 0xFFFFFFFF if not + sub r3,r2,r5 ; Get number left after we swipe enough for local list + sradi r3,r3,63 ; Get 0 if enough or -1 if not andc r4,r5,r3 ; Get number to get if there are enough, 0 otherwise - and r5,r9,r3 ; Get 0 if there are enough, number on list otherwise - or. r5,r4,r5 ; Get the number we will move - beq- sgnofree ; There are none to get... + and r5,r2,r3 ; Get 0 if there are enough, number on list otherwise + or. r5,r4,r5 ; r5 <- number we will move from global to local list + beq-- saveGet64NoFree ; There are none to get... mtctr r5 ; Get loop count mr r6,r8 ; Remember the first in the list - -sgtrimf: bdz sgtfdone ; Count down and branch when we hit 0... - lwz r8,SAVprev(r8) ; Get the next - b sgtrimf ; Keep going... - .align 5 - -sgtfdone: lwz r7,SAVprev(r8) ; Get the next one - lwz r4,SVinuse(r10) ; Get the in use count - sub r9,r9,r5 ; Count down what we stole - stw r7,SVfree(r10) ; Set the new first in list +saveGet64c: + bdz saveGet64d ; Count down and branch when we hit 0... + ld r8,SAVprev(r8) ; Get the next + b saveGet64c ; Keep going... + +saveGet64d: + ld r3,SAVprev(r8) ; Get the next one + lwz r4,SVinuse(0) ; Get the in use count + sub r2,r2,r5 ; Count down what we stole + std r3,SVfree(0) ; Set the new first in list add r4,r4,r5 ; Count the ones we just put in the local list as "in use" - stw r9,SVfreecnt(r10) ; Set the new count - mfsprg r9,0 ; Get the per proc - stw r4,SVinuse(r10) ; Set the new in use count + stw r2,SVfreecnt(0) ; Set the new count + stw r4,SVinuse(0) ; Set the new in use count - lwz r4,lclfree(r9) ; Get the old head of list - lwz r3,lclfreecnt(r9) ; Get the old count - stw r6,lclfree(r9) ; Set the new head of the list + ld r4,lclfree(r10) ; Get the old head of list + lwz r3,lclfreecnt(r10) ; Get the old count + std r6,lclfree(r10) ; Set the new head of the list add r3,r3,r5 ; Get the new count - stw r4,SAVprev(r8) ; Point to the old head - stw r3,lclfreecnt(r9) ; Set the new count + std r4,SAVprev(r8) ; Point to the old head + stw r3,lclfreecnt(r10) ; Set the new count - mflr r9 ; Save the return bl saveunlock ; Update the adjust field and unlock - mtlr r9 ; Restore return - b csaveget ; Start over and finally allocate the savearea... - -; -; The local list is below the repopulate threshold and the free list is empty. -; First we check if there are any left in the local list and if so, we allow -; them to be allocated. If not, we release the backpocket list and choke. -; There is nothing more that we can do at this point. Hopefully we stay alive -; long enough to grab some much-needed panic information. 
-; - -sgnofree: mfsprg r9,0 ; Get the per proc - lwz r8,lclfreecnt(r9) ; Get the count - lwz r3,lclfree(r9) ; Get the start of local savearea list + mtlr r7 ; restore return address + b saveGet64 ; Start over and finally allocate the savearea... + + ; The local list is below the repopulate threshold and the global list is empty. + ; First we check if there are any left in the local list and if so, we allow + ; them to be allocated. If not, we release the backpocket list and choke. + ; There is nothing more that we can do at this point. Hopefully we stay alive + ; long enough to grab some much-needed panic information. + ; r7 = return address to caller of saveGet64 + ; r10 = per-proc ptr + +saveGet64NoFree: + lwz r8,lclfreecnt(r10) ; Get the count mr. r8,r8 ; Are there any reserve to get? - - mflr r9 ; Save the return - beq- sgchoke ; No, go choke and die... + beq-- saveGet64Choke ; No, go choke and die... bl saveunlock ; Update the adjust field and unlock - mtlr r9 ; Restore return - - mfsprg r9,0 ; Get the per proc again - lwz r3,lclfree(r9) ; Get the start of local savearea list - lwz r8,lclfreecnt(r9) ; Get the count - b sgreserve ; We have some left, dip on in... + ld r3,lclfree(r10) ; Get the start of local savearea list + lwz r8,lclfreecnt(r10) ; Get the count + mtlr r7 ; restore return address + b saveGet64GetLocal ; We have some left, dip on in... -; ; We who are about to die salute you. The savearea chain is messed up or ; empty. Add in a few so we have enough to take down the system. -; -sgchoke: lis r9,hi16(EXT(backpocket)) ; Get high order of back pocket +saveGet64Choke: + lis r9,hi16(EXT(backpocket)) ; Get high order of back pocket ori r9,r9,lo16(EXT(backpocket)) ; and low part - lwz r8,SVfreecnt(r9) ; Get the new number of free elements - lwz r7,SVfree(r9) ; Get the head of the chain - lwz r6,SVinuse(r10) ; Get total in the old list + lwz r8,SVfreecnt-saveanchor(r9) ; Get the new number of free elements + ld r7,SVfree-saveanchor(r9) ; Get the head of the chain + lwz r6,SVinuse(0) ; Get total in the old list - stw r8,SVfreecnt(r10) ; Set the new number of free elements + stw r8,SVfreecnt(0) ; Set the new number of free elements add r6,r6,r8 ; Add in the new ones - stw r7,SVfree(r10) ; Set the new head of the chain - stw r6,SVinuse(r10) ; Set total in the new list + std r7,SVfree(0) ; Set the new head of the chain + stw r6,SVinuse(0) ; Set total in the new list +saveGetChokeJoin: ; join in the fun from 32-bit mode lis r0,hi16(Choke) ; Set choke firmware call li r7,0 ; Get a clear register to unlock ori r0,r0,lo16(Choke) ; Set the rest of the choke call li r3,failNoSavearea ; Set failure code - sync ; Make sure all is committed - stw r7,SVlock(r10) ; Unlock the free list + eieio ; Make sure all is committed + stw r7,SVlock(0) ; Unlock the free list sc ; System ABEND - /* - * This routine will return a savearea to the free list. - * Note really well: we can take NO exceptions of any kind, - * including a PTE miss once the savearea lock is held. That's - * a guaranteed deadlock. That means we must disable for interrutions - * and turn all translation off. + * ********************* + * * s a v e G e t 3 2 * + * ********************* * - * We take a virtual address for save_ret. For save_ret_phys we - * assume we are already physical/interrupts off and the address is physical. + * This is the internal routine to allocate a savearea on a 32-bit processor. 
+ * Note that we must not take any exceptions of any kind, including PTE misses, as that + * would deadlock trying to reenter this routine. We pass back the 32-bit physical address. + * First we try the local list. If that is below a threshold, we try the global free list, + * which requires taking a lock, and replenish. If there are no saveareas in either list, + * we will install the backpocket and choke. This routine assumes that the caller has + * turned translation off, masked interrupts, and set up: + * r10 = per-proc ptr * - * Here's a tricky bit, and important: - * - * When we trim the list, we NEVER trim the very first one. This is because that is - * the very last one released and the exception exit code will release the savearea - * BEFORE it is done using it. Wouldn't be too good if another processor started - * using it, eh? So for this case, we are safe so long as the savearea stays on - * the local list. (Note: the exit routine needs to do this because it is in the - * process of restoring all context and it needs to keep it until the last second.) + * We return: + * r3 = 32-bit physical address of the savearea + * r5 = 32-bit physical address of the page the savearea is in, with SAC * + * We destroy: + * r2-r8. */ -; -; Note: when called from interrupt enabled code, we want to turn off vector and -; floating point because we can not guarantee that the enablement will not change -; while we hold a copy of the MSR. We force it off so that the lazy switcher will -; turn it back on if used. However, we need to NOT change it save_ret or save_get -; is called with interrupts disabled. This is because both of these routine are -; called from within the context switcher and changing the enablement would be -; very, very bad..... (especially from within the lazt switcher) -; +saveGet32: + lwz r8,lclfreecnt(r10) ; Get the count + lwz r3,lclfree+4(r10) ; Get the start of local savearea list + cmplwi r8,LocalSaveMin ; Are we too low? + ble- saveGet32GetGlobal ; We are too low and need to grow list... + + ; Get savearea from per-processor local list. + +saveGet32GetLocal: + li r2,0x5555 ; get r2 <-- 0x55555555, our bugbug constant + lwz r4,SAVprev+4(r3) ; Chain to the next one + oris r2,r2,0x5555 + subi r8,r8,1 ; Back down count - .align 5 - .globl EXT(save_ret) + stw r2,SAVprev+4(r3) ; bug next ptr + stw r2,SAVlevel(r3) ; bug context ID + li r6,0 + stw r4,lclfree+4(r10) ; Unchain first savearea + stw r2,SAVact(r3) ; bug activation ptr + rlwinm r5,r3,0,0,19 ; r5 <-- page ptr, where SAC is kept + stw r8,lclfreecnt(r10) ; Set new count + stw r6,SAVflags(r3) ; clear the flags + + blr + + ; Local list was low so replenish from global list. + ; r7 = return address to caller of saveGet32 + ; r8 = lclfreecnt + ; r10 = per-proc ptr + +saveGet32GetGlobal: + mflr r7 ; save return adress + subfic r5,r8,LocalSaveTarget ; Get the number of saveareas we need to grab to get to target + bl savelock ; Go lock up the anchor + + lwz r2,SVfreecnt(0) ; Get the number on this list + lwz r8,SVfree+4(0) ; Get the head of the save area list + + sub r3,r2,r5 ; Get number left after we swipe enough for local list + srawi r3,r3,31 ; Get 0 if enough or -1 if not + andc r4,r5,r3 ; Get number to get if there are enough, 0 otherwise + and r5,r2,r3 ; Get 0 if there are enough, number on list otherwise + or. r5,r4,r5 ; r5 <- number we will move from global to local list + beq- saveGet32NoFree ; There are none to get... 
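The sub/srawi/andc/and/or sequence used by both replenish paths (sradi on the 64-bit side) sizes the transfer without a branch: it smears the sign bit of the difference into a select mask and so computes min(global count, amount needed). The same trick in C, a sketch assuming an arithmetic right shift of signed values, which is what srawi provides:

    #include <assert.h>
    #include <stdint.h>

    /* Branch-free select: take 'need' entries if 'avail' covers it,
     * otherwise take everything available. */
    static uint32_t to_move(uint32_t avail, uint32_t need)
    {
        int32_t  diff = (int32_t)(avail - need);
        uint32_t mask = (uint32_t)(diff >> 31);  /* 0 if enough, all-ones if short */
        return (need & ~mask) | (avail & mask);
    }

    int main(void)
    {
        assert(to_move(10, 6) == 6);  /* enough on the global list: take what we need */
        assert(to_move(3, 6) == 3);   /* short: drain the whole global list */
        return 0;
    }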
+ + mtctr r5 ; Get loop count + mr r6,r8 ; Remember the first in the list -LEXT(save_ret) +saveGet32c: + bdz saveGet32d ; Count down and branch when we hit 0... + lwz r8,SAVprev+4(r8) ; Get the next + b saveGet32c ; Keep going... - mfmsr r12 ; Get the MSR - rlwinm. r9,r12,0,MSR_EE_BIT,MSR_EE_BIT ; Are interrupts enabled here? - beq+ EXT(save_ret_join) ; Nope, do not mess with fp or vec... - rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off +saveGet32d: + lwz r3,SAVprev+4(r8) ; Get the next one + lwz r4,SVinuse(0) ; Get the in use count + sub r2,r2,r5 ; Count down what we stole + stw r3,SVfree+4(0) ; Set the new first in list + add r4,r4,r5 ; Count the ones we just put in the local list as "in use" + stw r2,SVfreecnt(0) ; Set the new count + stw r4,SVinuse(0) ; Set the new in use count + + lwz r4,lclfree+4(r10) ; Get the old head of list + lwz r3,lclfreecnt(r10) ; Get the old count + stw r6,lclfree+4(r10) ; Set the new head of the list + add r3,r3,r5 ; Get the new count + stw r4,SAVprev+4(r8) ; Point to the old head + stw r3,lclfreecnt(r10) ; Set the new count - .globl EXT(save_ret_join) + bl saveunlock ; Update the adjust field and unlock + mtlr r7 ; restore return address + b saveGet32 ; Start over and finally allocate the savearea... + + ; The local list is below the repopulate threshold and the global list is empty. + ; First we check if there are any left in the local list and if so, we allow + ; them to be allocated. If not, we release the backpocket list and choke. + ; There is nothing more that we can do at this point. Hopefully we stay alive + ; long enough to grab some much-needed panic information. + ; r7 = return address to caller of saveGet32 + ; r10 = per-proc ptr + +saveGet32NoFree: + lwz r8,lclfreecnt(r10) ; Get the count + mr. r8,r8 ; Are there any reserve to get? + beq- saveGet32Choke ; No, go choke and die... + bl saveunlock ; Update the adjust field and unlock + lwz r3,lclfree+4(r10) ; Get the start of local savearea list + lwz r8,lclfreecnt(r10) ; Get the count + mtlr r7 ; restore return address + b saveGet32GetLocal ; We have some left, dip on in... + +; We who are about to die salute you. The savearea chain is messed up or +; empty. Add in a few so we have enough to take down the system. -LEXT(save_ret_join) - crclr cr1_eq ; Clear CR1_ne to indicate we have virtual address - mfsprg r9,2 ; Get the feature flags - rlwinm r6,r3,0,0,19 ; Round back down to the savearea page block - lwz r5,SACvrswap(r6) ; Get the conversion to real - mtcrf 0x04,r9 ; Set the features - mfsprg r9,0 ; Get the per proc - xor r8,r3,r5 ; Get the real address of the savearea - andi. r3,r12,0x7FCF ; Turn off all translation and rupts +saveGet32Choke: + lis r9,hi16(EXT(backpocket)) ; Get high order of back pocket + ori r9,r9,lo16(EXT(backpocket)) ; and low part + + lwz r8,SVfreecnt-saveanchor(r9) ; Get the new number of free elements + lwz r7,SVfree+4-saveanchor(r9) ; Get the head of the chain + lwz r6,SVinuse(0) ; Get total in the old list - bt pfNoMSRirb,srNoMSR ; No MSR... + stw r8,SVfreecnt(0) ; Set the new number of free elements + add r6,r6,r8 ; Add in the new ones (why?) + stw r7,SVfree+4(0) ; Set the new head of the chain + stw r6,SVinuse(0) ; Set total in the new list + + b saveGetChokeJoin - mtmsr r3 ; Translation and all off - isync ; Toss prefetch - b srcommon - - .align 5 - -srNoMSR: li r0,loadMSR ; Get the MSR setter SC - sc ; Set it -srNoMSRx: b srcommon ; Join up below... 
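Both save_get above and the save_ret path that follows convert between virtual and physical savearea addresses with a single XOR against the page's SACvrswap word, which save_adjust initializes as the XOR of the page's physical and virtual addresses. Because both page addresses are page aligned, the mask's low twelve bits are zero and the same XOR maps any slot in the page in either direction. A small self-checking sketch; the two page addresses are made-up values for illustration:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t virt_page = 0x00000000e0b2d000ull;  /* hypothetical V address of a savearea page */
        uint64_t phys_page = 0x000000012ab4d000ull;  /* hypothetical R address of the same page  */

        /* savearea.c: freepage->sac_vrswap =
         *     ((uint64_t)physpage << 12) ^ (uint64_t)freepage */
        uint64_t sac_vrswap = virt_page ^ phys_page;

        uint64_t virt_save = virt_page + 0x380;       /* some savearea slot in the page */
        uint64_t phys_save = virt_save ^ sac_vrswap;  /* V -> R: one XOR */

        assert(phys_save == phys_page + 0x380);       /* page offset is preserved */
        assert((phys_save ^ sac_vrswap) == virt_save);/* R -> V: the same XOR */
        return 0;
    }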
+/* + * ******************* + * * s a v e _ r e t * + * ******************* + * + * void save_ret(struct savearea *); // normal call + * void save_ret_wMSR(struct savearea *,reg64_t); // passes MSR to restore as 2nd arg + * + * Return a savearea passed by virtual address to the free list. + * Note really well: we can take NO exceptions of any kind, + * including a PTE miss once the savearea lock is held. That's + * a guaranteed deadlock. That means we must disable for interrutions + * and turn all translation off. + */ + .globl EXT(save_ret_wMSR) ; alternate entry pt w MSR to restore in r4 + +LEXT(save_ret_wMSR) + crset 31 ; set flag for save_ret_wMSR + b svrt1 ; join common code + + .align 5 + .globl EXT(save_ret) + +LEXT(save_ret) + crclr 31 ; clear flag for save_ret_wMSR +svrt1: ; join from save_ret_wMSR + mflr r9 ; get return address + rlwinm r7,r3,0,0,19 ; get virtual address of SAC area at start of page + mr r8,r3 ; save virtual address + lwz r5,SACvrswap+0(r7) ; get 64-bit converter from V to R + lwz r6,SACvrswap+4(r7) ; both halves, though only bottom used on 32-bit machine +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x2204 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + bl saveSetup ; turn translation off, 64-bit on, load many regs + bf++ 31,svrt3 ; skip if not save_ret_wMSR + mr r11,r4 ; was save_ret_wMSR, so overwrite saved MSR +svrt3: + bf-- pf64Bitb,svrt4 ; skip if a 32-bit processor + + ; Handle 64-bit processor. + + rldimi r6,r5,32,0 ; merge upper and lower halves of SACvrswap together + xor r3,r8,r6 ; get r3 <- 64-bit physical address of this savearea + bl saveRet64 ; return it + mtlr r9 ; restore return address + b saveRestore64 ; restore MSR + + ; Handle 32-bit processor. + +svrt4: + xor r3,r8,r6 ; get r3 <- 32-bit physical address of this savearea + bl saveRet32 ; return it + mtlr r9 ; restore return address + b saveRestore32 ; restore MSR + +/* + * ***************************** + * * s a v e _ r e t _ p h y s * + * ***************************** + * + * void save_ret_phys(reg64_t); + * + * Called from lowmem vectors to return (ie, free) a savearea by physical address. + * Translation and interrupts are already off, and 64-bit mode is set if defined. + * We can take _no_ exceptions of any kind in this code, including PTE miss, since + * that would result in a deadlock. We expect: + * r3 = phys addr of savearea + * msr = IR, DR, and EE off, SF on + * cr6 = pf64Bit flag + * We destroy: + * r0,r2-r10. + */ .align 5 .globl EXT(save_ret_phys) LEXT(save_ret_phys) + mfsprg r10,0 ; get the per-proc ptr + bf-- pf64Bitb,saveRet32 ; handle 32-bit machine + b saveRet64 ; handle 64-bit machine + - mfsprg r9,0 ; Get the per proc - crset cr1_eq ; Clear CR1_ne to indicate we have physical address - mr r8,r3 ; Save the savearea address - - nop - -srcommon: +/* + * ********************* + * * s a v e R e t 6 4 * + * ********************* + * + * This is the internal routine to free a savearea, passed by 64-bit physical + * address. We assume that IR, DR, and EE are all off, that SF is on, and: + * r3 = phys address of the savearea + * r10 = per-proc ptr + * We destroy: + * r0,r2-r8. 
+ */ + .align 5 + saveRet64: li r0,SAVempty ; Get marker for free savearea - lwz r7,lclfreecnt(r9) ; Get the local count - lwz r6,lclfree(r9) ; Get the old local header + lwz r7,lclfreecnt(r10) ; Get the local count + ld r6,lclfree(r10) ; Get the old local header addi r7,r7,1 ; Pop up the free count - stw r6,SAVprev(r8) ; Plant free chain pointer + std r6,SAVprev(r3) ; Plant free chain pointer cmplwi r7,LocalSaveMax ; Has the list gotten too long? - stb r0,SAVflags+2(r8) ; Mark savearea free - stw r8,lclfree(r9) ; Chain us on in - stw r7,lclfreecnt(r9) ; Bump up the count - bgt- srtrim ; List is too long, go trim it... - - btlr cr1_eq ; Leave if we were a physical request... + stb r0,SAVflags+2(r3) ; Mark savearea free + std r3,lclfree(r10) ; Chain us on in + stw r7,lclfreecnt(r10) ; Bump up the count + bltlr++ ; List not too long, so done - mtmsr r12 ; Restore translation and exceptions - isync ; Make sure about it - -#if FPVECDBG - lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) - li r2,0x2204 ; (TEST/DEBUG) - mr r3,r8 ; (TEST/DEBUG) - oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) - sc ; (TEST/DEBUG) -#endif - blr ; Leave... - -; -; The local savearea chain has gotten too long. Trim it down to the target. -; Note: never trim the first one, just skip over it. -; - - .align 5 +/* The local savearea chain has gotten too long. Trim it down to the target. + * Here's a tricky bit, and important: + * + * When we trim the list, we NEVER trim the very first one. This is because that is + * the very last one released and the exception exit code will release the savearea + * BEFORE it is done using it. Wouldn't be too good if another processor started + * using it, eh? So for this case, we are safe so long as the savearea stays on + * the local list. (Note: the exit routine needs to do this because it is in the + * process of restoring all context and it needs to keep it until the last second.) + */ -srtrim: - mr r2,r8 ; Save the guy we are releasing - lwz r8,SAVprev(r8) ; Skip over the first + mflr r0 ; save return to caller of saveRet64 + mr r2,r3 ; r2 <- 1st one on local list, which must not be trimmed + ld r3,SAVprev(r3) ; Skip over the first subi r7,r7,LocalSaveTarget ; Figure out how much to trim - mr r6,r8 ; Save the first one to trim + mr r6,r3 ; r6 <- first one to trim mr r5,r7 ; Save the number we are trimming -srtrimming: addic. r7,r7,-1 ; Any left to do? - ble- srtrimmed ; Nope... - lwz r8,SAVprev(r8) ; Skip to the next one - b srtrimming ; Keep going... +saveRet64a: + addic. r7,r7,-1 ; Any left to do? + ble-- saveRet64b ; Nope... + ld r3,SAVprev(r3) ; Skip to the next one + b saveRet64a ; Keep going... 
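The walk above and the saveRet64b block just below carry out the trim rule restated in the comment: once the local list passes LocalSaveMax, hand everything past the head back to the global pool until only LocalSaveTarget remain, and never trim the just-freed head, since the exception-exit path releases its savearea before it is finished with it. A single-threaded C model of the free-with-trim step (function name and threshold values are stand-ins; the real values live in savearea.h):

    #include <stddef.h>

    #define LocalSaveMax    16          /* placeholders; real values in savearea.h */
    #define LocalSaveTarget 8

    struct sa { struct sa *prev; };

    static struct sa *local_free;       /* per_proc lclfree / lclfreecnt      */
    static unsigned   local_cnt;
    static struct sa *global_free;      /* saveanchor.savefree / savefreecnt  */
    static unsigned   global_cnt;

    static void model_save_ret(struct sa *s)
    {
        s->prev = local_free;           /* push the freed savearea on the local list */
        local_free = s;
        if (++local_cnt < LocalSaveMax)
            return;                     /* list not too long, so done */

        struct sa *keep  = local_free;  /* never trim the just-freed head */
        struct sa *first = keep->prev;  /* first entry of the run to trim */
        unsigned   n     = local_cnt - LocalSaveTarget;

        struct sa *last = first;        /* walk to the last entry of the run */
        for (unsigned i = 1; i < n; i++)
            last = last->prev;

        keep->prev = last->prev;        /* unlink the run from the local list */
        local_cnt  = LocalSaveTarget;

        last->prev  = global_free;      /* chain the run onto the global list */
        global_free = first;            /* (the real code holds savelock here) */
        global_cnt += n;
    }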
- .align 5 - -srtrimmed: lis r10,hi16(EXT(saveanchor)) ; Get the high part of the anchor - lwz r7,SAVprev(r8) ; Point to the next one - ori r10,r10,lo16(EXT(saveanchor)) ; Bottom half of the anchor +saveRet64b: ; r3 <- last one to trim + ld r7,SAVprev(r3) ; Point to the first one not to trim li r4,LocalSaveTarget ; Set the target count - stw r7,SAVprev(r2) ; Trim stuff leaving the one just released as first - stw r4,lclfreecnt(r9) ; Set the current count + std r7,SAVprev(r2) ; Trim stuff leaving the one just released as first + stw r4,lclfreecnt(r10) ; Set the current count - mflr r9 ; Save the return bl savelock ; Lock up the anchor - lwz r3,SVfree(r10) ; Get the old head of the free list - lwz r4,SVfreecnt(r10) ; Get the number of free ones - lwz r7,SVinuse(r10) ; Get the number that are in use - stw r6,SVfree(r10) ; Point to the first trimmed savearea + ld r8,SVfree(0) ; Get the old head of the free list + lwz r4,SVfreecnt(0) ; Get the number of free ones + lwz r7,SVinuse(0) ; Get the number that are in use + std r6,SVfree(0) ; Point to the first trimmed savearea add r4,r4,r5 ; Add number trimmed to free count - stw r3,SAVprev(r8) ; Chain the old head to the tail of the trimmed guys + std r8,SAVprev(r3) ; Chain the old head to the tail of the trimmed guys sub r7,r7,r5 ; Remove the trims from the in use count - stw r4,SVfreecnt(r10) ; Set new free count - stw r7,SVinuse(r10) ; Set new in use count + stw r4,SVfreecnt(0) ; Set new free count + stw r7,SVinuse(0) ; Set new in use count - bl saveunlock ; Set adjust count and unlock the saveanchor + mtlr r0 ; Restore the return to our caller + b saveunlock ; Set adjust count, unlock the saveanchor, and return + - mtlr r9 ; Restore the return - - btlr+ cr1_eq ; Leave if we were a physical request... +/* + * ********************* + * * s a v e R e t 3 2 * + * ********************* + * + * This is the internal routine to free a savearea, passed by 32-bit physical + * address. We assume that IR, DR, and EE are all off, and: + * r3 = phys address of the savearea + * r10 = per-proc ptr + * We destroy: + * r0,r2-r8. + */ + .align 5 + saveRet32: + li r0,SAVempty ; Get marker for free savearea + lwz r7,lclfreecnt(r10) ; Get the local count + lwz r6,lclfree+4(r10) ; Get the old local header + addi r7,r7,1 ; Pop up the free count + stw r6,SAVprev+4(r3) ; Plant free chain pointer + cmplwi r7,LocalSaveMax ; Has the list gotten too long? + stb r0,SAVflags+2(r3) ; Mark savearea free + stw r3,lclfree+4(r10) ; Chain us on in + stw r7,lclfreecnt(r10) ; Bump up the count + bltlr+ ; List not too long, so done - mtmsr r12 ; Restore translation and exceptions - isync ; Make sure about it - -#if FPVECDBG - lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) - mr r3,r2 ; (TEST/DEBUG) - li r2,0x2205 ; (TEST/DEBUG) - oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) - sc ; (TEST/DEBUG) -#endif - blr ; Leave... - +/* The local savearea chain has gotten too long. Trim it down to the target. + * Here's a tricky bit, and important: + * + * When we trim the list, we NEVER trim the very first one. This is because that is + * the very last one released and the exception exit code will release the savearea + * BEFORE it is done using it. Wouldn't be too good if another processor started + * using it, eh? So for this case, we are safe so long as the savearea stays on + * the local list. (Note: the exit routine needs to do this because it is in the + * process of restoring all context and it needs to keep it until the last second.) 
+ */ -; -; NOTE: This is the most complicated part of savearea maintainence. -; Expect errors here....... -; -; save_trim_free - this routine will trim the free list down to the target count. -; It trims the list and, if the pool page was fully allocated, puts that page on -; the start of the pool list. -; -; If the savearea being released is the last on a pool page (i.e., all entries -; are released), the page is dequeued from the pool and queued to any other -; found during this scan. Note that this queue is maintained virtually. -; -; When the scan is done, the saveanchor lock is released and the list of -; freed pool pages is returned. + mflr r0 ; save return to caller of saveRet32 + mr r2,r3 ; r2 <- 1st one on local list, which must not be trimmed + lwz r3,SAVprev+4(r3) ; Skip over the first + subi r7,r7,LocalSaveTarget ; Figure out how much to trim + mr r6,r3 ; r6 <- first one to trim + mr r5,r7 ; Save the number we are trimming + +saveRet32a: + addic. r7,r7,-1 ; Any left to do? + ble- saveRet32b ; Nope... + lwz r3,SAVprev+4(r3) ; Skip to the next one + b saveRet32a ; Keep going... + +saveRet32b: ; r3 <- last one to trim + lwz r7,SAVprev+4(r3) ; Point to the first one not to trim + li r4,LocalSaveTarget ; Set the target count + stw r7,SAVprev+4(r2) ; Trim stuff leaving the one just released as first + stw r4,lclfreecnt(r10) ; Set the current count + + bl savelock ; Lock up the anchor + + lwz r8,SVfree+4(0) ; Get the old head of the free list + lwz r4,SVfreecnt(0) ; Get the number of free ones + lwz r7,SVinuse(0) ; Get the number that are in use + stw r6,SVfree+4(0) ; Point to the first trimmed savearea + add r4,r4,r5 ; Add number trimmed to free count + stw r8,SAVprev+4(r3) ; Chain the old head to the tail of the trimmed guys + sub r7,r7,r5 ; Remove the trims from the in use count + stw r4,SVfreecnt(0) ; Set new free count + stw r7,SVinuse(0) ; Set new in use count + mtlr r0 ; Restore the return to our caller + b saveunlock ; Set adjust count, unlock the saveanchor, and return -; For latency sake we may want to revisit this code. If we are trimming a -; large number of saveareas, we could be disabled and holding the savearea lock -; for quite a while. It may be that we want to break the trim down into parts. -; Possibly trimming the free list, then individually pushing them into the free pool. -; -; This function expects to be called with translation on and a valid stack. -; +/* + * ******************************* + * * s a v e _ t r i m _ f r e e * + * ******************************* + * + * struct savearea_comm *save_trim_free(void); + * + * Trim the free list down to the target count, ie by -(SVadjust) save areas. + * It trims the list and, if a pool page was fully allocated, puts that page on + * the start of the pool list. + * + * If the savearea being released is the last on a pool page (i.e., all entries + * are released), the page is dequeued from the pool and queued to any other + * found during this scan. Note that this queue is maintained virtually. + * + * When the scan is done, the saveanchor lock is released and the list of + * freed pool pages is returned to our caller. + * + * For latency sake we may want to revisit this code. If we are trimming a + * large number of saveareas, we could be disabled and holding the savearea lock + * for quite a while. It may be that we want to break the trim down into parts. + * Possibly trimming the free list, then individually pushing them into the free pool. 
+ * + * This function expects to be called with translation on and a valid stack. + * It uses the standard ABI, ie we destroy r2 and r3-r11, and return the ptr in r3. + */ .align 5 .globl EXT(save_trim_free) LEXT(save_trim_free) subi r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Make space for 4 registers on stack - mfsprg r9,2 ; Get the feature flags + mflr r9 ; save our return address stw r28,FM_SIZE+0(r1) ; Save R28 - mfmsr r12 ; Get the MSR - rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - stw r29,FM_SIZE+4(r1) ; Save R28 - mtcrf 0x04,r9 ; Set the features - stw r30,FM_SIZE+8(r1) ; Save R28 - lis r10,hi16(EXT(saveanchor)) ; Get the high part of the anchor - stw r31,FM_SIZE+12(r1) ; Save R28 - andi. r3,r12,0x7FCF ; Turn off all translation and rupts - ori r10,r10,lo16(EXT(saveanchor)) ; Bottom half of the anchor - mflr r9 ; Save the return - - bt pfNoMSRirb,stNoMSR ; No MSR... - - mtmsr r3 ; Translation and all off - isync ; Toss prefetch - b stNoMSRx - - .align 5 - -stNoMSR: li r0,loadMSR ; Get the MSR setter SC - sc ; Set it - -stNoMSRx: bl savelock ; Go lock up the anchor + stw r29,FM_SIZE+4(r1) ; Save R29 + stw r30,FM_SIZE+8(r1) ; Save R30 + stw r31,FM_SIZE+12(r1) ; Save R31 + + bl saveSetup ; turn off translation and interrupts, load many regs + bl savelock ; Go lock up the anchor - lwz r8,SVadjust(r10) ; How many do we need to clear out? + lwz r8,SVadjust(0) ; How many do we need to clear out? li r3,0 ; Get a 0 neg. r8,r8 ; Get the actual we need to toss (adjust is neg if too many) - lwz r7,SVfree(r10) ; Get the first on the free list - bgt+ stneedtrim ; Yeah, we still need it... - - mtlr r9 ; Restore return - stw r3,SVlock(r10) ; Quick unlock (no need for sync or to set adjust, nothing changed) + ble- save_trim_free1 ; skip if no trimming needed anymore + bf-- pf64Bitb,saveTrim32 ; handle 32-bit processors + b saveTrim64 ; handle 64-bit processors - mtmsr r12 ; Restore translation and exceptions - isync ; Make sure about it +save_trim_free1: ; by the time we were called, no need to trim anymore + stw r3,SVlock(0) ; Quick unlock (no need for sync or to set adjust, nothing changed) + mtlr r9 ; Restore return #if FPVECDBG lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) @@ -612,34 +834,42 @@ stNoMSRx: bl savelock ; Go lock up the anchor oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) sc ; (TEST/DEBUG) #endif - addi r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Pop stack - have not trashed register so no need to reload - blr ; Leave... + addi r1,r1,(FM_ALIGN(16)+FM_SIZE); Pop stack - have not trashed register so no need to reload + b saveRestore ; restore translation and EE, turn SF off, return to our caller - .align 5 - -stneedtrim: mr r6,r7 ; Save the first one + +/* + * *********************** + * * s a v e T r i m 3 2 * + * *********************** + * + * Handle "save_trim_free" on 32-bit processors. At this point, translation and interrupts + * are off, the savearea anchor is locked, and: + * r8 = #pages to trim (>0) + * r9 = return address + * r10 = per-proc ptr + * r11 = MSR at entry + */ + +saveTrim32: + lwz r7,SVfree+4(0) ; Get the first on the free list + mr r6,r7 ; Save the first one mr r5,r8 ; Save the number we are trimming - nop - nop - sttrimming: addic. r5,r5,-1 ; Any left to do? ble- sttrimmed ; Nope... - lwz r7,SAVprev(r7) ; Skip to the next one + lwz r7,SAVprev+4(r7) ; Skip to the next one b sttrimming ; Keep going... 
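Stripped of the 32/64-bit register details, the overall shape of save_trim_free() is roughly the following C. The names saveSetup, savelock, saveunlock, and saveRestore correspond to the assembly routines of those names; trim_n_saveareas() and the struct layout are illustrative assumptions only:

/* Illustrative sketch; not the kernel's actual declarations. */
struct savearea_comm;
struct saveanchor_t {
    int SVlock;      /* anchor spinlock */
    int SVadjust;    /* negative when there are too many free saveareas */
};
extern struct saveanchor_t saveanchor;
extern void savelock(void), saveunlock(void);
extern void saveSetup(void), saveRestore(void);
extern struct savearea_comm *trim_n_saveareas(int n);   /* hypothetical */

struct savearea_comm *save_trim_free(void)
{
    saveSetup();                        /* translation/interrupts off */
    savelock();                         /* take the saveanchor lock */
    int n = -saveanchor.SVadjust;       /* adjust is negative if too many */
    if (n <= 0) {                       /* no trimming needed after all */
        saveanchor.SVlock = 0;          /* quick unlock, nothing changed */
        saveRestore();
        return NULL;
    }
    /* Unlink n saveareas from the head of the global free list, mark
     * their slots free in each pool page's SACalloc bitmap, and collect
     * pool pages that became completely free. */
    struct savearea_comm *freed_pages = trim_n_saveareas(n);
    saveunlock();                       /* recompute SVadjust, unlock */
    saveRestore();                      /* translation/interrupts back on */
    return freed_pages;                 /* virtually-chained free pages */
}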
- - .align 5 -sttrimmed: lwz r5,SAVprev(r7) ; Get the next one (for new head of free list) - lwz r4,SVfreecnt(r10) ; Get the free count - stw r5,SVfree(r10) ; Set new head +sttrimmed: lwz r5,SAVprev+4(r7) ; Get the next one (for new head of free list) + lwz r4,SVfreecnt(0) ; Get the free count + stw r5,SVfree+4(0) ; Set new head sub r4,r4,r8 ; Calculate the new free count li r31,0 ; Show we have no free pool blocks yet - cmplwi cr1,r5,0 ; Make sure this is not equal - stw r4,SVfreecnt(r10) ; Set new free count + crclr cr1_eq ; don't exit loop before 1st iteration + stw r4,SVfreecnt(0) ; Set new free count lis r30,hi16(sac_empty) ; Get what empty looks like -; ; NOTE: The savearea size must be 640 (0x280). We are doing a divide by shifts and stuff ; here. ; @@ -647,6 +877,17 @@ sttrimmed: lwz r5,SAVprev(r7) ; Get the next one (for new head of free list) #error Savearea size is not 640!!!!!!!!!!!! #endif + ; Loop over each savearea we are trimming. + ; r6 = next savearea to trim + ; r7 = last savearea to trim + ; r8 = #pages to trim (>0) + ; r9 = return address + ; r10 = per-proc ptr + ; r11 = MSR at entry + ; r30 = what SACalloc looks like when all saveareas are free + ; r31 = free pool block list + ; cr1 = beq set if we just trimmed the last, ie if we are done + sttoss: beq+ cr1,stdone ; All done now... cmplw cr1,r6,r7 ; Have we finished the loop? @@ -664,7 +905,7 @@ sttoss: beq+ cr1,stdone ; All done now... srw r4,r4,r0 ; Get the allocation mask or r5,r5,r4 ; Free this entry cmplw r5,r4 ; Is this the only free entry? - lwz r6,SAVprev(r6) ; Chain to the next trimmed savearea + lwz r6,SAVprev+4(r6) ; Chain to the next trimmed savearea cmplw cr7,r30,r5 ; Does this look empty? stw r5,SACalloc(r2) ; Save back the allocation bits beq- stputpool ; First free entry, go put it into the pool... @@ -676,19 +917,19 @@ sttoss: beq+ cr1,stdone ; All done now... lwz r29,SACflags(r2) ; Get the flags cmplwi cr5,r31,0 ; Is this guy on the release list? - lwz r28,SACnext(r2) ; Get the forward chain + lwz r28,SACnext+4(r2) ; Get the forward chain rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (also sets 0 needed below) bne- sttoss ; This is permanent entry, do not try to release... - lwz r29,SACprev(r2) ; and the previous + lwz r29,SACprev+4(r2) ; and the previous beq- cr5,stnot1st ; Not first - lwz r0,SACvrswap(r31) ; Load the previous pool page vr conversion + lwz r0,SACvrswap+4(r31) ; Load the previous pool page vr conversion -stnot1st: stw r28,SACnext(r29) ; Previous guy points to my next +stnot1st: stw r28,SACnext+4(r29) ; Previous guy points to my next xor r0,r0,r31 ; Make the last guy virtual - stw r29,SACprev(r28) ; Next guy points back to my previous - stw r0,SAVprev(r2) ; Store the old top virtual as my back chain + stw r29,SACprev+4(r28) ; Next guy points back to my previous + stw r0,SAVprev+4(r2) ; Store the old top virtual as my back chain mr r31,r2 ; My physical is now the head of the chain b sttoss ; Get the next one... @@ -696,31 +937,136 @@ stnot1st: stw r28,SACnext(r29) ; Previous guy points to my next ; A pool block that had no free entries now has one. Stick it on the pool list.
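The divide-by-640 that the #error above guards is easier to see in C: a savearea is 640 == 5 << 7 bytes, so the slot index of a savearea within its 4KB pool block is (offset >> 7) / 5, which the assembly computes without a divide via the 0x00442200 shift table. A hypothetical C version of the index and bitmap-mask math follows; how many slots a block actually holds, and where its control area lives, are defined by the savearea header and are not asserted here.

/* Illustration of the SACalloc index math; assumes SAVsize == 640. */
unsigned int save_index(unsigned long savearea_addr)
{
    unsigned long block  = savearea_addr & ~0xFFFUL;  /* 4KB block base */
    unsigned long offset = savearea_addr - block;
    return (unsigned int)((offset >> 7) / 5);         /* offset / 640 */
}

unsigned int save_alloc_mask(unsigned long savearea_addr)
{
    /* one bit per slot, counted from the top of the word, matching
     * the cntlzw-style masks used in the assembly */
    return 0x80000000u >> save_index(savearea_addr);
}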
; - .align 5 - -stputpool: lwz r28,SVpoolfwd(r10) ; Get the first guy on the list - stw r2,SVpoolfwd(r10) ; Put us on the top of the list - stw r28,SACnext(r2) ; We point to the old top - stw r2,SACprev(r28) ; Old top guy points back to us - stw r10,SACprev(r2) ; Our back points to the anchor +stputpool: lwz r28,SVpoolfwd+4(0) ; Get the first guy on the list + li r0,saveanchor ; Point to the saveanchor + stw r2,SVpoolfwd+4(0) ; Put us on the top of the list + stw r28,SACnext+4(r2) ; We point to the old top + stw r2,SACprev+4(r28) ; Old top guy points back to us + stw r0,SACprev+4(r2) ; Our back points to the anchor b sttoss ; Go on to the next one... + + +/* + * *********************** + * * s a v e T r i m 6 4 * + * *********************** + * + * Handle "save_trim_free" on 64-bit processors. At this point, translation and interrupts + * are off, SF is on, the savearea anchor is locked, and: + * r8 = #pages to trim (>0) + * r9 = return address + * r10 = per-proc ptr + * r11 = MSR at entry + */ + +saveTrim64: + ld r7,SVfree(0) ; Get the first on the free list + mr r6,r7 ; Save the first one + mr r5,r8 ; Save the number we are trimming -; -; We are all done. Relocate pool release head, restore all, and go. -; +sttrimming64: + addic. r5,r5,-1 ; Any left to do? + ble-- sttrimmed64 ; Nope... + ld r7,SAVprev(r7) ; Skip to the next one + b sttrimming64 ; Keep going... - .align 5 +sttrimmed64: + ld r5,SAVprev(r7) ; Get the next one (for new head of free list) + lwz r4,SVfreecnt(0) ; Get the free count + std r5,SVfree(0) ; Set new head + sub r4,r4,r8 ; Calculate the new free count + li r31,0 ; Show we have no free pool blocks yet + crclr cr1_eq ; don't exit loop before 1st iteration + stw r4,SVfreecnt(0) ; Set new free count + lis r30,hi16(sac_empty) ; Get what empty looks like + + + ; Loop over each savearea we are trimming. + ; r6 = next savearea to trim + ; r7 = last savearea to trim + ; r8 = #pages to trim (>0) + ; r9 = return address + ; r10 = per-proc ptr + ; r11 = MSR at entry + ; r30 = what SACalloc looks like when all saveareas are free + ; r31 = free pool block list + ; cr1 = beq set if we just trimmed the last, ie if we are done + ; + ; WARNING: as in the 32-bit path, this code is doing a divide by 640 (SAVsize). + +sttoss64: + beq++ cr1,stdone ; All done now... + + cmpld cr1,r6,r7 ; Have we finished the loop? + + lis r0,0x0044 ; Get top of table + rldicr r2,r6,0,51 ; r2 <- phys addr of savearea block (with control area) + ori r0,r0,0x2200 ; Finish shift table + rlwinm r4,r6,25,27,30 ; Get (addr >> 7) & 0x1E (same as twice high nybble) + lwz r5,SACalloc(r2) ; Get the allocation bits + addi r4,r4,1 ; Shift 1 extra + rlwinm r3,r6,25,31,31 ; Get (addr >> 7) & 1 + rlwnm r0,r0,r4,29,31 ; Get partial index + lis r4,lo16(0x8000) ; Get the bit mask + add r0,r0,r3 ; Make the real index + srw r4,r4,r0 ; Get the allocation mask + or r5,r5,r4 ; Free this entry + cmplw r5,r4 ; Is this the only free entry? + ld r6,SAVprev(r6) ; Chain to the next trimmed savearea + cmplw cr7,r30,r5 ; Does this look empty? + stw r5,SACalloc(r2) ; Save back the allocation bits + beq-- stputpool64 ; First free entry, go put it into the pool... + bne++ cr7,sttoss64 ; Not an empty block + +; We have an empty block. Remove it from the pool list. + + lwz r29,SACflags(r2) ; Get the flags + cmpldi cr5,r31,0 ; Is this guy on the release list? + ld r28,SACnext(r2) ; Get the forward chain + + rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area?
(also sets 0 needed below) + bne-- sttoss64 ; This is permanent entry, do not try to release... + + ld r29,SACprev(r2) ; and the previous + beq-- cr5,stnot1st64 ; Not first + ld r0,SACvrswap(r31) ; Load the previous pool page vr conversion + +stnot1st64: + std r28,SACnext(r29) ; Previous guy points to my next + xor r0,r0,r31 ; Make the last guy virtual + std r29,SACprev(r28) ; Next guy points back to my previous + std r0,SAVprev(r2) ; Store the old top virtual as my back chain + mr r31,r2 ; My physical is now the head of the chain + b sttoss64 ; Get the next one... + +; A pool block that had no free entries now has one. Stick it on the pool list. + +stputpool64: + ld r28,SVpoolfwd(0) ; Get the first guy on the list + li r0,saveanchor ; Point to the saveanchor + std r2,SVpoolfwd(0) ; Put us on the top of the list + std r28,SACnext(r2) ; We point to the old top + std r2,SACprev(r28) ; Old top guy points back to us + std r0,SACprev(r2) ; Our back points to the anchor + b sttoss64 ; Go on to the next one... + +; We are all done. Relocate pool release head, restore all, and go. This code +; is used both by the 32 and 64-bit paths. +; r9 = return address +; r10 = per-proc ptr +; r11 = MSR at entry +; r31 = free pool block list + stdone: bl saveunlock ; Unlock the saveanchor and set adjust field mr. r3,r31 ; Move release chain and see if there are any li r5,0 ; Assume either V=R or no release chain beq- stnorel ; Nothing to release... - lwz r5,SACvrswap(r31) ; Get the vr conversion - -stnorel: mtmsr r12 ; Restore translation and exceptions - isync ; Make sure about it + lwz r5,SACvrswap+4(r31) ; Get the vr conversion (only need low half if 64-bit) +stnorel: + bl saveRestore ; restore translation and exceptions, turn off SF mtlr r9 ; Restore the return lwz r28,FM_SIZE+0(r1) ; Restore R28 @@ -729,6 +1075,7 @@ stnorel: mtmsr r12 ; Restore translation and exceptions lwz r31,FM_SIZE+12(r1) ; Restore R31 addi r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Pop the stack xor r3,r3,r5 ; Convert release chain address to virtual + rlwinm r3,r3,0,0,31 ; if 64-bit, clear upper half of virtual address #if FPVECDBG lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) @@ -737,70 +1084,69 @@ stnorel: mtmsr r12 ; Restore translation and exceptions sc ; (TEST/DEBUG) #endif blr ; Return... - -; -; save_recover - here we scan the free pool and see if we can get -; enough free saveareas to hit target. -; -; If we empty a pool block, remove it from the pool list -; -; + + +/* + * *************************** + * * s a v e _ r e c o v e r * + * *************************** + * + * int save_recover(void); + * + * Returns nonzero if we can get enough saveareas to hit the target. We scan the free + * pool. If we empty a pool block, we remove it from the pool list. + */ .align 5 .globl EXT(save_recover) LEXT(save_recover) - mfsprg r9,2 ; Get the feature flags - mfmsr r12 ; Get the MSR - rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - mtcrf 0x04,r9 ; Set the features - lis r10,hi16(EXT(saveanchor)) ; Get the high part of the anchor - andi. r3,r12,0x7FCF ; Turn off all translation and rupts - ori r10,r10,lo16(EXT(saveanchor)) ; Bottom half of the anchor - mflr r9 ; Save the return - - bt pfNoMSRirb,srcNoMSR ; No MSR... 
- - mtmsr r3 ; Translation and all off - isync ; Toss prefetch - b srcNoMSRx - - .align 5 - -srcNoMSR: li r0,loadMSR ; Get the MSR setter SC - sc ; Set it - -srcNoMSRx: bl savelock ; Go lock up the anchor + mflr r9 ; save return address + bl saveSetup ; turn translation and interrupts off, SF on, load many regs + bl savelock ; lock the savearea anchor - lwz r8,SVadjust(r10) ; How many do we need to clear get? + lwz r8,SVadjust(0) ; How many do we need to get? li r3,0 ; Get a 0 mr. r8,r8 ; Do we need any? - bgt+ srcneedmore ; Yeah, we still need it... - + ble-- save_recover1 ; not any more + bf-- pf64Bitb,saveRecover32 ; handle 32-bit processor + b saveRecover64 ; handle 64-bit processor + +save_recover1: ; by the time we locked the anchor, no longer short mtlr r9 ; Restore return - stw r3,SVlock(r10) ; Quick unlock (no need for sync or to set adjust, nothing changed) - - mtmsr r12 ; Restore translation and exceptions - isync ; Make sure about it - + stw r3,SVlock(0) ; Quick unlock (no need for sync or to set adjust, nothing changed) #if FPVECDBG lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) li r2,0x2208 ; (TEST/DEBUG) oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) sc ; (TEST/DEBUG) #endif - blr ; Leave... + b saveRestore ; turn translation etc back on, return to our caller - .align 5 - -srcneedmore: - mr r6,r10 ; Start at pool anchor - cmplwi cr1,r10,0 ; Make sure we start as not equal - lwz r7,SVfreecnt(r10) ; Get the current free count - -srcnpool: lwz r6,SACnext(r6) ; Point to the next one - cmplw r6,r10 ; Have we wrapped? + +/* + * ***************************** + * * s a v e R e c o v e r 3 2 * + * ***************************** + * + * Handle "save_recover" on 32-bit processors. At this point, translation and interrupts + * are off, the savearea anchor is locked, and: + * r8 = #pages to recover + * r9 = return address + * r10 = per-proc ptr + * r11 = MSR at entry + */ + +saveRecover32: + li r6,saveanchor ; Start at pool anchor + crclr cr1_eq ; initialize the loop test + lwz r7,SVfreecnt(0) ; Get the current free count + + +; Loop over next block in free pool. r6 is the ptr to the last block we looked at. + +srcnpool: lwz r6,SACnext+4(r6) ; Point to the next one + cmplwi r6,saveanchor ; Have we wrapped? beq- srcdone ; Yes, did not have enough... lwz r5,SACalloc(r6) ; Pick up the allocation for this pool block @@ -813,6 +1159,16 @@ srcnpool: lwz r6,SACnext(r6) ; Point to the next one #error Savearea size is not 640!!!!!!!!!!!! #endif +; Loop over free savearea in current block. +; r5 = bitmap of free saveareas in block at r6 (ie, SACalloc) +; r6 = ptr to current free pool block +; r7 = free count +; r8 = #pages more we still need to recover +; r9 = return address +; r10 = per-proc ptr +; r11 = MSR at entry +; cr1 = beq if (r8==0) + srcnext: beq- cr1,srcdone ; We have no more to get... lis r3,0x8000 ; Get the top bit on @@ -828,130 +1184,273 @@ srcnext: beq- cr1,srcdone ; We have no more to get... stw r5,SACalloc(r6) ; Set new allocation bits add r2,r2,r6 ; Get the actual address of the savearea - lwz r3,SVfree(r10) ; Get the head of the chain + lwz r3,SVfree+4(0) ; Get the head of the chain cmplwi cr1,r8,0 ; Do we actually need any more? - stw r2,SVfree(r10) ; Push ourselves in the front - stw r3,SAVprev(r2) ; Chain the rest of the list behind + stw r2,SVfree+4(0) ; Push ourselves in the front + stw r3,SAVprev+4(r2) ; Chain the rest of the list behind bne+ srcnext ; The pool block is not empty yet, try for another...
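The inner loop just shown allocates free slots out of a pool block's SACalloc bitmap and pushes them onto the global free list until the shortfall is covered. A hypothetical C rendering follows; the slot address is block base plus index times 640, exactly as the assembly computes it, but the struct declarations are assumptions for illustration:

/* Illustrative sketch of the save_recover inner loop; not kernel code. */
struct savearea { struct savearea *SAVprev; };
struct sac_block {
    unsigned int SACalloc;    /* bitmap: a set bit == recoverable slot */
    /* ...remaining control fields live within the block... */
};
extern struct { struct savearea *SVfree; unsigned int SVfreecnt; } saveanchor;

static int recover_from_block(struct sac_block *blk, int need)
{
    while (need > 0 && blk->SACalloc != 0) {
        int idx = __builtin_clz(blk->SACalloc);     /* cntlzw in the asm */
        blk->SACalloc &= ~(0x80000000u >> idx);     /* claim the slot */
        struct savearea *sv =
            (struct savearea *)((char *)blk + idx * 640);   /* SAVsize */
        sv->SAVprev = saveanchor.SVfree;    /* push on global free list */
        saveanchor.SVfree = sv;
        saveanchor.SVfreecnt++;
        need--;
    }
    /* if SACalloc reached zero, the block is fully allocated again and
     * the caller unlinks it from the pool list (the SACnext/SACprev
     * updates in the assembly above) */
    return need;    /* how many we still need from other blocks */
}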
- lwz r2,SACnext(r6) ; Get the next pointer - lwz r3,SACprev(r6) ; Get the previous pointer - stw r3,SACprev(r2) ; The previous of my next points to my previous - stw r2,SACnext(r3) ; The next of my previous points to my next + lwz r2,SACnext+4(r6) ; Get the next pointer + lwz r3,SACprev+4(r6) ; Get the previous pointer + stw r3,SACprev+4(r2) ; The previous of my next points to my previous + stw r2,SACnext+4(r3) ; The next of my previous points to my next bne+ cr1,srcnpool ; We still have more to do... - -srcdone: stw r7,SVfreecnt(r10) ; Set the new free count + + +; Join here from 64-bit path when we have recovered all the saveareas we need to. + +srcdone: stw r7,SVfreecnt(0) ; Set the new free count bl saveunlock ; Unlock the save and set adjust field mtlr r9 ; Restore the return - mtmsr r12 ; Restore translation and exceptions - isync ; Make sure about it - #if FPVECDBG lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) li r2,0x2209 ; (TEST/DEBUG) oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) sc ; (TEST/DEBUG) #endif - blr ; Leave... + b saveRestore ; turn xlate and EE back on, SF off, and return to our caller + + +/* + * ***************************** + * * s a v e R e c o v e r 6 4 * + * ***************************** + * + * Handle "save_recover" on 64-bit processors. At this point, translation and interrupts + * are off, the savearea anchor is locked, and: + * r8 = #pages to recover + * r9 = return address + * r10 = per-proc ptr + * r11 = MSR at entry + */ + +saveRecover64: + li r6,saveanchor ; Start at pool anchor + crclr cr1_eq ; initialize the loop test + lwz r7,SVfreecnt(0) ; Get the current free count + + +; Loop over next block in free pool. r6 is the ptr to the last block we looked at. + +srcnpool64: + ld r6,SACnext(r6) ; Point to the next one + cmpldi r6,saveanchor ; Have we wrapped? + beq-- srcdone ; Yes, did not have enough... + lwz r5,SACalloc(r6) ; Pick up the allocation for this pool block + + +; Loop over free savearea in current block. +; r5 = bitmap of free saveareas in block at r6 (ie, SACalloc) +; r6 = ptr to current free pool block +; r7 = free count +; r8 = #pages more we still need to recover +; r9 = return address +; r10 = per-proc ptr +; r11 = MSR at entry +; cr1 = beq if (r8==0) ; -; Here is where we lock the saveanchor lock -; We assume R10 points to the saveanchor -; We trash R7 and R3 -; +; WARNING: as in the 32-bit path, we depend on (SAVsize==640) - .align 5 - -savelock: lwarx r7,0,r10 ; Grab the lock value - li r3,1 ; Use part of the delay time - mr. r7,r7 ; Is it locked? */ - bne- sllcks ; Yeah, wait for it to clear... - stwcx. r3,0,r10 ; Try to seize that there durn lock - beq+ sllckd ; Got it... - b savelock ; Collision, try again... +srcnext64: + beq-- cr1,srcdone ; We have no more to get... - .align 5 + lis r3,0x8000 ; Get the top bit on + cntlzw r4,r5 ; Find a free slot + addi r7,r7,1 ; Bump up the free count + srw r3,r3,r4 ; Make a mask + slwi r0,r4,7 ; First multiply by 128 + subi r8,r8,1 ; Decrement the need count + slwi r2,r4,9 ; Then multiply by 512 + andc. r5,r5,r3 ; Clear out the "free" bit + add r2,r2,r0 ; Sum to multiply by 640 -sllcks: lwz r7,SVlock(r10) ; Get that lock in here - mr. r7,r7 ; Is it free yet? - beq+ savelock ; Yeah, try for it again... - b sllcks ; Sniff away... + stw r5,SACalloc(r6) ; Set new allocation bits + + add r2,r2,r6 ; Get the actual address of the savearea + ld r3,SVfree(0) ; Get the head of the chain + cmplwi cr1,r8,0 ; Do we actually need any more? 
+ std r2,SVfree(0) ; Push ourselves in the front + std r3,SAVprev(r2) ; Chain the rest of the list behind - nop ; Force isync to last in ifetch buffer - nop - nop + bne++ srcnext64 ; The pool block is not empty yet, try for another... -sllckd: isync ; Make sure translation is off - blr ; Return.... - + ld r2,SACnext(r6) ; Get the next pointer + ld r3,SACprev(r6) ; Get the previous pointer + std r3,SACprev(r2) ; The previous of my next points to my previous + std r2,SACnext(r3) ; The next of my previous points to my next + bne++ cr1,srcnpool64 ; We still have more to do... + + b srcdone -; -; This is the common routine that sets the saveadjust field and unlocks the savearea -; anchor. -; -; Note that we can not use R9 here because we use it to save the LR across the call. -; Also, R10 is assumed to point to the saveanchor. R3 is also reserved. -; +/* + * ******************* + * * s a v e l o c k * + * ******************* + * + * Lock the savearea anchor, so we can manipulate the free list. + * msr = interrupts and translation off + * We destroy: + * r8, r3, r12 + */ .align 5 +savelock: lwz r8,SVlock(0) ; See if lock is held + cmpwi r8,0 + li r12,saveanchor ; Point to the saveanchor + bne-- savelock ; loop until lock released... + +savelock0: lwarx r8,0,r12 ; Grab the lock value + cmpwi r8,0 ; taken? + li r8,1 ; get nonzero to lock it with + bne-- savelock1 ; already locked, wait for it to clear... + stwcx. r8,0,r12 ; Try to seize that there durn lock + isync ; assume we got it + beqlr++ ; reservation not lost, so we have the lock + b savelock0 ; Try again... + +savelock1: li r8,lgKillResv ; Point to killing field + stwcx. r8,0,r8 ; Kill reservation + b savelock ; Start over.... + + +/* + * *********************** + * * s a v e u n l o c k * + * *********************** + * + * + * This is the common routine that sets the saveadjust field and unlocks the savearea + * anchor. + * msr = interrupts and translation off + * We destroy: + * r2, r5, r6, r8. + */ + .align 5 saveunlock: - lwz r6,SVfreecnt(r10) ; and the number on the free list - lwz r5,SVinuse(r10) ; Pick up the in use count - cmplwi r6,FreeListMin ; Do we have at least the minimum? - blt- sutooshort ; Do not have minumum.... - lwz r7,SVtarget(r10) ; Get the target + lwz r6,SVfreecnt(0) ; and the number on the free list + lwz r5,SVinuse(0) ; Pick up the in use count + subic. r8,r6,FreeListMin ; do we have at least the minimum? 
+ lwz r2,SVtarget(0) ; Get the target + neg r8,r8 ; assuming we are short, get r8 <- shortfall + blt-- saveunlock1 ; skip if fewer than minimum on free list add r6,r6,r5 ; Get the total number of saveareas - addi r5,r7,-SaveLowHysteresis ; Find bottom + addi r5,r2,-SaveLowHysteresis ; Find low end of acceptable range sub r5,r6,r5 ; Make everything below hysteresis negative - sub r7,r7,r6 ; Get the distance from the target - rlwinm r5,r5,0,0,31 ; Clear negative bit + sub r2,r2,r6 ; Get the distance from the target addi r5,r5,-(SaveLowHysteresis + SaveHighHysteresis + 1) ; Subtract full hysteresis range srawi r5,r5,31 ; Get 0xFFFFFFFF if outside range or 0 if inside - and r7,r7,r5 ; Get 0 if in range or distance to target if not + and r8,r2,r5 ; r8 <- 0 if in range or distance to target if not - li r8,0 ; Set a clear value - stw r7,SVadjust(r10) ; Set the adjustment value - - sync ; Make sure everything is done - stw r8,SVlock(r10) ; Unlock the savearea chain +saveunlock1: + li r5,0 ; Set a clear value + stw r8,SVadjust(0) ; Set the adjustment value + eieio ; Make sure everything is done + stw r5,SVlock(0) ; Unlock the savearea chain blr - - .align 5 - -sutooshort: subfic r6,r6,FreeListMin ; Get the number needed to hit minimum - li r8,0 ; Set a clear value - stw r6,SVadjust(r10) ; Set the adjustment value - - sync ; Make sure everything is done - stw r8,SVlock(r10) ; Unlock the savearea chain - blr - - /* - * struct savearea *save_cpv(struct savearea *); Converts a physical savearea address to virtual + * ******************* + * * s a v e _ c p v * + * ******************* + * + * struct savearea *save_cpv(addr64_t saveAreaPhysAddr); + * + * Converts a physical savearea address to virtual. Called with translation on + * and in 32-bit mode. Note that the argument is passed as a long long in (r3,r4). */ .align 5 .globl EXT(save_cpv) LEXT(save_cpv) + mflr r9 ; save return address + mr r8,r3 ; save upper half of phys address here + bl saveSetup ; turn off translation and interrupts, turn SF on + rlwinm r5,r4,0,0,19 ; Round back to the start of the physical savearea block + bf-- pf64Bitb,save_cpv1 ; skip if 32-bit processor + rldimi r5,r8,32,0 ; r5 <- 64-bit phys address of block +save_cpv1: + lwz r6,SACvrswap+4(r5) ; Get the conversion to virtual (only need low half if 64-bit) + mtlr r9 ; restore return address + xor r3,r4,r6 ; convert phys to virtual + rlwinm r3,r3,0,0,31 ; if 64-bit, zero upper half of virtual address + b saveRestore ; turn translation etc back on, SF off, and return r3 + - mfmsr r10 ; Get the current MSR - rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off - rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off - rlwinm r4,r3,0,0,19 ; Round back to the start of the physical savearea block - andi. r9,r10,0x7FEF ; Turn off interrupts and data translation - mtmsr r9 ; Disable DR and EE - isync - - lwz r4,SACvrswap(r4) ; Get the conversion to virtual - mtmsr r10 ; Interrupts and DR back on - isync - xor r3,r3,r4 ; Convert to physical - blr +/* + * ********************* + * * s a v e S e t u p * + * ********************* + * + * This routine is called at the start of all the save-area subroutines. + * It turns off translation, disables interrupts, turns on 64-bit mode, + * and sets up cr6 with the feature flags (especially pf64Bit).
+ * + * Note that most save-area routines cannot take _any_ interrupt (such as a + * PTE miss) once the savearea anchor is locked, since that would result in + * instant deadlock as we need a save-area to process any exception. + * We set up: + * r10 = per-proc ptr + * r11 = old MSR + * cr5 = pfNoMSRir feature flag + * cr6 = pf64Bit feature flag + * + * We use r0, r3, r10, and r11. + */ + +saveSetup: + mfmsr r11 ; get msr + mfsprg r3,2 ; get feature flags + li r0,0 + mtcrf 0x2,r3 ; copy pf64Bit to cr6 + ori r0,r0,lo16(MASK(MSR_IR)+MASK(MSR_DR)+MASK(MSR_EE)) + mtcrf 0x4,r3 ; copy pfNoMSRir to cr5 + andc r3,r11,r0 ; turn off IR, DR, and EE + li r0,1 ; get a 1 in case its a 64-bit machine + bf-- pf64Bitb,saveSetup1 ; skip if not a 64-bit machine + rldimi r3,r0,63,MSR_SF_BIT ; turn SF (bit 0) on + mtmsrd r3 ; turn translation and interrupts off, 64-bit mode on + isync ; wait for it to happen + mfsprg r10,0 ; get per-proc ptr + blr +saveSetup1: ; here on 32-bit machines + bt- pfNoMSRirb,saveSetup2 ; skip if cannot turn off IR with a mtmsr + mtmsr r3 ; turn translation and interrupts off + isync ; wait for it to happen + mfsprg r10,0 ; get per-proc ptr + blr +saveSetup2: ; here if pfNoMSRir set for this machine + li r0,loadMSR ; we will "mtmsr r3" via system call + sc + mfsprg r10,0 ; get per-proc ptr + blr + + +/* + * ************************* + * * s a v e R e s t o r e * + * ************************* + * + * Undoes the effect of calling "saveSetup", ie it turns relocation and interrupts back on, + * and turns 64-bit mode back off. + * r11 = old MSR + * cr6 = pf64Bit feature flag + */ + +saveRestore: + bt++ pf64Bitb,saveRestore64 ; handle a 64-bit processor +saveRestore32: + mtmsr r11 ; restore MSR + isync ; wait for translation to start up + blr +saveRestore64: ; 64-bit processor + mtmsrd r11 ; restore MSR + isync ; wait for changes to happen + blr + diff --git a/osfmk/ppc/POWERMAC/scc_8530.h b/osfmk/ppc/scc_8530.h similarity index 100% rename from osfmk/ppc/POWERMAC/scc_8530.h rename to osfmk/ppc/scc_8530.h diff --git a/osfmk/ppc/sched_param.h b/osfmk/ppc/sched_param.h index 9a2fd0bf2..e5aa3902e 100644 --- a/osfmk/ppc/sched_param.h +++ b/osfmk/ppc/sched_param.h @@ -64,4 +64,4 @@ #include #include -#endif _PPC_SCHED_PARAM_H_ +#endif /* _PPC_SCHED_PARAM_H_ */ diff --git a/osfmk/ppc/POWERMAC/serial_io.c b/osfmk/ppc/serial_io.c similarity index 90% rename from osfmk/ppc/POWERMAC/serial_io.c rename to osfmk/ppc/serial_io.c index b132ee39b..5b637ad5b 100644 --- a/osfmk/ppc/POWERMAC/serial_io.c +++ b/osfmk/ppc/serial_io.c @@ -76,9 +76,8 @@ #include #include #include -#include -#include -#include +#include +#include #if MACH_KDB #include @@ -104,6 +103,8 @@ extern unsigned int disableSerialOuput; int serial_initted = 0; unsigned int scc_parm_done = 0; /* (TEST/DEBUG) */ +extern unsigned int serialmode; + static struct scc_byte { unsigned char reg; unsigned char val; @@ -148,6 +149,7 @@ enum scc_error {SCC_ERR_NONE, SCC_ERR_PARITY, SCC_ERR_BREAK, SCC_ERR_OVERRUN}; #define convert_baud_rate(rate) ((((SERIAL_CLOCK_FREQUENCY) + (rate)) / (2 * (rate))) - 2) #define DEFAULT_SPEED 57600 +#define DEFAULT_PORT0_SPEED 1200 #define DEFAULT_FLAGS (TF_LITOUT|TF_ECHO) int scc_param(struct scc_tty *tp); @@ -267,8 +269,6 @@ scc_probe(void) * new memory mappings. 
*/ -// scc_std[0] = POWERMAC_IO(scc_std[0]); - regs = (scc_regmap_t)scc_std[0]; if (regs == (scc_regmap_t) 0) { @@ -289,8 +289,13 @@ scc_probe(void) open but are needed if the port will be used independently of the Mach interfaces, e.g., for gdb or for a serial console. */ - tp->t_ispeed = DEFAULT_SPEED; - tp->t_ospeed = DEFAULT_SPEED; + if (i == 0) { + tp->t_ispeed = DEFAULT_PORT0_SPEED; + tp->t_ospeed = DEFAULT_PORT0_SPEED; + } else { + tp->t_ispeed = DEFAULT_SPEED; + tp->t_ospeed = DEFAULT_SPEED; + } tp->t_flags = DEFAULT_FLAGS; scc->softr[i].speed = -1; @@ -638,4 +643,52 @@ scc_param(struct scc_tty *tp) } +/* + * This routine will start a thread that polls the serial port, listening for + * characters that have been typed. + */ + +void +serial_keyboard_init(void) +{ + + if(!(serialmode & 2)) return; /* Leave if we do not want a serial console */ + + kprintf("Serial keyboard started\n"); + kernel_thread_with_priority(serial_keyboard_start, MAXPRI_STANDARD); + return; +} + +void +serial_keyboard_start(void) +{ + thread_t cthread; + + cthread = current_thread(); /* Just who the heck are we anyway? */ + stack_privilege(cthread); /* Make sure we don't lose our stack */ + serial_keyboard_poll(); /* Go see if there are any characters pending now */ + panic("serial_keyboard_start: we can't get back here\n"); +} + +void +serial_keyboard_poll(void) +{ + int chr; + uint64_t next; + extern void cons_cinput(char ch); /* The BSD routine that gets characters */ + + while(1) { /* Do this for a while */ + chr = scc_getc(0, 1, 0, 1); /* Get a character if there is one */ + if(chr < 0) break; /* The serial buffer is empty */ + cons_cinput((char)chr); /* Buffer up the character */ + } + + clock_interval_to_deadline(16, 1000000, &next); /* Get time of pop */ + + assert_wait((event_t)serial_keyboard_poll, THREAD_INTERRUPTIBLE); /* Show we are "waiting" */ + thread_set_timer_deadline(next); /* Set the next time to check */ + thread_block(serial_keyboard_poll); /* Wait for it */ + panic("serial_keyboard_poll: Shouldn't never ever get here...\n"); +} + #endif /* NSCC > 0 */ diff --git a/osfmk/ppc/POWERMAC/serial_io.h b/osfmk/ppc/serial_io.h similarity index 97% rename from osfmk/ppc/POWERMAC/serial_io.h rename to osfmk/ppc/serial_io.h index 05fc86eae..cd7075bf7 100644 --- a/osfmk/ppc/POWERMAC/serial_io.h +++ b/osfmk/ppc/serial_io.h @@ -127,6 +127,10 @@ extern int switch_to_video_console( extern void switch_to_old_console( int old_console); +void serial_keyboard_init(void); +void serial_keyboard_start(void); +void serial_keyboard_poll(void); + /* * JMM - We are not really going to support this driver in SMP (barely diff --git a/osfmk/ppc/skiplists.s b/osfmk/ppc/skiplists.s new file mode 100644 index 000000000..4bb4a7808 --- /dev/null +++ b/osfmk/ppc/skiplists.s @@ -0,0 +1,1304 @@ +/* + * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* skiplists.s + * + * These are the subroutines that manage the skip-list data structures used for the + * resident mappings for each pmap. We used to use a much simpler hash-based scheme, + * but it didn't scale well for 64-bit address spaces and multi-GB real memories. + * Here's a brief tutorial on skip-lists: + * + * The basic idea is that each mapping is on one or more singly-linked lists, sorted + * in increasing order by virtual address. The number of lists a mapping is on is an + * invariant property determined when the mapping is created, using an exponentially- + * distributed random number. Every mapping is on the first list. Ideally, each + * successive list has only 1/F as many nodes on it as the previous, where F is the + * "fanout." With a max of n lists, up to F**n nodes can be handled optimally. + * + * Searching, adding, and deleting from a skip-list can all be done in O(ln(n)) time. + * Because the first skip-list is just a sorted list of all mappings, it is also + * efficient to purge a sparsely populated pmap of all the mappings in a large range, + * for example when tearing down an address space. Large-range deletes are the + * primary advantage of skip-lists over a hash, btw. + * + * We currently use a fanout of 4 and a maximum of 12 lists (cf kSkipListFanoutShift + * and kSkipListMaxLists.) Thus, we can optimally handle pmaps with as many as 4**12 + * pages, which is 64GB of resident physical memory per pmap. Pmaps can be larger than + * this, albeit with diminishing efficiency. + * + * The major problem with skip-lists is that we could waste a lot of space with 12 + * 64-bit link fields in every mapping. So we currently have two sizes of mappings: + * 64-byte nodes with 4 list links, and 128-byte nodes with 12. Only one in every + * (4**4)==256 mappings requires the larger node, so the average size is 64.25 bytes. + * In practice, the additional complexity of the variable node size is entirely + * contained in the allocate and free routines. + * + * The other, mostly theoretic problem with skip-lists is that they have worst cases + * where performance becomes nearly linear. These worst-cases are quite rare but there + * is no practical way to prevent them. + */ + + +; set nonzero to accumulate skip-list stats on a per-map basis: +#define SKIPLISTSTATS 1 + +; cr7 bit set when mapSearchFull() finds a match on a high list: +#define bFullFound 28 + +#include +#include +#include +#include +#include + + +/* + * ********************* + * * m a p S e a r c h * + * ********************* + * + * Given a pmap and a virtual address (VA), find the mapping for that address. + * This is the fast call, that does not set up the previous-ptr vector or make + * consistency checks. 
When called: + * the pmap is locked (shared or exclusive) + * translation is off, interrupts masked + * 64-bit mode is enabled (if on a 64-bit machine) + * cr6 is loaded with the corresponding feature flags (in particular, pf64Bit) + * r3 = pmap ptr + * r4 = high 32 bits of key to search for (0 if a 32-bit processor) + * r5 = low 32 bits of key (low 12 bits may be nonzero garbage) + * r7 = mpFlags field if found. Undefined if not + * + * We return the mapping ptr (or 0) in r3, and the next VA (or 0 if no more) in r4 and r5. + * Except for cr6 (which is global), we trash nonvolatile regs. Called both on 32- and 64-bit + * machines, though we quickly branch into parallel code paths. + */ + .text + .align 5 + .globl EXT(mapSearch) +LEXT(mapSearch) + lbz r7,pmapCurLists(r3) ; get largest #lists any mapping is on + la r8,pmapSkipLists+4(r3) ; point to lists in pmap, assuming 32-bit machine + rlwinm r5,r5,0,0,19 ; zero low 12 bits of key + mr r6,r3 ; save pmap ptr here so we can accumulate statistics + li r9,0 ; initialize prev ptr + addic. r7,r7,-1 ; get base-0 number of last list, and test for 0 + li r2,0 ; initialize count of mappings visited + slwi r7,r7,3 ; get offset of last list in use + blt-- mapSrchPmapEmpty ; pmapCurLists==0 (ie, no mappings) + lwzx r3,r8,r7 ; get 32-bit ptr to 1st mapping in highest list + bf-- pf64Bitb,mapSrch32c ; skip if 32-bit processor + subi r8,r8,4 ; we use all 64 bits of ptrs + rldimi r5,r4,32,0 ; r5 <- 64-bit va + ldx r3,r8,r7 ; get 64-bit ptr to 1st mapping in highest list + b mapSrch64c ; enter 64-bit search loop + + + ; 64-bit processors. Check next mapping. + ; r2 = count of mappings visited so far + ; r3 = current mapping ptr + ; r4 = va of current mapping (ie, of r3) + ; r5 = va to search for (the "key") (low 12 bits are 0) + ; r6 = pmap ptr + ; r7 = current skip list number * 8 + ; r8 = ptr to skip list vector of mapping pointed to by r9 (or pmap, if r9==0) + ; r9 = prev ptr, or 0 if none + + .align 5 +mapSrch64a: ; loop over each mapping + ld r4,mpVAddr(r3) ; get va for this mapping (plus flags in low 12 bits) + addi r2,r2,1 ; count mappings visited + rldicr r4,r4,0,51 ; zero low 12 bits of mapping va + cmpld cr1,r5,r4 ; compare the vas + blt cr1,mapSrch64d ; key is less, try next list + la r8,mpList0(r3) ; point to skip list vector in this mapping + mr r9,r3 ; remember prev ptr + beq-- cr1,mapSrch64Found ; this is the correct mapping + ldx r3,r7,r8 ; get ptr to next mapping in current list +mapSrch64c: + mr. r3,r3 ; was there another mapping on current list? + bne++ mapSrch64a ; was another, so loop +mapSrch64d: + subic. r7,r7,8 ; move on to next list offset + ldx r3,r7,r8 ; get next mapping on next list (if any) + bge++ mapSrch64c ; loop to try next list + + ; Mapping not found, check to see if prev node was a block mapping or nested pmap. + ; If not, or if our address is not covered by the block or nested map, return 0. + ; Note the advantage of keeping the check for block mappings (and nested pmaps) + ; out of the inner loop; we do the special case work at most once per search, and + ; never for the most-common case of finding a scalar mapping. The full searches + ; must check _in_ the inner loop, to get the prev ptrs right. + + mr. r9,r9 ; was there a prev ptr? + li r3,0 ; assume we are going to return null + ld r4,pmapSkipLists(r6) ; assume prev ptr null... 
so next is first + beq-- mapSrch64Exit ; prev ptr was null, search failed + lwz r0,mpFlags(r9) ; get flag bits from prev mapping + ld r10,mpVAddr(r9) ; re-fetch base address of prev ptr + ld r4,mpList0(r9) ; get 64-bit ptr to next mapping, if any + andi. r0,r0,mpBlock+mpNest ; block mapping or nested pmap? + lhz r11,mpBSize(r9) ; get #pages/#segments in block/submap mapping + rldicr r10,r10,0,51 ; zero low 12 bits of mapping va + beq mapSrch64Exit ; prev mapping was just a scalar page, search failed + cmpwi r0,mpBlock ; block mapping or nested pmap? + sldi r0,r11,12 ; assume block mapping, get size in bytes - 4k + beq mapSrch64f ; we guessed right, it was a block mapping + addi r11,r11,1 ; mpBSize is 1 too low + sldi r11,r11,28 ; in a nested pmap, mpBSize is in units of segments + subi r0,r11,4096 ; get address of last page in submap +mapSrch64f: + add r10,r10,r0 ; r10 <- last page in this mapping + cmpld r5,r10 ; does this mapping cover our page? + bgt mapSrch64Exit ; no, search failed + mr r3,r9 ; yes, we found it + + ; found the mapping + ; r2 = count of nodes visited + ; r3 = the mapping + ; r6 = pmap ptr + +mapSrch64Found: ; WARNING: can drop down to here + ld r4,mpList0(r3) ; get ptr to next mapping + lwz r7,mpFlags(r3) ; Get the flags for our caller + + ; r2 = count of nodes visited + ; r3 = return value (ie, found mapping or 0) + ; r4 = next mapping (or 0 if none) + ; r6 = pmap ptr + ; r7 = mpFlags + +mapSrch64Exit: ; WARNING: can drop down to here + mr. r5,r4 ; next ptr null? +#if SKIPLISTSTATS + lwz r10,pmapSearchCnt(r6) ; prepare to accumulate statistics + ld r8,pmapSearchVisits(r6) + addi r10,r10,1 ; count searches + add r8,r8,r2 ; count nodes visited + stw r10,pmapSearchCnt(r6) + std r8,pmapSearchVisits(r6) +#endif + beqlr- ; next ptr was null, so return 0 in r4 and r5 + lwz r5,mpVAddr+4(r4) ; get VA of next node + lwz r4,mpVAddr+0(r4) + blr + + + ; 32-bit processors. Check next mapping. + ; r2 = count of mappings visited so far + ; r3 = current mapping ptr + ; r4 = va of current mapping (ie, of r3) + ; r5 = va to search for (the "key") (low 12 bits are 0) + ; r6 = pmap ptr + ; r7 = current skip list number * 8 + ; r8 = ptr to skip list vector of mapping pointed to by r9 (or pmap, if r9==0) + ; r9 = prev ptr, or 0 if none + + .align 4 +mapSrch32a: ; loop over each mapping + lwz r4,mpVAddr+4(r3) ; get va for this mapping (plus flags in low 12 bits) + addi r2,r2,1 ; count mappings visited + rlwinm r4,r4,0,0,19 ; zero low 12 bits of mapping va + cmplw cr1,r5,r4 ; compare the vas + blt cr1,mapSrch32d ; key is less, try next list + la r8,mpList0+4(r3) ; point to skip list vector in this mapping + mr r9,r3 ; remember prev ptr + beq- cr1,mapSrch32Found ; this is the correct mapping + lwzx r3,r7,r8 ; get ptr to next mapping in current list +mapSrch32c: + mr. r3,r3 ; was there another mapping on current list? + bne+ mapSrch32a ; was another, so loop +mapSrch32d: + subic. r7,r7,8 ; move on to next list offset + lwzx r3,r7,r8 ; get next mapping on next list (if any) + bge+ mapSrch32c ; loop to try next list + + ; Mapping not found, check to see if prev node was a block mapping or nested pmap. + ; If not, or if our address is not covered by the block or nested map, return 0. + ; Note the advantage of keeping the check for block mappings (and nested pmaps) + ; out of the inner loop; we do the special case work at most once per search, and + ; never for the most-common case of finding a scalar mapping. The full searches + ; must check _in_ the inner loop, to get the prev ptrs right. + + mr. 
r9,r9 ; was there a prev ptr? + li r3,0 ; assume we are going to return null + lwz r4,pmapSkipLists+4(r6) ; assume prev ptr null... so next is first + beq- mapSrch32Exit ; prev ptr was null, search failed + lwz r0,mpFlags(r9) ; get flag bits from prev mapping + lwz r10,mpVAddr+4(r9) ; re-fetch base address of prev ptr + andi. r0,r0,mpBlock+mpNest ; block mapping or nested pmap? + lwz r4,mpList0+4(r9) ; get ptr to next mapping, if any + beq mapSrch32Exit ; prev mapping was just a scalar page, search failed + lhz r11,mpBSize(r9) ; get #pages/#segments in block/submap mapping + cmpwi r0,mpBlock ; block mapping or nested pmap? + rlwinm r10,r10,0,0,19 ; zero low 12 bits of block mapping va + slwi r0,r11,12 ; assume block mapping, get size in bytes - 4k + beq mapSrch32f ; we guessed right, it was a block mapping + addi r11,r11,1 ; mpBSize is 1 too low + slwi r11,r11,28 ; in a nested pmap, mpBSize is in units of segments + subi r0,r11,4096 ; get address of last page in submap +mapSrch32f: + add r10,r10,r0 ; r10 <- last page in this mapping + cmplw r5,r10 ; does this mapping cover our page? + bgt mapSrch32Exit ; no, search failed + mr r3,r9 ; yes, we found it + + ; found the mapping + ; r2 = count of nodes visited + ; r3 = the mapping + ; r6 = pmap ptr + +mapSrch32Found: ; WARNING: can drop down to here + lwz r4,mpList0+4(r3) ; get ptr to next mapping + lwz r7,mpFlags(r3) ; Get mpFlags for our caller + ; r2 = count of nodes visited + ; r3 = return value (ie, found mapping or 0) + ; r4 = next mapping (or 0 if none) + ; r6 = pmap ptr + ; r7 = mpFlags + +mapSrch32Exit: + mr. r5,r4 ; next ptr null? +#if SKIPLISTSTATS + lwz r10,pmapSearchCnt(r6) ; prepare to accumulate statistics + lwz r8,pmapSearchVisits(r6) + lwz r9,pmapSearchVisits+4(r6) + addi r10,r10,1 ; count searches + addc r9,r9,r2 ; count nodes visited + addze r8,r8 + stw r10,pmapSearchCnt(r6) + stw r8,pmapSearchVisits(r6) + stw r9,pmapSearchVisits+4(r6) +#endif + beqlr- ; next ptr was null, so return 0 in r4 and r5 + lwz r5,mpVAddr+4(r4) ; get VA of next node + lwz r4,mpVAddr+0(r4) + blr + + ; Here when the pmap is empty (ie, pmapCurLists==0), both in 32 and 64-bit mode, + ; and from both mapSearch and mapSearchFull. + ; r6 = pmap ptr + +mapSrchPmapEmpty: + li r3,0 ; return null + li r4,0 ; return 0 as virtual address of next node + li r5,0 +#if SKIPLISTSTATS + lwz r7,pmapSearchCnt(r6) ; prepare to accumulate statistics + addi r7,r7,1 ; count searches + stw r7,pmapSearchCnt(r6) +#endif + blr + + +/* + * ***************************** + * * m a p S e a r c h F u l l * + * ***************************** + * + * Given a pmap and a virtual address (VA), find the mapping for that address. + * This is the "full" call, that sets up a vector of ptrs to the previous node + * (or to the pmap, if there is no previous node) for each list that the mapping + * is on. We also make consistency checks on the skip-lists. When called: + * the pmap is locked (shared or exclusive) + * translation is off, interrupts masked + * 64-bit mode is enabled (if on a 64-bit machine) + * cr6 is loaded with the corresponding feature flags (in particular, pf64Bit) + * r3 = pmap ptr + * r4 = high 32 bits of key to search for (0 if a 32-bit processor) + * r5 = low 32 bits of key (low 12 bits may be nonzero garbage) + * + * We return the mapping ptr (or 0) in r3, and the next VA (or 0 if no more) in r4 and r5. + * Except for cr6 (which is global), we trash nonvolatile regs. Called both on 32- and 64-bit + * machines, though we quickly branch into parallel code paths.
+ */ + .text + .align 5 + .globl EXT(mapSearchFull) +LEXT(mapSearchFull) + lbz r7,pmapCurLists(r3) ; get largest #lists any mapping is on + la r8,pmapSkipLists+4(r3) ; point to lists in pmap, assuming 32-bit machine + rlwinm r5,r5,0,0,19 ; zero low 12 bits of key + mr r6,r3 ; save pmap ptr here so we can accumulate statistics + li r2,0 ; initialize count of mappings visited + mfsprg r12,0 ; get the per-proc data ptr + crclr bFullFound ; we have not found the mapping yet + addic. r7,r7,-1 ; get base-0 number of last list, and test for 0 + subi r9,r8,mpList0+4 ; initialize prev ptr to be a fake mapping + slwi r7,r7,3 ; get (offset*8) of last list + la r12,skipListPrev+4(r12) ; point to vector of prev ptrs, assuming 32-bit machine + blt-- mapSrchPmapEmpty ; pmapCurLists==0 (ie, no mappings) + lwzx r3,r8,r7 ; get 32-bit ptr to 1st mapping in highest list + li r10,0 ; initialize prev ptrs VA to 0 too + bf-- pf64Bitb,mapSrchFull32c ; skip if 32-bit processor + subi r8,r8,4 ; we use all 64 bits of ptrs + subi r12,r12,4 + rldimi r5,r4,32,0 ; r5 <- 64-bit va + ldx r3,r8,r7 ; get 64-bit ptr to 1st mapping in highest list + b mapSrchFull64c ; enter 64-bit search loop + + + ; 64-bit processors. Check next mapping. + ; r2 = count of mappings visited so far + ; r3 = current mapping ptr + ; r4 = va of current mapping (ie, of r3) + ; r5 = va to search for (the "key") (low 12 bits are 0) + ; r6 = pmap ptr + ; r7 = current skip list number * 8 + ; r8 = ptr to skip list vector of mapping pointed to by r9 + ; r9 = prev ptr, ie highest mapping that comes before search target (initially the pmap) + ; r10 = prev mappings va, or 0 if r9==pmap + ; r12 = ptr to the skipListPrev vector in the per-proc + + .align 5 +mapSrchFull64a: ; loop over each mapping + ld r4,mpVAddr(r3) ; get va for this mapping (plus flags in low 12 bits) + addi r2,r2,1 ; count mappings visited + lwz r0,mpFlags(r3) ; get mapping flag bits + cmpld cr0,r10,r4 ; make sure VAs come in strictly ascending order + rldicr r4,r4,0,51 ; zero low 12 bits of mapping va + cmpld cr1,r5,r4 ; compare the vas + bge-- cr0,mapSkipListPanic ; die if keys are out of order + andi. r0,r0,mpBlock+mpNest ; is it a scalar mapping? (ie, of a single page) + blt cr1,mapSrchFull64d ; key is less, try next list + beq cr1,mapSrchFull64Found ; this is the correct mapping + bne-- cr0,mapSrchFull64e ; handle block mapping or nested pmap +mapSrchFull64b: + la r8,mpList0(r3) ; point to skip list vector in this mapping + mr r9,r3 ; current becomes previous + ldx r3,r7,r8 ; get ptr to next mapping in current list + mr r10,r4 ; remember prev ptrs VA +mapSrchFull64c: + mr. r3,r3 ; was there another mapping on current list? + bne++ mapSrchFull64a ; was another, so loop +mapSrchFull64d: + stdx r9,r7,r12 ; save prev ptr in per-proc vector + subic. r7,r7,8 ; move on to next list offset + ldx r3,r7,r8 ; get next mapping on next list (if any) + bge++ mapSrchFull64c ; loop to try next list + + ; Mapping not found, return 0 and next higher key + + li r3,0 ; return null + bt-- bFullFound,mapSkipListPanic ; panic if it was on earlier list + ld r4,mpList0(r9) ; get 64-bit ptr to next mapping, if any + b mapSrch64Exit + + ; Block mapping or nested pmap, and key > base. We must compute the va of + ; the end of the block to see if key fits within it. 
+ +mapSrchFull64e: + lhz r11,mpBSize(r3) ; get #pages/#segments in block/submap mapping (if nonscalar) + cmpwi r0,mpBlock ; distinguish between block mapping and nested pmaps + sldi r0,r11,12 ; assume block mapping, get size in bytes - 4k + beq mapSrchFull64f ; we guessed right, it was a block mapping + addi r11,r11,1 ; mpBSize is 1 too low + sldi r11,r11,28 ; in a nested pmap, mpBSize is in units of segments + subi r0,r11,4096 ; get address of last page in submap +mapSrchFull64f: + add r4,r4,r0 ; r4 <- last page in this mapping + cmpld r5,r4 ; does this mapping cover our page? + bgt mapSrchFull64b ; no, try next mapping (r4 is advanced to end of range) + + + ; found the mapping + ; r2 = count of nodes visited + ; r3 = the mapping + ; r6 = pmap ptr + ; r7 = current skip list number * 8 + ; r8 = ptr to prev mappings (ie, r9) skip-list vector + ; r9 = prev ptr, ie highest mapping that comes before search target + ; r10 = prev mappings va + ; r12 = ptr to the skipListPrev vector in the per-proc + +mapSrchFull64Found: ; WARNING: can drop down to here + cmpwi r7,0 ; are we in the last skip-list? + crset bFullFound ; remember that we found the mapping + bne mapSrchFull64d ; mapSearchFull must search all lists to get prev ptrs + ld r4,mpList0(r3) ; get ptr to next mapping + stdx r9,r7,r12 ; save prev ptr in last list + lwz r7,mpFlags(r3) ; Get the flags for our caller + b mapSrch64Exit + + + ; 32-bit processors. Check next mapping. + ; r2 = count of nodes visited + ; r3 = ptr to next mapping in current list + ; r5 = va to search for (the "key") (low 12 bits are 0) + ; r6 = pmap ptr + ; r7 = current skip list number * 8 + ; r8 = ptr to skip list vector of mapping pointed to by r9 + ; r9 = prev ptr, ie highest mapping that comes before search target (initially the pmap) + ; r10 = prev mappings va, or 0 if r9==pmap + ; r12 = ptr to the skipListPrev vector in the per-proc + + .align 4 +mapSrchFull32a: ; loop over each mapping + lwz r4,mpVAddr+4(r3) ; get va for this mapping (plus flags in low 12 bits) + addi r2,r2,1 ; count mappings visited + lwz r0,mpFlags(r3) ; get mapping flag bits + cmplw cr0,r10,r4 ; make sure VAs come in strictly ascending order + rlwinm r4,r4,0,0,19 ; zero low 12 bits of mapping va + cmplw cr1,r5,r4 ; compare the vas + bge- cr0,mapSkipListPanic ; die if keys are out of order + andi. r0,r0,mpBlock+mpNest ; is it a scalar mapping? (ie, of a single page) + blt cr1,mapSrchFull32d ; key is less than this va, try next list + beq- cr1,mapSrchFull32Found ; this is the correct mapping + bne- cr0,mapSrchFull32e ; handle block mapping or nested pmap +mapSrchFull32b: + la r8,mpList0+4(r3) ; point to skip list vector in this mapping + mr r9,r3 ; current becomes previous + lwzx r3,r7,r8 ; get ptr to next mapping in current list + mr r10,r4 ; remember prev ptrs VA +mapSrchFull32c: + mr. r3,r3 ; next becomes current + bne+ mapSrchFull32a ; was another, so loop +mapSrchFull32d: + stwx r9,r7,r12 ; save prev ptr in per-proc vector + subic. r7,r7,8 ; move on to next list offset + lwzx r3,r7,r8 ; get next mapping on lower list (if any) + bge+ mapSrchFull32c ; loop to try next list + + ; mapping not found, return 0 and next-key + + li r3,0 ; return null + bt- bFullFound,mapSkipListPanic ; panic if it was on an earlier list + lwz r4,mpList0+4(r9) ; get ptr to next mapping + b mapSrch32Exit + + ; Block mapping or nested pmap, and key > base. We must compute the va of + ; the end of the block to see if our key fits within it. 
+ +mapSrchFull32e: + lhz r11,mpBSize(r3) ; get #pages/#segments in block/submap mapping (if nonscalar) + cmpwi r0,mpBlock ; distinguish between block mapping and nested pmaps + slwi r0,r11,12 ; assume block mapping, get size in bytes - 4k + beq mapSrchFull32f ; we guessed right, it was a block mapping + addi r11,r11,1 ; mpBSize is 1 too low + slwi r11,r11,28 ; in a nested pmap, mpBSize is in units of segments + subi r0,r11,4096 ; get address of last page in submap +mapSrchFull32f: + add r4,r4,r0 ; r4 <- last page in this mapping + cmplw r5,r4 ; does this mapping cover our page? + bgt mapSrchFull32b ; no, try next mapping + + + ; found the mapping + ; r2 = count of nodes visited + ; r3 = the mapping + ; r6 = pmap ptr + ; r7 = current skip list number * 8 + ; r9 = prev ptr, ie highest mapping that comes before search target, or 0 + ; r10 = prev mappings va + ; r12 = ptr to the skipListPrev vector in the per-proc + +mapSrchFull32Found: ; WARNING: can drop down to here + cmpwi r7,0 ; are we in the last skip-list? + crset bFullFound ; remember that we found the mapping + bne mapSrchFull32d ; mapSearchFull must search all lists to get prev ptrs + lwz r4,mpList0+4(r3) ; get ptr to next mapping + stwx r9,r7,r12 ; save prev ptr in last list + lwz r7,mpFlags(r3) ; Get mpFlags for our caller + b mapSrch32Exit + + +/* + * ********************* + * * m a p I n s e r t * + * ********************* + * + * Insert a mapping into pmap skip-lists. The caller has already called mapSearchFull to + * determine that this mapping does not overlap other mappings in the pmap. As a side effect + * of calling mapSearchFull, the per-proc skipListPrev array is set up with a vector of the + * previous ptrs for each skip list. When called: + * the pmap is locked (exclusive) + * translation is off, interrupts masked + * 64-bit mode is enabled (if on a 64-bit machine) + * mapSearchFull has just been called for this mappings key + * cr6 is loaded with the corresponding feature flags (in particular, pf64Bit) + * r3 = pmap ptr + * r4 = mapping ptr + * + * There is no return value. Except for cr6 (which is global), we trash nonvolatile regs. + */ + + .align 5 + .globl EXT(mapInsert) +LEXT(mapInsert) + lwz r8,mpFlags(r4) ; get this mappings flags + lbz r7,pmapCurLists(r3) ; get current max# lists any mapping is on + la r10,pmapSkipLists+4(r3) ; r10 <-- base of pmap list headers, assuming 32-bit machine + la r11,mpList0+4(r4) ; r11 <-- base of this mappings list vector + mfsprg r12,0 ; get ptr to our per-proc + andi. r9,r8,mpLists ; get #lists this mapping is on (1<=n<=27) + la r12,skipListPrev+4(r12) ; r12 <-- base of prev ptr vector + sub. r6,r9,r7 ; is this mapping on more lists than any other? + slwi r8,r9,3 ; get #lists * 8 + subi r8,r8,8 ; get offset to topmost (last) list in use + bf-- pf64Bitb,mapIns32 ; handle 32-bit processor + subi r10,r10,4 ; we use all 8 bytes of the ptr fields + subi r11,r11,4 + subi r12,r12,4 + ble++ mapIns64a ; not new max #lists + + ; 64-bit processor: We must increase pmapCurLists. Since mapSearchFull() only + ; sets up the first pmapCurLists prev ptrs, we must initialize the new ones to + ; point to the pmap. While we are at it, we verify that the unused list hdrs in + ; the pmap are 0. + + cmpwi r9,kSkipListMaxLists ; in range? 
+ stb r9,pmapCurLists(r3) ; remember new max + mtctr r6 ; set up count of new lists + mr r5,r8 ; copy offset to last list + subi r0,r10,mpList0 ; r0 <-- fake mapping ptr (to pmap) for null prev ptrs + bgt-- mapSkipListPanic ; choke if this mapping is on too many lists +mapIns64NewList: + ldx r6,r5,r10 ; get pmap list head + stdx r0,r5,r12 ; initialize prev ptr + subi r5,r5,8 ; get next list offset + cmpdi r6,0 ; was list hdr null? + bdnzt cr0_eq,mapIns64NewList ; loop if more lists to initialize and list hdr was 0 + bne-- mapSkipListPanic ; die if pmap list hdr was not null + b mapIns64a + + ; 64-bit processor: loop over each list this mapping is on + ; r4 = mapping + ; r8 = next list offset + ; r10 = ptr to base of pmap list header vector + ; r11 = ptr to base of new mappings list vector + ; r12 = ptr to base of prev ptr vector in per-proc + + .align 5 +mapIns64a: + ldx r5,r8,r12 ; get prev ptr from per-proc vector + cmpwi cr1,r8,0 ; more to go? + la r7,mpList0(r5) ; get base of prev mappings list vector + ldx r9,r8,r7 ; *** + stdx r4,r8,r7 ; * insert new mapping in middle of this list + stdx r9,r8,r11 ; *** + subi r8,r8,8 ; get next list offset + bne++ cr1,mapIns64a ; more lists to go + blr ; done + + ; Handle 32-bit processor. First, increase pmapCurLists if necessary; cr0 is bgt + ; iff the new mapping has more lists. Since mapSearchFull() only sets up the first + ; pmapCurLists prev ptrs, we must initialize any new ones to point to the pmap. + ; While we are at it, we verify that the unused list hdrs in the pmap are 0. + +mapIns32: + ble+ mapIns32a ; skip if new mapping does not use extra lists + cmpwi r9,kSkipListMaxLists ; in range? + stb r9,pmapCurLists(r3) ; remember new max + mtctr r6 ; set up count of new lists + mr r5,r8 ; copy offset to last list + subi r0,r10,mpList0+4 ; r0 <-- fake mapping ptr (to pmap) for null prev ptrs + bgt- mapSkipListPanic ; choke if this mapping is on too many lists +mapIns32NewList: + lwzx r6,r5,r10 ; get pmap list head + stwx r0,r5,r12 ; initialize prev ptr + subi r5,r5,8 ; get next list offset + cmpwi r6,0 ; was list hdr null? + bdnzt cr0_eq,mapIns32NewList ; loop if more lists to initialize and list hdr was 0 + bne- mapSkipListPanic ; die if pmap list hdr was not null + b mapIns32a + + ; 32-bit processor: loop over each list this mapping is on + ; r4 = mapping + ; r8 = next list offset + ; r10 = ptr to base of pmap list header vector + ; r11 = ptr to base of new mappings list vector + ; r12 = ptr to base of prev ptr vector + + .align 4 +mapIns32a: + lwzx r5,r8,r12 ; get prev ptr from per-proc vector + cmpwi cr1,r8,0 ; more to go? + la r7,mpList0+4(r5) ; get base of prev mappings list vector + lwzx r9,r8,r7 ; *** + stwx r4,r8,r7 ; * insert new mapping in middle of this list + stwx r9,r8,r11 ; *** + subi r8,r8,8 ; get next list offset + bne+ cr1,mapIns32a ; more lists to go + blr ; done + + +/* + * ********************* + * * m a p R e m o v e * + * ********************* + * + * Remove a mapping from pmap skip-lists. The caller has already called mapSearchFull to + * find the mapping, which sets up the skipListPrev array with a vector of the previous + * ptrs for each skip list. When called: + * the pmap is locked (exclusive) + * translation is off, interrupts masked + * 64-bit mode is enabled (if on a 64-bit machine) + * mapSearchFull has just been called for this mappings key + * cr6 is loaded with the corresponding feature flags (in particular, pf64Bit) + * r3 = pmap ptr + * r4 = mapping ptr + * + * There is no return value. 
Except for cr6 (which is global), we trash nonvolatile regs.
+ */
+
+			.align	5
+			.globl	EXT(mapRemove)
+LEXT(mapRemove)
+			lwz	r8,mpFlags(r4)			; get this mappings flags
+			lbz	r10,pmapCurLists(r3)		; get current #lists in use
+			la	r11,mpList0+4(r4)		; r11 <-- base of this mappings list vector
+			mfsprg	r12,0				; get ptr to our per-proc
+			andi.	r9,r8,mpLists			; get #lists this mapping is on (1<=n<=27)
+			slwi	r8,r9,3				; get #lists * 8
+			cmpw	cr5,r9,r10			; compare mpLists to pmapCurLists
+			la	r12,skipListPrev+4(r12)		; r12 <-- base of prev ptr vector
+			bgt--	cr5,mapSkipListPanic		; die if mpLists > pmapCurLists
+			subi	r8,r8,8				; get offset to topmost (last) list this mapping is in
+			bf--	pf64Bitb,mapRem32a		; skip if 32-bit processor
+			subi	r11,r11,4			; we use all 64 bits of list links on 64-bit machines
+			subi	r12,r12,4
+			b	mapRem64a
+
+			; 64-bit processor: loop over each list this mapping is on
+			; r3 = pmap
+			; r4 = mapping
+			; r8 = offset to next list
+			; r10 = pmapCurLists
+			; r11 = ptr to base of mapping list vector
+			; r12 = ptr to base of prev ptr vector in per-proc
+			; cr5 = beq if (mpLists == pmapCurLists)
+
+			.align	5
+mapRem64a:
+			ldx	r5,r8,r12			; get prev ptr from per-proc vector
+			ldx	r9,r8,r11			; get next ptr from mapping
+			cmpwi	cr1,r8,0			; more to go?
+			la	r7,mpList0(r5)			; get base of prev mappings list vector
+			stdx	r9,r8,r7			; point to next from prev
+			subi	r8,r8,8				; get next list offset
+			bne++	cr1,mapRem64a			; loop if another list to unlink from
+
+			; Did we reduce #lists in use by removing last mapping in last list?
+
+			bnelr++	cr5				; if (mpLists!=pmapCurLists) cannot have removed last map
+			la	r5,pmapSkipLists(r3)		; point to vector of list hdrs
+mapRem64b:
+			subic.	r10,r10,1			; get base-0 list#
+			slwi	r8,r10,3			; get offset to last list
+			ldx	r0,r8,r5			; get last list ptr
+			cmpdi	cr1,r0,0			; null?
+			bnelr	cr1				; not null, so we are done
+			stb	r10,pmapCurLists(r3)		; was null, so decrement pmapCurLists
+			bgt	mapRem64b			; loop to see if more than one list was emptied
+			blr
+
+
+			; 32-bit processor: loop over each list this mapping is on
+			; r3 = pmap
+			; r4 = mapping
+			; r8 = offset to next list
+			; r10 = pmapCurLists
+			; r11 = ptr to base of mapping list vector
+			; r12 = ptr to base of prev ptr vector in per-proc
+			; cr5 = beq if (mpLists == pmapCurLists)
+
+			.align	4
+mapRem32a:
+			lwzx	r5,r8,r12			; get prev ptr from per-proc vector
+			lwzx	r9,r8,r11			; get next ptr from mapping
+			cmpwi	cr1,r8,0			; more to go?
+			la	r7,mpList0+4(r5)		; get base of prev mappings list vector
+			stwx	r9,r8,r7			; point to next from prev
+			subi	r8,r8,8				; get next list offset
+			bne+	cr1,mapRem32a			; loop if another list to unlink from
+
+			; Did we reduce #lists in use by removing last mapping in last list?
+
+			bnelr+	cr5				; if (mpLists!=pmapCurLists) cannot have removed last map
+			la	r5,pmapSkipLists+4(r3)		; point to vector of list hdrs
+mapRem32b:
+			subic.	r10,r10,1			; get base-0 list#
+			slwi	r8,r10,3			; get offset to last list
+			lwzx	r0,r8,r5			; get last list ptr
+			cmpwi	cr1,r0,0			; null?
+			bnelr	cr1				; not null, so we are done
+			stb	r10,pmapCurLists(r3)		; was null, so decrement pmapCurLists
+			bgt	mapRem32b			; loop to see if more than one list was emptied
+			blr
+
+
+/*
+ * *************************
+ * * m a p S e t L i s t s *
+ * *************************
+ *
+ * Called to decide how many skip-lists the next mapping will be on. For each pmap,
+ * we maintain a pseudo-random sequence based on a linear feedback shift register.
The
+ * next number is generated by rotating the old value left by 1 and XORing with a
+ * polynomial (actually 4 8-bit polynomials concatenated) and adding 1.
+ * The simple (unclamped) number of lists a mapping is on is the number of trailing 0s
+ * in the pseudo-random sequence, shifted by the (log2-1) of the fanout F, plus one.
+ * This seems to give us a near perfect distribution, in the sense that about F times more nodes
+ * are allocated on n lists, as are on (n+1) lists.
+ *
+ * At one point we used a simple counter to assign lists. While this gave perfect
+ * distribution, there were certain access patterns that would drive a worst case
+ * distribution (e.g., insert low, then high, then low, etc.). Unfortunately,
+ * these patterns were not too uncommon. We changed to a less-than-perfect assignment,
+ * but one that works consistently across all known access patterns.
+ *
+ * Also, we modify the "simple" trailing-0-based list count, to account for an important
+ * observation: because VM does a lot of removing and restoring of mappings in the process of
+ * doing copy-on-write etc, it is common to have the pmap's "random number" (ie, the
+ * count of created mappings) be much larger than the number of mappings currently in the
+ * pmap. This means the simple list count will often be larger than justified by the number of
+ * mappings in the pmap. To avoid this common situation, we clamp the list count to be no more
+ * than ceil(logBaseF(pmapResidentCnt)).
+ *
+ * Finally, we also clamp the list count to kSkipListMaxLists.
+ *
+ * We are passed the pmap ptr in r3. Called with translation on, interrupts enabled,
+ * and in 32-bit mode.
+ */
+			.align	5
+			.globl	EXT(mapSetLists)
+LEXT(mapSetLists)
+			lwz	r5,pmapRandNum(r3)		; get the per-pmap counter of mapping creates
+			lwz	r4,pmapResidentCnt(r3)		; get number of mappings in this pmap
+			lis	r11,hi16(0xA7CBF5B9)		; Get polynomial (I just made this up...)
+			li	r0,-1				; get a mask of 1s
+			ori	r11,r11,lo16(0xA7CBF5B9)	; Get polynomial (I just made this up...)
+			rlwinm	r5,r5,1,0,31			; Rotate
+			cntlzw	r7,r4				; get magnitude of pmapResidentCnt
+			xor	r5,r5,r11			; Munge with poly
+			srw	r7,r0,r7			; r7 <- mask for magnitude of pmapResidentCnt
+			addi	r6,r5,1				; increment pmapRandNum non-atomically
+			andc	r8,r5,r6			; get a mask for trailing zeroes in pmapRandNum
+			stw	r6,pmapRandNum(r3)		; update "random number"
+			and	r8,r8,r7			; clamp trailing 0s to magnitude of pmapResidentCnt
+			rlwinm	r8,r8,0,32-(kSkipListMaxLists*(kSkipListFanoutShift+1))+1,31 ; clamp to kSkipListMaxLists
+			cntlzw	r9,r8				; count leading 0s in the mask
+			subfic	r10,r9,32			; r10 <- trailing zero count
+			srwi	r11,r10,kSkipListFanoutShift	; shift by 1 if fanout is 4, 2 if 8, etc
+			addi	r3,r11,1			; every mapping is on at least one list
+			blr
+
+
+/*
+ * *************************************
+ * * m a p S k i p L i s t V e r i f y *
+ * *************************************
+ *
+ * This does a fairly thorough sweep through a pmaps skip-list data structure, doing
+ * consistency checks. It is typically called (from hw_exceptions.s) from debug or
+ * instrumented builds. It is probably not a good idea to call this in production builds,
+ * as it must run with exceptions disabled and can take a long time to verify a big pmap.
+ * It runs in O(n*ln(n)).
+ *
+ * Called on a bl, with the pmap ptr in r20. We assume the pmap is locked (shared) and
+ * that EE and DR are off. We check all 64 bits of ptrs even on 32-bit machines.
+ * We use r20-r31, cr0, cr1, and cr7.
If we return, no inconsistencies were found. + * + * You will notice we make little attempt to schedule the code; clarity is deemed more + * important than speed. + */ + + + /* + * mapSkipListVerifyC is a version that is callable from C. + * This should be called only from the debugger, IT DOES NOT LOCK THE PMAP!!!! + */ + + .globl EXT(mapSkipListVerifyC) +LEXT(mapSkipListVerifyC) + + stwu r1,-(FM_ALIGN((31-13+1)*4)+FM_SIZE)(r1) ; Make some space on the stack + mflr r0 ; Save the link register + stmw r13,FM_ARG0(r1) ; Save all registers + stw r0,(FM_ALIGN((31-13+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return + + lwz r15,pmapvr(r3) ; Get the V to R translation + lwz r16,pmapvr+4(r3) ; Get the V to R translation + mr r19,r4 ; Save register dump area + + bl EXT(mapSetUp) ; Get set up + + mr r17,r11 + xor r20,r3,r16 ; Translate 32-bit portion + bf-- pf64Bitb,mslvc32a ; Skip if 32-bit... + + rldimi r20,r15,32,0 ; Shift the fixed upper part of the physical over and cram in top + +mslvc32a: lis r18,hi16(EXT(DebugWork)) + ori r18,r18,lo16(EXT(DebugWork)) + li r0,0x4262 + stw r0,4(r18) ; Make sure the test knows to run + + bl EXT(mapSkipListVerify) ; Run the test + + li r0,0 + stw r0,4(r18) ; Remove explicit call flag + + bt++ pf64Bitb,mslvc64a ; This is 64-bit... + + mtmsr r17 ; Restore enables/translation/etc. + isync + + li r0,0 + stw r0,0x000+0(r19) + stw r0,0x000+4(r19) + stw r0,0x008+0(r19) + stw r1,0x008+4(r19) + stw r0,0x010+0(r19) + stw r2,0x010+4(r19) + stw r0,0x018+0(r19) + stw r3,0x018+4(r19) + stw r0,0x020+0(r19) + stw r4,0x020+4(r19) + stw r0,0x028+0(r19) + stw r5,0x028+4(r19) + stw r0,0x030+0(r19) + stw r6,0x030+4(r19) + stw r0,0x038+0(r19) + stw r7,0x038+4(r19) + stw r0,0x040+0(r19) + stw r8,0x040+4(r19) + stw r0,0x048+0(r19) + stw r9,0x048+4(r19) + stw r0,0x050+0(r19) + stw r10,0x050+4(r19) + stw r0,0x058+0(r19) + stw r11,0x058+4(r19) + stw r0,0x060+0(r19) + stw r12,0x060+4(r19) + stw r0,0x068+0(r19) + stw r13,0x068+4(r19) + stw r0,0x070+0(r19) + stw r14,0x070+4(r19) + stw r0,0x078+0(r19) + stw r15,0x078+4(r19) + stw r0,0x080+0(r19) + stw r16,0x080+4(r19) + stw r0,0x088+0(r19) + stw r17,0x088+4(r19) + stw r0,0x090+0(r19) + stw r18,0x090+4(r19) + stw r0,0x098+0(r19) + stw r19,0x098+4(r19) + stw r0,0x0A0+0(r19) + stw r20,0x0A0+4(r19) + stw r0,0x0A8+0(r19) + stw r21,0x0A8+4(r19) + stw r0,0x0B0+0(r19) + stw r22,0x0B0+4(r19) + stw r0,0x0B8+0(r19) + stw r23,0x0B8+4(r19) + stw r0,0x0C0+0(r19) + stw r24,0x0C0+4(r19) + stw r0,0x0C8+0(r19) + stw r25,0x0C8+4(r19) + stw r0,0x0D0+0(r19) + stw r26,0x0D0+4(r19) + stw r0,0x0D8+0(r19) + stw r27,0x0D8+4(r19) + stw r0,0x0E0+0(r19) + stw r28,0x0E0+4(r19) + stw r0,0x0E8+0(r19) + stw r29,0x0E8+4(r19) + stw r0,0x0F0+0(r19) + stw r30,0x0F0+4(r19) + stw r0,0x0F8+0(r19) + stw r31,0x0F8+4(r19) + + b mslvcreturn ; Join common... + +mslvc64a: mtmsrd r17 ; Restore enables/translation/etc. 
+
+			isync
+
+			std	r0,0x000(r19)
+			std	r1,0x008(r19)
+			std	r2,0x010(r19)
+			std	r3,0x018(r19)
+			std	r4,0x020(r19)
+			std	r5,0x028(r19)
+			std	r6,0x030(r19)
+			std	r7,0x038(r19)
+			std	r8,0x040(r19)
+			std	r9,0x048(r19)
+			std	r10,0x050(r19)
+			std	r11,0x058(r19)
+			std	r12,0x060(r19)
+			std	r13,0x068(r19)
+			std	r14,0x070(r19)
+			std	r15,0x078(r19)
+			std	r16,0x080(r19)
+			std	r17,0x088(r19)
+			std	r18,0x090(r19)
+			std	r19,0x098(r19)
+			std	r20,0x0A0(r19)
+			std	r21,0x0A8(r19)
+			std	r22,0x0B0(r19)
+			std	r23,0x0B8(r19)
+			std	r24,0x0C0(r19)
+			std	r25,0x0C8(r19)
+			std	r26,0x0D0(r19)
+			std	r27,0x0D8(r19)
+			std	r28,0x0E0(r19)
+			std	r29,0x0E8(r19)
+			std	r30,0x0F0(r19)
+			std	r31,0x0F8(r19)
+
+
+mslvcreturn:
+			lwz	r0,(FM_ALIGN((31-13+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Get the return
+			lmw	r13,FM_ARG0(r1)			; Get the registers
+			mtlr	r0				; Restore the return
+			lwz	r1,0(r1)			; Pop the stack
+			blr
+
+
+			.globl	EXT(mapSkipListVerify)
+LEXT(mapSkipListVerify)
+			mflr	r31				; save LR so we can bl to mapVerifyDie
+
+			; If we have already found an inconsistency and died, do not do so again, to
+			; avoid a loop.
+
+			lis	r27,hi16(EXT(DebugWork))
+			ori	r27,r27,lo16(EXT(DebugWork))
+			lwz	r0,4(r27)			; Get the explicit entry flag
+			lwz	r27,0(r27)			; Get lockout
+			cmplwi	r0,0x4262			; Should we run anyway?
+			beq--	mslvAnyway			; Yes...
+			cmpwi	r27,0				; have we already found an error?
+			bnelr--					; yes, just return without checking again
+
+mslvAnyway:
+			; Not recursive call, so initialize.
+
+			mfsprg	r23,2				; get the feature flags
+			mtcrf	0x02,r23			; put pf64Bit where we can test it
+			lbz	r26,pmapCurLists(r20)		; get #lists that are in use
+			lwz	r21,pmapResidentCnt(r20)	; get #mappings in this pmap
+			cmpwi	r26,kSkipListMaxLists		; in range?
+			bgtl--	mapVerifyDie			; pmapCurLists is too big
+
+			; To prevent infinite loops, set limit of (pmapCurLists*pmapResidentCnt) iterations.
+			; Since we walk each list this is the max number of mappings we could visit.
+
+			li	r23,0				; initialize count
+mapVer0:
+			subic.	r26,r26,1			; loop pmapCurLists times (but at least once)
+			add	r23,r23,r21			; compute (pmapCurLists*pmapResidentCnt)
+			bgt	mapVer0				; this will be a 64-bit qty on 64-bit machines
+
+			li	r22,kSkipListMaxLists		; initialize list#
+			bf--	pf64Bitb,mapVer32		; go handle a 32-bit processor
+
+			; 64-bit machine.
+			;
+			; Loop over each list, counting mappings in each. We first check whether or not
+			; the list is empty (ie, if the pmapSkipLists ptr is null.) All lists above
+			; pmapCurLists should be empty, and no list at or below pmapCurLists should be.
+			; r20 = pmap ptr
+			; r21 = decrementing counter of mappings in this pmap
+			; r22 = next list# (1...kSkipListMaxLists)
+			; r23 = decrementing counter for infinite loop check
+
+mapVer64:
+			slwi	r25,r22,3			; get offset to next skiplist
+			la	r26,pmapSkipLists(r20)		; get ptr to base of skiplist vector
+			subi	r25,r25,8
+			ldx	r26,r25,r26			; get 1st mapping on this list, if any
+			lbz	r28,pmapCurLists(r20)		; get #lists in use
+			cmpdi	cr6,r26,0			; set cr6_eq if this list is null ("null")
+			cmpw	cr7,r22,r28			; set cr7_gt if this list is > pmapCurLists ("high")
+			crxor	cr0_eq,cr6_eq,cr7_gt		; cr0_eq <-- (null & !high) | (!null & high)
+			beql--	mapVerifyDie			; die if this list is null when it should not be, etc
+			b	mapVer64g
+
+			; Loop over each node in the list.
+
+			; r20 = pmap ptr
+			; r21 = decrementing counter of mappings in this pmap
+			; r22 = this list# (1...kSkipListMaxLists)
+			; r23 = decrementing counter for infinite loop check
+			; r25 = offset to this skiplist (ie, ((r22<<3)-8))
+			; r26 = mapping
+
+mapVer64a:
+			lwz	r29,mpFlags(r26)		; get bits for this mapping
+			ld	r28,mpVAddr(r26)		; get key
+			subic.	r23,r23,1			; check for loops
+			bltl--	mapVerifyDie			; we have visited > (pmapCurLists*pmapResidentCnt) nodes
+			andi.	r30,r26,mpBasicSize-1		; test address for alignment
+			bnel--	mapVerifyDie			; not aligned
+			andi.	r27,r29,mpLists			; get #lists this mapping is supposed to be on
+			cmpw	cr1,r27,r22			; is it supposed to be on this list?
+			bltl--	cr1,mapVerifyDie		; mappings mpLists is too low
+			cmpwi	r27,kSkipListMaxLists		; too big?
+			bgtl--	mapVerifyDie			; mappings mpLists > max
+			rldicr	r28,r28,0,51			; clear low 12 bits of va
+			bne++	cr1,mapVer64f			; jump if this is not highest list for this node
+
+			; This is the "highest" (last) list this mapping is on.
+			; Do some additional checks (so we only do them once per mapping.)
+			; First, if a block mapping or nested pmap, compute block end.
+
+			andi.	r29,r29,mpBlock+mpNest		; is it block mapping or nested pmap?
+			subi	r21,r21,1			; count mappings in this pmap
+			beq++	mapVer64b			; no, neither block mapping nor nested pmap
+			lhz	r27,mpBSize(r26)		; get #pages or #segments
+			cmpwi	r29,mpBlock			; which one is it?
+			sldi	r29,r27,12			; assume block mapping, units are (pages-1)
+			beq	mapVer64b			; guessed correctly
+			addi	r27,r27,1			; units of nested pmap are (#segs-1)
+			sldi	r29,r27,28			; convert to #bytes
+			subi	r29,r29,4096			; get offset to last byte in nested pmap
+
+			; Here with r29 = size of block - 4k, or 0 if mapping is a scalar page.
+
+mapVer64b:
+			add	r24,r28,r29			; r24 <- address of last valid page in this mapping
+			la	r28,mpList0(r26)		; get base of this mappings vector
+			lwz	r27,mpFlags(r26)		; Get the number of lists
+			andi.	r27,r27,mpLists			; get #lists this mapping is on (1<=n<=27)
+			cmplwi	r27,mpBasicLists		; Into bigger mapping?
+			li	r27,mpBasicLists*8-8		; Assume normal
+			ble+	mapVer64c			; It is...
+			li	r27,kSkipListMaxLists*8-8	; initialize list offset for inner loop
+
+			; Inner loop over each list link in this mappings mpList vector.
+			; r24 = address of last valid page in this mapping
+			; r27 = offset for next list in inner loop
+			; r28 = base of this mappings list links
+
+mapVer64c:
+			cmpw	cr1,r27,r25			; higher, lower, or same?
+			ldx	r29,r27,r28			; get link to next mapping at this level
+			mr.	r29,r29				; null?
+			beq	mapVer64d			; link null, which is always OK
+			bgtl--	cr1,mapVerifyDie		; a mapping has a non-null list higher than its mpLists
+			ld	r30,mpVAddr(r29)		; get next mappings va
+			rldicr	r30,r30,0,51			; zero low 12 bits
+			cmpld	r30,r24				; compare next key with ours
+			blel--	mapVerifyDie			; a next node has key <= to ours
+mapVer64d:
+			subic.	r27,r27,8			; move on to next list
+			bne++	mapVer64c			; loop if more to go
+
+			; Next node on current list, or next list if current done, or return if no more lists.
+
+mapVer64f:
+			la	r28,mpList0(r26)		; get base of this mappings vector
+			ldx	r26,r25,r28			; get next mapping on this list
+mapVer64g:
+			mr.	r26,r26				; is there one?
+			bne++	mapVer64a			; yes, handle
+			subic.	r22,r22,1			; is there another list?
+			bgt++	mapVer64			; loop if so
+
+			cmpwi	r21,0				; did we find all the mappings in the pmap?
+			bnel--	mapVerifyDie			; no
+			mtlr	r31				; restore return address
+			li	r3,0
+			blr
+
+
+			; Handle 32-bit machine.
+ +mapVer32: + lwz r24,mpFlags(r20) ; Get number of lists + la r30,pmapSkipLists(r20) ; first, check the pmap list hdrs + andi. r24,r24,mpLists ; Clean the number of lists + bl mapVerUpperWordsAre0 ; are the upper words of each list all 0? + + ; Loop over each list, counting mappings in each. We first check whether or not + ; the list is empty. All lists above pmapCurLists should be empty, and no list + ; at or below pmapCurLists should be. + ; + ; r20 = pmap ptr + ; r21 = decrementing counter of mappings in this pmap + ; r22 = next list# (1...kSkipListMaxLists) + ; r23 = decrementing counter for infinite loop check + +mapVer32NextList: + lbz r28,pmapCurLists(r20) ; get #lists in use + slwi r25,r22,3 ; get offset to next skiplist + la r26,pmapSkipLists+4(r20) ; get ptr to base of skiplist vector + subi r25,r25,8 + lwzx r26,r25,r26 ; get the 1st mapping on this list, or 0 + cmpw cr7,r22,r28 ; set cr7_gt if this list is > pmapCurLists ("high") + cmpwi cr6,r26,0 ; set cr6_eq if this list is null ("null") + crxor cr0_eq,cr6_eq,cr7_gt ; cr0_eq <-- (null & !high) | (!null & high) + beql- mapVerifyDie ; die if this list is null when it should not be, etc + b mapVer32g + + ; Loop over each node in the list. + ; r20 = pmap ptr + ; r21 = decrementing counter of mappings in this pmap + ; r22 = this list# (1...kSkipListMaxLists) + ; r23 = decrementing counter for infinite loop check + ; r25 = offset to this skiplist (ie, ((r22<<3)-8)) + ; r26 = mapping + +mapVer32a: + lwz r29,mpFlags(r26) ; get bits for this mapping + andi. r30,r26,mpBasicSize-1 ; test address for alignment + lwz r24,mpVAddr+0(r26) ; get upper word of key + bnel- mapVerifyDie ; mapping address not 64-byte aligned + lwz r28,mpVAddr+4(r26) ; get lower word of key + subic. r23,r23,1 ; check for loops + bltl- mapVerifyDie ; we have visited > (pmapCurLists*pmapResidentCnt) nodes + cmpwi r24,0 ; upper word of key (ie, va) should be 0 + bnel- mapVerifyDie ; was not + andi. r27,r29,mpLists ; get #lists this mapping is supposed to be on + cmpw cr1,r27,r22 ; is it supposed to be on this list? + bltl- cr1,mapVerifyDie ; mappings mpLists is too low + cmpwi r27,kSkipListMaxLists ; too big? + bgtl- mapVerifyDie ; mappings mpLists > max + rlwinm r28,r28,0,0,19 ; clear low 12 bits of va + bne+ cr1,mapVer32f ; jump if this is not highest list for this node + + ; This is the "highest" (last) list this mapping is on. + ; Do some additional checks (so we only do them once per mapping.) + ; First, make sure upper words of the mpList vector are 0. + + subi r21,r21,1 ; count mappings in this pmap + lwz r24,mpFlags(r26) ; Get number of lists + la r30,mpList0(r26) ; point to base of skiplist vector + andi. r24,r24,mpLists ; Clean the number of lists + bl mapVerUpperWordsAre0 ; make sure upper words are all 0 (uses r24 and r27) + + ; Then, if a block mapping or nested pmap, compute block end. + + andi. r29,r29,mpBlock+mpNest ; is it block mapping or nested pmap? + beq+ mapVer32b ; no + lhz r27,mpBSize(r26) ; get #pages or #segments + cmpwi r29,mpBlock ; which one is it? + slwi r29,r27,12 ; assume block mapping, units are pages + beq mapVer32b ; guessed correctly + addi r27,r27,1 ; units of nested pmap are (#segs-1) + slwi r29,r27,28 ; convert to #bytes + subi r29,r29,4096 ; get offset to last byte in nested pmap + + ; Here with r29 = size of block - 4k, or 0 if mapping is a scalar page. 
+ +mapVer32b: + add r24,r28,r29 ; r24 <- address of last valid page in this mapping + la r28,mpList0+4(r26) ; get base of this mappings vector + lwz r27,mpFlags(r26) ; Get the number of lists + andi. r27,r27,mpLists ; get #lists this mapping is on (1<=n<=27) + cmplwi r27,mpBasicLists ; Into bigger mapping? + li r27,mpBasicLists*8-8 ; Assume normal + ble+ mapVer32c ; It is... + li r27,kSkipListMaxLists*8-8 ; initialize list offset for inner loop + + ; Inner loop over each list in this mappings mpList vector. + ; r24 = address of last valid page in this mapping + ; r27 = offset for next list in inner loop + ; r28 = base of this mappings list links + +mapVer32c: + cmpw cr1,r27,r25 ; higher, lower, or same? + lwzx r29,r27,r28 ; get link to next mapping at this level + mr. r29,r29 ; null? + beq mapVer32d ; link null, which is always OK + + + bgtl- cr1,mapVerifyDie ; a mapping has a non-null list higher than its mpLists + lwz r30,mpVAddr+4(r29) ; get next mappings va + rlwinm r30,r30,0,0,19 ; zero low 12 bits + cmplw r30,r24 ; compare next key with ours + blel- mapVerifyDie ; a next node has key <= to ours +mapVer32d: + subic. r27,r27,8 ; move on to next list + bne+ mapVer32c ; loop if more to go + + ; Next node on current list, or next list if current done, or return if no more lists. + +mapVer32f: + la r28,mpList0+4(r26) ; get base of this mappings vector again + lwzx r26,r25,r28 ; get next mapping on this list +mapVer32g: + mr. r26,r26 ; is there one? + bne+ mapVer32a ; yes, handle + subic. r22,r22,1 ; is there another list? + bgt+ mapVer32NextList ; loop if so + + cmpwi r21,0 ; did we find all the mappings in the pmap? + bnel- mapVerifyDie ; no + mtlr r31 ; restore return address + li r3,0 + blr + + ; Subroutine to verify that the upper words of a vector of kSkipListMaxLists + ; doublewords are 0. + ; r30 = ptr to base of vector + ; Uses r24 and r27. + +mapVerUpperWordsAre0: + cmplwi r24,mpBasicLists ; Do we have more than basic? + li r24,mpBasicLists*8 ; Assume basic + ble++ mapVerUpper1 ; We have the basic size + li r24,kSkipListMaxLists*8 ; Use max size + +mapVerUpper1: + subic. r24,r24,8 ; get offset to next doubleword + lwzx r27,r24,r30 ; get upper word + cmpwi cr1,r27,0 ; 0 ? + bne- cr1,mapVerifyDie ; die if not, passing callers LR + bgt+ mapVerUpper1 ; loop if more to go + blr + + ; bl here if mapSkipListVerify detects an inconsistency. + +mapVerifyDie: + mflr r3 + mtlr r31 ; Restore return + lis r31,hi16(EXT(DebugWork)) + ori r31,r31,lo16(EXT(DebugWork)) + lwz r0,4(r31) ; Get the explicit entry flag + cmplwi r0,0x4262 ; Should we run anyway? + beqlr-- ; Explicit call, return... + + li r0,1 + stw r0,0(r31) ; Lock out further calls + BREAKPOINT_TRAP ; hopefully, enter debugger + b .-4 + + +/* + * Panic (choke, to be exact) because of messed up skip lists. The LR points back + * to the original caller of the skip-list function. + */ + +mapSkipListPanic: ; skip-lists are screwed up + lis r0,hi16(Choke) + ori r0,r0,lo16(Choke) + li r3,failSkipLists ; get choke code + sc ; choke + b .-4 + + diff --git a/osfmk/ppc/spec_reg.h b/osfmk/ppc/spec_reg.h index e237c5f2e..ad15ec476 100644 --- a/osfmk/ppc/spec_reg.h +++ b/osfmk/ppc/spec_reg.h @@ -30,18 +30,13 @@ #define _PPC_SPEC_REG_H_ /* Defines for PVRs */ -#define PROCESSOR_VERSION_601 1 -#define PROCESSOR_VERSION_603 3 -#define PROCESSOR_VERSION_604 4 -#define PROCESSOR_VERSION_603e 6 -#define PROCESSOR_VERSION_750 8 +#define PROCESSOR_VERSION_750 8 /* ? */ #define PROCESSOR_VERSION_750FX 0x7000 /* ? 
*/ -#define PROCESSOR_VERSION_604e 9 -#define PROCESSOR_VERSION_604ev 10 /* ? */ #define PROCESSOR_VERSION_7400 12 /* ? */ #define PROCESSOR_VERSION_7410 0x800C /* ? */ #define PROCESSOR_VERSION_7450 0x8000 /* ? */ #define PROCESSOR_VERSION_7455 0x8001 /* ? */ #define PROCESSOR_VERSION_7457 0x8002 /* ? */ +#define PROCESSOR_VERSION_970 0x0039 /* ? */ #endif /* _PPC_SPEC_REG_H_ */ diff --git a/osfmk/ppc/start.s b/osfmk/ppc/start.s index e5cf7b6cf..29c228362 100644 --- a/osfmk/ppc/start.s +++ b/osfmk/ppc/start.s @@ -44,14 +44,18 @@ #define ptRevision 6 #define ptFeatures 8 #define ptCPUCap 12 -#define ptInitRout 16 -#define ptRptdProc 20 -#define ptTempMax 24 -#define ptTempThr 28 -#define ptLineSize 32 -#define ptl1iSize 36 -#define ptl1dSize 40 -#define ptSize 44 +#define ptPatch 16 +#define ptInitRout 20 +#define ptRptdProc 24 +#define ptTempMax 28 +#define ptTempThr 32 +#define ptLineSize 36 +#define ptl1iSize 40 +#define ptl1dSize 44 +#define ptPTEG 48 +#define ptMaxVAddr 52 +#define ptMaxPAddr 56 +#define ptSize 60 #define bootCPU 10 #define firstInit 9 @@ -63,59 +67,26 @@ .file "start.s" - .data - - /* Align on page boundry */ - .align PPC_PGSHIFT - /* Red zone for interrupt stack, one page (will be unmapped)*/ - .set ., .+PPC_PGBYTES - /* intstack itself */ - - .globl EXT(FixedStackStart) -EXT(FixedStackStart): - - .globl EXT(intstack) -EXT(intstack): - .set ., .+INTSTACK_SIZE*NCPUS - - /* Debugger stack - used by the debugger if present */ - /* NOTE!!! Keep the debugger stack right after the interrupt stack */ -#if MACH_KDP || MACH_KDB - .globl EXT(debstack) -EXT(debstack): - .set ., .+KERNEL_STACK_SIZE*NCPUS - - .globl EXT(FixedStackEnd) -EXT(FixedStackEnd): - - .align ALIGN - .globl EXT(intstack_top_ss) -EXT(intstack_top_ss): - .long EXT(intstack)+INTSTACK_SIZE-FM_SIZE /* intstack_top_ss points to the top of interrupt stack */ - - .align ALIGN - .globl EXT(debstack_top_ss) -EXT(debstack_top_ss): - - .long EXT(debstack)+KERNEL_STACK_SIZE-FM_SIZE /* debstack_top_ss points to the top of debug stack */ - - .globl EXT(debstackptr) -EXT(debstackptr): - .long EXT(debstack)+KERNEL_STACK_SIZE-FM_SIZE - -#endif /* MACH_KDP || MACH_KDB */ - - /* * All CPUs start here. * * This code is called from SecondaryLoader * * Various arguments are passed via a table: - * ARG0 = pointer to other startup parameters + * R3 = pointer to other startup parameters */ .text - + +ENTRY(resetPOR,TAG_NO_FRAME_USED) + + li r12,0 ; Get a 0 + stw r12,0xF0(0) ; Make sure the special flag is clear + mtmsrd r12 ; Make sure we are in 32-bit mode + isync ; Really make sure + lwz r3,0xF4(0) ; Get the boot_args pointer + b startJoin ; Join up... 
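
Read as a C struct, the per-processor feature table entry that the revised pt* offsets above describe lays out roughly as in the sketch below. The struct itself and the names for the first two fields are inferred from the table at the end of this file; the source defines the table directly in assembly, not with this declaration:

#include <stdint.h>

/* Illustrative layout of one processor_types entry after this change. */
struct pt_entry {
    uint32_t ptFilter;      /*  0: mask ANDed with the PVR before comparing (inferred) */
    uint16_t ptVersion;     /*  4: processor version from the PVR (inferred) */
    uint16_t ptRevision;    /*  6: revision from the PVR (0 = match any) */
    uint32_t ptFeatures;    /*  8: pf* feature bits */
    uint32_t ptCPUCap;      /* 12: default _cpu_capabilities value */
    uint32_t ptPatch;       /* 16: patch features (new in this revision) */
    uint32_t ptInitRout;    /* 20: per-processor init routine */
    uint32_t ptRptdProc;    /* 24: reported processor (CPU subtype) */
    uint32_t ptTempMax;     /* 28: maximum operating temperature */
    uint32_t ptTempThr;     /* 32: temperature threshold */
    uint32_t ptLineSize;    /* 36: L1 cache line size */
    uint32_t ptl1iSize;     /* 40: L1 i-cache size */
    uint32_t ptl1dSize;     /* 44: L1 d-cache size */
    uint32_t ptPTEG;        /* 48: PTEG size (new) */
    uint32_t ptMaxVAddr;    /* 52: maximum effective-address bits (new) */
    uint32_t ptMaxPAddr;    /* 56: maximum physical-address bits (new) */
};                          /* sizeof(struct pt_entry) == ptSize == 60 */
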
+ + ENTRY(_start_cpu,TAG_NO_FRAME_USED) crclr bootCPU ; Set non-boot processor crclr firstInit ; Set not first time init @@ -135,11 +106,12 @@ ENTRY(_start_cpu,TAG_NO_FRAME_USED) mtspr tbl,r17 ; Clear bottom so we do not tick mtspr tbu,r15 ; Set top mtspr tbl,r16 ; Then bottom again - b allstart ENTRY(_start,TAG_NO_FRAME_USED) +startJoin: + mflr r2 ; Save the return address lis r30,hi16(EXT(per_proc_info)) ; Set current per_proc ori r30,r30,lo16(EXT(per_proc_info)) ; Set current per_proc crset bootCPU ; Set boot processor @@ -162,51 +134,17 @@ allstart: crand firstBoot,bootCPU,firstInit ; Indicate if we are on the initial first processor startup - lis r23,hi16(EXT(per_proc_info)) ; Set base per_proc - ori r23,r23,lo16(EXT(per_proc_info)) ; Set base per_proc - mtsprg 0,r30 ; Set the per_proc - mfspr r6,hid0 ; Get the HID0 - li r7,MSR_VM_OFF ; Get real mode MSR - rlwinm r6,r6,0,sleep+1,doze-1 ; Remove any vestiges of sleep - mtspr hid0,r6 ; Set the insominac HID0 - mtmsr r7 ; Set the real mode SRR - isync - -; Map in the first 256Mb in both instruction and data BATs - - li r7,((0x7FF<<2)|2) ; Set up for V=R 256MB in supervisor space - li r8,((2<<3)|2) ; Physical address = 0, coherent, R/W li r9,0 ; Clear out a register - - mtsprg 1,r9 ; Clear the extra SPRGs + mtsprg 1,r9 ; Clear the SPRGs mtsprg 2,r9 mtsprg 3,r9 - sync - isync - mtdbatu 0,r7 ; Map bottom 256MB - mtdbatl 0,r8 ; Map bottom 256MB - mtdbatu 1,r9 ; Invalidate maps - mtdbatl 1,r9 ; Invalidate maps - mtdbatu 2,r9 ; Invalidate maps - mtdbatl 2,r9 ; Invalidate maps - mtdbatu 3,r9 ; Invalidate maps - mtdbatl 3,r9 ; Invalidate maps - sync - isync - mtibatu 0,r7 ; Map bottom 256MB - mtibatl 0,r8 ; Map bottom 256MB - mtibatu 1,r9 ; Invalidate maps - mtibatl 1,r9 ; Invalidate maps - mtibatu 2,r9 ; Invalidate maps - mtibatl 2,r9 ; Invalidate maps - mtibatu 3,r9 ; Invalidate maps - mtibatl 3,r9 ; Invalidate maps - sync - isync - + li r7,MSR_VM_OFF ; Get real mode MSR + mtmsr r7 ; Set the real mode SRR + isync + lis r26,hi16(processor_types) ; Point to processor table ori r26,r26,lo16(processor_types) ; Other half mfpvr r10 ; Get the PVR @@ -227,20 +165,21 @@ donePVR: lwz r20,ptInitRout(r26) ; Grab the special init routine ; ; The following code just does a general initialization of the features just ; after the initial first-time boot. This is not done after waking up or on -; any "secondary" processor. +; any "secondary" processor. Just after the boot-processor init, we copy the +; features to any possible per_proc. ; ; We are just setting defaults. The specific initialization code will modify these ; if necessary. 
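
The patch_table loop added in the next hunk applies boot-time code patches keyed on either a feature mask or the reported processor. Its matching logic is roughly the C sketch below; the patch_entry layout and the PATCH_* constant values are assumptions read off the assembly, not the real definitions:

#include <stdint.h>

#define PATCH_FEATURE   1          /* assumed value; only the comparisons matter here */
#define PATCH_PROCESSOR 2          /* assumed value */

struct patch_entry {               /* assumed layout mirroring patchType/patchValue/patchAddr/patchData */
    uint32_t  patchType;
    uint32_t  patchValue;          /* feature mask, or reported-processor code */
    uint32_t *patchAddr;           /* instruction word to rewrite */
    uint32_t  patchData;           /* replacement instruction */
};

static void apply_patches(struct patch_entry *pe, int count,
                          uint32_t ptPatch, uint32_t rptdProc)
{
    for (int i = 0; i < count; i++, pe++) {
        int match = (pe->patchType == PATCH_FEATURE   && (pe->patchValue & ptPatch) != 0)
                 || (pe->patchType == PATCH_PROCESSOR &&  pe->patchValue == rptdProc);
        if (!match)
            continue;
        *pe->patchAddr = pe->patchData;   /* rewrite the instruction word in place */
        /* The assembly then flushes the d-cache line (dcbf/sync) and invalidates
           the i-cache line (icbi/isync) so the new instruction is seen. */
    }
}
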
; - lis r13,hi16(EXT(_cpu_capabilities)) ; Get the address of _cpu_capabilities - ori r13,r13,lo16(EXT(_cpu_capabilities)) + lis r18,hi16(EXT(_cpu_capabilities)) ; Get the address of _cpu_capabilities + ori r18,r18,lo16(EXT(_cpu_capabilities)) lwz r17,ptCPUCap(r26) ; Get the default cpu capabilities - stw r17, 0(r13) ; Save the default value in _cpu_capabilities + stw r17, 0(r18) ; Save the default value in _cpu_capabilities lwz r17,ptFeatures(r26) ; Pick up the features - lwz r13,ptRptdProc(r26) ; Get the reported processor - sth r13,pfrptdProc(r30) ; Set the reported processor + lwz r18,ptRptdProc(r26) ; Get the reported processor + sth r18,pfrptdProc(r30) ; Set the reported processor lwz r13,ptTempMax(r26) ; Get maximum operating temperature stw r13,thrmmaxTemp(r30) ; Set the maximum @@ -253,18 +192,63 @@ donePVR: lwz r20,ptInitRout(r26) ; Grab the special init routine stw r13,pfl1iSize(r30) ; Save it lwz r13,ptl1dSize(r26) ; Get dcache size stw r13,pfl1dSize(r30) ; Save it + lwz r13,ptPTEG(r26) ; Get PTEG size address + stw r13,pfPTEG(r30) ; Save it + lwz r13,ptMaxVAddr(r26) ; Get max virtual address + stw r13,pfMaxVAddr(r30) ; Save it + lwz r13,ptMaxPAddr(r26) ; Get max physical address + stw r13,pfMaxPAddr(r30) ; Save it + lis r11,hi16(EXT(patch_table)) + ori r11,r11,lo16(EXT(patch_table)) + lwz r19,ptPatch(r26) ; Get ptPatch field + li r12,PATCH_TABLE_SIZE + mtctr r12 +patch_loop: + lwz r16,patchType(r11) ; Load the patch type + lwz r15,patchValue(r11) ; Load the patch value + cmplwi cr1,r16,PATCH_FEATURE ; Is it a patch feature entry + and. r14,r15,r19 ; Is it set in the patch feature + crandc cr0_eq,cr1_eq,cr0_eq ; Do we have a match + beq patch_apply ; Yes, patch memory + cmplwi cr1,r16,PATCH_PROCESSOR ; Is it a patch processor entry + cmplw cr0,r15,r18 ; Check matching processor + crand cr0_eq,cr1_eq,cr0_eq ; Do we have a match + bne patch_skip ; No, skip patch memory +patch_apply: + lwz r13,patchAddr(r11) ; Load the address to patch + lwz r14,patchData(r11) ; Load the patch data + stw r14,0(r13) ; Patch the location + dcbf 0,r13 ; Flush the old one + sync ; Make sure we see it all + icbi 0,r13 ; Flush the i-cache + isync ; Hang out + sync ; Hang out some more... +patch_skip: + addi r11,r11,peSize ; Point to the next patch entry + bdnz patch_loop ; Loop if in the range b doOurInit ; Go do processor specific initialization... notFirst: lwz r17,pfAvailable(r30) ; Get our features - rlwinm. r0,r17,0,pfValidb,pfValidb ; Have we set up this CPU yet? - bne doOurInit ; Yeah, must be wakeup... +doOurInit: mr. r20,r20 ; See if initialization routine + crand firstBoot,bootCPU,firstInit ; Indicate if we are on the initial first processor startup + bnelrl ; Do the initialization + + ori r17,r17,lo16(pfValid) ; Set the valid bit + stw r17,pfAvailable(r30) ; Set the available features + + bf firstBoot,nofeatcpy ; Skip feature propagate if not first time boot... + + li r2,NCPUS ; Get number of CPUs lis r23,hi16(EXT(per_proc_info)) ; Set base per_proc ori r23,r23,lo16(EXT(per_proc_info)) ; Set base per_proc + addi r6,r23,ppSize ; Point to the next one - la r7,pfAvailable(r30) ; Point to features of our processor +cpyFCpu: addic. 
r2,r2,-1			; Count down
 la	r8,pfAvailable(r23)		; Point to features of boot processor
+	la	r7,pfAvailable(r6)		; Point to features of our processor
 li	r9,(pfSize+thrmSize)/4		; Get size of a features area
+	ble--	nofeatcpy			; Copied all we need
 
cpyFeat:	subi	r9,r9,1			; Count word
 lwz	r0,0(r8)			; Get boot cpu features
@@ -273,17 +257,75 @@ cpyFeat:	subi	r9,r9,1			; Count word
 addi	r7,r7,4				; Next out
 addi	r8,r8,4				; Next in
 bgt	cpyFeat				; Copy all boot cpu features to us...
-
-	lwz	r17,pfAvailable(r30)		; Get our newly initialized features
-doOurInit:
-	mr.	r20,r20				; See if initialization routine
-	bnelrl					; Do the initialization
+	lwz	r17,pfAvailable(r6)		; Get our newly initialized features
+	addi	r6,r6,ppSize			; Point to the next one
+	b	cpyFCpu				; Do the next per_proc...
-
-	ori	r17,r17,lo16(pfValid)		; Set the valid bit
-	stw	r17,pfAvailable(r30)		; Set the available features
+
+nofeatcpy:	rlwinm.	r0,r17,0,pf64Bitb,pf64Bitb	; Is this a 64-bit machine?
 mtsprg	2,r17				; Remember the feature flags
+
+	bne++	start64				; Skip following if 64-bit...
+
+	mfspr	r6,hid0				; Get the HID0
+	rlwinm	r6,r6,0,sleep+1,doze-1		; Remove any vestiges of sleep
+	mtspr	hid0,r6				; Set the insomniac HID0
+	isync
+
+; Clear the BAT registers
+
+	li	r9,0				; Clear out a register
+	sync
+	isync
+	mtdbatu	0,r9				; Invalidate maps
+	mtdbatl	0,r9				; Invalidate maps
+	mtdbatu	1,r9				; Invalidate maps
+	mtdbatl	1,r9				; Invalidate maps
+	mtdbatu	2,r9				; Invalidate maps
+	mtdbatl	2,r9				; Invalidate maps
+	mtdbatu	3,r9				; Invalidate maps
+	mtdbatl	3,r9				; Invalidate maps
+	sync
+	isync
+	mtibatu	0,r9				; Invalidate maps
+	mtibatl	0,r9				; Invalidate maps
+	mtibatu	1,r9				; Invalidate maps
+	mtibatl	1,r9				; Invalidate maps
+	mtibatu	2,r9				; Invalidate maps
+	mtibatl	2,r9				; Invalidate maps
+	mtibatu	3,r9				; Invalidate maps
+	mtibatl	3,r9				; Invalidate maps
+	sync
+	isync
+	b	startcommon			; Go join up the common start routine
+
+start64:	lis	r5,hi16(startcommon)	; Get top of address of continue point
+	mfspr	r6,hid0				; Get the HID0
+	ori	r5,r5,lo16(startcommon)		; Get low of address of continue point
+	lis	r9,hi16(MASK(MSR_HV))		; ?
+	lis	r20,hi16(dozem|napm|sleepm)	; Get mask of power saving features
+	li	r7,MSR_VM_OFF			; Get real mode MSR, 64-bit off
+	sldi	r9,r9,32			; Slide into position
+	sldi	r20,r20,32			; Slide power stuff into position
+	or	r9,r9,r7			; Form initial MSR
+	andc	r6,r6,r20			; Remove any vestiges of sleep
+	isync
+	mtspr	hid0,r6				; Set the insomniac HID0
+	mfspr	r6,hid0				; Get it
+	mfspr	r6,hid0				; Get it
+	mfspr	r6,hid0				; Get it
+	mfspr	r6,hid0				; Get it
+	mfspr	r6,hid0				; Get it
+	mfspr	r6,hid0				; Get it
+	isync
+	mtsrr0	r5				; Set the continue point
+	mtsrr1	r9				; Set our normal disabled MSR
+	rfid					; Tally ho...
+
+	.align	5
+
+startcommon:
 rlwinm.	r0,r17,0,pfFloatb,pfFloatb	; See if there is floating point
 beq-	noFloat				; Nope, this is a really stupid machine...
@@ -345,7 +387,9 @@ noFloat:	rlwinm.	r0,r17,0,pfAltivecb,pfAltivecb	; See if there is Altivec
 mtspr	vrsave,r0			; Set that no VRs are used yet */
-	vspltisw	v1,0			; Clear a register
+	vspltish	v1,1			; Turn on the non-Java bit and saturate
+	vspltisw	v0,1			; Turn on the saturate bit
+	vxor	v1,v1,v0			; Turn off saturate and leave non-Java set
 lvx	v0,br0,r5			; Initialize VR0
 mtvscr	v1				; Clear the vector status register
 vor	v2,v0,v0			; Copy into the next register
@@ -404,12 +448,16 @@ noSMP:	rlwinm.	r0,r17,0,pfThermalb,pfThermalb	; See if there is an TAU
 noThermometer:
 
 bl	EXT(cacheInit)			; Initializes all caches (including the TLB)
+
+	rlwinm.	r0,r17,0,pf64Bitb,pf64Bitb	; Is this a 64-bit machine?
+ beq++ isnot64 ; Skip following if not 64-bit... - li r0,MSR_SUPERVISOR_INT_OFF ; Make sure we do not have FP enabled - mtmsr r0 ; Set the standard MSR values - isync + mfmsr r29 ; Get the MSR + rlwinm r29,r29,0,0,31 ; Make sure that 64-bit mode is off + mtmsrd r29 ; Set it + isync ; Make sure - bf bootCPU,callcpu ; Not the boot processor... +isnot64: bf bootCPU,callcpu lis r29,HIGH_ADDR(EXT(intstack_top_ss)) ; move onto interrupt stack ori r29,r29,LOW_ADDR(EXT(intstack_top_ss)) @@ -421,6 +469,8 @@ noThermometer: mr r1,r29 mr r3,r31 ; Restore any arguments we may have trashed +; Note that we exit from here with translation still off + bl EXT(ppc_init) ; Jump into boot init code BREAKPOINT_TRAP @@ -434,6 +484,8 @@ callcpu: mr r1,r29 ; move onto new stack mr r3,r31 ; Restore any arguments we may have trashed +; Note that we exit from here with translation still off + bl EXT(ppc_init_cpu) ; Jump into cpu init code BREAKPOINT_TRAP ; Should never return @@ -486,10 +538,10 @@ init750CX: bf firstBoot, init750 ; No init for wakeup.... mfspr r13,hid1 ; Get HID1 li r14,lo16(0xFD5F) ; Get valid - rlwinm r13,r13,4,28,31 ; Isolate - slw r14,r14,r13 ; Position - rlwimi r17,r14,15-pfCanNapb,pfCanNapb,pfCanNapb ; Set it - b init750 ; Join common... + rlwinm r13,r13,4,28,31 ; Isolate + slw r14,r14,r13 ; Position + rlwimi r17,r14,15-pfCanNapb,pfCanNapb,pfCanNapb ; Set it + b init750 ; Join common... ; 750FX @@ -504,9 +556,9 @@ init750FXnb: lwz r13, pfHID0(r30) ; Get HID0 lwz r11, pfHID1(r30) ; Get HID1 - rlwinm. r0, r11, 0, hid1ps, hid1ps ; Isolate the hid1ps bit + rlwinm. r0, r11, 0, hid1ps, hid1ps ; Isolate the hid1ps bit beq init750FXnb2 ; Clear BTIC if hid1ps set - rlwinm r13, r13, 0, btic+1, btic-1 ; Clear the BTIC bit + rlwinm r13, r13, 0, btic+1, btic-1 ; Clear the BTIC bit init750FXnb2: sync @@ -514,9 +566,9 @@ init750FXnb2: isync sync - rlwinm r12, r11, 0, hid1ps+1, hid1ps-1 ; Select PLL0 + rlwinm r12, r11, 0, hid1ps+1, hid1ps-1 ; Select PLL0 mtspr hid1, r12 ; Restore PLL config - mftb r13 ; Wait 5000 ticks (> 200 us) + mftb r13 ; Wait 5000 ticks (> 200 us) init750FXnbloop: mftb r14 @@ -554,7 +606,7 @@ i7400hl2: lis r14,hi16(256*1024) ; Base L2 size rlwinm r15,r15,4,30,31 slw r14,r14,r15 ; Set 256KB, 512KB, 1MB, or 2MB - stw r13,pfl2crOriginal(r30) ; Shadow the L2CR + stw r13,pfl2crOriginal(r30) ; Shadow the L2CR stw r13,pfl2cr(r30) ; Shadow the L2CR stw r14,pfl2Size(r30) ; Set the L2 size @@ -568,12 +620,11 @@ i7400hl2: lis r14,hi16(256*1024) ; Base L2 size stw r11,pfMSSCR0(r30) ; Save the MSSCR0 value mfspr r11,msscr1 ; Get the msscr1 register stw r11,pfMSSCR1(r30) ; Save the MSSCR1 value - blr ; Return... i7400nb: li r11,0 - mtspr l2cr,r11 ; Make sure L2CR is zero + mtspr l2cr,r11 ; Make sure L2CR is zero lwz r11,pfHID0(r30) ; Get HID0 sync mtspr hid0,r11 ; Set the HID @@ -613,7 +664,7 @@ init7410: li r13,0 ; Clear ; 745X - Any 7450 family processor init745X: - bf firstBoot,init745Xnb ; Do different if not initial boot... + bf firstBoot,init745Xnb ; Do different if not initial boot... mfspr r13,l2cr ; Get the L2CR rlwinm. r0,r13,0,l2e,l2e ; Any L2? @@ -621,8 +672,8 @@ init745X: rlwinm r17,r17,0,pfL2b+1,pfL2b-1 ; No L2, turn off feature init745Xhl2: - mfpvr r14 ; Get processor version - rlwinm r14,r14,16,16,31 ; Isolate processor version + mfpvr r14 ; Get processor version + rlwinm r14,r14,16,16,31 ; Isolate processor version cmpli cr0, r14, PROCESSOR_VERSION_7457 lis r14,hi16(512*1024) ; 512KB L2 beq init745Xhl2_2 @@ -643,24 +694,24 @@ init745Xhl2_2: bne+ init745Xhl3 ; Yes... 
rlwinm	r17,r17,0,pfL3b+1,pfL3b-1	; No L3, turn off feature
 
-init745Xhl3:	cmplwi	cr0,r13,0		; No L3 if L3CR is zero
-	beq-	init745Xnone			; Go turn off the features...
+init745Xhl3:	cmplwi	cr0,r13,0		; No L3 if L3CR is zero
+	beq-	init745Xnone			; Go turn off the features...
 lis	r14,hi16(1024*1024)		; Base L3 size
 rlwinm	r15,r13,4,31,31			; Get size multiplier
 slw	r14,r14,r15			; Set 1 or 2MB
-	stw	r13,pfl3crOriginal(r30)		; Shadow the L3CR
+	stw	r13,pfl3crOriginal(r30)		; Shadow the L3CR
 stw	r13,pfl3cr(r30)			; Shadow the L3CR
 stw	r14,pfl3Size(r30)		; Set the L3 size
 b	init745Xfin			; Return....
 
 init745Xnone:
-	rlwinm	r17,r17,0,pfL3fab+1,pfL3b-1	; No 3rd level cache or assist
+	rlwinm	r17,r17,0,pfL3fab+1,pfL3b-1	; No 3rd level cache or assist
 rlwinm	r11,r17,pfWillNapb-pfCanNapb,pfCanNapb,pfCanNapb	; Set pfCanNap if pfWillNap is set
 or	r17,r17,r11
 
 init745Xfin:
-	rlwinm	r17,r17,0,pfWillNapb+1,pfWillNapb-1	; Make sure pfWillNap is not set
+	rlwinm	r17,r17,0,pfWillNapb+1,pfWillNapb-1	; Make sure pfWillNap is not set
 mfspr	r11,hid0			; Get the current HID0
 stw	r11,pfHID0(r30)			; Save the HID0 value
@@ -678,7 +729,6 @@ init745Xfin:
 stw	r11,pfLDSTDB(r30)		; Save the LDSTDB value
 mfspr	r11,pir				; Get the pir register
 stw	r11,pfBootConfig(r30)		; Save the BootConfig value
-	blr					; Return....
 
 
@@ -728,6 +778,73 @@ init7450done:
 b	init745X			; Continue with standard init
 
+init970:
+	li	r20,0				; Clear this
+	mtspr	hior,r20			; Make sure that 0 is interrupt prefix
+	bf	firstBoot,init970nb		; No init for wakeup or second processor....
+
+
+	mfspr	r11,hid0			; Get original hid0
+	std	r11,pfHID0(r30)			; Save original
+	mfspr	r11,hid1			; Get original hid1
+	std	r11,pfHID1(r30)			; Save original
+	mfspr	r11,hid4			; Get original hid4
+	std	r11,pfHID4(r30)			; Save original
+	mfspr	r11,hid5			; Get original hid5
+	std	r11,pfHID5(r30)			; Save original
+
+;
+;	We cannot query or change the L2 size. We will just
+;	phoney up an L2CR to make sysctl "happy" and set the
+;	L2 size to 512K.
+;
+
+	lis	r0,0x8000			; Synthesize a "valid" but non-existent L2CR
+	stw	r0,pfl2crOriginal(r30)		; Set a dummy L2CR
+	stw	r0,pfl2cr(r30)			; Set a dummy L2CR
+	lis	r0,8				; Get 512K
+	stw	r0,pfl2Size(r30)		; Set the L2 size
+
+	blr
+
+;
+;	Start up code for second processor or wake up from sleep
+;
+
+init970nb:	ld	r11,pfHID0(r30)		; Get it
+	isync
+	mtspr	hid0,r11			; Stuff it
+	mfspr	r11,hid0			; Get it
+	mfspr	r11,hid0			; Get it
+	mfspr	r11,hid0			; Get it
+	mfspr	r11,hid0			; Get it
+	mfspr	r11,hid0			; Get it
+	mfspr	r11,hid0			; Get it
+	isync
+
+	ld	r11,pfHID1(r30)			; Get it
+	isync
+	mtspr	hid1,r11			; Stick it
+	mtspr	hid1,r11			; Stick it again
+	isync
+
+	ld	r11,pfHID4(r30)			; Get it
+	sync
+	mtspr	hid4,r11			; Stick it
+	isync
+
+	ld	r11,pfHID5(r30)			; Get it
+	mtspr	hid5,r11			; Set it
+	isync
+	blr					; Leave...
+
+
+; Unsupported Processors
+initUnsupported:
+	mtlr	r2				; Restore the return address
+	blr					; Return to the booter
+
+
 ;
 ;	Processor to feature table
@@ -738,6 +855,7 @@ init7450done:
 ;	.short	ptRevision	- Revision code from PVR. A zero value denotes the generic attributes if not specific
 ;	.long	ptFeatures	- Available features
 ;	.long	ptCPUCap	- Default value for _cpu_capabilities
+;	.long	ptPatch		- Patch features
 ;	.long	ptInitRout	- Initilization routine. Can modify any of the other attributes.
; .long ptRptdProc - Processor type reported ; .long ptTempMax - Maximum operating temprature @@ -745,278 +863,371 @@ init7450done: ; .long ptLineSize - Level 1 cache line size ; .long ptl1iSize - Level 1 instruction cache size ; .long ptl1dSize - Level 1 data cache size +; .long ptPTEG - Size of PTEG +; .long ptMaxVAddr - Maximum effective address +; .long ptMaxPAddr - Maximum physical address +; .align 2 processor_types: ; 750 (ver 2.2) - .align 2 - .long 0xFFFFFFFF ; Exact match - .short PROCESSOR_VERSION_750 - .short 0x4202 - .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pfL1i | pfL1d | pfL2 - .long kCache32 | kHasGraphicsOps | kHasStfiwx - .long init750 - .long CPU_SUBTYPE_POWERPC_750 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 + .align 2 + .long 0xFFFFFFFF ; Exact match + .short PROCESSOR_VERSION_750 + .short 0x4202 + .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pf32Byte | pfL2 + .long kCache32 | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init750 + .long CPU_SUBTYPE_POWERPC_750 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 32 ; 750CX (ver 2.x) - .align 2 - .long 0xFFFF0F00 ; 2.x vers - .short PROCESSOR_VERSION_750 - .short 0x0200 - .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pfL1i | pfL1d | pfL2 - .long kCache32 | kHasGraphicsOps | kHasStfiwx - .long init750CX - .long CPU_SUBTYPE_POWERPC_750 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 - + .align 2 + .long 0xFFFF0F00 ; 2.x vers + .short PROCESSOR_VERSION_750 + .short 0x0200 + .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pf32Byte | pfL2 + .long kCache32 | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init750CX + .long CPU_SUBTYPE_POWERPC_750 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 32 + ; 750 (generic) - .align 2 - .long 0xFFFF0000 ; All revisions - .short PROCESSOR_VERSION_750 - .short 0 - .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pfThermal | pfL1i | pfL1d | pfL2 - .long kCache32 | kHasGraphicsOps | kHasStfiwx - .long init750 - .long CPU_SUBTYPE_POWERPC_750 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 - + .align 2 + .long 0xFFFF0000 ; All revisions + .short PROCESSOR_VERSION_750 + .short 0 + .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pfThermal | pf32Byte | pfL2 + .long kCache32 | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init750 + .long CPU_SUBTYPE_POWERPC_750 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 32 + ; 750FX (ver 1.x) - .align 2 - .long 0xFFFF0F00 ; 1.x vers - .short PROCESSOR_VERSION_750FX - .short 0x0100 - .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pfSlowNap | pfNoMuMMCK | pfL1i | pfL1d | pfL2 - .long kCache32 | kHasGraphicsOps | kHasStfiwx - .long init750FX - .long CPU_SUBTYPE_POWERPC_750 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 - + .align 2 + .long 0xFFFF0F00 ; 1.x vers + .short PROCESSOR_VERSION_750FX + .short 0x0100 + .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pfSlowNap | pfNoMuMMCK | pf32Byte | pfL2 + .long kCache32 | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init750FX + .long CPU_SUBTYPE_POWERPC_750 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 32 + ; 750FX (generic) - .align 2 - .long 0xFFFF0000 ; All revisions - .short PROCESSOR_VERSION_750FX - .short 0 - .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | 
pfSlowNap | pfNoMuMMCK | pfL1i | pfL1d | pfL2 - .long kCache32 | kHasGraphicsOps | kHasStfiwx - .long init750FXV2 - .long CPU_SUBTYPE_POWERPC_750 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 - + .align 2 + .long 0xFFFF0000 ; All revisions + .short PROCESSOR_VERSION_750FX + .short 0 + .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pfSlowNap | pfNoMuMMCK | pf32Byte | pfL2 + .long kCache32 | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init750FXV2 + .long CPU_SUBTYPE_POWERPC_750 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 32 + ; 7400 (ver 2.0 - ver 2.7) - .align 2 - .long 0xFFFFFFF8 ; All revisions - .short PROCESSOR_VERSION_7400 - .short 0x0200 - .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfCanDoze | pfThermal | pfL1i | pfL1d | pfL1fa | pfL2 | pfL2fa - .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx - .long init7400v2_7 - .long CPU_SUBTYPE_POWERPC_7400 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 - + .align 2 + .long 0xFFFFFFF8 ; ver 2.0 - 2.7 + .short PROCESSOR_VERSION_7400 + .short 0x0200 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfCanDoze | pfThermal | pf32Byte | pfL1fa | pfL2 | pfL2fa | pfHasDcba + .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init7400v2_7 + .long CPU_SUBTYPE_POWERPC_7400 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 32 + ; 7400 (generic) - .align 2 - .long 0xFFFF0000 ; All revisions - .short PROCESSOR_VERSION_7400 - .short 0 - .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfCanDoze | pfThermal | pfL1i | pfL1d | pfL1fa | pfL2 | pfL2fa - .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx - .long init7400 - .long CPU_SUBTYPE_POWERPC_7400 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 - + .align 2 + .long 0xFFFF0000 ; All revisions + .short PROCESSOR_VERSION_7400 + .short 0 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfCanDoze | pfThermal | pf32Byte | pfL1fa | pfL2 | pfL2fa | pfHasDcba + .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init7400 + .long CPU_SUBTYPE_POWERPC_7400 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 36 + ; 7410 (ver 1.1) - .align 2 - .long 0xFFFFFFFF ; Exact match - .short PROCESSOR_VERSION_7400 - .short 0x1101 - .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfCanDoze | pfL1i | pfL1d | pfL1fa | pfL2 | pfL2fa - .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx - .long init7410 - .long CPU_SUBTYPE_POWERPC_7400 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 + .align 2 + .long 0xFFFFFFFF ; Exact match + .short PROCESSOR_VERSION_7400 + .short 0x1101 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfCanDoze | pf32Byte | pfL1fa | pfL2 | pfL2fa | pfHasDcba + .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init7410 + .long CPU_SUBTYPE_POWERPC_7400 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + 
.long 64 + .long 52 + .long 36 ; 7410 (generic) - .align 2 - .long 0xFFFF0000 ; All other revisions - .short PROCESSOR_VERSION_7410 - .short 0 - .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfCanDoze | pfL1i | pfL1d | pfL1fa | pfL2 | pfL2fa - .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx - .long init7410 - .long CPU_SUBTYPE_POWERPC_7400 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 + .align 2 + .long 0xFFFF0000 ; All other revisions + .short PROCESSOR_VERSION_7410 + .short 0 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfCanDoze | pf32Byte | pfL1fa | pfL2 | pfL2fa | pfHasDcba + .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init7410 + .long CPU_SUBTYPE_POWERPC_7400 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 36 ; 7450 (ver 1.xx) - .align 2 - .long 0xFFFFFF00 ; Just revisions 1.xx - .short PROCESSOR_VERSION_7450 - .short 0x0100 - .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfNoL2PFNap | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa - .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx - .long init7450 - .long CPU_SUBTYPE_POWERPC_7450 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 + .align 2 + .long 0xFFFFFF00 ; Just revisions 1.xx + .short PROCESSOR_VERSION_7450 + .short 0x0100 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfNoL2PFNap | pfLClck | pf32Byte | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfHasDcba + .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init7450 + .long CPU_SUBTYPE_POWERPC_7450 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 36 ; 7450 (2.0) - .align 2 - .long 0xFFFFFFFF ; Just revision 2.0 - .short PROCESSOR_VERSION_7450 - .short 0x0200 - .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfNoL2PFNap | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa - .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx - .long init7450 - .long CPU_SUBTYPE_POWERPC_7450 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 + .align 2 + .long 0xFFFFFFFF ; Just revision 2.0 + .short PROCESSOR_VERSION_7450 + .short 0x0200 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfNoL2PFNap | pfLClck | pf32Byte | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfHasDcba + .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init7450 + .long CPU_SUBTYPE_POWERPC_7450 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 36 ; 7450 (2.1) - .align 2 - .long 0xFFFF0000 ; All other revisions - .short PROCESSOR_VERSION_7450 - .short 0 - .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfWillNap | pfNoMSRir | pfNoL2PFNap | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa - .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx - .long init7450 - .long 
CPU_SUBTYPE_POWERPC_7450 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 + .align 2 + .long 0xFFFF0000 ; All other revisions + .short PROCESSOR_VERSION_7450 + .short 0 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfWillNap | pfNoMSRir | pfNoL2PFNap | pfLClck | pf32Byte | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfHasDcba + .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init7450 + .long CPU_SUBTYPE_POWERPC_7450 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 36 ; 7455 (1.xx) Just like 7450 2.0 - .align 2 - .long 0xFFFFFF00 ; Just revisions 1.xx - .short PROCESSOR_VERSION_7455 - .short 0x0100 - .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfNoL2PFNap | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa - .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx - .long init745X - .long CPU_SUBTYPE_POWERPC_7450 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 + .align 2 + .long 0xFFFFFF00 ; Just revisions 1.xx + .short PROCESSOR_VERSION_7455 + .short 0x0100 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfNoL2PFNap | pfLClck | pf32Byte | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfHasDcba + .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init745X + .long CPU_SUBTYPE_POWERPC_7450 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 36 ; 7455 (2.0) - .align 2 - .long 0xFFFFFFFF ; Just revision 2.0 - .short PROCESSOR_VERSION_7455 - .short 0x0200 - .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfWillNap | pfNoMSRir | pfNoL2PFNap | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa - .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx - .long init745X - .long CPU_SUBTYPE_POWERPC_7450 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 + .align 2 + .long 0xFFFFFFFF ; Just revision 2.0 + .short PROCESSOR_VERSION_7455 + .short 0x0200 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfWillNap | pfNoMSRir | pfNoL2PFNap | pfLClck | pf32Byte | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfHasDcba + .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init745X + .long CPU_SUBTYPE_POWERPC_7450 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 36 ; 7455 (2.1) - .align 2 - .long 0xFFFF0000 ; All other revisions - .short PROCESSOR_VERSION_7455 - .short 0 - .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfNoMSRir | pfNoL2PFNap | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa - .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx - .long init745X - .long CPU_SUBTYPE_POWERPC_7450 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 + .align 2 + .long 0xFFFF0000 ; All other revisions + .short PROCESSOR_VERSION_7455 + .short 0 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfNoMSRir | pfNoL2PFNap | pfLClck | pf32Byte | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfHasDcba + 
.long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init745X + .long CPU_SUBTYPE_POWERPC_7450 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 36 ; 7457 - .align 2 - .long 0xFFFF0000 ; All other revisions - .short PROCESSOR_VERSION_7457 - .short 0 - .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfNoMSRir | pfNoL2PFNap | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa - .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx - .long init745X - .long CPU_SUBTYPE_POWERPC_7450 - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 - -; Default dumb loser machine + .align 2 + .long 0xFFFF0000 ; All revisions + .short PROCESSOR_VERSION_7457 + .short 0 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfNoMSRir | pfNoL2PFNap | pfLClck | pf32Byte | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfHasDcba + .long kHasAltivec | kCache32 | kDcbaAvailable | kDataStreamsRecommended | kDataStreamsAvailable | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long init745X + .long CPU_SUBTYPE_POWERPC_7450 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 36 + +; 970 + + .align 2 + .long 0xFFFF0000 ; All versions so far + .short PROCESSOR_VERSION_970 + .short 0 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pf128Byte | pf64Bit | pfL2 | pfSCOMFixUp + .long kHasAltivec | k64Bit | kCache128 | kDataStreamsAvailable | kDcbtStreamsRecommended | kDcbtStreamsAvailable | kHasGraphicsOps | kHasStfiwx | kHasFsqrt + .long PatchLwsync + .long init970 + .long CPU_SUBTYPE_POWERPC_970 + .long 105 + .long 90 + .long 128 + .long 64*1024 + .long 32*1024 + .long 128 + .long 65 + .long 42 + +; All other processors are not supported + + .align 2 + .long 0x00000000 ; Matches everything + .short 0 + .short 0 + .long pfFloat | pf32Byte + .long kCache32 | kHasGraphicsOps | kHasStfiwx + .long PatchExt32 + .long initUnsupported + .long CPU_SUBTYPE_POWERPC_ALL + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + .long 64 + .long 52 + .long 32 - .align 2 - .long 0x00000000 ; Matches everything - .short 0 - .short 0 - .long pfFloat | pfL1i | pfL1d - .long kCache32 | kHasGraphicsOps | kHasStfiwx - .long 0 - .long CPU_SUBTYPE_POWERPC_ALL - .long 105 - .long 90 - .long 32 - .long 32*1024 - .long 32*1024 diff --git a/osfmk/ppc/status.c b/osfmk/ppc/status.c index 398b0b719..e0776a7af 100644 --- a/osfmk/ppc/status.c +++ b/osfmk/ppc/status.c @@ -47,6 +47,8 @@ extern void thread_bootstrap_return(void); extern struct Saveanchor saveanchor; extern int real_ncpus; /* Number of actual CPUs */ +#define USRSTACK 0xc0000000 + kern_return_t thread_userstack( thread_t, @@ -69,6 +71,7 @@ thread_entrypoint( unsigned int get_msr_exportmask(void); unsigned int get_msr_nbits(void); unsigned int get_msr_rbits(void); +void ppc_checkthreadstate(void *, int); void thread_set_child(thread_act_t child, int pid); void thread_set_parent(thread_act_t parent, int pid); @@ -80,6 +83,9 @@ unsigned int state_count[] = { PPC_THREAD_STATE_COUNT, PPC_FLOAT_STATE_COUNT, PPC_EXCEPTION_STATE_COUNT, + PPC_VECTOR_STATE_COUNT, + PPC_THREAD_STATE64_COUNT, + PPC_EXCEPTION_STATE64_COUNT, }; /* @@ -89,7 +95,7 @@ unsigned int state_count[] = { */ kern_return_t -act_machine_get_state( +machine_thread_get_state( thread_act_t thr_act, 
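Each row of the per-processor table above is a fixed-size record of .long/.short words. Read as a C struct it looks roughly like the sketch below; the field names are illustrative guesses, since only the PVR mask/version, the two flag words, the init routine, the CPU subtype, and the cache geometry are evident from the rows themselves. The three new trailing words (64/52/36 on the 32-bit parts, 128/65/42 on the 970) plausibly encode the PTEG size and the implemented virtual/physical address bits, but that reading is an inference from the values, not stated by the patch.

	/* Hypothetical C view of one table entry; types and names are assumptions. */
	struct cpu_table_entry {
		uint32_t pvr_mask;       /* 0xFFFF0000 matches all revisions          */
		uint16_t version;        /* PROCESSOR_VERSION_xxx (upper PVR half)    */
		uint16_t revision;       /* exact revision, 0 when covered by mask    */
		uint32_t features;       /* pfFloat | pfAltivec | ...                 */
		uint32_t user_features;  /* kHasAltivec | kCache32 | ... (exported)   */
		uint32_t patch;          /* new word: PatchExt32 or PatchLwsync       */
		uint32_t init_routine;   /* init7410, init7450, init745X, init970...  */
		uint32_t cpu_subtype;    /* CPU_SUBTYPE_POWERPC_xxx                   */
		uint32_t tuning[2];      /* 105, 90: constants carried over unchanged */
		uint32_t line_size;      /* 32 bytes, or 128 on the 970               */
		uint32_t l1i_size;       /* L1 I-cache bytes (64K on the 970)         */
		uint32_t l1d_size;       /* L1 D-cache bytes                          */
		uint32_t pteg_size;      /* new: 64, or 128 on the 970 (assumed)      */
		uint32_t max_vaddr;      /* new: 52, or 65 on the 970 (assumed bits)  */
		uint32_t max_paddr;      /* new: 36, or 42 on the 970 (assumed bits)  */
	};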
thread_flavor_t flavor, thread_state_t tstate, @@ -104,32 +110,29 @@ act_machine_get_state( unsigned int vrvalidwrk; register struct ppc_thread_state *ts; + register struct ppc_thread_state64 *xts; register struct ppc_exception_state *es; + register struct ppc_exception_state64 *xes; register struct ppc_float_state *fs; register struct ppc_vector_state *vs; - -#if MACH_ASSERT - if (watchacts & WA_STATE) - printf("act_%x act_machine_get_state(thr_act=%x,flav=%x,st=%x,cnt@%x=%x)\n", - current_act(), thr_act, flavor, tstate, - count, (count ? *count : 0)); -#endif /* MACH_ASSERT */ - genuser = find_user_regs(thr_act); /* Find the current user general context for this activation */ switch (flavor) { case THREAD_STATE_FLAVOR_LIST: - if (*count < 3) { + if (*count < 6) { return (KERN_INVALID_ARGUMENT); } tstate[0] = PPC_THREAD_STATE; tstate[1] = PPC_FLOAT_STATE; tstate[2] = PPC_EXCEPTION_STATE; - *count = 3; + tstate[3] = PPC_VECTOR_STATE; + tstate[4] = PPC_THREAD_STATE64; + tstate[5] = PPC_EXCEPTION_STATE64; + *count = 6; return KERN_SUCCESS; @@ -144,46 +147,46 @@ act_machine_get_state( sv = genuser; /* Copy this over */ if(sv) { /* Is there a save area yet? */ - ts->r0 = sv->save_r0; - ts->r1 = sv->save_r1; - ts->r2 = sv->save_r2; - ts->r3 = sv->save_r3; - ts->r4 = sv->save_r4; - ts->r5 = sv->save_r5; - ts->r6 = sv->save_r6; - ts->r7 = sv->save_r7; - ts->r8 = sv->save_r8; - ts->r9 = sv->save_r9; - ts->r10 = sv->save_r10; - ts->r11 = sv->save_r11; - ts->r12 = sv->save_r12; - ts->r13 = sv->save_r13; - ts->r14 = sv->save_r14; - ts->r15 = sv->save_r15; - ts->r16 = sv->save_r16; - ts->r17 = sv->save_r17; - ts->r18 = sv->save_r18; - ts->r19 = sv->save_r19; - ts->r20 = sv->save_r20; - ts->r21 = sv->save_r21; - ts->r22 = sv->save_r22; - ts->r23 = sv->save_r23; - ts->r24 = sv->save_r24; - ts->r25 = sv->save_r25; - ts->r26 = sv->save_r26; - ts->r27 = sv->save_r27; - ts->r28 = sv->save_r28; - ts->r29 = sv->save_r29; - ts->r30 = sv->save_r30; - ts->r31 = sv->save_r31; - ts->cr = sv->save_cr; - ts->xer = sv->save_xer; - ts->lr = sv->save_lr; - ts->ctr = sv->save_ctr; - ts->srr0 = sv->save_srr0; - ts->srr1 = sv->save_srr1; + ts->r0 = (unsigned int)sv->save_r0; + ts->r1 = (unsigned int)sv->save_r1; + ts->r2 = (unsigned int)sv->save_r2; + ts->r3 = (unsigned int)sv->save_r3; + ts->r4 = (unsigned int)sv->save_r4; + ts->r5 = (unsigned int)sv->save_r5; + ts->r6 = (unsigned int)sv->save_r6; + ts->r7 = (unsigned int)sv->save_r7; + ts->r8 = (unsigned int)sv->save_r8; + ts->r9 = (unsigned int)sv->save_r9; + ts->r10 = (unsigned int)sv->save_r10; + ts->r11 = (unsigned int)sv->save_r11; + ts->r12 = (unsigned int)sv->save_r12; + ts->r13 = (unsigned int)sv->save_r13; + ts->r14 = (unsigned int)sv->save_r14; + ts->r15 = (unsigned int)sv->save_r15; + ts->r16 = (unsigned int)sv->save_r16; + ts->r17 = (unsigned int)sv->save_r17; + ts->r18 = (unsigned int)sv->save_r18; + ts->r19 = (unsigned int)sv->save_r19; + ts->r20 = (unsigned int)sv->save_r20; + ts->r21 = (unsigned int)sv->save_r21; + ts->r22 = (unsigned int)sv->save_r22; + ts->r23 = (unsigned int)sv->save_r23; + ts->r24 = (unsigned int)sv->save_r24; + ts->r25 = (unsigned int)sv->save_r25; + ts->r26 = (unsigned int)sv->save_r26; + ts->r27 = (unsigned int)sv->save_r27; + ts->r28 = (unsigned int)sv->save_r28; + ts->r29 = (unsigned int)sv->save_r29; + ts->r30 = (unsigned int)sv->save_r30; + ts->r31 = (unsigned int)sv->save_r31; + ts->cr = (unsigned int)sv->save_cr; + ts->xer = (unsigned int)sv->save_xer; + ts->lr = (unsigned int)sv->save_lr; + ts->ctr = (unsigned 
int)sv->save_ctr; + ts->srr0 = (unsigned int)sv->save_srr0; + ts->srr1 = (unsigned int)sv->save_srr1; ts->mq = 0; /* MQ register (601 only) */ - ts->vrsave = sv->save_vrsave; /* VRSAVE register (Altivec only) */ + ts->vrsave = (unsigned int)sv->save_vrsave; /* VRSAVE register (Altivec only) */ } else { /* No user state yet. Save seemingly random values. */ @@ -204,6 +207,75 @@ act_machine_get_state( *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */ return KERN_SUCCESS; + + case PPC_THREAD_STATE64: + + if (*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */ + return KERN_INVALID_ARGUMENT; + } + + xts = (struct ppc_thread_state64 *) tstate; + + sv = genuser; /* Copy this over */ + + if(sv) { /* Is there a save area yet? */ + xts->r0 = sv->save_r0; + xts->r1 = sv->save_r1; + xts->r2 = sv->save_r2; + xts->r3 = sv->save_r3; + xts->r4 = sv->save_r4; + xts->r5 = sv->save_r5; + xts->r6 = sv->save_r6; + xts->r7 = sv->save_r7; + xts->r8 = sv->save_r8; + xts->r9 = sv->save_r9; + xts->r10 = sv->save_r10; + xts->r11 = sv->save_r11; + xts->r12 = sv->save_r12; + xts->r13 = sv->save_r13; + xts->r14 = sv->save_r14; + xts->r15 = sv->save_r15; + xts->r16 = sv->save_r16; + xts->r17 = sv->save_r17; + xts->r18 = sv->save_r18; + xts->r19 = sv->save_r19; + xts->r20 = sv->save_r20; + xts->r21 = sv->save_r21; + xts->r22 = sv->save_r22; + xts->r23 = sv->save_r23; + xts->r24 = sv->save_r24; + xts->r25 = sv->save_r25; + xts->r26 = sv->save_r26; + xts->r27 = sv->save_r27; + xts->r28 = sv->save_r28; + xts->r29 = sv->save_r29; + xts->r30 = sv->save_r30; + xts->r31 = sv->save_r31; + xts->cr = sv->save_cr; + xts->xer = sv->save_xer; + xts->lr = sv->save_lr; + xts->ctr = sv->save_ctr; + xts->srr0 = sv->save_srr0; + xts->srr1 = sv->save_srr1; + xts->vrsave = sv->save_vrsave; /* VRSAVE register (Altivec only) */ + } + else { /* No user state yet. Save seemingly random values. */ + + for(i=0; i < 32; i++) { /* Fill up with defaults */ + ((unsigned long long *)&xts->r0)[i] = ((unsigned long long *)&FloatInit)[0]; + } + xts->cr = 0; + xts->xer = 0; + xts->lr = ((unsigned long long *)&FloatInit)[0]; + xts->ctr = ((unsigned long long *)&FloatInit)[0]; + xts->srr0 = ((unsigned long long *)&FloatInit)[0]; + xts->srr1 = MSR_EXPORT_MASK_SET; + xts->vrsave = 0; /* VRSAVE register (Altivec only) */ + } + + *count = PPC_THREAD_STATE64_COUNT; /* Pass back the amount we actually copied */ + return KERN_SUCCESS; + case PPC_EXCEPTION_STATE: if (*count < PPC_EXCEPTION_STATE_COUNT) { @@ -211,17 +283,10 @@ act_machine_get_state( } es = (struct ppc_exception_state *) tstate; + sv = genuser; /* Copy this over */ - sv = thr_act->mact.pcb; /* Start with the normal savearea */ - while(sv) { /* Find the user context */ - if(sv->save_srr1 & MASK(MSR_PR)) { /* Are we looking at the user context? 
*/ - break; /* Outta here */ - } - sv = sv->save_hdr.save_prev; /* Back chain */ - } - if(sv) { /* See if valid state yet */ - es->dar = sv->save_dar; + es->dar = (unsigned int)sv->save_dar; es->dsisr = sv->save_dsisr; es->exception = sv->save_exception; } @@ -234,6 +299,29 @@ act_machine_get_state( *count = PPC_EXCEPTION_STATE_COUNT; return KERN_SUCCESS; + case PPC_EXCEPTION_STATE64: + + if (*count < PPC_EXCEPTION_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + xes = (struct ppc_exception_state64 *) tstate; + sv = genuser; /* Copy this over */ + + if(sv) { /* See if valid state yet */ + xes->dar = sv->save_dar; + xes->dsisr = sv->save_dsisr; + xes->exception = sv->save_exception; + } + else { /* Nope, not yet */ + xes->dar = 0; + xes->dsisr = 0; + xes->exception = ((unsigned int *)&FloatInit)[0]; + } + + *count = PPC_EXCEPTION_STATE64_COUNT; + return KERN_SUCCESS; + case PPC_FLOAT_STATE: if (*count < PPC_FLOAT_STATE_COUNT) { @@ -244,14 +332,7 @@ act_machine_get_state( fs = (struct ppc_float_state *) tstate; /* Point to destination */ - fsv = (savearea_fpu *)thr_act->mact.curctx->FPUsave; /* Start with the top FPU savearea */ - - while(fsv) { /* Find the user context */ - if(!fsv->save_hdr.save_level) { /* Are we looking at the user context? */ - break; /* Outta here */ - } - fsv = (savearea_fpu *)fsv->save_hdr.save_prev; /* Back chain */ - } + fsv = find_user_fpu(thr_act); /* Get the user's fpu savearea */ if(fsv) { /* See if we have any */ bcopy((char *)&fsv->save_fp0, (char *)fs, 32*8); /* 32 registers */ @@ -282,14 +363,7 @@ act_machine_get_state( vs = (struct ppc_vector_state *) tstate; /* Point to destination */ - vsv = (savearea_vec *)thr_act->mact.curctx->VMXsave; /* Start with the top vector savearea */ - - while(vsv) { /* Find the user context */ - if(!vsv->save_hdr.save_level) { /* Are we looking at the user context? */ - break; /* Outta here */ - } - vsv = (savearea_vec *)vsv->save_hdr.save_prev; /* Back chain */ - } + vsv = find_user_vec(thr_act); /* Find the vector savearea */ if(vsv) { /* See if we have any */ @@ -300,7 +374,7 @@ act_machine_get_state( vs->save_vscr[0] = 0; /* Set an initial value if no general user yet */ vs->save_vscr[1] = 0; vs->save_vscr[2] = 0; - vs->save_vscr[3] = 0x00010000; + vs->save_vscr[3] = 0x00010000; /* Always start with Java mode off */ } for(i=0; i < 32; i++) { /* Copy the saved registers and invalidate the others */ for(j=0; j < 4; j++) { @@ -322,7 +396,7 @@ act_machine_get_state( vs->save_vscr[0] = 0; /* Set an initial value if no general user yet */ vs->save_vscr[1] = 0; vs->save_vscr[2] = 0; - vs->save_vscr[3] = 0x00010000; + vs->save_vscr[3] = 0x00010000; /* Always start with Java mode off */ } vs->save_vrvalid = 0; /* Clear the valid flags */ } @@ -337,6 +411,241 @@ act_machine_get_state( return KERN_INVALID_ARGUMENT; } } +/* Close cousin of machine_thread_get_state(). + * This function is currently incomplete since we don't really need vector + * or FP for the core dump (the save area can be accessed directly if the + * user is so inclined). Also the function name is something of a misnomer, + * see the comment above find_kern_regs(). 
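A minimal usage sketch for the new core-dump accessor declared just below. The caller, buffer handling, and error handling here are hypothetical; the function name, flavors, and count constants are the ones this patch introduces.

	struct ppc_thread_state ts;
	mach_msg_type_number_t count = PPC_THREAD_STATE_COUNT;

	if (machine_thread_get_kern_state(thr_act, PPC_THREAD_STATE,
			(thread_state_t)&ts, &count) == KERN_SUCCESS)
		kprintf("kernel pc=0x%08x lr=0x%08x sp=0x%08x\n",
			ts.srr0, ts.lr, ts.r1);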
+ */ + +kern_return_t +machine_thread_get_kern_state( + thread_act_t thr_act, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t *count) +{ + + register struct savearea *sv; /* Pointer to the context savearea */ + savearea *genkern; + int i, j; + unsigned int vrvalidwrk; + + register struct ppc_thread_state *ts; + register struct ppc_thread_state64 *xts; + register struct ppc_exception_state *es; + register struct ppc_exception_state64 *xes; + + genkern = find_kern_regs(thr_act); + + switch (flavor) { + + case THREAD_STATE_FLAVOR_LIST: + + if (*count < 6) { + return (KERN_INVALID_ARGUMENT); + } + + tstate[0] = PPC_THREAD_STATE; + tstate[1] = PPC_FLOAT_STATE; + tstate[2] = PPC_EXCEPTION_STATE; + tstate[3] = PPC_VECTOR_STATE; + tstate[4] = PPC_THREAD_STATE64; + tstate[5] = PPC_EXCEPTION_STATE64; + *count = 6; + + return KERN_SUCCESS; + + case PPC_THREAD_STATE: + + if (*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */ + return KERN_INVALID_ARGUMENT; + } + + ts = (struct ppc_thread_state *) tstate; + + sv = genkern; /* Copy this over */ + + if(sv) { /* Is there a save area yet? */ + ts->r0 = (unsigned int)sv->save_r0; + ts->r1 = (unsigned int)sv->save_r1; + ts->r2 = (unsigned int)sv->save_r2; + ts->r3 = (unsigned int)sv->save_r3; + ts->r4 = (unsigned int)sv->save_r4; + ts->r5 = (unsigned int)sv->save_r5; + ts->r6 = (unsigned int)sv->save_r6; + ts->r7 = (unsigned int)sv->save_r7; + ts->r8 = (unsigned int)sv->save_r8; + ts->r9 = (unsigned int)sv->save_r9; + ts->r10 = (unsigned int)sv->save_r10; + ts->r11 = (unsigned int)sv->save_r11; + ts->r12 = (unsigned int)sv->save_r12; + ts->r13 = (unsigned int)sv->save_r13; + ts->r14 = (unsigned int)sv->save_r14; + ts->r15 = (unsigned int)sv->save_r15; + ts->r16 = (unsigned int)sv->save_r16; + ts->r17 = (unsigned int)sv->save_r17; + ts->r18 = (unsigned int)sv->save_r18; + ts->r19 = (unsigned int)sv->save_r19; + ts->r20 = (unsigned int)sv->save_r20; + ts->r21 = (unsigned int)sv->save_r21; + ts->r22 = (unsigned int)sv->save_r22; + ts->r23 = (unsigned int)sv->save_r23; + ts->r24 = (unsigned int)sv->save_r24; + ts->r25 = (unsigned int)sv->save_r25; + ts->r26 = (unsigned int)sv->save_r26; + ts->r27 = (unsigned int)sv->save_r27; + ts->r28 = (unsigned int)sv->save_r28; + ts->r29 = (unsigned int)sv->save_r29; + ts->r30 = (unsigned int)sv->save_r30; + ts->r31 = (unsigned int)sv->save_r31; + ts->cr = (unsigned int)sv->save_cr; + ts->xer = (unsigned int)sv->save_xer; + ts->lr = (unsigned int)sv->save_lr; + ts->ctr = (unsigned int)sv->save_ctr; + ts->srr0 = (unsigned int)sv->save_srr0; + ts->srr1 = (unsigned int)sv->save_srr1; + ts->mq = 0; /* MQ register (601 only) */ + ts->vrsave = (unsigned int)sv->save_vrsave; /* VRSAVE register (Altivec only) */ + } + else { /* No state yet. Save seemingly random values. */ + + for(i=0; i < 32; i+=2) { /* Fill up with defaults */ + ((unsigned int *)&ts->r0)[i] = ((unsigned int *)&FloatInit)[0]; + ((unsigned int *)&ts->r0)[i+1] = ((unsigned int *)&FloatInit)[1]; + } + ts->cr = 0; + ts->xer = 0; + ts->lr = ((unsigned int *)&FloatInit)[0]; + ts->ctr = ((unsigned int *)&FloatInit)[1]; + ts->srr0 = ((unsigned int *)&FloatInit)[0]; + ts->srr1 = MSR_EXPORT_MASK_SET; + ts->mq = 0; + ts->vrsave = 0; /* VRSAVE register (Altivec only) */ + } + + *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */ + return KERN_SUCCESS; + + + case PPC_THREAD_STATE64: + + if (*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? 
*/ + return KERN_INVALID_ARGUMENT; + } + + xts = (struct ppc_thread_state64 *) tstate; + + sv = genkern; /* Copy this over */ + + if(sv) { /* Is there a save area yet? */ + xts->r0 = sv->save_r0; + xts->r1 = sv->save_r1; + xts->r2 = sv->save_r2; + xts->r3 = sv->save_r3; + xts->r4 = sv->save_r4; + xts->r5 = sv->save_r5; + xts->r6 = sv->save_r6; + xts->r7 = sv->save_r7; + xts->r8 = sv->save_r8; + xts->r9 = sv->save_r9; + xts->r10 = sv->save_r10; + xts->r11 = sv->save_r11; + xts->r12 = sv->save_r12; + xts->r13 = sv->save_r13; + xts->r14 = sv->save_r14; + xts->r15 = sv->save_r15; + xts->r16 = sv->save_r16; + xts->r17 = sv->save_r17; + xts->r18 = sv->save_r18; + xts->r19 = sv->save_r19; + xts->r20 = sv->save_r20; + xts->r21 = sv->save_r21; + xts->r22 = sv->save_r22; + xts->r23 = sv->save_r23; + xts->r24 = sv->save_r24; + xts->r25 = sv->save_r25; + xts->r26 = sv->save_r26; + xts->r27 = sv->save_r27; + xts->r28 = sv->save_r28; + xts->r29 = sv->save_r29; + xts->r30 = sv->save_r30; + xts->r31 = sv->save_r31; + xts->cr = sv->save_cr; + xts->xer = sv->save_xer; + xts->lr = sv->save_lr; + xts->ctr = sv->save_ctr; + xts->srr0 = sv->save_srr0; + xts->srr1 = sv->save_srr1; + xts->vrsave = sv->save_vrsave; /* VRSAVE register (Altivec only) */ + } + else { /* No user state yet. Save seemingly random values. */ + + for(i=0; i < 32; i++) { /* Fill up with defaults */ + ((unsigned long long *)&xts->r0)[i] = ((unsigned long long *)&FloatInit)[0]; + } + xts->cr = 0; + xts->xer = 0; + xts->lr = ((unsigned long long *)&FloatInit)[0]; + xts->ctr = ((unsigned long long *)&FloatInit)[0]; + xts->srr0 = ((unsigned long long *)&FloatInit)[0]; + xts->srr1 = MSR_EXPORT_MASK_SET; + xts->vrsave = 0; /* VRSAVE register (Altivec only) */ + } + + *count = PPC_THREAD_STATE64_COUNT; /* Pass back the amount we actually copied */ + return KERN_SUCCESS; + + case PPC_EXCEPTION_STATE: + + if (*count < PPC_EXCEPTION_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + es = (struct ppc_exception_state *) tstate; + sv = genkern; /* Copy this over */ + + if(sv) { /* See if valid state yet */ + es->dar = (unsigned int)sv->save_dar; + es->dsisr = sv->save_dsisr; + es->exception = sv->save_exception; + } + else { /* Nope, not yet */ + es->dar = 0; + es->dsisr = 0; + es->exception = ((unsigned int *)&FloatInit)[0]; + } + + *count = PPC_EXCEPTION_STATE_COUNT; + return KERN_SUCCESS; + + case PPC_EXCEPTION_STATE64: + + if (*count < PPC_EXCEPTION_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + xes = (struct ppc_exception_state64 *) tstate; + sv = genkern; /* Copy this over */ + + if(sv) { /* See if valid state yet */ + xes->dar = sv->save_dar; + xes->dsisr = sv->save_dsisr; + xes->exception = sv->save_exception; + } + else { /* Nope, not yet */ + xes->dar = 0; + xes->dsisr = 0; + xes->exception = ((unsigned int *)&FloatInit)[0]; + } + + *count = PPC_EXCEPTION_STATE64_COUNT; + return KERN_SUCCESS; + + default: + return KERN_INVALID_ARGUMENT; + } +} /* @@ -345,7 +654,7 @@ act_machine_get_state( * Set the status of the specified thread. 
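All of the flavor handlers above share the standard Mach count convention, which is worth spelling out once: the caller passes its buffer capacity in *count, a short buffer is rejected with KERN_INVALID_ARGUMENT, and on success the kernel writes back the number of integers actually filled. A caller-side sketch using the ordinary Mach entry point that lands here (thread port and error handling elided):

	struct ppc_thread_state64 xts;
	mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;  /* capacity in */

	kern_return_t kr = thread_get_state(thread, PPC_THREAD_STATE64,
			(thread_state_t)&xts, &count);            /* ints filled, out */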
*/ kern_return_t -act_machine_set_state( +machine_thread_set_state( thread_act_t thr_act, thread_flavor_t flavor, thread_state_t tstate, @@ -358,18 +667,12 @@ act_machine_set_state( unsigned int i; int clgn; register struct ppc_thread_state *ts; + register struct ppc_thread_state64 *xts; register struct ppc_exception_state *es; + register struct ppc_exception_state *xes; register struct ppc_float_state *fs; register struct ppc_vector_state *vs; - int kernel_act = thr_act->kernel_loading || thr_act->kernel_loaded; - -#if MACH_ASSERT - if (watchacts & WA_STATE) - printf("act_%x act_machine_set_state(thr_act=%x,flav=%x,st=%x,cnt=%x)\n", - current_act(), thr_act, flavor, tstate, count); -#endif /* MACH_ASSERT */ - // dbgTrace((unsigned int)thr_act, (unsigned int)sv, flavor); /* (TEST/DEBUG) */ clgn = count; /* Get the count */ @@ -381,6 +684,13 @@ act_machine_set_state( return KERN_INVALID_ARGUMENT; /* Yeah, just leave... */ } break; + + case PPC_THREAD_STATE64: + + if (clgn < PPC_THREAD_STATE64_COUNT) { /* Is it too short? */ + return KERN_INVALID_ARGUMENT; /* Yeah, just leave... */ + } + break; case PPC_EXCEPTION_STATE: @@ -388,6 +698,12 @@ act_machine_set_state( return KERN_INVALID_ARGUMENT; /* Yeah, just leave... */ } + case PPC_EXCEPTION_STATE64: + + if (clgn < PPC_EXCEPTION_STATE64_COUNT) { /* Is it too short? */ + return KERN_INVALID_ARGUMENT; /* Yeah, just leave... */ + } + break; case PPC_FLOAT_STATE: @@ -416,70 +732,135 @@ act_machine_set_state( switch (flavor) { case PPC_THREAD_STATE: - case PPC_EXCEPTION_STATE: ts = (struct ppc_thread_state *)tstate; - - if(flavor == PPC_THREAD_STATE) { /* Are we updating plain state? */ - - genuser->save_r0 = ts->r0; - genuser->save_r1 = ts->r1; - genuser->save_r2 = ts->r2; - genuser->save_r3 = ts->r3; - genuser->save_r4 = ts->r4; - genuser->save_r5 = ts->r5; - genuser->save_r6 = ts->r6; - genuser->save_r7 = ts->r7; - genuser->save_r8 = ts->r8; - genuser->save_r9 = ts->r9; - genuser->save_r10 = ts->r10; - genuser->save_r11 = ts->r11; - genuser->save_r12 = ts->r12; - genuser->save_r13 = ts->r13; - genuser->save_r14 = ts->r14; - genuser->save_r15 = ts->r15; - genuser->save_r16 = ts->r16; - genuser->save_r17 = ts->r17; - genuser->save_r18 = ts->r18; - genuser->save_r19 = ts->r19; - genuser->save_r20 = ts->r20; - genuser->save_r21 = ts->r21; - genuser->save_r22 = ts->r22; - genuser->save_r23 = ts->r23; - genuser->save_r24 = ts->r24; - genuser->save_r25 = ts->r25; - genuser->save_r26 = ts->r26; - genuser->save_r27 = ts->r27; - genuser->save_r28 = ts->r28; - genuser->save_r29 = ts->r29; - genuser->save_r30 = ts->r30; - genuser->save_r31 = ts->r31; - - genuser->save_cr = ts->cr; - genuser->save_xer = ts->xer; - genuser->save_lr = ts->lr; - genuser->save_ctr = ts->ctr; - genuser->save_srr0 = ts->srr0; - genuser->save_vrsave = ts->vrsave; /* VRSAVE register (Altivec only) */ - genuser->save_srr1 = MSR_PREPARE_FOR_IMPORT(genuser->save_srr1, ts->srr1); /* Set the bits we can change */ - - if(!kernel_act) genuser->save_srr1 |= MSR_EXPORT_MASK_SET; /* If not a kernel guy, force the magic bits on */ - - genuser->save_srr1 &= ~(MASK(MSR_FP) | MASK(MSR_VEC)); /* Make sure we don't enable the floating point unit */ - - return KERN_SUCCESS; + genuser->save_r0 = (uint64_t)ts->r0; + genuser->save_r1 = (uint64_t)ts->r1; + genuser->save_r2 = (uint64_t)ts->r2; + genuser->save_r3 = (uint64_t)ts->r3; + genuser->save_r4 = (uint64_t)ts->r4; + genuser->save_r5 = (uint64_t)ts->r5; + genuser->save_r6 = (uint64_t)ts->r6; + genuser->save_r7 = (uint64_t)ts->r7; + genuser->save_r8 
= (uint64_t)ts->r8; + genuser->save_r9 = (uint64_t)ts->r9; + genuser->save_r10 = (uint64_t)ts->r10; + genuser->save_r11 = (uint64_t)ts->r11; + genuser->save_r12 = (uint64_t)ts->r12; + genuser->save_r13 = (uint64_t)ts->r13; + genuser->save_r14 = (uint64_t)ts->r14; + genuser->save_r15 = (uint64_t)ts->r15; + genuser->save_r16 = (uint64_t)ts->r16; + genuser->save_r17 = (uint64_t)ts->r17; + genuser->save_r18 = (uint64_t)ts->r18; + genuser->save_r19 = (uint64_t)ts->r19; + genuser->save_r20 = (uint64_t)ts->r20; + genuser->save_r21 = (uint64_t)ts->r21; + genuser->save_r22 = (uint64_t)ts->r22; + genuser->save_r23 = (uint64_t)ts->r23; + genuser->save_r24 = (uint64_t)ts->r24; + genuser->save_r25 = (uint64_t)ts->r25; + genuser->save_r26 = (uint64_t)ts->r26; + genuser->save_r27 = (uint64_t)ts->r27; + genuser->save_r28 = (uint64_t)ts->r28; + genuser->save_r29 = (uint64_t)ts->r29; + genuser->save_r30 = (uint64_t)ts->r30; + genuser->save_r31 = (uint64_t)ts->r31; + + genuser->save_cr = ts->cr; + genuser->save_xer = (uint64_t)ts->xer; + genuser->save_lr = (uint64_t)ts->lr; + genuser->save_ctr = (uint64_t)ts->ctr; + genuser->save_srr0 = (uint64_t)ts->srr0; + genuser->save_vrsave = ts->vrsave; /* VRSAVE register (Altivec only) */ + + genuser->save_srr1 = MSR_PREPARE_FOR_IMPORT(genuser->save_srr1, ts->srr1); /* Set the bits we can change */ + + genuser->save_srr1 |= MSR_EXPORT_MASK_SET; + + genuser->save_srr1 &= ~(MASK(MSR_FP) | MASK(MSR_VEC)); /* Make sure we don't enable the floating point unit */ + + return KERN_SUCCESS; + + + case PPC_THREAD_STATE64: - } + xts = (struct ppc_thread_state64 *)tstate; + + genuser->save_r0 = xts->r0; + genuser->save_r1 = xts->r1; + genuser->save_r2 = xts->r2; + genuser->save_r3 = xts->r3; + genuser->save_r4 = xts->r4; + genuser->save_r5 = xts->r5; + genuser->save_r6 = xts->r6; + genuser->save_r7 = xts->r7; + genuser->save_r8 = xts->r8; + genuser->save_r9 = xts->r9; + genuser->save_r10 = xts->r10; + genuser->save_r11 = xts->r11; + genuser->save_r12 = xts->r12; + genuser->save_r13 = xts->r13; + genuser->save_r14 = xts->r14; + genuser->save_r15 = xts->r15; + genuser->save_r16 = xts->r16; + genuser->save_r17 = xts->r17; + genuser->save_r18 = xts->r18; + genuser->save_r19 = xts->r19; + genuser->save_r20 = xts->r20; + genuser->save_r21 = xts->r21; + genuser->save_r22 = xts->r22; + genuser->save_r23 = xts->r23; + genuser->save_r24 = xts->r24; + genuser->save_r25 = xts->r25; + genuser->save_r26 = xts->r26; + genuser->save_r27 = xts->r27; + genuser->save_r28 = xts->r28; + genuser->save_r29 = xts->r29; + genuser->save_r30 = xts->r30; + genuser->save_r31 = xts->r31; + + genuser->save_cr = xts->cr; + genuser->save_xer = xts->xer; + genuser->save_lr = xts->lr; + genuser->save_ctr = xts->ctr; + genuser->save_srr0 = xts->srr0; + genuser->save_vrsave = xts->vrsave; /* VRSAVE register (Altivec only) */ + + genuser->save_srr1 = MSR_PREPARE_FOR_IMPORT(genuser->save_srr1, xts->srr1); /* Set the bits we can change */ + genuser->save_srr1 |= MSR_EXPORT_MASK_SET; + + genuser->save_srr1 &= ~(MASK(MSR_FP) | MASK(MSR_VEC)); /* Make sure we don't enable the floating point unit */ + + return KERN_SUCCESS; + + + case PPC_EXCEPTION_STATE: + es = (struct ppc_exception_state *) tstate; - genuser->save_dar = es->dar; + genuser->save_dar = (uint64_t)es->dar; genuser->save_dsisr = es->dsisr; genuser->save_exception = es->exception; return KERN_SUCCESS; +/* + * It's pretty worthless to try to change this stuff, but we'll do it anyway. 
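One detail worth pulling out of both set-state paths above: SRR1 is never accepted verbatim from the caller. Condensed, the laundering sequence is the following (macro semantics exactly as used in this file); note the FP and vector enables are deliberately cleared so the lazy context-switch machinery re-enables them on first use.

	uint64_t srr1 = genuser->save_srr1;
	srr1 = MSR_PREPARE_FOR_IMPORT(srr1, ts->srr1);  /* import changeable bits only  */
	srr1 |= MSR_EXPORT_MASK_SET;                    /* force mandatory user-mode bits */
	srr1 &= ~(MASK(MSR_FP) | MASK(MSR_VEC));        /* FP/vector re-enabled lazily   */
	genuser->save_srr1 = srr1;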
+ */ + + case PPC_EXCEPTION_STATE64: + + xes = (struct ppc_exception_state *) tstate; + + genuser->save_dar = xes->dar; + genuser->save_dsisr = xes->dsisr; + genuser->save_exception = xes->exception; + + return KERN_SUCCESS; + case PPC_FLOAT_STATE: toss_live_fpu(thr_act->mact.curctx); /* Toss my floating point if live anywhere */ @@ -489,7 +870,7 @@ act_machine_set_state( if(!fsv) { /* Do we have one yet? */ fsv = (savearea_fpu *)save_alloc(); /* If we still don't have one, get a new one */ fsv->save_hdr.save_flags = (fsv->save_hdr.save_flags & ~SAVtype) | (SAVfloat << SAVtypeshft); /* Mark as in use as float */ - fsv->save_hdr.save_act = thr_act; /* Point to the activation */ + fsv->save_hdr.save_act = (struct thread_activation *)thr_act; /* Point to the activation */ fsv->save_hdr.save_prev = 0; /* Mark no more */ fsv->save_hdr.save_level = 0; /* Mark user state */ @@ -500,10 +881,10 @@ act_machine_set_state( while (fsvn) { /* Go until we hit the end */ fsvo = fsvn; /* Remember the previous one */ - fsvn = (savearea_fpu *)fsvo->save_hdr.save_prev; /* Skip on to the next */ + fsvn = CAST_DOWN(savearea_fpu *, fsvo->save_hdr.save_prev); /* Skip on to the next */ } - fsvo->save_hdr.save_prev = (savearea *)fsv; /* Queue us on in */ + fsvo->save_hdr.save_prev = (addr64_t)((uintptr_t)fsv); /* Queue us on in */ } } @@ -527,7 +908,7 @@ act_machine_set_state( if(!vsv) { /* Do we have one yet? */ vsv = (savearea_vec *)save_alloc(); /* If we still don't have one, get a new one */ vsv->save_hdr.save_flags = (vsv->save_hdr.save_flags & ~SAVtype) | (SAVvector << SAVtypeshft); /* Mark as in use as vector */ - vsv->save_hdr.save_act = thr_act; /* Point to the activation */ + vsv->save_hdr.save_act = (struct thread_activation *)thr_act; /* Point to the activation */ vsv->save_hdr.save_prev = 0; /* Mark no more */ vsv->save_hdr.save_level = 0; /* Mark user state */ @@ -538,10 +919,10 @@ act_machine_set_state( while (vsvn) { /* Go until we hit the end */ vsvo = vsvn; /* Remember the previous one */ - vsvn = (savearea_vec *)vsvo->save_hdr.save_prev; /* Skip on to the next */ + vsvn = CAST_DOWN(savearea_vec *, vsvo->save_hdr.save_prev); /* Skip on to the next */ } - vsvo->save_hdr.save_prev = (savearea *)vsv; /* Queue us on in */ + vsvo->save_hdr.save_prev = (addr64_t)((uintptr_t)vsv); /* Queue us on in */ } } @@ -571,22 +952,21 @@ act_machine_set_state( * eliminate any floating point or vector kernel contexts and carry across the user state ones. 
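The chain-walking idiom that recurs throughout this hunk deserves a note: save_prev is now an addr64_t rather than a pointer, so every traversal narrows it with CAST_DOWN and every link-in widens it explicitly. In isolation (CAST_DOWN assumed to be the usual truncating cast macro, as its uses here imply):

	/* walk back one link, narrowing the 64-bit address to a kernel pointer */
	savearea_fpu *fsvn = CAST_DOWN(savearea_fpu *, fsvo->save_hdr.save_prev);

	/* queue a new savearea in, widening the pointer back to addr64_t */
	fsvo->save_hdr.save_prev = (addr64_t)((uintptr_t)fsv);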
*/ -void act_thread_dup(thread_act_t old, thread_act_t new) { +kern_return_t machine_thread_dup(thread_act_t self, thread_act_t target) { savearea *sv, *osv; savearea_fpu *fsv, *fsvn; savearea_vec *vsv, *vsvn; unsigned int spc, i, *srs; - fpu_save(old->mact.curctx); /* Make certain floating point state is all saved */ - vec_save(old->mact.curctx); /* Make certain the vector state is all saved */ + fpu_save(self->mact.curctx); /* Make certain floating point state is all saved */ + vec_save(self->mact.curctx); /* Make certain the vector state is all saved */ - sv = get_user_regs(new); /* Allocate and initialze context in the new activation */ + sv = get_user_regs(target); /* Allocate and initialze context in the new activation */ - osv = find_user_regs(old); /* Find the original context */ - if(!osv) { - panic("act_thread_dup: old activation (%08X) has no general user context\n", old); - } + osv = find_user_regs(self); /* Find the original context */ + if(!osv) + return (KERN_FAILURE); bcopy((char *)((unsigned int)osv + sizeof(savearea_comm)), /* Copy everything but the headers */ (char *)((unsigned int)sv + sizeof(savearea_comm)), @@ -594,43 +974,43 @@ void act_thread_dup(thread_act_t old, thread_act_t new) { sv->save_srr1 &= ~(MASK(MSR_FP) | MASK(MSR_VEC)); /* Make certain that floating point and vector are turned off */ - fsv = find_user_fpu(old); /* Get any user floating point */ + fsv = find_user_fpu(self); /* Get any user floating point */ - new->mact.curctx->FPUsave = 0; /* Assume no floating point */ + target->mact.curctx->FPUsave = 0; /* Assume no floating point */ if(fsv) { /* Did we find one? */ fsvn = (savearea_fpu *)save_alloc(); /* If we still don't have one, get a new one */ fsvn->save_hdr.save_flags = (fsvn->save_hdr.save_flags & ~SAVtype) | (SAVfloat << SAVtypeshft); /* Mark as in use as float */ - fsvn->save_hdr.save_act = new; /* Point to the activation */ + fsvn->save_hdr.save_act = (struct thread_activation *)target; /* Point to the activation */ fsvn->save_hdr.save_prev = 0; /* Mark no more */ fsvn->save_hdr.save_level = 0; /* Mark user state */ - new->mact.curctx->FPUsave = fsvn; /* Chain in the floating point */ + target->mact.curctx->FPUsave = fsvn; /* Chain in the floating point */ bcopy((char *)((unsigned int)fsv + sizeof(savearea_comm)), /* Copy everything but the headers */ (char *)((unsigned int)fsvn + sizeof(savearea_comm)), sizeof(struct savearea) - sizeof(savearea_comm)); } - vsv = find_user_vec(old); /* Get any user vector */ + vsv = find_user_vec(self); /* Get any user vector */ - new->mact.curctx->VMXsave = 0; /* Assume no vector */ + target->mact.curctx->VMXsave = 0; /* Assume no vector */ if(vsv) { /* Did we find one? */ vsvn = (savearea_vec *)save_alloc(); /* If we still don't have one, get a new one */ vsvn->save_hdr.save_flags = (vsvn->save_hdr.save_flags & ~SAVtype) | (SAVvector << SAVtypeshft); /* Mark as in use as float */ - vsvn->save_hdr.save_act = new; /* Point to the activation */ + vsvn->save_hdr.save_act = (struct thread_activation *)target; /* Point to the activation */ vsvn->save_hdr.save_prev = 0; /* Mark no more */ vsvn->save_hdr.save_level = 0; /* Mark user state */ - new->mact.curctx->VMXsave = vsvn; /* Chain in the floating point */ + target->mact.curctx->VMXsave = vsvn; /* Chain in the floating point */ bcopy((char *)((unsigned int)vsv + sizeof(savearea_comm)), /* Copy everything but the headers */ (char *)((unsigned int)vsvn + sizeof(savearea_comm)), sizeof(struct savearea) - sizeof(savearea_comm)); } - return; /* Bye bye... 
*/ + return (KERN_SUCCESS); } /* @@ -645,28 +1025,30 @@ savearea *get_user_regs(thread_act_t act) { savearea *sv, *osv; unsigned int spc, i, *srs; + if (act->mact.upcb) + return act->mact.upcb; + sv = act->mact.pcb; /* Get the top savearea on the stack */ osv = 0; /* Set no user savearea yet */ while(sv) { /* Find the user context */ - if(sv->save_srr1 & MASK(MSR_PR)) return sv; /* We found a user state context... */ - osv = sv; /* Save the last one */ - sv = sv->save_hdr.save_prev; /* Get the previous context */ + sv = CAST_DOWN(savearea *, sv->save_hdr.save_prev); /* Get the previous context */ } sv = save_alloc(); /* Get one */ sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft); /* Mark as in use as general */ - sv->save_hdr.save_act = act; /* Point to the activation */ + sv->save_hdr.save_act = (struct thread_activation *)act; /* Point to the activation */ sv->save_hdr.save_prev = 0; /* Mark no more */ sv->save_hdr.save_level = 0; /* Mark user state */ if(osv) { /* Did we already have one? */ - osv->save_hdr.save_prev = sv; /* Chain us on the end */ + osv->save_hdr.save_prev = (addr64_t)((uintptr_t)sv); /* Chain us on the end */ } else { /* We are the first */ act->mact.pcb = sv; /* Put it there */ } + act->mact.upcb = sv; /* Set user pcb */ for(i=0; i < 32; i+=2) { /* Fill up with defaults */ ((unsigned int *)&sv->save_r0)[i] = ((unsigned int *)&FloatInit)[0]; @@ -674,10 +1056,10 @@ savearea *get_user_regs(thread_act_t act) { } sv->save_cr = 0; sv->save_xer = 0; - sv->save_lr = ((unsigned int *)&FloatInit)[0]; - sv->save_ctr = ((unsigned int *)&FloatInit)[1]; - sv->save_srr0 = ((unsigned int *)&FloatInit)[0]; - sv->save_srr1 = MSR_EXPORT_MASK_SET; + sv->save_lr = (uint64_t)FloatInit; + sv->save_ctr = (uint64_t)FloatInit; + sv->save_srr0 = (uint64_t)FloatInit; + sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET; sv->save_fpscr = 0; /* Clear all floating point exceptions */ @@ -685,14 +1067,7 @@ savearea *get_user_regs(thread_act_t act) { sv->save_vscr[0] = 0x00000000; sv->save_vscr[1] = 0x00000000; sv->save_vscr[2] = 0x00000000; - sv->save_vscr[3] = 0x00010000; /* Supress java mode and clear saturated */ - - spc = (unsigned int)act->map->pmap->space; /* Get the space we're in */ - - srs = (unsigned int *)&sv->save_sr0; /* Point to the SRs */ - for(i = 0; i < 16; i++) { /* Fill in the SRs for the new context */ - srs[i] = SEG_REG_PROT | (i<<20) | spc; /* Set the SR */ - } + sv->save_vscr[3] = 0x00010000; /* Disable java mode and clear saturated */ return sv; /* Bye bye... */ } @@ -703,19 +1078,14 @@ savearea *get_user_regs(thread_act_t act) { */ savearea *find_user_regs(thread_act_t act) { + return act->mact.upcb; +} - savearea *sv; - - sv = act->mact.pcb; /* Get the top savearea on the stack */ - - while(sv) { /* Find the user context */ - if(sv->save_srr1 & MASK(MSR_PR)) { /* Are we looking at the user context? */ - break; /* Outta here */ - } - sv = sv->save_hdr.save_prev; /* Get the previous context */ - } - - return sv; /* Bye bye... */ +/* The name of this call is something of a misnomer since the mact.pcb can + * contain chained saveareas, but it will do for now.. + */ +savearea *find_kern_regs(thread_act_t act) { + return act->mact.pcb; } /* @@ -731,7 +1101,7 @@ savearea_fpu *find_user_fpu(thread_act_t act) { while(fsv) { /* Look until the end or we find it */ if(!(fsv->save_hdr.save_level)) break; /* Is the the user state stuff? 
(the level is 0 if so) */ - fsv = (savearea_fpu *)fsv->save_hdr.save_prev; /* Try the previous one */ + fsv = CAST_DOWN(savearea_fpu *, fsv->save_hdr.save_prev); /* Try the previous one */ } return fsv; /* Bye bye... */ @@ -750,7 +1120,30 @@ savearea_vec *find_user_vec(thread_act_t act) { while(vsv) { /* Look until the end or we find it */ if(!(vsv->save_hdr.save_level)) break; /* Is the the user state stuff? (the level is 0 if so) */ - vsv = (savearea_vec *)vsv->save_hdr.save_prev; /* Try the previous one */ + vsv = CAST_DOWN(savearea_vec *, vsv->save_hdr.save_prev); /* Try the previous one */ + } + + return vsv; /* Bye bye... */ +} +/* + * Find the user state vector context for the current thread. If there is no user state context, + * we just return a 0. + */ + +savearea_vec *find_user_vec_curr(void) { + + savearea_vec *vsv; + thread_act_t act; + + act = current_act(); /* Get the current activation */ + + vec_save(act->mact.curctx); /* Force save if live */ + + vsv = act->mact.curctx->VMXsave; /* Get the start of the vector chain */ + + while(vsv) { /* Look until the end or we find it */ + if(!(vsv->save_hdr.save_level)) break; /* Is the the user state stuff? (the level is 0 if so) */ + vsv = CAST_DOWN(savearea_vec *, vsv->save_hdr.save_prev); /* Try the previous one */ } return vsv; /* Bye bye... */ @@ -774,8 +1167,13 @@ thread_userstack( { struct ppc_thread_state *state; - if (customstack) - *customstack = 0; + /* + * Set a default. + */ + if (*user_stack == 0) + *user_stack = USRSTACK; + if (customstack) + *customstack = 0; switch (flavor) { case PPC_THREAD_STATE: @@ -784,13 +1182,14 @@ thread_userstack( state = (struct ppc_thread_state *) tstate; - /* If a valid user stack is specified, use it. */ - if (state->r1) - *user_stack = state->r1; - - if (customstack && state->r1) - *customstack = 1; + /* + * If a valid user stack is specified, use it. + */ + *user_stack = state->r1 ? state->r1: USRSTACK; + if (customstack && state->r1) + *customstack = 1; + break; default : return (KERN_INVALID_ARGUMENT); @@ -806,13 +1205,13 @@ thread_userstack( * Sets the user stack pointer into the machine * dependent thread state info. */ -void thread_setuserstack(struct thread_activation *act, unsigned int user_stack) +void thread_setuserstack(thread_act_t act, unsigned int user_stack) { savearea *sv; sv = get_user_regs(act); /* Get the user state registers */ - sv->save_r1 = user_stack; + sv->save_r1 = (uint64_t)user_stack; return; } @@ -823,7 +1222,7 @@ void thread_setuserstack(struct thread_activation *act, unsigned int user_stack) * Returns the adjusted user stack pointer from the machine * dependent thread state info. */ -unsigned int thread_adjuserstack(struct thread_activation *act, int adjust) +unsigned int thread_adjuserstack(thread_act_t act, int adjust) { savearea *sv; @@ -831,7 +1230,7 @@ unsigned int thread_adjuserstack(struct thread_activation *act, int adjust) sv->save_r1 += adjust; /* Adjust the stack */ - return sv->save_r1; /* Return the adjusted stack */ + return (unsigned int)sv->save_r1; /* Return the adjusted stack */ } @@ -842,13 +1241,13 @@ unsigned int thread_adjuserstack(struct thread_activation *act, int adjust) * dependent thread state info. 
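Taken together, thread_setuserstack()/thread_adjuserstack() above and thread_setentrypoint() just below form the exec-path interface for wiring up a new user thread. A hypothetical caller (arg_size and entry_pc are placeholders; the surrounding exec code is not part of this hunk):

	unsigned int sp;

	thread_setuserstack(act, USRSTACK);            /* writes sv->save_r1        */
	sp = thread_adjuserstack(act, -arg_size);      /* make room for the arg block */
	thread_setentrypoint(act, entry_pc);           /* writes sv->save_srr0      */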
*/ -void thread_setentrypoint(struct thread_activation *act, unsigned int entry) +void thread_setentrypoint(thread_act_t act, unsigned int entry) { savearea *sv; sv = get_user_regs(act); /* Get the user state registers */ - sv->save_srr0 = entry; + sv->save_srr0 = (uint64_t)entry; return; } @@ -904,14 +1303,32 @@ unsigned int get_msr_rbits(void) return (MASK(MSR_PR)|MASK(MSR_ME)|MASK(MSR_IR)|MASK(MSR_DR)|MASK(MSR_EE)); } +void ppc_checkthreadstate(void * tsptr, int flavor) +{ + if (flavor == PPC_THREAD_STATE64) { + struct ppc_thread_state64 *ts64 =(struct ppc_thread_state64 *)tsptr; + + /* Make sure naughty bits are off and necessary bits are on */ + ts64->srr1 &= ~(MASK(MSR_POW)|MASK(MSR_ILE)|MASK(MSR_IP)|MASK(MSR_LE)); + ts64->srr1 |= (MASK(MSR_PR)|MASK(MSR_ME)|MASK(MSR_IR)|MASK(MSR_DR)|MASK(MSR_EE)); + } else { + struct ppc_thread_state *ts =(struct ppc_thread_state *)tsptr; + + /* Make sure naughty bits are off and necessary bits are on */ + ts->srr1 &= ~(MASK(MSR_POW)|MASK(MSR_ILE)|MASK(MSR_IP)|MASK(MSR_LE)); + ts->srr1 |= (MASK(MSR_PR)|MASK(MSR_ME)|MASK(MSR_IR)|MASK(MSR_DR)|MASK(MSR_EE)); + } + return; +} + void thread_set_child(thread_act_t child, int pid) { struct savearea *child_state; child_state = get_user_regs(child); - child_state->save_r3 = pid; - child_state->save_r4 = 1; + child_state->save_r3 = (uint_t)pid; + child_state->save_r4 = 1ULL; } void thread_set_parent(thread_act_t parent, int pid) { @@ -919,7 +1336,7 @@ void thread_set_parent(thread_act_t parent, int pid) parent_state = get_user_regs(parent); - parent_state->save_r3 = pid; + parent_state->save_r3 = (uint64_t)pid; parent_state->save_r4 = 0; } @@ -955,7 +1372,7 @@ void *act_thread_csave(void) { sv = save_alloc(); /* Get a fresh save area to save into */ sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft); /* Mark as in use as general */ - sv->save_hdr.save_act = act; /* Point to the activation */ + sv->save_hdr.save_act = (struct thread_activation *)act; /* Point to the activation */ sv->save_hdr.save_prev = 0; /* Mark no more */ sv->save_hdr.save_level = 0; /* Mark user state */ @@ -977,13 +1394,13 @@ void *act_thread_csave(void) { if(ofsv) { /* Did we find one? */ fsv = (savearea_fpu *)save_alloc(); /* If we still don't have one, get a new one */ fsv->save_hdr.save_flags = (fsv->save_hdr.save_flags & ~SAVtype) | (SAVfloat << SAVtypeshft); /* Mark as in use as float */ - fsv->save_hdr.save_act = act; /* Point to the activation */ + fsv->save_hdr.save_act = (struct thread_activation *)act; /* Point to the activation */ fsv->save_hdr.save_prev = 0; /* Mark no more */ fsv->save_hdr.save_level = 0; /* Mark user state */ fsv->save_hdr.save_misc2 = 0xDEBB1ED0; /* Eye catcher for debug */ fsv->save_hdr.save_misc3 = 0xE5DA11A5; /* Eye catcher for debug */ - sv->save_hdr.save_misc0 = (unsigned int)fsv; /* Remember this one */ + sv->save_hdr.save_misc0 = (uint64_t)((uintptr_t)fsv); /* Remember this one */ bcopy((char *)((unsigned int)ofsv + sizeof(savearea_comm)), /* Copy everything but the headers */ (char *)((unsigned int)fsv + sizeof(savearea_comm)), @@ -997,13 +1414,13 @@ void *act_thread_csave(void) { if(ovsv) { /* Did we find one? 
*/ vsv = (savearea_vec *)save_alloc(); /* If we still don't have one, get a new one */ vsv->save_hdr.save_flags = (vsv->save_hdr.save_flags & ~SAVtype) | (SAVvector << SAVtypeshft); /* Mark as in use as float */ - vsv->save_hdr.save_act = act; /* Point to the activation */ + vsv->save_hdr.save_act = (struct thread_activation *)act; /* Point to the activation */ vsv->save_hdr.save_prev = 0; /* Mark no more */ vsv->save_hdr.save_level = 0; /* Mark user state */ vsv->save_hdr.save_misc2 = 0xDEBB1ED0; /* Eye catcher for debug */ vsv->save_hdr.save_misc3 = 0xE5DA11A5; /* Eye catcher for debug */ - sv->save_hdr.save_misc1 = (unsigned int)vsv; /* Chain in the floating point */ + sv->save_hdr.save_misc1 = (uint64_t)((uintptr_t)vsv); /* Chain in the floating point */ bcopy((char *)((unsigned int)ovsv + sizeof(savearea_comm)), /* Copy everything but the headers */ (char *)((unsigned int)vsv + sizeof(savearea_comm)), @@ -1036,8 +1453,8 @@ void act_thread_catt(void *ctx) { sv = (savearea *)ctx; /* Make this easier for C */ - fsv = (savearea_fpu *)sv->save_hdr.save_misc0; /* Get a possible floating point savearea */ - vsv = (savearea_vec *)sv->save_hdr.save_misc1; /* Get a possible vector savearea */ + fsv = CAST_DOWN(savearea_fpu *, sv->save_hdr.save_misc0); /* Get a possible floating point savearea */ + vsv = CAST_DOWN(savearea_vec *, sv->save_hdr.save_misc1); /* Get a possible vector savearea */ if((sv->save_hdr.save_misc2 != 0xDEBB1ED0) || (sv->save_hdr.save_misc3 != 0xE5DA11A5)) { /* See if valid savearea */ panic("act_thread_catt: attempt to attach invalid general context savearea - %08X\n", sv); /* Die */ @@ -1058,21 +1475,16 @@ void act_thread_catt(void *ctx) { sv->save_hdr.save_misc2 = 0; /* Eye catcher for debug */ sv->save_hdr.save_misc3 = 0; /* Eye catcher for debug */ - sv->save_hdr.save_act = act; /* Set us as owner */ + sv->save_hdr.save_act = (struct thread_activation *)act; /* Set us as owner */ spc = (unsigned int)act->map->pmap->space; /* Get the space we're in */ - srs = (unsigned int *)&sv->save_sr0; /* Point to the SRs */ - for(i = 0; i < 16; i++) { /* Fill in the SRs for the new context */ - srs[i] = SEG_REG_PROT | (i<<20) | spc; /* Set the SRs */ - } - osv = act->mact.pcb; /* Get the top general savearea */ psv = 0; while(osv) { /* Any saved state? */ if(osv->save_srr1 & MASK(MSR_PR)) break; /* Leave if this is user state */ psv = osv; /* Save previous savearea address */ - osv = osv->save_hdr.save_prev; /* Get one underneath our's */ + osv = CAST_DOWN(savearea *, osv->save_hdr.save_prev); /* Get one underneath our's */ } if(osv) { /* Did we find one? */ @@ -1083,8 +1495,9 @@ void act_thread_catt(void *ctx) { } - if(psv) psv->save_hdr.save_prev = sv; /* Chain us to the end or */ + if(psv) psv->save_hdr.save_prev = (addr64_t)((uintptr_t)sv); /* Chain us to the end or */ else act->mact.pcb = (pcb_t)sv; /* to the start if the only one */ + act->mact.upcb = (pcb_t)sv; /* Set the user pcb */ ovsv = act->mact.curctx->VMXsave; /* Get the top vector savearea */ @@ -1092,7 +1505,7 @@ void act_thread_catt(void *ctx) { while(ovsv) { /* Any VMX saved state? */ if(!(ovsv->save_hdr.save_level)) break; /* Leave if this is user state */ pvsv = ovsv; /* Save previous savearea address */ - ovsv = (savearea_vec *)ovsv->save_hdr.save_prev; /* Get one underneath our's */ + ovsv = CAST_DOWN(savearea_vec *, ovsv->save_hdr.save_prev); /* Get one underneath our's */ } if(ovsv) { /* Did we find one? 
*/ @@ -1103,12 +1516,12 @@ void act_thread_catt(void *ctx) { } if(vsv) { /* Are we sticking any vector on this one? */ - if(pvsv) pvsv->save_hdr.save_prev = (savearea *)vsv; /* Yes, chain us to the end or */ + if(pvsv) pvsv->save_hdr.save_prev = (addr64_t)((uintptr_t)vsv); /* Yes, chain us to the end or */ else act->mact.curctx->VMXsave = vsv; /* to the start if the only one */ vsv->save_hdr.save_misc2 = 0; /* Eye catcher for debug */ vsv->save_hdr.save_misc3 = 0; /* Eye catcher for debug */ - vsv->save_hdr.save_act = act; /* Set us as owner */ + vsv->save_hdr.save_act = (struct thread_activation *)act; /* Set us as owner */ } ofsv = act->mact.curctx->FPUsave; /* Get the top float savearea */ @@ -1117,7 +1530,7 @@ void act_thread_catt(void *ctx) { while(ofsv) { /* Any float saved state? */ if(!(ofsv->save_hdr.save_level)) break; /* Leave if this is user state */ pfsv = ofsv; /* Save previous savearea address */ - ofsv = (savearea_fpu *)ofsv->save_hdr.save_prev; /* Get one underneath our's */ + ofsv = CAST_DOWN(savearea_fpu *, ofsv->save_hdr.save_prev); /* Get one underneath our's */ } if(ofsv) { /* Did we find one? */ @@ -1128,12 +1541,12 @@ void act_thread_catt(void *ctx) { } if(fsv) { /* Are we sticking any vector on this one? */ - if(pfsv) pfsv->save_hdr.save_prev = (savearea *)fsv; /* Yes, chain us to the end or */ + if(pfsv) pfsv->save_hdr.save_prev = (addr64_t)((uintptr_t)fsv); /* Yes, chain us to the end or */ else act->mact.curctx->FPUsave = fsv; /* to the start if the only one */ fsv->save_hdr.save_misc2 = 0; /* Eye catcher for debug */ fsv->save_hdr.save_misc3 = 0; /* Eye catcher for debug */ - fsv->save_hdr.save_act = act; /* Set us as owner */ + fsv->save_hdr.save_act = (struct thread_activation *)act; /* Set us as owner */ } } @@ -1154,8 +1567,8 @@ void act_thread_cfree(void *ctx) { sv = (savearea *)ctx; /* Make this easier for C */ - fsv = (savearea_fpu *)sv->save_hdr.save_misc0; /* Get a possible floating point savearea */ - vsv = (savearea_vec *)sv->save_hdr.save_misc1; /* Get a possible vector savearea */ + fsv = CAST_DOWN(savearea_fpu *, sv->save_hdr.save_misc0); /* Get a possible floating point savearea */ + vsv = CAST_DOWN(savearea_vec *, sv->save_hdr.save_misc1); /* Get a possible vector savearea */ if((sv->save_hdr.save_misc2 != 0xDEBB1ED0) || (sv->save_hdr.save_misc3 != 0xE5DA11A5)) { /* See if valid savearea */ panic("act_thread_cfree: attempt to detatch invalid general context savearea - %08X\n", sv); /* Die */ diff --git a/osfmk/ppc/thread_act.h b/osfmk/ppc/thread_act.h index 46642ba3b..7438f3170 100644 --- a/osfmk/ppc/thread_act.h +++ b/osfmk/ppc/thread_act.h @@ -61,9 +61,11 @@ struct facility_context { savearea_fpu *FPUsave; /* The floating point savearea */ savearea *FPUlevel; /* The floating point context level */ unsigned int FPUcpu; /* The last processor to enable floating point */ + unsigned int FPUsync; /* Sync lock */ savearea_vec *VMXsave; /* The VMX savearea */ savearea *VMXlevel; /* The VMX context level */ unsigned int VMXcpu; /* The last processor to enable vector */ + unsigned int VMXsync; /* Sync lock */ struct thread_activation *facAct; /* Activation associated with context */ }; @@ -87,13 +89,19 @@ typedef struct MachineThrAct { * same saveareas. 
*/ savearea *pcb; /* The "normal" savearea */ + savearea *upcb; /* The "normal" user savearea */ facility_context *curctx; /* Current facility context */ facility_context *deferctx; /* Deferred facility context */ facility_context facctx; /* "Normal" facility context */ struct vmmCntrlEntry *vmmCEntry; /* Pointer current emulation context or 0 */ struct vmmCntrlTable *vmmControl; /* Pointer to virtual machine monitor control table */ uint64_t qactTimer; /* Time thread needs to interrupt. This is a single-shot timer. Zero is unset */ + unsigned int cioSpace; /* Address space ID for in progress copyin/out */ +#define cioSwitchAway 0x80000000 /* Context switched away from thread since MapUserAddressSpace */ +#define cioSwitchAwayb 0 + addr64_t cioRelo; /* Relocation value for in progress copyin/out */ unsigned int ksp; /* points to TOP OF STACK or zero */ + unsigned int preemption_count; /* preemption count */ unsigned int bbDescAddr; /* Points to Blue Box Trap descriptor area in kernel (page aligned) */ unsigned int bbUserDA; /* Points to Blue Box Trap descriptor area in user (page aligned) */ unsigned int bbTableStart; /* Points to Blue Box Trap dispatch area in user */ @@ -101,6 +109,12 @@ typedef struct MachineThrAct { unsigned int bbTaskID; /* Opaque task ID for Blue Box threads */ unsigned int bbTaskEnv; /* Opaque task data reference for Blue Box threads */ unsigned int specFlags; /* Special flags */ + unsigned int pmcovfl[8]; /* PMC overflow count */ + unsigned int perfmonFlags; /* Perfmon facility flags */ + unsigned int bbTrap; /* Blue Box trap vector */ + unsigned int bbSysCall; /* Blue Box syscall vector */ + unsigned int bbInterrupt; /* Blue Box interrupt vector */ + unsigned int bbPending; /* Blue Box pending interrupt vector */ /* special flags bits */ @@ -112,31 +126,33 @@ typedef struct MachineThrAct { #define vectorCngbit 6 #define timerPopbit 7 #define userProtKeybit 8 -#define trapUnalignbit 9 -#define notifyUnalignbit 10 -#define FamVMenabit 11 +#define FamVMenabit 11 #define FamVMmodebit 12 +#define perfMonitorbit 13 /* NOTE: Do not move or assign bit 31 without changing exception vector ultra fast path code */ #define bbThreadbit 28 #define bbNoMachSCbit 29 #define bbPreemptivebit 30 #define spfReserved1 31 /* See note above */ -#define ignoreZeroFault (1<<(31-ignoreZeroFaultbit)) -#define floatUsed (1<<(31-floatUsedbit)) -#define vectorUsed (1<<(31-vectorUsedbit)) -#define runningVM (1<<(31-runningVMbit)) -#define floatCng (1<<(31-floatCngbit)) -#define vectorCng (1<<(31-vectorCngbit)) -#define timerPop (1<<(31-timerPopbit)) -#define userProtKey (1<<(31-userProtKeybit)) -#define trapUnalign (1<<(31-trapUnalignbit)) -#define notifyUnalign (1<<(31-notifyUnalignbit)) -#define FamVMena (1<<(31-FamVMenabit)) -#define FamVMmode (1<<(31-FamVMmodebit)) -#define bbThread (1<<(31-bbThreadbit)) -#define bbNoMachSC (1<<(31-bbNoMachSCbit)) -#define bbPreemptive (1<<(31-bbPreemptivebit)) +#define ignoreZeroFault 0x80000000 /* (1<<(31-ignoreZeroFaultbit)) */ +#define floatUsed 0x40000000 /* (1<<(31-floatUsedbit)) */ +#define vectorUsed 0x20000000 /* (1<<(31-vectorUsedbit)) */ + +#define runningVM 0x08000000 /* (1<<(31-runningVMbit)) */ +#define floatCng 0x04000000 /* (1<<(31-floatCngbit)) */ +#define vectorCng 0x02000000 /* (1<<(31-vectorCngbit)) */ +#define timerPop 0x01000000 /* (1<<(31-timerPopbit)) */ + +#define userProtKey 0x00800000 /* (1<<(31-userProtKeybit)) */ + +#define FamVMena 0x00100000 /* (1<<(31-FamVMenabit)) */ +#define FamVMmode 0x00080000 /* (1<<(31-FamVMmodebit)) */ 
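The specFlags masks above and just below were rewritten as literal hex, with the old shift expressions kept as comments, presumably so they can be checked directly against the exception-vector fast-path code the NOTE warns about. The invariant is easy to spot-check (illustrative only, not part of the header):

	#include <assert.h>

	assert(FamVMena == (1u << (31 - FamVMenabit)));   /* 0x00100000 */
	assert(bbThread == (1u << (31 - bbThreadbit)));   /* 0x00000008 */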
+#define perfMonitor 0x00040000 /* (1<<(31-perfMonitorbit)) */ + +#define bbThread 0x00000008 /* (1<<(31-bbThreadbit)) */ +#define bbNoMachSC 0x00000004 /* (1<<(31-bbNoMachSCbit)) */ +#define bbPreemptive 0x00000002 /* (1<<(31-bbPreemptivebit)) */ #define fvChkb 0 #define fvChk 0x80000000 @@ -151,8 +167,11 @@ extern struct savearea *find_user_regs(thread_act_t act); extern struct savearea *get_user_regs(thread_act_t); extern struct savearea_fpu *find_user_fpu(thread_act_t act); extern struct savearea_vec *find_user_vec(thread_act_t act); +extern struct savearea_vec *find_user_vec_curr(void); extern int thread_enable_fpe(thread_act_t act, int onoff); +extern struct savearea *find_kern_regs(thread_act_t act); + extern void *act_thread_csave(void); extern void act_thread_catt(void *ctx); extern void act_thread_cfree(void *ctx); diff --git a/osfmk/ppc/trap.c b/osfmk/ppc/trap.c index f138d8edd..d5f4e1b07 100644 --- a/osfmk/ppc/trap.c +++ b/osfmk/ppc/trap.c @@ -44,8 +44,11 @@ #include /* for SR_xxx definitions */ #include #include +#include #include #include +#include +#include #include @@ -70,7 +73,7 @@ extern boolean_t db_breakpoints_inserted; extern int debugger_active[NCPUS]; extern task_t bsd_init_task; extern char init_task_failure_data[]; - +extern int not_in_kdp; #define PROT_EXEC (VM_PROT_EXECUTE) #define PROT_RO (VM_PROT_READ) @@ -81,7 +84,7 @@ extern char init_task_failure_data[]; */ #define UPDATE_PPC_EXCEPTION_STATE { \ thread_act_t thr_act = current_act(); \ - thr_act->mact.pcb->save_dar = dar; \ + thr_act->mact.pcb->save_dar = (uint64_t)dar; \ thr_act->mact.pcb->save_dsisr = dsisr; \ thr_act->mact.pcb->save_exception = trapno / T_VECTOR_SIZE; /* back to powerpc */ \ } @@ -89,13 +92,13 @@ extern char init_task_failure_data[]; static void unresolved_kernel_trap(int trapno, struct savearea *ssp, unsigned int dsisr, - unsigned int dar, + addr64_t dar, char *message); struct savearea *trap(int trapno, struct savearea *ssp, unsigned int dsisr, - unsigned int dar) + addr64_t dar) { int exception; int code; @@ -106,12 +109,13 @@ struct savearea *trap(int trapno, unsigned int offset; thread_act_t thr_act; boolean_t intr; + #ifdef MACH_BSD time_value_t tv; #endif /* MACH_BSD */ if(perfTrapHook) { /* Is there a hook? */ - if(perfTrapHook(trapno, ssp, dsisr, dar) == KERN_SUCCESS) return ssp; /* If it succeeds, we are done... */ + if(perfTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp; /* If it succeeds, we are done... */ } #if 0 @@ -141,17 +145,16 @@ struct savearea *trap(int trapno, switch (trapno) { case T_PREEMPT: /* Handle a preempt trap */ - ast_taken(AST_PREEMPT, FALSE); + ast_taken(AST_PREEMPTION, FALSE); break; + case T_PERF_MON: + perfmon_handle_pmi(ssp); + break; + case T_RESET: /* Reset interruption */ -#if 0 - kprintf("*** Reset exception ignored; srr0 = %08X, srr1 = %08X\n", - ssp->save_srr0, ssp->save_srr1); -#else - panic("Unexpected Reset exception; srr0 = %08X, srr1 = %08X\n", - ssp->save_srr0, ssp->save_srr1); -#endif + if (!Call_Debugger(trapno, ssp)) + unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); break; /* We just ignore these */ /* @@ -171,10 +174,40 @@ struct savearea *trap(int trapno, case T_FP_UNAVAILABLE: case T_IO_ERROR: case T_RESERVED: - case T_ALIGNMENT: default: unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); break; + + + case T_ALIGNMENT: +/* +* If enaNotifyEMb is set, we get here, and +* we have actually already emulated the unaligned access. +* All that we want to do here is to ignore the interrupt. 
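The T_ALIGNMENT and T_EMULATE trace points that follow record ssp->save_srr0 - 4: by the time trap() runs, the low-level handler has already emulated the instruction and advanced the PC, so the trace rewinds one word to point at the offender. A hypothetical kdebug consumer would read the record as below (field names per sys/kdebug.h; the argument order is this patch's):

	uint32_t fault_pc    = (uint32_t)kd.arg1;  /* srr0 - 4: emulated instruction */
	uint32_t fault_ea    = (uint32_t)kd.arg2;  /* DAR, or save_misc2 for T_EMULATE */
	uint32_t fault_dsisr = (uint32_t)kd.arg3;
	uint32_t fault_lr    = (uint32_t)kd.arg4;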
This is to allow logging or +* tracing of unaligned accesses. +*/ + + KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE, + (int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0); + break; + + case T_EMULATE: +/* +* If enaNotifyEMb is set we get here, and +* we have actually already emulated the instruction. +* All that we want to do here is to ignore the interrupt. This is to allow logging or +* tracing of emulated instructions. +*/ + + KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE, + (int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0); + break; + + + + case T_TRACE: case T_RUNMODE_TRACE: @@ -194,7 +227,6 @@ struct savearea *trap(int trapno, break; case T_DATA_ACCESS: - #if MACH_KDB mp_disable_preemption(); if (debug_mode @@ -207,29 +239,40 @@ struct savearea *trap(int trapno, } mp_enable_preemption(); #endif /* MACH_KDB */ + /* can we take this during normal panic dump operation? */ + if (debug_mode + && debugger_active[cpu_number()] + && !not_in_kdp) { + /* + * Access fault while in kernel core dump. + */ + kdp_dump_trap(trapno, ssp); + } - if(intr) ml_set_interrupts_enabled(TRUE); /* Enable if we were */ - /* simple case : not SR_COPYIN segment, from kernel */ - if ((dar >> 28) != SR_COPYIN_NUM) { - map = kernel_map; + if(ssp->save_dsisr & dsiInvMode) { /* Did someone try to reserve cache inhibited? */ + panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar); + } - offset = dar; + if(intr) ml_set_interrupts_enabled(TRUE); /* Enable if we were */ + + if(((dar >> 28) < 0xE) | ((dar >> 28) > 0xF)) { /* Is this a copy in/out? */ + + offset = (unsigned int)dar; /* Set the failing address */ + map = kernel_map; /* No, this is a normal kernel access */ - /* * Note: Some ROM device drivers will access page 0 when they start. The IOKit will * set a flag to tell us to ignore any access fault on page 0. After the driver is * opened, it will clear the flag. */ - if((0 == (dar & -PAGE_SIZE)) && /* Check for access of page 0 and */ - ((thr_act->mact.specFlags) & ignoreZeroFault)) { - /* special case of ignoring page zero faults */ - ssp->save_srr0 += 4; /* Point to next instruction */ + if((0 == (offset & -PAGE_SIZE)) && /* Check for access of page 0 and */ + ((thr_act->mact.specFlags) & ignoreZeroFault)) { /* special case of ignoring page zero faults */ + ssp->save_srr0 += 4; /* Point to next instruction */ break; } - code = vm_fault(map, trunc_page(offset), + code = vm_fault(map, trunc_page_32(offset), dsisr & MASK(DSISR_WRITE) ? 
PROT_RW : PROT_RO, FALSE, THREAD_UNINT, NULL, 0); @@ -237,7 +280,8 @@ struct savearea *trap(int trapno, unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); } else { ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */ - ssp->save_dsisr |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */ + ssp->save_dsisr = (ssp->save_dsisr & + ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH); /* Make sure this is marked as a miss */ } break; } @@ -245,13 +289,10 @@ struct savearea *trap(int trapno, /* If we get here, the fault was due to a copyin/out */ map = thr_act->map; + + offset = (unsigned int)(thr_act->mact.cioRelo + dar); /* Compute the user space address */ - /* Mask out SR_COPYIN and mask in original segment */ - - offset = (dar & 0x0fffffff) | - ((mfsrin(dar)<<8) & 0xF0000000); - - code = vm_fault(map, trunc_page(offset), + code = vm_fault(map, trunc_page_32(offset), dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO, FALSE, THREAD_UNINT, NULL, 0); @@ -273,7 +314,8 @@ struct savearea *trap(int trapno, } else { ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */ - ssp->save_dsisr |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */ + ssp->save_dsisr = (ssp->save_dsisr & + ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH); /* Make sure this is marked as a miss */ } break; @@ -299,14 +341,15 @@ struct savearea *trap(int trapno, map = kernel_map; - code = vm_fault(map, trunc_page(ssp->save_srr0), + code = vm_fault(map, trunc_page_64(ssp->save_srr0), PROT_EXEC, FALSE, THREAD_UNINT, NULL, 0); if (code != KERN_SUCCESS) { unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); } else { ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */ - ssp->save_srr1 |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */ + ssp->save_srr1 = (ssp->save_srr1 & + ~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH); /* Make sure this is marked as a miss */ } break; @@ -346,6 +389,10 @@ struct savearea *trap(int trapno, unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); break; + case T_PERF_MON: + perfmon_handle_pmi(ssp); + break; + /* * These trap types should never be seen by trap() * Some are interrupts that should be seen by @@ -365,40 +412,41 @@ struct savearea *trap(int trapno, ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */ - panic("Unexpected user state trap(cpu %d): 0x%08x DSISR=0x%08x DAR=0x%08x PC=0x%08x, MSR=0x%08x\n", + panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n", cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1); break; case T_RESET: -#if 0 - kprintf("*** Reset exception ignored; srr0 = %08X, srr1 = %08X\n", - ssp->save_srr0, ssp->save_srr1); -#else - panic("Unexpected Reset exception: srr0 = %0x08x, srr1 = %0x08x\n", - ssp->save_srr0, ssp->save_srr1); -#endif + ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */ + if (!Call_Debugger(trapno, ssp)) + panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n", + ssp->save_srr0, ssp->save_srr1); break; /* We just ignore these */ case T_ALIGNMENT: /* -* If notifyUnaligned is set, we have actually already emulated the unaligned access. +* If enaNotifyEMb is set, we get here, and +* we have actually already emulated the unaligned access. * All that we want to do here is to ignore the interrupt. This is to allow logging or -* tracing of unaligned accesses. 
Note that if trapUnaligned is also set, it takes -* precedence and we will take a bad access fault. +* tracing of unaligned accesses. */ - - if(thr_act->mact.specFlags & notifyUnalign) { - - KERNEL_DEBUG_CONSTANT( - MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE, - (int)ssp->save_srr0, (int)dar, (int)dsisr, (int)ssp->save_lr, 0); - } - if((!(thr_act->mact.specFlags & notifyUnalign)) || (thr_act->mact.specFlags & trapUnalign)) { - code = EXC_PPC_UNALIGNED; - exception = EXC_BAD_ACCESS; - subcode = dar; - } + KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE, + (int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0); + break; + + case T_EMULATE: +/* +* If enaNotifyEMb is set we get here, and +* we have actually already emulated the instruction. +* All that we want to do here is to ignore the interrupt. This is to allow logging or +* tracing of emulated instructions. +*/ + + KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE, + (int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0); break; case T_TRACE: /* Real PPC chips */ @@ -408,11 +456,10 @@ struct savearea *trap(int trapno, } /* fall through */ - case T_INSTRUCTION_BKPT: /* 603 PPC chips */ - case T_RUNMODE_TRACE: /* 601 PPC chips */ + case T_INSTRUCTION_BKPT: exception = EXC_BREAKPOINT; code = EXC_PPC_TRACE; - subcode = ssp->save_srr0; + subcode = (unsigned int)ssp->save_srr0; break; case T_PROGRAM: @@ -431,19 +478,32 @@ struct savearea *trap(int trapno, UPDATE_PPC_EXCEPTION_STATE exception = EXC_BAD_INSTRUCTION; code = EXC_PPC_UNIPL_INST; - subcode = ssp->save_srr0; - } else if (ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) { + subcode = (unsigned int)ssp->save_srr0; + } else if ((unsigned int)ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) { UPDATE_PPC_EXCEPTION_STATE; exception = EXC_BAD_INSTRUCTION; code = EXC_PPC_PRIVINST; - subcode = ssp->save_srr0; + subcode = (unsigned int)ssp->save_srr0; } else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) { unsigned int inst; - - if (copyin((char *) ssp->save_srr0, (char *) &inst, 4 )) - panic("copyin failed\n"); + char *iaddr; + + iaddr = CAST_DOWN(char *, ssp->save_srr0); /* Trim from long long and make a char pointer */ + if (copyin(iaddr, (char *) &inst, 4 )) panic("copyin failed\n"); + + if(dgWork.dgFlags & enaDiagTrap) { /* Is the diagnostic trap enabled? */ + if((inst & 0xFFFFFFF0) == 0x0FFFFFF0) { /* Is this a TWI 31,R31,0xFFFx? */ + if(diagTrap(ssp, inst & 0xF)) { /* Call the trap code */ + ssp->save_srr0 += 4ULL; /* If we eat the trap, bump pc */ + exception = 0; /* Clear exception */ + break; /* All done here */ + } + } + } + UPDATE_PPC_EXCEPTION_STATE; + if (inst == 0x7FE00008) { exception = EXC_BREAKPOINT; code = EXC_PPC_BREAKPOINT; @@ -451,7 +511,7 @@ struct savearea *trap(int trapno, exception = EXC_SOFTWARE; code = EXC_PPC_TRAP; } - subcode = ssp->save_srr0; + subcode = (unsigned int)ssp->save_srr0; } break; @@ -459,23 +519,31 @@ struct savearea *trap(int trapno, UPDATE_PPC_EXCEPTION_STATE; exception = EXC_ARITHMETIC; code = EXC_PPC_ALTIVECASSIST; - subcode = ssp->save_srr0; + subcode = (unsigned int)ssp->save_srr0; break; case T_DATA_ACCESS: map = thr_act->map; + + if(ssp->save_dsisr & dsiInvMode) { /* Did someone try to reserve cache inhibited? 
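(Presumably a lwarx/stwcx.-style reservation made to cache-inhibited space,
which the architecture disallows; the low-level DSI handler reports it via
the dsiInvMode bit in the saved DSISR, so the fault is turned into
EXC_BAD_ACCESS below rather than being handed to vm_fault.)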
*/ + UPDATE_PPC_EXCEPTION_STATE; /* Don't even bother VM with this one */ + exception = EXC_BAD_ACCESS; + subcode = (unsigned int)dar; + break; + } - code = vm_fault(map, trunc_page(dar), + code = vm_fault(map, trunc_page_64(dar), dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO, FALSE, THREAD_ABORTSAFE, NULL, 0); if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) { UPDATE_PPC_EXCEPTION_STATE; exception = EXC_BAD_ACCESS; - subcode = dar; + subcode = (unsigned int)dar; } else { ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */ - ssp->save_dsisr |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */ + ssp->save_dsisr = (ssp->save_dsisr & + ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH); /* Make sure this is marked as a miss */ } break; @@ -485,16 +553,17 @@ struct savearea *trap(int trapno, */ map = thr_act->map; - code = vm_fault(map, trunc_page(ssp->save_srr0), + code = vm_fault(map, trunc_page_64(ssp->save_srr0), PROT_EXEC, FALSE, THREAD_ABORTSAFE, NULL, 0); if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) { UPDATE_PPC_EXCEPTION_STATE; exception = EXC_BAD_ACCESS; - subcode = ssp->save_srr0; + subcode = (unsigned int)ssp->save_srr0; } else { ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */ - ssp->save_srr1 |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */ + ssp->save_srr1 = (ssp->save_srr1 & + ~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH); /* Make sure this is marked as a miss */ } break; @@ -516,6 +585,7 @@ struct savearea *trap(int trapno, if (exception) { /* if this is the init task, save the exception information */ /* this probably is a fatal exception */ +#if 0 if(bsd_init_task == current_task()) { char *buf; int i; @@ -524,7 +594,7 @@ struct savearea *trap(int trapno, buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode); - buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%08x\n" + buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%016llx\n" , dsisr, dar); for (i=0; i<32; i++) { @@ -535,12 +605,12 @@ struct savearea *trap(int trapno, } buf += sprintf(buf, "\n\n"); - buf += sprintf(buf, "cr = 0x%08x\t\t",ssp->save_cr); - buf += sprintf(buf, "xer = 0x%08x\n",ssp->save_xer); - buf += sprintf(buf, "lr = 0x%08x\t\t",ssp->save_lr); - buf += sprintf(buf, "ctr = 0x%08x\n",ssp->save_ctr); - buf += sprintf(buf, "srr0(iar) = 0x%08x\t\t",ssp->save_srr0); - buf += sprintf(buf, "srr1(msr) = 0x%08B\n",ssp->save_srr1, + buf += sprintf(buf, "cr = 0x%08X\t\t",ssp->save_cr); + buf += sprintf(buf, "xer = 0x%08X\n",ssp->save_xer); + buf += sprintf(buf, "lr = 0x%016llX\t\t",ssp->save_lr); + buf += sprintf(buf, "ctr = 0x%016llX\n",ssp->save_ctr); + buf += sprintf(buf, "srr0(iar) = 0x%016llX\t\t",ssp->save_srr0); + buf += sprintf(buf, "srr1(msr) = 0x%016llX\n",ssp->save_srr1, "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18" "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT"); buf += sprintf(buf, "\n\n"); @@ -555,7 +625,7 @@ struct savearea *trap(int trapno, break; if (!copyin(addr,(char*)stack_buf, 3 * sizeof(int))) { - buf += sprintf(buf, "0x%08x : 0x%08x\n" + buf += sprintf(buf, "0x%08X : 0x%08X\n" ,addr,stack_buf[2]); addr = (char*)stack_buf[0]; } else { @@ -565,6 +635,7 @@ struct savearea *trap(int trapno, } buf[0] = '\0'; } +#endif doexception(exception, code, subcode); } /* AST delivery @@ -592,22 +663,23 @@ extern int pmdebug; int syscall_trace(int retval, struct savearea *ssp) { int i, argc; - int kdarg[3]; - /* Always prepare to trace mach system calls */ - if 
(kdebug_enable && (ssp->save_r0 & 0x80000000)) { - /* Mach trap */ - kdarg[0]=0; - kdarg[1]=0; - kdarg[2]=0; - argc = mach_trap_table[-(ssp->save_r0)].mach_trap_arg_count; - if (argc > 3) - argc = 3; - for (i=0; i < argc; i++) - kdarg[i] = (int)*(&ssp->save_r3 + i); - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START, - kdarg[0], kdarg[1], kdarg[2], 0, 0); - } +/* Always prepare to trace mach system calls */ + + kdarg[0]=0; + kdarg[1]=0; + kdarg[2]=0; + + argc = mach_trap_table[-((unsigned int)ssp->save_r0)].mach_trap_arg_count; + + if (argc > 3) + argc = 3; + + for (i=0; i < argc; i++) + kdarg[i] = (int)*(&ssp->save_r3 + i); + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START, + kdarg[0], kdarg[1], kdarg[2], 0, 0); return retval; } @@ -620,11 +692,8 @@ extern int syscall_trace_end(int, struct savearea *); int syscall_trace_end(int retval, struct savearea *ssp) { - if (kdebug_enable && (ssp->save_r0 & 0x80000000)) { - /* Mach trap */ - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(ssp->save_r0))) | DBG_FUNC_END, - retval, 0, 0, 0, 0); - } + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-((unsigned int)ssp->save_r0))) | DBG_FUNC_END, + retval, 0, 0, 0, 0); return retval; } @@ -700,7 +769,7 @@ char *trap_type[] = { "INVALID EXCEPTION", "INVALID EXCEPTION", "INVALID EXCEPTION", - "INVALID EXCEPTION", + "Emulate", "0x2000 - Run Mode/Trace", "Signal Processor", "Preemption", @@ -713,7 +782,7 @@ int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]); void unresolved_kernel_trap(int trapno, struct savearea *ssp, unsigned int dsisr, - unsigned int dar, + addr64_t dar, char *message) { char *trap_name; @@ -734,7 +803,7 @@ void unresolved_kernel_trap(int trapno, if (message == NULL) message = trap_name; - kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%08x PC=0x%08x\n", + kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n", cpu_number(), trap_name, dar, ssp->save_srr0); print_backtrace(ssp); @@ -753,7 +822,7 @@ thread_syscall_return( register thread_act_t thr_act = current_act(); register struct savearea *regs = USER_REGS(thr_act); - if (kdebug_enable && (regs->save_r0 & 0x80000000)) { + if (kdebug_enable && ((unsigned int)regs->save_r0 & 0x80000000)) { /* Mach trap */ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->save_r0))) | DBG_FUNC_END, ret, 0, 0, 0, 0); diff --git a/osfmk/ppc/trap.h b/osfmk/ppc/trap.h index cd3525a0a..31c2026f3 100644 --- a/osfmk/ppc/trap.h +++ b/osfmk/ppc/trap.h @@ -78,12 +78,13 @@ extern void thread_exception_return(void); extern struct savearea* trap(int trapno, struct savearea *ss, unsigned int dsisr, - unsigned int dar); + addr64_t dar); typedef kern_return_t (*perfTrap)(int trapno, struct savearea *ss, - unsigned int dsisr, unsigned int dar); + unsigned int dsisr, addr64_t dar); extern perfTrap perfTrapHook; +extern perfTrap perfIntHook; extern struct savearea* interrupt(int intno, struct savearea *ss, diff --git a/osfmk/ppc/vmachmon.c b/osfmk/ppc/vmachmon.c index 9fd754cb1..0343e8b0a 100644 --- a/osfmk/ppc/vmachmon.c +++ b/osfmk/ppc/vmachmon.c @@ -27,9 +27,6 @@ ** ** C routines that we are adding to the MacOS X kernel. ** -** Weird Apple PSL stuff goes here... 
-** -** Until then, Copyright 2000, Connectix -----------------------------------------------------------------------*/ #include @@ -43,7 +40,6 @@ #include #include #include -#include #include #include @@ -76,8 +72,10 @@ vmmCntrlEntry *vmm_get_entry( vmmCntrlTable *CTable; vmmCntrlEntry *CEntry;
+ index = index & vmmTInum; /* Clean up the index */
+ if (act->mact.vmmControl == 0) return NULL; /* No control table means no vmm */
- if ((index - 1) >= kVmmMaxContextsPerThread) return NULL; /* Index not in range */
+ if ((index - 1) >= kVmmMaxContexts) return NULL; /* Index not in range */
CTable = act->mact.vmmControl; /* Make the address a bit more convenient */ CEntry = &CTable->vmmc[index - 1]; /* Point to the entry */ @@ -87,6 +85,39 @@ return CEntry; }
+/*-----------------------------------------------------------------------
+** vmm_get_adsp
+**
+** This function verifies and returns the pmap for an address space.
+** If there is none and the request is valid, a pmap will be created.
+**
+** Inputs:
+** act - pointer to current thread activation
+** index - index into vmm control table (this is a "one based" value)
+**
+** Outputs:
+** address of a pmap or 0 if not found or could not be created
+** Note that if there is no pmap for the address space it will be created.
-----------------------------------------------------------------------*/
+
+pmap_t vmm_get_adsp(thread_act_t act, vmm_thread_index_t index)
+{
+ pmap_t pmap;
+
+ if (act->mact.vmmControl == 0) return NULL; /* No control table means no vmm */
+ if ((index - 1) >= kVmmMaxContexts) return NULL; /* Index not in range */
+
+ pmap = act->mact.vmmControl->vmmAdsp[index - 1]; /* Get the pmap */
+ if(pmap) return pmap; /* We've got it... */
+
+ pmap = pmap_create(0); /* Make a fresh one */
+ act->mact.vmmControl->vmmAdsp[index - 1] = pmap; /* Remember it */
+/*
+ * Note that if the create fails, we will return a null.
+ */
+ return pmap; /* Return it... */
+}
+
/************************************************************************************* @@ -140,10 +171,60 @@ int vmm_get_version(struct savearea *save) int vmm_get_features(struct savearea *save) { save->save_r3 = kVmmCurrentFeatures; /* Return the features */
+ if(per_proc_info->pf.Available & pf64Bit) {
+ save->save_r3 &= ~kVmmFeature_LittleEndian; /* No little endian here */
+ save->save_r3 |= kVmmFeature_SixtyFourBit; /* Set that we can do 64-bit */
+ }
return 1; }
+/*-----------------------------------------------------------------------
+** vmm_max_addr
+**
+** This function returns the maximum addressable virtual address supported
+**
+** Outputs:
+** Returns max address
+-----------------------------------------------------------------------*/
+
+addr64_t vmm_max_addr(thread_act_t act)
+{
+ return vm_max_address; /* Return the maximum address */
+}
+
+/*-----------------------------------------------------------------------
+** vmm_get_XA
+**
+** This function retrieves the eXtended Architecture flags for the specified VM.
+**
+** We need to return the result in the return code rather than in the return parameters
+** because we need an architecture independent format so the results are actually
+** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
+** 4 for 32-bit.
+**
+**
+** Inputs:
+** act - pointer to current thread activation structure
+** index - index returned by vmm_init_context
+**
+** Outputs:
+** Return code is set to the XA flags.
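+** (A minimal host-side sketch; the caller shown here is hypothetical and not
+** part of this patch:
+**
+**     unsigned int xa = vmm_get_XA(current_act(), index);
+**     if (xa & vmm64Bit) {
+**         /* guest registers use the 64-bit vmm_regs64_t layout */
+**     }
+**
+** vmm64Bit and vmm_regs64_t are defined in the vmachmon.h changes below.)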
If the index is invalid or the +** context has not been created, we return 0. +-----------------------------------------------------------------------*/ + +unsigned int vmm_get_XA( + thread_act_t act, + vmm_thread_index_t index) +{ + vmmCntrlEntry *CEntry; + + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return 0; /* Either this isn't a vmm or the index is bogus */ + + return CEntry->vmmXAFlgs; /* Return the flags */ +} + /*----------------------------------------------------------------------- ** vmm_init_context ** @@ -173,14 +254,14 @@ int vmm_init_context(struct savearea *save) vmmCntrlTable *CTable; vm_offset_t conkern; vmm_state_page_t * vks; - vm_offset_t conphys; + ppnum_t conphys; kern_return_t ret; pmap_t new_pmap; int cvi, i; task_t task; thread_act_t fact, gact; - vmm_user_state = (vmm_state_page_t *)save->save_r4; /* Get the user address of the comm area */ + vmm_user_state = CAST_DOWN(vmm_state_page_t *, save->save_r4); /* Get the user address of the comm area */ if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) { /* Make sure the comm area is page aligned */ save->save_r3 = KERN_FAILURE; /* Return failure */ return 1; @@ -206,15 +287,15 @@ int vmm_init_context(struct savearea *save) task_lock(task); /* Lock our task */ - fact = (thread_act_t)task->thr_acts.next; /* Get the first activation on task */ + fact = (thread_act_t)task->threads.next; /* Get the first activation on task */ gact = 0; /* Pretend we didn't find it yet */ - for(i = 0; i < task->thr_act_count; i++) { /* All of the activations */ + for(i = 0; i < task->thread_count; i++) { /* All of the activations */ if(fact->mact.vmmControl) { /* Is this a virtual machine monitor? */ gact = fact; /* Yeah... */ break; /* Bail the loop... */ } - fact = (thread_act_t)fact->thr_acts.next; /* Go to the next one */ + fact = (thread_act_t)fact->task_threads.next; /* Go to the next one */ } @@ -251,11 +332,11 @@ int vmm_init_context(struct savearea *save) act->mact.vmmControl = CTable; /* Initialize the table anchor */ } - for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) { /* Search to find a free slot */ + for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Search to find a free slot */ if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break; /* Bail if we find an unused slot */ } - if(cvi >= kVmmMaxContextsPerThread) { /* Did we find one? */ + if(cvi >= kVmmMaxContexts) { /* Did we find one? */ ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ save->save_r3 = KERN_RESOURCE_SHORTAGE; /* No empty slots... */ return 1; @@ -272,7 +353,7 @@ int vmm_init_context(struct savearea *save) goto return_in_shame; /* Map the vmm state into the kernel's address space. */ - conphys = pmap_extract(act->map->pmap, (vm_offset_t)vmm_user_state); + conphys = pmap_find_phys(act->map->pmap, (addr64_t)((uintptr_t)vmm_user_state)); /* Find a virtual address to use. */ ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE); @@ -285,6 +366,7 @@ int vmm_init_context(struct savearea *save) } /* Map it into the kernel's address space. */ + pmap_enter(kernel_pmap, conkern, conphys, VM_PROT_READ | VM_PROT_WRITE, VM_WIMG_USE_DEFAULT, TRUE); @@ -293,17 +375,6 @@ int vmm_init_context(struct savearea *save) vks = (vmm_state_page_t *)conkern; bzero((char *)vks, PAGE_SIZE); - /* Allocate a new pmap for the new vmm context. 
*/ - new_pmap = pmap_create(0); - if (new_pmap == PMAP_NULL) { - (void) vm_map_unwire(act->map, /* Couldn't get a pmap, unwire the user page */ - (vm_offset_t)vmm_user_state, - (vm_offset_t)vmm_user_state + PAGE_SIZE, - TRUE); - - kmem_free(kernel_map, conkern, PAGE_SIZE); /* Release the kernel address */ - goto return_in_shame; - } /* We're home free now. Simply fill in the necessary info and return. */ @@ -311,7 +382,6 @@ int vmm_init_context(struct savearea *save) vks->thread_index = cvi + 1; /* Tell the user the index for this virtual machine */ CTable->vmmc[cvi].vmmFlags = vmmInUse; /* Mark the slot in use and make sure the rest are clear */ - CTable->vmmc[cvi].vmmPmap = new_pmap; /* Remember the pmap for this guy */ CTable->vmmc[cvi].vmmContextKern = vks; /* Remember the kernel address of comm area */ CTable->vmmc[cvi].vmmContextPhys = (vmm_state_page_t *)conphys; /* Remember the state page physical addr */ CTable->vmmc[cvi].vmmContextUser = vmm_user_state; /* Remember user address of comm area */ @@ -326,9 +396,9 @@ int vmm_init_context(struct savearea *save) hw_atomic_add((int *)&saveanchor.savetarget, 2); /* Account for the number of extra saveareas we think we might "need" */ - if (!(act->map->pmap->vflags & pmapVMhost)) { + if (!(act->map->pmap->pmapFlags & pmapVMhost)) { simple_lock(&(act->map->pmap->lock)); - act->map->pmap->vflags |= pmapVMhost; + act->map->pmap->pmapFlags |= pmapVMhost; simple_unlock(&(act->map->pmap->lock)); } @@ -358,6 +428,12 @@ return_in_shame: ** ** Outputs: ** kernel return code indicating success or failure +** +** Strangeness note: +** This call will also trash the address space with the same ID. While this +** is really not too cool, we have to do it because we need to make +** sure that old VMM users (not that we really have any) who depend upon +** the address space going away with the context still work the same. 
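+** (For reference, the per-address-space cleanup below reduces to roughly the
+** following; this is a paraphrase of the code that follows, not an addition:
+**
+**     pmap_t p = act->mact.vmmControl->vmmAdsp[index - 1];
+**     if (p) {
+**         mapping_remove(p, 0xFFFFFFFFFFFFF000LL);  /* top page; the pmap code skips it */
+**         pmap_remove(p, 0, 0xFFFFFFFFFFFFF000LL);
+**         pmap_destroy(p);
+**     })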
-----------------------------------------------------------------------*/ kern_return_t vmm_tear_down_context( @@ -385,11 +461,14 @@ kern_return_t vmm_tear_down_context( toss_live_vec(&CEntry->vmmFacCtx); /* Get rid of any live context here */ save_release((savearea *)CEntry->vmmFacCtx.VMXsave); /* Release it */ } - - mapping_remove(CEntry->vmmPmap, 0xFFFFF000); /* Remove final page explicitly because we might have mapped it */ - pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000); /* Remove all entries from this map */ - pmap_destroy(CEntry->vmmPmap); /* Toss the pmap for this context */ - CEntry->vmmPmap = NULL; /* Clean it up */ + + CEntry->vmmPmap = 0; /* Remove this trace */ + if(act->mact.vmmControl->vmmAdsp[index - 1]) { /* Check if there is an address space assigned here */ + mapping_remove(act->mact.vmmControl->vmmAdsp[index - 1], 0xFFFFFFFFFFFFF000LL); /* Remove final page explicitly because we might have mapped it */ + pmap_remove(act->mact.vmmControl->vmmAdsp[index - 1], 0, 0xFFFFFFFFFFFFF000LL); /* Remove all entries from this map */ + pmap_destroy(act->mact.vmmControl->vmmAdsp[index - 1]); /* Toss the pmap for this context */ + act->mact.vmmControl->vmmAdsp[index - 1] = NULL; /* Clean it up */ + } (void) vm_map_unwire( /* Unwire the user comm page */ act->map, @@ -399,8 +478,10 @@ kern_return_t vmm_tear_down_context( kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE); /* Remove kernel's view of the comm page */ + CTable = act->mact.vmmControl; /* Get the control table address */ + CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp; /* Make sure we don't try to automap into this */ + CEntry->vmmFlags = 0; /* Clear out all of the flags for this entry including in use */ - CEntry->vmmPmap = 0; /* Clear pmap pointer */ CEntry->vmmContextKern = 0; /* Clear the kernel address of comm area */ CEntry->vmmContextUser = 0; /* Clear the user address of comm area */ @@ -412,14 +493,26 @@ kern_return_t vmm_tear_down_context( CEntry->vmmFacCtx.VMXcpu = 0; /* Clear facility context control */ CEntry->vmmFacCtx.facAct = 0; /* Clear facility context control */ - CTable = act->mact.vmmControl; /* Get the control table address */ - for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) { /* Search to find a free slot */ + for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Search to find a free slot */ if(CTable->vmmc[cvi].vmmFlags & vmmInUse) { /* Return if there are still some in use */ ml_set_interrupts_enabled(FALSE); /* No more interruptions */ return KERN_SUCCESS; /* Leave... 
*/ } }
+/*
+ * When we have tossed the last context, toss any address spaces left over before releasing
+ * the VMM control block
+ */
+
+ for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) { /* Look at all slots */
+ if(!act->mact.vmmControl->vmmAdsp[index - 1]) continue; /* Nothing to remove here */
+ mapping_remove(act->mact.vmmControl->vmmAdsp[index - 1], 0xFFFFFFFFFFFFF000LL); /* Remove final page explicitly because we might have mapped it */
+ pmap_remove(act->mact.vmmControl->vmmAdsp[index - 1], 0, 0xFFFFFFFFFFFFF000LL); /* Remove all entries from this map */
+ pmap_destroy(act->mact.vmmControl->vmmAdsp[index - 1]); /* Toss the pmap for this context */
+ act->mact.vmmControl->vmmAdsp[index - 1] = 0; /* Clear just in case */
+ }
+ kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable)); /* Toss the table because we tossed the last context */
act->mact.vmmControl = 0; /* Unmark us as vmm */ @@ -428,6 +521,83 @@ return KERN_SUCCESS; }
+
+/*-----------------------------------------------------------------------
+** vmm_set_XA
+**
+** This function sets the eXtended Architecture flags for the specified VM.
+**
+** We need to return the result in the return code rather than in the return parameters
+** because we need an architecture independent format so the results are actually
+** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
+** 4 for 32-bit.
+**
+** Note that this function does a lot of the same stuff as vmm_tear_down_context
+** and vmm_init_context.
+**
+** Inputs:
+** act - pointer to current thread activation structure
+** index - index returned by vmm_init_context
+** flags - the extended architecture flags
+**
+**
+** Outputs:
+** KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not.
+** Also, the internal flags are set and, additionally, the VM is completely reset. -----------------------------------------------------------------------*/
+
+kern_return_t vmm_set_XA(
+ thread_act_t act,
+ vmm_thread_index_t index,
+ unsigned int xaflags)
+{
+ vmmCntrlEntry *CEntry;
+ vmmCntrlTable *CTable;
+ vmm_state_page_t *vks;
+ vmm_version_t version;
+
+ if(xaflags & ~vmm64Bit) return KERN_FAILURE; /* We only support this one kind now */
+
+ CEntry = vmm_get_entry(act, index); /* Convert index to entry */
+ if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't a vmm or the index is bogus */
+
+ ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */
+
+ if(CEntry->vmmFacCtx.FPUsave) { /* Is there any floating point context? */
+ toss_live_fpu(&CEntry->vmmFacCtx); /* Get rid of any live context here */
+ save_release((savearea *)CEntry->vmmFacCtx.FPUsave); /* Release it */
+ }
+
+ if(CEntry->vmmFacCtx.VMXsave) { /* Is there any vector context?
*/
+ toss_live_vec(&CEntry->vmmFacCtx); /* Get rid of any live context here */
+ save_release((savearea *)CEntry->vmmFacCtx.VMXsave); /* Release it */
+ }
+
+ CTable = act->mact.vmmControl; /* Get the control table address */
+ CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp; /* Make sure we don't try to automap into this */
+
+ CEntry->vmmFlags &= vmmInUse; /* Clear out all of the flags for this entry except in use */
+ CEntry->vmmXAFlgs = (xaflags & vmm64Bit) | (CEntry->vmmXAFlgs & ~vmm64Bit); /* Set the XA flags */
+ CEntry->vmmFacCtx.FPUsave = 0; /* Clear facility context control */
+ CEntry->vmmFacCtx.FPUlevel = 0; /* Clear facility context control */
+ CEntry->vmmFacCtx.FPUcpu = 0; /* Clear facility context control */
+ CEntry->vmmFacCtx.VMXsave = 0; /* Clear facility context control */
+ CEntry->vmmFacCtx.VMXlevel = 0; /* Clear facility context control */
+ CEntry->vmmFacCtx.VMXcpu = 0; /* Clear facility context control */
+
+ vks = CEntry->vmmContextKern; /* Get address of the context page */
+ version = vks->interface_version; /* Save the version code */
+ bzero((char *)vks, 4096); /* Clear all */
+
+ vks->interface_version = version; /* Set our version code */
+ vks->thread_index = index % vmmTInum; /* Tell the user the index for this virtual machine */
+
+ ml_set_interrupts_enabled(FALSE); /* No more interruptions */
+
+ return KERN_SUCCESS; /* Return the flags */
+}
+
+ /*----------------------------------------------------------------------- ** vmm_tear_down_all ** @@ -468,7 +638,8 @@ void vmm_tear_down_all(thread_act_t act) { if(CTable = act->mact.vmmControl) { /* Do we have a vmm control block? */
- for(cvi = 1; cvi <= kVmmMaxContextsPerThread; cvi++) { /* Look at all slots */
+
+ for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) { /* Look at all slots */
if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) { /* Is this one in use */ ret = vmm_tear_down_context(act, cvi); /* Take down the found context */ if(ret != KERN_SUCCESS) { /* Did it go away? */ @@ -477,6 +648,10 @@ void vmm_tear_down_all(thread_act_t act) { } } }
+
+/*
+ * Note that all address spaces should be gone here.
+ */
if(act->mact.vmmControl) { /* Did we find one? */ panic("vmm_tear_down_all: control table did not get deallocated\n"); /* Table did not go away */ } @@ -489,8 +664,7 @@ void vmm_tear_down_all(thread_act_t act) { ** vmm_map_page ** ** This function maps a page from within the client's logical
-** address space into the alternate address space of the
-** Virtual Machine Monitor context.
+** address space into the alternate address space.
** ** The page need not be locked or resident. If not resident, it will be faulted ** in by this code, which may take some time.
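** (A minimal in-kernel usage sketch, assuming the entry points in this file;
** adsp_index, client_va, and guest_va are hypothetical locals:
**
**     kern_return_t kr = vmm_map_page(act, adsp_index, client_va, guest_va,
**                                     VM_PROT_READ | VM_PROT_WRITE);
**
** A non-KERN_SUCCESS return means the client page could not be faulted in or
** was backed by a block, nested, or other special mapping.)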
Also, if the page is not locked, @@ -505,7 +679,7 @@ void vmm_tear_down_all(thread_act_t act) { ** ** Inputs: ** act - pointer to current thread activation -** index - index of vmm state for this page +** index - index of address space to map into ** va - virtual address within the client's address ** space ** ava - virtual address within the alternate address @@ -525,74 +699,55 @@ void vmm_tear_down_all(thread_act_t act) { kern_return_t vmm_map_page( thread_act_t act, - vmm_thread_index_t index, - vm_offset_t cva, - vm_offset_t ava, + vmm_adsp_id_t index, + addr64_t cva, + addr64_t ava, vm_prot_t prot) { kern_return_t ret; vmmCntrlEntry *CEntry; - vm_offset_t phys_addr; - register mapping *mpv, *mp, *nmpv, *nmp; + register mapping *mp; struct phys_entry *pp; - pmap_t mpmap; vm_map_t map; + addr64_t ova, nextva; + pmap_t pmap; + + pmap = vmm_get_adsp(act, index); /* Get the pmap for this address space */ + if(!pmap) return KERN_FAILURE; /* Bogus address space, no VMs, or we can't make a pmap, failure... */ + + if(ava > vm_max_address) return kVmmInvalidAddress; /* Does the machine support an address of this size? */ - CEntry = vmm_get_entry(act, index); /* Get and validate the index */ - if (CEntry == NULL)return KERN_FAILURE; /* No good, failure... */ - -/* - * Find out if we have already mapped the address and toss it out if so. - */ - mp = hw_lock_phys_vir(CEntry->vmmPmap->space, ava); /* See if there is already a mapping */ - if((unsigned int)mp & 1) { /* Did we timeout? */ - panic("vmm_map_page: timeout locking physical entry for alternate virtual address (%08X)\n", ava); /* Yeah, scream about it! */ - return KERN_FAILURE; /* Bad hair day, return FALSE... */ - } - if(mp) { /* If it was there, toss it */ - mpv = hw_cpv(mp); /* Convert mapping block to virtual */ - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ - (void)mapping_remove(CEntry->vmmPmap, ava); /* Throw away the mapping. we're about to replace it */ - } map = current_act()->map; /* Get the current map */ while(1) { /* Keep trying until we get it or until we fail */ - if(hw_cvp_blk(map->pmap, cva)) return KERN_FAILURE; /* Make sure that there is no block map at this address */ - mp = hw_lock_phys_vir(map->pmap->space, cva); /* Lock the physical entry for emulator's page */ - if((unsigned int)mp&1) { /* Did we timeout? */ - panic("vmm_map_page: timeout locking physical entry for emulator virtual address (%08X)\n", cva); /* Yeah, scream about it! */ - return KERN_FAILURE; /* Bad hair day, return FALSE... */ - } + mp = mapping_find(map->pmap, cva, &nextva, 0); /* Find the mapping for this address */ - if(mp) { /* We found it... */ - mpv = hw_cpv(mp); /* Convert mapping block to virtual */ - - if(!mpv->physent) return KERN_FAILURE; /* If there is no physical entry (e.g., I/O area), we won't map it */ - - if(!(mpv->PTEr & 1)) break; /* If we are writable go ahead and map it... */ - - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the map before we try to fault the write bit on */ - } + if(mp) break; /* We found it */ ml_set_interrupts_enabled(TRUE); /* Enable interruptions */ - ret = vm_fault(map, trunc_page(cva), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0); /* Didn't find it, try to fault it in read/write... */ + ret = vm_fault(map, trunc_page_32((vm_offset_t)cva), VM_PROT_READ | VM_PROT_WRITE, FALSE); /* Didn't find it, try to fault it in read/write... 
*/ ml_set_interrupts_enabled(FALSE); /* Disable interruptions */ if (ret != KERN_SUCCESS) return KERN_FAILURE; /* There isn't a page there, return... */ } -/* - * Now we make a mapping using all of the attributes of the source page except for protection. - * Also specify that the physical entry is locked. - */ - nmpv = mapping_make(CEntry->vmmPmap, mpv->physent, (ava & -PAGE_SIZE), - (mpv->physent->pte1 & -PAGE_SIZE), prot, ((mpv->physent->pte1 >> 3) & 0xF), 1); - - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry now, we're done with it */ + if(mp->mpFlags & (mpBlock | mpNest | mpSpecial)) { /* If this is a block, a nest, or some other special thing, we can't map it */ + mapping_drop_busy(mp); /* We have everything we need from the mapping */ + return KERN_FAILURE; /* Leave in shame */ + } - CEntry->vmmLastMap = ava & -PAGE_SIZE; /* Remember the last mapping we made */ - if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode)) - CEntry->vmmFlags |= vmmMapDone; /* Set that we did a map operation */ + while(1) { /* Keep trying the enter until it goes in */ + ova = mapping_make(pmap, ava, mp->mpPAddr, 0, 1, prot); /* Enter the mapping into the pmap */ + if(!ova) break; /* If there were no collisions, we are done... */ + mapping_remove(pmap, ova); /* Remove the mapping that collided */ + } + + mapping_drop_busy(mp); /* We have everything we need from the mapping */ + + if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode)) { + act->mact.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */ + act->mact.vmmControl->vmmGFlags = (act->mact.vmmControl->vmmGFlags & ~vmmLastAdSp) | index; /* Remember last address space */ + } return KERN_SUCCESS; } @@ -607,6 +762,11 @@ kern_return_t vmm_map_page( ** ** See description of vmm_map_page for details. ** +** Inputs: +** Index is used for both the context and the address space ID. +** index[24:31] is the context id and index[16:23] is the address space. +** if the address space ID is 0, the context ID is used for it. +** ** Outputs: ** Normal exit is to run the VM. 
Abnormal exit is triggered via a ** non-KERN_SUCCESS return from vmm_map_page or later during the @@ -616,76 +776,100 @@ vmm_return_code_t vmm_map_execute( thread_act_t act, vmm_thread_index_t index,
- vm_offset_t cva,
- vm_offset_t ava,
+ addr64_t cva,
+ addr64_t ava,
vm_prot_t prot) { kern_return_t ret; vmmCntrlEntry *CEntry;
+ unsigned int adsp;
+ vmm_thread_index_t cndx;
- CEntry = vmm_get_entry(act, index); /* Get and validate the index */
+ cndx = index & 0xFF; /* Clean it up */
+ CEntry = vmm_get_entry(act, cndx); /* Get and validate the index */
if (CEntry == NULL) return kVmmBogusContext; /* Return bogus context */ if (((per_proc_info[cpu_number()].spcFlags) & FamVMmode) && (CEntry != act->mact.vmmCEntry)) return kVmmBogusContext; /* Yes, invalid index in Fam */
- ret = vmm_map_page(act, index, cva, ava, prot); /* Go try to map the page on in */
+ adsp = (index >> 8) & 0xFF; /* Get any requested address space */
+ if(!adsp) adsp = (index & 0xFF); /* If 0, use context ID as address space ID */
+
+ ret = vmm_map_page(act, adsp, cva, ava, prot); /* Go try to map the page on in */
+
if(ret == KERN_SUCCESS) {
- CEntry->vmmFlags |= vmmMapDone; /* Set that we did a map operation */
- vmm_execute_vm(act, index); /* Return was ok, launch the VM */
+ act->mact.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */
+ act->mact.vmmControl->vmmGFlags = (act->mact.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx; /* Remember last address space */
+ vmm_execute_vm(act, cndx); /* Return was ok, launch the VM */
}
- return kVmmInvalidAddress; /* We had trouble mapping in the page */
+ return ret; /* We had trouble mapping in the page */
} /*----------------------------------------------------------------------- ** vmm_map_list **
-** This function maps a list of pages into the alternate's logical
-** address space.
+** This function maps a list of pages into various address spaces
** ** Inputs: ** act - pointer to current thread activation
-** index - index of vmm state for this page
+** index - index of default address space (used if not specified in list entry)
** count - number of pages to map
+** flavor - 0 if 32-bit version, 1 if 64-bit
** vmcpComm in the comm page contains up to kVmmMaxMapPages to map ** ** Outputs: ** kernel return code indicating success or failure ** KERN_FAILURE is returned if kVmmMaxMapPages is exceeded ** or the vmm_map_page call fails.
+** We return kVmmInvalidAddress if virtual address size is not supported
-----------------------------------------------------------------------*/ kern_return_t vmm_map_list( thread_act_t act,
- vmm_thread_index_t index,
- unsigned int cnt)
+ vmm_adsp_id_t index,
+ unsigned int cnt,
+ unsigned int flavor)
{ vmmCntrlEntry *CEntry; boolean_t ret; unsigned int i;
- vmmMapList *lst;
- vm_offset_t cva;
- vm_offset_t ava;
+ vmmMList *lst;
+ vmmMList64 *lstx;
+ addr64_t cva;
+ addr64_t ava;
vm_prot_t prot;
+ vmm_adsp_id_t adsp;
- CEntry = vmm_get_entry(act, index); /* Get and validate the index */
- if (CEntry == NULL)return -1; /* No good, failure... */
+ CEntry = vmm_get_entry(act, index); /* Convert index to entry */
+ if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't a vmm or the index is bogus */
if(cnt > kVmmMaxMapPages) return KERN_FAILURE; /* They tried to map too many */ if(!cnt) return KERN_SUCCESS; /* If they said none, we're done...
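(Aside: the two list-entry layouts stepped through below. The field widths are
a reading of this patch, not of the vmachmon.h definitions:

    typedef struct vmmMList   { unsigned int vmlva; unsigned int vmlava; } vmmMList;
    typedef struct vmmMList64 { addr64_t vmlva; addr64_t vmlava; } vmmMList64;

In both flavors the low 12 bits of vmlava carry the vmmlProt protection bits
and the vmmlAdID address-space override rather than address bits.)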
*/ - lst = (vmmMapList *)(&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]); /* Point to the first entry */ + lst = (vmmMList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */ + lstx = (vmmMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */ for(i = 0; i < cnt; i++) { /* Step and release all pages in list */ - cva = lst[i].vmlva; /* Get the actual address */ - ava = lst[i].vmlava & -vmlFlgs; /* Get the alternate address */ - prot = lst[i].vmlava & vmlProt; /* Get the protection bits */ + if(flavor) { /* Check if 32- or 64-bit addresses */ + cva = lstx[i].vmlva; /* Get the 64-bit actual address */ + ava = lstx[i].vmlava; /* Get the 64-bit guest address */ + } + else { + cva = lst[i].vmlva; /* Get the 32-bit actual address */ + ava = lst[i].vmlava; /* Get the 32-bit guest address */ + } + + prot = ava & vmmlProt; /* Extract the protection bits */ + adsp = (ava & vmmlAdID) >> 4; /* Extract an explicit address space request */ + if(!adsp) adsp = index - 1; /* If no explicit, use supplied default */ + ava = ava &= 0xFFFFFFFFFFFFF000ULL; /* Clean up the address */ + ret = vmm_map_page(act, index, cva, ava, prot); /* Go try to map the page on in */ - if(ret != KERN_SUCCESS) return KERN_FAILURE; /* Bail if any error */ + if(ret != KERN_SUCCESS) return ret; /* Bail if any error */ } return KERN_SUCCESS ; /* Return... */ @@ -711,45 +895,36 @@ kern_return_t vmm_map_list( ** this call could return the wrong one. Moral of the story: no aliases. -----------------------------------------------------------------------*/ -vm_offset_t vmm_get_page_mapping( +addr64_t vmm_get_page_mapping( thread_act_t act, - vmm_thread_index_t index, - vm_offset_t va) + vmm_adsp_id_t index, + addr64_t va) { vmmCntrlEntry *CEntry; - vm_offset_t ova; - register mapping *mpv, *mp, *nmpv, *nmp; + register mapping *mp; pmap_t pmap; + addr64_t nextva, sva; + ppnum_t pa; - CEntry = vmm_get_entry(act, index); /* Get and validate the index */ - if (CEntry == NULL)return -1; /* No good, failure... */ + pmap = vmm_get_adsp(act, index); /* Get and validate the index */ + if (!pmap)return -1; /* No good, failure... */ - mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va); /* Look up the mapping */ - if((unsigned int)mp & 1) { /* Did we timeout? */ - panic("vmm_get_page_mapping: timeout locking physical entry for alternate virtual address (%08X)\n", va); /* Yeah, scream about it! */ - return -1; /* Bad hair day, return FALSE... */ - } + mp = mapping_find(pmap, va, &nextva, 0); /* Find our page */ + if(!mp) return -1; /* Not mapped, return -1 */ - mpv = hw_cpv(mp); /* Convert mapping block to virtual */ - pmap = current_act()->map->pmap; /* Get the current pmap */ - ova = -1; /* Assume failure for now */ + pa = mp->mpPAddr; /* Remember the page address */ + + mapping_drop_busy(mp); /* Go ahead and relase the mapping now */ - for(nmpv = hw_cpv(mpv->physent->phys_link); nmpv; nmpv = hw_cpv(nmpv->next)) { /* Scan 'em all */ - - if(nmpv->pmap != pmap) continue; /* Skip all the rest if this is not the right pmap... 
*/ - - ova = ((((unsigned int)nmpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000; /* Backward hash to the wrapped VADDR */ - ova = ova | ((nmpv->PTEv << 1) & 0xF0000000); /* Move in the segment number */ - ova = ova | ((nmpv->PTEv << 22) & 0x0FC00000); /* Add in the API for the top of the address */ - break; /* We're done now, pass virtual address back */ - } + pmap = current_act()->map->pmap; /* Get the current pmap */ + sva = mapping_p2v(pmap, pa); /* Now find the source virtual */ - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ + if(sva != 0) return sva; /* We found it... */ - if(ova == -1) panic("vmm_get_page_mapping: could not back-map alternate va (%08X)\n", va); /* We are bad wrong if we can't find it */ + panic("vmm_get_page_mapping: could not back-map alternate va (%016llX)\n", va); /* We are bad wrong if we can't find it */ - return ova; + return -1; } /*----------------------------------------------------------------------- @@ -770,19 +945,20 @@ vm_offset_t vmm_get_page_mapping( kern_return_t vmm_unmap_page( thread_act_t act, - vmm_thread_index_t index, - vm_offset_t va) + vmm_adsp_id_t index, + addr64_t va) { vmmCntrlEntry *CEntry; - boolean_t ret; + addr64_t nadd; + pmap_t pmap; kern_return_t kern_result = KERN_SUCCESS; - CEntry = vmm_get_entry(act, index); /* Get and validate the index */ - if (CEntry == NULL)return -1; /* No good, failure... */ + pmap = vmm_get_adsp(act, index); /* Get and validate the index */ + if (!pmap)return -1; /* No good, failure... */ - ret = mapping_remove(CEntry->vmmPmap, va); /* Toss the mapping */ + nadd = mapping_remove(pmap, va); /* Toss the mapping */ - return (ret ? KERN_SUCCESS : KERN_FAILURE); /* Return... */ + return ((nadd & 1) ? KERN_FAILURE : KERN_SUCCESS); /* Return... */ } /*----------------------------------------------------------------------- @@ -795,6 +971,7 @@ kern_return_t vmm_unmap_page( ** act - pointer to current thread activation ** index - index of vmm state for this page ** count - number of pages to release +** flavor - 0 if 32-bit, 1 if 64-bit ** vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap ** ** Outputs: @@ -804,28 +981,46 @@ kern_return_t vmm_unmap_page( kern_return_t vmm_unmap_list( thread_act_t act, - vmm_thread_index_t index, - unsigned int cnt) + vmm_adsp_id_t index, + unsigned int cnt, + unsigned int flavor) { vmmCntrlEntry *CEntry; boolean_t ret; kern_return_t kern_result = KERN_SUCCESS; unsigned int *pgaddr, i; + addr64_t gva; + vmmUMList *lst; + vmmUMList64 *lstx; + pmap_t pmap; + int adsp; - CEntry = vmm_get_entry(act, index); /* Get and validate the index */ - if (CEntry == NULL)return -1; /* No good, failure... */ - - if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE; /* They tried to unmap too many */ - if(!cnt) return KERN_SUCCESS; /* If they said none, we're done... */ + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't a vmm or the index is bogus */ - pgaddr = &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */ + if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE; /* They tried to unmap too many */ + if(!cnt) return KERN_SUCCESS; /* If they said none, we're done... 
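(The unmap list below uses the same low-bit encoding: each entry is a guest
address, one word in the 32-bit flavor and a doubleword in the 64-bit one,
whose vmmlAdID bits select the address space before the low 12 bits are
masked off.)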
*/ - for(i = 0; i < cnt; i++) { /* Step and release all pages in list */ + lst = (vmmUMList *)lstx = (vmmUMList64 *) &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */ - (void)mapping_remove(CEntry->vmmPmap, pgaddr[i]); /* Toss the mapping */ + for(i = 0; i < cnt; i++) { /* Step and release all pages in list */ + if(flavor) { /* Check if 32- or 64-bit addresses */ + gva = lstx[i].vmlava; /* Get the 64-bit guest address */ + } + else { + gva = lst[i].vmlava; /* Get the 32-bit guest address */ + } + + adsp = (gva & vmmlAdID) >> 4; /* Extract an explicit address space request */ + if(!adsp) adsp = index - 1; /* If no explicit, use supplied default */ + pmap = act->mact.vmmControl->vmmAdsp[adsp]; /* Get the pmap for this request */ + if(!pmap) continue; /* Ain't nuthin' mapped here, no durn map... */ + + gva = gva &= 0xFFFFFFFFFFFFF000ULL; /* Clean up the address */ + (void)mapping_remove(pmap, gva); /* Toss the mapping */ } - return KERN_SUCCESS ; /* Return... */ + return KERN_SUCCESS ; /* Return... */ } /*----------------------------------------------------------------------- @@ -847,18 +1042,19 @@ kern_return_t vmm_unmap_list( void vmm_unmap_all_pages( thread_act_t act, - vmm_thread_index_t index) + vmm_adsp_id_t index) { vmmCntrlEntry *CEntry; + pmap_t pmap; - CEntry = vmm_get_entry(act, index); /* Convert index to entry */ - if (CEntry == NULL) return; /* Either this isn't vmm thread or the index is bogus */ + pmap = vmm_get_adsp(act, index); /* Convert index to entry */ + if (!pmap) return; /* Either this isn't vmm thread or the index is bogus */ /* * Note: the pmap code won't deal with the last page in the address space, so handle it explicitly */ - mapping_remove(CEntry->vmmPmap, 0xFFFFF000); /* Remove final page explicitly because we might have mapped it */ - pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000); /* Remove all entries from this map */ + mapping_remove(pmap, 0xFFFFFFFFFFFFF000LL); /* Remove final page explicitly because we might have mapped it */ + pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000LL); /* Remove all entries from this map */ return; } @@ -886,30 +1082,36 @@ void vmm_unmap_all_pages( boolean_t vmm_get_page_dirty_flag( thread_act_t act, - vmm_thread_index_t index, - vm_offset_t va, + vmm_adsp_id_t index, + addr64_t va, unsigned int reset) { vmmCntrlEntry *CEntry; register mapping *mpv, *mp; unsigned int RC; + pmap_t pmap; - CEntry = vmm_get_entry(act, index); /* Convert index to entry */ - if (CEntry == NULL) return 1; /* Either this isn't vmm thread or the index is bogus */ - - mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va); /* Look up the mapping */ - if((unsigned int)mp & 1) { /* Did we timeout? */ - panic("vmm_get_page_dirty_flag: timeout locking physical entry for alternate virtual address (%08X)\n", va); /* Yeah, scream about it! */ - return 1; /* Bad hair day, return dirty... */ - } - if(!mp) return 1; /* Not mapped, return dirty... 
*/ + pmap = vmm_get_adsp(act, index); /* Convert index to entry */ + if (!pmap) return 1; /* Either this isn't vmm thread or the index is bogus */ - RC = hw_test_rc(mp, reset); /* Fetch the RC bits and clear if requested */ + RC = hw_test_rc(pmap, (addr64_t)va, reset); /* Fetch the RC bits and clear if requested */ - mpv = hw_cpv(mp); /* Convert mapping block to virtual */ - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ + switch (RC & mapRetCode) { /* Decode return code */ + + case mapRtOK: /* Changed */ + return ((RC & (unsigned int)mpC) == (unsigned int)mpC); /* Return if dirty or not */ + break; + + case mapRtNotFnd: /* Didn't find it */ + return 1; /* Return dirty */ + break; + + default: + panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %08X, va = %016llX\n", RC, pmap, va); + + } - return (RC & 1); /* Return the change bit */ + return 1; /* Return the change bit */ } @@ -933,32 +1135,38 @@ boolean_t vmm_get_page_dirty_flag( kern_return_t vmm_protect_page( thread_act_t act, - vmm_thread_index_t index, - vm_offset_t va, + vmm_adsp_id_t index, + addr64_t va, vm_prot_t prot) { vmmCntrlEntry *CEntry; - register mapping *mpv, *mp; - unsigned int RC; + addr64_t nextva; + int ret; + pmap_t pmap; - CEntry = vmm_get_entry(act, index); /* Convert index to entry */ - if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ + pmap = vmm_get_adsp(act, index); /* Convert index to entry */ + if (!pmap) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ - mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va); /* Look up the mapping */ - if((unsigned int)mp & 1) { /* Did we timeout? */ - panic("vmm_protect_page: timeout locking physical entry for virtual address (%08X)\n", va); /* Yeah, scream about it! */ - return 1; /* Bad hair day, return dirty... */ - } - if(!mp) return KERN_SUCCESS; /* Not mapped, just return... */ - - hw_prot_virt(mp, prot); /* Set the protection */ + ret = hw_protect(pmap, va, prot, &nextva); /* Try to change the protect here */ - mpv = hw_cpv(mp); /* Convert mapping block to virtual */ - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ + switch (ret) { /* Decode return code */ + + case mapRtOK: /* All ok... */ + break; /* Outta here */ + + case mapRtNotFnd: /* Didn't find it */ + return KERN_SUCCESS; /* Ok, return... */ + break; + + default: + panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, (addr64_t)va); + + } - CEntry->vmmLastMap = va & -PAGE_SIZE; /* Remember the last mapping we changed */ - if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode)) - CEntry->vmmFlags |= vmmMapDone; /* Set that we did a map operation */ + if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode)) { + act->mact.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */ + act->mact.vmmControl->vmmGFlags = (act->mact.vmmControl->vmmGFlags & ~vmmLastAdSp) | index; /* Remember last address space */ + } return KERN_SUCCESS; /* Return */ } @@ -970,7 +1178,10 @@ kern_return_t vmm_protect_page( ** This function sets the protection bits of a mapped page ** and then directly starts executing. ** -** See description of vmm_protect_page for details. +** See description of vmm_protect_page for details +** +** Inputs: +** See vmm_protect_page and vmm_map_execute ** ** Outputs: ** Normal exit is to run the VM. 
Abnormal exit is triggered via a @@ -981,27 +1192,33 @@ kern_return_t vmm_protect_page( vmm_return_code_t vmm_protect_execute( thread_act_t act, vmm_thread_index_t index, - vm_offset_t va, + addr64_t va, vm_prot_t prot) { kern_return_t ret; vmmCntrlEntry *CEntry; + unsigned int adsp; + vmm_thread_index_t cndx; - CEntry = vmm_get_entry(act, index); /* Get and validate the index */ - - if (CEntry == NULL) return kVmmBogusContext; /* Return bogus context */ + cndx = index & 0xFF; /* Clean it up */ + CEntry = vmm_get_entry(act, cndx); /* Get and validate the index */ + if (CEntry == NULL) return kVmmBogusContext; /* Return bogus context */ + + adsp = (index >> 8) & 0xFF; /* Get any requested address space */ + if(!adsp) adsp = (index & 0xFF); /* If 0, use context ID as address space ID */ if (((per_proc_info[cpu_number()].spcFlags) & FamVMmode) && (CEntry != act->mact.vmmCEntry)) return kVmmBogusContext; /* Yes, invalid index in Fam */ - ret = vmm_protect_page(act, index, va, prot); /* Go try to change access */ + ret = vmm_protect_page(act, adsp, va, prot); /* Go try to change access */ if(ret == KERN_SUCCESS) { - CEntry->vmmFlags |= vmmMapDone; /* Set that we did a map operation */ - vmm_execute_vm(act, index); /* Return was ok, launch the VM */ + act->mact.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */ + act->mact.vmmControl->vmmGFlags = (act->mact.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx; /* Remember last address space */ + vmm_execute_vm(act, cndx); /* Return was ok, launch the VM */ } - return kVmmInvalidAddress; /* We had trouble of some kind (shouldn't happen) */ + return ret; /* We had trouble of some kind (shouldn't happen) */ } @@ -1038,9 +1255,6 @@ kern_return_t vmm_get_float_state( fpu_save(&CEntry->vmmFacCtx); /* Save context if live */ - CEntry->vmmContextKern->vmm_proc_state.ppcFPSCRshadow.i[0] = CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[0]; /* Copy FPSCR */ - CEntry->vmmContextKern->vmm_proc_state.ppcFPSCRshadow.i[1] = CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[1]; /* Copy FPSCR */ - if(sv = CEntry->vmmFacCtx.FPUsave) { /* Is there context yet? */ bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8); /* 32 registers */ return KERN_SUCCESS; @@ -1087,10 +1301,6 @@ kern_return_t vmm_get_vector_state( act->mact.specFlags &= ~vectorCng; /* Clear the special flag */ CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd; /* Clear the change indication */ - for(j=0; j < 4; j++) { /* Set value for vscr */ - CEntry->vmmContextKern->vmm_proc_state.ppcVSCRshadow.i[j] = CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j]; - } - if(sv = CEntry->vmmFacCtx.VMXsave) { /* Is there context yet? */ vrvalidwrk = sv->save_vrvalid; /* Get the valid flags */ @@ -1169,6 +1379,10 @@ kern_return_t vmm_set_timer( ** ** This function causes the timer for a specified VM to be ** returned in return_params[0] and return_params[1]. +** Note that this is kind of funky for 64-bit VMs because we +** split the timer into two parts so that we still set parms 0 and 1. +** Obviously, we don't need to do this because the parms are 8 bytes +** wide. 
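+** (Sketch of the split, matching the code below:
+**
+**     return_params[0] = (uint32_t)(vmmTimer >> 32);   /* high half */
+**     return_params[1] = (uint32_t)vmmTimer;           /* low half  */
+**
+** applied to either the 32-bit or the 64-bit return-parameter layout.)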
** ** ** Inputs: @@ -1190,14 +1404,18 @@ kern_return_t vmm_get_timer( CEntry = vmm_get_entry(act, index); /* Convert index to entry */ if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ - CEntry->vmmContextKern->return_params[0] = (CEntry->vmmTimer >> 32); /* Return the last timer value */ - CEntry->vmmContextKern->return_params[1] = (uint32_t)CEntry->vmmTimer; /* Return the last timer value */ - + if(CEntry->vmmXAFlgs & vmm64Bit) { /* A 64-bit virtual machine? */ + CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32); /* Return the last timer value */ + CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[1] = (uint32_t)CEntry->vmmTimer; /* Return the last timer value */ + } + else { + CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0] = (CEntry->vmmTimer >> 32); /* Return the last timer value */ + CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1] = (uint32_t)CEntry->vmmTimer; /* Return the last timer value */ + } return KERN_SUCCESS; } - /*----------------------------------------------------------------------- ** vmm_timer_pop ** @@ -1235,7 +1453,7 @@ void vmm_timer_pop( CTable = act->mact.vmmControl; /* Make this easier */ any = 0; /* Haven't found a running unexpired timer yet */ - for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) { /* Cycle through all and check time now */ + for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Cycle through all and check time now */ if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue; /* Do not check if the entry is empty */ @@ -1318,15 +1536,15 @@ int vmm_stop_vm(struct savearea *save) task_lock(task); /* Lock our task */ - fact = (thread_act_t)task->thr_acts.next; /* Get the first activation on task */ + fact = (thread_act_t)task->threads.next; /* Get the first activation on task */ act = 0; /* Pretend we didn't find it yet */ - for(i = 0; i < task->thr_act_count; i++) { /* All of the activations */ + for(i = 0; i < task->thread_count; i++) { /* All of the activations */ if(fact->mact.vmmControl) { /* Is this a virtual machine monitor? */ act = fact; /* Yeah... */ break; /* Bail the loop... */ } - fact = (thread_act_t)fact->thr_acts.next; /* Go to the next one */ + fact = (thread_act_t)fact->task_threads.next; /* Go to the next one */ } if(!((unsigned int)act)) { /* See if we have VMMs yet */ @@ -1353,7 +1571,7 @@ int vmm_stop_vm(struct savearea *save) return 1; /* Return... */ } - for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) { /* Search slots */ + for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Search slots */ if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) { /* See if we need to stop and if it is in use */ hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop); /* Set this one to stop */ } diff --git a/osfmk/ppc/vmachmon.h b/osfmk/ppc/vmachmon.h index 830ad2482..c2af36c86 100644 --- a/osfmk/ppc/vmachmon.h +++ b/osfmk/ppc/vmachmon.h @@ -27,10 +27,6 @@ ** ** C routines that we are adding to the MacOS X kernel. ** -** Wierd Apple PSL stuff goes here... 
-** -** Until then, Copyright 2000, Connectix -** -----------------------------------------------------------------------*/ #include @@ -55,44 +51,78 @@ typedef union vmm_fp_register_t { unsigned char b[8]; } vmm_fp_register_t; -typedef struct vmm_processor_state_t { - unsigned long ppcPC; - unsigned long ppcMSR; +typedef struct vmm_regs32_t { - unsigned long ppcGPRs[32]; + unsigned long ppcPC; /* 000 */ + unsigned long ppcMSR; /* 004 */ - unsigned long ppcCR; - unsigned long ppcXER; - unsigned long ppcLR; - unsigned long ppcCTR; - unsigned long ppcMQ; /* Obsolete */ - unsigned long ppcVRSave; - /* 32-byte bndry */ - vmm_vector_register_t ppcVSCR; - vmm_fp_register_t ppcFPSCR; + unsigned long ppcGPRs[32]; /* 008 */ + + unsigned long ppcCR; /* 088 */ + unsigned long ppcXER; /* 08C */ + unsigned long ppcLR; /* 090 */ + unsigned long ppcCTR; /* 094 */ + unsigned long ppcMQ; /* 098 - Obsolete */ + unsigned long ppcVRSave; /* 09C */ + unsigned long ppcRsrvd0A0[40]; /* 0A0 */ + /* 140 */ +} vmm_regs32_t; + +#pragma pack(4) /* Make sure the structure stays as we defined it */ +typedef struct vmm_regs64_t { + + unsigned long long ppcPC; /* 000 */ + unsigned long long ppcMSR; /* 008 */ + + unsigned long long ppcGPRs[32]; /* 010 */ + + unsigned long long ppcXER; /* 110 */ + unsigned long long ppcLR; /* 118 */ + unsigned long long ppcCTR; /* 120 */ + unsigned long ppcCR; /* 128 */ + unsigned long ppcVRSave; /* 12C */ + unsigned long ppcRsvd130[4]; /* 130 */ + /* 140 */ +} vmm_regs64_t; +#pragma pack() - unsigned long ppcReserved1[34]; /* Future processor state can go here */ + +#pragma pack(4) /* Make sure the structure stays as we defined it */ +typedef union vmm_regs_t { + vmm_regs32_t ppcRegs32; + vmm_regs64_t ppcRegs64; +} vmm_regs_t; +#pragma pack() + +#pragma pack(4) /* Make sure the structure stays as we defined it */ +typedef struct vmm_processor_state_t { + /* 32-byte bndry */ + vmm_regs_t ppcRegs; /* Define registers areas */ /* We must be 16-byte aligned here */ - vmm_vector_register_t ppcVRs[32]; - vmm_vector_register_t ppcVSCRshadow; + vmm_vector_register_t ppcVRs[32]; /* These are only valid after a kVmmGetVectorState */ + vmm_vector_register_t ppcVSCR; /* This is always loaded/saved at host/guest transition */ /* We must be 8-byte aligned here */ - vmm_fp_register_t ppcFPRs[32]; - vmm_fp_register_t ppcFPSCRshadow; + vmm_fp_register_t ppcFPRs[32]; /* These are only valid after a kVmmGetFloatState */ + vmm_fp_register_t ppcFPSCR; /* This is always loaded/saved at host/guest transition */ unsigned long ppcReserved2[2]; /* Pad out to multiple of 16 bytes */ } vmm_processor_state_t; +#pragma pack() typedef unsigned long vmm_return_code_t; typedef unsigned long vmm_thread_index_t; +#define vmmTInum 0x000000FF +#define vmmTIadsp 0x0000FF00 +typedef unsigned long vmm_adsp_id_t; enum { kVmmCurMajorVersion = 0x0001, - kVmmCurMinorVersion = 0x0005, + kVmmCurMinorVersion = 0x0006, kVmmMinMajorVersion = 0x0001, }; #define kVmmCurrentVersion ((kVmmCurMajorVersion << 16) | kVmmCurMinorVersion) @@ -104,17 +134,38 @@ enum { kVmmFeature_ExtendedMapping = 0x00000004, kVmmFeature_ListMapping = 0x00000008, kVmmFeature_FastAssist = 0x00000010, + kVmmFeature_XA = 0x00000020, + kVmmFeature_SixtyFourBit = 0x00000040, + kVmmFeature_MultAddrSpace = 0x00000080, +}; +#define kVmmCurrentFeatures (kVmmFeature_LittleEndian | kVmmFeature_Stop | kVmmFeature_ExtendedMapping \ + | kVmmFeature_ListMapping | kVmmFeature_FastAssist | kVmmFeature_XA | kVmmFeature_MultAddrSpace) + +enum { + vmm64Bit = 0x80000000, }; -#define 
kVmmCurrentFeatures (kVmmFeature_LittleEndian | \
- kVmmFeature_Stop | \
- kVmmFeature_ExtendedMapping | \
- kVmmFeature_ListMapping | \
- kVmmFeature_FastAssist)

 typedef unsigned long vmm_version_t;

-typedef struct vmm_fastassist_state_t {
+typedef struct vmm_ret_parms32_t {
+ unsigned long return_params[4];
+} vmm_ret_parms32_t;
+
+typedef struct vmm_ret_parms64_t {
+ unsigned long long return_params[4];
+} vmm_ret_parms64_t;
+
+#pragma pack(4) /* Make sure the structure stays as we defined it */
+typedef union vmm_ret_parms_t {
+ vmm_ret_parms64_t vmmrp64; /* 64-bit flavor */
+ vmm_ret_parms32_t vmmrp32; /* 32-bit flavor */
+ unsigned int retgas[11]; /* Force this to be 11 words long */
+} vmm_ret_parms_t;
+#pragma pack()
+
+#pragma pack(4) /* Make sure the structure stays as we defined it */
+typedef struct vmm_fastassist_state32_t {
 unsigned long fastassist_dispatch;
 unsigned long fastassist_refcon;

@@ -128,8 +179,31 @@ typedef struct vmm_fastassist_state_t {
 unsigned long fastassist_intercepts;
 unsigned long fastassist_reserved1;
+} vmm_fastassist_state32_t;
+
+typedef struct vmm_fastassist_state64_t {
+ unsigned long long fastassist_dispatch;
+ unsigned long long fastassist_refcon;
+
+ unsigned long long fastassist_dispatch_code;
+ unsigned long long fastassist_parameter[5];
+
+ unsigned long long guest_register[8];
+
+ unsigned long long guest_pc;
+ unsigned long long guest_msr;
+
+ unsigned long fastassist_intercepts;
+ unsigned long fastassist_reserved1;
+} vmm_fastassist_state64_t;
+
+typedef union vmm_fastassist_state_t {
+ vmm_fastassist_state64_t vmmfs64; /* 64-bit flavor */
+ vmm_fastassist_state32_t vmmfs32; /* 32-bit flavor */
 } vmm_fastassist_state_t;
+#pragma pack()

+#pragma pack(4) /* Make sure the structure stays as we defined it */
 typedef struct vmm_state_page_t {
 /* This structure must remain below 4Kb (one page) in size */
 vmm_version_t interface_version;
@@ -154,8 +228,7 @@ typedef struct vmm_state_page_t {
 #define vmmFamSetb 7
 vmm_return_code_t return_code;
- unsigned long return_params[4];
- unsigned long gas[7]; /* For alignment */
+ vmm_ret_parms_t vmmRet;

 /* The next portion of the structure must remain 32-byte aligned */
 vmm_processor_state_t vmm_proc_state;

@@ -164,7 +237,9 @@ typedef struct vmm_state_page_t {
 vmm_fastassist_state_t vmm_fastassist_state;

 } vmm_state_page_t;
+#pragma pack()

+#pragma pack(4) /* Make sure the structure stays as we defined it */
 typedef struct vmm_comm_page_t {
 union {
 vmm_state_page_t vmcpState; /* Reserve area for state */
@@ -172,33 +247,48 @@ typedef struct vmm_comm_page_t {
 } vmcpfirst;
 unsigned int vmcpComm[256]; /* Define last 1024 bytes as a communications area - function specific */
 } vmm_comm_page_t;
+#pragma pack()

 enum {
 /* Function Indices (passed in r3) */
- kVmmGetVersion = 0,
- kVmmvGetFeatures,
- kVmmInitContext,
- kVmmTearDownContext,
- kVmmTearDownAll,
- kVmmMapPage,
- kVmmGetPageMapping,
- kVmmUnmapPage,
- kVmmUnmapAllPages,
- kVmmGetPageDirtyFlag,
- kVmmGetFloatState,
- kVmmGetVectorState,
- kVmmSetTimer,
- kVmmGetTimer,
- kVmmExecuteVM,
- kVmmProtectPage,
- kVmmMapExecute,
- kVmmProtectExecute,
- kVmmMapList,
- kVmmUnmapList,
+ kVmmGetVersion = 0, /* Get VMM system version */
+ kVmmvGetFeatures, /* Get VMM supported features */
+ kVmmInitContext, /* Initialize a context */
+ kVmmTearDownContext, /* Destroy a context */
+ kVmmTearDownAll, /* Destroy all contexts */
+ kVmmMapPage, /* Map a host to guest address space */
+ kVmmGetPageMapping, /* Get host address of a guest page */
+ kVmmUnmapPage, /* Unmap a
guest page */
+ kVmmUnmapAllPages, /* Unmap all pages in a guest address space */
+ kVmmGetPageDirtyFlag, /* Check if guest page modified */
+ kVmmGetFloatState, /* Retrieve guest floating point context */
+ kVmmGetVectorState, /* Retrieve guest vector context */
+ kVmmSetTimer, /* Set a guest timer */
+ kVmmGetTimer, /* Get a guest timer */
+ kVmmExecuteVM, /* Launch a guest */
+ kVmmProtectPage, /* Set protection attributes for a guest page */
+ kVmmMapExecute, /* Map guest page and launch */
+ kVmmProtectExecute, /* Set prot attributes and launch */
+ kVmmMapList, /* Map a list of pages into guest address spaces */
+ kVmmUnmapList, /* Unmap a list of pages from guest address spaces */
 kvmmExitToHost,
 kvmmResumeGuest,
 kvmmGetGuestRegister,
 kvmmSetGuestRegister,
+
+ kVmmSetXA, /* Set extended architecture features for a VM */
+ kVmmGetXA, /* Get extended architecture features from a VM */
+
+ kVmmMapPage64, /* Map a host to guest address space - supports 64-bit */
+ kVmmGetPageMapping64, /* Get host address of a guest page - supports 64-bit */
+ kVmmUnmapPage64, /* Unmap a guest page - supports 64-bit */
+ kVmmGetPageDirtyFlag64, /* Check if guest page modified - supports 64-bit */
+ kVmmProtectPage64, /* Set protection attributes for a guest page - supports 64-bit */
+ kVmmMapExecute64, /* Map guest page and launch - supports 64-bit */
+ kVmmProtectExecute64, /* Set prot attributes and launch - supports 64-bit */
+ kVmmMapList64, /* Map a list of pages into guest address spaces - supports 64-bit */
+ kVmmUnmapList64, /* Unmap a list of pages from guest address spaces - supports 64-bit */
+ kVmmMaxAddr, /* Returns the maximum virtual address that is mappable */
 };

 #define kVmmReturnNull 0
@@ -211,7 +301,37 @@ enum {
 #define kVmmReturnSystemCall 12
 #define kVmmReturnTraceException 13
 #define kVmmAltivecAssist 22
-#define kVmmInvalidAddress 4096
+#define kVmmInvalidAddress 0x1000
+#define kVmmInvalidAdSpace 0x1001
+
+/*
+ * Notes on guest address spaces.
+ *
+ * Address spaces are loosely coupled to virtual machines. The default is for
+ * a guest with an index of 1 to use address space 1, 2 to use 2, etc. However,
+ * any guest may be launched using any address space and any address space may be the
+ * target for a map or unmap function. Note that the (un)map list functions may pass in
+ * an address space ID on a page-by-page basis.
+ *
+ * An address space is instantiated either explicitly by mapping something into it, or
+ * implicitly by launching a guest with it.
+ *
+ * An address space is destroyed explicitly by kVmmTearDownAll or kVmmUnmapAllPages. It is
+ * destroyed implicitly by kVmmTearDownContext. The latter is done in order to remain
+ * backwards compatible with the previous implementation, which does not have decoupled
+ * guests and address spaces.
+ *
+ * An address space supports the maximum virtual address supported by the processor.
+ * The 64-bit variant of the mapping functions can be used on non-64-bit machines. If an
+ * unmappable address (e.g., an address larger than 4GB-1 on a 32-bit machine) is requested,
+ * the operation fails with a kVmmInvalidAddress return code.
+ *
+ * Note that for 64-bit calls, both host and guest are specified as 64-bit values.
+ *
+ */
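/*
 * For illustration, a minimal caller-side sketch (not part of this header) of
 * launching guest context 1 in address space 2; the variable names here are
 * hypothetical:
 *
 *    vmm_thread_index_t cndx  = 1;                    // low byte: context ID (vmmTInum)
 *    vmm_adsp_id_t      adsp  = 2;                    // next byte: address space (vmmTIadsp)
 *    vmm_thread_index_t index = (adsp << 8) | cndx;   // index passed with kVmmExecuteVM
 */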
+ * + */ + + + /* * Storage Extended Protection modes @@ -236,25 +356,45 @@ enum { #define kVmmProtRORO (kVmmProtXtnd | 0x00000003) /* - * Map list format + * Map list formats + * The last 12 bits in the guest virtual address is used as flags as follows: + * 0x007 - for the map calls, this is the key to set + * 0x3F0 - for both map and unmap, this is the address space ID upon which to operate. + * Note that if 0, the address space ID from the function call is used instead. */ -typedef struct vmmMapList { - unsigned int vmlva; /* Virtual address in emulator address space */ - unsigned int vmlava; /* Virtual address in alternate address space */ -#define vmlFlgs 0x00000FFF /* Flags passed in in vmlava low order 12 bits */ -#define vmlProt 0x00000003 /* Protection flags for the page */ -} vmmMapList; +typedef struct vmmMList { + unsigned int vmlva; /* Virtual address in host address space */ + unsigned int vmlava; /* Virtual address in guest address space */ +} vmmMList; + +typedef struct vmmMList64 { + unsigned long long vmlva; /* Virtual address in host address space */ + unsigned long long vmlava; /* Virtual address in guest address space */ +} vmmMList64; + +typedef struct vmmUMList { + unsigned int vmlava; /* Virtual address in guest address space */ +} vmmUMList; + +typedef struct vmmUMList64 { + unsigned long long vmlava; /* Virtual address in guest address space */ +} vmmUMList64; +#define vmmlFlgs 0x00000FFF /* Flags passed in in vmlava low order 12 bits */ +#define vmmlProt 0x00000007 /* Protection flags for the page */ +#define vmmlAdID 0x000003F0 /* Guest address space ID - used only if non-zero */ +#define vmmlRsvd 0x00000C08 /* Reserved for future */ /************************************************************************************* Internal Emulation Types **************************************************************************************/ -#define kVmmMaxContextsPerThread 32 +#define kVmmMaxContexts 32 #define kVmmMaxUnmapPages 64 #define kVmmMaxMapPages 64 +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct vmmCntrlEntry { /* Virtual Machine Monitor control table entry */ unsigned int vmmFlags; /* Assorted control flags */ #define vmmInUse 0x80000000 @@ -265,27 +405,32 @@ typedef struct vmmCntrlEntry { /* Virtual Machine Monitor control table ent #define vmmVectCngdb 2 #define vmmTimerPop 0x10000000 #define vmmTimerPopb 3 -#define vmmMapDone 0x08000000 -#define vmmMapDoneb 4 #define vmmFAMmode 0x04000000 #define vmmFAMmodeb 5 #define vmmXStop 0x00800000 #define vmmXStopb 8 #define vmmSpfSave 0x000000FF #define vmmSpfSaveb 24 - pmap_t vmmPmap; /* pmap for alternate context's view of task memory */ + unsigned int vmmXAFlgs; /* Extended Architecture flags */ vmm_state_page_t *vmmContextKern; /* Kernel address of context communications area */ - vmm_state_page_t *vmmContextPhys; /* Physical address of context communications area */ + ppnum_t vmmContextPhys; /* Physical address of context communications area */ vmm_state_page_t *vmmContextUser; /* User address of context communications area */ facility_context vmmFacCtx; /* Header for vector and floating point contexts */ + pmap_t vmmPmap; /* Last dispatched pmap */ uint64_t vmmTimer; /* Last set timer value. 
Zero means unset */ - vm_offset_t vmmLastMap; /* Last vaddr mapping into virtual machine */ unsigned int vmmFAMintercept; /* FAM intercepted exceptions */ } vmmCntrlEntry; +#pragma pack() +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct vmmCntrlTable { /* Virtual Machine Monitor Control table */ - vmmCntrlEntry vmmc[kVmmMaxContextsPerThread]; /* One entry for each possible Virtual Machine Monitor context */ + unsigned int vmmGFlags; /* Global flags */ +#define vmmLastAdSp 0xFF /* Remember the address space that was mapped last */ + addr64_t vmmLastMap; /* Last vaddr mapping made */ + vmmCntrlEntry vmmc[kVmmMaxContexts]; /* One entry for each possible Virtual Machine Monitor context */ + pmap_t vmmAdsp[kVmmMaxContexts]; /* Guest address space pmaps */ } vmmCntrlTable; +#pragma pack() /* function decls for kernel level routines... */ extern void vmm_execute_vm(thread_act_t act, vmm_thread_index_t index); @@ -296,20 +441,22 @@ extern kern_return_t vmm_get_vector_state(thread_act_t act, vmm_thread_index_t i extern kern_return_t vmm_set_timer(thread_act_t act, vmm_thread_index_t index, unsigned int timerhi, unsigned int timerlo); extern kern_return_t vmm_get_timer(thread_act_t act, vmm_thread_index_t index); extern void vmm_tear_down_all(thread_act_t act); -extern kern_return_t vmm_map_page(thread_act_t act, vmm_thread_index_t hindex, vm_offset_t cva, - vm_offset_t ava, vm_prot_t prot); -extern vmm_return_code_t vmm_map_execute(thread_act_t act, vmm_thread_index_t hindex, vm_offset_t cva, - vm_offset_t ava, vm_prot_t prot); -extern kern_return_t vmm_protect_page(thread_act_t act, vmm_thread_index_t hindex, vm_offset_t va, +extern kern_return_t vmm_map_page(thread_act_t act, vmm_thread_index_t hindex, addr64_t cva, + addr64_t ava, vm_prot_t prot); +extern vmm_return_code_t vmm_map_execute(thread_act_t act, vmm_thread_index_t hindex, addr64_t cva, + addr64_t ava, vm_prot_t prot); +extern kern_return_t vmm_protect_page(thread_act_t act, vmm_thread_index_t hindex, addr64_t va, vm_prot_t prot); -extern vmm_return_code_t vmm_protect_execute(thread_act_t act, vmm_thread_index_t hindex, vm_offset_t va, +extern vmm_return_code_t vmm_protect_execute(thread_act_t act, vmm_thread_index_t hindex, addr64_t va, vm_prot_t prot); -extern vm_offset_t vmm_get_page_mapping(thread_act_t act, vmm_thread_index_t index, - vm_offset_t va); -extern kern_return_t vmm_unmap_page(thread_act_t act, vmm_thread_index_t index, vm_offset_t va); +extern addr64_t vmm_get_page_mapping(thread_act_t act, vmm_thread_index_t index, + addr64_t va); +extern kern_return_t vmm_unmap_page(thread_act_t act, vmm_thread_index_t index, addr64_t va); extern void vmm_unmap_all_pages(thread_act_t act, vmm_thread_index_t index); extern boolean_t vmm_get_page_dirty_flag(thread_act_t act, vmm_thread_index_t index, - vm_offset_t va, unsigned int reset); + addr64_t va, unsigned int reset); +extern kern_return_t vmm_set_XA(thread_act_t act, vmm_thread_index_t index, unsigned int xaflags); +extern unsigned int vmm_get_XA(thread_act_t act, vmm_thread_index_t index); extern int vmm_get_features(struct savearea *); extern int vmm_get_version(struct savearea *); extern int vmm_init_context(struct savearea *); @@ -319,13 +466,14 @@ extern void vmm_force_exit(thread_act_t act, struct savearea *); extern int vmm_stop_vm(struct savearea *save); extern void vmm_timer_pop(thread_act_t act); extern void vmm_interrupt(ReturnHandler *rh, thread_act_t act); -extern kern_return_t vmm_map_list(thread_act_t act, vmm_thread_index_t 
index, unsigned int cnt);
-extern kern_return_t vmm_unmap_list(thread_act_t act, vmm_thread_index_t index, unsigned int cnt);
+extern kern_return_t vmm_map_list(thread_act_t act, vmm_thread_index_t index, unsigned int cnt, unsigned int flavor);
+extern kern_return_t vmm_unmap_list(thread_act_t act, vmm_thread_index_t index, unsigned int cnt, unsigned int flavor);
 extern vmm_return_code_t vmm_resume_guest(vmm_thread_index_t index, unsigned long pc,
 unsigned long vmmCntrl, unsigned long vmmCntrMaskl);
 extern vmm_return_code_t vmm_exit_to_host(vmm_thread_index_t index);
 extern unsigned long vmm_get_guest_register(vmm_thread_index_t index, unsigned long reg_index);
 extern vmm_return_code_t vmm_set_guest_register(vmm_thread_index_t index, unsigned long reg_index, unsigned long reg_value);
+extern addr64_t vmm_max_addr(thread_act_t act);

 #endif
diff --git a/osfmk/ppc/vmachmon_asm.s b/osfmk/ppc/vmachmon_asm.s
index 9d4a9cd4a..1a4a67381 100644
--- a/osfmk/ppc/vmachmon_asm.s
+++ b/osfmk/ppc/vmachmon_asm.s
@@ -33,6 +33,9 @@
 * facility.
 */

+#define vmmMapDone 31
+#define vmmDoing64 30
+
 /*
 * int vmm_dispatch(savearea, act);
@@ -47,7 +50,7 @@
 * R30 = current savearea
 */

- .align 5 /* Line up on cache line */
+ .align 5 ; Line up on cache line
 .globl EXT(vmm_dispatch_table)

LEXT(vmm_dispatch_table)
@@ -65,15 +68,15 @@ LEXT(vmm_dispatch_table)
 .long 0 ; Not valid in Fam
 .long EXT(vmm_tear_down_all) ; Tears down all VMMs
 .long 0 ; Not valid in Fam
- .long EXT(vmm_map_page) ; Maps a page from the main address space into the VM space
+ .long EXT(vmm_map_page32) ; Maps a page from the main address space into the VM space - supports 32-bit
 .long 1 ; Valid in Fam
- .long EXT(vmm_get_page_mapping) ; Returns client va associated with VM va
+ .long EXT(vmm_get_page_mapping32) ; Returns client va associated with VM va - supports 32-bit
 .long 1 ; Valid in Fam
- .long EXT(vmm_unmap_page) ; Unmaps a page from the VM space
+ .long EXT(vmm_unmap_page32) ; Unmaps a page from the VM space - supports 32-bit
 .long 1 ; Valid in Fam
- .long EXT(vmm_unmap_all_pages) ; Unmaps all pages from the VM space
+ .long EXT(vmm_unmap_all_pages) ; Unmaps all pages from the VM space
 .long 1 ; Valid in Fam
- .long EXT(vmm_get_page_dirty_flag) ; Gets the change bit for a page and optionally clears it
+ .long EXT(vmm_get_page_dirty_flag32) ; Gets the change bit for a page and optionally clears it - supports 32-bit
 .long 1 ; Valid in Fam
 .long EXT(vmm_get_float_state) ; Gets current floating point state
 .long 0 ; not valid in Fam
@@ -85,15 +88,15 @@ LEXT(vmm_dispatch_table)
 .long 1 ; Valid in Fam
 .long EXT(switchIntoVM) ; Switches to the VM context
 .long 1 ; Valid in Fam
- .long EXT(vmm_protect_page) ; Sets protection values for a page
+ .long EXT(vmm_protect_page32) ; Sets protection values for a page - supports 32-bit
 .long 1 ; Valid in Fam
- .long EXT(vmm_map_execute) ; Maps a page an launches VM
+ .long EXT(vmm_map_execute32) ; Maps a page and launches VM - supports 32-bit
 .long 1 ; Not valid in Fam
- .long EXT(vmm_protect_execute) ; Sets protection values for a page and launches VM
+ .long EXT(vmm_protect_execute32) ; Sets protection values for a page and launches VM - supports 32-bit
 .long 1 ; Valid in Fam
- .long EXT(vmm_map_list) ; Maps a list of pages
+ .long EXT(vmm_map_list32) ; Maps a list of pages - supports 32-bit
 .long 1 ; Valid in Fam
- .long EXT(vmm_unmap_list) ; Unmaps a list of pages
+ .long EXT(vmm_unmap_list32) ; Unmaps a list of pages - supports 32-bit
 .long 1 ; Valid in Fam
 .long EXT(vmm_fam_reserved) ; exit from Fam
to host .long 1 ; Valid in Fam @@ -103,6 +106,31 @@ LEXT(vmm_dispatch_table) .long 1 ; Valid in Fam .long EXT(vmm_fam_reserved) ; Set guest register from Fam .long 1 ; Valid in Fam + .long EXT(vmm_set_XA) ; Set extended architecture features for a VM + .long 0 ; Not valid in Fam + .long EXT(vmm_get_XA) ; Get extended architecture features from a VM + .long 1 ; Valid in Fam + .long EXT(vmm_map_page) ; Map a host to guest address space - supports 64-bit + .long 1 ; Valid in Fam + .long EXT(vmm_get_page_mapping) ; Get host address of a guest page - supports 64-bit + .long 1 ; Valid in Fam + .long EXT(vmm_unmap_page) ; Unmap a guest page - supports 64-bit + .long 1 ; Valid in Fam + .long EXT(vmm_get_page_dirty_flag) ; Check if guest page modified - supports 64-bit + .long 1 ; Valid in Fam + .long EXT(vmm_protect_page) ; Sets protection values for a page - supports 64-bit + .long 1 ; Valid in Fam + .long EXT(vmm_map_execute) ; Map guest page and launch - supports 64-bit + .long 1 ; Valid in Fam + .long EXT(vmm_protect_execute) ; Set prot attributes and launch - supports 64-bit + .long 1 ; Valid in Fam + .long EXT(vmm_map_list64) ; Map a list of pages into guest address spaces - supports 64-bit + .long 1 ; Valid in Fam + .long EXT(vmm_unmap_list64) ; Unmap a list of pages from guest address spaces - supports 64-bit + .long 1 ; Valid in Fam + .long EXT(vmm_max_addr) ; Returns the maximum virtual address + .long 1 ; Valid in Fam + .set vmm_count,(.-EXT(vmm_dispatch_table))/8 ; Get the top number @@ -112,16 +140,16 @@ LEXT(vmm_dispatch_table) LEXT(vmm_dispatch) - lwz r11,saver3(r30) ; Get the selector + lwz r11,saver3+4(r30) ; Get the selector mr r3,r4 ; All of our functions want the activation as the first parm lis r10,hi16(EXT(vmm_dispatch_table)) ; Get top half of table cmplwi r11,kVmmExecuteVM ; Should we switch to the VM now? cmplwi cr1,r11,vmm_count ; See if we have a valid selector ori r10,r10,lo16(EXT(vmm_dispatch_table)) ; Get low half of table - lwz r4,saver4(r30) ; Get 1st parameter after selector + lwz r4,saver4+4(r30) ; Get 1st parameter after selector beq+ EXT(switchIntoVM) ; Yes, go switch to it.... rlwinm r11,r11,3,0,28 ; Index into table - bgt- cr1,vmmBogus ; It is a bogus entry + bge- cr1,vmmBogus ; It is a bogus entry add r12,r10,r11 ; Get the vmm dispatch syscall entry mfsprg r10,0 ; Get the per_proc lwz r13,0(r12) ; Get address of routine @@ -131,19 +159,24 @@ LEXT(vmm_dispatch) rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit crand cr0_eq,cr1_eq,cr0_gt ; In Fam and Invalid syscall beq vmmBogus ; Intercept to host - lwz r5,saver5(r30) ; Get 2nd parameter after selector - lwz r6,saver6(r30) ; Get 3rd parameter after selector + lwz r5,saver5+4(r30) ; Get 2nd parameter after selector - note that some of these parameters may actually be long longs + lwz r6,saver6+4(r30) ; Get 3rd parameter after selector mtlr r13 ; Set the routine address - lwz r7,saver7(r30) ; Get 4th parameter after selector + lwz r7,saver7+4(r30) ; Get 4th parameter after selector + lwz r8,saver8+4(r30) ; Get 5th parameter after selector + lwz r9,saver9+4(r30) ; Get 6th parameter after selector ; -; NOTE: currently the most paramters for any call is 4. We will support at most 8 because we -; do not want to get into any stack based parms. However, here is where we need to add -; code for the 5th - 8th parms if we need them. +; NOTE: some of the above parameters are actually long longs. We have glue code that transforms +; all needed parameters and/or adds 32-/64-bit flavors to the needed functions. 
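;
; For illustration, the 32-bit glue routines below behave like this C sketch
; (a paraphrase of the assembly, not code from this patch):
;
;     kern_return_t vmm_map_page32(thread_act_t act, vmm_thread_index_t index,
;                                  vm_offset_t cva, vm_offset_t ava, vm_prot_t prot)
;     {
;         /* zero-extend the 32-bit addresses into the common 64-bit call */
;         return vmm_map_page(act, index, (addr64_t)cva, (addr64_t)ava, prot);
;     }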
; blrl ; Call function - - stw r3,saver3(r30) ; Pass back the return code + +vmmRetPt: li r0,0 ; Clear this out + stw r0,saver3(r30) ; Make sure top of RC is clear + stw r3,saver3+4(r30) ; Pass back the return code + stw r0,saver4(r30) ; Make sure bottom of RC is clear (just in case) + stw r4,saver4+4(r30) ; Pass back the bottom return code (just in case) li r3,1 ; Set normal return with check for AST b EXT(ppcscret) ; Go back to handler... @@ -182,18 +215,113 @@ LEXT(vmm_get_features_sel) ; Selector based version of get features LEXT(vmm_init_context_sel) ; Selector based version of init context - lwz r4,saver4(r30) ; Get the passed in version - lwz r5,saver5(r30) ; Get the passed in comm area + lwz r4,saver4+4(r30) ; Get the passed in version + lwz r5,saver5+4(r30) ; Get the passed in comm area lis r3,hi16(EXT(vmm_init_context)) - stw r4,saver3(r30) ; Cheat and move this parameter over + stw r4,saver3+4(r30) ; Cheat and move this parameter over ori r3,r3,lo16(EXT(vmm_init_context)) - stw r5,saver4(r30) ; Cheat and move this parameter over + stw r5,saver4+4(r30) ; Cheat and move this parameter over selcomm: mtlr r3 ; Set the real routine address mr r3,r30 ; Pass in the savearea blrl ; Call the function b EXT(ppcscret) ; Go back to handler... + .align 5 + .globl EXT(vmm_map_page32) + +LEXT(vmm_map_page32) + mr r9,r7 ; Move prot to correct parm + mr r8,r6 ; Move guest address to low half of long long + li r7,0 ; Clear high half of guest address + mr r6,r5 ; Move host address to low half of long long + li r5,0 ; Clear high half of host address + b EXT(vmm_map_page) ; Transition to real function... + + .align 5 + .globl EXT(vmm_get_page_mapping32) + +LEXT(vmm_get_page_mapping32) + mr r6,r5 ; Move guest address to low half of long long + li r5,0 ; Clear high half of guest address + bl EXT(vmm_get_page_mapping) ; Transition to real function... + mr r3,r4 ; Convert addr64_t to vm_offset_t, dropping top half + b vmmRetPt ; Join normal return... + + .align 5 + .globl EXT(vmm_unmap_page32) + +LEXT(vmm_unmap_page32) + mr r6,r5 ; Move guest address to low half of long long + li r5,0 ; Clear high half of guest address + b EXT(vmm_unmap_page) ; Transition to real function... + + .align 5 + .globl EXT(vmm_get_page_dirty_flag32) + +LEXT(vmm_get_page_dirty_flag32) + mr r7,r6 ; Move reset flag + mr r6,r5 ; Move guest address to low half of long long + li r5,0 ; Clear high half of guest address + b EXT(vmm_get_page_dirty_flag) ; Transition to real function... + + .align 5 + .globl EXT(vmm_protect_page32) + +LEXT(vmm_protect_page32) + mr r7,r6 ; Move protection bits + mr r6,r5 ; Move guest address to low half of long long + li r5,0 ; Clear high half of guest address + b EXT(vmm_protect_page) ; Transition to real function... + + .align 5 + .globl EXT(vmm_map_execute32) + +LEXT(vmm_map_execute32) + mr r9,r7 ; Move prot to correct parm + mr r8,r6 ; Move guest address to low half of long long + li r7,0 ; Clear high half of guest address + mr r6,r5 ; Move host address to low half of long long + li r5,0 ; Clear high half of host address + b EXT(vmm_map_execute) ; Transition to real function... + + .align 5 + .globl EXT(vmm_protect_execute32) + +LEXT(vmm_protect_execute32) + mr r7,r6 ; Move protection bits + mr r6,r5 ; Move guest address to low half of long long + li r5,0 ; Clear high half of guest address + b EXT(vmm_protect_execute) ; Transition to real function... + + .align 5 + .globl EXT(vmm_map_list32) + +LEXT(vmm_map_list32) + li r6,0 ; Set 32-bit flavor + b EXT(vmm_map_list) ; Go to common routine... 
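;
; For illustration, a caller-side C sketch of one 64-bit map-list entry (the
; variable names and the key value are hypothetical; the masks are vmmlAdID
; and vmmlProt from vmachmon.h):
;
;     vmmMList64 ent;
;     ent.vmlva  = host_va;                        /* host virtual address */
;     ent.vmlava = guest_va | (adsp << 4) | key;   /* guest va | address space | key */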
+
+ .align 5
+ .globl EXT(vmm_map_list64)
+
+LEXT(vmm_map_list64)
+ li r6,1 ; Set 64-bit flavor
+ b EXT(vmm_map_list) ; Go to common routine...
+
+ .align 5
+ .globl EXT(vmm_unmap_list32)
+
+LEXT(vmm_unmap_list32)
+ li r6,0 ; Set 32-bit flavor
+ b EXT(vmm_unmap_list) ; Go to common routine...
+
+ .align 5
+ .globl EXT(vmm_unmap_list64)
+
+LEXT(vmm_unmap_list64)
+ li r6,1 ; Set 64-bit flavor
+ b EXT(vmm_unmap_list) ; Go to common routine...
+
 /*
 * Here is where we transition to the virtual machine.
 *
@@ -208,12 +336,6 @@ selcomm: mtlr r3 ; Set the real routine address
 * calls. This is called, but never returned from. We always go directly back to the
 * user from here.
 *
- * Still need to figure out final floats and vectors. For now, we will go brute
- * force and when we go into the VM, we will force save any normal floats and
- * vectors. Then we will hide them and swap the VM copy (if any) into the normal
- * chain. When we exit VM we will do the opposite. This is not as fast as I would
- * like it to be.
- *
 *
 */

@@ -231,29 +353,42 @@ LEXT(vmm_execute_vm)
LEXT(switchIntoVM)
 mfsprg r10,0 ; Get the per_proc
- lwz r15,spcFlags(r10) ; Get per_proc special flags
- rlwinm. r0,r15,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
- bne vmmFamGuestResume
- lwz r5,vmmControl(r3) ; Pick up the control table address
+ rlwinm r31,r4,24,24,31 ; Get the address space
+ rlwinm r4,r4,0,24,31 ; Isolate the context id
+ lwz r28,vmmControl(r3) ; Pick up the control table address
 subi r4,r4,1 ; Switch to zero offset
- rlwinm. r2,r5,0,0,30 ; Is there a context there? (Note: we will ignore bit 31 so that we
+ rlwinm. r2,r28,0,0,30 ; Is there a context there? (Note: we will ignore bit 31 so that we
 ; do not try this while we are transitioning off to on
- cmplwi cr1,r4,kVmmMaxContextsPerThread ; Is the index valid?
+ cmplwi cr1,r4,kVmmMaxContexts ; Is the index valid?
 beq- vmmBogus ; Not started, treat like a bogus system call
+ subic. r31,r31,1 ; Make address space 0 based and test if we use default
 mulli r2,r4,vmmCEntrySize ; Get displacement from index
- bgt- cr1,swvmmBogus ; Index is bogus...
- add r2,r2,r5 ; Point to the entry
-
+ bge- cr1,swvmmBogus ; Index is bogus...
+ add r2,r2,r28 ; Point to the entry
+ bge-- swvmmDAdsp ; There was an explicit address space request
+ mr r31,r4 ; Default the address space to the context ID
+
+swvmmDAdsp: la r2,vmmc(r2) ; Get the offset to the context array
+ lwz r8,vmmGFlags(r28) ; Get the general flags
 lwz r4,vmmFlags(r2) ; Get the flags for the selected entry
+ crset vmmMapDone ; Assume we will be mapping something
 lwz r5,vmmContextKern(r2) ; Get the context area address
 rlwinm. r26,r4,0,vmmInUseb,vmmInUseb ; See if the slot is in use
- bne+ swvmChkIntcpt ; We are so cool. Go do check for immediate intercepts...
-
-swvmmBogus: li r2,kVmmBogusContext ; Set bogus index return
- li r3,1 ; Set normal return with check for AST
- stw r2,saver3(r30) ; Pass back the return code
- b EXT(ppcscret) ; Go back to handler...
-
+ cmplwi cr1,r31,kVmmMaxContexts ; See if we have a valid address space ID
+ rlwinm r8,r8,0,24,31 ; Clean up address space
+ beq-- swvmmBogus ; This context is no good...
+
+ la r26,vmmAdsp(r28) ; Point to the pmaps
+ sub r8,r8,r31 ; Get diff between launching address space - 1 and last mapped into (should be 1 if the same)
+ rlwinm r31,r31,2,0,29 ; Index to the pmap
+ cmplwi r8,1 ; See if we have the same address space
+ bge-- cr1,swvmmBogAdsp ; Address space is no good...
+ lwzx r31,r26,r31 ; Get the requested address space pmap
+ li r0,0 ; Get a 0 in case we need to trash redrive
+ lwz r15,spcFlags(r10) ; Get per_proc special flags
+ beq swvmmAdspOk ; Do not invalidate address space if we are launching the same
+ crclr vmmMapDone ; Clear map done flag
+ stb r0,vmmGFlags+3(r28) ; Clear the last mapped address space ID so we will not redrive later
 ;
 ; Here we check for any immediate intercepts. So far, the only
 ; two of these are a timer pop and an external stop. We will not dispatch if
@@ -261,7 +396,10 @@ swvmmBogus: li r2,kVmmBogusContext ; Set bogus index return
 ; to 0) or to set a future time, or if it is external stop, set the vmmXStopRst flag.
 ;

-swvmChkIntcpt:
+swvmmAdspOk:
+ rlwinm. r0,r15,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
+ stw r31,vmmPmap(r2) ; Save the last dispatched address space
+ bne vmmFamGuestResume
 lwz r6,vmmCntrl(r5) ; Get the control field
 rlwinm. r7,r6,0,vmmXStartb,vmmXStartb ; Clear all but start bit
 beq+ swvmChkStop ; Do not reset stop
@@ -276,23 +414,18 @@ swvmtryx: lwarx r4,r8,r2 ; Pick up the flags

swvmChkStop:
 rlwinm. r26,r4,0,vmmXStopb,vmmXStopb ; Is this VM stopped?
- beq+ swvmNoStop ; Nope...
-
- li r2,kVmmStopped ; Set stopped return
- li r3,1 ; Set normal return with check for AST
- stw r2,saver3(r30) ; Pass back the return code
- stw r2,return_code(r5) ; Save the exit code
- b EXT(ppcscret) ; Go back to handler...
+ bne-- swvmSetStop ; Yes...

-swvmNoStop: rlwinm. r26,r4,0,vmmTimerPopb,vmmTimerPopb ; Did the timer go pop?
- beq+ swvmDoSwitch ; No...
-
- li r2,kVmmReturnNull ; Set null return
- li r3,1 ; Set normal return with check for AST
- stw r2,saver3(r30) ; Pass back the return code
- stw r2,return_code(r5) ; Save the exit code
- b EXT(ppcscret) ; Go back to handler...
+ rlwinm. r26,r4,0,vmmTimerPopb,vmmTimerPopb ; Did the timer go pop?
+ cmplwi cr1,r31,0 ; Is there actually an address space defined?
+ bne-- svvmTimerPop ; Yes...
+
+;
+; Special note: we need to intercept any attempt to launch a guest into a non-existent address space.
+; We will just go emulate an ISI if there is not one.
+;
+
+ beq-- cr1,swvmEmulateISI ; We are trying to launch into an undefined address space. This is not so good...

 ;
 ; Here is where we actually swap into the VM (alternate) context.
@@ -316,29 +449,34 @@ swvmDoSwitch: stw r11,deferctx(r3) ; Start using the virtual machine facility context when we exit
 lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
- lwz r3,vmmPmap(r27) ; Get the pointer to the PMAP
+ mr r3,r31 ; Get the pointer to the PMAP
 oris r15,r11,hi16(runningVM) ; ; Show that we are swapped to the VM right now
 bl EXT(hw_set_user_space_dis) ; Swap the address spaces
 lwz r17,vmmFlags(r27) ; Get the status flags
 lwz r20,vmmContextKern(r27) ; Get the state page kernel addr
 lwz r21,vmmCntrl(r20) ; Get vmmCntrl
 rlwinm. r22,r21,0,vmmFamEnab,vmmFamEnab ; Is vmmFamEnab set?
+ lwz r22,vmmXAFlgs(r27) ; Get the eXtended Architecture flags
+ stw r22,VMMXAFlgs(r10) ; Store vmmXAFlgs in per_proc VMMXAFlgs
 beq swvmNoFam ; No Fam intercept
+ rlwinm. r22,r22,0,0,0 ; Are we doing a 64-bit virtual machine?
rlwimi r15,r21,32+vmmFamSetb-FamVMmodebit,FamVMmodebit,FamVMmodebit ; Set FamVMmode bit
 rlwinm r21,r21,0,vmmFamSetb+1,vmmFamSetb-1 ; Clear FamSet bit
+ bne swvmXfamintercpt
 lwz r22,famintercepts(r20) ; Load intercept bit field
+ b swvmfamintercptres
+swvmXfamintercpt:
+ lwz r22,faminterceptsX(r20) ; Load intercept bit field
+swvmfamintercptres:
 stw r21,vmmCntrl(r20) ; Update vmmCntrl
 lwz r19,vmmContextPhys(r27) ; Get vmmFAMarea address
 stw r22,vmmFAMintercept(r27) ; Get vmmFAMintercept
 stw r22,FAMintercept(r10) ; Store vmmFAMintercept in per_proc FAMintercept
- stw r19,vmmContextPhys(r27) ; Store vmmContextPhys
 stw r19,VMMareaPhys(r10) ; Store VMMareaPhys
 oris r15,r15,hi16(FamVMena) ; Set FamVMenabit
swvmNoFam:
- rlwinm. r0,r17,0,vmmMapDoneb,vmmMapDoneb ; Did we just do a map function?
 stw r27,vmmCEntry(r26) ; Remember what context we are running
- andc r17,r17,r0 ; Turn off map flag
- beq+ swvmNoMap ; No mapping done...
+ bf++ vmmMapDone,swvmNoMap ; We have not mapped anything or it was not for this address space

 ;
 ; This little bit of hoopala here (triggered by vmmMapDone) is
@@ -349,17 +487,24 @@ swvmNoFam:
 ; double faults from happening. Note that there is only a gain if the VM
 ; takes a fault, then the emulator resolves it only, and then begins
 ; the VM execution again. It seems like this should be the normal case.
+;
+; Note that we need to revisit this when we move the virtual machines to the task because
+; then it will be possible for more than one thread to access this stuff at the same time.
 ;

 lwz r3,SAVflags(r30) ; Pick up the savearea flags
- lwz r2,vmmLastMap(r27) ; Get the last mapped address
+ lwz r2,vmmLastMap(r28) ; Get the last mapped address
+ lwz r14,vmmLastMap+4(r28) ; Get the last mapped address low half
 li r20,T_DATA_ACCESS ; Change to DSI fault
 oris r3,r3,hi16(SAVredrive) ; Set exception redrive
 stw r2,savedar(r30) ; Set the DAR to the last thing we mapped
+ stw r14,savedar+4(r30) ; Set the DAR to the last thing we mapped
 stw r3,SAVflags(r30) ; Turn on the redrive request
 lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
+ li r0,0 ; Clear
 stw r20,saveexception(r30) ; Say we need to emulate a DSI
 stw r2,savedsisr(r30) ; Pretend we have a PTE miss
+ stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of

swvmNoMap:
 lwz r20,vmmContextKern(r27) ; Get the comm area
 rlwimi r15,r17,32-(floatCngbit-vmmFloatCngdb),floatCngbit,vectorCngbit ; Shift and insert changed bits
@@ -399,15 +544,18 @@ swvmNoMap: lwz r20,vmmContextKern(r27) ; Get the comm area
 ori r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc
 li r16,FPUowner ; Displacement to float owner
 add r19,r18,r19 ; Point to the owner per_proc
- li r0,0 ; Clear this out

swvminvfpu: lwarx r18,r16,r19 ; Get the owner
- cmplw r18,r25 ; Does he still have this context?
- bne swvminvfpv ; Nope...
- stwcx. r0,r16,r19 ; Try to invalidate it
- bne- swvminvfpu ; Try again if there was a collision...
-
-swvminvfpv: lwz r3,FPUsave(r25) ; Get the FPU savearea
+
+ sub r0,r18,r25 ; Subtract one from the other
+ sub r3,r25,r18 ; Subtract the other from the one
+ or r3,r3,r0 ; Combine them
+ srawi r3,r3,31 ; Get a 0 if equal or -1 if not
+ and r18,r18,r3 ; Make 0 if same, unchanged if not
+ stwcx. r18,r16,r19 ; Try to invalidate it
+ bne-- swvminvfpu ; Try again if there was a collision...
+
+ lwz r3,FPUsave(r25) ; Get the FPU savearea
 dcbt r14,r17 ; Touch in first line of new stuff
 mr. r3,r3 ; Is there one?
 bne+ swvmGotFloat ; Yes...
@@ -418,7 +566,7 @@ swvminvfpv: lwz r3,FPUsave(r25) ; Get the FPU savearea
 stw r26,SAVact(r3) ; Save our activation
 li r0,0 ; Get a zero
 stb r7,SAVflags+2(r3) ; Set that this is floating point
- stw r0,SAVprev(r3) ; Clear the back chain
+ stw r0,SAVprev+4(r3) ; Clear the back chain
 stw r0,SAVlevel(r3) ; We are always at level 0 (user state)

 stw r3,FPUsave(r25) ; Chain us to context
@@ -431,11 +579,6 @@ swvmGotFloat:
 bl EXT(bcopy) ; Copy the new values

- lwz r14,vmmppcFPSCRshadow(r17) ; Get the fpscr pad
- lwz r10,vmmppcFPSCRshadow+4(r17) ; Get the fpscr
- stw r14,savefpscrpad(r30) ; Save the new fpscr pad
- stw r10,savefpscr(r30) ; Save the new fpscr
-
 lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
 stw r15,vmmCntrl(r17) ; Save the control flags sans vmmFloatLoad
 rlwinm r11,r11,0,floatCngbit+1,floatCngbit-1 ; Clear the changed bit here
@@ -467,13 +610,16 @@ swvmNoNewFloats:
 ori r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc
 li r16,VMXowner ; Displacement to vector owner
 add r19,r18,r19 ; Point to the owner per_proc
- li r0,0 ; Clear this out

swvminvvec: lwarx r18,r16,r19 ; Get the owner
- cmplw r18,r25 ; Does he still have this context?
- bne swvminvved ; Nope...
- stwcx. r0,r16,r19 ; Try to invalidate it
- bne- swvminvvec ; Try again if there was a collision...
+
+ sub r0,r18,r25 ; Subtract one from the other
+ sub r3,r25,r18 ; Subtract the other from the one
+ or r3,r3,r0 ; Combine them
+ srawi r3,r3,31 ; Get a 0 if equal or -1 if not
+ and r18,r18,r3 ; Make 0 if same, unchanged if not
+ stwcx. r18,r16,r19 ; Try to invalidate it
+ bne-- swvminvvec ; Try again if there was a collision...

swvminvved: lwz r3,VMXsave(r25) ; Get the vector savearea
 dcbt r14,r17 ; Touch in first line of new stuff
@@ -486,7 +632,7 @@ swvminvved: lwz r3,VMXsave(r25) ; Get the vector savearea
 stw r26,SAVact(r3) ; Save our activation
 li r0,0 ; Get a zero
 stb r7,SAVflags+2(r3) ; Set that this is vector
- stw r0,SAVprev(r3) ; Clear the back chain
+ stw r0,SAVprev+4(r3) ; Clear the back chain
 stw r0,SAVlevel(r3) ; We are always at level 0 (user state)

 stw r3,VMXsave(r25) ; Chain us to context
@@ -499,21 +645,12 @@ swvmGotVect:
 bl EXT(bcopy) ; Copy the new values

- lwz r11,vmmppcVSCRshadow+0(r17) ; Get the VSCR
- lwz r14,vmmppcVSCRshadow+4(r17) ; Get the VSCR
- lwz r10,vmmppcVSCRshadow+8(r17) ; Get the VSCR
- lwz r9,vmmppcVSCRshadow+12(r17) ; Get the VSCR
 lwz r8,savevrsave(r30) ; Get the current VRSave
- stw r11,savevscr+0(r30) ; Set the VSCR
- stw r14,savevscr+4(r30) ; Set the VSCR
- stw r10,savevscr+8(r30) ; Set the VSCR
- stw r9,savevscr+12(r30) ; Set the VSCR
- stw r8,savevrvalid(r21) ; Set the current VRSave as valid saved
-
 lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
 stw r15,vmmCntrl(r17) ; Save the control flags sans vmmVectLoad
 rlwinm r11,r11,0,vectorCngbit+1,vectorCngbit-1 ; Clear the changed bit here
+ stw r8,savevrvalid(r21) ; Set the current VRSave as valid saved
 lwz r14,vmmStat(r17) ; Get the status flags
 mfsprg r10,0 ; Get the per_proc
 stw r11,ACT_MACT_SPF(r26) ; Get the special flags
@@ -526,6 +663,65 @@ swvmNoNewVects:
 lwz r16,ACT_THREAD(r26) ; Restore the thread pointer
 b EXT(ppcscret) ; Go back to handler...

+ .align 5
+
+swvmmBogus: li r2,kVmmBogusContext ; Set bogus index return
+ li r0,0 ; Clear
+ li r3,1 ; Set normal return with check for AST
+ stw r0,saver3(r30) ; Clear upper half
+ stw r2,saver3+4(r30) ; Pass back the return code
+ b EXT(ppcscret) ; Go back to handler...
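;
; (For illustration: the compare-and-clear sequences in the two owner-invalidate
; loops above are branch-free; roughly, in C, each iteration does
;     owner = (owner == our_ctx) ? 0 : owner;   /* release ownership only if we hold it */
; a sketch, not code from this patch.)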
+ +swvmmBogAdsp: + li r2,kVmmInvalidAdSpace ; Set bogus address space return + li r0,0 ; Clear + li r3,1 ; Set normal return with check for AST + stw r0,saver3(r30) ; Clear upper half + stw r2,saver3+4(r30) ; Pass back the return code + b EXT(ppcscret) ; Go back to handler... + +swvmSetStop: + li r2,kVmmStopped ; Set stopped return + li r0,0 ; Clear + li r3,1 ; Set normal return with check for AST + stw r0,saver3(r30) ; Clear upper half + stw r2,saver3+4(r30) ; Pass back the return code + stw r2,return_code(r5) ; Save the exit code + b EXT(ppcscret) ; Go back to handler... + +svvmTimerPop: + li r2,kVmmReturnNull ; Set null return + li r0,0 ; Clear + li r3,1 ; Set normal return with check for AST + stw r0,saver3(r30) ; Clear upper half + stw r2,saver3+4(r30) ; Pass back the return code + stw r2,return_code(r5) ; Save the exit code + b EXT(ppcscret) ; Go back to handler... + +swvmEmulateISI: + mfsprg r10,2 ; Get feature flags + lwz r11,vmmXAFlgs(r28) ; Get the eXtended Architecture flags + mtcrf 0x02,r10 ; Move pf64Bit to its normal place in CR6 + rlwinm. r11,r11,0,0,0 ; Are we doing a 64-bit virtual machine? + li r2,kVmmReturnInstrPageFault ; Set ISI + crnot vmmDoing64,cr0_eq ; Remember if this is a 64-bit VM + li r0,0 ; Clear + li r3,1 ; Set normal return with check for AST + stw r0,saver3(r30) ; Clear upper half + stw r2,saver3+4(r30) ; Pass back the return code + stw r2,return_code(r5) ; Save the exit code + lis r7,hi16(MASK(DSISR_HASH)) ; Pretend like we got a PTE miss + bt vmmDoing64,vmISI64 ; Go do this for a 64-bit VM... + + lwz r10,vmmppcpc(r5) ; Get the PC as failing address + stw r10,return_params+0(r5) ; Save PC as first return parm + stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm + b EXT(ppcscret) ; Go back to handler... + +vmISI64: ld r10,vmmppcXpc(r5) ; Get the PC as failing address + std r10,return_paramsX+0(r5) ; Save PC as first return parm + std r7,return_paramsX+8(r5) ; Save the pseudo-DSISR as second return parm + b EXT(ppcscret) ; Go back to handler... ; ; These syscalls are invalid, FAM syscall fast path @@ -594,7 +790,7 @@ vmmexitcall: mr r3,r16 ; Restore activation address stw r19,vmmStat(r5) ; Save the changed and popped flags bl swapCtxt ; Exchange the VM context for the emulator one - stw r8,saver3(r30) ; Set the return code as the return value also + stw r8,saver3+4(r30) ; Set the return code as the return value also b EXT(retFromVM) ; Go back to handler... @@ -666,7 +862,7 @@ LEXT(vmm_force_exit) lis r9,hi16(SAVredrive) ; Get exception redrive bit rlwinm r8,r8,30,24,31 ; Convert exception to return code andc r7,r7,r9 ; Make sure redrive is off because we are intercepting - stw r8,saver3(r30) ; Set the return code as the return value also + stw r8,saver3+4(r30) ; Set the return code as the return value also stw r7,SAVflags(r30) ; Set the savearea flags @@ -684,20 +880,25 @@ vfeNotRun: lmw r13,FM_ARG0(r1) ; Restore all non-volatile registers ; .align 5 -swapCtxt: la r6,vmmppcpc(r5) ; Point to the first line +swapCtxt: + mfsprg r10,2 ; Get feature flags + la r6,vmmppcpc(r5) ; Point to the first line + mtcrf 0x02,r10 ; Move pf64Bit to its normal place in CR6 lwz r14,saveexception(r30) ; Get the exception code dcbt 0,r6 ; Touch in the first line of the context area - lwz r7,savesrr0(r30) ; Start moving context - lwz r8,savesrr1(r30) - lwz r9,saver0(r30) + bt++ pf64Bitb,swap64 ; Go do this swap on a 64-bit machine... 
+ + lwz r7,savesrr0+4(r30) ; Start moving context + lwz r8,savesrr1+4(r30) + lwz r9,saver0+4(r30) cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call? - lwz r10,saver1(r30) - lwz r11,saver2(r30) - lwz r12,saver3(r30) - lwz r13,saver4(r30) + lwz r10,saver1+4(r30) + lwz r11,saver2+4(r30) + lwz r12,saver3+4(r30) + lwz r13,saver4+4(r30) la r6,vmmppcr6(r5) ; Point to second line - lwz r14,saver5(r30) + lwz r14,saver5+4(r30) dcbt 0,r6 ; Touch second line of context area @@ -734,28 +935,31 @@ swapCtxt: la r6,vmmppcpc(r5) ; Point to the first line stw r13,return_params+4(r5) ; Save the second return stw r14,return_params+8(r5) ; Save the third return -swapnotsc: stw r15,savesrr0(r30) ; Save vm context into the savearea - stw r23,savesrr1(r30) - stw r17,saver0(r30) - stw r18,saver1(r30) - stw r19,saver2(r30) - stw r20,saver3(r30) - stw r21,saver4(r30) +swapnotsc: li r6,0 ; Clear this out + stw r6,savesrr0(r30) ; Insure that high order is clear + stw r15,savesrr0+4(r30) ; Save vm context into the savearea + stw r6,savesrr1(r30) ; Insure that high order is clear + stw r23,savesrr1+4(r30) + stw r17,saver0+4(r30) + stw r18,saver1+4(r30) + stw r19,saver2+4(r30) + stw r20,saver3+4(r30) + stw r21,saver4+4(r30) la r6,vmmppcr14(r5) ; Point to fourth line - stw r22,saver5(r30) + stw r22,saver5+4(r30) dcbt 0,r6 ; Touch fourth line ; Swap 8 registers - lwz r7,saver6(r30) ; Read savearea - lwz r8,saver7(r30) - lwz r9,saver8(r30) - lwz r10,saver9(r30) - lwz r11,saver10(r30) - lwz r12,saver11(r30) - lwz r13,saver12(r30) - lwz r14,saver13(r30) + lwz r7,saver6+4(r30) ; Read savearea + lwz r8,saver7+4(r30) + lwz r9,saver8+4(r30) + lwz r10,saver9+4(r30) + lwz r11,saver10+4(r30) + lwz r12,saver11+4(r30) + lwz r13,saver12+4(r30) + lwz r14,saver13+4(r30) lwz r15,vmmppcr6(r5) ; Read vm context lwz r24,vmmppcr7(r5) @@ -778,25 +982,25 @@ swapnotsc: stw r15,savesrr0(r30) ; Save vm context into the savearea dcbt 0,r6 ; Touch fifth line - stw r15,saver6(r30) ; Write vm context - stw r24,saver7(r30) - stw r17,saver8(r30) - stw r18,saver9(r30) - stw r19,saver10(r30) - stw r20,saver11(r30) - stw r21,saver12(r30) - stw r22,saver13(r30) + stw r15,saver6+4(r30) ; Write vm context + stw r24,saver7+4(r30) + stw r17,saver8+4(r30) + stw r18,saver9+4(r30) + stw r19,saver10+4(r30) + stw r20,saver11+4(r30) + stw r21,saver12+4(r30) + stw r22,saver13+4(r30) ; Swap 8 registers - lwz r7,saver14(r30) ; Read savearea - lwz r8,saver15(r30) - lwz r9,saver16(r30) - lwz r10,saver17(r30) - lwz r11,saver18(r30) - lwz r12,saver19(r30) - lwz r13,saver20(r30) - lwz r14,saver21(r30) + lwz r7,saver14+4(r30) ; Read savearea + lwz r8,saver15+4(r30) + lwz r9,saver16+4(r30) + lwz r10,saver17+4(r30) + lwz r11,saver18+4(r30) + lwz r12,saver19+4(r30) + lwz r13,saver20+4(r30) + lwz r14,saver21+4(r30) lwz r15,vmmppcr14(r5) ; Read vm context lwz r24,vmmppcr15(r5) @@ -819,25 +1023,25 @@ swapnotsc: stw r15,savesrr0(r30) ; Save vm context into the savearea dcbt 0,r6 ; Touch sixth line - stw r15,saver14(r30) ; Write vm context - stw r24,saver15(r30) - stw r17,saver16(r30) - stw r18,saver17(r30) - stw r19,saver18(r30) - stw r20,saver19(r30) - stw r21,saver20(r30) - stw r22,saver21(r30) + stw r15,saver14+4(r30) ; Write vm context + stw r24,saver15+4(r30) + stw r17,saver16+4(r30) + stw r18,saver17+4(r30) + stw r19,saver18+4(r30) + stw r20,saver19+4(r30) + stw r21,saver20+4(r30) + stw r22,saver21+4(r30) ; Swap 8 registers - lwz r7,saver22(r30) ; Read savearea - lwz r8,saver23(r30) - lwz r9,saver24(r30) - lwz r10,saver25(r30) - lwz r11,saver26(r30) - 
lwz r12,saver27(r30) - lwz r13,saver28(r30) - lwz r14,saver29(r30) + lwz r7,saver22+4(r30) ; Read savearea + lwz r8,saver23+4(r30) + lwz r9,saver24+4(r30) + lwz r10,saver25+4(r30) + lwz r11,saver26+4(r30) + lwz r12,saver27+4(r30) + lwz r13,saver28+4(r30) + lwz r14,saver29+4(r30) lwz r15,vmmppcr22(r5) ; Read vm context lwz r24,vmmppcr23(r5) @@ -860,23 +1064,23 @@ swapnotsc: stw r15,savesrr0(r30) ; Save vm context into the savearea dcbt 0,r6 ; Touch seventh line - stw r15,saver22(r30) ; Write vm context - stw r24,saver23(r30) - stw r17,saver24(r30) - stw r18,saver25(r30) - stw r19,saver26(r30) - stw r20,saver27(r30) - stw r21,saver28(r30) - stw r22,saver29(r30) + stw r15,saver22+4(r30) ; Write vm context + stw r24,saver23+4(r30) + stw r17,saver24+4(r30) + stw r18,saver25+4(r30) + stw r19,saver26+4(r30) + stw r20,saver27+4(r30) + stw r21,saver28+4(r30) + stw r22,saver29+4(r30) ; Swap 8 registers - lwz r7,saver30(r30) ; Read savearea - lwz r8,saver31(r30) + lwz r7,saver30+4(r30) ; Read savearea + lwz r8,saver31+4(r30) lwz r9,savecr(r30) - lwz r10,savexer(r30) - lwz r11,savelr(r30) - lwz r12,savectr(r30) + lwz r10,savexer+4(r30) + lwz r11,savelr+4(r30) + lwz r12,savectr+4(r30) lwz r14,savevrsave(r30) lwz r15,vmmppcr30(r5) ; Read vm context @@ -895,12 +1099,12 @@ swapnotsc: stw r15,savesrr0(r30) ; Save vm context into the savearea stw r12,vmmppcctr(r5) stw r14,vmmppcvrsave(r5) - stw r15,saver30(r30) ; Write vm context - stw r24,saver31(r30) + stw r15,saver30+4(r30) ; Write vm context + stw r24,saver31+4(r30) stw r17,savecr(r30) - stw r18,savexer(r30) - stw r19,savelr(r30) - stw r20,savectr(r30) + stw r18,savexer+4(r30) + stw r19,savelr+4(r30) + stw r20,savectr+4(r30) stw r22,savevrsave(r30) ; Swap 8 registers @@ -956,7 +1160,7 @@ swapnotsc: stw r15,savesrr0(r30) ; Save vm context into the savearea ; Set exit returns for a DSI or alignment exception ; -swapDSI: lwz r10,savedar(r30) ; Get the DAR +swapDSI: lwz r10,savedar+4(r30) ; Get the DAR lwz r7,savedsisr(r30) ; and the DSISR stw r10,return_params+0(r5) ; Save DAR as first return parm stw r7,return_params+4(r5) ; Save DSISR as second return parm @@ -982,6 +1186,457 @@ swapSC: lwz r10,vmmppcr6(r5) ; Get the fourth paramter stw r10,return_params+12(r5) ; Save it blr ; Return... +; +; Here is the swap for 64-bit machines +; + +swap64: lwz r22,vmmXAFlgs(r27) ; Get the eXtended Architecture flags + ld r7,savesrr0(r30) ; Start moving context + ld r8,savesrr1(r30) + ld r9,saver0(r30) + cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call? + ld r10,saver1(r30) + ld r11,saver2(r30) + rlwinm. r22,r22,0,0,0 ; Are we doing a 64-bit virtual machine? 
+ ld r12,saver3(r30) + crnot vmmDoing64,cr0_eq ; Remember if this is a 64-bit VM + ld r13,saver4(r30) + la r6,vmmppcr6(r5) ; Point to second line + ld r14,saver5(r30) + + dcbt 0,r6 ; Touch second line of context area + + bt vmmDoing64,sw64x1 ; Skip to 64-bit stuff + + lwz r15,vmmppcpc(r5) ; First line of context + lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user + lwz r23,vmmppcmsr(r5) + ori r22,r25,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user + lwz r17,vmmppcr0(r5) + lwz r18,vmmppcr1(r5) + and r23,r23,r22 ; Keep only the controllable bits + lwz r19,vmmppcr2(r5) + oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits + lwz r20,vmmppcr3(r5) + ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits + lwz r21,vmmppcr4(r5) + lwz r22,vmmppcr5(r5) + + dcbt 0,r6 ; Touch third line of context area + + stw r7,vmmppcpc(r5) ; Save emulator context into the context area + stw r8,vmmppcmsr(r5) + stw r9,vmmppcr0(r5) + stw r10,vmmppcr1(r5) + stw r11,vmmppcr2(r5) + stw r12,vmmppcr3(r5) + stw r13,vmmppcr4(r5) + stw r14,vmmppcr5(r5) + +; +; Save the first 3 parameters if we are an SC (we will take care of the last later) +; + bne+ cr1,sw64x1done ; Skip next if not an SC exception... + stw r12,return_params+0(r5) ; Save the first return + stw r13,return_params+4(r5) ; Save the second return + stw r14,return_params+8(r5) ; Save the third return + b sw64x1done ; We are done with this section... + +sw64x1: ld r15,vmmppcXpc(r5) ; First line of context + li r0,1 ; Get a 1 to turn on 64-bit + lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user (we will also allow 64-bit here) + sldi r0,r0,63 ; Get 64-bit bit + ld r23,vmmppcXmsr(r5) + ori r22,r25,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user + ld r17,vmmppcXr0(r5) + or r22,r22,r0 ; Add the 64-bit bit + ld r18,vmmppcXr1(r5) + and r23,r23,r22 ; Keep only the controllable bits + ld r19,vmmppcXr2(r5) + oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits + ld r20,vmmppcXr3(r5) + ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits + ld r21,vmmppcXr4(r5) + ld r22,vmmppcXr5(r5) + + dcbt 0,r6 ; Touch third line of context area + + std r7,vmmppcXpc(r5) ; Save emulator context into the context area + std r8,vmmppcXmsr(r5) + std r9,vmmppcXr0(r5) + std r10,vmmppcXr1(r5) + std r11,vmmppcXr2(r5) + std r12,vmmppcXr3(r5) + std r13,vmmppcXr4(r5) + std r14,vmmppcXr5(r5) + +; +; Save the first 3 parameters if we are an SC (we will take care of the last later) +; + bne+ cr1,sw64x1done ; Skip next if not an SC exception... 
+ std r12,return_paramsX+0(r5) ; Save the first return + std r13,return_paramsX+8(r5) ; Save the second return + std r14,return_paramsX+16(r5) ; Save the third return + +sw64x1done: + std r15,savesrr0(r30) ; Save vm context into the savearea + std r23,savesrr1(r30) + std r17,saver0(r30) + std r18,saver1(r30) + std r19,saver2(r30) + std r20,saver3(r30) + std r21,saver4(r30) + la r6,vmmppcr14(r5) ; Point to fourth line + std r22,saver5(r30) + + dcbt 0,r6 ; Touch fourth line + +; Swap 8 registers + + ld r7,saver6(r30) ; Read savearea + ld r8,saver7(r30) + ld r9,saver8(r30) + ld r10,saver9(r30) + ld r11,saver10(r30) + ld r12,saver11(r30) + ld r13,saver12(r30) + ld r14,saver13(r30) + + bt vmmDoing64,sw64x2 ; Skip to 64-bit stuff + + lwz r15,vmmppcr6(r5) ; Read vm context + lwz r24,vmmppcr7(r5) + lwz r17,vmmppcr8(r5) + lwz r18,vmmppcr9(r5) + lwz r19,vmmppcr10(r5) + lwz r20,vmmppcr11(r5) + lwz r21,vmmppcr12(r5) + lwz r22,vmmppcr13(r5) + + stw r7,vmmppcr6(r5) ; Write context + stw r8,vmmppcr7(r5) + stw r9,vmmppcr8(r5) + stw r10,vmmppcr9(r5) + stw r11,vmmppcr10(r5) + stw r12,vmmppcr11(r5) + stw r13,vmmppcr12(r5) + la r6,vmmppcr22(r5) ; Point to fifth line + stw r14,vmmppcr13(r5) + + dcbt 0,r6 ; Touch fifth line + b sw64x2done ; We are done with this section... + +sw64x2: ld r15,vmmppcXr6(r5) ; Read vm context + ld r24,vmmppcXr7(r5) + ld r17,vmmppcXr8(r5) + ld r18,vmmppcXr9(r5) + ld r19,vmmppcXr10(r5) + ld r20,vmmppcXr11(r5) + ld r21,vmmppcXr12(r5) + ld r22,vmmppcXr13(r5) + + std r7,vmmppcXr6(r5) ; Write context + std r8,vmmppcXr7(r5) + std r9,vmmppcXr8(r5) + std r10,vmmppcXr9(r5) + std r11,vmmppcXr10(r5) + std r12,vmmppcXr11(r5) + std r13,vmmppcXr12(r5) + la r6,vmmppcXr22(r5) ; Point to fifth line + std r14,vmmppcXr13(r5) + + dcbt 0,r6 ; Touch fifth line + +sw64x2done: std r15,saver6(r30) ; Write vm context + std r24,saver7(r30) + std r17,saver8(r30) + std r18,saver9(r30) + std r19,saver10(r30) + std r20,saver11(r30) + std r21,saver12(r30) + std r22,saver13(r30) + +; Swap 8 registers + + ld r7,saver14(r30) ; Read savearea + ld r8,saver15(r30) + ld r9,saver16(r30) + ld r10,saver17(r30) + ld r11,saver18(r30) + ld r12,saver19(r30) + ld r13,saver20(r30) + ld r14,saver21(r30) + + bt vmmDoing64,sw64x3 ; Skip to 64-bit stuff + + lwz r15,vmmppcr14(r5) ; Read vm context + lwz r24,vmmppcr15(r5) + lwz r17,vmmppcr16(r5) + lwz r18,vmmppcr17(r5) + lwz r19,vmmppcr18(r5) + lwz r20,vmmppcr19(r5) + lwz r21,vmmppcr20(r5) + lwz r22,vmmppcr21(r5) + + stw r7,vmmppcr14(r5) ; Write context + stw r8,vmmppcr15(r5) + stw r9,vmmppcr16(r5) + stw r10,vmmppcr17(r5) + stw r11,vmmppcr18(r5) + stw r12,vmmppcr19(r5) + stw r13,vmmppcr20(r5) + la r6,vmmppcr30(r5) ; Point to sixth line + stw r14,vmmppcr21(r5) + + dcbt 0,r6 ; Touch sixth line + b sw64x3done ; Done with this section... 
+ +sw64x3: ld r15,vmmppcXr14(r5) ; Read vm context + ld r24,vmmppcXr15(r5) + ld r17,vmmppcXr16(r5) + ld r18,vmmppcXr17(r5) + ld r19,vmmppcXr18(r5) + ld r20,vmmppcXr19(r5) + ld r21,vmmppcXr20(r5) + ld r22,vmmppcXr21(r5) + + std r7,vmmppcXr14(r5) ; Write context + std r8,vmmppcXr15(r5) + std r9,vmmppcXr16(r5) + std r10,vmmppcXr17(r5) + std r11,vmmppcXr18(r5) + std r12,vmmppcXr19(r5) + std r13,vmmppcXr20(r5) + la r6,vmmppcXr30(r5) ; Point to sixth line + std r14,vmmppcXr21(r5) + + dcbt 0,r6 ; Touch sixth line + +sw64x3done: std r15,saver14(r30) ; Write vm context + std r24,saver15(r30) + std r17,saver16(r30) + std r18,saver17(r30) + std r19,saver18(r30) + std r20,saver19(r30) + std r21,saver20(r30) + std r22,saver21(r30) + +; Swap 8 registers + + ld r7,saver22(r30) ; Read savearea + ld r8,saver23(r30) + ld r9,saver24(r30) + ld r10,saver25(r30) + ld r11,saver26(r30) + ld r12,saver27(r30) + ld r13,saver28(r30) + ld r14,saver29(r30) + + bt vmmDoing64,sw64x4 ; Skip to 64-bit stuff + + lwz r15,vmmppcr22(r5) ; Read vm context + lwz r24,vmmppcr23(r5) + lwz r17,vmmppcr24(r5) + lwz r18,vmmppcr25(r5) + lwz r19,vmmppcr26(r5) + lwz r20,vmmppcr27(r5) + lwz r21,vmmppcr28(r5) + lwz r22,vmmppcr29(r5) + + stw r7,vmmppcr22(r5) ; Write context + stw r8,vmmppcr23(r5) + stw r9,vmmppcr24(r5) + stw r10,vmmppcr25(r5) + stw r11,vmmppcr26(r5) + stw r12,vmmppcr27(r5) + stw r13,vmmppcr28(r5) + la r6,vmmppcvscr(r5) ; Point to seventh line + stw r14,vmmppcr29(r5) + dcbt 0,r6 ; Touch seventh line + b sw64x4done ; Done with this section... + +sw64x4: ld r15,vmmppcXr22(r5) ; Read vm context + ld r24,vmmppcXr23(r5) + ld r17,vmmppcXr24(r5) + ld r18,vmmppcXr25(r5) + ld r19,vmmppcXr26(r5) + ld r20,vmmppcXr27(r5) + ld r21,vmmppcXr28(r5) + ld r22,vmmppcXr29(r5) + + std r7,vmmppcXr22(r5) ; Write context + std r8,vmmppcXr23(r5) + std r9,vmmppcXr24(r5) + std r10,vmmppcXr25(r5) + std r11,vmmppcXr26(r5) + std r12,vmmppcXr27(r5) + std r13,vmmppcXr28(r5) + la r6,vmmppcvscr(r5) ; Point to seventh line + std r14,vmmppcXr29(r5) + + dcbt 0,r6 ; Touch seventh line + +sw64x4done: std r15,saver22(r30) ; Write vm context + std r24,saver23(r30) + std r17,saver24(r30) + std r18,saver25(r30) + std r19,saver26(r30) + std r20,saver27(r30) + std r21,saver28(r30) + std r22,saver29(r30) + +; Swap 8 registers + + ld r7,saver30(r30) ; Read savearea + ld r8,saver31(r30) + lwz r9,savecr(r30) + ld r10,savexer(r30) + ld r11,savelr(r30) + ld r12,savectr(r30) + lwz r14,savevrsave(r30) + + bt vmmDoing64,sw64x5 ; Skip to 64-bit stuff + + lwz r15,vmmppcr30(r5) ; Read vm context + lwz r24,vmmppcr31(r5) + lwz r17,vmmppccr(r5) + lwz r18,vmmppcxer(r5) + lwz r19,vmmppclr(r5) + lwz r20,vmmppcctr(r5) + lwz r22,vmmppcvrsave(r5) + + stw r7,vmmppcr30(r5) ; Write context + stw r8,vmmppcr31(r5) + stw r9,vmmppccr(r5) + stw r10,vmmppcxer(r5) + stw r11,vmmppclr(r5) + stw r12,vmmppcctr(r5) + stw r14,vmmppcvrsave(r5) + b sw64x5done ; Done here... 
+ +sw64x5: ld r15,vmmppcXr30(r5) ; Read vm context + ld r24,vmmppcXr31(r5) + lwz r17,vmmppcXcr(r5) + ld r18,vmmppcXxer(r5) + ld r19,vmmppcXlr(r5) + ld r20,vmmppcXctr(r5) + lwz r22,vmmppcXvrsave(r5) + + std r7,vmmppcXr30(r5) ; Write context + std r8,vmmppcXr31(r5) + stw r9,vmmppcXcr(r5) + std r10,vmmppcXxer(r5) + std r11,vmmppcXlr(r5) + std r12,vmmppcXctr(r5) + stw r14,vmmppcXvrsave(r5) + +sw64x5done: std r15,saver30(r30) ; Write vm context + std r24,saver31(r30) + stw r17,savecr(r30) + std r18,savexer(r30) + std r19,savelr(r30) + std r20,savectr(r30) + stw r22,savevrsave(r30) + +; Swap 8 registers + + lwz r7,savevscr+0(r30) ; Read savearea + lwz r8,savevscr+4(r30) + lwz r9,savevscr+8(r30) + lwz r10,savevscr+12(r30) + lwz r11,savefpscrpad(r30) + lwz r12,savefpscr(r30) + + lwz r15,vmmppcvscr+0(r5) ; Read vm context + lwz r24,vmmppcvscr+4(r5) + lwz r17,vmmppcvscr+8(r5) + lwz r18,vmmppcvscr+12(r5) + lwz r19,vmmppcfpscrpad(r5) + lwz r20,vmmppcfpscr(r5) + + stw r7,vmmppcvscr+0(r5) ; Write context + stw r8,vmmppcvscr+4(r5) + stw r9,vmmppcvscr+8(r5) + stw r10,vmmppcvscr+12(r5) + stw r11,vmmppcfpscrpad(r5) + stw r12,vmmppcfpscr(r5) + + stw r15,savevscr+0(r30) ; Write vm context + stw r24,savevscr+4(r30) + stw r17,savevscr+8(r30) + stw r18,savevscr+12(r30) + stw r19,savefpscrpad(r30) + stw r20,savefpscr(r30) + + +; +; Cobble up the exception return code and save any specific return values +; + + lwz r7,saveexception(r30) ; Pick up the exception code + rlwinm r8,r7,30,24,31 ; Convert exception to return code + cmplwi r7,T_DATA_ACCESS ; Was this a DSI? + stw r8,return_code(r5) ; Save the exit code + cmplwi cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI? + beq+ swapDSI64 ; Yeah... + cmplwi r7,T_ALIGNMENT ; Alignment exception? + beq+ cr1,swapISI64 ; We had an ISI... + cmplwi cr1,r7,T_SYSTEM_CALL ; Exiting because of a system call? + beq+ swapDSI64 ; An alignment exception looks like a DSI... + beq+ cr1,swapSC64 ; We had a system call... + + blr ; Return... + +; +; Set exit returns for a DSI or alignment exception +; + +swapDSI64: ld r10,savedar(r30) ; Get the DAR + lwz r7,savedsisr(r30) ; and the DSISR + bt vmmDoing64,sw64DSI ; Skip to 64-bit stuff... + + + stw r10,return_params+0(r5) ; Save DAR as first return parm + stw r7,return_params+4(r5) ; Save DSISR as second return parm + blr ; Return... + +sw64DSI: std r10,return_paramsX+0(r5) ; Save DAR as first return parm + std r7,return_paramsX+8(r5) ; Save DSISR as second return parm (note that this is expanded to 64 bits) + blr ; Return... + +; +; Set exit returns for an ISI +; + +swapISI64: bt vmmDoing64,sw64ISI ; Skip to 64-bit stuff... + lwz r7,vmmppcmsr(r5) ; Get the SRR1 value + lwz r10,vmmppcpc(r5) ; Get the PC as failing address + rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR + stw r10,return_params+0(r5) ; Save PC as first return parm + stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm + blr ; Return... + +sw64ISI: ld r7,vmmppcXmsr(r5) ; Get the SRR1 value + ld r10,vmmppcXpc(r5) ; Get the PC as failing address + rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR + std r10,return_paramsX+0(r5) ; Save PC as first return parm + std r7,return_paramsX+8(r5) ; Save the pseudo-DSISR as second return parm + blr ; Return... + +; +; Set exit returns for a system call (note: we did the first 3 earlier) +; Do we really need to pass parameters back here???? +; + +swapSC64: bt vmmDoing64,sw64SC ; Skip to 64-bit stuff... 
+ lwz r10,vmmppcr6(r5) ; Get the fourth parameter + stw r10,return_params+12(r5) ; Save it + blr ; Return... + +sw64SC: ld r10,vmmppcXr6(r5) ; Get the fourth parameter + std r10,return_paramsX+24(r5) ; Save it + blr ; Return... + ; ; vmmFamGuestResume: ; Restore Guest context from Fam mode. @@ -990,29 +1645,32 @@ swapSC: lwz r10,vmmppcr6(r5) ; Get the fourth paramter vmmFamGuestResume: mfsprg r10,0 ; Get the per_proc lwz r27,vmmCEntry(r3) ; Get the context that is active + lwz r4,VMMXAFlgs(r10) ; Get the eXtended Architecture flags + rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine? lwz r15,spcFlags(r10) ; Get per_proc special flags mr r26,r3 ; Save the activation pointer - lwz r17,vmmFlags(r27) ; Get the status flags lwz r20,vmmContextKern(r27) ; Get the comm area rlwinm r15,r15,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit stw r15,spcFlags(r10) ; Update the special flags - rlwinm. r0,r17,0,vmmMapDoneb,vmmMapDoneb ; Did we just do a map function? + bne fgrX lwz r7,famguestpc(r20) ; Load famguest ctx pc - andc r17,r17,r0 ; Turn off map flag - stw r17,vmmFlags(r27) ; Update vmmFlags - beq+ vmmFamRetNoMap ; No mapping done... + bf++ vmmMapDone,fgrNoMap ; No mapping done for this space. lwz r3,SAVflags(r30) ; Pick up the savearea flags - lwz r2,vmmLastMap(r27) ; Get the last mapped address + lwz r2,vmmLastMap(r28) ; Get the last mapped address + lwz r6,vmmLastMap+4(r28) ; Get the last mapped address li r4,T_DATA_ACCESS ; Change to DSI fault oris r3,r3,hi16(SAVredrive) ; Set exception redrive stw r2,savedar(r30) ; Set the DAR to the last thing we mapped + stw r6,savedar+4(r30) ; Set the DAR to the last thing we mapped stw r3,SAVflags(r30) ; Turn on the redrive request lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss stw r4,saveexception(r30) ; Say we need to emulate a DSI + li r0,0 ; Clear stw r2,savedsisr(r30) ; Pretend we have a PTE miss -vmmFamRetNoMap: - mfsrr1 r4 ; Get the current MSR value - stw r7,savesrr0(r30) ; Set savearea pc + stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of +fgrNoMap: + lwz r4,savesrr1+4(r30) ; Get the saved MSR value + stw r7,savesrr0+4(r30) ; Set savearea pc lwz r5,famguestmsr(r20) ; Load famguest ctx msr lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user @@ -1021,48 +1679,105 @@ vmmFamRetNoMap: ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector - stw r5,savesrr1(r30) ; Set savearea srr1 + stw r5,savesrr1+4(r30) ; Set savearea srr1 lwz r4,famguestr0(r20) ; Load famguest ctx r0 lwz r5,famguestr1(r20) ; Load famguest ctx r1 lwz r6,famguestr2(r20) ; Load famguest ctx r2 lwz r7,famguestr3(r20) ; Load famguest ctx r3 - stw r4,saver0(r30) ; Set savearea r0 - stw r5,saver1(r30) ; Set savearea r1 - stw r6,saver2(r30) ; Set savearea r2 - stw r7,saver3(r30) ; Set savearea r3 + stw r4,saver0+4(r30) ; Set savearea r0 + stw r5,saver1+4(r30) ; Set savearea r1 + stw r6,saver2+4(r30) ; Set savearea r2 + stw r7,saver3+4(r30) ; Set savearea r3 lwz r4,famguestr4(r20) ; Load famguest ctx r4 lwz r5,famguestr5(r20) ; Load famguest ctx r5 lwz r6,famguestr6(r20) ; Load famguest ctx r6 lwz r7,famguestr7(r20) ; Load famguest ctx r7 - stw r4,saver4(r30) ; Set savearea r4 - stw r5,saver5(r30) ; Set savearea r5 - stw r6,saver6(r30) ; Set savearea r6 - stw r7,saver7(r30) ; Set savearea r7 - + stw 
r4,saver4+4(r30) ; Set savearea r4 + stw r5,saver5+4(r30) ; Set savearea r5 + stw r6,saver6+4(r30) ; Set savearea r6 + stw r7,saver7+4(r30) ; Set savearea r7 + b fgrret +fgrX: + ld r7,famguestXpc(r20) ; Load famguest ctx pc + bf++ vmmMapDone,fgrXNoMap ; No mapping done for this space. + lwz r3,SAVflags(r30) ; Pick up the savearea flags + ld r2,vmmLastMap(r28) ; Get the last mapped address + li r4,T_DATA_ACCESS ; Change to DSI fault + oris r3,r3,hi16(SAVredrive) ; Set exception redrive + std r2,savedar(r30) ; Set the DAR to the last thing we mapped + stw r3,SAVflags(r30) ; Turn on the redrive request + lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss + stw r4,saveexception(r30) ; Say we need to emulate a DSI + li r0,0 ; Clear + stw r2,savedsisr(r30) ; Pretend we have a PTE miss + stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of +fgrXNoMap: + ld r4,savesrr1(r30) ; Get the saved MSR value + std r7,savesrr0(r30) ; Set savearea pc + ld r5,famguestXmsr(r20) ; Load famguest ctx msr + lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user + ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user + and r5,r5,r6 ; Keep only the controllable bits + oris r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits + ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits + rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP + rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector + std r5,savesrr1(r30) ; Set savearea srr1 + ld r4,famguestXr0(r20) ; Load famguest ctx r0 + ld r5,famguestXr1(r20) ; Load famguest ctx r1 + ld r6,famguestXr2(r20) ; Load famguest ctx r2 + ld r7,famguestXr3(r20) ; Load famguest ctx r3 + std r4,saver0(r30) ; Set savearea r0 + std r5,saver1(r30) ; Set savearea r1 + std r6,saver2(r30) ; Set savearea r2 + std r7,saver3(r30) ; Set savearea r3 + ld r4,famguestXr4(r20) ; Load famguest ctx r4 + ld r5,famguestXr5(r20) ; Load famguest ctx r5 + ld r6,famguestXr6(r20) ; Load famguest ctx r6 + ld r7,famguestXr7(r20) ; Load famguest ctx r7 + std r4,saver4(r30) ; Set savearea r4 + std r5,saver5(r30) ; Set savearea r5 + std r6,saver6(r30) ; Set savearea r6 + std r7,saver7(r30) ; Set savearea r7 +fgrret: li r3,1 ; Show normal exit with check for AST lwz r16,ACT_THREAD(r26) ; Restore the thread pointer b EXT(ppcscret) ; Go back to handler... ; -; FAM Intercept exception handler ; .align 5 - .globl EXT(vmm_fam_handler) - -LEXT(vmm_fam_handler) - lwz r4,saver4(r13) ; Load savearea r0 + .globl EXT(vmm_fam_exc) + +LEXT(vmm_fam_exc) + lwz r4,VMMXAFlgs(r2) ; Get the eXtended Architecture flags + lwz r1,pfAvailable(r2) ; Get the CPU features flags + rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine? + bne fexcX + lwz r4,saver4+4(r13) ; Load savearea r4 cmplwi r11,T_ALIGNMENT ; Alignment exception? lwz r3,VMMareaPhys(r2) ; Load phys state page addr + mtcrf 0x02,r1 ; Move pf64Bit to its normal place in CR6 cmplwi cr1,r11,T_PROGRAM ; Exiting because of a PRG? + bt++ pf64Bitb,fexcVMareaPhys64 ; Go do this on a 64-bit machine... 
+ slwi r3,r3,12 ; Change ppnum to physical address + b fexcVMareaPhysres +fexcVMareaPhys64: + mtxer r5 ; Restore xer + lwz r5,saver5+4(r13) ; Load savearea r5 + lwz r6,saver6+4(r13) ; Load savearea r6 + sldi r3,r3,12 ; Change ppnum to physical address +fexcVMareaPhysres: stw r4,famguestr4(r3) ; Save r4 in famguest ctx stw r5,famguestr5(r3) ; Save r5 in famguest ctx stw r6,famguestr6(r3) ; Save r6 in famguest ctx stw r7,famguestr7(r3) ; Save r7 in famguest ctx - lwz r4,saver0(r13) ; Load savearea r0 - lwz r5,saver1(r13) ; Load savearea r1 - lwz r6,saver2(r13) ; Load savearea r2 - lwz r7,saver3(r13) ; Load savearea r3 + lwz r4,saver0+4(r13) ; Load savearea r0 + lwz r5,saver1+4(r13) ; Load savearea r1 + lwz r6,saver2+4(r13) ; Load savearea r2 + lwz r7,saver3+4(r13) ; Load savearea r3 stw r4,famguestr0(r3) ; Save r0 in famguest ctx stw r5,famguestr1(r3) ; Save r1 in famguest ctx stw r6,famguestr2(r3) ; Save r2 in famguest ctx @@ -1079,24 +1794,25 @@ LEXT(vmm_fam_handler) mtsrr1 r6 ; Set srr1 mr r6,r3 ; Set r6 with phys state page addr rlwinm r7,r11,30,24,31 ; Convert exception to return code - beq+ cr1,famPRG ; We had a program exception... - bne+ famRet + beq+ cr1,fexcPRG ; We had a program exception... + bne+ fexcret ; We had an Alignment... mfdar r3 ; Load dar mfdsisr r4 ; Load dsisr stw r3,famparam+0x4(r6) ; Set famparam 1 with dar stw r4,famparam+0x8(r6) ; Set famparam 2 with dsisr - b famRet ; -famPRG: + b fexcret ; +fexcPRG: stw r4,famparam+0x4(r6) ; Set famparam 1 with srr1 mr r3,r4 ; Set r3 with dsisr lwz r4,famguestr4(r6) ; Load r4 from famguest context -famRet: +fexcret: lwz r5,famguestr5(r6) ; Load r5 from famguest context lwz r13,famhandler(r6) ; Load user address to resume stw r2,famparam(r6) ; Set famparam 0 with srr0 stw r7,famdispcode(r6) ; Save the exit code lwz r1,famrefcon(r6) ; load refcon + bt++ pf64Bitb,fexcrfi64 ; Go do this on a 64-bit machine... mtcr r0 ; Restore cr mtsrr0 r13 ; Load srr0 mr r0,r7 ; Set dispatch code @@ -1105,63 +1821,191 @@ famRet: mfsprg r13,2 ; Restore r13 mfsprg r11,3 ; Restore r11 rfi +fexcrfi64: + mtcr r0 ; Restore cr + mtsrr0 r13 ; Load srr0 + mr r0,r7 ; Set dispatch code + lwz r7,famguestr7(r6) ; Load r7 from famguest context + lwz r6,famguestr6(r6) ; Load r6 from famguest context + mfsprg r13,2 ; Restore r13 + mfsprg r11,3 ; Restore r11 + rfid +fexcX: + mtxer r5 ; Restore xer + ld r4,saver4(r13) ; Load savearea r4 + ld r5,saver5(r13) ; Load savearea r5 + ld r6,saver6(r13) ; Load savearea r6 + cmplwi r11,T_ALIGNMENT ; Alignment exception? + lwz r3,VMMareaPhys(r2) ; Load phys state page addr + mtcrf 0x02,r1 ; Move pf64Bit to its normal place in CR6 + cmplwi cr1,r11,T_PROGRAM ; Exiting because of a PRG? 
+ sldi r3,r3,12 ; Change ppnum to physical address + std r4,famguestXr4(r3) ; Save r4 in famguest ctx + std r5,famguestXr5(r3) ; Save r5 in famguest ctx + std r6,famguestXr6(r3) ; Save r6 in famguest ctx + std r7,famguestXr7(r3) ; Save r7 in famguest ctx + ld r4,saver0(r13) ; Load savearea r0 + ld r5,saver1(r13) ; Load savearea r1 + ld r6,saver2(r13) ; Load savearea r2 + ld r7,saver3(r13) ; Load savearea r3 + std r4,famguestXr0(r3) ; Save r0 in famguest ctx + std r5,famguestXr1(r3) ; Save r1 in famguest ctx + std r6,famguestXr2(r3) ; Save r2 in famguest ctx + std r7,famguestXr3(r3) ; Save r3 in famguest ctx + lwz r4,spcFlags(r2) ; Load per_proc spcFlags + oris r4,r4,hi16(FamVMmode) ; Set FAM mode + stw r4,spcFlags(r2) ; Update per_proc spcFlags + mfsrr0 r2 ; Get the interrupt srr0 + mfsrr1 r4 ; Get the interrupt srr1 + std r2,famguestXpc(r3) ; Save srr0 in famguest ctx + std r4,famguestXmsr(r3) ; Save srr1 in famguest ctx + li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1)) + andc r6,r4,r6 ; Clear SE BE FE0 FE1 + mtsrr1 r6 ; Set srr1 + mr r6,r3 ; Set r6 with phys state page addr + rlwinm r7,r11,30,24,31 ; Convert exception to return code + beq+ cr1,fexcXPRG ; We had a program exception... + bne+ fexcXret + ; We had an Alignment... + mfdar r3 ; Load dar + mfdsisr r4 ; Load dsisr + std r3,famparamX+0x8(r6) ; Set famparam 1 with dar + std r4,famparamX+0x10(r6) ; Set famparam 2 with dsisr + b fexcXret +fexcXPRG: + std r4,famparamX+0x8(r6) ; Set famparam 1 with srr1 + mr r3,r4 ; Set r3 with dsisr + ld r4,famguestXr4(r6) ; Load r4 from famguest context +fexcXret: + ld r5,famguestXr5(r6) ; Load r5 from famguest context + ld r13,famhandlerX(r6) ; Load user address to resume + std r2,famparamX(r6) ; Set famparam 0 with srr0 + std r7,famdispcodeX(r6) ; Save the exit code + ld r1,famrefconX(r6) ; load refcon + mtcr r0 ; Restore cr + mtsrr0 r13 ; Load srr0 + mr r0,r7 ; Set dispatch code + ld r7,famguestXr7(r6) ; Load r7 from famguest context + ld r6,famguestXr6(r6) ; Load r6 from famguest context + mfsprg r13,2 ; Restore r13 + mfsprg r11,3 ; Restore r11 + rfid ; ; FAM Intercept DSI ISI fault handler ; .align 5 - .globl EXT(vmm_fam_pf_handler) + .globl EXT(vmm_fam_pf) -LEXT(vmm_fam_pf_handler) +LEXT(vmm_fam_pf) + lwz r4,VMMXAFlgs(r2) ; Get the eXtended Architecture flags lwz r3,VMMareaPhys(r2) ; Load phys state page addr - lwz r4,saver0(r13) ; Load savearea r0 - lwz r5,saver1(r13) ; Load savearea r1 - lwz r6,saver2(r13) ; Load savearea r2 - lwz r7,saver3(r13) ; Load savearea r3 + rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine? + bne fpfX + lwz r4,saver0+4(r13) ; Load savearea r0 + lwz r5,saver1+4(r13) ; Load savearea r1 + lwz r6,saver2+4(r13) ; Load savearea r2 + lwz r7,saver3+4(r13) ; Load savearea r3 + bt++ pf64Bitb,fpfVMareaPhys64 ; Go do this on a 64-bit machine... 
+ slwi r3,r3,12 ; Change ppnum to physical address + b fpfVMareaPhysret +fpfVMareaPhys64: + sldi r3,r3,12 ; Change ppnum to physical address +fpfVMareaPhysret: stw r4,famguestr0(r3) ; Save r0 in famguest stw r5,famguestr1(r3) ; Save r1 in famguest stw r6,famguestr2(r3) ; Save r2 in famguest stw r7,famguestr3(r3) ; Save r3 in famguest - lwz r4,saver4(r13) ; Load savearea r0 - lwz r5,saver5(r13) ; Load savearea r1 - lwz r6,saver6(r13) ; Load savearea r2 - lwz r7,saver7(r13) ; Load savearea r3 + lwz r4,saver4+4(r13) ; Load savearea r4 + lwz r5,saver5+4(r13) ; Load savearea r5 + lwz r6,saver6+4(r13) ; Load savearea r6 + lwz r7,saver7+4(r13) ; Load savearea r7 stw r4,famguestr4(r3) ; Save r4 in famguest lwz r4,spcFlags(r2) ; Load spcFlags stw r5,famguestr5(r3) ; Save r5 in famguest - lwz r5,savesrr0(r13) ; Get the interrupt srr0 + lwz r5,savesrr0+4(r13) ; Get the interrupt srr0 stw r6,famguestr6(r3) ; Save r6 in famguest - lwz r6,savesrr1(r13) ; Load srr1 + lwz r6,savesrr1+4(r13) ; Load srr1 oris r4,r4,hi16(FamVMmode) ; Set FAM mode stw r7,famguestr7(r3) ; Save r7 in famguest stw r4,spcFlags(r2) ; Update spcFlags lwz r1,famrefcon(r3) ; Load refcon lwz r2,famhandler(r3) ; Load famhandler to resume stw r5,famguestpc(r3) ; Save srr0 - stw r5,saver2(r13) ; Store srr0 in savearea r2 + stw r5,saver2+4(r13) ; Store srr0 in savearea r2 stw r5,famparam(r3) ; Store srr0 in fam param 0 stw r6,famguestmsr(r3) ; Save srr1 in famguestmsr cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this an ISI? rlwinm r7,r11,30,24,31 ; Convert exception to return code - beq+ cr1,FamPfISI ; We had an ISI... -; FamPfDSI - lwz r6,savedar(r13) ; Load dar from savearea + beq+ cr1,fpfISI ; We had an ISI... +; fpfDSI + lwz r6,savedar+4(r13) ; Load dar from savearea lwz r4,savedsisr(r13) ; Load dsisr from savearea stw r6,famparam+0x4(r3) ; Store dar in fam param 1 - stw r6,saver3(r13) ; Store dar in savearea r3 + stw r6,saver3+4(r13) ; Store dar in savearea r3 stw r4,famparam+0x8(r3) ; Store dsisr in fam param 2 - stw r4,saver4(r13) ; Store dsisr in savearea r4 - b FamPfRet -FamPfISI: + stw r4,saver4+4(r13) ; Store dsisr in savearea r4 + b fpfret +fpfISI: rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR stw r6,famparam+0x4(r3) ; Store srr1 in fam param 1 - stw r6,saver3(r13) ; Store srr1 in savearea r3 -FamPfRet: - stw r7,saver0(r13) ; Set dispatch code + stw r6,saver3+4(r13) ; Store srr1 in savearea r3 +fpfret: + stw r7,saver0+4(r13) ; Set dispatch code stw r7,famdispcode(r3) ; Set dispatch code - stw r1,saver1(r13) ; Store refcon in savearea r1 - stw r2,savesrr0(r13) ; Store famhandler in srr0 + stw r1,saver1+4(r13) ; Store refcon in savearea r1 + stw r2,savesrr0+4(r13) ; Store famhandler in srr0 + blr +fpfX: + ld r4,saver0(r13) ; Load savearea r0 + ld r5,saver1(r13) ; Load savearea r1 + ld r6,saver2(r13) ; Load savearea r2 + ld r7,saver3(r13) ; Load savearea r3 + sldi r3,r3,12 ; Change ppnum to physical address + std r4,famguestXr0(r3) ; Save r0 in famguest + std r5,famguestXr1(r3) ; Save r1 in famguest + std r6,famguestXr2(r3) ; Save r2 in famguest + std r7,famguestXr3(r3) ; Save r3 in famguest + ld r4,saver4(r13) ; Load savearea r4 + ld r5,saver5(r13) ; Load savearea r5 + ld r6,saver6(r13) ; Load savearea r6 + ld r7,saver7(r13) ; Load savearea r7 + std r4,famguestXr4(r3) ; Save r4 in famguest + lwz r4,spcFlags(r2) ; Load spcFlags + std r5,famguestXr5(r3) ; Save r5 in famguest + ld r5,savesrr0(r13) ; Get the interrupt srr0 + std r6,famguestXr6(r3) ; Save r6 in famguest + ld r6,savesrr1(r13) ; Load srr1 + oris 
r4,r4,hi16(FamVMmode) ; Set FAM mode + std r7,famguestXr7(r3) ; Save r7 in famguest + stw r4,spcFlags(r2) ; Update spcFlags + ld r1,famrefconX(r3) ; Load refcon + ld r2,famhandlerX(r3) ; Load famhandler to resume + std r5,famguestXpc(r3) ; Save srr0 + std r5,saver2(r13) ; Store srr0 in savearea r2 + std r5,famparamX(r3) ; Store srr0 in fam param 0 + std r6,famguestXmsr(r3) ; Save srr1 in famguestmsr + cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this an ISI? + rlwinm r7,r11,30,24,31 ; Convert exception to return code + beq+ cr1,fpfXISI ; We had an ISI... +; fpfXDSI + ld r6,savedar(r13) ; Load dar from savearea + lwz r4,savedsisr(r13) ; Load dsisr from savearea + std r6,famparamX+0x8(r3) ; Store dar in fam param 1 + std r6,saver3(r13) ; Store dar in savearea r3 + std r4,famparamX+0x10(r3) ; Store dsisr in fam param 2 + std r4,saver4(r13) ; Store dsisr in savearea r4 + b fpfXret +fpfXISI: + rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR + std r6,famparamX+0x8(r3) ; Store srr1 in fam param 1 + std r6,saver3(r13) ; Store srr1 in savearea r3 +fpfXret: + std r7,saver0(r13) ; Set dispatch code + std r7,famdispcodeX(r3) ; Set dispatch code + std r1,saver1(r13) ; Store refcon in savearea r1 + std r2,savesrr0(r13) ; Store famhandler in srr0 blr ; @@ -1173,92 +2017,325 @@ FamPfRet: LEXT(vmm_ufp) mfsprg r3,0 ; Get the per_proc area - bt cr5_eq,ResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest - lwz r3,VMMareaPhys(r3) ; Load fast assist area + mr r11,r13 ; Saved cr in r11 + lwz r13,VMMXAFlgs(r3) ; Get the eXtended Architecture flags + rlwinm. r13,r13,0,0,0 ; Are we doing a 64-bit virtual machine? + lwz r13,pfAvailable(r3) ; Get feature flags + mtcrf 0x02,r13 ; Put pf64Bitb etc in cr6 + lwz r13,VMMareaPhys(r3) ; Load fast assist area + bt++ pf64Bitb,ufpVMareaPhys64 ; Go do this on a 64-bit machine... + slwi r13,r13,12 ; Change ppnum to physical address + b ufpVMareaPhysret +ufpVMareaPhys64: + sldi r13,r13,12 ; Change ppnum to physical address +ufpVMareaPhysret: + bne ufpX + bt cr5_eq,ufpResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest cmpwi cr7,r4,0 ; Compare first arg with 0 cmpwi cr5,r4,7 ; Compare first arg with 7 cror cr1_eq,cr7_lt,cr5_gt ; Is it in 0 to 7 range beq cr1,ufpVMret ; Return if not in the range slwi r4,r4,2 ; multiply index by 4 - la r3,famguestr0(r3) ; Load the base address - bt cr6_eq,SetGuestReg ; Set/get selector -; GetGuestReg + la r3,famguestr0(r13) ; Load the base address + bt cr2_eq,ufpSetGuestReg ; Set/get selector +; ufpGetGuestReg lwzx r3,r4,r3 ; Load the guest register b ufpVMret ; Return -SetGuestReg: +ufpSetGuestReg: stwx r5,r4,r3 ; Update the guest register li r3,0 ; Set return value b ufpVMret ; Return -ResumeGuest: +ufpResumeGuest: lwz r7,spcFlags(r3) ; Pick up the special flags - lwz r13,VMMareaPhys(r3) ; Load fast assist area mtsrr0 r4 ; Set srr0 rlwinm. 
r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit - beq ResumeGuest_nokey ; Branch if not key switch + stw r7,spcFlags(r3) ; Update the special flags + mfsrr1 r6 ; Get the current MSR value + + lwz r4,famguestmsr(r13) ; Load guest srr1 + lis r1,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user + ori r1,r1,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user + and r4,r4,r1 ; Keep only the controllable bits + oris r4,r4,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits + ori r4,r4,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits + rlwimi r4,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP + rlwimi r4,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector + beq ufpnokey ; Branch if not key switch mr r2,r7 ; Save r7 rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key cmpw cr0,r7,r2 ; Is userProtKeybit changed? - beq ResumeGuest_nokey ; No, go to ResumeGuest_nokey - lwz r2,PP_USERPMAP(r3) ; Get user pmap phys addr - rlwinm r6,r7,userProtKeybit-2,2,2 ; Extract and shift the key bit - lwz r5,PMAP_SPACE(r2) ; Load the space id - oris r5,r5,hi16(SEG_REG_PROT) ; Set the protection - xor r5,r5,r6 ; Flip to proper segment register key - addis r4,r5,0x0000 ; Get SR0 value - mtsr sr0,r4 ; Load up the SR - addis r4,r5,0x0010 ; Get SR1 value - mtsr sr1,r4 ; Load up the SR - addis r4,r5,0x0020 ; Get SR2 value - mtsr sr2,r4 ; Load up the SR - addis r4,r5,0x0030 ; Get SR3 value - mtsr sr3,r4 ; Load up the SR - addis r4,r5,0x0040 ; Get SR4 value - mtsr sr4,r4 ; Load up the SR - addis r4,r5,0x0050 ; Get SR5 value - mtsr sr5,r4 ; Load up the SR - addis r4,r5,0x0060 ; Get SR6 value - mtsr sr6,r4 ; Load up the SR - addis r4,r5,0x0070 ; Get SR7 value - mtsr sr7,r4 ; Load up the SR - addis r4,r5,0x0080 ; Get SR8 value - mtsr sr8,r4 ; Load up the SR - addis r4,r5,0x0090 ; Get SR9 value - mtsr sr9,r4 ; Load up the SR - addis r4,r5,0x00a0 ; Get SR10 value - mtsr sr10,r4 ; Load up the SR - addis r4,r5,0x00b0 ; Get SR11 value - mtsr sr11,r4 ; Load up the SR - addis r4,r5,0x00c0 ; Get SR12 value - mtsr sr12,r4 ; Load up the SR - addis r4,r5,0x00d0 ; Get SR13 value - mtsr sr13,r4 ; Load up the SR - addis r4,r5,0x00e0 ; Get SR14 value - mtsr sr14,r4 ; Load up the SR - addis r4,r5,0x00f0 ; Get SR15 value - mtsr sr15,r4 ; Load up the SR -ResumeGuest_nokey: - mfsrr1 r6 ; Get the current MSR value + beq ufpnokey ; No, go to ufpnokey + mr r5,r3 ; Get the per_proc area + stw r7,spcFlags(r3) ; Update the special flags + + bt++ pf64Bitb,ufpsave64 ; Go do this on a 64-bit machine... 
+ + lwz r3,next_savearea+4(r5) ; Get the exception save area + stw r8,saver8+4(r3) ; Save r8 + stw r9,saver9+4(r3) ; Save r9 + stw r10,saver10+4(r3) ; Save r10 + stw r11,saver11+4(r3) ; Save r11 + stw r12,saver12+4(r3) ; Save r12 + stw r13,saver13+4(r3) ; Save r13 + stw r14,saver14+4(r3) ; Save r14 + stw r15,saver15+4(r3) ; Save r15 + stw r16,saver16+4(r3) ; Save r16 + stw r17,saver17+4(r3) ; Save r17 + stw r18,saver18+4(r3) ; Save r18 + stw r19,saver19+4(r3) ; Save r19 + stw r20,saver20+4(r3) ; Save r20 + stw r21,saver21+4(r3) ; Save r21 + stw r22,saver22+4(r3) ; Save r22 + stw r23,saver23+4(r3) ; Save r23 + stw r24,saver24+4(r3) ; Save r24 + stw r25,saver25+4(r3) ; Save r25 + stw r26,saver26+4(r3) ; Save r26 + stw r27,saver27+4(r3) ; Save r27 + stw r28,saver28+4(r3) ; Save r28 + stw r29,saver29+4(r3) ; Save r29 + stw r30,saver30+4(r3) ; Save r30 + stw r31,saver31+4(r3) ; Save r31 + b ufpsaveres ; Continue + +ufpsave64: + ld r3,next_savearea(r5) ; Get the exception save area + std r8,saver8(r3) ; Save r8 + std r9,saver9(r3) ; Save r9 + std r10,saver10(r3) ; Save r10 + std r11,saver11(r3) ; Save r11 + std r12,saver12(r3) ; Save r12 + std r13,saver13(r3) ; Save r13 + std r14,saver14(r3) ; Save r14 + std r15,saver15(r3) ; Save r15 + std r16,saver16(r3) ; Save r16 + std r17,saver17(r3) ; Save r17 + std r18,saver18(r3) ; Save r18 + std r19,saver19(r3) ; Save r19 + std r20,saver20(r3) ; Save r20 + std r21,saver21(r3) ; Save r21 + std r22,saver22(r3) ; Save r22 + std r23,saver23(r3) ; Save r23 + std r24,saver24(r3) ; Save r24 + std r25,saver25(r3) ; Save r25 + std r26,saver26(r3) ; Save r26 + std r27,saver27(r3) ; Save r27 + std r28,saver28(r3) ; Save r28 + std r29,saver29(r3) ; Save r29 + mfxer r2 ; Get xer + std r30,saver30(r3) ; Save r30 + std r31,saver31(r3) ; Save r31 + std r2,savexer(r3) ; Save xer + +ufpsaveres: + mflr r20 ; Get lr + li r2,1 ; Set to 1 + stw r7,spcFlags(r5) ; Update the special flags + mr r13,r3 ; Set current savearea + mr r21,r4 ; Save r4 + sth r2,ppInvSeg(r5) ; Force a reload of the SRs + mr r29,r5 ; Get the per_proc area + mr r3,r4 ; Set MSR value we are going to + bl EXT(switchSegs) ; Go handle the segment registers/STB + mr r3,r13 ; Set current savearea + mr r4,r21 ; Restore r4 + mtlr r20 ; Set lr + + bt++ pf64Bitb,ufprestore64 ; Go do this on a 64-bit machine... 
+ lwz r8,saver8+4(r3) ; Load r8 + lwz r9,saver9+4(r3) ; Load r9 + lwz r10,saver10+4(r3) ; Load r10 + lwz r11,saver11+4(r3) ; Load r11 + lwz r12,saver12+4(r3) ; Load r12 + lwz r13,saver13+4(r3) ; Load r13 + lwz r14,saver14+4(r3) ; Load r14 + lwz r15,saver15+4(r3) ; Load r15 + lwz r16,saver16+4(r3) ; Load r16 + lwz r17,saver17+4(r3) ; Load r17 + lwz r18,saver18+4(r3) ; Load r18 + lwz r19,saver19+4(r3) ; Load r19 + lwz r20,saver20+4(r3) ; Load r20 + lwz r21,saver21+4(r3) ; Load r21 + lwz r22,saver22+4(r3) ; Load r22 + lwz r23,saver23+4(r3) ; Load r23 + lwz r24,saver24+4(r3) ; Load r24 + lwz r25,saver25+4(r3) ; Load r25 + lwz r26,saver26+4(r3) ; Load r26 + lwz r27,saver27+4(r3) ; Load r27 + lwz r28,saver28+4(r3) ; Load r28 + lwz r29,saver29+4(r3) ; Load r29 + lwz r30,saver30+4(r3) ; Load r30 + lwz r31,saver31+4(r3) ; Load r31 + b ufpnokey ; Continue +ufprestore64: + ld r2,savexer(r3) ; Load xer + ld r8,saver8(r3) ; Load r8 + ld r9,saver9(r3) ; Load r9 + ld r10,saver10(r3) ; Load r10 + mtxer r2 ; Restore xer + ld r11,saver11(r3) ; Load r11 + ld r12,saver12(r3) ; Load r12 + ld r13,saver13(r3) ; Load r13 + ld r14,saver14(r3) ; Load r14 + ld r15,saver15(r3) ; Load r15 + ld r16,saver16(r3) ; Load r16 + ld r17,saver17(r3) ; Load r17 + ld r18,saver18(r3) ; Load r18 + ld r19,saver19(r3) ; Load r19 + ld r20,saver20(r3) ; Load r20 + ld r21,saver21(r3) ; Load r21 + ld r22,saver22(r3) ; Load r22 + ld r23,saver23(r3) ; Load r23 + ld r24,saver24(r3) ; Load r24 + ld r25,saver25(r3) ; Load r25 + ld r26,saver26(r3) ; Load r26 + ld r27,saver27(r3) ; Load r27 + ld r28,saver28(r3) ; Load r28 + ld r29,saver29(r3) ; Load r29 + ld r30,saver30(r3) ; Load r30 + ld r31,saver31(r3) ; Load r31 +ufpnokey: + mfsprg r3,0 ; Get the per_proc area + mtsrr1 r4 ; Set srr1 lwz r0,famguestr0(r13) ; Load r0 lwz r1,famguestr1(r13) ; Load r1 - lwz r4,famguestmsr(r13) ; Load guest srr1 - stw r7,spcFlags(r3) ; Update the special flags - lis r5,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user lwz r2,famguestr2(r13) ; Load r2 lwz r3,famguestr3(r13) ; Load r3 - ori r5,r5,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user - and r7,r4,r5 ; Keep only the controllable bits lwz r4,famguestr4(r13) ; Load r4 - oris r7,r7,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits lwz r5,famguestr5(r13) ; Load r5 - ori r7,r7,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits - rlwimi r7,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP - rlwimi r7,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector - mtsrr1 r7 ; Set srr1 lwz r6,famguestr6(r13) ; Load r6 lwz r7,famguestr7(r13) ; Load r7 ufpVMret: + mfsprg r13,2 ; Restore R13 + bt++ pf64Bitb,ufpVMrfi64 ; Go do this on a 64-bit machine... mtcrf 0xFF,r11 ; Restore CR mfsprg r11,3 ; Restore R11 - mfsprg r13,2 ; Restore R13 rfi ; All done, go back... 
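; Editor's note: the following C sketch is illustrative commentary, not part of the patch.
/*
 * Both resume paths above build the guest SRR1 the same way: keep only
 * the user-controllable MSR bits, force the mandatory bits on, and carry
 * the live FP/VEC enables across. A hedged C equivalent (the mask values
 * below are placeholders, not the real xnu definitions):
 */
#include <stdint.h>

#define MSR_IMPORT_BITS_SKETCH 0x0000ff6f /* placeholder: user-settable bits */
#define MSR_EXPORT_SET_SKETCH  0x00006000 /* placeholder: always-forced bits */
#define MSR_FP_SKETCH          0x00002000 /* placeholder: FP enable bit      */
#define MSR_VEC_SKETCH         0x02000000 /* placeholder: vector enable bit  */

static uint32_t vmm_guest_srr1(uint32_t guest_msr, uint32_t live_msr)
{
    uint32_t msr = guest_msr & MSR_IMPORT_BITS_SKETCH;  /* controllable bits only */
    msr |= MSR_EXPORT_SET_SKETCH;                       /* force required bits on */
    msr &= ~(MSR_FP_SKETCH | MSR_VEC_SKETCH);           /* then propagate the     */
    msr |= live_msr & (MSR_FP_SKETCH | MSR_VEC_SKETCH); /* live FP/VEC enables    */
    return msr;
}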
+ufpVMrfi64: + mtcrf 0xFF,r11 ; Restore CR + mfsprg r11,3 ; Restore R11 + rfid + +ufpX: + bt cr5_eq,ufpXResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest + cmpwi cr7,r4,0 ; Compare first arg with 0 + cmpwi cr5,r4,7 ; Compare first arg with 7 + cror cr1_eq,cr7_lt,cr5_gt ; Is it in 0 to 7 range + beq cr1,ufpXVMret ; Return if not in the range + slwi r4,r4,3 ; multiply index by 8 + la r3,famguestXr0(r13) ; Load the base address + bt cr2_eq,ufpXSetGuestReg ; Set/get selector +; ufpXGetGuestReg + ldx r3,r4,r3 ; Load the guest register + b ufpXVMret ; Return +ufpXSetGuestReg: + stdx r5,r4,r3 ; Update the guest register + li r3,0 ; Set return value + b ufpXVMret ; Return +ufpXResumeGuest: + lwz r7,spcFlags(r3) ; Pick up the special flags + mtsrr0 r4 ; Set srr0 + rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl + rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit + stw r7,spcFlags(r3) ; Update the special flags + mfsrr1 r6 ; Get the current MSR value + + ld r4,famguestXmsr(r13) ; Load guest srr1 + lis r1,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user + ori r1,r1,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user + and r4,r4,r1 ; Keep only the controllable bits + oris r4,r4,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits + ori r4,r4,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits + rlwimi r4,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP + rlwimi r4,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector + beq ufpXnokey ; Branch if not key switch + mr r2,r7 ; Save r7 + rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key + cmpw cr0,r7,r2 ; Is userProtKeybit changed? + beq ufpXnokey ; No, go to ufpXnokey + mr r5,r3 ; Get the per_proc area + stw r7,spcFlags(r3) ; Update the special flags + + ld r3,next_savearea(r5) ; Get the exception save area + std r8,saver8(r3) ; Save r8 + std r9,saver9(r3) ; Save r9 + std r10,saver10(r3) ; Save r10 + std r11,saver11(r3) ; Save r11 + std r12,saver12(r3) ; Save r12 + std r13,saver13(r3) ; Save r13 + std r14,saver14(r3) ; Save r14 + std r15,saver15(r3) ; Save r15 + std r16,saver16(r3) ; Save r16 + std r17,saver17(r3) ; Save r17 + std r18,saver18(r3) ; Save r18 + std r19,saver19(r3) ; Save r19 + std r20,saver20(r3) ; Save r20 + std r21,saver21(r3) ; Save r21 + std r22,saver22(r3) ; Save r22 + std r23,saver23(r3) ; Save r23 + std r24,saver24(r3) ; Save r24 + std r25,saver25(r3) ; Save r25 + std r26,saver26(r3) ; Save r26 + std r27,saver27(r3) ; Save r27 + std r28,saver28(r3) ; Save r28 + std r29,saver29(r3) ; Save r29 + mfxer r2 ; Get xer + std r30,saver30(r3) ; Save r30 + std r31,saver31(r3) ; Save r31 + std r2,savexer(r3) ; Save xer + + mflr r20 ; Get lr + li r2,1 ; Set to 1 + stw r7,spcFlags(r5) ; Update the special flags + mr r13,r3 ; Set current savearea + mr r21,r4 ; Save r4 + sth r2,ppInvSeg(r5) ; Force a reload of the SRs + mr r29,r5 ; Get the per_proc area + mr r3,r4 ; Set MSR value we are going to + bl EXT(switchSegs) ; Go handle the segment registers/STB + mr r3,r13 ; Set current savearea + mr r4,r21 ; Restore r4 + mtlr r20 ; Set lr + + ld r2,savexer(r3) ; Load xer + ld r8,saver8(r3) ; Load r8 + ld r9,saver9(r3) ; Load r9 + ld r10,saver10(r3) ; Load r10 + mtxer r2 ; Restore xer + ld r11,saver11(r3) ; Load r11 + ld r12,saver12(r3) ; Load r12 + ld r13,saver13(r3) ; Load r13 + ld r14,saver14(r3) ; Load r14 + ld r15,saver15(r3) ; Load r15 + ld r16,saver16(r3) ; Load r16 + ld r17,saver17(r3) ; Load r17 + ld 
r18,saver18(r3) ; Load r18 + ld r19,saver19(r3) ; Load r19 + ld r20,saver20(r3) ; Load r20 + ld r21,saver21(r3) ; Load r21 + ld r22,saver22(r3) ; Load r22 + ld r23,saver23(r3) ; Load r23 + ld r24,saver24(r3) ; Load r24 + ld r25,saver25(r3) ; Load r25 + ld r26,saver26(r3) ; Load r26 + ld r27,saver27(r3) ; Load r27 + ld r28,saver28(r3) ; Load r28 + ld r29,saver29(r3) ; Load r29 + ld r30,saver30(r3) ; Load r30 + ld r31,saver31(r3) ; Load r31 +ufpXnokey: + mtsrr1 r4 ; Set srr1 + ld r0,famguestXr0(r13) ; Load r0 + ld r1,famguestXr1(r13) ; Load r1 + ld r2,famguestXr2(r13) ; Load r2 + ld r3,famguestXr3(r13) ; Load r3 + ld r4,famguestXr4(r13) ; Load r4 + ld r5,famguestXr5(r13) ; Load r5 + ld r6,famguestXr6(r13) ; Load r6 + ld r7,famguestXr7(r13) ; Load r7 +ufpXVMret: + mfsprg r13,2 ; Restore R13 + mtcrf 0xFF,r11 ; Restore CR + mfsprg r11,3 ; Restore R11 + rfid + diff --git a/osfmk/profiling/i386/profile-asm.s b/osfmk/profiling/i386/profile-asm.s index 65c6760a9..aadae0800 100644 --- a/osfmk/profiling/i386/profile-asm.s +++ b/osfmk/profiling/i386/profile-asm.s @@ -666,9 +666,7 @@ ENDDATA(_profile_do_stats) #if defined (MACH_KERNEL) && NCPUS > 1 #define ASSEMBLER -#if AT386 -#include -#endif +#include #if SQT #include @@ -798,13 +796,13 @@ LCL(alloc_new): #if DO_STATS SLOCK addl %esi,V_wasted(%ebx,%edi,4) /* update global counters */ - SLOCK addl $M_size,V_overhead(%ebx,%edi,4) + SLOCK addl $(M_size),V_overhead(%ebx,%edi,4) #endif popl %ecx /* context block */ movl %eax,%edx /* memory block pointer */ movl %esi,M_nfree(%edx) /* # free bytes */ - addl $M_size,%eax /* bump past overhead */ + addl $(M_size),%eax /* bump past overhead */ movl A_plist(%ecx),%esi /* previous memory block or 0 */ movl %eax,M_first(%edx) /* first space available */ movl %eax,M_ptr(%edx) /* current address available */ @@ -975,8 +973,8 @@ LCL(pnew): SLOCK incl V_prof_records(%ebx) pushl %edx - movl $P_size,%eax /* allocation size */ - movl $C_prof,%ecx /* allocation pool */ + movl $(P_size),%eax /* allocation size */ + movl $(C_prof),%ecx /* allocation pool */ call EXT(_profile_alloc_asm) /* allocate a new record */ popl %edx @@ -1146,8 +1144,8 @@ LCL(gnew): SLOCK incl V_prof_records(%ebx) movl %edx,%esi /* save unique function ptr */ movl %ecx,%edi /* and caller's caller address */ - movl $H_size,%eax /* memory block size */ - movl $C_gfunc,%ecx /* gprof function header memory pool */ + movl $(H_size),%eax /* memory block size */ + movl $(C_gfunc),%ecx /* gprof function header memory pool */ call EXT(_profile_alloc_asm) movl V_hash_ptr(%ebx),%ecx /* copy hash_ptr to func header */ @@ -1196,8 +1194,8 @@ LCL(gnocache): movl %ecx,%eax /* caller's caller address */ imull %edi,%eax /* multiply to get hash */ movl H_hash_ptr(%esi),%edx /* hash pointer */ - shrl $GPROF_HASH_SHIFT,%eax /* eliminate low order bits */ - andl $GPROF_HASH_MASK,%eax /* mask to get hash value */ + shrl $(GPROF_HASH_SHIFT),%eax /* eliminate low order bits */ + andl $(GPROF_HASH_MASK),%eax /* mask to get hash value */ leal 0(%edx,%eax,4),%eax /* pointer to hash bucket */ movl %eax,%edx /* save hash bucket address */ @@ -1261,8 +1259,8 @@ LCL(ghashnew): SLOCK incl V_gprof_records(%ebx) pushl %edx movl %ecx,%edi /* save caller's caller */ - movl $G_size,%eax /* arc size */ - movl $C_gprof,%ecx /* gprof memory pool */ + movl $(G_size),%eax /* arc size */ + movl $(C_gprof),%ecx /* gprof memory pool */ call EXT(_profile_alloc_asm) popl %edx diff --git a/osfmk/profiling/i386/profile-md.c b/osfmk/profiling/i386/profile-md.c index 4bb803fd9..6ecbcbe89 100644 --- 
a/osfmk/profiling/i386/profile-md.c +++ b/osfmk/profiling/i386/profile-md.c @@ -182,6 +182,7 @@ */ #include +#include #include #include @@ -192,8 +193,6 @@ #define DEBUG_PROFILE 1 #endif -extern int printf(const char *, ...); -extern void panic(const char *); #else #include #define panic(str) exit(1) @@ -238,6 +237,59 @@ static void _profile_reset_alloc(struct profile_vars *, acontext_type_t); extern void _bogus_function(void); + + +#if NCPUS > 1 +struct profile_vars *_profile_vars_cpus[NCPUS] = { &_profile_vars }; +struct profile_vars _profile_vars_aux[NCPUS-1]; +#define PROFILE_VARS(cpu) (_profile_vars_cpus[(cpu)]) +#else +#define PROFILE_VARS(cpu) (&_profile_vars) +#endif + +void * +_profile_alloc_pages (size_t size) +{ + vm_offset_t addr; + + /* + * For the MK, we can't support allocating pages at runtime, because we + * might be at interrupt level, so abort if we didn't size the table + * properly. + */ + + if (PROFILE_VARS(0)->active) { + panic("Call to _profile_alloc_pages while profiling is running."); + } + + if (kmem_alloc(kernel_map, &addr, size)) { + panic("Could not allocate memory for profiling"); + } + + memset((void *)addr, '\0', size); + if (PROFILE_VARS(0)->debug) { + printf("Allocated %d bytes for profiling, address 0x%x\n", (int)size, (int)addr); + } + + return((caddr_t)addr); +} + +void +_profile_free_pages(void *addr, size_t size) +{ + if (PROFILE_VARS(0)->debug) { + printf("Freed %d bytes for profiling, address 0x%x\n", (int)size, (int)addr); + } + + kmem_free(kernel_map, (vm_offset_t)addr, size); + return; +} + +void _profile_error(struct profile_vars *pv) +{ + panic("Fatal error in profiling"); +} + /* * Function to set up the initial allocation for a context block. diff --git a/osfmk/profiling/i386/profile-md.h b/osfmk/profiling/i386/profile-md.h index c7bcdb124..6158151d0 100644 --- a/osfmk/profiling/i386/profile-md.h +++ b/osfmk/profiling/i386/profile-md.h @@ -143,6 +143,8 @@ #ifndef _PROFILE_MD_H #define _PROFILE_MD_H +#include + /* * Define the interfaces between the assembly language profiling support * that is common between the kernel, mach servers, and user space library. diff --git a/osfmk/vm/bsd_vm.c b/osfmk/vm/bsd_vm.c index 4db7b48bd..602eb774d 100644 --- a/osfmk/vm/bsd_vm.c +++ b/osfmk/vm/bsd_vm.c @@ -284,20 +284,24 @@ macx_triggers( /* * Set thread scheduling priority and policy for the current thread * it is assumed for the time being that the thread setting the alert - * is the same one which will be servicing it. + * is the same one which will be servicing it. 
+ * + * XXX This does not belong in the kernel XXX */ { - struct policy_timeshare_base fifo_base; - struct policy_timeshare_limit fifo_limit; - policy_base_t base; - processor_set_t pset; - policy_limit_t limit; - - pset = (current_thread())->processor_set; - base = (policy_base_t) &fifo_base; - limit = (policy_limit_t) &fifo_limit; - fifo_limit.max_priority = fifo_base.base_priority = MAXPRI_STANDARD; - thread_set_policy((current_thread())->top_act, pset, POLICY_FIFO, base, POLICY_TIMESHARE_BASE_COUNT, limit, POLICY_TIMESHARE_LIMIT_COUNT); + thread_precedence_policy_data_t pre; + thread_extended_policy_data_t ext; + + ext.timeshare = FALSE; + pre.importance = INT32_MAX; + + thread_policy_set(current_act(), + THREAD_EXTENDED_POLICY, (thread_policy_t)&ext, + THREAD_EXTENDED_POLICY_COUNT); + + thread_policy_set(current_act(), + THREAD_PRECEDENCE_POLICY, (thread_policy_t)&pre, + THREAD_PRECEDENCE_POLICY_COUNT); } current_thread()->vm_privilege = TRUE; diff --git a/osfmk/vm/device_vm.c b/osfmk/vm/device_vm.c index a6636ce12..bbc1726db 100644 --- a/osfmk/vm/device_vm.c +++ b/osfmk/vm/device_vm.c @@ -202,7 +202,7 @@ kern_return_t device_pager_populate_object( memory_object_t device, memory_object_offset_t offset, - vm_offset_t phys_addr, + ppnum_t page_num, vm_size_t size) { device_pager_t device_object; @@ -221,7 +221,7 @@ device_pager_populate_object( return KERN_FAILURE; kr = vm_object_populate_with_private( - vm_object, offset, phys_addr, size); + vm_object, offset, page_num, size); if(kr != KERN_SUCCESS) return kr; diff --git a/osfmk/vm/memory_object.c b/osfmk/vm/memory_object.c index e82bd42b6..c1898e3c5 100644 --- a/osfmk/vm/memory_object.c +++ b/osfmk/vm/memory_object.c @@ -102,7 +102,6 @@ #include #endif /* MACH_PAGEMAP */ - memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL; vm_size_t memory_manager_default_cluster = 0; decl_mutex_data(, memory_manager_default_lock) @@ -135,7 +134,7 @@ vm_object_update(vm_object_t, vm_object_offset_t, #define memory_object_should_return_page(m, should_return) \ (should_return != MEMORY_OBJECT_RETURN_NONE && \ - (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_addr))) || \ + (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \ ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \ (should_return) == MEMORY_OBJECT_RETURN_ANYTHING)) @@ -258,7 +257,7 @@ memory_object_lock_page( if (prot != VM_PROT_NO_CHANGE) { if ((m->page_lock ^ prot) & prot) { - pmap_page_protect(m->phys_addr, VM_PROT_ALL & ~prot); + pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot); } #if 0 /* code associated with the vestigial @@ -303,7 +302,7 @@ memory_object_lock_page( vm_page_unlock_queues(); if (!should_flush) - pmap_page_protect(m->phys_addr, VM_PROT_NONE); + pmap_page_protect(m->phys_page, VM_PROT_NONE); if (m->dirty) return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN); @@ -409,7 +408,7 @@ memory_object_lock_request( if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) return (KERN_INVALID_ARGUMENT); - size = round_page(size); + size = round_page_64(size); /* * Lock the object, and acquire a paging reference to @@ -629,7 +628,7 @@ vm_object_update( if(copy_size < 0) copy_size = 0; - copy_size+=offset; + copy_size+=copy_offset; vm_object_unlock(object); vm_object_lock(copy_object); @@ -963,7 +962,7 @@ vm_object_set_attributes_common( temporary = TRUE; if (cluster_size != 0) { int pages_per_cluster; - pages_per_cluster = atop(cluster_size); + pages_per_cluster = atop_32(cluster_size); /* * Cluster size must be 
integral multiple of page size, * and be a power of 2 number of pages. @@ -1099,7 +1098,7 @@ memory_object_change_attributes( perf = (memory_object_perf_info_t) attributes; may_cache = perf->may_cache; - cluster_size = round_page(perf->cluster_size); + cluster_size = round_page_32(perf->cluster_size); break; } @@ -1295,6 +1294,128 @@ memory_object_get_attributes( } +kern_return_t +memory_object_iopl_request( + ipc_port_t port, + memory_object_offset_t offset, + vm_size_t *upl_size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + int *flags) +{ + vm_object_t object; + kern_return_t ret; + int caller_flags; + + caller_flags = *flags; + + if (ip_kotype(port) == IKOT_NAMED_ENTRY) { + vm_named_entry_t named_entry; + + named_entry = (vm_named_entry_t)port->ip_kobject; + /* a few checks to make sure the user is obeying the rules */ + if(*upl_size == 0) { + if(offset >= named_entry->size) + return(KERN_INVALID_RIGHT); + *upl_size = named_entry->size - offset; + } + if(caller_flags & UPL_COPYOUT_FROM) { + if((named_entry->protection & VM_PROT_READ) + != VM_PROT_READ) { + return(KERN_INVALID_RIGHT); + } + } else { + if((named_entry->protection & + (VM_PROT_READ | VM_PROT_WRITE)) + != (VM_PROT_READ | VM_PROT_WRITE)) { + return(KERN_INVALID_RIGHT); + } + } + if(named_entry->size < (offset + *upl_size)) + return(KERN_INVALID_ARGUMENT); + + /* the caller's parameter offset is defined to be the */ + /* offset from beginning of named entry offset in object */ + offset = offset + named_entry->offset; + + if(named_entry->is_sub_map) + return (KERN_INVALID_ARGUMENT); + + named_entry_lock(named_entry); + + if(named_entry->object) { + /* This is the case where we are going to map */ + /* an already mapped object. If the object is */ + /* not ready it is internal. An external */ + /* object cannot be mapped until it is ready */ + /* we can therefore avoid the ready check */ + /* in this case. 
*/ + vm_object_reference(named_entry->object); + object = named_entry->object; + named_entry_unlock(named_entry); + } else { + object = vm_object_enter(named_entry->backing.pager, + named_entry->offset + named_entry->size, + named_entry->internal, + FALSE, + FALSE); + if (object == VM_OBJECT_NULL) { + named_entry_unlock(named_entry); + return(KERN_INVALID_OBJECT); + } + vm_object_lock(object); + + /* create an extra reference for the named entry */ + vm_object_reference_locked(object); + named_entry->object = object; + named_entry_unlock(named_entry); + + /* wait for object to be ready */ + while (!object->pager_ready) { + vm_object_wait(object, + VM_OBJECT_EVENT_PAGER_READY, + THREAD_UNINT); + vm_object_lock(object); + } + vm_object_unlock(object); + } + } else { + memory_object_control_t control; + control = (memory_object_control_t)port->ip_kobject; + if (control == NULL) + return (KERN_INVALID_ARGUMENT); + object = memory_object_control_to_vm_object(control); + if (object == VM_OBJECT_NULL) + return (KERN_INVALID_ARGUMENT); + vm_object_reference(object); + } + if (object == VM_OBJECT_NULL) + return (KERN_INVALID_ARGUMENT); + + if (!object->private) { + if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE)) + *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE); + if (object->phys_contiguous) { + *flags = UPL_PHYS_CONTIG; + } else { + *flags = 0; + } + } else { + *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG; + } + + ret = vm_object_iopl_request(object, + offset, + *upl_size, + upl_ptr, + user_page_list, + page_list_count, + caller_flags); + vm_object_deallocate(object); + return ret; +} + /* * Routine: memory_object_upl_request [interface] * Purpose: @@ -1424,7 +1545,7 @@ host_default_memory_manager( mutex_unlock(&memory_manager_default_lock); return KERN_INVALID_ARGUMENT; #else - cluster_size = round_page(cluster_size); + cluster_size = round_page_32(cluster_size); #endif } memory_manager_default_cluster = cluster_size; @@ -1551,12 +1672,12 @@ memory_object_deactivate_pages( if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) { m->reference = FALSE; - pmap_clear_reference(m->phys_addr); + pmap_clear_reference(m->phys_page); if ((kill_page) && (object->internal)) { m->precious = FALSE; m->dirty = FALSE; - pmap_clear_modify(m->phys_addr); + pmap_clear_modify(m->phys_page); vm_external_state_clr(object->existence_map, offset); } VM_PAGE_QUEUES_REMOVE(m); @@ -1610,7 +1731,7 @@ memory_object_page_op( memory_object_control_t control, memory_object_offset_t offset, int ops, - vm_offset_t *phys_entry, + ppnum_t *phys_entry, int *flags) { vm_object_t object; @@ -1626,8 +1747,8 @@ memory_object_page_op( if(ops & UPL_POP_PHYSICAL) { if(object->phys_contiguous) { if (phys_entry) { - *phys_entry = (vm_offset_t) - object->shadow_offset; + *phys_entry = (ppnum_t) + (object->shadow_offset >> 12); } vm_object_unlock(object); return KERN_SUCCESS; @@ -1636,13 +1757,12 @@ memory_object_page_op( return KERN_INVALID_OBJECT; } } + if(object->phys_contiguous) { + vm_object_unlock(object); + return KERN_INVALID_OBJECT; + } while(TRUE) { - if(object->phys_contiguous) { - vm_object_unlock(object); - return KERN_INVALID_OBJECT; - } - if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) { vm_object_unlock(object); return KERN_FAILURE; @@ -1659,10 +1779,14 @@ memory_object_page_op( } if (ops & UPL_POP_DUMP) { - vm_page_lock_queues(); - vm_page_free(dst_page); - vm_page_unlock_queues(); - break; + vm_page_lock_queues(); + + if (dst_page->no_isync == FALSE) + pmap_page_protect(dst_page->phys_page, 
VM_PROT_NONE); + vm_page_free(dst_page); + + vm_page_unlock_queues(); + break; } if (flags) { @@ -1678,7 +1802,7 @@ if(dst_page->busy) *flags |= UPL_POP_BUSY; } if (phys_entry) - *phys_entry = dst_page->phys_addr; + *phys_entry = dst_page->phys_page; /* The caller should have made a call either contingent with */ /* or prior to this call to set UPL_POP_BUSY */ @@ -1717,6 +1841,88 @@ } + +/* + * memory_object_range_op offers a performance enhancement over + * memory_object_page_op for page_op functions which do not require page + * level state to be returned from the call. Page_op was created to provide + * a low-cost alternative to page manipulation via UPLs when only a single + * page was involved. The range_op call establishes the ability in the _op + * family of functions to work on multiple pages where the lack of page level + * state handling allows the caller to avoid the overhead of the upl structures. + */ + +kern_return_t +memory_object_range_op( + memory_object_control_t control, + memory_object_offset_t offset_beg, + memory_object_offset_t offset_end, + int ops, + int *range) +{ + memory_object_offset_t offset; + vm_object_t object; + vm_page_t dst_page; + + object = memory_object_control_to_vm_object(control); + if (object == VM_OBJECT_NULL) + return (KERN_INVALID_ARGUMENT); + + if (object->resident_page_count == 0) { + if (range) { + if (ops & UPL_ROP_PRESENT) + *range = 0; + else + *range = offset_end - offset_beg; + } + return KERN_SUCCESS; + } + vm_object_lock(object); + + if (object->phys_contiguous) + return KERN_INVALID_OBJECT; + + offset = offset_beg; + + while (offset < offset_end) { + if (dst_page = vm_page_lookup(object, offset)) { + if (ops & UPL_ROP_DUMP) { + if (dst_page->busy || dst_page->cleaning) { + /* + * someone else is playing with the + * page, we will have to wait + */ + PAGE_SLEEP(object, + dst_page, THREAD_UNINT); + /* + * need to look the page up again since its + * state may have changed while we slept + * it might even belong to a different object + * at this point + */ + continue; + } + vm_page_lock_queues(); + + if (dst_page->no_isync == FALSE) + pmap_page_protect(dst_page->phys_page, VM_PROT_NONE); + vm_page_free(dst_page); + + vm_page_unlock_queues(); + } else if (ops & UPL_ROP_ABSENT) + break; + } else if (ops & UPL_ROP_PRESENT) + break; + + offset += PAGE_SIZE; + } + vm_object_unlock(object); + + if (range) + *range = offset - offset_beg; + + return KERN_SUCCESS; +} + static zone_t mem_obj_control_zone; __private_extern__ void diff --git a/osfmk/vm/pmap.h b/osfmk/vm/pmap.h index cf3e38c86..02793058b 100644 --- a/osfmk/vm/pmap.h +++ b/osfmk/vm/pmap.h @@ -87,6 +87,20 @@ * many address spaces. */ +/* Copy between a physical page and a virtual address */ +extern kern_return_t copypv( + addr64_t source, + addr64_t sink, + unsigned int size, + int which); +#define cppvPsnk 1 +#define cppvPsrc 2 +#define cppvFsnk 4 +#define cppvFsrc 8 +#define cppvNoModSnk 16 +#define cppvNoRefSrc 32 +#define cppvKmap 64 /* Use the kernel's vm_map */ + #if !defined(MACH_KERNEL_PRIVATE) typedef void *pmap_t; @@ -141,7 +155,7 @@ extern void pmap_init(void); /* Initialization, * However, for best performance pmap_free_pages should be accurate. */ -extern boolean_t pmap_next_page(vm_offset_t *paddr); +extern boolean_t pmap_next_page(ppnum_t *pnum); /* During VM initialization, * return the next unused * physical page. 
@@ -168,14 +182,14 @@ extern void pmap_switch(pmap_t); extern void pmap_enter( /* Enter a mapping */ pmap_t pmap, vm_offset_t v, - vm_offset_t pa, + ppnum_t pn, vm_prot_t prot, unsigned int flags, boolean_t wired); extern void pmap_remove_some_phys( pmap_t pmap, - vm_offset_t pa); + ppnum_t pn); /* @@ -183,36 +197,36 @@ extern void pmap_remove_some_phys( */ extern void pmap_page_protect( /* Restrict access to page. */ - vm_offset_t phys, + ppnum_t phys, vm_prot_t prot); extern void (pmap_zero_page)( - vm_offset_t phys); + ppnum_t pn); extern void (pmap_zero_part_page)( - vm_offset_t p, + ppnum_t pn, vm_offset_t offset, vm_size_t len); extern void (pmap_copy_page)( - vm_offset_t src, - vm_offset_t dest); + ppnum_t src, + ppnum_t dest); extern void (pmap_copy_part_page)( - vm_offset_t src, + ppnum_t src, vm_offset_t src_offset, - vm_offset_t dst, + ppnum_t dst, vm_offset_t dst_offset, vm_size_t len); extern void (pmap_copy_part_lpage)( vm_offset_t src, - vm_offset_t dst, + ppnum_t dst, vm_offset_t dst_offset, vm_size_t len); extern void (pmap_copy_part_rpage)( - vm_offset_t src, + ppnum_t src, vm_offset_t src_offset, vm_offset_t dst, vm_size_t len); @@ -221,7 +235,7 @@ extern void (pmap_copy_part_rpage)( * debug/assertions. pmap_verify_free returns true iff * the given physical page is mapped into no pmap. */ -extern boolean_t pmap_verify_free(vm_offset_t paddr); +extern boolean_t pmap_verify_free(ppnum_t pn); /* * Statistics routines @@ -269,8 +283,8 @@ extern kern_return_t (pmap_attribute)( /* Get/Set special memory extern kern_return_t (pmap_attribute_cache_sync)( /* Flush appropriate * cache based on - * phys addr sent */ - vm_offset_t addr, + * page number sent */ + ppnum_t pn, vm_size_t size, vm_machine_attribute_t attribute, vm_machine_attribute_val_t* value); @@ -317,7 +331,7 @@ extern kern_return_t (pmap_attribute_cache_sync)( /* Flush appropriate pmap_enter( \ (pmap), \ (virtual_address), \ - (page)->phys_addr, \ + (page)->phys_page, \ (protection) & ~(page)->page_lock, \ flags, \ (wired) \ @@ -331,15 +345,15 @@ extern kern_return_t (pmap_attribute_cache_sync)( /* Flush appropriate * by the hardware. */ /* Clear reference bit */ -extern void pmap_clear_reference(vm_offset_t paddr); +extern void pmap_clear_reference(ppnum_t pn); /* Return reference bit */ -extern boolean_t (pmap_is_referenced)(vm_offset_t paddr); +extern boolean_t (pmap_is_referenced)(ppnum_t pn); /* Set modify bit */ -extern void pmap_set_modify(vm_offset_t paddr); +extern void pmap_set_modify(ppnum_t pn); /* Clear modify bit */ -extern void pmap_clear_modify(vm_offset_t paddr); +extern void pmap_clear_modify(ppnum_t pn); /* Return modify bit */ -extern boolean_t pmap_is_modified(vm_offset_t paddr); +extern boolean_t pmap_is_modified(ppnum_t pn); /* * Routines that operate on ranges of virtual addresses. @@ -394,8 +408,9 @@ extern void pmap_change_wiring( /* Specify pageability */ extern void pmap_remove( /* Remove mappings. 
*/ pmap_t map, - vm_offset_t s, - vm_offset_t e); + addr64_t s, + addr64_t e); + #endif /* __APPLE_API_PRIVATE */ diff --git a/osfmk/vm/task_working_set.c b/osfmk/vm/task_working_set.c index 8d7bc30fb..da8ecbac7 100644 --- a/osfmk/vm/task_working_set.c +++ b/osfmk/vm/task_working_set.c @@ -137,7 +137,7 @@ tws_hash_create( if((tws->table_ele[0] = (tws_hash_ptr_t) kalloc(sizeof(struct tws_hash_ptr) * lines * rows)) == NULL) { - kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ele_t) + kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows); kfree((vm_offset_t)tws, sizeof(struct tws_hash)); return (tws_hash_t)NULL; @@ -293,8 +293,8 @@ tws_hash_line_clear( && (dump_pmap == 1)) { pmap_remove_some_phys((pmap_t) vm_map_pmap( - hash_ele->map), - p->phys_addr); + current_map()), + p->phys_page); } } local_off += PAGE_SIZE_64; @@ -577,14 +577,6 @@ printf("cache_lookup, result = 0x%x, addr = 0x%x, object 0x%x, offset 0x%x%x\n", ask_for_startup_cache_release = 1; } } - if((tws->startup_name != NULL) && (tws->mod == 0)) { - /* Ensure as good a working set as possible */ - pmap_remove(map->pmap, 0, GLOBAL_SHARED_TEXT_SEGMENT); - pmap_remove(map->pmap, - GLOBAL_SHARED_DATA_SEGMENT - + SHARED_DATA_REGION_SIZE, 0xFFFFF000); - } - /* This next bit of code, the hash and alternate hash */ /* are all made necessary because of IPC COW */ @@ -767,6 +759,10 @@ printf("cache_lookup, result = 0x%x, addr = 0x%x, object 0x%x, offset 0x%x%x\n", tws_unlock(tws); return KERN_NO_SPACE; } + /* object persistence is guaranteed by */ + /* an elevated paging or object */ + /* reference count in the caller. */ + vm_object_unlock(object); if((tws->table[set] = (tws_hash_ptr_t *) kalloc(sizeof(tws_hash_ptr_t) * tws->number_of_lines @@ -790,12 +786,12 @@ printf("cache_lookup, result = 0x%x, addr = 0x%x, object 0x%x, offset 0x%x%x\n", * tws->number_of_lines * tws->number_of_elements)) == NULL) { - kfree((vm_offset_t)tws->table_ele[set], - sizeof(tws_hash_ptr_t) + kfree((vm_offset_t)tws->table_ele[set], + sizeof(struct tws_hash_ptr) * tws->number_of_lines * tws->number_of_elements); kfree((vm_offset_t)tws->table[set], - sizeof(struct tws_hash_ptr) + sizeof(tws_hash_ptr_t) * tws->number_of_lines * tws->number_of_elements); tws->table[set] = NULL; @@ -807,16 +803,16 @@ (struct tws_hash_line) * tws->number_of_lines)) == NULL) { - kfree((vm_offset_t)tws->table[set], - sizeof(tws_hash_ptr_t) + kfree((vm_offset_t)tws->alt_ele[set], + sizeof(struct tws_hash_ptr) * tws->number_of_lines * tws->number_of_elements); - kfree((vm_offset_t)tws->table_ele[set], + kfree((vm_offset_t)tws->table_ele[set], sizeof(struct tws_hash_ptr) * tws->number_of_lines * tws->number_of_elements); - kfree((vm_offset_t)tws->alt_ele[set], - sizeof(struct tws_hash_ptr) + kfree((vm_offset_t)tws->table[set], + sizeof(tws_hash_ptr_t) * tws->number_of_lines * tws->number_of_elements); tws->table[set] = NULL; @@ -843,6 +839,7 @@ printf("cache_lookup, result = 0x%x, addr = 0x%x, object 0x%x, offset 0x%x%x\n", sizeof(struct tws_hash_line) * tws->number_of_lines); } + vm_object_lock(object); } else { int age_of_cache; age_of_cache = @@ -1039,8 +1036,8 @@ tws_build_cluster( int age_of_cache; int pre_heat_size; unsigned int ele_cache; - unsigned int end_cache = NULL; - unsigned int start_cache = NULL; + unsigned int end_cache = 0; + unsigned int start_cache = 0; if((object->private) || !(object->pager)) return; @@ -1086,7 +1083,7 @@ *start = *start & 
TWS_HASH_OFF_MASK; *end = *start + (32 * PAGE_SIZE_64); if(*end > object_size) { - *end = trunc_page(object_size); + *end = trunc_page_64(object_size); max_length = 0; if(before >= *end) { *end = after; @@ -1109,7 +1106,7 @@ tws_build_cluster( *end = after + (32 * PAGE_SIZE_64); if(*end > object_size) { - *end = trunc_page(object_size); + *end = trunc_page_64(object_size); max_length = 0; if(*start >= *end) { *end = after; @@ -1133,7 +1130,7 @@ tws_build_cluster( break; } - if(start_cache != NULL) { + if(start_cache != 0) { unsigned int mask; for (mask = 1; mask != 0; mask = mask << 1) { @@ -1145,7 +1142,7 @@ tws_build_cluster( break; } } - if(end_cache != NULL) { + if(end_cache != 0) { unsigned int mask; for (mask = 0x80000000; @@ -1189,10 +1186,10 @@ tws_build_cluster( } if (vm_page_lookup(object, after) != VM_PAGE_NULL) { - /* we can bridge resident pages */ - after += PAGE_SIZE_64; - length += PAGE_SIZE; - continue; + /* + * don't bridge resident pages + */ + break; } if (object->internal) { @@ -1250,10 +1247,10 @@ tws_build_cluster( } if (vm_page_lookup(object, before) != VM_PAGE_NULL) { - /* we can bridge resident pages */ - *start -= PAGE_SIZE_64; - length += PAGE_SIZE; - continue; + /* + * don't bridge resident pages + */ + break; } if (object->internal) { @@ -1709,13 +1706,11 @@ tws_handle_startup_file( return KERN_SUCCESS; } *new_info = TRUE; + error = tws_write_startup_file(task, fid, mod, app_name, uid); if(error) return error; - /* use the mod in the write case as an init */ - /* flag */ - mod = 0; } else { error = tws_read_startup_file(task, @@ -1847,7 +1842,6 @@ tws_read_startup_file( /* just in case they're not, make sure we dealloc correctly */ startup->tws_hash_size = cache_size; - tws->startup_cache = startup; tws_unlock(tws); return KERN_SUCCESS; diff --git a/osfmk/vm/task_working_set.h b/osfmk/vm/task_working_set.h index 78c634588..90af7ef03 100644 --- a/osfmk/vm/task_working_set.h +++ b/osfmk/vm/task_working_set.h @@ -263,7 +263,7 @@ kern_return_t tws_read_startup_file( void tws_hash_ws_flush( - tws_hash_t tws); + tws_hash_t tws); diff --git a/osfmk/vm/vm_debug.c b/osfmk/vm/vm_debug.c index 97ae91e5a..87b81f23c 100644 --- a/osfmk/vm/vm_debug.c +++ b/osfmk/vm/vm_debug.c @@ -252,7 +252,7 @@ mach_vm_region_info( if (size != 0) kmem_free(ipc_kernel_map, addr, size); - size = round_page(2 * used * sizeof(vm_info_object_t)); + size = round_page_32(2 * used * sizeof(vm_info_object_t)); kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE); if (kr != KERN_SUCCESS) @@ -272,7 +272,7 @@ mach_vm_region_info( kmem_free(ipc_kernel_map, addr, size); } else { vm_size_t size_used = - round_page(used * sizeof(vm_info_object_t)); + round_page_32(used * sizeof(vm_info_object_t)); kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE); assert(kr == KERN_SUCCESS); @@ -451,7 +451,7 @@ mach_vm_region_info_64( if (size != 0) kmem_free(ipc_kernel_map, addr, size); - size = round_page(2 * used * sizeof(vm_info_object_t)); + size = round_page_32(2 * used * sizeof(vm_info_object_t)); kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE); if (kr != KERN_SUCCESS) @@ -471,7 +471,7 @@ mach_vm_region_info_64( kmem_free(ipc_kernel_map, addr, size); } else { vm_size_t size_used = - round_page(used * sizeof(vm_info_object_t)); + round_page_32(used * sizeof(vm_info_object_t)); kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE); assert(kr == KERN_SUCCESS); @@ -514,7 +514,7 @@ vm_mapped_pages_info( pmap = map->pmap; size = pmap_resident_count(pmap) * sizeof(vm_offset_t); - size =
round_page(size); + size = round_page_32(size); for (;;) { (void) vm_allocate(ipc_kernel_map, &addr, size, TRUE); @@ -537,7 +537,7 @@ vm_mapped_pages_info( /* * Try again, doubling the size */ - size = round_page(actual * sizeof(vm_offset_t)); + size = round_page_32(actual * sizeof(vm_offset_t)); } if (actual == 0) { *pages = 0; @@ -546,7 +546,7 @@ vm_mapped_pages_info( } else { *pages_count = actual; - size_used = round_page(actual * sizeof(vm_offset_t)); + size_used = round_page_32(actual * sizeof(vm_offset_t)); (void) vm_map_wire(ipc_kernel_map, addr, addr + size, VM_PROT_READ|VM_PROT_WRITE, FALSE); @@ -612,7 +612,7 @@ host_virtual_physical_table_info( if (info != *infop) kmem_free(ipc_kernel_map, addr, size); - size = round_page(actual * sizeof *info); + size = round_page_32(actual * sizeof *info); kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size); if (kr != KERN_SUCCESS) return KERN_RESOURCE_SHORTAGE; @@ -633,7 +633,7 @@ host_virtual_physical_table_info( vm_map_copy_t copy; vm_size_t used; - used = round_page(actual * sizeof *info); + used = round_page_32(actual * sizeof *info); if (used != size) kmem_free(ipc_kernel_map, addr + used, size - used); diff --git a/osfmk/vm/vm_external.c b/osfmk/vm/vm_external.c index 859cda6c4..555039930 100644 --- a/osfmk/vm/vm_external.c +++ b/osfmk/vm/vm_external.c @@ -235,7 +235,7 @@ _vm_external_state_get( assert (map != VM_EXTERNAL_NULL); - bit = atop(offset); + bit = atop_32(offset); byte = bit >> 3; if (map[byte] & (1 << (bit & 07))) { return VM_EXTERNAL_STATE_EXISTS; @@ -255,7 +255,7 @@ vm_external_state_set( if (map == VM_EXTERNAL_NULL) return; - bit = atop(offset); + bit = atop_32(offset); byte = bit >> 3; map[byte] |= (1 << (bit & 07)); } @@ -271,7 +271,7 @@ vm_external_state_clr( if (map == VM_EXTERNAL_NULL) return; - bit = atop(offset); + bit = atop_32(offset); byte = bit >> 3; map[byte] &= ~(1 << (bit & 07)); } diff --git a/osfmk/vm/vm_external.h b/osfmk/vm/vm_external.h index f7d8daf0c..fd38ce629 100644 --- a/osfmk/vm/vm_external.h +++ b/osfmk/vm/vm_external.h @@ -82,7 +82,7 @@ typedef int vm_external_state_t; /* * Useful macros */ -#define stob(s) ((atop((s)) + 07) >> 3) +#define stob(s) ((atop_32((s)) + 07) >> 3) /* * Routines exported by this module. diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c index 4fd45fabf..c83ae023c 100644 --- a/osfmk/vm/vm_fault.c +++ b/osfmk/vm/vm_fault.c @@ -1,6 +1,5 @@ - /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
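/*
 * [Editor's sketch] The vm_external hunks above index an existence
 * bitmap: atop_32() turns a byte offset into a page number, the low
 * three bits of that number select a bit within a byte, and the rest
 * select the byte (the stob() macro likewise rounds a page count up
 * to whole bytes). The standalone model below shows the same
 * set/test arithmetic; names and the 4 KB page assumption are the
 * editor's, not the kernel's.
 */
#define EX_PAGE_SHIFT 12	/* assuming 4 KB pages */

static void
ex_bitmap_set(unsigned char *map, unsigned long long offset)
{
	unsigned int bit = (unsigned int)(offset >> EX_PAGE_SHIFT);

	map[bit >> 3] |= (unsigned char)(1 << (bit & 07));
}

static int
ex_bitmap_test(const unsigned char *map, unsigned long long offset)
{
	unsigned int bit = (unsigned int)(offset >> EX_PAGE_SHIFT);

	return (map[bit >> 3] & (1 << (bit & 07))) != 0;
}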
* * @APPLE_LICENSE_HEADER_START@ * * @@ -80,11 +79,11 @@ extern int device_pager_workaround; #include #include #include -#include #include #include #include #include +#include #include #include #include @@ -106,8 +105,6 @@ extern int device_pager_workaround; int vm_object_absent_max = 50; int vm_fault_debug = 0; -boolean_t vm_page_deactivate_behind = TRUE; - #if !VM_FAULT_STATIC_CONFIG boolean_t vm_fault_dirty_handling = FALSE; @@ -207,13 +204,107 @@ struct { boolean_t vm_allow_clustered_pagein = FALSE; int vm_pagein_cluster_used = 0; +#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0) + + +boolean_t vm_page_deactivate_behind = TRUE; /* * Prepage default sizes given VM_BEHAVIOR_DEFAULT reference behavior */ -int vm_default_ahead = 1; /* Number of pages to prepage ahead */ -int vm_default_behind = 0; /* Number of pages to prepage behind */ +int vm_default_ahead = 0; +int vm_default_behind = MAX_UPL_TRANSFER; + +/* + * vm_fault_deactivate_behind + * + * Determine if sequential access is in progress + * in accordance with the behavior specified. If + * so, compute a potential page to deactivate and + * deactivate it. + * + * The object must be locked. + */ +static +boolean_t +vm_fault_deactivate_behind( + vm_object_t object, + vm_offset_t offset, + vm_behavior_t behavior) +{ + vm_page_t m; + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */ +#endif + + switch (behavior) { + case VM_BEHAVIOR_RANDOM: + object->sequential = PAGE_SIZE_64; + m = VM_PAGE_NULL; + break; + case VM_BEHAVIOR_SEQUENTIAL: + if (offset && + object->last_alloc == offset - PAGE_SIZE_64) { + object->sequential += PAGE_SIZE_64; + m = vm_page_lookup(object, offset - PAGE_SIZE_64); + } else { + object->sequential = PAGE_SIZE_64; /* reset */ + m = VM_PAGE_NULL; + } + break; + case VM_BEHAVIOR_RSEQNTL: + if (object->last_alloc && + object->last_alloc == offset + PAGE_SIZE_64) { + object->sequential += PAGE_SIZE_64; + m = vm_page_lookup(object, offset + PAGE_SIZE_64); + } else { + object->sequential = PAGE_SIZE_64; /* reset */ + m = VM_PAGE_NULL; + } + break; + case VM_BEHAVIOR_DEFAULT: + default: + if (offset && + object->last_alloc == offset - PAGE_SIZE_64) { + vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64; + + object->sequential += PAGE_SIZE_64; + m = (offset >= behind && + object->sequential >= behind) ? + vm_page_lookup(object, offset - behind) : + VM_PAGE_NULL; + } else if (object->last_alloc && + object->last_alloc == offset + PAGE_SIZE_64) { + vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64; + + object->sequential += PAGE_SIZE_64; + m = (offset < -behind && + object->sequential >= behind) ? + vm_page_lookup(object, offset + behind) : + VM_PAGE_NULL; + } else { + object->sequential = PAGE_SIZE_64; + m = VM_PAGE_NULL; + } + break; + } + + object->last_alloc = offset; + + if (m) { + if (!m->busy) { + vm_page_lock_queues(); + vm_page_deactivate(m); + vm_page_unlock_queues(); +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ +#endif + } + return TRUE; + } + return FALSE; +} -#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0) /* * Routine: vm_fault_page @@ -613,12 +704,38 @@ vm_fault_page( * need to allocate a real page.
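/*
 * [Editor's sketch] vm_fault_deactivate_behind(), added above, turns
 * the old inline "deactivate the page we just passed" code into a
 * policy routine. Simplified model of the VM_BEHAVIOR_DEFAULT forward
 * case only: a fault landing one page past the previous fault extends
 * the detected run; once the run covers the configured window, the
 * page that far behind becomes a deactivation candidate. Standalone,
 * hypothetical names; the real routine also handles the reverse and
 * the explicit sequential/random behaviors.
 */
#define EX_PAGE_SIZE 4096ULL
#define EX_WINDOW    32ULL	/* pages; the kernel uses vm_default_behind */

struct ex_stream {
	unsigned long long last_alloc;	/* offset of the previous fault */
	unsigned long long sequential;	/* bytes of run detected so far */
};

/* Returns the offset of a page worth deactivating, or -1 if none. */
static long long
ex_deactivate_candidate(struct ex_stream *s, unsigned long long offset)
{
	const unsigned long long behind = EX_WINDOW * EX_PAGE_SIZE;
	long long victim = -1;

	if (offset != 0 && s->last_alloc == offset - EX_PAGE_SIZE) {
		s->sequential += EX_PAGE_SIZE;	/* run continues */
		if (offset >= behind && s->sequential >= behind)
			victim = (long long)(offset - behind);
	} else {
		s->sequential = EX_PAGE_SIZE;	/* run broken: reset */
	}
	s->last_alloc = offset;
	return victim;
}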
*/ if (VM_PAGE_THROTTLED() || - (real_m = vm_page_grab()) == VM_PAGE_NULL) { - vm_fault_cleanup(object, first_m); - thread_interrupt_level(interruptible_state); - return(VM_FAULT_MEMORY_SHORTAGE); + (real_m = vm_page_grab()) + == VM_PAGE_NULL) { + vm_fault_cleanup( + object, first_m); + thread_interrupt_level( + interruptible_state); + return( + VM_FAULT_MEMORY_SHORTAGE); + } + + /* + * are we protecting the system from + * backing store exhaustion. If so + * sleep unless we are privileged. + */ + + if(vm_backing_store_low) { + if(!(current_task()->priv_flags + & VM_BACKING_STORE_PRIV)) { + assert_wait((event_t) + &vm_backing_store_low, + THREAD_UNINT); + vm_fault_cleanup(object, + first_m); + thread_block((void(*)(void)) 0); + thread_interrupt_level( + interruptible_state); + return(VM_FAULT_RETRY); + } } + XPR(XPR_VM_FAULT, "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n", (integer_t)object, offset, @@ -654,17 +771,19 @@ vm_fault_page( if (!no_zero_fill) { vm_object_unlock(object); vm_page_zero_fill(m); - if (type_of_fault) - *type_of_fault = DBG_ZERO_FILL_FAULT; - VM_STAT(zero_fill_count++); - - if (bumped_pagein == TRUE) { - VM_STAT(pageins--); - current_task()->pageins--; - } vm_object_lock(object); } - pmap_clear_modify(m->phys_addr); + if (type_of_fault) + *type_of_fault = DBG_ZERO_FILL_FAULT; + VM_STAT(zero_fill_count++); + + if (bumped_pagein == TRUE) { + VM_STAT(pageins--); + current_task()->pageins--; + } +#if 0 + pmap_clear_modify(m->phys_page); +#endif vm_page_lock_queues(); VM_PAGE_QUEUES_REMOVE(m); m->page_ticket = vm_page_ticket; @@ -1248,6 +1367,19 @@ no_clustering: assert(m->object == object); first_m = VM_PAGE_NULL; + if(m == VM_PAGE_NULL) { + m = vm_page_grab(); + if (m == VM_PAGE_NULL) { + vm_fault_cleanup( + object, VM_PAGE_NULL); + thread_interrupt_level( + interruptible_state); + return(VM_FAULT_MEMORY_SHORTAGE); + } + vm_page_insert( + m, object, offset); + } + if (object->shadow_severed) { VM_PAGE_FREE(m); vm_fault_cleanup(object, VM_PAGE_NULL); @@ -1255,6 +1387,27 @@ no_clustering: return VM_FAULT_MEMORY_ERROR; } + /* + * are we protecting the system from + * backing store exhaustion. If so + * sleep unless we are privileged. 
+ */ + + if(vm_backing_store_low) { + if(!(current_task()->priv_flags + & VM_BACKING_STORE_PRIV)) { + assert_wait((event_t) + &vm_backing_store_low, + THREAD_UNINT); + VM_PAGE_FREE(m); + vm_fault_cleanup(object, VM_PAGE_NULL); + thread_block((void (*)(void)) 0); + thread_interrupt_level( + interruptible_state); + return(VM_FAULT_RETRY); + } + } + if (VM_PAGE_THROTTLED() || (m->fictitious && !vm_page_convert(m))) { VM_PAGE_FREE(m); @@ -1267,16 +1420,17 @@ no_clustering: if (!no_zero_fill) { vm_object_unlock(object); vm_page_zero_fill(m); - if (type_of_fault) - *type_of_fault = DBG_ZERO_FILL_FAULT; - VM_STAT(zero_fill_count++); - - if (bumped_pagein == TRUE) { - VM_STAT(pageins--); - current_task()->pageins--; - } vm_object_lock(object); } + if (type_of_fault) + *type_of_fault = DBG_ZERO_FILL_FAULT; + VM_STAT(zero_fill_count++); + + if (bumped_pagein == TRUE) { + VM_STAT(pageins--); + current_task()->pageins--; + } + vm_page_lock_queues(); VM_PAGE_QUEUES_REMOVE(m); if(m->object->size > 0x80000) { @@ -1303,7 +1457,9 @@ no_clustering: m->inactive = TRUE; vm_page_inactive_count++; vm_page_unlock_queues(); - pmap_clear_modify(m->phys_addr); +#if 0 + pmap_clear_modify(m->phys_page); +#endif break; } else { @@ -1375,6 +1531,27 @@ no_clustering: assert(!must_be_resident); + /* + * are we protecting the system from + * backing store exhaustion. If so + * sleep unless we are privileged. + */ + + if(vm_backing_store_low) { + if(!(current_task()->priv_flags + & VM_BACKING_STORE_PRIV)) { + assert_wait((event_t) + &vm_backing_store_low, + THREAD_UNINT); + RELEASE_PAGE(m); + vm_fault_cleanup(object, first_m); + thread_block((void (*)(void)) 0); + thread_interrupt_level( + interruptible_state); + return(VM_FAULT_RETRY); + } + } + /* * If we try to collapse first_object at this * point, we may deadlock when we try to get @@ -1424,7 +1601,7 @@ no_clustering: vm_page_lock_queues(); assert(!m->cleaning); - pmap_page_protect(m->phys_addr, VM_PROT_NONE); + pmap_page_protect(m->phys_page, VM_PROT_NONE); vm_page_deactivate(m); copy_m->dirty = TRUE; /* @@ -1465,7 +1642,7 @@ no_clustering: */ vm_object_paging_end(object); - vm_object_collapse(object); + vm_object_collapse(object, offset); vm_object_paging_begin(object); } @@ -1585,6 +1762,31 @@ no_clustering: * We must copy the page to the copy object. */ + /* + * are we protecting the system from + * backing store exhaustion. If so + * sleep unless we are privileged. + */ + + if(vm_backing_store_low) { + if(!(current_task()->priv_flags + & VM_BACKING_STORE_PRIV)) { + assert_wait((event_t) + &vm_backing_store_low, + THREAD_UNINT); + RELEASE_PAGE(m); + VM_OBJ_RES_DECR(copy_object); + copy_object->ref_count--; + assert(copy_object->ref_count > 0); + vm_object_unlock(copy_object); + vm_fault_cleanup(object, first_m); + thread_block((void (*)(void)) 0); + thread_interrupt_level( + interruptible_state); + return(VM_FAULT_RETRY); + } + } + /* * Allocate a page for the copy */ @@ -1615,7 +1817,7 @@ no_clustering: vm_page_lock_queues(); assert(!m->cleaning); - pmap_page_protect(m->phys_addr, VM_PROT_NONE); + pmap_page_protect(m->phys_page, VM_PROT_NONE); copy_m->dirty = TRUE; vm_page_unlock_queues(); @@ -1730,37 +1932,22 @@ no_clustering: * mark read-only data as dirty.] 
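/*
 * [Editor's note] Several hunks above insert the same throttle: when
 * backing store is nearly exhausted (vm_backing_store_low, declared in
 * the vm_map.h hunk further down), an unprivileged faulting thread
 * parks itself on that flag and asks for the fault to be redriven,
 * while a privileged thread (VM_BACKING_STORE_PRIV, e.g. the pageout
 * path) may proceed so it can free space. Paraphrased shape of the
 * pattern, not the verbatim kernel code:
 */
	if (vm_backing_store_low) {
		if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) {
			assert_wait((event_t)&vm_backing_store_low,
				    THREAD_UNINT);
			/* ...release the page/object/lock state held
			 * for this fault before sleeping... */
			thread_block((void (*)(void)) 0);
			return (VM_FAULT_RETRY);  /* redrive after wakeup */
		}
	}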
*/ + + if(m != VM_PAGE_NULL) { #if !VM_FAULT_STATIC_CONFIG - if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE) && - (m != VM_PAGE_NULL)) { - m->dirty = TRUE; - } -#endif -#if TRACEFAULTPAGE - dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_page_deactivate_behind); /* (TEST/DEBUG) */ -#endif - if (vm_page_deactivate_behind) { - if (offset && /* don't underflow */ - (object->last_alloc == (offset - PAGE_SIZE_64))) { - m = vm_page_lookup(object, object->last_alloc); - if ((m != VM_PAGE_NULL) && !m->busy) { - vm_page_lock_queues(); - vm_page_deactivate(m); - vm_page_unlock_queues(); - } -#if TRACEFAULTPAGE - dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ + if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE)) + m->dirty = TRUE; #endif - } - object->last_alloc = offset; + if (vm_page_deactivate_behind) + vm_fault_deactivate_behind(object, offset, behavior); + } else { + vm_object_unlock(object); } + thread_interrupt_level(interruptible_state); + #if TRACEFAULTPAGE dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */ #endif - thread_interrupt_level(interruptible_state); - if(*result_page == VM_PAGE_NULL) { - vm_object_unlock(object); - } return(VM_FAULT_SUCCESS); #if 0 @@ -1780,6 +1967,97 @@ no_clustering: #undef RELEASE_PAGE } +/* + * Routine: vm_fault_tws_insert + * Purpose: + * Add fault information to the task working set. + * Implementation: + * We always insert the base object/offset pair + * rather than the actual object/offset. + * Assumptions: + * Map and pmap_map locked. + * Object locked and referenced. + * Returns: + * TRUE if startup file should be written. + * With object locked and still referenced. + * But we may drop the object lock temporarily. + */ +static boolean_t +vm_fault_tws_insert( + vm_map_t map, + vm_map_t pmap_map, + vm_offset_t vaddr, + vm_object_t object, + vm_object_offset_t offset) +{ + tws_hash_line_t line; + task_t task; + kern_return_t kr; + boolean_t result = FALSE; + extern vm_map_t kalloc_map; + + /* Avoid possible map lock deadlock issues */ + if (map == kernel_map || map == kalloc_map || + pmap_map == kernel_map || pmap_map == kalloc_map) + return result; + + task = current_task(); + if (task->dynamic_working_set != 0) { + vm_object_t base_object; + vm_object_t base_shadow; + vm_object_offset_t base_offset; + base_object = object; + base_offset = offset; + while(base_shadow = base_object->shadow) { + vm_object_lock(base_shadow); + vm_object_unlock(base_object); + base_offset += + base_object->shadow_offset; + base_object = base_shadow; + } + kr = tws_lookup((tws_hash_t) + task->dynamic_working_set, + base_offset, base_object, + &line); + if (kr == KERN_OPERATION_TIMED_OUT){ + result = TRUE; + if (base_object != object) { + vm_object_unlock(base_object); + vm_object_lock(object); + } + } else if (kr != KERN_SUCCESS) { + if(base_object != object) + vm_object_reference_locked(base_object); + kr = tws_insert((tws_hash_t) + task->dynamic_working_set, + base_offset, base_object, + vaddr, pmap_map); + if(base_object != object) { + vm_object_unlock(base_object); + vm_object_deallocate(base_object); + } + if(kr == KERN_NO_SPACE) { + if (base_object == object) + vm_object_unlock(object); + tws_expand_working_set( + task->dynamic_working_set, + TWS_HASH_LINE_COUNT, + FALSE); + if (base_object == object) + vm_object_lock(object); + } else if(kr == KERN_OPERATION_TIMED_OUT) { + result = TRUE; + } + if(base_object != object) + vm_object_lock(object); + } else if (base_object !=
object) { + vm_object_unlock(base_object); + vm_object_lock(object); + } + } + return result; +} + /* * Routine: vm_fault * Purpose: @@ -1836,8 +2114,9 @@ vm_fault( unsigned int cache_attr; int write_startup_file = 0; vm_prot_t full_fault_type; - + if (get_preemption_level() != 0) + return (KERN_FAILURE); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_START, vaddr, @@ -1846,7 +2125,6 @@ vm_fault( 0, 0); - cur_thread = current_thread(); /* at present we do not fully check for execute permission */ /* we generally treat it is read except in certain device */ /* memory settings */ @@ -1871,6 +2149,8 @@ vm_fault( /* * drop funnel if it is already held. Then restore while returning */ + cur_thread = current_thread(); + if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) { funnel_set = TRUE; curflock = cur_thread->funnel_lock; @@ -1958,7 +2238,7 @@ vm_fault( while (TRUE) { m = vm_page_lookup(cur_object, cur_offset); if (m != VM_PAGE_NULL) { - if (m->busy) { + if (m->busy) { wait_result_t result; if (object != cur_object) @@ -2008,6 +2288,7 @@ vm_fault( goto FastMapInFault; if ((fault_type & VM_PROT_WRITE) == 0) { + boolean_t sequential; prot &= ~VM_PROT_WRITE; @@ -2047,12 +2328,28 @@ FastPmapEnter: prot &= ~VM_PROT_WRITE; #endif /* MACH_KDB */ #endif /* STATIC_CONFIG */ + cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK; + + sequential = FALSE; if (m->no_isync == TRUE) { - pmap_sync_caches_phys(m->phys_addr); m->no_isync = FALSE; + pmap_sync_caches_phys(m->phys_page); + if (type_of_fault == DBG_CACHE_HIT_FAULT) { + /* + * found it in the cache, but this + * is the first fault-in of the page (no_isync == TRUE) + * so it must have come in as part of + * a cluster... account 1 pagein against it + */ + VM_STAT(pageins++); + current_task()->pageins++; + type_of_fault = DBG_PAGEIN_FAULT; + sequential = TRUE; + } + } else if (cache_attr != VM_WIMG_DEFAULT) { + pmap_sync_caches_phys(m->phys_page); } - cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK; if(caller_pmap) { PMAP_ENTER(caller_pmap, caller_pmap_addr, m, @@ -2063,7 +2360,7 @@ FastPmapEnter: } /* - * Grab the queues lock to manipulate + * Hold queues lock to manipulate * the page queues. Change wiring * case is obvious. In soft ref bits * case activate page only if it fell @@ -2075,7 +2372,6 @@ FastPmapEnter: * queue. This code doesn't. */ vm_page_lock_queues(); - if (m->clustered) { vm_pagein_cluster_used++; m->clustered = FALSE; @@ -2108,55 +2404,23 @@ FastPmapEnter: * That's it, clean up and return. 
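/*
 * [Editor's sketch] vm_fault_tws_insert(), completed just above,
 * records the *base* object/offset pair so a working-set entry stays
 * stable no matter how deep a copy-on-write shadow chain sits on top
 * of it. Stripped of the hand-over-hand locking, the chain walk is
 * just an offset translation; the types here are hypothetical
 * stand-ins for the kernel's vm_object:
 */
struct ex_object {
	struct ex_object   *shadow;		/* backing object, or NULL */
	unsigned long long  shadow_offset;	/* our window into shadow  */
};

static void
ex_find_base(struct ex_object *obj, unsigned long long offset,
	     struct ex_object **base, unsigned long long *base_offset)
{
	while (obj->shadow != NULL) {
		offset += obj->shadow_offset;	/* translate downward */
		obj = obj->shadow;
	}
	*base = obj;
	*base_offset = offset;
}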
*/ PAGE_WAKEUP_DONE(m); - vm_object_paging_end(object); - { - tws_hash_line_t line; - task_t task; - - task = current_task(); - if((map != NULL) && - (task->dynamic_working_set != 0) && - !(object->private)) { - kern_return_t kr; - vm_object_t base_object; - vm_object_offset_t base_offset; - base_object = object; - base_offset = cur_offset; - while(base_object->shadow) { - base_offset += - base_object->shadow_offset; - base_object = - base_object->shadow; - } - kr = tws_lookup((tws_hash_t) - task->dynamic_working_set, - base_offset, base_object, - &line); - if(kr == KERN_OPERATION_TIMED_OUT){ - write_startup_file = 1; - } else if (kr != KERN_SUCCESS) { - kr = tws_insert((tws_hash_t) - task->dynamic_working_set, - base_offset, base_object, - vaddr, pmap_map); - if(kr == KERN_NO_SPACE) { - vm_object_unlock(object); - - tws_expand_working_set( - task->dynamic_working_set, - TWS_HASH_LINE_COUNT, - FALSE); - - vm_object_lock(object); - } - if(kr == - KERN_OPERATION_TIMED_OUT) { - write_startup_file = 1; - } - } - } + sequential = (sequential && vm_page_deactivate_behind) ? + vm_fault_deactivate_behind(object, cur_offset, behavior) : + FALSE; + + /* + * Add non-sequential pages to the working set. + * The sequential pages will be brought in through + * normal clustering behavior. + */ + if (!sequential && !object->private) { + write_startup_file = + vm_fault_tws_insert(map, pmap_map, vaddr, + object, cur_offset); } + + vm_object_paging_end(object); vm_object_unlock(object); vm_map_unlock_read(map); @@ -2238,7 +2502,7 @@ FastPmapEnter: vm_page_lock_queues(); vm_page_deactivate(cur_m); m->dirty = TRUE; - pmap_page_protect(cur_m->phys_addr, + pmap_page_protect(cur_m->phys_page, VM_PROT_NONE); vm_page_unlock_queues(); @@ -2253,7 +2517,7 @@ FastPmapEnter: */ vm_object_paging_end(object); - vm_object_collapse(object); + vm_object_collapse(object, offset); vm_object_paging_begin(object); goto FastPmapEnter; @@ -2301,9 +2565,18 @@ FastPmapEnter: * page, then drop any lower lock. * Give up if no page. */ - if ((vm_page_free_target - - ((vm_page_free_target-vm_page_free_min)>>2)) - > vm_page_free_count) { + if (VM_PAGE_THROTTLED()) { + break; + } + + /* + * are we protecting the system from + * backing store exhaustion. If so + * sleep unless we are privileged. 
+ */ + if(vm_backing_store_low) { + if(!(current_task()->priv_flags + & VM_BACKING_STORE_PRIV)) break; } m = vm_page_alloc(object, offset); @@ -2420,54 +2693,12 @@ FastPmapEnter: vm_object_paging_begin(object); XPR(XPR_VM_FAULT,"vm_fault -> vm_fault_page\n",0,0,0,0,0); - { - tws_hash_line_t line; - task_t task; - kern_return_t kr; - - task = current_task(); - if((map != NULL) && - (task->dynamic_working_set != 0) - && !(object->private)) { - vm_object_t base_object; - vm_object_offset_t base_offset; - base_object = object; - base_offset = offset; - while(base_object->shadow) { - base_offset += - base_object->shadow_offset; - base_object = - base_object->shadow; - } - kr = tws_lookup((tws_hash_t) - task->dynamic_working_set, - base_offset, base_object, - &line); - if(kr == KERN_OPERATION_TIMED_OUT){ - write_startup_file = 1; - } else if (kr != KERN_SUCCESS) { - tws_insert((tws_hash_t) - task->dynamic_working_set, - base_offset, base_object, - vaddr, pmap_map); - kr = tws_insert((tws_hash_t) - task->dynamic_working_set, - base_offset, base_object, - vaddr, pmap_map); - if(kr == KERN_NO_SPACE) { - vm_object_unlock(object); - tws_expand_working_set( - task->dynamic_working_set, - TWS_HASH_LINE_COUNT, - FALSE); - vm_object_lock(object); - } - if(kr == KERN_OPERATION_TIMED_OUT) { - write_startup_file = 1; - } - } - } + + if (!object->private) { + write_startup_file = + vm_fault_tws_insert(map, pmap_map, vaddr, object, offset); } + kr = vm_fault_page(object, offset, fault_type, (change_wiring && !wired), interruptible, @@ -2666,11 +2897,22 @@ FastPmapEnter: */ if (m != VM_PAGE_NULL) { if (m->no_isync == TRUE) { - pmap_sync_caches_phys(m->phys_addr); - + pmap_sync_caches_phys(m->phys_page); + + if (type_of_fault == DBG_CACHE_HIT_FAULT) { + /* + * found it in the cache, but this + * is the first fault-in of the page (no_isync == TRUE) + * so it must have come in as part of + * a cluster... account 1 pagein against it + */ + VM_STAT(pageins++); + current_task()->pageins++; + + type_of_fault = DBG_PAGEIN_FAULT; + } m->no_isync = FALSE; } - cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK; if(caller_pmap) { @@ -2681,58 +2923,19 @@ FastPmapEnter: PMAP_ENTER(pmap, vaddr, m, prot, cache_attr, wired); } - { - tws_hash_line_t line; - task_t task; - kern_return_t kr; - - task = current_task(); - if((map != NULL) && - (task->dynamic_working_set != 0) - && (object->private)) { - vm_object_t base_object; - vm_object_offset_t base_offset; - base_object = m->object; - base_offset = m->offset; - while(base_object->shadow) { - base_offset += - base_object->shadow_offset; - base_object = - base_object->shadow; - } - kr = tws_lookup((tws_hash_t) - task->dynamic_working_set, - base_offset, base_object, &line); - if(kr == KERN_OPERATION_TIMED_OUT){ - write_startup_file = 1; - } else if (kr != KERN_SUCCESS) { - tws_insert((tws_hash_t) - task->dynamic_working_set, - base_offset, base_object, - vaddr, pmap_map); - kr = tws_insert((tws_hash_t) - task->dynamic_working_set, - base_offset, base_object, - vaddr, pmap_map); - if(kr == KERN_NO_SPACE) { - vm_object_unlock(m->object); - tws_expand_working_set( - task->dynamic_working_set, - TWS_HASH_LINE_COUNT, - FALSE); - vm_object_lock(m->object); - } - if(kr == KERN_OPERATION_TIMED_OUT) { - write_startup_file = 1; - } - } - } + + /* + * Add working set information for private objects here. 
+ */ + if (m->object->private) { + write_startup_file = + vm_fault_tws_insert(map, pmap_map, vaddr, + m->object, m->offset); } } else { #ifndef i386 int memattr; - struct phys_entry *pp; vm_map_entry_t entry; vm_offset_t laddr; vm_offset_t ldelta, hdelta; @@ -2741,22 +2944,16 @@ FastPmapEnter: * do a pmap block mapping from the physical address * in the object */ - if(pp = pmap_find_physentry( - (vm_offset_t)object->shadow_offset)) { - memattr = ((pp->pte1 & 0x00000078) >> 3); - } else { - memattr = VM_WIMG_MASK & (int)object->wimg_bits; - } + /* While we do not worry about execution protection in */ + /* general, certain pages may have instruction execution */ + /* disallowed. We will check here, and if not allowed */ + /* to execute, we return with a protection failure. */ - /* While we do not worry about execution protection in */ - /* general, we may be able to read device memory and */ - /* still not be able to execute it. Here we check for */ - /* the guarded bit. If its set and we are attempting */ - /* to execute, we return with a protection failure. */ + if((full_fault_type & VM_PROT_EXECUTE) && + (pmap_canExecute((ppnum_t) + (object->shadow_offset >> 12)) < 1)) { - if((memattr & VM_MEM_GUARDED) && - (full_fault_type & VM_PROT_EXECUTE)) { vm_map_verify_done(map, &version); if(pmap_map != map) vm_map_unlock(pmap_map); @@ -2766,8 +2963,6 @@ FastPmapEnter: goto done; } - - if(pmap_map != map) { vm_map_unlock(pmap_map); } @@ -2812,32 +3007,36 @@ FastPmapEnter: if(caller_pmap) { + /* Set up a block mapped area */ pmap_map_block(caller_pmap, - caller_pmap_addr - ldelta, - ((vm_offset_t) + (addr64_t)(caller_pmap_addr - ldelta), + (((vm_offset_t) (entry->object.vm_object->shadow_offset)) + entry->offset + - (laddr - entry->vme_start) - ldelta, + (laddr - entry->vme_start) + - ldelta)>>12, ldelta + hdelta, prot, - memattr, 0); /* Set up a block mapped area */ - } else { - pmap_map_block(pmap_map->pmap, vaddr - ldelta, - ((vm_offset_t) + (VM_WIMG_MASK & (int)object->wimg_bits), 0); + } else { + /* Set up a block mapped area */ + pmap_map_block(pmap_map->pmap, + (addr64_t)(vaddr - ldelta), + (((vm_offset_t) (entry->object.vm_object->shadow_offset)) - + entry->offset + - (laddr - entry->vme_start) - ldelta, - ldelta + hdelta, prot, - memattr, 0); /* Set up a block mapped area */ + + entry->offset + + (laddr - entry->vme_start) - ldelta)>>12, + ldelta + hdelta, prot, + (VM_WIMG_MASK & (int)object->wimg_bits), 0); } } #else #ifdef notyet if(caller_pmap) { pmap_enter(caller_pmap, caller_pmap_addr, - object->shadow_offset, prot, 0, TRUE); + object->shadow_offset>>12, prot, 0, TRUE); } else { pmap_enter(pmap, vaddr, - object->shadow_offset, prot, 0, TRUE); + object->shadow_offset>>12, prot, 0, TRUE); } /* Map it in */ #endif @@ -3052,7 +3251,7 @@ vm_fault_unwire( result_object = result_page->object; if (deallocate) { assert(!result_page->fictitious); - pmap_page_protect(result_page->phys_addr, + pmap_page_protect(result_page->phys_page, VM_PROT_NONE); VM_PAGE_FREE(result_page); } else { @@ -3232,7 +3431,7 @@ vm_fault_wire_fast( * may cause other faults.
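/*
 * [Editor's note] The "(ppnum_t)(object->shadow_offset >> 12)" and
 * "object->shadow_offset>>12" conversions above follow the patch-wide
 * move from byte-grained physical addresses to physical page numbers:
 * shifting a byte address right by the page shift (12 for 4 KB pages)
 * yields the ppnum_t the reworked pmap interface expects, and a 32-bit
 * page number can then name physical memory well beyond 4 GB even
 * where vm_offset_t stays 32 bits wide. Illustrative helpers only,
 * with hypothetical names:
 */
typedef unsigned int       ex_ppnum_t;	/* physical page number  */
typedef unsigned long long ex_addr64_t;	/* wide physical address */

#define EX_PAGE_SHIFT 12		/* assuming 4 KB pages */

static ex_ppnum_t
ex_atop(ex_addr64_t pa)			/* byte address -> page number */
{
	return (ex_ppnum_t)(pa >> EX_PAGE_SHIFT);
}

static ex_addr64_t
ex_ptoa(ex_ppnum_t pn)			/* page number -> byte address */
{
	return ((ex_addr64_t)pn) << EX_PAGE_SHIFT;
}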
*/ if (m->no_isync == TRUE) { - pmap_sync_caches_phys(m->phys_addr); + pmap_sync_caches_phys(m->phys_page); m->no_isync = FALSE; } diff --git a/osfmk/vm/vm_init.c b/osfmk/vm/vm_init.c index 37e0e6a75..26bd959e5 100644 --- a/osfmk/vm/vm_init.c +++ b/osfmk/vm/vm_init.c @@ -106,8 +106,9 @@ vm_mem_bootstrap(void) if (PE_parse_boot_arg("zsize", &zsize)) zsize = zsize * 1024 * 1024; else { - zsize = mem_size >> 2; /* Get target zone size as 1/4 of physical memory */ + zsize = sane_size >> 2; /* Get target zone size as 1/4 of physical memory */ } + if(zsize < ZONE_MAP_MIN) zsize = ZONE_MAP_MIN; /* Clamp to min */ if(zsize > ZONE_MAP_MAX) zsize = ZONE_MAP_MAX; /* Clamp to max */ zone_init(zsize); /* Allocate address space for zones */ diff --git a/osfmk/vm/vm_kern.c b/osfmk/vm/vm_kern.c index 10cb0eebd..5b5b9cbc8 100644 --- a/osfmk/vm/vm_kern.c +++ b/osfmk/vm/vm_kern.c @@ -120,7 +120,7 @@ kmem_alloc_contig( return KERN_INVALID_ARGUMENT; } - size = round_page(size); + size = round_page_32(size); if ((flags & KMA_KOBJECT) == 0) { object = vm_object_allocate(size); kr = vm_map_find_space(map, &addr, size, mask, &entry); @@ -213,7 +213,7 @@ kernel_memory_allocate( vm_offset_t i; kern_return_t kr; - size = round_page(size); + size = round_page_32(size); if ((flags & KMA_KOBJECT) == 0) { /* * Allocate a new object. We must do this before locking @@ -356,10 +356,10 @@ kmem_realloc( vm_page_t mem; kern_return_t kr; - oldmin = trunc_page(oldaddr); - oldmax = round_page(oldaddr + oldsize); + oldmin = trunc_page_32(oldaddr); + oldmax = round_page_32(oldaddr + oldsize); oldsize = oldmax - oldmin; - newsize = round_page(newsize); + newsize = round_page_32(newsize); /* @@ -506,7 +506,7 @@ kmem_alloc_pageable( #else addr = vm_map_min(map); #endif - kr = vm_map_enter(map, &addr, round_page(size), + kr = vm_map_enter(map, &addr, round_page_32(size), (vm_offset_t) 0, TRUE, VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); @@ -533,8 +533,9 @@ kmem_free( { kern_return_t kr; - kr = vm_map_remove(map, trunc_page(addr), - round_page(addr + size), VM_MAP_REMOVE_KUNWIRE); + kr = vm_map_remove(map, trunc_page_32(addr), + round_page_32(addr + size), + VM_MAP_REMOVE_KUNWIRE); if (kr != KERN_SUCCESS) panic("kmem_free"); } @@ -550,7 +551,7 @@ kmem_alloc_pages( register vm_size_t size) { - size = round_page(size); + size = round_page_32(size); vm_object_lock(object); while (size) { register vm_page_t mem; @@ -617,7 +618,9 @@ kmem_remap_pages( * but this shouldn't be a problem because it is wired. */ PMAP_ENTER(kernel_pmap, start, mem, protection, - VM_WIMG_USE_DEFAULT, TRUE); + ((unsigned int)(mem->object->wimg_bits)) + & VM_WIMG_MASK, + TRUE); start += PAGE_SIZE; offset += PAGE_SIZE; @@ -651,7 +654,7 @@ kmem_suballoc( vm_map_t map; kern_return_t kr; - size = round_page(size); + size = round_page_32(size); /* * Need reference on submap object because it is internal @@ -723,9 +726,9 @@ kmem_init( /* * Account for kernel memory (text, data, bss, vm shenanigans). * This may include inaccessible "holes" as determined by what - * the machine-dependent init code includes in mem_size. + * the machine-dependent init code includes in max_mem. 
*/ - vm_page_wire_count = (atop(mem_size) - (vm_page_free_count + vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count + vm_page_active_count + vm_page_inactive_count)); } @@ -750,7 +753,7 @@ kmem_io_object_trunc(copy, new_size) old_size = (vm_size_t)round_page_64(copy->size); copy->size = new_size; - new_size = round_page(new_size); + new_size = round_page_32(new_size); vm_object_lock(copy->cpy_object); vm_object_page_remove(copy->cpy_object, @@ -886,6 +889,7 @@ vm_conflict_check( } if (entry->is_sub_map) { vm_map_t old_map; + old_map = map; vm_map_lock(entry->object.sub_map); map = entry->object.sub_map; @@ -906,21 +910,46 @@ vm_conflict_check( return KERN_FAILURE; } kr = KERN_ALREADY_WAITING; - } else if( - ((file_off < ((obj->paging_offset) + obj_off)) && - ((file_off + len) > - ((obj->paging_offset) + obj_off))) || - ((file_off > ((obj->paging_offset) + obj_off)) && - (((((obj->paging_offset) + obj_off)) + len) - > file_off))) { - vm_map_unlock(map); - return KERN_FAILURE; + } else { + vm_object_offset_t obj_off_aligned; + vm_object_offset_t file_off_aligned; + + obj_off_aligned = obj_off & ~PAGE_MASK; + file_off_aligned = file_off & ~PAGE_MASK; + + if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) { + /* + * the target map and the file offset start in the same page + * but are not identical... + */ + vm_map_unlock(map); + return KERN_FAILURE; + } + if ((file_off < (obj->paging_offset + obj_off_aligned)) && + ((file_off + len) > (obj->paging_offset + obj_off_aligned))) { + /* + * some portion of the tail of the I/O will fall + * within the encompass of the target map + */ + vm_map_unlock(map); + return KERN_FAILURE; + } + if ((file_off_aligned > (obj->paging_offset + obj_off)) && + (file_off_aligned < (obj->paging_offset + obj_off) + len)) { + /* + * the beginning page of the file offset falls within + * the target map's encompass + */ + vm_map_unlock(map); + return KERN_FAILURE; + } } } else if(kr != KERN_SUCCESS) { + vm_map_unlock(map); return KERN_FAILURE; } - if(len < ((entry->vme_end - entry->vme_start) - + if(len <= ((entry->vme_end - entry->vme_start) - (off - entry->vme_start))) { vm_map_unlock(map); return kr; @@ -940,6 +969,4 @@ vm_conflict_check( vm_map_unlock(map); return kr; - - } diff --git a/osfmk/vm/vm_map.c b/osfmk/vm/vm_map.c index 76c63d645..aa82856e8 100644 --- a/osfmk/vm/vm_map.c +++ b/osfmk/vm/vm_map.c @@ -69,6 +69,7 @@ #include #include #include +#include #include #include #include @@ -84,6 +85,7 @@ #include #include #include +#include #include /* Internal prototypes @@ -302,6 +304,9 @@ int kentry_count = 2048; /* to init kentry_data_size */ */ vm_size_t vm_map_aggressive_enter_max; /* set by bootstrap */ +/* Skip acquiring locks if we're in the midst of a kernel core dump */ +extern unsigned int not_in_kdp; + void vm_map_init( void) @@ -335,7 +340,7 @@ void vm_map_steal_memory( void) { - map_data_size = round_page(10 * sizeof(struct vm_map)); + map_data_size = round_page_32(10 * sizeof(struct vm_map)); map_data = pmap_steal_memory(map_data_size); #if 0 @@ -351,7 +356,7 @@ vm_map_steal_memory( kentry_data_size = - round_page(kentry_count * sizeof(struct vm_map_entry)); + round_page_32(kentry_count * sizeof(struct vm_map_entry)); kentry_data = pmap_steal_memory(kentry_data_size); } @@ -477,8 +482,8 @@ first_free_is_valid( entry = vm_map_to_entry(map); next = entry->vme_next; - while (trunc_page(next->vme_start) == trunc_page(entry->vme_end) || - (trunc_page(next->vme_start) == trunc_page(entry->vme_start) && + while 
(trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_end) || + (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_start) && next != vm_map_to_entry(map))) { entry = next; next = entry->vme_next; @@ -508,10 +513,10 @@ MACRO_BEGIN \ UFF_map = (map); \ UFF_first_free = (new_first_free); \ UFF_next_entry = UFF_first_free->vme_next; \ - while (trunc_page(UFF_next_entry->vme_start) == \ - trunc_page(UFF_first_free->vme_end) || \ - (trunc_page(UFF_next_entry->vme_start) == \ - trunc_page(UFF_first_free->vme_start) && \ + while (trunc_page_32(UFF_next_entry->vme_start) == \ + trunc_page_32(UFF_first_free->vme_end) || \ + (trunc_page_32(UFF_next_entry->vme_start) == \ + trunc_page_32(UFF_first_free->vme_start) && \ UFF_next_entry != vm_map_to_entry(UFF_map))) { \ UFF_first_free = UFF_next_entry; \ UFF_next_entry = UFF_first_free->vme_next; \ @@ -657,7 +662,8 @@ vm_map_destroy( map->max_offset, VM_MAP_NO_FLAGS); vm_map_unlock(map); - pmap_destroy(map->pmap); + if(map->pmap) + pmap_destroy(map->pmap); zfree(vm_map_zone, (vm_offset_t) map); } @@ -853,9 +859,11 @@ void vm_map_swapout(vm_map_t map) * future lookups. Performs necessary interlocks. */ #define SAVE_HINT(map,value) \ +MACRO_BEGIN \ mutex_lock(&(map)->s_lock); \ (map)->hint = (value); \ - mutex_unlock(&(map)->s_lock); + mutex_unlock(&(map)->s_lock); \ +MACRO_END /* * vm_map_lookup_entry: [ internal use only ] @@ -880,10 +888,11 @@ vm_map_lookup_entry( * Start looking either from the head of the * list, or from the hint. */ - - mutex_lock(&map->s_lock); + if (not_in_kdp) + mutex_lock(&map->s_lock); cur = map->hint; - mutex_unlock(&map->s_lock); + if (not_in_kdp) + mutex_unlock(&map->s_lock); if (cur == vm_map_to_entry(map)) cur = cur->vme_next; @@ -927,7 +936,8 @@ vm_map_lookup_entry( */ *entry = cur; - SAVE_HINT(map, cur); + if (not_in_kdp) + SAVE_HINT(map, cur); return(TRUE); } break; @@ -935,7 +945,8 @@ vm_map_lookup_entry( cur = cur->vme_next; } *entry = cur->vme_prev; - SAVE_HINT(map, *entry); + if (not_in_kdp) + SAVE_HINT(map, *entry); return(FALSE); } @@ -1112,6 +1123,9 @@ vm_map_pmap_enter( { unsigned int cache_attr; + if(map->pmap == 0) + return; + while (addr < end_addr) { register vm_page_t m; @@ -1138,7 +1152,7 @@ vm_map_pmap_enter( m->busy = TRUE; if (m->no_isync == TRUE) { - pmap_sync_caches_phys(m->phys_addr); + pmap_sync_caches_phys(m->phys_page); m->no_isync = FALSE; } @@ -1364,7 +1378,7 @@ vm_map_enter( (entry->max_protection == max_protection) && (entry->behavior == VM_BEHAVIOR_DEFAULT) && (entry->in_transition == 0) && - ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT) && + ((alias == VM_MEMORY_REALLOC) || ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT)) && (entry->wired_count == 0)) { /* implies user_wired_count == 0 */ if (vm_object_coalesce(entry->object.vm_object, VM_OBJECT_NULL, @@ -1448,14 +1462,14 @@ MACRO_BEGIN \ vm_offset_t pmap_base_addr; \ \ pmap_base_addr = 0xF0000000 & entry->vme_start; \ - pmap_unnest(map->pmap, pmap_base_addr, \ - 0x10000000); \ + pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \ entry->use_pmap = FALSE; \ } else if(entry->object.vm_object \ && !entry->is_sub_map \ && entry->object.vm_object->phys_contiguous) { \ pmap_remove(map->pmap, \ - entry->vme_start, entry->vme_end); \ + (addr64_t)(entry->vme_start), \ + (addr64_t)(entry->vme_end)); \ } \ _vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\ } \ @@ -1540,14 +1554,14 @@ MACRO_BEGIN \ vm_offset_t pmap_base_addr; \ \ pmap_base_addr = 0xF0000000 & entry->vme_start; \ - 
pmap_unnest(map->pmap, pmap_base_addr, \ - 0x10000000); \ + pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \ entry->use_pmap = FALSE; \ } else if(entry->object.vm_object \ && !entry->is_sub_map \ && entry->object.vm_object->phys_contiguous) { \ pmap_remove(map->pmap, \ - entry->vme_start, entry->vme_end); \ + (addr64_t)(entry->vme_start), \ + (addr64_t)(entry->vme_end)); \ } \ _vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \ } \ @@ -1741,25 +1755,32 @@ vm_map_submap( (object->copy == VM_OBJECT_NULL) && (object->shadow == VM_OBJECT_NULL) && (!object->pager_created)) { - entry->offset = (vm_object_offset_t)offset; - entry->object.vm_object = VM_OBJECT_NULL; - vm_object_deallocate(object); - entry->is_sub_map = TRUE; - vm_map_reference(entry->object.sub_map = submap); + entry->offset = (vm_object_offset_t)offset; + entry->object.vm_object = VM_OBJECT_NULL; + vm_object_deallocate(object); + entry->is_sub_map = TRUE; + entry->object.sub_map = submap; + vm_map_reference(submap); #ifndef i386 - if ((use_pmap) && (offset == 0)) { - /* nest if platform code will allow */ - result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap, - start, end - start); - if(result) - panic("pmap_nest failed!"); - entry->use_pmap = TRUE; - } + if ((use_pmap) && (offset == 0)) { + /* nest if platform code will allow */ + if(submap->pmap == NULL) { + submap->pmap = pmap_create((vm_size_t) 0); + if(submap->pmap == PMAP_NULL) { + return(KERN_NO_SPACE); + } + } + result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap, + (addr64_t)start, (addr64_t)start, (uint64_t)(end - start)); + if(result) + panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result); + entry->use_pmap = TRUE; + } #endif #ifdef i386 - pmap_remove(map->pmap, start, end); + pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end); #endif - result = KERN_SUCCESS; + result = KERN_SUCCESS; } vm_map_unlock(map); @@ -1911,8 +1932,7 @@ vm_map_protect( local_entry->use_pmap = FALSE; local_entry = local_entry->vme_next; } - pmap_unnest(map->pmap, pmap_base_addr, - (pmap_end_addr - pmap_base_addr) + 1); + pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); #endif } if (!(current->protection & VM_PROT_WRITE)) { @@ -2251,7 +2271,7 @@ vm_map_wire_nested( rc = vm_map_wire_nested(entry->object.sub_map, sub_start, sub_end, access_type, - user_wire, pmap, pmap_addr); + user_wire, map_pmap, pmap_addr); vm_map_lock(map); } s = entry->vme_start; @@ -2686,8 +2706,9 @@ vm_map_unwire_nested( continue; } else { vm_map_unlock(map); - vm_map_unwire_nested(entry->object.sub_map, - sub_start, sub_end, user_wire, pmap, pmap_addr); + vm_map_unwire_nested(entry->object.sub_map, + sub_start, sub_end, user_wire, map_pmap, + pmap_addr); vm_map_lock(map); if (last_timestamp+1 != map->timestamp) { @@ -2912,7 +2933,8 @@ vm_map_submap_pmap_clean( VM_PROT_NONE); } else { pmap_remove(map->pmap, - start, start + remove_size); + (addr64_t)start, + (addr64_t)(start + remove_size)); } } } @@ -2944,9 +2966,10 @@ vm_map_submap_pmap_clean( VM_PROT_NONE); } else { pmap_remove(map->pmap, - (start + entry->vme_start) - offset, - ((start + entry->vme_start) - - offset) + remove_size); + (addr64_t)((start + entry->vme_start) + - offset), + (addr64_t)(((start + entry->vme_start) + - offset) + remove_size)); } } entry = entry->vme_next; @@ -3203,8 +3226,7 @@ vm_map_delete( if(entry->is_sub_map) { if(entry->use_pmap) { #ifndef i386 - pmap_unnest(map->pmap, entry->vme_start, - entry->vme_end - entry->vme_start); + pmap_unnest(map->pmap, (addr64_t)entry->vme_start); #endif if((map->mapped) 
&& (map->ref_count)) { /* clean up parent map/maps */ @@ -3221,19 +3243,46 @@ vm_map_delete( entry->offset); } } else { - if((map->mapped) && (map->ref_count)) { - vm_object_pmap_protect( - entry->object.vm_object, - entry->offset, - entry->vme_end - entry->vme_start, - PMAP_NULL, - entry->vme_start, - VM_PROT_NONE); - } else { + object = entry->object.vm_object; + if((map->mapped) && (map->ref_count)) { + vm_object_pmap_protect( + object, entry->offset, + entry->vme_end - entry->vme_start, + PMAP_NULL, + entry->vme_start, + VM_PROT_NONE); + } else if(object != NULL) { + if ((object->shadow != NULL) || + (object->phys_contiguous) || + (object->resident_page_count > + atop((entry->vme_end - entry->vme_start)/4))) { pmap_remove(map->pmap, - entry->vme_start, - entry->vme_end); + (addr64_t)(entry->vme_start), + (addr64_t)(entry->vme_end)); + } else { + vm_page_t p; + vm_object_offset_t start_off; + vm_object_offset_t end_off; + start_off = entry->offset; + end_off = start_off + + (entry->vme_end - entry->vme_start); + vm_object_lock(object); + queue_iterate(&object->memq, + p, vm_page_t, listq) { + if ((!p->fictitious) && + (p->offset >= start_off) && + (p->offset < end_off)) { + vm_offset_t start; + start = entry->vme_start; + start += p->offset - start_off; + pmap_remove( + map->pmap, start, + start + PAGE_SIZE); + } } + vm_object_unlock(object); + } + } } } @@ -3446,7 +3495,7 @@ vm_map_overwrite_submap_recurse( * splitting entries in strange ways. */ - dst_end = round_page(dst_addr + dst_size); + dst_end = round_page_32(dst_addr + dst_size); vm_map_lock(dst_map); start_pass_1: @@ -3455,7 +3504,7 @@ start_pass_1: return(KERN_INVALID_ADDRESS); } - vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr)); + vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(dst_addr)); for (entry = tmp_entry;;) { vm_map_entry_t next; @@ -3676,7 +3725,7 @@ vm_map_copy_overwrite_nested( !page_aligned (dst_addr)) { aligned = FALSE; - dst_end = round_page(dst_addr + copy->size); + dst_end = round_page_32(dst_addr + copy->size); } else { dst_end = dst_addr + copy->size; } @@ -3688,7 +3737,7 @@ start_pass_1: vm_map_unlock(dst_map); return(KERN_INVALID_ADDRESS); } - vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr)); + vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(dst_addr)); for (entry = tmp_entry;;) { vm_map_entry_t next = entry->vme_next; @@ -4173,7 +4222,7 @@ start_overwrite: break; } } - vm_map_clip_start(dst_map, tmp_entry, trunc_page(base_addr)); + vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(base_addr)); entry = tmp_entry; } /* while */ @@ -4574,9 +4623,9 @@ vm_map_copy_overwrite_aligned( entry->vme_start, VM_PROT_NONE); } else { - pmap_remove(dst_map->pmap, - entry->vme_start, - entry->vme_end); + pmap_remove(dst_map->pmap, + (addr64_t)(entry->vme_start), + (addr64_t)(entry->vme_end)); } vm_object_deallocate(old_object); } @@ -4643,7 +4692,10 @@ vm_map_copy_overwrite_aligned( /* No isync here */ PMAP_ENTER(pmap, va, m, prot, - VM_WIMG_USE_DEFAULT, FALSE); + ((unsigned int) + (m->object->wimg_bits)) + & VM_WIMG_MASK, + FALSE); vm_object_lock(object); vm_page_lock_queues(); @@ -4805,8 +4857,8 @@ vm_map_copyin_kernel_buffer( VM_MAP_REMOVE_INTERRUPTIBLE; } if (src_destroy) { - (void) vm_map_remove(src_map, trunc_page(src_addr), - round_page(src_addr + len), + (void) vm_map_remove(src_map, trunc_page_32(src_addr), + round_page_32(src_addr + len), flags); } *copy_result = copy; @@ -4842,7 +4894,7 @@ vm_map_copyout_kernel_buffer( *addr = 0; kr = vm_map_enter(map, addr, - 
round_page(copy->size), + round_page_32(copy->size), (vm_offset_t) 0, TRUE, VM_OBJECT_NULL, @@ -4966,7 +5018,7 @@ vm_map_copyout( vm_object_offset_t offset; offset = trunc_page_64(copy->offset); - size = round_page(copy->size + + size = round_page_32(copy->size + (vm_size_t)(copy->offset - offset)); *dst_addr = 0; kr = vm_map_enter(dst_map, dst_addr, size, @@ -4997,7 +5049,7 @@ vm_map_copyout( */ vm_copy_start = trunc_page_64(copy->offset); - size = round_page((vm_size_t)copy->offset + copy->size) + size = round_page_32((vm_size_t)copy->offset + copy->size) - vm_copy_start; StartAgain: ; @@ -5148,7 +5200,10 @@ vm_map_copyout( vm_object_unlock(object); PMAP_ENTER(dst_map->pmap, va, m, entry->protection, - VM_WIMG_USE_DEFAULT, TRUE); + ((unsigned int) + (m->object->wimg_bits)) + & VM_WIMG_MASK, + TRUE); vm_object_lock(object); PAGE_WAKEUP_DONE(m); @@ -5198,7 +5253,9 @@ vm_map_copyout( PMAP_ENTER(dst_map->pmap, va, m, prot, - VM_WIMG_USE_DEFAULT, + ((unsigned int) + (m->object->wimg_bits)) + & VM_WIMG_MASK, FALSE); vm_object_lock(object); @@ -5336,8 +5393,8 @@ vm_map_copyin_common( * Compute start and end of region */ - src_start = trunc_page(src_addr); - src_end = round_page(src_addr + len); + src_start = trunc_page_32(src_addr); + src_end = round_page_32(src_addr + len); XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t)src_map, src_addr, len, src_destroy, 0); @@ -5466,13 +5523,14 @@ vm_map_copyin_common( src_entry = tmp_entry; } if ((tmp_entry->object.vm_object != VM_OBJECT_NULL) && - ((tmp_entry->object.vm_object->wimg_bits != VM_WIMG_DEFAULT) || - (tmp_entry->object.vm_object->phys_contiguous))) { - /* This is not, cannot be supported for now */ - /* we need a description of the caching mode */ - /* reflected in the object before we can */ - /* support copyin, and then the support will */ - /* be for direct copy */ + (tmp_entry->object.vm_object->phys_contiguous)) { + /* This is not supported for now. In future */ + /* we will need to detect the phys_contig */ + /* condition and then upgrade copy_slowly */ + /* to do physical copy from the device mem */ + /* based object. We can piggy-back off of */ + /* the was_wired boolean to set up the */ + /* proper handling */ RETURN(KERN_PROTECTION_FAILURE); } /* @@ -5551,8 +5609,9 @@ RestartCopy: XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n", src_object, new_entry, new_entry->object.vm_object, was_wired, 0); - if (!was_wired && - vm_object_copy_quickly( + if ((src_object == VM_OBJECT_NULL || + (!was_wired && !map_share && !tmp_entry->is_shared)) && + vm_object_copy_quickly( &new_entry->object.vm_object, src_offset, src_size, @@ -5566,49 +5625,17 @@ RestartCopy: */ if (src_needs_copy && !tmp_entry->needs_copy) { - if (tmp_entry->is_shared || - tmp_entry->object.vm_object->true_share || - map_share) { - vm_map_unlock(src_map); - new_entry->object.vm_object = - vm_object_copy_delayed( - src_object, - src_offset, - src_size); - /* dec ref gained in copy_quickly */ - vm_object_lock(src_object); - src_object->ref_count--; - assert(src_object->ref_count > 0); - vm_object_res_deallocate(src_object); - vm_object_unlock(src_object); - vm_map_lock(src_map); - /* - * it turns out that we have - * finished our copy.
No matter - * what the state of the map - * we will lock it again here - * knowing that if there is - * additional data to copy - * it will be checked at - * the top of the loop - * - * Don't do timestamp check - */ - - } else { - vm_object_pmap_protect( - src_object, - src_offset, - src_size, - (src_entry->is_shared ? - PMAP_NULL - : src_map->pmap), - src_entry->vme_start, - src_entry->protection & - ~VM_PROT_WRITE); - - tmp_entry->needs_copy = TRUE; - } + vm_object_pmap_protect( + src_object, + src_offset, + src_size, + (src_entry->is_shared ? + PMAP_NULL + : src_map->pmap), + src_entry->vme_start, + src_entry->protection & + ~VM_PROT_WRITE); + tmp_entry->needs_copy = TRUE; } /* @@ -5620,8 +5647,6 @@ RestartCopy: goto CopySuccessful; } - new_entry->needs_copy = FALSE; - /* * Take an object reference, so that we may * release the map lock(s). @@ -5643,6 +5668,7 @@ RestartCopy: */ if (was_wired) { + CopySlowly: vm_object_lock(src_object); result = vm_object_copy_slowly( src_object, @@ -5652,6 +5678,24 @@ RestartCopy: &new_entry->object.vm_object); new_entry->offset = 0; new_entry->needs_copy = FALSE; + + } + else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && + (tmp_entry->is_shared || map_share)) { + vm_object_t new_object; + + vm_object_lock(src_object); + new_object = vm_object_copy_delayed( + src_object, + src_offset, + src_size); + if (new_object == VM_OBJECT_NULL) + goto CopySlowly; + + new_entry->object.vm_object = new_object; + new_entry->needs_copy = TRUE; + result = KERN_SUCCESS; + } else { result = vm_object_copy_strategically(src_object, src_offset, @@ -5661,7 +5705,6 @@ RestartCopy: &new_entry_needs_copy); new_entry->needs_copy = new_entry_needs_copy; - } if (result != KERN_SUCCESS && @@ -5800,7 +5843,7 @@ RestartCopy: */ if (src_destroy) { (void) vm_map_delete(src_map, - trunc_page(src_addr), + trunc_page_32(src_addr), src_end, (src_map == kernel_map) ? 
VM_MAP_REMOVE_KUNWIRE : @@ -5887,8 +5930,9 @@ vm_map_fork_share( if(old_entry->use_pmap) { result = pmap_nest(new_map->pmap, (old_entry->object.sub_map)->pmap, - old_entry->vme_start, - old_entry->vme_end - old_entry->vme_start); + (addr64_t)old_entry->vme_start, + (addr64_t)old_entry->vme_start, + (uint64_t)(old_entry->vme_end - old_entry->vme_start)); if(result) panic("vm_map_fork_share: pmap_nest failed!"); } @@ -6113,8 +6157,7 @@ vm_map_fork_copy( */ vm_map_lock(old_map); if (!vm_map_lookup_entry(old_map, start, &last) || - last->max_protection & VM_PROT_READ == - VM_PROT_NONE) { + (last->max_protection & VM_PROT_READ) == VM_PROT_NONE) { last = last->vme_next; } *old_entry_p = last; @@ -7149,11 +7192,13 @@ vm_region_recurse_64( recurse_count = *nesting_depth; LOOKUP_NEXT_BASE_ENTRY: - vm_map_lock_read(map); + if (not_in_kdp) + vm_map_lock_read(map); if (!vm_map_lookup_entry(map, start, &tmp_entry)) { if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { - vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + if (not_in_kdp) + vm_map_unlock_read(map); + return(KERN_INVALID_ADDRESS); } } else { entry = tmp_entry; @@ -7167,7 +7212,8 @@ LOOKUP_NEXT_BASE_ENTRY: while(entry->is_sub_map && recurse_count) { recurse_count--; - vm_map_lock_read(entry->object.sub_map); + if (not_in_kdp) + vm_map_lock_read(entry->object.sub_map); if(entry == base_entry) { @@ -7176,13 +7222,15 @@ LOOKUP_NEXT_BASE_ENTRY: } submap = entry->object.sub_map; - vm_map_unlock_read(map); + if (not_in_kdp) + vm_map_unlock_read(map); map = submap; if (!vm_map_lookup_entry(map, start, &tmp_entry)) { if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { - vm_map_unlock_read(map); + if (not_in_kdp) + vm_map_unlock_read(map); map = base_map; start = base_next; recurse_count = 0; @@ -7202,7 +7250,8 @@ LOOKUP_NEXT_BASE_ENTRY: } if(base_next <= (base_addr += (entry->vme_start - start))) { - vm_map_unlock_read(map); + if (not_in_kdp) + vm_map_unlock_read(map); map = base_map; start = base_next; recurse_count = 0; @@ -7228,7 +7277,8 @@ LOOKUP_NEXT_BASE_ENTRY: } base_addr += entry->vme_start; if(base_addr >= base_next) { - vm_map_unlock_read(map); + if (not_in_kdp) + vm_map_unlock_read(map); map = base_map; start = base_next; recurse_count = 0; @@ -7268,7 +7318,8 @@ LOOKUP_NEXT_BASE_ENTRY: extended.pages_dirtied = 0; extended.external_pager = 0; extended.shadow_depth = 0; - + + if (not_in_kdp) if(!entry->is_sub_map) { vm_region_walk(entry, &extended, entry->offset, entry->vme_end - start, map, start); @@ -7292,8 +7343,8 @@ LOOKUP_NEXT_BASE_ENTRY: submap_info->pages_dirtied = extended.pages_dirtied; submap_info->external_pager = extended.external_pager; submap_info->shadow_depth = extended.shadow_depth; - - vm_map_unlock_read(map); + if (not_in_kdp) + vm_map_unlock_read(map); return(KERN_SUCCESS); } @@ -7634,12 +7685,13 @@ vm_region_look_for_page( if (shadow && (max_refcnt == 1)) extended->pages_shared_now_private++; - if (p->dirty || pmap_is_modified(p->phys_addr)) + if (!p->fictitious && + (p->dirty || pmap_is_modified(p->phys_page))) extended->pages_dirtied++; extended->pages_resident++; if(object != caller_object) - vm_object_unlock(object); + vm_object_unlock(object); return; } @@ -7649,13 +7701,13 @@ vm_region_look_for_page( extended->pages_swapped_out++; if(object != caller_object) - vm_object_unlock(object); + vm_object_unlock(object); return; } } if (shadow) { - vm_object_lock(shadow); + vm_object_lock(shadow); if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress) ref_count--; @@ -7667,7 
+7719,7 @@ vm_region_look_for_page( max_refcnt = ref_count; if(object != caller_object) - vm_object_unlock(object); + vm_object_unlock(object); object = shadow; shadow = object->shadow; @@ -7675,7 +7727,7 @@ vm_region_look_for_page( continue; } if(object != caller_object) - vm_object_unlock(object); + vm_object_unlock(object); break; } } @@ -7693,7 +7745,7 @@ vm_region_count_obj_refs( return(0); if (entry->is_sub_map) - ref_count = vm_region_count_obj_refs((vm_map_entry_t)entry->object.sub_map, object); + return(0); else { ref_count = 0; @@ -7704,9 +7756,9 @@ vm_region_count_obj_refs( if (chk_obj == object) ref_count++; if (tmp_obj = chk_obj->shadow) - vm_object_lock(tmp_obj); + vm_object_lock(tmp_obj); vm_object_unlock(chk_obj); - + chk_obj = tmp_obj; } } @@ -7856,9 +7908,9 @@ vm_map_machine_attribute( } /* Get the starting address */ - start = trunc_page(address); + start = trunc_page_32(address); /* Figure how much memory we need to flush (in page increments) */ - sync_size = round_page(start + size) - start; + sync_size = round_page_32(start + size) - start; ret = KERN_SUCCESS; /* Assume it all worked */ @@ -7901,7 +7953,7 @@ vm_map_machine_attribute( ret = pmap_attribute_cache_sync( - m->phys_addr, + m->phys_page, PAGE_SIZE, attribute, value); } else if (object->shadow) { @@ -8031,7 +8083,7 @@ void vm_map_links_print( struct vm_map_links *links) { - iprintf("prev=0x%x, next=0x%x, start=0x%x, end=0x%x\n", + iprintf("prev = %08X next = %08X start = %08X end = %08X\n", links->prev, links->next, links->start, @@ -8046,7 +8098,7 @@ vm_map_header_print( struct vm_map_header *header) { vm_map_links_print(&header->links); - iprintf("nentries=0x%x, %sentries_pageable\n", + iprintf("nentries = %08X, %sentries_pageable\n", header->nentries, (header->entries_pageable ? "" : "!")); } @@ -8061,7 +8113,7 @@ vm_follow_entry( extern int db_indent; int shadows; - iprintf("map entry 0x%x:\n", entry); + iprintf("map entry %08X\n", entry); db_indent += 2; @@ -8082,20 +8134,20 @@ vm_map_entry_print( static char *inheritance_name[4] = { "share", "copy", "none", "?"}; static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" }; - iprintf("map entry 0x%x:\n", entry); + iprintf("map entry %08X\n", entry); db_indent += 2; vm_map_links_print(&entry->links); - iprintf("start=0x%x, end=0x%x, prot=%x/%x/%s\n", + iprintf("start = %08X end = %08X, prot=%x/%x/%s\n", entry->vme_start, entry->vme_end, entry->protection, entry->max_protection, inheritance_name[(entry->inheritance & 0x3)]); - iprintf("behavior=%s, wired_count=%d, user_wired_count=%d\n", + iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n", behavior_name[(entry->behavior & 0x3)], entry->wired_count, entry->user_wired_count); @@ -8104,11 +8156,11 @@ vm_map_entry_print( (entry->needs_wakeup ?
"" : "!")); if (entry->is_sub_map) { - iprintf("submap=0x%x, offset=0x%x\n", + iprintf("submap = %08X - offset=%08X\n", entry->object.sub_map, entry->offset); } else { - iprintf("object=0x%x, offset=0x%x, ", + iprintf("object=%08X, offset=%08X, ", entry->object.vm_object, entry->offset); printf("%sis_shared, %sneeds_copy\n", @@ -8129,7 +8181,7 @@ vm_follow_map( register vm_map_entry_t entry; extern int db_indent; - iprintf("task map 0x%x:\n", map); + iprintf("task map %08X\n", map); db_indent += 2; @@ -8147,26 +8199,29 @@ vm_follow_map( */ void vm_map_print( - register vm_map_t map) + db_addr_t inmap) { register vm_map_entry_t entry; + vm_map_t map; extern int db_indent; char *swstate; - iprintf("task map 0x%x:\n", map); + map = (vm_map_t)inmap; /* Make sure we have the right type */ + + iprintf("task map %08X\n", map); db_indent += 2; vm_map_header_print(&map->hdr); - iprintf("pmap=0x%x, size=%d, ref=%d, hint=0x%x, first_free=0x%x\n", + iprintf("pmap = %08X, size = %08X, ref = %d, hint = %08X, first_free = %08X\n", map->pmap, map->size, map->ref_count, map->hint, map->first_free); - iprintf("%swait_for_space, %swiring_required, timestamp=%d\n", + iprintf("%swait_for_space, %swiring_required, timestamp = %d\n", (map->wait_for_space ? "" : "!"), (map->wiring_required ? "" : "!"), map->timestamp); @@ -8183,7 +8238,7 @@ vm_map_print( swstate = "????"; break; } - iprintf("res=%d, sw_state=%s\n", map->res_count, swstate); + iprintf("res = %d, sw_state = %s\n", map->res_count, swstate); #endif /* TASK_SWAPPER */ for (entry = vm_map_first_entry(map); @@ -8203,12 +8258,15 @@ vm_map_print( void vm_map_copy_print( - vm_map_copy_t copy) + db_addr_t incopy) { extern int db_indent; + vm_map_copy_t copy; int i, npages; vm_map_entry_t entry; + copy = (vm_map_copy_t)incopy; /* Make sure we have the right type */ + printf("copy object 0x%x\n", copy); db_indent += 2; @@ -8265,10 +8323,13 @@ vm_map_copy_print( */ vm_size_t db_vm_map_total_size( - vm_map_t map) + db_addr_t inmap) { vm_map_entry_t entry; vm_size_t total; + vm_map_t map; + + map = (vm_map_t)inmap; /* Make sure we have the right type */ total = 0; for (entry = vm_map_first_entry(map); @@ -8378,7 +8439,7 @@ vm_remap_extract( boolean_t new_entry_needs_copy; assert(map != VM_MAP_NULL); - assert(size != 0 && size == round_page(size)); + assert(size != 0 && size == round_page_32(size)); assert(inheritance == VM_INHERIT_NONE || inheritance == VM_INHERIT_COPY || inheritance == VM_INHERIT_SHARE); @@ -8386,8 +8447,8 @@ vm_remap_extract( /* * Compute start and end of region. */ - src_start = trunc_page(addr); - src_end = round_page(src_start + size); + src_start = trunc_page_32(addr); + src_end = round_page_32(src_start + size); /* * Initialize map_header. 
@@ -8707,7 +8768,7 @@ vm_remap( return KERN_INVALID_ARGUMENT; } - size = round_page(size); + size = round_page_32(size); result = vm_remap_extract(src_map, memory_address, size, copy, &map_header, @@ -8725,7 +8786,7 @@ vm_remap( * Allocate/check a range of free virtual address * space for the target */ - *address = trunc_page(*address); + *address = trunc_page_32(*address); vm_map_lock(target_map); result = vm_remap_range_allocate(target_map, address, size, mask, anywhere, &insp_entry); diff --git a/osfmk/vm/vm_map.h b/osfmk/vm/vm_map.h index fd0422f45..b2553a1a6 100644 --- a/osfmk/vm/vm_map.h +++ b/osfmk/vm/vm_map.h @@ -868,6 +868,12 @@ extern kern_return_t vm_map_region_replace( vm_offset_t start, vm_offset_t end); +extern boolean_t vm_map_check_protection( + vm_map_t map, + vm_offset_t start, + vm_offset_t end, + vm_prot_t protection); + /* * Macros to invoke vm_map_copyin_common. vm_map_copyin is the * usual form; it handles a copyin based on the current protection @@ -894,6 +900,15 @@ extern kern_return_t vm_map_region_replace( #define VM_MAP_REMOVE_INTERRUPTIBLE 0x2 #define VM_MAP_REMOVE_WAIT_FOR_KWIRE 0x4 +/* + * Backing store throttle when BS is exhausted + */ +extern unsigned int vm_backing_store_low; + +extern void vm_backing_store_disable( + boolean_t suspend); + + #endif /* __APPLE_API_PRIVATE */ #endif /* _VM_VM_MAP_H_ */ diff --git a/osfmk/vm/vm_object.c b/osfmk/vm/vm_object.c index ac2a0a1f1..2741a89bb 100644 --- a/osfmk/vm/vm_object.c +++ b/osfmk/vm/vm_object.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -89,8 +89,6 @@ extern int vnode_pager_workaround; #include #include - - /* * Virtual memory objects maintain the actual data * associated with allocated virtual memory. A given @@ -409,8 +407,8 @@ vm_object_bootstrap(void) register i; vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object), - round_page(512*1024), - round_page(12*1024), + round_page_32(512*1024), + round_page_32(12*1024), "vm objects"); queue_init(&vm_object_cached_list); @@ -418,8 +416,8 @@ vm_object_bootstrap(void) vm_object_hash_zone = zinit((vm_size_t) sizeof (struct vm_object_hash_entry), - round_page(512*1024), - round_page(12*1024), + round_page_32(512*1024), + round_page_32(12*1024), "vm object hash entries"); for (i = 0; i < VM_OBJECT_HASH_COUNT; i++) @@ -440,7 +438,7 @@ vm_object_bootstrap(void) vm_object_template.copy = VM_OBJECT_NULL; vm_object_template.shadow = VM_OBJECT_NULL; vm_object_template.shadow_offset = (vm_object_offset_t) 0; - vm_object_template.cow_hint = 0; + vm_object_template.cow_hint = ~(vm_offset_t)0; vm_object_template.true_share = FALSE; vm_object_template.pager = MEMORY_OBJECT_NULL; @@ -496,10 +494,17 @@ vm_object_bootstrap(void) /* * Note that in the following size specifications, we need to add 1 because - * VM_MAX_KERNEL_ADDRESS is a maximum address, not a size. + * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size. */ + +#ifdef ppc + _vm_object_allocate((vm_last_addr - VM_MIN_KERNEL_ADDRESS) + 1, + kernel_object); +#else _vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1, kernel_object); +#endif + kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; /* * Initialize the "submap object". 
Make it as large as the @@ -507,8 +512,15 @@ vm_object_bootstrap(void) */ vm_submap_object = &vm_submap_object_store; +#ifdef ppc + _vm_object_allocate((vm_last_addr - VM_MIN_KERNEL_ADDRESS) + 1, + vm_submap_object); +#else _vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1, vm_submap_object); +#endif + vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; + /* * Create an "extra" reference to this object so that we never * try to deallocate it; zfree doesn't like to be called with @@ -571,10 +583,21 @@ vm_object_deallocate( * the object; we must lock it before removing * the object. */ + for (;;) { + vm_object_cache_lock(); - vm_object_cache_lock(); - vm_object_lock(object); - + /* + * if we try to take a regular lock here + * we risk deadlocking against someone + * holding a lock on this object while + * trying to vm_object_deallocate a different + * object + */ + if (vm_object_lock_try(object)) + break; + vm_object_cache_unlock(); + mutex_pause(); /* wait a bit */ + } assert(object->ref_count > 0); /* @@ -594,8 +617,21 @@ vm_object_deallocate( memory_object_unmap(pager); - vm_object_cache_lock(); - vm_object_lock(object); + for (;;) { + vm_object_cache_lock(); + + /* + * if we try to take a regular lock here + * we risk deadlocking against someone + * holding a lock on this object while + * trying to vm_object_deallocate a different + * object + */ + if (vm_object_lock_try(object)) + break; + vm_object_cache_unlock(); + mutex_pause(); /* wait a bit */ + } assert(object->ref_count > 0); } } @@ -904,6 +940,7 @@ vm_object_terminate( } vm_page_lock_queues(); + p->busy = TRUE; VM_PAGE_QUEUES_REMOVE(p); vm_page_unlock_queues(); @@ -924,16 +961,10 @@ vm_object_terminate( panic("vm_object_terminate.4 0x%x 0x%x", object, p); if (!p->dirty) - p->dirty = pmap_is_modified(p->phys_addr); + p->dirty = pmap_is_modified(p->phys_page); if ((p->dirty || p->precious) && !p->error && object->alive) { - p->busy = TRUE; - vm_object_paging_begin(object); - /* protect the object from re-use/caching while it */ - /* is unlocked */ - vm_object_unlock(object); vm_pageout_cluster(p); /* flush page */ - vm_object_lock(object); vm_object_paging_wait(object, THREAD_UNINT); XPR(XPR_VM_OBJECT, "vm_object_terminate restart, object 0x%X ref %d\n", @@ -982,14 +1013,14 @@ vm_object_terminate( /* * Detach the object from its shadow if we are the shadow's - * copy. + * copy. The reference we hold on the shadow must be dropped + * by our caller. 
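The vm_object_deallocate change above replaces a straight vm_object_lock() with a try-lock/back-off loop while the cache lock is held; the comment in the hunk names the ABBA deadlock it avoids. A generic pthread sketch of the same pattern (names are hypothetical; mutex_pause() is approximated with sched_yield):

#include <pthread.h>
#include <sched.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

struct object {
	pthread_mutex_t lock;
	int ref_count;
};

/* Take cache_lock and obj->lock together without committing to a fixed
 * acquisition order: never block on the second lock while holding the
 * first; back off and retry instead. */
static void lock_cache_and_object(struct object *obj)
{
	for (;;) {
		pthread_mutex_lock(&cache_lock);
		if (pthread_mutex_trylock(&obj->lock) == 0)
			return;			/* got both */
		pthread_mutex_unlock(&cache_lock);
		sched_yield();			/* mutex_pause() analog */
	}
}

int main(void)
{
	struct object o = { PTHREAD_MUTEX_INITIALIZER, 1 };

	lock_cache_and_object(&o);
	pthread_mutex_unlock(&o.lock);
	pthread_mutex_unlock(&cache_lock);
	return 0;
}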
*/ if (((shadow_object = object->shadow) != VM_OBJECT_NULL) && !(object->pageout)) { vm_object_lock(shadow_object); - assert((shadow_object->copy == object) || - (shadow_object->copy == VM_OBJECT_NULL)); - shadow_object->copy = VM_OBJECT_NULL; + if (shadow_object->copy == object) + shadow_object->copy = VM_OBJECT_NULL; vm_object_unlock(shadow_object); } @@ -1320,12 +1351,12 @@ vm_object_deactivate_pages( if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) { m->reference = FALSE; - pmap_clear_reference(m->phys_addr); + pmap_clear_reference(m->phys_page); if ((kill_page) && (object->internal)) { m->precious = FALSE; m->dirty = FALSE; - pmap_clear_modify(m->phys_addr); + pmap_clear_modify(m->phys_page); vm_external_state_clr(object->existence_map, offset); } VM_PAGE_QUEUES_REMOVE(m); @@ -1411,10 +1442,10 @@ vm_object_pmap_protect( vm_object_lock(object); - assert(object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC); + assert(object->internal); while (TRUE) { - if (object->resident_page_count > atop(size) / 2 && + if (object->resident_page_count > atop_32(size) / 2 && pmap != PMAP_NULL) { vm_object_unlock(object); pmap_protect(pmap, pmap_start, pmap_start + size, prot); @@ -1424,7 +1455,7 @@ vm_object_pmap_protect( /* if we are doing large ranges with respect to resident */ /* page count then we should iterate over pages otherwise */ /* inverse page look-up will be faster */ - if ((object->resident_page_count / 4) < atop(size)) { + if ((object->resident_page_count / 4) < atop_32(size)) { vm_page_t p; vm_object_offset_t end; @@ -1446,7 +1477,7 @@ vm_object_pmap_protect( if (!p->fictitious && (offset <= p->offset) && (p->offset < end)) { - pmap_page_protect(p->phys_addr, + pmap_page_protect(p->phys_page, prot & ~p->page_lock); } } @@ -1472,7 +1503,7 @@ vm_object_pmap_protect( for(target_off = offset; target_off < end; target_off += PAGE_SIZE) { if(p = vm_page_lookup(object, target_off)) { - pmap_page_protect(p->phys_addr, + pmap_page_protect(p->phys_page, prot & ~p->page_lock); } } @@ -1935,7 +1966,8 @@ static int copy_delayed_protect_lookup_wait = 0; * the asymmetric copy-on-write algorithm. * * In/out conditions: - * The object must be unlocked on entry. + * The src_object must be locked on entry. It will be unlocked + * on exit - so the caller must also hold a reference to it. * * This routine will not block waiting for user-generated * events. It is not interruptible. @@ -1949,7 +1981,7 @@ vm_object_copy_delayed( vm_object_t new_copy = VM_OBJECT_NULL; vm_object_t old_copy; vm_page_t p; - vm_object_size_t copy_size; + vm_object_size_t copy_size = src_offset + size; int collisions = 0; /* @@ -1992,8 +2024,13 @@ vm_object_copy_delayed( */ Retry: - vm_object_lock(src_object); + /* + * Wait for paging in progress. + */ + if (!src_object->true_share) + vm_object_paging_wait(src_object, THREAD_UNINT); + /* * See whether we can reuse the result of a previous * copy operation. @@ -2016,6 +2053,7 @@ vm_object_copy_delayed( if (collisions > copy_delayed_max_collisions) copy_delayed_max_collisions = collisions; + vm_object_lock(src_object); goto Retry; } @@ -2030,52 +2068,66 @@ vm_object_copy_delayed( * It has not been modified. * * Return another reference to - * the existing copy-object. + * the existing copy-object if + * we can safely grow it (if + * needed).
*/ - assert(old_copy->ref_count > 0); - old_copy->ref_count++; - - if (old_copy->size < src_offset+size) - old_copy->size = src_offset+size; - -#if TASK_SWAPPER - /* - * We have to reproduce some of the code from - * vm_object_res_reference because we've taken - * the locks out of order here, and deadlock - * would result if we simply called that function. - */ - if (++old_copy->res_count == 1) { - assert(old_copy->shadow == src_object); - vm_object_res_reference(src_object); - } -#endif /* TASK_SWAPPER */ - - vm_object_unlock(old_copy); - vm_object_unlock(src_object); if (new_copy != VM_OBJECT_NULL) { vm_object_unlock(new_copy); vm_object_deallocate(new_copy); } - return(old_copy); - } - if (new_copy == VM_OBJECT_NULL) { + if (old_copy->size < copy_size) { + /* + * We can't perform a delayed copy if any of the + * pages in the extended range are wired (because + * we can't safely take write permission away from + * wired pages). If the pages aren't wired, then + * go ahead and protect them. + */ + copy_delayed_protect_iterate++; + queue_iterate(&src_object->memq, p, vm_page_t, listq) { + if (!p->fictitious && + p->offset >= old_copy->size && + p->offset < copy_size) { + if (p->wire_count > 0) { + vm_object_unlock(old_copy); + vm_object_unlock(src_object); + return VM_OBJECT_NULL; + } else { + pmap_page_protect(p->phys_page, + (VM_PROT_ALL & ~VM_PROT_WRITE & + ~p->page_lock)); + } + } + } + old_copy->size = copy_size; + } + + vm_object_reference_locked(old_copy); vm_object_unlock(old_copy); vm_object_unlock(src_object); - new_copy = vm_object_allocate(src_offset + size); - vm_object_lock(new_copy); - goto Retry; + return(old_copy); } /* * Adjust the size argument so that the newly-created * copy object will be large enough to back either the - * new old copy object or the new mapping. + * old copy object or the new mapping. */ - if (old_copy->size > src_offset+size) - size = old_copy->size - src_offset; + if (old_copy->size > copy_size) + copy_size = old_copy->size; + + if (new_copy == VM_OBJECT_NULL) { + vm_object_unlock(old_copy); + vm_object_unlock(src_object); + new_copy = vm_object_allocate(copy_size); + vm_object_lock(src_object); + vm_object_lock(new_copy); + goto Retry; + } + new_copy->size = copy_size; /* * The copy-object is always made large enough to @@ -2087,6 +2139,44 @@ vm_object_copy_delayed( assert((old_copy->shadow == src_object) && (old_copy->shadow_offset == (vm_object_offset_t) 0)); + } else if (new_copy == VM_OBJECT_NULL) { + vm_object_unlock(src_object); + new_copy = vm_object_allocate(copy_size); + vm_object_lock(src_object); + vm_object_lock(new_copy); + goto Retry; + } + + /* + * We now have the src object locked, and the new copy object + * allocated and locked (and potentially the old copy locked). + * Before we go any further, make sure we can still perform + * a delayed copy, as the situation may have changed. + * + * Specifically, we can't perform a delayed copy if any of the + * pages in the range are wired (because we can't safely take + * write permission away from wired pages). If the pages aren't + * wired, then go ahead and protect them. 
+ */ + copy_delayed_protect_iterate++; + queue_iterate(&src_object->memq, p, vm_page_t, listq) { + if (!p->fictitious && p->offset < copy_size) { + if (p->wire_count > 0) { + if (old_copy) + vm_object_unlock(old_copy); + vm_object_unlock(src_object); + vm_object_unlock(new_copy); + vm_object_deallocate(new_copy); + return VM_OBJECT_NULL; + } else { + pmap_page_protect(p->phys_page, + (VM_PROT_ALL & ~VM_PROT_WRITE & + ~p->page_lock)); + } + } + } + + if (old_copy != VM_OBJECT_NULL) { /* * Make the old copy-object shadow the new one. * It will receive no more pages from the original @@ -2107,26 +2197,11 @@ vm_object_copy_delayed( #endif vm_object_unlock(old_copy); /* done with old_copy */ - } else if (new_copy == VM_OBJECT_NULL) { - vm_object_unlock(src_object); - new_copy = vm_object_allocate(src_offset + size); - vm_object_lock(new_copy); - goto Retry; - } - - /* - * Readjust the copy-object size if necessary. - */ - copy_size = new_copy->size; - if (copy_size < src_offset+size) { - copy_size = src_offset+size; - new_copy->size = copy_size; } /* * Point the new copy at the existing object. */ - new_copy->shadow = src_object; new_copy->shadow_offset = 0; new_copy->shadowed = TRUE; /* caller must set needs_copy */ @@ -2134,23 +2209,9 @@ vm_object_copy_delayed( src_object->ref_count++; VM_OBJ_RES_INCR(src_object); src_object->copy = new_copy; + vm_object_unlock(src_object); vm_object_unlock(new_copy); - /* - * Mark all (current) pages of the existing object copy-on-write. - * This object may have a shadow chain below it, but - * those pages will already be marked copy-on-write. - */ - - vm_object_paging_wait(src_object, THREAD_UNINT); - copy_delayed_protect_iterate++; - queue_iterate(&src_object->memq, p, vm_page_t, listq) { - if (!p->fictitious) - pmap_page_protect(p->phys_addr, - (VM_PROT_ALL & ~VM_PROT_WRITE & - ~p->page_lock)); - } - vm_object_unlock(src_object); XPR(XPR_VM_OBJECT, "vm_object_copy_delayed: used copy object %X for source %X\n", (integer_t)new_copy, (integer_t)src_object, 0, 0, 0); @@ -2210,6 +2271,18 @@ vm_object_copy_strategically( */ switch (copy_strategy) { + case MEMORY_OBJECT_COPY_DELAY: + *dst_object = vm_object_copy_delayed(src_object, + src_offset, size); + if (*dst_object != VM_OBJECT_NULL) { + *dst_offset = src_offset; + *dst_needs_copy = TRUE; + result = KERN_SUCCESS; + break; + } + vm_object_lock(src_object); + /* fall thru when delayed copy not allowed */ + case MEMORY_OBJECT_COPY_NONE: result = vm_object_copy_slowly(src_object, src_offset, size, interruptible, dst_object); @@ -2228,15 +2301,6 @@ vm_object_copy_strategically( } break; - case MEMORY_OBJECT_COPY_DELAY: - vm_object_unlock(src_object); - *dst_object = vm_object_copy_delayed(src_object, - src_offset, size); - *dst_offset = src_offset; - *dst_needs_copy = TRUE; - result = KERN_SUCCESS; - break; - case MEMORY_OBJECT_COPY_SYMMETRIC: XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n",(natural_t)src_object, src_offset, size, 0, 0); vm_object_unlock(src_object); @@ -2525,69 +2589,53 @@ vm_object_enter( * Look for an object associated with this port. */ -restart: vm_object_cache_lock(); - for (;;) { + do { entry = vm_object_hash_lookup(pager, FALSE); - /* - * If a previous object is being terminated, - * we must wait for the termination message - * to be queued. - * - * We set kobject to a non-null value to let the - * terminator know that someone is waiting. - * Among the possibilities is that the port - * could die while we're waiting. Must restart - * instead of continuing the loop. 
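The reordered switch in vm_object_copy_strategically above deserves a gloss: MEMORY_OBJECT_COPY_DELAY now tries the delayed (shared, copy-on-write) path first and deliberately falls through into MEMORY_OBJECT_COPY_NONE when vm_object_copy_delayed refuses, which it now does by returning VM_OBJECT_NULL if wired pages make write-protection unsafe. A toy sketch of that dispatch shape; every name and the wired-page stand-in are hypothetical:

#include <stddef.h>

enum strategy { COPY_DELAY, COPY_NONE };

struct object { int wired_pages; };

/* Stand-in: a delayed copy must write-protect the source, which is not
 * safe while pages are wired, so refuse in that case. */
static struct object *copy_delayed(struct object *src)
{
	return src->wired_pages ? NULL : src;
}

static int copy_slowly(struct object *src, struct object **dst)
{
	*dst = src;	/* stand-in for an immediate page-by-page copy */
	return 0;
}

static int copy_strategically(enum strategy s,
			      struct object *src, struct object **dst)
{
	switch (s) {
	case COPY_DELAY:
		if ((*dst = copy_delayed(src)) != NULL)
			return 0;		/* fast path taken */
		/* fall through: delayed copy refused */
	case COPY_NONE:
		return copy_slowly(src, dst);
	}
	return -1;
}

int main(void)
{
	struct object wired = { 1 }, plain = { 0 };
	struct object *dst;

	copy_strategically(COPY_DELAY, &wired, &dst);	/* falls back */
	copy_strategically(COPY_DELAY, &plain, &dst);	/* fast path */
	return 0;
}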
- */ - - if (entry != VM_OBJECT_HASH_ENTRY_NULL) { - if (entry->object != VM_OBJECT_NULL) - break; - + if (entry == VM_OBJECT_HASH_ENTRY_NULL) { + if (new_object == VM_OBJECT_NULL) { + /* + * We must unlock to create a new object; + * if we do so, we must try the lookup again. + */ + vm_object_cache_unlock(); + assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL); + new_entry = vm_object_hash_entry_alloc(pager); + new_object = vm_object_allocate(size); + vm_object_cache_lock(); + } else { + /* + * Lookup failed twice, and we have something + * to insert; set the object. + */ + vm_object_hash_insert(new_entry); + entry = new_entry; + entry->object = new_object; + new_entry = VM_OBJECT_HASH_ENTRY_NULL; + new_object = VM_OBJECT_NULL; + must_init = TRUE; + } + } else if (entry->object == VM_OBJECT_NULL) { + /* + * If a previous object is being terminated, + * we must wait for the termination message + * to be queued (and lookup the entry again). + */ entry->waiting = TRUE; + entry = VM_OBJECT_HASH_ENTRY_NULL; assert_wait((event_t) pager, THREAD_UNINT); vm_object_cache_unlock(); thread_block((void (*)(void))0); - goto restart; - } - - /* - * We must unlock to create a new object; - * if we do so, we must try the lookup again. - */ - - if (new_object == VM_OBJECT_NULL) { - vm_object_cache_unlock(); - assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL); - new_entry = vm_object_hash_entry_alloc(pager); - new_object = vm_object_allocate(size); vm_object_cache_lock(); - } else { - /* - * Lookup failed twice, and we have something - * to insert; set the object. - */ - - if (entry == VM_OBJECT_HASH_ENTRY_NULL) { - vm_object_hash_insert(new_entry); - entry = new_entry; - new_entry = VM_OBJECT_HASH_ENTRY_NULL; - } - - entry->object = new_object; - new_object = VM_OBJECT_NULL; - must_init = TRUE; } - } + } while (entry == VM_OBJECT_HASH_ENTRY_NULL); object = entry->object; assert(object != VM_OBJECT_NULL); if (!must_init) { vm_object_lock(object); - assert(object->pager_created); assert(!internal || object->internal); if (named) { assert(!object->named); @@ -2958,8 +3006,13 @@ vm_object_do_collapse( } } - assert(object->pager == MEMORY_OBJECT_NULL || - backing_object->pager == MEMORY_OBJECT_NULL); +#if !MACH_PAGEMAP + assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL + || (!backing_object->pager_created + && backing_object->pager == MEMORY_OBJECT_NULL)); +#else + assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL); +#endif /* !MACH_PAGEMAP */ if (backing_object->pager != MEMORY_OBJECT_NULL) { vm_object_hash_entry_t entry; @@ -2972,6 +3025,7 @@ vm_object_do_collapse( * unused portion. */ + assert(!object->paging_in_progress); object->pager = backing_object->pager; entry = vm_object_hash_lookup(object->pager, FALSE); assert(entry != VM_OBJECT_HASH_ENTRY_NULL); @@ -2991,8 +3045,6 @@ vm_object_do_collapse( vm_object_cache_unlock(); - object->paging_offset = backing_object->paging_offset + backing_offset; - #if MACH_PAGEMAP /* * If the shadow offset is 0, the use the existence map from @@ -3028,7 +3080,7 @@ vm_object_do_collapse( object->shadow = backing_object->shadow; object->shadow_offset += backing_object->shadow_offset; assert((object->shadow == VM_OBJECT_NULL) || - (object->shadow->copy == VM_OBJECT_NULL)); + (object->shadow->copy != backing_object)); /* * Discard backing_object. 
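The restructured vm_object_enter loop above is the classic find-or-create pattern for a lock-protected table: allocation cannot happen under the cache lock, so the code allocates optimistically, retakes the lock, and re-runs the lookup before inserting. A user-space sketch (single list instead of a hash, error handling elided; all names hypothetical):

#include <stdlib.h>
#include <pthread.h>

struct entry { int key; struct entry *next; };

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;

static struct entry *lookup(int key)
{
	struct entry *e;

	for (e = head; e != NULL; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

/* Find-or-create: drop the lock to allocate, then redo the lookup,
 * since another thread may have inserted the key in the meantime. */
static struct entry *enter(int key)
{
	struct entry *e, *fresh = NULL;

	pthread_mutex_lock(&cache_lock);
	while ((e = lookup(key)) == NULL) {
		if (fresh == NULL) {
			pthread_mutex_unlock(&cache_lock);
			fresh = malloc(sizeof(*fresh));	/* can't allocate locked */
			fresh->key = key;
			pthread_mutex_lock(&cache_lock);
		} else {
			fresh->next = head;	/* lookup failed twice: insert */
			head = fresh;
			fresh = NULL;
		}
	}
	pthread_mutex_unlock(&cache_lock);
	free(fresh);	/* non-NULL only if we lost the race */
	return e;
}

int main(void)
{
	return enter(7) == enter(7) ? 0 : 1;	/* second call reuses the entry */
}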
@@ -3168,15 +3220,12 @@ vm_object_do_bypass( */ __private_extern__ void vm_object_collapse( - register vm_object_t object) + register vm_object_t object, + register vm_object_offset_t hint_offset) { register vm_object_t backing_object; - register vm_object_offset_t backing_offset; - register vm_object_size_t size; - register vm_object_offset_t new_offset; - register vm_page_t p; - - vm_offset_t current_offset; + register unsigned int rcount; + register unsigned int size; if (! vm_object_collapse_allowed && ! vm_object_bypass_allowed) { return; @@ -3233,7 +3282,7 @@ vm_object_collapse( * parent object. */ if (backing_object->shadow != VM_OBJECT_NULL && - backing_object->shadow->copy != VM_OBJECT_NULL) { + backing_object->shadow->copy == backing_object) { vm_object_unlock(backing_object); return; } @@ -3248,16 +3297,22 @@ vm_object_collapse( * object, we may be able to collapse it into the * parent. * - * The backing object must not have a pager - * created for it, since collapsing an object - * into a backing_object dumps new pages into - * the backing_object that its pager doesn't - * know about. + * If MACH_PAGEMAP is defined: + * The parent must not have a pager created for it, + * since collapsing a backing_object dumps new pages + * into the parent that its pager doesn't know about + * (and the collapse code can't merge the existence + * maps). + * Otherwise: + * As long as one of the objects is still not known + * to the pager, we can collapse them. */ - if (backing_object->ref_count == 1 && - ! object->pager_created && - vm_object_collapse_allowed) { + (!object->pager_created +#if !MACH_PAGEMAP + || !backing_object->pager_created +#endif /*!MACH_PAGEMAP */ + ) && vm_object_collapse_allowed) { XPR(XPR_VM_OBJECT, "vm_object_collapse: %x to %x, pager %x, pager_request %x\n", @@ -3298,96 +3353,161 @@ vm_object_collapse( /* - * If the backing object has a pager but no pagemap, - * then we cannot bypass it, because we don't know - * what pages it has. + * If the object doesn't have all its pages present, + * we have to make sure no pages in the backing object + * "show through" before bypassing it. */ - if (backing_object->pager_created + size = atop(object->size); + rcount = object->resident_page_count; + if (rcount != size) { + vm_object_size_t size; + vm_object_offset_t offset; + vm_object_offset_t backing_offset; + unsigned int backing_rcount; + unsigned int lookups = 0; + + /* + * If the backing object has a pager but no pagemap, + * then we cannot bypass it, because we don't know + * what pages it has. + */ + if (backing_object->pager_created #if MACH_PAGEMAP - && (backing_object->existence_map == VM_EXTERNAL_NULL) + && (backing_object->existence_map == VM_EXTERNAL_NULL) #endif /* MACH_PAGEMAP */ - ) { - vm_object_unlock(backing_object); - return; - } + ) { + vm_object_unlock(backing_object); + return; + } - /* - * If the object has a pager but no pagemap, - * then we cannot bypass it, because we don't know - * what pages it has. - */ - if (object->pager_created + /* + * If the object has a pager but no pagemap, + * then we cannot bypass it, because we don't know + * what pages it has. 
+ */ + if (object->pager_created #if MACH_PAGEMAP - && (object->existence_map == VM_EXTERNAL_NULL) + && (object->existence_map == VM_EXTERNAL_NULL) #endif /* MACH_PAGEMAP */ - ) { - vm_object_unlock(backing_object); - return; - } + ) { + vm_object_unlock(backing_object); + return; + } - backing_offset = object->shadow_offset; - size = object->size; + /* + * If all of the pages in the backing object are + * shadowed by the parent object, the parent + * object no longer has to shadow the backing + * object; it can shadow the next one in the + * chain. + * + * If the backing object has existence info, + * we must examine its existence info + * as well. + * + */ - /* - * If all of the pages in the backing object are - * shadowed by the parent object, the parent - * object no longer has to shadow the backing - * object; it can shadow the next one in the - * chain. - * - * If the backing object has existence info, - * we must check examine its existence info - * as well. - * - */ + backing_offset = object->shadow_offset; + backing_rcount = backing_object->resident_page_count; - if(object->cow_hint >= size) - object->cow_hint = 0; - current_offset = object->cow_hint; - while(TRUE) { - if (vm_page_lookup(object, - (vm_object_offset_t)current_offset) - != VM_PAGE_NULL) { - current_offset+=PAGE_SIZE; - } else if ((object->pager_created) && - (object->existence_map != NULL) && - (vm_external_state_get(object->existence_map, - current_offset) - != VM_EXTERNAL_STATE_ABSENT)) { - current_offset+=PAGE_SIZE; - } else if (vm_page_lookup(backing_object, - (vm_object_offset_t)current_offset - + backing_offset)!= VM_PAGE_NULL) { - /* found a dependency */ - object->cow_hint = current_offset; - vm_object_unlock(backing_object); - return; - } else if ((backing_object->pager_created) && - (backing_object->existence_map != NULL) && - (vm_external_state_get( - backing_object->existence_map, - current_offset + backing_offset) - != VM_EXTERNAL_STATE_ABSENT)) { - /* found a dependency */ - object->cow_hint = current_offset; +#define EXISTS_IN_OBJECT(obj, off, rc) \ + (vm_external_state_get((obj)->existence_map, \ + (vm_offset_t)(off)) == VM_EXTERNAL_STATE_EXISTS || \ + ((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--)) + + /* + * Check the hint location first + * (since it is often the quickest way out of here). + */ + if (object->cow_hint != ~(vm_offset_t)0) + hint_offset = (vm_object_offset_t)object->cow_hint; + else + hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ? + (hint_offset - 8 * PAGE_SIZE_64) : 0; + + if (EXISTS_IN_OBJECT(backing_object, hint_offset + + backing_offset, backing_rcount) && + !EXISTS_IN_OBJECT(object, hint_offset, rcount)) { + /* dependency right at the hint */ + object->cow_hint = (vm_offset_t)hint_offset; vm_object_unlock(backing_object); return; - } else { - current_offset+=PAGE_SIZE; } - if(current_offset >= size) { - /* wrap at end of object */ - current_offset = 0; + + /* + * If the object's window onto the backing_object + * is large compared to the number of resident + * pages in the backing object, it makes sense to + * walk the backing_object's resident pages first. + * + * NOTE: Pages may be in both the existence map and + * resident. So, we can't permanently decrement + * the rcount here because the second loop may + * find the same pages in the backing object's + * existence map that we found here and we would + * double-decrement the rcount. We also may or + * may not have found the
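The EXISTS_IN_OBJECT macro introduced above packs three ideas into one short-circuited expression: the cheap existence-map test runs first; the hash probe (vm_page_lookup) runs only while rc, the count of resident pages not yet accounted for, is nonzero; and rc is decremented only after a probe actually finds a page, while ++lookups feeds the delay(1) backoff in the loops below. A toy rendition with invented data and helpers:

#include <stdbool.h>

static bool in_existence_map(unsigned pg) { return pg == 3; }	/* toy data */
static bool resident_lookup(unsigned pg)  { return pg == 5; }

static unsigned lookups;	/* counts probes, like 'lookups' above */

/* rc = resident pages not yet accounted for; once it reaches zero the
 * expensive probe is skipped entirely, as in EXISTS_IN_OBJECT. */
static bool exists_in_object(unsigned pg, unsigned *rc)
{
	return in_existence_map(pg) ||
	       (*rc && ++lookups && resident_lookup(pg) && (*rc)--);
}

int main(void)
{
	unsigned rc = 1;

	if (!exists_in_object(3, &rc))	/* map hit: no probe, rc untouched */
		return 1;
	if (!exists_in_object(5, &rc))	/* probe hit: rc drops to 0 */
		return 1;
	if (exists_in_object(5, &rc))	/* budget spent: probe skipped */
		return 1;
	return 0;
}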
We also may or + * may not have found the + */ + if (backing_rcount && size > + ((backing_object->existence_map) ? + backing_rcount : (backing_rcount >> 1))) { + unsigned int rc = rcount; + vm_page_t p; + + backing_rcount = backing_object->resident_page_count; + p = (vm_page_t)queue_first(&backing_object->memq); + do { + /* Until we get more than one lookup lock */ + if (lookups > 256) { + lookups = 0; + delay(1); + } + + offset = (p->offset - backing_offset); + if (offset < object->size && + offset != hint_offset && + !EXISTS_IN_OBJECT(object, offset, rc)) { + /* found a dependency */ + object->cow_hint = (vm_offset_t)offset; + vm_object_unlock(backing_object); + return; + } + p = queue_next(p); + + } while (--backing_rcount); } - if(current_offset == object->cow_hint) { - /* we are free of shadow influence */ - break; + + /* + * Walk through the offsets looking for pages in the + * backing object that show through to the object. + */ + if (backing_rcount || backing_object->existence_map) { + offset = hint_offset; + + while((offset = + (offset + PAGE_SIZE_64 < object->size) ? + (offset + PAGE_SIZE_64) : 0) != hint_offset) { + + /* Until we get more than one lookup lock */ + if (lookups > 256) { + lookups = 0; + delay(1); + } + + if (EXISTS_IN_OBJECT(backing_object, offset + + backing_offset, backing_rcount) && + !EXISTS_IN_OBJECT(object, offset, rcount)) { + /* found a dependency */ + object->cow_hint = (vm_offset_t)offset; + vm_object_unlock(backing_object); + return; + } + } } } - /* reset the cow_hint for any objects deeper in the chain */ - object->cow_hint = 0; - + /* reset the offset hint for any objects deeper in the chain */ + object->cow_hint = (vm_offset_t)0; /* * All interesting pages in the backing object @@ -3433,7 +3553,7 @@ vm_object_page_remove( * It balances vm_object_lookup vs iteration. 
*/ - if (atop(end - start) < (unsigned)object->resident_page_count/16) { + if (atop_64(end - start) < (unsigned)object->resident_page_count/16) { vm_object_page_remove_lookup++; for (; start < end; start += PAGE_SIZE_64) { @@ -3441,7 +3561,7 @@ vm_object_page_remove( if (p != VM_PAGE_NULL) { assert(!p->cleaning && !p->pageout); if (!p->fictitious) - pmap_page_protect(p->phys_addr, + pmap_page_protect(p->phys_page, VM_PROT_NONE); VM_PAGE_FREE(p); } @@ -3455,7 +3575,7 @@ vm_object_page_remove( if ((start <= p->offset) && (p->offset < end)) { assert(!p->cleaning && !p->pageout); if (!p->fictitious) - pmap_page_protect(p->phys_addr, + pmap_page_protect(p->phys_page, VM_PROT_NONE); VM_PAGE_FREE(p); } @@ -3522,7 +3642,7 @@ vm_object_coalesce( /* * Try to collapse the object first */ - vm_object_collapse(prev_object); + vm_object_collapse(prev_object, prev_offset); /* * Can't coalesce if pages not mapped to @@ -3600,7 +3720,7 @@ vm_object_page_map( vm_page_t old_page; vm_object_offset_t addr; - num_pages = atop(size); + num_pages = atop_64(size); for (i = 0; i < num_pages; i++, offset += PAGE_SIZE_64) { @@ -3965,27 +4085,24 @@ vm_object_find( kern_return_t vm_object_populate_with_private( - vm_object_t object, + vm_object_t object, vm_object_offset_t offset, - vm_offset_t phys_addr, - vm_size_t size) + ppnum_t phys_page, + vm_size_t size) { - vm_offset_t base_addr; + ppnum_t base_page; vm_object_offset_t base_offset; if(!object->private) return KERN_FAILURE; - if((base_addr = trunc_page(phys_addr)) != phys_addr) { - return KERN_FAILURE; - } - + base_page = phys_page; vm_object_lock(object); if(!object->phys_contiguous) { vm_page_t m; - if((base_offset = trunc_page(offset)) != offset) { + if((base_offset = trunc_page_64(offset)) != offset) { vm_object_unlock(object); return KERN_FAILURE; } @@ -3997,7 +4114,7 @@ vm_object_populate_with_private( vm_page_lock_queues(); m->fictitious = FALSE; m->private = TRUE; - m->phys_addr = base_addr; + m->phys_page = base_page; if(!m->busy) { m->busy = TRUE; } @@ -4007,11 +4124,11 @@ vm_object_populate_with_private( } m->list_req_pending = TRUE; vm_page_unlock_queues(); - } else if (m->phys_addr != base_addr) { + } else if (m->phys_page != base_page) { /* pmap call to clear old mapping */ - pmap_page_protect(m->phys_addr, + pmap_page_protect(m->phys_page, VM_PROT_NONE); - m->phys_addr = base_addr; + m->phys_page = base_page; } } else { while ((m = vm_page_grab_fictitious()) @@ -4020,7 +4137,7 @@ vm_object_populate_with_private( vm_page_lock_queues(); m->fictitious = FALSE; m->private = TRUE; - m->phys_addr = base_addr; + m->phys_page = base_page; m->list_req_pending = TRUE; m->absent = TRUE; m->unusual = TRUE; @@ -4028,7 +4145,7 @@ vm_object_populate_with_private( vm_page_unlock_queues(); vm_page_insert(m, object, base_offset); } - base_addr += PAGE_SIZE; + base_page++; /* Go to the next physical page */ base_offset += PAGE_SIZE; size -= PAGE_SIZE; } @@ -4041,7 +4158,7 @@ vm_object_populate_with_private( /* shadows on contiguous memory are not allowed */ /* we therefore can use the offset field */ - object->shadow_offset = (vm_object_offset_t)phys_addr; + object->shadow_offset = (vm_object_offset_t)(phys_page << 12); object->size = size; } vm_object_unlock(object); @@ -4382,7 +4499,7 @@ vm_object_lock_request( if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) return (KERN_INVALID_ARGUMENT); - size = round_page(size); + size = round_page_64(size); /* * Lock the object, and acquire a paging reference to @@ -4390,7 +4507,6 @@ vm_object_lock_request( */ 
vm_object_lock(object); vm_object_paging_begin(object); - offset -= object->paging_offset; (void)vm_object_update(object, offset, size, should_return, flags, prot); diff --git a/osfmk/vm/vm_object.h b/osfmk/vm/vm_object.h index dbfc2a85e..54fc6f9a9 100644 --- a/osfmk/vm/vm_object.h +++ b/osfmk/vm/vm_object.h @@ -265,12 +265,13 @@ struct vm_object { request queue */ vm_object_offset_t last_alloc; /* last allocation offset */ + vm_object_offset_t sequential; /* sequential access size */ vm_size_t cluster_size; /* size of paging cluster */ #if MACH_PAGEMAP vm_external_map_t existence_map; /* bitmap of pages written to * backing storage */ #endif /* MACH_PAGEMAP */ - int cow_hint; /* last page present in */ + vm_offset_t cow_hint; /* last page present in */ /* shadow but not in object */ #if MACH_ASSERT struct vm_object *paging_object; /* object which pages to be @@ -424,7 +425,8 @@ __private_extern__ boolean_t vm_object_shadow( vm_object_size_t length); __private_extern__ void vm_object_collapse( - vm_object_t object); + vm_object_t object, + vm_object_offset_t offset); __private_extern__ boolean_t vm_object_copy_quickly( vm_object_t *_object, diff --git a/osfmk/vm/vm_page.h b/osfmk/vm/vm_page.h index f037d387b..585c01355 100644 --- a/osfmk/vm/vm_page.h +++ b/osfmk/vm/vm_page.h @@ -180,7 +180,7 @@ struct vm_page { /* a pageout candidate */ /* we've used up all 32 bits */ - vm_offset_t phys_addr; /* Physical address of page, passed + vm_offset_t phys_page; /* Physical address of page, passed * to pmap_enter (read-only) */ }; @@ -252,6 +252,10 @@ extern int vm_page_free_reserved; /* How many pages reserved to do pageout */ extern int vm_page_laundry_count; /* How many pages being laundered? */ +extern +int vm_page_burst_count; /* How many pages being laundered to EMM? */ +extern +int vm_page_throttled_count;/* Count of zero-fill allocations throttled */ decl_mutex_data(,vm_page_queue_lock) /* lock on active and inactive page queues */ @@ -276,8 +280,8 @@ extern void vm_page_bootstrap( extern void vm_page_module_init(void); extern void vm_page_create( - vm_offset_t start, - vm_offset_t end); + ppnum_t start, + ppnum_t end); extern vm_page_t vm_page_lookup( vm_object_t object, @@ -316,7 +320,7 @@ extern vm_page_t vm_page_alloc( extern void vm_page_init( vm_page_t page, - vm_offset_t phys_addr); + ppnum_t phys_page); extern void vm_page_free( vm_page_t page); @@ -419,8 +423,9 @@ extern void vm_page_gobble( MACRO_END #define VM_PAGE_THROTTLED() \ - (vm_page_free_count < (vm_page_free_target - \ - ((vm_page_free_target-vm_page_free_min)>>2))) + (vm_page_free_count < vm_page_free_min && \ + !current_thread()->vm_privilege && \ + ++vm_page_throttled_count) #define VM_PAGE_WAIT() ((void)vm_page_wait(THREAD_UNINT)) diff --git a/osfmk/vm/vm_pageout.c b/osfmk/vm/vm_pageout.c index 825b29485..19a6eb885 100644 --- a/osfmk/vm/vm_pageout.c +++ b/osfmk/vm/vm_pageout.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. 
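The pervasive phys_addr to phys_page rename visible in the vm_page.h hunk above is a representation change, not a spelling change: the page now carries a physical page number (ppnum_t elsewhere in this release) rather than a byte address, so a 32-bit field can name physical memory beyond 4 GB. The populate-with-private hunk earlier converts back with phys_page << 12, hard-coding 4 KB pages. A sketch of the arithmetic, with types simplified and the 4 KB page size an explicit assumption:

#include <stdint.h>
#include <assert.h>

typedef uint32_t ppnum_t;	/* physical page number */

#define PAGE_SHIFT 12		/* the 4 KB assumption behind '<< 12' */

static uint64_t ptoa_64(ppnum_t pn)  { return (uint64_t)pn << PAGE_SHIFT; }
static ppnum_t  atop_64(uint64_t pa) { return (ppnum_t)(pa >> PAGE_SHIFT); }

int main(void)
{
	assert(atop_64(0x12345000u) == 0x12345);

	/* A 32-bit page number reaches physical memory well past 4 GB,
	 * which a 32-bit byte address cannot. */
	assert(ptoa_64(0x100000u) == 0x100000000ull);
	return 0;
}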
* * @APPLE_LICENSE_HEADER_START@ * @@ -77,6 +77,7 @@ #include #include #include +#include #include #include #include @@ -84,20 +85,17 @@ #include #include + extern ipc_port_t memory_manager_default; #ifndef VM_PAGE_LAUNDRY_MAX -#define VM_PAGE_LAUNDRY_MAX 6 /* outstanding DMM page cleans */ +#define VM_PAGE_LAUNDRY_MAX 16 /* outstanding DMM+EMM page cleans */ #endif /* VM_PAGEOUT_LAUNDRY_MAX */ #ifndef VM_PAGEOUT_BURST_MAX -#define VM_PAGEOUT_BURST_MAX 32 /* simultaneous EMM page cleans */ +#define VM_PAGEOUT_BURST_MAX 6 /* simultaneous EMM page cleans */ #endif /* VM_PAGEOUT_BURST_MAX */ -#ifndef VM_PAGEOUT_DISCARD_MAX -#define VM_PAGEOUT_DISCARD_MAX 68 /* simultaneous EMM page cleans */ -#endif /* VM_PAGEOUT_DISCARD_MAX */ - #ifndef VM_PAGEOUT_BURST_WAIT #define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds per page */ #endif /* VM_PAGEOUT_BURST_WAIT */ @@ -150,7 +148,7 @@ extern ipc_port_t memory_manager_default; #ifndef VM_PAGE_FREE_RESERVED #define VM_PAGE_FREE_RESERVED \ - ((16 * VM_PAGE_LAUNDRY_MAX) + NCPUS) + ((6 * VM_PAGE_LAUNDRY_MAX) + NCPUS) #endif /* VM_PAGE_FREE_RESERVED */ /* @@ -178,10 +176,11 @@ unsigned int vm_pageout_reserved_really = 0; unsigned int vm_page_laundry_max = 0; /* # of clusters outstanding */ unsigned int vm_page_laundry_min = 0; +unsigned int vm_pageout_empty_wait = 0; /* milliseconds */ unsigned int vm_pageout_burst_max = 0; unsigned int vm_pageout_burst_wait = 0; /* milliseconds per page */ -unsigned int vm_pageout_empty_wait = 0; /* milliseconds */ unsigned int vm_pageout_burst_min = 0; +unsigned int vm_pageout_burst_loop_throttle = 4096; unsigned int vm_pageout_pause_count = 0; unsigned int vm_pageout_pause_max = 0; unsigned int vm_free_page_pause = 100; /* milliseconds */ @@ -225,9 +224,37 @@ unsigned int vm_pageout_scan_inactive_emm_throttle = 0; /* debugging */ unsigned int vm_pageout_scan_inactive_emm_throttle_success = 0; /* debugging */ unsigned int vm_pageout_scan_inactive_emm_throttle_failure = 0; /* debugging */ +/* + * Backing store throttle when BS is exhausted + */ +unsigned int vm_backing_store_low = 0; unsigned int vm_pageout_out_of_line = 0; unsigned int vm_pageout_in_place = 0; + + +/* + * Routine: vm_backing_store_disable + * Purpose: + * Suspend non-privileged threads wishing to extend + * backing store when we are low on backing store + * (Synchronized by caller) + */ +void +vm_backing_store_disable( + boolean_t disable) +{ + if(disable) { + vm_backing_store_low = 1; + } else { + if(vm_backing_store_low) { + vm_backing_store_low = 0; + thread_wakeup((event_t) &vm_backing_store_low); + } + } +} + + /* * Routine: vm_pageout_object_allocate * Purpose: @@ -251,9 +278,6 @@ vm_pageout_object_allocate( assert(object->pager_ready); - if (object->pager_trusted || object->internal) - vm_pageout_throttle(m); - new_object = vm_object_allocate(size); if (object->pager_trusted) { @@ -273,6 +297,9 @@ vm_pageout_object_allocate( */ vm_object_lock(object); vm_object_paging_begin(object); + vm_page_lock_queues(); + vm_pageout_throttle(m); + vm_page_unlock_queues(); vm_object_unlock(object); vm_pageout_in_place++; @@ -310,6 +337,7 @@ vm_pageout_object_terminate( vm_object_t object) { vm_object_t shadow_object; + boolean_t shadow_internal; /* * Deal with the deallocation (last reference) of a pageout object @@ -320,6 +348,7 @@ vm_pageout_object_terminate( assert(object->pageout); shadow_object = object->shadow; vm_object_lock(shadow_object); + shadow_internal = shadow_object->internal; while (!queue_empty(&object->memq)) { vm_page_t p, m; @@ -359,9 +388,12 @@ 
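vm_backing_store_disable above is a bare flag plus wakeup (its comment notes that serialization is the caller's job, hence no lock in the kernel version). In user-space terms it is the usual condition-variable pattern; a sketch with the lock made explicit, all names hypothetical:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t bs_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  bs_cv   = PTHREAD_COND_INITIALIZER;
static bool backing_store_low;

/* Mirror of vm_backing_store_disable(): set or clear the low-space
 * flag, waking anyone who blocked while it was set. */
static void backing_store_disable(bool disable)
{
	pthread_mutex_lock(&bs_lock);
	if (disable) {
		backing_store_low = true;
	} else if (backing_store_low) {
		backing_store_low = false;
		pthread_cond_broadcast(&bs_cv);	/* thread_wakeup() analog */
	}
	pthread_mutex_unlock(&bs_lock);
}

/* What a non-privileged thread would do before growing backing store. */
static void wait_for_backing_store(void)
{
	pthread_mutex_lock(&bs_lock);
	while (backing_store_low)
		pthread_cond_wait(&bs_cv, &bs_lock);
	pthread_mutex_unlock(&bs_lock);
}

int main(void)
{
	backing_store_disable(true);
	backing_store_disable(false);	/* would wake any waiters */
	wait_for_backing_store();	/* returns at once: flag is clear */
	return 0;
}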
vm_pageout_object_terminate( /* * Handle the trusted pager throttle. + * Also decrement the burst throttle (if external). */ vm_page_lock_queues(); if (m->laundry) { + if (!shadow_internal) + vm_page_burst_count--; vm_page_laundry_count--; m->laundry = FALSE; if (vm_page_laundry_count < vm_page_laundry_min) { @@ -392,14 +424,14 @@ vm_pageout_object_terminate( * from being dirtied after the pmap_is_modified() call * returns. */ - pmap_page_protect(m->phys_addr, VM_PROT_NONE); + pmap_page_protect(m->phys_page, VM_PROT_NONE); /* * Since the page is left "dirty" but "not modifed", we * can detect whether the page was redirtied during * pageout by checking the modify state. */ - m->dirty = pmap_is_modified(m->phys_addr); + m->dirty = pmap_is_modified(m->phys_page); if (m->dirty) { CLUSTER_STAT(vm_pageout_target_page_dirtied++;) @@ -437,7 +469,7 @@ vm_pageout_object_terminate( /* out the pages but handling outside of this code */ /* will take care of resetting dirty. We clear the */ /* modify however for the Programmed I/O case. */ - pmap_clear_modify(m->phys_addr); + pmap_clear_modify(m->phys_page); if(m->absent) { m->absent = FALSE; if(shadow_object->absent_count == 1) @@ -464,7 +496,7 @@ vm_pageout_object_terminate( * consulted if m->dirty is false. */ #if MACH_CLUSTER_STATS - m->dirty = pmap_is_modified(m->phys_addr); + m->dirty = pmap_is_modified(m->phys_page); if (m->dirty) vm_pageout_cluster_dirtied++; else vm_pageout_cluster_cleaned++; @@ -475,7 +507,6 @@ vm_pageout_object_terminate( } m->cleaning = FALSE; - /* * Wakeup any thread waiting for the page to be un-cleaning. */ @@ -569,7 +600,7 @@ vm_pageout_setup( /* * Set up new page to be private shadow of real page. */ - new_m->phys_addr = m->phys_addr; + new_m->phys_page = m->phys_page; new_m->fictitious = FALSE; new_m->pageout = TRUE; @@ -579,7 +610,7 @@ vm_pageout_setup( * pageout (indicating that the page should be freed * when the pageout completes). 
*/ - pmap_clear_modify(m->phys_addr); + pmap_clear_modify(m->phys_page); vm_page_lock_queues(); new_m->private = TRUE; vm_page_wire(new_m); @@ -677,7 +708,7 @@ vm_pageclean_setup( (integer_t)old_object, m->offset, (integer_t)m, (integer_t)new_m, new_offset); - pmap_clear_modify(m->phys_addr); + pmap_clear_modify(m->phys_page); vm_object_paging_begin(old_object); /* @@ -702,7 +733,7 @@ vm_pageclean_setup( new_m->fictitious = FALSE; new_m->private = TRUE; new_m->pageout = TRUE; - new_m->phys_addr = m->phys_addr; + new_m->phys_page = m->phys_page; vm_page_wire(new_m); vm_page_insert(new_m, new_object, new_offset); @@ -725,7 +756,7 @@ vm_pageclean_copy( assert(!new_m->private && !new_m->fictitious); - pmap_clear_modify(m->phys_addr); + pmap_clear_modify(m->phys_page); m->busy = TRUE; vm_object_paging_begin(m->object); @@ -804,28 +835,27 @@ vm_pageout_initialize_page( object = m->object; paging_offset = m->offset + object->paging_offset; vm_object_paging_begin(object); - vm_object_unlock(object); if (m->absent || m->error || m->restart || (!m->dirty && !m->precious)) { VM_PAGE_FREE(m); panic("reservation without pageout?"); /* alan */ + vm_object_unlock(object); return; } /* set the page for future call to vm_fault_list_request */ holding_page = NULL; - vm_object_lock(m->object); vm_page_lock_queues(); - pmap_clear_modify(m->phys_addr); + pmap_clear_modify(m->phys_page); m->dirty = TRUE; - m->busy = TRUE; - m->list_req_pending = TRUE; - m->cleaning = TRUE; + m->busy = TRUE; + m->list_req_pending = TRUE; + m->cleaning = TRUE; m->pageout = TRUE; vm_page_wire(m); - vm_page_unlock_queues(); - vm_object_unlock(m->object); vm_pageout_throttle(m); + vm_page_unlock_queues(); + vm_object_unlock(object); /* * Write the data to its pager. @@ -859,9 +889,11 @@ boolean_t allow_clustered_pageouts = FALSE; * Given a page, page it out, and attempt to clean adjacent pages * in the same operation. * - * The page must be busy, and the object unlocked w/ paging reference - * to prevent deallocation or collapse. The page must not be on any - * pageout queue. + * The page must be busy, and the object locked. We will take a + * paging reference to prevent deallocation or collapse when we + * temporarily release the object lock. + * + * The page must not be on any pageout queue. */ void vm_pageout_cluster( @@ -869,7 +901,7 @@ vm_pageout_cluster( { vm_object_t object = m->object; vm_object_offset_t offset = m->offset; /* from vm_object start */ - vm_object_offset_t paging_offset = m->offset + object->paging_offset; + vm_object_offset_t paging_offset; vm_object_t new_object; vm_object_offset_t new_offset; vm_size_t cluster_size; @@ -892,13 +924,20 @@ vm_pageout_cluster( (integer_t)object, offset, (integer_t)m, 0, 0); CLUSTER_STAT(vm_pageout_cluster_clusters++;) + + /* + * protect the object from collapse - + * locking in the object's paging_offset. + */ + vm_object_paging_begin(object); + paging_offset = m->offset + object->paging_offset; + /* * Only a certain kind of page is appreciated here. 
*/ assert(m->busy && (m->dirty || m->precious) && (m->wire_count == 0)); assert(!m->cleaning && !m->pageout && !m->inactive && !m->active); - vm_object_lock(object); cluster_size = object->cluster_size; assert(cluster_size >= PAGE_SIZE); @@ -911,7 +950,6 @@ vm_pageout_cluster( if (!object->pager_trusted || !allow_clustered_pageouts) cluster_size = PAGE_SIZE; - vm_object_unlock(object); cluster_offset = paging_offset & (vm_object_offset_t)(cluster_size - 1); /* bytes from beginning of cluster */ @@ -931,16 +969,15 @@ vm_pageout_cluster( /* set the page for future call to vm_fault_list_request */ holding_page = NULL; - vm_object_lock(m->object); vm_page_lock_queues(); - m->busy = TRUE; - m->list_req_pending = TRUE; - m->cleaning = TRUE; + m->busy = TRUE; + m->list_req_pending = TRUE; + m->cleaning = TRUE; m->pageout = TRUE; vm_page_wire(m); - vm_page_unlock_queues(); - vm_object_unlock(m->object); vm_pageout_throttle(m); + vm_page_unlock_queues(); + vm_object_unlock(object); /* * Search backward for adjacent eligible pages to clean in @@ -1072,19 +1109,16 @@ vm_pageout_cluster( VM_PAGE_FREE(holding_page); vm_object_paging_end(object); } - - vm_object_unlock(object); } /* * Trusted pager throttle. - * Object must be unlocked, page queues must be unlocked. + * Object and page queues must be locked. */ void vm_pageout_throttle( register vm_page_t m) { - vm_page_lock_queues(); assert(!m->laundry); m->laundry = TRUE; while (vm_page_laundry_count >= vm_page_laundry_max) { @@ -1096,15 +1130,18 @@ vm_pageout_throttle( assert_wait((event_t) &vm_page_laundry_count, THREAD_UNINT); vm_page_unlock_queues(); - + vm_object_unlock(m->object); /* * Pause to let the default pager catch up. */ thread_block((void (*)(void)) 0); + + vm_object_lock(m->object); vm_page_lock_queues(); } + if (!m->object->internal) + vm_page_burst_count++; vm_page_laundry_count++; - vm_page_unlock_queues(); } /* @@ -1154,7 +1191,7 @@ vm_pageout_cluster_page( assert(!m->private); assert(!m->fictitious); - if (!m->dirty) m->dirty = pmap_is_modified(m->phys_addr); + if (!m->dirty) m->dirty = pmap_is_modified(m->phys_page); if (precious_clean) { if (!m->precious || !m->dirty) @@ -1173,14 +1210,22 @@ vm_pageout_cluster_page( */ extern void vm_pageout_scan_continue(void); /* forward; */ +#define DELAYED_UNLOCK_LIMIT 50 +#define LOCAL_FREED_LIMIT 50 + void vm_pageout_scan(void) { - unsigned int burst_count; boolean_t now = FALSE; unsigned int laundry_pages; - boolean_t need_more_inactive_pages; - unsigned int loop_detect; + int loop_count = 0; + int loop_bursted_count = 0; + int active_loop_detect; + vm_page_t local_freeq = 0; + int local_freed = 0; + int delayed_unlock = 0; + int need_internal_inactive = 0; + int need_pause; XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0); @@ -1207,126 +1252,99 @@ vm_pageout_scan(void) * When memory is very tight, we can't rely on external pagers to * clean pages. They probably aren't running, because they * aren't vm-privileged. If we kept sending dirty pages to them, - * we could exhaust the free list. However, we can't just ignore - * pages belonging to external objects, because there might be no - * pages belonging to internal objects. Hence, we get the page - * into an internal object and then immediately double-page it, - * sending it to the default pager. + * we could exhaust the free list. * * consider_zone_gc should be last, because the other operations * might return memory to zones. 
*/ - - Restart: -#if THREAD_SWAPPER - mutex_lock(&vm_page_queue_free_lock); - now = (vm_page_free_count < vm_page_free_min); - mutex_unlock(&vm_page_queue_free_lock); - - swapout_threads(now); -#endif /* THREAD_SWAPPER */ - stack_collect(); consider_task_collect(); - consider_thread_collect(); - consider_zone_gc(); consider_machine_collect(); + consider_zone_gc(); - loop_detect = vm_page_active_count + vm_page_inactive_count; -#if 0 - if (vm_page_free_count <= vm_page_free_reserved) { - need_more_inactive_pages = TRUE; - } else { - need_more_inactive_pages = FALSE; - } -#else - need_more_inactive_pages = FALSE; -#endif - - for (burst_count = 0;;) { + for (;;) { register vm_page_t m; register vm_object_t object; /* * Recalculate vm_page_inactivate_target. */ - - vm_page_lock_queues(); + if (delayed_unlock == 0) + vm_page_lock_queues(); vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count + vm_page_inactive_count); + active_loop_detect = vm_page_active_count; /* * Move pages from active to inactive. */ + while ((need_internal_inactive || + vm_page_inactive_count < vm_page_inactive_target) && + !queue_empty(&vm_page_queue_active) && + ((active_loop_detect--) > 0)) { - while ((vm_page_inactive_count < vm_page_inactive_target || - need_more_inactive_pages) && - !queue_empty(&vm_page_queue_active)) { - register vm_object_t object; - + need_pause = 1; vm_pageout_active++; + m = (vm_page_t) queue_first(&vm_page_queue_active); + object = m->object; /* * If we're getting really low on memory, - * try selecting a page that will go + * or we have already exceeded the burst + * count for the external pagers, + * try skipping to a page that will go directly to the default_pager. - * If there are no such pages, we have to - * page out a page backed by an EMM, - * so that the default_pager can recover - * it eventually. */ - if (need_more_inactive_pages && - (IP_VALID(memory_manager_default))) { + if (need_internal_inactive && + IP_VALID(memory_manager_default)) { vm_pageout_scan_active_emm_throttle++; - do { - assert(m->active && !m->inactive); - object = m->object; - if (vm_object_lock_try(object)) { -#if 0 - if (object->pager_trusted || - object->internal) { - /* found one ! */ - vm_pageout_scan_active_emm_throttle_success++; - goto object_locked_active; - } -#else - vm_pageout_scan_active_emm_throttle_success++; - goto object_locked_active; -#endif - vm_object_unlock(object); - } - m = (vm_page_t) queue_next(&m->pageq); - } while (!queue_end(&vm_page_queue_active, - (queue_entry_t) m)); - if (queue_end(&vm_page_queue_active, - (queue_entry_t) m)) { - vm_pageout_scan_active_emm_throttle_failure++; - m = (vm_page_t) - queue_first(&vm_page_queue_active); + assert(m->active && !m->inactive); + + if (vm_object_lock_try(object)) { + if (object->internal) + goto object_locked_active; + + if (!m->dirty) + m->dirty = pmap_is_modified(m->phys_page); + if (!m->dirty && !m->precious) + goto object_locked_active; + + vm_object_unlock(object); + + need_pause = 0; } + goto object_lock_try_active_failed; } - assert(m->active && !m->inactive); - object = m->object; if (!vm_object_lock_try(object)) { /* * Move page to end and continue.
*/ - +object_lock_try_active_failed: queue_remove(&vm_page_queue_active, m, vm_page_t, pageq); queue_enter(&vm_page_queue_active, m, vm_page_t, pageq); - vm_page_unlock_queues(); - mutex_pause(); - vm_page_lock_queues(); + if (local_freeq) { + vm_page_free_list(local_freeq); + + local_freeq = 0; + local_freed = 0; + } + if (need_pause) { + delayed_unlock = 0; + + vm_page_unlock_queues(); + mutex_pause(); + vm_page_lock_queues(); + } continue; } @@ -1355,56 +1373,82 @@ vm_pageout_scan(void) * can handle that. */ + if (need_internal_inactive) { + /* found one ! */ + vm_pageout_scan_active_emm_throttle_success++; + need_internal_inactive--; + } vm_page_deactivate(m); vm_object_unlock(object); } - /* * We are done if we have met our target *and* * nobody is still waiting for a page. */ - if (vm_page_free_count >= vm_page_free_target) { + if (vm_page_free_count + local_freed >= vm_page_free_target) { + if (local_freeq) { + vm_page_free_list(local_freeq); + + local_freeq = 0; + local_freed = 0; + } + + consider_machine_adjust(); + mutex_lock(&vm_page_queue_free_lock); + if ((vm_page_free_count >= vm_page_free_target) && (vm_page_free_wanted == 0)) { + + delayed_unlock = 0; vm_page_unlock_queues(); break; } mutex_unlock(&vm_page_queue_free_lock); } + /* * Sometimes we have to pause: * 1) No inactive pages - nothing to do. - * 2) Flow control - wait for untrusted pagers to catch up. + * 2) Flow control - nothing but external pages and + * we have to wait for untrusted pagers to catch up. */ + loop_count++; if ((queue_empty(&vm_page_queue_inactive) && - (queue_empty(&vm_page_queue_zf))) || - ((--loop_detect) == 0) || - (burst_count >= vm_pageout_burst_max)) { + queue_empty(&vm_page_queue_zf)) || + loop_bursted_count >= vm_pageout_burst_loop_throttle) { + unsigned int pages, msecs; int wait_result; - + consider_machine_adjust(); /* * vm_pageout_burst_wait is msecs/page. * If there is nothing for us to do, we wait * at least vm_pageout_empty_wait msecs. */ - pages = burst_count; + pages = vm_page_burst_count; - if (loop_detect == 0) { + if (pages) { + msecs = pages * vm_pageout_burst_wait; + } else { printf("Warning: No physical memory suitable for pageout or reclaim, pageout thread temporarily going to sleep\n"); msecs = vm_free_page_pause; } - else { - msecs = burst_count * vm_pageout_burst_wait; - } if (queue_empty(&vm_page_queue_inactive) && queue_empty(&vm_page_queue_zf) && (msecs < vm_pageout_empty_wait)) msecs = vm_pageout_empty_wait; + + if (local_freeq) { + vm_page_free_list(local_freeq); + + local_freeq = 0; + local_freed = 0; + } + delayed_unlock = 0; vm_page_unlock_queues(); assert_wait_timeout(msecs, THREAD_INTERRUPTIBLE); @@ -1419,6 +1463,18 @@ vm_pageout_scan(void) thread_cancel_timer(); vm_pageout_scan_continue(); + if (loop_count >= vm_page_inactive_count) { + if (vm_page_burst_count >= vm_pageout_burst_max) { + /* + * Make sure we move enough "appropriate" + * pages to the inactive queue before trying + * again. + */ + need_internal_inactive = vm_page_laundry_max; + } + loop_count = 0; + } + loop_bursted_count = 0; goto Restart; /*NOTREACHED*/ } @@ -1442,66 +1498,44 @@ vm_pageout_scan(void) m = (vm_page_t) queue_first(&vm_page_queue_zf); last_page_zf = 1; } + object = m->object; - if ((vm_page_free_count <= vm_page_free_reserved) && - (IP_VALID(memory_manager_default))) { + need_pause = 1; + + if (vm_page_burst_count >= vm_pageout_burst_max && + IP_VALID(memory_manager_default)) { /* - * We're really low on memory. 
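Two throughput devices recur through the reworked scan loop in these hunks: delayed_unlock batches up to DELAYED_UNLOCK_LIMIT queue operations under one hold of the page-queues lock, and local_freeq accumulates reclaimed pages so the global free list is touched once per batch instead of once per page. A toy rendition of the local free list; the counter is invented for illustration:

#include <stdio.h>

#define LOCAL_FREED_LIMIT 50	/* the batch size the patch picks */

struct page { struct page *next; };

static int free_list_handoffs;	/* stands in for lock acquisitions */

static void vm_page_free_list_stub(struct page *head)
{
	free_list_handoffs++;		/* one locked handoff per chain */
	while (head != NULL)
		head = head->next;	/* ...return each page to the pool... */
}

/* Chain reclaimed pages locally and hand them over in batches, as the
 * reworked vm_pageout_scan() does with local_freeq/local_freed. */
static void reclaim(struct page *pages, int n)
{
	struct page *local_freeq = NULL;
	int local_freed = 0, i;

	for (i = 0; i < n; i++) {
		pages[i].next = local_freeq;
		local_freeq = &pages[i];
		if (++local_freed > LOCAL_FREED_LIMIT) {
			vm_page_free_list_stub(local_freeq);
			local_freeq = NULL;
			local_freed = 0;
		}
	}
	if (local_freeq != NULL)
		vm_page_free_list_stub(local_freeq);
}

int main(void)
{
	struct page pages[200];

	reclaim(pages, 200);
	printf("%d handoffs instead of 200\n", free_list_handoffs);
	return 0;
}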
Try to select a page that - * would go directly to the default_pager. - * If there are no such pages, we have to page out a - * page backed by an EMM, so that the default_pager - * can recover it eventually. + * We're throttling external pagers. + * Try to select a page that would + * go directly to the default_pager + * or that is clean... */ vm_pageout_scan_inactive_emm_throttle++; - do { - assert(!m->active && m->inactive); - object = m->object; - if (vm_object_lock_try(object)) { -#if 0 - if (object->pager_trusted || - object->internal) { - /* found one ! */ - vm_pageout_scan_inactive_emm_throttle_success++; - goto object_locked_inactive; - } -#else - vm_pageout_scan_inactive_emm_throttle_success++; - goto object_locked_inactive; -#endif /* 0 */ - vm_object_unlock(object); - } - m = (vm_page_t) queue_next(&m->pageq); - } while ((!queue_end(&vm_page_queue_zf, - (queue_entry_t) m)) - && (!queue_end(&vm_page_queue_inactive, - (queue_entry_t) m))); - - if ((queue_end(&vm_page_queue_zf, - (queue_entry_t) m)) - || (queue_end(&vm_page_queue_inactive, - (queue_entry_t) m))) { - vm_pageout_scan_inactive_emm_throttle_failure++; - /* - * We should check the "active" queue - * for good candidates to page out. - */ - need_more_inactive_pages = TRUE; + assert(!m->active && m->inactive); - if(last_page_zf == 0) { - last_page_zf = 1; - vm_zf_iterator = vm_zf_iterator_count - 1; - } else { - last_page_zf = 0; - vm_zf_iterator = vm_zf_iterator_count - 2; + if (vm_object_lock_try(object)) { + if (object->internal) { + /* found one ! */ + vm_pageout_scan_inactive_emm_throttle_success++; + goto object_locked_inactive; } - vm_page_unlock_queues(); - goto Restart; + if (!m->dirty) + m->dirty = pmap_is_modified(m->phys_page); + if (!m->dirty && !m->precious) { + /* found one ! */ + vm_pageout_scan_inactive_emm_throttle_success++; + goto object_locked_inactive; + } + vm_object_unlock(object); + + need_pause = 0; } + loop_bursted_count++; + goto object_lock_try_inactive_failed; } assert(!m->active && m->inactive); - object = m->object; /* * Try to lock object; since we've got the @@ -1509,11 +1543,12 @@ vm_pageout_scan(void) */ if (!vm_object_lock_try(object)) { +object_lock_try_inactive_failed: /* * Move page to end and continue. * Don't re-issue ticket */ - if(m->zero_fill) { + if (m->zero_fill) { queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq); queue_enter(&vm_page_queue_zf, m, @@ -1524,26 +1559,33 @@ vm_pageout_scan(void) queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq); } + if (local_freeq) { + vm_page_free_list(local_freeq); + + local_freeq = 0; + local_freed = 0; + } + delayed_unlock = 0; vm_page_unlock_queues(); - mutex_pause(); - vm_pageout_inactive_nolock++; + if (need_pause) { + mutex_pause(); + vm_pageout_inactive_nolock++; + } continue; } object_locked_inactive: /* - * Paging out pages of objects which pager is being - * created by another thread must be avoided, because - * this thread may claim for memory, thus leading to a - * possible dead lock between it and the pageout thread - * which will wait for pager creation, if such pages are - * finally chosen. The remaining assumption is that there - * will finally be enough available pages in the inactive - * pool to page out in order to satisfy all memory claimed - * by the thread which concurrently creates the pager. + * Paging out pages of external objects which + * are currently being created must be avoided. 
+ * The pager may claim for memory, thus leading to a + * possible dead lock between it and the pageout thread, + * if such pages are finally chosen. The remaining assumption + * is that there will finally be enough available pages in the + * inactive pool to page out in order to satisfy all memory + * claimed by the thread which concurrently creates the pager. */ - if (!object->pager_initialized && object->pager_created) { /* * Move page to end and continue, hoping that @@ -1570,7 +1612,10 @@ vm_pageout_scan(void) last_page_zf = 0; vm_zf_iterator = 1; } - vm_page_unlock_queues(); + if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } vm_object_unlock(object); vm_pageout_inactive_avoid++; continue; @@ -1595,7 +1640,10 @@ vm_pageout_scan(void) * Leave it off the pageout queues. */ - vm_page_unlock_queues(); + if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } vm_object_unlock(object); vm_pageout_inactive_busy++; continue; @@ -1608,9 +1656,27 @@ vm_pageout_scan(void) if (m->absent || m->error) { vm_pageout_inactive_absent++; reclaim_page: - vm_page_free(m); - vm_page_unlock_queues(); + + if (m->tabled) + vm_page_remove(m); /* clears tabled, object, offset */ + if (m->absent) + vm_object_absent_release(object); + + m->pageq.next = (queue_entry_t)local_freeq; + local_freeq = m; + + if (local_freed++ > LOCAL_FREED_LIMIT) { + vm_page_free_list(local_freeq); + + local_freeq = 0; + local_freed = 0; + } + if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } vm_object_unlock(object); + loop_bursted_count = 0; continue; } @@ -1633,7 +1699,12 @@ vm_pageout_scan(void) m->dump_cleaning = TRUE; vm_page_wire(m); vm_object_unlock(object); - vm_page_unlock_queues(); + + if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } + loop_bursted_count = 0; continue; } @@ -1642,7 +1713,7 @@ vm_pageout_scan(void) * (Fictitious pages are either busy or absent.) */ - if (m->reference || pmap_is_referenced(m->phys_addr)) { + if (m->reference || pmap_is_referenced(m->phys_page)) { vm_pageout_inactive_used++; reactivate_page: #if ADVISORY_PAGEOUT @@ -1654,7 +1725,11 @@ vm_pageout_scan(void) vm_object_unlock(object); vm_page_activate(m); VM_STAT(reactivations++); - vm_page_unlock_queues(); + + if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } continue; } @@ -1695,7 +1770,10 @@ vm_pageout_scan(void) vm_stat_discard_throttle++; #if 0 /* ignore this page and skip to next */ - vm_page_unlock_queues(); + if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } vm_object_unlock(object); continue; #else @@ -1710,7 +1788,11 @@ vm_pageout_scan(void) VM_STAT(reactivations++); discard_offset = m->offset + object->paging_offset; vm_stat_discard_sent++; - vm_page_unlock_queues(); + + if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } vm_object_unlock(object); /* @@ -1732,10 +1814,12 @@ vm_pageout_scan(void) */ m->busy = TRUE; - pmap_page_protect(m->phys_addr, VM_PROT_NONE); + + if (m->no_isync == FALSE) + pmap_page_protect(m->phys_page, VM_PROT_NONE); if (!m->dirty) - m->dirty = pmap_is_modified(m->phys_addr); + m->dirty = pmap_is_modified(m->phys_page); /* * If it's clean and not precious, we can free the page. 
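The delayed_unlock counter that starts appearing here amortizes vm_page_lock_queues(): the scan holds the queues lock across up to DELAYED_UNLOCK_LIMIT pages and only then drops it so contending threads can make progress. A rough sketch of the idiom; the limit value is made up and the pthread mutex merely stands in for the kernel lock.

    #include <pthread.h>

    #define DELAYED_UNLOCK_LIMIT 8          /* assumed; the real constant lives in the kernel */

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    void scan(int npages)
    {
            int delayed_unlock = 0;

            for (int i = 0; i < npages; i++) {
                    if (delayed_unlock == 0)
                            pthread_mutex_lock(&queue_lock);
                    /* ... examine one page while holding the lock ... */
                    if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) {
                            delayed_unlock = 0;     /* periodic drop for fairness */
                            pthread_mutex_unlock(&queue_lock);
                    }
            }
            if (delayed_unlock)
                    pthread_mutex_unlock(&queue_lock);
    }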
*/ @@ -1744,6 +1828,13 @@ vm_pageout_scan(void) vm_pageout_inactive_clean++; goto reclaim_page; } + if (local_freeq) { + vm_page_free_list(local_freeq); + + local_freeq = 0; + local_freed = 0; + } + delayed_unlock = 0; vm_page_unlock_queues(); /* @@ -1752,7 +1843,7 @@ vm_pageout_scan(void) */ if (!object->pager_initialized) - vm_object_collapse(object); + vm_object_collapse(object, (vm_object_offset_t)0); if (!object->pager_initialized) vm_object_pager_create(object); if (!object->pager_initialized) { @@ -1785,10 +1876,7 @@ vm_pageout_scan(void) * to top of loop and look for suitable pages. */ continue; - } - - if ((object->pager_initialized) && - (object->pager == MEMORY_OBJECT_NULL)) { + } else if (object->pager == MEMORY_OBJECT_NULL) { /* * This pager has been destroyed by either * memory_object_destroy or vm_object_destroy, and @@ -1797,19 +1885,15 @@ vm_pageout_scan(void) */ VM_PAGE_FREE(m); vm_object_unlock(object); + loop_bursted_count = 0; continue; } vm_pageout_inactive_dirty++; -/* - if (!object->internal) - burst_count++; -*/ - vm_object_paging_begin(object); - vm_object_unlock(object); vm_pageout_cluster(m); /* flush it */ + vm_object_unlock(object); + loop_bursted_count = 0; } - consider_machine_adjust(); } counter(unsigned int c_vm_pageout_scan_continue = 0;) @@ -1867,6 +1951,20 @@ vm_page_free_reserve( * vm_pageout is the high level pageout daemon. */ +void +vm_pageout_continue(void) +{ + vm_pageout_scan_event_counter++; + vm_pageout_scan(); + /* we hold vm_page_queue_free_lock now */ + assert(vm_page_free_wanted == 0); + assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT); + mutex_unlock(&vm_page_queue_free_lock); + + counter(c_vm_pageout_block++); + thread_block(vm_pageout_continue); + /*NOTREACHED*/ +} void vm_pageout(void) @@ -1878,7 +1976,6 @@ vm_pageout(void) * Set thread privileges. */ self->vm_privilege = TRUE; - stack_privilege(self); s = splsched(); thread_lock(self); @@ -1903,6 +2000,14 @@ vm_pageout(void) if (vm_pageout_empty_wait == 0) vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT; + /* + * Set kernel task to low backing store privileged + * status + */ + task_lock(kernel_task); + kernel_task->priv_flags |= VM_BACKING_STORE_PRIV; + task_unlock(kernel_task); + vm_page_free_count_init = vm_page_free_count; vm_zf_iterator = 0; /* @@ -1912,30 +2017,27 @@ vm_pageout(void) * calling it with an arg of 0 will not change the reserve * but will re-calculate free_min and free_target */ - if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED) - vm_page_free_reserve(VM_PAGE_FREE_RESERVED - vm_page_free_reserved); - else + if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED) { + int scale; + + /* + * HFS Journaling exists on the vm_pageout path... + * it can need to allocate a lot more memory than a + * typical driver/filesystem... if it can't allocate + * the transaction buffer(s), we will deadlock... + * the amount is scaled + * based on the physical footprint of the system, so + * let's double our reserve on systems with > 512Mbytes + */ + if (vm_page_free_count > (512 * 1024 * 1024) / PAGE_SIZE) + scale = 2; + else + scale = 1; + vm_page_free_reserve((VM_PAGE_FREE_RESERVED * scale) - vm_page_free_reserved); + } else vm_page_free_reserve(0); - /* - * vm_pageout_scan will set vm_page_inactive_target. - * - * The pageout daemon is never done, so loop forever. - * We should call vm_pageout_scan at least once each - * time we are woken, even if vm_page_free_wanted is - * zero, to check vm_page_free_target and - * vm_page_inactive_target. 
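The new vm_pageout_continue() above turns the daemon's old for(;;) loop into continuation style: the thread passes itself to thread_block(), so every wakeup re-enters the function from the top with a fresh stack. A toy trampoline that models only that control flow; thread_block() here just records the continuation, which is nothing like the real scheduler.

    #include <stdio.h>

    typedef void (*continuation_t)(void);

    static continuation_t next;             /* what to run after "blocking" */

    static void thread_block(continuation_t cont)
    {
            next = cont;                    /* a real kernel would discard the stack here */
    }

    static int work_left = 3;

    static void pageout_continue(void)
    {
            printf("scan pass, %d passes left\n", work_left--);
            thread_block(pageout_continue); /* sleep, then restart from the top */
    }

    int main(void)
    {
            next = pageout_continue;
            while (work_left > 0)
                    next();                 /* trampoline standing in for the scheduler */
            return 0;
    }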
- */ - for (;;) { - vm_pageout_scan_event_counter++; - vm_pageout_scan(); - /* we hold vm_page_queue_free_lock now */ - assert(vm_page_free_wanted == 0); - assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT); - mutex_unlock(&vm_page_queue_free_lock); - counter(c_vm_pageout_block++); - thread_block((void (*)(void)) 0); - } + vm_pageout_continue(); /*NOTREACHED*/ } @@ -1949,93 +2051,116 @@ vm_pageout_emergency_availability_request() m = (vm_page_t) queue_first(&vm_page_queue_inactive); while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) { - if(m->fictitious) { + + object = m->object; + + if ( !vm_object_lock_try(object)) { m = (vm_page_t) queue_next(&m->pageq); continue; } - if (!m->dirty) - m->dirty = pmap_is_modified(m->phys_addr); - if(m->dirty || m->busy || m->wire_count || m->absent + if ((!object->alive) || (object->pageout)) { + vm_object_unlock(object); + + m = (vm_page_t) queue_next(&m->pageq); + continue; + } + if (m->dirty || m->busy || m->wire_count || m->absent || m->fictitious || m->precious || m->cleaning || m->dump_cleaning || m->error || m->pageout || m->laundry || m->list_req_pending || m->overwriting) { + vm_object_unlock(object); + m = (vm_page_t) queue_next(&m->pageq); continue; } - object = m->object; + m->busy = TRUE; + pmap_page_protect(m->phys_page, VM_PROT_NONE); + m->dirty = pmap_is_modified(m->phys_page); - if (vm_object_lock_try(object)) { - if((!object->alive) || - (object->pageout)) { - vm_object_unlock(object); - m = (vm_page_t) queue_next(&m->pageq); - continue; - } - m->busy = TRUE; - pmap_page_protect(m->phys_addr, VM_PROT_NONE); - vm_page_free(m); + if (m->dirty) { + PAGE_WAKEUP_DONE(m); vm_object_unlock(object); - vm_page_unlock_queues(); - return KERN_SUCCESS; + + m = (vm_page_t) queue_next(&m->pageq); + continue; } - m = (vm_page_t) queue_next(&m->pageq); - } + vm_page_free(m); + vm_object_unlock(object); + vm_page_unlock_queues(); + return KERN_SUCCESS; + } m = (vm_page_t) queue_first(&vm_page_queue_active); while (!queue_end(&vm_page_queue_active, (queue_entry_t) m)) { - if(m->fictitious) { + + object = m->object; + + if ( !vm_object_lock_try(object)) { m = (vm_page_t) queue_next(&m->pageq); continue; } - if (!m->dirty) - m->dirty = pmap_is_modified(m->phys_addr); - if(m->dirty || m->busy || m->wire_count || m->absent + if ((!object->alive) || (object->pageout)) { + vm_object_unlock(object); + + m = (vm_page_t) queue_next(&m->pageq); + continue; + } + if (m->dirty || m->busy || m->wire_count || m->absent || m->fictitious || m->precious || m->cleaning || m->dump_cleaning || m->error || m->pageout || m->laundry || m->list_req_pending || m->overwriting) { + vm_object_unlock(object); + m = (vm_page_t) queue_next(&m->pageq); continue; } - object = m->object; + m->busy = TRUE; + pmap_page_protect(m->phys_page, VM_PROT_NONE); + m->dirty = pmap_is_modified(m->phys_page); - if (vm_object_lock_try(object)) { - if((!object->alive) || - (object->pageout)) { - vm_object_unlock(object); - m = (vm_page_t) queue_next(&m->pageq); - continue; - } - m->busy = TRUE; - pmap_page_protect(m->phys_addr, VM_PROT_NONE); - vm_page_free(m); + if (m->dirty) { + PAGE_WAKEUP_DONE(m); vm_object_unlock(object); - vm_page_unlock_queues(); - return KERN_SUCCESS; + + m = (vm_page_t) queue_next(&m->pageq); + continue; } - m = (vm_page_t) queue_next(&m->pageq); + vm_page_free(m); + vm_object_unlock(object); + vm_page_unlock_queues(); + + return KERN_SUCCESS; } vm_page_unlock_queues(); + return KERN_FAILURE; } static upl_t upl_create( - boolean_t internal, + int flags, 
vm_size_t size) { upl_t upl; + int page_field_size; /* bit field in word size buf */ - if(internal) { + page_field_size = 0; + if (flags & UPL_CREATE_LITE) { + page_field_size = ((size/PAGE_SIZE) + 7) >> 3; + page_field_size = (page_field_size + 3) & 0xFFFFFFFC; + } + if(flags & UPL_CREATE_INTERNAL) { upl = (upl_t)kalloc(sizeof(struct upl) - + (sizeof(struct upl_page_info)*(size/page_size))); + + (sizeof(struct upl_page_info)*(size/PAGE_SIZE)) + + page_field_size); } else { - upl = (upl_t)kalloc(sizeof(struct upl)); + upl = (upl_t)kalloc(sizeof(struct upl) + page_field_size); } upl->flags = 0; upl->src_object = NULL; @@ -2055,32 +2180,45 @@ static void upl_destroy( upl_t upl) { + int page_field_size; /* bit field in word size buf */ #ifdef UBC_DEBUG { upl_t upl_ele; - vm_object_lock(upl->map_object->shadow); - queue_iterate(&upl->map_object->shadow->uplq, - upl_ele, upl_t, uplq) { + vm_object_t object; + if (upl->map_object->pageout) { + object = upl->map_object->shadow; + } else { + object = upl->map_object; + } + vm_object_lock(object); + queue_iterate(&object->uplq, upl_ele, upl_t, uplq) { if(upl_ele == upl) { - queue_remove(&upl->map_object->shadow->uplq, - upl_ele, upl_t, uplq); + queue_remove(&object->uplq, + upl_ele, upl_t, uplq); break; } } - vm_object_unlock(upl->map_object->shadow); + vm_object_unlock(object); } #endif /* UBC_DEBUG */ -#ifdef notdefcdy - if(!(upl->flags & UPL_DEVICE_MEMORY)) -#endif + /* drop a reference on the map_object whether or */ + /* not a pageout object is inserted */ + if(upl->map_object->pageout) vm_object_deallocate(upl->map_object); + + page_field_size = 0; + if (upl->flags & UPL_LITE) { + page_field_size = ((upl->size/PAGE_SIZE) + 7) >> 3; + page_field_size = (page_field_size + 3) & 0xFFFFFFFC; + } if(upl->flags & UPL_INTERNAL) { kfree((vm_offset_t)upl, sizeof(struct upl) + - (sizeof(struct upl_page_info) * (upl->size/page_size))); + (sizeof(struct upl_page_info) * (upl->size/PAGE_SIZE)) + + page_field_size); } else { - kfree((vm_offset_t)upl, sizeof(struct upl)); + kfree((vm_offset_t)upl, sizeof(struct upl) + page_field_size); } } @@ -2163,19 +2301,20 @@ vm_object_upl_request( vm_size_t xfer_size = size; boolean_t do_m_lock = FALSE; boolean_t dirty; + boolean_t hw_dirty; upl_t upl = NULL; int entry; boolean_t encountered_lrp = FALSE; vm_page_t alias_page = NULL; int page_ticket; - + wpl_array_t lite_list; page_ticket = (cntrl_flags & UPL_PAGE_TICKET_MASK) >> UPL_PAGE_TICKET_SHIFT; - if(((size/page_size) > MAX_UPL_TRANSFER) && !object->phys_contiguous) { - size = MAX_UPL_TRANSFER * page_size; + if(((size/PAGE_SIZE) > MAX_UPL_TRANSFER) && !object->phys_contiguous) { + size = MAX_UPL_TRANSFER * PAGE_SIZE; } if(cntrl_flags & UPL_SET_INTERNAL) @@ -2192,63 +2331,120 @@ vm_object_upl_request( if((cntrl_flags & UPL_COPYOUT_FROM) && (upl_ptr == NULL)) { return KERN_SUCCESS; } + if(upl_ptr) { if(cntrl_flags & UPL_SET_INTERNAL) { - upl = upl_create(TRUE, size); - user_page_list = (upl_page_info_t *) - (((vm_offset_t)upl) + sizeof(struct upl)); - upl->flags |= UPL_INTERNAL; + if(cntrl_flags & UPL_SET_LITE) { + vm_offset_t page_field_size; + upl = upl_create( + UPL_CREATE_INTERNAL | UPL_CREATE_LITE, + size); + user_page_list = (upl_page_info_t *) + (((vm_offset_t)upl) + sizeof(struct upl)); + lite_list = (wpl_array_t) + (((vm_offset_t)user_page_list) + + ((size/PAGE_SIZE) * + sizeof(upl_page_info_t))); + page_field_size = ((size/PAGE_SIZE) + 7) >> 3; + page_field_size = + (page_field_size + 3) & 0xFFFFFFFC; + bzero((char *)lite_list, page_field_size); + 
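upl_create() lays an internal UPL out as a single allocation: the upl structure itself, then one upl_page_info per page, then the lite bitmap, which is one bit per page ((size/PAGE_SIZE + 7) >> 3 bytes) rounded up to a 4-byte multiple. A stand-alone sketch of that carving, with stand-in structure names.

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    struct upl_info  { int flags; };                /* stand-in for struct upl */
    struct page_info { unsigned long phys; };       /* stand-in for upl_page_info */

    void *make_internal_upl(size_t size)
    {
            size_t pages = size / PAGE_SIZE;
            size_t field = (pages + 7) >> 3;        /* one bit per page, in bytes */
            field = (field + 3) & ~(size_t)3;       /* round to a 4-byte multiple */

            char *buf = malloc(sizeof(struct upl_info) +
                               pages * sizeof(struct page_info) + field);
            if (buf == NULL)
                    return NULL;

            struct page_info *list = (struct page_info *)(buf + sizeof(struct upl_info));
            unsigned char *bitmap = (unsigned char *)(list + pages);
            memset(bitmap, 0, field);               /* the patch bzero()s the lite list too */
            return buf;
    }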
upl->flags = + UPL_LITE | UPL_INTERNAL; + } else { + upl = upl_create(UPL_CREATE_INTERNAL, size); + user_page_list = (upl_page_info_t *) + (((vm_offset_t)upl) + + sizeof(struct upl)); + upl->flags = UPL_INTERNAL; + } } else { - upl = upl_create(FALSE, size); + if(cntrl_flags & UPL_SET_LITE) { + vm_offset_t page_field_size; + upl = upl_create(UPL_CREATE_LITE, size); + lite_list = (wpl_array_t) + (((vm_offset_t)upl) + sizeof(struct upl)); + page_field_size = ((size/PAGE_SIZE) + 7) >> 3; + page_field_size = + (page_field_size + 3) & 0xFFFFFFFC; + bzero((char *)lite_list, page_field_size); + upl->flags = UPL_LITE; + } else { + upl = upl_create(UPL_CREATE_EXTERNAL, size); + upl->flags = 0; + } } + if(object->phys_contiguous) { - upl->size = size; + upl->map_object = object; + /* don't need any shadow mappings for this one */ + /* since it is already I/O memory */ + upl->flags |= UPL_DEVICE_MEMORY; + + vm_object_lock(object); + vm_object_paging_begin(object); + vm_object_unlock(object); + + /* paging_in_progress protects paging_offset */ upl->offset = offset + object->paging_offset; + upl->size = size; *upl_ptr = upl; if(user_page_list) { user_page_list[0].phys_addr = - offset + object->shadow_offset; + (offset + object->shadow_offset)>>12; user_page_list[0].device = TRUE; } + + if(page_list_count != NULL) { + if (upl->flags & UPL_INTERNAL) { + *page_list_count = 0; + } else { + *page_list_count = 1; + } + } + return KERN_SUCCESS; + } + if(user_page_list) + user_page_list[0].device = FALSE; + + if(cntrl_flags & UPL_SET_LITE) { + upl->map_object = object; + } else { upl->map_object = vm_object_allocate(size); vm_object_lock(upl->map_object); upl->map_object->shadow = object; - upl->flags = UPL_DEVICE_MEMORY | UPL_INTERNAL; upl->map_object->pageout = TRUE; upl->map_object->can_persist = FALSE; - upl->map_object->copy_strategy - = MEMORY_OBJECT_COPY_NONE; + upl->map_object->copy_strategy = + MEMORY_OBJECT_COPY_NONE; upl->map_object->shadow_offset = offset; + upl->map_object->wimg_bits = object->wimg_bits; vm_object_unlock(upl->map_object); - return KERN_SUCCESS; } - - - upl->map_object = vm_object_allocate(size); - vm_object_lock(upl->map_object); - upl->map_object->shadow = object; + } + if (!(cntrl_flags & UPL_SET_LITE)) { + VM_PAGE_GRAB_FICTITIOUS(alias_page); + } + vm_object_lock(object); + vm_object_paging_begin(object); + + /* we can lock in the paging_offset once paging_in_progress is set */ + if(upl_ptr) { upl->size = size; upl->offset = offset + object->paging_offset; - upl->map_object->pageout = TRUE; - upl->map_object->can_persist = FALSE; - upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; - upl->map_object->shadow_offset = offset; - upl->map_object->wimg_bits = object->wimg_bits; - vm_object_unlock(upl->map_object); *upl_ptr = upl; - } - VM_PAGE_GRAB_FICTITIOUS(alias_page); - vm_object_lock(object); #ifdef UBC_DEBUG - if(upl_ptr) queue_enter(&object->uplq, upl, upl_t, uplq); #endif /* UBC_DEBUG */ - vm_object_paging_begin(object); + } + entry = 0; if(cntrl_flags & UPL_COPYOUT_FROM) { upl->flags |= UPL_PAGE_SYNC_DONE; + while (xfer_size) { - if(alias_page == NULL) { + if((alias_page == NULL) && + !(cntrl_flags & UPL_SET_LITE)) { vm_object_unlock(object); VM_PAGE_GRAB_FICTITIOUS(alias_page); vm_object_lock(object); @@ -2261,26 +2457,27 @@ vm_object_upl_request( (dst_page->wire_count != 0 && !dst_page->pageout) || ((!(dst_page->dirty || dst_page->precious || - pmap_is_modified(dst_page->phys_addr))) + pmap_is_modified(dst_page->phys_page))) && (cntrl_flags & UPL_RET_ONLY_DIRTY)) 
|| ((!(dst_page->inactive)) && (dst_page->page_ticket != page_ticket) && ((dst_page->page_ticket+1) != page_ticket) - && (cntrl_flags & UPL_PAGEOUT)) || - ((!dst_page->list_req_pending) && + && (cntrl_flags & UPL_FOR_PAGEOUT)) || + ((!dst_page->list_req_pending) && (cntrl_flags & UPL_FOR_PAGEOUT) && (cntrl_flags & UPL_RET_ONLY_DIRTY) && - pmap_is_referenced(dst_page->phys_addr))) { - if(user_page_list) + pmap_is_referenced(dst_page->phys_page))) { + if(user_page_list) { user_page_list[entry].phys_addr = 0; + } } else { if(dst_page->busy && (!(dst_page->list_req_pending && dst_page->pageout))) { if(cntrl_flags & UPL_NOBLOCK) { - if(user_page_list) - user_page_list[entry] - .phys_addr = 0; + if(user_page_list) { + user_page_list[entry].phys_addr = 0; + } entry++; dst_offset += PAGE_SIZE_64; xfer_size -= PAGE_SIZE; @@ -2295,8 +2492,9 @@ vm_object_upl_request( if((dst_page->cleaning || dst_page->absent || dst_page->wire_count != 0) && !dst_page->list_req_pending) { - if(user_page_list) + if(user_page_list) { user_page_list[entry].phys_addr = 0; + } entry++; dst_offset += PAGE_SIZE_64; xfer_size -= PAGE_SIZE; @@ -2306,9 +2504,7 @@ vm_object_upl_request( /* original object and its prodigy */ vm_page_lock_queues(); - if( !(cntrl_flags & UPL_FILE_IO)) { - pmap_page_protect(dst_page->phys_addr, VM_PROT_NONE); - } + /* pageout statistics gathering. count */ /* all the pages we will page out that */ /* were not counted in the initial */ @@ -2336,13 +2532,44 @@ vm_object_upl_request( dst_page->busy = FALSE; dst_page->cleaning = FALSE; - dirty = pmap_is_modified(dst_page->phys_addr); - dirty = dirty ? TRUE : dst_page->dirty; - - /* use pageclean setup, it is more convenient */ - /* even for the pageout cases here */ - vm_pageclean_setup(dst_page, alias_page, - upl->map_object, size - xfer_size); + hw_dirty = pmap_is_modified(dst_page->phys_page); + dirty = hw_dirty ? TRUE : dst_page->dirty; + + if(cntrl_flags & UPL_SET_LITE) { + int pg_num; + pg_num = (dst_offset-offset)/PAGE_SIZE; + lite_list[pg_num>>5] |= + 1 << (pg_num & 31); + if (hw_dirty) + pmap_clear_modify(dst_page->phys_page); + /* + * Record that this page has been + * written out + */ +#if MACH_PAGEMAP + vm_external_state_set( + object->existence_map, + dst_page->offset); +#endif /*MACH_PAGEMAP*/ + + /* + * Mark original page as cleaning + * in place. 
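Note the hw_dirty/dirty split introduced in this hunk: pmap_is_modified() reports the hardware modify bit, dst_page->dirty is the software state, and the page counts as dirty if either source says so. The same rule reduced to a stand-alone helper over a stand-in structure.

    #include <stdbool.h>

    struct page {
            bool sw_dirty;          /* set by software when it dirties the page */
            bool hw_modified;       /* stand-in for the pmap modify bit */
    };

    /* Either source is sufficient to make the page dirty. */
    static bool page_is_dirty(const struct page *p)
    {
            return p->hw_modified ? true : p->sw_dirty;
    }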
+ */ + dst_page->cleaning = TRUE; + dst_page->dirty = TRUE; + dst_page->precious = FALSE; + } else { + /* use pageclean setup, it is more */ + /* convenient even for the pageout */ + /* cases here */ + vm_pageclean_setup(dst_page, + alias_page, upl->map_object, + size - xfer_size); + + alias_page->absent = FALSE; + alias_page = NULL; + } if(!dirty) { dst_page->dirty = FALSE; @@ -2352,10 +2579,8 @@ vm_object_upl_request( if(dst_page->pageout) dst_page->busy = TRUE; - alias_page->absent = FALSE; - alias_page = NULL; if((!(cntrl_flags & UPL_CLEAN_IN_PLACE)) - || (cntrl_flags & UPL_PAGEOUT)) { + || (cntrl_flags & UPL_FOR_PAGEOUT)) { /* deny access to the target page */ /* while it is being worked on */ if((!dst_page->pageout) && @@ -2367,7 +2592,7 @@ vm_object_upl_request( } if(user_page_list) { user_page_list[entry].phys_addr - = dst_page->phys_addr; + = dst_page->phys_page; user_page_list[entry].dirty = dst_page->dirty; user_page_list[entry].pageout = @@ -2377,7 +2602,6 @@ vm_object_upl_request( user_page_list[entry].precious = dst_page->precious; } - vm_page_unlock_queues(); } entry++; @@ -2386,12 +2610,14 @@ vm_object_upl_request( } } else { while (xfer_size) { - if(alias_page == NULL) { + if((alias_page == NULL) && + !(cntrl_flags & UPL_SET_LITE)) { vm_object_unlock(object); VM_PAGE_GRAB_FICTITIOUS(alias_page); vm_object_lock(object); } dst_page = vm_page_lookup(object, dst_offset); + if(dst_page != VM_PAGE_NULL) { if((cntrl_flags & UPL_RET_ONLY_ABSENT) && !((dst_page->list_req_pending) @@ -2400,8 +2626,9 @@ vm_object_upl_request( /* requests. we want to grab */ /* pages around some which are */ /* already present. */ - if(user_page_list) + if(user_page_list) { user_page_list[entry].phys_addr = 0; + } entry++; dst_offset += PAGE_SIZE_64; xfer_size -= PAGE_SIZE; @@ -2419,9 +2646,11 @@ vm_object_upl_request( /* dump the fictitious page */ dst_page->list_req_pending = FALSE; dst_page->clustered = FALSE; + vm_page_lock_queues(); vm_page_free(dst_page); vm_page_unlock_queues(); + } else if ((dst_page->absent && dst_page->list_req_pending)) { /* the default_pager case */ @@ -2442,9 +2671,9 @@ vm_object_upl_request( * physical page by asking the * backing device. */ - if(user_page_list) - user_page_list[entry] - .phys_addr = 0; + if(user_page_list) { + user_page_list[entry].phys_addr = 0; + } entry++; dst_offset += PAGE_SIZE_64; xfer_size -= PAGE_SIZE; @@ -2492,16 +2721,49 @@ vm_object_upl_request( PAGE_SLEEP(object, dst_page, THREAD_UNINT); continue; } - vm_page_lock_queues(); + if( !(cntrl_flags & UPL_FILE_IO)) { - pmap_page_protect(dst_page->phys_addr, VM_PROT_NONE); + pmap_page_protect(dst_page->phys_page, VM_PROT_NONE); + } + hw_dirty = pmap_is_modified(dst_page->phys_page); + dirty = hw_dirty ? TRUE : dst_page->dirty; + + if(cntrl_flags & UPL_SET_LITE) { + int pg_num; + pg_num = (dst_offset-offset)/PAGE_SIZE; + lite_list[pg_num>>5] |= + 1 << (pg_num & 31); + if (hw_dirty) + pmap_clear_modify(dst_page->phys_page); + /* + * Record that this page has been + * written out + */ +#if MACH_PAGEMAP + vm_external_state_set( + object->existence_map, + dst_page->offset); +#endif /*MACH_PAGEMAP*/ + + /* + * Mark original page as cleaning + * in place. 
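The lite-list indexing used throughout these hunks treats the bitmap as 32-bit words: page pg_num lives in word pg_num >> 5 (pg_num / 32) at bit pg_num & 31 (pg_num % 32). The three primitive operations, written in isolation.

    #include <stdint.h>

    static inline void wpl_set(uint32_t *wpl, unsigned pg)
    {
            wpl[pg >> 5] |= 1u << (pg & 31);        /* word = pg/32, bit = pg%32 */
    }

    static inline int wpl_test(const uint32_t *wpl, unsigned pg)
    {
            return (wpl[pg >> 5] >> (pg & 31)) & 1u;
    }

    static inline void wpl_clear(uint32_t *wpl, unsigned pg)
    {
            wpl[pg >> 5] &= ~(1u << (pg & 31));
    }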
+ */ + dst_page->cleaning = TRUE; + dst_page->dirty = TRUE; + dst_page->precious = FALSE; + } else { + /* use pageclean setup, it is more */ + /* convenient even for the pageout */ + /* cases here */ + vm_pageclean_setup(dst_page, + alias_page, upl->map_object, + size - xfer_size); + + alias_page->absent = FALSE; + alias_page = NULL; } - dirty = pmap_is_modified(dst_page->phys_addr); - dirty = dirty ? TRUE : dst_page->dirty; - - vm_pageclean_setup(dst_page, alias_page, - upl->map_object, size - xfer_size); if(cntrl_flags & UPL_CLEAN_IN_PLACE) { /* clean in place for read implies */ @@ -2526,16 +2788,16 @@ vm_object_upl_request( } else { vm_page_wire(dst_page); } - /* expect the page to be used */ + /* + * expect the page to be used + */ dst_page->reference = TRUE; dst_page->precious = (cntrl_flags & UPL_PRECIOUS) ? TRUE : FALSE; - alias_page->absent = FALSE; - alias_page = NULL; if(user_page_list) { user_page_list[entry].phys_addr - = dst_page->phys_addr; + = dst_page->phys_page; user_page_list[entry].dirty = dst_page->dirty; user_page_list[entry].pageout = @@ -2552,7 +2814,6 @@ vm_object_upl_request( xfer_size -= PAGE_SIZE; } } - if (upl->flags & UPL_INTERNAL) { if(page_list_count != NULL) *page_list_count = 0; @@ -2690,8 +2951,9 @@ vm_object_super_upl_request( if(object->paging_offset > offset) return KERN_FAILURE; + assert(object->paging_in_progress); offset = offset - object->paging_offset; - if(cntrl_flags & UPL_PAGEOUT) { + if(cntrl_flags & UPL_FOR_PAGEOUT) { if((target_page = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { ticket = target_page->page_ticket; @@ -2759,7 +3021,82 @@ vm_upl_map( return KERN_FAILURE; } - offset = 0; /* Always map the entire object */ + if((!(upl->map_object->pageout)) && + !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || + (upl->map_object->phys_contiguous))) { + vm_object_t object; + vm_page_t alias_page; + vm_object_offset_t new_offset; + int pg_num; + wpl_array_t lite_list; + + if(upl->flags & UPL_INTERNAL) { + lite_list = (wpl_array_t) + ((((vm_offset_t)upl) + sizeof(struct upl)) + + ((upl->size/PAGE_SIZE) + * sizeof(upl_page_info_t))); + } else { + lite_list = (wpl_array_t) + (((vm_offset_t)upl) + sizeof(struct upl)); + } + object = upl->map_object; + upl->map_object = vm_object_allocate(upl->size); + vm_object_lock(upl->map_object); + upl->map_object->shadow = object; + upl->map_object->pageout = TRUE; + upl->map_object->can_persist = FALSE; + upl->map_object->copy_strategy = + MEMORY_OBJECT_COPY_NONE; + upl->map_object->shadow_offset = + upl->offset - object->paging_offset; + upl->map_object->wimg_bits = object->wimg_bits; + vm_object_unlock(upl->map_object); + offset = upl->map_object->shadow_offset; + new_offset = 0; + size = upl->size; + vm_object_lock(object); + while(size) { + pg_num = (new_offset)/PAGE_SIZE; + if(lite_list[pg_num>>5] & (1 << (pg_num & 31))) { + vm_object_unlock(object); + VM_PAGE_GRAB_FICTITIOUS(alias_page); + vm_object_lock(object); + m = vm_page_lookup(object, offset); + if (m == VM_PAGE_NULL) { + panic("vm_upl_map: page missing\n"); + } + + vm_object_paging_begin(object); + + /* + * Convert the fictitious page to a private + * shadow of the real page. 
+ */ + assert(alias_page->fictitious); + alias_page->fictitious = FALSE; + alias_page->private = TRUE; + alias_page->pageout = TRUE; + alias_page->phys_page = m->phys_page; + vm_page_wire(alias_page); + + vm_page_insert(alias_page, + upl->map_object, new_offset); + assert(!alias_page->wanted); + alias_page->busy = FALSE; + alias_page->absent = FALSE; + } + + size -= PAGE_SIZE; + offset += PAGE_SIZE_64; + new_offset += PAGE_SIZE_64; + } + vm_object_unlock(object); + } + if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || upl->map_object->phys_contiguous) + offset = upl->offset - upl->map_object->paging_offset; + else + offset = 0; + size = upl->size; vm_object_lock(upl->map_object); @@ -2839,178 +3176,303 @@ upl_commit_range( boolean_t *empty) { vm_size_t xfer_size = size; - vm_object_t shadow_object = upl->map_object->shadow; + vm_object_t shadow_object; vm_object_t object = upl->map_object; vm_object_offset_t target_offset; - vm_object_offset_t page_offset; int entry; + wpl_array_t lite_list; + int occupied; + int delayed_unlock = 0; + boolean_t shadow_internal; *empty = FALSE; if (upl == UPL_NULL) return KERN_INVALID_ARGUMENT; + if (count == 0) page_list = NULL; + if(object->pageout) { + shadow_object = object->shadow; + } else { + shadow_object = object; + } + upl_lock(upl); - if(upl->flags & UPL_DEVICE_MEMORY) { + + if (upl->flags & UPL_CLEAR_DIRTY) + flags |= UPL_COMMIT_CLEAR_DIRTY; + + if (upl->flags & UPL_DEVICE_MEMORY) { xfer_size = 0; } else if ((offset + size) > upl->size) { upl_unlock(upl); return KERN_FAILURE; } + if (upl->flags & UPL_INTERNAL) { + lite_list = (wpl_array_t) + ((((vm_offset_t)upl) + sizeof(struct upl)) + + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t))); + } else { + lite_list = (wpl_array_t) + (((vm_offset_t)upl) + sizeof(struct upl)); + } + vm_object_lock(shadow_object); + shadow_internal = shadow_object->internal; entry = offset/PAGE_SIZE; target_offset = (vm_object_offset_t)offset; + while(xfer_size) { vm_page_t t,m; upl_page_info_t *p; - if((t = vm_page_lookup(object, target_offset)) != NULL) { + m = VM_PAGE_NULL; - t->pageout = FALSE; - page_offset = t->offset; - VM_PAGE_FREE(t); - t = VM_PAGE_NULL; - m = vm_page_lookup(shadow_object, - page_offset + object->shadow_offset); - if(m != VM_PAGE_NULL) { - vm_object_paging_end(shadow_object); - vm_page_lock_queues(); - if ((upl->flags & UPL_CLEAR_DIRTY) || - (flags & UPL_COMMIT_CLEAR_DIRTY)) { - pmap_clear_modify(m->phys_addr); - m->dirty = FALSE; - } - if(page_list) { - p = &(page_list[entry]); - if(p->phys_addr && p->pageout && !m->pageout) { - m->busy = TRUE; - m->pageout = TRUE; - vm_page_wire(m); - } else if (page_list[entry].phys_addr && - !p->pageout && m->pageout && - !m->dump_cleaning) { - m->pageout = FALSE; - m->absent = FALSE; - m->overwriting = FALSE; - vm_page_unwire(m); - PAGE_WAKEUP_DONE(m); + if (upl->flags & UPL_LITE) { + int pg_num; + + pg_num = target_offset/PAGE_SIZE; + + if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) { + lite_list[pg_num>>5] &= ~(1 << (pg_num & 31)); + m = vm_page_lookup(shadow_object, + target_offset + (upl->offset - + shadow_object->paging_offset)); + } + } + if (object->pageout) { + if ((t = vm_page_lookup(object, target_offset)) != NULL) { + t->pageout = FALSE; + + if (delayed_unlock) { + delayed_unlock = 0; + vm_page_unlock_queues(); } + VM_PAGE_FREE(t); + + if (m == NULL) { + m = vm_page_lookup( + shadow_object, + target_offset + + object->shadow_offset); + } + if (m != VM_PAGE_NULL) + vm_object_paging_end(m->object); + } + } + if (m != VM_PAGE_NULL) { + + 
if (upl->flags & UPL_IO_WIRE) { + + if (delayed_unlock == 0) + vm_page_lock_queues(); + + vm_page_unwire(m); + + if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } + if (page_list) { page_list[entry].phys_addr = 0; - } - m->dump_cleaning = FALSE; - if(m->laundry) { - vm_page_laundry_count--; - m->laundry = FALSE; - if (vm_page_laundry_count < vm_page_laundry_min) { - vm_page_laundry_min = 0; - thread_wakeup((event_t) - &vm_page_laundry_count); - } - } - if(m->pageout) { - m->cleaning = FALSE; - m->pageout = FALSE; + } + if (flags & UPL_COMMIT_SET_DIRTY) { + m->dirty = TRUE; + } else if (flags & UPL_COMMIT_CLEAR_DIRTY) { + m->dirty = FALSE; + pmap_clear_modify(m->phys_page); + } + if (flags & UPL_COMMIT_INACTIVATE) { + m->reference = FALSE; + vm_page_deactivate(m); + pmap_clear_reference(m->phys_page); + } + target_offset += PAGE_SIZE_64; + xfer_size -= PAGE_SIZE; + entry++; + continue; + } + if (delayed_unlock == 0) + vm_page_lock_queues(); + /* + * make sure to clear the hardware + * modify or reference bits before + * releasing the BUSY bit on this page + * otherwise we risk losing a legitimate + * change of state + */ + if (flags & UPL_COMMIT_CLEAR_DIRTY) { + m->dirty = FALSE; + pmap_clear_modify(m->phys_page); + } + if (flags & UPL_COMMIT_INACTIVATE) + pmap_clear_reference(m->phys_page); + + if (page_list) { + p = &(page_list[entry]); + if(p->phys_addr && p->pageout && !m->pageout) { + m->busy = TRUE; + m->pageout = TRUE; + vm_page_wire(m); + } else if (page_list[entry].phys_addr && + !p->pageout && m->pageout && + !m->dump_cleaning) { + m->pageout = FALSE; + m->absent = FALSE; + m->overwriting = FALSE; + vm_page_unwire(m); + PAGE_WAKEUP_DONE(m); + } + page_list[entry].phys_addr = 0; + } + m->dump_cleaning = FALSE; + if(m->laundry) { + if (!shadow_internal) + vm_page_burst_count--; + vm_page_laundry_count--; + m->laundry = FALSE; + if (vm_page_laundry_count < vm_page_laundry_min) { + vm_page_laundry_min = 0; + thread_wakeup((event_t) + &vm_page_laundry_count); + } + } + if(m->pageout) { + m->cleaning = FALSE; + m->pageout = FALSE; #if MACH_CLUSTER_STATS - if (m->wanted) vm_pageout_target_collisions++; + if (m->wanted) vm_pageout_target_collisions++; #endif - pmap_page_protect(m->phys_addr, VM_PROT_NONE); - m->dirty = pmap_is_modified(m->phys_addr); - if(m->dirty) { - CLUSTER_STAT( - vm_pageout_target_page_dirtied++;) - vm_page_unwire(m);/* reactivates */ - VM_STAT(reactivations++); - PAGE_WAKEUP_DONE(m); - } else { - CLUSTER_STAT( - vm_pageout_target_page_freed++;) - vm_page_free(m);/* clears busy, etc. */ - VM_STAT(pageouts++); - } - vm_page_unlock_queues(); - target_offset += PAGE_SIZE_64; - xfer_size -= PAGE_SIZE; - entry++; - continue; - } - if (flags & UPL_COMMIT_INACTIVATE) { - vm_page_deactivate(m); - m->reference = FALSE; - pmap_clear_reference(m->phys_addr); - } else if (!m->active && !m->inactive) { - if (m->reference) - vm_page_activate(m); - else - vm_page_deactivate(m); - } + pmap_page_protect(m->phys_page, VM_PROT_NONE); + m->dirty = pmap_is_modified(m->phys_page); + if(m->dirty) { + CLUSTER_STAT( + vm_pageout_target_page_dirtied++;) + vm_page_unwire(m);/* reactivates */ + VM_STAT(reactivations++); + PAGE_WAKEUP_DONE(m); + } else { + CLUSTER_STAT( + vm_pageout_target_page_freed++;) + vm_page_free(m);/* clears busy, etc. 
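The laundry bookkeeping in this hunk is a counting throttle: each completed pageout decrements vm_page_laundry_count, and when the count falls below vm_page_laundry_min the waiter is woken and the threshold disarmed by zeroing it. A compressed user-space analogue with a condition variable; the kernel instead issues thread_wakeup() on the counter's address.

    #include <pthread.h>

    static pthread_mutex_t throttle_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  throttle_cv   = PTHREAD_COND_INITIALIZER;
    static int laundry_count;
    static int laundry_min;                 /* armed by the thread that throttles itself */

    void laundry_done(void)                 /* one in-flight pageout completed */
    {
            pthread_mutex_lock(&throttle_lock);
            if (--laundry_count < laundry_min) {
                    laundry_min = 0;        /* disarm, exactly as the patch does */
                    pthread_cond_broadcast(&throttle_cv);
            }
            pthread_mutex_unlock(&throttle_lock);
    }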
*/ + + if (page_list[entry].dirty) + VM_STAT(pageouts++); + } + if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } + target_offset += PAGE_SIZE_64; + xfer_size -= PAGE_SIZE; + entry++; + continue; + } #if MACH_CLUSTER_STATS - m->dirty = pmap_is_modified(m->phys_addr); + m->dirty = pmap_is_modified(m->phys_page); - if (m->dirty) vm_pageout_cluster_dirtied++; - else vm_pageout_cluster_cleaned++; - if (m->wanted) vm_pageout_cluster_collisions++; + if (m->dirty) vm_pageout_cluster_dirtied++; + else vm_pageout_cluster_cleaned++; + if (m->wanted) vm_pageout_cluster_collisions++; #else - m->dirty = 0; + m->dirty = 0; #endif - if((m->busy) && (m->cleaning)) { - /* the request_page_list case */ - if(m->absent) { - m->absent = FALSE; - if(shadow_object->absent_count == 1) + if((m->busy) && (m->cleaning)) { + /* the request_page_list case */ + if(m->absent) { + m->absent = FALSE; + if(shadow_object->absent_count == 1) vm_object_absent_release(shadow_object); - else + else shadow_object->absent_count--; - } - m->overwriting = FALSE; - m->busy = FALSE; - m->dirty = FALSE; - } - else if (m->overwriting) { - /* alternate request page list, write to - /* page_list case. Occurs when the original - /* page was wired at the time of the list - /* request */ - assert(m->wire_count != 0); - vm_page_unwire(m);/* reactivates */ - m->overwriting = FALSE; - } - m->cleaning = FALSE; - /* It is a part of the semantic of COPYOUT_FROM */ - /* UPLs that a commit implies cache sync */ - /* between the vm page and the backing store */ - /* this can be used to strip the precious bit */ - /* as well as clean */ - if (upl->flags & UPL_PAGE_SYNC_DONE) - m->precious = FALSE; - - if (flags & UPL_COMMIT_SET_DIRTY) { - m->dirty = TRUE; - } - /* - * Wakeup any thread waiting for the page to be un-cleaning. - */ - PAGE_WAKEUP(m); - vm_page_unlock_queues(); - } + m->overwriting = FALSE; + m->busy = FALSE; + m->dirty = FALSE; + } else if (m->overwriting) { + /* alternate request page list, write to + /* page_list case. Occurs when the original + /* page was wired at the time of the list + /* request */ + assert(m->wire_count != 0); + vm_page_unwire(m);/* reactivates */ + m->overwriting = FALSE; + } + m->cleaning = FALSE; + + /* It is a part of the semantic of COPYOUT_FROM */ + /* UPLs that a commit implies cache sync */ + /* between the vm page and the backing store */ + /* this can be used to strip the precious bit */ + /* as well as clean */ + if (upl->flags & UPL_PAGE_SYNC_DONE) + m->precious = FALSE; + + if (flags & UPL_COMMIT_SET_DIRTY) + m->dirty = TRUE; + + if (flags & UPL_COMMIT_INACTIVATE) { + m->reference = FALSE; + vm_page_deactivate(m); + } else if (!m->active && !m->inactive) { + if (m->reference) + vm_page_activate(m); + else + vm_page_deactivate(m); + } + /* + * Wakeup any thread waiting for the page to be un-cleaning. 
+ */ + PAGE_WAKEUP(m); + + if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } } target_offset += PAGE_SIZE_64; xfer_size -= PAGE_SIZE; entry++; } + if (delayed_unlock) + vm_page_unlock_queues(); + + occupied = 1; + + if (upl->flags & UPL_DEVICE_MEMORY) { + occupied = 0; + } else if (upl->flags & UPL_LITE) { + int pg_num; + int i; + pg_num = upl->size/PAGE_SIZE; + pg_num = (pg_num + 31) >> 5; + occupied = 0; + for(i= 0; i<pg_num; i++) { + if(lite_list[i] != 0) { + occupied = 1; + break; + } + } + } else { + if(queue_empty(&upl->map_object->memq)) { + occupied = 0; + } + } - vm_object_unlock(shadow_object); - if(flags & UPL_COMMIT_NOTIFY_EMPTY) { - if((upl->flags & UPL_DEVICE_MEMORY) - || (queue_empty(&upl->map_object->memq))) + if(occupied == 0) { + if(upl->flags & UPL_COMMIT_NOTIFY_EMPTY) { *empty = TRUE; + } + if(object == shadow_object) + vm_object_paging_end(shadow_object); } + vm_object_unlock(shadow_object); upl_unlock(upl); return KERN_SUCCESS; @@ -3025,17 +3487,32 @@ upl_abort_range( boolean_t *empty) { vm_size_t xfer_size = size; - vm_object_t shadow_object = upl->map_object->shadow; + vm_object_t shadow_object; vm_object_t object = upl->map_object; vm_object_offset_t target_offset; vm_object_offset_t page_offset; int entry; + wpl_array_t lite_list; + int occupied; + boolean_t shadow_internal; *empty = FALSE; if (upl == UPL_NULL) return KERN_INVALID_ARGUMENT; + if (upl->flags & UPL_IO_WIRE) { + return upl_commit_range(upl, + offset, size, 0, + NULL, 0, empty); + } + + if(object->pageout) { + shadow_object = object->shadow; + } else { + shadow_object = object; + } + upl_lock(upl); if(upl->flags & UPL_DEVICE_MEMORY) { xfer_size = 0; @@ -3045,6 +3522,16 @@ upl_abort_range( } vm_object_lock(shadow_object); + shadow_internal = shadow_object->internal; + + if(upl->flags & UPL_INTERNAL) { + lite_list = (wpl_array_t) + ((((vm_offset_t)upl) + sizeof(struct upl)) + + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t))); + } else { + lite_list = (wpl_array_t) + (((vm_offset_t)upl) + sizeof(struct upl)); + } entry = offset/PAGE_SIZE; target_offset = (vm_object_offset_t)offset; @@ -3052,16 +3539,33 @@ vm_page_t t,m; upl_page_info_t *p; - if((t = vm_page_lookup(object, target_offset)) != NULL) { - - t->pageout = FALSE; - page_offset = t->offset; - VM_PAGE_FREE(t); - t = VM_PAGE_NULL; - m = vm_page_lookup(shadow_object, - page_offset + object->shadow_offset); - if(m != VM_PAGE_NULL) { - vm_object_paging_end(m->object); + m = VM_PAGE_NULL; + if(upl->flags & UPL_LITE) { + int pg_num; + pg_num = target_offset/PAGE_SIZE; + if(lite_list[pg_num>>5] & (1 << (pg_num & 31))) { + lite_list[pg_num>>5] &= ~(1 << (pg_num & 31)); + m = vm_page_lookup(shadow_object, + target_offset + (upl->offset - + shadow_object->paging_offset)); + } + } + if(object->pageout) { + if ((t = vm_page_lookup(object, target_offset)) + != NULL) { + t->pageout = FALSE; + VM_PAGE_FREE(t); + if(m == NULL) { + m = vm_page_lookup( + shadow_object, + target_offset + + object->shadow_offset); + } + if(m != VM_PAGE_NULL) + vm_object_paging_end(m->object); + } + } + if(m != VM_PAGE_NULL) { vm_page_lock_queues(); if(m->absent) { /* COPYOUT = FALSE case */ @@ -3106,13 +3610,15 @@ upl_abort_range( continue; } /* - * Handle the trusted pager throttle.
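The occupied computation above only needs to know whether any lite-list bit is still set, so it scans whole 32-bit words, (pages + 31) >> 5 of them, instead of testing bits one by one. The same logic as a free-standing function.

    #include <stdint.h>

    int upl_occupied(const uint32_t *lite_list, unsigned size_in_pages)
    {
            unsigned words = (size_in_pages + 31) >> 5;     /* 32 pages per word */

            for (unsigned i = 0; i < words; i++)
                    if (lite_list[i] != 0)
                            return 1;       /* some page still outstanding */
            return 0;
    }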
+ */ + if (m->laundry) { + if (!shadow_internal) + vm_page_burst_count--; vm_page_laundry_count--; m->laundry = FALSE; if (vm_page_laundry_count - < vm_page_laundry_min) { + < vm_page_laundry_min) { vm_page_laundry_min = 0; thread_wakeup((event_t) &vm_page_laundry_count); @@ -3134,24 +3640,48 @@ upl_abort_range( #endif /* MACH_PAGEMAP */ if(error & UPL_ABORT_DUMP_PAGES) { vm_page_free(m); - pmap_page_protect(m->phys_addr, VM_PROT_NONE); + pmap_page_protect(m->phys_page, VM_PROT_NONE); } else { PAGE_WAKEUP(m); } vm_page_unlock_queues(); } - } - target_offset += PAGE_SIZE_64; - xfer_size -= PAGE_SIZE; - entry++; + target_offset += PAGE_SIZE_64; + xfer_size -= PAGE_SIZE; + entry++; } - vm_object_unlock(shadow_object); - if(error & UPL_ABORT_NOTIFY_EMPTY) { - if((upl->flags & UPL_DEVICE_MEMORY) - || (queue_empty(&upl->map_object->memq))) + occupied = 1; + if (upl->flags & UPL_DEVICE_MEMORY) { + occupied = 0; + } else if (upl->flags & UPL_LITE) { + int pg_num; + int i; + pg_num = upl->size/PAGE_SIZE; + pg_num = (pg_num + 31) >> 5; + occupied = 0; + for(i= 0; i<pg_num; i++) { + if(lite_list[i] != 0) { + occupied = 1; + break; + } + } + } else { + if(queue_empty(&upl->map_object->memq)) { + occupied = 0; + } + } + + if(occupied == 0) { + if(upl->flags & UPL_COMMIT_NOTIFY_EMPTY) { *empty = TRUE; + } + if(object == shadow_object) + vm_object_paging_end(shadow_object); } + vm_object_unlock(shadow_object); + upl_unlock(upl); + return KERN_SUCCESS; } @@ -3166,11 +3696,21 @@ upl_abort( vm_object_offset_t shadow_offset; vm_object_offset_t target_offset; int i; + wpl_array_t lite_list; vm_page_t t,m; + int occupied; + boolean_t shadow_internal; if (upl == UPL_NULL) return KERN_INVALID_ARGUMENT; + if (upl->flags & UPL_IO_WIRE) { + boolean_t empty; + return upl_commit_range(upl, + 0, upl->size, 0, + NULL, 0, &empty); + } + upl_lock(upl); if(upl->flags & UPL_DEVICE_MEMORY) { upl_unlock(upl); @@ -3185,15 +3725,51 @@ upl_abort( return KERN_INVALID_ARGUMENT; } - shadow_object = upl->map_object->shadow; - shadow_offset = upl->map_object->shadow_offset; + if(object->pageout) { + shadow_object = object->shadow; + shadow_offset = object->shadow_offset; + } else { + shadow_object = object; + shadow_offset = upl->offset - object->paging_offset; + } + + if(upl->flags & UPL_INTERNAL) { + lite_list = (wpl_array_t) + ((((vm_offset_t)upl) + sizeof(struct upl)) + + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t))); + } else { + lite_list = (wpl_array_t) + (((vm_offset_t)upl) + sizeof(struct upl)); + } offset = 0; vm_object_lock(shadow_object); + shadow_internal = shadow_object->internal; + for(i = 0; i<(upl->size); i+=PAGE_SIZE, offset += PAGE_SIZE_64) { - if((t = vm_page_lookup(object,offset)) != NULL) { - target_offset = t->offset + shadow_offset; - if((m = vm_page_lookup(shadow_object, target_offset)) != NULL) { - vm_object_paging_end(m->object); + m = VM_PAGE_NULL; + target_offset = offset + shadow_offset; + if(upl->flags & UPL_LITE) { + int pg_num; + pg_num = offset/PAGE_SIZE; + if(lite_list[pg_num>>5] & (1 << (pg_num & 31))) { + lite_list[pg_num>>5] &= ~(1 << (pg_num & 31)); + m = vm_page_lookup( + shadow_object, target_offset); + } + } + if(object->pageout) { + if ((t = vm_page_lookup(object, offset)) != NULL) { + t->pageout = FALSE; + VM_PAGE_FREE(t); + if(m == NULL) { + m = vm_page_lookup( + shadow_object, target_offset); + } + if(m != VM_PAGE_NULL) + vm_object_paging_end(m->object); + } + } + if(m != VM_PAGE_NULL) { vm_page_lock_queues(); if(m->absent) { /* COPYOUT = FALSE case */ @@ -3236,6 +3812,8 @@ upl_abort( * Handle the trusted pager throttle.
*/ if (m->laundry) { + if (!shadow_internal) + vm_page_burst_count--; vm_page_laundry_count--; m->laundry = FALSE; if (vm_page_laundry_count @@ -3261,29 +3839,40 @@ upl_abort( #endif /* MACH_PAGEMAP */ if(error & UPL_ABORT_DUMP_PAGES) { vm_page_free(m); - pmap_page_protect(m->phys_addr, VM_PROT_NONE); + pmap_page_protect(m->phys_page, VM_PROT_NONE); } else { PAGE_WAKEUP(m); } vm_page_unlock_queues(); } - } } - vm_object_unlock(shadow_object); - /* Remove all the pages from the map object so */ - /* vm_pageout_object_terminate will work properly. */ - while (!queue_empty(&upl->map_object->memq)) { - vm_page_t p; - - p = (vm_page_t) queue_first(&upl->map_object->memq); - - assert(p->private); - assert(p->pageout); - p->pageout = FALSE; - assert(!p->cleaning); + occupied = 1; + if (upl->flags & UPL_DEVICE_MEMORY) { + occupied = 0; + } else if (upl->flags & UPL_LITE) { + int pg_num; + int i; + pg_num = upl->size/PAGE_SIZE; + pg_num = (pg_num + 31) >> 5; + occupied = 0; + for(i= 0; i<pg_num; i++) { + if(lite_list[i] != 0) { + occupied = 1; + break; + } + } + } else { + if(queue_empty(&upl->map_object->memq)) { + occupied = 0; + } + } - VM_PAGE_FREE(p); + if(occupied == 0) { + if(object == shadow_object) + vm_object_paging_end(shadow_object); } + vm_object_unlock(shadow_object); + upl_unlock(upl); return KERN_SUCCESS; } @@ -3298,52 +3887,21 @@ upl_commit( if (upl == UPL_NULL) return KERN_INVALID_ARGUMENT; + if(upl->flags & (UPL_LITE | UPL_IO_WIRE)) { + boolean_t empty; + return upl_commit_range(upl, 0, upl->size, 0, + page_list, count, &empty); + } + if (count == 0) page_list = NULL; upl_lock(upl); if (upl->flags & UPL_DEVICE_MEMORY) page_list = NULL; - if ((upl->flags & UPL_CLEAR_DIRTY) || - (upl->flags & UPL_PAGE_SYNC_DONE)) { - vm_object_t shadow_object = upl->map_object->shadow; - vm_object_t object = upl->map_object; - vm_object_offset_t target_offset; - vm_size_t xfer_end; - - vm_page_t t,m; - - vm_object_lock(shadow_object); - - target_offset = object->shadow_offset; - xfer_end = upl->size + object->shadow_offset; - while(target_offset < xfer_end) { - if ((t = vm_page_lookup(object, - target_offset - object->shadow_offset)) - != NULL) { - m = vm_page_lookup( - shadow_object, target_offset); - if(m != VM_PAGE_NULL) { - if (upl->flags & UPL_CLEAR_DIRTY) { - pmap_clear_modify(m->phys_addr); - m->dirty = FALSE; - } - /* It is a part of the semantic of */ - /* COPYOUT_FROM UPLs that a commit */ - /* implies cache sync between the */ - /* vm page and the backing store */ - /* this can be used to strip the */ - /* precious bit as well as clean */ - if (upl->flags & UPL_PAGE_SYNC_DONE) - m->precious = FALSE; - } - } - target_offset += PAGE_SIZE_64; - } - vm_object_unlock(shadow_object); - } - if (page_list) { + if ((upl->flags & UPL_CLEAR_DIRTY) || + (upl->flags & UPL_PAGE_SYNC_DONE) || page_list) { vm_object_t shadow_object = upl->map_object->shadow; vm_object_t object = upl->map_object; vm_object_offset_t target_offset; @@ -3371,15 +3929,29 @@ upl_commit( m = vm_page_lookup(shadow_object, target_offset); if(m != VM_PAGE_NULL) { - p = &(page_list[entry]); - if(page_list[entry].phys_addr && + if (upl->flags & UPL_CLEAR_DIRTY) { + pmap_clear_modify(m->phys_page); + m->dirty = FALSE; + } + /* It is a part of the semantic of */ + /* COPYOUT_FROM UPLs that a commit */ + /* implies cache sync between the */ + /* vm page and the backing store */ + /* this can be used to strip the */ + /* precious bit as well as clean */ + if (upl->flags & UPL_PAGE_SYNC_DONE) + m->precious = FALSE; + + if(page_list) { + p = &(page_list[entry]); + if(page_list[entry].phys_addr && p->pageout && !m->pageout) {
vm_page_lock_queues(); m->busy = TRUE; m->pageout = TRUE; vm_page_wire(m); vm_page_unlock_queues(); - } else if (page_list[entry].phys_addr && + } else if (page_list[entry].phys_addr && !p->pageout && m->pageout && !m->dump_cleaning) { vm_page_lock_queues(); @@ -3389,8 +3961,9 @@ upl_commit( vm_page_unwire(m); PAGE_WAKEUP_DONE(m); vm_page_unlock_queues(); + } + page_list[entry].phys_addr = 0; } - page_list[entry].phys_addr = 0; } target_offset += PAGE_SIZE_64; entry++; @@ -3398,10 +3971,378 @@ upl_commit( vm_object_unlock(shadow_object); } + if (upl->flags & UPL_DEVICE_MEMORY) { + vm_object_lock(upl->map_object->shadow); + if(upl->map_object == upl->map_object->shadow) + vm_object_paging_end(upl->map_object->shadow); + vm_object_unlock(upl->map_object->shadow); + } upl_unlock(upl); return KERN_SUCCESS; } + + +kern_return_t +vm_object_iopl_request( + vm_object_t object, + vm_object_offset_t offset, + vm_size_t size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + int cntrl_flags) +{ + vm_page_t dst_page; + vm_object_offset_t dst_offset = offset; + vm_size_t xfer_size = size; + upl_t upl = NULL; + int entry; + wpl_array_t lite_list; + int page_field_size; + int delayed_unlock = 0; + + vm_page_t alias_page = NULL; + kern_return_t ret; + vm_prot_t prot; + + + if(cntrl_flags & UPL_COPYOUT_FROM) { + prot = VM_PROT_READ; + } else { + prot = VM_PROT_READ | VM_PROT_WRITE; + } + + if(((size/page_size) > MAX_UPL_TRANSFER) && !object->phys_contiguous) { + size = MAX_UPL_TRANSFER * page_size; + } + + if(cntrl_flags & UPL_SET_INTERNAL) + if(page_list_count != NULL) + *page_list_count = MAX_UPL_TRANSFER; + if(((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) && + ((page_list_count != NULL) && (*page_list_count != 0) + && *page_list_count < (size/page_size))) + return KERN_INVALID_ARGUMENT; + + if((!object->internal) && (object->paging_offset != 0)) + panic("vm_object_upl_request: vnode object with non-zero paging offset\n"); + + if(object->phys_contiguous) { + /* No paging operations are possible against this memory */ + /* and so no need for map object, ever */ + cntrl_flags |= UPL_SET_LITE; + } + + if(upl_ptr) { + if(cntrl_flags & UPL_SET_INTERNAL) { + if(cntrl_flags & UPL_SET_LITE) { + upl = upl_create( + UPL_CREATE_INTERNAL | UPL_CREATE_LITE, + size); + user_page_list = (upl_page_info_t *) + (((vm_offset_t)upl) + sizeof(struct upl)); + lite_list = (wpl_array_t) + (((vm_offset_t)user_page_list) + + ((size/PAGE_SIZE) * + sizeof(upl_page_info_t))); + page_field_size = ((size/PAGE_SIZE) + 7) >> 3; + page_field_size = + (page_field_size + 3) & 0xFFFFFFFC; + bzero((char *)lite_list, page_field_size); + upl->flags = + UPL_LITE | UPL_INTERNAL | UPL_IO_WIRE; + } else { + upl = upl_create(UPL_CREATE_INTERNAL, size); + user_page_list = (upl_page_info_t *) + (((vm_offset_t)upl) + + sizeof(struct upl)); + upl->flags = UPL_INTERNAL | UPL_IO_WIRE; + } + } else { + if(cntrl_flags & UPL_SET_LITE) { + upl = upl_create(UPL_CREATE_LITE, size); + lite_list = (wpl_array_t) + (((vm_offset_t)upl) + sizeof(struct upl)); + page_field_size = ((size/PAGE_SIZE) + 7) >> 3; + page_field_size = + (page_field_size + 3) & 0xFFFFFFFC; + bzero((char *)lite_list, page_field_size); + upl->flags = UPL_LITE | UPL_IO_WIRE; + } else { + upl = upl_create(UPL_CREATE_EXTERNAL, size); + upl->flags = UPL_IO_WIRE; + } + } + + if(object->phys_contiguous) { + upl->map_object = object; + /* don't need any shadow mappings for this one */ + /* since it is already I/O memory */ + upl->flags |= 
UPL_DEVICE_MEMORY; + + vm_object_lock(object); + vm_object_paging_begin(object); + vm_object_unlock(object); + + /* paging in progress also protects the paging_offset */ + upl->offset = offset + object->paging_offset; + upl->size = size; + *upl_ptr = upl; + if(user_page_list) { + user_page_list[0].phys_addr = + (offset + object->shadow_offset)>>12; + user_page_list[0].device = TRUE; + } + + if(page_list_count != NULL) { + if (upl->flags & UPL_INTERNAL) { + *page_list_count = 0; + } else { + *page_list_count = 1; + } + } + return KERN_SUCCESS; + } + if(user_page_list) + user_page_list[0].device = FALSE; + + if(cntrl_flags & UPL_SET_LITE) { + upl->map_object = object; + } else { + upl->map_object = vm_object_allocate(size); + vm_object_lock(upl->map_object); + upl->map_object->shadow = object; + upl->map_object->pageout = TRUE; + upl->map_object->can_persist = FALSE; + upl->map_object->copy_strategy = + MEMORY_OBJECT_COPY_NONE; + upl->map_object->shadow_offset = offset; + upl->map_object->wimg_bits = object->wimg_bits; + vm_object_unlock(upl->map_object); + } + } + vm_object_lock(object); + vm_object_paging_begin(object); + + if (!object->phys_contiguous) { + /* Protect user space from future COW operations */ + object->true_share = TRUE; + if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) + object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; + } + + /* we can lock the upl offset now that paging_in_progress is set */ + if(upl_ptr) { + upl->size = size; + upl->offset = offset + object->paging_offset; + *upl_ptr = upl; +#ifdef UBC_DEBUG + queue_enter(&object->uplq, upl, upl_t, uplq); +#endif /* UBC_DEBUG */ + } + + entry = 0; + while (xfer_size) { + if((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) { + if (delayed_unlock) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } + vm_object_unlock(object); + VM_PAGE_GRAB_FICTITIOUS(alias_page); + vm_object_lock(object); + } + dst_page = vm_page_lookup(object, dst_offset); + + if ((dst_page == VM_PAGE_NULL) || (dst_page->busy) || + (dst_page->unusual && (dst_page->error || + dst_page->restart || dst_page->absent || + dst_page->fictitious || + prot & dst_page->page_lock))) { + vm_fault_return_t result; + do { + vm_page_t top_page; + kern_return_t error_code; + int interruptible; + + vm_object_offset_t lo_offset = offset; + vm_object_offset_t hi_offset = offset + size; + + + if (delayed_unlock) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } + + if(cntrl_flags & UPL_SET_INTERRUPTIBLE) { + interruptible = THREAD_ABORTSAFE; + } else { + interruptible = THREAD_UNINT; + } + + result = vm_fault_page(object, dst_offset, + prot | VM_PROT_WRITE, FALSE, + interruptible, + lo_offset, hi_offset, + VM_BEHAVIOR_SEQUENTIAL, + &prot, &dst_page, &top_page, + (int *)0, + &error_code, FALSE, FALSE, NULL, 0); + + switch(result) { + case VM_FAULT_SUCCESS: + + PAGE_WAKEUP_DONE(dst_page); + + /* + * Release paging references and + * top-level placeholder page, if any. 
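On a vm_fault_page() failure, vm_object_iopl_request() above walks back over [offset, dst_offset) and unwires every page it had already wired before propagating the error. The unwind shape, reduced to a self-contained example with a simulated failure partway through.

    #include <stddef.h>

    #define NPAGES 16

    static int wired[NPAGES];

    static int wire(size_t i)
    {
            if (i == 9)
                    return -1;              /* simulated fault failure */
            wired[i] = 1;
            return 0;
    }

    static void unwire(size_t i)
    {
            wired[i] = 0;
    }

    int wire_range(size_t npages)
    {
            size_t i;

            for (i = 0; i < npages; i++)
                    if (wire(i) != 0)
                            goto unwind;
            return 0;
    unwind:
            while (i-- > 0)                 /* release only the pages already wired */
                    unwire(i);
            return -1;
    }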
+ */ + + if(top_page != VM_PAGE_NULL) { + vm_object_t local_object; + local_object = + top_page->object; + if(top_page->object + != dst_page->object) { + vm_object_lock( + local_object); + VM_PAGE_FREE(top_page); + vm_object_paging_end( + local_object); + vm_object_unlock( + local_object); + } else { + VM_PAGE_FREE(top_page); + vm_object_paging_end( + local_object); + } + } + + break; + + + case VM_FAULT_RETRY: + vm_object_lock(object); + vm_object_paging_begin(object); + break; + + case VM_FAULT_FICTITIOUS_SHORTAGE: + vm_page_more_fictitious(); + vm_object_lock(object); + vm_object_paging_begin(object); + break; + + case VM_FAULT_MEMORY_SHORTAGE: + if (vm_page_wait(interruptible)) { + vm_object_lock(object); + vm_object_paging_begin(object); + break; + } + /* fall thru */ + + case VM_FAULT_INTERRUPTED: + error_code = MACH_SEND_INTERRUPTED; + case VM_FAULT_MEMORY_ERROR: + ret = (error_code ? error_code: + KERN_MEMORY_ERROR); + vm_object_lock(object); + for(; offset < dst_offset; + offset += PAGE_SIZE) { + dst_page = vm_page_lookup( + object, offset); + if(dst_page == VM_PAGE_NULL) + panic("vm_object_iopl_request: Wired pages missing. \n"); + vm_page_lock_queues(); + vm_page_unwire(dst_page); + vm_page_unlock_queues(); + VM_STAT(reactivations++); + } + vm_object_unlock(object); + upl_destroy(upl); + return ret; + } + } while ((result != VM_FAULT_SUCCESS) + || (result == VM_FAULT_INTERRUPTED)); + } + if (delayed_unlock == 0) + vm_page_lock_queues(); + vm_page_wire(dst_page); + + if (upl_ptr) { + if (cntrl_flags & UPL_SET_LITE) { + int pg_num; + pg_num = (dst_offset-offset)/PAGE_SIZE; + lite_list[pg_num>>5] |= 1 << (pg_num & 31); + } else { + /* + * Convert the fictitious page to a + * private shadow of the real page. + */ + assert(alias_page->fictitious); + alias_page->fictitious = FALSE; + alias_page->private = TRUE; + alias_page->pageout = TRUE; + alias_page->phys_page = dst_page->phys_page; + vm_page_wire(alias_page); + + vm_page_insert(alias_page, + upl->map_object, size - xfer_size); + assert(!alias_page->wanted); + alias_page->busy = FALSE; + alias_page->absent = FALSE; + } + + /* expect the page to be used */ + dst_page->reference = TRUE; + + if (!(cntrl_flags & UPL_COPYOUT_FROM)) + dst_page->dirty = TRUE; + alias_page = NULL; + + if (user_page_list) { + user_page_list[entry].phys_addr + = dst_page->phys_page; + user_page_list[entry].dirty = + dst_page->dirty; + user_page_list[entry].pageout = + dst_page->pageout; + user_page_list[entry].absent = + dst_page->absent; + user_page_list[entry].precious = + dst_page->precious; + } + } + if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) { + delayed_unlock = 0; + vm_page_unlock_queues(); + } + entry++; + dst_offset += PAGE_SIZE_64; + xfer_size -= PAGE_SIZE; + } + if (delayed_unlock) + vm_page_unlock_queues(); + + if (upl->flags & UPL_INTERNAL) { + if(page_list_count != NULL) + *page_list_count = 0; + } else if (*page_list_count > entry) { + if(page_list_count != NULL) + *page_list_count = entry; + } + + if (alias_page != NULL) { + vm_page_lock_queues(); + vm_page_free(alias_page); + vm_page_unlock_queues(); + } + + vm_object_unlock(object); + return KERN_SUCCESS; +} + vm_size_t upl_get_internal_pagelist_offset() { diff --git a/osfmk/vm/vm_pageout.h b/osfmk/vm/vm_pageout.h index 60148f75c..0d3fa62fb 100644 --- a/osfmk/vm/vm_pageout.h +++ b/osfmk/vm/vm_pageout.h @@ -158,14 +158,24 @@ struct upl { #define UPL_PAGE_SYNC_DONE 0x20 #define UPL_DEVICE_MEMORY 0x40 #define UPL_PAGEOUT 0x80 +#define UPL_LITE 0x100 +#define UPL_IO_WIRE 0x200 #define 
UPL_PAGE_TICKET_MASK 0xF00 #define UPL_PAGE_TICKET_SHIFT 8 +/* flags for upl_create flags parameter */ +#define UPL_CREATE_EXTERNAL 0 +#define UPL_CREATE_INTERNAL 0x1 +#define UPL_CREATE_LITE 0x2 + +/* wired page list structure */ +typedef unsigned long *wpl_array_t; + #endif /* _VM_VM_PAGEOUT_H_ */ diff --git a/osfmk/vm/vm_print.h b/osfmk/vm/vm_print.h index fff318eff..6fce7c7bd 100644 --- a/osfmk/vm/vm_print.h +++ b/osfmk/vm/vm_print.h @@ -30,12 +30,13 @@ #define VM_PRINT_H #include <vm/vm_map.h> +#include <machine/db_machdep.h> extern void vm_map_print( - vm_map_t map); + db_addr_t map); extern void vm_map_copy_print( - vm_map_copy_t copy); + db_addr_t copy); #include <vm/vm_object.h> @@ -64,6 +65,6 @@ extern void vm_external_print( extern void db_vm(void); extern vm_size_t db_vm_map_total_size( - vm_map_t map); + db_addr_t map); #endif /* VM_PRINT_H */ diff --git a/osfmk/vm/vm_resident.c b/osfmk/vm/vm_resident.c index 804acaca7..51dfb421c 100644 --- a/osfmk/vm/vm_resident.c +++ b/osfmk/vm/vm_resident.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -77,6 +77,9 @@ #include <kern/misc_protos.h> #include <zone_debug.h> #include <vm/cpm.h> +#include <ppc/mappings.h> /* (BRINGUP) */ +#include <pexpert/pexpert.h> /* (BRINGUP) */ /* Variables used to indicate the relative age of pages in the * inactive list @@ -119,6 +122,7 @@ vm_page_bucket_t *vm_page_buckets; /* Array of buckets */ unsigned int vm_page_bucket_count = 0; /* How big is array? */ unsigned int vm_page_hash_mask; /* Mask for hash function */ unsigned int vm_page_hash_shift; /* Shift for hash function */ +uint32_t vm_page_bucket_hash; /* Basic bucket hash */ decl_simple_lock_data(,vm_page_bucket_lock) #if MACH_PAGE_HASH_STATS @@ -171,6 +175,10 @@ hash_debug(void) vm_size_t page_size = 4096; vm_size_t page_mask = 4095; int page_shift = 12; +#else +vm_size_t page_size = PAGE_SIZE; +vm_size_t page_mask = PAGE_MASK; +int page_shift = PAGE_SHIFT; #endif /* PAGE_SIZE_FIXED */ /* @@ -212,7 +220,7 @@ decl_mutex_data(,vm_page_zero_fill_lock) /* * Fictitious pages don't have a physical address, - * but we must initialize phys_addr to something. + * but we must initialize phys_page to something. * For debugging, this should be a strange value * that the pmap module can recognize in assertions.
*/ @@ -258,6 +266,8 @@ int vm_page_free_min = 0; int vm_page_inactive_target = 0; int vm_page_free_reserved = 0; int vm_page_laundry_count = 0; +int vm_page_burst_count = 0; +int vm_page_throttled_count = 0; /* * The VM system has a couple of heuristics for deciding @@ -350,7 +360,7 @@ vm_page_bootstrap( m->restart = FALSE; m->zero_fill = FALSE; - m->phys_addr = 0; /* reset later */ + m->phys_page = 0; /* reset later */ m->page_lock = VM_PROT_NONE; m->unlock_request = VM_PROT_NONE; @@ -416,6 +426,10 @@ vm_page_bootstrap( for (log2 = 0; size > 1; log2++) size /= 2; vm_page_hash_shift = log1/2 - log2 + 1; + + vm_page_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */ + vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of quadroot of table size) */ + vm_page_bucket_hash |= 1; /* Set bit and add 1 - always must be 1 to insure unique series */ if (vm_page_hash_mask & vm_page_bucket_count) printf("vm_page_bootstrap: WARNING -- strange page hash\n"); @@ -443,8 +457,8 @@ vm_page_bootstrap( */ pmap_startup(&virtual_space_start, &virtual_space_end); - virtual_space_start = round_page(virtual_space_start); - virtual_space_end = trunc_page(virtual_space_end); + virtual_space_start = round_page_32(virtual_space_start); + virtual_space_end = trunc_page_32(virtual_space_end); *startp = virtual_space_start; *endp = virtual_space_end; @@ -456,7 +470,7 @@ vm_page_bootstrap( * wired, they nonetheless can't be moved. At this moment, * all VM managed pages are "free", courtesy of pmap_startup. */ - vm_page_wire_count = atop(mem_size) - vm_page_free_count; /* initial value */ + vm_page_wire_count = atop_64(max_mem) - vm_page_free_count; /* initial value */ printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count); vm_page_free_count_minimum = vm_page_free_count; @@ -472,7 +486,8 @@ vm_offset_t pmap_steal_memory( vm_size_t size) { - vm_offset_t addr, vaddr, paddr; + vm_offset_t addr, vaddr; + ppnum_t phys_page; /* * We round the size to a round multiple. @@ -493,8 +508,8 @@ pmap_steal_memory( * we don't trust the pmap module to do it right. */ - virtual_space_start = round_page(virtual_space_start); - virtual_space_end = trunc_page(virtual_space_end); + virtual_space_start = round_page_32(virtual_space_start); + virtual_space_end = trunc_page_32(virtual_space_end); } /* @@ -510,10 +525,10 @@ pmap_steal_memory( * Allocate and map physical pages to back new virtual pages. */ - for (vaddr = round_page(addr); + for (vaddr = round_page_32(addr); vaddr < addr + size; vaddr += PAGE_SIZE) { - if (!pmap_next_page(&paddr)) + if (!pmap_next_page(&phys_page)) panic("pmap_steal_memory"); /* @@ -521,7 +536,7 @@ pmap_steal_memory( * but some pmap modules barf if they are. */ - pmap_enter(kernel_pmap, vaddr, paddr, + pmap_enter(kernel_pmap, vaddr, phys_page, VM_PROT_READ|VM_PROT_WRITE, VM_WIMG_USE_DEFAULT, FALSE); /* @@ -539,18 +554,19 @@ pmap_startup( vm_offset_t *startp, vm_offset_t *endp) { - unsigned int i, npages, pages_initialized; - vm_page_t pages; - vm_offset_t paddr; + unsigned int i, npages, pages_initialized, fill, fillval; + vm_page_t pages; + ppnum_t phys_page; + addr64_t tmpaddr; /* * We calculate how many page frames we will have * and then allocate the page structures in one chunk. 
*/ - npages = ((PAGE_SIZE * pmap_free_pages() + - (round_page(virtual_space_start) - virtual_space_start)) / - (PAGE_SIZE + sizeof *pages)); + tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE; /* Get the amount of memory left */ + tmpaddr = tmpaddr + (addr64_t)(round_page_32(virtual_space_start) - virtual_space_start); /* Account for any slop */ + npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*pages))); /* Figure size of all vm_page_ts, including enough to hold the vm_page_ts */ pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages); @@ -559,10 +575,10 @@ pmap_startup( */ for (i = 0, pages_initialized = 0; i < npages; i++) { - if (!pmap_next_page(&paddr)) + if (!pmap_next_page(&phys_page)) break; - vm_page_init(&pages[i], paddr); + vm_page_init(&pages[i], phys_page); vm_page_pages++; pages_initialized++; } @@ -574,16 +590,60 @@ pmap_startup( * they require several consecutive pages. */ +/* + * Check if we want to initialize pages to a known value + */ + + fill = 0; /* Assume no fill */ + if (PE_parse_boot_arg("fill", &fillval)) fill = 1; /* Set fill */ + for (i = pages_initialized; i > 0; i--) { + extern void fillPage(ppnum_t phys_page, unsigned int fillval); + if(fill) fillPage(pages[i - 1].phys_page, fillval); /* Fill the page with a know value if requested at boot */ vm_page_release(&pages[i - 1]); } +#if 0 + { + vm_page_t xx, xxo, xxl; + int j, k, l; + + j = 0; /* (BRINGUP) */ + xxl = 0; + + for(xx = vm_page_queue_free; xx; xxl = xx, xx = xx->pageq.next) { /* (BRINGUP) */ + j++; /* (BRINGUP) */ + if(j > vm_page_free_count) { /* (BRINGUP) */ + panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl); + } + + l = vm_page_free_count - j; /* (BRINGUP) */ + k = 0; /* (BRINGUP) */ + + if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count); + + for(xxo = xx->pageq.next; xxo; xxo = xxo->pageq.next) { /* (BRINGUP) */ + k++; + if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l); + if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) { /* (BRINGUP) */ + panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo); + } + } + } + + if(j != vm_page_free_count) { /* (BRINGUP) */ + panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count); + } + } +#endif + + /* * We have to re-align virtual_space_start, * because pmap_steal_memory has been using it. */ - virtual_space_start = round_page(virtual_space_start); + virtual_space_start = round_page_32(virtual_space_start); *startp = virtual_space_start; *endp = virtual_space_end; @@ -632,20 +692,20 @@ vm_page_module_init(void) void vm_page_create( - vm_offset_t start, - vm_offset_t end) + ppnum_t start, + ppnum_t end) { - vm_offset_t paddr; - vm_page_t m; + ppnum_t phys_page; + vm_page_t m; - for (paddr = round_page(start); - paddr < trunc_page(end); - paddr += PAGE_SIZE) { + for (phys_page = start; + phys_page < end; + phys_page++) { while ((m = (vm_page_t) vm_page_grab_fictitious()) == VM_PAGE_NULL) vm_page_more_fictitious(); - vm_page_init(m, paddr); + vm_page_init(m, phys_page); vm_page_pages++; vm_page_release(m); } @@ -656,11 +716,10 @@ vm_page_create( * * Distributes the object/offset key pair among hash buckets. * - * NOTE: To get a good hash function, the bucket count should - * be a power of two. 
+ * NOTE: The bucket count must be a power of 2 */ #define vm_page_hash(object, offset) (\ - ( ((natural_t)(vm_offset_t)object<phys_addr = phys_addr; + mem->phys_page = phys_page; } /* @@ -999,7 +1059,7 @@ vm_page_release_fictitious( assert(!m->free); assert(m->busy); assert(m->fictitious); - assert(m->phys_addr == vm_page_fictitious_addr); + assert(m->phys_page == vm_page_fictitious_addr); c_vm_page_release_fictitious++; @@ -1124,7 +1184,7 @@ vm_page_convert( if (real_m == VM_PAGE_NULL) return FALSE; - m->phys_addr = real_m->phys_addr; + m->phys_page = real_m->phys_page; m->fictitious = FALSE; m->no_isync = TRUE; @@ -1135,7 +1195,7 @@ vm_page_convert( vm_page_inactive_count++; vm_page_unlock_queues(); - real_m->phys_addr = vm_page_fictitious_addr; + real_m->phys_page = vm_page_fictitious_addr; real_m->fictitious = TRUE; vm_page_release_fictitious(real_m); @@ -1234,7 +1294,7 @@ wakeup_pageout: (vm_page_inactive_count < vm_page_inactive_target))) thread_wakeup((event_t) &vm_page_free_wanted); -// dbgLog(mem->phys_addr, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */ +// dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */ return mem; } @@ -1249,9 +1309,21 @@ void vm_page_release( register vm_page_t mem) { + +#if 0 + unsigned int pindex; + phys_entry *physent; + + physent = mapping_phys_lookup(mem->phys_page, &pindex); /* (BRINGUP) */ + if(physent->ppLink & ppN) { /* (BRINGUP) */ + panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page); + } + physent->ppLink = physent->ppLink | ppN; /* (BRINGUP) */ +#endif + assert(!mem->private && !mem->fictitious); -// dbgLog(mem->phys_addr, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */ +// dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */ mutex_lock(&vm_page_queue_free_lock); if (mem->free) @@ -1401,7 +1473,7 @@ vm_page_free( assert(!mem->free); assert(!mem->cleaning); assert(!mem->pageout); - assert(!vm_page_free_verify || pmap_verify_free(mem->phys_addr)); + assert(!vm_page_free_verify || pmap_verify_free(mem->phys_page)); if (mem->tabled) vm_page_remove(mem); /* clears tabled, object, offset */ @@ -1426,6 +1498,8 @@ vm_page_free( if (mem->laundry) { extern int vm_page_laundry_min; + if (!object->internal) + vm_page_burst_count--; vm_page_laundry_count--; mem->laundry = FALSE; /* laundry is now clear */ counter(++c_laundry_pages_freed); @@ -1457,7 +1531,7 @@ vm_page_free( if (mem->private) { mem->private = FALSE; mem->fictitious = TRUE; - mem->phys_addr = vm_page_fictitious_addr; + mem->phys_page = vm_page_fictitious_addr; } if (mem->fictitious) { vm_page_release_fictitious(mem); @@ -1467,11 +1541,98 @@ vm_page_free( vm_zf_count-=1; mem->zero_fill = FALSE; } - vm_page_init(mem, mem->phys_addr); + vm_page_init(mem, mem->phys_page); vm_page_release(mem); } } + +void +vm_page_free_list( + register vm_page_t mem) +{ + register vm_page_t nxt; + register vm_page_t first = NULL; + register vm_page_t last; + register int pg_count = 0; + + + while (mem) { + nxt = (vm_page_t)(mem->pageq.next); + + if (mem->clustered) + vm_pagein_cluster_unused++; + + if (mem->laundry) { + extern int vm_page_laundry_min; + + if (!mem->object->internal) + vm_page_burst_count--; + vm_page_laundry_count--; + counter(++c_laundry_pages_freed); + + if (vm_page_laundry_count < vm_page_laundry_min) { + vm_page_laundry_min = 0; + thread_wakeup((event_t) &vm_page_laundry_count); + } + } + mem->busy = TRUE; + + PAGE_WAKEUP(mem); /* clears wanted */ + + if (mem->private) 
+ mem->fictitious = TRUE; + + if (!mem->fictitious) { + /* depends on the queues lock */ + if (mem->zero_fill) + vm_zf_count -= 1; + vm_page_init(mem, mem->phys_page); + + mem->free = TRUE; + + if (first == NULL) + last = mem; + mem->pageq.next = (queue_t) first; + first = mem; + + pg_count++; + } else { + mem->phys_page = vm_page_fictitious_addr; + vm_page_release_fictitious(mem); + } + mem = nxt; + } + if (first) { + + mutex_lock(&vm_page_queue_free_lock); + + last->pageq.next = (queue_entry_t) vm_page_queue_free; + vm_page_queue_free = first; + + vm_page_free_count += pg_count; + + if ((vm_page_free_wanted > 0) && + (vm_page_free_count >= vm_page_free_reserved)) { + int available_pages; + + available_pages = vm_page_free_count - vm_page_free_reserved; + + if (available_pages >= vm_page_free_wanted) { + vm_page_free_wanted = 0; + thread_wakeup((event_t) &vm_page_free_count); + } else { + while (available_pages--) { + vm_page_free_wanted--; + thread_wakeup_one((event_t) &vm_page_free_count); + } + } + } + mutex_unlock(&vm_page_queue_free_lock); + } +} + + /* * vm_page_wire: * @@ -1576,7 +1737,7 @@ vm_page_deactivate( { VM_PAGE_CHECK(m); -// dbgLog(m->phys_addr, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */ +// dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */ /* * This page is no longer very interesting. If it was @@ -1596,7 +1757,7 @@ vm_page_deactivate( return; if (m->active || (m->inactive && m->reference)) { if (!m->fictitious && !m->absent) - pmap_clear_reference(m->phys_addr); + pmap_clear_reference(m->phys_page); m->reference = FALSE; VM_PAGE_QUEUES_REMOVE(m); } @@ -1687,7 +1848,7 @@ vm_page_part_zero_fill( VM_PAGE_CHECK(m); #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED - pmap_zero_part_page(m->phys_addr, m_pa, len); + pmap_zero_part_page(m->phys_page, m_pa, len); #else while (1) { tmp = vm_page_grab(); @@ -1728,7 +1889,8 @@ vm_page_zero_fill( VM_PAGE_CHECK(m); - pmap_zero_page(m->phys_addr); +// dbgTrace(0xAEAEAEAE, m->phys_page, 0); /* (BRINGUP) */ + pmap_zero_page(m->phys_page); } /* @@ -1748,8 +1910,8 @@ vm_page_part_copy( VM_PAGE_CHECK(src_m); VM_PAGE_CHECK(dst_m); - pmap_copy_part_page(src_m->phys_addr, src_pa, - dst_m->phys_addr, dst_pa, len); + pmap_copy_part_page(src_m->phys_page, src_pa, + dst_m->phys_page, dst_pa, len); } /* @@ -1772,7 +1934,7 @@ vm_page_copy( VM_PAGE_CHECK(src_m); VM_PAGE_CHECK(dest_m); - pmap_copy_page(src_m->phys_addr, dest_m->phys_addr); + pmap_copy_page(src_m->phys_page, dest_m->phys_page); } /* @@ -1840,11 +2002,11 @@ vm_page_free_list_sort(void) while (m != VM_PAGE_NULL) { cpm_counter(++vpfls_pages_handled); next_m = NEXT_PAGE(m); - if (m->phys_addr < sort_list->phys_addr) { + if (m->phys_page < sort_list->phys_page) { cpm_counter(++vpfls_head_insertions); SET_NEXT_PAGE(m, sort_list); sort_list = m; - } else if (m->phys_addr > sort_list_end->phys_addr) { + } else if (m->phys_page > sort_list_end->phys_page) { cpm_counter(++vpfls_tail_insertions); SET_NEXT_PAGE(sort_list_end, m); SET_NEXT_PAGE(m, VM_PAGE_NULL); @@ -1854,7 +2016,7 @@ vm_page_free_list_sort(void) /* general sorted list insertion */ prev = &sort_list; for (m1=sort_list; m1!=VM_PAGE_NULL; m1=NEXT_PAGE(m1)) { - if (m1->phys_addr > m->phys_addr) { + if (m1->phys_page > m->phys_page) { if (*prev != m1) panic("vm_sort_free_list: ugh"); SET_NEXT_PAGE(m, *prev); @@ -1873,11 +2035,11 @@ vm_page_free_list_sort(void) */ for (m = sort_list, npages = 0; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) { if (m != sort_list && - m->phys_addr <= addr) { + m->phys_page 
<= addr) { printf("m 0x%x addr 0x%x\n", m, addr); panic("vm_sort_free_list"); } - addr = m->phys_addr; + addr = m->phys_page; ++npages; } if (old_free_count != vm_page_free_count) @@ -1906,16 +2068,16 @@ vm_page_verify_contiguous( unsigned int page_count; vm_offset_t prev_addr; - prev_addr = pages->phys_addr; + prev_addr = pages->phys_page; page_count = 1; for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) { - if (m->phys_addr != prev_addr + page_size) { + if (m->phys_page != prev_addr + 1) { printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n", - m, prev_addr, m->phys_addr); + m, prev_addr, m->phys_page); printf("pages 0x%x page_count %d\n", pages, page_count); panic("vm_page_verify_contiguous: not contiguous!"); } - prev_addr = m->phys_addr; + prev_addr = m->phys_page; ++page_count; } if (page_count != npages) { @@ -1945,18 +2107,18 @@ vm_page_find_contiguous( int npages) { vm_page_t m, *contig_prev, *prev_ptr; - vm_offset_t prev_addr; + ppnum_t prev_page; unsigned int contig_npages; vm_page_t list; if (npages < 1) return VM_PAGE_NULL; - prev_addr = vm_page_queue_free->phys_addr - (page_size + 1); + prev_page = vm_page_queue_free->phys_page - 2; prev_ptr = &vm_page_queue_free; for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) { - if (m->phys_addr != prev_addr + page_size) { + if (m->phys_page != prev_page + 1) { /* * Whoops! Pages aren't contiguous. Start over. */ @@ -1991,7 +2153,7 @@ vm_page_find_contiguous( assert(contig_npages < npages); prev_ptr = (vm_page_t *) &m->pageq.next; - prev_addr = m->phys_addr; + prev_page = m->phys_page; } cpm_counter(++vpfc_failed); return VM_PAGE_NULL; @@ -2175,7 +2337,7 @@ vm_page_print( (p->restart ? "" : "!"), (p->unusual ? "" : "!")); - iprintf("phys_addr=0x%x", p->phys_addr); + iprintf("phys_page=0x%x", p->phys_page); printf(", page_error=0x%x", p->page_error); printf(", page_lock=0x%x", p->page_lock); printf(", unlock_request=%d\n", p->unlock_request); diff --git a/osfmk/vm/vm_shared_memory_server.c b/osfmk/vm/vm_shared_memory_server.c index bc3ef97de..1312199fc 100644 --- a/osfmk/vm/vm_shared_memory_server.c +++ b/osfmk/vm/vm_shared_memory_server.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -55,6 +56,7 @@ static load_struct_t * lsf_hash_lookup( queue_head_t *hash_table, void *file_object, + vm_offset_t recognizableOffset, int size, boolean_t alternate, shared_region_task_mappings_t sm_info); @@ -98,13 +100,34 @@ vm_offset_t shared_file_data_region; ipc_port_t shared_text_region_handle; ipc_port_t shared_data_region_handle; vm_offset_t shared_file_mapping_array = 0; -shared_region_mapping_t system_shared_region = NULL; + +shared_region_mapping_t default_environment_shared_regions = NULL; +static decl_mutex_data(,default_regions_list_lock_data) + +#define default_regions_list_lock() \ + mutex_lock(&default_regions_list_lock_data) +#define default_regions_list_lock_try() \ + mutex_try(&default_regions_list_lock_data) +#define default_regions_list_unlock() \ + mutex_unlock(&default_regions_list_lock_data) + ipc_port_t sfma_handle = NULL; zone_t lsf_zone; int shared_file_available_hash_ele; +/* com region support */ +ipc_port_t com_region_handle = NULL; +vm_map_t com_region_map = NULL; +vm_size_t com_region_size = _COMM_PAGE_AREA_LENGTH; +shared_region_mapping_t com_mapping_resource = NULL; + +#define GLOBAL_COM_REGION_BASE _COMM_PAGE_BASE_ADDRESS + +/* called for the non-default, private branch shared region support */ +/* system default fields for fs_base and system supported are 
not */ +/* relevant as the system default flag is not set */ kern_return_t shared_file_create_system_region( shared_region_mapping_t *shared_region) @@ -126,20 +149,230 @@ shared_file_create_system_region( kret = shared_region_mapping_create(text_handle, text_size, data_handle, data_size, mapping_array, GLOBAL_SHARED_TEXT_SEGMENT, shared_region, - 0x9000000, 0x9000000); + SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE); if(kret) return kret; (*shared_region)->flags = 0; + if(com_mapping_resource) { + shared_region_mapping_ref(com_mapping_resource); + (*shared_region)->next = com_mapping_resource; + } + return KERN_SUCCESS; } +/* + * load a new default for a specified environment into the default shared + * regions list. If a previous default exists for the environment specification, + * it is returned along with its reference. It is expected that the new + * system region structure passes a reference. + */ + +shared_region_mapping_t +update_default_shared_region( + shared_region_mapping_t new_system_region) +{ + shared_region_mapping_t old_system_region; + unsigned int fs_base; + unsigned int system; + + fs_base = new_system_region->fs_base; + system = new_system_region->system; + new_system_region->flags |= SHARED_REGION_SYSTEM; + default_regions_list_lock(); + old_system_region = default_environment_shared_regions; + + if((old_system_region != NULL) && + (old_system_region->fs_base == fs_base) && + (old_system_region->system == system)) { + new_system_region->default_env_list = + old_system_region->default_env_list; + default_environment_shared_regions = new_system_region; + default_regions_list_unlock(); + old_system_region->flags |= SHARED_REGION_STALE; + return old_system_region; + } + if (old_system_region) { + while(old_system_region->default_env_list != NULL) { + if((old_system_region->default_env_list->fs_base == fs_base) && + (old_system_region->default_env_list->system == system)) { + new_system_region->default_env_list = + old_system_region->default_env_list + ->default_env_list; + old_system_region->default_env_list = + new_system_region; + default_regions_list_unlock(); + old_system_region->flags |= SHARED_REGION_STALE; + return old_system_region; + } + old_system_region = old_system_region->default_env_list; + } + } + /* If we get here, we are at the end of the system list and we */ + /* did not find a pre-existing entry */ + if(old_system_region) { + old_system_region->default_env_list = new_system_region; + } else { + default_environment_shared_regions = new_system_region; + } + default_regions_list_unlock(); + return NULL; +} + +/* + * lookup a system_shared_region for the environment specified. If one is + * found, it is returned along with a reference against the structure + */ + +shared_region_mapping_t +lookup_default_shared_region( + unsigned int fs_base, + unsigned int system) +{ + shared_region_mapping_t system_region; + default_regions_list_lock(); + system_region = default_environment_shared_regions; + + while(system_region != NULL) { + if((system_region->fs_base == fs_base) && + (system_region->system == system)) { + break; + } + system_region = system_region->default_env_list; + } + if(system_region) + shared_region_mapping_ref(system_region); + default_regions_list_unlock(); + return system_region; +} + +/* + * remove a system_region default if it appears in the default regions list. + * Drop a reference on removal.
+ */ + +__private_extern__ void +remove_default_shared_region_lock( + shared_region_mapping_t system_region, + int need_lock) +{ + shared_region_mapping_t old_system_region; + unsigned int fs_base; + unsigned int system; + + default_regions_list_lock(); + old_system_region = default_environment_shared_regions; + + if(old_system_region == NULL) { + default_regions_list_unlock(); + return; + } + + if (old_system_region == system_region) { + default_environment_shared_regions + = old_system_region->default_env_list; + old_system_region->flags |= SHARED_REGION_STALE; + shared_region_mapping_dealloc_lock(old_system_region, + need_lock); + default_regions_list_unlock(); + return; + } + + while(old_system_region->default_env_list != NULL) { + if(old_system_region->default_env_list == system_region) { + shared_region_mapping_t dead_region; + dead_region = old_system_region->default_env_list; + old_system_region->default_env_list = + old_system_region->default_env_list->default_env_list; + dead_region->flags |= SHARED_REGION_STALE; + shared_region_mapping_dealloc_lock(dead_region, + need_lock); + default_regions_list_unlock(); + return; + } + old_system_region = old_system_region->default_env_list; + } + default_regions_list_unlock(); +} + +/* + * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is + * the only caller. Remove this stub function and the corresponding symbol + * export for Merlot. + */ +void +remove_default_shared_region( + shared_region_mapping_t system_region) +{ + remove_default_shared_region_lock(system_region, 1); +} + +void +remove_all_shared_regions() +{ + shared_region_mapping_t system_region; + shared_region_mapping_t next_system_region; + + default_regions_list_lock(); + system_region = default_environment_shared_regions; + + if(system_region == NULL) { + default_regions_list_unlock(); + return; + } + + while(system_region != NULL) { + next_system_region = system_region->default_env_list; + system_region->flags |= SHARED_REGION_STALE; + shared_region_mapping_dealloc(system_region); + system_region = next_system_region; + } + default_environment_shared_regions = NULL; + default_regions_list_unlock(); +} + +/* shared_com_boot_time_init initializes the common page shared data and */ +/* text region. This region is semi-independent of the split libs */ +/* and so its policies have to be handled differently by the code that */ +/* manipulates the mapping of shared region environments.
However, */ +/* the shared region delivery system supports both */ +shared_com_boot_time_init() +{ + kern_return_t kret; + vm_named_entry_t named_entry; + + if(com_region_handle) { + panic("shared_com_boot_time_init: " + "com_region_handle already set\n"); + } + + /* create com page region */ + if(kret = vm_region_object_create(kernel_map, + com_region_size, + &com_region_handle)) { + panic("shared_com_boot_time_init: " + "unable to create comm page\n"); + return; + } + /* now set export the underlying region/map */ + named_entry = (vm_named_entry_t)com_region_handle->ip_kobject; + com_region_map = named_entry->backing.map; + /* wrap the com region in its own shared file mapping structure */ + shared_region_mapping_create(com_region_handle, + com_region_size, NULL, 0, 0, + GLOBAL_COM_REGION_BASE, &com_mapping_resource, + 0, 0); + +} + shared_file_boot_time_init( -) + unsigned int fs_base, + unsigned int system) { long shared_text_region_size; long shared_data_region_size; shared_region_mapping_t new_system_region; - shared_region_mapping_t old_system_region; + shared_region_mapping_t old_default_env; shared_text_region_size = 0x10000000; shared_data_region_size = 0x10000000; @@ -151,20 +384,26 @@ shared_file_boot_time_init( shared_text_region_size, shared_data_region_handle, shared_data_region_size, shared_file_mapping_array, GLOBAL_SHARED_TEXT_SEGMENT, &new_system_region, - 0x9000000, 0x9000000); - old_system_region = system_shared_region; - system_shared_region = new_system_region; - system_shared_region->flags = SHARED_REGION_SYSTEM; - /* consume the reference held because this is the */ - /* system shared region */ - if(old_system_region) { - shared_region_mapping_dealloc(old_system_region); - } + SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE); + + new_system_region->fs_base = fs_base; + new_system_region->system = system; + new_system_region->flags = SHARED_REGION_SYSTEM; + + /* grab an extra reference for the caller */ + /* remember to grab before call to update */ + shared_region_mapping_ref(new_system_region); + old_default_env = update_default_shared_region(new_system_region); /* hold an extra reference because these are the system */ /* shared regions. 
*/ - shared_region_mapping_ref(system_shared_region); - vm_set_shared_region(current_task(), system_shared_region); - + if(old_default_env) + shared_region_mapping_dealloc(old_default_env); + if(com_mapping_resource == NULL) { + shared_com_boot_time_init(); + } + shared_region_mapping_ref(com_mapping_resource); + new_system_region->next = com_mapping_resource; + vm_set_shared_region(current_task(), new_system_region); } @@ -229,7 +468,7 @@ shared_file_init( for (b = *mapping_array, alloced = 0; alloced < (hash_size + - round_page(sizeof(struct sf_mapping))); + round_page_32(sizeof(struct sf_mapping))); alloced += PAGE_SIZE, b += PAGE_SIZE) { vm_object_lock(buf_object); p = vm_page_alloc(buf_object, alloced); @@ -238,9 +477,11 @@ shared_file_init( } p->busy = FALSE; vm_object_unlock(buf_object); - pmap_enter(kernel_pmap, b, p->phys_addr, + pmap_enter(kernel_pmap, b, p->phys_page, VM_PROT_READ | VM_PROT_WRITE, - VM_WIMG_USE_DEFAULT, TRUE); + ((unsigned int)(p->object->wimg_bits)) + & VM_WIMG_MASK, + TRUE); } @@ -260,20 +501,24 @@ shared_file_init( if (vm_map_wire(kernel_map, *mapping_array, *mapping_array + - (hash_size + round_page(sizeof(struct sf_mapping))), + (hash_size + round_page_32(sizeof(struct sf_mapping))), VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) { panic("shared_file_init: No memory for data table"); } lsf_zone = zinit(sizeof(struct load_file_ele), data_table_size - - (hash_size + round_page(sizeof(struct sf_mapping))), + (hash_size + round_page_32(sizeof(struct sf_mapping))), 0, "load_file_server"); zone_change(lsf_zone, Z_EXHAUST, TRUE); zone_change(lsf_zone, Z_COLLECT, FALSE); zone_change(lsf_zone, Z_EXPAND, FALSE); zone_change(lsf_zone, Z_FOREIGN, TRUE); + + /* initialize the global default environment lock */ + mutex_init(&default_regions_list_lock_data, ETAP_NO_TRACE); + } else { *mapping_array = shared_file_mapping_array; } @@ -336,7 +581,7 @@ copyin_shared_file( hash_table_size = (shared_file_header->hash_size) * sizeof(struct queue_entry); hash_table_offset = hash_table_size + - round_page(sizeof(struct sf_mapping)); + round_page_32(sizeof(struct sf_mapping)); for (i = 0; i < shared_file_header->hash_size; i++) queue_init(&shared_file_header->hash[i]); @@ -399,7 +644,7 @@ copyin_shared_file( alternate = (*flags & ALTERNATE_LOAD_SITE) ? 
TRUE : FALSE; if (file_entry = lsf_hash_lookup(shared_file_header->hash, - (void *) file_object, shared_file_header->hash_size, + (void *) file_object, mappings[0].file_offset, shared_file_header->hash_size, alternate, sm_info)) { /* File is loaded, check the load manifest for exact match */ /* we simplify by requiring that the elements be the same */ @@ -452,18 +697,28 @@ copyin_shared_file( *flags = 0; if(ret == KERN_NO_SPACE) { shared_region_mapping_t regions; + shared_region_mapping_t system_region; regions = (shared_region_mapping_t)sm_info->self; regions->flags |= SHARED_REGION_FULL; - if(regions == system_shared_region) { + system_region = lookup_default_shared_region( + regions->fs_base, regions->system); + if(system_region == regions) { shared_region_mapping_t new_system_shared_regions; - shared_file_boot_time_init(); + shared_file_boot_time_init( + regions->fs_base, regions->system); /* current task must stay with its current */ /* regions, drop count on system_shared_region */ /* and put back our original set */ - vm_get_shared_region(current_task(), &new_system_shared_regions); - shared_region_mapping_dealloc(new_system_shared_regions); + vm_get_shared_region(current_task(), + &new_system_shared_regions); + shared_region_mapping_dealloc_lock( + new_system_shared_regions, 0); vm_set_shared_region(current_task(), regions); } + if(system_region != NULL) { + shared_region_mapping_dealloc_lock( + system_region, 0); + } } mutex_unlock(&shared_file_header->lock); return ret; @@ -477,6 +732,7 @@ static load_struct_t * lsf_hash_lookup( queue_head_t *hash_table, void *file_object, + vm_offset_t recognizableOffset, int size, boolean_t alternate, shared_region_task_mappings_t sm_info) @@ -490,7 +746,12 @@ lsf_hash_lookup( for (entry = (load_struct_t *)queue_first(bucket); !queue_end(bucket, &entry->links); entry = (load_struct_t *)queue_next(&entry->links)) { - if (entry->file_object == (int)file_object) { + + if ((entry->file_object == (int) file_object) && + (entry->file_offset != recognizableOffset)) { + } + if ((entry->file_object == (int)file_object) && + (entry->file_offset == recognizableOffset)) { target_region = (shared_region_mapping_t)sm_info->self; depth = target_region->depth; while(target_region) { @@ -521,10 +782,11 @@ lsf_hash_lookup( return (load_struct_t *)0; } -load_struct_t * -lsf_remove_regions_mappings( +__private_extern__ load_struct_t * +lsf_remove_regions_mappings_lock( shared_region_mapping_t region, - shared_region_task_mappings_t sm_info) + shared_region_task_mappings_t sm_info, + int need_lock) { int i; register queue_t bucket; @@ -535,9 +797,11 @@ lsf_remove_regions_mappings( shared_file_header = (shared_file_info_t *)sm_info->region_mappings; - mutex_lock(&shared_file_header->lock); + if (need_lock) + mutex_lock(&shared_file_header->lock); if(shared_file_header->hash_init == FALSE) { - mutex_unlock(&shared_file_header->lock); + if (need_lock) + mutex_unlock(&shared_file_header->lock); return NULL; } for(i = 0; ihash_size; i++) { @@ -552,7 +816,21 @@ lsf_remove_regions_mappings( entry = next_entry; } } - mutex_unlock(&shared_file_header->lock); + if (need_lock) + mutex_unlock(&shared_file_header->lock); +} + +/* + * Symbol compatability; we believe shared_region_mapping_dealloc() is the + * only caller. Remove this stub function and the corresponding symbol + * export for Merlot. 
+ */ +load_struct_t * +lsf_remove_regions_mappings( + shared_region_mapping_t region, + shared_region_task_mappings_t sm_info) +{ + return lsf_remove_regions_mappings_lock(region, sm_info, 1); } /* Removes a map_list, (list of loaded extents) for a file from */ @@ -644,6 +922,7 @@ lsf_load( entry->links.next = (queue_entry_t) 0; entry->regions_instance = (shared_region_mapping_t)sm_info->self; entry->depth=((shared_region_mapping_t)sm_info->self)->depth; + entry->file_offset = mappings[0].file_offset; lsf_hash_insert(entry, sm_info); tptr = &(entry->mappings); @@ -667,12 +946,15 @@ lsf_load( + mappings[i].size; } } - if((alternate_load_next + round_page(max_loadfile_offset)) >= + if((alternate_load_next + round_page_32(max_loadfile_offset)) >= (sm_info->data_size - (sm_info->data_size>>9))) { + entry->base_address = + (*base_address) & SHARED_TEXT_REGION_MASK; + lsf_unload(file_object, entry->base_address, sm_info); return KERN_NO_SPACE; } - alternate_load_next += round_page(max_loadfile_offset); + alternate_load_next += round_page_32(max_loadfile_offset); } else { if (((*base_address) & SHARED_TEXT_REGION_MASK) > @@ -686,6 +968,51 @@ lsf_load( entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK; + // Sanity check the mappings -- make sure we don't stray across the + // alternate boundary. If any bit of a library that we're not trying + // to load in the alternate load space strays across that boundary, + // return KERN_INVALID_ARGUMENT immediately so that the caller can + // try to load it in the alternate shared area. We do this to avoid + // a nasty case: if a library tries to load so that it crosses the + // boundary, it'll occupy a bit of the alternate load area without + // the kernel being aware. When loads into the alternate load area + // at the first free address are tried, the load will fail. + // Thus, a single library straddling the boundary causes all sliding + // libraries to fail to load. This check will avoid such a case. + + if (!(flags & ALTERNATE_LOAD_SITE)) { + for (i = 0; ibase_address; + region_end = (mappings[i].size + region_start); + if (region_end >= SHARED_ALTERNATE_LOAD_BASE) { + // No library is permitted to load so any bit of it is in the + // shared alternate space. If they want it loaded, they can put + // it in the alternate space explicitly. +printf("Library trying to load across alternate shared region boundary -- denied!\n"); + lsf_unload(file_object, entry->base_address, sm_info); + return KERN_INVALID_ARGUMENT; + } + } else { + // rw section? + region_mask = SHARED_DATA_REGION_MASK; + region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address; + region_end = (mappings[i].size + region_start); + if (region_end >= SHARED_ALTERNATE_LOAD_BASE) { +printf("Library trying to load across alternate shared region boundary-- denied!\n"); + lsf_unload(file_object, entry->base_address, sm_info); + return KERN_INVALID_ARGUMENT; + } + } // write? + } // for + } // if not alternate load site. 
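/*
 * Illustrative sketch, not part of the patch: the boundary check above can
 * be modelled in isolation. It assumes SHARED_ALTERNATE_LOAD_BASE carries
 * the 0x9000000 value this patch substitutes for the old literal in the
 * shared_file_create_system_region hunk, and that both region masks are
 * 0x0FFFFFFF (consistent with the 256MB submap checks elsewhere in the
 * patch); mapping_straddles_alternate() is a hypothetical name, not a
 * kernel symbol. A mapping is rejected as soon as its end reaches the
 * alternate area, so a straddling library cannot silently occupy part of it.
 */
#include <stdint.h>

#define SHARED_ALTERNATE_LOAD_BASE 0x09000000u /* assumed, per this patch */
#define SHARED_TEXT_REGION_MASK    0x0FFFFFFFu /* assumed */
#define SHARED_DATA_REGION_MASK    0x0FFFFFFFu /* assumed */

/* Returns nonzero when a mapping would cross into the alternate load area. */
static int
mapping_straddles_alternate(uint32_t mapping_offset, uint32_t size,
    uint32_t base_address, int writable)
{
	/* Writable sections are masked against the data region,
	 * read-only/executable sections against the text region. */
	uint32_t mask = writable ? SHARED_DATA_REGION_MASK
	    : SHARED_TEXT_REGION_MASK;
	uint32_t region_start = (mapping_offset & mask) + base_address;
	uint32_t region_end = region_start + size;

	return region_end >= SHARED_ALTERNATE_LOAD_BASE;
}

int
main(void)
{
	/* A text mapping ending exactly at the boundary is rejected. */
	return mapping_straddles_alternate(0x08F00000u, 0x00100000u, 0, 0)
	    ? 0 : 1;
}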
+ /* copyin mapped file data */ for(i = 0; iip_kobject) ->backing.map, target_address, mappings[i].size); lsf_unload(file_object, entry->base_address, sm_info); @@ -747,13 +1074,13 @@ lsf_load( } vm_map_protect(((vm_named_entry_t)local_map->ip_kobject) ->backing.map, target_address, - round_page(target_address + mappings[i].size), + round_page_32(target_address + mappings[i].size), (mappings[i].protection & (VM_PROT_READ | VM_PROT_EXECUTE)), TRUE); vm_map_protect(((vm_named_entry_t)local_map->ip_kobject) ->backing.map, target_address, - round_page(target_address + mappings[i].size), + round_page_32(target_address + mappings[i].size), (mappings[i].protection & (VM_PROT_READ | VM_PROT_EXECUTE)), FALSE); diff --git a/osfmk/vm/vm_shared_memory_server.h b/osfmk/vm/vm_shared_memory_server.h index 31c6031fb..018595916 100644 --- a/osfmk/vm/vm_shared_memory_server.h +++ b/osfmk/vm/vm_shared_memory_server.h @@ -55,12 +55,20 @@ struct shared_region_task_mappings { vm_offset_t client_base; vm_offset_t alternate_base; vm_offset_t alternate_next; + unsigned int fs_base; + unsigned int system; int flags; vm_offset_t self; }; -#define SHARED_REGION_SYSTEM 0x1 -#define SHARED_REGION_FULL 0x2 +#define SHARED_REGION_SYSTEM 0x1 // Default env for system and fs_root +#define SHARED_REGION_FULL 0x2 // Shared regions are full +#define SHARED_REGION_STALE 0x4 // Indicates no longer in default list + + +/* defines for default environment, and co-resident systems */ + +#define ENV_DEFAULT_ROOT 0 typedef struct shared_region_task_mappings *shared_region_task_mappings_t; typedef struct shared_region_mapping *shared_region_mapping_t; @@ -93,6 +101,7 @@ struct load_struct { vm_offset_t base_address; int mapping_cnt; loaded_mapping_t *mappings; + vm_offset_t file_offset; // start of file we mapped in }; typedef struct load_struct load_struct_t; @@ -125,6 +134,8 @@ typedef struct shared_region_object_chain *shared_region_object_chain_t; struct shared_region_mapping { decl_mutex_data(, Lock) /* Synchronization */ int ref_count; + unsigned int fs_base; + unsigned int system; mach_port_t text_region; vm_size_t text_size; mach_port_t data_region; @@ -135,6 +146,7 @@ struct shared_region_mapping { vm_offset_t alternate_next; int flags; int depth; + shared_region_mapping_t default_env_list; shared_region_object_chain_t object_chain; shared_region_mapping_t self; shared_region_mapping_t next; @@ -171,6 +183,8 @@ extern kern_return_t shared_region_mapping_info( vm_offset_t *client_base, vm_offset_t *alternate_base, vm_offset_t *alternate_next, + unsigned int *fs_base, + unsigned int *system, int *flags, shared_region_mapping_t *next); @@ -191,6 +205,10 @@ extern kern_return_t shared_region_mapping_ref( extern kern_return_t shared_region_mapping_dealloc( shared_region_mapping_t shared_region); +__private_extern__ kern_return_t shared_region_mapping_dealloc_lock( + shared_region_mapping_t shared_region, + int need_lock); + extern kern_return_t shared_region_object_chain_attach( shared_region_mapping_t target_region, shared_region_mapping_t object_chain); @@ -203,6 +221,20 @@ extern kern_return_t vm_set_shared_region( task_t task, shared_region_mapping_t shared_region); +extern shared_region_mapping_t update_default_shared_region( + shared_region_mapping_t new_system_region); + +extern shared_region_mapping_t lookup_default_shared_region( + unsigned int fs_base, + unsigned int system); + +extern void remove_default_shared_region( + shared_region_mapping_t system_region); + +__private_extern__ void 
remove_default_shared_region_lock( + shared_region_mapping_t system_region, + int need_lock); + extern unsigned int lsf_mapping_pool_gauge(); #endif /* __APPLE_API_PRIVATE */ diff --git a/osfmk/vm/vm_user.c b/osfmk/vm/vm_user.c index 83ec5a11b..a1f18add3 100644 --- a/osfmk/vm/vm_user.c +++ b/osfmk/vm/vm_user.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -83,6 +83,11 @@ #include #include +__private_extern__ load_struct_t * +lsf_remove_regions_mappings_lock( + shared_region_mapping_t region, + shared_region_task_mappings_t sm_info, + int need_lock); vm_size_t upl_offset_to_pagelist = 0; @@ -117,8 +122,8 @@ vm_allocate( if (anywhere) *addr = vm_map_min(map); else - *addr = trunc_page(*addr); - size = round_page(size); + *addr = trunc_page_32(*addr); + size = round_page_32(size); if (size == 0) { return(KERN_INVALID_ARGUMENT); } @@ -155,8 +160,8 @@ vm_deallocate( if (size == (vm_offset_t) 0) return(KERN_SUCCESS); - return(vm_map_remove(map, trunc_page(start), - round_page(start+size), VM_MAP_NO_FLAGS)); + return(vm_map_remove(map, trunc_page_32(start), + round_page_32(start+size), VM_MAP_NO_FLAGS)); } /* @@ -177,8 +182,8 @@ vm_inherit( return(KERN_INVALID_ARGUMENT); return(vm_map_inherit(map, - trunc_page(start), - round_page(start+size), + trunc_page_32(start), + round_page_32(start+size), new_inheritance)); } @@ -200,8 +205,8 @@ vm_protect( return(KERN_INVALID_ARGUMENT); return(vm_map_protect(map, - trunc_page(start), - round_page(start+size), + trunc_page_32(start), + round_page_32(start+size), new_protection, set_maximum)); } @@ -458,8 +463,8 @@ vm_map_64( vm_map_entry_t map_entry; named_entry_unlock(named_entry); - *address = trunc_page(*address); - size = round_page(size); + *address = trunc_page_32(*address); + size = round_page_64(size); vm_object_reference(vm_submap_object); if ((result = vm_map_enter(target_map, address, size, mask, flags, @@ -504,31 +509,79 @@ vm_map_64( vm_object_reference(named_entry->object); object = named_entry->object; } else { - object = vm_object_enter(named_entry->backing.pager, - named_entry->size, - named_entry->internal, - FALSE, - FALSE); + unsigned int access; + vm_prot_t protections; + unsigned int wimg_mode; + boolean_t cache_attr; + + protections = named_entry->protection + & VM_PROT_ALL; + access = GET_MAP_MEM(named_entry->protection); + + object = vm_object_enter( + named_entry->backing.pager, + named_entry->size, + named_entry->internal, + FALSE, + FALSE); if (object == VM_OBJECT_NULL) { named_entry_unlock(named_entry); return(KERN_INVALID_OBJECT); } - object->true_share = TRUE; + + vm_object_lock(object); + + /* create an extra ref for the named entry */ + vm_object_reference_locked(object); named_entry->object = object; named_entry_unlock(named_entry); - /* create an extra reference for the named entry */ - vm_object_reference(named_entry->object); - /* wait for object (if any) to be ready */ - if (object != VM_OBJECT_NULL) { - vm_object_lock(object); + + wimg_mode = object->wimg_bits; + if(access == MAP_MEM_IO) { + wimg_mode = VM_WIMG_IO; + } else if (access == MAP_MEM_COPYBACK) { + wimg_mode = VM_WIMG_USE_DEFAULT; + } else if (access == MAP_MEM_WTHRU) { + wimg_mode = VM_WIMG_WTHRU; + } else if (access == MAP_MEM_WCOMB) { + wimg_mode = VM_WIMG_WCOMB; + } + if ((wimg_mode == VM_WIMG_IO) + || (wimg_mode == VM_WIMG_WCOMB)) + cache_attr = TRUE; + else + cache_attr = FALSE; + + if 
(named_entry->backing.pager) { + /* wait for object (if any) to be ready */ while (!object->pager_ready) { vm_object_wait(object, - VM_OBJECT_EVENT_PAGER_READY, - THREAD_UNINT); + VM_OBJECT_EVENT_PAGER_READY, + THREAD_UNINT); vm_object_lock(object); } - vm_object_unlock(object); } + if(object->wimg_bits != wimg_mode) { + vm_page_t p; + + vm_object_paging_wait(object, THREAD_UNINT); + + object->wimg_bits = wimg_mode; + queue_iterate(&object->memq, p, vm_page_t, listq) { + if (!p->fictitious) { + pmap_page_protect( + p->phys_page, + VM_PROT_NONE); + if(cache_attr) + pmap_sync_caches_phys( + p->phys_page); + } + } + } + object->true_share = TRUE; + if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) + object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; + vm_object_unlock(object); } } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) { /* @@ -565,8 +618,8 @@ vm_map_64( return (KERN_INVALID_OBJECT); } - *address = trunc_page(*address); - size = round_page(size); + *address = trunc_page_32(*address); + size = round_page_64(size); /* * Perform the copy if requested @@ -629,6 +682,7 @@ vm_map_64( } /* temporary, until world build */ +kern_return_t vm_map( vm_map_t target_map, vm_offset_t *address, @@ -642,7 +696,7 @@ vm_map( vm_prot_t max_protection, vm_inherit_t inheritance) { - vm_map_64(target_map, address, size, mask, flags, + return vm_map_64(target_map, address, size, mask, flags, port, (vm_object_offset_t)offset, copy, cur_protection, max_protection, inheritance); } @@ -682,11 +736,11 @@ vm_wire( return KERN_INVALID_ARGUMENT; if (access != VM_PROT_NONE) { - rc = vm_map_wire(map, trunc_page(start), - round_page(start+size), access, TRUE); + rc = vm_map_wire(map, trunc_page_32(start), + round_page_32(start+size), access, TRUE); } else { - rc = vm_map_unwire(map, trunc_page(start), - round_page(start+size), TRUE); + rc = vm_map_unwire(map, trunc_page_32(start), + round_page_32(start+size), TRUE); } return rc; } @@ -747,8 +801,8 @@ vm_msync( /* * align address and size on page boundaries */ - size = round_page(address + size) - trunc_page(address); - address = trunc_page(address); + size = round_page_32(address + size) - trunc_page_32(address); + address = trunc_page_32(address); if (map == VM_MAP_NULL) return(KERN_INVALID_TASK); @@ -1006,8 +1060,8 @@ vm_behavior_set( if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT); - return(vm_map_behavior_set(map, trunc_page(start), - round_page(start+size), new_behavior)); + return(vm_map_behavior_set(map, trunc_page_32(start), + round_page_32(start+size), new_behavior)); } #if VM_CPM @@ -1074,8 +1128,8 @@ vm_allocate_cpm( if (anywhere) *addr = vm_map_min(map); else - *addr = trunc_page(*addr); - size = round_page(size); + *addr = trunc_page_32(*addr); + size = round_page_32(size); if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS) return kr; @@ -1103,7 +1157,7 @@ vm_allocate_cpm( assert(!m->pageout); assert(!m->tabled); assert(m->busy); - assert(m->phys_addr>=avail_start && m->phys_addr<=avail_end); + assert(m->phys_page>=avail_start && m->phys_page<=avail_end); m->busy = FALSE; vm_page_insert(m, cpm_obj, offset); @@ -1178,7 +1232,8 @@ vm_allocate_cpm( vm_object_unlock(cpm_obj); assert(m != VM_PAGE_NULL); PMAP_ENTER(pmap, va, m, VM_PROT_ALL, - VM_WIMG_USE_DEFAULT, TRUE); + ((unsigned int)(m->object->wimg_bits)) & VM_WIMG_MASK, + TRUE); } #if MACH_ASSERT @@ -1203,7 +1258,7 @@ vm_allocate_cpm( assert(!m->precious); assert(!m->clustered); if (offset != 0) { - if (m->phys_addr != prev_addr + PAGE_SIZE) { + if (m->phys_page != prev_addr + 1) { 
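/*
 * Contiguity checks in this patch (vm_page_verify_contiguous,
 * vm_page_find_contiguous, and this MACH_ASSERT walk) now compare page
 * frame numbers: m->phys_page is a ppnum_t, so adjacent physical pages
 * differ by 1, where the old byte-addressed m->phys_addr differed by
 * PAGE_SIZE.
 */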
printf("start 0x%x end 0x%x va 0x%x\n", start, end, va); printf("obj 0x%x off 0x%x\n", cpm_obj, offset); @@ -1212,7 +1267,7 @@ vm_allocate_cpm( panic("vm_allocate_cpm: pages not contig!"); } } - prev_addr = m->phys_addr; + prev_addr = m->phys_page; } #endif /* MACH_ASSERT */ @@ -1252,6 +1307,7 @@ mach_memory_object_memory_entry_64( memory_object_t pager, ipc_port_t *entry_handle) { + unsigned int access; vm_named_entry_t user_object; ipc_port_t user_handle; ipc_port_t previous; @@ -1288,7 +1344,9 @@ mach_memory_object_memory_entry_64( user_object->size = size; user_object->offset = 0; user_object->backing.pager = pager; - user_object->protection = permission; + user_object->protection = permission & VM_PROT_ALL; + access = GET_MAP_MEM(permission); + SET_MAP_MEM(access, user_object->protection); user_object->internal = internal; user_object->is_sub_map = FALSE; user_object->ref_count = 1; @@ -1351,10 +1409,85 @@ mach_make_memory_entry_64( vm_object_size_t mappable_size; vm_object_size_t total_size; + unsigned int access; + vm_prot_t protections; + unsigned int wimg_mode; + boolean_t cache_attr; + + protections = permission & VM_PROT_ALL; + access = GET_MAP_MEM(permission); + offset = trunc_page_64(offset); *size = round_page_64(*size); - + + if((parent_entry != NULL) + && (permission & MAP_MEM_ONLY)) { + vm_named_entry_t parent_object; + if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) { + return KERN_INVALID_ARGUMENT; + } + parent_object = (vm_named_entry_t)parent_entry->ip_kobject; + object = parent_object->object; + if(object != VM_OBJECT_NULL) + wimg_mode = object->wimg_bits; + if((access != GET_MAP_MEM(parent_object->protection)) && + !(parent_object->protection & VM_PROT_WRITE)) { + return KERN_INVALID_RIGHT; + } + if(access == MAP_MEM_IO) { + SET_MAP_MEM(access, parent_object->protection); + wimg_mode = VM_WIMG_IO; + } else if (access == MAP_MEM_COPYBACK) { + SET_MAP_MEM(access, parent_object->protection); + wimg_mode = VM_WIMG_DEFAULT; + } else if (access == MAP_MEM_WTHRU) { + SET_MAP_MEM(access, parent_object->protection); + wimg_mode = VM_WIMG_WTHRU; + } else if (access == MAP_MEM_WCOMB) { + SET_MAP_MEM(access, parent_object->protection); + wimg_mode = VM_WIMG_WCOMB; + } + if(object && + (access != MAP_MEM_NOOP) && + (!(object->nophyscache))) { + if(object->wimg_bits != wimg_mode) { + vm_page_t p; + if ((wimg_mode == VM_WIMG_IO) + || (wimg_mode == VM_WIMG_WCOMB)) + cache_attr = TRUE; + else + cache_attr = FALSE; + vm_object_lock(object); + while(object->paging_in_progress) { + vm_object_unlock(object); + vm_object_wait(object, + VM_OBJECT_EVENT_PAGING_IN_PROGRESS, + THREAD_UNINT); + vm_object_lock(object); + } + object->wimg_bits = wimg_mode; + queue_iterate(&object->memq, + p, vm_page_t, listq) { + if (!p->fictitious) { + pmap_page_protect( + p->phys_page, + VM_PROT_NONE); + if(cache_attr) + pmap_sync_caches_phys( + p->phys_page); + } + } + vm_object_unlock(object); + } + } + return KERN_SUCCESS; + } + + if(permission & MAP_MEM_ONLY) { + return KERN_INVALID_ARGUMENT; + } + user_object = (vm_named_entry_t) kalloc(sizeof (struct vm_named_entry)); if(user_object == NULL) @@ -1382,11 +1515,28 @@ mach_make_memory_entry_64( user_object->backing.pager = NULL; user_object->ref_count = 1; + if(permission & MAP_MEM_NAMED_CREATE) { + user_object->object = NULL; + user_object->internal = TRUE; + user_object->is_sub_map = FALSE; + user_object->offset = 0; + user_object->protection = protections; + SET_MAP_MEM(access, user_object->protection); + user_object->size = *size; + + /* user_object 
pager and internal fields are not used */ + /* when the object field is filled in. */ + + ipc_kobject_set(user_handle, (ipc_kobject_t) user_object, + IKOT_NAMED_ENTRY); + *object_handle = user_handle; + return KERN_SUCCESS; + } + if(parent_entry == NULL) { /* Create a named object based on address range within the task map */ /* Go find the object at given address */ - permission &= VM_PROT_ALL; vm_map_lock_read(target_map); /* get the object associated with the target address */ @@ -1394,14 +1544,14 @@ mach_make_memory_entry_64( /* that requested by the caller */ kr = vm_map_lookup_locked(&target_map, offset, - permission, &version, + protections, &version, &object, &obj_off, &prot, &wired, &behavior, &lo_offset, &hi_offset, &pmap_map); if (kr != KERN_SUCCESS) { vm_map_unlock_read(target_map); goto make_mem_done; } - if (((prot & permission) != permission) + if (((prot & protections) != protections) || (object == kernel_object)) { kr = KERN_INVALID_RIGHT; vm_object_unlock(object); @@ -1449,6 +1599,7 @@ redo_lookup: goto make_mem_done; } if(map_entry->wired_count) { + /* JMM - The check below should be reworked instead. */ object->true_share = TRUE; } break; @@ -1465,7 +1616,7 @@ redo_lookup: local_offset += map_entry->offset; } } - if(((map_entry->max_protection) & permission) != permission) { + if(((map_entry->max_protection) & protections) != protections) { kr = KERN_INVALID_RIGHT; vm_object_unlock(object); vm_map_unlock_read(target_map); @@ -1492,9 +1643,12 @@ redo_lookup: (next_entry->vme_prev->vme_end - next_entry->vme_prev->vme_start))) { if(((next_entry->max_protection) - & permission) != permission) { + & protections) != protections) { break; } + if (next_entry->needs_copy != + map_entry->needs_copy) + break; mappable_size += next_entry->vme_end - next_entry->vme_start; total_size += next_entry->vme_end @@ -1522,7 +1676,13 @@ redo_lookup: goto redo_lookup; } - + /* + * JMM - We need to avoid coming here when the object + * is wired by anybody, not just the current map. Why + * couldn't we use the standard vm_object_copy_quickly() + * approach here? + */ + /* create a shadow object */ vm_object_shadow(&map_entry->object.vm_object, &map_entry->offset, total_size); @@ -1543,8 +1703,11 @@ redo_lookup: map_entry->needs_copy = FALSE; while (total_size) { if(next_entry->object.vm_object == object) { + shadow_object->ref_count++; + vm_object_res_reference(shadow_object); next_entry->object.vm_object = shadow_object; + vm_object_deallocate(object); next_entry->offset = next_entry->vme_prev->offset + (next_entry->vme_prev->vme_end @@ -1580,9 +1743,47 @@ redo_lookup: /* target of ipc's, etc. The code above, protecting */ /* against delayed copy, etc. is mostly defensive. 
*/ - + wimg_mode = object->wimg_bits; + if(!(object->nophyscache)) { + if(access == MAP_MEM_IO) { + wimg_mode = VM_WIMG_IO; + } else if (access == MAP_MEM_COPYBACK) { + wimg_mode = VM_WIMG_USE_DEFAULT; + } else if (access == MAP_MEM_WTHRU) { + wimg_mode = VM_WIMG_WTHRU; + } else if (access == MAP_MEM_WCOMB) { + wimg_mode = VM_WIMG_WCOMB; + } + } object->true_share = TRUE; + if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) + object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; + + /* we now point to this object, hold on to it */ + vm_object_reference_locked(object); + vm_map_unlock_read(target_map); + if(pmap_map != target_map) + vm_map_unlock_read(pmap_map); + + if(object->wimg_bits != wimg_mode) { + vm_page_t p; + + vm_object_paging_wait(object, THREAD_UNINT); + + queue_iterate(&object->memq, + p, vm_page_t, listq) { + if (!p->fictitious) { + pmap_page_protect( + p->phys_page, + VM_PROT_NONE); + if(cache_attr) + pmap_sync_caches_phys( + p->phys_page); + } + } + object->wimg_bits = wimg_mode; + } user_object->object = object; user_object->internal = object->internal; user_object->is_sub_map = FALSE; @@ -1603,15 +1804,10 @@ redo_lookup: /* user_object pager and internal fields are not used */ /* when the object field is filled in. */ - object->ref_count++; /* we now point to this object, hold on */ - vm_object_res_reference(object); vm_object_unlock(object); ipc_kobject_set(user_handle, (ipc_kobject_t) user_object, IKOT_NAMED_ENTRY); *object_handle = user_handle; - vm_map_unlock_read(target_map); - if(pmap_map != target_map) - vm_map_unlock_read(pmap_map); return KERN_SUCCESS; } else { @@ -1623,10 +1819,6 @@ redo_lookup: goto make_mem_done; } parent_object = (vm_named_entry_t)parent_entry->ip_kobject; - if(permission & parent_object->protection != permission) { - kr = KERN_INVALID_ARGUMENT; - goto make_mem_done; - } if((offset + *size) > parent_object->size) { kr = KERN_INVALID_ARGUMENT; goto make_mem_done; @@ -1635,7 +1827,12 @@ redo_lookup: user_object->object = parent_object->object; user_object->size = *size; user_object->offset = parent_object->offset + offset; - user_object->protection = permission; + user_object->protection = parent_object->protection; + user_object->protection &= ~VM_PROT_ALL; + user_object->protection = permission & VM_PROT_ALL; + if(access != MAP_MEM_NOOP) { + SET_MAP_MEM(access, user_object->protection); + } if(parent_object->is_sub_map) { user_object->backing.map = parent_object->backing.map; vm_map_lock(user_object->backing.map); @@ -1653,6 +1850,10 @@ redo_lookup: vm_object_reference(parent_object->object); vm_object_lock(parent_object->object); parent_object->object->true_share = TRUE; + if (parent_object->object->copy_strategy == + MEMORY_OBJECT_COPY_SYMMETRIC) + parent_object->object->copy_strategy = + MEMORY_OBJECT_COPY_DELAY; vm_object_unlock(parent_object->object); } ipc_kobject_set(user_handle, (ipc_kobject_t) user_object, @@ -1702,16 +1903,12 @@ vm_region_object_create( ipc_port_t user_handle; kern_return_t kr; - pmap_t new_pmap = pmap_create((vm_size_t) 0); ipc_port_t previous; vm_map_t new_map; - if(new_pmap == PMAP_NULL) - return KERN_FAILURE; user_object = (vm_named_entry_t) kalloc(sizeof (struct vm_named_entry)); if(user_object == NULL) { - pmap_destroy(new_pmap); return KERN_FAILURE; } named_entry_lock_init(user_object); @@ -1738,7 +1935,7 @@ vm_region_object_create( /* Create a named object based on a submap of specified size */ - new_map = vm_map_create(new_pmap, 0, size, TRUE); + new_map = vm_map_create(0, 0, size, TRUE); 
user_object->backing.map = new_map; @@ -1801,7 +1998,15 @@ kern_return_t vm_map_region_replace( vm_map_unlock(target_map); return KERN_SUCCESS; } - vm_map_lookup_entry(target_map, addr, &entry); + } + if ((entry->use_pmap) && + (new_submap->pmap == NULL)) { + new_submap->pmap = pmap_create((vm_size_t) 0); + if(new_submap->pmap == PMAP_NULL) { + vm_map_unlock(old_submap); + vm_map_unlock(target_map); + return(KERN_NO_SPACE); + } } addr = entry->vme_start; vm_map_reference(old_submap); @@ -1810,7 +2015,7 @@ kern_return_t vm_map_region_replace( if((entry->is_sub_map) && (entry->object.sub_map == old_submap)) { if(entry->use_pmap) { - if((start & 0xfffffff) || + if((start & 0x0fffffff) || ((end - start) != 0x10000000)) { vm_map_unlock(old_submap); vm_map_deallocate(old_submap); @@ -1828,14 +2033,15 @@ kern_return_t vm_map_region_replace( } if(nested_pmap) { #ifndef i386 - pmap_unnest(target_map->pmap, start, end - start); + pmap_unnest(target_map->pmap, (addr64_t)start); if(target_map->mapped) { vm_map_submap_pmap_clean(target_map, start, end, old_submap, 0); } pmap_nest(target_map->pmap, new_submap->pmap, - start, end - start); -#endif i386 + (addr64_t)start, (addr64_t)start, + (addr64_t)(end - start)); +#endif /* i386 */ } else { vm_map_submap_pmap_clean(target_map, start, end, old_submap, 0); @@ -1946,12 +2152,12 @@ restart_page_query: if (m->dirty) *disposition |= VM_PAGE_QUERY_PAGE_DIRTY; - else if(pmap_is_modified(m->phys_addr)) + else if(pmap_is_modified(m->phys_page)) *disposition |= VM_PAGE_QUERY_PAGE_DIRTY; if (m->reference) *disposition |= VM_PAGE_QUERY_PAGE_REF; - else if(pmap_is_referenced(m->phys_addr)) + else if(pmap_is_referenced(m->phys_page)) *disposition |= VM_PAGE_QUERY_PAGE_REF; vm_object_unlock(object); @@ -2052,6 +2258,10 @@ REDISCOVER_ENTRY: entry->offset = 0; } if (!(caller_flags & UPL_COPYOUT_FROM)) { + if (!(entry->protection & VM_PROT_WRITE)) { + vm_map_unlock(map); + return KERN_PROTECTION_FAILURE; + } if (entry->needs_copy) { vm_map_t local_map; vm_object_t object; @@ -2119,24 +2329,6 @@ REDISCOVER_ENTRY: flags = MEMORY_OBJECT_COPY_SYNC; } - if((local_object->paging_offset) && - (local_object->pager == 0)) { - /* - * do a little clean-up for our unorthodox - * entry into a pager call from a non-pager - * context. Normally the pager code - * assumes that an object it has been called - * with has a backing pager and so does - * not bother to check the pager field - * before relying on the paging_offset - */ - vm_object_lock(local_object); - if (local_object->pager == 0) { - local_object->paging_offset = 0; - } - vm_object_unlock(local_object); - } - if (entry->object.vm_object->shadow && entry->object.vm_object->copy) { vm_object_lock_request( @@ -2144,8 +2336,7 @@ REDISCOVER_ENTRY: (vm_object_offset_t) ((offset - local_start) + local_offset) + - local_object->shadow_offset + - local_object->paging_offset, + local_object->shadow_offset, *upl_size, FALSE, MEMORY_OBJECT_DATA_SYNC, VM_PROT_NO_CHANGE); @@ -2164,29 +2355,10 @@ REDISCOVER_ENTRY: vm_object_reference(local_object); vm_map_unlock(map); - if((local_object->paging_offset) && - (local_object->pager == 0)) { - /* - * do a little clean-up for our unorthodox - * entry into a pager call from a non-pager - * context. 
Normally the pager code - * assumes that an object it has been called - * with has a backing pager and so does - * not bother to check the pager field - * before relying on the paging_offset - */ - vm_object_lock(local_object); - if (local_object->pager == 0) { - local_object->paging_offset = 0; - } - vm_object_unlock(local_object); - } - vm_object_lock_request( local_object, (vm_object_offset_t) - ((offset - local_start) + local_offset) + - local_object->paging_offset, + ((offset - local_start) + local_offset), (vm_object_size_t)*upl_size, FALSE, MEMORY_OBJECT_DATA_SYNC, VM_PROT_NO_CHANGE); @@ -2211,14 +2383,27 @@ REDISCOVER_ENTRY: local_start = entry->vme_start; vm_object_reference(local_object); vm_map_unlock(map); - ret = (vm_object_upl_request(local_object, - (vm_object_offset_t) - ((offset - local_start) + local_offset), - *upl_size, - upl, - page_list, - count, - caller_flags)); + if(caller_flags & UPL_SET_IO_WIRE) { + ret = (vm_object_iopl_request(local_object, + (vm_object_offset_t) + ((offset - local_start) + + local_offset), + *upl_size, + upl, + page_list, + count, + caller_flags)); + } else { + ret = (vm_object_upl_request(local_object, + (vm_object_offset_t) + ((offset - local_start) + + local_offset), + *upl_size, + upl, + page_list, + count, + caller_flags)); + } vm_object_deallocate(local_object); return(ret); } @@ -2347,6 +2532,8 @@ shared_region_mapping_info( vm_offset_t *client_base, vm_offset_t *alt_base, vm_offset_t *alt_next, + unsigned int *fs_base, + unsigned int *system, int *flags, shared_region_mapping_t *next) { @@ -2361,6 +2548,8 @@ shared_region_mapping_info( *alt_base = shared_region->alternate_base; *alt_next = shared_region->alternate_next; *flags = shared_region->flags; + *fs_base = shared_region->fs_base; + *system = shared_region->system; *next = shared_region->next; shared_region_mapping_unlock(shared_region); @@ -2407,6 +2596,8 @@ shared_region_mapping_create( shared_region_mapping_lock_init((*shared_region)); (*shared_region)->text_region = text_region; (*shared_region)->text_size = text_size; + (*shared_region)->fs_base = ENV_DEFAULT_ROOT; + (*shared_region)->system = machine_slot[cpu_number()].cpu_type; (*shared_region)->data_region = data_region; (*shared_region)->data_size = data_size; (*shared_region)->region_mappings = region_mappings; @@ -2417,6 +2608,7 @@ shared_region_mapping_create( (*shared_region)->self = *shared_region; (*shared_region)->flags = 0; (*shared_region)->depth = 0; + (*shared_region)->default_env_list = NULL; (*shared_region)->alternate_base = alt_base; (*shared_region)->alternate_next = alt_next; return KERN_SUCCESS; @@ -2441,15 +2633,18 @@ shared_region_mapping_ref( return KERN_SUCCESS; } -kern_return_t -shared_region_mapping_dealloc( - shared_region_mapping_t shared_region) +__private_extern__ kern_return_t +shared_region_mapping_dealloc_lock( + shared_region_mapping_t shared_region, + int need_lock) { struct shared_region_task_mappings sm_info; shared_region_mapping_t next = NULL; + int ref_count; while (shared_region) { - if (hw_atomic_sub(&shared_region->ref_count, 1) == 0) { + if ((ref_count = + hw_atomic_sub(&shared_region->ref_count, 1)) == 0) { shared_region_mapping_lock(shared_region); sm_info.text_region = shared_region->text_region; @@ -2463,14 +2658,21 @@ shared_region_mapping_dealloc( sm_info.flags = shared_region->flags; sm_info.self = (vm_offset_t)shared_region; - lsf_remove_regions_mappings(shared_region, &sm_info); - pmap_remove(((vm_named_entry_t) + if(shared_region->region_mappings) { + 
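+			/*
+			 * Tear down any task mappings of this shared region
+			 * before its text/data ports are released below.
+			 */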
lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_lock); + } + if(((vm_named_entry_t) + (shared_region->text_region->ip_kobject)) + ->backing.map->pmap) { + pmap_remove(((vm_named_entry_t) (shared_region->text_region->ip_kobject)) ->backing.map->pmap, sm_info.client_base, sm_info.client_base + sm_info.text_size); + } ipc_port_release_send(shared_region->text_region); - ipc_port_release_send(shared_region->data_region); + if(shared_region->data_region) + ipc_port_release_send(shared_region->data_region); if (shared_region->object_chain) { next = shared_region->object_chain->object_chain_region; kfree((vm_offset_t)shared_region->object_chain, @@ -2483,13 +2685,31 @@ shared_region_mapping_dealloc( sizeof (struct shared_region_mapping)); shared_region = next; } else { + /* Stale indicates that a system region is no */ + /* longer in the default environment list. */ + if((ref_count == 1) && + (shared_region->flags & SHARED_REGION_SYSTEM) + && (shared_region->flags & ~SHARED_REGION_STALE)) { + remove_default_shared_region_lock(shared_region,need_lock); + } break; } } return KERN_SUCCESS; } -vm_offset_t +/* + * Stub function; always indicates that the lock needs to be taken in the + * call to lsf_remove_regions_mappings_lock(). + */ +kern_return_t +shared_region_mapping_dealloc( + shared_region_mapping_t shared_region) +{ + return shared_region_mapping_dealloc_lock(shared_region, 1); +} + +ppnum_t vm_map_get_phys_page( vm_map_t map, vm_offset_t offset) @@ -2497,7 +2717,7 @@ vm_map_get_phys_page( vm_map_entry_t entry; int ops; int flags; - vm_offset_t phys_addr = 0; + ppnum_t phys_page = 0; vm_object_t object; vm_map_lock(map); @@ -2530,7 +2750,9 @@ vm_map_get_phys_page( continue; } offset = entry->offset + (offset - entry->vme_start); - phys_addr = entry->object.vm_object->shadow_offset + offset; + phys_page = (ppnum_t) + ((entry->object.vm_object->shadow_offset + + offset) >> 12); break; } @@ -2552,7 +2774,7 @@ vm_map_get_phys_page( break; } } else { - phys_addr = dst_page->phys_addr; + phys_page = (ppnum_t)(dst_page->phys_page); vm_object_unlock(object); break; } @@ -2562,6 +2784,116 @@ vm_map_get_phys_page( } vm_map_unlock(map); - return phys_addr; + return phys_page; +} + + + +kern_return_t +kernel_object_iopl_request( + vm_named_entry_t named_entry, + memory_object_offset_t offset, + vm_size_t *upl_size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + int *flags) +{ + vm_object_t object; + kern_return_t ret; + + int caller_flags; + + caller_flags = *flags; + + /* a few checks to make sure user is obeying rules */ + if(*upl_size == 0) { + if(offset >= named_entry->size) + return(KERN_INVALID_RIGHT); + *upl_size = named_entry->size - offset; + } + if(caller_flags & UPL_COPYOUT_FROM) { + if((named_entry->protection & VM_PROT_READ) + != VM_PROT_READ) { + return(KERN_INVALID_RIGHT); + } + } else { + if((named_entry->protection & + (VM_PROT_READ | VM_PROT_WRITE)) + != (VM_PROT_READ | VM_PROT_WRITE)) { + return(KERN_INVALID_RIGHT); + } + } + if(named_entry->size < (offset + *upl_size)) + return(KERN_INVALID_ARGUMENT); + + /* the callers parameter offset is defined to be the */ + /* offset from beginning of named entry offset in object */ + offset = offset + named_entry->offset; + + if(named_entry->is_sub_map) + return (KERN_INVALID_ARGUMENT); + + named_entry_lock(named_entry); + + if(named_entry->object) { + /* This is the case where we are going to map */ + /* an already mapped object. If the object is */ + /* not ready it is internal. 
An external */ + /* object cannot be mapped until it is ready */ + /* we can therefore avoid the ready check */ + /* in this case. */ + vm_object_reference(named_entry->object); + object = named_entry->object; + named_entry_unlock(named_entry); + } else { + object = vm_object_enter(named_entry->backing.pager, + named_entry->offset + named_entry->size, + named_entry->internal, + FALSE, + FALSE); + if (object == VM_OBJECT_NULL) { + named_entry_unlock(named_entry); + return(KERN_INVALID_OBJECT); + } + vm_object_lock(object); + + /* create an extra reference for the named entry */ + vm_object_reference_locked(object); + named_entry->object = object; + named_entry_unlock(named_entry); + + /* wait for object (if any) to be ready */ + while (!object->pager_ready) { + vm_object_wait(object, + VM_OBJECT_EVENT_PAGER_READY, + THREAD_UNINT); + vm_object_lock(object); + } + vm_object_unlock(object); + } + + if (!object->private) { + if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE)) + *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE); + if (object->phys_contiguous) { + *flags = UPL_PHYS_CONTIG; + } else { + *flags = 0; + } + } else { + *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG; + } + + ret = vm_object_iopl_request(object, + offset, + *upl_size, + upl_ptr, + user_page_list, + page_list_count, + caller_flags); + vm_object_deallocate(object); + return ret; } + #endif /* VM_CPM */ diff --git a/pexpert/conf/Makefile b/pexpert/conf/Makefile index bbeee5a37..68774ccc1 100644 --- a/pexpert/conf/Makefile +++ b/pexpert/conf/Makefile @@ -18,7 +18,7 @@ ifndef PEXPERT_KERNEL_CONFIG export PEXPERT_KERNEL_CONFIG = $(KERNEL_CONFIG) endif -COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) +export COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf: make build_setup @@ -53,6 +53,7 @@ do_all: do_setup_conf SOURCE=$${next_source} \ TARGET=$(TARGET) \ INCL_MAKEDEP=FALSE \ + KERNEL_CONFIG=$(PEXPERT_KERNEL_CONFIG) \ build_all; \ echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(PEXPERT_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; diff --git a/pexpert/conf/Makefile.i386 b/pexpert/conf/Makefile.i386 index 2f6232c14..c64ebabcc 100644 --- a/pexpert/conf/Makefile.i386 +++ b/pexpert/conf/Makefile.i386 @@ -1,7 +1,7 @@ ###################################################################### #BEGIN Machine dependent Makefile fragment for i386 ###################################################################### - +CFLAGS += -DNCPUS=2 ###################################################################### #END Machine dependent Makefile fragment for i386 ###################################################################### diff --git a/pexpert/conf/Makefile.ppc b/pexpert/conf/Makefile.ppc index 7786ccbd6..657b63741 100644 --- a/pexpert/conf/Makefile.ppc +++ b/pexpert/conf/Makefile.ppc @@ -1,7 +1,7 @@ ###################################################################### #BEGIN Machine dependent Makefile fragment for ppc ###################################################################### - +CFLAGS += -DNCPUS=1 ###################################################################### #END Machine dependent Makefile fragment for ppc ###################################################################### diff --git a/pexpert/conf/Makefile.template b/pexpert/conf/Makefile.template index 624ab530c..8ef62f8b4 100644 --- a/pexpert/conf/Makefile.template +++ b/pexpert/conf/Makefile.template @@ -28,7 +28,7 @@ include $(MakeInc_def) # # -D_KERNEL_BUILD -DKERNEL_BUILD 
-DARCH_PRIVATE -DBSD_BUILD -DMACH_KERNEL # -CFLAGS+= -DPEXPERT_KERNEL_PRIVATE -DKERNEL -DDRIVER_PRIVATE -DNCPUS=1 \ +CFLAGS+= -DPEXPERT_KERNEL_PRIVATE -DKERNEL -DDRIVER_PRIVATE \ -Wall -Wno-four-char-constants -fno-common \ -DRelease3CompatibilityBuild @@ -89,7 +89,7 @@ LDOBJS = $(OBJS) $(COMPONENT).o: $(LDOBJS) @echo "creating $(COMPONENT).o" $(RM) $(RMFLAGS) vers.c - $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/newvers \ + $(COMPOBJROOT)/newvers \ `$(CAT) ${VERSION_FILES}` ${COPYRIGHT_FILES} ${KCC} $(CFLAGS) $(INCLUDES) -c vers.c @echo [ updating $(COMPONENT).o ${PEXPERT_KERNEL_CONFIG} ] diff --git a/pexpert/conf/files.i386 b/pexpert/conf/files.i386 index 466e22eb4..8ae285f05 100644 --- a/pexpert/conf/files.i386 +++ b/pexpert/conf/files.i386 @@ -1,15 +1,13 @@ OPTIONS/gprof optional gprof -pexpert/i386/pe_bootargs.c standard pexpert/i386/pe_init.c standard +pexpert/i386/pe_bootargs.c standard pexpert/i386/pe_identify_machine.c standard -pexpert/i386/pe_interrupt.c standard pexpert/i386/pe_kprintf.c standard +pexpert/i386/pe_interrupt.c standard pexpert/i386/fakePPCDeviceTree.c standard +pexpert/i386/pe_serial.c standard -# Graphics and text console support. -pexpert/i386/text_console.c standard -pexpert/i386/kdasm.s standard # Polled-mode keyboard driver. pexpert/i386/kd.c standard diff --git a/pexpert/conf/version.major b/pexpert/conf/version.major index 1e8b31496..7f8f011eb 100644 --- a/pexpert/conf/version.major +++ b/pexpert/conf/version.major @@ -1 +1 @@ -6 +7 diff --git a/pexpert/conf/version.minor b/pexpert/conf/version.minor index 45a4fb75d..573541ac9 100644 --- a/pexpert/conf/version.minor +++ b/pexpert/conf/version.minor @@ -1 +1 @@ -8 +0 diff --git a/pexpert/conf/version.variant b/pexpert/conf/version.variant index e69de29bb..573541ac9 100644 --- a/pexpert/conf/version.variant +++ b/pexpert/conf/version.variant @@ -0,0 +1 @@ +0 diff --git a/pexpert/gen/bootargs.c b/pexpert/gen/bootargs.c index 63a90941d..06dc52316 100644 --- a/pexpert/gen/bootargs.c +++ b/pexpert/gen/bootargs.c @@ -80,7 +80,12 @@ PE_parse_boot_arg( args = cp+1; goto gotit; } - + if ('_' == *arg_string) /* Force a string copy if the argument name begins with an underscore */ + { + argstrcpy2 (++cp, (char *)arg_ptr, 16); /* Hack - terminate after 16 characters */ + arg_found = TRUE; + break; + } switch (getval(cp, &val)) { case NUM: @@ -129,6 +134,22 @@ argstrcpy( return(i); } +int +argstrcpy2( + char *from, + char *to, + unsigned maxlen) +{ + int i = 0; + + while (!isargsep(*from) && i < maxlen) { + i++; + *to++ = *from++; + } + *to = 0; + return(i); +} + int getval( char *s, @@ -175,6 +196,12 @@ getval( c -= 'a' - 10; else if ((c >= 'A') && (c <= 'F')) c -= 'A' - 10; + else if (c == 'k' || c == 'K') + { sign *= 1024; break; } + else if (c == 'm' || c == 'M') + { sign *= 1024 * 1024; break; } + else if (c == 'g' || c == 'G') + { sign *= 1024 * 1024 * 1024; break; } else if (isargsep(c)) break; else diff --git a/pexpert/i386/boot_images.h b/pexpert/i386/boot_images.h index 716a4635d..95e49e824 100644 --- a/pexpert/i386/boot_images.h +++ b/pexpert/i386/boot_images.h @@ -23,72 +23,87 @@ * @APPLE_LICENSE_HEADER_END@ */ -static const unsigned char bootClut[ 256 * 3 ] = +static const unsigned char bootClut[ 256 * 3 ] = { - 0xff,0xff,0xff, 0xfe,0xfe,0xfe, 0xfd,0xfd,0xfd, 0xb8,0x27,0x2b, - 0xfc,0xfc,0xfc, 0xff,0xff,0x00, 0xfa,0xfa,0xfa, 0xf9,0xf9,0xf9, - 0xf8,0xf8,0xf8, 0xf7,0xf7,0xf7, 0xf6,0xf6,0xf6, 0xf5,0xf5,0xf5, - 0xf4,0xf4,0xf4, 0xf2,0xf2,0xf2, 0xf1,0xf1,0xf1, 0x00,0x00,0x00, - 0xef,0xef,0xef, 
0xee,0xee,0xee, 0xed,0xed,0xed, 0xeb,0xeb,0xeb, - 0xe8,0xe8,0xe8, 0xe7,0xe7,0xe7, 0xc9,0x38,0x3e, 0xe5,0xe5,0xe5, - 0xff,0x00,0xff, 0xfb,0xfb,0xfb, 0xde,0x6c,0x72, 0xe0,0xe0,0xe0, - 0xe8,0x86,0x90, 0xde,0xde,0xde, 0xdd,0xdd,0xdd, 0xd3,0x7e,0x8d, - 0xd9,0xd9,0xd9, 0xf3,0x96,0xa6, 0xb1,0x1c,0x39, 0xff,0x00,0x00, - 0xbe,0x5e,0x72, 0xd3,0xd3,0xd3, 0xc6,0x2e,0x67, 0xd1,0xd1,0xd1, - 0xa3,0x06,0x45, 0xce,0xce,0xce, 0xcc,0xcc,0xff, 0xcc,0xcc,0xcc, - 0xc6,0x8f,0xa7, 0xe1,0xd3,0xd9, 0xce,0x9e,0xb4, 0xca,0xca,0xca, - 0xbf,0x3f,0x7d, 0xc9,0xc9,0xc9, 0xf4,0x89,0xbe, 0xc6,0xc6,0xc6, - 0xd6,0x51,0x97, 0xc9,0x2c,0x84, 0x96,0x1a,0x6a, 0xc2,0xc2,0xc2, - 0xf3,0x6f,0xc6, 0xe5,0x4c,0xbb, 0xb7,0x5a,0x9c, 0xbf,0xbf,0xbf, - 0xbe,0xbe,0xbe, 0xbd,0xbd,0xbd, 0xb8,0x21,0xa2, 0xd3,0x44,0xc0, - 0xc2,0x66,0xb7, 0xf4,0x66,0xe6, 0xfc,0x73,0xfd, 0xb9,0xb9,0xb9, - 0xea,0xdf,0xea, 0xd4,0x71,0xd5, 0xf9,0x8b,0xff, 0xf5,0xad,0xff, - 0xbc,0x92,0xc2, 0xc7,0x4f,0xd9, 0xa0,0x44,0xaf, 0xc8,0x8c,0xd5, - 0xd7,0x74,0xf7, 0xb4,0xb4,0xb4, 0xda,0x95,0xf9, 0xed,0xcb,0xff, - 0xb2,0xb2,0xb2, 0xa1,0x61,0xd7, 0xb2,0x85,0xe2, 0x59,0x26,0x9c, - 0x7c,0x51,0xcc, 0xb0,0xb0,0xb0, 0xb4,0x8e,0xfc, 0xd5,0xc0,0xff, - 0x5d,0x32,0xcc, 0x7b,0x5c,0xe5, 0xc0,0xb0,0xfd, 0x60,0x53,0xad, - 0x12,0x0c,0x7e, 0x2e,0x29,0x99, 0x79,0x78,0xe9, 0x5b,0x5c,0xd0, - 0x69,0x6a,0xcc, 0x93,0x94,0xf8, 0x92,0x92,0xc3, 0x41,0x44,0xba, - 0xa8,0xab,0xff, 0xa3,0xa3,0xa3, 0xdb,0xdd,0xea, 0x31,0x49,0xaa, - 0x70,0x8f,0xf9, 0x48,0x66,0xc1, 0x5c,0x7e,0xe9, 0xe2,0xe5,0xeb, - 0xb0,0xcd,0xff, 0x6c,0x89,0xb7, 0x34,0x65,0xaf, 0x8c,0xb9,0xff, - 0x37,0x79,0xd4, 0x5a,0x99,0xea, 0x0e,0x4c,0x95, 0x79,0xb9,0xff, - 0x8a,0xa3,0xbc, 0x20,0x61,0x9d, 0x8f,0xae,0xca, 0x0a,0x60,0xa8, - 0x3f,0x94,0xd9, 0x63,0xb5,0xf9, 0xe2,0xe8,0xed, 0x28,0x6a,0x99, - 0x55,0xb2,0xe7, 0x32,0x89,0xa9, 0xcf,0xda,0xde, 0x29,0xa1,0xc7, - 0x86,0xa9,0xb4, 0x00,0x5f,0x79, 0x0c,0x77,0x8e, 0x12,0x8f,0xab, - 0x41,0xba,0xd5, 0x24,0x82,0x83, 0x2c,0xc4,0xc3, 0x1a,0xab,0xa6, - 0x4b,0xa8,0xa2, 0x0a,0x93,0x85, 0x0d,0xa5,0x96, 0x26,0xbc,0xac, - 0x04,0x81,0x72, 0x19,0xb3,0x86, 0x29,0xc1,0x94, 0x21,0x9c,0x71, - 0x02,0x8c,0x50, 0x35,0xd0,0x89, 0x46,0xa5,0x76, 0x02,0x7d,0x39, - 0x29,0xc9,0x71, 0x57,0xd6,0x8f, 0xa2,0xb5,0xaa, 0x01,0x88,0x2a, - 0x74,0xbe,0x8a, 0x19,0xb6,0x47, 0x2d,0xc6,0x51, 0x38,0xde,0x5d, - 0x4c,0xf4,0x6f, 0x91,0x9c,0x93, 0x00,0x8e,0x19, 0x10,0xaf,0x28, - 0xe3,0xe3,0xe3, 0x08,0xa1,0x1a, 0x59,0xc2,0x61, 0xf0,0xf0,0xf0, - 0x8f,0x9c,0x90, 0x23,0xce,0x2a, 0x12,0xba,0x17, 0x01,0x8a,0x02, - 0x03,0x9a,0x02, 0x40,0xe4,0x40, 0x08,0xb2,0x05, 0x13,0xcc,0x0f, - 0x36,0xd7,0x32, 0x28,0xe9,0x1f, 0x53,0xfb,0x4c, 0x6f,0xaf,0x6a, - 0x71,0xe0,0x67, 0x32,0xc0,0x12, 0x29,0xa5,0x08, 0x5c,0xdd,0x35, - 0x00,0xff,0xff, 0x63,0xc8,0x45, 0x86,0xfd,0x5b, 0x71,0xf6,0x39, - 0x55,0xcc,0x15, 0x00,0xff,0x00, 0x90,0xca,0x6e, 0x43,0xa7,0x01, - 0x8d,0xe4,0x37, 0xb3,0xf0,0x64, 0x85,0x8e,0x7a, 0xb0,0xfa,0x4d, - 0xd6,0xd6,0xd6, 0x88,0xd0,0x1a, 0x6a,0xa7,0x03, 0x98,0xbf,0x41, - 0xcd,0xf8,0x51, 0x94,0xa4,0x55, 0x91,0xb0,0x0a, 0xda,0xf1,0x3c, - 0xba,0xca,0x53, 0xb9,0xc3,0x28, 0xb1,0xba,0x12, 0xd2,0xd9,0x26, - 0xe8,0xec,0x2d, 0x98,0x96,0x02, 0xad,0xad,0x5c, 0xe2,0xd8,0x38, - 0xd9,0xc4,0x38, 0xa8,0x9a,0x50, 0x00,0x00,0xff, 0xbe,0xae,0x5e, - 0x9a,0x98,0x8e, 0xac,0x8d,0x0d, 0xc5,0xa0,0x2b, 0xdb,0xb5,0x48, - 0xdd,0x00,0x00, 0x9c,0x6d,0x03, 0xd4,0xa8,0x47, 0xb7,0x71,0x17, - 0xdc,0xa1,0x5a, 0xb9,0x9c,0x7c, 0xb4,0xab,0xa2, 0x9e,0x4b,0x01, - 0xc8,0x78,0x35, 0xd2,0x8d,0x51, 0xad,0x52,0x0f, 0x00,0xbb,0x00, - 0xb2,0x66,0x38, 0xb1,0xa6,0x9f, 0xb1,0x87,0x6f, 0xa4,0x34,0x03, - 
0xee,0x9e,0x85, 0xc9,0x73,0x5a, 0xe6,0x94,0x7c, 0xa9,0x22,0x06, - 0xdb,0x87,0x74, 0xb0,0x2e,0x15, 0xb7,0x5a,0x50, 0xb2,0x42,0x3b, - 0xcd,0x73,0x6e, 0xd9,0x58,0x58, 0xac,0xac,0xac, 0xa0,0xa0,0xa0, - 0x9a,0x9a,0x9a, 0x92,0x92,0x92, 0x8e,0x8e,0x8e, 0xbb,0xbb,0xbb, - 0x81,0x81,0x81, 0x88,0x88,0x88, 0x77,0x77,0x77, 0x55,0x55,0x55, - 0x44,0x44,0x44, 0x22,0x22,0x22, 0x7b,0x7b,0x7b, 0x00,0x00,0x00 + 0xff,0xff,0xff, 0xbf,0xbf,0xbf, 0xbe,0xbe,0xbe, 0xbd,0xbd,0xbd, + 0xbc,0xbc,0xbc, 0xff,0xff,0x00, 0xba,0xba,0xba, 0xb9,0xb9,0xb9, + 0xb8,0xb8,0xb8, 0xb7,0xb7,0xb7, 0xb6,0xb6,0xb6, 0xb5,0xb5,0xb5, + 0xb4,0xb4,0xb4, 0xb3,0xb3,0xb3, 0xb2,0xb2,0xb2, 0x00,0x00,0x00, + + 0xb1,0xb1,0xb1, 0xb0,0xb0,0xb0, 0xaf,0xaf,0xaf, 0xae,0xae,0xae, + 0xad,0xad,0xad, 0xac,0xac,0xac, 0xab,0xab,0xab, 0xaa,0xaa,0xaa, + 0xff,0x00,0xff, 0xa9,0xa9,0xa9, 0xa8,0xa8,0xa8, 0xa7,0xa7,0xa7, + 0xa6,0xa6,0xa6, 0xa5,0xa5,0xa5, 0xa4,0xa4,0xa4, 0xa3,0xa3,0xa3, + + 0xa2,0xa2,0xa2, 0xa1,0xa1,0xa1, 0xa0,0xa0,0xa0, 0xff,0x00,0x00, + 0x9f,0x9f,0x9f, 0x9e,0x9e,0x9e, 0x9d,0x9d,0x9d, 0x9c,0x9c,0x9c, + 0x9b,0x9b,0x9b, 0x9a,0x9a,0x9a, 0xcc,0xcc,0xff, 0xcc,0xcc,0xcc, + 0x99,0x99,0x99, 0x98,0x98,0x98, 0x97,0x97,0x97, 0x96,0x96,0x96, + + 0x95,0x95,0x95, 0x94,0x94,0x94, 0x93,0x93,0x93, 0x92,0x92,0x92, + 0x91,0x91,0x91, 0x90,0x90,0x90, 0x8f,0x8f,0x8f, 0x8e,0x8e,0x8e, + 0x8d,0x8d,0x8d, 0x8c,0x8c,0x8c, 0x8b,0x8b,0x8b, 0x8a,0x8a,0x8a, + 0x89,0x89,0x89, 0x88,0x88,0x88, 0x86,0x86,0x86, 0x85,0x85,0x85, + + 0x84,0x84,0x84, 0x83,0x83,0x83, 0x82,0x82,0x82, 0x81,0x81,0x81, + 0x80,0x80,0x80, 0x7f,0x7f,0x7f, 0x7e,0x7e,0x7e, 0x7d,0x7d,0x7d, + 0x7c,0x7c,0x7c, 0x7b,0x7b,0x7b, 0x7a,0x7a,0x7a, 0x79,0x79,0x79, + 0x78,0x78,0x78, 0x76,0x76,0x76, 0x75,0x75,0x75, 0x74,0x74,0x74, + + 0x73,0x73,0x73, 0x72,0x72,0x72, 0x71,0x71,0x71, 0x70,0x70,0x70, + 0x6f,0x6f,0x6f, 0x6e,0x6e,0x6e, 0x6d,0x6d,0x6d, 0x6c,0x6c,0x6c, + 0x6b,0x6b,0x6b, 0x6a,0x6a,0x6a, 0x69,0x69,0x69, 0x68,0x68,0x68, + 0x67,0x67,0x67, 0x66,0x66,0x66, 0x64,0x64,0x64, 0x63,0x63,0x63, + + 0x62,0x62,0x62, 0x61,0x61,0x61, 0x60,0x60,0x60, 0x5f,0x5f,0x5f, + 0x5e,0x5e,0x5e, 0x5d,0x5d,0x5d, 0x5c,0x5c,0x5c, 0x5b,0x5b,0x5b, + 0x5a,0x5a,0x5a, 0x59,0x59,0x59, 0x58,0x58,0x58, 0x57,0x57,0x57, + 0x56,0x56,0x56, 0x54,0x54,0x54, 0x53,0x53,0x53, 0x52,0x52,0x52, + + 0x51,0x51,0x51, 0x50,0x50,0x50, 0x4f,0x4f,0x4f, 0x4e,0x4e,0x4e, + 0x4d,0x4d,0x4d, 0x4c,0x4c,0x4c, 0x4b,0x4b,0x4b, 0x4a,0x4a,0x4a, + 0x49,0x49,0x49, 0x48,0x48,0x48, 0x47,0x47,0x47, 0x46,0x46,0x46, + 0x45,0x45,0x45, 0x43,0x43,0x43, 0x42,0x42,0x42, 0x41,0x41,0x41, + + 0x40,0x40,0x40, 0x3f,0x3f,0x3f, 0x3e,0x3e,0x3e, 0x3d,0x3d,0x3d, + 0x3c,0x3c,0x3c, 0x3b,0x3b,0x3b, 0x3a,0x3a,0x3a, 0x39,0x39,0x39, + 0x38,0x38,0x38, 0x37,0x37,0x37, 0x36,0x36,0x36, 0x35,0x35,0x35, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x00,0xff,0xff, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x00,0xff,0x00, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 
0x65,0x65,0x65, 0x65,0x65,0x65, + + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x00,0x00,0xff, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0xd0,0x00,0x00, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x00,0xbb,0x00, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, + 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0xbb,0xbb,0xbb, + 0x65,0x65,0x65, 0x88,0x88,0x88, 0x77,0x77,0x77, 0x55,0x55,0x55, + 0x44,0x44,0x44, 0x22,0x22,0x22, 0x65,0x65,0x65, 0x00,0x00,0x00 }; #define kFailedBootWidth 28 diff --git a/pexpert/i386/fakePPCDeviceTree.c b/pexpert/i386/fakePPCDeviceTree.c index 0fae65b2a..e547d9fc0 100644 --- a/pexpert/i386/fakePPCDeviceTree.c +++ b/pexpert/i386/fakePPCDeviceTree.c @@ -31,8 +31,8 @@ boot_args fakePPCBootArgs = { 0, // Revision kBootArgsVersion, // Version "", // CommandLine - 0, // PhysicalDRAM - 0, // machine_type + {{0}}, // PhysicalDRAM + {0}, // machine_type 0, // deviceTreeP 0, // deviceTreeLength 0, // topOfKernelData diff --git a/pexpert/i386/pe_bootargs.c b/pexpert/i386/pe_bootargs.c index cbc426cdf..0e90d2873 100644 --- a/pexpert/i386/pe_bootargs.c +++ b/pexpert/i386/pe_bootargs.c @@ -29,5 +29,5 @@ char * PE_boot_args( void) { - return((char *)((KERNBOOTSTRUCT*)PE_state.bootArgs)->bootString); + return((char *)((KernelBootArgs_t *)PE_state.bootArgs)->bootString); } diff --git a/pexpert/i386/pe_identify_machine.c b/pexpert/i386/pe_identify_machine.c index 83b28849e..a7982f703 100644 --- a/pexpert/i386/pe_identify_machine.c +++ b/pexpert/i386/pe_identify_machine.c @@ -27,7 +27,7 @@ #include /* External declarations */ -unsigned int LockTimeOut = 12500000; /* XXX - Need real value for i386 */ +unsigned int LockTimeOut = 1250000000; /* XXX - Need real value for i386 */ /* Local declarations */ void pe_identify_machine(boot_args *args); diff --git a/pexpert/i386/pe_init.c b/pexpert/i386/pe_init.c index dfe65ebaf..be649de98 100644 --- a/pexpert/i386/pe_init.c +++ b/pexpert/i386/pe_init.c @@ -47,7 +47,6 @@ extern void initialize_screen(void *, unsigned int); static vm_offset_t mapframebuffer(caddr_t,int); static vm_offset_t PE_fb_vaddr = 0; static int PE_fb_mode = TEXT_MODE; -static KERNBOOTSTRUCT * PE_kbp = 0; /* private globals */ PE_state_t PE_state; @@ -111,6 +110,7 @@ void PE_init_iokit(void) { long * dt; int i; + KernelBootArgs_t *kap = (KernelBootArgs_t *)PE_state.bootArgs; typedef struct { char name[32]; @@ -118,12 +118,15 @@ void PE_init_iokit(void) unsigned long value[2]; } DriversPackageProp; + PE_init_kprintf(TRUE); + PE_init_printf(TRUE); + /* * Update the fake device tree with the driver information provided by * the booter. 
*/ - gDriversProp.length = PE_kbp->numBootDrivers * sizeof(DriversPackageProp); + gDriversProp.length = kap->numBootDrivers * sizeof(DriversPackageProp); gMemoryMapNode.length = 2 * sizeof(long); dt = (long *) createdt( fakePPCDeviceTree, @@ -135,28 +138,28 @@ void PE_init_iokit(void) /* Copy driver info in kernBootStruct to fake device tree */ - for ( i = 0; i < PE_kbp->numBootDrivers; i++, prop++ ) + for ( i = 0; i < kap->numBootDrivers; i++, prop++ ) { - switch ( PE_kbp->driverConfig[i].type ) + switch ( kap->driverConfig[i].type ) { case kBootDriverTypeKEXT: - sprintf(prop->name, "Driver-%lx", PE_kbp->driverConfig[i].address); + sprintf(prop->name, "Driver-%lx", kap->driverConfig[i].address); break; case kBootDriverTypeMKEXT: - sprintf(prop->name, "DriversPackage-%lx", PE_kbp->driverConfig[i].address); + sprintf(prop->name, "DriversPackage-%lx", kap->driverConfig[i].address); break; default: - sprintf(prop->name, "DriverBogus-%lx", PE_kbp->driverConfig[i].address); + sprintf(prop->name, "DriverBogus-%lx", kap->driverConfig[i].address); break; } prop->length = sizeof(prop->value); - prop->value[0] = PE_kbp->driverConfig[i].address; - prop->value[1] = PE_kbp->driverConfig[i].size; + prop->value[0] = kap->driverConfig[i].address; + prop->value[1] = kap->driverConfig[i].size; } - *gMemoryMapNode.address = PE_kbp->numBootDrivers + 1; + *gMemoryMapNode.address = kap->numBootDrivers + 1; } /* Setup powermac_info and powermac_machine_info structures */ @@ -174,13 +177,18 @@ void PE_init_iokit(void) /* * Fetch the CLUT and the noroot image. */ - bcopy( bootClut, appleClut8, sizeof(appleClut8) ); + bcopy( (void *) bootClut, appleClut8, sizeof(appleClut8) ); default_noroot.width = kFailedBootWidth; default_noroot.height = kFailedBootHeight; default_noroot.dx = 0; default_noroot.dy = kFailedBootOffset; default_noroot_data = failedBootPict; + + /* + * Initialize the panic UI + */ + panic_ui_initialize( (unsigned char *) appleClut8 ); /* * Initialize the spinning wheel (progress indicator). 
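The PE_init_iokit() changes above replace the old KERNBOOTSTRUCT global with a KernelBootArgs_t pointer taken from PE_state.bootArgs; the driver hand-off itself stays simple: each booter-loaded driver becomes one property in the fake device tree, named by its type and load address. A condensed restatement of that loop (identical behavior to the hunk, not a new API; `kap`, `prop`, and the kBootDriverType* constants are as declared in the surrounding code):

/* One DriversPackageProp per booter-loaded driver; IOKit later finds
 * these properties in the memory-map node of the fake device tree. */
for (i = 0; i < kap->numBootDrivers; i++, prop++) {
	const char *fmt =
	    (kap->driverConfig[i].type == kBootDriverTypeKEXT)  ? "Driver-%lx" :
	    (kap->driverConfig[i].type == kBootDriverTypeMKEXT) ? "DriversPackage-%lx"
	                                                        : "DriverBogus-%lx";
	sprintf(prop->name, fmt, kap->driverConfig[i].address);
	prop->length   = sizeof(prop->value);
	prop->value[0] = kap->driverConfig[i].address;	/* where the booter left it */
	prop->value[1] = kap->driverConfig[i].size;
}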
@@ -188,26 +196,24 @@ void PE_init_iokit(void) vc_progress_initialize( &default_progress, default_progress_data, (unsigned char *) appleClut8 ); - PE_initialize_console( (PE_Video *) 0, kPEAcquireScreen ); - - (void) StartIOKit( (void*)dt, (void*)PE_state.fakePPCBootArgs, 0, 0); + (void) StartIOKit( (void*)dt, PE_state.bootArgs, 0, 0); } void PE_init_platform(boolean_t vm_initialized, void * args) { if (PE_state.initialized == FALSE) { - PE_kbp = (KERNBOOTSTRUCT *) args; + KernelBootArgs_t *kap = (KernelBootArgs_t *) args; PE_state.initialized = TRUE; PE_state.bootArgs = args; - PE_state.video.v_baseAddr = PE_kbp->video.v_baseAddr; - PE_state.video.v_rowBytes = PE_kbp->video.v_rowBytes; - PE_state.video.v_height = PE_kbp->video.v_height; - PE_state.video.v_width = PE_kbp->video.v_width; - PE_state.video.v_depth = PE_kbp->video.v_depth; - PE_state.video.v_display = PE_kbp->video.v_display; - PE_fb_mode = PE_kbp->graphicsMode; + PE_state.video.v_baseAddr = kap->video.v_baseAddr; + PE_state.video.v_rowBytes = kap->video.v_rowBytes; + PE_state.video.v_height = kap->video.v_height; + PE_state.video.v_width = kap->video.v_width; + PE_state.video.v_depth = kap->video.v_depth; + PE_state.video.v_display = kap->video.v_display; + PE_fb_mode = kap->graphicsMode; PE_state.fakePPCBootArgs = (boot_args *)&fakePPCBootArgs; ((boot_args *)PE_state.fakePPCBootArgs)->machineType = 386; @@ -220,20 +226,18 @@ void PE_init_platform(boolean_t vm_initialized, void * args) } } - if (!vm_initialized) - { + if (!vm_initialized) + { /* Hack! FIXME.. */ outb(0x21, 0xff); /* Maskout all interrupts Pic1 */ outb(0xa1, 0xff); /* Maskout all interrupts Pic2 */ pe_identify_machine(args); - } - else - { + } + else + { pe_init_debug(); - - PE_create_console(); - } + } } void PE_create_console( void ) @@ -247,7 +251,7 @@ void PE_create_console( void ) PE_state.video.v_height); } - if (PE_state.video.v_display) + if ( PE_state.video.v_display ) PE_initialize_console( &PE_state.video, kPEGraphicsMode ); else PE_initialize_console( &PE_state.video, kPETextMode ); @@ -333,7 +337,6 @@ mapframebuffer( caddr_t physaddr, /* start of framebuffer */ if (physaddr != (caddr_t)trunc_page(physaddr)) panic("Framebuffer not on page boundary"); - vmaddr = io_map((vm_offset_t)physaddr, length); if (vmaddr == 0) panic("can't alloc VM for framebuffer"); @@ -358,3 +361,6 @@ PE_stub_poll_input(unsigned int options, char * c) */ int (*PE_poll_input)(unsigned int options, char * c) = PE_stub_poll_input; + + + diff --git a/pexpert/i386/pe_interrupt.c b/pexpert/i386/pe_interrupt.c index 2040b0e46..d0f04766f 100644 --- a/pexpert/i386/pe_interrupt.c +++ b/pexpert/i386/pe_interrupt.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include struct i386_interrupt_handler { @@ -42,23 +44,30 @@ void PE_platform_interrupt_initialize(void) { } + + void -PE_incoming_interrupt(int interrupt, void *eip) +PE_incoming_interrupt(int interrupt, void *state) { - boolean_t save_int; i386_interrupt_handler_t *vector; KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START, - 0, (unsigned int)eip, 0, 0, 0); + 0, ((unsigned int *)state)[5], 0, 0, 0); vector = &PE_interrupt_handler; - save_int = ml_set_interrupts_enabled(FALSE); - vector->handler(vector->target, vector->refCon, vector->nub, interrupt); - ml_set_interrupts_enabled(save_int); + + switch (interrupt) { + case APIC_ERROR_INTERRUPT: + case SPURIOUS_INTERRUPT: + case INTERPROCESS_INTERRUPT: + lapic_interrupt(interrupt, state); + break; + default: + 
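+		/*
+		 * Non-APIC vectors: hand off to the handler installed via
+		 * PE_install_interrupt_handler().  The APIC error, spurious
+		 * and interprocessor vectors were routed to lapic_interrupt()
+		 * above.
+		 */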
vector->handler(vector->target, state, vector->nub, interrupt); + } KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0); - } void PE_install_interrupt_handler(void *nub, int source, diff --git a/pexpert/i386/pe_kprintf.c b/pexpert/i386/pe_kprintf.c index 3d00e68f9..4383340f0 100644 --- a/pexpert/i386/pe_kprintf.c +++ b/pexpert/i386/pe_kprintf.c @@ -27,17 +27,24 @@ * i386 platform expert debugging output initialization. */ #include +#include #include #include +#include +#include /* extern references */ extern void cnputc(char c); +extern int serial_init(void); +extern void serial_putc(char c); /* Globals */ void (*PE_kputc)(char c) = 0; unsigned int disableSerialOuput = TRUE; +decl_simple_lock_data(static, kprintf_lock) + void PE_init_kprintf(boolean_t vm_initialized) { unsigned int boot_arg; @@ -47,21 +54,52 @@ void PE_init_kprintf(boolean_t vm_initialized) if (!vm_initialized) { + simple_lock_init(&kprintf_lock, 0); + if (PE_parse_boot_arg("debug", &boot_arg)) if (boot_arg & DB_KPRT) disableSerialOuput = FALSE; - - /* FIXME - route output to serial port. */ - PE_kputc = cnputc; + + if (!disableSerialOuput && serial_init()) + PE_kputc = serial_putc; + else + PE_kputc = cnputc; } } +#ifdef MP_DEBUG +static void _kprintf(const char *format, ...) +{ + va_list listp; + + va_start(listp, format); + _doprnt(format, &listp, PE_kputc, 16); + va_end(listp); +} +#define MP_DEBUG_KPRINTF(x...) _kprintf(x) +#else /* MP_DEBUG */ +#define MP_DEBUG_KPRINTF(x...) +#endif /* MP_DEBUG */ + +static int cpu_last_locked = 0; void kprintf(const char *fmt, ...) { - va_list listp; + va_list listp; + boolean_t state; if (!disableSerialOuput) { + state = ml_set_interrupts_enabled(FALSE); + simple_lock(&kprintf_lock); + + if (cpu_number() != cpu_last_locked) { + MP_DEBUG_KPRINTF("[cpu%d...]\n", cpu_number()); + cpu_last_locked = cpu_number(); + } + va_start(listp, fmt); _doprnt(fmt, &listp, PE_kputc, 16); va_end(listp); + + simple_unlock(&kprintf_lock); + ml_set_interrupts_enabled(state); } } diff --git a/pexpert/i386/pe_serial.c b/pexpert/i386/pe_serial.c new file mode 100644 index 000000000..31256faba --- /dev/null +++ b/pexpert/i386/pe_serial.c @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * file: pe_serial.c + * Polled-mode 16x50 UART driver. 
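+ *
+ * Transmit-only early console: serial_init() probes COM1, programs it
+ * for 115200 8N1 with FIFOs and interrupts disabled, and serial_putc()
+ * busy-waits on LSR.THRE before each byte (serial_getc() is a stub).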
+ */ + +#include +#include + +/* standard port addresses */ +enum { + COM1_PORT_ADDR = 0x3f8, + COM2_PORT_ADDR = 0x2f8 +}; + +/* UART register offsets */ +enum { + UART_RBR = 0, /* receive buffer Register (R) */ + UART_THR = 0, /* transmit holding register (W) */ + UART_DLL = 0, /* DLAB = 1, divisor latch (LSB) */ + UART_IER = 1, /* interrupt enable register */ + UART_DLM = 1, /* DLAB = 1, divisor latch (MSB) */ + UART_IIR = 2, /* interrupt ident register (R) */ + UART_FCR = 2, /* fifo control register (W) */ + UART_LCR = 3, /* line control register */ + UART_MCR = 4, /* modem control register */ + UART_LSR = 5, /* line status register */ + UART_MSR = 6 /* modem status register */ +}; + +enum { + UART_LCR_8BITS = 0x03, + UART_LCR_DLAB = 0x80 +}; + +enum { + UART_MCR_DTR = 0x01, + UART_MCR_RTS = 0x02, + UART_MCR_OUT1 = 0x04, + UART_MCR_OUT2 = 0x08, + UART_MCR_LOOP = 0x10 +}; + +enum { + UART_LSR_THRE = 0x20 +}; + +#define UART_BAUD_RATE 115200 +#define UART_PORT_ADDR COM1_PORT_ADDR + +#define WRITE(r, v) outb(UART_PORT_ADDR + UART_##r, v) +#define READ(r) inb(UART_PORT_ADDR + UART_##r) +#define DELAY(x) { volatile int _d_; for (_d_ = 0; _d_ < (10000*x); _d_++) ; } + +static int uart_initted = 0; /* 1 if init'ed */ + +static int +uart_probe( void ) +{ + /* Verify that the Divisor Register is accessible */ + + WRITE( LCR, UART_LCR_DLAB ); + WRITE( DLL, 0x5a ); + if (READ(DLL) != 0x5a) return 0; + WRITE( DLL, 0xa5 ); + if (READ(DLL) != 0xa5) return 0; + WRITE( LCR, 0x00 ); + return 1; +} + +static void +uart_set_baud_rate( unsigned long baud_rate ) +{ + #define UART_CLOCK 1843200 /* 1.8432 MHz clock */ + + const unsigned char lcr = READ( LCR ); + unsigned long div; + + if (baud_rate == 0) baud_rate = 9600; + div = UART_CLOCK / 16 / baud_rate; + WRITE( LCR, lcr | UART_LCR_DLAB ); + WRITE( DLM, (unsigned char)(div >> 8) ); + WRITE( DLL, (unsigned char) div ); + WRITE( LCR, lcr & ~UART_LCR_DLAB); +} + +static void +uart_putc( char c ) +{ + if (!uart_initted) return; + + /* Wait for THR empty */ + while ( !(READ(LSR) & UART_LSR_THRE) ) DELAY(1); + + WRITE( THR, c ); +} + +int serial_init( void ) +{ + if ( uart_initted || uart_probe() == 0 ) return 0; + + /* Disable hardware interrupts */ + + WRITE( MCR, 0 ); + WRITE( IER, 0 ); + + /* Disable FIFO's for 16550 devices */ + + WRITE( FCR, 0 ); + + /* Set for 8-bit, no parity, DLAB bit cleared */ + + WRITE( LCR, UART_LCR_8BITS ); + + /* Set baud rate */ + + uart_set_baud_rate( UART_BAUD_RATE ); + + /* Assert DTR# and RTS# lines (OUT2?) */ + + WRITE( MCR, UART_MCR_DTR | UART_MCR_RTS ); + + /* Clear any garbage in the input buffer */ + + READ( RBR ); + + uart_initted = 1; + + return 1; +} + +void serial_putc( char c ) +{ + uart_putc(c); + if (c == '\n') uart_putc('\r'); +} + +int serial_getc( void ) +{ + return 0; /* not supported */ +} diff --git a/pexpert/i386/video_console.h b/pexpert/i386/video_console.h deleted file mode 100644 index 130956005..000000000 --- a/pexpert/i386/video_console.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. 
Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __PEXPERT_VIDEO_CONSOLE_H -#define __PEXPERT_VIDEO_CONSOLE_H - -/* - * Video console properties. - */ -struct vc_info { - unsigned long v_height; /* pixels */ - unsigned long v_width; /* pixels */ - unsigned long v_depth; - unsigned long v_rowbytes; - unsigned long v_baseaddr; - unsigned long v_type; - char v_name[32]; - unsigned long v_physaddr; - unsigned long v_rows; /* characters */ - unsigned long v_columns; /* characters */ - unsigned long v_rowscanbytes; /* Actualy number of bytes used for display per row */ - unsigned long v_reserved[5]; -}; - -/* - * From text_console.c - */ -extern void tc_putchar(unsigned char ch, int x, int y, int attrs); -extern void tc_scrolldown(int lines); -extern void tc_scrollup(int lines); -extern void tc_clear_screen(int x, int y, int operation); -extern void tc_show_cursor(int x, int y); -extern void tc_hide_cursor(int x, int y); -extern void tc_initialize(struct vc_info * vinfo_p); -extern void tc_update_color(int color, int fore); - -#endif /* !__PEXPERT_VIDEO_CONSOLE_H */ diff --git a/pexpert/pexpert/Makefile b/pexpert/pexpert/Makefile index 553ecfce5..30ff9e4c1 100644 --- a/pexpert/pexpert/Makefile +++ b/pexpert/pexpert/Makefile @@ -31,7 +31,8 @@ INSTALL_MI_LIST = ${DATAFILES} INSTALL_MI_DIR = pexpert -EXPORT_MI_LIST = ${DATAFILES} +EXPORT_MI_LIST = ${DATAFILES} \ + device_tree.h EXPORT_MI_DIR = pexpert diff --git a/pexpert/pexpert/device_tree.h b/pexpert/pexpert/device_tree.h index c009d596f..767110b06 100644 --- a/pexpert/pexpert/device_tree.h +++ b/pexpert/pexpert/device_tree.h @@ -1,3 +1,4 @@ + /* * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * @@ -25,6 +26,13 @@ #ifndef _PEXPERT_DEVICE_TREE_H_ #define _PEXPERT_DEVICE_TREE_H_ +#include + +#ifdef __APPLE_API_PRIVATE + +#ifdef __cplusplus +extern "C" { +#endif /* ------------------------------------------------------------------------------- @@ -239,7 +247,12 @@ extern int DTIterateProperties(DTPropertyIterator iterator, extern int DTRestartPropertyIteration(DTPropertyIterator iterator); +#ifdef __cplusplus +} +#endif #endif /* __MWERKS__ */ +#endif /* __APPLE_API_PRIVATE */ + #endif /* _PEXPERT_DEVICE_TREE_H_ */ diff --git a/pexpert/pexpert/i386/boot.h b/pexpert/pexpert/i386/boot.h index 8b93ca597..14540c813 100644 --- a/pexpert/pexpert/i386/boot.h +++ b/pexpert/pexpert/i386/boot.h @@ -110,12 +110,94 @@ typedef struct boot_video boot_video; #define GRAPHICS_MODE 1 #define TEXT_MODE 0 -#define BOOT_STRING_LEN 160 + +/* + * INT15, E820h - Query System Address Map. + * + * Documented in ACPI Specification Rev 2.0, + * Chapter 15 (System Address Map Interfaces). + */ + +/* + * ACPI defined memory range types. + */ +enum { + kMemoryRangeUsable = 1, // RAM usable by the OS. + kMemoryRangeReserved = 2, // Reserved. (Do not use) + kMemoryRangeACPI = 3, // ACPI tables. Can be reclaimed. + kMemoryRangeNVS = 4, // ACPI NVS memory. 
(Do not use) + + /* Undefined types should be treated as kMemoryRangeReserved */ +}; + +/* + * Memory range descriptor. + */ +typedef struct MemoryRange { + unsigned long long base; // 64-bit base address + unsigned long long length; // 64-bit length in bytes + unsigned long type; // type of memory range + unsigned long reserved; +} MemoryRange; + +#define kMemoryMapCountMax 40 + +/* + * BIOS drive information. + */ +struct boot_drive_info { + struct drive_params { + unsigned short buf_size; + unsigned short info_flags; + unsigned long phys_cyls; + unsigned long phys_heads; + unsigned long phys_spt; + unsigned long long phys_sectors; + unsigned short phys_nbps; + unsigned short dpte_offset; + unsigned short dpte_segment; + unsigned short key; + unsigned char path_len; + unsigned char reserved1; + unsigned short reserved2; + unsigned char bus_type[4]; + unsigned char interface_type[8]; + unsigned char interface_path[8]; + unsigned char dev_path[8]; + unsigned char reserved3; + unsigned char checksum; + } params __attribute__((packed)); + struct drive_dpte { + unsigned short io_port_base; + unsigned short control_port_base; + unsigned char head_flags; + unsigned char vendor_info; + unsigned char irq : 4; + unsigned char irq_unused : 4; + unsigned char block_count; + unsigned char dma_channel : 4; + unsigned char dma_type : 4; + unsigned char pio_type : 4; + unsigned char pio_unused : 4; + unsigned short option_flags; + unsigned short reserved; + unsigned char revision; + unsigned char checksum; + } dpte __attribute__((packed)); +} __attribute__((packed)); +typedef struct boot_drive_info boot_drive_info_t; + +#define MAX_BIOS_DEVICES 8 + +#define OLD_BOOT_STRING_LEN 160 +#define BOOT_STRING_LEN 1024 #define CONFIG_SIZE (12 * 4096) +/* Old structure for compatibility */ + typedef struct { short version; - char bootString[BOOT_STRING_LEN]; // boot arguments + char bootString[OLD_BOOT_STRING_LEN]; // boot arguments int magicCookie; // KERNBOOTMAGIC int numIDEs; // number of IDE drives int rootdev; // root device @@ -141,10 +223,6 @@ typedef struct { #define KERNSTRUCT_ADDR ((KERNBOOTSTRUCT *) 0x11000) #define KERNBOOTMAGIC 0xa7a7a7a7 -#ifndef KERNEL -extern KERNBOOTSTRUCT * kernBootStruct; -#endif - #define BOOT_LINE_LENGTH 256 /* @@ -193,5 +271,39 @@ typedef struct boot_args { extern boot_args passed_args; +/* New structures */ + + +#define KERNEL_BOOT_MAGIC 0xa5b6d7e8 + +typedef struct KernelBootArgs { + unsigned int magicCookie; // KERNEL_BOOT_MAGIC + unsigned short version; + unsigned short revision; + unsigned int size; // size of KernelBootArgs structure + int numDrives; // number of BIOS drives + int rootdev; // root device + int convmem; // conventional memory + int extmem; // extended memory + unsigned int firstAddr0; // first address for kern convmem + int graphicsMode; // booted in graphics mode? 
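+                                        // (GRAPHICS_MODE or TEXT_MODE, defined above)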
+ int kernDev; // device kernel was fetched from + int numBootDrivers; // number of drivers loaded + char * configEnd; // pointer to end of config files + unsigned int kaddr; // kernel load address + unsigned int ksize; // size of kernel + char bootFile[128]; // kernel file name + char bootString[BOOT_STRING_LEN]; // boot arguments + driver_config_t driverConfig[NDRIVERS]; + unsigned long memoryMapCount; + MemoryRange memoryMap[kMemoryMapCountMax]; + boot_drive_info_t driveInfo[MAX_BIOS_DEVICES]; + boot_video video; + PCI_bus_info_t pciInfo; + APM_config_t apmConfig; + char config[CONFIG_SIZE]; +} KernelBootArgs_t; + + #endif /* _PEXPERT_I386_BOOT_H */ diff --git a/pexpert/pexpert/i386/protos.h b/pexpert/pexpert/i386/protos.h index 990c60e72..0cfd992db 100644 --- a/pexpert/pexpert/i386/protos.h +++ b/pexpert/pexpert/i386/protos.h @@ -70,12 +70,16 @@ extern void cninit(void); extern void bcopy(void * from, void * to, int size); extern int sprintf(char * str, const char * format, ...); +//------------------------------------------------------------------------ +// from osfmk/console/panic_dialog.c +extern void panic_ui_initialize(const unsigned char * clut); + //------------------------------------------------------------------------ // from osfmk/i386/AT386/video_console.c -extern boolean_t vc_progress_initialize( void * desc, - const unsigned char * data, - const unsigned char * clut ); +extern void vc_progress_initialize( void * desc, + const unsigned char * data, + const unsigned char * clut ); extern void vc_display_icon( void * desc, const unsigned char * data ); diff --git a/pexpert/pexpert/machine/Makefile b/pexpert/pexpert/machine/Makefile index ad76e611e..cffc94284 100644 --- a/pexpert/pexpert/machine/Makefile +++ b/pexpert/pexpert/machine/Makefile @@ -11,9 +11,9 @@ DATAFILES = \ boot.h \ protos.h -INSTALL_MI_LIST = +INSTALL_MI_LIST = ${DATAFILES} -INSTALL_MI_DIR = +INSTALL_MI_DIR = pexpert/machine EXPORT_MI_LIST = ${DATAFILES} diff --git a/pexpert/pexpert/pe_images.h b/pexpert/pexpert/pe_images.h index 92c741007..82d9c0cee 100644 --- a/pexpert/pexpert/pe_images.h +++ b/pexpert/pexpert/pe_images.h @@ -26,7 +26,7 @@ #include -#warning shared video_console.c +// XXX #warning shared video_console.c struct vc_progress_element { unsigned int version; unsigned int flags; diff --git a/pexpert/pexpert/pexpert.h b/pexpert/pexpert/pexpert.h index 8e86cd31a..fcb9435c1 100644 --- a/pexpert/pexpert/pexpert.h +++ b/pexpert/pexpert/pexpert.h @@ -57,6 +57,8 @@ void PE_init_platform( void PE_init_kprintf( boolean_t vm_initialized); +unsigned int PE_init_taproot(vm_offset_t *taddr); + extern void (*PE_kputc)(char c); void PE_init_printf( @@ -113,8 +115,6 @@ void kprintf( void init_display_putc(unsigned char *baseaddr, int rowbytes, int height); void display_putc(char c); -boolean_t PE_init_ethernet_debugger( void ); - enum { kPEReadTOD, kPEWriteTOD diff --git a/pexpert/pexpert/ppc/Makefile b/pexpert/pexpert/ppc/Makefile index 331d8819f..b39a66718 100644 --- a/pexpert/pexpert/ppc/Makefile +++ b/pexpert/pexpert/ppc/Makefile @@ -9,7 +9,6 @@ include $(MakeInc_def) DATAFILES = \ boot.h \ - dbdma.h \ interrupts.h \ powermac.h diff --git a/pexpert/pexpert/ppc/boot.h b/pexpert/pexpert/ppc/boot.h index abac020f4..498b4e0f9 100644 --- a/pexpert/pexpert/ppc/boot.h +++ b/pexpert/pexpert/ppc/boot.h @@ -60,8 +60,9 @@ typedef struct DRAMBank DRAMBank; /* Boot argument structure - passed into Mach kernel at boot time. 
*/ -#define kBootArgsVersion 1 #define kBootArgsRevision 1 +#define kBootArgsVersion1 1 +#define kBootArgsVersion2 2 typedef struct boot_args { unsigned short Revision; /* Revision of boot_args structure */ diff --git a/pexpert/pexpert/ppc/dbdma.h b/pexpert/pexpert/ppc/dbdma.h deleted file mode 100644 index 12c47f241..000000000 --- a/pexpert/pexpert/ppc/dbdma.h +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * @OSF_COPYRIGHT@ - */ - -#ifndef _PEXPERT_PPC_DBDMA_H_ -#define _PEXPERT_PPC_DBDMA_H_ - -#ifndef ASSEMBLER - -#define DBDMA_CMD_OUT_MORE 0 -#define DBDMA_CMD_OUT_LAST 1 -#define DBDMA_CMD_IN_MORE 2 -#define DBDMA_CMD_IN_LAST 3 -#define DBDMA_CMD_STORE_QUAD 4 -#define DBDMA_CMD_LOAD_QUAD 5 -#define DBDMA_CMD_NOP 6 -#define DBDMA_CMD_STOP 7 - -/* Keys */ - -#define DBDMA_KEY_STREAM0 0 -#define DBDMA_KEY_STREAM1 1 -#define DBDMA_KEY_STREAM2 2 -#define DBDMA_KEY_STREAM3 3 - -/* value 4 is reserved */ -#define DBDMA_KEY_REGS 5 -#define DBDMA_KEY_SYSTEM 6 -#define DBDMA_KEY_DEVICE 7 - -#define DBDMA_INT_NEVER 0 -#define DBDMA_INT_IF_TRUE 1 -#define DBDMA_INT_IF_FALSE 2 -#define DBDMA_INT_ALWAYS 3 - -#define DBDMA_BRANCH_NEVER 0 -#define DBDMA_BRANCH_IF_TRUE 1 -#define DBDMA_BRANCH_IF_FALSE 2 -#define DBDMA_BRANCH_ALWAYS 3 - -#define DBDMA_WAIT_NEVER 0 -#define DBDMA_WAIT_IF_TRUE 1 -#define DBDMA_WAIT_IF_FALSE 2 -#define DBDMA_WAIT_ALWAYS 3 - -/* Control register values (in little endian) */ - -#define DBDMA_STATUS_MASK 0x000000ff /* Status Mask */ -#define DBDMA_CNTRL_BRANCH 0x00000100 - /* 0x200 reserved */ -#define DBDMA_CNTRL_ACTIVE 0x00000400 -#define DBDMA_CNTRL_DEAD 0x00000800 -#define DBDMA_CNTRL_WAKE 0x00001000 -#define DBDMA_CNTRL_FLUSH 0x00002000 -#define DBDMA_CNTRL_PAUSE 0x00004000 -#define DBDMA_CNTRL_RUN 0x00008000 - -#define DBDMA_SET_CNTRL(x) ( ((x) | (x) << 16) ) -#define DBDMA_CLEAR_CNTRL(x) ( (x) << 16) - -#define POWERMAC_IO(a) (a) -#define DBDMA_REGMAP(channel) \ - (dbdma_regmap_t *)((v_u_char *) POWERMAC_IO(PCI_DMA_BASE_PHYS) \ - + (channel << 8)) - - -/* powermac_dbdma_channels hold the physical channel numbers for - * each dbdma device - */ - - -/* This struct is layout in little endian format */ - -struct dbdma_command { - unsigned long d_cmd_count; - unsigned long d_address; - unsigned long d_cmddep; - unsigned long d_status_resid; -}; - -typedef struct dbdma_command dbdma_command_t; - -#define DBDMA_BUILD(d, cmd, key, count, address, interrupt, wait, branch) {\ - DBDMA_ST4_ENDIAN(&d->d_address, address); \ - 
(d)->d_status_resid = 0; \ - (d)->d_cmddep = 0; \ - DBDMA_ST4_ENDIAN(&d->d_cmd_count, \ - ((cmd) << 28) | ((key) << 24) |\ - ((interrupt) << 20) |\ - ((branch) << 18) | ((wait) << 16) | \ - (count)); \ - } - -static __inline__ void -dbdma_st4_endian(volatile unsigned long *a, unsigned long x) -{ - __asm__ volatile - ("stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory"); - - return; -} - -static __inline__ unsigned long -dbdma_ld4_endian(volatile unsigned long *a) -{ - unsigned long swap; - - __asm__ volatile - ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a)); - - return swap; -} - -#define DBDMA_LD4_ENDIAN(a) dbdma_ld4_endian(a) -#define DBDMA_ST4_ENDIAN(a, x) dbdma_st4_endian(a, x) - -/* - * DBDMA Channel layout - * - * NOTE - This structure is in little-endian format. - */ - -struct dbdma_regmap { - unsigned long d_control; /* Control Register */ - unsigned long d_status; /* DBDMA Status Register */ - unsigned long d_cmdptrhi; /* MSB of command pointer (not used yet) */ - unsigned long d_cmdptrlo; /* LSB of command pointer */ - unsigned long d_intselect; /* Interrupt Select */ - unsigned long d_branch; /* Branch selection */ - unsigned long d_wait; /* Wait selection */ - unsigned long d_transmode; /* Transfer modes */ - unsigned long d_dataptrhi; /* MSB of Data Pointer */ - unsigned long d_dataptrlo; /* LSB of Data Pointer */ - unsigned long d_reserved; /* Reserved for the moment */ - unsigned long d_branchptrhi; /* MSB of Branch Pointer */ - unsigned long d_branchptrlo; /* LSB of Branch Pointer */ - /* The remaining fields are undefinied and unimplemented */ -}; - -typedef volatile struct dbdma_regmap dbdma_regmap_t; - -/* DBDMA routines */ - -void dbdma_start(dbdma_regmap_t *channel, dbdma_command_t *commands); -void dbdma_stop(dbdma_regmap_t *channel); -void dbdma_flush(dbdma_regmap_t *channel); -void dbdma_reset(dbdma_regmap_t *channel); -void dbdma_continue(dbdma_regmap_t *channel); -void dbdma_pause(dbdma_regmap_t *channel); - -dbdma_command_t *dbdma_alloc(int); /* Allocate command structures */ - -#endif /* ASSEMBLER */ - -#endif /* _PEXPERT_PPC_DBDMA_H_ */ diff --git a/pexpert/pexpert/ppc/interrupts.h b/pexpert/pexpert/ppc/interrupts.h index a54cba183..192d6f62d 100644 --- a/pexpert/pexpert/ppc/interrupts.h +++ b/pexpert/pexpert/ppc/interrupts.h @@ -27,7 +27,7 @@ #include /* for struct ppc_saved_state */ -extern void (PE_incoming_interrupt)(int type, struct ppc_saved_state *ssp, +extern void (PE_incoming_interrupt)(int type, ppc_saved_state_t *ssp, unsigned int dsisr, unsigned int dar); #endif /* POWERMAC_INTERRUPTS_H_ */ diff --git a/pexpert/pexpert/ppc/protos.h b/pexpert/pexpert/ppc/protos.h index a71a18069..2bd28cc51 100644 --- a/pexpert/pexpert/ppc/protos.h +++ b/pexpert/pexpert/ppc/protos.h @@ -24,18 +24,6 @@ */ #ifndef _PEXPERT_PPC_PROTOS_H_ #define _PEXPERT_PPC_PROTOS_H_ - -#define mtibatu(n, reg) __asm__ volatile("mtibatu " # n ", %0" : : "r" (reg)) -#define mtibatl(n, reg) __asm__ volatile("mtibatl " # n ", %0" : : "r" (reg)) - -#define mtdbatu(n, reg) __asm__ volatile("mtdbatu " # n ", %0" : : "r" (reg)) -#define mtdbatl(n, reg) __asm__ volatile("mtdbatl " # n ", %0" : : "r" (reg)) - -#define mfibatu(reg, n) __asm__ volatile("mfibatu %0, " # n : "=r" (reg)) -#define mfibatl(reg, n) __asm__ volatile("mfibatl %0, " # n : "=r" (reg)) - -#define mfdbatu(reg, n) __asm__ volatile("mfdbatu %0, " # n : "=r" (reg)) -#define mfdbatl(reg, n) __asm__ volatile("mfdbatl %0, " # n : "=r" (reg)) #define mtsprg(n, reg) __asm__ volatile("mtsprg " # n ", %0" : : "r" (reg)) #define mfsprg(reg, n) 
__asm__ volatile("mfsprg %0, " # n : "=r" (reg)) @@ -86,52 +74,26 @@ static __inline__ unsigned int byte_reverse_word(unsigned int word) { extern void initialize_serial(void * scc_phys_base); -//------------------------------------------------------------------------ -// from ppc/POWERMAC/device_tree.h -extern void ofw_init(void *); - //------------------------------------------------------------------------ // from osfmk/ppc/POWERMAC/video_console.c extern void initialize_screen(void *, unsigned int); -extern boolean_t vc_progress_initialize( void * desc, - const unsigned char * data, - const unsigned char * clut ); +extern void vc_progress_initialize( void * desc, + const unsigned char * data, + const unsigned char * clut ); extern void vc_display_icon( void * desc, const unsigned char * data ); +//------------------------------------------------------------------------- +// from osfmk/console/panic_dialog.c +extern void panic_ui_initialize(const unsigned char * clut); + // from osfmk/ppc/serial_console.c extern int switch_to_serial_console(void); extern void switch_to_old_console(int old_console); -//------------------------------------------------------------------------ -// from ppc/spl.h - /* Note also : if any new SPL's are introduced, please add to debugging list*/ -#define SPLOFF 0 /* all interrupts disabled TODO NMGS */ -#define SPLPOWER 1 /* power failure (unused) */ -#define SPLHIGH 2 /* TODO NMGS any non-zero, non-INTPRI value */ -#define SPLSCHED SPLHIGH -#define SPLCLOCK SPLSCHED /* hard clock */ -#define SPLVM 4 /* pmap manipulations */ -#define SPLBIO 8 /* block I/O */ -#define SPLIMP 8 /* network & malloc */ -#define SPLTTY 16 /* TTY */ -#define SPLNET 24 /* soft net */ -#define SPLSCLK 27 /* soft clock */ -#define SPLLO 32 /* no interrupts masked */ - -/* internal - masked in to spl level if ok to lower priority (splx, splon) - * the mask bit is never seen externally - */ -#define SPL_LOWER_MASK 0x8000 - -#define SPL_CMP_GT(a, b) ((unsigned)(a) > (unsigned)(b)) -#define SPL_CMP_LT(a, b) ((unsigned)(a) < (unsigned)(b)) -#define SPL_CMP_GE(a, b) ((unsigned)(a) >= (unsigned)(b)) -#define SPL_CMP_LE(a, b) ((unsigned)(a) <= (unsigned)(b)) - typedef unsigned spl_t; //------------------------------------------------------------------------ @@ -147,60 +109,38 @@ extern void cninit(void); * Temporarily stolen from Firmware.h */ -void dbgDisp(unsigned int port, unsigned int id, unsigned int data); -void dbgDispLL(unsigned int port, unsigned int id, unsigned int data); -void fwSCCinit(unsigned int port); - extern void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3); #if 1 /* (TEST/DEBUG) - eliminate inline */ extern __inline__ void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3) { - __asm__ volatile("mr r3,%0" : : "r" (item1) : "r3"); - __asm__ volatile("mr r4,%0" : : "r" (item2) : "r4"); - __asm__ volatile("mr r5,%0" : : "r" (item3) : "r5"); -#ifdef __ELF__ - __asm__ volatile("lis r0,CutTrace@h" : : : "r0"); - __asm__ volatile("ori r0,r0,CutTrace@l" : : : "r0"); -#else - __asm__ volatile("lis r0,hi16(CutTrace)" : : : "r0"); - __asm__ volatile("ori r0,r0,lo16(CutTrace)" : : : "r0"); -#endif - __asm__ volatile("sc"); - return; + __asm__ volatile("mr r3,%0" : : "r" (item1) : "r3"); + __asm__ volatile("mr r4,%0" : : "r" (item2) : "r4"); + __asm__ volatile("mr r5,%0" : : "r" (item3) : "r5"); + __asm__ volatile("lis r0,hi16(CutTrace)" : : : "r0"); + __asm__ volatile("ori r0,r0,lo16(CutTrace)" : : : "r0"); + __asm__ volatile("sc"); + return; } 
 #endif
 
 extern void DoPreempt(void);
 extern __inline__ void DoPreempt(void) {
 
-#ifdef __ELF__
-	__asm__ volatile("lis r0,DoPreemptCall@h" : : : "r0");
-	__asm__ volatile("ori r0,r0,DoPreemptCall@l" : : : "r0");
-#else
-	__asm__ volatile("lis r0,hi16(DoPreemptCall)" : : : "r0");
-	__asm__ volatile("ori r0,r0,lo16(DoPreemptCall)" : : : "r0");
-#endif
-	__asm__ volatile("sc");
-	return;
+	__asm__ volatile("lis r0,hi16(DoPreemptCall)" : : : "r0");
+	__asm__ volatile("ori r0,r0,lo16(DoPreemptCall)" : : : "r0");
+	__asm__ volatile("sc");
+	return;
 }
 
 extern void CreateFakeIO(void);
 extern __inline__ void CreateFakeIO(void) {
 
-#ifdef __ELF__
-	__asm__ volatile("lis r0,CreateFakeIOCall@h" : : : "r0");
-	__asm__ volatile("ori r0,r0,CreateFakeIOCall@l" : : : "r0");
-#else
-	__asm__ volatile("lis r0,hi16(CreateFakeIOCall)" : : : "r0");
-	__asm__ volatile("ori r0,r0,lo16(CreateFakeIOCall)" : : : "r0");
-#endif
-	__asm__ volatile("sc");
+	__asm__ volatile("lis r0,hi16(CreateFakeIOCall)" : : : "r0");
+	__asm__ volatile("ori r0,r0,lo16(CreateFakeIOCall)" : : : "r0");
+	__asm__ volatile("sc");
 	return;
 }
 
 extern void StoreReal(unsigned int val, unsigned int addr);
 extern void ReadReal(unsigned int raddr, unsigned int *vaddr);
-extern void ClearReal(unsigned int addr, unsigned int lgn);
-extern void LoadDBATs(unsigned int *bat);
-extern void LoadIBATs(unsigned int *bat);
 extern unsigned int LLTraceSet(unsigned int tflags);
 extern void GratefulDebInit(void);
 extern void GratefulDebDisp(unsigned int coord, unsigned int data);
diff --git a/pexpert/pexpert/protos.h b/pexpert/pexpert/protos.h
index 8bd07d0ec..4cb806bfb 100644
--- a/pexpert/pexpert/protos.h
+++ b/pexpert/pexpert/protos.h
@@ -49,6 +49,7 @@ extern char *strcpy(char *dest, const char *src);
 extern char *strncpy(char *dest, const char *src, unsigned long n);
 extern void interrupt_enable(void);
 extern void interrupt_disable(void);
+extern void bcopy(void * from, void * to, int size);
 #if __ppc__
 extern void bcopy_nc(char *from, char *to, int size); /* uncached-safe */
 #else
diff --git a/pexpert/ppc/pe_identify_machine.c b/pexpert/ppc/pe_identify_machine.c
index 2266416d7..558d6bd65 100644
--- a/pexpert/ppc/pe_identify_machine.c
+++ b/pexpert/ppc/pe_identify_machine.c
@@ -138,50 +138,51 @@ vm_offset_t get_io_base_addr(void)
   return 0;
 }
 
-boolean_t PE_init_ethernet_debugger(void)
+vm_offset_t PE_find_scc(void)
 {
-  boolean_t result;
-#if 0
-  DTEntry	entryP;
-  vm_offset_t	*address;
-  unsigned char	*netAddr;
-  int		size;
-  vm_offset_t	io;
-
-  if ((io = get_io_base_addr())
-  && (DTFindEntry("name", "mace", &entryP) == kSuccess)
-  && (DTGetProperty(entryP, "local-mac-address", (void **)&netAddr, &size) == kSuccess)
-  && (DTGetProperty(entryP, "reg", (void **)&address, &size) == kSuccess)
-  && (size == (2 * 3 * sizeof(vm_offset_t)) ))
-  {
-    extern boolean_t kdp_mace_init(void *baseAddresses[3],
-                                   unsigned char *netAddr);
-    void *maceAddrs[3];
-
-    // address calculation not correct
-    maceAddrs[0] = (void *) io_map(io + address[0], address[1]);
-    maceAddrs[1] = (void *) io_map(io + address[2], 0x1000);
-    maceAddrs[2] = (void *) (((vm_offset_t)maceAddrs[1])
-                            + address[4] - address[2]);
-    result = kdp_mace_init( maceAddrs, netAddr );
-
-  } else
-#endif
-    result = FALSE;
-
-  return result;
+	vm_offset_t	io, sccadd;
+	DTEntry		entryP;
+	vm_offset_t	*sccregs;
+	unsigned int	sccrsize;
+
+	if(!(io = get_io_base_addr())) {		/* Get the I/O controller base address */
+		return (vm_offset_t)0;			/* Hmmm, no I/O??? What gives??? How'd we even boot? */
+	}
+
+
+/* Note: if we find an escc-legacy, we need to kind of hack because it can be either an offset
+   into the iobase or the actual address itself. ORing the two should provide the correct
+   address for either */
+
+	sccadd = 0;					/* Assume none for now */
+
+	if(DTFindEntry("name", "escc-legacy", &entryP) == kSuccess) {	/* Find the old fashioned serial port */
+		if (DTGetProperty(entryP, "reg", (void **)&sccregs, &sccrsize) == kSuccess) {	/* Do we have some registers? */
+			sccadd = ((vm_offset_t)*sccregs | io);	/* Get the address */
+		}
+	}
+
+	if(DTFindEntry("name", "escc", &entryP) == kSuccess) {	/* Well, see if we just have the new fangled one */
+		sccadd = io + 0x12000;			/* Yeah, but still return the oldie goldie... */
+	}
+
+	return sccadd;					/* Return it if you found it */
 }
 
-vm_offset_t PE_find_scc(void)
+unsigned int PE_init_taproot(vm_offset_t *taddr)
 {
-  vm_offset_t io;
-  DTEntry     entryP;
-
-  if ((io = get_io_base_addr())
-  && (DTFindEntry("name", "escc", &entryP) == kSuccess))
-    io += 0x12000;	/* Offset to legacy SCC Registers */
-  else
-    io = 0;
-
-  return io;
+	DTEntry		entryP;
+	vm_offset_t	*tappdata;
+	unsigned int	tappsize;
+
+
+	if(DTFindEntry("name", "memory-map", &entryP) != kSuccess) return 0;	/* no memory map */
+
+	if (DTGetProperty(entryP, "TapRoot", (void **)&tappdata, &tappsize) != kSuccess) return 0;	/* No TapRoot */
+
+	tappdata[1] = (tappdata[1] + 4095) & -4096;	/* Make sure this is a whole page */
+
+	*taddr = io_map_spec(tappdata[0], tappdata[1]);	/* Map it in and return the address */
+	tappdata[0] = *taddr;				/* Also change property */
+	return tappdata[1];				/* And the size */
 }
diff --git a/pexpert/ppc/pe_init.c b/pexpert/ppc/pe_init.c
index 0172cbd36..2dcfd4ce9 100644
--- a/pexpert/ppc/pe_init.c
+++ b/pexpert/ppc/pe_init.c
@@ -122,16 +122,11 @@ void PE_init_iokit(void)
     kern_return_t	ret;
     DTEntry		entry;
     int			size;
-    int			i;
     void **		map;
 
     PE_init_kprintf(TRUE);
     PE_init_printf(TRUE);
 
-    // init this now to get mace debugger for iokit startup
-    PE_init_ethernet_debugger();
-
     if( kSuccess == DTLookupEntry(0, "/chosen/memory-map", &entry)) {
 
 	boot_progress_element * bootPict;
@@ -149,12 +144,10 @@ void PE_init_iokit(void)
             default_noroot_data = &bootPict->data[0];
         }
     }
+    panic_ui_initialize( (unsigned char *) appleClut8 );
     vc_progress_initialize( &default_progress, default_progress_data,
                             (unsigned char *) appleClut8 );
-
     PE_initialize_console( (PE_Video *) 0, kPEAcquireScreen );
-
-    ret = StartIOKit( PE_state.deviceTreeHead, PE_state.bootArgs,
-                      (void *)0, (void *)0);
+    ret = StartIOKit( PE_state.deviceTreeHead, PE_state.bootArgs, (void *)0, (void *)0);
 }
 
 void PE_init_platform(boolean_t vm_initialized, void *_args)
@@ -195,15 +188,16 @@ void PE_init_platform(boolean_t vm_initialized, void *_args)
 
 void PE_create_console( void )
 {
-    if (PE_state.video.v_display)
-        PE_initialize_console( &PE_state.video, kPEGraphicsMode );
-    else
-        PE_initialize_console( &PE_state.video, kPETextMode );
+    if ( PE_state.video.v_display )
+        PE_initialize_console( &PE_state.video, kPEGraphicsMode );
+    else
+        PE_initialize_console( &PE_state.video, kPETextMode );
 }
 
 int PE_current_console( PE_Video * info )
 {
     *info = PE_state.video;
+    info->v_baseAddr = 0;
 
     return( 0);
 }
diff --git a/pexpert/ppc/pe_kprintf.c b/pexpert/ppc/pe_kprintf.c
index 335d52db4..02a62efd6 100644
--- a/pexpert/ppc/pe_kprintf.c
+++ b/pexpert/ppc/pe_kprintf.c
@@ -48,34 +48,28 @@ void (*PE_kputc)(char c) = 0;
 
 unsigned int disableSerialOuput = TRUE;
 
+vm_offset_t	scc = 0;
 
-static struct slock kprintf_lock;
+struct slock kprintf_lock;
 void PE_init_kprintf(boolean_t vm_initialized)
 {
-	static vm_offset_t scc;
 	unsigned int	boot_arg;
 
 	if (PE_state.initialized == FALSE)
 		panic("Platform Expert not initialized");
 
-	if (!vm_initialized)
-	{
-		if (PE_parse_boot_arg("debug", &boot_arg))
-			if(boot_arg & DB_KPRT) disableSerialOuput = FALSE;
+	if (PE_parse_boot_arg("debug", &boot_arg))
+		if(boot_arg & DB_KPRT) disableSerialOuput = FALSE;
 
-		if( (scc = PE_find_scc()))
-		{
-			initialize_serial( (void *) scc );
+	if( (scc = PE_find_scc())) {			/* See if we can find the serial port */
+		scc = io_map_spec(scc, 0x1000);		/* Map it in */
+		initialize_serial((void *)scc);		/* Start up the serial driver */
 			PE_kputc = serial_putc;
 
 			simple_lock_init(&kprintf_lock, 0);
-		} else
-			PE_kputc = cnputc;
-
-	} else if( scc){
-		initialize_serial( (void *) io_map( scc, 0x1000) );
-	}
+	} else
+		PE_kputc = cnputc;
 
 #if 0
 /*
-- 
2.45.2
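
Two idioms in the hunks above are worth unpacking for readers following the patch.

First, the deleted dbdma_ld4_endian()/dbdma_st4_endian() helpers wrap the PowerPC
lwbrx/stwbrx instructions (byte-reversed load/store indexed), which let the
big-endian kernel access the little-endian DBDMA register block in a single memory
operation. A rough portable C sketch of the same access pattern follows; the names
swap32, dbdma_read, and dbdma_write are illustrative only and do not appear in xnu:

	#include <stdint.h>

	/* Reverse the four bytes of a 32-bit value; lwbrx/stwbrx fold this
	 * swap into the load or store itself on PowerPC. */
	static inline uint32_t swap32(uint32_t x)
	{
		return ((x & 0x000000ffU) << 24) |
		       ((x & 0x0000ff00U) <<  8) |
		       ((x & 0x00ff0000U) >>  8) |
		       ((x & 0xff000000U) >> 24);
	}

	/* Read a little-endian device register from big-endian code. */
	static inline uint32_t dbdma_read(volatile uint32_t *reg)
	{
		return swap32(*reg);
	}

	/* Write a little-endian device register from big-endian code. */
	static inline void dbdma_write(volatile uint32_t *reg, uint32_t val)
	{
		*reg = swap32(val);
	}

Second, in the new PE_init_taproot(), the expression (tappdata[1] + 4095) & -4096
rounds the mapping size up to a whole 4 KB page: -4096 is 0xFFFFF000 in 32-bit
two's complement, so adding 4095 and masking clears the low 12 bits without ever
rounding down an already-aligned size. Assuming a 4 KB page size, the same idiom
as a macro (PAGE_ROUND_4K is a hypothetical name, not an xnu symbol):

	/* Round n up to the next multiple of 4096; a no-op when n is already
	 * page-aligned, e.g. PAGE_ROUND_4K(1) == 4096, PAGE_ROUND_4K(4096) == 4096. */
	#define PAGE_ROUND_4K(n)	(((n) + 4095u) & ~4095u)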